Dataset schema (29 columns, one row per source file):

- blob_id: string (length 40)
- directory_id: string (length 40)
- path: string (length 2–616)
- content_id: string (length 40)
- detected_licenses: list (length 0–69)
- license_type: string (2 classes)
- repo_name: string (length 5–118)
- snapshot_id: string (length 40)
- revision_id: string (length 40)
- branch_name: string (length 4–63)
- visit_date: timestamp[us]
- revision_date: timestamp[us]
- committer_date: timestamp[us]
- github_id: int64 (2.91k–686M; ⌀ = null present)
- star_events_count: int64 (0–209k)
- fork_events_count: int64 (0–110k)
- gha_license_id: string (23 classes)
- gha_event_created_at: timestamp[us]
- gha_created_at: timestamp[us]
- gha_language: string (213 classes)
- src_encoding: string (30 classes)
- language: string (1 class)
- is_vendor: bool (2 classes)
- is_generated: bool (2 classes)
- length_bytes: int64 (2–10.3M)
- extension: string (246 classes)
- content: string (length 2–10.3M)
- authors: list (length 1–1)
- author_id: string (length 0–212)
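Below is a minimal sketch of how rows with this schema might be streamed for inspection. It assumes a Hugging Face-style Parquet dump; the "data/*.parquet" path is a placeholder, not taken from this dump.

# minimal sketch: stream a couple of rows and print a few metadata columns
from datasets import load_dataset

ds = load_dataset("parquet", data_files="data/*.parquet", split="train", streaming=True)
for row in ds.take(2):
    print(row["repo_name"], row["path"], row["license_type"], row["length_bytes"])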
6df54212edb20bd5f1d368dfb8c1921adce7b53d
|
2491a35f09dd8067b5d4cda0872956baaedcf8f6
|
/sortingcode/parseDPAFR.py
|
d226badf870bc396160cd80789102f06b1ada15e
|
[
"BSD-2-Clause"
] |
permissive
|
xyza11808/MATLAB
|
2d9e4a2a86b7c355237ed64cf42fdeb0f67b5ddf
|
efd0db355a468f1984e97f5c9cf8d852af3ca607
|
refs/heads/master
| 2023-05-11T14:06:56.614646
| 2023-05-02T01:53:04
| 2023-05-02T01:53:04
| 60,267,919
| 2
| 2
| null | 2016-09-20T06:23:36
| 2016-06-02T13:50:20
|
Matlab
|
UTF-8
|
Python
| false
| false
| 10,499
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 16 17:24:12 2019
@author: Libra
"""
import numpy as np
# import phylib.utils._misc as phyutil
import pandas as pd
import h5py
import matplotlib.pyplot as plt
import matplotlib
FR_Th = 1.0
def trialAlign(trials, oneTS):
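    # spike timestamps are in 30 kHz sample units (s1s = 30000 below): keep spikes
    # from 5 s before the first trial to 10 s after the last, then make each
    # spike time relative to its trial's onset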
oneTS = oneTS[
np.bitwise_and(
oneTS >= trials[0, 0] - 30000 * 5, oneTS <= (trials[-1, 0] + 30000 * 10)
)
] # only the performing period
TSidx = 0
trial_index = 0
trial_id = np.ones_like(oneTS)
while TSidx < len(oneTS) and trial_index < len(trials):
if oneTS[TSidx] < trials[trial_index, 0] + (trials[trial_index, 7] + 8) * 30000:
oneTS[TSidx] -= trials[trial_index, 0]
trial_id[TSidx] = trial_index
TSidx += 1
else:
trial_index += 1
return (oneTS, trial_id)
def baselineVector(oneTS, trial_id, trials, SU_id):
tIdices = range(trials.shape[0])
base = []
for trial_index in tIdices:
for binCount in np.histogram(
oneTS[trial_id == trial_index], bins=4, range=(-60000, -30000)
)[0]:
base.append(binCount)
if len(base) > 0 and np.std(base):
return (np.mean(base), np.std(base))
# breakpoint()
print("Error calculating base vector unit#%d" % (SU_id,))
return (0, 32767)
def toHist(trials, oneTS, trial_id, sample, delay):
sel = np.nonzero(np.bitwise_and(trials[:, 4] == sample, trials[:, 7] == delay))[0]
return (
np.histogram(
oneTS[np.isin(trial_id, sel)],
np.linspace(-60000, 30000 * (delay + 6), num=(delay + 8) * 4 + 1),
)[0]
) / len(sel)
def toHistByPair(trials, oneTS, trial_id, isPaired, delay):
if isPaired:
sel = np.nonzero(
np.bitwise_and(trials[:, 4] != trials[:, 5], trials[:, 7] == delay)
)[0]
else:
sel = np.nonzero(
np.bitwise_and(trials[:, 4] == trials[:, 5], trials[:, 7] == delay)
)[0]
return (
(
np.histogram(
oneTS[np.isin(trial_id, sel)],
np.linspace((delay - 1) * 30000, (delay + 6) * 30000, num=7 * 4 + 1),
)[0]
),
len(sel),
)
def alignHeatmap(spkTS, spkCluster, unitInfo, trials):
# bySample43 = []
bySample46 = []
# bySample83 = []
bySample86 = []
paired = []
nonpaired = []
baseVecAll = []
depth = []
s1s = 30000
spkNThresh = spkTS[-1] / s1s * FR_Th
for SU_id in unitInfo.index:
# breakpoint()
wf = unitInfo.loc[SU_id].get("group") == "good" or (
np.isnan(unitInfo.loc[SU_id]['group'])
and unitInfo.loc[SU_id]["KSLabel"] == "good"
)
spkCount = unitInfo.loc[SU_id]["n_spikes"]
if spkCount > spkNThresh and wf:
oneTSAll = (spkTS[spkCluster == SU_id]).astype(
"int64"
) # oneTSAll, all time stamp of a SU
(oneTS, trial_id) = trialAlign(trials, oneTSAll)
baseVec = baselineVector(oneTS, trial_id, trials, SU_id)
baseVecAll.append(baseVec)
# bySample43.append(toHist(trials, oneTS, trial_id, 4, 3))
bySample46.append(toHist(trials, oneTS, trial_id, 4, 6))
# bySample83.append(toHist(trials, oneTS, trial_id, 8, 3))
bySample86.append(toHist(trials, oneTS, trial_id, 8, 6))
# (p3, t3) = toHistByPair(trials, oneTS, trial_id, True, 3)
(p6, t6) = toHistByPair(trials, oneTS, trial_id, True, 6)
paired.append(np.array(p6) / t6)
# paired.append((np.array(p3) + np.array(p6)) / (t3 + t6))
# (n3, tn3) = toHistByPair(trials, oneTS, trial_id, False, 3)
(n6, tn6) = toHistByPair(trials, oneTS, trial_id, False, 6)
            nonpaired.append(np.array(n6) / tn6)
# nonpaired.append((np.array(n3) + np.array(n6)) / (tn3 + tn6))
depth.append(unitInfo.loc[SU_id]["depth"])
depth = np.array(depth)
if depth.shape[0] > 0:
baseVecAll = np.array(baseVecAll)
dIdx = np.argsort(depth)
# bySample43 = np.array(bySample43)
bySample46 = np.array(bySample46)
# by1Sample83 = np.array(bySample83)
bySample86 = np.array(bySample86)
paired = np.array(paired)
nonpaired = np.array(nonpaired)
return (
(
# bySample43[dIdx, :],
bySample46[dIdx, :],
# bySample83[dIdx, :],
bySample86[dIdx, :],
),
(paired[dIdx, :], nonpaired[dIdx, :]),
baseVecAll[dIdx],
depth[dIdx],
)
else:
return ([], [], [], [])
def plotOne(data, delay, ax, ylbl):
im = plt.imshow(data, cmap="jet", aspect="auto", vmin=-3, vmax=3)
if delay == 6:
[
plt.plot([x, x], ax.get_ylim(), "-w")
for x in np.array([2, 3, 9, 10]) * 4 - 0.5
]
ax.set_xticks(np.array([2, 7, 12]) * 4 - 0.5)
ax.set_xticklabels([0, 5, 10])
# ax.set_xlabel('Time (s)')
elif delay == 3:
[
plt.plot([x, x], ax.get_ylim(), "-w")
for x in np.array([2, 3, 6, 7]) * 4 - 0.5
]
ax.set_xticks(np.array([2, 7]) * 4 - 0.5)
ax.set_xticklabels([0, 5])
if ylbl:
ax.set_ylabel("Unit #")
return im
def plotOneSel(A, B, delay, ax, ylbl):
plt.imshow((B - A) / (B + A), cmap="jet", aspect="auto", vmin=-1, vmax=1)
# if delay==6:
# [plt.plot([x,x],ax.get_ylim(),'-w') for x in np.array([2,3,9,10])*4-0.5]
# ax.set_xticks(np.array([2,7,12])*4-0.5)
# ax.set_xticklabels([0,5,10])
#
#
# elif delay==3:
# [plt.plot([x,x],ax.get_ylim(),'-w') for x in np.array([2,3,6,7])*4-0.5]
# ax.set_xticks(np.array([2,7])*4-0.5)
# ax.set_xticklabels([0,5])
[plt.plot([x, x], ax.get_ylim(), "-w") for x in np.array([2, 3]) * 4 - 0.5]
ax.set_xticks(np.array([2, 6]) * 4 - 0.5)
ax.set_xticklabels(["S+0", "S+4"])
if ylbl:
ax.set_ylabel("Unit #")
ax.set_xlabel("Time (s)")
def plotOneSelByPair(A, B, ax):
im = plt.imshow((B - A) / (B + A), cmap="jet", aspect="auto", vmin=-1, vmax=1)
[plt.plot([x, x], ax.get_ylim(), "-w") for x in np.array([2, 3]) * 4 - 0.5]
ax.set_xticks(np.array([2, 7]) * 4 - 0.5)
ax.set_xticklabels(["T+0", "T+5"])
ax.set_xlabel("Time (s)")
return im
def plotBehavior(trials, ax):
correct = np.logical_xor(trials[:, 4] == trials[:, 5], trials[:, 6] == 1)
licks = trials[:, 6] == 1
perf = []
lickPct = []
for ubound in range(16, len(correct), 16):
perf.append(np.mean(correct[ubound - 16 : ubound]))
lickPct.append(np.mean(licks[ubound - 16 : ubound]))
plt.plot(perf, "-k", label="correct rate")
plt.plot(lickPct, "--r", label="lick rate")
ax.legend()
ax.set_ylim(0, 1.0)
ax.set_ylabel("correct rate, lick rate")
ax.set_xlabel("block of 16 trials")
ax.set_title("behavior performance")
def plotHeatmap(trials, raw, byPaired, base, depth):
import os
cwd = os.getcwd()
leafPath = os.path.split(cwd)[1]
fh = plt.figure(3, figsize=[8, 10])
# ax = plt.subplot(3, 3, 1)
# plotOne(((raw[0].transpose() - base[:, 0]) / base[:, 1]).transpose(), 3, ax, True)
# ax.set_title("S1 3s delay")
# ax = plt.subplot(3, 3, 2)
# im = plotOne(
# ((raw[2].transpose() - base[:, 0]) / base[:, 1]).transpose(), 3, ax, False
# )
# plt.colorbar(im, ticks=[-3, 0, 3], format="%d")
# ax.set_title("S2 3s delay")
ax = plt.subplot(3, 3, 1)
plotOne(((raw[0].transpose() - base[:, 0]) / base[:, 1]).transpose(), 6, ax, True)
ax.set_title("S1 6s delay")
ax = plt.subplot(3, 3, 2)
im = plotOne(
((raw[1].transpose() - base[:, 0]) / base[:, 1]).transpose(), 6, ax, False
)
plt.colorbar(im, ticks=[-3, 0, 3], format="%d")
ax.set_title("S2 6s delay")
# depth plot
ax = plt.subplot(3, 3, 6)
plt.plot(depth)
ax.set_ylabel("distance from tip (um)")
ax.set_xlabel("unit #")
plt.minorticks_on()
plt.grid(b=True, which="both")
ax = plt.subplot(3, 3, 4)
im = plotOneSel(
raw[0][:, 0:24], raw[1][:, 0:24],
# raw[0][:, 0:24] + raw[1][:, 0:24],
# raw[2][:, 0:24] + raw[3][:, 0:24],
6,
ax,
False,
)
ax.set_title("sample selectivity")
# plt.colorbar(im,ticks=[-1,0,1],format='%d')
ax = plt.subplot(3, 3, 5)
im = plotOneSelByPair(byPaired[0], byPaired[1], ax)
ax.set_title("pair/non-pair selectivity")
plt.colorbar(im, ticks=[-1, 0, 1], format="%d")
ax = plt.subplot(3, 3, 3)
plotBehavior(trials, ax)
fh.suptitle(leafPath.replace("_cleaned", ""))
plt.tight_layout(rect=[0, 0, 1, 0.95])
#plt.show()
# breakpoint()
fh.savefig(leafPath.replace("_cleaned", "") + ".png", dpi=300, bbox_inches="tight")
# matplotlib.rcParams['pdf.fonttype'] = 42
# matplotlib.rcParams['ps.fonttype'] = 42
# fh.savefig(leafPath.replace("_cleaned", "") + ".pdf", dpi=300, bbox_inches="tight")
plt.close("all")
# return (fh,ax)
def runParse():
# s1s=30000
spkTS = np.load("spike_times.npy")
spkCluster = np.load("spike_clusters.npy")
unitInfo = pd.read_csv("cluster_info.tsv",sep='\t',index_col='id')
trials = np.empty([0])
with h5py.File("events.hdf5", "r") as fe:
dset = fe["trials"]
trials = np.array(dset, dtype="int32")
events=fe['events']
(raw, byPaired, baseVec, depth) = alignHeatmap(spkTS, spkCluster, unitInfo, trials)
if raw and byPaired:
plotHeatmap(trials, raw, byPaired, baseVec, depth)
else:
print("empty SU list\n")
if __name__ == "__main__":
# import os
# os.chdir('K:/neupix/191015-DPA-Learning2_29_g0_imec0_cleaned')
#
s1s = 30000
spkTS = np.load("spike_times.npy")
spkCluster = np.load("spike_clusters.npy")
unitInfo = pd.read_csv("cluster_info.tsv",sep='\s+',index_col='id')
trials = np.empty([0])
with h5py.File("events.hdf5", "r") as fe:
dset = fe["trials"]
trials = np.array(dset, dtype="int64")
(raw, byPaired, baseVec, depth) = alignHeatmap(spkTS, spkCluster, unitInfo, trials)
    if raw and byPaired:
plotHeatmap(trials, raw, byPaired, baseVec, depth)
else:
print("empty SU list\n")
|
[
"xyza11808@outlook.com"
] |
xyza11808@outlook.com
|
e1a8d279b208225bc5d4daa8f80ee417e088ef6b
|
c5291e50a3c72c885922378573a0ad423fcedf05
|
/change and balance.py
|
177775c7a50892a2f54886b4245a2dc2abf16790
|
[] |
no_license
|
raghurammanyam/django-projects
|
bcc3ed6285882af437a2995514cef33760fb063e
|
dd20ae354f7f111a0176a1cc047c099bd23e9f05
|
refs/heads/master
| 2022-12-12T19:22:31.698114
| 2018-12-09T09:41:45
| 2018-12-09T09:41:45
| 137,443,359
| 0
| 0
| null | 2022-11-22T03:01:07
| 2018-06-15T05:08:15
|
Python
|
UTF-8
|
Python
| false
| false
| 3,697
|
py
|
# coding: utf-8
# In[1]:
from collections import Counter
coins = [2000, 500, 100, 50, 10, 1]
stock={2000:6,500:2,100:20,50:15,10:100,1:200}
def fun():
bill=int(input("your bill in rupees"))
givenamount=0
while givenamount<bill:
givenamount+=int(input("given amount in rupees"))
if givenamount>=bill:
change=givenamount-bill
coinsReturned = []
for i in coins:
if stock[i]>0:
while (change>=i and stock[i]>0):
change=change-i
coinsReturned.append(i)
stock[i]-=1
        print(stock)
        print(coinsReturned)
def update():
total=[]
full_amount=[]
d=int(input("enter two thousand notes:"))
f=int(input("enter five hundred notes:"))
l=int(input("enter hundred notes:"))
k=int(input("enter fifty rupee notes:"))
j=int(input("enter ten rupee:"))
p=int(input("enter one rupee:"))
new_collec={2000:d,500:f,100:l,50:k,10:j,1:p}
for i,j in new_collec.items():
mul=i*j
total.append(mul)
print("Total deposit amount:",sum(total))
updated=dict(Counter(stock)+Counter(new_collec))
stock.update(updated)
for u,l in stock.items():
apx=u*l
full_amount.append(apx)
print("Total updated amount:",sum(full_amount))
fun()
# In[27]:
update()
# fun()
# In[25]:
fun()
# In[19]:
update()
# In[20]:
fun()
# In[17]:
posting={'Rs500': ['23'],'Rs10': ['4'], 'Rs200': ['3'], 'Rs1': ['6'], 'Rs2000': ['23'], 'Rs50': ['5'], 'Rs100': ['4'], 'Rs5': ['3'], 'Rs2': ['2']}
for x,y in posting.items():
for i in range(0,len(y)):
y[i]=int(y[i])
#print(x,y)
for s in y:
s=int(s)
b={x:s}
posting.update(b)
print(posting)
# In[14]:
posting={'Rs500': ['23'],'Rs10': ['4'], 'Rs200': ['3'], 'Rs1': ['6'], 'Rs2000': ['23'], 'Rs50': ['5'], 'Rs100': ['4'], 'Rs5': ['3'], 'Rs2': ['2']}
for key,values in posting.items():
val=values
for x in val:
y=int(x)
d={key:y}
posting.update(d)
print(posting)
# In[68]:
d={'given_amount': ['50'], 'bill_amount': ['8']}
li=[]
x={}
for key,values in d.items():
val=values
for x in val:
y=int(x)
d={key:y}
        li.append(y)
l=li[0]-li[1]
d['change']=l
print(l)
print(d)
# In[3]:
c={'Rs5': 14, 'Rs10': 16, 'Rs200': 32, 'Rs1': 71, 'Rs50': 12, 'Rs100': 18, 'Rs500': 28, 'Rs2': 18, 'Rs2000': 8, 'id': 1}
l=[]
a=c.keys()
print((a))
b=c.values()
print(b)
import re
d=[re.findall(r'(\w+?)(\d+)', key)[0] for key,values in c.items() if key!='id']
print(d)
for w,q in d:
l.append(int(q))
print("fu:",sorted(l,reverse=True))
print(d)
# In[ ]:
h=dict(d)
print(h)
for x,y in h.items():
j=y
for u in j:
v=int(u)
l.append(v)
print(v)
# In[57]:
c={'Rs5': 14, 'Rs10': 16, 'Rs200': 32, 'Rs1': 71, 'Rs50': 12, 'Rs100': 18, 'Rs500': 28, 'Rs2': 18, 'Rs2000': 8, 'id': 1}
d=[re.findall(r'(\w+?)(\d+)', key)[0] for key,values in c.items() if key!='id']
print(d)
l=[]
p=[]
z=c.values()
print(z)
for w,q in d:
l.append(int(q))
p.append(w)
print("rupees:",p)
print("fu:",(l))
dic=dict(zip(l,z))
print("full:",dic)
print("dic:",dic.items())
i={'Rs' + str(key):values for key,values in dic.items()}
print("sdfhd:",i)
f={ key:values for key,values in c.items() if key!='id' }
print("list :",c.items())
print(f)
# In[ ]:
|
[
"manyamraghuram@gmail.com"
] |
manyamraghuram@gmail.com
|
9d148e5fb969567a9d524538d9ca44bc2836fdaf
|
a0b55f596ed0219d85514cc0ee8b36c093d81d4c
|
/products/migrations/0001_initial.py
|
687d008c6d172364d7c15260681785a538e6d2a5
|
[
"MIT"
] |
permissive
|
ArRosid/ECommerceAPI
|
525e671886486e72bd38c7c8d58821a82de0380f
|
f683eed52e3591f6f45b955fb14f34154fa434ca
|
refs/heads/master
| 2020-12-12T00:09:41.497127
| 2020-01-17T10:23:37
| 2020-01-17T10:23:37
| 233,991,814
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 749
|
py
|
# Generated by Django 3.0.2 on 2020-01-16 07:02
import core.utils
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('description', models.TextField()),
('price', models.DecimalField(decimal_places=2, max_digits=20)),
('image', models.ImageField(blank=True, null=True, upload_to=core.utils.upload_image_path)),
],
),
]
|
[
"ahmadrosid30121997@gmail.com"
] |
ahmadrosid30121997@gmail.com
|
07e3831d14c2a6d833cc80356bfca2a5e703195d
|
b4aa99d572df2c38bfae52df42694cdac52fa3ef
|
/poorna9.py
|
f5c9fa8f3d595c926c0a23df9421db0011d22915
|
[] |
no_license
|
pedireddy/guvi1
|
bb6e6ec29ed113dfecde5f7f953391ebc6dc8e12
|
62ffa2af3fed94be6304d1a36ecca0d81656582b
|
refs/heads/master
| 2020-03-24T06:42:29.155798
| 2018-09-01T06:43:38
| 2018-09-01T06:43:38
| 142,538,583
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 46
|
py
|
n = int(input())
x = n * (n + 1) // 2
print(x)
|
[
"noreply@github.com"
] |
pedireddy.noreply@github.com
|
d6ca4558b4afa8a1ad984676bf5c4a45b2e60538
|
c7ddb449783b3cf47c2ad135f69a994b17872a63
|
/Server.py
|
b7055d328543540b89076a39c8a2d04463859c33
|
[
"MIT"
] |
permissive
|
kumarankitapp/Secure-Instant-messenger
|
ec84c7703e4b97ec2bfd9364a28b8f9283bb9c7e
|
5741265026455a02e0e6b9613cce7e71a176088f
|
refs/heads/master
| 2022-11-01T05:33:17.894960
| 2022-09-28T15:52:34
| 2022-09-28T15:52:34
| 159,742,053
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,349
|
py
|
import socket
import sys
client_list = {}
buff=4096
try:
server_sock=socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
host=socket.gethostname()
host_ip=socket.gethostbyname(host) #getting server host_ip for cross machine usage
except socket.error:
print 'Something went wrong, please try again'
sys.exit(1)
def server_setup(host_addr):
server_sock.bind(host_addr)
def client_details(client_data): #passing all client data in the form username:message
#print client_data
client_port=client_data.split(',')[3]
client_port=client_port[:-2]
client_port=int(client_port.strip())
client_ip=client_data.split(',')[2]
client_ip=client_ip[2:]
client_ip=client_ip[1:-1]
client_ip=str(client_ip)
username=client_data.split(',')[0]
username=username[2:]
username=username.strip()
if client_data.find('SIGN-IN')!=-1:
if client_list.has_key(username):
server_sock.sendto("+> User already Signed In", ((client_ip,client_port)))
else:
client_list[username]=str(client_ip)+':'+str(client_port)
server_sock.sendto('success',((client_ip),client_port))
return
elif client_data.find('list')!=-1:
list_to_send = list(set(client_list.keys()) - set([username]))
list_name=''
for i in list_to_send[:]:
list_name=list_name+' '+i
server_sock.sendto(str(list_name),((client_ip),client_port))
#print str(type(client_ip)) + str(type(client_port))
return
elif client_data.find('send')!=-1:
try:
user_info_to_send= client_list[username]
server_sock.sendto("+>" + str(user_info_to_send), ((client_ip),client_port))
except:
server_sock.sendto("+> Username not present",((client_ip),client_port))
return
def main():
if len(sys.argv)==3 and sys.argv[1]== '-sp':
print 'Server Initialized...'
else:
print 'Usage server.py -sp <port number>'
sys.exit(1)
    try:
        port = int(sys.argv[2])
    except ValueError:
        print 'Please enter a valid port, Usage ChatServer.py -sp <port number>'
        sys.exit(1)
host_addr=host_ip,port
server_setup(host_addr)
while 1:
client_data = str(server_sock.recvfrom(buff))
client_details(client_data)
main()
|
[
"AK@Ankits-MacBook-Air.local"
] |
AK@Ankits-MacBook-Air.local
|
6bb26cbecb57b2bd57f854b5e6d3d006cc8f2152
|
fde4c7e141a671e30505e44fd7ec523e0f5b5595
|
/nicolib/_compat.py
|
78b06a35b9cbbb593008ca9ee5c729865ee9d4fc
|
[
"MIT"
] |
permissive
|
tomoh1r/nicolib.py
|
83d956c33f7b812d0f81afee7bca35eb0542e13b
|
7d2020c43d8e966b64bc9b8d5f4a922aa65fb4cf
|
refs/heads/master
| 2021-05-29T13:05:35.363194
| 2015-11-19T09:31:33
| 2015-11-19T09:31:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 184
|
py
|
import sys
IS_PY2 = sys.version_info.major == 2
IS_PY3 = sys.version_info.major == 3
if IS_PY2:
from urllib2 import urlopen
elif IS_PY3:
from urllib.request import urlopen
|
[
"quickness.net@gmail.com"
] |
quickness.net@gmail.com
|
407efdcb8f4347abb2d32adb1d31129ca887b147
|
cacdbf688209cce2f39698758346b99de7d5281d
|
/Up and Down.py
|
0adb0239536ab0638fa77ddcddc498da3bba9eeb
|
[] |
no_license
|
Federico-PizarroBejarano/Don-Mills-Online-Judge
|
27d168e390cdf7be104117d6a699fd7df4104b63
|
6e77978a19d29ec3095687b71dc8eff3565f6a60
|
refs/heads/master
| 2021-05-11T09:14:24.849165
| 2018-01-19T03:42:06
| 2018-01-19T03:42:06
| 118,072,968
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 458
|
py
|
a = input()
b = input()
c = input()
d = input()
s = input()
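# Python 2: input() evaluates the entered numbers to ints. Each walker repeats
# a cycle of a (resp. c) steps up then b (resp. d) steps down; count whole
# cycles first, then walk the leftover e steps one at a time.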
Nikky = (a-b)*(s/(a+b))
e = s - ((s/(a+b))*(a+b))
steps1 = tuple([1]*a + [-1]*b)
for i in range(e):
Nikky += steps1[i]
Byron = (c-d)*(s/(c+d))
e = s - ((s/(c+d))*(c+d))
steps2 = tuple([1]*c + [-1]*d)
for i in range(e):
Byron += steps2[i]
if abs(Nikky) > abs(Byron):
print "Nikky"
elif abs(Nikky) < abs(Byron):
print "Byron"
else:
print "Tied"
|
[
"noreply@github.com"
] |
Federico-PizarroBejarano.noreply@github.com
|
f54b2b88344ae2e9c5b8db443dd77c2e5dda8b39
|
f088119d9068c00fa66cacb1be844481239dc451
|
/playerarray.py
|
af8af3608b090500e93321d6a824473abda58968
|
[] |
no_license
|
thamilarasi43/thamilarasi
|
7c9ba952d418ea0d2ccdcd70b10d3eacf46899b5
|
027bcd6f91164e3368f6633b4c79cd1a635a5300
|
refs/heads/master
| 2020-03-09T12:20:57.263416
| 2018-04-28T14:08:03
| 2018-04-28T14:08:03
| 128,783,545
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 197
|
py
|
def printArray(r, n1):
    for i in range(0, n1):
        print("%d" % (r[i]), end=" ")

def pancakeSort(r, n1):
    # pancake sort: flip the largest remaining element to the front, then into place
    for size in range(n1, 1, -1):
        mi = r.index(max(r[:size]))
        r[:mi + 1] = r[mi::-1]       # bring the current max to the front
        r[:size] = r[size - 1::-1]   # flip it into its final position

r = [243, 140, 240, 141, 142, 46, 74]
n1 = len(r)
pancakeSort(r, n1)
print("Sorted Array ")
printArray(r, n1)
|
[
"noreply@github.com"
] |
thamilarasi43.noreply@github.com
|
98643f08886e47ed505a399a0f20256ac8f81480
|
44349b91f86ad512cb7268951f3b0b918d6343b5
|
/Insecticide Resistance Capston Project Files(Machine Learning, Python)/ResPro Package/paac-calculator.py
|
b5e5c7edd8c58b8551490e9c02e5b0e06e68cbcf
|
[] |
no_license
|
papaniivanderpuye/Public-Projects
|
d6ac3fcd90fc6fbe18279ae9256f10989742dde8
|
94592c6292c42103d3c22c06ef7731836f0798d0
|
refs/heads/master
| 2020-03-20T16:44:39.118989
| 2018-07-10T23:58:22
| 2018-07-10T23:58:22
| 137,545,017
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,564
|
py
|
import math
import sys
#calculates paac for every sequence in file
def aacSet(sequence,counts): #returns list with frequency of each letter
#counts number of times each letter appears
for aa in sequence:
counts[aa] += 1
#making list for recording numbers
myList =[]
#adding all numbers for frequency of each letter to the list
for key in sorted(counts.keys()):
myList.append(str( counts[key]) )
return myList
#checks if letters are the same
def deltaFunction(R1,R2):
if (R1 == R2):
return 1
else:
return 0
def deltaSet(sequence):
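    # sequence-order correlation terms for lags 1..myLambda: for each lag,
    # average the identity (delta) function over all residue pairs that lag apart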
deltaSetList =[]
L =len(sequence)
myLambda = 10
for i in range(myLambda):
total = 0.0
        # compute the per-letter correlation terms of the sequence, following
        # the formula in the PDF Jing Hu sent me
        for j in range(L - (i+1)):
            R1 = sequence[j]
            # add one because i starts from 0; otherwise this would not match
            # the actual mathematical formula
            R2 = sequence[j + (i + 1)]
total += deltaFunction(R1,R2)
deltaNumber = total/float(L-i)
deltaSetList.append(str(deltaNumber))
return deltaSetList
#getting the actual values from the hydroFile
def getAAIndexValues(hydroInfoFile):
alist=[]
values = []
fin= open(hydroInfoFile, "r")
firstLine = True
for line in fin:
if(firstLine):
firstLine = False #moves on if it is the first line, otherwise, the values list will be empty before it calculates.
elif ( 'A' not in line):
newLine = line.rstrip('\n').rstrip('*').rstrip('\r')
newList = newLine.split()
for aa in newList:
                # adds all values to a list, in the order they were read
values.append(float(aa))
else:
#normalizes values
mean = sum(values)/float(len(values))
otherList =[(x-mean)**2 for x in values]
deviation = math.sqrt( sum(otherList)/float(len(otherList)) )
normalized = [(x-mean)/deviation for x in values]
alist.append(normalized)
values=[]
#copying the last lines
mean = sum(values)/len(values)
otherList =[(x-mean)**2 for x in values]
deviation = math.sqrt( sum(otherList)/len(otherList) )
normalized = [(x-mean)/deviation for x in values]
alist.append(normalized)
values=[]
fin.close()
return alist
def hydroFunction(R1,R2,alist,indexDict):
#uses the dictionary to get the index of each letter for the alist
index1 = indexDict[R1]
index2 = indexDict[R2]
product = float(alist[index1]) * float(alist[index2])
return product
def hydroSet(sequence,listOfAAindexes,indexDict):
hydroSetList =[]
L =len(sequence)
myLambda = 10
for x in range(len(listOfAAindexes)):
for i in range(myLambda):
            total = 0.0
            # compute the per-letter correlation terms of the sequence, following
            # the formula in the PDF Jing Hu sent me
            for j in range(L - (i+1)):
                R1 = sequence[j]
                # add one because i starts from 0; otherwise this would not match
                # the actual mathematical formula
                R2 = sequence[j + (i + 1)]
total += hydroFunction(R1,R2,listOfAAindexes[x],indexDict)
hydroNumber = total/float(L-i)
hydroSetList.append(str(hydroNumber))
return hydroSetList
def helper(inputFile, output,number,hydroInfoFile):
firstLine = True
sequence =""
listOfAAindexes = getAAIndexValues(hydroInfoFile) #get all 9 sets, list of dictionaries
#this is for the hydro function
indexDict = {'A':0,'R':1,'N':2,'D':3,'C':4,'Q':5,'E':6,'G':7,'H':8,'I':9,'L':10,'K':11,'M':12,'F':13,'P':14,'S':15,'T':16,'W':17,'Y':18,'V':19}
x = 1
for line in inputFile:
if (line.startswith('>') == False):
newLine = line.rstrip('\n').rstrip('*').rstrip('\r')
sequence = sequence + newLine
else:
# resitance number
if firstLine:
#do nothing and anticipate next line
firstLine = False
else:
output.write(number)
dim = 1
                # create the counts dictionary for the aac function
counts = { 'R':0,'K':0,'D':0,'E':0 ,'Q':0,'N':0, 'H':0, 'S':0, 'T':0, 'Y':0, 'C':0,'W':0 ,'A':0, 'I':0,'L':0, 'M':0, 'F':0, 'V':0,'P':0, 'G':0 }
aacNum = aacSet(sequence,counts)
deltaSetList = deltaSet(sequence)
hydroSetList = hydroSet(sequence,listOfAAindexes,indexDict)
#this writes the first 20 aac numbers
for value in aacNum:
output.write( " " + str(dim) + ":" + value)
dim += 1
#this writes the delta set
for key in deltaSetList:
output.write( " " + str(dim) + ":" + key)
dim += 1
#this writes the hydro set
for key in hydroSetList:
output.write( " " + str(dim) + ":" + key)
dim += 1
output.write('\n')
#reset sequence
sequence = ""
#for last sequence
output.write(number)
dim = 1
    # create the counts dictionary for the aac function
counts = { 'R':0,'K':0,'D':0,'E':0 ,'Q':0,'N':0, 'H':0, 'S':0, 'T':0, 'Y':0, 'C':0,'W':0 ,'A':0, 'I':0,'L':0, 'M':0, 'F':0, 'V':0,'P':0, 'G':0 }
aacNum = aacSet(sequence,counts)
deltaSetList = deltaSet(sequence)
hydroSetList = hydroSet(sequence,listOfAAindexes,indexDict)
#this writes the first 20 aac numbers
for value in aacNum:
output.write( " " + str(dim) + ":" + value)
dim += 1
#this writes the delta set
for key in deltaSetList:
output.write( " " + str(dim) + ":" + key)
dim += 1
#this writes the hydro set
for key in hydroSetList:
output.write( " " + str(dim) + ":" + key)
dim += 1
output.write('\n')
#reset sequence
sequence = ""
    # No return at the end because it seems to rewrite the whole output
    # file when it starts again for the -1 labels
def main():
h = "Hydrophobicity/hydroInfoFile.txt"
for i in range(0,100):
print("generating paac...."+ str(i) + "%",end='\r',flush=True)
i1 = open("Samples/resistant-sample" + str(i+1) + ".txt","r")
i2 = open("Samples/nonresistant-sample" + str(i+1) + ".txt","r")
o = open("PAAC/proteinsPAAC" +str(i+1) + ".data","w")
helper(i1,o,"1", h)
helper(i2,o,"-1", h)
i1.close()
i2.close()
o.close()
i1 = open("originalData/resist_protein.txt","r")
i2 = open("originalData/notresist_protein.txt","r")
o = open("independentTest/proteinsPAAC.data","w")
helper(i1,o,"1", h)
helper(i2,o,"-1", h)
i1 = open("originalData/independent_protein_set.txt","r")
o = open("independentTest/proteinsPAAC.test","w")
helper(i1,o,"1", h)
i1.close()
i2.close()
o.close()
main()
|
[
"papaniivanderpuye@users.noreply.github.com"
] |
papaniivanderpuye@users.noreply.github.com
|
9f8b8eb06dff3d95da5421ead1cae1f1fe4b9711
|
0b003b690fe07e7e0be905c298c8b4d5fbf8a764
|
/main.py
|
fe5443dd9cadbfbb633824a103c8480a11526505
|
[] |
no_license
|
mr-tanta/password-manager
|
219e342180a13ac8d20d199898afe1aee55a4d3d
|
ff98da5c972d7d66273cdd9de61fd4e6c8857fee
|
refs/heads/master
| 2023-08-04T20:09:31.155375
| 2021-08-29T06:00:11
| 2021-08-29T06:00:11
| 400,432,092
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,202
|
py
|
from tkinter import *
from tkinter import messagebox
from random import choice, randint, shuffle
import pyperclip
BLUE = "#f4f9f9"
# ---------------------------- PASSWORD GENERATOR ------------------------------- #
def generate_password():
letters = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u',
'v',
'w', 'x', 'y', 'z', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q',
'R',
'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']
numbers = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
symbols = ['!', '#', '$', '%', '&', '(', ')', '*', '+']
password_letters = [choice(letters) for _ in range(randint(8, 13))]
password_symbols = [choice(symbols) for _ in range(randint(3, 5))]
password_numbers = [choice(numbers) for _ in range(randint(4, 6))]
password_list = password_letters + password_symbols + password_numbers
shuffle(password_list)
password = "".join(password_list)
password_input.insert(0, password)
pyperclip.copy(password)
# ---------------------------- SAVE PASSWORD ------------------------------- #
def save():
website = website_input.get()
email = email_input.get()
password = password_input.get()
if len(website) == 0 or len(password) == 0:
messagebox.showinfo(title="Oops!", message="Please make sure you haven't left any fields empty.")
else:
is_ok = messagebox.askokcancel(title=website, message=f"These are the details entered: \nEmail: {email} "
f"\nPassword: {password} \nIs it ok to save?")
if is_ok:
with open("data.txt", "a") as data_file:
data_file.write(f"url: {website}\nusername: {email}\npassword: {password}\n\n ----!o ---- o!----\n\n")
website_input.delete(0, END)
password_input.delete(0, END)
# ---------------------------- UI SETUP ------------------------------- #
window = Tk()
window.title("Password Manager")
window.config(padx=40, pady=40, bg=BLUE)
canvas = Canvas(width=200, height=200, bg=BLUE, highlightthickness=0)
logo_img = PhotoImage(file="logo.png")
canvas.create_image(100, 100, image=logo_img)
canvas.grid(column=2, row=1)
website_label = Label(text="Website:", bg=BLUE)
website_label.grid(column=1, row=2)
website_input = Entry(width=35)
website_input.grid(row=2, column=2, columnspan=2)
website_input.get()
website_input.focus()
email_label = Label(text="Email/Username", bg=BLUE)
email_label.grid(column=1, row=3)
email_input = Entry(width=35)
email_input.grid(column=2, row=3, columnspan=2)
email_input.insert(0, "abraham@tantasecure.com")
password_label = Label(text="Password", bg=BLUE)
password_label.grid(column=1, row=4)
password_input = Entry(width=21)
password_input.grid(column=2, row=4)
password_input.get()
add_button = Button(text="Add", width=36, command=save)
add_button.grid(column=2, row=5, columnspan=2)
generate_password_button = Button(text="Generate Password", command=generate_password)
generate_password_button.grid(column=3, row=4)
window.mainloop()
|
[
"80099226+mr-tanta@users.noreply.github.com"
] |
80099226+mr-tanta@users.noreply.github.com
|
ef5cdb2609aa5a6ef0c5dc7f3cc60faebca2ed77
|
de6db2d04bf587f1705f6b6eaae217f558c37bcd
|
/thinClient_server/db.py
|
ee5f5482565525f8ee425be746e1b823371112be
|
[] |
no_license
|
lksmllr/39-Inf-NP-Abschlussprojekt
|
64effcc9f10afbcdf2c13f6a0a0f0aeff5768bf2
|
90670a69181c0278c3124dcffd9934b1c3292b65
|
refs/heads/master
| 2020-03-19T07:47:51.339668
| 2018-07-03T09:11:36
| 2018-07-03T09:11:36
| 136,148,729
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 841
|
py
|
import sqlite3
import click
from flask import current_app, g
from flask.cli import with_appcontext
def get_db():
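    # reuse a single connection per request via Flask's application context (g)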
if 'db' not in g:
g.db = sqlite3.connect(
current_app.config['DATABASE'],
detect_types=sqlite3.PARSE_DECLTYPES
)
g.db.row_factory = sqlite3.Row
return g.db
def close_db(e=None):
db = g.pop('db', None)
if db is not None:
db.close()
def init_db():
db = get_db()
with current_app.open_resource('schema.sql') as f:
db.executescript(f.read().decode('utf8'))
@click.command('init-db')
@with_appcontext
def init_db_command():
"""Clear the existing data and create new tables."""
init_db()
click.echo('Initialized the database.')
def init_app(app):
app.teardown_appcontext(close_db)
app.cli.add_command(init_db_command)
|
[
"lmueller@techfak.uni-bielefeld.de"
] |
lmueller@techfak.uni-bielefeld.de
|
f94625fd41153f510f19d3d816fc6287997449f6
|
a0aa05e8c7d47f41bf58dc7e70b2f9a4297614f0
|
/receiving terminal/mLib/Huffman.py
|
0a9484bd58f2bd8eba8fd48385a4c648cec845a9
|
[
"MIT"
] |
permissive
|
ChanKamyung/Covert-transmission-based-on-ICMP
|
9765b27b5ed37273b80ae09a7b3be8cc570ac3a8
|
46c3e5244cc1312100f84a43999e5e0188a63b82
|
refs/heads/master
| 2020-12-22T21:56:10.381379
| 2020-03-27T08:45:31
| 2020-03-27T08:45:31
| 236,941,706
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,651
|
py
|
#!/usr/bin/python3
# _*_ coding=utf-8 _*_
import sys
sys.setrecursionlimit(1000000)  # compressing large files can exceed the recursion depth, so raise the limit

# node class for the Huffman tree
class node(object):
def __init__(self, value=None, left=None, right=None, father=None):
self.value = value
self.left = left
self.right = right
self.father = father
def build_father(left, right):
n = node(value= left.value + right.value, left= left, right= right)
left.father = right.father = n
return n
def encode(n):
if n.father == None:
return b''
if n.father.left == n:
            return node.encode(n.father) + b'0'  # left child is coded '0'
        else:
            return node.encode(n.father) + b'1'  # right child is coded '1'

# build the Huffman tree
def build_tree(l):
if len(l) == 1:
return l
    sorts = sorted(l, key=lambda x: x.value, reverse=False)  # sort ascending
n = node.build_father(sorts[0], sorts[1])
sorts.pop(0)
sorts.pop(0)
sorts.append(n)
return build_tree(sorts)
def encode(node_dict, debug=False):
ec_dict = {}
for x in node_dict.keys():
ec_dict[x] = node.encode(node_dict[x])
        if debug == True:  # print the code table (for debugging)
print(x)
print(ec_dict[x])
return ec_dict
def encodefile(inputfile):
    # initialize data
    node_dict = {}  # map raw bytes to coding nodes, so codes can be emitted later
count_dict = {}
print("Starting encode...")
with open(inputfile,"rb") as f:
        bytes_width = 1  # number of bytes read at a time
i = 0
f.seek(0,2)
count = f.tell() / bytes_width
print('length =', count)
        nodes = []  # node list used to build the Huffman tree
buff = [b''] * int(count)
f.seek(0)
        # count byte frequencies and build one leaf node per distinct byte
while i < count:
buff[i] = f.read(bytes_width)
if count_dict.get(buff[i], -1) == -1:
count_dict[buff[i]] = 0
count_dict[buff[i]] = count_dict[buff[i]] + 1
i = i + 1
print("Read OK")
        #print(count_dict)  # dump the weight dictionary (debug, can stay commented out)
for x in count_dict.keys():
node_dict[x] = node(count_dict[x])
nodes.append(node_dict[x])
        tree = build_tree(nodes)  # build the Huffman tree
        ec_dict = encode(node_dict)  # build the code table
print("Encode OK")
        head = sorted(count_dict.items(), key=lambda x: x[1], reverse=True)  # sort all nodes by weight, descending
bit_width = 1
print("head:",head[0][1]) #动态调整编码表的字节长度,优化文件头大小
if head[0][1] > 255:
bit_width = 2
if head[0][1] > 65535:
bit_width = 3
if head[0][1] > 16777215:
bit_width = 4
print("bit_width:", bit_width)
name = inputfile.split('.')
with open(name[0]+'.hfm', 'wb') as o:
name = inputfile.split('/')
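            # .hfm header layout: original file name + '\n', a 2-byte node count,
            # a 1-byte count width, then one (byte, count) pair per symbol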
            o.write((name[-1] + '\n').encode('utf-8'))  # write the original file name
            o.write(int.to_bytes(len(ec_dict), 2, byteorder='big'))  # write the node count
            o.write(int.to_bytes(bit_width, 1, byteorder='big'))  # write the code-table byte width
            for x in ec_dict.keys():  # write the file header
o.write(x)
o.write(int.to_bytes(count_dict[x], bit_width, byteorder= 'big'))
print('head OK')
raw = 0b1
last = 0
            for i in range(int(count)):  # start compressing the data
for x in ec_dict[buff[i]]:
raw = raw << 1
if x == 49:
raw = raw | 1
if raw.bit_length() == 9:
raw = raw & (~(1 << 8))
o.write(int.to_bytes(raw ,1 , byteorder= 'big'))
o.flush()
raw = 0b1
tmp = int(i / len(buff) * 100)
if tmp > last:
print("encode:", tmp, '%') #输出压缩进度
last = tmp
            if raw.bit_length() > 1:  # handle trailing bits that do not fill a full byte
offset = 8 - (raw.bit_length() - 1)
raw = raw << offset
raw = raw & (~(1 << raw.bit_length() - 1))
o.write(int.to_bytes(raw ,1 , byteorder = 'big'))
o.write(int.to_bytes(offset, 1, byteorder = 'big')) ###
print("File encode successful.")
def decodefile(inputfile):
print("Starting decode...")
with open(inputfile, 'rb') as f:
f.seek(0, 2)
eof = f.tell()
f.seek(-1, 1)
offset = int.from_bytes(f.read(1), byteorder= 'big')
f.seek(0)
name = inputfile.split('/')
outputfile = inputfile.replace(name[-1], \
f.readline().decode('utf-8'))
        # strip NULL/control bytes from the recovered name
        outputfile = list(outputfile)
        for i in range(len(outputfile) - 1):  # the trailing '\n' can be ignored
if ord(outputfile[i]) < 32:
outputfile[i] = '_'
outputfile = ''.join(outputfile)
with open(inputfile.replace('.hfm', '') + outputfile.replace('\n','') ,'wb') as o:
            count = int.from_bytes(f.read(2), byteorder='big')  # read the node count
            bit_width = int.from_bytes(f.read(1), byteorder='big')  # read the code-table byte width
de_dict = {}
            for i in range(int(count)):  # parse the file header
key = f.read(1)
value = int.from_bytes(f.read(bit_width), byteorder= 'big')
de_dict[key] = value
node_dict = {}
nodes = []
inverse_dict = {}
for x in de_dict.keys():
node_dict[x] = node(de_dict[x])
nodes.append(node_dict[x])
            tree = build_tree(nodes)  # rebuild the Huffman tree
            ec_dict = encode(node_dict)  # rebuild the code table
            for x in ec_dict.keys():  # build the inverse (code -> byte) dictionary
inverse_dict[ec_dict[x]] = x
data = b''
raw = 0
last = 0
            for i in range(f.tell(), eof - 1):  # the offset is stored at eof, hence the -1
raw = int.from_bytes(f.read(1), byteorder= 'big')
#print("raw:",raw)
j = 8
while j > 0:
if (raw >> (j - 1)) & 1 == 1:
data = data + b'1'
raw = raw & (~(1 << (j - 1)))
else:
data = data + b'0'
raw = raw & (~(1 << (j - 1)))
if inverse_dict.get(data, -1) != -1:
o.write(inverse_dict[data])
o.flush()
if i == eof - 2 and j - offset == 1:
break
#print("decode",data,":",inverse_dict[data])
data = b''
j = j - 1
tep = int(i / eof * 100)
if tep > last:
print("decode:", tep,'%') #输出解压进度
last = tep
raw = 0
print("File decode successful.")
if __name__ == '__main__':
if input("1:压缩文件\t2:解压文件\n请输入你要执行的操作:") == '1':
encodefile(input("请输入要压缩的文件:"))
else:
decodefile(input("请输入要解压的文件:"))
|
[
"noreply@github.com"
] |
ChanKamyung.noreply@github.com
|
f060159b54a34871fdce2f1c0de8394ebff460ee
|
47a17dfa1b0c1620985ebc93966fb5342fe2ace6
|
/models4rl/agents/DQN/dqn.py
|
bd813a2f9fbecbb0259a0529cca69dca3aa82ff7
|
[] |
no_license
|
dododoSA/Models4RL
|
7e1bf6d1343e2da1e6b5bede08c20dd216408abe
|
22a251740758994a84c44fa6607e57d93471de31
|
refs/heads/master
| 2023-01-05T23:01:42.239502
| 2020-10-22T16:31:39
| 2020-10-22T16:31:39
| 288,118,274
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,633
|
py
|
from models4rl.agents.base_agent import BaseAgent
from models4rl.replay_buffers.replay_buffer import ReplayBuffer
from models4rl.explorers.base_explorer import BaseExplorer
import torch
from torch.nn import Module
from torch.optim.optimizer import Optimizer
import copy
from gym.spaces.discrete import Discrete
class DQN(BaseAgent):
def __init__(
self,
action_space:Discrete,
q_network:Module,
optimizer:Optimizer,
        criterion,  # TODO: decide how to handle this; also applies to DDQN
explorer:BaseExplorer,
replay_buffer:ReplayBuffer,
batch_size:int=32,
gamma:float=0.99,
target_update_step_interval:int=0,
target_update_episode_interval:int=5
):
"""
コンストラクタ
Args:
action_space (Discrete): 行動空間 env.action_spaceをそのまま渡せばOK、一次元離散値
q_network (Module): 学習するネットワーク 型はとりあえず親クラスを書いている
optimizer (Optimizer): 最適化アルゴリズム
criterion (Module): 損失関数 型はとりあえず自作するときの親クラスを書いている
explorer (BaseExplorer): 探索アルゴリズム
replay_buffer (ReplayBuffer): 経験再生用のメモリ
batch_size (int): 経験再生からリプレイを行う際にサンプリングする数, 初期値32 (replay bufferに含めてもいいかもしれない)
gamma (float): 割引率, 初期値0.99
target_update_step_interval (int): 学習の目標となるQNetworkを何ステップ毎に更新するか. 0の場合は更新されない. 1ステップが終了するとカウントはリセットされる. 初期値0
target_update_episode_interval (int): 学習の目標となるQNetworkを何エピソード毎に更新するか. 初期値5
"""
assert target_update_step_interval >= 0, 'target_update_step_interval must be positive or 0.'
assert target_update_episode_interval > 0, 'target_update_episode_interval must be positive.'
super(DQN, self).__init__(action_space, explorer, gamma)
self.q_network = q_network
self.optimizer = optimizer
self.criterion = criterion
self.target_network = copy.deepcopy(self.q_network)
self.replay_buffer = replay_buffer
self.batch_size = batch_size
self.state = None
self.action = None
self.target_update_step_interval = target_update_step_interval
self.target_update_episode_interval = target_update_episode_interval
self.episode = 1
        self.step = 1  # total step count across episodes, not per-episode
def act_and_train(self, observation, reward):
"""
学習と行動を行う関数。引数として渡された状態と報酬は一度保存し、次のステップで使用される。
Args:
observation (numpy.array): 前回行った行動による移り先の状態(現在の状態)
reward (float): 前回行った行動による報酬
Returns:
int: 移り先の状態にて起こす行動
Examples:
# init
obs = env.reset()
reward = 0
done = False
...
# run 必ずこの順番で呼ぶ
action = agent.act_and_train(obs, reward)
obs, reward, done, info = env.step(action)
"""
next_state = torch.tensor(observation).float()
self.replay_buffer.append_and_update(
self.state,
self.action,
next_state,
reward,
q_network=self.q_network,
target_network=self.target_network,
gamma=self.gamma
)
self.replay()
if self.target_update_step_interval and self.step % self.target_update_step_interval == 0:
self._update_target_network()
self.state = next_state
self.action = self.explorer.explore(self.action_space.sample, lambda: self._choice_greedy_action(next_state))
self.step += 1
return self.action
def replay(self):
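        # fit Q(s, a) toward r + gamma * max_a' Q_target(s', a') on a sampled minibatch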
if len(self.replay_buffer) < self.batch_size:
return
state_batch, next_state_batch, action_batch, reward_batch = self.replay_buffer.get_batch(self.batch_size).values()
self.q_network.eval()
q_values = self.q_network(state_batch).gather(1, action_batch)
self.target_network.eval()
next_q_values = torch.zeros_like(q_values, dtype=float)
next_q_values = self.target_network(next_state_batch).max(1)[0].detach()
target_values = self.gamma * next_q_values + reward_batch
self.q_network.train()
loss = self.criterion(q_values, target_values.unsqueeze(1).float())
self.optimizer.zero_grad()
loss.backward()
for p in self.q_network.parameters():
p.grad.data.clamp_(-1, 1)
self.optimizer.step()
def _update_target_network(self):
self.target_network.load_state_dict(self.q_network.state_dict())
def _choice_greedy_action(self, observation):
self.q_network.eval()
with torch.no_grad():
            action = self.q_network(observation).max(dim=0)[1].item()  # todo: the tensor handling around here is rough, beware
return action
def act_greedily(self, observation):
next_state = torch.tensor(observation).float()
return self._choice_greedy_action(next_state)
def stop_episode_and_train(self, observation, reward):
"""
エピソード終了時の処理(学習も行う)
Args:
observation (ndarray): 観測した状態
reward (float): 即時報酬
"""
next_state = torch.tensor(observation).float()
self.replay_buffer.append_and_update(
self.state,
self.action,
next_state,
reward,
q_network=self.q_network,
target_network=self.target_network,
gamma=self.gamma
)
self.replay()
self.explorer.end_episode()
self.action = None
self.state = None
self.episode += 1
if self.episode % self.target_update_episode_interval == 0:
self._update_target_network()
|
[
"dododosa14@gmail.com"
] |
dododosa14@gmail.com
|
aa66289c1f185034ae18f11611dab6900cc1a9c4
|
329b910c4a74a7ebca9eeb1de43a82d4a3fc8d8a
|
/0x01-python-if_else_loops_functions/1-last_digit.py
|
b1c1ba7deaf4663fee52131892453894df17caee
|
[] |
no_license
|
MarioEstebanSuaza/holbertonschool-higher_level_programming
|
58ff5a4c5b70a89c29881f9958531199a85a0327
|
de8fb6b03f9e680d9cb0f7798440ccc4b3260f48
|
refs/heads/master
| 2022-11-26T00:18:32.893077
| 2020-08-06T23:39:05
| 2020-08-06T23:39:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 401
|
py
|
#!/usr/bin/python3
import random
number = random.randint(-10000, 10000)
if number < 0:
ln = (number * -1) % 10 * -1
else:
ln = number % 10
if ln > 5:
print("Last digit of", number, "is", ln, "and is greater than 5")
elif ln == 0:
print("Last digit of", number, "is", ln, "and is 0")
elif ln < 6 and ln != 0:
print("Last digit of", number, "is", ln, "and is less than 6 and not 0")
|
[
"andres_felipe0191@hotmail.com"
] |
andres_felipe0191@hotmail.com
|
56d2bc727c3712ef9ae6a951625f0414fac943a8
|
41c0d29efcb3ac0e22237bd3fadc5cdf550698cd
|
/tests/testing_config/custom_components/test/alarm_control_panel.py
|
0e2842f869561a20b2f53b381cca7907aafd5e77
|
[
"Apache-2.0"
] |
permissive
|
EthanW1215/home-assistant
|
7c19ce668821f3063b3d46f9e9a0ef5a6e102689
|
a48ac4d18fab253572780671f896b3a417322699
|
refs/heads/master
| 2020-09-05T09:02:59.513681
| 2019-11-05T18:57:08
| 2019-11-05T18:57:08
| 220,050,094
| 2
| 0
|
Apache-2.0
| 2019-11-06T17:13:33
| 2019-11-06T17:13:32
| null |
UTF-8
|
Python
| false
| false
| 2,494
|
py
|
"""
Provide a mock alarm_control_panel platform.
Call init before using it in your tests to ensure clean test data.
"""
from homeassistant.components.alarm_control_panel import AlarmControlPanel
from homeassistant.const import (
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_ARMED_NIGHT,
STATE_ALARM_DISARMED,
STATE_ALARM_TRIGGERED,
)
from tests.common import MockEntity
ENTITIES = {}
def init(empty=False):
"""Initialize the platform with entities."""
global ENTITIES
ENTITIES = (
{}
if empty
else {
"arm_code": MockAlarm(
name=f"Alarm arm code",
code_arm_required=True,
unique_id="unique_arm_code",
),
"no_arm_code": MockAlarm(
name=f"Alarm no arm code",
code_arm_required=False,
unique_id="unique_no_arm_code",
),
}
)
async def async_setup_platform(
hass, config, async_add_entities_callback, discovery_info=None
):
"""Return mock entities."""
async_add_entities_callback(list(ENTITIES.values()))
class MockAlarm(MockEntity, AlarmControlPanel):
"""Mock Alarm control panel class."""
def __init__(self, **values):
"""Init the Mock Alarm Control Panel."""
self._state = None
MockEntity.__init__(self, **values)
@property
def code_arm_required(self):
"""Whether the code is required for arm actions."""
return self._handle("code_arm_required")
@property
def state(self):
"""Return the state of the device."""
return self._state
def alarm_arm_away(self, code=None):
"""Send arm away command."""
self._state = STATE_ALARM_ARMED_AWAY
self.async_write_ha_state()
def alarm_arm_home(self, code=None):
"""Send arm home command."""
self._state = STATE_ALARM_ARMED_HOME
self.async_write_ha_state()
def alarm_arm_night(self, code=None):
"""Send arm night command."""
self._state = STATE_ALARM_ARMED_NIGHT
self.async_write_ha_state()
def alarm_disarm(self, code=None):
"""Send disarm command."""
if code == "1234":
self._state = STATE_ALARM_DISARMED
self.async_write_ha_state()
def alarm_trigger(self, code=None):
"""Send alarm trigger command."""
self._state = STATE_ALARM_TRIGGERED
self.async_write_ha_state()
|
[
"balloob@gmail.com"
] |
balloob@gmail.com
|
4772ac0f8f8819b65a1babed8879a5bd6ca68e9a
|
e8a6f0d4fac4a185985595d002af84ad02d33556
|
/basicforms/settings.py
|
12c812d98007bf32ce07b1bb2088edabfd34351f
|
[] |
no_license
|
Pratik2080/django-form
|
7ffa3941240e9db94c7488ab373dafc52714eafa
|
4d2d8218d32313afb2b3cc10ec4e6d7b7a8c9944
|
refs/heads/main
| 2023-02-24T14:42:21.416974
| 2021-02-01T10:39:48
| 2021-02-01T10:39:48
| 334,905,248
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,246
|
py
|
"""
Django settings for basicforms project.
Generated by 'django-admin startproject' using Django 3.1.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'q717-6cx!4q4$=@4@fq&6i79*832^9*!vk%brkzdt-4*&x27pp'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'basicapp'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'basicforms.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'basicforms.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = 'staticfiles'
|
[
"pratik.m@lendenclub.com"
] |
pratik.m@lendenclub.com
|
452ceb9dcf5efcbd44cb38c204ccf4e5e73dd5b3
|
2c5ef7ecdf2c2588fd95ad04a563e7c89d95822a
|
/Lesson15_GetandReverse.py
|
ea825e755a58f04dd3604fba9d6a511131edb2f8
|
[] |
no_license
|
edwardgorelik/practice
|
6cdd8d7f45bb09988e1fe22a7b79204932bb530a
|
5d3d5636bfa7b9b87e87a1bf1d6a579c75aec19a
|
refs/heads/master
| 2021-01-25T13:58:59.534701
| 2018-04-29T05:34:53
| 2018-04-29T05:34:53
| 123,636,150
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 399
|
py
|
def getLine():
while True:
line = input("Give me a sentence: ")
if not len(line.split()) > 1:
print("Needs to be at least two words in the sentence")
continue
else:
break
return line
def reverseLine(line):
myList = line.split()
newLine = " ".join(myList[::-1])
return newLine
line = getLine()
print(reverseLine(line))
|
[
"e.gorelik11@gmail.com"
] |
e.gorelik11@gmail.com
|
f69b247266479c8b06f621afe813162dae323169
|
efce0eeb3dd40a7067108f6a38951fcac8d2ea57
|
/3_foglalkozás_marcius_21/3_nevsor.py
|
928240eb0ce748f76da331b82ffd316df3af4ff5
|
[] |
no_license
|
mekispeter/Python-SAP-2019
|
f0fc1870f763d3777f708e2ae73f450e471410df
|
c046edc4026b6be93c8efac48c6ef3b57ac64ac4
|
refs/heads/master
| 2020-04-24T06:11:13.209206
| 2019-05-15T14:43:58
| 2019-05-15T14:43:58
| 171,756,122
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 271
|
py
|
csoport = ["Niki", "Tündi", "Nóri", "Rebeka", "Timi",
"Prisca", "Virág", "Mira", "Kami", "Lilla",
"Odett", "Livi", "Dóri", "Viki", "Tami"]
nev = input("Kit keresel?")
if nev in csoport:
print("Velünk van!")
else:
print("Nem tagja a csoportunknak.")
|
[
"noreply@github.com"
] |
mekispeter.noreply@github.com
|
93caa612177f6f54aab7869362be49eebf2b6f37
|
f7bd260a492362a8d06c3e70561cd61724eaa2b2
|
/terminal/PWMControlGui_gtk.py
|
6283df3bb0e2d97369d5f5eb6836bacb0150c1a6
|
[] |
no_license
|
ersteller/pySerial2AVR
|
48f355119147006d93586c5e4bf608b0a87142ca
|
08bb3ea2b9345451e178d1e2f447af73dba5ca6d
|
refs/heads/master
| 2022-08-04T22:48:53.090657
| 2022-07-20T13:26:58
| 2022-07-20T13:26:58
| 5,402,091
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,230
|
py
|
'''
Created on 25.11.2012
@author: Jan
'''
"""
helloworld with pygtk and glade
This is not threadable
"""
import time
import sys
import os
import threading
from remoAVR import AVR
try:
import pygtk
pygtk.require("2.0")
except:
pass
try:
import gtk
import gtk.glade
except:
sys.exit(1)
class HellowWorldGTK:
"""This is an Hello World GTK application"""
fStopthread = False
def __init__(self):
self.baselen = 0xffff # initial base resolution
#Set the Glade file (relative path)
self.gladefile = os.path.split(__file__)[0] + "\\pwmgui.glade"
self.wTree = gtk.glade.XML(self.gladefile)
        #Create our dictionary and connect it
dic = { "on_btnInit_clicked" : self.btnInit_clicked,
"on_window1_destroy" : self.quit,
"on_scalePrescale_value_changed": self.on_prescal_change,
"on_scaleOutA_value_changed": self.on_dutycycle_change_a,
"on_scaleOutB_value_changed": self.on_dutycycle_change_b,
"on_scaleOutC_value_changed": self.on_dutycycle_change_c,
"on_scaleICR_value_changed" : self.on_dutycycle_change_i,
}
self.wTree.signal_autoconnect(dic)
#gtk.gdk.threads_init() # unlocking thread lock in main process
self.mc = AVR()
#self.mc.reset()
def btnInit_clicked(self, widget):
print "PWM init"
self.mc.pwm_init()
def quit(self, widget):
self.fStopthread = True
gtk.main_quit()
def on_prescal_change(self, widget):
cs = int(widget.get_value())
self.mc.pwm_prescaler(cs)
pass
def on_dutycycle_change_a(self, widget):
duty_cycle = widget.get_value()
self.on_dutycycle_change("OCR3A", duty_cycle)
def on_dutycycle_change_b(self, widget):
duty_cycle = widget.get_value()
self.on_dutycycle_change("OCR3B", duty_cycle)
def on_dutycycle_change_c(self, widget):
duty_cycle = widget.get_value()
self.on_dutycycle_change("OCR3C", duty_cycle)
def on_dutycycle_change_i(self, widget):
duty_cycle = int(widget.get_value())
self.baselen = duty_cycle
self.mc.pwm("ICR3", self.baselen)
self.on_dutycycle_change("OCR3A", self.wTree.get_widget("scaleOutA").get_value())
self.on_dutycycle_change("OCR3B", self.wTree.get_widget("scaleOutB").get_value())
self.on_dutycycle_change("OCR3C", self.wTree.get_widget("scaleOutC").get_value())
def on_dutycycle_change(self, param, dc):
print "scaling", dc
self.mc.pwm(param, int(dc * self.baselen / 100))
def main(self):
while not hwg.fStopthread:
print time.time()
time.sleep(5)
if __name__ == "__main__":
hwg = HellowWorldGTK()
gtk.gdk.threads_init()
#t= threading.Thread(target=hwg.main)
#t.start()
gtk.gdk.threads_enter()
gtk.mainloop()
gtk.gdk.threads_leave()
|
[
"Jan@Jans-QPC"
] |
Jan@Jans-QPC
|
3fc39627ead959993c021edf1f2c93d6e75d77d7
|
b22588340d7925b614a735bbbde1b351ad657ffc
|
/athena/MuonSpectrometer/MuonCnv/MuonByteStream/share/ReadMuonRDO_jobOptions.py
|
88cedafcca34011aab6b2d80c4eeda885bc8aa15
|
[] |
no_license
|
rushioda/PIXELVALID_athena
|
90befe12042c1249cbb3655dde1428bb9b9a42ce
|
22df23187ef85e9c3120122c8375ea0e7d8ea440
|
refs/heads/master
| 2020-12-14T22:01:15.365949
| 2020-01-19T03:59:35
| 2020-01-19T03:59:35
| 234,836,993
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 262
|
py
|
ByteStreamAddressProviderSvc = Service( "ByteStreamAddressProviderSvc" )
ByteStreamAddressProviderSvc.TypeNames += [
"RpcPadContainer/RPCPAD",
"MdtCsmContainer/MDTCSM",
"TgcRdoContainer/TGCRDO",
"CscRawDataContainer/CSCRDO"
]
|
[
"rushioda@lxplus754.cern.ch"
] |
rushioda@lxplus754.cern.ch
|
bb737fe68800e1577799e178e2a02eed7e399cfe
|
1178abea66ba9592a1c2821da036f3660b382abb
|
/manager/views/commissions.py
|
7bb9ba609bdce979d01aaf582108a4a1ef58cf70
|
[
"Apache-2.0"
] |
permissive
|
jordancarlson08/MyStuff
|
fdcfc125637164a6369730b7cb275dfdbfcbedc6
|
4f4f6fdd298ce00e4a1f8a4621aaf94c0ccdb773
|
refs/heads/master
| 2021-01-23T01:00:55.764993
| 2014-11-04T21:25:22
| 2014-11-04T21:25:22
| 16,844,357
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,234
|
py
|
from django import forms
from django.conf import settings
from django.http import HttpResponse, HttpResponseRedirect, Http404
from manager.models import *
from account.models import *
from catalog.models import *
from . import templater
from datetime import *
from django.core.mail import send_mail
from base_app.user_util import *
from django.contrib.auth.decorators import login_required, user_passes_test
@login_required
@user_passes_test(manager_check)
def process_request(request):
'''Shows the commissions for a user'''
form = CommissionsForm()
c_list = Commission.objects.all()
tvars = {
'form':form,
}
return templater.render_to_response(request, 'commissions.html', tvars)
@login_required
@user_passes_test(manager_check)
def process_request__get(request):
'''Shows the commissions for a user'''
c_list = Commission.objects.filter(transaction__employee__id=request.urlparams[0])
tvars = {
'c_list':c_list,
}
return templater.render_to_response(request, 'commissions_list.html', tvars)
class CommissionsForm(forms.Form):
'''A form for new stores'''
emp = forms.ModelChoiceField(label='Employee' ,queryset=get_employees(), widget=forms.Select(attrs={'class': 'form-control', 'id':'emp'}))
|
[
"jordancarlson08@gmail.com"
] |
jordancarlson08@gmail.com
|
b1be52483eed9dc9fe64b43eb9be826989148f64
|
eef1bcc789c4f6c72bedc1f00bf259a09ceb3bf3
|
/discordbot.py
|
a65e2f5f1d32419ca0561bb245f1a90d17be17cd
|
[] |
no_license
|
kurosukee/discord_bot_text_to_speach
|
9dc522144710e84dba6389fe811fbd9b9f21e219
|
ceb6bc9dd40c7cccc6ff4b4f50f9a54ac63c5130
|
refs/heads/master
| 2023-08-02T20:45:58.952449
| 2021-09-26T03:44:32
| 2021-09-26T03:44:32
| 396,017,966
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,487
|
py
|
import discord
import ffmpeg
import re
import os
from discord.ext import commands
from os import getenv
from gtts import gTTS
bot = commands.Bot(command_prefix='!')
voice_client = None
@bot.event
async def on_ready():
    print('Logged in.')  # was: 'ログインしました。'
@bot.command()
async def join(ctx):
vc = ctx.author.voice
if vc is None:
        await ctx.channel.send('ボイチャ繋がってないわよ')  # "You're not connected to voice chat"
return
else:
await vc.channel.connect()
return
@bot.command()
async def bye(ctx):
vc = ctx.voice_client
if vc is None:
        await ctx.channel.send('ボイチャにいないわよ')  # "I'm not in voice chat"
return
else:
await vc.disconnect()
@bot.event
async def on_message(message):
if message.content.startswith('!'):
await bot.process_commands(message)
return
    if message.content == 'うー':  # easter egg: replies 'にゃー' ("meow") to 'うー'
        await message.channel.send('にゃー')
return
if message.content.startswith('http'):
return
vc = message.guild.voice_client
if vc == None:
return
else:
        content = re.sub(r'\(.*?\)', '顔文字', message.content)  # raw string; reads parenthesized emoticons aloud as "kaomoji"
tmpfile = str(message.id) + '.mp3'
tts = gTTS(text=content, lang='ja')
tts.save(tmpfile)
source = discord.FFmpegPCMAudio(tmpfile)
vc.play(source)
        while vc.is_playing():
            await asyncio.sleep(0.5)  # yield to the event loop; a bare `continue` here would block it
try:
os.remove(tmpfile)
except OSError as e:
return
bot.run(getenv('BOT'))
|
[
"ayaka.nishiyama.1117@gmail.com"
] |
ayaka.nishiyama.1117@gmail.com
|
2a86ba9cb07e96cce5dc1c6921d5dd4a301d23a6
|
523f8f5febbbfeb6d42183f2bbeebc36f98eadb5
|
/139_best.py
|
e93df2a757060670d3f2938475f5496a4d88e1cb
|
[] |
no_license
|
saleed/LeetCode
|
655f82fdfcc3000400f49388e97fc0560f356af0
|
48b43999fb7e2ed82d922e1f64ac76f8fabe4baa
|
refs/heads/master
| 2022-06-15T21:54:56.223204
| 2022-05-09T14:05:50
| 2022-05-09T14:05:50
| 209,430,056
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 414
|
py
|
class Solution(object):
def wordBreak(self, s, wordDict):
"""
:type s: str
:type wordDict: List[str]
        :rtype: bool
"""
dp=[False] *(len(s)+1)
dp[0]=True
for i in range(1,len(s)+1):
for j in range(i):
if dp[j] and s[j:i] in wordDict:
dp[i]=True
break
return dp[-1]
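# Worked example (added sketch, not part of the original solution):
# s = "leetcode", wordDict = ["leet", "code"]: dp[4] becomes True via
# s[0:4] == "leet", then dp[8] via s[4:8] == "code", so
# Solution().wordBreak("leetcode", ["leet", "code"]) returns True.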
|
[
"noelsun@mowennaierdeMacBook-Pro.local"
] |
noelsun@mowennaierdeMacBook-Pro.local
|
1a4dc42e47e33f6a65cd5afb973b8281be750191
|
3732272dc6c937a150a97c4b3254b3270e482b85
|
/LibraryManagmentSystem/urls.py
|
14aecb7a60d2448b3627f64059858cf201a207f5
|
[] |
no_license
|
harshakhmk/Library-Management-System
|
e4d023dbc371bfe5f2a90d4a7b97667ca579362c
|
c7af3fd05a9bd4a58156747df2b5112ab2cea6f9
|
refs/heads/master
| 2023-08-28T07:13:33.862585
| 2021-10-30T08:30:14
| 2021-10-30T08:30:14
| 422,567,320
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,019
|
py
|
"""LibraryManagmentSystem URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf import settings # add this
from django.conf.urls.static import static
urlpatterns = [
path("admin/", admin.site.urls),
path("accounts/", include("authentication.urls")),
path("", include("library.urls")),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"kharshakashyap@gmail.com"
] |
kharshakashyap@gmail.com
|
89654f603bccf8d8c356db6ba1cda09176ed6c69
|
32e9aefcb709457762ddcdd921ece830a051f0f9
|
/class_py/paint.py
|
751057a6eff9534388d3015a7a06bce7fd978d86
|
[] |
no_license
|
kartik-kumar71/CollegeStuff
|
bfcb55c28580899fda08bade855754632d355c6c
|
68f9e586dbca70873b118a96d117a408e0a32e25
|
refs/heads/master
| 2021-07-13T07:17:14.612846
| 2020-10-12T15:01:12
| 2020-10-12T15:01:12
| 214,474,650
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 467
|
py
|
# -*- coding: utf-8 -*-
__author__ = "kartik"
def area(a,b,c):
    # surface area of the four walls of an a x b x c room
    return 2*(b*c + c*a)
def q(u):
    # litres of paint needed at a coverage of 110 area units per litre
    return u/110
(x,y,z) = list(map(int,input("Enter length,breadth and height separated by space :").split()))
quantity = q(area(x,y,z))
if int(quantity) < quantity:
quantity = int(quantity) + 1
print ("Quantity of paint = {} liters".format(quantity))
print ("Total cost = {}".format(quantity*1000))
print ("COST = ₹" + str(quantity*1000))
|
[
"kartikjodhpur@gmail.com"
] |
kartikjodhpur@gmail.com
|
9e7457625be95e310c66d97bd48cf15765de761d
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02935/s611440264.py
|
255de086936640d4df37be9408c7ebe068dd3f2a
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 217
|
py
|
def main():
n = int(input())
v = list(map(int, input().split()))
v_s = sorted(v)
w = v_s[0]
for i in range(1, n):
w = (w + v_s[i])/2
print(w)
if __name__ == '__main__':
main()
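# Worked example (added sketch): n = 3, v = [3, 1, 2] -> sorted [1, 2, 3];
# w = (1 + 2)/2 = 1.5, then (1.5 + 3)/2 = 2.25, so 2.25 is printed.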
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
eb363f0f9e4464a9e5b3ca9c0bbaa12c189bfc07
|
5935981c11a940a1b13294f99626dcae54437ef8
|
/Week-8 Assignment.py
|
b40db44b2be1380d15391809f85eaef295877706
|
[] |
no_license
|
mhamzahsiddiqui/NPTEL_Assignment_Pyhon-noc18_cs34
|
d82b95385b56c3fad9f15a07f1fc22ffdc6bba5f
|
88c8c736876092a9337ba24b6020a28f7cb169b8
|
refs/heads/master
| 2020-04-03T06:30:12.881806
| 2018-10-28T13:56:53
| 2018-10-28T13:56:53
| 155,076,735
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 562
|
py
|
def readinput():
n = int(input()) # Length
for j in range(n):
nextnum = int(input()) # Read each value
insequence.append(nextnum)
best.append(0) # Initialize best[k] for each position
return
def solve():
for j in range(len(insequence)):
prev = [ best[k] for k in range(j) if insequence[j]%insequence[k] == 0 ]
if prev:
best[j] = 1 + max(prev)
else:
best[j] = 1
insequence = []
best = []
readinput()
solve()
print(max(best))
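# Worked example (added sketch): for the input sequence 2 3 6 12, every later
# element of the chain 2 -> 6 -> 12 is divisible by its predecessor, so
# best = [1, 1, 2, 3] and the program prints 3.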
|
[
"hamzahsiddiqui1998@gmail.com"
] |
hamzahsiddiqui1998@gmail.com
|
e2d4f65bebecf2826bc61e08ca63c651ed0a0cb5
|
f07a42f652f46106dee4749277d41c302e2b7406
|
/Data Set/bug-fixing-5/4336640da90998dd8c7e29c4d14e227de4291340-<_BiasAddGradGrad>-bug.py
|
63bcda9c022b53ea67111f1556afe10529f8fbc8
|
[] |
no_license
|
wsgan001/PyFPattern
|
e0fe06341cc5d51b3ad0fe29b84098d140ed54d1
|
cc347e32745f99c0cd95e79a18ddacc4574d7faa
|
refs/heads/main
| 2023-08-25T23:48:26.112133
| 2021-10-23T14:11:22
| 2021-10-23T14:11:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,162
|
py
|
@ops.RegisterGradient('BiasAddGrad')
def _BiasAddGradGrad(op, received_grad):
'Gradient for the BiasAddGrad op.\n\n Args:\n op: BiasAddGrad op for which we are calculating gradients.\n received_grad: The gradients passed to the BiasAddGrad op.\n \n Returns:\n A single gradient Tensor for the input to BiasAddGrad (which\n is the gradient of the bias term in BiasAdd)\n '
try:
data_format = op.get_attr('data_format')
except ValueError:
data_format = None
shape = array_ops.shape(op.inputs[0])
rank = array_ops.rank(op.inputs[0])
bias_shape = array_ops.shape(received_grad)
if (data_format == 'NCHW'):
expanded_shape = array_ops.concat(0, [array_ops.ones_like(shape[:(- 3)]), bias_shape, array_ops.ones_like(shape[(- 2):])])
tile_mults = array_ops.concat(0, [shape[:(- 3)], [1], shape[(- 2):]])
else:
expanded_shape = array_ops.concat(0, [array_ops.ones_like(shape[:(- 1)]), bias_shape])
tile_mults = array_ops.concat(0, [shape[:(- 1)], [1]])
expanded_grad = array_ops.reshape(received_grad, expanded_shape)
return array_ops.tile(expanded_grad, tile_mults)
|
[
"dg1732004@smail.nju.edu.cn"
] |
dg1732004@smail.nju.edu.cn
|
f28c91439fbee7d2474372132d36c49a2dee4541
|
ac6a965bbdd36b3bb3fa72ead8507511dd4d7941
|
/docker_django/DataWare/manage.py
|
b47ef44f5055f49bd8f5039e1b58cf8ce99f2845
|
[] |
no_license
|
venkateshchary/django_mysql_docker
|
f7df97742f5812d0c05bfff572e7b3f469acd7fa
|
b9e710b6c1cf6fb50c71aac6baab179a88dd8487
|
refs/heads/master
| 2022-12-04T14:12:36.007993
| 2020-08-30T14:59:02
| 2020-08-30T14:59:02
| 291,491,376
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 628
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'DataWare.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"noreply@github.com"
] |
venkateshchary.noreply@github.com
|
fd855d37c46f8a698b3287ed2ee00f31304614fc
|
b181ba08cdf97582a2c690c52f519717411a5af8
|
/docs/diagram.py
|
2d5a27e752a5771d4215ebc83758d1c1eea4a8e5
|
[
"Apache-2.0"
] |
permissive
|
rodrigodelmonte/site_checker
|
e69bc1529e99ed85ef3e4ed3df7958f4bb394d23
|
8b899ed006c1c7d291f61fcfa5510930f6622597
|
refs/heads/main
| 2023-07-06T19:26:20.761308
| 2021-08-01T20:41:20
| 2021-08-01T20:41:20
| 374,722,123
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,559
|
py
|
from diagrams import Diagram
from diagrams.onprem.compute import Server
from diagrams.onprem.container import Docker
from diagrams.onprem.database import Postgresql
from diagrams.onprem.queue import Kafka
from diagrams.programming.language import Python
with Diagram(filename="docs/site_checker_architecture"):
kafka = Kafka("Kafka")
postgres = Postgresql("PostgreSQL")
producer = Python("site_checker\nproducer")
consumer = Python("site_checker\nconsumer")
website = Server("www.website.com")
website << producer
producer >> kafka >> consumer >> postgres
with Diagram(filename="docs/site_checker_config_ini"):
kafka = Kafka("Kafka")
postgres = Postgresql("PostgreSQL")
producer = Python("site_checker\nproducer")
consumer = Python("site_checker\nconsumer")
    websites = [Server(f"www.website_{n}.com\nThread-{n}") for n in range(3)]
websites << producer
producer >> kafka >> consumer >> postgres
with Diagram(filename="docs/site_checker_cli_parameters"):
kafka = Kafka("Kafka")
postgres = Postgresql("PostgreSQL")
consumer = Python("site_checker\nconsumer")
producer1 = Docker("site_checker\nproducer_1")
producer2 = Docker("site_checker\nproducer_2")
producer3 = Docker("site_checker\nproducer_3")
website1 = Server("www.website_1.com")
website2 = Server("www.website_2.com")
website3 = Server("www.website_3.com")
website1 << producer1 >> kafka
website2 << producer2 >> kafka
website3 << producer3 >> kafka
kafka >> consumer >> postgres
|
[
"rodrigo.monte@onefootball.com"
] |
rodrigo.monte@onefootball.com
|
ced5653a009ed9ba3055f28fc971d2182e449017
|
62bc1cb7dcbd89e38f65b04a78d662defb3975a3
|
/object_detection/detectron2/test/eval.py
|
fbf8777f00a490c2ba098ffd1e525e468c21733c
|
[] |
permissive
|
oakdata/benchmark
|
bfcf326e6367d12bd39b4768d053a94c6c1000a4
|
fdb94230fc716efd6c96af355b106ec43ca64d08
|
refs/heads/main
| 2023-08-05T18:09:36.872039
| 2021-09-30T08:28:11
| 2021-09-30T08:28:11
| 398,959,396
| 12
| 4
|
Apache-2.0
| 2021-09-21T01:36:05
| 2021-08-23T03:13:47
|
Python
|
UTF-8
|
Python
| false
| false
| 4,856
|
py
|
import os
import os.path as osp
import pickle
import numpy as np
import json
annodir = '/grogu/user/jianrenw/data/OAK_TEST/Label'
def voc_ap(rec, prec, use_07_metric=False):
"""Compute VOC AP given precision and recall. If use_07_metric is true, uses
the VOC 07 11-point method (default:False).
"""
if use_07_metric:
# 11 point metric
ap = 0.
for t in np.arange(0., 1.1, 0.1):
if np.sum(rec >= t) == 0:
p = 0
else:
p = np.max(prec[rec >= t])
ap = ap + p / 11.
else:
# correct AP calculation
# first append sentinel values at the end
mrec = np.concatenate(([0.], rec, [1.]))
mpre = np.concatenate(([0.], prec, [0.]))
# compute the precision envelope
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
# to calculate area under PR curve, look for points
# where X axis (recall) changes value
i = np.where(mrec[1:] != mrec[:-1])[0]
# and sum (\Delta recall) * prec
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
return ap
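# Worked example (added sketch, not in the original file): a single detection
# that matches the single ground-truth box gives rec = [1.0], prec = [1.0];
# the interpolated precision envelope then covers the full recall range:
#   voc_ap(np.array([1.0]), np.array([1.0]))  # -> 1.0
#   voc_ap(np.array([0.5]), np.array([1.0]))  # -> 0.5 (recall never exceeds 0.5)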
def parse_rec(file_name,idk,classname,limitset,train_map):
json_name = file_name
f = osp.join(annodir,json_name)
labels = json.load(open(f,'r'))
bbox = []
for label in labels:
obj_id = label['id']
category = label['category']
box2d = label['box2d']
abbox = [box2d['x1'], box2d['y1'], box2d['x2'], box2d['y2']]
if idk == False and classname == category:
bbox.append(abbox)
elif idk == True:
# this category belongs to limitset
if category in limitset.keys():
                # this category does not belong to the train set
if limitset[category] not in train_map.values():
bbox.append(abbox)
return bbox
def voc_eval(classname,class_id,superframe,train_map,outputs,limitset,idk =False,ovthresh=0.5,use_07_metric=False):
#print(len(outputs))
# load annots
recs = {}
for frame in superframe:
recs[frame] = parse_rec(frame,idk,classname,limitset,train_map)
# extract gt objects for this class
class_recs = {}
npos = 0
for frame in superframe:
bbox = np.array(recs[frame])
det = [False] * len(recs[frame])
npos = npos + len(recs[frame])
class_recs[frame] = {"bbox": bbox, "det": det}
# no positive values
if npos == 0:
return -1,-1,-1
# read predictions
BB = []
confidence = []
image_ids = []
for obj in outputs:
image_ids.append(obj['image_id'])
confidence.append(obj['score'])
BB.append([obj['xmin'],obj['ymin'],obj['xmax'],obj['ymax']])
confidence = np.array(confidence)
BB = np.array(BB)
# no valid predictions
if confidence.shape[0] == 0:
return 0, 0, 0
# sort by confidence
sorted_ind = np.argsort(-confidence)
BB = BB[sorted_ind, :]
image_ids = [image_ids[x] for x in sorted_ind]
# go down dets and mark TPs and FPs
nd = len(image_ids)
tp = np.zeros(nd)
fp = np.zeros(nd)
for d in range(nd):
R = class_recs[image_ids[d]]
bb = BB[d, :].astype(float)
ovmax = -np.inf
BBGT = R["bbox"].astype(float)
if BBGT.size > 0:
# compute overlaps
# intersection
ixmin = np.maximum(BBGT[:, 0], bb[0])
iymin = np.maximum(BBGT[:, 1], bb[1])
ixmax = np.minimum(BBGT[:, 2], bb[2])
iymax = np.minimum(BBGT[:, 3], bb[3])
iw = np.maximum(ixmax - ixmin + 1.0, 0.0)
ih = np.maximum(iymax - iymin + 1.0, 0.0)
inters = iw * ih
# union
uni = (
(bb[2] - bb[0] + 1.0) * (bb[3] - bb[1] + 1.0)
+ (BBGT[:, 2] - BBGT[:, 0] + 1.0) * (BBGT[:, 3] - BBGT[:, 1] + 1.0)
- inters
)
overlaps = inters / uni
ovmax = np.max(overlaps)
jmax = np.argmax(overlaps)
if ovmax > ovthresh:
if not R["det"][jmax]:
tp[d] = 1.0
R["det"][jmax] = 1
else:
fp[d] = 1.0
else:
fp[d] = 1.0
# compute precision recall
fp = np.cumsum(fp)
tp = np.cumsum(tp)
rec = tp / float(npos)
# avoid divide by zero in case the first detection matches a difficult
# ground truth
prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
ap = voc_ap(rec, prec, use_07_metric)
return rec, prec, ap
|
[
"noreply@github.com"
] |
oakdata.noreply@github.com
|
0cc26b8470c3362fd6e6bfae7c8b32fea9bba9c3
|
94f31ec38154916c372610e036956f0addbb3d67
|
/articles/models.py
|
db1ee1ba490fbe70bb73c27a2a8e649e1545b87f
|
[] |
no_license
|
pipecat/my_blog
|
d12e22d89a5ed604914fac558414f1704c5b4fa4
|
8251752412d6b31cb62118e3b2e91c79b09e90f4
|
refs/heads/master
| 2020-06-20T06:40:51.584905
| 2016-12-22T16:22:23
| 2016-12-22T16:22:23
| 74,874,228
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 452
|
py
|
from __future__ import unicode_literals
from django.db import models
# Create your models here.
class Article(models.Model):
title = models.CharField(max_length = 100)
category = models.TextField(max_length = 50,blank = True)
date_time = models.DateField(auto_now_add = True)
content = models.TextField(blank = True,null = True)
def __unicode__(self):
return self.title
class Meta:
ordering = ['-date_time']
|
[
"pipecat@yahoo.com"
] |
pipecat@yahoo.com
|
e39e4611d0091fccc959e0a510d83386bb822262
|
cd1132c39b02e8997a4da832f9c2b760caba1801
|
/napari/layers/image/experimental/_octree_slice.py
|
093ae90f54625e4fa7d17b920a774a9cc2246535
|
[
"BSD-3-Clause"
] |
permissive
|
HarshCasper/napari
|
8c9f7051afc36d492f9e30760fe07758bb91e338
|
3ed7d2db678f4012753f53b2d40cff9d34a8011f
|
refs/heads/master
| 2023-03-19T01:27:58.473927
| 2021-03-15T05:41:29
| 2021-03-15T05:41:29
| 347,844,575
| 0
| 0
|
BSD-3-Clause
| 2021-03-15T05:39:00
| 2021-03-15T05:11:44
|
Python
|
UTF-8
|
Python
| false
| false
| 7,707
|
py
|
"""OctreeSlice class.
For viewing one slice of a multiscale image using an octree.
"""
import logging
import math
from typing import Callable, Optional
import numpy as np
from ....components.experimental.chunk import ChunkRequest, LayerRef
from ....types import ArrayLike
from .._image_view import ImageView
from ._octree_loader import OctreeLoader
from .octree import Octree
from .octree_chunk import OctreeChunk, OctreeLocation
from .octree_intersection import OctreeIntersection, OctreeView
from .octree_level import OctreeLevel, OctreeLevelInfo
from .octree_util import OctreeMetadata
LOGGER = logging.getLogger("napari.octree.slice")
class OctreeSlice:
"""A viewed slice of a multiscale image using an octree.
Parameters
----------
data
The multi-scale data.
layer_ref : LayerRef
Reference to the layer containing the slice.
meta : OctreeMetadata
The base shape and other info.
image_converter : Callable[[ArrayLike], ArrayLike]
For converting to displaying data.
Attributes
----------
loader : OctreeLoader
Uses the napari ChunkLoader to load OctreeChunks.
"""
def __init__(
self,
data,
layer_ref: LayerRef,
meta: OctreeMetadata,
image_converter: Callable[[ArrayLike], ArrayLike],
):
self.data = data
self._meta = meta
slice_id = id(self)
self._octree = Octree(slice_id, data, meta)
self.loader: OctreeLoader = OctreeLoader(self._octree, layer_ref)
thumbnail_image = np.zeros(
(64, 64, 3)
) # blank until we have a real one
self.thumbnail: ImageView = ImageView(thumbnail_image, image_converter)
@property
def loaded(self) -> bool:
"""True if the data has been loaded.
Because octree multiscale is async, we say we are loaded up front even
though none of our chunks/tiles might be loaded yet.
Returns
-------
bool
            True if the data has been loaded.
"""
return self.data is not None
@property
def octree_level_info(self) -> Optional[OctreeLevelInfo]:
"""Information about the current octree level.
Returns
-------
Optional[OctreeLevelInfo]
Information about current octree level, if there is one.
"""
if self._octree is None:
return None
try:
return self._octree.levels[self.octree_level].info
except IndexError as exc:
index = self.octree_level
num_levels = len(self._octree.levels)
raise IndexError(
f"Octree level {index} is not in range(0, {num_levels})"
) from exc
def get_intersection(self, view: OctreeView) -> OctreeIntersection:
"""Return the given view's intersection with the octree.
The OctreeIntersection primarily contains the set of tiles at
some level that need to be drawn to depict view. The "ideal level"
is generally chosen automatically based on the screen resolution
described by the OctreeView.
Parameters
----------
view : OctreeView
Intersect this view with the octree.
Returns
-------
OctreeIntersection
The given view's intersection with the octree.
"""
level = self._get_auto_level(view)
return OctreeIntersection(level, view)
def _get_auto_level(self, view: OctreeView) -> OctreeLevel:
"""Return the automatically selected octree level for this view.
Parameters
----------
view : OctreeView
Get the OctreeLevel for this view.
Returns
-------
OctreeLevel
The automatically chosen OctreeLevel.
"""
index = self._get_auto_level_index(view)
if index < 0 or index >= self._octree.num_levels:
raise ValueError(f"Invalid octree level {index}")
return self._octree.levels[index]
def _get_auto_level_index(self, view: OctreeView) -> int:
"""Return the automatically selected octree level index for this view.
Parameters
----------
view : OctreeView
Get the octree level index for this view.
Returns
-------
int
The automatically chosen octree level index.
"""
if not view.auto_level:
# Return current level, do not update it.
return self.octree_level
# Find the right level automatically. Choose a level where the texels
# in the octree tiles are around the same size as screen pixels.
# We can do this smarter in the future, maybe have some hysteresis
# so you don't "pop" to the next level as easily, so there is some
# sort of dead zone between levels?
ratio = view.data_width / view.canvas[0]
if ratio <= 1:
return 0 # Show the best we've got!
# Choose the right level...
max_level = self._octree.num_levels - 1
return min(math.floor(math.log2(ratio)), max_level)
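    # Worked example (added note, not in the original): with data_width 4096
    # texels shown on a 1024-pixel canvas, ratio = 4 and floor(log2(4)) = 2,
    # so level 2 (quarter resolution) is chosen, clamped to the deepest level.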
def _get_octree_chunk(self, location: OctreeLocation) -> OctreeChunk:
"""Return the OctreeChunk at his location.
Do not create the chunk if it doesn't exist.
Parameters
----------
location : OctreeLocation
Return the chunk at this location.
Returns
-------
OctreeChunk
The returned chunk.
"""
level = self._octree.levels[location.level_index]
return level.get_chunk(location.row, location.col, create=False)
def on_chunk_loaded(self, request: ChunkRequest) -> bool:
"""Called when an asynchronous ChunkRequest was loaded.
This overrides Image.on_chunk_loaded() fully.
Parameters
----------
request : ChunkRequest
The request for the chunk that was loaded.
Returns
-------
bool
True if the chunk's data was added to the octree.
"""
location = request.location
if location.slice_id != id(self):
# There was probably a load in progress when the slice was changed.
# The original load finished, but we are now showing a new slice.
# Don't consider it error, just ignore the chunk.
LOGGER.debug(
"on_chunk_loaded: wrong slice_id: %s",
location,
)
return False # Do not add the chunk.
octree_chunk = self._get_octree_chunk(location)
if octree_chunk is None:
# This location in the octree does not contain an OctreeChunk.
# That's unexpected, because locations are turned into
# OctreeChunk's when a load is initiated. So this is an error,
# but log it and keep going, maybe some transient weirdness?
LOGGER.error(
"on_chunk_loaded: missing OctreeChunk: %s",
octree_chunk,
)
return False # Did not add the chunk.
LOGGER.debug("on_chunk_loaded: adding %s", octree_chunk)
# Get the data from the request.
incoming_data = request.chunks.get('data')
# Loaded data should always be an ndarray.
assert isinstance(incoming_data, np.ndarray)
        # Add that data to the octree's OctreeChunk. Now the chunk can be drawn.
octree_chunk.data = incoming_data
# Setting data should mean:
assert octree_chunk.in_memory
assert not octree_chunk.needs_load
return True # Chunk was added.
|
[
"noreply@github.com"
] |
HarshCasper.noreply@github.com
|
bbf7d8809a382ad8518997c51aaff650c2ad71b0
|
9e0fd9756bd476a13a38897a577abf967db4e52b
|
/unit_ex25.py
|
e5f8f4656913f9c65c3f756dca799b4e9494889f
|
[] |
no_license
|
mattolsen1/LPTHW
|
0aac05bb720723a41f29f23a93e10880225d5dc6
|
678dea077cc05633dd037ed6df18c7cd765724e1
|
refs/heads/master
| 2021-05-16T02:01:40.877877
| 2012-10-12T20:34:28
| 2012-10-12T20:34:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 688
|
py
|
import unittest
def break_words(stuff):
    """This function will break up words for us."""
    words = stuff.split(' ')
    return words
class MyTest(unittest.TestCase):
    def setUp(self):
        print 'In setUp()'
        # store the function itself; calling break_words() with no argument,
        # as the original did, raises a TypeError
        self.fixture = break_words
    def tearDown(self):
        print 'In tearDown()'
        del self.fixture
    def test_break(self):
        self.assertEqual(['this', 'is', 'a', 'test', 'to', 'show', 'broken', 'words.'],
                         self.fixture('this is a test to show broken words.'))
if __name__ == '__main__':
    unittest.main()
|
[
"Matthew.Olsen@demandmedia.com"
] |
Matthew.Olsen@demandmedia.com
|
bc2c3f7f438f5040a3b9814f99f6844f61d1cd69
|
5fef919bd89d4b2f15de2aa8dd93f8084ae89b37
|
/13.py
|
f248cb8b1c86876f46a16790e69caf2fa39f3da1
|
[] |
no_license
|
C109156209/midterm
|
96236efb5bf6e109107aad471bfbf3d25e35656f
|
553fc628154450893600c01bcabc15a98ae9c5bd
|
refs/heads/main
| 2023-05-31T17:05:29.064520
| 2021-06-18T16:17:09
| 2021-06-18T16:17:09
| 378,205,066
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 89
|
py
|
s=input("輸入一字元為:")
if(s==s[::-1]):
print("YES")
else:
print("NO")
|
[
"noreply@github.com"
] |
C109156209.noreply@github.com
|
9fbd9b8a26c166b4c93cd96da16720ec3c49112e
|
f903ab3b1ffd595cd0a86d4e59f231b1cc16ce75
|
/tests/unit/test_data/test_broker/__init__.py
|
dba74b15c4b6b090c7201c873bdcd9eac8b54a2f
|
[
"Apache-2.0"
] |
permissive
|
refitt/refitt
|
6cd066f3d69159a03c2764e929635897ade0b11a
|
e3f7c1b8647a7dc6b5e1fe2a0af73855284551b6
|
refs/heads/master
| 2023-06-08T06:42:22.604865
| 2022-11-23T02:20:21
| 2022-11-23T02:20:55
| 234,204,719
| 5
| 2
|
Apache-2.0
| 2023-02-16T07:00:39
| 2020-01-16T01:02:22
|
Python
|
UTF-8
|
Python
| false
| false
| 132
|
py
|
# SPDX-FileCopyrightText: 2019-2022 REFITT Team
# SPDX-License-Identifier: Apache-2.0
"""Unit tests for data broker interfaces."""
|
[
"glentner@purdue.edu"
] |
glentner@purdue.edu
|
1dbd575ef3656fb51a8926baf621bf41b688e8f0
|
4d72c086a2390f838c791e83712a95dbc2355408
|
/src/genie/libs/parser/iosxe/tests/test_show_memory.py
|
72435cf9c999814dc4b0b7558c229497c0263dce
|
[
"Apache-2.0"
] |
permissive
|
tahigash/genieparser
|
5c51fd4ae3c09579447886562daf7076ff6a4a7c
|
736581276403685041218868c107e3f57f34a6d6
|
refs/heads/master
| 2020-12-29T06:34:29.148974
| 2020-05-12T18:56:06
| 2020-05-12T18:56:06
| 238,493,860
| 2
| 0
|
Apache-2.0
| 2020-09-30T20:28:14
| 2020-02-05T16:16:59
|
Python
|
UTF-8
|
Python
| false
| false
| 1,966
|
py
|
#!/bin/env python
import unittest
from unittest.mock import Mock
from pyats.topology import Device
from genie.metaparser.util.exceptions import SchemaEmptyParserError,\
SchemaMissingKeyError
from genie.libs.parser.iosxe.show_memory import ShowMemoryStatistics
class test_show_memory_statistics(unittest.TestCase):
dev = Device(name='c3850')
empty_output = {'execute.return_value': ''}
golden_parsed_output = {
"tracekey": "1#f8e3c2db7822c04e58ce2bd2fc7e476a",
"name": {
"processor": {
"total": 856541768,
"free": 501425640,
"largest": 501041348,
"head": "FF86F21010",
"lowest": 499097976,
"used": 355116128
},
"lsmpi_io": {
"total": 6295128,
"free": 824,
"largest": 412,
"head": "FF867C51A8",
"lowest": 824,
"used": 6294304
}
}
}
golden_output = {'execute.return_value': '''\
Tracekey : 1#f8e3c2db7822c04e58ce2bd2fc7e476a
Head Total(b) Used(b) Free(b) Lowest(b) Largest(b)
Processor FF86F21010 856541768 355116128 501425640 499097976 501041348
lsmpi_io FF867C51A8 6295128 6294304 824 824 412'''
}
def test_empty(self):
self.dev = Mock(**self.empty_output)
obj = ShowMemoryStatistics(device=self.dev)
with self.assertRaises(SchemaEmptyParserError):
            obj.parse()
def test_golden(self):
self.maxDiff = None
self.dev = Mock(**self.golden_output)
obj = ShowMemoryStatistics(device=self.dev)
parsed_output = obj.parse()
self.assertEqual(parsed_output, self.golden_parsed_output)
if __name__ == '__main__':
unittest.main()
|
[
"jeaubin@cisco.com"
] |
jeaubin@cisco.com
|
c96b8b0ee7bac6d00f435d4f10d30b4afb403f34
|
6ec49c1fc8c34441aee2a2905f54e5a38046a58c
|
/Marmoset_Pipeline_2019/code/marmoset/maskMarmosetTargetBySlice.py
|
adc6488444fff901f734beb5b5d9da556e6ceba3
|
[] |
no_license
|
bingxinghuo/Registration_marmoset
|
238728544779ec9047b45111b8b9c0d14390a102
|
35d9f7dd217908f1f86fd34b0b897004e41a96c9
|
refs/heads/master
| 2022-05-15T10:35:09.733268
| 2022-04-26T16:51:18
| 2022-04-26T16:51:18
| 235,623,823
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,777
|
py
|
import SimpleITK as sitk
import numpy as np
import scipy.ndimage
import histFunctions
import ndreg3D
import sys
targetfilename = sys.argv[1]
outputfilename = sys.argv[2]
outputtargetfilename = sys.argv[3]
useWhiten = True
maskBeforeSmooth = True
smoothMask = False
greyBackground = True
smoothAtlasAgain = True
blackVentricles = True
multicolorMask = False
target = sitk.ReadImage(targetfilename)
refImg = sitk.ReadImage(targetfilename)
origRefImg = sitk.ReadImage(targetfilename)
inflectionpoint = histFunctions.getInflectionPoint(target)
mymask = ndreg3D.imgMakeMask(target,threshold=inflectionpoint)
mymaskarray = sitk.GetArrayFromImage(mymask)
refImg = ndreg3D.imgMask(refImg,mymask)
# now go slice by slice to remove the ventricles from the mask
mymaskarray = sitk.GetArrayFromImage(mymask)
structure = [[1,1,1],[1,1,1],[1,1,1]]
#241 - might need to add a zero onto zc1trunc for the case where nothing needs to be cut
labelarray = -1*np.ones(mymaskarray.shape)
for i in range(0,origRefImg.GetSize()[1]):
if np.unique(sitk.GetArrayFromImage(refImg[:,i,:])).shape[0] < 2:
temparray = np.ones((labelarray.shape[0],labelarray.shape[2]))
temparray[2:temparray.shape[0]-2,2:temparray.shape[1]-2] = np.zeros((temparray.shape[0]-4, temparray.shape[1]-4))
labelarray[:,i,:] = temparray
continue
else:
inflectionpoint = histFunctions.getInflectionPointRANSAC(refImg[:,i,:])
myslicemask = ndreg3D.imgMakeSliceMask(refImg[:,i,:],threshold=inflectionpoint, openingRadiusMM = 0.04)
# look for connected components, filter out anything that doesn't have enough pixels
myslicemaskarray = sitk.GetArrayFromImage(myslicemask)
myslicemaskarray = myslicemaskarray + 1
myslicemaskarray[np.where(myslicemaskarray==2)[0],np.where(myslicemaskarray==2)[1]] = 0
label, num_features = scipy.ndimage.measurements.label(myslicemaskarray,structure)
for ii in np.add(range(num_features-1),2):#at this point, 1 = background, 0 = brain
if len(np.where(label==ii)[0]) < 8:
label[np.where(label==ii)[0], np.where(label==ii)[1]] = 0
else:
label[np.where(label==ii)[0], np.where(label==ii)[1]] = 1
# swap back to background = 0, brain = 1
labelarray[:,i,:] = label
# now search for connected components (other than background obviously) and accept the big ones
labelarray[np.where(labelarray==-1)] = 0
labelarrayint = np.copy(labelarray.astype(int))  # np.int is deprecated; plain int is equivalent here
labelarrayimg = sitk.VotingBinaryHoleFilling(sitk.GetImageFromArray(labelarrayint), radius=(1,1,1), majorityThreshold=1, foregroundValue = 0., backgroundValue = 1.)
labelarrayimgout = np.copy(sitk.GetArrayFromImage(labelarrayimg))
labelarrayimgout = labelarrayimgout + 1
labelarrayimgout[np.where(labelarrayimgout==2)] = 0
structure = [[[1,1,1],[1,1,1],[1,1,1]],[[1,1,1],[1,1,1],[1,1,1]],[[1,1,1],[1,1,1],[1,1,1]]]
label, num_features = scipy.ndimage.measurements.label(sitk.GetArrayFromImage(labelarrayimg),structure)
for i in range(num_features):
print(str(i) + "," + str(np.where(label==i)[0].shape[0]))
# now remove all large regions from the original mask (mymaskarray). Ignore regions 0 and 1 because they are brain and background
for i in range(2,num_features):
if np.where(label==i)[0].shape[0] > 250:
mymaskarray[np.where(label==i)] = 0
# after this phase, try replacing the slices in mymaskarray with the slices in labelarrayimg if there are not too many pixels different
pixelsperslice = mymaskarray[:,0,:].shape[0] * mymaskarray[:,0,:].shape[1]
mymaskarraycurated = -1*np.ones(mymaskarray.shape)
for i in range(mymaskarray.shape[1]):
if np.unique(sitk.GetArrayFromImage(origRefImg[:,i,:])).shape[0] < 2:
mymaskarraycurated[:,i,:] = np.copy(mymaskarray[:,i,:])
else:
if np.where((mymaskarray[:,i,:] == labelarrayimgout[:,i,:])==False)[0].shape[0]/float(pixelsperslice) < 0.18:
mymaskarraycurated[:,i,:] = np.copy(labelarrayimgout[:,i,:])
else:
mymaskarraycurated[:,i,:] = np.copy(mymaskarray[:,i,:])
# if i want a multicolor mask then remove regions again
if multicolorMask == True:
for i in range(2,num_features):
if np.where(label==i)[0].shape[0] > 250:
mymaskarraycurated[np.where(label==i)] = -1
# set brain to 2, background to 1, ventricles to 0
mymaskarraycurated[np.where(mymaskarraycurated==1)] = 2
mymaskarraycurated[np.where(mymaskarraycurated==0)] = 1
mymaskarraycurated[np.where(mymaskarraycurated==-1)] = 0
if blackVentricles == True:
mymcmask = np.copy(mymaskarraycurated)
for i in range(2,num_features+1):
if np.where(label==i)[0].shape[0] > 250:
mymcmask[np.where(label==i)] = -1
# set brain to 2, background to 1, ventricles to 0
mymcmask[np.where(mymcmask==1)] = 2
mymcmask[np.where(mymcmask==0)] = 1
mymcmask[np.where(mymcmask==-1)] = 0
# add something here to do ventricle masking slice by slice. maybe in both coronal and sagittal planes
structure = [[1,1,1],[1,1,1],[1,1,1]]
for i in range(mymcmask.shape[1]):
label, num_features = scipy.ndimage.measurements.label(np.squeeze(-1.0*(mymaskarraycurated[:,i,:]-1)),structure) # here 1 = background, 0 = brain, everything else = ventricles
for ii in range(2,num_features+1):
# check if the label touches the boundary of the image
if np.max(np.where(label==ii)[0]) == label.shape[0]-1 or np.max(np.where(label==ii)[1]) == label.shape[1]-1 or np.min(np.where(label==ii)[0]) == 0 or np.min(np.where(label==ii)[1]) == 0:
continue
if len(np.where(label==ii)[0]) > 8:
tempslice = np.squeeze(mymcmask[:,i,:])
tempslice[np.where(label==ii)] = 0
mymcmask[:,i,:] = tempslice
for i in range(mymcmask.shape[0]):
label, num_features = scipy.ndimage.measurements.label(np.squeeze(-1.0*(mymaskarraycurated[i,:,:]-1)),structure) # here 1 = background, 0 = brain, everything else = ventricles
for ii in range(2,num_features+1):
# check if the label touches the boundary of the image
if np.max(np.where(label==ii)[0]) == label.shape[0]-1 or np.max(np.where(label==ii)[1]) == label.shape[1]-1 or np.min(np.where(label==ii)[0]) == 0 or np.min(np.where(label==ii)[1]) == 0:
continue
if len(np.where(label==ii)[0]) > 8:
tempslice = np.squeeze(mymcmask[i,:,:])
tempslice[np.where(label==ii)] = 0
mymcmask[i,:,:] = tempslice
# save for STS
mymcmaskimg = sitk.GetImageFromArray(mymcmask)
mymcmaskimg.SetDirection((1,0,0,0,1,0,0,0,1))
mymcmaskimg.SetOrigin((0,0,0))
mymcmaskimg.SetSpacing(origRefImg.GetSpacing())
#sitk.WriteImage(mymcmaskimg,outputdirectoryname + '/' + patientnumber + '_mymcmask.img')
if smoothMask == True:
mymaskarraycuratedimg = sitk.GetImageFromArray(mymaskarraycurated.astype(np.float32))
mymaskarraycuratedimg.SetDirection((1,0,0,0,1,0,0,0,1))
mymaskarraycuratedimg.SetOrigin((0,0,0))
mymaskarraycuratedimg.SetSpacing(origRefImg.GetSpacing())
ndreg3D.imgWrite(sitk.SmoothingRecursiveGaussian(mymaskarraycuratedimg,0.04),outputfilename)
else:
mymaskarraycuratedimg = sitk.GetImageFromArray(mymaskarraycurated.astype(np.int8))
mymaskarraycuratedimg.SetDirection((1,0,0,0,1,0,0,0,1))
mymaskarraycuratedimg.SetOrigin((0,0,0))
mymaskarraycuratedimg.SetSpacing(origRefImg.GetSpacing())
ndreg3D.imgWrite(mymaskarraycuratedimg,outputfilename)
#mymaskarrayfilled = scipy.ndimage.morphology.binary_fill_holes(mymaskarray,np.ones((5,5,5)))
#mymaskarrayfilledimg = sitk.GetImageFromArray(mymaskarrayfilled.astype('int8'))
#mymaskarrayfilledimg.SetSpacing(target.GetSpacing())
#sitk.WriteImage(mymaskarrayfilledimg,outputfilename)
targetmasked = ndreg3D.imgMask(target,mymaskarraycuratedimg)
sitk.WriteImage(targetmasked,outputtargetfilename)
|
[
"bingxing.huo@gmail.com"
] |
bingxing.huo@gmail.com
|
330810fd3175620b8b684747706ba776ca8b2d9d
|
10539e8f01ad807e687b9d42cdd57454f0d290b2
|
/高频题目/myStack.py
|
bda8f9d435be4e80f7c1fee0cf7b3dbcb55ac4f3
|
[] |
no_license
|
Percygu/my_leetcode
|
680a0d28f62ccbb023a458bb6378693b561968a0
|
24e20e1389fabfb8506b09277cd8e5d5a8d3c6cd
|
refs/heads/master
| 2022-09-12T04:38:43.480510
| 2022-09-06T09:23:28
| 2022-09-06T09:23:28
| 231,324,677
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 805
|
py
|
'''
Implement a stack using two queues
(two plain Python lists serve as the FIFO queues here)
'''
class MyStack:
def __init__(self):
self.list1 = []
self.list2 = []
def push(self, x: int) -> None:
if not self.list1:
self.list1.append(x)
return
while self.list1:
t = self.list1[0]
self.list2.append(t)
del self.list1[0]
self.list1.append(x)
while self.list2:
t = self.list2[0]
self.list1.append(t)
del self.list2[0]
def pop(self) -> int:
if not self.list1:
return -1
t = self.list1[0]
del self.list1[0]
return t
def top(self) -> int:
return self.list1[0]
    def empty(self) -> bool:
        return len(self.list1) == 0
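if __name__ == '__main__':
    # Minimal usage sketch (added; not part of the original file): LIFO order.
    st = MyStack()
    st.push(1)
    st.push(2)
    assert st.top() == 2
    assert st.pop() == 2
    assert not st.empty()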
|
[
"claregu@tencent.com"
] |
claregu@tencent.com
|
465a5a5637b1fc79f04eab5d1b088e2173e28a97
|
a1d0bad7257ddc8b0b66c3be8ed74704ff48baf5
|
/utils.py
|
ba2ae1ff65915613cfa656c6e1c8a1c7878be5a8
|
[
"MIT"
] |
permissive
|
BioMedicalBigDataMiningLab/AMMGC
|
565264db3fe79674497af184de912b41385829a2
|
09b9e4760375f6b56c1bd5b1cbc2084ed3d85692
|
refs/heads/main
| 2023-08-18T05:50:06.294507
| 2021-10-21T01:55:30
| 2021-10-21T01:55:30
| 419,549,183
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,351
|
py
|
from __future__ import division
from __future__ import print_function
import time
import numpy as np
import scipy.sparse as sp
import networkx as nx
import tensorflow.compat.v1 as tf
from sklearn.metrics import roc_auc_score
from sklearn.metrics import average_precision_score
import random
#
# flags = tf.app.flags
# FLAGS = flags.FLAGS
def construct_self_feed_dict(emb, train_drug_miRNA_matrix, positive_mask, negative_mask, placeholders):
"""Construct feed dictionary."""
feed_dict = dict()
feed_dict.update({placeholders['emb']: emb})
feed_dict.update({placeholders['adj_label']: train_drug_miRNA_matrix})
feed_dict.update({placeholders['positive_mask']: positive_mask})
feed_dict.update({placeholders['negative_mask']: negative_mask})
return feed_dict
def construct_attention_feed_dict(emb, train_drug_miRNA_matrix, positive_mask, negative_mask, placeholders):
"""Construct feed dictionary."""
feed_dict = dict()
feed_dict.update({placeholders['emb'][i]: emb[i] for i in range(len(emb))})
feed_dict.update({placeholders['adj_label']: train_drug_miRNA_matrix})
feed_dict.update({placeholders['positive_mask']: positive_mask})
feed_dict.update({placeholders['negative_mask']: negative_mask})
return feed_dict
def constructNet(miRNA_dis_matrix):
miRNA_matrix = np.mat(np.zeros((miRNA_dis_matrix.shape[0], miRNA_dis_matrix.shape[0]), dtype=np.int8))
dis_matrix = np.mat(np.zeros((miRNA_dis_matrix.shape[1], miRNA_dis_matrix.shape[1]), dtype=np.int8))
mat1 = np.hstack((miRNA_matrix, miRNA_dis_matrix))
mat2 = np.hstack((miRNA_dis_matrix.T, dis_matrix))
return np.vstack((mat1, mat2))
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def normalize_features(feat):
degree = np.asarray(feat.sum(1)).flatten()
# set zeros to inf to avoid dividing by zero
degree[degree == 0.] = np.inf
degree_inv = 1. / degree
degree_inv_mat = sp.diags([degree_inv], [0])
feat_norm = degree_inv_mat.dot(feat)
return feat_norm
def matrix_normalize(similarity_matrix):
similarity_matrix[np.isnan(similarity_matrix)] = 0
if similarity_matrix.shape[0] == similarity_matrix.shape[1]:
for i in range(similarity_matrix.shape[0]):
similarity_matrix[i, i] = 0
for i in range(200):
            D = np.diag(np.array(np.sum(similarity_matrix, axis=1)).flatten())  # put each row's sum on the diagonal
            D = np.linalg.pinv(np.sqrt(D))  # take the square root, then the pseudo-inverse
similarity_matrix = D * similarity_matrix * D
else:
for i in range(similarity_matrix.shape[0]):
if np.sum(similarity_matrix[i], axis=1) == 0:
similarity_matrix[i] = similarity_matrix[i]
else:
similarity_matrix[i] = similarity_matrix[i] / np.sum(similarity_matrix[i], axis=1)
return similarity_matrix
def masked_bilinearsigmoid_cross_entropy(preds, labels, mask, negative_mask):
"""Softmax cross-entropy loss with masking."""
loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=preds, labels=labels)
mask += negative_mask
mask = tf.cast(mask, dtype=tf.float32)
# mask /= tf.reduce_mean(mask)
mask = tf.reshape(mask, shape=[79924])
loss *= mask
return tf.reduce_mean(loss)
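# Note (added): the hard-coded mask length 79924 used above and below is
# 106 * 754, the flattened size of the drug-miRNA adjacency matrix.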
def gcn_masked_softmax_cross_entropy(preds, labels, positive_mask, negative_mask, pos_weight):
"""Softmax cross-entropy loss with masking."""
loss = tf.nn.weighted_cross_entropy_with_logits(targets=labels, logits=preds, pos_weight=pos_weight)
# preds = tf.cast(preds, tf.float32)
# labels = tf.cast(labels, tf.float32)
# loss = tf.square(preds - labels)
positive_mask += negative_mask
# print(mask)
mask = tf.cast(positive_mask, dtype=tf.float32)
# mask /= tf.reduce_mean(mask)
mask = tf.reshape(mask, shape=[79924])
loss *= mask
return tf.reduce_mean(loss)
def generate_mask(train_drug_miRNA_matrix, N):
num = 0
mask = np.zeros(train_drug_miRNA_matrix.shape)
while (num < 1 * N):
a = random.randint(0, 105)
b = random.randint(0, 753)
if train_drug_miRNA_matrix[a, b] != 1 and mask[a, b] != 1:
mask[a, b] = 1
num += 1
mask = np.reshape(mask, [-1, 1])
return mask
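# Note (added): generate_mask samples N negative (zero) entries uniformly at
# random; the randint bounds 0..105 and 0..753 assume the fixed 106 x 754
# drug-miRNA matrix shape.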
def load_data(train_arr, test_arr):
"""Load data."""
labels = np.loadtxt("drug-miRNA.txt")
logits_test = sp.csr_matrix((labels[test_arr, 2], (labels[test_arr, 0] - 1, labels[test_arr, 1] - 1)),
shape=(106, 754)).toarray()
logits_test = logits_test.reshape([-1, 1])
# logits_test = np.hstack((logits_test,1-logits_test))
logits_train = sp.csr_matrix((labels[train_arr, 2], (labels[train_arr, 0] - 1, labels[train_arr, 1] - 1)),
shape=(106, 754)).toarray()
logits_train = logits_train.reshape([-1, 1])
# logits_temp_train = logits_train
#
# train_list = []
# train_list.append(logits_temp_train)
    train_mask = np.array(logits_train[:, 0], dtype=bool).reshape([-1, 1])
    test_mask = np.array(logits_test[:, 0], dtype=bool).reshape([-1, 1])
# train_mask = np.array(logits_train[:, 0]).reshape([-1, 1])
# test_mask = np.array(logits_test[:, 0]).reshape([-1, 1])
M = sp.csr_matrix((labels[train_arr, 2], (labels[train_arr, 0] - 1, labels[train_arr, 1] - 1)),
shape=(106, 754)).toarray()
return logits_train, logits_test, train_mask, test_mask, labels
def weight_variable_glorot(input_dim, output_dim, name=""):
init_range = np.sqrt(6.0 / (input_dim + output_dim))
initial = tf.random_uniform(
[input_dim, output_dim], minval=-init_range,
maxval=init_range, dtype=tf.float32)
return tf.Variable(initial, name=name)
def dropout_sparse(x, keep_prob, num_nonzero_elems):
noise_shape = [num_nonzero_elems]
random_tensor = keep_prob
random_tensor += tf.random_uniform(noise_shape)
dropout_mask = tf.cast(tf.floor(random_tensor), dtype=tf.bool)
pre_out = tf.sparse_retain(x, dropout_mask)
return pre_out * (1. / keep_prob)
def sparse_to_tuple(sparse_mx):
if not sp.isspmatrix_coo(sparse_mx):
sparse_mx = sparse_mx.tocoo()
coords = np.vstack((sparse_mx.row, sparse_mx.col)).transpose()
values = sparse_mx.data
shape = sparse_mx.shape
return coords, values, shape
def preprocess_graph(adj):
adj = sp.coo_matrix(adj)
adj_ = adj + sp.eye(adj.shape[0])
rowsum = np.array(adj_.sum(1))
degree_mat_inv_sqrt = sp.diags(np.power(rowsum, -0.5).flatten())
adj_normalized = adj_.dot(degree_mat_inv_sqrt).transpose().dot(degree_mat_inv_sqrt).tocoo()
return sparse_to_tuple(adj_normalized)
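# Note (added): preprocess_graph computes the symmetric GCN normalization
# D^{-1/2} (A + I) D^{-1/2} of Kipf & Welling (2017) and returns it in the
# (coords, values, shape) tuple form expected by a tf.SparseTensor feed.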
def construct_feed_dict(adj_norm, adj_label, features, positive_mask, negative_mask, placeholders):
feed_dict = dict()
feed_dict.update({placeholders['features']: features})
feed_dict.update({placeholders['adj_norm']: adj_norm})
feed_dict.update({placeholders['adj_label']: adj_label})
feed_dict.update({placeholders['positive_mask']: positive_mask})
feed_dict.update({placeholders['negative_mask']: negative_mask})
return feed_dict
def masked_cross_entropy(preds, labels, label_mask, test_mask):
"""Accuracy with masking."""
preds = tf.cast(preds, tf.float32)
labels = tf.cast(labels, tf.float32)
error = tf.nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=preds)
# pos_weight = 1
# error = tf.nn.weighted_cross_entropy_with_logits(logits=preds, targets=labels, pos_weight=pos_weight)
label_mask += test_mask
mask = tf.cast(label_mask, dtype=tf.float32)
mask = tf.reshape(mask, shape=[79924])
error *= mask
return tf.sqrt(tf.reduce_mean(error))
def masked_accuracy(preds, labels, label_mask, test_mask):
preds = tf.cast(preds, tf.float32)
labels = tf.cast(labels, tf.float32)
error = tf.square(preds - labels)
label_mask += test_mask
mask = tf.cast(test_mask, dtype=tf.float32)
mask = tf.reshape(mask, shape=[79924])
error *= mask
return tf.sqrt(tf.reduce_mean(error))
|
[
"noreply@github.com"
] |
BioMedicalBigDataMiningLab.noreply@github.com
|
3980e80dd80ef5cadf3756af2e63b515ee742f92
|
cbfeba3a2132bcf70f3e0abe4bc265af792bfe34
|
/analyse.py
|
30946ae0e1e00e37375d045fc6d19c62323b233d
|
[] |
no_license
|
ADlead/AlittleAnalyse
|
e737e380f805976946798fcd873ea670abebe316
|
28885f8857189df3fecbf58aaa45a733aac90a33
|
refs/heads/master
| 2020-08-02T07:32:10.534828
| 2019-09-27T08:50:23
| 2019-09-27T08:50:23
| 211,276,006
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,845
|
py
|
import jieba
import wordcloud
import re
import pandas as pd
import numpy as np
from PIL import Image
from pyecharts import Map, Bar
# Generate a word-cloud image from a text file
def generatewordcloud(filename):
    # font used for the rendered words
font = 'msyh.ttc'
    # read the background/mask image
img = Image.open('data/image.jpg')
img_array = np.array(img)
    # read the text
f = open(filename, 'r', encoding='utf-8')
content = f.read().replace(' ','').strip()
# print(content)
words = jieba.lcut(content)
words = ' '.join(words)
wc = wordcloud.WordCloud(
background_color='white',
mask=img_array,
font_path=font
)
wc.generate_from_text(words)
wc.to_file('dazhong-五山.png')
class AlittleAnalyse:
def __init__(self):
self.df = pd.read_csv('alittle.csv', encoding='gbk')
self.df = self.df.dropna(axis=0, how='all')
# print(self.df)
self.address = list(self.df.address)
# print(self.address)
self.districts_list = [each[:3] for each in self.address]
districts_counts = pd.value_counts(self.districts_list)
self.areas = districts_counts.index
self.value = list(districts_counts)
def generatemap(self):
        # Map of 一点点 (Alittle) shops across Guangzhou's districts, built from the per-district counts
map = Map('一点点在广州市的分布', width=1200, height=1000)
map.add('一点点在广州市的分布', self.areas, self.value, maptype='广州', visual_range=[min(self.value), max(self.value)],
is_map_symbol_show=False,
is_label_show=True, is_visualmap=True, label_text_color='#509')
# map.show_config()
map.render('一点点在广州市的分布.html')
def generatebar(self):
        # Bar chart comparing the number of 一点点 shops per district
bar = Bar('广州各个区一点点的对比',width=1000,height=800)
bar.add('广州各个区一点点数量的对比',self.areas, self.value,
is_label_show=True,
mark_line=['min','max'], mark_point=['average'],
xaxis_interval=0,
xaxis_rotate=30,yaxis_rotate=30,
)
bar.render('广州各个区一点点数量的对比.html')
def rank_bar(self):
bar = Bar('评分前10的一点点', width=1000, height=900)
pd_rank = self.df['rank']
pd_title = self.df['title']
# print(pd_rank)
rank_series = pd.Series(data=list(pd_rank), index=pd_title)
        rank_series = rank_series.sort_values(ascending=False)  # sort so the chart really shows the 10 highest-rated shops
        rank_ten = rank_series[:10]
bar.add('评分前10的一点点', rank_ten.index, rank_ten,
is_label_show=True,
xaxis_interval=0,
xaxis_rotate=20, yaxis_rotate=20,
)
bar.render('评分前10的一点点.html')
pass
def comments_rank(self):
bar = Bar('评论数最高前10的一点点', width=1000, height=900)
pd_comments_num = self.df['comments_num']
pd_title = self.df['title']
comments_num_series = pd.Series(data=list(pd_comments_num), index=pd_title)
comments_num_series = comments_num_series.sort_values(ascending=False)
rank_ten = comments_num_series[:10]
bar.add('评论数最高前10的一点点', rank_ten.index, rank_ten,
is_label_show=True,
xaxis_interval=0,
xaxis_rotate=20, yaxis_rotate=20,
)
bar.render('评论数最高前10的一点点.html')
pass
if __name__ == '__main__':
# generatewordcloud('comment-五山.txt')
alittle = AlittleAnalyse()
alittle.generatemap()
# alittle.generatebar()
# alittle.rank_bar()
# alittle.comments_rank()
pass
|
[
"651998382@qq.com"
] |
651998382@qq.com
|
4049908ad1eb290d1d43f4777d5dc90b9e7ded3f
|
7fbec69c220ea20aacf694f6c2025f265cd25d19
|
/Project/backend/api/migrations/0001_initial.py
|
3050c690240b46851c23d15a3659e34a8f83f064
|
[] |
no_license
|
LeonJelsma/EEC-Public
|
7113e212fe1da6f0139e8f84fb6bc72994bfb062
|
e5b5d12a5ae295764a99062c6e40d79fd6a30299
|
refs/heads/master
| 2023-02-21T15:09:35.405803
| 2021-01-26T14:41:43
| 2021-01-26T14:41:43
| 333,094,940
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,490
|
py
|
# Generated by Django 3.1.3 on 2021-01-14 14:54
import api.models
from django.db import migrations, models
import django.db.models.deletion
import django_enumfield.db.fields
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Room',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
],
),
migrations.CreateModel(
name='Sensor',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50, unique=True)),
('type', django_enumfield.db.fields.EnumField(enum=api.models.SensorType)),
('activated', models.BooleanField()),
('key', models.UUIDField(default=uuid.uuid4, editable=False)),
('room', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.room')),
],
),
migrations.CreateModel(
name='TempMeasurements',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('temperature', models.FloatField()),
('sensor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.sensor')),
],
),
migrations.CreateModel(
name='PowerMeasurements',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('wattage', models.FloatField()),
('sensor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.sensor')),
],
),
migrations.CreateModel(
name='AmbientMeasurements',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('air_quality', models.FloatField()),
('temperature', models.FloatField()),
('humidity', models.FloatField()),
('sensor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.sensor')),
],
),
]
|
[
"leonjelsma@gmail.com"
] |
leonjelsma@gmail.com
|
47cda0955cd4e9f9f85353add7ebd1f3eca8fde1
|
1ac0eb03b9b6650b1841e8a308768bc3af01fa02
|
/bborobotfix.py
|
9d803408c9b60dcba846693314bd003a3cdf6c68
|
[] |
no_license
|
criptik/bbotime
|
85b035c8084c9825bfffd7847b9cbc4e92752c50
|
bc84db6b577c73d5ec2f79175a196f265d17c0dd
|
refs/heads/master
| 2023-08-13T23:37:10.419642
| 2021-10-14T22:16:18
| 2021-10-14T22:16:18
| 286,582,517
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,776
|
py
|
import sys
import time
import os
import itertools
from pprint import pprint
import bbobase
gibName = 'GiB'
vsstr = ' vs. '
robotData = {}
class BboRobotFixer(object):
def __init__(self, args, travTableData):
self.args = args
self.travTableData = travTableData
self.initRobotData()
# this routine is called if robotScores are supplied,
# use those to try to differentiate between two or more robot pairs
def robotFix(self):
#first check that all supplied robot scores are unique, otherwise we can't deduce anything
if len(set(self.args.robotScores)) != len(self.args.robotScores):
print('Error: Robot Scores must all be unique')
sys.exit(1)
for bdnum in range (1, self.args.boards + 1):
# place rows in big table indexed by boardnumber and North and East names
for row in self.travTableData[bdnum]:
self.buildRobotData(bdnum, row)
if self.args.debug:
print('----- snapshot of robotData ----')
print(robotData)
# this maps a robot score to a list of possible keysets
# ideally this maps to 1 (0 or >1 would be an error)
robScoreKeyLists = {}
for robscore in self.args.robotScores:
robScoreKeyLists[robscore] = []
allLegalKeylists = self.getAllLegalRobotKeylists()
for keylist in allLegalKeylists:
totScore = self.getScoreAllBoards(keylist)
# now see if it matches any total robotScores
# and put it in robScoreKeyLists if it does
for robscore in self.args.robotScores:
if totScore == robscore:
robScoreKeyLists[robscore].append(keylist)
# now check that each robscore does appear exactly once in the robScoreKeyLists
# Note: on success need to eventually go back thru self.travTableData and change GiB names
errorExit = False
for robscore in self.args.robotScores:
keylistArray = robScoreKeyLists[robscore]
if len(keylistArray) == 0:
print(f'Error: no keylists combos match for robot score {robscore}')
errorExit = True
elif len(keylistArray) > 1:
# see if we have a special case where we can just pick one of two
chosenKeylist = self.checkKeylistDiffs(keylistArray, robscore)
if chosenKeylist is not None:
self.fixRobotNamesInTravs(robscore, chosenKeylist)
else:
print(f'Error: multiple keylists combos match for robot score {robscore}')
for keylist in keylistArray:
pprint(keylist)
errorExit = True
else:
# exactly one entry in the list
# fix up the robotnames to be unique
self.fixRobotNamesInTravs(robscore, keylistArray[0])
if errorExit:
sys.exit(1)
def initRobotData(self):
for rndnum in range(1, int(self.args.boards/self.args.bpr) + 1):
robotData[rndnum] = {}
def addRobotScores(self, bdnum, row, dir):
# robotData will be keyed by roundnum and oppName
# and the direction which helps if robot is playing robot
rndnum = int((bdnum-1)/self.args.bpr) + 1
oppdir = 'East' if dir == 'North' else 'North'
key = f'{dir}{vsstr}{row[oppdir].lower()}'
if robotData[rndnum].get(key) == None:
robotData[rndnum][key] = []
# add the score
fscore = float(row['Score'][:-1]) # strip % sign off end
if dir == 'East':
fscore = 100.0 - fscore
robotData[rndnum][key].append(fscore)
# print(bdnum, dir, robotData)
def buildRobotData(self, bdnum, row):
# only do this if one of the two pairs is a robot pair
for dir in ['North', 'East']:
if row[dir] == gibName and row[bbobase.partnerDir[dir]] == gibName:
self.addRobotScores(bdnum, row, dir)
def robKeyOppNamesUnique(self, keylist):
oppMap = {}
for key in keylist:
oppname = key.split(vsstr)[1]
if oppMap.get(oppname) is None:
oppMap[oppname] = 1
else:
return False
# if we get this far, success
return True
def getAllLegalRobotKeylists(self):
# use itertools to get all the combinations
keysets = []
for rndnum in range(1, int(self.args.boards/self.args.bpr) + 1):
keysets.append(list(robotData[rndnum].keys()))
if self.args.debug:
pprint(keysets)
allCombos = list(itertools.product(*keysets))
allLegalKeylists = []
for keylist in allCombos:
# first make sure all the opponent names are unique across rounds
# and if so, combine all the scores for all rounds into one list so we can avg it
if self.robKeyOppNamesUnique(keylist):
allLegalKeylists.append(keylist)
return allLegalKeylists
def getScoreAllBoards(self, keylist):
# for this keylist, combine all the scores for all rounds into one list so we can avg it
rndnum = 1
scores = []
for key in keylist:
scores.extend(robotData[rndnum][key])
rndnum += 1
avg = round(sum(scores) / len(scores), 2)
return avg
def fixRobotNamesInTravs(self, robscore, keylist):
print('robscore=', robscore)
pprint(keylist)
rndnum = 1
for key in keylist:
for bdnum in range(((rndnum-1) * self.args.bpr) + 1, (rndnum * self.args.bpr) + 1):
table_data = self.travTableData[bdnum]
# find the row that has robotName in expected direction
# and playing expected opp
(direction, oppname) = key.split(vsstr)
oppdir = 'East' if direction == 'North' else 'North'
rowsChanged = 0
if self.args.debug:
print(f'bdnum {bdnum}, {robscore}, {key}')
for row in table_data:
# todo: make this more robust if players start with the same substring
if row[direction].startswith(gibName) and row[oppdir].lower().startswith(oppname):
row[direction] = f'{gibName}-{robscore}'
parddir = bbobase.partnerDir[direction]
row[parddir] = f'{gibName}-{robscore}-pard'
rowsChanged += 1
if self.args.debug:
print(f'after: {rowsChanged} ', end='')
pprint(row)
assert(rowsChanged == 1)
rndnum += 1
def checkKeylistDiffs(self, keylistArray, robscore):
if len(keylistArray) > 2:
return None
# see if only differ in one key
(keylist1, keylist2) = keylistArray
numDiffs = 0
for (key1, key2) in zip(keylist1, keylist2):
if key1 != key2:
numDiffs += 1
(dir1, opp1) = key1.split(vsstr)
(dir2, opp2) = key2.split(vsstr)
if opp1 == 'gib' and opp2 == 'gib':
# difference is resolvable, return one based on our position
# in the self.args.robotScores array
scorepos = self.args.robotScores.index(robscore)
candidate = keylistArray[scorepos]
if numDiffs == 1:
print('picked keylist which differed insignificantly')
return candidate
else:
return None
|
[
"tom.deneau@gmail.com"
] |
tom.deneau@gmail.com
|
c61144eb7f25628c20e8d28c3d8594103e3f8761
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2665/60700/255557.py
|
db2e4ffd1c9d52076916379745902972866bd4fa
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 419
|
py
|
def score(n, z):
c = 0
while z != 1:
if n % z == 0:
n -= 1
z -= 1
c += 1
else:
z -= 1
return c
tests = int(input())
nums = []
for i in range(tests):
nums.append(input())
for i in nums:
num = i.split(' ')
X = int(num[0])
Y = int(num[1])
Z = int(num[2])
c = [score(X, Z), score(Y, Z)]
print(str(c[0])+' '+str(c[1]))
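# Worked example (added sketch): score(6, 3) decrements z from 3 to 1;
# 6 % 3 == 0, so n and z both drop and c becomes 1, then 5 % 2 != 0, so
# score(6, 3) returns 1 while score(5, 3) returns 0 -> output "1 0".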
[authors: 1069583789@qq.com]
--- next file: Readdeo/Mega-Auto-Converter :: /preferences.py (no_license, Python, 915 bytes) ---
#Facebook user's ID who receives the sent messages
FB_UID = XXXXX
#Facebook account credentials. This account sends the messages.
fb_email = "XXXXX"
fb_password = "XXXXX"
#Mega.co.nz account credentials
mega_email = "XXXXX"
mega_password = "XXXXX"
# if test_mode is True, the script will use separate test folders in Mega to do the job
test_mode = False
# if this is true, the script will notify you with Facebook messages
send_FB_message = False
mega_folders_test = {'converted':'TEST_CONVERTED', 'raw': 'TEST', 'finished': 'TESTFINISHED',
'cwo': 'TEST_CWO', 'skipped': 'SKIPPED', 'downloaded': 'TEST_DOWNLOADED'}
mega_folders = {'converted':'CONVERTED', 'raw': 'RAW', 'finished': 'FINISHED',
'cwo': 'CURRENTLY_WORKING_ON', 'skipped': 'SKIPPED', 'downloaded': 'DOWNLOADED'}
free_space_percent = 50
# encoding variables for ffmpeg
convert_width_ratio = 0.3
codec = "libx265"
[authors: readdeo@citromail.hu]
--- next file: Yojanpardo/cost_center :: /products/models.py (no_license, Python, 448 bytes) ---
from django.db import models
# Create your models here.
class Product(models.Model):
"""docstring for Product"""
name = models.CharField(max_length=30)
description = models.TextField(max_length=255,blank=True,null=True)
image = models.ImageField(blank=True,null=True)
quantity = models.IntegerField()
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
def __str__(self):
return self.name
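A hypothetical 'python manage.py shell' session against this model; the field values are invented:

from products.models import Product

p = Product.objects.create(name='Widget', quantity=10)
print(p)        # __str__ returns the name: 'Widget'
p.quantity = 8
p.save()        # auto_now refreshes 'modified'; 'created' stays fixed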
[authors: Yojan.Pardo@gmail.com]
--- next file: purivikas/stepfunc-data-science-course-work :: /day12.py (no_license, Python, 1719 bytes) ---
# Functional Programming
# Hadoop - FaaS
my_nums = range(100)
my_new_num = []
for i in my_nums:
my_new_num.append( (i+1)*(i+1))
my_new_num = [ (i+1)*(i+1) for i in my_nums ]
def weired_sqr(n):
return (n+1)*(n+1)
my_new_num = [weired_sqr(m) for m in my_nums if (weired_sqr(m) % 2 == 0) and (weired_sqr(m) %3 == 0)]
student_jobs = [('ahmed','teacher') , ('ben','hacker') ,('tom','trader'),('vikas','trader'),('alex','trader')]
concise_jobs = [(u[0],v[:3]) for (u,v) in student_jobs]
my_string = 'quick brown fox jumps over the lazy dog.'
# Build, for every qualifying word, the tuple (UPPERCASE word, length, last char);
# words whose second letter is 'r' or 'o' are filtered out.
new_string = [ ( i.upper() ,len(i),i[-1]) for i in my_string.split() if i[1] != 'r' and i[1] != 'o']
new_string = [ ( i.upper() ,len(i),i[-1]) for i in my_string.split() if ( 'r' not in i) and ('o' not in i)]
# Map Reduce Filter -
# Mapping value to function ...
def weired_sqr(n):
return (n+1)*(n+1)
list( map(weired_sqr,range(10)))
# Downside of map with a named function: it must be defined first.
# Anonymous alternative with lambda:
list ( map( lambda x: (x+1)*(x+1), [1,2,3,4,5] ))
list ( map( lambda x: (x.upper(),len(x),x[-1]), 'quick brown fox jumps over the lazy dog'.split() ))
list ( map( lambda x: (x,x+1), [1,2,3,4,5] ))
list ( map( lambda x: (x,x+1), range(10) ))
def my_criteria(n):
if ( n % 2 == 0 ) and ( n % 3 == 0):
return True
return False
list(filter(my_criteria,range(0,100)))
my_str = 'quick brown fox jumps over the lazy dog'
list ( map( lambda x: (x.upper(),len(x),x[-1]),
filter( lambda x: ((x[1] != 'r' ) and (x[1] != 'o')),
'quick brown fox jumps over the lazy dog'.split()) ))
# Reduce Function
from functools import reduce
import functools
reduce( lambda x,y: x*y ,range (1,20))
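For comparison, the final map/filter pipeline above collapses into a single comprehension; same sentence, same output:

sentence = 'quick brown fox jumps over the lazy dog'
result = [(w.upper(), len(w), w[-1])
          for w in sentence.split()
          if w[1] != 'r' and w[1] != 'o']
print(result)  # (UPPERCASE word, length, last char) for each kept word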
[authors: purivikas@hotmail.com]
--- next file: honnibal/textacy :: /textacy/fileio/read.py (Apache-2.0, Python, 7225 bytes) ---
"""
Module with functions for reading content from disk in common formats.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from functools import partial
import json
import ijson
from numpy import load as np_load
from scipy.sparse import csc_matrix, csr_matrix
from spacy.tokens.doc import Doc as SpacyDoc
from textacy.compat import csv
from textacy.fileio import open_sesame
JSON_DECODER = json.JSONDecoder()
def read_file(filepath, mode='rt', encoding=None):
"""
Read the full contents of a file. Files compressed with gzip, bz2, or lzma
are handled automatically.
"""
with open_sesame(filepath, mode=mode, encoding=encoding) as f:
return f.read()
def read_file_lines(filepath, mode='rt', encoding=None):
"""
Read the contents of a file, line by line. Files compressed with gzip, bz2,
or lzma are handled automatically.
"""
with open_sesame(filepath, mode=mode, encoding=encoding) as f:
for line in f:
yield line
def read_json(filepath, mode='rt', encoding=None, prefix=''):
"""
Iterate over JSON objects matching the field given by ``prefix``.
Useful for reading a large JSON array one item (with ``prefix='item'``)
or sub-item (``prefix='item.fieldname'``) at a time.
Args:
filepath (str): /path/to/file on disk from which json items will be streamed,
such as items in a JSON array; for example::
[
{"title": "Harrison Bergeron", "text": "The year was 2081, and everybody was finally equal."},
{"title": "2BR02B", "text": "Everything was perfectly swell."}
]
mode (str, optional)
encoding (str, optional)
prefix (str, optional): if '', the entire JSON object will be read in at once;
if 'item', each item in a top-level array will be read in successively;
if 'item.text', each array item's 'text' value will be read in successively
Yields:
next matching JSON object; could be a dict, list, int, float, str,
depending on the value of ``prefix``
Notes:
Refer to ``ijson`` at https://pypi.python.org/pypi/ijson/ for usage details.
"""
with open_sesame(filepath, mode=mode, encoding=encoding) as f:
if prefix == '':
yield json.load(f)
else:
for item in ijson.items(f, prefix):
yield item
def read_json_lines(filepath, mode='rt', encoding=None):
"""
Iterate over a stream of JSON objects, where each line of file ``filepath``
is a valid JSON object but no JSON object (e.g. array) exists at the top level.
Args:
filepath (str): /path/to/file on disk from which json objects will be streamed,
where each line in the file must be its own json object; for example::
{"title": "Harrison Bergeron", "text": "The year was 2081, and everybody was finally equal."}\n
{"title": "2BR02B", "text": "Everything was perfectly swell."}
mode (str, optional)
encoding (str, optional)
Yields:
dict: next valid JSON object, converted to native Python equivalent
"""
with open_sesame(filepath, mode=mode, encoding=encoding) as f:
for line in f:
yield json.loads(line)
def read_json_mash(filepath, mode='rt', encoding=None, buffersize=2048):
"""
Iterate over a stream of JSON objects, all of them mashed together, end-to-end,
on a single line of a file. Bad form, but still manageable.
Args:
filepath (str): /path/to/file on disk from which json objects will be streamed,
            where all json objects are mashed together, end-to-end, on a single line;
for example::
{"title": "Harrison Bergeron", "text": "The year was 2081, and everybody was finally equal."}{"title": "2BR02B", "text": "Everything was perfectly swell."}
mode (str, optional)
encoding (str, optional)
buffersize (int, optional): number of bytes to read in as a chunk
Yields:
dict: next valid JSON object, converted to native Python equivalent
"""
with open_sesame(filepath, mode=mode, encoding=encoding) as f:
buffer = ''
for chunk in iter(partial(f.read, buffersize), ''):
buffer += chunk
while buffer:
try:
result, index = JSON_DECODER.raw_decode(buffer)
yield result
buffer = buffer[index:]
# not enough data to decode => read another chunk
except ValueError:
break
def read_csv(filepath, encoding=None, dialect='excel', delimiter=','):
"""
Iterate over a stream of rows, where each row is an iterable of strings
and/or numbers with individual values separated by ``delimiter``.
Args:
filepath (str): /path/to/file on disk from which rows will be streamed
encoding (str)
dialect (str): a grouping of formatting parameters that determine how
the tabular data is parsed when reading/writing; if 'infer', the
first 1024 bytes of the file is analyzed, producing a best guess for
the correct dialect
delimiter (str): 1-character string used to separate fields in a row
Yields:
List[obj]: next row, whose elements are strings and/or numbers
.. seealso:: https://docs.python.org/3/library/csv.html#csv.reader
"""
with open_sesame(filepath, mode='rt', encoding=encoding, newline='') as f:
if dialect == 'infer':
dialect = csv.Sniffer().sniff(f.read(1024))
f.seek(0)
for row in csv.reader(f, dialect=dialect, delimiter=delimiter):
yield row
def read_spacy_docs(spacy_vocab, filepath):
"""
Stream ``spacy.Doc`` s from disk at ``filepath`` where they were serialized
using Spacy's ``spacy.Doc.to_bytes()`` functionality.
Args:
spacy_vocab (``spacy.Vocab``): the spacy vocab object used to serialize
the docs in ``filepath``
filepath (str): /path/to/file on disk from which spacy docs will be streamed
Yields:
the next deserialized ``spacy.Doc``
"""
with open_sesame(filepath, mode='rb') as f:
for bytes_string in SpacyDoc.read_bytes(f):
yield SpacyDoc(spacy_vocab).from_bytes(bytes_string)
def read_sparse_csr_matrix(filepath):
"""
Read the data, indices, indptr, and shape arrays from a ``.npz`` file on disk
at ``filepath``, and return an instantiated ``scipy.sparse.csr_matrix``.
"""
npz_file = np_load(filepath)
return csr_matrix((npz_file['data'], npz_file['indices'], npz_file['indptr']),
shape=npz_file['shape'])
def read_sparse_csc_matrix(filepath):
"""
Read the data, indices, indptr, and shape arrays from a ``.npz`` file on disk
at ``filepath``, and return an instantiated ``scipy.sparse.csc_matrix``.
"""
npz_file = np_load(filepath)
return csc_matrix((npz_file['data'], npz_file['indices'], npz_file['indptr']),
shape=npz_file['shape'])
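A minimal usage sketch of the readers above. The file paths are hypothetical, and the import assumes the package is importable as textacy.fileio.read:

from textacy.fileio.read import read_csv, read_json_lines

for record in read_json_lines('corpus.jsonl'):    # one JSON object per line
    print(record.get('title'))

for row in read_csv('table.csv', delimiter=','):  # rows streamed lazily
    print(row)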
[authors: burton@chartbeat.com]
--- next file: ZariaHoward/zariahoward.github.io :: /Scripts/InsertPathMistakeFixer.py (no_license, Python, 1141 bytes) ---
from __future__ import print_function
import time
import requests
# import cv2
# import operator
import numpy as np
# Import library to display results
# import matplotlib.pyplot as plt
# get_ipython().magic(u'matplotlib inline')
# Display images within Jupyter
import pickle
from pymongo import MongoClient
import pprint
import json as json_lib
import os
client = MongoClient('localhost:27017')
db = client.Teenie
# broken_objects = list(db.Photos.find("path":"true"))
count=0
with open("/Volumes/HueyFreeman/errorPics.txt","r+") as f:
for subdir, dirs, files in os.walk('/Volumes/HueyFreeman/Teenie_Harris_PNG1024'):
for file in files:
#print os.path.join(subdir, file)
filepath = subdir + os.sep + file
if filepath.endswith(".png"):
# print(db.Photos.find({"path":filepath}))
# print(list(db.Photos.find({"path":filepath})))
if len(list(db.Photos.find({"path":filepath}))) == 0:
f.write(filepath+ "\n")
count += 1
else:
print( len(list(db.Photos.find({"path":filepath}))))
print(count)
[authors: zariah@andrew.cmu.edu]
--- next file: JetBrains/intellij-community :: /python/testData/resolve/Func.py (Apache-2.0, Python, 30 bytes) ---
def info():
pass
<ref>info()
[authors: Anna.Kozlova@jetbrains.com]
--- next file: LonelyHobby/Python :: /CiFa.py (no_license, Python, 11016 bytes) ---
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author:Huihuihui
# Time:2018/10/15
class DFA:
file_object = ''
line_number = 0
    state = 0  # current DFA state
    ResWord = ['int', 'if', 'then', 'else', 'end', 'repeat', 'until', 'read', 'write']  # reserved words
error_message = []
annotate_message = []
char_message = []
def __init__(self, file_name):
self.file_object = file_name
self.state = 0
self.line_number = 0
self.error_message = []
self.annotate_message = []
self.char_message = []
def Start_convert(self):
for line in self.file_object:
line = line.strip('\n')
self.line_number += 1
line_length = len(line)
i = 0
string = ''
while i < line_length:
ch = line[i]
i += 1
if self.state == 0:
string = ch
if ch.isalpha():
self.state = 1
elif ch.isdigit():
self.state = 3
elif ch == '+':
self.state = 5
elif ch == '-':
self.state = 9
elif ch == '*':
self.state = 13
elif ch == '/':
self.state = 16
elif ch == '=':
self.state = 20
i -= 1
elif ch == '<':
self.state = 21
i -= 1
elif ch == '{':
self.state = 22
i -= 1
elif ch == '}':
self.state = 23
i -= 1
elif ch == ';':
i -= 1
self.state = 24
elif ch.isspace():
self.state = 25
else:
                        self.state = 26  # unrecognized character
i -= 1
elif self.state == 1:
while ch.isalpha() or ch.isdigit():
string += ch
if i < line_length:
ch = line[i]
i += 1
else:
break
self.state = 2
i -= 2
                elif self.state == 2:
                    if string in self.ResWord:
                        content = 'keyword,' + string
                    else:
                        content = 'identifier,' + string
                    # print content
                    self.char_message.append(content)
                    string = ''  # reset the token buffer
                    self.state = 0  # back to state 0
elif self.state == 3:
while ch.isdigit():
string += ch
if i < line_length:
ch = line[i]
i += 1
else:
break
self.state = 4
                    i -= 2  # back up 2 characters
                elif self.state == 4:
                    content = 'number,' + string
                    self.char_message.append(content)
                    # print string
                    string = ''  # reset the token buffer
                    self.state = 0  # back to state 0
elif self.state == 5:
if ch == '+':
self.state = 6
i -= 1
elif ch == '=':
self.state = 7
i -= 1
else:
self.state = 8
i -= 2
                elif self.state == 6:  # handle '++'
                    content = 'special symbol,' + string + ch
                    self.char_message.append(content)
                    # print string + ch
                    string = ''  # reset the token buffer
                    self.state = 0  # back to state 0
                elif self.state == 7:  # handle '+='
                    content = 'special symbol,' + string + ch
                    self.char_message.append(content)
                    # print string + ch
                    string = ''  # reset the token buffer
                    self.state = 0  # back to state 0
                elif self.state == 8:  # handle '+'
                    content = 'special symbol,' + ch
                    self.char_message.append(content)
                    # print ch
                    string = ''  # reset the token buffer
                    self.state = 0  # back to state 0
elif self.state == 9:
if ch == '-':
self.state = 10
i -= 1
elif ch == '=':
self.state = 11
i -= 1
else:
self.state = 12
i -= 2
                elif self.state == 10:  # handle '--'
                    content = 'special symbol,' + string + ch
                    self.char_message.append(content)
                    # print string + ch
                    string = ''  # reset the token buffer
                    self.state = 0  # back to state 0
                elif self.state == 11:  # handle '-='
                    content = 'special symbol,' + string + ch
                    self.char_message.append(content)
                    # print string + ch
                    string = ''  # reset the token buffer
                    self.state = 0  # back to state 0
                elif self.state == 12:  # handle '-'
                    content = 'special symbol,' + ch
                    self.char_message.append(content)
                    # print ch
                    string = ''  # reset the token buffer
                    self.state = 0  # back to state 0
elif self.state == 13:
if ch == '=':
self.state = 14
i -= 1
else:
self.state = 15
i -= 2
                elif self.state == 14:  # handle '*='
                    content = 'special symbol,' + string + ch
                    self.char_message.append(content)
                    # print string + ch
                    string = ''  # reset the token buffer
                    self.state = 0  # back to state 0
                elif self.state == 15:  # handle '*'
                    content = 'special symbol,' + ch
                    self.char_message.append(content)
                    # print ch
                    string = ''  # reset the token buffer
                    self.state = 0  # back to state 0
elif self.state == 16:
if ch == '/':
self.state = 17
i -= 1
elif ch == '=':
self.state = 18
i -= 1
else:
self.state = 19
i -= 2
                elif self.state == 17:  # handle '//'
                    content = 'special symbol,' + string + ch
                    self.char_message.append(content)
                    content = 'comment,' + line[i:]
                    self.annotate_message.append(content)
                    # print content
                    string = ''  # reset the token buffer
                    self.state = 0  # back to state 0
                elif self.state == 18:  # handle '/='
                    content = 'special symbol,' + string + ch
                    self.char_message.append(content)
                    # print string + ch
                    string = ''  # reset the token buffer
                    self.state = 0  # back to state 0
                elif self.state == 19:  # handle '/'
                    content = 'special symbol,' + ch
                    self.char_message.append(content)
                    # print ch
                    string = ''  # reset the token buffer
                    self.state = 0  # back to state 0
                elif self.state == 20:
                    content = 'special symbol,='
                    self.char_message.append(content)
                    # print '='
                    self.state = 0
                    string = ''
                elif self.state == 21:
                    content = 'special symbol,<'
                    self.char_message.append(content)
                    # print '<'
                    self.state = 0
                    string = ''
                elif self.state == 22:
                    content = 'special symbol,{'
                    self.char_message.append(content)
                    # print '{'
                    self.state = 0
                    string = ''
                elif self.state == 23:
                    content = 'special symbol,}'
                    self.char_message.append(content)
                    # print '}'
                    self.state = 0
                    string = ''
                elif self.state == 24:
                    content = 'special symbol,;'
                    self.char_message.append(content)
                    # print ';'
                    self.state = 0
                    string = ''
elif self.state == 25:
while ch.isspace():
if i < line_length:
ch = line[i]
i += 1
else:
break
self.state = 0
i -= 1
                elif self.state == 26:
                    content = 'line number:' + str(self.line_number) + ',' + ch
                    self.error_message.append(content)
                    # print 'error:' + ch
                    self.state = 0
                    string = ''
# print self.state
    def Get_error(self):  # return collected error messages
        return self.error_message
    def Get_annotate(self):  # return collected comment messages
        return self.annotate_message
    def Get_char(self):  # return recognized token messages
        return self.char_message
file_object = open(r"F:\Source\Python\ProducerCustomer.py")  # raw string avoids invalid escape sequences
try:
    dfa = DFA(file_object)
    dfa.Start_convert()
    content = dfa.Get_char()
    for item in content:
        print(item)
    content = dfa.Get_annotate()
    for item in content:
        print(item)
    content = dfa.Get_error()
    for item in content:
        print(item)
finally:
    file_object.close()
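A short self-check of the DFA above using an in-memory source instead of the hard-coded path; io.StringIO stands in for the file, so the sketch runs anywhere:

import io

dfa = DFA(io.StringIO("int x = 42; // answer\nx++\n"))
dfa.Start_convert()
for item in dfa.Get_char():
    print(item)   # e.g. "keyword,int", "identifier,x", "number,42"
for item in dfa.Get_annotate():
    print(item)   # "comment, answer"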
[authors: LonelyHobby.noreply@github.com]
--- next file: nekonbu72/gevent_websoclet_sample :: /src/customout.py (no_license, Python, 1316 bytes) ---
import sys
from abc import ABCMeta, abstractmethod
class CustomOutBase(metaclass=ABCMeta):
orgout = sys.stdout
def __init__(self):
# self.orgout = sys.stdout
sys.stdout = self
def __enter__(self):
return self
def __exit__(self, ex_type, ex_value, trace):
sys.stdout = self.orgout
@abstractmethod
def write(self, s: str) -> int:
raise NotImplementedError()
def stdout_temp(newout: CustomOutBase):
def outer(func):
def inner(*args, **kwargs):
with newout:
func(*args, **kwargs)
return inner
return outer
def end_filter(func):
def wrapper(*args, **kwargs):
s = args[1]
if isinstance(s, str):
if s.strip() == "":
return
func(*args, **kwargs)
else:
raise TypeError()
return wrapper
class __CustomOut(CustomOutBase):
def write(self, s: str) -> int:
if s.strip() == "":
return 0
print(f"@@@{s}@@@", file=self.orgout)
return len(s)
if __name__ == "__main__":
@stdout_temp(__CustomOut())
def __printer():
print("Hello, print!")
def __printer2():
print("Bye, print!")
__printer() # @@@Hello, print!@@@
__printer2() # Bye, print!
[authors: nakatomo222@gmail.com]
--- next file: cnjllin/devops6 :: /lesson9/zhangbaocheng/sqlmng/migrations/0001_initial.py (no_license, Python, 1395 bytes) ---
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-03-02 16:22
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='dbconf',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=32, verbose_name='name')),
                ('createtime', models.DateTimeField(auto_now_add=True, verbose_name='created at')),
                ('updatetime', models.DateTimeField(auto_now=True, verbose_name='updated at')),
                ('note', models.TextField(blank=True, default='', null=True, verbose_name='note')),
('user', models.CharField(max_length=128)),
('password', models.CharField(max_length=128)),
('host', models.CharField(max_length=16)),
('port', models.CharField(max_length=5)),
                ('env', models.CharField(blank=True, choices=[('1', 'production'), ('2', 'test')], max_length=1, null=True)),
],
options={
'ordering': ['-id'],
'abstract': False,
},
),
]
[authors: 1032231418@qq.com]
--- next file: s4m15v0/CSE221 :: /Labs/Lab 1/problem1.py (no_license, Python, 2461 bytes) ---
# -*- coding: utf-8 -*-
"""problem1.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1DjXtkHr5utg807ms_nKKRaLWUnX-8hUi
"""
def isPalindrome(word):
if(len(word)==0):
return False
N = len(word)
for i in range(0,N//2):
if(word[i] != word[N-1-i]):
return False
return True
#parity check
def parityCheck(number):
if(number%2==0):
return True
else:
return False
# main
_file = open("/content/input.txt",'r')
_read = _file.readlines()
no_lines = len(_read)
# count palindrome,not palindrome, parity , not parity
palindrome_count = 0
not_palindrome_count = 0
float_numbers_count = 0
even_parity = 0
odd_parity = 0
#empty list result
result = []
# for loop to loop the files in the _read
for index in _read:
i = index.split()
_str=""
    # int() either parses the token or raises ValueError (e.g. for "3.5"),
    # in which case the except branch below counts it as having no parity
    try:
        value = int(i[0])
        # calling function for parity check
        if parityCheck(value):
            _str += str(i[0]) + " has even parity and "
            even_parity += 1
        else:
            _str += str(i[0]) + " has odd parity and "
            odd_parity += 1
#catch any exception error
except:
_str +=str(i[0])+" cannot have parity and "
float_numbers_count+=1
if(isPalindrome(i[1])):
_str += str(i[1])+" is a palindrome"
palindrome_count+=1
else:
_str += str(i[1])+" is not a palindrome"
not_palindrome_count+=1
result.append(_str)
# output.txt in write mode
output_file = open("output.txt",'w')
for i in result:
output_file.write(i)
output_file.write("\n")
# open the file record.txt in write mode
record_file = open("record.txt","w")
# calculate and write the line to file
record_file.write("Percentage of Odd parity {}%\n".format((odd_parity/no_lines)*100))
record_file.write("Percentage of even parity {}%\n".format((even_parity/no_lines)*100))
record_file.write("Percentage of no parity {}%\n".format((float_numbers_count/no_lines)*100))
record_file.write("Percentage of Palindrome {}%\n".format((palindrome_count/no_lines)*100))
record_file.write("Percentage of non-Palindrome {}%\n".format((not_palindrome_count/no_lines)*100))
# closing all the files
record_file.close()
output_file.close()
_file.close()
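Quick checks of the two helpers with invented values; each input line is assumed to look like '<integer-or-float> <word>', e.g. '12 racecar':

assert isPalindrome('racecar') and not isPalindrome('apple')
assert parityCheck(12) and not parityCheck(7)   # even vs. odd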
[authors: s4m15v0.noreply@github.com]
--- next file: the-tale/the-tale :: /src/the_tale/the_tale/game/companions/tests/test_common.py (BSD-3-Clause, Python, 2144 bytes) ---
import smart_imports
smart_imports.all()
class CommonTests(utils_testcase.TestCase):
def setUp(self):
super(CommonTests, self).setUp()
game_logic.create_test_map()
self.account = self.accounts_factory.create_account()
self.storage = game_logic_storage.LogicStorage()
self.storage.load_account_data(self.account.id)
self.hero = self.storage.accounts_to_heroes[self.account.id]
def test_rarities_abilities(self):
for rarity, rarity_abilities in helpers.RARITIES_ABILITIES.items():
companion = logic.create_random_companion_record('%s companion' % rarity,
abilities=rarity_abilities)
self.assertEqual(companion.rarity, rarity)
@mock.patch('the_tale.game.companions.objects.Companion.max_coherence', 100)
@mock.patch('the_tale.game.heroes.abilities.companions.THOUGHTFUL.MULTIPLIER', [1, 1, 1, 1, 1])
@mock.patch('the_tale.game.heroes.abilities.companions._CompanionHealBase.PROBABILITY', [0, 0, 0, 0, 0])
def _test_companion_death_speed(self):
        companion_record = logic.create_random_companion_record('test companion',
                                                                state=relations.STATE.ENABLED,
                                                                dedication=relations.DEDICATION.BRAVE)
companion = logic.create_companion(companion_record)
self.hero.set_companion(companion)
self.hero.preferences.set_companion_dedication(heroes_relations.COMPANION_DEDICATION.NORMAL)
old_health = self.hero.companion.health
while self.hero.companion:
self.hero.companion.coherence = 50
self.storage.process_turn()
game_turn.increment()
self.hero.randomized_level_up()
if not self.hero.is_alive:
if hasattr(self.hero.actions.current_action, 'fast_resurrect'):
self.hero.actions.current_action.fast_resurrect()
if self.hero.companion:
old_health = self.hero.companion.health
[authors: a.eletsky@gmail.com]
--- next file: renjiezhu/Poisson-Spot-Simulator :: /gui.py (no_license, Python, 8597 bytes) ---
#
# PHYS 129 final project
# Poisson's Spot
# GUI
#
# June 6, 2018 modified by Renjie Zhu
# June 5, 2018 modified by Renjie Zhu
# May 23, 2018 created by Renjie Zhu
#
import sys
import matplotlib.pyplot as plt
from PyQt5.QtWidgets import (
QApplication,
QWidget,
QLabel,
QMainWindow,
QPushButton,
QLineEdit,
QErrorMessage,
QMenuBar,
QMessageBox,
)
from PyQt5.QtCore import (
pyqtSlot,
)
from disk import Poisson
class Gui(QWidget):
def __init__(self, left=300, top=300, width=450, height=140, title='New Window'):
super().__init__()
self.title = title
self.left = left
self.top = top
self.width = width
self.height = height
self.l_label = QLabel('Distance between disk and screen', self)
self.d_label = QLabel('Diameter of disk', self)
self.w_label = QLabel('Wavelength of incident light', self)
self.l_unit = QLabel('mm', self)
self.d_unit = QLabel('mm', self)
self.w_unit = QLabel('nm', self)
self.l_edit = QLineEdit('1000', self)
self.d_edit = QLineEdit('2', self)
self.w_edit = QLineEdit('600', self)
self.l_value = 0
self.d_value = 0
self.w_value = 0
self.reset_button = QPushButton('Reset', self)
self.compute_button = QPushButton('Compute', self)
self.error_dialog = QErrorMessage()
self.ERROR_MESSAGE = "Make sure you give positive numbers.\nPlease try again."
self.l_MESSAGE = """Please make sure that the distance between the screen
and the disk is a hundred times larger than the diameter of the disk."""
self.help_box = QMessageBox()
self.FRESNEL_MESSAGE = """
For Fresnel diffraction (the diffraction that causes
        Poisson's Spot to show), the parameters must satisfy
        the following requirement.
\t\td^2
\t\t----- >~ 1,
\t\tl * λ
where
\td is the diameter of disk,
\tl is the distance between disk and screen, and
\tλ is the wavelength of incident light.
Please make sure your input meet this requirement!
"""
self.INFO_TITLE = 'Information about Poisson\'s Spot:'
self.POISSON_MESSAGE = """
This program is used to simulate the Poisson Spot with
parameters given by the user. Parameters include
1. distance between the disk and screen,
2. diameter of the disk,
3. wavelength of incident light.
The plot will be shown as a 400 by 400 grid image of the
Poisson Spot on a screen of 16mm by 16mm.
Please make sure that you give integers as input and
follow the suggestions if you encounter any.
"""
        self.warning_box = QMessageBox()
        self.info_box = QMessageBox()  # needed by info_page(), which previously referenced an undefined attribute
self.WARNING_MESSAGE = """
Due to both hardware and software limitations, the
computation may take a rather long time to run.
Please be patient if the program is not responding,
and expect it to run for about 10 minutes until you
see the result.
The result will be saved to the program directory
as an eps file with parameters listed in the filename.
"""
self.plot_info = """
Parameters:
Distance between disk and screen: %d m
Diameter of disk: %d mm
Wavelength of incident light: %d nm
"""
def main_window(self):
self.setWindowTitle(self.title)
self.setGeometry(self.left, self.top, self.width, self.height)
self.reset_button.setToolTip('Reset the entries.')
self.compute_button.setToolTip(
'Compute the Poisson\'s Spot using parameters above.')
self.compute_button.setDefault(True)
self.l_label.move(10, 7)
self.l_edit.move(260, 5)
self.l_unit.move(420, 7)
self.d_label.move(10, 42)
self.d_edit.move(260, 40)
self.d_unit.move(420, 42)
self.w_label.move(10, 77)
self.w_edit.move(260, 75)
self.w_unit.move(420, 77)
self.reset_button.move(200, 110)
self.compute_button.move(310, 110)
self.reset_button.clicked.connect(self.reset_on_click)
self.compute_button.clicked.connect(self.compute_on_click)
self.show()
def fresnel_help_page(self):
self.help_box.setWindowTitle(self.INFO_TITLE)
self.help_box.setIcon(QMessageBox.Information)
self.help_box.setText(self.FRESNEL_MESSAGE)
self.help_box.show()
def info_page(self):
self.info_box.setWindowTitle(self.INFO_TITLE)
self.info_box.setIcon(QMessageBox.Information)
self.info_box.setText(self.POISSON_MESSAGE)
self.info_box.show()
def warning_page(self):
self.warning_box.setWindowTitle('Warning!')
self.warning_box.setIcon(QMessageBox.Warning)
self.warning_box.setText(self.WARNING_MESSAGE)
self.warning_box.setStandardButtons(self.warning_box.Cancel | self.warning_box.Ok)
if self.warning_box.exec_() == self.warning_box.Ok:
self.proceed_on_click()
def start(self):
self.main_window()
@pyqtSlot(name='compute_on_click')
def compute_on_click(self):
try:
l_value_temp = float(self.l_edit.text())
if l_value_temp < 0:
self.value_error_handling()
self.l_edit.setText('1000')
return
except ValueError:
self.value_error_handling()
self.l_edit.setText('1000')
return
try:
d_value_temp = float(self.d_edit.text())
if d_value_temp < 0:
self.value_error_handling()
self.d_edit.setText('2')
return
except ValueError:
self.value_error_handling()
self.d_edit.setText('2')
return
try:
w_value_temp = float(self.w_edit.text())
if w_value_temp < 0:
self.value_error_handling()
self.w_edit.setText('600')
return
except ValueError:
self.value_error_handling()
self.w_edit.setText('600')
return
# Convert to SI Units
# l_value_temp: input (mm) -> m
l_value_temp = l_value_temp * 1e-3
# d_value_temp: input (mm) -> m
d_value_temp = d_value_temp * 1e-3
# w_value_temp: input (nm) -> m
w_value_temp = w_value_temp * 1e-9
if d_value_temp > 0.01 * l_value_temp:
self.value_error_handling(is_l_small=True)
self.reset_input()
return
# Fresnel number requirement
# d^2/(l*w) >~ 1
# Ask for new inputs if this is requirement is not met
# and inform them about this problem
fresnel_num = (d_value_temp ** 2) / (l_value_temp * w_value_temp)
if fresnel_num < 0.95:
self.value_error_handling(is_fresnel=True)
self.reset_input()
return
self.l_value = l_value_temp
self.d_value = d_value_temp
self.w_value = w_value_temp
self.warning_page()
@pyqtSlot(name='reset_on_click')
def reset_on_click(self):
self.reset_input()
@pyqtSlot(name='proceed_on_click')
def proceed_on_click(self):
instance = Poisson(lamda=self.w_value, rad=(self.d_value / 2), L=self.l_value)
matrix = instance.poiss()
self.matrix_plotting(matrix)
def reset_input(self):
self.l_edit.setText('1000')
self.d_edit.setText('2')
self.w_edit.setText('600')
def value_error_handling(self, is_fresnel=False, is_l_small=False):
if is_fresnel:
self.fresnel_help_page()
elif is_l_small:
self.error_dialog.setWindowTitle('ERROR!!!')
self.error_dialog.showMessage(self.l_MESSAGE)
else:
            # This branch is triggered when no specific error flag is set,
            # i.e. for plain value errors in the input fields.
self.error_dialog.setWindowTitle('ERROR!!!')
self.error_dialog.showMessage(self.ERROR_MESSAGE)
def matrix_plotting(self, matrix):
f1, ax1 = plt.subplots()
ax1.imshow(matrix, interpolation='none', cmap='gray')
ax1.axis('off')
ax1.set_title('The Simulation of Poisson Spot on the Screen of Side Length 16 mm')
f1.text(0.5, 0, self.plot_info % (self.l_value, self.d_value * 1e3, self.w_value * 1e9),
verticalalignment='baseline')
f1.show()
filename = 'poisson_spot_%d_%d_%d.eps' % (self.l_value, self.d_value * 1e3, self.w_value * 1e9)
f1.savefig(filename)
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = Gui()
sys.exit(app.exec_())
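A worked check of the Fresnel-number requirement for the GUI defaults (1000 mm, 2 mm, 600 nm), after the same SI conversions that compute_on_click performs:

d = 2e-3       # disk diameter in metres
l = 1.0        # disk-to-screen distance in metres
lam = 600e-9   # wavelength in metres
print(d ** 2 / (l * lam))   # ~6.67, comfortably above the 0.95 cutoff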
[authors: renjie.j.zhu@gmail.com]
--- next file: YazBraimah/OT2_protocols :: /Python_API/Illumina_NexteraXT/nexteraXT_dna_library_prep_part4.ot2.py (no_license, Python, 1215 bytes) ---
from opentrons import labware, instruments
"""
Pool Libraries
"""
# labware setup
in_plate = labware.load('96-PCR-flat', '1')
tuberack = labware.load('tube-rack-2ml', '2')
def run_custom_protocol(
number_of_samples: int=24,
number_of_pools: int=1,
pool_volume: int=5
):
total_tips = number_of_samples * number_of_pools
total_tiprack = total_tips // 96 + (1 if total_tips % 96 > 0 else 0)
tipracks10 = [labware.load('tiprack-10ul', slot)
for slot in ['3', '4', '5', '6', '7', '8', '9', '10', '11'][
:total_tiprack]]
p10 = instruments.P10_Single(
mount='right',
tip_racks=tipracks10)
    if number_of_samples <= 24:
        # renamed from `input` to avoid shadowing the built-in
        source_wells = [well
                        for col in in_plate.cols('1', to='6')
                        for well in col.wells('A', to='D')][:number_of_samples]
    else:
        source_wells = [well for well in in_plate.wells()][:number_of_samples]
# Transfer each library to pooling tube(s)
for tube in tuberack.wells(0, length=number_of_pools):
        p10.transfer(pool_volume, source_wells, tube, new_tip='always')
run_custom_protocol(**{'number_of_samples': 24, 'number_of_pools': 1, 'pool_volume': 5})
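The tiprack arithmetic from run_custom_protocol, checked by hand for the default parameters:

number_of_samples, number_of_pools = 24, 1
total_tips = number_of_samples * number_of_pools   # 24 transfers, one fresh tip each
total_tiprack = total_tips // 96 + (1 if total_tips % 96 > 0 else 0)
print(total_tips, total_tiprack)                   # 24 tips -> 1 box of 10 uL tips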
[authors: yahmed@syr.edu]
--- next file: amaliestokholm/numeric :: /1_interpolation/cmain.py (no_license, Python, 803 bytes) ---
import numpy as np
from interp import interpolation as interp
# Define start and end point and the number of points in the function (n) and in the interpolation (N)
n = 50
N = 100
start = 0
end = 4 * np.pi
# Make input -- as an example, the cosine function is used.
x = np.linspace(start, end, n)
y = np.cos(x)
z = np.linspace(start, end, N)
y_dev = - np.sin(x)
y_int = np.sin(x)
# Initialize the class
i_cos = interp(x, y)
# Interpolate
s_cspl, s_csplint, s_cspldev = i_cos.cspline(z)
# Print the values in order to save the stdout
for i in range(N):
if i < n:
print('%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s'
% (z[i], s_cspl[i], s_csplint[i], s_cspldev[i], x[i], y[i], y_int[i], y_dev[i]))
else:
print('%s\t%s\t%s\t%s' % (z[i], s_cspl[i], s_csplint[i], s_cspldev[i]))
[authors: stokholm@post.au.dk]
--- next file: angelxehg/tomatoe-chat-api :: /workspaces/migrations/0018_searchresult.py (MIT, Python, 574 bytes) ---
# Generated by Django 3.0.7 on 2020-08-19 18:47
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('workspaces', '0017_message'),
]
operations = [
migrations.CreateModel(
name='SearchResult',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('type', models.CharField(max_length=20)),
('content', models.CharField(max_length=500)),
],
),
]
[authors: 50889225+angelxehg@users.noreply.github.com]
--- next file: babakaskari/MachineLearning :: /algorithm/algorithm_ evaluator_temp.py (no_license, Python, 8956 bytes) ---
# importing required libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import cross_val_score, train_test_split, GridSearchCV
from sklearn import preprocessing, metrics
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier, VotingClassifier
from sklearn.utils import shuffle
from sklearn.base import BaseEstimator, RegressorMixin
# from xgboost import XGBRegressor
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.experimental import enable_hist_gradient_boosting
from sklearn.ensemble import HistGradientBoostingClassifier
from sklearn.ensemble import StackingClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import RandomTreesEmbedding
import warnings
warnings.filterwarnings('ignore')
# preprocessing
# ## ###########################
df = pd.read_csv("../dataset/Acoustic Logger Data.csv")
df1 = df.loc[df["LvlSpr"] == "Lvl"]
df3 = df.loc[df["LvlSpr"] == "Spr"]
df2 = pd.melt(df1, id_vars=['LvlSpr', 'ID'], value_vars=df.loc[:0, '02-May':].columns.values.tolist(), var_name='Date')
df4 = pd.melt(df3, id_vars=['LvlSpr', 'ID'], value_vars=df.loc[:0, '02-May':].columns.values.tolist(), var_name='Date')
df5 = pd.merge(df2, df4, on= ['ID', 'Date'], suffixes=("_Lvl", "_Spr"))
df6 = df5.drop(['LvlSpr_Lvl', 'LvlSpr_Spr'], axis=1).dropna()
df6['Date'] = pd.to_datetime(df6['Date'], format='%d-%b')
df6['Date'] = df6['Date'].dt.strftime('%d-%m')
df7 = pd.read_csv("../dataset/Leak Alarm Results.csv")
df7['Date Visited'] = pd.to_datetime(df7['Date Visited'], format='%d/%m/%Y')
df7['Date Visited'] = df7['Date Visited'].dt.strftime('%d-%m')
df7 = df7.rename(columns={'Date Visited': 'Date'})
df8 = pd.merge(df6, df7, on=['ID', 'Date'], how='left')
df8 = df8.sort_values(['Leak Alarm', 'Leak Found']).reset_index(drop=True)
# df8["Leak Alarm"] = df8["Leak Alarm"].fillna(-1)
# df8["Leak Found"] = df8["Leak Found"].fillna(-1)
dataset = df8
###################################################### Delete these row indexes from dataFrame
indexNames = dataset[dataset['Leak Found'] == 'N-PRV'].index
dataset.drop(indexNames, inplace=True)
dataset.reset_index(drop=True, inplace=True)
###################################################### DROPPING LEAK ALARM & LEAK FOUND
dataset["Leak Found"].replace(["Y", "N"], [1, 0], inplace=True)
# dataset["Leak Alarm"].replace(["Y", "N"], [1, 0], inplace=True)
dataset = dataset.drop(['Leak Alarm'], axis=1)
# ############################################################ Convert Date categorical to numerical
# dataset['Date'] = dataset['Date'].str.replace('\D', '').astype(int)
date_encoder = preprocessing.LabelEncoder()
date_encoder.fit(dataset['Date'])
# print(list(date_encoder.classes_))
dataset['Date'] = date_encoder.transform(dataset['Date'])
# print(dataset.to_string(max_rows=200))
print("Number of null values in dataset :\n", dataset.isna().sum())
# ##################################################### CORRELATION MATRIX
# print(dataset.columns.values)
# dataset2 = dataset.drop(["Leak Found"], axis=1)
# df = pd.DataFrame(dataset2, columns=['Date', 'ID', 'value_Lvl', 'value_Spr'])
# corrMatrix = df.corr()
# sns.heatmap(corrMatrix, annot=True, cmap="YlGnBu")
# plt.show()
# ########################################################################
def evaluate_preds(model, x_true, y_true, y_preds):
accuracy = metrics.accuracy_score(y_true, y_preds)
precision = metrics.precision_score(y_true, y_preds)
recall = metrics.recall_score(y_true, y_preds)
f1 = metrics.f1_score(y_true, y_preds)
metric_dict = {"accuracy": round(accuracy, 2),
"precision": round(precision, 2),
"recall": round(recall, 2),
"f1": round(f1, 2)}
print("Model score is : ", model.score(x_true, y_true))
print(f"Accuracy : {accuracy * 100:.2f}%")
print(f"Precision : {precision: .2f}")
print(f"Recall : {recall: .2f}")
print(f"F1 Score : {f1: .2f}")
return metric_dict
# ##################################################### SPLIT THE DATASET
x_labeled_data = dataset.loc[dataset['Leak Found'].notna()]
y_labeled_data = x_labeled_data["Leak Found"]
x_labeled_data = x_labeled_data.drop(["Leak Found"], axis=1)
# ############################################## standard scaler
scaler = StandardScaler()
data_scaled = scaler.fit_transform(x_labeled_data)
x_train = pd.DataFrame(data_scaled)
print("x_train after normalization : ", x_train.head())
print("x_train description after normalization: ", x_labeled_data.describe())
# ############################################################
# x_train = x_train.sample(frac=1)
x_unlabeled_data = dataset.loc[dataset['Leak Found'].isna()]
y_unlabeled_data = x_unlabeled_data.drop(["Leak Found"], axis=1)
x_train, x_test, y_train, y_test = train_test_split(x_labeled_data,
y_labeled_data,
test_size=0.2,
random_state=42)
x_train, x_cv, y_train, y_cv = train_test_split(x_train,
y_train,
stratify=y_train,
test_size=0.2)
rfc = RandomForestClassifier()
rfc.fit(x_train, y_train)
rfc_pred = rfc.predict(x_test)
# gs_rfc_matrices = evaluate_preds(y_test, gs_rfc_pred)
rfc_new_matrices = evaluate_preds(rfc, x_test, y_test, rfc_pred)
# ############################################################
"""
model_factory = [
LogisticRegression(),
KNeighborsClassifier(),
BaggingClassifier(n_estimators=100),
XGBRegressor(nthread=1),
GradientBoostingRegressor(),
RandomForestClassifier(),
BayesianRidge(),
]
"""
estimators = [
('rf', RandomForestClassifier(n_estimators=10, random_state=42)),
]
model_factory = [
RandomForestClassifier(),
AdaBoostClassifier(),
BaggingClassifier(),
ExtraTreesClassifier(),
GradientBoostingClassifier(),
# RandomTreesEmbedding(),
StackingClassifier(estimators=estimators),
VotingClassifier(estimators=estimators),
HistGradientBoostingClassifier(),
]
# #######################################
for model in model_factory:
    model.seed = 42  # note: scikit-learn estimators take random_state; .seed is effectively ignored
num_folds = 3
scores = cross_val_score(model, x_train, y_train, cv=num_folds, scoring='neg_mean_squared_error')
# print("Score is : ", scores)
score_description = " %0.2f (+/- %0.2f)" % (np.sqrt(scores.mean() * -1), scores.std() * 2)
    print('{model:25} CV-{folds} RMSE: {score}'.format(
        model=model.__class__.__name__,
        folds=num_folds,
        score=score_description
    ))
# ////////////////////////////////
gs_rfc_grid = {
'n_estimators': [10, 50, 100, 200, 500],
# 'max_depth': ['None', 20, 30],
'max_features': ['auto', 'sqrt', 'log2'],
'min_samples_split': [6],
'class_weight': ['balanced', 'balanced_subsample'],
# 'max_iter': np.arange(100, 2000, 100),
'min_samples_leaf': [1, 2]
}
rfc = RandomForestClassifier()
print("RandomForestClassifier parameters : ", rfc.get_params())
gs_rfc = GridSearchCV(estimator=rfc,
param_grid=gs_rfc_grid,
cv=5,
n_jobs=-1,
verbose=2,)
gs_rfc.fit(x_train, y_train)
print("RandomForestClassifier best parameter is : ", gs_rfc.best_params_)
# print("RandomForestClassifier best parameter is : ", gs_rfc.best_params_['class_weight'])
# print("RandomForestClassifier score : ", gs_rfc.score(x_test))
gs_rfc_pred = gs_rfc.predict(x_test)
print("RandomForestClassifier Prediction : ", gs_rfc_pred)
# gs_rfc_matrices = evaluate_preds(y_test, gs_rfc_pred)
gs_rfc_matrices = evaluate_preds(gs_rfc, x_test, y_test, gs_rfc_pred)
compare_matrices = pd.DataFrame({
"rfc_new": rfc_new_matrices,
"grid search": gs_rfc_matrices,
})
compare_matrices.plot.bar(rot=0)
plt.show()
# ################################################ AdaBoostClassifier starts
abc = AdaBoostClassifier(n_estimators=100, random_state=0)
abc.fit(x_train, y_train)
clf_pred = abc.predict(x_test)
print("AdaBoostClassifier Prediction : ", clf_pred)
# gs_rfc_matrices = evaluate_preds(y_test, gs_rfc_pred)
clf_matrices = evaluate_preds(abc, x_test, y_test, clf_pred)
print("RandomForestClassifier parameters : ", rfc.get_params())
# ################################################ AdaBoostClassifier ends
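The sign convention behind the RMSE report above, isolated with invented fold scores: cross_val_score returns negated MSEs under scoring='neg_mean_squared_error', so the mean is flipped before the square root.

import numpy as np

fold_scores = np.array([-0.04, -0.09, -0.16])   # toy per-fold neg-MSE values
rmse = np.sqrt(fold_scores.mean() * -1)
print(round(rmse, 2))                           # ~0.31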
[authors: Siamak]
--- next file: webclinic017/fastapi_with_nats :: /auth/app/db/database.py (no_license, Python, 1110 bytes) ---
import os
import configparser
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
# path = os.path.join(os.path.dirname(__file__), '..', 'config', 'config.ini')
# config_ini = configparser.ConfigParser()
# config_ini.read(path)
# POSTGRES_SERVER = config_ini['POSTGRES']['POSTGRES_SERVER']
# POSTGRES_DB = config_ini['POSTGRES']['POSTGRES_DB']
# POSTGRES_SERVER = os.getenv('POSTGRES_SERVER')
# POSTGRES_PORT = config_ini['POSTGRES']['POSTGRES_PORT']
# POSTGRES_USER = config_ini['POSTGRES']['POSTGRES_USER']
# POSTGRES_PASSWORD = config_ini['POSTGRES']['POSTGRES_PASSWORD']
SQLALCHEMY_DATABASE_URL = os.getenv('POSTGRES_URI')
# SQLALCHEMY_DATABASE_URL = f'postgresql://{POSTGRES_USER}:{POSTGRES_PASSWORD}@/{POSTGRES_DB}?host={POSTGRES_SERVER}&port={POSTGRES_PORT}'
engine = create_engine(SQLALCHEMY_DATABASE_URL, echo=True)
Base = declarative_base()
SessionLocal = sessionmaker(bind=engine, autocommit=False, autoflush=False,)
def get_db():
db = SessionLocal()
try:
yield db
finally:
db.close()
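A hypothetical FastAPI route consuming the get_db generator above as a dependency; the route name and query are invented for illustration:

from fastapi import Depends, FastAPI
from sqlalchemy import text
from sqlalchemy.orm import Session

app = FastAPI()

@app.get('/ping-db')
def ping_db(db: Session = Depends(get_db)):
    # each request gets its own session; get_db closes it afterwards
    return {'result': db.execute(text('SELECT 1')).scalar()}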
[authors: i.norihi.472@gmail.com]
--- next file: khirod-datascience/Rah-model-api :: /API/api_run.py (no_license, Python, 940 bytes) ---
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 27 12:21:00 2021
@author: Devdarshan
"""
from flask import Flask, request
from flask_restful import Resource, Api
import search_by_definition
import fuzzy_wuzzy_Search_by_definition
import next_word_predict
app = Flask(__name__)
api = Api(app)
class predict(Resource):
def get(self, query):
return {'data': search_by_definition.manual_predict(query)}
class predict_fuzzywuzzy(Resource):
def get(self, query):
return {'data': fuzzy_wuzzy_Search_by_definition.search_by_definition_fuzzy_wuzzy(query)}
class predict_nextword(Resource):
def get(self, query):
return {'data': next_word_predict.predict_next(query)}
api.add_resource(predict, '/searchByDefinition/<query>')
api.add_resource(predict_fuzzywuzzy, '/fuzzyWuzzySearchByDefinition/<query>')
api.add_resource(predict_nextword, '/predictNextWord/<query>')
if __name__ == '__main__':
app.run()
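An example client call against the running service; the host and port are Flask defaults, and the query word is invented:

import requests

resp = requests.get('http://127.0.0.1:5000/searchByDefinition/gravity')
print(resp.json())   # -> {'data': ...}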
[authors: 66660324+khirod-datascience@users.noreply.github.com]
--- next file: michelif/diphotons :: /Analysis/config/simple_analysis.py (no_license, Python, 10584 bytes) ---
#!/usr/bin/env cmsRun
import FWCore.ParameterSet.Config as cms
import FWCore.Utilities.FileUtils as FileUtils
process = cms.Process("Analysis")
process.load("diphotons.Analysis.highMassDiPhotons_cfi")
from flashgg.MicroAOD.flashggPreselectedDiPhotons_cfi import flashggPreselectedDiPhotons
from flashgg.Taggers.diphotonDumper_cfi import diphotonDumper
import flashgg.Taggers.dumperConfigTools as cfgTools
diphotonDumper.dumpTrees = False
diphotonDumper.dumpWorkspace = False
diphotonDumper.quietRooFit = True
diphotonDumper.src=cms.InputTag("hcic4DiPhotons")
cfgTools.addCategories(diphotonDumper,
[## cuts are applied in cascade
("EBHighR9","max(abs(leadingPhoton.superCluster.eta),abs(subLeadingPhoton.superCluster.eta))<1.4442"
"&& min(leadingPhoton.r9,subLeadingPhoton.r9)>0.94",0),
("EBLowR9","max(abs(leadingPhoton.superCluster.eta),abs(subLeadingPhoton.superCluster.eta))<1.4442",0),
("EEHighR9","min(leadingPhoton.r9,subLeadingPhoton.r9)>0.94",0),
("EELowR9","1",0),
],
variables=["mass",
"leadPt :=leadingPhoton.pt",
"subleadPt :=subLeadingPhoton.pt",
"leadCShapeMVA :=leadingPhoton.userFloat('cShapeMVA')",
"subleadCShapeMVA :=subLeadingPhoton.userFloat('cShapeMVA')",
"minR9 :=min(leadingPhoton.r9,subLeadingPhoton.r9)",
"maxEta :=max(abs(leadingPhoton.superCluster.eta),abs(subLeadingPhoton.superCluster.eta))",
"leadBlockChIso := leadingPhotonView.pfChIso03WrtChosenVtx",
"leadBlockPhoIso := leadingPhotonView.pfPhoIso03",
"leadRndConeChIso := leadingPhotonView.extraChgIsoWrtChoosenVtx('rnd03')",
"leadRndConePhoIso:= leadingPhoton.extraPhoIso('rnd03')",
"leadChIso := leadingPhoton.egChargedHadronIso",
"leadChIso := leadingPhoton.egChargedHadronIso",
"leadPhoIso := leadingPhoton.egPhotonIso",
"leadNeutIso := leadingPhoton.egNeutralHadronIso",
"leadHoE := leadingPhoton.hadTowOverEm",
"leadSigmaIeIe := (?leadingPhoton.r9>0.8||leadingPhoton.egChargedHadronIso<20||leadingPhoton.egChargedHadronIso/leadingPhoton.pt<0.3?leadingPhoton.full5x5_sigmaIetaIeta:leadingPhoton.sigmaIetaIeta)",
"leadPixSeed := leadingPhoton.hasPixelSeed",
"leadPassEleVeto := leadingPhoton.passElectronVeto",
"subleadBlockChIso := subLeadingPhotonView.pfChIso03WrtChosenVtx",
"subleadBlockPhoIso := subLeadingPhotonView.pfPhoIso03",
"subleadRndConeChIso := subleadingPhotonView.extraChgIsoWrtChoosenVtx('rnd03')",
"subleadRndConePhoIso:= subleadingPhoton.extraPhoIso('rnd03')",
"subleadChIso := subLeadingPhoton.egChargedHadronIso",
"subleadPhoIso := subLeadingPhoton.egPhotonIso",
"subleadNeutIso := subLeadingPhoton.egNeutralHadronIso",
"subleadHoE := subLeadingPhoton.hadTowOverEm",
"subleadSigmaIeIe := (?subLeadingPhoton.r9>0.8||subLeadingPhoton.egChargedHadronIso<20||subLeadingPhoton.egChargedHadronIso/subLeadingPhoton.pt<0.3?subLeadingPhoton.full5x5_sigmaIetaIeta:subLeadingPhoton.sigmaIetaIeta)",
"subleadPixSeed := subLeadingPhoton.hasPixelSeed",
"subleadPassEleVeto := subLeadingPhoton.passElectronVeto",
],
histograms=["mass>>mass(1500,0,15000)",
"leadPt>>leadPt(145,100,3000)",
"subleadPt>>subleadPt(145,100,3000)",
"leadBlockChIso>>leadBlockChIso(60,-10,50)",
"leadBlockPhoIso>>leadBlockPhoIso(60,-10,50)",
"leadChIso>>leadChIso(60,-10,50)",
"leadPhoIso>>leadPhoIso(60,-10,50)",
"leadNeutIso>>leadNeutIso(60,-10,50)",
"leadHoE>>leadHoE(40,0,0.2)",
"leadSigmaIeIe>>leadSigmaIeIe(50,0,5.e-2)",
"leadPixSeed>>leadPixSeed(2,-0.5,1.5)",
"subleadPassEleVeto>>subleadPassEleVeto(2,-0.5,1.5)",
"subleadBlockChIso>>subleadBlockChIso(60,-10,50)",
"subleadBlockPhoIso>>subleadBlockPhoIso(60,-10,50)",
"subleadChIso>>subleadChIso(60,-10,50)",
"subleadPhoIso>>subleadPhoIso(60,-10,50)",
"subleadNeutIso>>subleadNeutIso(60,-10,50)",
"subleadHoE>>subleadHoE(40,0,0.2)",
"subleadSigmaIeIe>>subleadSigmaIeIe(50,0,5.e-2)",
"subleadPixSeed>>subleadPixSeed(2,-0.5,1.5)",
"subleadPassEleVeto>>subleadPassEleVeto(2,-0.5,1.5)",
"subleadPt:leadPt>>ptSubVsLead(145,100,3000:145,100,3000)",
"minR9>>minR9(110,0,1.1)",
"maxEta>>maxEta(250,0,2.5)"
]
)
process.load("FWCore.MessageService.MessageLogger_cfi")
process.load("Configuration.StandardSequences.GeometryDB_cff")
process.load("Configuration.StandardSequences.MagneticField_cff")
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
process.GlobalTag.globaltag = 'POSTLS170_V5::All'
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
process.MessageLogger.cerr.FwkReport.reportEvery = cms.untracked.int32( 1000 )
process.source = cms.Source("PoolSource",
fileNames=cms.untracked.vstring(
)
)
process.TFileService = cms.Service("TFileService",
fileName = cms.string("test.root")
)
process.trigger=diphotonDumper.clone()
process.id=diphotonDumper.clone()
process.triggerMva=diphotonDumper.clone(src=cms.InputTag("hmvaDiPhotons"))
process.mva=diphotonDumper.clone(src=cms.InputTag("hmvaDiPhotons"))
process.egid=diphotonDumper.clone(src=cms.InputTag("egLooseDiPhotons"))
process.kin=diphotonDumper.clone(src=cms.InputTag("kinDiPhotons"))
process.kin.dumpTrees = True
process.isoKinDiphotons = process.tmpKinDiPhotons.clone(src="kinDiPhotons",
cut="leadingPhoton.userFloat('genIso') < 10. && subLeadingPhoton.userFloat('genIso') < 10.")
process.isohCic4Diphotons = process.tmpKinDiPhotons.clone(src="hcic4DiPhotons",
cut="leadingPhoton.userFloat('genIso') < 10. && subLeadingPhoton.userFloat('genIso') < 10.")
process.isohmvaDiphotons = process.tmpKinDiPhotons.clone(src="hmvaDiPhotons",
cut="leadingPhoton.userFloat('genIso') < 10. && subLeadingPhoton.userFloat('genIso') < 10.")
process.isoKin=diphotonDumper.clone(src=cms.InputTag("isoKinDiphotons"))
process.isoId=diphotonDumper.clone(src=cms.InputTag("isohCic4Diphotons"))
process.isoMva=diphotonDumper.clone(src=cms.InputTag("isohmvaDiphotons"))
process.nonIsoKinDiphotons = process.tmpKinDiPhotons.clone(src="kinDiPhotons",
cut="leadingPhoton.userFloat('genIso') >= 10. || subLeadingPhoton.userFloat('gensIso') >= 10.")
process.nonIsohCic4Diphotons = process.tmpKinDiPhotons.clone(src="hcic4DiPhotons",
cut="leadingPhoton.userFloat('genIso') >= 10. || subLeadingPhoton.userFloat('gensIso') >= 10.")
process.nonIsohmvaDiphotons = process.tmpKinDiPhotons.clone(src="hmvaDiPhotons",
cut="leadingPhoton.userFloat('genIso') >= 10. || subLeadingPhoton.userFloat('gensIso') >= 10.")
process.nonIsoKin=diphotonDumper.clone(src=cms.InputTag("nonIsoKinDiphotons"))
process.nonIsoId=diphotonDumper.clone(src=cms.InputTag("nonIsohCic4Diphotons"))
process.nonIsoMva=diphotonDumper.clone(src=cms.InputTag("nonIsohmvaDiphotons"))
process.load('HLTrigger.HLTfilters.hltHighLevel_cfi')
process.hltHighLevel.HLTPaths = ["HLT_DoublePhoton85*","HLT_Photon250_NoHE*"]
process.watchDog = cms.EDAnalyzer("IdleWatchdog",
minIdleFraction=cms.untracked.double(0.5),
tolerance=cms.untracked.int32(10),
checkEvery=cms.untracked.int32(100),
)
process.p1 = cms.Path(
((process.tmpKinDiPhotons*process.kinDiPhotons)
*
(process.kin
+ (process.isoKinDiphotons+process.nonIsoKinDiphotons)*(process.isoKin+process.nonIsoKin)
+ process.egLooseDiPhotons*process.egid
+ process.hmvaDiPhotons
*
(process.mva
+ (process.isohmvaDiphotons+process.nonIsohmvaDiphotons)*(process.isoMva +process.nonIsoMva)
+ (process.hltHighLevel*process.triggerMva)
)
+ process.hcic4DiPhotons
*
(process.id
+ (process.isohCic4Diphotons+process.nonIsohCic4Diphotons)*(process.isoId +process.nonIsoId)
+ (process.hltHighLevel*process.trigger)
)
))
* process.watchDog
)
from diphotons.MetaData.JobConfig import customize
customize.setDefault("maxEvents",100)
customize.setDefault("targetLumi",1.e+3)
customize(process)
|
[
"pasquale.musella@cern.ch"
] |
pasquale.musella@cern.ch
|
53b08521815f7bd672a1d8354b51b3693af42b6a
|
3af4bc44120806de09e9e9542719d80964f0f7cd
|
/python/skripte_datensammlung/tweet_extractor.py
|
3c4eb28861605cbba5c3d78e2d2c15ae6a822fc5
|
[
"MIT"
] |
permissive
|
TimSchmittmann/R-vs-Python
|
4dfa80e8a97c5651b6675a2e32c5132571e8d19d
|
4bacfaf236839a912f5a6a323d9756a47ab2d880
|
refs/heads/master
| 2020-04-10T09:13:45.422939
| 2018-12-10T11:20:55
| 2018-12-10T11:20:55
| 160,929,993
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,921
|
py
|
# -*- coding: utf-8 -*-
'''
@author: Tim Schmittmann
'''
from __future__ import print_function
import twitter
import time
from langdetect import detect
import csv
import config
import math
import sys
import subprocess
from enum import Enum
Mode = Enum('Mode', 'PAST RECENT')
# Create an Api instance.
api = twitter.Api(consumer_key=config.CONSUMER_KEY,
consumer_secret=config.CONSUMER_SECRET,
access_token_key=config.ACCESS_TOKEN,
access_token_secret=config.ACCESS_TOKEN_SECRET)
emoji_cnt_csv_path = "data/manual_settings/emoji_cnt_used_for_twitter_api_requests.csv"
tweets_write_csv_base_path = "data/emoji_tweets/tweets_q3_emojis_"
tweets_write_csv_ext = ".csv"
mode = Mode.RECENT
def read_max_id(tweet_csv_filepath, mode=Mode.PAST):
max_id = 9999999999999999999
if mode == Mode.PAST:
# go into the past starting from oldest tweet in file or starting from most recent if there is no file
try:
with open(tweet_csv_filepath, 'r', encoding='utf-8', newline='') as csvfile:
reader = csv.reader(csvfile, delimiter=';')
for row in reader:
try:
max_id = min(int(row[0]), max_id)
except:
pass
except Exception as e:
print(str(e))
return max_id
#return False # most recent tweets only
def read_min_id(tweet_csv_filepath, mode=Mode.PAST):
if mode == Mode.PAST:
# go into the past as far as API allows
return 0
# go into the past until you find already processed tweets
# in that case min_id is the highest tweet id found in the csv
min_id = 0
try:
with open(tweet_csv_filepath, 'r', encoding='utf-8', newline='') as csvfile:
reader = csv.reader(csvfile, delimiter=';')
for row in reader:
try:
min_id = max(int(row[0]), min_id)
except:
pass
except Exception as e:
print(str(e))
return min_id
def get_all_emojis(emoji_cnt_csv_path):
emojis = []
with open(emoji_cnt_csv_path, 'r', encoding='utf-8', newline='') as csvfile:
reader = csv.reader(csvfile, delimiter=';')
for row in reader:
emojis.append(row[0])
return emojis
def init_and_exec_requests_and_writes(tweets_write_csv_path, query_emojis, mode=Mode.PAST):
max_id = read_max_id(tweets_write_csv_path, mode)
min_id = read_min_id(tweets_write_csv_path, mode)
print("MaxID: "+str(max_id))
print("MinID: "+str(min_id))
fieldnames = [
'tweet_id',
'tweet_full_text',
'tweet_created_at',
'tweet_is_quote_status',
'tweet_retweet_count',
'tweet_favorite_count',
'tweet_favorited',
'tweet_retweeted',
'tweet_possibly_sensitive',
'user_id',
'user_description',
'user_followers_count',
'user_friends_count',
'user_listed_count',
'user_favourites_count',
'user_statuses_count',
]
with open(tweets_write_csv_path, 'a', encoding='utf-8', newline='', buffering=1) as csvfile:
writer = csv.DictWriter(csvfile, delimiter=';', fieldnames=fieldnames,
quoting=csv.QUOTE_MINIMAL)
        if csvfile.tell() == 0:
            # write the header only when the file is new; appends keep the existing one
            writer.writeheader()
request_tweets(min_id, max_id, query_emojis, lambda search_result: write_tweet(writer, search_result))
def build_request_query(min_id, max_id, query_emojis):
q = "%20OR%20".join(query_emojis)
query = "lang=de&q="+q+"%20-filter%3Aretweets%20-filter%3Areplies&result_type=recent&count=100&tweet_mode=extended"
#query = "lang=de&q=a%20OR%20b%20OR%20c%20OR%20d%20OR%20e%20OR%20f%20OR%20g%20OR%20h%20OR%20i%20OR%20j%20OR%20k%20OR%20l%20OR%20m%20OR%20n%20OR%20o%20OR%20p%20OR%20q%20OR%20r%20OR%20s%20OR%20t%20OR%20u%20OR%20v%20OR%20w%20OR%20x%20OR%20y%20OR%20z%20-filter%3Aretweets%20-filter%3Areplies&result_type=recent&count=100"
if max_id is not False:
query += "&max_id="+str(max_id)
if min_id is not False:
query += "&since_id="+str(min_id)
return query
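# Example (illustrative, not in the original): build_request_query(0, 12345, ["a", "b"]) returns
# "lang=de&q=a%20OR%20b%20-filter%3Aretweets%20-filter%3Areplies&result_type=recent"
# "&count=100&tweet_mode=extended&max_id=12345&since_id=0"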
def request_tweets(min_id, max_id, query_emojis, write_cb, nr_of_requests = 999999):
for i in range(nr_of_requests):
print("Request "+str(i))
query = build_request_query(min_id, max_id, query_emojis)
print(query)
try:
search_result = api.GetSearch(raw_query=query)
            if len(search_result) == 0:
                print("Reached already processed tweets")
                break
            if max_id is not False:
                # Move further into the past with each request
                max_id = search_result[-1].id - 1
# elif min_id is not False:
# Get the most up to date tweets per request
# min_id = search_result[0].id + 1
#print(max_id)
#print([s for s in search_result])
write_cb(search_result)
time.sleep(5)
except Exception as e:
print(str(e))
if str(e) == 'list index out of range':
                break
time.sleep(30)
def write_tweet(writer, search_result):
for tweet_dict in search_result:
tweet_dict = tweet_dict.AsDict()
try:
if detect(tweet_dict['full_text']) != 'de':
continue
except:
print("This text throws an error:", tweet_dict['full_text'])
continue
tweet = {}
if 'id' in tweet_dict:
tweet['tweet_id'] = tweet_dict['id']
if 'full_text' in tweet_dict:
tweet['tweet_full_text'] = tweet_dict['full_text']
if 'created_at' in tweet_dict:
tweet['tweet_created_at'] = tweet_dict['created_at']
if 'is_quote_status' in tweet_dict:
tweet['tweet_is_quote_status'] = tweet_dict['is_quote_status']
if 'retweet_count' in tweet_dict:
tweet['tweet_retweet_count'] = tweet_dict['retweet_count']
if 'favorite_count' in tweet_dict:
tweet['tweet_favorite_count'] = tweet_dict['favorite_count']
if 'favorited' in tweet_dict:
tweet['tweet_favorited'] = tweet_dict['favorited']
if 'retweeted' in tweet_dict:
tweet['tweet_retweeted'] = tweet_dict['retweeted']
if 'possibly_sensitive' in tweet_dict:
tweet['tweet_possibly_sensitive'] = tweet_dict['possibly_sensitive']
if 'id' in tweet_dict['user']:
tweet['user_id'] = tweet_dict['user']['id']
if 'description' in tweet_dict['user']:
tweet['user_description'] = tweet_dict['user']['description']
if 'followers_count' in tweet_dict['user']:
tweet['user_followers_count'] = tweet_dict['user']['followers_count']
if 'friends_count' in tweet_dict['user']:
tweet['user_friends_count'] = tweet_dict['user']['friends_count']
if 'listed_count' in tweet_dict['user']:
tweet['user_listed_count'] = tweet_dict['user']['listed_count']
if 'favourites_count' in tweet_dict['user']:
tweet['user_favourites_count'] = tweet_dict['user']['favourites_count']
if 'statuses_count' in tweet_dict['user']:
tweet['user_statuses_count'] = tweet_dict['user']['statuses_count']
#print(str(tweet['id'])+": "+tweet_dict['created_at'])
writer.writerow(tweet)
# fieldnames.update(tweet.keys())
# tweets.append(tweet)
def extract_inner_tweet_fields():
'''
for key in tweetDict:
if key == 'user':
for innerKey in tweetDict[key]:
tweet[key + "_" + innerKey] = tweetDict[key][innerKey]
elif key == 'urls' or key == 'user_mentions':
for j in range(len(tweetDict[key])):
for innerKey in tweetDict[key][j]:
tweet[key + "_" + str(j) + "_" + innerKey] = tweetDict[key][j][innerKey]
else:
tweet[key] = tweetDict[key]
'''
def main(emoji_cnt_csv_path, tweets_write_csv_base_path, tweets_write_csv_ext, mode):
emojis = get_all_emojis(emoji_cnt_csv_path)
for j in range(0, math.ceil(len(emojis) / 45)):
from_emoji = j*45
to_emoji = min((j+1)*45, len(emojis))
print("From emoji "+str(from_emoji)+" to emoji "+str(to_emoji))
query_emojis = emojis[from_emoji:to_emoji]
if len(query_emojis) > 0:
tweets_write_csv_path = tweets_write_csv_base_path+str(from_emoji)+"-"+str(to_emoji)+tweets_write_csv_ext
init_and_exec_requests_and_writes(tweets_write_csv_path, query_emojis, mode)
if __name__ == '__main__':
while True:
main(emoji_cnt_csv_path, tweets_write_csv_base_path, tweets_write_csv_ext, mode)
|
[
"tim.schmittmann@gmx.de"
] |
tim.schmittmann@gmx.de
|
ae54176dcfa773ec0636d58d7dd0a27b87fb7233
|
e7a8ab3898ef331ca11c63808c4d9449794308c2
|
/Leetcode/637-average_levels_binary_tree.py
|
b76472e42da39f5cd2cd1522e1bf704f3bfca5c9
|
[
"MIT"
] |
permissive
|
EdwaRen/Competitve-Programming
|
615695e00b13bda8024055f9634a7de30534977c
|
e8bffeb457936d28c75ecfefb5a1f316c15a9b6c
|
refs/heads/master
| 2021-06-06T11:23:18.758911
| 2021-05-29T14:27:04
| 2021-05-29T14:27:04
| 97,161,907
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 831
|
py
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
from collections import deque
class Solution(object):
def averageOfLevels(self, root):
"""
:type root: TreeNode
:rtype: List[float]
"""
level = deque()
level.append(root)
res = []
while level:
n = len(level)
level_sum = 0
for i in range(n):
cur = level.popleft()
level_sum += cur.val
if cur.right: level.append(cur.right)
if cur.left: level.append(cur.left)
res.append(level_sum / float(n))
return res
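# Example (illustrative): for the tree [3, 9, 20, None, None, 15, 7],
# averageOfLevels returns [3.0, 14.5, 11.0].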
|
[
"eddie.ren.2013@gmail.com"
] |
eddie.ren.2013@gmail.com
|
7cbbea8b7e51984f9d29d90a14d37aec9d670e29
|
51b0cc866c1e24e09f67d947600ce0b070df1b76
|
/problems/problem_005.py
|
6a64158d92b9dc418a59b341dbf18340a3cf119b
|
[] |
no_license
|
zzggbb/euler-problems
|
b4dfd9fd6221cd43713aa201c9c7acf8a48219dc
|
a7e9da92b0b40a00cf0ffeea4b85a69c3b3f66df
|
refs/heads/master
| 2020-03-26T10:30:24.163715
| 2013-10-27T07:06:33
| 2013-10-27T07:06:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 417
|
py
|
def getpassed(min,max):
    number = max
    # count up from max until a number divisible by every value in [min, max] is found
    while not all(number % i == 0 for i in range(min, max + 1)):
        number = number + 1
    print ("The number " + str(number) + " is the lowest number "
    "divisible by all numbers from " + str(min) + " to " + str(max))
|
[
"zbradley96@gmail.com"
] |
zbradley96@gmail.com
|
94f35b705932d2f07602d05cfde485af977a3f1e
|
d18d057168b5f856d9f2b37310e58e6c41f1387b
|
/streamz_ext/graph.py
|
550910ce3089851126d03d11c19c065f638ad7f1
|
[] |
no_license
|
Zatsepingroup/streamz_ext
|
9cdfb440fa1152db690de906697956f0d10a0356
|
ec604c4dd8486070613302414d318e6478e80005
|
refs/heads/master
| 2020-03-26T21:28:07.088748
| 2018-08-15T20:26:16
| 2018-08-15T20:26:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,859
|
py
|
from weakref import ref
import matplotlib.pyplot as plt
import networkx as nx
from grave import plot_network
from streamz import combine_latest
from streamz.graph import *
from streamz.graph import _clean_text
from streamz_ext import Stream
def create_graph_nodes(node, graph, prior_node=None, pc=None):
"""Create graph from a single node, searching up and down the chain
with weakrefs to nodes in the graph nodes
Parameters
----------
node: Stream instance
graph: networkx.DiGraph instance
"""
edge_kwargs = {}
if node is None:
return
t = hash(node)
graph.add_node(
t,
label=_clean_text(str(node)),
shape=node._graphviz_shape,
orientation=str(node._graphviz_orientation),
style=node._graphviz_style,
fillcolor=node._graphviz_fillcolor,
node=ref(node),
)
if prior_node:
tt = hash(prior_node)
        # If we emit on something other than all the upstreams, mark the edge as dashed
if (
isinstance(node, combine_latest)
and node.emit_on != node.upstreams
and prior_node in node.emit_on
):
edge_kwargs["style"] = "dashed"
if graph.has_edge(t, tt):
return
if pc == "downstream":
graph.add_edge(tt, t)
else:
graph.add_edge(t, tt)
for nodes, pc in zip(
[list(node.downstreams), list(node.upstreams)],
["downstream", "upstreams"],
):
for node2 in nodes:
if node2 is not None:
create_graph_nodes(node2, graph, node, pc=pc)
def readable_graph(node, source_node=False):
"""Create human readable version of this object's task graph.
Parameters
----------
node: Stream instance
A node in the task graph
"""
import networkx as nx
g = nx.DiGraph()
if source_node:
create_edge_label_graph(node, g)
else:
create_graph(node, g)
mapping = {k: "{}".format(g.node[k]["label"]) for k in g}
idx_mapping = {}
for k, v in mapping.items():
if v in idx_mapping.keys():
idx_mapping[v] += 1
mapping[k] += "-{}".format(idx_mapping[v])
else:
idx_mapping[v] = 0
gg = {k: v for k, v in mapping.items()}
rg = nx.relabel_nodes(g, gg, copy=True)
return rg, gg
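# Illustrative note (not in the original): if two nodes both render with the
# label "map", the mapping above keeps the first as "map" and renames the
# second to "map-1", so relabel_nodes never collapses distinct nodes into one.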
class LiveGraphPlot(object):
"""Live plotting of the streamz graph status"""
def __init__(
self,
graph,
layout="spectral",
node_style=None,
edge_style=None,
node_label_style=None,
edge_label_style=None,
ax=None,
force_draw=False,
):
"""
Parameters
----------
graph : nx.Graph
The graph to be plotted
layout : string or callable, optional, default: "spectral"
Specifies the type of layout to use for plotting.
It must be one of "spring", "circular", "random", "kamada_kawai",
"shell", "spectral", or a callable.
node_style : dict or callable, optional
The style parameters for nodes, if callable must return a dict
edge_style : dict or callable, optional
The style parameters for edges, if callable must return a dict
node_label_style : dict or callable, optional
The style parameters for node labels, if callable must return a dict
edge_label_style : dict or callable, optional
The style parameters for edge labels, if callable must return a dict
ax : matplotlib axis object, optional
The axis to plot on. If not provided produce fig and ax internally.
force_draw : bool, optional
If True force drawing every time graph is updated, else only draw
when idle. Defaults to False
"""
self.force_draw = force_draw
if edge_label_style is None:
edge_label_style = {}
if node_label_style is None:
node_label_style = {}
if edge_style is None:
edge_style = {}
if node_style is None:
node_style = {}
self.node_style = node_style
self.edge_style = edge_style
self.node_label_style = node_label_style
self.edge_label_style = edge_label_style
self.layout = layout
self.graph = graph
if not ax:
fig, ax = plt.subplots()
self.ax = ax
self.art = plot_network(
self.graph,
node_style=self.node_style,
edge_style=self.edge_style,
node_label_style=self.node_label_style,
edge_label_style=self.edge_label_style,
layout=self.layout,
ax=self.ax,
)
self.update()
def update(self):
"""Update the graph plot"""
# TODO: reuse the current node positions (if no new nodes added)
self.art._reprocess()
if self.force_draw:
plt.draw()
else:
self.ax.figure.canvas.draw_idle()
def decorate_nodes(graph, update_decorator=None, emit_decorator=None):
"""Decorate node methods for nodes in a graph
Parameters
----------
graph : nx.Graph instance
        The graph whose nodes are to be updated
update_decorator : callable, optional
The function to wrap the update method. If None no decorator is applied.
emit_decorator : callable, optional
The function to wrap the _emit method. If None no decorator is applied.
Returns
-------
"""
for n, attrs in graph.nodes.items():
nn = attrs["node"]()
if nn.__class__ != Stream:
if update_decorator:
nn.update = update_decorator(attrs["node"]().update)
if emit_decorator:
nn._emit = emit_decorator(attrs["node"]()._emit)
status_color_map = {"running": "yellow", "waiting": "green", "error": "red"}
def node_style(node_attrs):
d = {
"size": 2000,
"color": status_color_map.get(node_attrs.get("status", "NA"), "k"),
}
return d
def run_vis(node, source_node=False, **kwargs):
"""Start the visualization of a pipeline
Parameters
----------
node : Stream instance
A node in the pipeline
source_node : bool
If True the input node is the source node and numbers the
graph edges accordingly, defaults to False
kwargs : Any
kwargs passed to LiveGraphPlot
Returns
-------
"""
g, gg = readable_graph(node, source_node=source_node)
fig, ax = plt.subplots()
gv = LiveGraphPlot(g, ax=ax, **kwargs)
def update_decorator(func):
node = hash(func.__self__)
node_name = gg[node]
# @wraps
def wrapps(*args, **kwargs):
g.nodes[node_name]["status"] = "running"
gv.update()
try:
ret = func(*args, **kwargs)
except Exception as e:
g.nodes[node_name]["status"] = "error"
gv.update()
raise e
else:
g.nodes[node_name]["status"] = "waiting"
gv.update()
return ret
return wrapps
def emit_decorator(func):
node = hash(func.__self__)
node_name = gg[node]
def wrapps(*args, **kwargs):
g.nodes[node_name]["status"] = "waiting"
gv.update()
try:
ret = func(*args, **kwargs)
except Exception as e:
g.nodes[node_name]["status"] = "error"
gv.update()
raise e
else:
return ret
return wrapps
node_g = nx.DiGraph()
create_graph_nodes(node, node_g)
decorate_nodes(node_g, update_decorator, emit_decorator)
return gv
|
[
"cjwright4242@gmail.com"
] |
cjwright4242@gmail.com
|
a51b2c3e4ff8d91dfe61a02a122b2ce309ab5088
|
9ea59574bd785066a9e4c4f52361b42626f1581a
|
/movieproject/movieproject/urls.py
|
c248138f79310c44d359e7a4638143b02a79e54b
|
[] |
no_license
|
Aiswaryakpda/movie
|
ba83fb760c496e3bd821df2a0f6f508e8cc3a7cc
|
ef1b780d91110c61bef827731d474a4a24ce94bf
|
refs/heads/master
| 2023-04-20T16:20:48.220993
| 2021-05-11T04:35:21
| 2021-05-11T04:35:21
| 366,255,206
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,057
|
py
|
"""movieproject URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf import settings
from django.contrib import admin
from django.urls import path,include
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('',include("movieapp.urls"))
]
if settings.DEBUG:
urlpatterns += static(settings.STATIC_URL,document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL,document_root=settings.MEDIA_ROOT)
|
[
"aiswaryakpda@gmail.com"
] |
aiswaryakpda@gmail.com
|
646a519b6dc1de4f1717b86e076192d7f5b2795a
|
ad08ee023345fcc42fdac6fab527809b2d502fa5
|
/peek_plugin_diagram/_private/tuples/GroupDispsTuple.py
|
5d5ba422a06fe8a6234f76e9e8c127f578073dc3
|
[] |
no_license
|
Synerty/peek-plugin-diagram
|
fcaefc414334e4584976d0b0567712bb47a3082a
|
8efffa2bb3877b7fcd3736b80df53adc784ca69c
|
refs/heads/master
| 2021-07-03T17:42:17.129328
| 2020-10-27T04:55:52
| 2020-10-27T04:55:52
| 191,874,585
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 563
|
py
|
from typing import List, Any
from vortex.Tuple import addTupleType, TupleField, Tuple
from peek_plugin_diagram._private.PluginNames import diagramTuplePrefix
@addTupleType
class GroupDispsTuple(Tuple):
""" Group Disps Tuple
This tuple stores a list of DispGroups that are in the 'ID:dispgroup' grid key
in that coord set.
"""
__tupleType__ = diagramTuplePrefix + "GroupDispsTuple"
coordSetId: int = TupleField()
# A GridTuple, already encoded and ready for storage in the clients cache
encodedGridTuple: str = TupleField()
|
[
"jarrod.chesney@synerty.com"
] |
jarrod.chesney@synerty.com
|
78d9aa0431e117f3a130b270ab269f30ffc85cbe
|
f4823aed72eb36944f8aebf1a2f40f1a16f5a27a
|
/setup.py
|
1987097452d097324f3ed237170ea4b765cf6606
|
[
"BSD-3-Clause"
] |
permissive
|
dbotwinick/python-botwinick-gis
|
4191ed07408816cd1e1af086b8308751e776e44d
|
4e26bff851d676880c40bf360f401fd3814d3a94
|
refs/heads/main
| 2023-02-25T07:05:11.689669
| 2021-02-02T19:38:30
| 2021-02-02T19:38:30
| 304,963,109
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 972
|
py
|
# author: Drew Botwinick, Botwinick Innovations
# license: 3-clause BSD
import setuptools
with open("README.md", 'r') as f:
readme_txt = f.read()
setuptools.setup(
name="botwinick_gis",
version="0.0.4",
author="Drew Botwinick",
author_email="foss@drewbotwinick.com",
description="GIS support package",
long_description=readme_txt,
long_description_content_type="text/markdown",
url="https://github.com/dbotwinick/python-botwinick-gis",
packages=setuptools.find_packages(),
install_requires=['numpy', 'pyshp', 'pyproj', 'botwinick_math'],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: GIS',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'License :: OSI Approved :: BSD License',
"Operating System :: OS Independent",
],
    python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*',
)
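# Illustrative usage (not part of the original file): from a checkout of the
# repository, `pip install .` runs this setup script and installs the package
# together with the dependencies listed in install_requires.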
|
[
"drew@polymathcoder.com"
] |
drew@polymathcoder.com
|
3a27ef8a71a4b63d5aaec3a4c06030dae22e0a90
|
a79bb3cc1ddfd42d4c1d50efa1b3d7acf01a8186
|
/Pendulum-Damping/PendulumDamping.py
|
ea752a8a24fa28b91df107da580bfdaf9601b21c
|
[] |
no_license
|
bshreddy/PhysicsSims
|
dc085bc202e94d27c93364ace3c686df0c4b0113
|
e31c1574eb39b720545ecc59acb64f65f3ecb962
|
refs/heads/master
| 2023-07-25T08:48:38.877187
| 2020-05-28T17:28:17
| 2020-05-28T17:28:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,946
|
py
|
import math, time
import pyglet
from pyglet.window import FPSDisplay, key
class PendulumDamping:
def __init__(self, position=(0, 0), length=1, radius=20, theta = math.pi/8, angular_velocity=0, damping=0.1):
self.x, self.y = position
self.length = length
self.radius = radius
self.theta = theta
self.res = 200
self.angular_velocity = angular_velocity
self.damping = damping
def draw(self, offset=(0, 0)):
offset_x, offset_y = offset
x1, y1 = self.x + offset_x, self.y + offset_y
x2, y2 = self.x + math.sin(self.theta) * self.length * 4 + offset_x, self.y - math.cos(
self.theta) * self.length * 4 + offset_y
self.makeVertices(x2, y2)
pyglet.gl.glLineWidth(4)
pyglet.graphics.draw(2, pyglet.gl.GL_LINES,
('v2f', (x1, y1, x2, y2)),
)
circle = pyglet.graphics.vertex_list(self.res, ('v2f', self.verts))
circle.draw(pyglet.gl.GL_POLYGON)
def step(self, dt):
        angular_acceleration = (-980*.35 / self.length) * math.sin(self.theta) - self.damping * self.angular_velocity
        self.angular_velocity += angular_acceleration * dt
self.theta += self.angular_velocity * dt
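    # Note (added, illustrative): step() above is a forward-Euler integration of
    # the damped pendulum ODE, theta'' = -(g_eff / length) * sin(theta) - damping * theta',
    # where g_eff = 980 * 0.35 reads as gravity in this sketch's pixel units.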
def makeVertices(self, X, Y):
self.verts = []
for i in range(self.res):
angle = math.radians(i / self.res * 360.0)
x = self.radius * math.cos(angle) + X
y = self.radius * math.sin(angle) + Y
self.verts += [x, y]
class Simulation(pyglet.window.Window):
def __init__(self, width=1152, height=720, fullscreen=False):
super().__init__(width, height, vsync=False, fullscreen=fullscreen, caption="Simple Pendulum")
self.fps = FPSDisplay(self)
self.T = 0
self.pendulums = [PendulumDamping(length=100)]
@property
def center(self):
return self.width // 2, self.height // 2
def update(self, dt):
for pendulum in self.pendulums:
pendulum.step(dt)
self.T += dt
def on_draw(self):
self.clear()
for pendulum in self.pendulums:
pendulum.draw((self.width // 2, self.height))
label = pyglet.text.Label(f"T = {self.T:.3f}",
                                  font_name='Helvetica Neue',
font_size=16, x=self.width - 10, y=10,
anchor_x="right", anchor_y="bottom")
label.draw()
self.fps.draw()
def on_key_press(self, symbol, modifiers):
if symbol == key.Q and modifiers == key.MOD_COMMAND:
pyglet.app.exit()
def on_key_release(self, symbol, modifiers):
if symbol == key.SPACE:
pyglet.clock.schedule_interval(self.update, 1 / 500.0)
if __name__ == "__main__":
sim = Simulation()
# pyglet.clock.set_fps_limit(500)
pyglet.app.run()
|
[
"sh.bheemreddy@icloud.com"
] |
sh.bheemreddy@icloud.com
|
984977077d80e16cbee6059035c1ad8932d90f6b
|
5360ce3212a97984d725a10ae3f07001c668682d
|
/StructVBERT/lxrt/optimization.py
|
923854cf7d54ea65f4676b82ad5b1940aa028a48
|
[
"Apache-2.0"
] |
permissive
|
RunxinXu/AliceMind
|
ac4c6f35d271717ee3174a2c1ec1c25ea60033d4
|
4bd9f94d0f34d2a2846c5d892ca16121b18b6e10
|
refs/heads/main
| 2023-08-14T07:32:10.625425
| 2021-09-28T14:50:41
| 2021-09-28T14:50:41
| 411,323,713
| 0
| 0
|
Apache-2.0
| 2021-09-28T14:49:04
| 2021-09-28T14:49:03
| null |
UTF-8
|
Python
| false
| false
| 8,155
|
py
|
# coding=utf-8
# Copyright 2019 project LXRT
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch optimization for BERT model."""
import math
import torch
from torch.optim import Optimizer
from torch.optim.optimizer import required
import logging
logger = logging.getLogger(__name__)
def warmup_cosine(x, warmup=0.002):
    """ Cosine learning-rate decay after a linear warmup over `warmup`*`t_total` steps. """
    if x < warmup:
        return x/warmup
    return 0.5 * (1.0 + math.cos(math.pi * x))
def warmup_constant(x, warmup=0.002):
""" Linearly increases learning rate over `warmup`*`t_total` (as provided to BertAdam) training steps.
Learning rate is 1. afterwards. """
if x < warmup:
return x/warmup
return 1.0
def warmup_linear(x, warmup=0.002):
""" Specifies a triangular learning rate schedule where peak is reached at `warmup`*`t_total`-th (as provided to BertAdam) training step.
After `t_total`-th training step, learning rate is zero. """
if x < warmup:
return x/warmup
return max((x-1.)/(warmup-1.), 0)
SCHEDULES = {
'warmup_cosine': warmup_cosine,
'warmup_constant': warmup_constant,
'warmup_linear': warmup_linear,
}
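# Illustrative example (not part of the original): with warmup=0.1 and
# t_total=1000, warmup_linear ramps from 0 to 1 over the first 100 steps and
# then decays linearly back to 0 at step 1000, e.g.
#   warmup_linear(50 / 1000, warmup=0.1)   # -> 0.5  (mid-warmup)
#   warmup_linear(550 / 1000, warmup=0.1)  # -> 0.5  (halfway down the decay)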
class BertAdam(Optimizer):
"""Implements BERT version of Adam algorithm with weight decay fix.
Params:
lr: learning rate
warmup: portion of t_total for the warmup, -1 means no warmup. Default: -1
t_total: total number of training steps for the learning
rate schedule, -1 means constant learning rate. Default: -1
schedule: schedule to use for the warmup (see above). Default: 'warmup_linear'
b1: Adams b1. Default: 0.9
b2: Adams b2. Default: 0.999
e: Adams epsilon. Default: 1e-6
weight_decay: Weight decay. Default: 0.01
max_grad_norm: Maximum norm for the gradients (-1 means no clipping). Default: 1.0
"""
def __init__(self, params, lr=required, warmup=-1, t_total=-1, schedule='warmup_linear',
b1=0.9, b2=0.999, e=1e-6, weight_decay=0.01,
max_grad_norm=1.0):
if lr is not required and lr < 0.0:
raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr))
if schedule not in SCHEDULES:
raise ValueError("Invalid schedule parameter: {}".format(schedule))
if not 0.0 <= warmup < 1.0 and not warmup == -1:
raise ValueError("Invalid warmup: {} - should be in [0.0, 1.0[ or -1".format(warmup))
if not 0.0 <= b1 < 1.0:
raise ValueError("Invalid b1 parameter: {} - should be in [0.0, 1.0[".format(b1))
if not 0.0 <= b2 < 1.0:
raise ValueError("Invalid b2 parameter: {} - should be in [0.0, 1.0[".format(b2))
if not e >= 0.0:
raise ValueError("Invalid epsilon value: {} - should be >= 0.0".format(e))
defaults = dict(lr=lr, schedule=schedule, warmup=warmup, t_total=t_total,
b1=b1, b2=b2, e=e, weight_decay=weight_decay,
max_grad_norm=max_grad_norm)
super(BertAdam, self).__init__(params, defaults)
def get_lr(self):
lr = []
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
if len(state) == 0:
return [0]
if group['t_total'] != -1:
schedule_fct = SCHEDULES[group['schedule']]
lr_scheduled = group['lr'] * schedule_fct(state['step']/group['t_total'], group['warmup'])
else:
lr_scheduled = group['lr']
lr.append(lr_scheduled)
return lr
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
warned_for_t_total = False
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['next_m'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['next_v'] = torch.zeros_like(p.data)
next_m, next_v = state['next_m'], state['next_v']
beta1, beta2 = group['b1'], group['b2']
# LXRT: grad is clipped outside.
# Add grad clipping
# if group['max_grad_norm'] > 0:
# clip_grad_norm_(p, group['max_grad_norm'])
# Decay the first and second moment running average coefficient
# In-place operations to update the averages at the same time
next_m.mul_(beta1).add_(1 - beta1, grad)
next_v.mul_(beta2).addcmul_(1 - beta2, grad, grad)
update = next_m / (next_v.sqrt() + group['e'])
# bugfix ref: https://arxiv.org/pdf/2006.05987.pdf, changed by ming
# update = (next_m / (1 - beta1 ** (state['step'] + 1))) / ((next_v / (1 - beta2 ** (state['step'] + 1))).sqrt() + group['e'])
# Just adding the square of the weights to the loss function is *not*
# the correct way of using L2 regularization/weight decay with Adam,
# since that will interact with the m and v parameters in strange ways.
#
# Instead we want to decay the weights in a manner that doesn't interact
# with the m/v parameters. This is equivalent to adding the square
# of the weights to the loss with plain (non-momentum) SGD.
if group['weight_decay'] > 0.0:
update += group['weight_decay'] * p.data
if group['t_total'] != -1:
schedule_fct = SCHEDULES[group['schedule']]
progress = state['step']/group['t_total']
lr_scheduled = group['lr'] * schedule_fct(progress, group['warmup'])
                    # warning for exceeding t_total (only active with warmup_linear)
if group['schedule'] == "warmup_linear" and progress > 1. and not warned_for_t_total:
logger.warning(
"Training beyond specified 't_total' steps with schedule '{}'. Learning rate set to {}. "
"Please set 't_total' of {} correctly.".format(group['schedule'], lr_scheduled, self.__class__.__name__))
warned_for_t_total = True
# end warning
else:
lr_scheduled = group['lr']
update_with_lr = lr_scheduled * update
p.data.add_(-update_with_lr)
state['step'] += 1
# step_size = lr_scheduled * math.sqrt(bias_correction2) / bias_correction1
# No bias correction
# bias_correction1 = 1 - beta1 ** state['step']
# bias_correction2 = 1 - beta2 ** state['step']
return loss
|
[
"lfl259702@alibaba-inc.com"
] |
lfl259702@alibaba-inc.com
|
91134b05463c18e8d0906f98b8b542a239af5beb
|
61f30ca19d9553ffb55dd2deccf895405a199719
|
/src/update_workflows.py
|
50900ea70258032fae567a2ac3fcd22e3a388813
|
[
"MIT"
] |
permissive
|
deanishe/alfred-packal-search
|
6fc1d0c3f12a061275b5731c2603517e0d18762e
|
88427bcbf127e3d15674c71566f253cf312f96d6
|
refs/heads/master
| 2021-06-10T09:02:04.105014
| 2017-09-17T12:01:34
| 2017-09-17T12:01:34
| 17,991,728
| 43
| 5
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,495
|
py
|
#!/usr/bin/env python
# encoding: utf-8
#
# Copyright © 2014 deanishe@deanishe.net
#
# MIT Licence. See http://opensource.org/licenses/MIT
#
# Created on 2014-04-07
#
"""update_workflows.py [--force-update]
Usage:
update_workflows.py [--force-update]
"""
from __future__ import print_function, unicode_literals
import subprocess
import sys
import os
from datetime import datetime
from plistlib import readPlist, readPlistFromString
try:
from xml.etree import cElementTree as ET
except ImportError:
from xml.etree import ElementTree as ET
from workflow import web, Workflow
from common import (CACHE_MAXAGE, Version, STATUS_SPLITTER, STATUS_UNKNOWN,
STATUS_UPDATE_AVAILABLE, STATUS_UP_TO_DATE,
STATUS_NOT_INSTALLED)
log = None
MANIFEST_URL = 'https://raw.github.com/packal/repository/master/manifest.xml'
# WORKFLOW_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
ALFRED_PREFS = os.path.expanduser(
'~/Library/Preferences/com.runningwithcrayons.Alfred-Preferences-3.plist')
class Constant(object):
def __init__(self, value):
self.value = value
def __repr__(self):
        return 'Constant({0!r})'.format(self.value)
def __str__(self):
return str(self.value)
NOT_INSTALLED = Constant('NOT INSTALLED')
def read_plist(path):
"""Convert plist to XML and read its contents."""
cmd = [b'plutil', b'-convert', b'xml1', b'-o', b'-', path]
xml = subprocess.check_output(cmd)
return readPlistFromString(xml)
def get_workflow_directory():
"""Return path to Alfred's workflow directory."""
prefs = read_plist(ALFRED_PREFS)
syncdir = prefs.get('syncfolder')
if not syncdir:
log.debug('Alfred sync folder not found')
return None
syncdir = os.path.expanduser(syncdir)
wf_dir = os.path.join(syncdir, 'Alfred.alfredpreferences/workflows')
log.debug('Workflow sync dir : %r', wf_dir)
if os.path.exists(wf_dir):
log.debug('Workflow directory retrieved from Alfred preferences')
return wf_dir
log.debug('Alfred.alfredpreferences/workflows not found')
return None
def packal_metadata(xmlpath):
"""Return ``dict`` of metadata in ``package.xml`` file created by Packal"""
tree = ET.parse(xmlpath)
root = tree.getroot()
data = {}
for elem in root:
data[elem.tag] = elem.text
data['version'] = Version(data['version'])
return data
def get_installed_workflows():
"""Return ``dict`` of installed workflows
``{bundleid : version}``
``version`` is ``None`` if workflow isn't from Packal.org
"""
workflows = {}
workflow_dir = get_workflow_directory()
log.debug('reading workflows installed in %r ...', workflow_dir)
for name in os.listdir(workflow_dir):
path = os.path.join(workflow_dir, name)
if not os.path.isdir(path):
continue
info_plist = os.path.join(path, 'info.plist')
packal_xml = os.path.join(path, 'packal', 'package.xml')
if not os.path.exists(info_plist):
continue
try:
bundleid = readPlist(info_plist)['bundleid']
if not bundleid:
log.warning('no bundleid in info.plist : %s', path)
continue
except Exception as err:
log.error('bad info.plist in workflow %r: %s', path, err)
continue
try:
metadata = {'version': None, 'bundle': bundleid}
if os.path.exists(packal_xml):
metadata.update(packal_metadata(packal_xml))
except Exception as err:
log.error('bad packal/package.xml in workflow %r: %s', path, err)
continue
workflows[metadata['bundle']] = metadata['version']
log.debug('%d workflows installed locally', len(workflows))
return workflows
def get_packal_workflows():
"""Return list of workflows available on Packal.org"""
workflows = []
r = web.get(MANIFEST_URL)
r.raise_for_status()
manifest = ET.fromstring(r.content)
# these elements contain multiple, |||-delimited items
list_elements = ('categories', 'tags', 'osx')
for workflow in manifest:
d = {}
for elem in workflow:
if elem.tag in list_elements:
if not elem.text:
d[elem.tag] = []
else:
d[elem.tag] = [s.strip() for s in elem.text.split('|||')]
# text elements
elif elem.text:
d[elem.tag] = elem.text
else:
d[elem.tag] = ''
# convert timestamp to datetime
d['updated'] = datetime.fromtimestamp(float(d['updated']))
d['version'] = Version(d['version'])
workflows.append(d)
log.debug('{} workflows available on Packal.org'.format(len(workflows)))
return workflows
def get_workflows():
"""Return list of workflows on on Packal.org with update status."""
local_workflows = get_installed_workflows()
packal_workflows = get_packal_workflows()
for packal_workflow in packal_workflows:
# set version number
bundle = packal_workflow.get('bundle')
local_version = local_workflows.get(bundle, NOT_INSTALLED)
packal_version = packal_workflow['version']
log.debug('workflow `{0}` packal : {1} local : {2}'.format(
packal_workflow['bundle'],
packal_workflow['version'],
local_version))
# log.debug('local version : {0}'.format(local_version))
if local_version is NOT_INSTALLED:
packal_workflow['status'] = STATUS_NOT_INSTALLED
elif not local_version:
packal_workflow['status'] = STATUS_SPLITTER
elif packal_version > local_version:
packal_workflow['status'] = STATUS_UPDATE_AVAILABLE
elif packal_version == local_version:
packal_workflow['status'] = STATUS_UP_TO_DATE
else:
packal_workflow['status'] = STATUS_UNKNOWN
return packal_workflows
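# Illustrative example (not in the original): a workflow installed from Packal
# at version 1.0 whose manifest now lists 1.1 gets STATUS_UPDATE_AVAILABLE;
# one missing from the local workflow directory gets STATUS_NOT_INSTALLED.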
def main(wf):
from docopt import docopt
args = docopt(__doc__, argv=wf.args)
if args.get('--force-update'):
max_age = 1
log.debug('Forcing update of Packal workflows')
else:
max_age = CACHE_MAXAGE
wf.cached_data('workflows', get_workflows, max_age=max_age)
if __name__ == '__main__':
wf = Workflow()
log = wf.logger
sys.exit(wf.run(main))
|
[
"deanishe@deanishe.net"
] |
deanishe@deanishe.net
|
10721ca073cda40b158e49125801f77e6089f7c7
|
ddddaa700e4642f46a2c1e1e0271a7c8ea62ba0f
|
/harness/tests/experiment/fixtures/pytorch_onevar_model.py
|
ee5b464720be7a6043571f803f38299cc38f51aa
|
[
"Apache-2.0"
] |
permissive
|
determined-ai/determined
|
9d563cb5ffd074c88ee5edc9bf22ab9c3cb78c7e
|
8239b1993f4f44390f4e88901ffaf3b12429b83c
|
refs/heads/main
| 2023-08-21T12:13:36.651298
| 2023-08-21T08:34:16
| 2023-08-21T08:34:16
| 253,846,879
| 2,531
| 330
|
Apache-2.0
| 2023-09-14T21:54:17
| 2020-04-07T16:12:29
|
Go
|
UTF-8
|
Python
| false
| false
| 24,135
|
py
|
# type: ignore
"""
A one-variable linear model with no bias. The dataset emits only pairs of (data, label) = (1, 1),
meaning that the one weight in the model should approach 1 as gradient descent continues.
We will use the mean squared error as the loss. Since each record is the same, the "mean" part of
mean squared error means we can analyze every batch as if it were just one record.
Now, we can calculate the mean squared error to ensure that we are getting the gradient we are
expecting.
let:
R = learning rate (constant)
l = loss
w0 = the starting value of the one weight
w' = the updated value of the one weight
then calculate the loss:
(1) l = (label - (data * w0)) ** 2
take derivative of loss WRT w
(2) dl/dw = - 2 * data * (label - (data * w0))
gradient update:
(3) update = -R * dl/dw = 2 * R * data * (label - (data * w0))
Finally, we can calculate the updated weight (w') in terms of w0:
(4) w' = w0 + update = w0 + 2 * R * data * (label - (data * w0))
"""
import logging
from typing import Any, Dict, Iterable, List, Optional, Tuple, cast
import numpy as np
import torch
from determined import experimental, pytorch
from determined.common import yaml
from determined.pytorch import samplers
from tests.experiment.fixtures import pytorch_counter_callback
try:
import apex
except ImportError: # pragma: no cover
pass
class OnesDataset(torch.utils.data.Dataset):
def __len__(self) -> int:
return 64
def __getitem__(self, index: int) -> Tuple:
return torch.Tensor([float(1)]), torch.Tensor([float(1)])
class TriangleLabelSum(pytorch.MetricReducer):
"""Return a sum of (label_sum * batch_index) for every batch (labels are always 1 here)."""
@staticmethod
def expect(batch_size, idx_start, idx_end):
"""What to expect during testing."""
return sum(batch_size * idx for idx in range(idx_start, idx_end))
def __init__(self) -> None:
self.reset()
def reset(self) -> None:
self.sum = 0
# We don't actually expose a batch_idx for evaluation, so we track the number of batches
# since the last reset(), which is only accurate during evaluation workloads or the very
# first training workload.
self.count = 0
def update(self, label_sum: torch.Tensor, batch_idx: Optional[int]) -> None:
self.sum += label_sum * (batch_idx if batch_idx is not None else self.count)
self.count += 1
def per_slot_reduce(self) -> Any:
return self.sum
def cross_slot_reduce(self, per_slot_metrics) -> Any:
return sum(per_slot_metrics)
def triangle_label_sum(updates: List) -> Any:
out = 0
for update_idx, (label_sum, batch_idx) in enumerate(updates):
if batch_idx is not None:
out += batch_idx * label_sum
else:
out += update_idx * label_sum
return out
class StepableLRScheduler(torch.optim.lr_scheduler._LRScheduler):
def get_lr(self) -> List[float]:
return [self._step_count for _ in self.base_lrs]
def get_onevar_model(n=1) -> torch.nn.Module:
model = torch.nn.Linear(n, n, False)
# Manually initialize the weight(s) to 0.
model.weight.data.fill_(0)
return model
class MetricsCallback(pytorch.PyTorchCallback):
def __init__(self):
self.validation_metrics = []
self.training_metrics = []
self.batch_metrics = []
def on_validation_end(self, metrics: Dict[str, Any]) -> None:
self.validation_metrics.append(metrics)
def on_training_workload_end(
self, avg_metrics: Dict[str, Any], batch_metrics: Dict[str, Any]
) -> None:
self.training_metrics.append(avg_metrics)
self.batch_metrics += batch_metrics
class CheckpointCallback(pytorch.PyTorchCallback):
def __init__(self):
self.uuids = []
def on_checkpoint_upload_end(self, uuid: str) -> None:
self.uuids.append(uuid)
class BaseOneVarTrial(pytorch.PyTorchTrial):
def __init__(self, context: pytorch.PyTorchTrialContext) -> None:
self.context = context
# The "features" hparam is only for TestPyTorchTrial.test_restore_invalid_checkpoint
self.model = context.wrap_model(
get_onevar_model(n=self.context.get_hparams().get("features", 1))
)
self.lr = 0.001
opt = torch.optim.SGD(self.model.parameters(), self.lr)
self.opt = context.wrap_optimizer(opt)
self.loss_fn = torch.nn.MSELoss()
self.cls_reducer = context.wrap_reducer(TriangleLabelSum(), name="cls_reducer")
self.fn_reducer = context.wrap_reducer(triangle_label_sum, name="fn_reducer")
self.hparams = self.context.get_hparams()
self.metrics_callback = MetricsCallback()
self.checkpoint_callback = CheckpointCallback()
if self.hparams.get("disable_dataset_reproducibility_checks"):
self.context.experimental.disable_dataset_reproducibility_checks()
def train_batch(
self, batch: pytorch.TorchData, epoch_idx: int, batch_idx: int
) -> Dict[str, torch.Tensor]:
data, label = batch
self.cls_reducer.update(sum(label), batch_idx)
self.fn_reducer.update((sum(label), batch_idx))
# Measure the weight right now.
w_before = self.model.weight.data.item()
# Calculate expected values for loss (eq 1) and weight (eq 4).
loss_exp = (label[0] - data[0] * w_before) ** 2
w_exp = w_before + 2 * self.lr * data[0] * (label[0] - (data[0] * w_before))
output = self.model(data)
loss = self.loss_fn(output, label)
self.context.backward(loss)
self.context.step_optimizer(self.opt)
# Measure the weight after the update.
w_after = self.model.weight.data.item()
# Return values that we can compare as part of the tests.
return {
"loss": loss,
"loss_exp": loss_exp,
"w_before": w_before,
"w_after": w_after,
"w_exp": w_exp,
"output": output,
}
@staticmethod
def check_batch_metrics(
metrics: Dict[str, Any],
batch_idx: int,
metric_keyname_pairs: Iterable[Tuple[str, str]],
atol=1e-6,
) -> None:
"""Check that given metrics are equal or close enough to each other."""
for k_a, k_b in metric_keyname_pairs:
m_a, m_b = metrics[k_a], metrics[k_b]
try:
assert torch.isclose(
m_a, m_b, atol=atol
), f"Metrics {k_a}={m_a} and {k_b}={m_b} do not match at batch {batch_idx}"
except TypeError:
assert np.allclose(
m_a, m_b, atol=atol
), f"Metrics {k_a}={m_a} and {k_b}={m_b} do not match at batch {batch_idx}"
def build_training_data_loader(self) -> torch.utils.data.DataLoader:
if self.hparams["dataloader_type"] == "determined":
return pytorch.DataLoader(
OnesDataset(), batch_size=self.context.get_per_slot_batch_size()
)
elif self.hparams["dataloader_type"] == "torch":
dataset = OnesDataset()
seed = self.context.get_trial_seed()
num_workers = self.context.distributed.get_size()
rank = self.context.distributed.get_rank()
batch_size = self.context.get_per_slot_batch_size()
skip_batches = self.context.get_initial_batch()
sampler = torch.utils.data.SequentialSampler(dataset)
sampler = samplers.ReproducibleShuffleSampler(sampler, seed)
sampler = samplers.RepeatSampler(sampler)
sampler = samplers.DistributedSampler(sampler, num_workers=num_workers, rank=rank)
batch_sampler = torch.utils.data.BatchSampler(sampler, batch_size, drop_last=False)
batch_sampler = samplers.SkipBatchSampler(batch_sampler, skip_batches)
return torch.utils.data.DataLoader(dataset, batch_sampler=batch_sampler)
else:
raise ValueError(f"unknown dataloader_type: {self.hparams['dataloader_type']}")
def build_validation_data_loader(self) -> torch.utils.data.DataLoader:
if self.hparams["dataloader_type"] == "determined":
return pytorch.DataLoader(
OnesDataset(), batch_size=self.context.get_per_slot_batch_size()
)
elif self.hparams["dataloader_type"] == "torch":
dataset = OnesDataset()
num_workers = self.context.distributed.get_size()
rank = self.context.distributed.get_rank()
batch_size = self.context.get_per_slot_batch_size()
sampler = torch.utils.data.SequentialSampler(dataset)
sampler = samplers.DistributedSampler(sampler, num_workers=num_workers, rank=rank)
batch_sampler = torch.utils.data.BatchSampler(sampler, batch_size, drop_last=False)
return torch.utils.data.DataLoader(dataset, batch_sampler=batch_sampler)
else:
raise ValueError(f"unknown dataloader_type: {self.hparams['dataloader_type']}")
def build_callbacks(self) -> Dict[str, pytorch.PyTorchCallback]:
return {"metrics": self.metrics_callback, "checkpoint": self.checkpoint_callback}
class OneVarTrial(BaseOneVarTrial):
_searcher_metric = "val_loss"
def evaluate_batch(self, batch: pytorch.TorchData, batch_idx: int) -> Dict[str, Any]:
data, label = batch
self.cls_reducer.update(sum(label), None)
self.fn_reducer.update((sum(label), None))
loss = self.loss_fn(self.model(data), label)
return {"val_loss": loss}
class OneVarTrialWithMultiValidation(OneVarTrial):
def evaluate_batch(self, batch: pytorch.TorchData, batch_idx: int) -> Dict[str, Any]:
data, labels = batch
output = self.model(data)
val_loss = self.loss_fn(output, labels)
mse = torch.mean(torch.square(output - labels))
return {"val_loss": val_loss, "mse": mse}
class OneVarTrialPerMetricReducers(OneVarTrialWithMultiValidation):
def evaluation_reducer(self) -> Dict[str, pytorch.Reducer]:
return {"val_loss": pytorch.Reducer.AVG, "mse": pytorch.Reducer.AVG}
class OneVarTrialWithTrainingMetrics(OneVarTrial):
def train_batch(
self, batch: pytorch.TorchData, epoch_idx: int, batch_idx: int
) -> Dict[str, torch.Tensor]:
data, labels = batch
output = self.model(data)
labels = cast(torch.Tensor, labels)
loss = self.loss_fn(output, labels)
mse = torch.mean(torch.square(output - labels))
self.context.backward(loss)
self.context.step_optimizer(self.opt)
return {"loss": loss, "mse": mse}
class AMPTestDataset(OnesDataset):
STAGE_DATUM = {
"one": 1.0,
"zero": 0.0,
"small": 2e-14,
"large": 2e4,
}
def __init__(self, stages: List[str], aggregation_freq: int = 1) -> None:
self.stages = stages
self._agg_freq = aggregation_freq
def __len__(self) -> int:
return len(self.stages) * self._agg_freq
def __getitem__(self, index: int) -> Tuple:
x = self.STAGE_DATUM[self.stages[index // self._agg_freq]]
return torch.Tensor([float(x)]), torch.Tensor([float(x)])
class OneVarAMPBaseTrial(OneVarTrial):
_init_scale = None
_growth_interval = None
_stages = (
5 * ["one"]
+ 1 * ["large"]
+ 4 * ["one"]
+ 1 * ["small"]
+ 4 * ["one"]
+ 1 * ["zero"]
+ 4 * ["one"]
+ []
)
def __init__(self, context: pytorch.PyTorchTrialContext):
super().__init__(context)
self._agg_freq = self.context._aggregation_frequency
def build_training_data_loader(self) -> torch.utils.data.DataLoader:
return pytorch.DataLoader(
AMPTestDataset(self._stages, self._agg_freq),
batch_size=self.context.get_per_slot_batch_size(),
)
class OneVarApexAMPTrial(OneVarAMPBaseTrial):
_growth_interval = 2000
def __init__(self, context: pytorch.PyTorchTrialContext) -> None:
super().__init__(context)
self.model, self.optimizer = self.context.configure_apex_amp(
models=self.model,
optimizers=self.opt,
opt_level="O2",
)
def train_batch(
self, batch: pytorch.TorchData, epoch_idx: int, batch_idx: int
) -> Dict[str, torch.Tensor]:
scale_before = apex.amp.state_dict()["loss_scaler0"]["loss_scale"]
metrics = super().train_batch(batch, epoch_idx, batch_idx)
metrics["scale_before"] = scale_before
metrics["scale"] = apex.amp.state_dict()["loss_scaler0"]["loss_scale"]
metrics["stage"] = self._stages[batch_idx // self._agg_freq]
return metrics
class OneVarAutoAMPTrial(OneVarAMPBaseTrial):
_init_scale = 65536
_growth_interval = 4
def __init__(self, context: pytorch.PyTorchTrialContext) -> None:
context.experimental.use_amp()
        # HACK: overwrite the scaler with a manually configured one, which
        # is not something we actually allow with the use_amp() API.
context._scaler = torch.cuda.amp.GradScaler(
init_scale=self._init_scale,
growth_interval=self._growth_interval,
)
super().__init__(context)
self.scaler = self.context._scaler
def train_batch(
self, batch: pytorch.TorchData, epoch_idx: int, batch_idx: int
) -> Dict[str, torch.Tensor]:
scale_before = self.scaler.get_scale()
metrics = super().train_batch(batch, epoch_idx, batch_idx)
metrics["scale_before"] = scale_before
# self.scaler.update() gets called after this method returns
metrics["stage"] = self._stages[batch_idx // self._agg_freq]
return metrics
class OneVarManualAMPTrial(OneVarAMPBaseTrial):
_init_scale = 65536
_growth_interval = 4
def __init__(self, context: pytorch.PyTorchTrialContext) -> None:
self.scaler = context.wrap_scaler(
torch.cuda.amp.GradScaler(
init_scale=self._init_scale, growth_interval=self._growth_interval
)
)
super().__init__(context)
def train_batch(
self, batch: pytorch.TorchData, epoch_idx: int, batch_idx: int
) -> Dict[str, torch.Tensor]:
data, label = batch
scale_before = self.scaler.get_scale()
# Measure the weight right now.
w_before = self.model.weight.data.item()
# Calculate expected values for loss (eq 1) and weight (eq 4).
loss_exp = (label[0] - data[0] * w_before) ** 2
w_exp = w_before + 2 * self.lr * data[0] * (label[0] - (data[0] * w_before))
with torch.cuda.amp.autocast():
output = self.model(data)
loss = self.loss_fn(output, label)
scaled_loss = self.scaler.scale(loss)
self.context.backward(scaled_loss)
self.context.step_optimizer(self.opt, scaler=self.scaler)
if (batch_idx + 1) % self._agg_freq == 0:
self.scaler.update()
# Measure the weight after the update.
w_after = self.model.weight.data.item()
# Return values that we can compare as part of the tests.
return {
"stage": self._stages[batch_idx // self._agg_freq],
"scale_before": scale_before,
"scale": self.scaler.get_scale(),
"loss": loss,
"loss_exp": loss_exp,
"w_before": w_before,
"w_after": w_after,
"w_exp": w_exp,
"output": output,
}
def evaluate_batch(self, batch: pytorch.TorchData, batch_idx: int) -> Dict[str, Any]:
data, label = batch
with torch.cuda.amp.autocast():
output = self.model(data)
loss = self.loss_fn(output, label)
return {"val_loss": loss}
class OneVarApexAMPWithNoopScalerTrial(OneVarApexAMPTrial):
def __init__(self, context: pytorch.PyTorchTrialContext) -> None:
self.scaler = context.wrap_scaler(
torch.cuda.amp.GradScaler(
init_scale=self._init_scale,
growth_interval=self._growth_interval,
enabled=False,
)
)
super().__init__(context)
class OneVarManualAMPWithNoopApexTrial(OneVarManualAMPTrial):
def __init__(self, context: pytorch.PyTorchTrialContext) -> None:
super().__init__(context)
self.model, self.optimizer = self.context.configure_apex_amp(
models=self.model,
optimizers=self.opt,
opt_level="O2",
enabled=False,
)
class OneVarTrialCustomEval(BaseOneVarTrial):
_searcher_metric = "val_loss"
def evaluate_full_dataset(self, data_loader: torch.utils.data.DataLoader) -> Dict[str, Any]:
loss_sum = 0.0
for data, labels in iter(data_loader):
if torch.cuda.is_available():
data, labels = data.cuda(), labels.cuda()
output = self.model(data)
loss_sum += self.loss_fn(output, labels)
loss = loss_sum / len(data_loader)
return {"val_loss": loss}
class OneVarTrialAccessContext(BaseOneVarTrial):
_searcher_metric = "val_loss"
def __init__(self, context: pytorch.PyTorchTrialContext) -> None:
super().__init__(context)
self.model_a = self.context.wrap_model(get_onevar_model())
self.model_b = self.context.wrap_model(get_onevar_model())
self.opt_a = self.context.wrap_optimizer(
torch.optim.SGD(self.model_a.parameters(), self.context.get_hparam("learning_rate"))
)
self.opt_b = self.context.wrap_optimizer(
torch.optim.SGD(self.model_b.parameters(), self.context.get_hparam("learning_rate"))
)
self.lrs_a = self.context.wrap_lr_scheduler(
StepableLRScheduler(self.opt_a),
step_mode=pytorch.LRScheduler.StepMode(
self.context.get_hparam("lr_scheduler_step_mode")
),
)
self.lrs_b = self.context.wrap_lr_scheduler(
StepableLRScheduler(self.opt_b),
step_mode=pytorch.LRScheduler.StepMode(
self.context.get_hparam("lr_scheduler_step_mode")
),
)
def train_batch(
self, batch: pytorch.TorchData, epoch_idx: int, batch_idx: int
) -> Dict[str, torch.Tensor]:
assert self.context.models
assert self.context.optimizers
assert self.context.lr_schedulers
data, labels = batch
output = self.model_a(data)
loss = torch.nn.functional.binary_cross_entropy(output, labels.contiguous().view(-1, 1))
self.context.backward(loss)
self.context.step_optimizer(self.opt_a)
return {"loss": loss}
def evaluate_batch(self, batch: pytorch.TorchData, batch_idx: int) -> Dict[str, Any]:
assert self.context.models
assert self.context.optimizers
assert self.context.lr_schedulers
data, labels = batch
output = self.model_a(data)
loss = self.loss_fn(output, labels)
return {"val_loss": loss}
class OneVarTrialGradClipping(OneVarTrial):
def train_batch(
self, batch: pytorch.TorchData, epoch_idx: int, batch_idx: int
) -> Dict[str, torch.Tensor]:
data, labels = batch
output = self.model(data)
loss = self.loss_fn(output, labels)
self.context.backward(loss)
if "gradient_clipping_l2_norm" in self.context.get_hparams():
self.context.step_optimizer(
self.opt,
clip_grads=lambda params: torch.nn.utils.clip_grad_norm_(
params, self.context.get_hparam("gradient_clipping_l2_norm")
),
)
elif "gradient_clipping_value" in self.context.get_hparams():
self.context.step_optimizer(
self.opt,
clip_grads=lambda params: torch.nn.utils.clip_grad_value_(
params, self.context.get_hparam("gradient_clipping_value")
),
)
else:
self.context.step_optimizer(self.opt)
return {"loss": loss}
class OneVarTrialWithNonScalarValidation(BaseOneVarTrial):
_searcher_metric = "mse"
def __init__(self, context: pytorch.PyTorchTrialContext) -> None:
super().__init__(context)
self.model = self.context.wrap_model(get_onevar_model())
self.opt = self.context.wrap_optimizer(
torch.optim.SGD(self.model.parameters(), self.context.get_hparam("learning_rate"))
)
def train_batch(
self, batch: pytorch.TorchData, epoch_idx: int, batch_idx: int
) -> Dict[str, torch.Tensor]:
data, labels = batch
output = self.model(data)
loss = self.loss_fn(output, labels)
self.context.backward(loss)
self.context.step_optimizer(self.opt)
return {"loss": loss}
def evaluate_full_dataset(self, data_loader: torch.utils.data.DataLoader) -> Dict[str, Any]:
predictions = []
mse_sum = 0.0
for data, labels in iter(data_loader):
if torch.cuda.is_available():
data, labels = data.cuda(), labels.cuda()
output = self.model(data)
predictions.append(output)
mse_sum += torch.mean(torch.square(output - labels))
mse = mse_sum / len(data_loader)
return {"predictions": predictions, "mse": mse}
class OneVarTrialWithLRScheduler(OneVarTrial):
def __init__(self, context: pytorch.PyTorchTrialContext) -> None:
super().__init__(context)
self.model = self.context.wrap_model(get_onevar_model())
self.opt = self.context.wrap_optimizer(
torch.optim.SGD(self.model.parameters(), self.context.get_hparam("learning_rate"))
)
self.lr_scheduler = self.context.wrap_lr_scheduler(
StepableLRScheduler(self.opt),
step_mode=pytorch.LRScheduler.StepMode(
self.context.get_hparam("lr_scheduler_step_mode")
),
)
def train_batch(
self, batch: pytorch.TorchData, epoch_idx: int, batch_idx: int
) -> Dict[str, torch.Tensor]:
metrics = super().train_batch(batch, epoch_idx, batch_idx)
lr = self.lr_scheduler.get_last_lr()[0]
metrics["lr"] = lr
if (
self.context.get_hparam("lr_scheduler_step_mode")
== pytorch.LRScheduler.StepMode.MANUAL_STEP
):
self.lr_scheduler.step()
return metrics
class EphemeralLegacyCallbackCounter(pytorch.PyTorchCallback):
"""
Callback with legacy signature for on_training_epoch_start
that takes no arguments. It is ephemeral: it does not implement
state_dict and load_state_dict.
"""
def __init__(self) -> None:
self.legacy_on_training_epochs_start_calls = 0
def on_training_epoch_start(self) -> None: # noqa # This is to test for a deprecation warning.
logging.debug(f"calling {__name__} without arguments")
self.legacy_on_training_epochs_start_calls += 1
class OneVarTrialCallbacks(OneVarTrial):
def __init__(self, context: pytorch.PyTorchTrialContext) -> None:
super().__init__(context)
self.counter = pytorch_counter_callback.Counter()
self.legacy_counter = EphemeralLegacyCallbackCounter()
def build_callbacks(self) -> Dict[str, pytorch.PyTorchCallback]:
return {"counter": self.counter, "legacyCounter": self.legacy_counter}
if __name__ == "__main__":
conf = yaml.safe_load(
"""
description: test-native-api-local-test-mode
hyperparameters:
global_batch_size: 32
dataloader_type: determined
scheduling_unit: 1
searcher:
name: single
metric: val_loss
max_length:
batches: 1
smaller_is_better: true
max_restarts: 0
"""
)
experimental.create(OneVarTrial, conf, context_dir=".", local=True, test=True)
|
[
"noreply@github.com"
] |
determined-ai.noreply@github.com
|
62fce9ac99608d53e65fe6b48b63b38cc4e332ca
|
4d49d4d59c9517fe99884cd69ad88644265c6755
|
/week1/Group2/boj2798_brong.py
|
a451bc3f9f1e209e3a85cae3ecef797c367d88b8
|
[] |
no_license
|
all1m-algorithm-study/2021-1-Algorithm-Study
|
3f34655dc0a3d8765143f4230adaa96055d13626
|
73c7cac1824827cb6ed352d49c0ead7003532a35
|
refs/heads/main
| 2023-06-03T18:45:28.852381
| 2021-06-11T06:28:44
| 2021-06-11T06:28:44
| 348,433,854
| 8
| 16
| null | 2021-06-11T06:28:45
| 2021-03-16T17:23:37
|
Python
|
UTF-8
|
Python
| false
| false
| 307
|
py
|
# BOJ 2798 (Blackjack): choose 3 of the N cards so their sum is as large as
# possible without exceeding M.
N, M = map(int, input().split())
arr = list(map(int, input().split()))
ans = []
for i in range(N):
    for j in range(i + 1, N):
        for k in range(j + 1, N):
            ans.append(arr[i] + arr[j] + arr[k])
a = 0
for s in ans:
    if s <= M and s > a:
        a = s
print(a)
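# Equivalent search expressed with itertools.combinations -- a sketch added for
# comparison, not part of the original submission.
from itertools import combinations

def best_three_sum(cards, limit):
    # Largest sum of any 3 cards that does not exceed limit.
    sums = (sum(c) for c in combinations(cards, 3))
    return max(s for s in sums if s <= limit)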
|
[
"jeongdahyun@jeongdahyeon-ui-MacBookPro.local"
] |
jeongdahyun@jeongdahyeon-ui-MacBookPro.local
|
fc4c668b7469e44b583cac6cfce0ea57dc23329d
|
ed1d17be06bdd652e29a377669bc5c6a6eaa699a
|
/CA117/plural_012.py
|
b64bb240008bc1527af411c3c7864b32f329a0d3
|
[] |
no_license
|
DanielSammon576/DCU-College-Work
|
ace8d621ada68db903ea07d55d1da4ca68da5ef7
|
6a943176e7a596ecd082b6ed7c1d6e8d059a1236
|
refs/heads/master
| 2023-01-19T00:30:59.013111
| 2020-11-16T10:24:39
| 2020-11-16T10:24:39
| 214,476,732
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 703
|
py
|
#!/usr/bin/env python
import sys
def pluralise(s):
    sibilants = ["ch", "sh", "x", "s", "z"]
    vowels = ["a", "e", "i", "o", "u"]
    # Sibilant endings take "es": church -> churches, box -> boxes.
    if s[-2:] in sibilants or s[-1:] in sibilants:
        return s + "es"
    # Consonant + "y" becomes "ies": city -> cities (but day -> days).
    elif s[-1:] == "y" and s[-2:-1] not in vowels:
        return s[:-1] + "ies"
    # "f"/"fe" endings become "ves": leaf -> leaves, knife -> knives.
    elif s[-1:] == "f":
        return s[:-1] + "ves"
    elif s[-2:] == "fe":
        return s[:-2] + "ves"
    # "o" endings take "es": hero -> heroes.
    elif s[-1:] == "o":
        return s + "es"
    else:
        return s + "s"
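# Examples of the rules above (sketch):
#   pluralise('church') -> 'churches'   pluralise('box')  -> 'boxes'
#   pluralise('city')   -> 'cities'     pluralise('day')  -> 'days'
#   pluralise('knife')  -> 'knives'     pluralise('hero') -> 'heroes'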
def main():
    # Pluralise each line read from standard input.
    for line in sys.stdin:
        print(pluralise(line.strip()))
if __name__ == '__main__':
main()
|
[
"daniel.sammon4@mail.dcu.ie"
] |
daniel.sammon4@mail.dcu.ie
|
0ee3c126ecddab29197a9ee4b8ee61de880ddc76
|
660e35c822423685aea19d038daa8356722dc744
|
/analytic_sale/setup.py
|
39356dab2189f7166057353a8b8a9f25d6589bba
|
[] |
no_license
|
saifkazi/tryton_modules
|
a05cb4a90ae2c46ba39d60d2005ffc18ce5e44bb
|
94bd3a4e3fd86556725cdff33b314274dcb20afd
|
refs/heads/main
| 2023-05-05T12:20:02.059236
| 2021-05-19T10:46:37
| 2021-05-19T10:46:37
| 368,768,310
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,159
|
py
|
#!/usr/bin/env python3
# This file is part of Tryton. The COPYRIGHT file at the top level of
# this repository contains the full copyright notices and license terms.
import io
import os
import re
from configparser import ConfigParser
from setuptools import setup, find_packages
def read(fname):
return io.open(
os.path.join(os.path.dirname(__file__), fname),
'r', encoding='utf-8').read()
def get_require_version(name):
if minor_version % 2:
require = '%s >= %s.%s.dev0, < %s.%s'
else:
require = '%s >= %s.%s, < %s.%s'
require %= (name, major_version, minor_version,
major_version, minor_version + 1)
return require
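# e.g. for a released series like 6.0 (even minor) this yields
# 'trytond_foo >= 6.0, < 6.1'; for a development series like 6.1 (odd minor)
# it yields 'trytond_foo >= 6.1.dev0, < 6.2'. ('trytond_foo' is a placeholder.)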
config = ConfigParser()
config.read_file(open(os.path.join(os.path.dirname(__file__), 'tryton.cfg')))
info = dict(config.items('tryton'))
for key in ('depends', 'extras_depend', 'xml'):
if key in info:
info[key] = info[key].strip().splitlines()
version = info.get('version', '0.0.1')
major_version, minor_version, _ = version.split('.', 2)
major_version = int(major_version)
minor_version = int(minor_version)
name = 'trytond_analytic_sale'
download_url = 'http://downloads.tryton.org/%s.%s/' % (
major_version, minor_version)
if minor_version % 2:
version = '%s.%s.dev0' % (major_version, minor_version)
download_url = (
'hg+http://hg.tryton.org/modules/%s#egg=%s-%s' % (
name[8:], name, version))
local_version = []
if os.environ.get('CI_JOB_ID'):
local_version.append(os.environ['CI_JOB_ID'])
else:
for build in ['CI_BUILD_NUMBER', 'CI_JOB_NUMBER']:
if os.environ.get(build):
local_version.append(os.environ[build])
else:
local_version = []
break
if local_version:
version += '+' + '.'.join(local_version)
requires = []
for dep in info.get('depends', []):
if not re.match(r'(ir|res)(\W|$)', dep):
requires.append(get_require_version('trytond_%s' % dep))
requires.append(get_require_version('trytond'))
tests_require = [get_require_version('proteus')]
dependency_links = []
if minor_version % 2:
dependency_links.append(
'https://trydevpi.tryton.org/?local_version='
+ '.'.join(local_version))
setup(name=name,
version=version,
description='Tryton module to add analytic accounting on sale',
long_description=read('README.rst'),
author='Tryton',
author_email='bugs@tryton.org',
url='http://www.tryton.org/',
download_url=download_url,
project_urls={
"Bug Tracker": 'https://bugs.tryton.org/',
"Documentation": 'https://docs.tryton.org/',
"Forum": 'https://www.tryton.org/forum',
"Source Code": 'https://hg.tryton.org/modules/analytic_sale',
},
keywords='tryton analytic account sale',
package_dir={'trytond.modules.analytic_sale': '.'},
packages=(
['trytond.modules.analytic_sale']
+ ['trytond.modules.analytic_sale.%s' % p for p in find_packages()]
),
package_data={
'trytond.modules.analytic_sale': (info.get('xml', [])
+ ['tryton.cfg', 'view/*.xml', 'locale/*.po', 'tests/*.rst']),
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Plugins',
'Framework :: Tryton',
'Intended Audience :: Developers',
'Intended Audience :: Financial and Insurance Industry',
'Intended Audience :: Legal Industry',
'License :: OSI Approved :: '
'GNU General Public License v3 or later (GPLv3+)',
'Natural Language :: Bulgarian',
'Natural Language :: Catalan',
'Natural Language :: Chinese (Simplified)',
'Natural Language :: Czech',
'Natural Language :: Dutch',
'Natural Language :: English',
'Natural Language :: Finnish',
'Natural Language :: French',
'Natural Language :: German',
'Natural Language :: Hungarian',
'Natural Language :: Indonesian',
'Natural Language :: Italian',
'Natural Language :: Persian',
'Natural Language :: Polish',
'Natural Language :: Portuguese (Brazilian)',
'Natural Language :: Russian',
'Natural Language :: Slovenian',
'Natural Language :: Spanish',
'Natural Language :: Turkish',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Office/Business',
'Topic :: Office/Business :: Financial :: Accounting',
],
license='GPL-3',
python_requires='>=3.6',
install_requires=requires,
dependency_links=dependency_links,
zip_safe=False,
entry_points="""
[trytond.modules]
analytic_sale = trytond.modules.analytic_sale
""",
test_suite='tests',
test_loader='trytond.test_loader:Loader',
tests_require=tests_require,
)
|
[
"saif.kazi76@gmail.com"
] |
saif.kazi76@gmail.com
|
904275f8e6333b8e92e842c880ec03b335a7b698
|
7f4c82f7eb8d2805e378586f14e214cdaacfdb4a
|
/books/model/NotesAndTerms.py
|
208ecff371824b226ffa14dd76e82e5e1e100bc2
|
[
"MIT"
] |
permissive
|
deepubansal/books-python-wrappers
|
5a922267ec8382b3542638d894c96f4891b57bf5
|
51210c8d557a32564f976a56214d3c0807f46a90
|
refs/heads/master
| 2022-12-05T11:25:01.694021
| 2020-08-29T07:35:23
| 2020-08-29T07:35:23
| 288,738,813
| 0
| 0
|
MIT
| 2020-08-29T07:35:24
| 2020-08-19T13:26:04
|
Python
|
UTF-8
|
Python
| false
| false
| 1,148
|
py
|
#$Id$
class NotesAndTerms:
"""This class is used to create object for notes and terms."""
def __init__(self):
"""Initialize parameters for notes and terms."""
self.notes = ''
self.terms = ''
def set_notes(self, notes):
"""Set notes.
Args:
notes(str): Notes.
"""
self.notes = notes
def get_notes(self):
"""Get notes.
Returns:
str: Notes.
"""
return self.notes
def set_terms(self, terms):
"""Set terms.
Args:
terms(str): Terms.
"""
self.terms = terms
def get_terms(self):
"""Get terms.
Returns:
str: Terms.
"""
return self.terms
def to_json(self):
"""This method is used to convert notes and terms object to json form.
Returns:
dict: Dictionary containing json object for notes and terms.
"""
data = {}
if self.notes != '':
data['notes'] = self.notes
if self.terms != '':
data['terms'] = self.terms
return data
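# Usage sketch (hypothetical values):
#   nt = NotesAndTerms()
#   nt.set_notes('Thanks for your business.')
#   nt.set_terms('Payment due within 15 days.')
#   nt.to_json()  # -> {'notes': 'Thanks for your business.', 'terms': 'Payment due within 15 days.'}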
|
[
"sahaya.ramesh@zohocorp.com"
] |
sahaya.ramesh@zohocorp.com
|
0b39b398e8d03bd39a28a6aeba56a856cb111b56
|
7cdc9c4594a5e1cd6bbf1385c5721f4c30b02975
|
/parsifal/reviews/templatetags/get_user_article_evaluation.py
|
7a5f68ef649a0fbf7d39e5c0a54501856990f9ba
|
[
"MIT"
] |
permissive
|
thiagok2/parsifal-mec
|
18602b53f042a91d4616214b849607c6b691b018
|
71d06e1094b512476e97204d63d01ff4a251dc9e
|
refs/heads/master
| 2021-08-18T08:21:49.044031
| 2021-08-13T12:42:58
| 2021-08-13T12:42:58
| 191,060,711
| 2
| 3
|
MIT
| 2019-12-02T20:14:47
| 2019-06-09T22:15:47
|
Python
|
UTF-8
|
Python
| false
| false
| 341
|
py
|
from django import template
from parsifal.reviews.models import Article, ArticleEvaluation
register = template.Library()
@register.assignment_tag
def get_user_article_evaluation(article, user_id):
status = Article.objects \
.get(id=article.id) \
.get_user_evaluation(user_id=user_id)
return status
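# Template usage sketch (assumes this tag library is loaded in the template):
#   {% load get_user_article_evaluation %}
#   {% get_user_article_evaluation article request.user.id as status %}
# Note: assignment_tag was deprecated in Django 1.9 and removed in 3.0, where
# @register.simple_tag provides the same "... as var" behaviour.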
|
[
"tyagogoulartn@gmail.com"
] |
tyagogoulartn@gmail.com
|
fb7f43e871da42fedc416737fd42d21b0a39a0ff
|
1e4306db679c2f8e895af30cb70df68af5f6bd14
|
/JogoVelha.py
|
f2a049b5f6b3c0bbe75de863cce4c8efcad16654
|
[] |
no_license
|
Lucas-CSa/minimax-ai
|
0970b634c77acf8cd32eb4d17716d0b806b772a7
|
31a83ffd403a29a192b00c913c680e6cbeef7fd2
|
refs/heads/master
| 2023-05-04T15:00:18.363160
| 2021-05-12T18:37:59
| 2021-05-12T18:37:59
| 366,817,364
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,787
|
py
|
import numpy as np
import datetime
from LuskinhaPlayer import BigBrainMoves
from RandomPlayerJV import RandomPlayerJV
from termcolor import colored
class JogoVelha:
def __init__(self, player1, player2):
self.board = np.zeros( (9) )
self.players = [player1, player2]
    @staticmethod
    def printSymbol(number):
        if number == 1:
            return colored('●', 'red')
        elif number == 2:
            return colored('△', 'blue')
        else:
            return ' '
def printBoard(self):
print('\n-----------')
for i in range(0,3):
print(JogoVelha.printSymbol(self.board[i])+" | ", end='')
print('\n-----------')
for i in range(3,6):
print(JogoVelha.printSymbol(self.board[i])+" | ", end='')
print('\n-----------')
for i in range(6,9):
print(JogoVelha.printSymbol(self.board[i])+" | ", end='')
print('\n-----------')
def movement(self, player, space):
#
# Only accepts player equal 1 or 2
# and position between 0 and 8 where:
#
# | 0 | 1 | 2 |
# | 3 | 4 | 5 |
# | 6 | 7 | 8 |
#
try:
if(player not in (1,2)):
raise Exception('Only players 1 or 2')
if (self.board[space] == 0):
self.board[space] = player
else:
raise Exception('Player '+str(player)+', you only can play in an empty space')
except IndexError:
raise Exception('Player '+str(player)+', you only can choose a number between 0 and 8')
    def hasWinner(self):
        # The 8 winning lines: 3 rows, 3 columns, 2 diagonals.
        lines = [(0, 1, 2), (3, 4, 5), (6, 7, 8),
                 (0, 3, 6), (1, 4, 7), (2, 5, 8),
                 (0, 4, 8), (2, 4, 6)]
        for i, j, k in lines:
            if (self.board[i] == self.board[j] == self.board[k]) and (self.board[i] != 0):
                return True
        return False
def isDraw(self):
for i in range(0,9):
if self.board[i] == 0:
return False
if self.hasWinner():
return False
else:
return True
def game(self):
k=1
while (not self.hasWinner()) and (not self.isDraw()):
            k = 1 - k  # alternate between player 0 and player 1
inicio = datetime.datetime.now()
self.movement(k+1, self.players[k].move(k+1, self.board))
dur = (datetime.datetime.now() -inicio).total_seconds()
if(dur > 10):
print('Player '+ self.players[k].name() + ' duration (seconds): '+ str(dur))
self.printBoard()
if self.isDraw():
print('Draw!')
# returning the winner name only for Tournament purpose
return 'draw'
else:
print('Player '+ JogoVelha.printSymbol(k+1)+ ": " + self.players[k].name() + ' is the winner!')
# returning the winner name only for Tournament purpose
return self.players[k].name()
def main():
JogoVelha(BigBrainMoves(), RandomPlayerJV()).game()
JogoVelha(BigBrainMoves(), RandomPlayerJV()).game()
if __name__ == '__main__':
main()
|
[
"lucas.cisa19@gmail"
] |
lucas.cisa19@gmail
|
a293d517bedd10d6570037e13c9af25a60b78578
|
0db4f248a6f8c036ff47a90e5f85e1e0b8e9b8a4
|
/label_visualize/audio/test.py
|
75110e91446d7606a03d851456e723cdb588f83c
|
[] |
no_license
|
vunguyen1408/no-more-weekend
|
9c6afec60d92c7788d0c8b03472b200840d89d11
|
2b5025798e232ffc23cb1b498d2adb89078f602a
|
refs/heads/master
| 2021-08-08T11:54:38.244834
| 2017-11-10T07:22:00
| 2017-11-10T07:22:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 511
|
py
|
import subprocess

filename = '/u01/oracle/oradata/APEX/MARKETING_TOOL_02_JSON/2016-10-01/videos/2016-10-01_9.mp4'

def getLength(input_video):
    # "-of csv=p=0" makes ffprobe print the bare duration value with no section wrapper.
    result = subprocess.check_output(
        ['ffprobe', '-i', input_video, '-show_entries', 'format=duration',
         '-v', 'quiet', '-of', 'csv=p=0'])
    print(result)
    return float(result)

print(getLength(filename))
|
[
"ltduong1994@gmail.com"
] |
ltduong1994@gmail.com
|
b62b7c93a6b16398a156ceaca267898e51a6cae8
|
b3350357cf9f44a2357032460274113258db4949
|
/JavaAPIScraper/java_parser_2.py
|
38df83bdb59111e8edcf6646969f5e5b043860c3
|
[] |
no_license
|
heenasurve/JAVA-API-Visualization
|
3f7be37362357adcc2a267ded08aec492b50d290
|
5a4527b3e928278582d9fe399d8466e407cbce16
|
refs/heads/master
| 2021-09-13T06:49:24.385831
| 2018-04-26T05:22:35
| 2018-04-26T05:22:35
| 123,084,021
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,138
|
py
|
from py2neo import Graph,Node,Relationship, NodeSelector
import re
import sys
import plyj.parser
import plyj.model as m
files = ['rmi_server.java', 'rmi_client.java', 'rmi_server_interface.java']
descriptions = [
'implements a remote object and registers it with the RMI registry',
'looks up and invokes a remote object.',
'provides the connection between the client and the server'
]
graph = Graph(password="test")
selector = NodeSelector(graph)
p = plyj.parser.Parser()
roles = []
for file in files:
    roles.append(file.split(".")[0])
print("Roles : ")
for role in roles:
print(role)
print("\n\n")
for file in files:
    with open(file) as f:
        content = f.read().splitlines()
save_content = content
imports = []
classes = []
interfaces = []
extends = []
implements =[]
types = []
methods_in_sequence = []
for line in content:
pattern = "java.rmi[.]?\w*"
prog = re.compile(pattern)
terms = line.split(" ")
for term in terms:
if prog.match(term):
imports.append(term)
if len(imports)>0:
print("Imports : ")
for package in imports:
print(package)
tree = p.parse_file(file)
for type_decl in tree.type_declarations:
types.append(type_decl)
if type(type_decl) is plyj.model.ClassDeclaration:
classes.append(type_decl)
if type(type_decl) is plyj.model.InterfaceDeclaration:
interfaces.append(type_decl)
if len(interfaces) > 0:
print("Interfaces : ")
for interface in interfaces:
print(interface.name)
if len(classes) > 0:
print("Classes : ")
for class_type in classes:
print(class_type.name)
for type_decl in tree.type_declarations:
if type_decl.extends is not None:
if type(type_decl.extends) is plyj.model.Type:
extends.append(type_decl.extends.name.value)
else:
if type(type_decl.extends) is list:
extends.append(type_decl.extends[0].name.value)
        if hasattr(type_decl, "implements") and len(type_decl.implements) != 0:
for interface_impl in type_decl.implements:
implements.append(interface_impl.name.value)
for extended_class in extends:
print(' -> extends ' + extended_class)
for impl_interface in implements:
print(' -> implements ' + impl_interface)
methods = []
inner_statements = []
for type_decl in tree.type_declarations:
no_of_method_decls = 0
index = 0
for declaratn in type_decl.body:
if type(declaratn) is m.MethodDeclaration:
no_of_method_decls += 1
for method_decl in [decl for decl in type_decl.body if type(decl) is m.MethodDeclaration]:
param_strings = []
for param in method_decl.parameters:
if type(param.type) is str:
param_strings.append(param.type + ' ' + param.variable.name)
else:
param_strings.append(param.type.name.value + ' ' + param.variable.name)
methods.append(method_decl.name + '(' + ', '.join(param_strings) + ')')
#print(' ' + method_decl.name + '(' + ', '.join(param_strings) + ')')
if method_decl.body is not None:
for statement in method_decl.body:
constructed_statement = ""
constructed_statement = "\n"
# assuming statements contained inside a block
if hasattr(statement, "block") and statement.block.statements is not None:
for stat in statement.block.statements:
if hasattr(stat, "result"):
constructed_statement += "return " + stat.result.value
# statment that declares a variable that holds the result of a function call
if type(stat) is plyj.model.VariableDeclaration:
variable_decl = stat.variable_declarators[0]
# variable declaration Type
constructed_statement += stat.type.name.value
# variable declaration - variable name
constructed_statement += " " + variable_decl.variable.name + " = "
if hasattr(variable_decl, "initializer"):
# standard variable declaration
if type(variable_decl.initializer) is plyj.model.Literal:
constructed_statement += variable_decl.initializer.value + "\n"
# an array access declaration
if type(variable_decl.initializer) is plyj.model.ArrayAccess:
initializer = variable_decl.initializer
constructed_statement += initializer.target.value + "(" + initializer.index.value + ")" + "\n"
# object creation statement
if type(variable_decl.initializer) is plyj.model.InstanceCreation:
initializer = variable_decl.initializer.type.name.value
constructed_statement += "new " + initializer + "()" + "\n"
# method_invocation_statement
if type(variable_decl.initializer) is plyj.model.MethodInvocation:
arguments = variable_decl.initializer.arguments
constructed_statement += variable_decl.initializer.target.value + "." + variable_decl.initializer.name
constructed_statement += "("
for arg in arguments:
constructed_statement += arg.value + ", "
constructed_statement += ")" + "\n"
if hasattr(variable_decl.initializer, "expression") and \
type(
variable_decl.initializer.expression) is plyj.model.MethodInvocation:
expression = variable_decl.initializer.expression
arguments = expression.arguments
constructed_statement += expression.target.value + "." + expression.name
constructed_statement += "("
for arg in arguments:
constructed_statement += arg.value + ", "
constructed_statement += ")" + "\n"
# no variable holds the result of the method invocation
if type(stat) is plyj.model.MethodInvocation:
arguments = stat.type_arguments
constructed_statement += stat.target.value + "." + stat.name
constructed_statement += "("
for arg in arguments:
constructed_statement += arg.value + ", "
constructed_statement += ")" + "\n"
# statement not contained in a block
else:
# plain return statement
stat = statement
if hasattr(stat, "result"):
constructed_statement += "return " + stat.result.value + "\n"
if type(stat) is plyj.model.VariableDeclaration:
variable_decl = stat.variable_declarators[0]
# variable declaration Type
constructed_statement += stat.type.name.value
# variable declaration - variable name
constructed_statement += " " + variable_decl.variable.name + " = "
if hasattr(variable_decl, "initializer"):
# standard variable declaration
if type(variable_decl.initializer) is plyj.model.Literal:
constructed_statement += variable_decl.initializer.value + "\n"
# an array access declaration
if type(variable_decl.initializer) is plyj.model.ArrayAccess:
initializer = variable_decl.initializer
constructed_statement += initializer.target.value + "(" +initializer.index.value+ ")" + "\n"
# object creation statement
if type(variable_decl.initializer) is plyj.model.InstanceCreation:
initializer = variable_decl.initializer.type.name.value
constructed_statement += "new " + initializer + "()" + "\n"
# method_invocation_statement
if type(variable_decl.initializer) is plyj.model.MethodInvocation:
arguments = variable_decl.initializer.arguments
constructed_statement += variable_decl.initializer.target.value + "." + variable_decl.initializer.name
constructed_statement += "("
for arg in arguments:
constructed_statement += arg.value + ", "
constructed_statement += ")" + "\n"
if hasattr(variable_decl.initializer, "expression") and \
type(
variable_decl.initializer.expression) is plyj.model.MethodInvocation:
expression = variable_decl.initializer.expression
arguments = expression.arguments
constructed_statement += expression.target.value + "." + expression.name
constructed_statement += "("
for arg in arguments:
constructed_statement += arg.value + ", "
constructed_statement += ")" + "\n"
# # no variable holds the result of the method invocation
# if type(stat) is plyj.model.MethodInvocation:
# # arguments = variable_decl.arguments
# constructed_statement += stat.target.value + "." + stat.name
# # constructed_statement += '[' + ', '.join(arguments) + ']'
#
# # variable decaration statement
# if type(statement) is plyj.model.VariableDeclaration:
# variable_decl = statement.variable_declarators[0]
# constructed_statement += statement.type.name.value + " "
# constructed_statement += variable_decl.variable.name + " " + variable_decl.initializer
#
# # no variable holds the result of the method invocation
# if type(statement) is plyj.model.MethodInvocation:
# expression = variable_decl.initializer.expression
# arguments = expression.arguments
# constructed_statement += expression.target.value + "." + expression.name
# constructed_statement += '[' + ', '.join(arguments) + ']'
# inner_method = Node("contained_statement",
# name=stat.target.value + "." + stat.name,
# arguments=expression.arguments
# )
# graph.create(inner_method)
# inner_statements.append(inner_method)
inner_statements.append(constructed_statement)
    for idx, method in enumerate(methods):
        print(' ' + "-> " + method)
        if idx < len(inner_statements):
            print(inner_statements[idx] + "\n")
|
[
"hfsurve@hotmail.com"
] |
hfsurve@hotmail.com
|
90745e3748852df86cbde84effdd8c05856a95bb
|
07e4bfaa0a9e36becef7f4163c42535810a8bc07
|
/application.py
|
a34959d4e8d9a1977bed57524ce6e6c08ffaa0ae
|
[] |
no_license
|
uorocketry/UIgnite
|
8bef44668f4e1f46b9b8e904fc5f961948179eda
|
93cbe9f908c21c56501d4dab286aeabf4b8d78be
|
refs/heads/master
| 2021-09-24T11:59:03.593599
| 2018-10-09T16:19:09
| 2018-10-09T16:19:09
| 108,095,504
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,944
|
py
|
#!/usr/bin/python
import argparse
from Tkinter import *
from Utility import *
import serial
import sys
import time
parser = argparse.ArgumentParser(description='establish serial communication with Serial port based on platform and port no.')
parser.add_argument('platform', help="w -> windows | u-> unix | uw -> bash on windows (build 16299 and later)")
parser.add_argument('port_number', help="if using unix, it is X in ttyUSBX | if using windows, it is X in COMX")
args = parser.parse_args()
def add_text(words, box):
box.insert(CURRENT, words)
box.pack()
def ping(serial,message, box):
add_text("\nPinging Igniter", box)
serial.flushInput()
serial.write(message)
add_text("\nwaiting for reply", box)
try:
        serial.readline()  # discard one line (e.g. a command echo) before reading the reply
        add_text("\n IGNITER: %s" % serial.readline(), box)
except Exception as e:
add_text( "\nIGNITER: could not read line", box)
return
def fire(serial, message, box):
add_text("\nStarting Ignition!",box)
serial.write(message)
add_text("\nwaiting for confirmation",box)
try:
add_text("\n IGNITER: %s" % serial.readline(), box)
except Exception as e:
add_text( "\nIGNITER: could not read line", box)
return
def stop(serial,message1, message2, box):
add_text("\nSTOPPING!",box)
serial.write(message1)
add_text("\nwaiting for confirmation",box)
try:
add_text("\n IGNITER: %s" % serial.readline(), box)
except Exception as e:
add_text( "\nIGNITER: could not read line", box)
time.sleep(0.05)
add_text("\nCLOSING!", box)
serial.write(message2)
add_text("\nwaiting for confirmation",box)
try:
add_text("\n IGNITER: %s" % serial.readline(), box)
except Exception as e:
add_text( "\nIGNITER: could not read line", box)
return
def main(argv):
print (argv)
temp = (configure_usb(argv[0], argv[1]))
ser = serial.Serial(temp,timeout=2)
try:
ser.close()
ser.open()
except Exception as e:
print("port error")
root = Tk()
root.title("UIgnite")
frame = Frame(root)
frame.pack()
topframe = Frame(root)
bottomframe = Frame(root)
bottomframe.pack( side = BOTTOM )
topframe.pack(side = TOP)
text = Text(bottomframe)
text.insert(INSERT, "Hello.....")
text.pack()
redbutton = Button(topframe, text="STOP", fg="red",command=lambda:stop(ser,"1","5",text))
redbutton.pack( side = LEFT)
greenbutton = Button(topframe, text="START", fg="green", command=lambda: fire(ser,"2",text))
greenbutton.pack( side = LEFT )
bluebutton = Button(topframe, text="PING", fg="blue", command=lambda: ping(ser,"6",text))
bluebutton.pack( side = RIGHT )
root.mainloop()
if __name__ == "__main__":
#print(sys.argv[1],sys.argv[2])
main(sys.argv[1:])
|
[
"pbuzuloiu@yahoo.ca"
] |
pbuzuloiu@yahoo.ca
|
4be1b8d89abbdf4762c8e8d0166a630f7f34f665
|
b194de597ed9f327859d697f6f1980ff686fb50d
|
/תרגול4.py
|
030ff223cfbbff1f2511047700cf9cdc86809a17
|
[] |
no_license
|
barjakobovitz/limodim
|
a43cb64fb488bada356caa7534c3ba777ee97f74
|
32ba6c74fc4bfe17505b0827bc4f6cac27e7a9d7
|
refs/heads/main
| 2023-01-24T02:55:08.158186
| 2020-11-24T15:27:49
| 2020-11-24T15:27:49
| 315,673,554
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 517
|
py
|
#ex1
lst=["what","is","The","matrix","neo"]
a=1
z=3
low= min(a,z)
high= max(a,z)
new_list= sorted(lst[low:high+1], key=str.lower)
print (lst)
print (new_list)
#ex2
phone_book=[["bar","0545900961"],["zvi","0545680300"]]
name="zvi"
found= False
phone= ""
for contact in phone_book:
    if contact[0] == name:
        found = True
        phone = contact[1]
        print("name: ", contact[0], "phone: ", phone)
        break
if not found:
    print("not found")
|
[
"noreply@github.com"
] |
barjakobovitz.noreply@github.com
|
c1f4ea1630b5cb9c568febf67aadb059bc3f6f4d
|
214a1f5f650729e25858aea46ce1be6359e760cb
|
/ex3.py
|
8206702c9758994d74109ec43f02a488e6bfd91d
|
[] |
no_license
|
deekshithas/pycode
|
e7e3fdc9f18611ffafbab4380edcc2eb674f8685
|
868ce9e4f557495b9dc12547a2b7881fb0eba0cd
|
refs/heads/master
| 2020-03-27T10:11:47.283796
| 2018-08-28T06:38:44
| 2018-08-28T06:38:44
| 146,402,211
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 619
|
py
|
import cv2
# scaling, resizing and interpolation
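# Rule of thumb (general OpenCV guidance, not from the original author):
# INTER_AREA tends to work best for shrinking, INTER_CUBIC / INTER_LINEAR for enlarging.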
img=cv2.imread("flower.jpg")
cv2.imshow("original image",img)
cv2.waitKey(0)
img_scaled=cv2.resize(img,None,fx=0.75,fy=0.75)
cv2.imshow("scaling linear interpolation",img_scaled)
cv2.waitKey()
# let's double the size of our image
img_scaled1=cv2.resize(img,None,fx=2,fy=2,interpolation=cv2.INTER_CUBIC)
cv2.imshow("scaling-cubic interpolation",img_scaled1)
cv2.waitKey()
# let's skew the resized image by setting exact dimensions
img_scaled2=cv2.resize(img,(900,400),interpolation=cv2.INTER_AREA)
cv2.imshow("scaling-skewed size",img_scaled2)
cv2.waitKey()
cv2.destroyAllWindows()
|
[
"noreply@github.com"
] |
deekshithas.noreply@github.com
|
470da951c844255c64b9acddbcfc9b3911f81172
|
bf84f1f2eb99c877e4cda94f656764fd7dd6f00a
|
/mcq/main.py
|
e602e12655ba40bfce616e8731ab76e9fdd44910
|
[] |
no_license
|
Jubayer247/mixed
|
8ac2c925f92d8e9ab23e3fde28ccae192e4e567e
|
6a24e1005e7f69ddc68d85e65e56c6d589c97abb
|
refs/heads/master
| 2023-07-21T23:23:12.462707
| 2021-08-31T00:03:28
| 2021-08-31T00:03:28
| 401,512,987
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 76
|
py
|
from gtts import gTTS
tts = gTTS('hello', lang='en')
tts.save('hello.mp3')
|
[
"mdjubayer247@gmail.com"
] |
mdjubayer247@gmail.com
|
62a85ce9f95f05f7ee06d3dd88a2fec14efc7fae
|
1f4ca645e123812ee7234670797375452a143676
|
/asking/forms.py
|
57b74a1b1d8e2d12e5e2c5071bb9d99681cc25ee
|
[] |
no_license
|
GeniailBonehead/asking
|
fa71aa9c376887258dbda6d1fc3fca0603c5a4a1
|
bde1bd1379cd624eebda129f5dac7dbd931014bd
|
refs/heads/main
| 2023-08-28T03:13:16.033814
| 2021-11-10T20:23:59
| 2021-11-10T20:23:59
| 426,740,021
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,034
|
py
|
from django import forms
from django.contrib.auth.forms import AuthenticationForm
class UserLoginForm(AuthenticationForm):
    username = forms.CharField(label='Имя пользователя',  # "Username"
                               widget=forms.TextInput(attrs={'class': 'form-control'}))
    password = forms.CharField(label='Пароль',  # "Password"
                               widget=forms.PasswordInput(attrs={'class': 'form-control'}))
class QuestionForm(forms.Form):
    answer = forms.CharField(label='Введите ответ на вопрос', max_length=100)  # label: "Enter the answer to the question"
question_id = forms.IntegerField()
class AskingForm(forms.Form):
name = forms.CharField(max_length=60)
question_text = forms.CharField(label='description', max_length=500)
start_date = forms.DateTimeField(label='date start')
finish_date = forms.DateTimeField(label='date finish')
class QuestionChangeForm(forms.Form):
question_text = forms.CharField(label='description', max_length=500)
answer_type = forms.IntegerField(label='answer type')
answer_choices = forms.CharField(label='answer choices')
|
[
"dstrannik10@gmail.com"
] |
dstrannik10@gmail.com
|
2dffe16cd46d1be730bec0b9cf0f13dde7be83e0
|
93b37621314b34e40d7f2f0c8fa97948dab9c63f
|
/sniffer/wsgi.py
|
1a3ef204b65614700c132a51a3e46eb4688445a0
|
[] |
no_license
|
BraFei/sniffer
|
21898fcdd2a99f27ab6bb17618032c5555f7b928
|
e3ba58b60989c2585b444baa0030067865554157
|
refs/heads/master
| 2020-03-21T17:10:37.403760
| 2018-07-08T08:04:19
| 2018-07-08T08:04:19
| 138,817,807
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 407
|
py
|
"""
WSGI config for sniffer project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "sniffer.settings")
application = get_wsgi_application()
|
[
"1836088871@qq.com"
] |
1836088871@qq.com
|
36fa121df7b2ce09f5d72136d2a1aaec557c7cf8
|
c9acd0abd258f1400e9ba4d653d6fd24d0d3a7a9
|
/domiporta/category.py
|
8ca6b1c0a9d1ff931e2db55a10fe278b0022b414
|
[
"MIT"
] |
permissive
|
nieruchomosci/pydomiporta
|
4206f51310af22c446bd753ed1ab353337f9bb05
|
458231ce9174e72ef4f6c98dd142339929a22894
|
refs/heads/master
| 2020-03-11T16:06:48.428938
| 2017-11-24T10:14:59
| 2017-11-24T10:14:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,229
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import logging
from bs4 import BeautifulSoup
import domiporta
import domiporta.utils
log = logging.getLogger(__file__)
logging.basicConfig(level=logging.DEBUG)
def get_category(url=None, category='nieruchomosci', transaction_type='wszystkie', voivodeship=None, city=None,
street=None, filters=None):
""" Parses available offer urls from given category search page
:param url: Url to search web page
:param category: Type of property of interest (Mieszkanie/Dom/Garaż/Działka)
:param transaction_type: Type of transaction
:param voivodeship: Voivodeship
:param city: City
:param street: Street
:param filters: Dictionary with additional filters
:type url: str, None
:type category:str, None
:type transaction_type: str, None
:type voivodeship: str, None
:type city: str, None
:type street: str, None
:type filters: dict, None
:return: List of urls of all offers for given parameters
:rtype: list
"""
if url is None:
url = domiporta.utils.get_url(category, transaction_type, voivodeship, city, street, filters)
page, max_number_page, offers = 1, None, []
while max_number_page is None or page <= max_number_page:
page_number = "PageNumber=" + str(page)
join_param = '?' if '?' not in url else '&'
page_url = "{0}{1}{2}".format(url, join_param, page_number)
offers_urls, markup = get_offers_from_category(page_url)
offers += offers_urls
        if page == 1:
            max_number_page = domiporta.utils.get_max_number_page(markup)
        page += 1
return offers
def get_offers_from_category(url):
""" Parses available offer urls from given category from given page
:param url: Defined url for Domiporta page with offers
:type url: str
:return: List of urls from given page
:rtype: list
"""
markup = BeautifulSoup(domiporta.utils.get_content_from_source(url), 'html.parser')
offers_urls = []
offers = markup.find_all('div', class_='detail-card')
for offer in offers:
offers_urls.append(offer.find('a').get('href'))
return offers_urls, markup
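# Usage sketch (hypothetical search parameters):
#   offers = get_category(category='mieszkanie', transaction_type='wszystkie',
#                         voivodeship='mazowieckie', city='warszawa')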
|
[
"mail@pythonic.ninja"
] |
mail@pythonic.ninja
|
0a4fae9b40cd22fc05ec3b4c4b51ec2141535af4
|
752d9e6413de28397da8804e7d73784064459773
|
/pyQtTuts-v04.py
|
c1cb44f6e9accb3892be1313d731ca913e2555c4
|
[] |
no_license
|
SlyCodePanda/a-mess-of-pyqt-stuff
|
2e3404fa4833d39e1b9bac828f247028f583ffe4
|
19a0aecb2b826854186911dbee23f068c456092f
|
refs/heads/master
| 2021-01-01T06:56:46.071814
| 2017-07-18T05:45:36
| 2017-07-18T05:45:36
| 97,557,167
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,843
|
py
|
import sys
from PyQt4 import QtGui, QtCore
# Window inherits from the Qt widget 'MainWindow'
class Window(QtGui.QMainWindow):
# Any core application stuff goes in the 'init' function.
def __init__(self):
        # 'super' returns the parent object; here, that parent is our main window object.
super(Window, self).__init__()
# x: 50, y: 50, width: 500, height: 300.
self.setGeometry(50, 50, 500, 300)
self.setWindowTitle("PyQt tuts")
self.setWindowIcon(QtGui.QIcon('index.png'))
# Main menu.
# Adding Items to the main menu:
extractAction = QtGui.QAction("&GET TO THE CHOPPAH!!!", self)
extractAction.setShortcut("Ctrl+Q")
extractAction.setStatusTip('Leave The App')
extractAction.triggered.connect(self.close_application)
anotherAction = QtGui.QAction("It's a trap!!", self)
anotherAction.setShortcut("Ctrl+A")
anotherAction.setStatusTip('This does nothing lol')
anotherAction.triggered.connect(self.get_tricky)
# Editor.
openEditor = QtGui.QAction("&Editor", self)
openEditor.setShortcut("Ctrl+E")
openEditor.setStatusTip('Open Editor')
openEditor.triggered.connect(self.editor)
# Open.
openFile = QtGui.QAction("&Open", self)
openFile.setShortcut("Ctrl+O")
openFile.setStatusTip('Open File')
openFile.triggered.connect(self.file_open)
# Save.
saveFile = QtGui.QAction("&Save", self)
saveFile.setShortcut("Ctrl+S")
saveFile.setStatusTip('Save File')
saveFile.triggered.connect(self.file_save)
self.statusBar()
# Menu definitions:
mainMenu = self.menuBar()
fileMenu = mainMenu.addMenu('&File')
editMenu = mainMenu.addMenu('&Edit')
fileMenu.addAction(extractAction)
editMenu.addAction(anotherAction)
fileMenu.addAction(openFile)
fileMenu.addAction(saveFile)
editorMenu = mainMenu.addMenu("&Editor")
editorMenu.addAction(openEditor)
self.home()
# Stuff that is specific to that certain page that you are on.
def home(self):
btn = QtGui.QPushButton("Quit", self)
# What happens when the button is clicked.
btn.clicked.connect(self.close_application)
btn.resize(btn.minimumSizeHint())
btn.move(0,100)
# Adding the Toolbar:
extractAction = QtGui.QAction(QtGui.QIcon('todachoppa.png'), 'Flea the Scene', self)
extractAction.triggered.connect(self.close_application)
self.toolBar = self.addToolBar("Extraction")
self.toolBar.addAction(extractAction)
# Adding Font Widget:
fontChoice = QtGui.QAction('Font', self)
fontChoice.triggered.connect(self.font_choice)
self.toolBar.addAction(fontChoice)
# Adding Colour Picker Widget. Changes the background colour of the font background:
colour = QtGui.QColor(0, 0, 0)
fontColour = QtGui.QAction('Font bg Colour', self)
fontColour.triggered.connect(self.colour_picker)
self.toolBar.addAction(fontColour)
# Adding Check Box:
checkBox = QtGui.QCheckBox('Enlarge Window', self)
checkBox.move(300, 25)
checkBox.stateChanged.connect(self.enlarge_window)
# Adding Progress Bar:
self.progress = QtGui.QProgressBar(self)
self.progress.setGeometry(200, 80, 250, 20)
self.btn = QtGui.QPushButton("Download", self)
self.btn.move(200, 120)
self.btn.clicked.connect(self.download)
# Adding Drop Down Button. That allows you to set the GUI style :
# This prints out the default style that you GUI is set to to your terminal.
print(self.style().objectName())
self.styleChoice = QtGui.QLabel("GTK+", self)
comboBox = QtGui.QComboBox(self)
# A selection of GUI styles from the QStyleFactory.
comboBox.addItem("motif")
comboBox.addItem("Windows")
comboBox.addItem("cde")
comboBox.addItem("Plastique")
comboBox.addItem("Cleanlooks")
comboBox.addItem("gtk+")
comboBox.move(20, 250)
self.styleChoice.move(50, 150)
comboBox.activated[str].connect(self.style_choice)
# Adding Calendar Widget:
cal = QtGui.QCalendarWidget(self)
cal.move(500, 200)
cal.resize(200, 200)
self.show()
def colour_picker(self):
colour = QtGui.QColorDialog.getColor()
self.styleChoice.setStyleSheet("QWidget { background-color: %s}" % colour.name())
def editor(self):
self.textEdit = QtGui.QTextEdit()
self.setCentralWidget(self.textEdit)
def file_open(self):
name = QtGui.QFileDialog.getOpenFileName(self, 'Open File')
# Opening with the intention to read ('r').
file = open(name, 'r')
# Need to call the editor, because it is not there by default.
self.editor()
with file:
text = file.read()
self.textEdit.setText(text)
def file_save(self):
name = QtGui.QFileDialog.getSaveFileName(self, 'Save File')
# Intention to write the file ('w')
file = open(name, 'w')
text = self.textEdit.toPlainText()
file.write(text)
file.close()
def font_choice(self):
font, valid = QtGui.QFontDialog.getFont()
if valid:
self.styleChoice.setFont(font)
def style_choice(self, text):
self.styleChoice.setText(text)
# Sets the style of your GUI.
QtGui.QApplication.setStyle(QtGui.QStyleFactory.create(text))
def download(self):
self.completed = 0
while self.completed < 100:
self.completed += 0.0001
self.progress.setValue(self.completed)
def enlarge_window(self, state):
if state == QtCore.Qt.Checked:
self.setGeometry(50, 50, 1000, 600)
else:
self.setGeometry(50, 50, 500, 300)
def close_application(self):
# Pop-Up Message:
choice = QtGui.QMessageBox.question(self, 'Extract',
"Get into the chopper?",
QtGui.QMessageBox.Yes | QtGui.QMessageBox.No)
if choice == QtGui.QMessageBox.Yes:
print( "Extracting now")
sys.exit()
else:
pass
def get_tricky(self):
self.setWindowTitle("OH SHIT DAT TRICKY!")
# Our 'main'.
def run():
app = QtGui.QApplication(sys.argv)
GUI = Window()
sys.exit(app.exec_())
run()
'''NOTES:
To convert a .ui file to a .py file you need to run pyuic using the following command:
pyuic4 -o ui_form.py form.ui
'''
|
[
"reneemarsland@hotmail.com"
] |
reneemarsland@hotmail.com
|
1738a57b1e32175ba5e0dc80f365532ff1433e5a
|
feb72ff3447efeaf4e8087f7f9cd0f83e19b4920
|
/personal/RebeccaTippens/new_averaged_spectra.py
|
27966ca752346a02a995b53b74f51e5ff51beaaa
|
[] |
no_license
|
gnarayan/astr596
|
81fe27bf67e7caa68e8c314cdbe68e818f0512dd
|
1f76df17903875162885308479831d8bb345c2b7
|
refs/heads/master
| 2021-01-20T22:02:00.941404
| 2017-08-29T17:00:21
| 2017-08-29T17:00:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,832
|
py
|
#Use numpy and scipy
import math
import numpy as np
from scipy.interpolate import interp1d
import matplotlib.pyplot as plt
#Read data from first file into two arrays
#could also use array = np.loadtxt()
x1=[]
y1=[]
for line in open('/Users/Rebecca/astr596/data/sn2011by-hst+lick.flm'):
columns = line.split()
if len(columns) == 2:
if float(columns[0]) <= 5715: # truncation
x1.append(float(columns[0]))
y1.append(float(columns[1]))
#Read data from second file into two arrays
#could also use array = np.loadtxt()
x2 = []
y2 = []
for line in open('/Users/Rebecca/astr596/data/cfa/sn1995Y/sn1995Y-19950916.49-fast.flm'):
columns = line.split()
if len(columns) == 2:
x2.append(float(columns[0]))
y2.append(float(columns[1]))
def avg(*vals):
return float(sum(vals))/float(len(vals))
#print avg(1,2,3,9.5) #test line for avg function
x3 = []
y3 = []
for x1_val in range(len(x1)):
    for x2_val in range(len(x2)):
        if x1[x1_val] == x2[x2_val]:
            x3.append(x1[x1_val])
            y3.append(avg(y1[x1_val], y2[x2_val]))
f3 = interp1d(x3, y3, kind='linear')
x4 = x3
y4 = f3(x3)  # evaluated at the original sample points, so y4 simply reproduces y3
#--------------- the point at which my code becomes useful again
#Read averaged data to file -- change this to .flm or whatever
np.savetxt('averaged_spectra.txt', np.transpose([x3,y3]), fmt="%d %26.18e")
#Plot arrays one by the other
plt.subplot(111)
plt.plot(x1, y1, color='c', linewidth=1)
plt.plot(x2, y2, color='m', linewidth=1)
plt.plot(x3, y3, color='#ff6600', linewidth=1)
plt.plot(x4, y4, color='b', linewidth=1)
plt.yscale('log')
plt.xlabel('Wavelength ' + '(' + u'\u212B' + ')')
plt.ylabel('log (Flux)')
#plt.subplot(112)  # removed: invalid subplot spec -- a 1x1 grid has no second panel
#pyplot.savefig('averaged_spectra.eps', format='eps') # saves plot as .eps
plt.savefig('averaged_spectra.pdf', format='PDF')
plt.show()
|
[
"Rebecca.Tippens@gmail.com"
] |
Rebecca.Tippens@gmail.com
|
83d0a32ef2d365d17caa9d311c367ed5828559ac
|
847f93bb462bfc1922f49beb00b775c390ac556e
|
/coding_python/2729.py
|
432364d0cc643695f0cf788cb23cdc83a7ca0ef6
|
[] |
no_license
|
PaengE/AlgorithmPractice
|
873a7d4b7f5710a1fcf9842eaf2930d739d8edb2
|
2ba166d14c7911af74eb595e4202a1b875fce785
|
refs/heads/master
| 2023-07-08T10:09:15.874030
| 2021-08-11T08:10:15
| 2021-08-11T08:10:15
| 200,774,172
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 278
|
py
|
n, m = map(int, input().split())
li = list(map(int, input().split()))
best = 0  # avoid shadowing the built-in max()
for i in range(0, n):
    for j in range(i + 1, n):
        for k in range(j + 1, n):
            tmp = li[i] + li[j] + li[k]
            if tmp <= m and best < tmp:
                best = tmp
print(best)
|
[
"ckddn2820@gmail.com"
] |
ckddn2820@gmail.com
|
38dca13d0bf9d00206d097874ed1f6efdea2317e
|
375f29a4c87700ec20a0b399012c6b37e1cda012
|
/python programming/beginner/reverse number.py
|
2eb4e88abf8e314c9dfbf8e5442d9d31b5504c12
|
[] |
no_license
|
inbavalli/python
|
33afdc4a4a844fef291418fabd2a8f124d0f0d3d
|
3fe298ebf976b1a70559265ee65f7762a2afd7f2
|
refs/heads/master
| 2018-11-11T10:09:12.370195
| 2018-08-22T11:15:28
| 2018-08-22T11:15:28
| 120,161,666
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 42
|
py
|
# Count down from 20 to 1. (The original called an undefined main(), which
# raised a NameError; that call has been removed.)
for v in range(20, 0, -1):
    print(v)
|
[
"noreply@github.com"
] |
inbavalli.noreply@github.com
|
f5db9709025ffe004bd79e61971e97422496b87b
|
b6c3f197e6e7be78ba5565994fd2b435bf65043b
|
/models/multiatt/model_noPadding.py
|
25ac77a178f251be5c11018b60a91daccd28b930
|
[] |
no_license
|
tanjatang/DMVCR
|
f225fbef1380be6174218065a8e2b82dc5f71e13
|
297d11e299d5c5614e8a4f0d49fadef81e2a959a
|
refs/heads/main
| 2023-07-10T10:34:20.693260
| 2021-08-23T10:11:34
| 2021-08-23T10:11:34
| 378,831,014
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 25,159
|
py
|
"""
Let's get the relationships yo
"""
from typing import Dict, List, Any
import math
import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
import torch.nn.parallel
from allennlp.data.vocabulary import Vocabulary
from allennlp.models.model import Model
from allennlp.modules import TextFieldEmbedder, Seq2SeqEncoder, FeedForward, InputVariationalDropout, TimeDistributed
from allennlp.training.metrics import CategoricalAccuracy
from allennlp.modules.matrix_attention import BilinearMatrixAttention
from utils.detector import SimpleDetector
from allennlp.nn.util import masked_softmax, weighted_sum, replace_masked_values
from allennlp.nn import InitializerApplicator
import os
# import pickle
import pickle
import ipdb
#######################################3
from PIL import Image
import pylab
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from matplotlib import pyplot
from matplotlib.patches import Rectangle
#######################################
SAVE_ROOT = "/phys/ssd/tangxueq/tmp/vcr/vcrimage/rationale"
@Model.register("MultiHopAttentionQA")
class AttentionQA(Model):
def __init__(self,
vocab: Vocabulary,
span_encoder: Seq2SeqEncoder,
reasoning_encoder: Seq2SeqEncoder,
# lstm_encoder: Seq2SeqEncoder,
input_dropout: float = 0.3,
hidden_dim_maxpool: int =512,
class_embs: bool=True,
reasoning_use_obj: bool=True,
reasoning_use_answer: bool=True,
reasoning_use_question: bool=True,
pool_reasoning: bool = True,
pool_answer: bool = True,
pool_question: bool = False,
initializer: InitializerApplicator = InitializerApplicator(),
):
super(AttentionQA, self).__init__(vocab)
################################################################################################################################
path = '/home/tangxueq/MA_tang/r2c/models/saves/memory_cell.npz'
self.memory_cell = torch.nn.Parameter(torch.Tensor(self.memory_cell_load(path)))
print(self.memory_cell)
###############################################################################################################################
# if os.path.isfile(self.memory_cell_path):
# print('load memory_cell from {0}'.format(self.memory_cell_path))
# memory_init = np.load(self.memory_cell_path)['memory_cell'][()]
# else:
# print('create a new memory_cell')
# memory_init = np.random.rand(1000, 1024) / 100
# memory_init = np.float32(memory_init)
# self.memory_cell = torch.from_numpy(memory_init).cuda().requires_grad_()
#################################################################################################################################
self.detector = SimpleDetector(pretrained=True, average_pool=True, semantic=class_embs, final_dim=512)
###################################################################################################
INPUT_SIZE =512 #768
self.lstm = nn.LSTM(
input_size=INPUT_SIZE,
hidden_size=512,
            num_layers=2,  # number of stacked LSTM layers
            batch_first=True,  # input/output shape is (batch, time_step, feature); batch dim comes first
bidirectional=True
)
############################################################################################################################3
self.linear = torch.nn.Linear(512,256)
self.mask_linear = torch.nn.Linear(1,512)
self.encoder_layer_vc = nn.TransformerEncoderLayer(d_model=512, nhead=8)
self.transformer_encoder_vc = nn.TransformerEncoder(self.encoder_layer_vc, num_layers=3)
self.AvgPool =nn.AdaptiveAvgPool1d(1)
# self.encoder_layer_cv = nn.TransformerEncoderLayer(d_model=768, nhead=8)
# self.transformer_encoder_cv = nn.TransformerEncoder(self.encoder_layer_cv, num_layers=2)
self.image_AttFlat = AttFlat(512)
self.qa_AttFlat = AttFlat(768)
self.proj_norm = LayerNorm(1024)
self.proj = nn.Linear(1024, 4)
############################################################################################################################
self.norm = torch.nn.BatchNorm1d(256)
self.lstm_norm = torch.nn.BatchNorm1d(200)
self.rnn_input_dropout = TimeDistributed(InputVariationalDropout(input_dropout)) if input_dropout > 0 else None
self.span_encoder = TimeDistributed(span_encoder)
self.span_reshape = TimeDistributed(torch.nn.Linear(512, 768))
self.qao_reshape = torch.nn.Linear(512,768)
# self.out = nn.Linear(50, 1)
# self.reasoning_encoder = TimeDistributed(lstm_encoder)
self.span_attention = BilinearMatrixAttention(
matrix_1_dim=span_encoder.get_output_dim(),
matrix_2_dim=span_encoder.get_output_dim(),
)
self.obj_attention = BilinearMatrixAttention(
matrix_1_dim=span_encoder.get_output_dim(),
matrix_2_dim=self.detector.final_dim,
)
self.reasoning_use_obj = reasoning_use_obj
self.reasoning_use_answer = reasoning_use_answer
self.reasoning_use_question = reasoning_use_question
self.pool_reasoning = pool_reasoning
self.pool_answer = pool_answer
self.pool_question = pool_question
#[96,4,50,1024]
dim = 512#768*2 #sum([d for d, to_pool in [(reasoning_encoder.get_output_dim(), self.pool_reasoning),
# (span_encoder.get_output_dim(), self.pool_answer),
# (span_encoder.get_output_dim(), self.pool_question)] if to_pool])
# self.final_mlp = torch.nn.Sequential(
# torch.nn.Flatten(),
# torch.nn.Dropout(input_dropout, inplace=False),
# torch.nn.Linear(dim, hidden_dim_maxpool),
# torch.nn.ReLU(inplace=True),
# torch.nn.Dropout(input_dropout, inplace=False),
# torch.nn.Linear(hidden_dim_maxpool, 4),
# )
# self.final_mlp = torch.nn.Sequential(
# torch.nn.Flatten(),
# torch.nn.Dropout(input_dropout, inplace=False),
# torch.nn.Linear(dim, hidden_dim_maxpool),
# torch.nn.LeakyReLU(inplace=True),
# # torch.nn.Dropout(input_dropout, inplace=False),
# torch.nn.Linear(hidden_dim_maxpool, 4),
# )
# self.final_mlp = torch.nn.Sequential(
# torch.nn.Flatten(),
# torch.nn.Dropout(input_dropout, inplace=False),
# torch.nn.Linear(dim,4))
#------------------------------------------------------------------
self.final_mlp = torch.nn.Sequential(
torch.nn.Dropout(input_dropout, inplace=False),
torch.nn.Linear(dim, hidden_dim_maxpool),
torch.nn.ReLU(inplace=True),
torch.nn.Dropout(input_dropout, inplace=False),
torch.nn.Linear(hidden_dim_maxpool, 1),
)
#----------------------------------------------------------------------
self._accuracy = CategoricalAccuracy()
self._loss = torch.nn.CrossEntropyLoss()
#####################################################################
initializer(self)
##############################################################################
# self.memory_cell_path = getattr(opt, 'memory_cell_path', '0')
def memory_cell_load(self,path):
if os.path.isfile(path):
print('load memory_cell from {0}'.format(path))
# memory_init = np.load(self.memory_cell_path)['memory_cell']
memory_init = torch.load(path)['memory_cell'][()]
else:
print('create a new memory_cell')
# memory_init = np.random.rand(10000, 1024)/ 100
# memory_init = torch.random(10000,1024)/100
memory_init = torch.rand(200, 256)/100
# memory_init = memory_init.long()
# self.memory_cell = torch.from_numpy(memory_init).cuda().requires_grad_()
return memory_init
def _collect_obj_reps(self, span_tags, object_reps):
"""
Collect span-level object representations
:param span_tags: [batch_size, ..leading_dims.., L]
:param object_reps: [batch_size, max_num_objs_per_batch, obj_dim]
:return:
"""
span_tags_fixed = torch.clamp(span_tags, min=0) # In case there were masked values here
row_id = span_tags_fixed.new_zeros(span_tags_fixed.shape)
row_id_broadcaster = torch.arange(0, row_id.shape[0], step=1, device=row_id.device)[:, None]
# Add extra diminsions to the row broadcaster so it matches row_id
leading_dims = len(span_tags.shape) - 2
for i in range(leading_dims):
row_id_broadcaster = row_id_broadcaster[..., None]
row_id += row_id_broadcaster
return object_reps[row_id.view(-1), span_tags_fixed.view(-1)].view(*span_tags_fixed.shape, -1)
def embed_span(self, span, span_tags, span_mask, object_reps):
"""
:param span: Thing that will get embed and turned into [batch_size, ..leading_dims.., L, word_dim]
:param span_tags: [batch_size, ..leading_dims.., L]
:param object_reps: [batch_size, max_num_objs_per_batch, obj_dim]
:param span_mask: [batch_size, ..leading_dims.., span_mask
:return:
"""
retrieved_feats = self._collect_obj_reps(span_tags, object_reps)
span_rep = torch.cat((span['bert'], retrieved_feats), -1)
# add recurrent dropout here
if self.rnn_input_dropout:
span_rep = self.rnn_input_dropout(span_rep)
# x = self.span_encoder(span_rep, span_mask)
# x = self.span_reshape(x)
return self.span_encoder(span_rep, span_mask), retrieved_feats
def padding(self,q_rep,a_rep):
# max_len = max(max([i.size(2) for i in q_rep]), max([i.size(2) for i in a_rep]))
max_len = max(a_rep.size(2),q_rep.size(2))
a1, b1, c1, d1 = a_rep.size()
a2, b2, c2, d2 = q_rep.size()
padding_a = torch.zeros(a1, b1, max_len - c1, d1).float().cuda()
padding_q = torch.zeros(a2, b2, max_len - c2, d2).float().cuda()
q_rep_new = torch.cat((q_rep, padding_q), dim=2)
a_rep_new = torch.cat((a_rep, padding_a), dim=2)
qa_rep = torch.cat((q_rep_new, a_rep_new), dim=3) # [batch_size, 8, seq_len, 1536]
return qa_rep
def Dictionary(self,h,M): #[96*4,768] M[10000,768]
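        # Soft dictionary lookup: attention weights softmax(h @ M.T) pick a convex
        # combination of memory rows, i.e. att_res = softmax(h M^T) M.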
h_size = h.size() # [96*4,768]
# h = h.view(-1, h_size[3]) # [(96*4),768]
att = torch.mm(h, torch.t(M)) # [(96*4*50),768] * [768,10000]
att = F.softmax(att, dim=1) # [96*4*50,10000]
att_res = torch.mm(att, M) # [96*4,10000]*[10000,768] -> #[96*4,768]
# att_res = att_res.view([-1, h_size[1]*h_size[2], h_size[3]]) #[96*4,768]
return att_res
def make_mask(self, feature):
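        # Flags positions whose feature vector is all zeros (i.e. padding) and adds
        # two broadcast dims so the mask lines up with attention score tensors.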
return (torch.sum(
torch.abs(feature),
dim=-1
) == 0).unsqueeze(1).unsqueeze(2)
def forward(self,
training_mode,
# images: torch.Tensor,
obj_reps: Dict[str, torch.Tensor],
objects: torch.LongTensor,
segms: torch.Tensor,
boxes: torch.Tensor,
box_mask: torch.LongTensor,
question: Dict[str, torch.Tensor],
question_tags: torch.LongTensor,
question_mask: torch.LongTensor,
answers: Dict[str, torch.Tensor],
answer_tags: torch.LongTensor,
answer_mask: torch.LongTensor,
metadata: List[Dict[str, Any]] = None,
label: torch.LongTensor = None) -> Dict[str, torch.Tensor]:
"""
:param images: [batch_size, 3, im_height, im_width]
:param objects: [batch_size, max_num_objects] Padded objects
:param boxes: [batch_size, max_num_objects, 4] Padded boxes
:param box_mask: [batch_size, max_num_objects] Mask for whether or not each box is OK
:param question: AllenNLP representation of the question. [batch_size, num_answers, seq_length]
:param question_tags: A detection label for each item in the Q [batch_size, num_answers, seq_length]
:param question_mask: Mask for the Q [batch_size, num_answers, seq_length]
:param answers: AllenNLP representation of the answer. [batch_size, num_answers, seq_length]
:param answer_tags: A detection label for each item in the A [batch_size, num_answers, seq_length]
:param answer_mask: Mask for the As [batch_size, num_answers, seq_length]
:param metadata: Ignore, this is about which dataset item we're on
:param label: Optional, which item is valid
        :return: output dictionary of tensors for this batch
"""
# Trim off boxes that are too long. this is an issue b/c dataparallel, it'll pad more zeros that are
# not needed
# print("question:\n", len(question), "\n")
# print(question['bert'].size())
# print("answers:\n", len(answers), "\n")
# print(answers['bert'].size())
max_len = int(box_mask.sum(1).max().item())
objects = objects[:, :max_len]
box_mask = box_mask[:, :max_len]
boxes = boxes[:, :max_len]
segms = segms[:, :max_len]
for tag_type, the_tags in (('question', question_tags), ('answer', answer_tags)):
if int(the_tags.max()) > max_len:
raise ValueError("Oh no! {}_tags has maximum of {} but objects is of dim {}. Values are\n{}".format(
tag_type, int(the_tags.max()), objects.shape, the_tags
))
# obj_reps = self.detector(images=images, boxes=boxes, box_mask=box_mask, classes=objects, segms=segms)
##################################################################################################################
        '''Experimental variant: build the QA representation for a dictionary-based model'''
        '''pad q_rep and a_rep with zeros to a common length'''
# max_len = max(max([i.size(2) for i in question['bert']]),max([i.size(2) for i in answers['bert']]))
# a, b, c, d = a_rep.size()
# padding = torch.zeros(a,b,max_len,d).float()
# q_rep_new = torch.cat((q_rep,padding),dim = 2)
# a_rep_new = torch.cat((a_rep,padding), dim = 2)
# print(question['bert'].size())
if training_mode == 0:
# qa_rep = self.padding(question['bert'],answers['bert']) #[batch_size, 8, seq_len, 768]
qa_rep = torch.cat((question['bert'],answers['bert']),dim = 2)
elif training_mode == 1:
q_rep, q_obj_reps = self.embed_span(question, question_tags, question_mask, obj_reps['obj_reps'])
a_rep, a_obj_reps = self.embed_span(answers, answer_tags, answer_mask, obj_reps['obj_reps'])
##########################################################################3
qa_similarity = self.span_attention(
q_rep.view(q_rep.shape[0] * q_rep.shape[1], q_rep.shape[2], q_rep.shape[3]),
a_rep.view(a_rep.shape[0] * a_rep.shape[1], a_rep.shape[2], a_rep.shape[3]),
).view(a_rep.shape[0], a_rep.shape[1], q_rep.shape[2], a_rep.shape[2])
            qa_attention_weights = masked_softmax(qa_similarity, question_mask[..., None], dim=2)  # attention weights over question tokens
attended_q = torch.einsum('bnqa,bnqd->bnad', (qa_attention_weights, q_rep))
# Have a second attention over the objects, do A by Objs
# [batch_size, 4, answer_length, num_objs]
atoo_similarity = self.obj_attention(a_rep.view(a_rep.shape[0], a_rep.shape[1] * a_rep.shape[2], -1),
obj_reps['obj_reps']).view(a_rep.shape[0], a_rep.shape[1],
a_rep.shape[2],
obj_reps['obj_reps'].shape[1])
atoo_attention_weights = masked_softmax(atoo_similarity, box_mask[:, None, None])
attended_o = torch.einsum('bnao,bod->bnad', (atoo_attention_weights, obj_reps['obj_reps']))
# print('qa_similarity.size(): ', qa_similarity.size(),'\n', 'attended_q.size(): ', attended_q.size(),'\n',
# 'qa_attention_weights.size(): ', qa_attention_weights.size(),'\n','atoo_attention_weights.size() :',atoo_attention_weights.size(),'\n','attended_o.size() ',attended_o.size())
# reasoning_inp = torch.cat([x for x, to_pool in [(a_rep, self.reasoning_use_answer),
# (attended_o, self.reasoning_use_obj),
# (attended_q, self.reasoning_use_question)]
# if to_pool], -1)
#
#
# print("..........", q_rep,"////////",a_rep)
# qa_rep = torch.cat((attended_q,attended_o),dim = 2)
qa_rep = torch.cat((q_rep,a_rep),dim = 2)
# reasoning_inp = torch.cat([x for x, to_pool in [(a_rep, self.reasoning_use_answer),
# (attended_o, self.reasoning_use_obj),
# (attended_q, self.reasoning_use_question)]
# if to_pool], -1)
#
# print(qa_rep.size(),'reasoning_inp.size()') [96,4,29,1536]
# qa_rep = self.qao_reshape(qao_rep)
if self.rnn_input_dropout is not None:
qa_rep = self.rnn_input_dropout(qa_rep)
#################################################################################
#clip
# a1, a2, a3, a4 = qa_rep.size()
# if a3 >= 50:
# qa_rep = qa_rep[:, :, 0:50, :].contiguous()
#
# else:
# padding_i = torch.zeros(a1, a2, 50 - a3, a4).cuda()
# qa_rep = torch.cat((qa_rep, padding_i), dim=2)
# # print(qa_rep.size(),'qa_rep.size()')
##################################################################
qa_rep_size = qa_rep.size()
qa_rep = qa_rep.view(qa_rep_size[0]*qa_rep_size[1],qa_rep_size[2],qa_rep_size[3]) #qa_rep[96*4,50,768]
#########################################################################################################
#
# output, (h_n, h_c) = self.lstm(qa_rep) # self.reasoning_encoder(qa_rep) # # #
        # output = output[:, -1, :]  # take the last time-step output [96*4, 1024]
###########################################################################################################
        '''Transformer-encoder pass'''
transformer = self.transformer_encoder_vc(qa_rep)
# #####################################################################################################3
        '''average pooling over the sequence dimension'''
output = torch.transpose(transformer,1,2).contiguous()
output = self.AvgPool(output)
output = torch.transpose(output, 1, 2).contiguous()
        output = output.squeeze()  # [96*4, 512]
##########################################################################################################
# qa_feature_mask = self.make_mask(transformer)
# # qa_image_mask = self.make_mask(qa_visual_context)
#
# # qa_image_res = self.image_AttFlat(qa_visual_context, qa_image_mask) # [b,dim]
# qa_feature_res = self.image_AttFlat(transformer, qa_feature_mask)
# qa_feature_res = qa_feature_res.view(-1, 4, 1024)
# # qa_image_res = torch.unsqueeze(qa_image_res, 1)
# # qa_image_res = qa_image_res.repeat(1, 4, 1)
#
# qa_proj_feat = qa_feature_res # + qa_image_res
# proj_feat = self.proj_norm(qa_proj_feat)
# output_res = proj_feat
#
# #######################################################################################################
'''LSTM'''
# qa_feature ,(h_n, h_c)= self.lstm(qa_rep)
# qa_feature = qa_feature[:,-1,:]
##############################################################################################################
        # '''dictionary (memory) lookup block'''
#
# att = output @ self.memory_cell.T
#
# att = self.lstm_norm(att)
#
#
# att = F.softmax(att, dim=1) # [96*4,10000]
#
#
# output_res = att @ self.memory_cell
#
# # print('output_res:', output_res.size())
#########################################################
output_res = output.view(-1,4,512)
        logits = self.final_mlp(output_res).squeeze(2)  # [batch_size, 4]
class_probabilities = F.softmax(logits,dim = -1)
output_dict = {"label_logits": logits, "label_probs": class_probabilities,
'cnn_regularization_loss': obj_reps['cnn_regularization_loss'],
# Uncomment to visualize attention, if you want
# 'qa_attention_weights': qa_attention_weights,
# 'atoo_attention_weights': atoo_attention_weights,
}
if label is not None:
loss = self._loss(logits, label.long().view(-1))
self._accuracy(logits, label)
output_dict["loss"] = loss[None]
return output_dict
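    # A minimal training-step sketch for the dict returned above (hypothetical
    # caller; `model` and `batch` are assumed):
    #   out = model(training_mode=1, **batch)
    #   out['loss'].mean().backward()
    #   preds = out['label_probs'].argmax(dim=-1)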
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
return {'accuracy': self._accuracy.get_metric(reset)}
class AttFlat(nn.Module):
def __init__(self, hidden_size):
super(AttFlat, self).__init__()
self.hidden_size = hidden_size
self.flat_mlp_size = 512
self.flat_glimpses = 1
self.drop_out = 0.1
self.flat_out_size = 1024
self.mlp = MLP(
in_size=self.hidden_size,
mid_size=self.flat_mlp_size,
out_size=self.flat_glimpses,
dropout_r=self.drop_out,
use_relu=True
)
self.linear_merge = nn.Linear(
self.hidden_size * self.flat_glimpses,
self.flat_out_size
)
self.lstm = nn.LSTM(
input_size=self.flat_glimpses,
hidden_size=self.hidden_size,
            num_layers=2,  # number of stacked LSTM layers
            batch_first=True,  # input tensors are (batch, time_step, input_size), with batch on dim 0
)
####################################################
def forward(self, x, x_mask):
att = self.mlp(x)
# ---------------------------------------------------------------------------------
# ----------------------mask mask mask --------------------------------------------
# ---------------------------------------------------------------------------------
att = att.masked_fill(
x_mask.squeeze(1).squeeze(1).unsqueeze(2),
-1e9
)
att = F.softmax(att, dim=1)
att_list = []
for i in range(self.flat_glimpses):
att_list.append(
torch.sum(att[:, :, i: i + 1] * x, dim=1)
)
x_atted = torch.cat(att_list, dim=1)
# print(x_atted.shape,x.shape)
x_atted = self.linear_merge(x_atted)
return x_atted
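# AttFlat collapses a variable-length sequence into a single vector: the MLP
# scores each position, padded positions are masked to -1e9 before the softmax,
# each glimpse takes an attention-weighted sum over the sequence, and
# linear_merge maps the concatenated glimpses to flat_out_size.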
class FC(nn.Module):
def __init__(self, in_size, out_size, dropout_r=0., use_relu=True):
super(FC, self).__init__()
self.dropout_r = dropout_r
self.use_relu = use_relu
self.linear = nn.Linear(in_size, out_size)
if use_relu:
self.relu = nn.ReLU(inplace=True)
if dropout_r > 0:
self.dropout = nn.Dropout(dropout_r)
def forward(self, x):
x = self.linear(x)
if self.use_relu:
x = self.relu(x)
if self.dropout_r > 0:
x = self.dropout(x)
return x
class MLP(nn.Module):
def __init__(self, in_size, mid_size, out_size, dropout_r=0., use_relu=True):
super(MLP, self).__init__()
self.fc = FC(in_size, mid_size, dropout_r=dropout_r, use_relu=use_relu)
self.linear = nn.Linear(mid_size, out_size)
def forward(self, x):
return self.linear(self.fc(x))
class LayerNorm(nn.Module):
def __init__(self, size, eps=1e-6):
super(LayerNorm, self).__init__()
self.eps = eps
self.a_2 = nn.Parameter(torch.ones(size))
self.b_2 = nn.Parameter(torch.zeros(size))
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
return self.a_2 * (x - mean) / (std + self.eps) + self.b_2
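# Minimal smoke test for LayerNorm, as a sketch (assumes torch is importable here):
#   x = torch.randn(2, 4, 512)
#   print(LayerNorm(512)(x).shape)  # torch.Size([2, 4, 512])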
|
[
"noreply@github.com"
] |
tanjatang.noreply@github.com
|
ac20b36ddec68d9ded1a7d75371b7479260fdbda
|
30ff4d21d4e2fc0b79c743dedde5a8d4866b6b97
|
/IMDB_Naive_Bayes_Rule.py
|
1e5a4b67e7a7edbba0e85b75021baea885552025
|
[] |
no_license
|
Altabeh/Imdb-review-analysis-with-naive-Bayes-rule
|
01577f771e7922a86a439f8ecbe5f7dfe8739711
|
9aa14161bdecb661215fe4676d72fc2bea8da734
|
refs/heads/master
| 2020-04-07T19:13:29.498356
| 2018-11-22T04:14:58
| 2018-11-22T04:14:58
| 158,640,451
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,188
|
py
|
import glob2
import glob
import re
import collections
import numpy as np
import os
#Path of all the positive/negative reviews
main_path = glob2.glob('aclImdb/test/*/**.txt') # list of all .txt files in both pos and neg
#Concatenation of all the positive/negative reviews
with open('pos_rev.txt', 'w') as f, open('neg_rev.txt', 'w') as g:
for file in main_path:
with open(file) as infile:
if os.path.basename(os.path.dirname(file)) == 'pos':
f.write(infile.read() + '\n')
else:
g.write(infile.read() + '\n')
#Cleaning up pos_rev.txt and neg_rev.txt to improve accuracy
REPLACE_NO_SPACE = re.compile(r"(\.)|(\;)|(\:)|(\!)|(\')|(\?)|(\,)|(\")|(\()|(\))|(\[)|(\])")
REPLACE_WITH_SPACE = re.compile(r"(<br\s*/><br\s*/>)|(\-)|(\/)")
def preprocess_reviews(reviews):
reviews = [REPLACE_NO_SPACE.sub("", line.lower()) for line in reviews]
reviews = [REPLACE_WITH_SPACE.sub(" ", line) for line in reviews]
return reviews
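#For illustration (hypothetical input), preprocess_reviews maps
#['Great movie!<br /><br />Loved it.'] to ['great movie loved it']:
#punctuation is dropped and HTML line breaks become spaces.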
pos_rev_clean = [] #create an empty list
for line in open('pos_rev.txt', 'r'):
pos_rev_clean.append(line.strip())
neg_rev_clean = [] #create an empty list
for line in open('neg_rev.txt', 'r'):
neg_rev_clean.append(line.strip())
#List of words in each review class
wordListPos = re.sub(r"[^\w]", " ", str(preprocess_reviews(pos_rev_clean))).split()
wordListNeg = re.sub(r"[^\w]", " ", str(preprocess_reviews(neg_rev_clean))).split()
#Counting words in positive reviews
cntPos = collections.Counter()
for word in wordListPos:
cntPos[word] += 1
#Counting words in negative reviews
cntNeg = collections.Counter()
for word in wordListNeg:
cntNeg[word] += 1
#Total words in each review
word_count_Pos = len(re.findall(r'\w+', str(preprocess_reviews(pos_rev_clean))))
word_count_Neg = len(re.findall(r'\w+', str(preprocess_reviews(neg_rev_clean))))
#Number of reviews per class (i.e., half the total across the whole test dataset)
tot_rev = len(list(glob.iglob("aclImdb/test/pos/*.txt", recursive=True)))
#Training the model based on naive Bayes theory
#Test dataset chosen from both positive and negative reviews
accuracy_pos_unit = 0
accuracy_neg_unit = 0
for file in main_path:
test_review = []
for line in open(file):
test_review.append(line.strip())
    wordListReview = re.sub(r"[^\w]", " ", str(preprocess_reviews(test_review))).split()
ReviewWordCount = len(wordListReview)
cntReview = collections.Counter()
#The size of vocabulary
Voc = 10000
#Looping over the words in a test review
for word in wordListReview:
cntReview[word] += 1
ReviewKeys = cntReview.keys()
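    #Under multinomial naive Bayes with add-one (Laplace) smoothing, each class
    #score below is: sum over words w of count(w, review) * log((count(w, class) + 1) / (N_class + V)),
    #where N_class is the class's total word count and V the assumed vocabulary size (Voc)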
    #Naive Bayes log-likelihood that the review has a positive tone
LoglikelihoodPos = 0
for word in ReviewKeys:
LoglikelihoodPos = LoglikelihoodPos + cntReview[word] * np.log(
(float(cntPos[word]) + 1) / (word_count_Pos + Voc))
    #Naive Bayes log-likelihood that the review has a negative tone
LoglikelihoodNeg = 0
for word in ReviewKeys:
LoglikelihoodNeg = LoglikelihoodNeg + cntReview[word] * np.log(
(float(cntNeg[word]) + 1) / (word_count_Neg + Voc))
#Comparing against the correct label and computing the accuracy
if (LoglikelihoodPos > LoglikelihoodNeg and os.path.basename(os.path.dirname(file)) == 'pos'):
#print("Naive Bayes says the review is positive!")
accuracy_pos_unit = accuracy_pos_unit + 1
#elif LoglikelihoodPos == LoglikelihoodNeg:
#print("Naive Bayes says the review is half-half!")
elif (LoglikelihoodPos < LoglikelihoodNeg and os.path.basename(os.path.dirname(file)) == 'neg'):
#print("Naive Bayes says the review is negative!")
accuracy_neg_unit = accuracy_neg_unit + 1
elif LoglikelihoodPos > LoglikelihoodNeg and os.path.basename(os.path.dirname(file)) == 'neg':
accuracy_pos_unit = accuracy_pos_unit - 1
elif LoglikelihoodPos < LoglikelihoodNeg and os.path.basename(os.path.dirname(file)) == 'pos':
accuracy_neg_unit = accuracy_neg_unit - 1
else:
continue
print("Total error is {}".format(100*(1-(accuracy_neg_unit+accuracy_pos_unit)/(2*tot_rev))) + '%') #total error rate
|
[
"noreply@github.com"
] |
Altabeh.noreply@github.com
|
6c38b9f1cd74ccdb305365f0d071637e1c3a8395
|
ae2f143fee11ef73d820776557a663ae21afdef0
|
/djproject/djapp/views.py
|
d7c5dbdd75bd4bd084f21573bd20f50ffd299e1b
|
[] |
no_license
|
mozahid4you/djangoappblog
|
082566c07ceb9c53f4111c4913d17e125f1ab531
|
0ef18a519b29e9f22ec325c9248ac776d883ebeb
|
refs/heads/main
| 2023-07-24T00:58:39.424921
| 2021-09-09T18:44:36
| 2021-09-09T18:44:36
| 404,822,861
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 845
|
py
|
from . models import BlogModel
from django.views.generic import ListView,DetailView,CreateView,UpdateView,DeleteView
from django.urls import reverse_lazy
from .forms import PostForm,UpdateForm
class HomeList(ListView):
paginate_by = 4
template_name = 'home.html'
model = BlogModel
ordering = ['-dt']
class Blogdetail(DetailView):
model = BlogModel
template_name='blogdetail.html'
class Addpost(CreateView):
model = BlogModel
template_name = 'add.html'
form_class = PostForm
success_url = reverse_lazy('home')
class Updatepost(UpdateView):
model = BlogModel
template_name = 'updateblog.html'
form_class = UpdateForm
success_url = reverse_lazy('home')
class Deletepost(DeleteView):
model = BlogModel
template_name = 'delete.html'
success_url = reverse_lazy('home')
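# These class-based views would typically be wired up in the app's urls.py
# along these lines (a sketch; route names and URL patterns are assumptions):
#   path('', HomeList.as_view(), name='home'),
#   path('blog/<int:pk>', Blogdetail.as_view(), name='blogdetail'),
#   path('add/', Addpost.as_view(), name='addpost'),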
|
[
"eddymozahidislam@gmail.com"
] |
eddymozahidislam@gmail.com
|
d52b700b1902d79fb4db1cf0f2146d6d5090e35a
|
8e31aae01bae027916019b59766a6db35be352ef
|
/src/Clasificadores/RedNeuronalMatriz.py
|
e58b65f4ebd86659271838f30d0f47689d6220d9
|
[
"Apache-2.0"
] |
permissive
|
garnachod/classification
|
2654c66c6ae20cf9d536c801ea702cdd36bbd1bc
|
85dc2d239e2a84144ac586e395e8cece8050fde6
|
refs/heads/master
| 2021-01-22T00:59:52.525273
| 2015-08-21T10:11:10
| 2015-08-21T10:11:10
| 38,815,213
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,709
|
py
|
# -*- coding: utf-8 -*-
from src.Instance import Instance
from src.Instances import Instances
from Clasificador import Clasificador
from operator import add,mul
from Particionado.DivisionPorcentual import DivisionPorcentual
from Particionado.Particion import Particion
import random
import math # This will import math module
import json
class RedNeuronal(Clasificador):
"""docstring for RedNeuronal"""
def __init__(self):
super(RedNeuronal, self).__init__()
self.clases = []
self.columnas = []
self.nColumnas = 0
self.nInstaces = 0
self.nClases = 0
self.capaEntrada = []
self.pesosCapaOculta = []
self.pesosCapaSalida = []
self.neuronasCapaOculta = 20
self.bipolar = True
self.nEpocas = 1000
self.alpha = 0.1
self.porcentaje_validacion = 0.2
self.debug = False
self.debugFile = None
self.debugFileName = "debugMLP.txt"
self.activo_control_fin = False
self.conjuntoValidacion = None
        self.lastError = 100
self.errorCuadraticoMedio_old = 0.00000001
"""parametros es un string de configuracion para el clasificador"""
"""para KNN por ejemplo k=11, para una red reuronal,numero de capas
nl=2... cada clasificador se puede preguntar con getCapabilities()"""
"""nNeuronas=10"""
"""alpha=0.1"""
"""nEpocas=1000"""
def setParameters(self, parametros):
if '=' in parametros:
cadenas = parametros.split('=')
if cadenas[0] == 'nNeuronas':
self.neuronasCapaOculta = int(cadenas[1])
elif cadenas[0] == 'alpha':
self.alpha = float(cadenas[1])
elif cadenas[0] == 'nEpocas':
self.nEpocas = int(cadenas[1])
elif cadenas[0] == 'debugFile':
self.debugFileName = cadenas[1]
else:
                raise Exception('setParameters', 'Error en la introduccion de parametros')
else:
raise Exception('setParameters', 'Error en la introduccion de parametros')
"""data es un objeto de tipo Instances"""
def buildClassifier(self, data, test=None):
self.clases = list(data.getClases())
self.nClases = len(self.clases)
self.columnas = list(data.getColumnasList())
self.nColumnas = len(self.columnas)
if data.getNumeroInstances() >= 100:
self.activo_control_fin = True
particionado = DivisionPorcentual()
particionado.setPorcentajeTrain(0.8)
particion = particionado.generaParticionesProporcional(data)
data = particion.getTrain()
self.conjuntoValidacion = particion.getTest()
self.nInstaces = data.getNumeroInstances()
        # create the input-layer neurons
self.capaEntrada = [1 for x in range(0, self.nColumnas + 1)]
#self.capaEntrada = map((lambda x: 1), range(0, self.nColumnas + 1))
        # initialize the weights randomly
        # for each hidden-layer neuron
for indNeurona in range(0, self.neuronasCapaOculta):
            # for each input-layer neuron
self.pesosCapaOculta.append([])
self.pesosCapaOculta[indNeurona] = map((lambda x: (random.random() - 0.5)), range(0, self.nColumnas + 1))
        # initialize the output-layer weights
for indNeurona in range(0, self.nClases):
self.pesosCapaSalida.append([])
self.pesosCapaSalida[indNeurona] = map((lambda x: (random.random() - 0.5)), range(0, self.neuronasCapaOculta + 1))
self.NguyenWidrow()
        # build all the target vectors
vectoresObjetivos = {}
for instancia in data.getListInstances():
vectoresObjetivos[instancia] = self.generaVectorObjetivoSalida(instancia.getClase())
instancias = data.getListInstances()
cuadratico_epoca_anterior = float("inf")
        # header for the debug file
if self.debug:
self.debugFile.write("Época\t")
if test is not None:
self.debugFile.write("Error de test\t")
self.debugFile.write("Error train\tArray de pesos\n")
cuadratico_anterior = 1000
        # step 1
for epoca in range(0, self.nEpocas):
cuadratico_epoca = 0
            # step 2: for each instance in train
for instancia in instancias:
                # *********** Feedforward: start **********************************
                # step 3: input values
for indNeurona in range(1, self.nColumnas + 1):
self.capaEntrada[indNeurona] = instancia.getElementAtPos(indNeurona - 1)
                # step 4: hidden-layer outputs, vector Z
                # z0 is always 1
salidaCapaOculta = [1]
                # compute one output per hidden neuron
for indNeurona in range(0, self.neuronasCapaOculta):
suma = 0
for indNeuronaEntr in range(0, self.nColumnas + 1):
suma += (self.pesosCapaOculta[indNeurona][indNeuronaEntr] * self.capaEntrada[indNeuronaEntr])
                    # apply the sigmoid to the sum; this gives the neuron's output
if self.bipolar == False:
#f1
if suma > 200:
salidaCapaOculta.append(1)
elif suma < -40:
salidaCapaOculta.append(0)
else:
salidaCapaOculta.append(1.0/(1.0 + math.exp( - suma)))
else:
#f2
if suma > 200:
salidaCapaOculta.append(1)
elif suma < -200:
salidaCapaOculta.append(-1)
else:
salidaCapaOculta.append((2.0/(1.0 + math.exp( - suma))) - 1.0)
                # step 5: compute the output-layer responses, vector Y
salidaFinal = []
for indNeurona in range(0, self.nClases):
suma = 0
for indNeuronaOculta in range(0, self.neuronasCapaOculta + 1):
suma += (self.pesosCapaSalida[indNeurona][indNeuronaOculta] * salidaCapaOculta[indNeuronaOculta])
                    # apply the sigmoid to the sum; this gives the neuron's output
if self.bipolar == False:
#f1
if suma > 200:
salidaFinal.append(1)
elif suma < -40:
salidaFinal.append(0)
else:
salidaFinal.append(1.0/(1.0 + math.exp( - suma)))
else:
#f2
if suma > 200:
salidaFinal.append(1)
elif suma < -200:
salidaFinal.append(-1)
else:
salidaFinal.append((2.0/(1.0 + math.exp( - suma))) - 1.0)
                # *********** Feedforward: end **********************************
                # mean squared error computation
cuadratico_instancia = reduce(add, map((lambda x, y: (x - y)**2), vectoresObjetivos[instancia], salidaFinal))
cuadratico_epoca += cuadratico_instancia
                # *********** Error backpropagation: start *******************
                # step 6
if self.bipolar == False:
#Tk - Yk * f1`(Yin)
deltaMinusculaK = map((lambda x, y: (x - y) * (y * (1.0 - y))), vectoresObjetivos[instancia], salidaFinal)
else:
#Tk - Yk * f2`(Yin)
deltaMinusculaK = map((lambda x, y: (x - y) * (0.5 * ((1 + y) * (1.0 - y)))), vectoresObjetivos[instancia], salidaFinal)
deltaMayusculaJK = []
for indNeuronaSalida in range(0, self.nClases):
                    # compute capital delta (the output-layer weight updates)
deltaMayusculaJK.append([])
aux = deltaMinusculaK[indNeuronaSalida] * self.alpha
deltaMayusculaJK[indNeuronaSalida] = map((lambda x: aux*x), salidaCapaOculta)
                # step 7: delta_in_j = sum over k of delta_k * w_jk
                deltaMinInj = [0 for x in range(0, self.neuronasCapaOculta)]
                for indNeurona in range(0, self.nClases):
                    for indNeuronaOculta in range(1, self.neuronasCapaOculta + 1):
                        deltaMinInj[indNeuronaOculta - 1] += self.pesosCapaSalida[indNeurona][indNeuronaOculta] * deltaMinusculaK[indNeurona]
deltaMinusculaJ = []
if self.bipolar == False:
#f`1
deltaMinusculaJ = map((lambda x, y: x * (y * (1.0 - y))),deltaMinInj, salidaCapaOculta[1:])
else:
#f`2
deltaMinusculaJ = map((lambda x, y: x *(0.5* ((1.0 + y) * (1.0 - y)))),deltaMinInj, salidaCapaOculta[1:])
deltaMayusculaIJ = []
for indNeuronaOculta in range(0, self.neuronasCapaOculta):
deltaMayusculaIJ.append([])
aux = self.alpha*deltaMinusculaJ[indNeuronaOculta]
deltaMayusculaIJ[indNeuronaOculta] = map((lambda x: aux*x), self.capaEntrada)
                # step 8
                # update weights and biases
for indiceClase in range(0, self.nClases):
self.pesosCapaSalida[indiceClase] = map(add, self.pesosCapaSalida[indiceClase], deltaMayusculaJK[indiceClase])
for indiceNOculta in range(0, self.neuronasCapaOculta):
self.pesosCapaOculta[indiceNOculta] = map(add, self.pesosCapaOculta[indiceNOculta] ,deltaMayusculaIJ[indiceNOculta])
            # check the stopping condition
            # end of the instance loop
cuadratico_epoca = cuadratico_epoca/float(self.nInstaces * self.nClases)
if self.debug == True:
if test is None:
self.debugFile.write(str(epoca) + '\t' + str(cuadratico_epoca) + '\t') #+ str(self.getErrorFromInstances(data)) + '\t')
else:
self.debugFile.write(str(epoca) + '\t' + str(self.getErrorFromInstances(test)) + '\t' + str(self.getErrorFromInstances(data)) + '\t')
#for indiceNOculta in range(0, self.neuronasCapaOculta):
# map(lambda x: self.debugFile.write(str(x) + '\t'), self.pesosCapaOculta[indiceNOculta])
#for indiceClase in range(0, self.nClases):
# map(lambda x: self.debugFile.write(str(x) + '\t'), self.pesosCapaSalida[indiceClase])
self.debugFile.write('\n')
difErrCuadratico = abs((cuadratico_epoca - self.errorCuadraticoMedio_old)/self.errorCuadraticoMedio_old)
#print difErrCuadratico
if difErrCuadratico < 0.00000001:
return
self.errorCuadraticoMedio_old = cuadratico_epoca
if self.activo_control_fin == True and epoca % 1 == 0:
#error = self.getECMFromInstances(self.conjuntoValidacion)
#print self.lastError
#print error
error = self.getErrorFromInstances(self.conjuntoValidacion)
if self.lastError < error:
break
else:
#print str(epoca)+ '\t' + str(error)
self.lastError = error
#private
def generaVectorObjetivoSalida(self, claseIn):
vector = []
for clase in self.clases:
if clase == claseIn:
vector.append(1)
else:
if self.bipolar == False:
vector.append(0)
else:
vector.append(-1)
return vector
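    # For example (illustrative): with clases = [0, 1, 2] and claseIn = 1, this
    # returns [-1, 1, -1] in bipolar mode and [0, 1, 0] otherwise.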
def getErrorFromInstances(self, instances):
error = 0.0
for instance in instances.getListInstances():
clase = instance.getClase()
prediccion = self.classifyInstance(instance)
if prediccion != clase:
error += 1.0
return error / float(instances.getNumeroInstances())
def getECMFromInstances(self, instances):
cuadratico_epoca = 0.0
for instance in instances.getListInstances():
salidaFinal = self.computeInstance(instance)
            vectorObjetivo = self.generaVectorObjetivoSalida(instance.getClase())
cuadratico_instancia = reduce(add, map((lambda x, y: (x - y)**2), vectorObjetivo, salidaFinal))
cuadratico_epoca += cuadratico_instancia
return cuadratico_epoca/float(instances.getNumeroInstances())
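    # Nguyen-Widrow initialization: with p hidden neurons and n inputs,
    # beta = 0.7 * p**(1.0/n); each hidden neuron's weight vector is rescaled
    # to norm beta and its bias is drawn uniformly from [-beta, beta], which
    # spreads the sigmoid active regions across the input space.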
def NguyenWidrow(self):
beta = 0.7 * math.pow(self.neuronasCapaOculta, 1.0 / self.nColumnas)
for j in range(0, self.neuronasCapaOculta):
modulo = math.sqrt(reduce(add, map(lambda x: math.pow(x, 2), self.pesosCapaOculta[j])))
preCalculo = beta / modulo
self.pesosCapaOculta[j][1:] = map(lambda x: x * preCalculo, self.pesosCapaOculta[j][1:])
self.pesosCapaOculta[j][0] = random.uniform(-beta, beta)
"""se clasifica una sola instancia, retornando el vector de salida"""
def computeInstance(self, instancia):
        # *********** Feedforward: start **********************************
        # step 3: input values
for indNeurona in range(1, self.nColumnas + 1):
self.capaEntrada[indNeurona] = instancia.getElementAtPos(indNeurona - 1)
        # step 4: hidden-layer outputs, vector Z
        # z0 is always 1
salidaCapaOculta = [1]
        # compute one output per hidden neuron
for indNeurona in range(0, self.neuronasCapaOculta):
suma = 0
for indNeuronaEntr in range(0, self.nColumnas + 1):
suma += (self.pesosCapaOculta[indNeurona][indNeuronaEntr] * self.capaEntrada[indNeuronaEntr])
            # apply the sigmoid to the sum; this gives the neuron's output
if self.bipolar == False:
#f1
if suma > 200:
salidaCapaOculta.append(1)
elif suma < -200:
salidaCapaOculta.append(0)
else:
salidaCapaOculta.append(1.0/(1.0 + math.exp( - suma)))
else:
#f2
if suma > 200:
salidaCapaOculta.append(1)
elif suma < -200:
salidaCapaOculta.append(-1)
else:
salidaCapaOculta.append((2.0/(1.0 + math.exp( - suma))) - 1.0)
        # step 5: compute the output-layer responses, vector Y
salidaFinal = []
for indNeurona in range(0, self.nClases):
suma = 0
for indNeuronaOculta in range(0, self.neuronasCapaOculta + 1):
suma += (self.pesosCapaSalida[indNeurona][indNeuronaOculta] * salidaCapaOculta[indNeuronaOculta])
            # apply the sigmoid to the sum; this gives the neuron's output
if self.bipolar == False:
#f1
if suma > 200:
salidaFinal.append(1)
elif suma < -200:
salidaFinal.append(0)
else:
salidaFinal.append(1.0/(1.0 + math.exp( - suma)))
else:
#f2
if suma > 200:
salidaFinal.append(1)
elif suma < -200:
salidaFinal.append(-1)
else:
salidaFinal.append((2.0/(1.0 + math.exp( - suma))) - 1.0)
        # *********** Feedforward: end **********************************
return salidaFinal
"""se clasifica una sola instancia, retornando la clase, int"""
def classifyInstance(self, instancia):
salidaFinal = self.computeInstance(instancia)
#print salidaFinal
mejorClase = None
mejorProb = -1.0
for i in range(0, self.nClases):
if salidaFinal[i] > mejorProb:
mejorClase = self.clases[i]
mejorProb = salidaFinal[i]
return mejorClase
"""retorna un String JSON para que el Clasificador se pueda guardar en un fichero o donde sea necesario"""
def saveClassifierToJSON(self):
redJSON = {}
redJSON['n_neuronas'] = self.neuronasCapaOculta
redJSON['n_entradas'] = self.nColumnas
redJSON['n_clases'] = self.nClases
redJSON['clases'] = list(self.clases)
redJSON['pesos_entrada_oculta'] = self.pesosCapaOculta
redJSON['pesos_oculta_salida'] = self.pesosCapaSalida
return redJSON
def restoreClassifierFromJSON(self, jsonObj):
self.neuronasCapaOculta = jsonObj['n_neuronas']
self.nColumnas = jsonObj['n_entradas']
self.nClases = jsonObj['n_clases']
self.clases = jsonObj['clases']
self.pesosCapaOculta = jsonObj['pesos_entrada_oculta']
self.pesosCapaSalida = jsonObj['pesos_oculta_salida']
        # create the input-layer neurons
for indNeurona in range(0, self.nColumnas + 1):
self.capaEntrada.append(1)
"""retorna un string con el funcionamiento del Clasificador"""
def getCapabilities(self):
        cadena = 'You can pass these parameters:\n'
cadena += """\tnNeuronas=10\n"""
cadena += """\talpha=0.1\n"""
cadena += """\tnEpocas=1000\n"""
return cadena
"""Hace que el clasificador entre en modo debug o no"""
def setDebug(self, value):
self.debug = value
if self.debug == True:
self.debugFile = open(self.debugFileName, 'w')
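# Usage sketch (illustrative; `data` is an Instances object as used above):
#   red = RedNeuronal()
#   red.setParameters('nNeuronas=10')
#   red.buildClassifier(data)
#   print(red.classifyInstance(data.getListInstances()[0]))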
|
[
"garnachod@gmail.com"
] |
garnachod@gmail.com
|
0b817241f86faa1169b9f1b0a576bb6ce435eed0
|
85d27f8dc49e23042c5f1d0ff91af216e05f1d16
|
/dailyclock/urls.py
|
823da2ae1b107e7940cdac041cbe7ec505018f3a
|
[] |
no_license
|
xonoer/dailyclock
|
64b6a81a989d3031770ad124c093ecdc3785c3d4
|
95cedadb57594531ec99925003fc535fc2ec6835
|
refs/heads/master
| 2022-06-02T00:38:39.538439
| 2020-04-28T14:26:25
| 2020-04-28T14:26:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 828
|
py
|
"""dailyclock URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from . import views
urlpatterns = [
url(r'^admin/', admin.site.urls),
    url(r'^dailyblock$', views.index_view),
]
|
[
"598869349@qq.com"
] |
598869349@qq.com
|
9213e31b0d9fad94b2e271cea5df6e8158aeb156
|
ffea4cff6d80529036bf0edb754db2daf6b47970
|
/homepage/views.py
|
6ef73c8355505747dda163bba9b8c96a245ab2ab
|
[] |
no_license
|
creechcorbin/cowsay_clone
|
c942369916a992489762061d95b620bac99730cd
|
409fac4933fdf46d13ebc6cfeab78ce7d45a2665
|
refs/heads/master
| 2022-12-10T12:35:57.371628
| 2020-08-18T20:29:44
| 2020-08-18T20:29:44
| 288,557,798
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 957
|
py
|
from django.shortcuts import render
from homepage.forms import InputForm
from homepage.models import UserInput
import subprocess
# used https://www.youtube.com/watch?v=2Fp1N6dof0Y as reference
# got help from Drew Radcliff, and Matt SE Facilitator
def index_view(request):
if request.method == 'POST':
form = InputForm(request.POST)
if form.is_valid():
data = form.cleaned_data
output = subprocess.run(['cowsay', '{}'.format(data.get('text'))], capture_output=True, text=True)
UserInput.objects.create(
text = data.get('text')
)
form = InputForm()
return render(request, 'index.html', {'form': form, 'output': output.stdout})
form = InputForm()
return render(request, 'index.html', {'form': form})
def most_recent(request):
most_recent = UserInput.objects.all()
return render(request, 'history.html', {'most_recent': most_recent})
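# Note: subprocess.run with capture_output=True and text=True returns a
# CompletedProcess whose .stdout holds cowsay's ASCII-art output as a str.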
|
[
"creechcorbin@gmail.com"
] |
creechcorbin@gmail.com
|
4aa889232c5f1fe5109acdb84c4b05f5d0bd1900
|
3722b68c3e1e2c7353da8ee8fe4d7ac97bac91d3
|
/commands/check.py
|
ff89f3f6ff9bc4a54207c3c25052f67aa676f4aa
|
[] |
no_license
|
boltspeed1904/vklshpion
|
9398d23a84d95e584f380f4e9a95070ce138d345
|
b7bb83941b1251b35e9e3566fbcf750da45a2f0b
|
refs/heads/main
| 2023-06-23T17:55:19.608459
| 2021-07-20T17:00:17
| 2021-07-20T17:00:17
| 387,859,144
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,215
|
py
|
from vkbottle.bot import Message, Blueprint
from vbml import Patcher, Pattern
from cfgs.cfg import bot
from random import randint
from time import time
from datetime import datetime
from .Register import OnlyMe
from asyncio import sleep, get_event_loop
from os.path import exists as ex
bp, pt = Blueprint(), Patcher()
user_token = 'https://oauth.vk.com/blank.html#access_token=f52572f16d8d815a5e9a925cf63c652b5fa7d14d0954aeef8c0797feeb71566e93cac68afb8f9834e1439&expires_in=0&user_id=311288698'  # a user token can be obtained at vkhost.github.io (get the token via Kate Mobile)
@bp.on.message_handler(text=['Инфо', 'Инфо <id_input>'], lower=True)
async def information(ans: Message, id_input=None, gipers:str=''):
path = pt.check(gipers, Pattern('[id<m_id>|<name>]'))
user_id = path['m_id'] if path != None else ans.reply_message.from_id if ans.reply_message else ans.fwd_messages[0].from_id if ans.fwd_messages else None
if "vk.com/" in id_input:
user_id = (await bp.api.users.get(user_ids=ans.text.split("/")[-1]))[0].id
if not user_id:
await ans(f'⚠ Принимаются только ссылки.')
return
la = str(await bot.request.get(f'https://vk.com/foaf.php?id={user_id}', read_content=True))
datareg = la.split('<ya:created dc:date="')[-1].split('"/>')[0][:-6].split('T')
user_get = (await bot.request.get(f'https://api.vk.com/method/users.get?access_token={user_token}&v=5.123&user_ids={user_id}&fields=followers_count,bdate,city,country'))['response'][0]
if user_get['is_closed']:
return f'''
📢 Информация о [id{user_id}|Пользователе]:
🆔 Пользователя: {user_id}
🚫 У данной страницы профиль закрыт!
📍 По команде <<Услуги>> можно просмотреть, что может наш Бот.
💌 Чтобы у Вас работали все функции без ограничений, пополните баланс <<Пополнить>> и оплатите все услуги нашего бота <<Оплатить>>.
'''
else:
friends_count = (await bot.request.get(f'https://api.vk.com/method/friends.get?access_token={user_token}&v=5.123&user_id={user_id}&count=1'))['response']['count']
await ans(f"""
📢 Информация о [id{user_id}|Пользователе]:
🆔 Пользователя: {user_id}
🚶 Подписчиков: {user_get['followers_count']}
{f'🚶 Друзей: {friends_count}' if friends_count else ''}
{f"✈ Страна: {user_get['country']['title']}" if 'country' in user_get else ''}
{f"🏡 Город: {user_get['city']['title']}" if 'city' in user_get else ''}
{f"📅 Дата рождения: {user_get['bdate']}" if 'bdate' in user_get else ''}
📝 Дата регистрации: {datareg[0]} | {datareg[1]}
💌 Кого лайкает пользователь: ?
🏆 Важные друзья пользователя: ?
👥 Скрытые друзья пользователя: ?
💰 Цена всех функций: 70.00р
📍 По команде <<Услуги>> можно просмотреть, что может наш Бот.
Оплатить 👉 vk.cc/aCP4SO
""", disable_mentions=1)
last = randint(0, 1)
show = randint(0, 3)
LA = f"{user_get['first_name']} {user_get['last_name']}"
if last == 0:
if show == 0:
LA += " скрывает от Вас 4-х девушек и 5-х парней 😳"
elif show == 1:
LA += " скрывает от Вас 1 девушку и 2-х парней 😳"
elif show == 2:
LA += " скрывает от Вас 3-х девушек и 1 парня 😳"
elif show == 3:
LA += " скрывает от Вас 5-х девушек и 7-мь парней 😳"
if last == 1:
if show == 0:
LA += " скрывает от Вас 10 девушек и 2-х парней 😳"
elif show == 1:
LA += " скрывает от Вас 8 девушек и 4-х парней 😳"
elif show == 2:
LA += " скрывает от Вас 1 девушку и 3-х парней 😳"
elif show == 3:
LA += " скрывает от Вас 3 девушки и 6-х парней 😳"
return LA
|
[
"noreply@github.com"
] |
boltspeed1904.noreply@github.com
|