| max_stars_repo_path (string, 3–269 chars) | max_stars_repo_name (string, 4–119 chars) | max_stars_count (int64, 0–191k) | id (string, 1–7 chars) | content (string, 6–1.05M chars) | score (float64, 0.23–5.13) | int_score (int64, 0–5) |
|---|---|---|---|---|---|---|
core/intents/welcome.py
|
p-panagiotis/venom-virtual-assistant
| 0
|
12785451
|
from datetime import datetime
from core.modules.output_mod import output
def greet(master):
hour = datetime.now().hour
if 5 <= hour < 12:
output(f"Good morning {master}")
elif 12 <= hour < 18:
output(f"Good afternoon {master}")
else:
output(f"Good evening {master}")
| 3.484375
| 3
|
src/tf_components/bijector/Bijector.py
|
YorkUCVIL/Wavelet-Flow
| 59
|
12785452
|
<filename>src/tf_components/bijector/Bijector.py<gh_stars>10-100
import tensorflow as tf
from tf_components.Layer import *
class Bijector(Layer):
def __init__(self,collection=None,name="Layer"):
super().__init__(collection=collection,name=name)
def __call__(self,*args,**kwargs):
'''
default to call forward
'''
return self.forward(*args,**kwargs)
def forward(self):
'''
implement forward pass
'''
raise NotImplementedError
def inverse(self):
'''
implement inverse pass
'''
raise NotImplementedError
| 2.375
| 2
|
mseg_semantic/utils/normalization_utils.py
|
weblucas/mseg-semantic
| 391
|
12785453
|
<reponame>weblucas/mseg-semantic<gh_stars>100-1000
#!/usr/bin/python3
import numpy as np
import torch
from typing import Optional, Tuple
def get_imagenet_mean_std() -> Tuple[Tuple[float,float,float], Tuple[float,float,float]]:
""" See use here in Pytorch ImageNet script:
https://github.com/pytorch/examples/blob/master/imagenet/main.py#L197
Returns:
- mean: Tuple[float,float,float],
- std: Tuple[float,float,float]
"""
value_scale = 255
mean = [0.485, 0.456, 0.406]
mean = [item * value_scale for item in mean]
std = [0.229, 0.224, 0.225]
std = [item * value_scale for item in std]
return mean, std
def normalize_img( input: torch.Tensor,
mean: Tuple[float,float,float],
std: Optional[Tuple[float,float,float]] = None):
""" Pass in by reference Torch tensor, and normalize its values.
Args:
- input: Torch tensor of shape (3,M,N), must be in this order, and
of type float (necessary).
- mean: mean values for each RGB channel
- std: standard deviation values for each RGB channel
Returns:
- None
"""
if std is None:
for t, m in zip(input, mean):
t.sub_(m)
else:
for t, m, s in zip(input, mean, std):
t.sub_(m).div_(s)
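# Minimal usage sketch (assumes torch is installed); the tensor below is a
# hypothetical stand-in for a real (3, H, W) float image in the 0-255 range.
if __name__ == '__main__':
    dummy_img = torch.rand(3, 224, 224) * 255
    mean, std = get_imagenet_mean_std()
    normalize_img(dummy_img, mean, std)  # modifies dummy_img in place, returns None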
| 2.65625
| 3
|
reroute.py
|
ItsCinnabar/Novoserve-Auto-Reroute
| 1
|
12785454
|
<reponame>ItsCinnabar/Novoserve-Auto-Reroute
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import time
import pyderman
#Install chrome driver
driver_path = pyderman.install(browser=pyderman.chrome,overwrite=True,verbose=False)
options=Options()
options.headless=True
with webdriver.Chrome(executable_path=driver_path, options=options) as driver:
url = "http://lg.novoserve.com/rerouteintel.php"
driver.get(url)
passed=False
for _ in range(15):
if b"The results are in!" in driver.page_source.encode("utf-8"):
passed = True
break
else:
time.sleep(60)
if not passed:
raise RuntimeError("Failed to set routes")
| 2.609375
| 3
|
src/waldur_openstack/openstack/migrations/0005_ipmapping.py
|
opennode/waldur-openstack
| 1
|
12785455
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import waldur_core.core.fields
class Migration(migrations.Migration):
dependencies = [
('structure', '0035_settings_tags_and_scope'),
('openstack', '0004_dr_and_volume_backups'),
]
operations = [
migrations.CreateModel(
name='IpMapping',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('uuid', waldur_core.core.fields.UUIDField()),
('public_ip', models.GenericIPAddressField(protocol='IPv4')),
('private_ip', models.GenericIPAddressField(protocol='IPv4')),
('project', models.ForeignKey(related_name='+', to='structure.Project')),
],
options={
'abstract': False,
},
),
]
| 1.804688
| 2
|
signal_filter/filter_mpi_version_no_mpi.py
|
pycroscopy/distUSID
| 1
|
12785456
|
import h5py
from signal_filter.fft import LowPassFilter
from signal_filter.mpi_signal_filter import SignalFilter
h5_path = 'giv_raw.h5'
h5_f = h5py.File(h5_path, mode='r+')
h5_grp = h5_f['Measurement_000/Channel_000']
h5_main = h5_grp['Raw_Data']
samp_rate = h5_grp.attrs['IO_samp_rate_[Hz]']
num_spectral_pts = h5_main.shape[1]
frequency_filters = [LowPassFilter(num_spectral_pts, samp_rate, 10E+3)]
noise_tol = 1E-6
sig_filt = SignalFilter(h5_main, frequency_filters=frequency_filters,
noise_threshold=noise_tol, write_filtered=True,
write_condensed=False, num_pix=1, verbose=True)
h5_filt_grp = sig_filt.compute()
# VERIFICATION here:
row_ind = 20
actual_line = h5_filt_grp['Filtered_Data'][row_ind]
h5_ref_path = '/home/syz/giv/pzt_nanocap_6_just_translation_filt_resh_copy.h5'
h5_ref_file = h5py.File(h5_ref_path, mode='r')
h5_ref_grp = h5_ref_file[h5_filt_grp.name]
ref_line = h5_ref_grp['Filtered_Data'][row_ind]
import numpy as np
print('Actual line close to reference:')
print(np.max(np.abs(actual_line - ref_line)))
print(np.allclose(actual_line, ref_line))
"""
single_AO = h5_grp['Spectroscopic_Values'][0, :500]
import numpy as np
row_ind = 20
# read data for a specific scan line
raw_line_resp = h5_main[row_ind]
# break this up into pixels:
raw_line_mat = np.reshape(raw_line_resp, (-1, single_AO.size))
filt_line_resp = h5_filt_grp['Filtered_Data'][row_ind]
filt_line_mat = np.reshape(filt_line_resp, (-1, single_AO.size))
import pyUSID as usid
fig, axes = usid.plot_utils.plot_curves(single_AO, [raw_line_mat, filt_line_mat], use_rainbow_plots=False, x_label='Bias (V)',
y_label='Current (nA)', subtitle_prefix='Pixel', title=None, num_plots=9)
fig.savefig('result.png', format='png', )
savefig(os.path.join(other_figures_folder, file_name + '.png'), format='png', dpi=300)
"""
h5_f.close()
| 2.375
| 2
|
work/spiders/year_2018/month_9/date_13/test2.py
|
yorunw/runscrapider
| 0
|
12785457
|
# -*- coding: utf-8 -*-
import scrapy
from scrapy.http import Request
from scrapy_splash import SplashRequest
from work.items import ShopItem
import re
class Test2Spider(scrapy.Spider):
name = 'test2'
allowed_domains = ['www.countryattire.com']
start_urls = ['https://www.countryattire.com/']
custom_settings = {
'MYSQL_TABLE': 'test',
'ITEM_PIPELINES': {}
}
def parse(self, response):
nav = response.xpath('//ul[@id="pronav"]/li')[2:8]
nav.pop(3)
nav_level_1_list = nav
for nav_level_1 in nav_level_1_list:
cat1 = nav_level_1.xpath('./a/span/text()').get().strip()
nav_level_2_list = nav_level_1.xpath('.//div[@id="menu"]/div')[1:]
for nav_level_2 in nav_level_2_list:
c2 = nav_level_2.xpath('./div/div/span/text()').get()
if c2 is None:
c2 = nav_level_2.xpath('./div/div/span/a/text()').get()
if c2 is None:
c2 = nav_level_2.xpath('./div/div/span/span/text()').get()
cat2 = c2.strip()
nav_level_3_list = nav_level_2.xpath('./div/span')
if not nav_level_3_list:
nav_level_2_url = nav_level_2.xpath('./a/@href').get()
self.logger.info(f'{cat1}---{cat2}')
meta = {'cat1': cat1, 'cat2': cat2}
yield SplashRequest(response.urljoin(nav_level_2_url), self.parse_product_url, meta=meta)
for nav_level_3 in nav_level_3_list:
cat3 = nav_level_3.xpath('./a/text()').get().strip()
nav_level_3_url = nav_level_3.xpath('./a/@href').get()
self.logger.info(f'{cat1}---{cat2}---{cat3}')
meta = {'cat1': cat1, 'cat2': cat2, 'cat3': cat3}
yield SplashRequest(response.urljoin(nav_level_3_url), self.parse_product_url, meta=meta)
def parse_product_url(self, response):
product_list = response.xpath('//div[@class="products-grid"]/div')
for product in product_list:
product_url = product.xpath('./a/@href').get()
self.logger.info('product url is %s' % product_url)
# yield SplashRequest(response.urljoin(product_url), self.parse_product_info, meta=response.meta)
next_page = response.xpath('//a[@class="next i-next"]/@href').get()
# self.logger.info('next page is %s' % next_page)
if next_page is not None:
yield SplashRequest(response.urljoin(next_page), self.parse_product_url, meta=response.meta)
def parse_product_info(self, response):
item = ShopItem()
item['PageUrl'] = response.url
item['cat1'] = response.meta['cat1']
item['cat2'] = response.meta['cat2']
item['cat3'] = response.meta['cat3'] or ''
item['brand'] = response.xpath('').get().strip()
item['gender'] = item['cat1']
item['producttype'] = item['cat2']
item['title'] = response.xpath('').get()
item['price'] = response.xpath('').get()
item['short_content'] = ''
item['content'] = response.xpath('').get()
pictures = response.xpath('').getall()
picture = response.xpath('').getall()
item['pictures'] = pictures or picture
item['color'] = ''
item['size'] = response.xpath('').getall()
yield item
| 2.59375
| 3
|
semester/migrations/0001_initial.py
|
aashutoshrathi/Student-Lifecycle-Management
| 9
|
12785458
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-10-08 18:57
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Course',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, max_length=30, null=True)),
('code', models.CharField(blank=True, max_length=6, null=True)),
('days_given', models.IntegerField(blank=True, null=True)),
('ref_books', models.TextField(blank=True, max_length=500, null=True)),
('is_elective', models.BooleanField(default=False)),
('credits', models.IntegerField(blank=True, default=0, null=True)),
('classification', models.CharField(choices=[(b'Theory', b'Theory'), (b'Lab', b'LAB')], default=b'th', max_length=6, null=True)),
],
options={
'verbose_name': 'Course',
'verbose_name_plural': 'Courses',
},
),
migrations.CreateModel(
name='Semester',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, choices=[(b'I', b'Semester I'), (b'II', b'Semester II'), (b'III', b'Semester III'), (b'IV', b'Semester IV'), (b'V', b'Semester V'), (b'VI', b'Semester VI'), (b'VII', b'Semester VII'), (b'VIII', b'Semester VIII')], default=b'Semester I', max_length=15, null=True)),
('spi', models.CharField(blank=True, max_length=10, null=True)),
],
options={
'ordering': ('name',),
'verbose_name': 'Semester',
'verbose_name_plural': 'Semesters',
},
),
migrations.AddField(
model_name='course',
name='semester',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='semester.Semester'),
),
]
| 1.648438
| 2
|
end_to_end/glove_single.py
|
Bharathgc/Coreference-resolution
| 1
|
12785459
|
#import tf_glove
import os
import sys
import pickle
import subprocess
import re
import math
#pickle_path=sys.argv[1] #path to corpus.pkl file
pairs_path=sys.argv[1] #path to pairs folder
output_path=sys.argv[2] #path where output folder
'''
with open(pickle_path) as f:
corpus = pickle.load(f)
'''
#path=sys.argv[2]
#data = sys.argv[3]
'''
model = tf_glove.GloVeModel(embedding_size=30, context_size=1)
model.fit_to_corpus(corpus)
model.train(num_epochs=100)
'''
def glove_vector(c1,c2):
vector1=[0]*30
c1=re.sub(r'[^\w]', ' ', c1)
c2=re.sub(r'[^\w]', ' ', c2)
for si in c1:
vector1+=model.embedding_for(si)
vector2=[0]*30
for si in c2:
vector2+=model.embedding_for(si)
return vector1+vector2
def longestSubstringFinder(string1, string2):
answer = ""
len1, len2 = len(string1), len(string2)
for i in range(len1):
match = ""
for j in range(len2):
if (i + j < len1 and string1[i + j] == string2[j]):
match += string2[j]
else:
if (len(match) > len(answer)): answer = match
match = ""
return answer
def doeslastmatch(string1,string2): #Checks if the last word matches
    s1 = re.sub(r'[^\w]', ' ', string1)  # Removing symbols from the strings
    s2 = re.sub(r'[^\w]', ' ', string2)
    s1 = s1.split(" ")
    s2 = s2.split(" ")
    if s1[-1] == s2[-1]:
        return 1
    else:
        return 0
def doesfirstmatch(string1,string2): #Checks if the first word matches
    s1 = re.sub(r'[^\w]', ' ', string1)  # Removing symbols from the strings
    s2 = re.sub(r'[^\w]', ' ', string2)
    s1 = s1.split(" ")
    s2 = s2.split(" ")
    if s1[0] == s2[0]:
        return 1
    else:
        return 0
ctypedict={'coref pronoun': 0, 'coref person':1,'coref treatment':2,'coref problem':3,'coref test':4,'null':5}
def num_words_concept(string1):
s1=re.sub(r'[^\w]', ' ', string1)
words=s1.split(" ")
return len(words)
for i in os.listdir(pairs_path):
print i
f=open(pairs_path+"/"+i,'r')
if os.stat(pairs_path+"/"+i).st_size == 0:
continue
fp=open(output_path+'/feature'+i,'w+')
for line in f:
feature_vector=[]
pipe=line.split("|")
if pipe[-2]=="5":
continue
c1=pipe[0]
c2=pipe[2]
ctype=pipe[4]
c1line=pipe[1].split(" ")
c2line=pipe[3].split(" ")
c1ls=c1line[0].split(":")
c2ls=c2line[0].split(":")
c1le=c1line[1].split(":")
c2le=c2line[1].split(":")
#feature_vector=feature_vector+glove_vector(c1,c2)
common=longestSubstringFinder(c1,c2)
len_common=len(common) #Length of longest common substring of each concept
max_mention_length=max(len(c1),len(c2)) #Which of the concept mention is longer in length
part_common=max_mention_length-len_common
feature_vector.append(float(ctype))
feature_vector.append(len_common)
feature_vector.append(part_common)
feature_vector.append(doesfirstmatch(c1,c2))
feature_vector.append(doeslastmatch(c1,c2))
feature_vector.append(len(c1))
feature_vector.append(len(c2))
feature_vector.append(num_words_concept(c1))
feature_vector.append(num_words_concept(c2))
feature_vector.append(float(c1ls[0]))
feature_vector.append(float(c1ls[1]))
feature_vector.append(float(c1le[0]))
feature_vector.append(float(c1le[1]))
feature_vector.append(float(c2ls[0]))
feature_vector.append(float(c2ls[1]))
feature_vector.append(float(c2le[0]))
feature_vector.append(float(c2le[1]))
feature_vector.append(abs(float(c1ls[0])-float(c2ls[0])))
words_common=re.sub(r'[^\w]', ' ', common)
feature_vector.append(len(words_common))
feature_vector.append(num_words_concept(words_common))
lf=(float((num_words_concept(words_common)+1))/float((num_words_concept(c1)+num_words_concept(c2))))
feature_vector.append(float(num_words_concept(c1))/(float(num_words_concept(c1)+num_words_concept(c2))))
#feature_vector.append(math.log(lf))
if(c1==c2):
feature_vector.append(1)
else:
feature_vector.append(0)
feature_vector.append(lf)
feature_vector.append(c1)
feature_vector.append(c1line[0])
feature_vector.append(c1line[1])
feature_vector.append(c2)
feature_vector.append(c2line[0])
feature_vector.append(c2line[1])
#feature_vector.append(pipe[-1])
for feature in feature_vector:
fp.write("%s|" % feature)
fp.write('\n')
fp.close()
f.close()
'''
embed=[]
embedstring=[]
file = open(path, 'r')
vector=[0]*50
for line in file:
line=line.strip("\n")
s=line.split(" ")
count=0
for si in s:
if(si=='\n'):
continue
try:
vector+=model.embedding_for(si)
count+=1
except Exception as e:
print e
continue
if(count==0):
continue
#vector=vector/count
embed.append(vector)
embedstring.append(line)
#phrases.append(line)
#for p in phrases:
gfile=open(data+"path.glove","w")
for j in range(0,len(embed)):
for i in embed[j]:
gfile.write("%s,"%i)
gfile.write(embedstring[j]+"\n")
#gfile.close()
#print(embed[0])
'''
| 2.46875
| 2
|
Photoelectric.py
|
EroSkulled/PHY224-324
| 0
|
12785460
|
<reponame>EroSkulled/PHY224-324
import numpy as np
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt
import pandas as pd
def f(x, a, b):
return a * x + b
def error(ydata):
v_error = np.empty(len(ydata))
for i in range(len(ydata)):
v_error[i] = max(ydata[i] * 0.0010, 0.01)
return v_error
data = pd.read_csv(r"C:\Users\Walter\IdeaProjects\PHY224\Photoelectric\EXP1.csv", delimiter=',', skiprows=0)
plt.title('LED Color stopping Voltage vs. wave length')
plt.ylabel('Stop Voltage (V)')
plt.xlabel('Wave Length (nm)')
xdata1 = data["wavelength "]
ydata1 = data["V stop"]
plt.errorbar(xdata1, ydata1, label='Test 1', yerr=error(ydata1), linestyle='None', marker=".")
data = pd.read_csv(r"C:\Users\Walter\IdeaProjects\PHY224\Photoelectric\EXP2.csv", delimiter=',', skiprows=0)
xdata2 = data["wavelength"]
ydata2 = data["Vstop"]
plt.errorbar(xdata2, ydata2, label='Test 2', yerr=error(ydata2), linestyle='None', marker=".")
data = pd.read_csv(r"C:\Users\Walter\IdeaProjects\PHY224\Photoelectric\EXP3.csv", delimiter=',', skiprows=0)
xdata3 = data["wavelength"]
ydata3 = data["Vstop"]
plt.errorbar(xdata3, ydata3, label='Test 3', yerr=error(ydata3), linestyle='None', marker=".")
xdata = (xdata1 + xdata2 + xdata3) / 3
ydata = (ydata1 + ydata2 + ydata3) / 3
plt.plot(xdata, ydata, label='Average')
plt.legend()
plt.show()
# Computing frequency from wavelength
frequency = (3 * 10 ** 8) / xdata
# Estimated errors from equipment
v_error = np.empty(len(ydata))
for i in range(len(ydata)):
v_error[i] = max(ydata[i] * 0.0010, 0.01)
# Linear regression
p_opt, p_cov = curve_fit(f, frequency, ydata, (0, 0), v_error, True)
lin_output = f(frequency, p_opt[0], p_opt[1])
# Outputting Planck's Constant h (uncertainties are square roots of the covariance diagonal)
h = p_opt[0] * (1.6 * 10 ** (-19))
h_error = np.sqrt(p_cov[0, 0]) * (1.6 * 10 ** (-19))
print("Estimated Planck's Constant: ", h, '(J*s) +/-', h_error, '(J*s)')
# Outputting the Work Function
wf = -p_opt[1] * (1.6 * 10 ** (-19))
wf_error = np.sqrt(p_cov[1, 1]) * (1.6 * 10 ** (-19))
print('Estimated Work Function: ', wf, '(J) +/-', wf_error, '(J)')
# Outputting the cut-off frequency
f0 = -(1.6 * 10 ** (-19)) * p_opt[1] / h
f0_error = np.sqrt(p_cov[1, 1]) * (1.6 * 10 ** (-19)) / h
print('Estimated Cut-off Frequency: ', f0, '(Hz) +/-', f0_error, '(Hz)')
# Calculating chi squared
chi_sq = (1 / 2) * (np.sum(((ydata - lin_output) / v_error) ** 2))
print('Chi squared for linear regression: ', chi_sq)
plt.title('Curve Fit vs. Original Data')
plt.ylabel('Stop Voltage (V)')
plt.xlabel('Wave Length (nm)')
plt.errorbar(xdata, ydata, label='Average', yerr=v_error, linestyle='None', marker=".")
plt.plot(xdata, lin_output, label='Curve fit ')
plt.legend()
plt.show()
| 2.5
| 2
|
jsonmsgpack.py
|
JuncoJet/python-performance-tuning
| 3
|
12785461
|
import timeit,json
import msgpack
def test1():
global dic,data1
data1=json.dumps(dic)
return data1
def test2():
global dic,data2
data2=msgpack.packb(dic)
return data2
def test3():
global data1
return json.loads(data1)
def test4():
global data2
return msgpack.unpackb(data2)
times=1000*1000
data1,data2=None,None
dic={'id':1,'data':{'title':'h1','content':'abcdefg'*100,'list':[1,2,'3'*33,'4']}}
t1=timeit.Timer("test1()","from __main__ import test1")
print '1',t1.timeit(times)
t2=timeit.Timer("test2()","from __main__ import test2")
print '2',t2.timeit(times)
t3=timeit.Timer("test3()","from __main__ import test3")
print '3',t3.timeit(times)
t4=timeit.Timer("test4()","from __main__ import test4")
print '4',t4.timeit(times)
| 2.484375
| 2
|
ProgsByDataset/ArxivMAG/convert_mallet_to_lda.py
|
ashwath92/MastersThesis
| 5
|
12785462
|
<reponame>ashwath92/MastersThesis<filename>ProgsByDataset/ArxivMAG/convert_mallet_to_lda.py
import gensim
from gensim.models.wrappers.ldamallet import LdaMallet
ldamallet = LdaMallet.load('/home/ashwath/Programs/ArxivCS/LDA/ldamallet_arxiv.model')
lda = gensim.models.wrappers.ldamallet.malletmodel2ldamodel(ldamallet, gamma_threshold=0.001, iterations=50)
lda.save('lda_arxiv.model')
| 1.453125
| 1
|
salda_engine/personal/models.py
|
landges/salda
| 0
|
12785463
|
from django.db import models
import datetime
from django.contrib.auth.models import User
# Create your models here.
class Review(models.Model):
user = models.ForeignKey(User,blank=True,null=True,on_delete=models.CASCADE)
mark = models.IntegerField()
text = models.TextField()
created = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.user.username + str(self.mark)
| 2.453125
| 2
|
algorithms/dynamic_programming/longest_consecutive_subsequence.py
|
ruler30cm/python-ds
| 1,723
|
12785464
|
<filename>algorithms/dynamic_programming/longest_consecutive_subsequence.py
"""
Given an array of integers, find the length of the longest sub-sequence
such that elements in the subsequence are consecutive integers, the
consecutive numbers can be in any order.
The idea is to store all the elements in a set first. Then as we are iterating
over the array, we check two things -
1. a number x can be a starting number in a sequence if x-1 is not present in the
set. If this is the case, create a loop and check how many elements from x to x+j are
in the set
2. if x -1 is there in the set, do nothing as this number is not a starting element
and must have been considered in a different sequence
"""
def find_seq(arr, n):
s = set()
for num in arr:
s.add(num)
ans = 0
elements = []
for i in range(n):
temp = []
if arr[i] - 1 not in s:
j = arr[i]
while j in s:
temp.append(j)
j += 1
if j - arr[i] > ans:
ans = j - arr[i]
elements = temp.copy()
return ans, elements
arr = [36, 41, 56, 35, 44, 33, 34, 92, 43, 32, 42]
ans, elements = find_seq(arr, len(arr))
print('Length - ', ans)
print('Elements - ', elements)
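# For the sample array above, the longest consecutive run is 32, 33, 34, 35, 36,
# so the script prints:
#   Length -  5
#   Elements -  [32, 33, 34, 35, 36]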
| 4.125
| 4
|
primrose/templates/run_primrose.py
|
astro313/primrose
| 38
|
12785465
|
"""
Run a job: i.e. run a configuration file through the DAGRunner
"""
import argparse
import logging
import warnings
######################################
######################################
# Important:
#
# If your configuration uses custom node classes, be sure to set environment variable
# PRIMROSE_EXT_NODE_PACKAGE to the location of your package before running primrose.
# Example:
# ```
# export PRIMROSE_EXT_NODE_PACKAGE=src/mypackage
# python run_primrose.py --config_loc my_config.json
# ```
#
######################################
######################################
from primrose.configuration.configuration import Configuration
from primrose.dag_runner import DagRunner
from primrose.dag.config_layer_traverser import ConfigLayerTraverser
from primrose.dag.depth_first_traverser import DepthFirstTraverser
warnings.filterwarnings("ignore")
def parse_arguments():
"""
Parse command line arguments
Returns:
argument objects with flags as attributes
"""
parser = argparse.ArgumentParser()
parser.add_argument(
"--config_loc", help="Location of the configuration file", required=True
)
parser.add_argument(
"--is_dry_run",
help="do a dry run of the DAG which will validatre config and log which nodes would be run",
default=False,
type=lambda x: (str(x).lower() == "true"),
)
known_args, pipeline_args = parser.parse_known_args()
return known_args, pipeline_args
def main():
"""
Run a job: i.e. run a configuration file through the DAGRunner
"""
args, _ = parse_arguments()
logging.basicConfig(
format="%(asctime)s %(levelname)s %(filename)s %(funcName)s: %(message)s",
level=logging.INFO,
)
configuration = Configuration(config_location=args.config_loc)
DagRunner(configuration).run(dry_run=args.is_dry_run)
if __name__ == "__main__":
main()
| 2.671875
| 3
|
fndiff/fndiffer.py
|
ofi/fndiff
| 0
|
12785466
|
"""
fndiff.py -- Main class and function
See LICENSE for copyright details.
"""
import re
from .fnselection import FilenamesSelection, FilenamesSelectionError
class FilenamesDiffError(Exception):
def __init__(self, errString, args):
self.__errstring = errString
self.__args = str(args)
def __str__(self):
argstr = ""
if self.__args:
argstr = argstr.join(self.__args)
return "{}, arguments: {}".format(self.__errstring, argstr)
class FilenamesDiffer:
"""Main class of operation."""
def __init__(self, **kwargs):
try:
srcdir = kwargs['srcdir']
dstdir = kwargs['dstdir']
pattern = kwargs['pattern']
reflag = kwargs['reflag']
except KeyError as err:
raise FilenamesDiffError(
"Argument to constructor is missing (was: {}).".format(err),
kwargs)
try:
self.__src_selection = FilenamesSelection(srcdir, pattern, reflag)
self.__dst_selection = FilenamesSelection(dstdir, pattern, reflag)
except FilenamesSelectionError as err:
raise FilenamesDiffError(
"Error creating file selection (was: {}).".format(err),
kwargs)
if str(self.__src_selection) == str(self.__dst_selection):
raise FilenamesDiffError(
"Sorry, this version can't operate in one single direcory"
+ " with only one pattern.",
kwargs)
def not_in_target(self):
"""Return a list of Path objects, representing all files from source
directory, which are not represented in the destination directory
according to their pattern matches."""
result = []
try:
src_dict = self.__src_selection.eval()
dst_dict = self.__dst_selection.eval()
result = [ src_dict[x] for x in src_dict.keys()
if not x in dst_dict ]
except Exception as err:
raise FilenamesDiffError(
"Invalid comparison: {}.".format(err),
[self.__src_selection, self.__dst_selection])
return result
def filenames_diff(sdir, tdir, pat, rexflag):
""" Top level function to produce list of resulting file pathes as strings.
"""
result = []
differ = FilenamesDiffer(srcdir=sdir,
dstdir=tdir, pattern=pat, reflag=rexflag)
result = differ.not_in_target()
return result
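# Illustrative call (directories and pattern are hypothetical; the exact match
# semantics live in FilenamesSelection):
#   missing = filenames_diff('/data/src', '/data/dst', r'img_(\d+)\.tif', 0)
#   -> Path objects for source files with no counterpart in /data/dst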
| 2.8125
| 3
|
continuum/scenarios/base.py
|
lebrice/continuum
| 0
|
12785467
|
<filename>continuum/scenarios/base.py
import abc
from typing import Callable, List, Tuple, Union
import numpy as np
from torchvision import transforms
from continuum.datasets import _ContinuumDataset
from continuum.task_set import TaskSet
class _BaseCLLoader(abc.ABC):
"""Abstract loader.
DO NOT INSTANTIATE THIS CLASS.
:param cl_dataset: A Continuum dataset.
:param nb_tasks: The number of tasks to do.
:param train_transformations: The PyTorch transformations exclusive to the
train set.
:param common_transformations: The PyTorch transformations common to the
train set and the test set.
:param train: Boolean flag whether to use the train or test subset.
"""
def __init__(
self,
cl_dataset: _ContinuumDataset,
nb_tasks: int,
train_transformations: List[Callable] = None,
common_transformations: List[Callable] = None,
train=True
) -> None:
self.cl_dataset = cl_dataset
self._nb_tasks = nb_tasks
if train_transformations is None:
train_transformations = []
if common_transformations is None:
common_transformations = self.cl_dataset.transformations
if len(common_transformations) == 0:
self.train_trsf, self.test_trsf = None, None
else:
self.train_trsf = transforms.Compose(train_transformations + common_transformations)
self.test_trsf = transforms.Compose(common_transformations)
self.train = train
@abc.abstractmethod
def _setup(self, nb_tasks: int) -> int:
raise NotImplementedError
@property
def nb_classes(self) -> int:
"""Total number of classes in the whole continual setting."""
return len(np.unique(self.dataset[1])) # type: ignore
@property
def nb_tasks(self) -> int:
"""Number of tasks in the whole continual setting."""
return len(self)
def __len__(self) -> int:
"""Returns the number of tasks.
:return: Number of tasks.
"""
return self._nb_tasks
def __iter__(self):
"""Used for iterating through all tasks with the CLLoader in a for loop."""
self._counter = 0
return self
def __next__(self) -> TaskSet:
"""An iteration/task in the for loop."""
if self._counter >= len(self):
raise StopIteration
task = self[self._counter]
self._counter += 1
return task
def __getitem__(self, task_index: Union[int, slice]):
"""Returns a task by its unique index.
:param task_index: The unique index of a task. As for List, you can use
indexing between [0, len], negative indexing, or
even slices.
:return: A train PyTorch's Datasets.
"""
data = self._select_data_by_task(task_index)
return TaskSet(
*data,
self.train_trsf if self.train else self.test_trsf,
data_type=self.cl_dataset.data_type
)
def _select_data_by_task(self, task_index: Union[int, slice]):
"""Selects a subset of the whole data for a given task.
:param task_index: The unique index of a task. As for List, you can use
indexing between [0, len], negative indexing, or
even slices.
:return: A tuple of numpy array, the first item being the data and the
second the associated targets.
"""
x, y, t = self.dataset # type: ignore
if isinstance(task_index, slice):
start = task_index.start or 0
stop = task_index.stop or len(self) + 1
step = task_index.step or 1
task_indexes = list(range(start, stop, step))
task_indexes = [
t if t >= 0 else _handle_negative_indexes(t, len(self)) for t in task_indexes
]
indexes = np.where(np.isin(t, task_indexes))[0]
else:
if task_index < 0:
task_index = _handle_negative_indexes(task_index, len(self))
indexes = np.where(t == task_index)[0]
selected_x = x[indexes]
selected_y = y[indexes]
selected_t = t[indexes]
if self.cl_dataset.need_class_remapping: # TODO: to remove with TransformIncremental
# A remapping of the class ids is done to handle some special cases
# like PermutedMNIST or RotatedMNIST.
selected_y = self.cl_dataset.class_remapping(selected_y)
return selected_x, selected_y, selected_t
def _handle_negative_indexes(index: int, total_len: int) -> int:
while index < 0:
index += total_len
return index
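# Indexing sketch for a hypothetical concrete subclass instance `scenario`:
# scenario[0] returns the first task's TaskSet, scenario[-1] the last, and
# scenario[0:2] a single TaskSet holding the data of tasks 0 and 1, mirroring
# _select_data_by_task above.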
| 2.453125
| 2
|
test/mqtt_test.py
|
spbrogan/rvc2mqtt
| 6
|
12785468
|
"""
Unit tests for the mqtt support class
Copyright 2022 <NAME>
SPDX-License-Identifier: Apache-2.0
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import unittest
import context # add rvc2mqtt package to the python path using local reference
from rvc2mqtt.mqtt import *
## Can't figure out how to unit test this cleanly; it probably needs mocking, but since this class is tightly
## coupled with paho-mqtt it's unclear how useful that would be. Below is a hack to test it against a real MQTT server.
class device1(object):
def __init__(self, name: str, mqtt_support:MQTT_Support):
self.device_topic = mqtt_support.make_device_topic_root(name)
self.status_topic = mqtt_support.make_device_topic_string(name, None, True)
self.set_topic = mqtt_support.make_device_topic_string(name, None, False)
mqtt_support.client.publish(self.status_topic, "unknown", retain=True)
mqtt_support.register(self, self.set_topic, self.got_message)
self.mqtt = mqtt_support
def got_message(self, topic, payload):
print(f"hello from device1 {topic} --- {payload.decode('utf-8')}")
self.mqtt.client.publish(self.status_topic, payload, retain=True)
if __name__ == '__main__':
#unittest.main()
mqs = MqttInitalize(Test_MQTT_Support.MQTT_BRIDGE_SETTINGS)
mqs.client.loop_start()
d1 = device1("try1", mqs)
mqs.client.publish(d1.set_topic, "here", retain=False)
import time
time.sleep(5)
mqs.shutdown()
| 3.078125
| 3
|
cmstack/codegen/tabla/gendot.py
|
he-actlab/cdstack
| 1
|
12785469
|
<reponame>he-actlab/cdstack<filename>cmstack/codegen/tabla/gendot.py
from collections import deque
header = 'digraph G {' + '\n'
footer = '}' + '\n'
def gendot(dfg, cycle2id):
#dfg = copy.copy(importedDFG)
strList = []
strList.append(header)
bfs(dfg, strList)
# append rank here
rank = genrank(cycle2id)
strList.append(rank)
strList.append(footer)
dotCode = ''.join(strList)
return dotCode
def bfs(dfg, strList):
initnodes = dfg.get_nodes_in_cycle(0)
queue = deque(initnodes)
visitedList = set([])
idDict = {}
for node in initnodes:
idDict[node] = genlabel(node)
idDict['Sink'] = '"sink"'
while len(queue) > 0:
currNode = queue.popleft()
# Connecting currNode with children
left = idDict[currNode]
for child in currNode.children:
if child not in visitedList and child != 'Sink':
queue.append(child)
visitedList.add(child)
# Child node doesn't have operation label
if child not in idDict:
idDict[child] = genlabel(child)
right = idDict[child]
# flow is a line
flow = str.format('{} -> {};\n', left, right)
strList.append(flow)
visitedList.add(currNode)
def genlabel(node):
if node.pe is not None:
label = '{"' + str(node.id) + '" [label="' + node.op + ' ' + str(node.pe.id) +'"]' + '}'
else:
label = '{"' + str(node.id) + '" [label="' + node.op +'"]' + '}'
return label
def genrank(cycle2id):
rankCode = ''
rankSink = '{rank = sink; "sink";};\n'
# cycle2id is a dictionary of cycle to node id list
for cycle in cycle2id:
rankTempl = '{rank = same; '
idList = cycle2id[cycle]
sameRankIds = ''
for id in idList:
sameRankIds += '"' + str(id) + '"' + '; '
rankTempl += sameRankIds + '};\n'
rankCode += rankTempl
rankCode += rankSink
return rankCode
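# Shape of the generated DOT text (node ids and ops are illustrative):
# genlabel() yields fragments like {"3" [label="add 2"]} when a PE is assigned,
# and genrank() emits lines such as {rank = same; "1"; "2"; }; plus the sink rank.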
| 2.421875
| 2
|
draw canvas and various shapes.py
|
gptshubham595/MachineLearn
| 0
|
12785470
|
<reponame>gptshubham595/MachineLearn
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 14 23:45:25 2018
@author: gptshubham595
"""
import cv2
import numpy as np
def main():
#drawing canvas: size, BGR channels, unsigned 8-bit ints
img=np.zeros((512,512,3),np.uint8)
#circle X,Y Radius BGR filled
cv2.circle(img,(100,100),(100),(0,255,0),-1)
#draw line X1,Y1 to X2,Y2 and in BGR Thickness
cv2.line(img,(0,99),(99,0),(0,0,255),4)
#rectangle: top-left (x1,y1) and bottom-right (x2,y2) corners, BGR, thickness
cv2.rectangle(img,(40,60),(200,200),(10,150,100),1)
#ellipse: center, axes, rotation 0, arc from 0 to 360 degrees, BGR, filled
cv2.ellipse(img,(250,250),(50,20),0,0,360,(127,127,127),-1)
#making an array for coordinates
points=np.array([[80,2],[125,30],[40,62],[53,12],[64,52]],np.int32)
points=points.reshape((-1,1,2))
cv2.polylines(img,[points],True,(0,255,255))
text='Testing Values'
#where to show ,text ,center,font,size,BGR
cv2.putText(img,text,(200,200),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,0))
cv2.imshow('Pic',img)
if cv2.waitKey(1) ==27:
cv2.destroyWindow('Pic')
if __name__=="__main__":
main()
| 3.15625
| 3
|
pep.lib/proc/procSCISAT.py
|
alpha-zou/TAMP
| 1
|
12785471
|
#!/usr/bin/env python
import os, sys, subprocess
from os.path import basename,dirname
import h5py
from netCDF4 import Dataset
import numpy as np
import numpy.ma as ma
import gdal
from gdalconst import *
from osgeo import ogr, osr
from datetime import datetime, date
def createImgSCISAT(fileAbsPath):
# read info from netcdf
ncfile = Dataset(fileAbsPath, 'r')
latitude = ncfile.groups['ACE-FTS-v2.2'].latitude
longitude = ncfile.groups['ACE-FTS-v2.2'].longitude
datestart = datetime.strptime(ncfile.groups['ACE-FTS-v2.2'].start_time,'%Y-%m-%d %H:%M:%S+00')
dateend = datetime.strptime(ncfile.groups['ACE-FTS-v2.2'].end_time,'%Y-%m-%d %H:%M:%S+00')
ozone = ncfile.groups['ACE-FTS-v2.2'].groups['Data-L2_1km_grid'].variables['O3'][:]
heightLevels = ncfile.groups['ACE-FTS-v2.2'].groups['Data-L2_1km_grid'].variables['z'][:]
numBand = len(ozone)
ncfile.close()
#common vars
no_value = -9999
minValue = ma.min(ozone)
maxValue = ma.max(ozone)
ma.set_fill_value(ozone, no_value)
ozone = ozone.filled()
#ma.set_fill_value(heightLevels, no_value)
#heightLevels = heightLevels.filled()
sizeX = 1
sizeY = 1
dataType = gdal.GDT_Float32
resolution = 1.0 # in degree
driver = gdal.GetDriverByName('GTiff' )
outFile = 'ACE-FTS_L2_ozone_'+datestart.strftime('%Y%m%d.%H%M%S')+'.tif'
#create tiff
dst_ds = driver.Create(outFile, sizeX, sizeY, numBand, dataType)
for i in range(numBand):
dst_ds.GetRasterBand(i+1).WriteArray(np.expand_dims(np.expand_dims(ozone[i],axis=0),axis=0))
# The computed stat produces this warning
# Warning 1: Lost metadata writing to GeoTIFF ... too large to fit in tag.
# An additional *.aux.xml is added
#if ozone[i] != no_value:
# dst_ds.GetRasterBand(i+1).ComputeStatistics(False)
dst_ds.GetRasterBand(i+1).SetNoDataValue(no_value)
#set geotrasform matrix
top_left_x = longitude - (resolution / 2)
w_e_pixel_resolution = resolution
top_left_y = latitude - (resolution / 2)
n_s_pixel_resolution = - resolution
coord = [top_left_x, w_e_pixel_resolution, 0, top_left_y,0, n_s_pixel_resolution]
dst_ds.SetGeoTransform(coord)
srs = osr.SpatialReference()
srs.SetWellKnownGeogCS('WGS84')
dst_ds.SetProjection(srs.ExportToWkt())
#set metadata
dst_ds.SetMetadataItem('GLOBAL_MAX',str(maxValue))
dst_ds.SetMetadataItem('GLOBAL_MIN',str(minValue))
dst_ds.SetMetadataItem('TIME_END', dateend.strftime('%Y-%m-%dT%H:%M:%SZ'))
dst_ds.SetMetadataItem('TIME_START', datestart.strftime('%Y-%m-%dT%H:%M:%SZ'))
dst_ds.SetMetadataItem('VERTICAL_LEVELS_NUMBER', str(len(heightLevels)))
dst_ds.SetMetadataItem('VERTICAL_LEVELS', ','.join(str(x) for x in heightLevels))
dst_ds =None
return [outFile]
if __name__ == '__main__':
if len(sys.argv) != 2:
sys.exit('\nUsage: %s L2_SCISAT_file \n' % sys.argv[0] )
else:
if not os.path.exists(sys.argv[1]):
sys.exit('\nERROR: File %s was not found!\n' % sys.argv[1])
fileAbsPath = sys.argv[1]
outFileName = createImgSCISAT(fileAbsPath)
exit(0)
# else:
# Module is imported from another module
| 2.15625
| 2
|
envs/classic_controls/CartPole/agent.py
|
Robotics-Engineering-Lab/bonsai-connectors
| 8
|
12785472
|
import logging
from os import write
import requests
from typing import Any, Dict
from cartpole import CartPole
from tensorboardX import SummaryWriter
class BonsaiAgent(object):
""" The agent that gets the action from the trained brain exported as docker image and started locally
"""
def act(self, state) -> Dict[str, Any]:
action = self.predict(state)
#simulator expects action to be integer
action["command"] = int(action["command"])
return action
def predict(self, state):
#local endpoint when running trained brain locally in docker container
url = "http://localhost:5000/v1/prediction"
response = requests.get(url, json=state)
action = response.json()
return action
class RandomAgent(object):
"""The world's simplest agent!"""
def __init__(self, cartpole: CartPole):
self.cartpole = cartpole
def act(self, state):
return cartpole.gym_to_action(cartpole._env.action_space.sample())
if __name__ == '__main__':
logging.basicConfig()
log = logging.getLogger("cartpole")
log.setLevel(level='INFO')
writer = SummaryWriter()
# we will use our environment (wrapper of OpenAI env)
cartpole = CartPole()
# specify which agent you want to use,
# BonsaiAgent that uses trained Brain or
# RandomAgent that randomly selects next action
agent = BonsaiAgent()
episode_count = 100
try:
for i in range(episode_count):
#start a new episode and get the new state
cartpole.episode_start()
state = cartpole.get_state()
cum_reward = 0
while True:
#get the action from the agent (based on the current state)
action = agent.act(state)
#do the next step of the simulation and get the new state
cartpole.episode_step(action)
state = cartpole.get_state()
#get the last reward and add it the episode reward
reward = cartpole.get_last_reward()
cum_reward += reward
if cartpole.halted():
writer.add_scalar("reward", cum_reward, i )
break
writer.flush()
cartpole.episode_finish("")
writer.close()
except KeyboardInterrupt:
print("Stopped")
| 2.765625
| 3
|
tracardi/service/wf/domain/error_debug_info.py
|
bytepl/tracardi
| 153
|
12785473
|
<filename>tracardi/service/wf/domain/error_debug_info.py
from pydantic import BaseModel
class ErrorDebugInfo(BaseModel):
msg: str
line: int
file: str
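# Illustrative: ErrorDebugInfo(msg="division by zero", line=42, file="plugin.py")
# validates the field types and carries the error location.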
| 1.757813
| 2
|
pypy/module/struct/ieee.py
|
camillobruni/pygirl
| 12
|
12785474
|
"""
Packing and unpacking of floats in the IEEE 32-bit and 64-bit formats.
"""
import math
from pypy.rlib.rarithmetic import r_longlong, isinf, isnan, INFINITY, NAN
def pack_float(result, number, size, bigendian):
"""Append to 'result' the 'size' characters of the 32-bit or 64-bit
IEEE representation of the number.
"""
if size == 4:
bias = 127
exp = 8
prec = 23
else:
bias = 1023
exp = 11
prec = 52
if isnan(number):
sign = 0x80
man, e = 1.5, bias + 1
else:
if number < 0:
sign = 0x80
number *= -1
elif number == 0.0:
for i in range(size):
result.append('\x00')
return
else:
sign = 0x00
if isinf(number):
man, e = 1.0, bias + 1
else:
man, e = math.frexp(number)
if 0.5 <= man and man < 1.0:
man *= 2
e -= 1
man -= 1
e += bias
power_of_two = r_longlong(1) << prec
mantissa = r_longlong(power_of_two * man + 0.5)
if mantissa >> prec :
mantissa = 0
e += 1
for i in range(size-2):
result.append(chr(mantissa & 0xff))
mantissa >>= 8
x = (mantissa & ((1<<(15-exp))-1)) | ((e & ((1<<(exp-7))-1))<<(15-exp))
result.append(chr(x))
x = sign | e >> (exp - 7)
result.append(chr(x))
if bigendian:
first = len(result) - size
last = len(result) - 1
for i in range(size // 2):
(result[first + i], result[last - i]) = (
result[last - i], result[first + i])
def unpack_float(input, bigendian):
"""Interpret the 'input' string into a 32-bit or 64-bit
IEEE representation of the number.
"""
size = len(input)
bytes = []
if bigendian:
reverse_mask = size - 1
else:
reverse_mask = 0
nonzero = False
for i in range(size):
x = ord(input[i ^ reverse_mask])
bytes.append(x)
nonzero |= x
if not nonzero:
return 0.0
if size == 4:
bias = 127
exp = 8
prec = 23
else:
bias = 1023
exp = 11
prec = 52
mantissa_scale_factor = 0.5 ** prec # this is constant-folded if it's
# right after the 'if'
mantissa = r_longlong(bytes[size-2] & ((1<<(15-exp))-1))
for i in range(size-3, -1, -1):
mantissa = mantissa << 8 | bytes[i]
mantissa = 1 + mantissa * mantissa_scale_factor
mantissa *= 0.5
e = (bytes[-1] & 0x7f) << (exp - 7)
e += (bytes[size-2] >> (15 - exp)) & ((1<<(exp - 7)) -1)
e -= bias
e += 1
sign = bytes[-1] & 0x80
if e == bias + 2:
if mantissa == 0.5:
number = INFINITY
else:
return NAN
else:
number = math.ldexp(mantissa,e)
if sign : number = -number
return number
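# Round-trip sketch (assumes the pypy.rlib environment this module targets):
#   result = []
#   pack_float(result, 1.5, 8, False)        # 8 little-endian bytes of an IEEE 754 double
#   assert unpack_float(''.join(result), False) == 1.5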
| 3.34375
| 3
|
cifar10/selfsup/transforms.py
|
phymhan/essl
| 0
|
12785475
|
import torchvision.transforms as T
def aug_transform(crop, base_transform, cfg, extra_t=[]):
""" augmentation transform generated from config """
return T.Compose(
[
T.RandomApply(
[T.ColorJitter(cfg.cj0, cfg.cj1, cfg.cj2, cfg.cj3)], p=cfg.cj_p
),
T.RandomGrayscale(p=cfg.gs_p),
T.RandomResizedCrop(
crop,
scale=(cfg.crop_s0, cfg.crop_s1),
ratio=(cfg.crop_r0, cfg.crop_r1),
interpolation=3,
),
T.RandomHorizontalFlip(p=cfg.hf_p),
*extra_t,
base_transform(),
]
)
class MultiSample:
""" generates n samples with augmentation """
def __init__(self, transform, n=2):
self.transform = transform
self.num = n
def __call__(self, x):
return tuple(self.transform(x) for _ in range(self.num))
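# Usage sketch: the cfg fields mirror the attribute names read inside
# aug_transform; the values below are illustrative assumptions, not the
# project's actual configuration.
if __name__ == '__main__':
    from types import SimpleNamespace
    cfg = SimpleNamespace(cj0=0.4, cj1=0.4, cj2=0.4, cj3=0.1, cj_p=0.8,
                          gs_p=0.2, crop_s0=0.2, crop_s1=1.0,
                          crop_r0=0.75, crop_r1=1.33, hf_p=0.5)
    two_views = MultiSample(aug_transform(32, T.ToTensor, cfg), n=2)
    # two_views(pil_image) would return a tuple of two independently augmented tensors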
| 2.5625
| 3
|
config/__init__.py
|
MCC-WH/Token
| 30
|
12785476
|
import argparse
from torch import cuda
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--directory', metavar='EXPORT_DIR', help='destination where trained network should be saved')
parser.add_argument('--training-dataset', default='GLDv2', help='training dataset: (default: GLDv2)')
parser.add_argument('--imsize', default=1024, type=int, metavar='N', help='maximum size of longer image side used for training (default: 1024)')
parser.add_argument('--num-workers', default=8, type=int, metavar='N', help='number of data loading workers (default: 8)')
parser.add_argument('--device', type=str, default='cuda' if cuda.is_available() else 'cpu')
parser.add_argument('--num-epochs', default=100, type=int, metavar='N', help='number of total epochs to run (default: 100)')
parser.add_argument('--batch-size', '-b', default=5, type=int, metavar='N', help='number of (q,p,n1,...,nN) tuples in a mini-batch (default: 5)')
parser.add_argument('--update-every', '-u', default=1, type=int, metavar='N', help='update model weights every N batches, used to handle really large batches, ' + 'batch_size effectively becomes update_every x batch_size (default: 1)')
parser.add_argument('--resume', default=None, type=str, metavar='FILENAME', help='name of the latest checkpoint (default: None)')
parser.add_argument('--warmup-epochs', type=int, default=0, help='learning rate will be linearly scaled during warm up period')
parser.add_argument('--val-epoch', type=int, default=1)
parser.add_argument('--warmup-lr', type=float, default=0, help='Initial warmup learning rate')
parser.add_argument('--base-lr', type=float, default=1e-6)
parser.add_argument('--final-lr', type=float, default=0)
parser.add_argument('--momentum', type=float, default=0.9)
parser.add_argument('--weight-decay', type=float, default=1e-6)
parser.add_argument('--rank', type=int, default=None)
parser.add_argument('--world_size', type=int, default=None)
parser.add_argument('--gpu', type=int, default=None)
parser.add_argument('--dist_backend', type=str, default='nccl')
parser.add_argument('--dist_url', type=str, default='tcp://127.0.0.1:29324')
parser.add_argument('--distributed', action='store_true')
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--clip_max_norm', type=float, default=0)
args = parser.parse_args()
return args
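# Hypothetical invocation showing how these flags are consumed (script name and
# values are illustrative):
#   python train.py --directory ./experiments/run1 --training-dataset GLDv2 \
#       --batch-size 5 --base-lr 1e-6 --warmup-epochs 5
# args = get_args() then exposes them as attributes, e.g. args.device is 'cuda'
# when a GPU is visible, else 'cpu'.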
| 2.484375
| 2
|
app.py
|
frostblooded/led_music_alarm_server
| 0
|
12785477
|
import os
from flask import Flask
from flask import request
from flask import render_template
app = Flask(__name__)
@app.route('/', methods=['GET', 'POST'])
def index():
if request.method == 'GET':
return render_template('index.html')
elif request.method == 'POST':
data = request.form
text = '''[spotify]
username = {}
password = {}
client_id = {}
client_secret = {}
private_session = true
[mpd]
enabled = true
'''.format(data['username'],
data['password'],
data['client_id'],
data['client_secret'])
text_file = open("/etc/mopidy/mopidy.conf", "w")
text_file.write(text)
text_file.close()
print("Service restart:", os.system('sudo systemctl restart mopidy.service'))
return render_template('index.html', flash="Credentials set!")
@app.route('/alarm', methods=['POST'])
def alarm():
cron_text = '{} {} * * * mpc clear && mpc add {} && mpc play'.format(
request.form['minutes'],
request.form['hours'],
request.form['spotify_uri']
)
remove_cron_command = 'sudo crontab -r'
cron_set_command = '(sudo crontab -l ; echo "{}") | sort - | uniq - | sudo crontab -'.format(cron_text)
print("Removing old crontabs:", os.system(remove_cron_command))
print("Setting crontab:", os.system(cron_set_command))
return render_template('index.html', flash="Alarm set!")
| 2.34375
| 2
|
SampleAIs/Sample_Daddies/__init__.py
|
YSabarad/monopyly
| 4
|
12785478
|
<filename>SampleAIs/Sample_Daddies/__init__.py
from .generous_daddy import GenerousDaddyAI
from .mean_daddy import MeanDaddyAI
| 1.125
| 1
|
homura/__init__.py
|
Xiangyu-Han/homura
| 1
|
12785479
|
<gh_stars>1-10
from .register import Registry
from .utils import TensorDataClass, TensorTuple, distributed_print, enable_accimage, get_args, get_environ, \
get_git_hash, get_global_rank, get_local_rank, get_num_nodes, get_world_size, if_is_master, init_distributed, \
is_accimage_available, is_distributed, is_distributed_available, is_faiss_available, is_master, set_deterministic, \
set_seed
Registry.import_modules('homura.vision')
# to avoid circular import
from . import reporters, trainers, optim, lr_scheduler
| 1.257813
| 1
|
1 Scripto/01.py
|
peterszerzo/rhino-pythonscript-tutorials
| 2
|
12785480
|
"""
Rhino Python Script Tutorial
Exercise 01
Draw point at origin.
Important note: Python is very sensitive to indentation.
Notice how the rs.AddPoint statement is indented inwards.
This means that it is part of the Main method.
All this will be clear in due time.
"""
import rhinoscriptsyntax as rs
import math
def Main():
rs.AddPoint([0,0,0])
Main()
| 3.03125
| 3
|
examples/simulation_utils.py
|
ZhaozhiQIAN/torchdiffeq
| 0
|
12785481
|
import time
import random
import numpy as np
import pandas as pds
from collections import namedtuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchdiffeq import odeint_adjoint as odeint
from config import D_TYPE
def get_data():
Arg = namedtuple('Arg', ['method', 'data_size', 'batch_time', 'batch_size',
'niters', 'test_freq', 'viz', 'gpu', 'adjoint'])
args = Arg('dopri5', 1000, 20, 1, 2000, 50, False, 1, True)
true_y0 = torch.tensor([[0.0, 5.0]])
t = torch.linspace(0, 5, args.data_size + 1)
class Lambda(nn.Module):
def __init__(self, w0, f0, w1, dr):
super(Lambda, self).__init__()
self.w0 = torch.tensor(w0)
self.f0 = torch.tensor(f0)
self.w1 = torch.tensor(w1)
self.dr = torch.tensor(dr)
def force(self, t):
return self.f0 * torch.sin(self.w1 * t)
def forward(self, t, y):
dy0_dt = y[:, 1]
dy1_dt = -2. * self.w0 * y[:, 1] * self.dr - self.w0 ** 2 * y[:, 0] + self.force(t)
return torch.cat((dy0_dt.reshape(-1, 1), dy1_dt.reshape(-1, 1)), axis=1)
# This numerical solution given the true DE.
with torch.no_grad():
lam = Lambda(5., 5., 3., 0.01)
true_y = odeint(lam, true_y0, t, method='dopri5')
dat_dict = dict()
for s in range(args.data_size - args.batch_time):
batch_t = t[s:s+args.batch_time]
batch_y = torch.stack([true_y[s + i] for i in range(args.batch_time)], dim=0)
dim = batch_y.shape[-1]
x_reshaped = batch_y.reshape(args.batch_time, 1, 1, dim)
dat_dict[str(s)] = dict(t=batch_t, x=x_reshaped)
return dat_dict
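# Illustrative shapes: with data_size=1000 and batch_time=20, get_data() returns
# 980 windows; each entry holds t of shape (20,) and x of shape (20, 1, 1, 2),
# i.e. position and velocity of the forced, damped oscillator over 20 steps.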
| 2.5
| 2
|
Ex_40.py
|
soldierloko/Curso-em-Video
| 0
|
12785482
|
<reponame>soldierloko/Curso-em-Video<filename>Ex_40.py
#Write a program that reads a student's 2 grades and computes the average, showing a message at the end according to the average obtained:
# <5: REPROVADO (failed)
#>5 and <7: RECUPERAÇÃO (retake)
#>6.59: APROVADO (passed)
n1 = float(input('Digite a primeira nota do aluno: '))
n2 = float(input('Digite a segunda nota do aluno: '))
vMedia = (n1+n2)/2
if vMedia < 5:
print('REPROVADO')
elif vMedia >= 5 and vMedia<=6.59:
print('RECUPERAÇÃO')
else:
print('APROVADO')
| 3.65625
| 4
|
gestao_rh/urls.py
|
jesielcarlos/gestao_rh
| 0
|
12785483
|
<reponame>jesielcarlos/gestao_rh
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('',include('apps.core.urls')),
path('funcionarios/',include('apps.funcionarios.urls')),
path('departamentos/',include('apps.departamentos.urls')),
path('empresa/',include('apps.empresa.urls')),
path('documento/',include('apps.documentos.urls')),
path('horas-extras/',include('apps.registro_hora_extra.urls')),
path('admin/', admin.site.urls),
path('accounts/',include('django.contrib.auth.urls')),
] + static(settings.MEDIA_URL,document_root=settings.MEDIA_ROOT)
| 1.742188
| 2
|
src/deprecated/rnn.py
|
yulinliu101/DeepTP
| 46
|
12785484
|
<reponame>yulinliu101/DeepTP<filename>src/deprecated/rnn.py
# Note: All calls to tf.name_scope or tf.summary.* support TensorBoard visualization.
import os
import tensorflow as tf
from configparser import ConfigParser
# from keras.layers import CuDNNLSTM
# from models.RNN.utils import variable_on_gpu
def variable_on_cpu(name, shape, initializer):
"""
Next we concern ourselves with graph creation.
However, before we do so we must introduce a utility function ``variable_on_cpu()``
used to create a variable in CPU memory.
"""
# Use the /cpu:0 device for scoped operations
with tf.device('/cpu:0'):
# Create or get apropos variable
var = tf.get_variable(name=name, shape=shape, initializer=initializer)
return var
def variable_on_gpu(name, shape, initializer):
"""
Next we concern ourselves with graph creation.
However, before we do so we must introduce a utility function ``variable_on_gpu()``
used to create a variable in GPU memory.
"""
# Use the /device:GPU:0 device for scoped operations
with tf.device('/device:GPU:0'):
# Create or get apropos variable
var = tf.get_variable(name=name, shape=shape, initializer=initializer)
return var
def LSTM(conf_path, batch_x, seq_length, n_input, initial_state = None, train = True):
# batch_x has the shape of [batch, time, n_dim]
parser = ConfigParser(os.environ)
parser.read(conf_path)
n_mixture = parser.getint('lstm', 'n_mixture')
n_layers = parser.getint('lstm', 'n_lstm_layers')
relu_clip = parser.getint('lstm', 'relu_clip')
if train:
dropout = [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]
else:
dropout = [0., 0., 0., 0., 0., 0., 0.]
b1_stddev = 0.01
h1_stddev = 0.01
b2_stddev = 0.01
h2_stddev = 0.01
b3_stddev = 0.01
h3_stddev = 0.01
b4_stddev = 0.01
h4_stddev = 0.01
b5_stddev = 0.01
h5_stddev = 0.01
b_out_stddev = 0.01
h_out_stddev = 0.01
n_hidden_1 = parser.getint('lstm', 'n_hidden_1')
n_hidden_2 = parser.getint('lstm', 'n_hidden_2')
n_cell_dim = parser.getint('lstm', 'n_cell_dim')
n_hidden_3 = int(eval(parser.get('lstm', 'n_hidden_3')))
n_hidden_4 = parser.getint('lstm', 'n_hidden_4')
n_prob_param = parser.getint('lstm', 'n_prob_param')
n_out = n_prob_param * n_mixture + 1
# Input shape: [batch_size, n_steps, n_input]
# # n_input is the # of (original) features per frame: default to be 26
batch_x_shape = tf.shape(batch_x)
# # Reshaping `batch_x` to a tensor with shape `[n_steps*batch_size, n_input + 2*n_input*n_context]`.
# # This is done to prepare the batch for input into the first layer which expects a tensor of rank `2`.
# # Permute n_steps and batch_size
# batch_x = tf.transpose(batch_x, [1, 0, 2])
# Reshape to prepare input for first layer
# batch_x = tf.reshape(batch_x, [-1, n_input]) # (n_steps*batch_size, n_input)
batch_x = tf.reshape(batch_x, [-1, n_input]) # (batch_size*time, n_input)
# clipped RELU activation and dropout.
# 1st layer
with tf.name_scope('embedding'):
b1 = variable_on_gpu('b1', [n_hidden_1], tf.random_normal_initializer(stddev=b1_stddev))
h1 = variable_on_gpu('h1', [n_input, n_hidden_1],
tf.random_normal_initializer(stddev=h1_stddev))
layer_emb = tf.nn.elu(tf.nn.xw_plus_b(batch_x, h1, b1))
# layer_emb = tf.minimum(tf.nn.relu(tf.add(tf.matmul(batch_x, h1), b1)), relu_clip)
layer_emb = tf.nn.dropout(layer_emb, (1.0 - dropout[0]))
# with tf.device('/cpu:0'):
tf.summary.histogram("weights", h1)
tf.summary.histogram("biases", b1)
tf.summary.histogram("activations", layer_emb)
with tf.name_scope('fc2'):
b2 = variable_on_gpu('b2', [n_hidden_2], tf.random_normal_initializer(stddev=b2_stddev))
h2 = variable_on_gpu('h2', [n_hidden_1, n_hidden_2],
tf.random_normal_initializer(stddev=h2_stddev))
layer_fc2 = tf.nn.elu(tf.nn.xw_plus_b(layer_emb, h2, b2))
layer_fc2 = tf.nn.dropout(layer_fc2, (1.0 - dropout[1]))
# with tf.device('/cpu:0'):
tf.summary.histogram("weights", h2)
tf.summary.histogram("biases", b2)
tf.summary.histogram("activations", layer_fc2)
# Create the forward and backward LSTM units. Inputs have length `n_cell_dim`.
# LSTM forget gate bias initialized at `1.0` (default), meaning less forgetting
# at the beginning of training (remembers more previous info)
# Input shape: [batch_size, n_steps, n_input]
# batch_x_shape = tf.shape(batch_x)
# # # Only work on cudnn 7.0.4
# with tf.name_scope('multilayer_lstm_cudnn'):
# layer_fc2 = tf.transpose(tf.reshape(layer_fc2, [batch_x_shape[0], -1, n_hidden_2]), (1,0,2))
# model = tf.contrib.cudnn_rnn.CudnnLSTM(num_layers = n_layers,
# num_units = n_cell_dim,
# input_mode = 'linear_input',
# direction = 'unidirectional',
# dropout = dropout[2],
# kernel_initializer = tf.random_normal_initializer(stddev=0.1),
# bias_initializer = tf.random_normal_initializer(stddev=0.1))
# outputs, final_state = model(inputs=layer_fc2)
# outputs = tf.reshape(tf.transpose(outputs, (1,0,2)), [-1, n_cell_dim]) # [batch*time, n_cell_dim]
# tf.summary.histogram("activations", outputs)
with tf.name_scope('multilayer_lstm'):
# as the LSTM expects its input to be of shape `[batch_size, time, input_size]`.
layer_fc2 = tf.reshape(layer_fc2, [batch_x_shape[0], -1, n_hidden_2])
# `layer_fc2` is now reshaped into `[n_steps, batch_size, n_hidden_2]`,
cells = []
for _ in range(n_layers):
cell = tf.nn.rnn_cell.BasicLSTMCell(n_cell_dim, state_is_tuple = True)
cell = tf.nn.rnn_cell.DropoutWrapper(cell, output_keep_prob = 1 - dropout[2])
cells.append(cell)
stack = tf.contrib.rnn.MultiRNNCell(cells, state_is_tuple=True)
# Get layer activations (second output is the final state of the layer, do not need)
outputs, final_state = tf.nn.dynamic_rnn(cell = stack,
inputs = layer_fc2,
initial_state=initial_state,
sequence_length = seq_length,
dtype = tf.float32,
time_major=False
)
# outputs has the shape of [batch_size, time, n_cell_dim]
# states has the shape of [batch_size, cell.state_size]
# Reshape to apply the same weights over the timesteps
outputs = tf.reshape(outputs, [-1, n_cell_dim]) # [batch*time, n_cell_dim]
# with tf.device('/cpu:0'):
tf.summary.histogram("activations", outputs)
with tf.name_scope('fc3'):
# Now we feed `outputs` to the fifth hidden layer with clipped RELU activation and dropout
b3 = variable_on_gpu('b3', [n_hidden_3], tf.random_normal_initializer(stddev=b3_stddev))
h3 = variable_on_gpu('h3', [n_cell_dim, n_hidden_3], tf.random_normal_initializer(stddev=h3_stddev))
layer_fc3 = tf.nn.elu(tf.nn.xw_plus_b(outputs, h3, b3))
layer_fc3 = tf.nn.dropout(layer_fc3, (1.0 - dropout[4]))
# with tf.device('/cpu:0'):
tf.summary.histogram("weights", h3)
tf.summary.histogram("biases", b3)
tf.summary.histogram("activations", layer_fc3)
with tf.name_scope('fc4'):
# Now we feed `outputs` to the fifth hidden layer with clipped RELU activation and dropout
b4 = variable_on_gpu('b4', [n_hidden_4], tf.random_normal_initializer(stddev=b4_stddev))
h4 = variable_on_gpu('h4', [n_hidden_3, n_hidden_4], tf.random_normal_initializer(stddev=h4_stddev))
layer_fc4 = tf.nn.elu(tf.nn.xw_plus_b(layer_fc3, h4, b4))
layer_fc4 = tf.nn.dropout(layer_fc4, (1.0 - dropout[5]))
# with tf.device('/cpu:0'):
tf.summary.histogram("weights", h4)
tf.summary.histogram("biases", b4)
tf.summary.histogram("activations", layer_fc4)
####################################
########## LOGIT LAYER ###########
####################################
with tf.name_scope('output_layer'):
b_out = variable_on_gpu('b_out', [n_out], tf.random_normal_initializer(stddev=b_out_stddev))
h_out = variable_on_gpu('h_out', [n_hidden_4, n_out], tf.random_normal_initializer(stddev=h_out_stddev))
layer_out = tf.nn.xw_plus_b(layer_fc4, h_out, b_out) # shape of [batch*time, n_out]
tf.summary.histogram("weights", h_out)
tf.summary.histogram("biases", b_out)
tf.summary.histogram("activations", layer_out)
# # shape of [batch_size*time, 1 + n_mixture * n_prob_param]
# n_prob_param = 1 (pi) + 4 (mu) + 10 (cov)
with tf.name_scope('mixture_coef'):
end_layer, \
pi_layer, \
mu_layer, \
L_layer = get_mixture_coef(layer_out, n_mixture)
with tf.name_scope('mixture_distribution'):
MVN_pdf = tf.contrib.distributions.MultivariateNormalTriL(loc = mu_layer, scale_tril = L_layer)
summary_op = tf.summary.merge_all()
return end_layer, pi_layer, mu_layer, L_layer, MVN_pdf, final_state, summary_op
# return end_layer, pi_layer, mu_lat_layer, mu_lon_layer, mu_alt_layer, mu_st_layer, sigma_lat_layer, sigma_lon_layer, sigma_alt_layer, sigma_st_layer, rho_latlon_layer, rho_lonalt_layer, rho_altlat_layer, rho_stlat_layer, rho_stlon_layer, rho_stalt_layer, final_state, summary_op
def get_mixture_coef(layer_out, n_mixture):
split_shape = [1, n_mixture, n_mixture * 4, n_mixture * 10]
end_layer, \
pi_layer, \
mu_layer, \
L_layer = tf.split(value = layer_out,
num_or_size_splits = split_shape,
axis = 1)
end_layer = tf.nn.sigmoid(end_layer)
pi_layer = tf.nn.softmax(pi_layer)
mu_layer = tf.reshape(mu_layer, (-1, n_mixture, 4)) # [batch*time, n_mixture, 4]
L_layer = tf.reshape(L_layer, (-1, n_mixture, 10))
L_layer = tf.contrib.distributions.fill_triangular(L_layer)
L_layer = tf.contrib.distributions.matrix_diag_transform(L_layer, transform=tf.nn.softplus)
return end_layer, pi_layer, mu_layer, L_layer
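# Hedged sketch added for illustration (not part of the original graph): the split
# sizes above follow from the parameter count of a 4-D Gaussian mixture. Each
# component needs 1 mixing weight, 4 means and 10 lower-triangular Cholesky entries
# (4*5/2), and the output carries one extra end-of-sequence logit.
def _expected_n_out(n_mixture):
    """Return the logit width that get_mixture_coef expects for `n_mixture` components."""
    return 1 + n_mixture * (1 + 4 + 10)
# Example: _expected_n_out(5) == 76, so `layer_out` must have 76 units when n_mixture = 5.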
# split_shape = [1]
# split_shape.extend([n_mixture]*(n_prob_param//3))
# split_shape.append(n_mixture * n_prob_param*2//3)
# end_layer, \
# pi_layer, \
# mu_lat_layer, \
# mu_lon_layer, \
# mu_alt_layer, \
# mu_st_layer, \
# L_layer = tf.split(value = layer_out,
# num_or_size_splits = split_shape,
# axis = 1)
# mu_flat = tf.concat([mu_lat_layer, mu_lon_layer, mu_alt_layer, mu_st_layer], axis = 1) # [batch*time, n_mixture * 4]
# mu_layer = tf.transpose(tf.reshape(mu_flat, (-1, 4, n_mixture)), perm = [0,2,1]) # [batch*time, 4, n_mixture]
# sigma_alt_layer = tf.exp(sigma_alt_layer)
# sigma_lat_layer = tf.exp(sigma_lat_layer)
# sigma_lon_layer = tf.exp(sigma_lon_layer)
# sigma_st_layer = tf.exp(sigma_st_layer)
# rho_latlon_layer = tf.nn.tanh(rho_latlon_layer)
# rho_lonalt_layer = tf.nn.tanh(rho_lonalt_layer)
# rho_altlat_layer = tf.nn.tanh(rho_altlat_layer)
# rho_stlat_layer = tf.nn.tanh(rho_stlat_layer)
# rho_stlon_layer = tf.nn.tanh(rho_stlon_layer)
# rho_stalt_layer = tf.nn.tanh(rho_stalt_layer)
# b_end_stddev = 0.01
# h_end_stddev = 0.01
# b_pi_stddev = 0.01
# h_pi_stddev = 0.01
# b_mu_lat_stddev = 0.01
# h_mu_lat_stddev = 0.01
# b_mu_lon_stddev = 0.01
# h_mu_lon_stddev = 0.01
# b_mu_alt_stddev = 0.01
# h_mu_alt_stddev = 0.01
# b_sigma_lat_stddev = 0.01
# h_sigma_lat_stddev = 0.01
# b_sigma_lon_stddev = 0.01
# h_sigma_lon_stddev = 0.01
# b_sigma_alt_stddev = 0.01
# h_sigma_alt_stddev = 0.01
# b_rho_lat_stddev = 0.01
# h_rho_lat_stddev = 0.01
# b_rho_lon_stddev = 0.01
# h_rho_lon_stddev = 0.01
# b_rho_alt_stddev = 0.01
# h_rho_alt_stddev = 0.01
# with tf.name_scope('fc_end'):
# # Now we feed `outputs` to the fifth hidden layer with clipped RELU activation and dropout
# b_end = variable_on_gpu('b_end', [1], tf.random_normal_initializer(stddev=b_end_stddev))
# h_end = variable_on_gpu('h_end', [n_hidden_4, 1], tf.random_normal_initializer(stddev=h_end_stddev))
# end_layer = tf.nn.sigmoid(tf.add(tf.matmul(layer_fc4, h_end), b_end))
# with tf.device('/cpu:0'):
# tf.summary.histogram("weights", h_end)
# tf.summary.histogram("biases", b_end)
# tf.summary.histogram("activations", end_layer)
# end_layer = tf.reshape(end_layer, [-1, batch_x_shape[0], n_mixture])
# end_layer = tf.transpose(end_layer, [1, 0, 2])
# # shape of [batch_size, time, n_dim]
# with tf.name_scope('fc_pi'):
# # Now we feed `outputs` to the fifth hidden layer with clipped RELU activation and dropout
# b_pi = variable_on_gpu('b_pi', [n_mixture], tf.random_normal_initializer(stddev=b_pi_stddev))
# h_pi = variable_on_gpu('h_pi', [n_hidden_4, n_mixture], tf.random_normal_initializer(stddev=h_pi_stddev))
# pi_layer = tf.nn.softmax(tf.add(tf.matmul(layer_fc4, h_pi), b_pi))
# with tf.device('/cpu:0'):
# tf.summary.histogram("weights", h_pi)
# tf.summary.histogram("biases", b_pi)
# tf.summary.histogram("activations", pi_layer)
# pi_layer = tf.reshape(pi_layer, [-1, batch_x_shape[0], n_mixture])
# pi_layer = tf.transpose(pi_layer, [1, 0, 2])
# # shape of [batch_size, time, n_dim]
# ###############################################################
# with tf.name_scope('fc_mu_lat'):
# # Now we feed `outputs` to the fifth hidden layer with clipped RELU activation and dropout
# b_mu_lat = variable_on_gpu('b_mu_lat', [n_mixture], tf.random_normal_initializer(stddev=b_mu_lat_stddev))
# h_mu_lat = variable_on_gpu('h_lat', [n_hidden_4, n_mixture], tf.random_normal_initializer(stddev=h_mu_lat_stddev))
# mu_lat_layer = (tf.add(tf.matmul(layer_fc4, h_mu_lat), b_mu_lat))
# with tf.device('/cpu:0'):
# tf.summary.histogram("weights", h_mu_lat)
# tf.summary.histogram("biases", b_mu_lat)
# tf.summary.histogram("activations", mu_lat_layer)
# mu_lat_layer = tf.reshape(mu_lat_layer, [-1, batch_x_shape[0], n_mixture])
# mu_lat_layer = tf.transpose(mu_lat_layer, [1, 0, 2])
# # shape of [batch_size, time, n_dim]
# with tf.name_scope('fc_mu_lon'):
# # Now we feed `outputs` to the fifth hidden layer with clipped RELU activation and dropout
# b_mu_lon = variable_on_gpu('b_mu_lon', [n_mixture], tf.random_normal_initializer(stddev=b_mu_lon_stddev))
# h_mu_lon = variable_on_gpu('h_lon', [n_hidden_4, n_mixture], tf.random_normal_initializer(stddev=h_mu_lon_stddev))
# mu_lon_layer = (tf.add(tf.matmul(layer_fc4, h_mu_lon), b_mu_lon))
# with tf.device('/cpu:0'):
# tf.summary.histogram("weights", h_mu_lon)
# tf.summary.histogram("biases", b_mu_lon)
# tf.summary.histogram("activations", mu_lon_layer)
# mu_lon_layer = tf.reshape(mu_lon_layer, [-1, batch_x_shape[0], n_mixture])
# mu_lon_layer = tf.transpose(mu_lon_layer, [1, 0, 2])
# # shape of [batch_size, time, n_dim]
# with tf.name_scope('fc_mu_alt'):
# # Now we feed `outputs` to the fifth hidden layer with clipped RELU activation and dropout
# b_mu_alt = variable_on_gpu('b_mu_alt', [n_mixture], tf.random_normal_initializer(stddev=b_mu_alt_stddev))
# h_mu_alt = variable_on_gpu('h_alt', [n_hidden_4, n_mixture], tf.random_normal_initializer(stddev=h_mu_alt_stddev))
# mu_alt_layer = (tf.add(tf.matmul(layer_fc4, h_mu_alt), b_mu_alt))
# with tf.device('/cpu:0'):
# tf.summary.histogram("weights", h_mu_alt)
# tf.summary.histogram("biases", b_mu_alt)
# tf.summary.histogram("activations", mu_alt_layer)
# mu_alt_layer = tf.reshape(mu_alt_layer, [-1, batch_x_shape[0], n_mixture])
# mu_alt_layer = tf.transpose(mu_alt_layer, [1, 0, 2])
# # shape of [batch_size, time, n_dim]
# with tf.name_scope('fc_mu_st'):
# # Now we feed `outputs` to the fifth hidden layer with clipped RELU activation and dropout
# b_mu_st = variable_on_gpu('b_mu_st', [n_mixture], tf.random_normal_initializer(stddev=b_mu_st_stddev))
# h_mu_st = variable_on_gpu('h_st', [n_hidden_4, n_mixture], tf.random_normal_initializer(stddev=h_mu_st_stddev))
# mu_st_layer = (tf.add(tf.matmul(layer_fc4, h_mu_st), b_mu_st))
# with tf.device('/cpu:0'):
# tf.summary.histogram("weights", h_mu_st)
# tf.summary.histogram("biases", b_mu_st)
# tf.summary.histogram("activations", mu_st_layer)
# mu_st_layer = tf.reshape(mu_st_layer, [-1, batch_x_shape[0], n_mixture])
# mu_st_layer = tf.transpose(mu_st_layer, [1, 0, 2])
# # shape of [batch_size, time, n_dim]
# ###############################################################
# with tf.name_scope('fc_sigma_lat'):
# # Now we feed `outputs` to the fifth hidden layer with clipped RELU activation and dropout
# b_sigma_lat = variable_on_gpu('b_sigma_lat', [n_mixture], tf.random_normal_initializer(stddev=b_sigma_lat_stddev))
# h_sigma_lat = variable_on_gpu('h_lat', [n_hidden_4, n_mixture], tf.random_normal_initializer(stddev=h_sigma_lat_stddev))
# sigma_lat_layer = tf.nn.softplus(tf.add(tf.matmul(layer_fc4, h_sigma_lat), b_sigma_lat))
# with tf.device('/cpu:0'):
# tf.summary.histogram("weights", h_sigma_lat)
# tf.summary.histogram("biases", b_sigma_lat)
# tf.summary.histogram("activations", sigma_lat_layer)
# sigma_lat_layer = tf.reshape(sigma_lat_layer, [-1, batch_x_shape[0], n_mixture])
# sigma_lat_layer = tf.transpose(sigma_lat_layer, [1, 0, 2])
# # shape of [batch_size, time, n_dim]
# with tf.name_scope('fc_sigma_lon'):
# # Now we feed `outputs` to the fifth hidden layer with clipped RELU activation and dropout
# b_sigma_lon = variable_on_gpu('b_sigma_lon', [n_mixture], tf.random_normal_initializer(stddev=b_sigma_lon_stddev))
# h_sigma_lon = variable_on_gpu('h_lon', [n_hidden_4, n_mixture], tf.random_normal_initializer(stddev=h_sigma_lon_stddev))
# sigma_lon_layer = tf.nn.softplus(tf.add(tf.matmul(layer_fc4, h_sigma_lon), b_sigma_lon))
# with tf.device('/cpu:0'):
# tf.summary.histogram("weights", h_sigma_lon)
# tf.summary.histogram("biases", b_sigma_lon)
# tf.summary.histogram("activations", sigma_lon_layer)
# sigma_lon_layer = tf.reshape(sigma_lon_layer, [-1, batch_x_shape[0], n_mixture])
# sigma_lon_layer = tf.transpose(sigma_lon_layer, [1, 0, 2])
# # shape of [batch_size, time, n_dim]
# with tf.name_scope('fc_sigma_alt'):
# # Now we feed `outputs` to the fifth hidden layer with clipped RELU activation and dropout
# b_sigma_alt = variable_on_gpu('b_sigma_alt', [n_mixture], tf.random_normal_initializer(stddev=b_sigma_alt_stddev))
# h_sigma_alt = variable_on_gpu('h_alt', [n_hidden_4, n_mixture], tf.random_normal_initializer(stddev=h_sigma_alt_stddev))
# sigma_alt_layer = tf.nn.softplus(tf.add(tf.matmul(layer_fc4, h_sigma_alt), b_sigma_alt))
# with tf.device('/cpu:0'):
# tf.summary.histogram("weights", h_sigma_alt)
# tf.summary.histogram("biases", b_sigma_alt)
# tf.summary.histogram("activations", sigma_alt_layer)
# sigma_alt_layer = tf.reshape(sigma_alt_layer, [-1, batch_x_shape[0], n_mixture])
# sigma_alt_layer = tf.transpose(sigma_alt_layer, [1, 0, 2])
# # shape of [batch_size, time, n_dim]
# with tf.name_scope('fc_sigma_st'):
# # Now we feed `outputs` to the fifth hidden layer with clipped RELU activation and dropout
# b_sigma_st = variable_on_gpu('b_sigma_st', [n_mixture], tf.random_normal_initializer(stddev=b_sigma_st_stddev))
# h_sigma_st = variable_on_gpu('h_st', [n_hidden_4, n_mixture], tf.random_normal_initializer(stddev=h_sigma_st_stddev))
# sigma_st_layer = tf.nn.softplus(tf.add(tf.matmul(layer_fc4, h_sigma_st), b_sigma_st))
# with tf.device('/cpu:0'):
# tf.summary.histogram("weights", h_sigma_st)
# tf.summary.histogram("biases", b_sigma_st)
# tf.summary.histogram("activations", sigma_st_layer)
# sigma_st_layer = tf.reshape(sigma_st_layer, [-1, batch_x_shape[0], n_mixture])
# sigma_st_layer = tf.transpose(sigma_st_layer, [1, 0, 2])
# # shape of [batch_size, time, n_dim]
# ###############################################################
# with tf.name_scope('fc_rho_lat'):
# # rho_latlon
# # Now we feed `outputs` to the fifth hidden layer with clipped RELU activation and dropout
# b_rho_lat = variable_on_gpu('b_rho_lat', [n_mixture], tf.random_normal_initializer(stddev=b_rho_lat_stddev))
# h_rho_lat = variable_on_gpu('h_lat', [n_hidden_4, n_mixture], tf.random_normal_initializer(stddev=h_rho_lat_stddev))
# rho_lat_layer = tf.nn.tanh(tf.add(tf.matmul(layer_fc4, h_rho_lat), b_rho_lat))
# with tf.device('/cpu:0'):
# tf.summary.histogram("weights", h_rho_lat)
# tf.summary.histogram("biases", b_rho_lat)
# tf.summary.histogram("activations", rho_lat_layer)
# rho_lat_layer = tf.reshape(rho_lat_layer, [-1, batch_x_shape[0], n_mixture])
# rho_lat_layer = tf.transpose(rho_lat_layer, [1, 0, 2])
# # shape of [batch_size, time, n_dim]
# with tf.name_scope('fc_rho_lon'):
# # rho_lonalt
# b_rho_lon = variable_on_gpu('b_rho_lon', [n_mixture], tf.random_normal_initializer(stddev=b_rho_lon_stddev))
# h_rho_lon = variable_on_gpu('h_lon', [n_hidden_4, n_mixture], tf.random_normal_initializer(stddev=h_rho_lon_stddev))
# rho_lon_layer = tf.nn.tanh(tf.add(tf.matmul(layer_fc4, h_rho_lon), b_rho_lon))
# with tf.device('/cpu:0'):
# tf.summary.histogram("weights", h_rho_lon)
# tf.summary.histogram("biases", b_rho_lon)
# tf.summary.histogram("activations", rho_lon_layer)
# rho_lon_layer = tf.reshape(rho_lon_layer, [-1, batch_x_shape[0], n_mixture])
# rho_lon_layer = tf.transpose(rho_lon_layer, [1, 0, 2])
# # shape of [batch_size, time, n_dim]
# with tf.name_scope('fc_rho_alt'):
# # rho_latalt
# b_rho_alt = variable_on_gpu('b_rho_alt', [n_mixture], tf.random_normal_initializer(stddev=b_rho_alt_stddev))
# h_rho_alt = variable_on_gpu('h_alt', [n_hidden_4, n_mixture], tf.random_normal_initializer(stddev=h_rho_alt_stddev))
# rho_alt_layer = tf.nn.tanh(tf.add(tf.matmul(layer_fc4, h_rho_alt), b_rho_alt))
# with tf.device('/cpu:0'):
# tf.summary.histogram("weights", h_rho_alt)
# tf.summary.histogram("biases", b_rho_alt)
# tf.summary.histogram("activations", rho_alt_layer)
# rho_alt_layer = tf.reshape(rho_alt_layer, [-1, batch_x_shape[0], n_mixture])
# rho_alt_layer = tf.transpose(rho_alt_layer, [1, 0, 2])
# # shape of [batch_size, time, n_dim]
# # with tf.device('/cpu:0'):
# summary_op = tf.summary.merge_all()
# Output shape: [batch_size, n_steps, n_hidden_6]
# return end_layer, pi_layer, mu_lat_layer, mu_lon_layer, mu_alt_layer, mu_st_layer, sigma_lat_layer, sigma_lon_layer, sigma_alt_layer, sigma_st_layer, rho_lat_layer, rho_lon_layer, rho_alt_layer, summary_op
| 3
| 3
|
lib/upy2/dependency.py
|
friedrichromstedt/upy
| 3
|
12785485
|
<reponame>friedrichromstedt/upy
# Developed since: Feb 2010
import numpy
__all__ = ['Dependency']
class Dependency(object):
""" The class :class:`Dependency` represents the dependence of an
uncertain quantity on uncertainty sources of unity variance by a
derivative. When the :attr:`derivative` is real-valued, the
induced variance of the quantity is equal to ``derivative ** 2``.
For non-real derivatives, an induced variance of the quantity
cannot be given.
A single :class:`Dependency` can store *one* dependency per
element. The uncertainty sources are identified by integers
called *names*. The name ``0`` represents the *absent* dependency
of the respective element. Dependencies can be combined by means
of :meth:`add`: the argument Dependency is incorporated into
the Dependency whose :meth:`add` is called as far as possible, by
filling elements with zero name and by adding derivatives of
elements with matching name; :meth:`add` returns the remnant
Dependency, with all used-up elements cleared.
Dependencies can be *multiplied* and *masked*, and a range of
``ndarray`` methods is supported. """
def __init__(self,
names=None, derivatives=None,
shape=None, dtype=None):
""" A ``Dependency`` can be initialised in two ways:
1. Providing its *shape*; or
2. specifying *names* and *derivatives*.
When both *names* as well as *derivatives* aren't ``None``,
they will both be transformed into ndarrays, where *dtype* is
used for the derivatives ndarray. When their shapes are
different, a ``ValueError`` will be raised. If *dtype* is
``None``, the dtype of the derivatives ndarray won't be
overridden. The dtype of the *names* array *never* will be
overridden.
When at least one of *names* and *derivatives* is ``None``,
*shape* will be used to provide an empty Dependency of the
given *dtype* (with all names set to zero and with zero
derivatives). In this case, the *names* will have the
default NumPy integer dtype (``numpy.int_``).
In all other cases, the Dependency cannot be initialised
and ``ValueError`` will be raised. """
if names is not None and derivatives is not None:
self.names = numpy.asarray(names)
self.derivatives = numpy.asarray(derivatives, dtype=dtype)
if self.names.shape != self.derivatives.shape:
raise ValueError(
'Shape mismatch in initialising a '
'Dependency: names.shape = {0}, derivatives.'
'shape = {1}'.format(
self.names.shape, self.derivatives.shape))
elif shape is not None:
self.names = numpy.zeros(shape, dtype=numpy.int_)  # ``numpy.int_`` instead of the removed ``numpy.int`` alias
self.derivatives = numpy.zeros(shape, dtype=dtype)
# leaving *dtype* ``None`` leads to a derivatives
# ndarray with the default dtype (``numpy.float64``).
else:
raise ValueError("Dependency: Unable to initialise from "
"the arguments provided")
self.shape = self.derivatives.shape
self.dtype = self.derivatives.dtype
self.ndim = self.derivatives.ndim
def is_empty(self):
""" Returns whether all elements of *self.names* are equal to
zero. This means, that the Dependency does not induce any
uncertainty. """
return not self.names.any()
def is_nonempty(self):
""" Returns whether any alements of *self.names* aren't equal
to zero. In this case, the Dependency induces some
uncertainty. """
return self.names.any()
#
# Obtaining the variances ...
#
@property
def variance(self):
""" When *self.derivatives* is real-valued, this method
returns the variance ``self.derivatives ** 2`` induced by this
Dependency for elements with nonzero *name*; the variance
returned is masked out to zero for elements with zero name.
For non-real derivatives, no such variance can be given. """
if not numpy.isrealobj(self.derivatives):
# It might be complex.
raise ValueError(
'Refusing to calculate the variance of a non-real '
'Dependency')
return (self.names != 0) * self.derivatives ** 2
#
# Complex numbers ...
#
@property
def real(self):
""" Returns the real part of this Dependency. Both the names
as well as the real part of *self.derivatives* will be copied. """
return Dependency(
names=self.names.copy(),
derivatives=self.derivatives.real.copy(),
# ``array.real`` returns a *view*::
#
# >>> z = numpy.asarray(1 + 1j)
# >>> r = z.real
# >>> z[()] = 2 + 1j
# >>> r
# array(2.0)
)
@property
def imag(self):
""" Returns the imaginary part of this Dependency. """
return Dependency(
names=self.names.copy(),
derivatives=self.derivatives.imag.copy(),
)
def conj(self):
""" Returns the complex conjugate. """
return Dependency(
names=self.names.copy(),
derivatives=self.derivatives.conj(),
# This copies the real component.
)
conjugate = conj
# :func:`numpy.conj` looks for :attr:`conjugate`, not
# :attr:`conj`.
#
# Binary arithmetics ...
#
def add(self, other, key=None):
""" This method incorporates another ``Dependency`` *other*
into *self* in-place as far as possible by:
1. Filling elements of *self* with zero name by elements of
*other* (replacing both the *name* as well as the
*derivative*);
2. adding the derivatives of *other* to elements of *self*
with matching name.
Returned is a copy of *other* with used-up elements masked
out.
When *key* is given, *other* will be added to the sub arrays
of *self.names* and *self.derivatives* indexed by *key*.
The *other* needs to be broadcastable to the shape of *self*
indexed by *key*. The ``Dependency`` returned will *always*
have this shape. """
if key is None:
# Index everything.
key = ()
# We do not apply shape checking whether the part of *self*
# indexed by *key* has the same shape as *other*. This
# permits broadcasting of *other*.
# First, add on same name ...
matching_mask = (self.names[key] == other.names)
# This might involve broadcasting of ``other.names``.
self.derivatives[key] += matching_mask * other.derivatives
# If the shape of ``matching_mask * other.derivatives`` is
# too large, numpy will complain. In all other cases, the
# result of ``matching_mask * other.derivatives`` will fit
# ``self.derivatives[key]``.
# Mark the cells as used.
other = other & (1 - matching_mask)
# From now on, *other* has the shape of ``(1 -
# matching_mask)``, which is identical to the shape of
# ``self[key]``. The ``&`` operation might involve
# broadcasting inside of ``__and__``.
# Second, try to fill empty space ...
#
# An element is *empty* when its *name* is *zero*.
empty_mask = (self.names[key] == 0)
other_filled_mask = (other.names != 0)
fillin_mask = empty_mask * other_filled_mask
self.names[key] += fillin_mask * other.names
self.derivatives[key] += fillin_mask * other.derivatives
# Do use augmented assignment ``+=`` because portions
# where the augmenting arrays are zero are to be preserved
# *without change*.
# Mark the cells as used.
other = other & (1 - fillin_mask)
# Finished processing *other*.
return other
# The *other* is, now, of the same shape as ``self[key]``,
# since the ``&`` operation above has been carried out.
def __and__(self, mask):
""" Returns a copy of *self* where names and derivatives are
masked by *mask*: Parts of self's names and derivatives where
*mask* is zero are returned zero. """
return Dependency(
names=(self.names * mask),
derivatives=(self.derivatives * mask),
)
def __mul__(self, other):
""" Returns a copy of *self* with the derivatives set to the
product of ``self.derivatives`` and *other*.
The shapes of *self* and *other* need not be equal as long as
they can be broadcast. The :attr:`names` ndarray of *self*
will be broadcast to the result shape as well. """
result_derivatives = self.derivatives * other
(bc_names, bc_derivatives) = numpy.broadcast_arrays(
self.names, result_derivatives)
# The shape of *bc_derivatives* will always be equal to the
# shape of *result_derivatives*, since *self.derivatives* and
# *self.names* have equal shape. As a safety measure, we
# assert this fact:
assert(bc_derivatives.shape == result_derivatives.shape)
# With this assertion, we can skip copying *bc_derivatives* by
# means of ``numpy.array``, since all elements refer to a
# unique memory location. This does not hold necessarily for
# *bc_names*, so we copy *bc_names*. Copying *bc_names* is a
# necessity anyhow to avoid crosstalk. *result_derivatives*
# already satisfies this requirement.
return Dependency(
names=numpy.array(bc_names),
derivatives=bc_derivatives,
)
# Reverse multiplication is unsupported. It would not work with
# ndarrays as first operand (see 228ad14).
# Augmented arithmetics will be emulated by using standard
# arithmetics.
#
# Keying methods ...
#
def __getitem__(self, key):
""" Returns a new Dependency with *key* applied both to the
:attr:`derivatives` as well as to the :attr:`names` of *self*.
The results will be copied. """
return Dependency(
names=self.names[key].copy(),
derivatives=self.derivatives[key].copy(),
)
def clear(self, key):
""" Set *self.names* and *self.derivatives* to zero at the
positions indexed by *key*. """
self.names[key] = 0
self.derivatives[key] = 0
def __len__(self):
return self.shape[0]
#
# ndarray methods ...
#
def copy(self):
""" Returns a Dependency constructed from copies of the names
and derivatives of *self*. """
return Dependency(
names=self.names.copy(),
derivatives=self.derivatives.copy())
def compress(self, *compress_args, **compress_kwargs):
""" Returns a Dependency constructed from the *compressed*
names and derivatives of *self*. """
# :meth:`ndarray.compress` returns a copy by itself.
return Dependency(
names=self.names.compress(
*compress_args, **compress_kwargs),
derivatives=self.derivatives.compress(
*compress_args, **compress_kwargs))
def flatten(self, *flatten_args, **flatten_kwargs):
""" Returns a Dependency constructed from the *flattened*
names and derivatives of *self*. """
# :meth:`ndarray.flatten` returns a copy by itself.
return Dependency(
names=self.names.flatten(
*flatten_args, **flatten_kwargs),
derivatives=self.derivatives.flatten(
*flatten_args, **flatten_kwargs))
# Notice that :meth:`ndarray.ravel` returns a copy *only if
# needed*, just as :func:`numpy.ravel` does, while
# :meth:`ndarray.flatten` returns a copy *always*. Notice also,
# that there is no :func:`numpy.flatten`.
#
# Notice further, that :func:`numpy.ravel` does not make use of a
# :meth:`ravel` of the operand provided; instead, it returns a
# ``dtype=object`` array always.
def repeat(self, *repeat_args, **repeat_kwargs):
""" Returns a Dependency constructed from the *repeated* names
and derivatives of *self*. """
# It appears that :meth:`ndarray.repeat` returns a copy
# *always*.
return Dependency(
names=self.names.repeat(
*repeat_args, **repeat_kwargs),
derivatives=self.derivatives.repeat(
*repeat_args, **repeat_kwargs))
def reshape(self, *reshape_args, **reshape_kwargs):
""" Returns a Dependency constructed from the *reshaped* names
and derivatives of *self*. The results will be copied. """
return Dependency(
names=self.names.reshape(
*reshape_args, **reshape_kwargs).copy(),
derivatives=self.derivatives.reshape(
*reshape_args, **reshape_kwargs).copy())
def transpose(self, *transpose_args, **transpose_kwargs):
""" Returns a Dependency constructed from the *transposed*
names and derivatives of *self*. The results will be copied.
"""
return Dependency(
names=self.names.transpose(
*transpose_args, **transpose_kwargs).copy(),
derivatives=self.derivatives.transpose(
*transpose_args, **transpose_kwargs).copy())
#
# String conversion ...
#
def __repr__(self):
return "<{shape}-shaped {dtype}-typed Dependency>".format(
shape=self.shape, dtype=self.dtype)
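if __name__ == '__main__':
    # Hedged usage sketch (added for illustration, not part of upy2): combine two
    # Dependencies and inspect the induced variance. Names identify uncertainty
    # sources; the name 0 marks an element without any dependency.
    a = Dependency(names=numpy.asarray([1, 0]),
                   derivatives=numpy.asarray([0.5, 0.0]))
    b = Dependency(names=numpy.asarray([1, 2]),
                   derivatives=numpy.asarray([0.25, 2.0]))
    remnant = a.add(b)          # adds on the matching name 1, fills the empty slot with name 2
    print(a.names)              # -> [1 2]
    print(a.derivatives)        # -> [0.75 2.  ]
    print(a.variance)           # -> [0.5625 4.    ]
    print(remnant.is_empty())   # -> True: everything was absorbed into `a`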
| 3.28125
| 3
|
osnovno.py
|
kopriveclucija/PROJEKTNA
| 0
|
12785486
|
import json
class Model:
def __init__(self, zacetni_seznam_nalog, tema=''):
self.naloge = zacetni_seznam_nalog
self.aktualna_naloga = None
self.tema = tema
def dodaj_novo_nalogo(self, naloga):
self.naloge.append(naloga)
def v_slovar(self):
seznam_nalog = [
naloga.v_slovar() for naloga in self.naloge
]
return {
"naloge": seznam_nalog,
"tema": self.tema,
}
def stevilo_vseh_nalog(self):
return len(self.naloge)
@staticmethod
def iz_slovarja(slovar):
sez = [
Naloga.iz_slovarja(sl_naloga) for sl_naloga in slovar["naloge"]
]
return Model(
sez,
slovar["tema"],
)
def shrani_v_datoteko(self, ime_datoteke='stanje.json'):
with open(ime_datoteke, "w") as dat:
slovar = self.v_slovar()
json.dump(slovar, dat)
@staticmethod
def preberi_iz_datoteke(ime_datoteke='stanje.json'):
with open(ime_datoteke) as dat:
slovar = json.load(dat)
return Model.iz_slovarja(slovar)
def preveri_podatke_nove_naloge(self, ime):
napake = {}
if not ime:
napake['ime'] = 'Ime ne sme biti prazno!'
elif len(ime) > 20:
napake['ime'] = 'Ime lahko vsebuje najvec 20 znakov.'
return napake
@staticmethod
def naredi_svezega():
n1 = Naloga("Napoleon", "Kdaj se je rodil?", "15.8.1769")
n2 = Naloga('New York', "Kje lezi?", "Severna Amerika")
n3 = Naloga('Olimpijske igre',
"Kdo je osvoji zlato medaljo za Slovenijo?", "<NAME>")
n4 = Naloga(
'You Tube', "Kako je ime prvemu videu objavlenemu na You Tubu?", "Me at the ZOO")
n5 = Naloga('Kardashianovi', "Koliko otrok ima <NAME>?", "6")
n6 = Naloga('Ameriski predsedniki',
"Kako je bilo ima prvemu ameriskemu predsedniku?", "<NAME>")
seznam = [n1, n2, n3, n4, n5, n6]
m = Model(seznam, "test")
return m
class Naloga:
def __init__(self, ime, besedilo, pravilna_resitev, moja_resitev=None):
self.ime = ime
self.besedilo = besedilo
self.pravilna_resitev = pravilna_resitev
self.moja_resitev = moja_resitev
def naloga_je_resena(self):
return self.moja_resitev and self.pravilna_resitev.lower().strip() == self.moja_resitev.lower().strip()
def v_slovar(self):
return {
"ime": self.ime,
"besedilo": self.besedilo,
"pravilna resitev": self.pravilna_resitev,
"moja resitev": self.moja_resitev,
}
@staticmethod
def iz_slovarja(slovar):
return Naloga(
slovar["ime"],
slovar["besedilo"],
slovar["pravilna resitev"],
slovar["moja resitev"],
)
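if __name__ == '__main__':
    # Hedged usage sketch (added for illustration, not part of the original module):
    # build the demo model, answer one task and round-trip the state through a JSON
    # file. The file name 'stanje_demo.json' is an arbitrary assumption.
    model = Model.naredi_svezega()
    prva = model.naloge[0]
    prva.moja_resitev = '15.8.1769'
    print(prva.naloga_je_resena())      # -> True
    model.shrani_v_datoteko('stanje_demo.json')
    kopija = Model.preberi_iz_datoteke('stanje_demo.json')
    print(kopija.stevilo_vseh_nalog())  # -> 6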
| 2.734375
| 3
|
Email_counter.py
|
farhan1503001/Database-with-Python-Coursera.
| 0
|
12785487
|
<gh_stars>0
import sqlite3
#Importing the database
connector=sqlite3.connect('email_db.sqlite')
#Initiating the cursor
curr=connector.cursor()
#Drop table if count exists
curr.execute('DROP TABLE IF EXISTS Counts')
#Now create table count
curr.execute('create table Counts (org TEXT, count INTEGER)')
#now open our file
fh = open('mbox.txt')
for line in fh:
if not line.startswith('From: '): continue
pieces=line.split()
org=pieces[1].split(sep='@')[-1]
#org=temp[-2]+temp[-1]
curr.execute('select * from Counts where org =?',(org,))
row=curr.fetchone()
if row is None:
curr.execute('Insert into Counts(org,count) values (?,1)',(org,))
else:
curr.execute('Update Counts set count=count+1 where org=?',(org,))
connector.commit()
sqlstr = 'SELECT org, count FROM Counts ORDER BY count DESC LIMIT 10'
for row in curr.execute(sqlstr):
print(str(row[0]), row[1])
curr.close()
| 3.234375
| 3
|
src/outpost/django/campusonline/api.py
|
medunigraz/outpost.django.campusonline
| 0
|
12785488
|
from django.utils import timezone
from django_filters.rest_framework import DjangoFilterBackend
from drf_haystack.viewsets import HaystackViewSet
from rest_flex_fields.views import FlexFieldsMixin
from rest_framework.permissions import AllowAny, IsAuthenticated
from rest_framework.viewsets import ReadOnlyModelViewSet
from rest_framework_extensions.cache.mixins import CacheResponseMixin
from outpost.django.api.permissions import ExtendedDjangoModelPermissions
from outpost.django.base.decorators import docstring_format
from . import filters, key_constructors, models, serializers
# from rest_framework_extensions.mixins import (
# CacheResponseAndETAGMixin,
# )
# from rest_framework_extensions.cache.mixins import (
# CacheResponseMixin,
# )
class RoomCategoryViewSet(CacheResponseMixin, ReadOnlyModelViewSet):
queryset = models.RoomCategory.objects.all()
serializer_class = serializers.RoomCategorySerializer
object_cache_key_func = key_constructors.PersonKeyConstructor()
list_cache_key_func = key_constructors.PersonKeyConstructor()
permission_classes = (AllowAny,)
class RoomViewSet(ReadOnlyModelViewSet):
queryset = models.Room.objects.all()
serializer_class = serializers.RoomSerializer
permission_classes = (AllowAny,)
filter_fields = ("category",)
class FloorViewSet(ReadOnlyModelViewSet):
queryset = models.Floor.objects.all()
serializer_class = serializers.FloorSerializer
permission_classes = (AllowAny,)
class BuildingViewSet(ReadOnlyModelViewSet):
queryset = models.Building.objects.all()
serializer_class = serializers.BuildingSerializer
permission_classes = (AllowAny,)
@docstring_format(
model=models.Function.__doc__,
filter=filters.FunctionFilter.__doc__,
serializer=serializers.FunctionSerializer.__doc__,
)
class FunctionViewSet(FlexFieldsMixin, ReadOnlyModelViewSet):
"""
List organizational functions from CAMPUSonline.
{model}
{filter}
{serializer}
"""
queryset = models.Function.objects.all()
serializer_class = serializers.FunctionSerializer
filter_backends = (DjangoFilterBackend,)
filter_class = filters.FunctionFilter
permission_classes = (IsAuthenticated,)
permit_list_expands = ("persons",)
@docstring_format(
model=models.Organization.__doc__,
filter=filters.OrganizationFilter.__doc__,
serializer=serializers.OrganizationSerializer.__doc__,
)
class OrganizationViewSet(FlexFieldsMixin, ReadOnlyModelViewSet):
"""
List organizations from CAMPUSonline.
{model}
{filter}
{serializer}
"""
queryset = models.Organization.objects.all()
serializer_class = serializers.OrganizationSerializer
filter_backends = (DjangoFilterBackend,)
filter_class = filters.OrganizationFilter
permission_classes = (AllowAny,)
permit_list_expands = ("persons", "persons_leave", "publication_authorship")
def get_serializer_class(self):
if self.request.user and self.request.user.is_authenticated():
return serializers.AuthenticatedOrganizationSerializer
else:
return self.serializer_class
def get_serializer_context(self):
return {"request": self.request}
@docstring_format(
model=models.Person.__doc__,
filter=filters.PersonFilter.__doc__,
serializer=serializers.PersonSerializer.__doc__,
)
class PersonViewSet(FlexFieldsMixin, ReadOnlyModelViewSet):
"""
List staff accounts from CAMPUSonline.
{model}
{filter}
{serializer}
"""
queryset = models.Person.objects.all()
serializer_class = serializers.PersonSerializer
filter_backends = (DjangoFilterBackend,)
filter_class = filters.PersonFilter
permission_classes = (AllowAny,)
permit_list_expands = (
"functions",
"organizations",
"organizations_leave",
"classifications",
"expertise",
"knowledge",
"education",
)
def get_serializer_class(self):
if self.request.user and self.request.user.is_authenticated():
return serializers.AuthenticatedPersonSerializer
else:
return self.serializer_class
def get_serializer_context(self):
return {"request": self.request}
def get_queryset(self):
qs = super().get_queryset()
if self.request.user and self.request.user.is_authenticated():
return qs
else:
return qs.filter(employed=True)
@docstring_format(
model=models.Student.__doc__,
filter=filters.StudentFilter.__doc__,
serializer=serializers.StudentSerializer.__doc__,
)
class StudentViewSet(ReadOnlyModelViewSet):
"""
List student accounts from CAMPUSonline.
{model}
{filter}
{serializer}
"""
queryset = models.Student.objects.all()
serializer_class = serializers.StudentSerializer
filter_backends = (DjangoFilterBackend,)
filter_class = filters.StudentFilter
permission_classes = (IsAuthenticated,)
def get_serializer_class(self):
if self.request.user and self.request.user.is_authenticated():
return serializers.AuthenticatedStudentSerializer
else:
return self.serializer_class
@docstring_format(filter=filters.PersonOrganizationFunctionFilter.__doc__)
class PersonOrganizationFunctionViewSet(ReadOnlyModelViewSet):
"""
Map person to organizational unit and function through CAMPUSonline.
{filter}
"""
queryset = models.PersonOrganizationFunction.objects.all()
serializer_class = serializers.PersonOrganizationFunctionSerializer
filter_backends = (DjangoFilterBackend,)
filter_class = filters.PersonOrganizationFunctionFilter
permission_classes = (IsAuthenticated,)
@docstring_format(
model=models.DistributionList.__doc__,
filter=filters.DistributionListFilter.__doc__,
serializer=serializers.DistributionListSerializer.__doc__,
)
class DistributionListViewSet(
CacheResponseMixin, FlexFieldsMixin, ReadOnlyModelViewSet
):
"""
List distribution lists from CAMPUSonline.
{model}
{filter}
{serializer}
"""
queryset = models.DistributionList.objects.all()
serializer_class = serializers.DistributionListSerializer
object_cache_key_func = key_constructors.DistributionListKeyConstructor()
list_cache_key_func = key_constructors.DistributionListKeyConstructor()
filter_backends = (DjangoFilterBackend,)
filter_class = filters.DistributionListFilter
permission_classes = (IsAuthenticated,)
permit_list_expands = ("persons", "students")
@docstring_format(model=models.Event.__doc__, filter=filters.EventFilter.__doc__)
class EventViewSet(ReadOnlyModelViewSet):
"""
List events from CAMPUSonline.
{model}
{filter}
"""
queryset = models.Event.objects.all()
serializer_class = serializers.EventSerializer
filter_backends = (DjangoFilterBackend,)
filter_class = filters.EventFilter
permission_classes = (AllowAny,)
def get_queryset(self):
return self.queryset.filter(show_end__gte=timezone.now())
class CourseGroupTermViewSet(ReadOnlyModelViewSet):
queryset = models.CourseGroupTerm.objects.all()
serializer_class = serializers.CourseGroupTermSerializer
filter_backends = (DjangoFilterBackend,)
filter_class = filters.CourseGroupTermFilter
permission_classes = (IsAuthenticated,)
@docstring_format(model=models.Bulletin.__doc__, filter=filters.BulletinFilter.__doc__)
class BulletinViewSet(ReadOnlyModelViewSet):
"""
List official bulletins from CAMPUSonline.
{model}
{filter}
"""
queryset = models.Bulletin.objects.all()
serializer_class = serializers.BulletinSerializer
filter_backends = (DjangoFilterBackend,)
filter_class = filters.BulletinFilter
@docstring_format(
model=models.BulletinPage.__doc__, filter=filters.BulletinPageFilter.__doc__
)
class BulletinPageViewSet(ReadOnlyModelViewSet):
"""
List official bulletin pages with extracted text from CAMPUSonline.
{model}
{filter}
"""
queryset = models.BulletinPage.objects.all()
serializer_class = serializers.BulletinPageSerializer
filter_backends = (DjangoFilterBackend,)
filter_class = filters.BulletinPageFilter
permission_classes = (AllowAny,)
class BulletinPageSearchViewSet(HaystackViewSet):
index_models = [models.BulletinPage]
serializer_class = serializers.BulletinPageSearchSerializer
permission_classes = (AllowAny,)
@docstring_format(
model=models.FinalThesis.__doc__,
filter=filters.FinalThesisFilter.__doc__,
serializer=serializers.FinalThesisSerializer.__doc__,
)
class FinalThesisViewSet(FlexFieldsMixin, ReadOnlyModelViewSet):
"""
List final thesis from CAMPUSonline.
{model}
{filter}
{serializer}
"""
queryset = models.FinalThesis.objects.all()
serializer_class = serializers.FinalThesisSerializer
filter_backends = (DjangoFilterBackend,)
filter_class = filters.FinalThesisFilter
permission_classes = (IsAuthenticated,)
permit_list_expands = ("author", "tutor", "organization")
| 1.851563
| 2
|
dev/models/ffn/conv_ffn.py
|
michaelwiest/microbiome_rnn
| 0
|
12785489
|
from __future__ import print_function
import torch.autograd as autograd
from torch.autograd import Variable
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
import random
import numpy as np
import sys
import os
from ffn import FFN
sys.path.insert(1, os.path.join(sys.path[0], '../..'))
from helpers.model_helper import *
import csv
class ConvFFN(FFN):
'''
This subclass inherits from the FFN class and
adds 1d convolution over the input time dimension.
'''
def __init__(self, hidden_dim, otu_handler, slice_len,
use_gpu=False):
super(ConvFFN, self).__init__(hidden_dim, otu_handler,
slice_len,
use_gpu=use_gpu)
self.conv_element = nn.Sequential(
nn.Conv1d(self.otu_handler.num_strains, hidden_dim,
kernel_size=4, stride=2, padding=3),
nn.ReLU(),
nn.Conv1d(self.hidden_dim, self.hidden_dim,
kernel_size=2, stride=1, padding=1),
nn.ReLU(),
nn.Conv1d(self.hidden_dim, self.hidden_dim,
kernel_size=2, stride=1, padding=1),
nn.ReLU(),
nn.Conv1d(self.hidden_dim, self.hidden_dim,
kernel_size=2, stride=1, padding=1),
nn.ReLU(),
nn.Conv1d(self.hidden_dim, self.hidden_dim,
kernel_size=2, stride=1, padding=1),
nn.ReLU(),
nn.Conv1d(self.hidden_dim, self.hidden_dim,
kernel_size=1, stride=1, padding=0),
nn.ReLU(),
)
self.time_transformer = nn.Sequential(
nn.Linear(hidden_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, hidden_dim),
nn.ReLU()
)
self.deconv_element = nn.Sequential(
nn.ConvTranspose1d(self.hidden_dim, self.hidden_dim, kernel_size=1, stride=1, padding=0),
nn.ReLU(),
nn.ConvTranspose1d(self.hidden_dim, self.hidden_dim,
kernel_size=2, stride=1, padding=1),
nn.ReLU(),
nn.ConvTranspose1d(self.hidden_dim, self.hidden_dim,
kernel_size=2, stride=1, padding=1),
nn.ReLU(),
nn.ConvTranspose1d(self.hidden_dim, self.hidden_dim,
kernel_size=2, stride=1, padding=1),
nn.ReLU(),
nn.ConvTranspose1d(self.hidden_dim, self.hidden_dim,
kernel_size=2, stride=1, padding=1),
nn.ReLU(),
nn.ConvTranspose1d(self.hidden_dim, self.otu_handler.num_strains,
kernel_size=4, stride=2, padding=3),
nn.ReLU()
)
# self.lin_final = nn.Linear(self.otu_handler.num_strains,
# self.otu_handler.num_strains)
def forward(self, data):
# data is shape: sequence_size x batch x num_strains
data = data.transpose(0, 1).transpose(1, 2)
# print(data.size())
data = self.conv_element(data)
# print(data.size())
data = data.transpose(0, 2).transpose(1, 2)
data = self.time_transformer(data)
# print(data.size())
data = self.deconv_element(data.transpose(0,1).transpose(1,2))
# print(data.size())
return data
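if __name__ == '__main__':
    # Hedged shape check (added for illustration; independent of FFN/otu_handler,
    # which are not shown here). The first conv layer halves the time axis with its
    # stride of 2 and the matching transposed conv restores it. All numbers below
    # are arbitrary assumptions.
    num_strains, hidden_dim, batch, seq_len = 8, 16, 4, 20
    conv = nn.Sequential(
        nn.Conv1d(num_strains, hidden_dim, kernel_size=4, stride=2, padding=3),
        nn.ReLU(),
    )
    deconv = nn.Sequential(
        nn.ConvTranspose1d(hidden_dim, num_strains, kernel_size=4, stride=2, padding=3),
        nn.ReLU(),
    )
    x = torch.randn(batch, num_strains, seq_len)  # [batch, strains, time]
    h = conv(x)        # -> [4, 16, 12]
    y = deconv(h)      # -> [4, 8, 20], time restored
    print(h.shape, y.shape)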
| 2.609375
| 3
|
fly_plot_lib/animate_matrix.py
|
ROB7-StayHumble/multi_tracker
| 0
|
12785490
|
import numpy as np
import cv2
import matplotlib
import matplotlib.pyplot as plt
import time
NAN = np.nan
def get_indices(x, y, xmesh, ymesh, radius=1, colors=None):
# pull out non NAN numbers only
x = x[np.isfinite(x)]
y = y[np.isfinite(y)]
ix = [np.argmin( np.abs( xmesh-xval ) ) for xval in x]
iy = [np.argmin( np.abs( ymesh-yval ) ) for yval in y]
'''
ix_enlarged = []
iy_enlarged = []
if colors is not None:
colors_enlarged = []
for n, i in enumerate(ix):
min_i = np.max([0, i-radius])
max_i = np.min([len(xmesh), i+radius])
a = np.arange(min_i, max_i)
ix_enlarged.extend(a)
if colors is not None:
colors_enlarged.extend([colors[n]]*len(a))
for i in iy:
min_i = np.max([0, i-radius])
max_i = np.min([len(ymesh), i+radius])
a = np.arange(min_i, max_i)
iy_enlarged.extend(a)
#if len(ix) == 1:
# return ix[0], iy[0]
#else:
if colors is None:
return ix_enlarged, iy_enlarged
else:
return ix_enlarged, iy_enlarged, colors_enlarged
'''
return ix, iy
def synchronize_frames(x, y, sync_frames, padval=NAN, colors=None, n_frames_before_sync_to_show='all'):
xsync = []
ysync = []
if colors is not None:
colors_sync = []
largest_sync_frame = np.max(sync_frames)
for i, xi in enumerate(x):
padding = [padval]*(largest_sync_frame - sync_frames[i])
xsync.append( np.hstack((padding, x[i])) )
ysync.append( np.hstack((padding, y[i])) )
if colors is not None:
colors_sync.append( np.hstack((padding, colors[i])) )
# pad back
lengths = [len(x) for x in xsync]
length_of_longest_sequence = np.max(lengths)
for i, xi in enumerate(xsync):
padding = [padval]*(length_of_longest_sequence - len(xi))
xsync[i] = np.hstack((xsync[i], padding))
ysync[i] = np.hstack((ysync[i], padding))
if colors is not None:
colors_sync[i] = np.hstack((colors_sync[i], padding))
if n_frames_before_sync_to_show != 'all':
first_frame = largest_sync_frame - n_frames_before_sync_to_show
for i, xi in enumerate(xsync):
xsync[i] = xsync[i][first_frame:]
ysync[i] = ysync[i][first_frame:]
if colors is not None:
colors_sync[i] = colors_sync[i][first_frame:]
if colors is None:
return xsync, ysync
else:
return xsync, ysync, colors_sync
def animate_matrix(x, y, colors=None, xlim=[0,1], ylim=[0,1], resolution=0.005, filename='', sync_frames=[], framerate=100, ghost_tail=20, radius=2, static_indices=[], static_color=[0,0,255], colormap='hot', colornorm=[0,1], n_frames_before_sync_to_show='all'):
xmesh = np.arange(xlim[0], xlim[1], resolution)
ymesh = np.arange(ylim[0], ylim[1], resolution)
mat = np.ones([len(ymesh), len(xmesh), 3], dtype=np.uint8)
mat *= 255
kernel = np.ones((5,5),np.uint8)
norm = matplotlib.colors.Normalize(colornorm[0], colornorm[1])
color_mappable = matplotlib.cm.ScalarMappable(norm, plt.get_cmap(colormap))
print('synchronizing trajectories')
if colors is None:
xsync, ysync = synchronize_frames(x, y, sync_frames, n_frames_before_sync_to_show=n_frames_before_sync_to_show)
xsync = np.array(xsync)
ysync = np.array(ysync)
else:
xsync, ysync, colors_sync = synchronize_frames(x, y, sync_frames, colors=colors, n_frames_before_sync_to_show=n_frames_before_sync_to_show)
xsync = np.array(xsync)
ysync = np.array(ysync)
colors_sync = np.array(colors_sync)
#this works:
#writer = cv2.VideoWriter(filename,cv.CV_FOURCC('P','I','M','1'),sampleRate,(panelsFrames.shape[1],panelsFrames.shape[0]),True) # works for Linux
# but this works better:
print('initializing writer')
writer = cv2.VideoWriter(filename, cv2.VideoWriter_fourcc(*'mp4v'), framerate, (mat.shape[1], mat.shape[0]), True) # works on Linux and Windows
print(filename)
nframes = len(xsync[0])
for frame in range(nframes):
s = str(frame) + ' of ' + str(nframes)
print(s)
mat[:,:,:] = 255
if len(static_indices) > 0:
c = [static_color for i in range(len(static_indices[0]))]
mat[static_indices[1], static_indices[0], :] = np.array(c)
first_frame = np.max([0, frame-ghost_tail])
last_frame = frame
if 1:
x = xsync[:, first_frame:last_frame]
y = ysync[:, first_frame:last_frame]
x = np.reshape(x, x.shape[0]*x.shape[1])
y = np.reshape(y, y.shape[0]*y.shape[1])
if colors is not None:
c = colors_sync[:, first_frame:last_frame]
c = np.reshape(c, c.shape[0]*c.shape[1])
rgba = color_mappable.to_rgba(c,bytes=True)
if colors is None:
indicesx, indicesy = get_indices(np.array(x), np.array(y), xmesh, ymesh, radius)
else:
indicesx, indicesy = get_indices(np.array(x), np.array(y), xmesh, ymesh, radius)
if len(indicesx) < 1:
continue
if colors is None:
mat[indicesy, indicesx, :] = 0
else:
rgba = rgba[np.isfinite(x)][:,[2,1,0]]
mat[indicesy, indicesx, :] = rgba
mat = cv2.erode(mat, kernel, iterations=radius)
# using uint8 for the values in the frame seems to work best. Also, I think rgb should be ordered bgr....
matflipped = np.array(np.flipud(mat))
writer.write(matflipped)
del(x)
del(y)
writer.release()
def animate_matrix_2views(x, y, z,
colors=None,
xlim=[0,1],
ylim=[0,1],
zlim=[0,1],
resolution=0.005,
filename='',
sync_frames=[],
framerate=100,
ghost_tail=20,
radius=2,
artist_function_xy=None,
artist_function_xz=None,
colormap='hot',
colornorm=[0,1],
n_frames_before_sync_to_show='all'):
def stack_mats(mat_xy, mat_xz):
# add border to mats
mat_xy[:,0,:] = 0
mat_xy[:,-1,:] = 0
mat_xy[0,:,:] = 0
mat_xy[-1,:,:] = 0
mat_xz[:,0,:] = 0
mat_xz[:,-1,:] = 0
mat_xz[0,:,:] = 0
mat_xz[-1,:,:] = 0
mat = np.vstack((mat_xy, mat_xz))
return mat
xmesh = np.arange(xlim[0], xlim[1], resolution)
ymesh = np.arange(ylim[0], ylim[1], resolution)
zmesh = np.arange(zlim[0], zlim[1], resolution)
mat_xy = np.ones([len(ymesh), len(xmesh), 3], dtype=np.uint8)
mat_xy *= 255
mat_xz = np.ones([len(zmesh), len(xmesh), 3], dtype=np.uint8)
mat_xz *= 255
kernel = np.ones((5,5),np.uint8)
norm = matplotlib.colors.Normalize(colornorm[0], colornorm[1])
color_mappable = matplotlib.cm.ScalarMappable(norm, plt.get_cmap(colormap))
print('synchronizing trajectories')
if colors is None:
xsync, ysync = synchronize_frames(x, y, sync_frames, n_frames_before_sync_to_show=n_frames_before_sync_to_show)
xsync, zsync = synchronize_frames(x, z, sync_frames, n_frames_before_sync_to_show=n_frames_before_sync_to_show)
xsync = np.array(xsync)
ysync = np.array(ysync)
zsync = np.array(zsync)
else:
xsync, ysync, colors_sync = synchronize_frames(x, y, sync_frames, colors=colors, n_frames_before_sync_to_show=n_frames_before_sync_to_show)
xsync, zsync, colors_sync = synchronize_frames(x, z, sync_frames, colors=colors, n_frames_before_sync_to_show=n_frames_before_sync_to_show)
xsync = np.array(xsync)
ysync = np.array(ysync)
zsync = np.array(zsync)
colors_sync = np.array(colors_sync)
#this works:
#writer = cv2.VideoWriter(filename,cv.CV_FOURCC('P','I','M','1'),sampleRate,(panelsFrames.shape[1],panelsFrames.shape[0]),True) # works for Linux
# but this works better:
print('initializing writer')
mat = stack_mats(mat_xy, mat_xz)
writer = cv2.VideoWriter(filename, cv2.VideoWriter_fourcc(*'mp4v'), framerate, (mat.shape[1], mat.shape[0]), True) # works on Linux and Windows
print(filename)
nframes = len(xsync[0])
for frame in range(nframes):
s = str(frame) + ' of ' + str(nframes)
print(s)
mat_xy[:,:,:] = 255
mat_xz[:,:,:] = 255
first_frame = np.max([0, frame-ghost_tail])
last_frame = frame
if 1:
x = xsync[:, first_frame:last_frame]
y = ysync[:, first_frame:last_frame]
z = zsync[:, first_frame:last_frame]
x = np.reshape(x, x.shape[0]*x.shape[1])
y = np.reshape(y, y.shape[0]*y.shape[1])
z = np.reshape(z, z.shape[0]*z.shape[1])
if colors is not None:
c = colors_sync[:, first_frame:last_frame]
c = np.reshape(c, c.shape[0]*c.shape[1])
rgba = color_mappable.to_rgba(c,bytes=True)
if colors is None:
indicesx, indicesy = get_indices(np.array(x), np.array(y), xmesh, ymesh, radius)
indicesx, indicesz = get_indices(np.array(x), np.array(z), xmesh, zmesh, radius)
else:
indicesx, indicesy = get_indices(np.array(x), np.array(y), xmesh, ymesh, radius)
indicesx, indicesz = get_indices(np.array(x), np.array(z), xmesh, zmesh, radius)
if len(indicesx) < 1:
continue
if colors is None:
mat_xy[indicesy, indicesx, :] = 0
mat_xz[indicesz, indicesx, :] = 0
else:
rgba = rgba[np.isfinite(x)][:,[2,1,0]]
mat_xy[indicesy, indicesx, :] = rgba
mat_xz[indicesz, indicesx, :] = rgba
mat_xy = cv2.erode(mat_xy, kernel, iterations=radius)
mat_xz = cv2.erode(mat_xz, kernel, iterations=radius)
if artist_function_xy is not None:
mat_xy = artist_function_xy(mat_xy)
if artist_function_xz is not None:
mat_xz = artist_function_xz(mat_xz)
mat = stack_mats(mat_xy, mat_xz)
matflipped = np.array(np.flipud(mat))
writer.write(matflipped)
del(x)
del(y)
del(z)
writer.release()
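if __name__ == '__main__':
    # Hedged usage sketch (added for illustration, not part of the original module):
    # animate two short synthetic trajectories into an mp4. The file name, limits
    # and frame rate are arbitrary assumptions.
    t = np.linspace(0, 2 * np.pi, 200)
    x = [0.5 + 0.3 * np.cos(t), 0.5 + 0.2 * np.cos(t + 1.0)]
    y = [0.5 + 0.3 * np.sin(t), 0.5 + 0.2 * np.sin(t + 1.0)]
    animate_matrix(x, y, filename='demo_trajectories.mp4',
                   sync_frames=[0, 0], framerate=30, ghost_tail=10)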
| 2.40625
| 2
|
polarimeter/plotter.py
|
malgorzatakim/polarimeter
| 0
|
12785491
|
<gh_stars>0
import matplotlib.pyplot as plt
from math import sqrt, ceil
class Plotter(object):
def __init__(self):
self.plots = []
def addPlot(self, data, axes, title):
self.plots.append((data, axes, title))
def show(self):
fig = plt.figure()
count = len(self.plots)
size = int(ceil(sqrt(count)))
for i in range(count):
ax = fig.add_subplot(size, size, i + 1)
plot = self.plots[i]
ax.plot(*plot[0])
plt.axis(plot[1])
plt.title(plot[2])
plt.show()
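if __name__ == '__main__':
    # Hedged usage sketch (added for illustration, not part of the original module):
    # two small plots arranged on one grid. Data and axis limits are arbitrary.
    import numpy as np
    xs = np.linspace(0, 10, 100)
    p = Plotter()
    p.addPlot((xs, np.sin(xs)), [0, 10, -1.2, 1.2], 'sin(x)')
    p.addPlot((xs, np.cos(xs)), [0, 10, -1.2, 1.2], 'cos(x)')
    p.show()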
| 3.28125
| 3
|
deblur/test/test_support_files.py
|
TaskeHAMANO/deblur
| 77
|
12785492
|
# -----------------------------------------------------------------------------
# Copyright (c) 2015, The Deblur Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from unittest import TestCase, main
from deblur.support_files import pos_db, neg_db
import os.path
class supportFilesTests(TestCase):
"""Test the supporting data files
"""
def test_reference_(self):
"""Test if the positive and negative filtering
reference fasta files exist
"""
# the positive filtering fasta file
self.assertTrue(os.path.isfile(pos_db))
# the negative filtering fasta file
self.assertTrue(os.path.isfile(neg_db))
if __name__ == '__main__':
main()
| 2.578125
| 3
|
pyconverter/converter.py
|
sibyjackgrove/py-power-electronic-converter
| 1
|
12785493
|
"""Class for converter."""
import numpy as np
import math
import cmath
import scipy
import logging
from scipy import signal
from scipy.integrate import odeint,ode
#from converter_utilities import plot_signal, plot_FFT
import converter_utilities
import config
from models import InverterModels
class PowerElectronicConverter:
"""
Converter base class.
Attributes:
count (int): Number of converter objects.
"""
count = 0 #Object count
def __init__(self,model_type):
"""Creates an instance of `Converter`.
Args:
model_type (str): Converter model type.
Raises:
AssertionError: If `model_type` is rejected by `check_model_type` in a subclass.
"""
PowerElectronicConverter.count = PowerElectronicConverter.count+1 #Increment count to keep track of number of converter model instances
self.name = 'converter_'+str(PowerElectronicConverter.count) #Generate a name for the instance
self.model_type = model_type
"""
if self.model_type is 'switching':
assert self.signal_type is 'square_wave' or self.signal_type is 'sinePWM', 'Switching model needs square or sine PWM as switching signal!'
if self.model_type is 'average':
assert self.signal_type is 'duty_cycle', 'Average model needs duty_cycle as switching signal!'
"""
def check_model_type(self,model_type):
"""Check if model type is valid."""
assert model_type in self.model_types, f'{model_type} is not a valid model type!'
def show_spec(self):
"""Print the specs."""
print('Model type:{}'.format(self.model_type))
print('Switching signal type:{}'.format(self.signal_type))
def calc_primary(self,signal):
"""Calculate the primary switch."""
assert isinstance(signal,bool), 'Switching signal must be boolean.'
Sprimary = int(signal)
return Sprimary
def calc_complimentary(self,signal):
"""Calculate the complimentary."""
assert isinstance(signal,bool), 'Switching signal must be boolean.'
Scomplimentary = int(not signal)
return Scomplimentary
def calc_average(self,m):
"""Calculate average voltage."""
return Vdc
#Current controller dynamics
class PowerElectronicInverter(PowerElectronicConverter,InverterModels):
"""
Inverter class.
Attributes:
():
"""
Rf = 0.01
Lf = 1.0e-3
Rload = 1.0
inverter_types = ['single_phase_half_bridge','single_phase_full_bridge',
'three_phase_full_bridge']
model_types = ['EMT_switching','EMT_average','dynamic_phasor']
def __init__(self,Vdc,model_type = 'EMT_average',inverter_type='single_phase_half_bridge'):
"""Creates an instance of `Converter`.
Args:
Vdc (float): DC link voltage.
Raises:
ValueError: To be added.
"""
self.check_model_type(model_type)
super().__init__(model_type) #Initialize converter class (base class)
self.update_Vdc(Vdc)
self.inverter_type =inverter_type
@property #Decorator used for auto updating
def y(self):
"""List of initial states"""
return [self.ia, 0.0]
def update_Vdc(self,Vdc):
"""Update DC link voltage."""
self.Vdc = Vdc
"""
def control_signal_calc(self,signals,t):
Calculate control signal.
if self.model_type is 'EMT_switching':
signals = self.switching_signal_calc(signals,t)
control_signal = signals['switching']
elif self.model_type is 'EMT_average':
signals = self.average_signal_calc(signals,t)
control_signal = signals['modulating']
elif self.model_type is 'dynamicphasor':
pass
return control_signal
"""
def setup_model(self):
"""Initialize mode."""
self.initialize_model()
self.vt_calc = self.select_vt_model()
self.vpcc_calc = self.select_vpcc_model()
self.ODE_model = self.select_ODE_model()
#self.control_signal_calc = self.select_control_signal()
def select_control_signal(self):
"""Select the control signal suitable for the problem."""
if self.model_type == 'EMT_switching':
if self.inverter_type == 'single_phase_half_bridge':
control_signal = self.switching_signal_single_phase
elif self.inverter_type == 'single_phase_full_bridge':
raise NotImplementedError(f'{self.inverter_type} is not implemented!')
elif self.model_type == 'EMT_average':
if self.inverter_type == 'single_phase_half_bridge':
control_signal = self.modulating_signal_single_phase
elif self.inverter_type == 'single_phase_full_bridge':
raise NotImplementedError(f'{self.inverter_type} is not implemented!')
elif self.model_type == 'dynamic_phasor':
if self.inverter_type == 'single_phase_half_bridge':
control_signal = self.phasor_signal_single_phase
elif self.inverter_type == 'single_phase_full_bridge':
raise NotImplementedError(f'{self.inverter_type} is not implemented!')
return control_signal
def select_vt_model(self):
"""Get the terminal voltage model."""
if self.model_type == 'EMT_switching':
if self.inverter_type == 'single_phase_half_bridge':
vt_model = self.single_phase_half_bridge_switching
elif self.inverter_type == 'single_phase_full_bridge':
vt_model = self.single_phase_full_bridge_switching
elif self.inverter_type == 'three_phase_full_bridge':
vt_model = self.three_phase_full_bridge_switching
else:
print(f'{self.inverter_type} not found for model type {self.model_type}!')
elif self.model_type == 'EMT_average':
if self.inverter_type == 'single_phase_half_bridge':
vt_model = self.single_phase_half_bridge_average
elif self.inverter_type == 'single_phase_full_bridge':
vt_model = self.single_phase_full_bridge_average
elif self.inverter_type == 'three_phase_full_bridge':
raise NotImplementedError(f'{self.inverter_type} is not implemented!')
else:
print(f'{self.inverter_type} not found for model type {self.model_type}!')
elif self.model_type == 'dynamic_phasor':
if self.inverter_type == 'single_phase_half_bridge':
vt_model = self.single_phase_half_bridge_phasor
elif self.inverter_type == 'single_phase_full_bridge':
vt_model = self.single_phase_full_bridge_phasor
elif self.inverter_type == 'three_phase_full_bridge':
raise NotImplementedError(f'{self.inverter_type} is not implemented!')
else:
print(f'{self.inverter_type} not found for model type {self.model_type}!')
print(type(vt_model))
return vt_model
def select_vpcc_model(self,grid=None):
"""Get the PCC voltage model."""
if not grid:
vpcc_model = self.v_load_model()
return vpcc_model
def select_ODE_model(self):
"""Select ODE model."""
if self.model_type == 'EMT_switching' or self.model_type == 'EMT_average':
if self.inverter_type == 'single_phase_half_bridge' or self.inverter_type == 'single_phase_full_bridge':
ODE_model = self.ODE_model_single_phase_EMT
elif self.inverter_type == 'three_phase_full_bridge':
raise NotImplementedError(f'{self.inverter_type} is not implemented!')
elif self.model_type == 'dynamic_phasor':
if self.inverter_type == 'single_phase_half_bridge' or self.inverter_type == 'single_phase_full_bridge':
ODE_model = self.ODE_model_single_phase_dynamicphasor
elif self.inverter_type == 'three_phase_full_bridge':
raise NotImplementedError(f'{self.inverter_type} is not implemented!')
return ODE_model
def initialize_model(self):
"""Initialize mode."""
if self.model_type is 'EMT_switching' or self.model_type is 'EMT_average':
if self.inverter_type is 'single_phase_half_bridge' or self.inverter_type is 'single_phase_full_bridge':
self.ia = 0.0
elif self.inverter_type is 'three_phase_full_bridge':
raise NotImplementedError
elif self.model_type is 'dynamic_phasor':
if self.inverter_type is 'single_phase_half_bridge' or self.inverter_type is 'single_phase_full_bridge':
self.iaR = 0.0
self.iaI = 0.0
if self.inverter_type is 'three_phase_full_bridge':
raise NotImplementedError
"""
def vta_calc(self,Vdc,control_signal):
Calculate inverter terminal voltage.
if self.model_type is 'switching':
vta = self.half_bridge_switching(Vdc,control_signal)
elif self.model_type is 'average':
vta = self.half_bridge_average(Vdc,control_signal)
return vta
"""
def v_load_model(self):
"""Calculate voltage across load at PCC."""
return self.Rload*self.ia
def ODE_model_switching(self,y,t):
"""ODE model of inverter branch."""
self.ia,dummy = y # unpack current values of y
Vdc = 100.0 #Get DC link voltage
switching_signal = self.control_signal_calc(t)
self.vta = self.half_bridge_switching(Vdc,switching_signal)
self.va = self.PCC_voltage_calc(self.ia,t)
dia = (1/self.Lf)*(-self.Rf*self.ia -self.va + self.vta)
result = [dia,dummy]
return np.array(result)
def ODE_model_average(self,y,t):
"""ODE model of inverter branch."""
self.ia,dummy = y # unpack current values of y
Vdc = 100.0 #Get DC link voltage
modulating_signal = self.control_signal_calc(t)
self.vta = self.half_bridge_average(Vdc,modulating_signal)
self.va = self.PCC_voltage_calc(self.ia,t)
dia = (1/self.Lf)*(-self.Rf*self.ia -self.va + self.vta)
result = [dia,dummy]
return np.array(result)
def power_calc(self,v,i):
"""Calcuate instantaneous power."""
return v*i
def show_states(self):
"""Show states."""
print('Inverter states:{}'.format(self.y))
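# Editor's sketch (hypothetical instance `inv`; the enclosing class is defined earlier in
# this file): the model_type / inverter_type strings select the callables used in simulation.
#   inv.model_type = 'EMT_average'
#   inv.inverter_type = 'single_phase_half_bridge'
#   vt_model = inv.select_vt_model()    # -> inv.single_phase_half_bridge_average
#   ode_model = inv.select_ODE_model()  # -> inv.ODE_model_single_phase_EMT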
| 3.03125
| 3
|
IFSensor/utils.py
|
andrevdl/IFSensor
| 0
|
12785494
|
<reponame>andrevdl/IFSensor
from enum import Flag, auto
from werkzeug.exceptions import BadRequest
from functools import wraps
from flask import request, jsonify
class CRUDMethods(Flag):
READ = auto()
CREATE = auto()
UPDATE = auto()
DELETE = auto()
SEARCH_WITHOUT_ID = auto()
SEARCH_BY_ID = auto()
SEARCH = SEARCH_WITHOUT_ID | SEARCH_BY_ID
READ_CREATE = READ | CREATE
READ_CREATE_UPDATE = READ | CREATE | UPDATE
ALL = READ | CREATE | UPDATE | DELETE
POST_EXECUTE = CREATE # For now, may change later
GET_EXECUTE = READ # For now, may change later
def is_flag_set(set, flag):
return (set & flag) == flag
def register_api(app, crud, view, endpoint, url, pk='id', pk_type='int'):
view_func = view.as_view(endpoint)
if is_flag_set(crud, CRUDMethods.SEARCH_WITHOUT_ID):
app.add_url_rule(url, view_func=view_func, methods=['GET',])
if is_flag_set(crud, CRUDMethods.READ) and not is_flag_set(crud, CRUDMethods.SEARCH_BY_ID):
app.add_url_rule(url, defaults={pk: None},
view_func=view_func, methods=['GET',])
if is_flag_set(crud, CRUDMethods.CREATE):
app.add_url_rule(url, view_func=view_func, methods=['POST',])
if is_flag_set(crud, CRUDMethods.READ) or is_flag_set(crud, CRUDMethods.SEARCH_BY_ID):
if is_flag_set(crud, CRUDMethods.UPDATE) and is_flag_set(crud, CRUDMethods.DELETE):
app.add_url_rule('%s<%s:%s>' % (url, pk_type, pk), view_func=view_func,
methods=['GET', 'PUT', 'DELETE'])
elif is_flag_set(crud, CRUDMethods.UPDATE):
app.add_url_rule('%s<%s:%s>' % (url, pk_type, pk), view_func=view_func,
methods=['GET', 'PUT'])
elif is_flag_set(crud, CRUDMethods.DELETE):
app.add_url_rule('%s<%s:%s>' % (url, pk_type, pk), view_func=view_func,
methods=['GET', 'DELETE'])
else:
app.add_url_rule('%s<%s:%s>' % (url, pk_type, pk), view_func=view_func,
methods=['GET'])
def validate_json(f):
@wraps(f)
def wrapper(*args, **kw):
try:
request.json.get
except Exception:
msg = "payload must be a valid json"
return jsonify({"error": msg}), 400
return f(*args, **kw)
return wrapper
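# Editor's sketch (hypothetical SensorAPI view and '/sensors/' URL, not part of the
# original module): how the CRUD flags map onto the routes that register_api creates.
if __name__ == '__main__':
    from flask import Flask
    from flask.views import MethodView
    class SensorAPI(MethodView):
        def get(self, id=None):
            return jsonify({"id": id})
        def post(self):
            return jsonify(request.json), 201
    demo_app = Flask(__name__)
    # READ_CREATE registers GET /sensors/ (id defaults to None), POST /sensors/ and GET /sensors/<int:id>
    register_api(demo_app, CRUDMethods.READ_CREATE, SensorAPI, 'sensor_api', '/sensors/')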
| 2.1875
| 2
|
todocli/tests/test_todo/test_commands.py
|
BalenD/TODO-cli
| 0
|
12785495
|
import pytest
from todocli.todo import commands
class TestCommandparser(object):
successful_user_input = ['-f', 'file1', '-e', '.py', '-m', ]
successfully_parsed_args = commands.command_interpreter(successful_user_input)
no_args_parsed_args = commands.command_interpreter([])
# successful run
def test_HasNamesAttribute(self):
assert hasattr(self.successfully_parsed_args, 'names')
def test_HasExtensionsAttribute(self):
assert hasattr(self.successfully_parsed_args, 'extensions')
def test_HasIsFolderAttribute(self):
assert hasattr(self.successfully_parsed_args, 'is_folder')
def test_HasNewConfigAttribute(self):
assert hasattr(self.successfully_parsed_args, 'new_config')
def test_FileNamePresent(self):
assert 'file1' in self.successfully_parsed_args.names
def test_ExtensionPresent(self):
assert '.py' in self.successfully_parsed_args.extensions
def test_IsFolderIsTrue(self):
assert self.successfully_parsed_args.is_folder == True
def test_NewConfigIsFalse(self):
assert self.successfully_parsed_args.new_config == False
    # no filename argument
def test_NoFileNameArguement(self):
assert self.no_args_parsed_args.names is None
# no extension argument
def test_NoExtensionsArgument(self):
assert self.no_args_parsed_args.extensions is None
# no is_folder argument
def test_NoIsFolderArguement(self):
assert self.no_args_parsed_args.is_folder is None
# no new_config argument
def test_NoNewConfigArgeuement(self):
assert self.no_args_parsed_args.new_config is False
# no debug argument
def test_NoDebugArguement(self):
assert self.no_args_parsed_args.debug_mode is False
# no file name in input
def test_NoFileName(self):
no_file_name_user_input = ['-f', '-e', '.py', '-m', ]
with pytest.raises(SystemExit):
commands.command_interpreter(no_file_name_user_input)
# No extensions in input
def test_NoExtensions(self):
no_extension_user_input = ['-f', 'File1', '-e', '-m', ]
with pytest.raises(SystemExit):
commands.command_interpreter(no_extension_user_input)
| 2.625
| 3
|
aclients/err_msg.py
|
tinybees/aclients
| 9
|
12785496
|
<filename>aclients/err_msg.py
#!/usr/bin/env python3
# coding=utf-8
"""
@author: guoyanfeng
@software: PyCharm
@time: 18-12-25 2:42 PM
Configurable message module
"""
__all__ = ("mysql_msg", "mongo_msg", "http_msg", "schema_msg")
# mysql: codes 1 to 100
mysql_msg = {
1: {"msg_code": 1, "msg_zh": "MySQL插入数据失败.", "msg_en": "MySQL insert data failed.",
"description": "MySQL插入数据时最终失败的提示"},
2: {"msg_code": 2, "msg_zh": "MySQL更新数据失败.", "msg_en": "MySQL update data failed.",
"description": "MySQL更新数据时最终失败的提示"},
3: {"msg_code": 3, "msg_zh": "MySQL删除数据失败.", "msg_en": "MySQL delete data failed.",
"description": "MySQL删除数据时最终失败的提示"},
4: {"msg_code": 4, "msg_zh": "MySQL查找单条数据失败.", "msg_en": "MySQL find one data failed.",
"description": "MySQL查找单条数据时最终失败的提示"},
5: {"msg_code": 5, "msg_zh": "MySQL查找多条数据失败.", "msg_en": "MySQL find many data failed.",
"description": "MySQL查找多条数据时最终失败的提示"},
6: {"msg_code": 6, "msg_zh": "MySQL执行SQL失败.", "msg_en": "MySQL execute sql failed.",
"description": "MySQL执行SQL失败的提示"},
}
# mongo: codes 100 to 200
mongo_msg = {
100: {"msg_code": 100, "msg_zh": "MongoDB插入数据失败.", "msg_en": "MongoDB insert data failed.",
"description": "MongoDB插入数据时最终失败的提示"},
101: {"msg_code": 101, "msg_zh": "MongoDB更新数据失败.", "msg_en": "MongoDB update data failed.",
"description": "MongoDB更新数据时最终失败的提示"},
102: {"msg_code": 102, "msg_zh": "MongoDB删除数据失败.", "msg_en": "MongoDB delete data failed.",
"description": "MongoDB删除数据时最终失败的提示"},
103: {"msg_code": 103, "msg_zh": "MongoDB查找单条数据失败.", "msg_en": "MongoDB find one data failed.",
"description": "MongoDB查找单条数据时最终失败的提示"},
104: {"msg_code": 104, "msg_zh": "MongoDB查找多条数据失败.", "msg_en": "MongoDB find many data failed.",
"description": "MongoDB查找多条数据时最终失败的提示"},
105: {"msg_code": 105, "msg_zh": "MongoDB聚合查询数据失败.", "msg_en": "MongoDB aggregate query data failed.",
"description": "MongoDB聚合查询数据时最终失败的提示"},
}
# request and schema: codes 200 to 300
http_msg = {
200: {"msg_code": 200, "msg_zh": "获取API响应结果失败.", "msg_en": "Failed to get API response result.",
"description": "async request 获取API响应结果失败时的提示"},
}
schema_msg = {
    # schema validation message
201: {"msg_code": 201, "msg_zh": "数据提交有误,请重新检查.", "msg_en": "Request body validation error, please check!",
"description": "marmallow校验body错误时的提示"},
202: {"msg_code": 202, "msg_zh": "数据提交未知错误,请重新检查.",
"msg_en": "Request body validation unknow error, please check!",
"description": "marmallow校验body未知错误时的提示"},
}
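# Editor's sketch: callers presumably look messages up by msg_code, e.g.
#   mysql_msg[1]["msg_en"]    -> "MySQL insert data failed."
#   mongo_msg[100]["msg_zh"]  -> the corresponding Chinese message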
| 2.109375
| 2
|
nautilus/api/util/graph_entity.py
|
AlecAivazis/python
| 9
|
12785497
|
<reponame>AlecAivazis/python
# external imports
import json
import asyncio
# local imports
from nautilus.api.util import parse_string
from nautilus.conventions.actions import query_action_type
class GraphEntity:
"""
This entity describes an entity path between a source node and
another entity in the api graph and allows for "equality" checks
that verify if there is a matching entity.
Example:
.. code-block:: python
source = GraphEntity(model_type='CatPhoto', id=1)
# check if there is a user with id 5 associated with the photo
assert 5 in source.owner.foo(arg=2)
"""
def __init__(self, service, model_type=None, id=None, _api_path=None):
# save the event broker reference
self.service = service
# if there is a source specification
if model_type and id:
# the internal api needs to start at the appropriate node
self._api_path = [{"name": model_type, "args": {"id": id}}]
# they could also specify the api path to start from
elif _api_path:
# set the path to the given value
self._api_path = _api_path
# otherwise we weren't given a valid starting point
else:
# yell loudly
raise ValueError("GraphEntity need to start at a given path or model_type/id")
def __getattr__(self, attr):
"""
Attribute retrieval is overwritten to build the path we care about
"""
# add a node with no arguments to the api path
self._api_path.append({
"name": attr,
"args": {},
})
# return the entity so we can continue building the path
return GraphEntity(service=self.service, _api_path=self._api_path)
@property
def _query(self):
"""
This attribute provides the graphql query corresponding to the api path
"""
return "query { %s }" % self._summarize_node(self._api_path)
def _summarize_node(self, node_list):
        # if there are no entries in the node list (the base case)
if not node_list:
# return the id field
return 'id'
# grab the top of the node list
node = node_list.pop(0)
# if there are arguments for the node
if node['args']:
# add the starting parenthesis
arg_string = '('
# construct the argument string
for key, value in node['args'].items():
# add the key to the arg string
arg_string += "%s : %s" % (key, json.dumps(value))
# close the parenthesis
arg_string += ')'
# otherwise there are no arguments for the node
else:
# just use an empty string
arg_string = ''
return "%s %s { %s }" % (node['name'], arg_string, self._summarize_node(node_list))
def __call__(self, **kwds):
"""
Calling the entity adds the arguments to the head of its path.
"""
# set the args of the tail of the path to the given keywords
self._api_path[-1]['args'] = kwds
# return the entity so we can continue building the path
return GraphEntity(service=self.service, _api_path=self._api_path)
async def _has_id(self, *args, **kwds):
"""
        Equality checks are overwritten to perform the actual check in a
semantic way.
"""
# if there is only one positional argument
if len(args) == 1:
# parse the appropriate query
result = await parse_string(
self._query,
self.service.object_resolver,
self.service.connection_resolver,
self.service.mutation_resolver,
obey_auth=False
)
# go to the bottom of the result for the list of matching ids
return self._find_id(result['data'], args[0])
# otherwise
else:
# treat the attribute like a normal filter
return self._has_id(**kwds)
def _find_id(self, result, uid):
"""
This method performs a depth-first search for the given uid in the dictionary of results.
"""
# if the result is a list
if isinstance(result, list):
# if the list has a valid entry
if any([self._find_id(value, uid) for value in result]):
# then we're done
return True
# otherwise results could be dictionaries
if isinstance(result, dict):
# the children of the result that are lists
list_children = [value for value in result.values() if isinstance(value, list)]
# go to every value that is a list
for value in list_children:
# if the value is a match
if self._find_id(value, uid):
# we're done
return True
# the children of the result that are dicts
dict_children = [value for value in result.values() if isinstance(value, dict)]
# perform the check on every child that is a dict
for value in dict_children:
# if the child is a match
if self._find_id(value, uid):
# we're done
return True
# if there are no values that are lists and there is an id key
if not list_children and not dict_children and 'id' in result:
# the value of the remote id field
result_id = result['id']
# we've found a match if the id field matches (cast to match type)
return result_id == type(result_id)(uid)
# we didn't find the result
return False
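# Editor's sketch (hypothetical `service` object): for
#   GraphEntity(service, model_type='CatPhoto', id=1).owner
# the _query property renders roughly
#   query { CatPhoto (id : 1) { owner  { id } } }
# and _has_id(5) resolves that query, then depth-first searches the result for id 5.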
| 2.65625
| 3
|
zonarPy/ac_basic.py
|
SvenGastauer/zonarPy
| 0
|
12785498
|
# -*- coding: utf-8 -*-
"""
Created on Mon May 4 12:48:05 2020
@author: sven
"""
import numpy as np
def nearfield(f,c,theta):
"""
Compute the nearfield
Parameters
----------
f : numeric
Transducer Frequency in kHz [kHz].
c : numeric
Ambient sound speed [m/s].
theta : numeric
3dB angle or beam width in degrees [degrees].
Returns
-------
Rnf : numeric
Range of the nearfield for the given conditions in meters [m].
"""
lmbd = c/ ( f * 1000)
k = 2* np.pi / lmbd
a = 1.6 / (k * np.sin((theta * np.pi/180) / 2))
Rnf = (2*a)**2 / lmbd
return Rnf
def eba(f,c,theta):
"""
Compute the equivalent beam angle for a circular transducer.
Parameters
----------
f : numeric
Transducer Frequency in kHz [kHz].
c : numeric
Ambient sound speed [m/s].
theta : numeric
3dB angle or beam width in degrees [degrees].
Returns
-------
EBA : numeric
equivalent beam angle in dB [dB].
"""
lmbd = c/ ( f * 1000)
k = 2* np.pi / lmbd
a = 1.6 / (k * np.sin((theta * np.pi/180) / 2))
EBA = 10 * np.log10( 5.78 / ( ( k * a ) ** 2))#equivalent beam angle in steradians
return EBA
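# Editor's note: vol_samp below returns the sampled volume between ranges `start` and
# `start + R`, i.e. the difference of the two beam volumes computed from the equivalent
# beam angle eba() and the pulse duration tau.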
def vol_samp(f,c,theta,tau,R,start=0):
f = f*1000
Rtot = R+start
Vtot = 10**(eba(f,c,theta)/10) * Rtot**2 * c * tau / 2
V0 = 10**(eba(f,c,theta)/10) * start**2 * c * tau / 2
V = Vtot - V0
return V
def footprint_radius(theta,R):
return R * np.tan(theta * np.pi / 180 / 2)
def footprint_area(theta, R):
return np.pi * footprint_radius(theta,R)**2
'''
vol_samp(f=200,c=1450,theta=9.8,tau=6/1000,R=10)
vol_samp(f=1000,c=1450,theta=4,tau=6/1000,R=10)
#Zonar
nearfield(200,1480,9.8)
nearfield(1000,1480,4)
c=1450;f=200000
0.045**2/(c/f)
c=1450;f=1000000
0.022**2/(c/f)
'''
| 2.8125
| 3
|
api/v1/viewsets/session/assets.py
|
blockomat2100/vulnman
| 0
|
12785499
|
<filename>api/v1/viewsets/session/assets.py
from rest_framework import mixins
from apps.assets import models
from api.v1.generics import ProjectSessionViewSet
from api.v1.serializers import assets as serializers
class HostViewSet(mixins.ListModelMixin, mixins.RetrieveModelMixin, ProjectSessionViewSet):
serializer_class = serializers.HostSerializer
queryset = models.Host.objects.all()
object_permissions_required = ["projects.view_project"]
class ServiceViewSet(mixins.ListModelMixin, mixins.RetrieveModelMixin, ProjectSessionViewSet):
serializer_class = serializers.ServiceSerializer
queryset = models.Service.objects.all()
object_permissions_required = ["projects.view_project"]
class WebApplicationViewSet(mixins.ListModelMixin, mixins.RetrieveModelMixin, ProjectSessionViewSet):
serializer_class = serializers.WebApplicationSerializer
queryset = models.WebApplication.objects.all()
object_permissions_required = ["projects.view_project"]
class WebRequestViewSet(mixins.ListModelMixin, mixins.RetrieveModelMixin, ProjectSessionViewSet):
serializer_class = serializers.WebRequestSerializer
queryset = models.WebRequest.objects.all()
object_permissions_required = ["projects.view_project"]
| 2.03125
| 2
|
nucleus/iam/reset.py
|
1x-eng/PROTON
| 31
|
12785500
|
<gh_stars>10-100
#
# Copyright (c) 2018, <NAME> All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this list of conditions and the following
# disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with the distribution.
#
# Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import falcon
import json
from nucleus.db.connection_manager import ConnectionManager
from nucleus.email.email import ProtonEmail
from nucleus.generics.utilities import MyUtilities
from nucleus.iam.password_manager import PasswordManager
from sqlalchemy import MetaData
from sqlalchemy import select
from sqlalchemy import Table
__author__ = "<NAME>, <EMAIL>"
__copyright__ = "Copyright (C) 2018 <NAME> | http://www.apricity.co.in"
__license__ = "BSD 3-Clause License"
__version__ = "1.0"
class ProtonUserReset(ConnectionManager, PasswordManager, ProtonEmail, MyUtilities):
def __init__(self):
super(ProtonUserReset, self).__init__()
self.__alchemy_engine = self.alchemy_engine()
self.iam_user_reset_logger = self.get_logger(log_file_name='iam_user_reset_logs.log',
log_file_path='{}/trace/iam_user_reset_logs.log'.format(
self.ROOT_DIR))
def proton_reset_user(self, db_flavour, schema_name, reset_payload):
try:
if isinstance(reset_payload, dict):
reset_password_metadata = {
'user_name': str,
'email': str,
'password': str
}
validation_results = self.validate_proton_payload_type(reset_password_metadata, reset_payload)
if validation_results['status'] and db_flavour == 'postgresql':
schema_status = self.__alchemy_engine[db_flavour].dialect.has_schema(
self.__alchemy_engine[db_flavour], schema_name)
metadata = MetaData(self.__alchemy_engine[db_flavour], reflect=True, schema=schema_name)
metadata.reflect(self.__alchemy_engine[db_flavour])
connection = self.__alchemy_engine[db_flavour].connect()
with connection.begin() as transaction:
if schema_status:
user_registry_table = Table('PROTON_user_registry', metadata)
query_user_id = select([user_registry_table.c.id]).where(
user_registry_table.c.email == reset_payload['email'])
user_id = (connection.execute(query_user_id)).fetchall()
if len(user_id) == 0:
return json.dumps({
'message': 'Unable to reset password.',
'reason': {
'message': 'Invalid Email. There is no registered user with that email.'
},
'status': False
})
else:
login_registry_table = Table('PROTON_login_registry', metadata)
query_user_existence = select([login_registry_table.c.user_registry_id]).where(
login_registry_table.c.user_name == reset_payload['user_name'])
user_existence = (connection.execute(query_user_existence)).fetchall()
if len(user_existence) == 0:
return json.dumps({
'message': 'Unable to reset password.',
'reason': {
'message': 'Invalid Username. Please enter the username and '
'email as provided during signup.'
},
'status': False
})
else:
if user_id[0][0] != user_existence[0][0]:
return json.dumps({
'message': 'Unable to reset password.',
'reason': {
'message': 'Given email and username do not match. Please enter the '
'username and email as provided during signup.'
},
'status': False
})
else:
password_update_query = login_registry_table.update().where(
login_registry_table.c.user_registry_id == user_existence[0][0]).values(
password=self.<PASSWORD>(reset_payload['password']))
password_update_results = (connection.execute(password_update_query))
                                        self.iam_user_reset_logger.info(password_update_results.rowcount)
                                        if password_update_results.rowcount != 0:
return json.dumps({
'message': 'Password reset successful.',
'status': True,
})
else:
return json.dumps({
'message': 'Password reset unsuccessful due to server side error.',
'status': False,
})
else:
return json.dumps({
'message': 'Unable to reset password.',
'reason': {
'message': 'Required payload for password reset is: {}'.format(str(reset_password_metadata))
},
'status': False
})
return json.dumps({
'message': 'Unable to reset password.',
'reason': {
'message': """POST payload for /reset route must be:
{
"db_flavour": "postgresql",
"reset_payload": {
"user_name": "<username>",
"email": "<<EMAIL>>",
"password": "<<PASSWORD>>"
}
}
"""
},
'status': False
})
except Exception as e:
self.iam_user_reset_logger.exception('[Proton IAM] - Exception while resetting PROTON user password. '
'Details: {}'.format(str(e)))
class IctrlProtonPasswordReset(ProtonUserReset):
def __init__(self):
super(IctrlProtonPasswordReset, self).__init__()
def on_get(self, req, resp):
resp.status = falcon.HTTP_SERVICE_UNAVAILABLE
def on_post(self, req, resp):
try:
post_payload = json.loads(req.stream.read())
results = self.proton_reset_user(post_payload['db_flavour'],
'iam',
post_payload['reset_payload'])
resp.body = results
resp.status = falcon.HTTP_201
except Exception as e:
resp.body = json.dumps({
'message': "POST request must contain 'db_flavour'[PROTON supports `sqlite` or `postgresql`] "
"and 'reset_payload'"
})
resp.status = falcon.HTTP_403
| 1.140625
| 1
|
pacote-download/ex(1-100)/ex076.py
|
gssouza2051/python-exercicios
| 0
|
12785501
|
'''Write a program that has a single tuple with product names and their respective prices,
in sequence. At the end, display a price listing, organizing the data in tabular form.'''
listagem= ('lápis',2.50,
'borracha',1.50,
'caderno',12.00,
'caneta',2.00,
'estojo',10.50)
print('-'*30)
print('LISTAGEM DE PREÇOS')
print('-'*30)
for pos in range(0, len (listagem)):
if pos % 2==0 :
print(f'{listagem[pos]:.<30}')
else:
print(f' R${listagem[pos]:>10}')
| 3.921875
| 4
|
weather-tracker/main.py
|
JAbrokwah/python-projects
| 0
|
12785502
|
from weather_tracker import weather_tracker, output_file_path
from wt_exceptions import WeatherException, LocationException
def check_selection_value(value):
return value in [1, 2]
if __name__ == '__main__':
print("Welcome to the Automatic Weather Machine! We find your location (Based on IP Address) and tell you the "
"weather for your area for the week")
output = None
try:
print('1 - Print to Console')
print('2 - Output to TXT File')
while output is None or type(output) != int or output > 2 or output < 1:
try:
output = int(input("Output Selection: "))
if not check_selection_value(output):
output = None
print("Provide a valid selection for output!")
except ValueError:
print("{} is not a number, please enter a number only".format(output))
result = weather_tracker(output)
if len(result) != 3:
print('You can find your forecast in the file: {}'.format(output_file_path))
else:
forecast = result[0]
city = result[1]
district = result[2]
print("Here is the forecast for {}, {}:".format(city, district))
for day in forecast:
print(day)
except (LocationException, WeatherException) as error:
print(error.args[0])
| 3.75
| 4
|
gensound/__init__.py
|
macrat/PyGenSound
| 0
|
12785503
|
<reponame>macrat/PyGenSound
""" Generate sound like a chiptune
Read an audio file or generate sound, compute it, and write to file.
"""
from gensound.sound import *
from gensound.effect import *
from gensound.exceptions import *
| 2.1875
| 2
|
econtools/metrics/tests/data/src_tsls.py
|
fqueiro/econtools
| 93
|
12785504
|
import pandas as pd
import numpy as np
class regout(object):
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
stat_names=['coeff', 'se', 't', 'p>t', 'CI_low', 'CI_high']
var_names=['mpg', 'length', '_cons']
tsls_std = regout(
summary=pd.DataFrame(np.array([
[-1319.865169393102,
1906.786380881755,
-.6921935160784805,
.4910734473693195,
-5121.889227450638,
2482.158888664433,
],
[-217.1947537663291,
420.1260089670161,
-.5169752624941175,
.6067801835089433,
-1054.902223005562,
620.5127154729038,
],
[75092.75604853875,
119511.8053379244,
.6283291917163411,
.5318043826192644,
-163207.0155842729,
313392.5276813505,
],
]),
columns=stat_names,
index=var_names),
vce=pd.DataFrame(np.array([
[3635834.30231614,
799471.1768877679,
-227680006.992276,
],
[799471.1768877679,
176505.8634105533,
-50197751.5841309,
],
[-227680006.992276,
-50197751.5841309,
14283071615.12995,
],
]),
columns=var_names,
index=var_names),
N=74,
r2=np.nan,
r2_a=np.nan,
mss=-1031817172.794085,
tss=np.nan,
rss=1666882568.915706,
kappa=np.nan,
F=3.97987798611259,
pF=.0230019984382644,
)
tsls_robust = regout(
summary=pd.DataFrame(np.array([
[-1319.865169393102,
2357.647789772478,
-.5598228773265894,
.5773622437125422,
-6020.881343525829,
3381.151004739624,
],
[-217.1947537663291,
503.6720846601052,
-.4312225362120266,
.6676130605679584,
-1221.488366543325,
787.0988590106673,
],
[75092.75604853875,
144765.6412502902,
.5187194654752942,
.6055693972498957,
-213561.7342143963,
363747.2463114738,
],
]),
columns=stat_names,
index=var_names),
vce=pd.DataFrame(np.array([
[5558503.100619048,
1185986.375722446,
-341107563.0831394,
],
[1185986.375722446,
253685.5688658562,
-72904288.91181517,
],
[-341107563.0831394,
-72904288.91181517,
20957090886.60773,
],
]),
columns=var_names,
index=var_names),
N=74,
r2=np.nan,
r2_a=np.nan,
mss=-1031817172.794085,
tss=np.nan,
rss=1666882568.915706,
kappa=np.nan,
F=3.406896316082843,
pF=.0386511725211229,
)
tsls_cluster = regout(
summary=pd.DataFrame(np.array([
[-1319.865169393102,
2257.567862016117,
-.5846403076514384,
.5625396644960171,
-5902.971584635199,
3263.241245848994,
],
[-217.1947537663291,
486.3497477085017,
-.4465814052329,
.6579283787885248,
-1204.537232491913,
770.1477249592547,
],
[75092.75604853875,
139493.4175166438,
.5383247280437371,
.5937601902027558,
-208093.9367907353,
358279.4488878128,
],
]),
columns=stat_names,
index=var_names),
vce=pd.DataFrame(np.array([
[5096612.65160802,
1096219.32167181,
-314686204.683651,
],
[1096219.32167181,
236536.0770961233,
-67830404.58467865,
],
[-314686204.683651,
-67830404.58467865,
19458413530.47272,
],
]),
columns=var_names,
index=var_names),
N=74,
r2=np.nan,
r2_a=np.nan,
mss=-1031817172.794085,
tss=np.nan,
rss=1666882568.915706,
kappa=np.nan,
F=3.125695274137819,
pF=.0563657644983311,
)
| 2.203125
| 2
|
omaha_server/omaha/migrations/0023_auto_20150922_1014.py
|
makar21/omaha-server
| 8
|
12785505
|
# -*- coding: utf-8 -*-
from django.db import models, migrations
import django_extensions.db.fields
class Migration(migrations.Migration):
dependencies = [
('omaha', '0022_auto_20150909_0755'),
]
operations = [
migrations.AddField(
model_name='request',
name='ip',
field=models.GenericIPAddressField(null=True, blank=True),
),
]
| 1.601563
| 2
|
test_it.py
|
greghaskins/gibberish
| 52
|
12785506
|
import gibberish
def test_generate_word():
word = gibberish.generate_word()
assert len(word)
assert word.isalpha()
def test_generate_words():
    word_list = gibberish.generate_words(3)
assert len(word_list)
for word in word_list:
assert len(word)
assert word.isalpha()
| 3.234375
| 3
|
tests/derive/test_assign_work_person_facing_now.py
|
ONS-SST/cis_households
| 0
|
12785507
|
<filename>tests/derive/test_assign_work_person_facing_now.py
from chispa import assert_df_equality
from cishouseholds.derive import assign_work_person_facing_now
def test_assign_work_person_facing_now(spark_session):
expected_df = spark_session.createDataFrame(
data=[
("<=15y", 15, "Yes, care/residential home, resident-facing", "<=15y"),
("Yes", 27, "Yes, care/residential home, resident-facing", "Yes"),
("No", 49, "Yes, other social care, resident-facing", "Yes"),
("No", 33, "No", "No"),
(">=75y", 80, "No", ">=75y"),
(None, 80, "No", "No"),
(">=75y", 99, "Yes, care/residential home, non-resident-facing", ">=75y"),
],
schema="work_patient string, age integer, work_social string, facing string",
)
output_df = assign_work_person_facing_now(expected_df.drop("facing"), "facing", "work_patient", "work_social")
assert_df_equality(output_df, expected_df)
| 2.625
| 3
|
inference.py
|
cpuimage/SINet
| 13
|
12785508
|
<filename>inference.py
# -*- coding: utf-8 -*-
import time
import os
import numpy as np
import tensorflow as tf
import cv2
def export_tflite(output_resolution, num_classes, checkpoint_dir):
from model import Model
model = Model(output_resolution=output_resolution, num_classes=num_classes)
ckpt = tf.train.Checkpoint(model=model, optimizer=tf.keras.optimizers.Adam())
ckpt_manager = tf.train.CheckpointManager(ckpt, checkpoint_dir, max_to_keep=5)
if ckpt_manager.latest_checkpoint:
ckpt.restore(ckpt_manager.latest_checkpoint)
tf.get_logger().info("Latest checkpoint restored:{}".format(ckpt_manager.latest_checkpoint))
converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_LATENCY]
tflite_model = converter.convert()
open("model.tflite", "wb").write(tflite_model)
tf.get_logger().info('export tflite done.')
else:
tf.get_logger().info('Not restoring from saved checkpoint')
class Segmentator(object):
def __init__(self):
self.height = 256
self.width = 256
def load_tflite(self, model_path="model.tflite"):
# Load TFLite model and allocate tensors.
self.interpreter = tf.lite.Interpreter(model_path=model_path)
self.interpreter.allocate_tensors()
# Get input and output tensors.
self.input_details = self.interpreter.get_input_details()
self.output_details = self.interpreter.get_output_details()
# Get Input shape
self.input_shape = self.input_details[0]['shape']
self.height, self.width = self.input_shape[1], self.input_shape[2]
def tflite_forward(self, img_data):
input_data = img_data.reshape(self.input_shape).astype('float32')
self.interpreter.set_tensor(self.input_details[0]['index'], input_data)
self.interpreter.invoke()
mask = (self.interpreter.get_tensor(self.output_details[0]['index']))
return mask
def cv_load_image_rgb(self, filename):
image = cv2.cvtColor(cv2.imread(filename, cv2.IMREAD_COLOR), cv2.COLOR_BGR2RGB)
return np.asarray(image, np.float32)
def build_debug_display(self, img_data, disp_image, disp_mask, h, w, maintain_resolution=True):
if maintain_resolution:
disp_mask = np.clip(np.expand_dims(cv2.resize(disp_mask, (w, h), interpolation=cv2.INTER_CUBIC), -1), 0,
1)
disp_image = cv2.cvtColor(np.asarray(img_data, np.uint8), cv2.COLOR_RGBA2RGB)
else:
disp_image = cv2.cvtColor(np.asarray(disp_image * 255., np.uint8), cv2.COLOR_RGBA2RGB)
overlay = disp_image.copy()
disp_mask_rgb = (np.tile(disp_mask, 3) * 255.).astype(np.uint8)
disp_image_mask = (disp_image * disp_mask).astype(np.uint8)
overlay = (np.broadcast_to([1, 0, 1], overlay.shape) * disp_image_mask).astype(np.uint8)
alpha = 0.7
cv2.addWeighted(disp_image, alpha, overlay, 1 - alpha, 0, overlay)
extracted_pixels_color = np.broadcast_to([[207, 207, 207]], overlay.shape) * (1. - disp_mask)
extracted_pixels = extracted_pixels_color + disp_image_mask
outputs = np.uint8(disp_image), np.uint8(disp_mask_rgb), np.uint8(
overlay), np.uint8(extracted_pixels)
return outputs
def run_inference(self, data, only_mask=True, debug_display=True):
if isinstance(data, str):
img_data = tf.cast(self.cv_load_image_rgb(data), tf.float32)
else:
img_data = data
height, width = img_data.shape[:2]
disp_image = np.asarray(tf.image.resize(img_data / 255., size=[int(self.height), int(self.width)]), np.float32)
start = time.perf_counter()
mask = self.tflite_forward(disp_image)
print('Time: {:.3f} secs'.format(time.perf_counter() - start))
disp_mask = np.squeeze(np.clip(mask, 0., 1.), 0)
if debug_display:
alpha_disp_image, alpha_disp_mask_rgb, alpha_overlay, alpha_extracted_pixels = self.build_debug_display(
img_data, disp_image, disp_mask, height, width)
outputs = np.concatenate((alpha_disp_image, alpha_disp_mask_rgb, alpha_overlay, alpha_extracted_pixels),
axis=1)
return outputs.astype(np.uint8)
mask = np.asarray(tf.image.resize(disp_mask, size=[height, width]) * 255., np.uint8)
if only_mask:
outputs = mask
else:
outputs = np.concatenate((img_data, mask), axis=-1)
return outputs.astype(np.uint8)
def main():
output_resolution = 512
num_classes = 1
os.environ['CUDA_VISIBLE_DEVICES'] = ''
export_model = True
if export_model:
checkpoint_path = "training/cp-{step:04d}.ckpt"
checkpoint_dir = os.path.dirname(checkpoint_path)
export_tflite(output_resolution, num_classes, checkpoint_dir=checkpoint_dir)
print('************ Segnet ************')
segnet = Segmentator()
segnet.load_tflite()
root_path = "./data/test/coco_test"
image_path = os.path.join(root_path, "image")
save_path = os.path.join(root_path, "probs")
import shutil
shutil.rmtree(save_path, True)
if not os.path.exists(save_path):
os.makedirs(save_path)
sub_file = os.listdir(image_path)
for name in sub_file:
print("Working on file", name)
input_filename = os.path.join(image_path, name)
png_filename = os.path.splitext(name)[0] + ".png"
save_filename = os.path.join(save_path, png_filename)
if os.path.exists(input_filename) and not os.path.exists(save_filename):
probs = segnet.run_inference(input_filename, only_mask=True)
png_image = tf.image.encode_png(probs)
tf.io.write_file(save_filename, png_image)
if __name__ == '__main__':
main()
| 2.328125
| 2
|
test_data.py
|
dutch213/pt-voicebox
| 0
|
12785509
|
text_small = 'A penny saved is a penny earned.'
| 1.117188
| 1
|
backend/main.py
|
pauanawat/FookBace
| 0
|
12785510
|
<gh_stars>0
from aiohttp import web
from app.config.application import app_config
def main():
app = web.Application()
app_config(app)
web.run_app(app)
if __name__ == '__main__':
main()
| 1.679688
| 2
|
exercicios-Python/desaf021.py
|
marcelo-py/Exercicios-Python
| 0
|
12785511
|
#import pygame
#pygame.mixer.init()
#pygame.mixer.music.load('desaf021.mp3')
#pygame.mixer.music.play()
#while pygame.mixer.music.get_busy(): pass
import playsound
playsound.playsound('desaf021.mp3')
| 2.359375
| 2
|
tests/test_models.py
|
dimdamop/single-neuron
| 0
|
12785512
|
# Author: <NAME>
# MIT license (see LICENCE.txt in the top-level folder)
import unittest
import numpy as np
from numpy import random
from numpy import linalg as LA
from sklearn.linear_model import LinearRegression, LogisticRegression
from single_neuron import models as models
from single_neuron import math_utils as math_utils
datasets_n = 50
max_ds_n = 10000
max_features_n = 100
max_abs_value = 1000
min_epochs = 100
max_epochs = 10000
min_lr = 1e-9
max_lr = 1e-5
def generate_synthetic_datasets(N_max, m_max, gaussian=False):
N_train = random.randint(3, N_max + 1)
N_valid = random.randint(3, N_max + 1)
m = random.randint(2, m_max + 1)
if gaussian:
# we are generating a synthetic dataset based on a multivariate Gaussian
# distribution. In order to generate the latter, we need a mean vector
# (easy) and a positive definite matrix for the covariances. This matrix
        # is way more tricky to sample and I don't know the best way to do it.
        # My current brute-force approach is the following: (a) I sample m
        # vectors; (b) I take all the possible inner products (Gram matrix) as
        # the covariance matrix and (c) if the covariance matrix is singular, I
        # go back to step (a).
mu = 2 * (random.rand(m) - 0.5) * max_abs_value
Cov = np.zeros([m, m])
while LA.matrix_rank(Cov) != m:
a = 2 * (random.rand(m) - 0.5) * max_abs_value
X = a * random.rand(m, m)
Cov = X.T.dot(X)
train_ds = random.multivariate_normal(mu, Cov, N_train)
valid_ds = random.multivariate_normal(mu, Cov, N_valid)
else:
# uniformly random datasets
train_ds = 2 * (random.rand(N_train, m) - 0.5) * max_abs_value
valid_ds = 2 * (random.rand(N_valid, m) - 0.5) * max_abs_value
return train_ds, valid_ds
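# Editor's sketch (hypothetical sizes): draw correlated Gaussian train/validation sets with
# at most 100 rows and 10 columns; the last column is later used as the target variable.
#   train_ds, valid_ds = generate_synthetic_datasets(N_max=100, m_max=10, gaussian=True)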
class TestLinearNeuron(unittest.TestCase):
def setUp(self):
"""
Prepare a few synthetic datasets for the tests. Two categories of
datasets: One random without any implied structure and one that arises
from a predefined distribution.
"""
self.train_X = []
self.valid_X = []
self.train_y = []
self.valid_y = []
for ds_i in range(0, datasets_n):
# make sure that there are some datasets with extremely small values
if ds_i < 10:
N_max = 7
else:
N_max = max_ds_n
if ds_i < 10:
m_max = 2
else:
m_max = max_features_n
#gaussian = random.rand() < 0.5
gaussian = True
train_ds, valid_ds = generate_synthetic_datasets(N_max, m_max,
gaussian)
# we use the last column as the target variable
self.train_X.append(train_ds[:, :-1])
self.valid_X.append(valid_ds[:, :-1])
self.train_y.append(train_ds[:, -1])
self.valid_y.append(valid_ds[:, -1])
self.lin_model = LinearRegression()
def test_rmse_is_equal_with_sklearn(self):
pass
def test_params_are_equal_with_sklearn(self):
pass
def test_initialization_does_not_matter(self):
pass
class TestReluNeuron(unittest.TestCase):
def test_rmse_is_equal_with_sklearn(self):
pass
def test_initialization_with_negatives_leads_to_zero_gradients(self):
pass
def test_initialization_does_not_matter(self):
pass
class TestLogisticNeuron(unittest.TestCase):
def test_ce_is_equal_with_sklearn(self):
pass
def test_initialization_does_not_matter(self):
pass
| 2.703125
| 3
|
test/test_modify_contact.py
|
IvanZyfra/py_training
| 0
|
12785513
|
<reponame>IvanZyfra/py_training<filename>test/test_modify_contact.py
# -*- coding: utf-8 -*-
from model.contact import Contact
def test_modify_contact(app):
app.contact.modify_first_group(
Contact(first_name="Test_name", middle_name="Test_name", last_name="Test_name", nickname="", title="",
company="", address="", tel_home="", tel_mobile="",
tel_work="", tel_fax="", email="", email2="",
email3="", homepage="", address2="", phone2="",
notes=""))
| 1.84375
| 2
|
utils/logging.py
|
csalt-research/OpenASR-py
| 2
|
12785514
|
import logging
logger = logging.getLogger()
log_format = logging.Formatter("[%(asctime)s %(levelname)s] %(message)s")
logger.setLevel(logging.INFO)
console_handler = logging.StreamHandler()
console_handler.setFormatter(log_format)
logger.handlers = [console_handler]
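# Editor's sketch: importing this module configures the root logger once, so callers can do
#   from utils.logging import logger
#   logger.info("decoding started")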
| 2.40625
| 2
|
exploits/sword/sessionregeneration.py
|
PinkRoccade-Local-Government-OSS/PinkWave
| 1
|
12785515
|
<filename>exploits/sword/sessionregeneration.py
"""
Logs into webapplication and verifies that session id is changed to prevent session fixation.
* Valid login required
"""
__author__ = "sword"
import sys
from os.path import dirname,abspath
# Importing PinkWave extensions
sys.path.append(dirname(dirname(dirname(abspath(__file__)))))
from extensions.Util import Util,report,payloads
import extensions.Request as Request
def labels():
return ['A2 - Broken Authentication and Session Management']
def options():
return ['target','requestNames','creds']
def start(pentest):
r1 = Request.get(pentest.target)
r2 = Request.post(pentest.target,pentest.requestNames, pentest.creds)
if r1.cookies == r2.cookies:
report("Cookies not altered")
| 2.625
| 3
|
abc/077/B.py
|
tonko2/AtCoder
| 2
|
12785516
|
N = int(input())
for i in range(1, 100000):
if i * i > N:
print((i - 1) ** 2)
exit()
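# Editor's note: the loop finds the smallest i with i*i > N and prints (i-1)**2,
# i.e. the largest perfect square not exceeding N (e.g. N = 10 -> 9).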
| 3.0625
| 3
|
web/accounts/templatetags/dashbar_tag.py
|
MattYu/django-docker-nginx-postgres-letsEncrypt-jobBoard
| 1
|
12785517
|
from django import template
from joblistings.models import Job
from companies.models import Company
register = template.Library()
@register.inclusion_tag('dashbar.html')
def get_dashbar(*args, **kwargs):
location = kwargs['location']
return {
'location': location
}
| 1.742188
| 2
|
py/py_0405_a_rectangular_tiling.py
|
lcsm29/project-euler
| 0
|
12785518
|
# Solution of;
# Project Euler Problem 405: A rectangular tiling
# https://projecteuler.net/problem=405
#
# We wish to tile a rectangle whose length is twice its width. Let T(0) be the
# tiling consisting of a single rectangle. For n > 0, let T(n) be obtained
# from T(n-1) by replacing all tiles in the following manner:The following
# animation demonstrates the tilings T(n) for n from 0 to 5:Let f(n) be the
# number of points where four tiles meet in T(n). For example, f(1) = 0, f(4)
# = 82 and f(109) mod 177 = 126897180. Find f(10k) for k = 1018, give your
# answer modulo 177.
#
# by lcsm29 http://github.com/lcsm29/project-euler
import timed
def dummy(n):
pass
if __name__ == '__main__':
n = 1000
i = 10000
prob_id = 405
timed.caller(dummy, n, i, prob_id)
| 3.234375
| 3
|
setup.py
|
remico/studio-installer
| 0
|
12785519
|
<gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# This file is part of "Linux Studio Installer" project
#
# Author: <NAME> <<EMAIL>>
# License: MIT License
#
# SPDX-License-Identifier: MIT
# License text is available in the LICENSE file and online:
# http://www.opensource.org/licenses/MIT
#
# Copyright (c) 2020 remico
import platform
import setuptools
from pathlib import Path
if 'linux' not in platform.system().lower():
raise OSError('The package requires GNU Linux. Aborting installation...')
def data_files():
return [
('studioinstaller-data', [str(f) for f in Path("preseed").glob("*") if f.is_file()]),
('studioinstaller-data/calamares', [str(f) for f in Path("preseed/calamares").glob("*") if f.is_file()])
]
# make the distribution platform dependent
try:
from wheel.bdist_wheel import bdist_wheel as _bdist_wheel
class bdist_wheel(_bdist_wheel):
def finalize_options(self):
_bdist_wheel.finalize_options(self)
self.root_is_pure = False
# self.plat_name_supplied = True
# self.plat_name = "manylinux1_x86_64"
except ImportError:
bdist_wheel = None
setuptools.setup(
data_files=data_files(),
cmdclass={
'bdist_wheel': bdist_wheel
}
)
| 2
| 2
|
gnn_agglomeration/dataset/node_embeddings/hdf5_like_in_memory.py
|
bentaculum/gnn_agglomeration
| 2
|
12785520
|
import logging
import numpy as np
from time import time as now
from gunpowder.batch import Batch
from gunpowder.profiling import Timing
from gunpowder.array import Array
from gunpowder.nodes.hdf5like_source_base import Hdf5LikeSource
from gunpowder.compat import ensure_str
from gunpowder.coordinate import Coordinate
from gunpowder.ext import ZarrFile
logger = logging.getLogger(__name__)
class Hdf5InMemory(Hdf5LikeSource):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.in_mem_datasets = {}
def setup(self):
super().setup()
with self._open_file(self.filename) as data_file:
for (array_key, ds_name) in self.datasets.items():
if ds_name not in data_file:
raise RuntimeError("%s not in %s" %
(ds_name, self.filename))
spec = self._Hdf5LikeSource__read_spec(
array_key, data_file, ds_name)
# logger.info(spec)
# logger.info(spec.roi)
# logger.info(spec.roi.get_offset())
# logger.info((spec.roi - spec.roi.get_offset()) /
# spec.voxel_size)
start = now()
logger.info(
f'start loading {ds_name} into memory')
self.in_mem_datasets[array_key] = self._Hdf5LikeSource__read(
data_file,
self.datasets[array_key],
(spec.roi - spec.roi.get_offset()) / spec.voxel_size,
)
logger.info(
f'loaded {ds_name} into memory in {now() - start} s')
def provide(self, request):
timing = Timing(self)
timing.start()
batch = Batch()
for (array_key, request_spec) in request.array_specs.items():
voxel_size = self.spec[array_key].voxel_size
# scale request roi to voxel units
dataset_roi = request_spec.roi / voxel_size
# shift request roi into dataset
dataset_roi = (
dataset_roi -
self.spec[array_key].roi.get_offset() / voxel_size
)
# create array spec
array_spec = self.spec[array_key].copy()
array_spec.roi = request_spec.roi
# add array to batch
batch.arrays[array_key] = Array(
self.__read(array_key, dataset_roi), array_spec
)
logger.debug("done")
timing.stop()
batch.profiling_stats.add(timing)
return batch
def __read(self, array_key, roi):
in_mem_array = self.in_mem_datasets[array_key]
c = len(in_mem_array.shape) - self.ndims
if self.channels_first:
array = np.asarray(
in_mem_array[(slice(None),) * c + roi.to_slices()])
else:
array = np.asarray(
in_mem_array[roi.to_slices() + (slice(None),) * c])
array = np.transpose(
array, axes=[
i + self.ndims for i in range(c)] + list(range(self.ndims))
)
return array
def __repr__(self):
return self.filename
class InMemZarrSource(Hdf5InMemory):
'''A `zarr <https://github.com/zarr-developers/zarr>`_ data source.
Provides arrays from zarr datasets. If the attribute ``resolution`` is set
in a zarr dataset, it will be used as the array's ``voxel_size``. If the
attribute ``offset`` is set in a dataset, it will be used as the offset of
the :class:`Roi` for this array. It is assumed that the offset is given in
world units.
Args:
filename (``string``):
The zarr directory.
datasets (``dict``, :class:`ArrayKey` -> ``string``):
Dictionary of array keys to dataset names that this source offers.
array_specs (``dict``, :class:`ArrayKey` -> :class:`ArraySpec`, optional):
An optional dictionary of array keys to array specs to overwrite
the array specs automatically determined from the data file. This
is useful to set a missing ``voxel_size``, for example. Only fields
that are not ``None`` in the given :class:`ArraySpec` will be used.
channels_first (``bool``, optional):
Specifies the ordering of the dimensions of the HDF5-like data source.
If channels_first is set (default), then the input shape is expected
to be (channels, spatial dimensions). This is recommended because of
better performance. If channels_first is set to false, then the input
data is read in channels_last manner and converted to channels_first.
'''
def _get_voxel_size(self, dataset):
if 'resolution' not in dataset.attrs:
return None
if self.filename.endswith('.n5'):
return Coordinate(dataset.attrs['resolution'][::-1])
else:
return Coordinate(dataset.attrs['resolution'])
def _get_offset(self, dataset):
if 'offset' not in dataset.attrs:
return None
if self.filename.endswith('.n5'):
return Coordinate(dataset.attrs['offset'][::-1])
else:
return Coordinate(dataset.attrs['offset'])
def _open_file(self, filename):
return ZarrFile(ensure_str(filename), mode='r')
| 1.96875
| 2
|
Ecomm/products/views.py
|
team-mbm/Ecomm
| 0
|
12785521
|
<gh_stars>0
from django.shortcuts import render, get_object_or_404
from django.http import Http404
from .models import Product
# Create your views here.
def index(request):
items = Product.objects.all()
print (request.__dict__)
return render(request, 'products/index.html',{'items':items})
def all_products(request):
items = Product.objects.all()
return render(request, 'products/all_products.html',{'items':items})
def description(request, product_id):
item = get_object_or_404(Product, pk=product_id)
print (item)
related_items =[ goods for goods in Product.objects.filter(category=item.category) if goods.id != item.id ]
print (related_items)
return render(request, 'products/single-product.html', {'item':item, 'related_items':related_items})
def category_items(request, category_name):
items = Product.objects.filter(category=category_name)
return render(request, 'products/category.html',{'items':items})
| 2.171875
| 2
|
ckanext/iati/logic/csv_action.py
|
derilinx/ckanext-ia
| 2
|
12785522
|
from flask import make_response
from ckan.common import config, c
import ckan.plugins as p
import ckan.model as model
import ckan.authz as authz
import ckan.logic as logic
import ckan.lib.jobs as jobs
from ckanext.iati.helpers import extras_to_dict, parse_error_object_to_list
from ckanext.iati import helpers as h
from ckanext.iati.logic import action
import sqlalchemy
import csv
import StringIO
from collections import OrderedDict
import json
from xlwt import Workbook
import io
import datetime as dt
import os, codecs
import logging
log = logging.getLogger(__name__)
_and_ = sqlalchemy.and_
_not_empty = p.toolkit.get_validator('not_empty')
_ignore_empty = p.toolkit.get_validator('ignore_empty')
_ignore_missing = p.toolkit.get_validator('ignore_missing')
_int_validator = p.toolkit.get_validator('int_validator')
ValidationError = logic.ValidationError
class FormatError(Exception):
pass
class PublishersListDownload:
def __init__(self, download_format, request_recent_publisher=False):
self.request_type_recent_publisher = request_recent_publisher
self.download_format = self._get_format(download_format)
self._site_url = config.get('ckan.site_url')
self._datasets_link = self._site_url + "/publisher/{}"
self._func_mapping = {
'extras_publisher_organization_type':h.get_organization_type_title,
'extras_publisher_country':h.get_country_title
}
self._set_mapping()
def _set_mapping(self):
"""
        Set csv column headers according to the request type.
        If the request is from recent publishers (only for sysadmins), we need the first_published_date column
:return:
"""
self._headers = ['Publisher', 'IATI Organisation Identifier', 'Organization Type',
'HQ Country or Region', 'Datasets Count', 'Datasets Link']
self._mapping = ['display_name', 'extras_publisher_iati_id', 'extras_publisher_organization_type',
'extras_publisher_country', 'package_count']
if self.request_type_recent_publisher:
self._headers.insert(4, "First Published Date")
self._mapping.insert(4, "extras_publisher_first_publish_date")
self._headers = tuple(self._headers)
self._mapping = tuple(self._mapping)
def _get_xml_value(self, val):
val = val.replace('&', "&")
return val
def _get_xml_name(self, val):
val = val.lower()
return val.replace(" ", '-')
def _get_format(self, download_format):
try:
download_format = download_format.lower()
_formats = ('csv', 'json', 'xml', 'xls')
if download_format not in _formats:
raise FormatError
return download_format
except Exception as e:
raise FormatError(e)
@staticmethod
def _get_publisher_data():
"""
        We cannot use the API organization_list with all_fields=True, because it would be an
        expensive process to bypass the max limits
:return: dict
"""
# TODO: Need optimization
        # First get the package count and then join with Group on owner_org
package_count = model.Session.query(model.Group, model.Package.owner_org,
sqlalchemy.func.count(model.Package.id).label('package_count')).join(
model.Package, model.Group.id == model.Package.owner_org).filter(
_and_(
model.Group.is_organization == True, model.Group.state == 'active',
model.Package.private == False, model.Package.state == 'active'
)
).group_by(model.Group.id, model.Package.owner_org).subquery()
organization = model.Session.query(model.Group, package_count.c.package_count).join(
package_count, model.Group.id == package_count.c.id).join(model.GroupExtra)
log.info(organization.as_scalar())
return organization.all()
def _prepare(self, data):
"""
Prepare the data for download
:param data:
:return:
"""
clean_data = []
extras = dict(data.Group._extras)
for key in self._mapping[:-1]:
val = ''
if hasattr(data.Group, key):
val = getattr(data.Group, key).encode('utf-8')
if "extras_" in key:
val = extras.get(key.replace("extras_", ''), '')
if val:
val = val.value.encode('utf-8')
if key in self._func_mapping:
val = self._func_mapping.get(key)(val)
clean_data.append(val)
clean_data.append(data.package_count)
clean_data.append(self._datasets_link.format(data.Group.name))
return clean_data
def csv(self):
"""
CSV download.
Sysadmin recent publisher is allowed to download only csv
:return:
"""
f = StringIO.StringIO()
writer = csv.writer(f)
writer.writerow(list(self._headers))
_org_data = PublishersListDownload._get_publisher_data()
rows = []
for org in _org_data:
if org.Group.state == 'active' and int(org.package_count) > 0:
org_data = self._prepare(org)
if self.request_type_recent_publisher:
rows.append(org_data)
else:
writer.writerow(org_data)
# This is expensive but we need sorting for first published
# date since its hard to get sorted for GroupExtra table
if self.request_type_recent_publisher:
rows = sorted(rows, key=lambda entry: entry[4], reverse=True)
for csv_row in rows:
writer.writerow(csv_row)
output = f.getvalue()
f.close()
response = make_response(output)
response.headers['Content-type'] = 'text/csv'
return response
def json(self):
"""
Json download
:return:
"""
f = StringIO.StringIO()
json_data = []
_org_data = PublishersListDownload._get_publisher_data()
for org in _org_data:
if org.Group.state == 'active' and int(org.package_count) > 0:
json_data.append(OrderedDict(zip(self._headers, self._prepare(org))))
json.dump(json_data, f)
output = f.getvalue()
f.close()
response = make_response(output)
response.headers['Content-type'] = 'application/json'
return response
def xml(self):
"""
xml format download
:return:
"""
f = StringIO.StringIO()
fields = list(self._headers)
fields.pop(1)
xml = ['<?xml version="1.0" encoding="UTF-8" ?>']
_observations = ' <{}>{}</{}>'
xml.append('<iati-publishers-list>')
_org_data = PublishersListDownload._get_publisher_data()
for org in _org_data:
if org.Group.state == 'active' and int(org.package_count) > 0:
_dt = self._prepare(org)
_dt[4] = str(int(_dt[4])) # Package count to string
_iati_identifier = _dt.pop(1)
xml.append('<iati-identifier id="{}">'.format(_iati_identifier))
for _index, _field in enumerate(fields):
field = self._get_xml_name(_field)
if field == "Datasets Link":
xml.append('<iati-publisher-page xmlns:xlink="http://www.w3.org/1999/xlink">')
xml.append(' <iati-publisher-page xlink:type="simple" '
'xlink:href="{}">{}</iati-publisher-page>'.format(_dt[_index],
self._get_xml_value(_dt[0])))
xml.append('</iati-publisher-page>')
else:
xml.append(_observations.format(field, self._get_xml_value(_dt[_index]), field))
xml.append('</iati-identifier>')
xml.append('</iati-publishers-list>')
f.write("\n".join(xml))
output = f.getvalue()
f.close()
response = make_response(output)
response.headers['Content-type'] = 'text/xml'
return response
def xls(self):
"""
xls format download
:return:
"""
f = StringIO.StringIO()
wb = Workbook(encoding='utf-8')
sheet1 = wb.add_sheet('IATI Publishers List')
_org_data = PublishersListDownload._get_publisher_data()
# Write Headers
for _index, _field in enumerate(self._headers):
sheet1.write(0, _index, _field)
# Write Rows and Values
_row = 1
for org in _org_data:
if org.Group.state == 'active' and int(org.package_count) > 0:
_dt = self._prepare(org)
# Write Items
for _col, _item in enumerate(_dt):
sheet1.write(_row, _col, _item)
_row += 1
wb.save(f)
output = f.getvalue()
f.close()
response = make_response(output)
return response
def download(self):
response = getattr(PublishersListDownload, self.download_format)(self)
file_name = 'iati_publishers_list'
response.headers['Content-disposition'] = 'attachment;filename={}.{}'.format(file_name,
self.download_format)
return response
class PublisherRecordsDownload:
CSV_MAPPING = [
('registry-publisher-id', 'organization', 'name'),
('registry-file-id', 'package', 'name'),
('title', 'package', 'title'),
('description', 'package', 'notes'),
('contact-email', 'package', 'author_email'),
('state', 'package', 'state'),
('source-url', 'resources', 'url'),
('file-type', 'package', 'filetype'),
('recipient-country', 'package', 'country'),
('default-language', 'package', 'language'),
('secondary-publisher', 'package', 'secondary_publisher'),
]
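    # Editor's note: write_to_csv emits these columns (minus 'state') as the CSV header row,
    # e.g. registry-publisher-id, registry-file-id, title, description, contact-email, ...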
OPTIONAL_COLUMNS = ['state', 'description', 'default-language', 'secondary-publisher']
MAX_ROWS = int(config.get('ckanext.iati.max_rows_csv_upload', 101))
def __init__(self):
pass
def _get_packages_for_org(self, context, org_name):
"""
:param context:
:param org_name:
:return:
"""
rows = 100
start = 0
packages = []
data_dict = {
'q': '*:*',
'fq': 'organization:' + org_name,
'rows': rows,
'start': start,
}
def do_query(context, data_dict):
return p.toolkit.get_action('package_search')(context, data_dict)
pending = True
while pending:
query = do_query(context, data_dict)
if len(query['results']):
packages.extend(query['results'])
data_dict['start'] += rows
else:
pending = False
return packages
def write_to_csv(self, publisher):
"""
:param publisher:
:return:
"""
context = {'model': model, 'user': c.user or c.author}
try:
if publisher == 'all':
package_ids = p.toolkit.get_action('package_list')(context, {})
packages = []
for pkg_id in package_ids:
try:
package = p.toolkit.get_action('package_show')(context, {'id': pkg_id})
package.pop('state', None)
packages.append(package)
except p.toolkit.NotAuthorized:
log.warn('User %s not authorized to read package %s' % (c.user, pkg_id))
continue
elif publisher == 'template':
# Just return an empty CSV file with just the headers
packages = []
else:
packages = self._get_packages_for_org(context, publisher)
f = io.BytesIO()
fieldnames = [n[0] for n in self.CSV_MAPPING if n[0] != 'state']
writer = csv.DictWriter(f, fieldnames=fieldnames, quoting=csv.QUOTE_ALL)
headers = dict((n[0], n[0]) for n in self.CSV_MAPPING if n[0] != 'state')
writer.writerow(headers)
for package in packages:
if package:
row = {}
extras_dict = extras_to_dict(package)
for fieldname, entity, key in self.CSV_MAPPING:
if key == 'state':
continue
value = None
if entity == 'organization':
if len(package['organization']):
value = package['organization']['name']
elif entity == 'resources':
if len(package['resources']) and key in package['resources'][0]:
value = package['resources'][0][key]
else:
if key in package:
value = package[key]
elif key in extras_dict:
value = extras_dict[key]
row[fieldname] = value
for field_to_check in ('title', 'description'):
if fieldname == field_to_check and row.get(field_to_check):
row[field_to_check] = row[field_to_check].encode('utf-8')
writer.writerow(row)
output = f.getvalue()
f.close()
return output
except p.toolkit.ObjectNotFound:
p.toolkit.abort(404, 'Organization not found')
class PublisherRecordsUpload(PublisherRecordsDownload):
def __init__(self, *args, **kwargs):
PublisherRecordsDownload.__init__(self)
def _validate_users(self):
"""
Validate user access -
:return: None
"""
log.info("Validating the logged in user")
if not c.user:
return p.toolkit.abort(401, 'You are not logged. Please login')
self.is_sysadmin = authz.is_sysadmin(c.user)
context = {'model': model, 'user': c.user or c.author}
self.authz_orgs = p.toolkit.get_action('organization_list_for_user')(context, {})
if not self.is_sysadmin and not self.authz_orgs:
return p.toolkit.abort(403, 'You are not authorized. You are not an admin of any publisher.')
return None
def _validate_csv_files(self, csv_file):
"""
Validate uploaded csv files.
:return:
"""
log.info("Validating the uploaded csv files")
if not hasattr(csv_file, 'filename'):
raise ValidationError("No CSV file provided. Please upload a CSV file.")
# Verify csv file extension
if os.path.splitext(csv_file.filename)[-1].lower() != '.csv':
raise ValidationError(
"Uploaded file is not a csv file. Please upload a csv file"
)
# Validate csv columns
# Validate Mandatory fields.
bom_length = len(codecs.BOM_UTF8)
data = csv_file.read()
if data.startswith(codecs.BOM_UTF8):
data = data[bom_length:]
if not data:
raise ValidationError("CSV file is empty")
buffer = io.BytesIO(data)
log.info("Validating CSV file....")
reader = csv.reader(buffer)
columns = next(reader)
# Validate columns
if not columns:
buffer.close()
raise ValidationError("Mandatory fields are missing. "
"Download csv upload template (verify mandatory columns) and "
"upload the file accordingly.")
for _col in self.CSV_MAPPING:
is_optional = _col[0] in self.OPTIONAL_COLUMNS
in_columns = _col[0] in columns
if not is_optional and not in_columns:
buffer.close()
raise ValidationError("Mandatory/unrecognized CSV columns. Given csv fields: {}")
# Validate no of rows
row_count = sum(1 for _ in reader)
log.info("Number of rows in csv: {}".format(str(row_count)))
if row_count > self.MAX_ROWS:
raise ValidationError(
"Exceeded the limit. Maximum allowed rows is 50"
)
return data
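    # Hedged illustration (not part of the original file): with the CSV_MAPPING
    # and OPTIONAL_COLUMNS defined above, the column check accepts any header row
    # that contains at least the mandatory fields, e.g.:
    #   registry-publisher-id,registry-file-id,title,contact-email,source-url,file-type,recipient-country
    # The optional columns (state, description, default-language,
    # secondary-publisher) may be added as needed.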
| 1.867188
| 2
|
bag_of_words.py
|
pieroit/python-base
| 0
|
12785523
|
<reponame>pieroit/python-base
def bag_of_words(s, sw=[]):
clean = s.lower()
symbols = '!?()[]\',;.:\"-_'
for symbol in symbols:
clean = clean.replace(symbol, '')
tokens = clean.split(' ')
    # drop every occurrence of each stopword, not just the first match
    tokens = [token for token in tokens if token not in sw]
return tokens
if __name__ == '__main__':
sentence = 'Ciao mi piace il gelato (al cioccolato)!'
bow = bag_of_words(sentence, sw=['il', 'la', 'mi', 'al'])
print(bow)
| 3.296875
| 3
|
charts/backend.py
|
nosoyyo/nembee
| 0
|
12785524
|
import uvicorn
from uvicorn.reloaders.statreload import StatReload
from uvicorn.main import run, get_logger
from starlette.applications import Starlette
from starlette.responses import JSONResponse
from utils.tiempo import eightDigits
from utils.pipeline import MongoDBPipeline
from settings import BACKEND_PORT
app = Starlette(debug=True)
@app.route('/toplist')
async def toplist(request):
try:
day = request.query_params['day']
except KeyError:
day = eightDigits()
try:
chart = request.query_params['chart']
except KeyError:
chart = 'rise'
m = MongoDBPipeline()
query = f'{day}.{chart}'
result = m.ls(query)
for item in result:
item.pop('_id')
return JSONResponse(result)
if __name__ == '__main__':
reloader = StatReload(get_logger('debug'))
reloader.run(run, {
'app': app,
'host': '127.0.0.1',
'port': BACKEND_PORT,
'log_level': 'debug',
        'debug': True
    })
    uvicorn.run(app=app, host='127.0.0.1', port=BACKEND_PORT, debug=True)
| 2.078125
| 2
|
Dataset/Leetcode/train/100/536.py
|
kkcookies99/UAST
| 0
|
12785525
|
<gh_stars>0
class Solution(object):
def XXX(self, p, q):
"""
:type p: TreeNode
:type q: TreeNode
:rtype: bool
"""
def ergodic(p, q):
if p is None:
return q is None
if q is None:
return p is None
return p.val == q.val and ergodic(p.left, q.left) and ergodic(p.right, q.right)
return ergodic(p, q)
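# Hedged self-check (not part of the original file): LeetCode supplies TreeNode
# at run time, so a minimal stand-in is defined here only to exercise XXX()
# locally; the trees below are illustrative examples.
if __name__ == "__main__":
    class TreeNode(object):
        def __init__(self, val=0, left=None, right=None):
            self.val, self.left, self.right = val, left, right
    a = TreeNode(1, TreeNode(2), TreeNode(3))
    b = TreeNode(1, TreeNode(2), TreeNode(3))
    c = TreeNode(1, TreeNode(2))
    print(Solution().XXX(a, b))  # expected: True
    print(Solution().XXX(a, c))  # expected: False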
| 3.03125
| 3
|
guppe/atividades/secao_7/ex019.py
|
WesleyLucas97/cursos_python
| 0
|
12785526
|
<reponame>WesleyLucas97/cursos_python<filename>guppe/atividades/secao_7/ex019.py
"""
Build a vector of size 50 where each element is (i + 5 * i) % (i + 1), with i being the element's position in the vector.
Then print the vector to the screen.
"""
lista = []
for i in range(50):
n1 = (i + 5 * i) % (i + 1)
lista.append(n1)
print(lista)
| 3.640625
| 4
|
networks/load_ckpt_unstrict.py
|
bigvideoresearch/SCC
| 5
|
12785527
|
<filename>networks/load_ckpt_unstrict.py
import torch
from runner_master import runner
def load_ckpt_unstrict(network, ckpt, network_name='main'):
state_dict = torch.load(ckpt, map_location='cuda:{}'.format(torch.cuda.current_device()))
if 'network' in state_dict:
state_dict = state_dict['network']
if network_name in state_dict:
state_dict = state_dict[network_name]
network.load_state_dict(state_dict, strict=False)
runner.patch_network_init('load_ckpt_unstrict', load_ckpt_unstrict)
| 2.1875
| 2
|
data/data_preprocess.py
|
adnansherif1/detectormer
| 9
|
12785528
|
<reponame>adnansherif1/detectormer
input = "./reentrancy/SmartContract_fragment.txt"
out = "out.txt"
f = open(input, "r")
f_w = open(out, "a")
lines = f.readlines()
count = 1
for i in range(len(lines)):
if lines[i].strip() == "---------------------------------":
count += 1
result = lines[i + 1].strip().split(" ")
result1 = count
result2 = result[1]
f_w.write("---------------------------------" + "\n")
f_w.write(str(result1) + " " + str(result2) + "\n")
elif ".sol" not in lines[i]:
f_w.write(lines[i])
| 2.78125
| 3
|
tasks/task0022.py
|
jtprogru/interview-task
| 3
|
12785529
|
"""
A phrase is a palindrome if, after converting all uppercase letters into lowercase
letters and removing all non-alphanumeric characters, it reads the same forward and backward.
Alphanumeric characters include letters and numbers.
Given a string s, return true if it is a palindrome, or false otherwise.
Example 1:
Input: s = "A man, a plan, a canal: Panama"
Output: true
Explanation: "amanaplanacanalpanama" is a palindrome.
Example 2:
Input: s = "race a car"
Output: false
Explanation: "raceacar" is not a palindrome.
Example 3:
Input: s = " "
Output: true
Explanation: s is an empty string "" after removing non-alphanumeric characters.
Since an empty string reads the same forward and backward, it is a palindrome.
Constraints:
- 1 <= s.length <= 2 * 10^5
- s consists only of printable ASCII characters.
"""
def solution(s: str) -> bool:
x = ""
diff = ord("a") - ord("A")
for i in s:
if ord("a") <= ord(i) <= ord("z") or ord("0") <= ord(i) <= ord("9"):
x += i
elif ord("A") <= ord(i) <= ord("Z"):
i = chr(diff + ord(i))
x += i
return x == x[::-1]
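# Hedged self-check (not part of the original file): exercises solution() on the
# three examples quoted in the docstring above.
if __name__ == "__main__":
    assert solution("A man, a plan, a canal: Panama")
    assert not solution("race a car")
    assert solution(" ")
    print("all docstring examples pass")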
| 4.25
| 4
|
access_integration/access_integration/doctype/access_integration/test_access_integration.py
|
mhbu50/access_integration
| 0
|
12785530
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Accurate Systems and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
# test_records = frappe.get_test_records('Access integration')
class TestAccessintegration(unittest.TestCase):
pass
| 1.476563
| 1
|
python/day06.py
|
devries/advent_of_code_2021
| 1
|
12785531
|
#!/usr/bin/env python
def main():
with open('../inputs/day06.txt', 'r') as f:
content = f.read()
numbers = [int(v) for v in content.split(',')]
population = [0]*9
for n in numbers:
population[n]+=1
# Part A
pop_a = population[:]
evolve(pop_a, 80)
print("Part A: ", sum(pop_a))
# Part B
pop_b = population[:]
evolve(pop_b, 256)
print("Part B: ", sum(pop_b))
def evolve(population, ngen):
for g in range(ngen):
pop0 = population[0]
for i, v in enumerate(population):
if i==0:
continue
population[i-1]=v
population[8]=pop0
population[6]+=pop0
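def _demo_evolve():
    """Hedged illustration (not part of the original file): evolve() mutates the
    nine timer buckets in place; fish at timer 0 reset to 6 and each spawns a new
    fish at timer 8. The list below is dummy data, not puzzle input."""
    pop = [1, 0, 0, 0, 0, 0, 0, 0, 0]
    evolve(pop, 1)
    assert pop == [0, 0, 0, 0, 0, 0, 1, 0, 1]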
if __name__=="__main__":
main()
| 3.3125
| 3
|
sibyl/util/DownloadProgressBar.py
|
fahminlb33/sibyl_eeg
| 1
|
12785532
|
<gh_stars>1-10
from tqdm import tqdm
class DownloadProgressBar(tqdm):
def update_to(self, b=1, bsize=1, tsize=None):
if tsize is not None:
self.total = tsize
self.update(b * bsize - self.n)
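# Hedged usage sketch (not part of the original file): DownloadProgressBar is the
# usual tqdm reporthook pattern for urllib; the URL and filename below are
# hypothetical placeholders, not values taken from this project.
if __name__ == "__main__":
    import urllib.request
    url = "https://example.com/data.bin"  # placeholder URL
    with DownloadProgressBar(unit="B", unit_scale=True, miniters=1, desc=url) as t:
        urllib.request.urlretrieve(url, filename="data.bin", reporthook=t.update_to)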
| 2.75
| 3
|
mylights.py
|
brettonw/mywizlight
| 0
|
12785533
|
<reponame>brettonw/mywizlight
import sys;
from pywizlight.bulb import PilotBuilder, PilotParser, wizlight
lightIpsByName = {
    "office": "192.168.1.217",
    "nook": "192.168.1.230"
};
lightNamesByIp = {v: k for k, v in lightIpsByName.items()};
def makeLight (name):
ip = lightIpsByName[name];
print ("Light: {} ({})".format (name, ip));
return wizlight (ip);
def getLight ():
# default to the office light (for testing)
if (len (sys.argv) > 1):
# read the second argument
light = sys.argv[1];
if light in lightIpsByName:
return makeLight (light);
if light in lightNamesByIp:
return makeLight (lightNamesByIp[light]);
print ("Unknown light ({})".format (light));
else:
print ("ERROR: No light specified");
return None;
| 2.859375
| 3
|
api/curve/config.py
|
newstartcheng/Curve
| 1
|
12785534
|
<filename>api/curve/config.py<gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Curve
~~~~
configure file
:copyright: (c) 2017 by Baidu, Inc.
:license: Apache, see LICENSE for more details.
"""
import os
SQLITE_PATH = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'curve.db')
STATIC_FOLDER = 'web'
STATIC_PATH = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'web')
INDEX_PAGE = '/' + STATIC_FOLDER + '/index.html'
DEFAULT_HOST = '0.0.0.0'
DEFAULT_PORT = 8080
| 1.664063
| 2
|
pulumi/datadog_gcp_integration/module.py
|
saiko-tech/pulumi-datadog-gcp-integration
| 0
|
12785535
|
import json, base64
import pulumi
from pulumi.output import Output
from pulumi.resource import ComponentResource, ResourceOptions
import pulumi_gcp as gcp
import pulumi_datadog as datadog
import pulumi_random
class GCPLogSinkToDataDog(ComponentResource):
def __init__(
self,
name: str,
opts: ResourceOptions = None):
super().__init__('datadog_gcp_integration:index:GCPLogSinkToDataDog', name, None, opts)
topic = gcp.pubsub.Topic(
f'{name}-topic',
name='export-logs-to-datadog',
opts=ResourceOptions(parent=self))
dd_api_key = pulumi.Config(name='datadog').require('apiKey')
push_to_dd = gcp.pubsub.Subscription(
f'{name}-subscription',
name='export-logs-to-datadog.push-to-dd',
topic=topic.id,
push_config=gcp.pubsub.SubscriptionPushConfigArgs(
push_endpoint=f'https://gcp-intake.logs.datadoghq.eu/api/v2/logs?dd-api-key={dd_api_key}&dd-protocol=gcp'),
expiration_policy=gcp.pubsub.SubscriptionExpirationPolicyArgs(
ttl=''),
retry_policy=gcp.pubsub.SubscriptionRetryPolicyArgs(
minimum_backoff='10s',
maximum_backoff='600s'),
opts=ResourceOptions(parent=self))
project = gcp.organizations.get_project()
pubsub_sa = f'serviceAccount:service-{project.number}@gcp-sa-pubsub.iam.gserviceaccount.com'
gcp.pubsub.SubscriptionIAMBinding(
f'{name}-subscriber-ack',
subscription=push_to_dd.id,
members=[pubsub_sa],
role='roles/pubsub.subscriber',
opts=ResourceOptions(parent=self))
log_sink = gcp.logging.ProjectSink(
f'{name}-log-sink',
name='export-logs-to-datadog',
destination=Output.concat('pubsub.googleapis.com/', topic.id),
unique_writer_identity=True,
opts=ResourceOptions(parent=self, depends_on=[push_to_dd]))
gcp.pubsub.TopicIAMMember(
f'{name}-log-sink-pubsub-publisher',
topic=topic.id,
role='roles/pubsub.publisher',
member=log_sink.writer_identity,
opts=ResourceOptions(parent=self))
class DataDogGCPIntegration(ComponentResource):
def __init__(
self,
name: str,
enable_log_sink: bool = False,
opts: ResourceOptions = None):
super().__init__('datadog_gcp_integration:index:DataDogGCPIntegration', name, None, opts)
suffix = pulumi_random.RandomString(
f'{name}-gcp-sa-suffix',
length=3,
min_lower=3,
opts=ResourceOptions(parent=self))
gcp_sa = gcp.serviceaccount.Account(
f'{name}-gcp-sa',
account_id=Output.concat('datadog-integration-', suffix.result),
description='DataDog GCP Integration SA',
opts=ResourceOptions(parent=self))
roles = [
'roles/cloudasset.viewer',
'roles/compute.viewer',
'roles/container.viewer',
'roles/monitoring.viewer',
]
iam_members = []
for role in roles:
member = gcp.projects.IAMMember(
f'{name}-gcp-sa-role-{role}',
role=role,
member=gcp_sa.email.apply(lambda email: f'serviceAccount:{email}'),
opts=ResourceOptions(parent=self))
iam_members.append(member)
gcp_sa_key = gcp.serviceaccount.Key(
f'{name}-gcp-sa-key',
service_account_id=gcp_sa.name,
opts=ResourceOptions(parent=self))
gcp_sa_pk = gcp_sa_key.private_key.apply(lambda k: json.loads(base64.b64decode(k)))
gcp_integration = datadog.gcp.Integration(
f'{name}-datadog-gcp-integration',
client_email=gcp_sa_pk.apply(lambda k: k['client_email']),
client_id=gcp_sa_pk.apply(lambda k: k['client_id']),
private_key=gcp_sa_pk.apply(lambda k: k['private_key']),
private_key_id=gcp_sa_pk.apply(lambda k: k['private_key_id']),
project_id=gcp_sa_pk.apply(lambda k: k['project_id']),
opts=ResourceOptions(parent=self, depends_on=iam_members))
if enable_log_sink:
GCPLogSinkToDataDog(
f'{name}-export-gcp-logs-to-datadog',
opts=ResourceOptions(parent=self, depends_on=[gcp_integration]))
| 1.828125
| 2
|
team.py
|
maxwilliams94/ultimatePy
| 1
|
12785536
|
<filename>team.py
"""
Hold references to fixtures, win/loss record, name, group affiliation of a Team
"""
class Team(object):
def __init__(self, team_id, name):
self.id = team_id
self.name = name
| 2.8125
| 3
|
tests/test_context.py
|
agronholm/asphalt
| 226
|
12785537
|
<reponame>agronholm/asphalt<filename>tests/test_context.py<gh_stars>100-1000
from __future__ import annotations
import asyncio
import sys
from collections.abc import Callable
from concurrent.futures import Executor, ThreadPoolExecutor
from inspect import isawaitable
from itertools import count
from threading import Thread, current_thread
from typing import AsyncGenerator, AsyncIterator, Dict, NoReturn, Optional, Tuple, Union
from unittest.mock import patch
import pytest
import pytest_asyncio
from async_generator import yield_
from asphalt.core import (
Context,
Dependency,
NoCurrentContext,
ResourceConflict,
ResourceNotFound,
TeardownError,
callable_name,
context_teardown,
current_context,
executor,
get_resource,
inject,
resource,
)
from asphalt.core.context import ResourceContainer, require_resource
@pytest.fixture
def context() -> Context:
return Context()
@pytest_asyncio.fixture
async def special_executor(context: Context) -> AsyncIterator[ThreadPoolExecutor]:
executor = ThreadPoolExecutor(1)
context.add_resource(executor, "special", types=[Executor])
yield executor
executor.shutdown()
class TestResourceContainer:
@pytest.mark.parametrize("thread", [False, True], ids=["eventloop", "worker"])
@pytest.mark.parametrize(
"context_attr", [None, "attrname"], ids=["no_attr", "has_attr"]
)
@pytest.mark.asyncio
async def test_generate_value(self, thread: bool, context_attr: str | None) -> None:
container = ResourceContainer(
lambda ctx: "foo", (str,), "default", context_attr, True
)
context = Context()
if thread:
value = await context.call_in_executor(container.generate_value, context)
else:
value = container.generate_value(context)
assert value == "foo"
assert context.get_resource(str) == "foo"
if context_attr:
assert getattr(context, context_attr) == "foo"
def test_repr(self) -> None:
container = ResourceContainer("foo", (str,), "default", "attrname", False)
assert repr(container) == (
"ResourceContainer(value='foo', types=[str], name='default', "
"context_attr='attrname')"
)
def test_repr_factory(self) -> None:
container = ResourceContainer(
lambda ctx: "foo", (str,), "default", "attrname", True
)
assert repr(container) == (
"ResourceContainer(factory=test_context.TestResourceContainer"
".test_repr_factory.<locals>.<lambda>, types=[str], name='default', "
"context_attr='attrname')"
)
class TestContext:
@pytest.mark.asyncio
async def test_parent(self) -> None:
"""Test that the parent property points to the parent context instance, if any."""
async with Context() as parent:
async with Context() as child:
assert parent.parent is None
assert child.parent is parent
@pytest.mark.parametrize(
"exception", [None, Exception("foo")], ids=["noexception", "exception"]
)
@pytest.mark.asyncio
async def test_close(self, context: Context, exception: Exception | None) -> None:
"""
Test that teardown callbacks are called in reverse order when a context is closed.
"""
def callback(exception=None):
called_functions.append((callback, exception))
async def async_callback(exception=None):
called_functions.append((async_callback, exception))
called_functions: list[tuple[Callable, BaseException | None]] = []
context.add_teardown_callback(callback, pass_exception=True)
context.add_teardown_callback(async_callback, pass_exception=True)
await context.close(exception)
assert called_functions == [(async_callback, exception), (callback, exception)]
@pytest.mark.asyncio
async def test_close_while_running_teardown(self, context: Context) -> None:
"""
Test that trying to close the context from a teardown callback raises a
RuntimeError.
"""
async def try_close_context() -> None:
with pytest.raises(RuntimeError, match="this context is already closing"):
await context.close()
context.add_teardown_callback(try_close_context)
await context.close()
@pytest.mark.asyncio
async def test_teardown_callback_exception(self, context: Context) -> None:
"""
Test that all callbacks are called even when some teardown callbacks raise
exceptions, and that a TeardownError is raised in such a case, containing the
exception objects.
"""
def callback1() -> None:
items.append(1)
def callback2() -> NoReturn:
raise Exception("foo")
context.add_teardown_callback(callback1)
context.add_teardown_callback(callback2)
context.add_teardown_callback(callback1)
context.add_teardown_callback(callback2)
items: list[int] = []
with pytest.raises(TeardownError) as exc:
await context.close()
assert "foo" in str(exc.value)
assert items == [1, 1]
assert len(exc.value.exceptions) == 2
@pytest.mark.asyncio
async def test_close_closed(self, context: Context) -> None:
"""Test that closing an already closed context raises a RuntimeError."""
assert not context.closed
await context.close()
assert context.closed
with pytest.raises(RuntimeError) as exc:
await context.close()
exc.match("this context has already been closed")
def test_contextmanager_exception(self, context, event_loop):
close_future = event_loop.create_future()
close_future.set_result(None)
exception = Exception("foo")
with patch.object(context, "close", return_value=close_future):
with pytest.raises(Exception) as exc, pytest.deprecated_call():
with context:
raise exception
# close.assert_called_once_with(exception)
assert exc.value is exception
@pytest.mark.asyncio
async def test_async_contextmanager_exception(self, event_loop, context):
"""Test that "async with context:" calls close() with the exception raised in the block."""
close_future = event_loop.create_future()
close_future.set_result(None)
exception = Exception("foo")
with patch.object(context, "close", return_value=close_future) as close:
with pytest.raises(Exception) as exc:
async with context:
raise exception
close.assert_called_once_with(exception)
assert exc.value is exception
@pytest.mark.parametrize("types", [int, (int,), ()], ids=["type", "tuple", "empty"])
@pytest.mark.asyncio
async def test_add_resource(self, context, event_loop, types):
"""Test that a resource is properly added in the context and listeners are notified."""
event_loop.call_soon(context.add_resource, 6, "foo", None, types)
event = await context.resource_added.wait_event()
assert event.resource_types == (int,)
assert event.resource_name == "foo"
assert not event.is_factory
assert context.get_resource(int, "foo") == 6
@pytest.mark.asyncio
async def test_add_resource_name_conflict(self, context: Context) -> None:
"""Test that adding a resource won't replace any existing resources."""
context.add_resource(5, "foo")
with pytest.raises(ResourceConflict) as exc:
context.add_resource(4, "foo")
exc.match(
"this context already contains a resource of type int using the name 'foo'"
)
@pytest.mark.asyncio
async def test_add_resource_none_value(self, context: Context) -> None:
"""Test that None is not accepted as a resource value."""
exc = pytest.raises(ValueError, context.add_resource, None)
exc.match('"value" must not be None')
@pytest.mark.asyncio
async def test_add_resource_context_attr(self, context: Context) -> None:
"""Test that when resources are added, they are also set as properties of the context."""
with pytest.deprecated_call():
context.add_resource(1, context_attr="foo")
assert context.foo == 1
def test_add_resource_context_attr_conflict(self, context: Context) -> None:
"""
Test that the context won't allow adding a resource with an attribute name that conflicts
with an existing attribute.
"""
context.a = 2
with pytest.raises(ResourceConflict) as exc, pytest.deprecated_call():
context.add_resource(2, context_attr="a")
exc.match("this context already has an attribute 'a'")
assert context.get_resource(int) is None
@pytest.mark.asyncio
async def test_add_resource_type_conflict(self, context: Context) -> None:
context.add_resource(5)
with pytest.raises(ResourceConflict) as exc:
context.add_resource(6)
exc.match(
"this context already contains a resource of type int using the name 'default'"
)
@pytest.mark.parametrize(
"name", ["a.b", "a:b", "a b"], ids=["dot", "colon", "space"]
)
@pytest.mark.asyncio
async def test_add_resource_bad_name(self, context, name):
with pytest.raises(ValueError) as exc:
context.add_resource(1, name)
exc.match(
'"name" must be a nonempty string consisting only of alphanumeric characters '
"and underscores"
)
@pytest.mark.asyncio
async def test_add_resource_parametrized_generic_type(
self, context: Context
) -> None:
resource = {"a": 1}
resource_type = Dict[str, int]
context.add_resource(resource, types=[resource_type])
assert context.require_resource(resource_type) is resource
assert context.get_resource(resource_type) is resource
assert await context.request_resource(resource_type) is resource
assert context.get_resource(Dict) is None
assert context.get_resource(dict) is None
@pytest.mark.asyncio
async def test_add_resource_factory(self, context: Context) -> None:
"""Test that resources factory callbacks are only called once for each context."""
def factory(ctx):
assert ctx is context
return next(counter)
counter = count(1)
with pytest.deprecated_call():
context.add_resource_factory(factory, int, context_attr="foo")
assert context.foo == 1
assert context.foo == 1
assert context.__dict__["foo"] == 1
@pytest.mark.asyncio
async def test_add_resource_factory_parametrized_generic_type(
self, context: Context
) -> None:
resource = {"a": 1}
resource_type = Dict[str, int]
context.add_resource_factory(lambda ctx: resource, types=[resource_type])
assert context.require_resource(resource_type) is resource
assert context.get_resource(resource_type) is resource
assert await context.request_resource(resource_type) is resource
assert context.get_resource(Dict) is None
assert context.get_resource(dict) is None
@pytest.mark.parametrize(
"name", ["a.b", "a:b", "a b"], ids=["dot", "colon", "space"]
)
@pytest.mark.asyncio
async def test_add_resource_factory_bad_name(self, context, name):
with pytest.raises(ValueError) as exc:
context.add_resource_factory(lambda ctx: 1, int, name)
exc.match(
'"name" must be a nonempty string consisting only of alphanumeric characters '
"and underscores"
)
@pytest.mark.asyncio
async def test_add_resource_factory_coroutine_callback(
self, context: Context
) -> None:
async def factory(ctx):
return 1
with pytest.raises(TypeError) as exc:
context.add_resource_factory(factory, int)
exc.match('"factory_callback" must not be a coroutine function')
@pytest.mark.asyncio
async def test_add_resource_factory_empty_types(self, context: Context) -> None:
with pytest.raises(ValueError) as exc:
context.add_resource_factory(lambda ctx: 1, ())
exc.match("no resource types were specified")
@pytest.mark.asyncio
async def test_add_resource_factory_context_attr_conflict(
self, context: Context
) -> None:
with pytest.deprecated_call():
context.add_resource_factory(lambda ctx: None, str, context_attr="foo")
with pytest.raises(ResourceConflict) as exc, pytest.deprecated_call():
await context.add_resource_factory(
lambda ctx: None, str, context_attr="foo"
)
exc.match(
"this context already contains a resource factory for the context attribute 'foo'"
)
@pytest.mark.asyncio
async def test_add_resource_factory_type_conflict(self, context: Context) -> None:
context.add_resource_factory(lambda ctx: None, (str, int))
with pytest.raises(ResourceConflict) as exc:
await context.add_resource_factory(lambda ctx: None, int)
exc.match("this context already contains a resource factory for the type int")
@pytest.mark.asyncio
async def test_add_resource_factory_no_inherit(self, context: Context) -> None:
"""
Test that a subcontext gets its own version of a factory-generated resource even if a
parent context has one already.
"""
with pytest.deprecated_call():
context.add_resource_factory(id, int, context_attr="foo")
async with context, Context() as subcontext:
assert context.foo == id(context)
assert subcontext.foo == id(subcontext)
@pytest.mark.asyncio
async def test_add_resource_return_type_single(self, context: Context) -> None:
def factory(ctx: Context) -> str:
return "foo"
async with context:
context.add_resource_factory(factory)
assert context.require_resource(str) == "foo"
@pytest.mark.asyncio
async def test_add_resource_return_type_union(self, context: Context) -> None:
def factory(ctx: Context) -> Union[int, float]:
return 5
async with context:
context.add_resource_factory(factory)
assert context.require_resource(int) == 5
assert context.require_resource(float) == 5
@pytest.mark.skipif(sys.version_info < (3, 10), reason="Requires Python 3.10+")
@pytest.mark.asyncio
async def test_add_resource_return_type_uniontype(self, context: Context) -> None:
def factory(ctx: Context) -> int | float:
return 5
async with context:
context.add_resource_factory(factory)
assert context.require_resource(int) == 5
assert context.require_resource(float) == 5
@pytest.mark.asyncio
async def test_add_resource_return_type_optional(self, context: Context) -> None:
def factory(ctx: Context) -> Optional[str]:
return "foo"
async with context:
context.add_resource_factory(factory)
assert context.require_resource(str) == "foo"
@pytest.mark.asyncio
async def test_getattr_attribute_error(self, context: Context) -> None:
async with context, Context() as child_context:
pytest.raises(AttributeError, getattr, child_context, "foo").match(
"no such context variable: foo"
)
@pytest.mark.asyncio
async def test_getattr_parent(self, context: Context) -> None:
"""
Test that accessing a nonexistent attribute on a context retrieves the value from parent.
"""
async with context, Context() as child_context:
context.a = 2
assert child_context.a == 2
@pytest.mark.asyncio
async def test_get_resources(self, context: Context) -> None:
context.add_resource(9, "foo")
context.add_resource_factory(lambda ctx: len(ctx.context_chain), int, "bar")
context.require_resource(int, "bar")
async with context, Context() as subctx:
subctx.add_resource(4, "foo")
assert subctx.get_resources(int) == {1, 4}
@pytest.mark.asyncio
async def test_require_resource(self, context: Context) -> None:
context.add_resource(1)
assert context.require_resource(int) == 1
def test_require_resource_not_found(self, context: Context) -> None:
"""Test that ResourceNotFound is raised when a required resource is not found."""
exc = pytest.raises(ResourceNotFound, context.require_resource, int, "foo")
exc.match("no matching resource was found for type=int name='foo'")
assert exc.value.type == int
assert exc.value.name == "foo"
@pytest.mark.asyncio
async def test_request_resource_parent_add(self, context, event_loop):
"""
Test that adding a resource to the parent context will satisfy a resource request in a
child context.
"""
async with context, Context() as child_context:
task = event_loop.create_task(child_context.request_resource(int))
event_loop.call_soon(context.add_resource, 6)
resource = await task
assert resource == 6
@pytest.mark.asyncio
async def test_request_resource_factory_context_attr(
self, context: Context
) -> None:
"""Test that requesting a factory-generated resource also sets the context variable."""
with pytest.deprecated_call():
context.add_resource_factory(lambda ctx: 6, int, context_attr="foo")
await context.request_resource(int)
assert context.__dict__["foo"] == 6
@pytest.mark.asyncio
async def test_call_async_plain(self, context: Context) -> None:
def runs_in_event_loop(worker_thread: Thread, x: int, y: int) -> int:
assert current_thread() is not worker_thread
return x + y
def runs_in_worker_thread() -> int:
worker_thread = current_thread()
return context.call_async(runs_in_event_loop, worker_thread, 1, y=2)
assert await context.call_in_executor(runs_in_worker_thread) == 3
@pytest.mark.asyncio
async def test_call_async_coroutine(self, context: Context) -> None:
async def runs_in_event_loop(worker_thread, x, y):
assert current_thread() is not worker_thread
await asyncio.sleep(0.1)
return x + y
def runs_in_worker_thread() -> int:
worker_thread = current_thread()
return context.call_async(runs_in_event_loop, worker_thread, 1, y=2)
assert await context.call_in_executor(runs_in_worker_thread) == 3
@pytest.mark.asyncio
async def test_call_async_exception(self, context: Context) -> None:
def runs_in_event_loop() -> NoReturn:
raise ValueError("foo")
with pytest.raises(ValueError) as exc:
await context.call_in_executor(context.call_async, runs_in_event_loop)
assert exc.match("foo")
@pytest.mark.asyncio
async def test_call_in_executor(self, context: Context) -> None:
"""Test that call_in_executor actually runs the target in a worker thread."""
worker_thread = await context.call_in_executor(current_thread)
assert worker_thread is not current_thread()
@pytest.mark.parametrize(
"use_resource_name", [True, False], ids=["direct", "resource"]
)
@pytest.mark.asyncio
async def test_call_in_executor_explicit(self, context, use_resource_name):
executor = ThreadPoolExecutor(1)
context.add_resource(executor, types=[Executor])
context.add_teardown_callback(executor.shutdown)
executor_arg = "default" if use_resource_name else executor
worker_thread = await context.call_in_executor(
current_thread, executor=executor_arg
)
assert worker_thread is not current_thread()
@pytest.mark.asyncio
async def test_call_in_executor_context_preserved(self, context: Context) -> None:
"""
Test that call_in_executor runs the callable in a copy of the current (PEP 567)
context.
"""
async with Context() as ctx:
assert await context.call_in_executor(current_context) is ctx
@pytest.mark.asyncio
async def test_threadpool(self, context: Context) -> None:
event_loop_thread = current_thread()
async with context.threadpool():
assert current_thread() is not event_loop_thread
@pytest.mark.asyncio
async def test_threadpool_named_executor(
self, context: Context, special_executor: Executor
) -> None:
special_executor_thread = special_executor.submit(current_thread).result()
async with context.threadpool("special"):
assert current_thread() is special_executor_thread
class TestExecutor:
@pytest.mark.asyncio
async def test_no_arguments(self, context: Context) -> None:
@executor
def runs_in_default_worker() -> None:
assert current_thread() is not event_loop_thread
current_context()
event_loop_thread = current_thread()
async with context:
await runs_in_default_worker()
@pytest.mark.asyncio
async def test_named_executor(
self, context: Context, special_executor: Executor
) -> None:
@executor("special")
def runs_in_default_worker(ctx: Context) -> None:
assert current_thread() is special_executor_thread
assert current_context() is ctx
special_executor_thread = special_executor.submit(current_thread).result()
async with context:
await runs_in_default_worker(context)
@pytest.mark.asyncio
async def test_executor_missing_context(self, context: Context):
@executor("special")
def runs_in_default_worker() -> None:
current_context()
with pytest.raises(RuntimeError) as exc:
async with context:
await runs_in_default_worker()
exc.match(
r"the first positional argument to %s\(\) has to be a Context instance"
% callable_name(runs_in_default_worker)
)
class TestContextTeardown:
@pytest.mark.parametrize(
"expected_exc", [None, Exception("foo")], ids=["no_exception", "exception"]
)
@pytest.mark.asyncio
async def test_function(self, expected_exc: Exception | None) -> None:
phase = received_exception = None
@context_teardown
async def start(ctx: Context) -> AsyncIterator[None]:
nonlocal phase, received_exception
phase = "started"
exc = yield
phase = "finished"
received_exception = exc
context = Context()
await start(context)
assert phase == "started"
await context.close(expected_exc)
assert phase == "finished"
assert received_exception == expected_exc
@pytest.mark.parametrize(
"expected_exc", [None, Exception("foo")], ids=["no_exception", "exception"]
)
@pytest.mark.asyncio
async def test_method(self, expected_exc: Exception | None) -> None:
phase = received_exception = None
class SomeComponent:
@context_teardown
async def start(self, ctx: Context) -> AsyncIterator[None]:
nonlocal phase, received_exception
phase = "started"
exc = yield
phase = "finished"
received_exception = exc
context = Context()
await SomeComponent().start(context)
assert phase == "started"
await context.close(expected_exc)
assert phase == "finished"
assert received_exception == expected_exc
def test_plain_function(self) -> None:
def start(ctx) -> None:
pass
pytest.raises(TypeError, context_teardown, start).match(
" must be an async generator function"
)
@pytest.mark.asyncio
async def test_bad_args(self) -> None:
with pytest.deprecated_call():
@context_teardown
async def start(ctx: Context) -> None:
pass
with pytest.raises(RuntimeError) as exc:
await start(None)
exc.match(
r"the first positional argument to %s\(\) has to be a Context instance"
% callable_name(start)
)
@pytest.mark.asyncio
async def test_exception(self) -> None:
@context_teardown
async def start(ctx: Context) -> AsyncIterator[None]:
raise Exception("dummy error")
yield
context = Context()
with pytest.raises(Exception) as exc_info:
await start(context)
exc_info.match("dummy error")
@pytest.mark.asyncio
async def test_missing_yield(self) -> None:
with pytest.deprecated_call():
@context_teardown
async def start(ctx: Context) -> None:
pass
await start(Context())
@pytest.mark.asyncio
async def test_py35_generator(self) -> None:
with pytest.deprecated_call():
@context_teardown
async def start(ctx: Context) -> None:
await yield_()
await start(Context())
@pytest.mark.parametrize(
"resource_func",
[
pytest.param(Context.get_resource, id="get_resource"),
pytest.param(Context.require_resource, id="require_resource"),
pytest.param(Context.request_resource, id="request_resource"),
],
)
@pytest.mark.asyncio
async def test_get_resource_at_teardown(self, resource_func) -> None:
resource: str
async def teardown_callback() -> None:
nonlocal resource
resource = resource_func(ctx, str)
if isawaitable(resource):
resource = await resource
async with Context() as ctx:
ctx.add_resource("blah")
ctx.add_teardown_callback(teardown_callback)
assert resource == "blah"
@pytest.mark.parametrize(
"resource_func",
[
pytest.param(Context.get_resource, id="get_resource"),
pytest.param(Context.require_resource, id="require_resource"),
pytest.param(Context.request_resource, id="request_resource"),
],
)
@pytest.mark.asyncio
async def test_generate_resource_at_teardown(self, resource_func) -> None:
resource: str
async def teardown_callback() -> None:
nonlocal resource
resource = resource_func(ctx, str)
if isawaitable(resource):
resource = await resource
async with Context() as ctx:
ctx.add_resource_factory(lambda context: "blah", [str])
ctx.add_teardown_callback(teardown_callback)
assert resource == "blah"
class TestContextFinisher:
@pytest.mark.parametrize(
"expected_exc", [None, Exception("foo")], ids=["no_exception", "exception"]
)
@pytest.mark.asyncio
async def test_context_teardown(self, expected_exc: Exception | None) -> None:
phase = received_exception = None
@context_teardown
async def start(ctx: Context) -> AsyncIterator[None]:
nonlocal phase, received_exception
phase = "started"
exc = yield
phase = "finished"
received_exception = exc
context = Context()
await start(context)
assert phase == "started"
await context.close(expected_exc)
assert phase == "finished"
assert received_exception == expected_exc
@pytest.mark.asyncio
async def test_current_context() -> None:
pytest.raises(NoCurrentContext, current_context)
async with Context() as parent_ctx:
assert current_context() is parent_ctx
async with Context() as child_ctx:
assert current_context() is child_ctx
assert current_context() is parent_ctx
pytest.raises(NoCurrentContext, current_context)
@pytest.mark.asyncio
async def test_get_resource() -> None:
async with Context() as ctx:
ctx.add_resource("foo")
assert get_resource(str) == "foo"
assert get_resource(int) is None
@pytest.mark.asyncio
async def test_require_resource() -> None:
async with Context() as ctx:
ctx.add_resource("foo")
assert require_resource(str) == "foo"
pytest.raises(ResourceNotFound, require_resource, int)
def test_explicit_parent_deprecation() -> None:
parent_ctx = Context()
pytest.warns(DeprecationWarning, Context, parent_ctx)
@pytest.mark.asyncio
async def test_context_stack_corruption(event_loop):
async def generator() -> AsyncGenerator:
async with Context():
yield
gen = generator()
await event_loop.create_task(gen.asend(None))
async with Context() as ctx:
with pytest.warns(
UserWarning, match="Potential context stack corruption detected"
):
try:
await event_loop.create_task(gen.asend(None))
except StopAsyncIteration:
pass
assert current_context() is ctx
pytest.raises(NoCurrentContext, current_context)
class TestDependencyInjection:
@pytest.mark.asyncio
async def test_static_resources(self) -> None:
@inject
async def injected(
foo: int, bar: str = resource(), *, baz: str = resource("alt")
) -> Tuple[int, str, str]:
return foo, bar, baz
async with Context() as ctx:
ctx.add_resource("bar_test")
ctx.add_resource("baz_test", "alt")
foo, bar, baz = await injected(2)
assert foo == 2
assert bar == "bar_test"
assert baz == "baz_test"
@pytest.mark.asyncio
async def test_sync_injection(self) -> None:
@inject
def injected(
foo: int, bar: str = resource(), *, baz: str = resource("alt")
) -> Tuple[int, str, str]:
return foo, bar, baz
async with Context() as ctx:
ctx.add_resource("bar_test")
ctx.add_resource("baz_test", "alt")
foo, bar, baz = injected(2)
assert foo == 2
assert bar == "bar_test"
assert baz == "baz_test"
@pytest.mark.asyncio
async def test_missing_annotation(self) -> None:
async def injected(
foo: int, bar: str = resource(), *, baz=resource("alt")
) -> None:
pass
pytest.raises(TypeError, inject, injected).match(
f"Dependency for parameter 'baz' of function "
f"'{__name__}.{self.__class__.__name__}.test_missing_annotation.<locals>"
f".injected' is missing the type annotation"
)
@pytest.mark.asyncio
async def test_missing_resource(self) -> None:
@inject
async def injected(foo: int, bar: str = resource()) -> None:
pass
with pytest.raises(ResourceNotFound) as exc:
async with Context():
await injected(2)
exc.match("no matching resource was found for type=str name='default'")
@pytest.mark.parametrize(
"annotation",
[
pytest.param(Optional[str], id="optional"),
# pytest.param(Union[str, int, None], id="union"),
pytest.param(
"str | None",
id="uniontype.10",
marks=[
pytest.mark.skipif(
sys.version_info < (3, 10), reason="Requires Python 3.10+"
)
],
),
],
)
@pytest.mark.parametrize(
"sync",
[
pytest.param(True, id="sync"),
pytest.param(False, id="async"),
],
)
@pytest.mark.asyncio
async def test_inject_optional_resource_async(
self, annotation: type, sync: bool
) -> None:
if sync:
@inject
def injected(
res: annotation = resource(), # type: ignore[valid-type]
) -> annotation: # type: ignore[valid-type]
return res
else:
@inject
async def injected(
res: annotation = resource(), # type: ignore[valid-type]
) -> annotation: # type: ignore[valid-type]
return res
async with Context() as ctx:
retval = injected() if sync else (await injected())
assert retval is None
ctx.add_resource("hello")
retval = injected() if sync else (await injected())
assert retval == "hello"
def test_resource_function_not_called(self) -> None:
async def injected(foo: int, bar: str = resource) -> None:
pass
with pytest.raises(TypeError) as exc:
inject(injected)
exc.match(
f"Default value for parameter 'bar' of function "
f"{__name__}.{self.__class__.__name__}.test_resource_function_not_called"
f".<locals>.injected was the 'resource' function – did you forget to add "
f"the parentheses at the end\\?"
)
def test_missing_inject(self) -> None:
def injected(foo: int, bar: str = resource()) -> None:
bar.lower()
with pytest.raises(AttributeError) as exc:
injected(1)
exc.match(
r"Attempted to access an attribute in a resource\(\) marker – did you "
r"forget to add the @inject decorator\?"
)
def test_no_resources_declared(self) -> None:
def injected(foo: int) -> None:
pass
match = (
f"{__name__}.{self.__class__.__name__}.test_no_resources_declared.<locals>"
f".injected does not have any injectable resources declared"
)
with pytest.warns(UserWarning, match=match):
func = inject(injected)
assert func is injected
def test_dependency_deprecated() -> None:
with pytest.deprecated_call():
async def foo(res: str = Dependency()) -> None:
pass
| 2.046875
| 2
|
python/scripts/dev/m3dev_tuning.py
|
ahoarau/m3meka
| 1
|
12785538
|
<filename>python/scripts/dev/m3dev_tuning.py
#!/usr/bin/python
import m3.toolbox as m3t
import m3.actuator_ec as m3aec
import m3.actuator as m3a
import m3.ctrl_simple as m3cs
import m3.pwr as m3power
import argparse
import re
class M3Tuning:
def __init__(self):
self.comps = {'act': {'name': 'm3actuator_', 'type':m3a.M3Actuator, 'child_name':'act_ec_component' },
'act_ec': {'name': 'm3actuator_ec_', 'type':m3aec.M3ActuatorEc, 'child_name':None},
'ctrl': {'name': 'm3ctrl_simple_', 'type':m3cs.M3CtrlSimple, 'child_name':'act_component'},
'pwr': {'name': 'm3pwr_', 'type': m3power.M3Pwr}}
parser = argparse.ArgumentParser()
parser.add_argument('-v','--verbose',action='store_true')
self.args = parser.parse_args()
# 'arm' : {'name': 'm3rw_arm_ra0', 'type': m3arm.M3RwArm, 'child_name':'chain_component'},
# 'chain': {'name': 'm3rw_joint_chain', 'type': m3jc.M3RwJointChain, 'child_name':'joint_components'},
# 'joint': {'name': 'm3rw_joint', 'type': m3j.M3RwJoint, 'child_name':'control_component'},
# 'pwr': {'name': 'm3rw_pwr_ra0', 'type': m3power.M3RwPwr}}
def get_component(self,name):
if name[-3:-1] == '_j':
self.comp_name = name
else:
cnames = self.proxy.get_available_components(name)
if len(cnames)==0:
print 'No ' + name + ' components found. Exiting...'
exit()
self.comp_name = m3t.user_select_components_interactive(cnames,single=True)[0]
comp_type = [k for k, v in self.comps.iteritems() if self.comp_name.startswith(v['name'])][0]
self.get_children_components(self.comp_name, comp_type)
def get_all_components(self,name):
self.cnames = self.proxy.get_available_components(name)
if len(self.cnames)==0:
print 'No ' + name + ' components found. Exiting...'
raise Exception('no available components')
for cname in self.cnames:
self.child_names[cname] = self.get_all_children_components(cname)
# component list would be 'act','act_ec','ctrl','pwr'
def start_all_components(self,component_list,operational_list):
for cname in self.cnames:
if self.args.verbose: print "starting component " + cname
# accomplishes this: self.act=m3s.M3Actuator(self.comp_name)
            comp_type = [k for k, v in self.comps.iteritems() if cname.startswith(v['name'])][0]
            setattr(self, comp_type, self.comps[comp_type]['type'](cname))
            self.comps[comp_type]['comp'] = getattr(self, comp_type)
            self.proxy.subscribe_status(getattr(self, comp_type))
            self.proxy.publish_command(getattr(self, comp_type))
            self.proxy.publish_param(getattr(self, comp_type))
            if operational_list is None or comp_type in operational_list:
                self.proxy.make_operational(cname)
if 'pwr' in component_list:
print 'making power operational'
self.proxy.make_operational(self.pwr.get_actuator_ec_name())
self.pwr.set_motor_power_on()
self.proxy.step()
def start_components(self,component_list,operational_list):
for k in component_list:
if self.args.verbose: print "starting component " + self.comps[k]['name']
v = self.comps[k]
# accomplishes this: self.act=m3s.M3Actuator(self.comp_name)
setattr(self, k, v['type'](v['name']) )
self.comps[k]['comp'] = getattr(self,k)
self.proxy.subscribe_status(getattr(self,k))
self.proxy.publish_command(getattr(self,k))
self.proxy.publish_param(getattr(self,k))
if operational_list is None or k in operational_list:
self.proxy.make_operational(v['name'])
if 'pwr' in component_list:
print 'making power operational'
#self.proxy.make_operational(self.pwr.get_actuator_ec_name())
self.pwr.set_motor_power_on()
self.proxy.step()
def get_children_components(self,comp_name,comp_type):
if self.args.verbose: print "component name is " + str(comp_name)
if self.args.verbose: print "component type is " + comp_type
if self.args.verbose: print "component prefix is " + self.comps[comp_type]['name']
self.joint_suffix = comp_name.replace(self.comps[comp_type]['name'],"") # more generic than comp_name[-2:]
if self.args.verbose: print "joint suffix is " + self.joint_suffix
for k in ['act','act_ec','ctrl']:
self.comps[k]['name'] = self.comps[k]['name'] + self.joint_suffix
pwr_name = m3t.get_actuator_ec_pwr_component_name(self.comps['act_ec']['name'])
self.comps['pwr']['name'] = pwr_name
    def get_all_children_components(self, comp_name):
child_dict = {}
self.param_dict=self.proxy.get_param_dict()
if re.match(r"m3rw_actuator_ra0_j\d",comp_name):
child_dict['act_ec'] = self.param_dict[comp_name]['act_ec_component']
| 2.3125
| 2
|
video_from_lv.py
|
pengzhou93/dancenet
| 499
|
12785539
|
import tensorflow as tf
import numpy as np
from model import decoder,vae
import cv2
vae.load_weights("vae_cnn.h5")
lv = np.load("lv.npy")
fourcc = cv2.VideoWriter_fourcc(*'XVID')
video = cv2.VideoWriter("output.avi", fourcc, 30.0, (208, 120))
for i in range(1000):
data = lv[i].reshape(1,128)
img = decoder.predict(data)
img = np.array(img).reshape(120,208,1)
img = img * 255
img = np.array(img).astype("uint8")
img = cv2.cvtColor(img,cv2.COLOR_GRAY2RGB)
video.write(img)
video.release()
| 2.546875
| 3
|
botUtils.py
|
fuji97/weedlebot
| 0
|
12785540
|
<filename>botUtils.py<gh_stars>0
import logging
import models
import os
OWNER = int(os.environ.get("ID_OWNER", 0))
logger = logging.getLogger(__name__)
def checkPermission(user, level, chat=None):
if user.id == OWNER:
return True
if chat:
role = next(member for member in chat.users if member.user == user).user_role
if role.value <= level:
return True
else:
return False
else:
if user.general_role.value <= level:
return True
else:
return False
class permissions(object):
def __init__(self, session, user_id, permission, **kwargs):
self.session = session
self.user_id = user_id
self.permission = permission
self.chat_id = kwargs['chat_id'] if 'chat_id' in kwargs else None
self.alternative = kwargs['alternative'] if 'alternative' in kwargs else None
    def __call__(self, f):
        def wrap(*args):
            have_permissions = False
            if self.chat_id is not None:
                res = self.session.query(models.ChatMember).filter_by(
                    user_id=self.user_id, chat_id=self.chat_id).first()
                if res is not None:
                    if res.user_role.value <= self.permission.value:
                        have_permissions = True
            else:
                res = self.session.query(models.User).filter_by(user_id=self.user_id).first()
                if res is not None:
                    if res.general_role.value <= self.permission.value:
                        have_permissions = True
            if have_permissions is True:
                f(*args)
            elif self.alternative is not None:
                self.alternative(*args)
        return wrap
| 2.375
| 2
|
tests/test_domains.py
|
Marak/hook.io-sdk-python
| 1
|
12785541
|
<gh_stars>1-10
#!/usr/bin/env python
def test_domains(sdk):
res = sdk.domains.all(anonymous=True)
assert 'error' in res
assert res['error'] is True
assert res['type'] == 'unauthorized-role-access'
assert res['role'] == 'domain::find'
assert 'domain::find' in res['message']
res = sdk.domains.all()
assert type(res) == list
| 2.203125
| 2
|
layers/bert_base.py
|
shuopwang/ABSA
| 1
|
12785542
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
USE_CUDA = torch.cuda.is_available()
device = torch.device("cuda" if USE_CUDA else "cpu")
class Bert_Base(nn.Module):
def __init__(self, opt):
super(Bert_Base, self).__init__()
self.opt = opt
#self.tokenizer = BertTokenizer.from_pretrained(model_path)
def forward(self, inputs, use_hidden_state=False):
text_raw_indices, text_raw_indices_mask, aspect_position_text = inputs[0], inputs[1], inputs[2]
ctx = self.opt.bse.get_vector(text_raw_indices)
ctx_len = torch.sum(text_raw_indices_mask != 0, dim=1)
vectors = []
aspect_vectors = []
asp_len = []
for idx, vector in enumerate(ctx):
# print(aspect_position_text[idx])
# print(vector.size())
#vector = torch.stack(vector)
left, right = aspect_position_text[idx].split('_')
vector = [np.asarray(each, dtype=float) for each in vector]
aspect_vector = vector[int(left):int(right)]
# if self.opt.device:
# vector = vector.cpu()
# aspect_vector = aspect_vector.cpu()
pad_number = self.opt.max_seq_len - len(vector) + 2
#ctx_len.append(len(vector))
vector = np.asarray(vector, dtype=float)
vector = vector[1:-1]
vector = np.concatenate((vector, np.zeros((pad_number, self.opt.embed_dim))))
vector = vector.astype('float32')
vector = torch.from_numpy(vector)
#pad_tuple = (0, 0, left, 0)
#vector = F.pad(vector, pad_tuple, 'constant', 0)
pad_number = self.opt.max_seq_len - len(aspect_vector)
asp_len.append(len(aspect_vector))
aspect_vector = np.asarray(aspect_vector)
aspect_vector = np.concatenate((aspect_vector, np.zeros((pad_number, self.opt.embed_dim))))
aspect_vector = aspect_vector.astype('float32')
aspect_vector = torch.from_numpy(aspect_vector)
if self.opt.device:
vector = vector.to(self.opt.device)
aspect_vector = aspect_vector.to(self.opt.device)
vectors.append(vector)
aspect_vectors.append(aspect_vector)
ctx = torch.stack(vectors)
asp = torch.stack(aspect_vectors)
asp_len = torch.from_numpy(np.asarray(asp_len))
#ctx_len = torch.from_numpy(np.asarray(ctx_len))
if self.opt.device:
asp_len = asp_len.to(self.opt.device)
ctx_len = ctx_len.to(self.opt.device)
ctx.requires_grad = False
asp.requires_grad = False
# print(vectors.size())
# print(aspect_vectors.size())
return ctx, asp, ctx_len, asp_len
| 2.421875
| 2
|
src/aioros_tf2/action_client.py
|
mgrrx/aioros_tf2
| 0
|
12785543
|
from typing import Optional
from aioros_action import ActionClient
from aioros_action import create_client
from aioros import NodeHandle
from aioros_tf2.abc import BufferInterface
from aioros_tf2.exceptions import ConnectivityException
from aioros_tf2.exceptions import ExtrapolationException
from aioros_tf2.exceptions import InvalidArgumentException
from aioros_tf2.exceptions import LookupException
from aioros_tf2.exceptions import TimeoutException
from aioros_tf2.exceptions import TransformException
from genpy import Duration
from genpy import Time
from geometry_msgs.msg import TransformStamped
from tf2_msgs.msg import LookupTransformAction
from tf2_msgs.msg import LookupTransformGoal
from tf2_msgs.msg import TF2Error
class BufferActionClient(BufferInterface):
def __init__(self, ns: str) -> None:
self._ns = ns
self._action_client: Optional[ActionClient] = None
async def init(
self,
node_handle: NodeHandle
) -> None:
self._action_client = await create_client(
node_handle,
self._ns,
LookupTransformAction)
async def close(self) -> None:
if self._action_client:
await self._action_client.close()
self._action_client = None
async def wait_for_server(self) -> None:
await self._action_client.wait_for_server()
async def lookup_transform(
self,
target_frame: str,
source_frame: str,
time: Time,
timeout: Optional[Duration] = None
) -> TransformStamped:
return await self._call_action(
LookupTransformGoal(
target_frame=target_frame,
source_frame=source_frame,
source_time=time,
timeout=timeout or Duration(),
advanced=False))
async def lookup_transform_full(
self,
target_frame: str,
target_time: Time,
source_frame: str,
source_time: Time,
fixed_frame: str,
timeout: Optional[Duration] = None
) -> TransformStamped:
return await self._call_action(
LookupTransformGoal(
target_frame=target_frame,
source_frame=source_frame,
source_time=source_time,
timeout=timeout or Duration(),
target_time=target_time,
fixed_frame=fixed_frame,
advanced=True))
async def can_transform(
self,
target_frame: str,
source_frame: str,
time: Time,
timeout: Optional[Duration] = None
) -> bool:
try:
            await self.lookup_transform(
target_frame,
source_frame,
time,
timeout)
return True
except TransformException:
return False
async def can_transform_full(
self,
target_frame: str,
target_time: Time,
source_frame: str,
source_time: Time,
fixed_frame: str,
timeout: Optional[Duration] = None
) -> bool:
try:
            await self.lookup_transform_full(
target_frame,
target_time,
source_frame,
source_time,
fixed_frame,
timeout)
return True
except TransformException:
return False
async def _call_action(
self,
goal: LookupTransformGoal
) -> TransformStamped:
goal_handle = self._action_client.send_goal(goal)
result = await goal_handle.wait_for_result()
if result.error.error != TF2Error.NO_ERROR:
if result.error.error == TF2Error.LOOKUP_ERROR:
raise LookupException(result.error.error_string)
elif result.error.error == TF2Error.CONNECTIVITY_ERROR:
raise ConnectivityException(result.error.error_string)
elif result.error.error == TF2Error.EXTRAPOLATION_ERROR:
raise ExtrapolationException(result.error.error_string)
elif result.error.error == TF2Error.INVALID_ARGUMENT_ERROR:
raise InvalidArgumentException(result.error.error_string)
elif result.error.error == TF2Error.TIMEOUT_ERROR:
raise TimeoutException(result.error.error_string)
else:
raise TransformException(result.error.error_string)
return result.transform
async def create_buffer_action_client(
node_handle: NodeHandle,
ns: str,
) -> BufferActionClient:
buffer_action_client = BufferActionClient(ns)
await buffer_action_client.init(node_handle)
return buffer_action_client
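# Hedged usage sketch (not part of the original file); the node handle and the
# "tf2_buffer_server" namespace below are illustrative placeholders:
#   buffer_client = await create_buffer_action_client(node_handle, "tf2_buffer_server")
#   await buffer_client.wait_for_server()
#   transform = await buffer_client.lookup_transform("map", "base_link", Time())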
| 2.125
| 2
|
api/src/opentrons/protocol_engine/commands/aspirate.py
|
Opentrons/protocol_framework
| 0
|
12785544
|
"""Aspirate command request, result, and implementation models."""
from __future__ import annotations
from typing import TYPE_CHECKING, Optional, Type
from typing_extensions import Literal
from .pipetting_common import (
PipetteIdMixin,
VolumeMixin,
FlowRateMixin,
WellLocationMixin,
BaseLiquidHandlingResult,
)
from .command import AbstractCommandImpl, BaseCommand, BaseCommandCreate
if TYPE_CHECKING:
from ..execution import PipettingHandler
AspirateCommandType = Literal["aspirate"]
class AspirateParams(PipetteIdMixin, VolumeMixin, FlowRateMixin, WellLocationMixin):
"""Parameters required to aspirate from a specific well."""
pass
class AspirateResult(BaseLiquidHandlingResult):
"""Result data from execution of an Aspirate command."""
pass
class AspirateImplementation(AbstractCommandImpl[AspirateParams, AspirateResult]):
"""Aspirate command implementation."""
def __init__(self, pipetting: PipettingHandler, **kwargs: object) -> None:
self._pipetting = pipetting
async def execute(self, params: AspirateParams) -> AspirateResult:
"""Move to and aspirate from the requested well."""
volume = await self._pipetting.aspirate(
pipette_id=params.pipetteId,
labware_id=params.labwareId,
well_name=params.wellName,
well_location=params.wellLocation,
volume=params.volume,
flow_rate=params.flowRate,
)
return AspirateResult(volume=volume)
class Aspirate(BaseCommand[AspirateParams, AspirateResult]):
"""Aspirate command model."""
commandType: AspirateCommandType = "aspirate"
params: AspirateParams
result: Optional[AspirateResult]
_ImplementationCls: Type[AspirateImplementation] = AspirateImplementation
class AspirateCreate(BaseCommandCreate[AspirateParams]):
"""Create aspirate command request model."""
commandType: AspirateCommandType = "aspirate"
params: AspirateParams
_CommandCls: Type[Aspirate] = Aspirate
| 2.703125
| 3
|
nonce_source.py
|
jake-billings/research-blockchain
| 5
|
12785545
|
import os
import random
sys_random = random.SystemRandom()
from PIL import Image
import images
import cStringIO
class NonceSource:
    def __init__(self, root):
        # Load every file in the given directory as a PIL image, kept in memory
        self.interesting_filenames = os.listdir(root)
        self.interesting_images = []
        for name in self.interesting_filenames:
            self.interesting_images.append(Image.open(os.path.join(root, name)))
    def provide_image(self, limit=10, width=80, height=80):
        # Try up to `limit` random source images; return the first resized RGB
        # image that passes images.is_interesting(), or None if none qualifies
        for i in range(0, limit):
            image = sys_random\
                .choice(self.interesting_images)\
                .resize((width, height))\
                .convert('RGB')
            if images.is_interesting(image):
                return image
    def provide_nonce(self, limit=10, width=80, height=80):
        # Serialise the chosen image to JPEG bytes for use as a nonce payload
        buffer = cStringIO.StringIO()
        img = self.provide_image(limit, width, height)
        img.save(buffer, format="JPEG")
        return buffer.getvalue()
if __name__=="__main__":
import encode
print encode.encode(NonceSource("nonce_sources").provide_nonce())
| 2.75
| 3
|
CNN_DataPreparation/ConcatenateDataSet.py
|
amrkh97/Lipify-LipReading
| 0
|
12785546
|
import glob
import os
import time
import cv2
import numpy as np
from Pre_Processing import frameManipulator
commands = ['bin', 'lay', 'place', 'set']
prepositions = ['at', 'by', 'in', 'with']
colors = ['blue', 'green', 'red', 'white']
adverbs = ['again', 'now', 'please', 'soon']
alphabet = [chr(x) for x in range(ord('a'), ord('z') + 1)]
numbers = ['one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine']
categories = ['Adverb', 'Alphabet', 'Commands', 'Colors', 'Numbers', 'Prepositions']
commonCNNDataPath = 'D:/CNN-Test-Images/'
def getVideoFrames(videoPath):
"""Function to return a video's frames in a list"""
vidcap = cv2.VideoCapture(videoPath)
success, image = vidcap.read()
allFrames = []
while success:
allFrames.append(image)
success, image = vidcap.read()
return allFrames
def stackFramesToImage(listOfFrames):
"""Function to concat frames into a single picture"""
if len(listOfFrames) < frameManipulator.FPS:
return None
newList = [np.hstack(listOfFrames[:5]), np.hstack(listOfFrames[5:10]), np.hstack(listOfFrames[10:15]),
np.hstack(listOfFrames[15:20]), np.hstack(listOfFrames[20:25]), np.hstack(listOfFrames[25:30])]
return np.vstack(newList)
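# Illustrative sanity check of the 6x5 stacking layout above, assuming
# frameManipulator.FPS == 30 (as the slicing implies) and dummy 100x150
# grayscale frames; shapes are illustrative, not from the real pipeline:
#
#     dummy = [np.zeros((100, 150), dtype=np.uint8) for _ in range(30)]
#     assert stackFramesToImage(dummy).shape == (600, 750)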
def saveImage(image, imagePath):
"""Function to save an image in grayscale to a specific path"""
if len(image.shape) == 3:
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
index = len(os.listdir(imagePath))
imagePath = imagePath + '/{}.jpg'.format(index)
cv2.imwrite(imagePath, image)
def createCNNDataDirectories():
    """Function to create label directories for each category for training the CNN"""
    categoryWords = {'Commands': commands, 'Prepositions': prepositions,
                     'Colors': colors, 'Adverb': adverbs,
                     'Alphabet': alphabet, 'Numbers': numbers}
    for category, words in categoryWords.items():
        for word in words:
            dirName = commonCNNDataPath + '/{}/{}/'.format(category, word)
            if not os.path.exists(dirName):
                os.makedirs(dirName)
def extractLipsHaarCascade(haarDetector, frame):
"""Function to extract lips from a frame"""
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
roi_gray = 0
faces = haarDetector.detectMultiScale(gray, 1.3, 5)
if len(faces) == 0:
roi_gray = cv2.resize(gray, (150, 100))
return roi_gray
for (x, y, w, h) in faces:
roi_gray = gray[y + (2 * h // 3):y + h, x:x + w]
roi_gray = cv2.resize(roi_gray, (150, 100))
return roi_gray
def prepareSingleVideoForCNN(path, haarDetector):
    """Function to prepare a single video to be ready for CNN training"""
    vidData = frameManipulator.getVideoDataFromPath(path)
    videoFrames = getVideoFrames(path)
    videoFrames = [extractLipsHaarCascade(haarDetector, x) for x in videoFrames]
    stackedImage = stackFramesToImage(videoFrames) if videoFrames else None
    if stackedImage is not None:
        videoLabel = vidData.identifier.split('_')[0]
        imageSavePath = commonCNNDataPath + vidData.category + '/{}'.format(videoLabel)
        saveImage(stackedImage, imageSavePath)
    else:
        # Covers both unreadable videos and clips with fewer than FPS frames,
        # for which stackFramesToImage returns None
        print("Error in preparing video with path: {}".format(path))
def prepareDataSetForCNN(firstSpeaker, secondSpeaker):
"""Function that traverses the whole dataset and creates new directory for the CNN"""
detector = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
for i in range(firstSpeaker, secondSpeaker):
for category in categories:
sTime = time.time()
videoPath = "../New-DataSet-Videos/S{}/{}/".format(i, category) + "*.mp4"
vidList = glob.glob(videoPath)
            vidList = [x.replace("\\", '/') for x in vidList]
for j in vidList:
prepareSingleVideoForCNN(j, detector)
print("Finished category : {}, for speaker: {}".format(category, i))
print("In:{} Seconds".format(time.time() - sTime))
print("Finished Speaker {}".format(i))
def main():
startTime = time.time()
firstSpeaker = 23
secondSpeaker = 24
createCNNDataDirectories()
prepareDataSetForCNN(firstSpeaker, secondSpeaker)
print("Finished preparing the videos in {} seconds".format(time.time() - startTime))
if __name__ == "__main__":
main()
| 2.65625
| 3
|
pressiotools/io/array_read.py
|
Pressio/pressio-hyperreduction
| 0
|
12785547
|
import numpy as np
import math
from pressiotools import linalg as la
def read_binary_array(fileName, nCols):
# read a numpy array from a binary file "fileName"
if nCols==1:
return np.fromfile(fileName)
else:
array = np.fromfile(fileName)
nRows = int(len(array) / float(nCols))
return array.reshape((nCols,nRows)).T
def read_ascii_array(fileName, nCols):
    # read a numpy array from an ascii file "fileName"
    # (nCols is unused: np.loadtxt infers the shape; the argument is kept only
    #  so the signature matches read_binary_array)
    return np.asfortranarray(np.loadtxt(fileName))
def read_array(fileName, nCols, isBinary=True):
if isBinary:
return read_binary_array(fileName,nCols)
else:
return read_ascii_array(fileName,nCols)
def read_array_distributed(comm, rootFileName, nCols, isBinary=True):
# Read an array from binary or ascii files with the name specified
# by the string rootFileName
# Each local array segment will be read from a file rootFileName.XX.YY,
# where XX is the number of ranks and YY is the local rank
rank = comm.Get_rank()
size = comm.Get_size()
nDigit = int(math.log10(size)) + 1
myFileName = "{}.{}.{:0{width}d}".format(rootFileName,size,rank,width=nDigit)
myArr = read_array(myFileName,nCols,isBinary)
if nCols==1:
return la.Vector(myArr)
else:
return la.MultiVector(myArr)
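# Hypothetical usage sketches (file names and column counts are illustrative):
#
#     A = read_array("basis.bin", nCols=4, isBinary=True)   # -> (nRows, 4) ndarray
#
#     # distributed read, assuming an mpi4py-style communicator; each rank reads
#     # its own "basis.bin.<size>.<rank>" segment as described above:
#     # from mpi4py import MPI
#     # mv = read_array_distributed(MPI.COMM_WORLD, "basis.bin", nCols=4)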
| 2.96875
| 3
|
tests/conftest.py
|
vmware/pyloginsight
| 16
|
12785548
|
<filename>tests/conftest.py
# -*- coding: utf-8 -*-
from mock_loginsight_server import MockedConnection
from pyloginsight.connection import Connection, Credentials
from pyloginsight.exceptions import ServerWarning, AlreadyBootstrapped
import sys
from collections import namedtuple
import logging
import pytest
from requests.adapters import HTTPAdapter
import socket
import urllib3
import warnings
ROOTLOGGER = True
if ROOTLOGGER:
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler(sys.stderr)
formatter = logging.Formatter(u'%(asctime)s %(name)s %(levelname)s: %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
logging.captureWarnings(True)
socket.setdefaulttimeout(1)
urllib3.disable_warnings()
warnings.simplefilter("ignore", ServerWarning)
ConnectionContainer = namedtuple("Connections", ["clazz", "hostname", "port", "auth", "verify"])
def pytest_addoption(parser):
parser.addoption(
"--server", action="append", metavar="SERVER:PORT", default=[],
help="Also run tests against https://SERVER:PORT, can be listed multiple times. Mock server @ mockserverlocal:9543")
parser.addoption(
"--username", action="store", default="admin",
help="Used with --server")
parser.addoption(
"--password", action="store", default="<PASSWORD>!",
help="Used with --server")
parser.addoption(
"--provider", action="store", default="Local",
help="Used with --server")
parser.addoption(
"--license", action="store", required=False,
help="Apply license to --server if needed"),
parser.addoption(
"--packs", nargs='+', action="store", default=['sample_content.vlcp'],
help="A list of content pack files to test."
)
def identifiers_for_server_list(val):
return "{}({}:{})".format(str(val.clazz.__name__), val.hostname, val.port)
def pytest_generate_tests(metafunc):
# Creates a "servers" fixture on the fly when another fixture uses it.
if 'servers' in metafunc.fixturenames:
configs = []
servers = metafunc.config.getoption('server')
if servers == []:
print("No servers specified.")
servers = ["mockserverlocal:9543"]
for s in servers:
print("Running tests against server {}".format(s))
hostname, port = s.split(":")
# magic hostname
if hostname == "mockserverlocal":
clazz = MockedConnection
else:
clazz = Connection
configs.append(
ConnectionContainer(
clazz,
hostname,
int(port),
Credentials(
metafunc.config.getoption("username"),
metafunc.config.getoption("password"),
metafunc.config.getoption("provider"),
),
False
)
)
metafunc.parametrize("servers",
argvalues=configs,
ids=identifiers_for_server_list)
class SetHostHeaderAdapter(HTTPAdapter):
def __init__(self, host_header, *args, **kwargs):
self.host_header = host_header
super(SetHostHeaderAdapter, self).__init__(*args, **kwargs)
def send(self, request, **kwargs):
request.headers['Host'] = self.host_header
return super(SetHostHeaderAdapter, self).send(request, **kwargs)
@pytest.fixture
def licensekey(pytestconfig):
"""License key which should be applied to the remote server(s)."""
return pytestconfig.getoption('license')
@pytest.fixture
def connection(servers, licensekey):
"""A pyloginsight.connection to a remote server."""
c = servers
connection_instance = c.clazz(c.hostname, auth=c.auth, port=c.port, verify=c.verify)
# Lie about port number
if c.clazz is Connection:
adapter = SetHostHeaderAdapter("%s:9543" % c.hostname)
connection_instance._requestsession.mount(connection_instance._apiroot, adapter)
ensure_server_bootstrapped_and_licensed(connection_instance, licensekey)
return connection_instance
# Matrix of bad credentials multipled by server list
@pytest.fixture(params=[Credentials("fake", "fake", "Local"), None])
def wrong_credential_connection(servers, request, licensekey):
"""A pyloginsight.connection to a remote server, with non-functional credentials."""
c = servers._replace(auth=request.param)
connection_instance = c.clazz(c.hostname, auth=c.auth, port=c.port, verify=c.verify)
# Lie about port number
if c.clazz is Connection:
adapter = SetHostHeaderAdapter("%s:9543" % c.hostname)
connection_instance._requestsession.mount(connection_instance._apiroot, adapter)
ensure_server_bootstrapped_and_licensed(connection_instance, licensekey)
return connection_instance
def ensure_server_bootstrapped_and_licensed(connection, licensekey):
"""
:param connection: a pyloginsight.Connection instance
"""
return True
# Only need to check once
if connection._apiroot in ensure_server_bootstrapped_and_licensed.cache:
return True
try:
connection.bootstrap(email="<EMAIL>")
except AlreadyBootstrapped:
pass
connection.wait_until_started()
if 'ACTIVE' != connection.server.license.licenseState:
connection.server.license.append(str(licensekey))
ensure_server_bootstrapped_and_licensed.cache.append(connection._apiroot)
ensure_server_bootstrapped_and_licensed.cache = []
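# Hypothetical example of a test consuming the `connection` fixture above
# (the test name is illustrative; the attribute chain mirrors the license
# check performed in ensure_server_bootstrapped_and_licensed):
#
#     def test_license_state_is_reported(connection):
#         assert connection.server.license.licenseState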
| 1.921875
| 2
|
题源分类/剑指offer/python/面试题26:复杂链表的复制.py
|
ZhengyangXu/Algorithm-Daily-Practice
| 0
|
12785549
|
# Interview question 26: copy a complex linked list
# Problem: implement the function ComplexListNode* Clone(ComplexListNode* pHead)
# to copy a complex linked list. In a complex linked list each node has, besides
# an m_pNext pointer to the next node, an m_pSibling pointer that points to an
# arbitrary node in the list or to NULL. The node is defined below (a Python
# equivalent of the original C++ definition):
class Node:
def __init__(self,val,next=None,random=None):
self.val = val
self.next = next
self.random = random
def copyRandomList(head):
    if not head:
        return
    # Pass 1: interleave a copy right after each original node
    p = head
    while p:
        node = Node(p.val)
        node.next = p.next
        p.next = node
        p = node.next
    # Pass 2: each copy's random is the node right after the original's random
    p = head
    while p:
        node = p.next
        node.random = p.random.next if p.random else None
        p = node.next
    # Pass 3: split the interleaved list back into original and copied lists
    p, p1 = head, Node(-1)
    cur = p1
    while p:
        cur.next = p.next
        cur = cur.next
        p.next = cur.next
        p = p.next
    return p1.next
def copyRandomList2(head):
    # Alternative approach: one hash map from each original node to its copy
    if not head:
        return
    visitedHash = {}
    p = head
    while p:
        visitedHash[p] = Node(p.val)
        p = p.next
    p = head
    while p:
        visitedHash[p].next = visitedHash.get(p.next)
        visitedHash[p].random = visitedHash.get(p.random)
        p = p.next
    return visitedHash[head]
if __name__ == "__main__":
node1 = Node(1)
node2 = Node(2)
node3 = Node(3)
node4 = Node(4)
node1.next = node2
node2.next = node3
node3.next = node4
node1.random = node3
node2.random = node4
node = copyRandomList(node1)
while node:
print(node.val)
node = node.next
| 3.375
| 3
|
api/api.py
|
PyGera/fantacalcio-bot
| 2
|
12785550
|
from flask import Flask, request, jsonify
from flask_cors import CORS
app = Flask(__name__)
CORS(app)
@app.route('/api/data', methods=['GET'])
def data():
query = ''
with open('stats.json', 'r') as db:
query = db.read()
print(query)
return query
@app.route('/api/sendData', methods=['POST'])
def receiveData():
data = request.get_json()
print(data)
with open('stats.json', 'w') as db:
db.write(f'''{{
"servers": {data['servers']},
"users": {data['users']}
}}
''')
return 'Thank you'
app.run("0.0.0.0", 8080, False)
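# Hypothetical client-side usage (assumes the `requests` package is installed;
# host/port taken from the app.run call above, payload keys from receiveData):
#
#     import requests
#     requests.post("http://localhost:8080/api/sendData",
#                   json={"servers": 10, "users": 250})
#     print(requests.get("http://localhost:8080/api/data").text)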
| 2.84375
| 3
|