blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
616
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
69
| license_type
stringclasses 2
values | repo_name
stringlengths 5
118
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringlengths 4
63
| visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 2.91k
686M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 23
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 213
values | src_encoding
stringclasses 30
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 2
10.3M
| extension
stringclasses 246
values | content
stringlengths 2
10.3M
| authors
listlengths 1
1
| author_id
stringlengths 0
212
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f38311cf03e40957574b711cba320b7410cfb08c
|
8d2e5b5ea408579faa699c09bdbea39e864cdee1
|
/ufora/distributed/Storage/ObjectStore.py
|
97ec956873351689b12fc96db5421275df57f6d9
|
[
"dtoa",
"MIT",
"BSD-3-Clause",
"BSL-1.0",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"CC0-1.0"
] |
permissive
|
iantuioti/ufora
|
2218ef4c7e33c171268ce11458e9335be7421943
|
04db96ab049b8499d6d6526445f4f9857f1b6c7e
|
refs/heads/master
| 2021-01-17T17:08:39.228987
| 2017-01-30T16:00:45
| 2017-01-30T16:00:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,008
|
py
|
# Copyright 2015 Ufora Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class ObjectStore(object):
    """Abstract key/value object-store interface.

    Concrete backends must override every method below.  The original
    implementation used ``assert False``, which is silently stripped under
    ``python -O`` (methods would then return ``None``); raising
    ``NotImplementedError`` is the reliable idiom for abstract methods.
    """
    def readValue(self, key):
        """Return the value stored under *key*."""
        raise NotImplementedError("Must be implemented by derived class")
    def writeValue(self, key, value):
        """Store *value* under *key*."""
        raise NotImplementedError("Must be implemented by derived class")
    def deleteValue(self, key):
        """Remove the value stored under *key*."""
        raise NotImplementedError("Must be implemented by derived class")
    def listValues(self, prefix=''):
        """List stored values whose keys start with *prefix*."""
        raise NotImplementedError("Must be implemented by derived class")
|
[
"braxton.mckee@gmail.com"
] |
braxton.mckee@gmail.com
|
5d3773bbd8dde3adbd0edcad7fb4192e0541adbf
|
ea04cdba4ca6419c34155310f50485a89b3965d4
|
/use/ReinforceLearning/DQN/demoDQN/RL_DQNv2.py
|
d0e546699ece088e918dc99259875f056c8231ca
|
[] |
no_license
|
conancheng/pyGreat
|
1274e5fafbf4e879afd8195df8fa086092933247
|
b5fa974876fb9a56ebc0dc0229664a4bbd475145
|
refs/heads/master
| 2023-03-13T07:04:40.615439
| 2021-03-06T09:14:46
| 2021-03-06T09:14:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,610
|
py
|
# 用nn为DQN构造网络
import torch
import torch.nn as nn
import numpy as np
from mazeEnv import Maze # 这是一个我自己写的环境
class DQN():
    """Deep Q-Network agent (PyTorch).

    Maintains an eval network and a periodically-synced target network,
    an experience-replay buffer stored as a flat numpy array, and
    epsilon-greedy action selection.
    """
    def __init__(self,
                 dim_state,
                 n_actions,
                 batch_size=32,
                 learning_rate=0.9,
                 epsilon=0.9,
                 gamma=0.9,
                 target_replace_iter=100,
                 memory_size=2000, ):
        # Build both networks with the helper below ("bulid_Net" keeps the
        # original typo so existing callers are not broken).
        self.eval_net, self.target_net = self.bulid_Net(dim_state, n_actions), self.bulid_Net(dim_state, n_actions)
        self.dim_state = dim_state  # dimension of the state vector
        self.n_actions = n_actions  # number of discrete actions
        self.batch_size = batch_size  # mini-batch size for each learn() step
        self.learning_rate = learning_rate  # Adam learning rate (NOTE(review): 0.9 is unusually high for Adam — confirm)
        self.epsilon = epsilon  # probability of taking the greedy action
        self.gamma = gamma  # reward discount factor
        self.memory_size = memory_size  # replay-buffer capacity (rows)
        self.taget_replace_iter = target_replace_iter  # steps between target-net syncs ("taget" typo preserved)
        self.learn_step_counter = 0  # counts learn() calls for the periodic sync
        self.memory_counter = 0  # total transitions stored; also the write index
        self.memory = np.zeros((self.memory_size, self.dim_state * 2 + 2))  # each row: [s, a, r, s_]
        self.optimizer = torch.optim.Adam(self.eval_net.parameters(), lr=self.learning_rate)  # optimizer for the eval net only
        self.loss_func = nn.MSELoss()  # TD-error loss
    # Epsilon-greedy action selection
    def choose_action(self, x):
        """Return an action index for state vector ``x`` (epsilon-greedy)."""
        x = torch.unsqueeze(torch.FloatTensor(x), 0)
        if np.random.uniform() < self.epsilon:  # with prob. epsilon: greedy action from the eval net
            actions_value = self.eval_net.forward(x)
            action = torch.max(actions_value, 1)[1]
            action = int(action)
        else:  # with prob. (1 - epsilon): uniform random action
            action = np.random.randint(0, self.n_actions)
        return action
    # Learn: sample a mini-batch and take one optimizer step
    def learn(self):
        """Sample a batch from replay memory and update the eval network."""
        # Periodically copy the eval-net weights into the target net.
        if self.learn_step_counter % self.taget_replace_iter == 0:
            self.target_net.load_state_dict(self.eval_net.state_dict())
        self.learn_step_counter += 1
        # Sample only from the filled portion of the replay buffer.
        data_size = self.memory_size if self.memory_counter>self.memory_size else self.memory_counter
        sample_index = np.random.choice(data_size, self.batch_size)
        b_memory = self.memory[sample_index, :]
        b_s = torch.FloatTensor(b_memory[:, :self.dim_state])
        b_a = torch.LongTensor(b_memory[:, self.dim_state:self.dim_state + 1].astype(int))
        b_r = torch.FloatTensor(b_memory[:, self.dim_state + 1:self.dim_state + 2])
        b_s_ = torch.FloatTensor(b_memory[:, -self.dim_state:])
        # Compute q_eval / q_target and the TD loss.
        q_eval = self.eval_net(b_s).gather(1, b_a)
        q_next = self.target_net(b_s_).detach()  # detach: no gradient through the target net
        q_target = b_r + self.gamma * q_next.max(1)[0].view(self.batch_size, 1)
        loss = self.loss_func(q_eval, q_target)
        # Backpropagate and update the eval net.
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
    # Store one transition (s, a, r, s_) into replay memory
    def store_transition(self, s, a, r, s_):
        """Append one transition to the circular replay buffer."""
        transition = np.hstack((s, [a, r], s_))
        # Overwrite the oldest entries once the buffer wraps around.
        index = self.memory_counter % self.memory_size
        self.memory[index, :] = transition
        self.memory_counter += 1
    # Build the Q-network (method name keeps the original "bulid" typo)
    def bulid_Net(self, dim_state, n_actions):
        """Return a small MLP mapping a state vector to per-action Q-values."""
        return torch.nn.Sequential(
            torch.nn.Linear(dim_state, 50),
            torch.nn.ReLU(),
            torch.nn.Linear(50, n_actions),
        )
if __name__ == '__main__':
    env = Maze()
    dqn = DQN(env.n_states, env.n_actions)
    print('Collecting experience...')
    for i_episode in range(400):
        s = env.reset()  # reset to the initial state
        ep_r = 0
        while True:
            env.render()  # refresh the display
            a = dqn.choose_action(s)  # pick an action
            s_, r, done = env.step(a)  # execute it: next state s_, reward r, terminal flag done
            dqn.store_transition(s, a, r, s_)  # store this single transition
            ep_r += r  # ep_r: accumulated reward for this episode
            if dqn.memory_counter > dqn.memory_size:  # start training once the buffer is full (filling it completely is not strictly required)
                dqn.learn()
            if done:
                if i_episode%20==0:
                    print('Ep: ', i_episode + 1, '| Ep_r: ', round(ep_r, 2))
            if done:  # episode over (agent reached the goal / fell into a trap)
                break
            s = s_
    # Testing phase
    print('Testing . . .')
    # dqn.epsilon = 1
    rs = []
    for state in range(50):  # run 50 test episodes to estimate the average return
        s = env.reset()
        ep_r = 0
        while True:
            env.render()
            a = dqn.choose_action(s)
            s_, r, done = env.step(a)
            ep_r += r
            # No storing or learning during the test phase.
            if done:
                print(ep_r)
                rs.append(ep_r)
                break
            s = s_
    env.close()
    print(np.average(rs))
# Recorded average returns of earlier variants:
# v1: -25.63
# v2_liner: -25.21
# v2_relu:
|
[
"darcyzhang@DarcydeMacBook-Air.local"
] |
darcyzhang@DarcydeMacBook-Air.local
|
93a15d96682f3c35ca46309bf519578757a080e1
|
3c55be0eb8997ffdaf67440bfcc705ae2dc3a4cf
|
/Python语言程序设计/Week6/DictReverse.py
|
283e223a3f709733f5f9f268dfe059e4e549e914
|
[] |
no_license
|
YanZheng-16/LearningPython
|
1a1886c83d8eb7f79282374c5bdf590973af8cc9
|
3ab5d4a1f3394319ea097bdac4ea60abbfc78abb
|
refs/heads/master
| 2022-07-13T00:00:35.367754
| 2020-05-16T14:01:24
| 2020-05-16T14:01:24
| 264,438,317
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 145
|
py
|
# Reverse a dictionary: read a dict literal from stdin and print {value: key}.
try:
    # NOTE(review): eval() on raw input is unsafe for untrusted input; kept
    # because this exercise expects a Python dict literal on stdin.
    # Moved inside the try so malformed input prints the error message
    # instead of crashing with a traceback (the intent of the handler).
    d1 = eval(input())
    # Zip values with keys to swap the mapping direction.
    d2 = dict(zip(d1.values(), d1.keys()))
    print(d2)
except Exception:
    # Narrowed from a bare except so Ctrl-C / SystemExit still propagate.
    print("输入错误")
|
[
"noreply@github.com"
] |
YanZheng-16.noreply@github.com
|
3895afbeddd52c3b7f2621e5e00daa94caec1f17
|
823cec73f05695388bfae1c5cea1056ea05c1f89
|
/tests/test_models/test_engine/test_db_storage.py
|
6e387e17d52fbd7dfdfe4d8c75c56a0c16eb6bc7
|
[] |
no_license
|
dspham/AirBnB_clone_v2
|
bb78d2793ae09378e6adeb83a8d06a7f5ad2ca22
|
0cf48f892ff5d75660d1d999aacab12dcc8de56a
|
refs/heads/master
| 2020-04-14T18:58:25.910087
| 2019-02-04T08:23:40
| 2019-02-04T08:23:40
| 164,039,797
| 0
| 1
| null | 2019-01-14T23:45:33
| 2019-01-04T01:06:22
|
Python
|
UTF-8
|
Python
| false
| false
| 1,988
|
py
|
#!/usr/bin/python3
"""test for db stroage"""
import unittest
import pep8
import os
import json
from models.base_model import BaseModel
from models.user import User
from models.state import State
from models.city import City
from models.amenity import Amenity
from models.place import Place
from models.review import Review
from models.engine.db_storage import DBStorage
class TestDBStorage(unittest.TestCase):
    """Unit tests for the DBStorage engine."""
    @classmethod
    def setUpClass(cls):
        """Create one shared DBStorage for all tests.

        The original setUpClass referenced undefined names (``self`` inside
        a classmethod, ``Session``, ``self.__engine``, ``State1``/``State2``)
        and crashed with NameError before any test ran; only the working
        setup is kept.
        """
        cls.storage = DBStorage()
        cls.storage.reload()  # bind the storage to a fresh DB session
    def tearDown(self):
        """Per-test teardown (nothing to clean up yet)."""
        pass
    def testAttributes(self):
        """Tests that the required DBStorage methods exist."""
        # The original called hasattr() with no arguments, a TypeError.
        for name in ('all', 'new', 'save', 'delete', 'reload'):
            self.assertTrue(hasattr(self.storage, name))
    def test_pep8_DBStorage(self):
        """Tests for pep8 styling"""
        style = pep8.StyleGuide(quiet=True)
        p = style.check_files(['models/engine/db_storage.py'])
        self.assertEqual(p.total_errors, 0, "Fails PEP8 compliance")
    def test_all(self):
        """Tests that all() returns a dict of stored objects."""
        self.objs = self.storage.all()
        self.assertIsNotNone(self.objs)
        self.assertEqual(type(self.objs), dict)
    def test_new(self):
        """Tests for new objects in DBStorage (not yet implemented)."""
        pass
    def test_save(self):
        """Tests for saving objects in DBStorage (not yet implemented)."""
        pass
    def test_delete(self):
        """Tests for deleting objects in DBStorage (not yet implemented)."""
        pass
    def test_reload(self):
        """Tests for reloading objects in DBStorage (not yet implemented)."""
        pass
if __name__ == "__main__":
    # Run the test suite when this file is executed directly.
    unittest.main()
|
[
"dsvpham@gmail.com"
] |
dsvpham@gmail.com
|
61cd387b4cf112eff88e9847662d96fc18e518d6
|
daffe9d6895fed5cab27b267f2d60d4e8abbd44d
|
/catalog/views.py
|
cb04e544f5b65c4811ce88cc2b69b96186b34353
|
[] |
no_license
|
QueenOfPentacles/django_library
|
4edae749a98c1cbd52b98d94d7597eb8eac90f00
|
cde7593f30a2a38f223211c1b109a411d2a499fd
|
refs/heads/master
| 2021-04-06T06:54:48.779745
| 2018-03-09T19:06:49
| 2018-03-09T19:06:49
| 124,580,554
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,586
|
py
|
from django.shortcuts import render
# Create your views here.
from .models import Book, Author, BookInstance, Genre
def index(request):
    """Render the site home page with counts of the main catalog objects."""
    # Object counts shown on the landing page.
    num_books = Book.objects.all().count()
    num_genres = Genre.objects.all().count()
    num_instances = BookInstance.objects.all().count()
    num_intros = Book.objects.filter(title__icontains='intro').count()
    # Available copies have status 'a'.
    num_instances_available = BookInstance.objects.filter(status__exact='a').count()
    num_authors = Author.objects.count()  # .all() is implied by default
    # Per-session visit counter, incremented on every hit.
    num_visits = request.session.get('num_visits', 0)
    request.session['num_visits'] = num_visits + 1
    context = {
        'num_books': num_books,
        'num_genres': num_genres,
        'num_instances': num_instances,
        'num_intros': num_intros,
        'num_instances_available': num_instances_available,
        'num_authors': num_authors,
        'num_visits': num_visits,
    }
    # Render the HTML template index.html with the collected counts.
    return render(
        request,
        'index.html',
        context=context,
    )
from django.views import generic
class BookListView(generic.ListView):
    """Paginated list view of all books."""
    model = Book
    paginate_by = 5
class BookDetailView(generic.DetailView):
    """Detail view for a single book."""
    model = Book
class AuthorListView(generic.ListView):
    """Paginated list view of all authors."""
    model = Author
    paginate_by = 5
class AuthorDetailView(generic.DetailView):
    """Detail view for a single author."""
    model = Author
from django.contrib.auth.mixins import LoginRequiredMixin
class LoanedBooksByUserListView(LoginRequiredMixin, generic.ListView):
    """List the BookInstances currently on loan to the logged-in user."""
    model = BookInstance
    template_name = 'catalog/bookinstance_list_borrowed_user.html'
    paginate_by = 5
    def get_queryset(self):
        # Copies borrowed by this user, restricted to status 'o' (on loan),
        # ordered with the soonest due date first.
        borrowed = BookInstance.objects.filter(borrower=self.request.user)
        on_loan = borrowed.filter(status__exact='o')
        return on_loan.order_by('due_back')
from django.contrib.auth.mixins import PermissionRequiredMixin
class LoanedBooksAllListView(PermissionRequiredMixin, generic.ListView):
    """List every BookInstance on loan.

    Visible only to users holding the can_mark_returned permission.
    """
    model = BookInstance
    permission_required = 'catalog.can_mark_returned'
    template_name = 'catalog/bookinstance_list_borrowed_all.html'
    paginate_by = 5
    def get_queryset(self):
        # All copies with status 'o' (on loan), soonest due date first.
        on_loan = BookInstance.objects.filter(status__exact='o')
        return on_loan.order_by('due_back')
from django.contrib.auth.decorators import permission_required
from django.shortcuts import get_object_or_404
from django.http import HttpResponseRedirect
from django.urls import reverse
import datetime
from .forms import RenewBookForm
@permission_required('catalog.can_mark_returned')
# permission_required checks that the user holds the given permission,
# named "<app label>.<codename>"; it may also take an iterable of
# permissions, in which case the user must have all of them.
def renew_book_librarian(request, pk):
    """View function for renewing a specific BookInstance (librarians only)."""
    book_inst=get_object_or_404(BookInstance, pk = pk)
    # If this is a POST request then process the Form data
    if request.method == 'POST':
        # Create a form instance and populate it with data from the request (binding):
        form = RenewBookForm(request.POST)
        # Check if the form is valid:
        if form.is_valid():
            # Write the cleaned renewal date to the model's due_back field.
            book_inst.due_back = form.cleaned_data['renewal_date']
            book_inst.save()
            # Redirect to the list of all borrowed books:
            return HttpResponseRedirect(reverse('all-borrowed') )
    # If this is a GET (or any other method) create the default form,
    # pre-filled with a proposed renewal date three weeks out.
    else:
        proposed_renewal_date = datetime.date.today() + datetime.timedelta(weeks=3)
        form = RenewBookForm(initial={'renewal_date': proposed_renewal_date,})
    return render(request, 'catalog/book_renew_librarian.html', {'form': form, 'bookinst':book_inst})
from django.contrib.auth.decorators import permission_required
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.urls import reverse_lazy
from .models import Book
class BookCreate(PermissionRequiredMixin,CreateView):
    """Create a new Book (requires can_mark_returned permission)."""
    model = Book
    fields = '__all__'
    permission_required = 'catalog.can_mark_returned'
    template_name ='catalog/book_form.html'
class BookAllListView(PermissionRequiredMixin,generic.ListView):
    """Paginated list of all books for privileged users."""
    model = Book
    permission_required = 'catalog.can_mark_returned'
    template_name ='catalog/book_list_all.html'
    paginate_by = 10
class BookUpdate(PermissionRequiredMixin,UpdateView):
    """Edit an existing Book (requires can_mark_returned permission)."""
    model = Book
    fields = '__all__'
    permission_required = 'catalog.can_mark_returned'
class BookDelete(PermissionRequiredMixin,DeleteView):
    """Delete a Book, then return to the book list."""
    model = Book
    success_url = reverse_lazy('books')
    permission_required = 'catalog.can_mark_returned'
from django.contrib.auth.decorators import permission_required
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.urls import reverse_lazy
from .models import Author
class AuthorCreate(PermissionRequiredMixin,CreateView):
    """Create a new Author (requires can_mark_returned permission)."""
    # PermissionRequiredMixin checks that the user has every permission named
    # in permission_required (a single "<app>.<codename>" string or an
    # iterable of such strings).
    model = Author
    fields = '__all__'
    permission_required = 'catalog.can_mark_returned'
    template_name ='catalog/author_form.html'
class AuthorAllListView(PermissionRequiredMixin,generic.ListView):
    """Paginated list of all authors for privileged users."""
    model = Author
    permission_required = 'catalog.can_mark_returned'
    template_name ='catalog/author_list_all.html'
    paginate_by = 10
class AuthorUpdate(PermissionRequiredMixin,UpdateView):
    """Edit an existing Author (requires can_mark_returned permission)."""
    model = Author
    fields = '__all__'
    permission_required = 'catalog.can_mark_returned'
class AuthorDelete(PermissionRequiredMixin,DeleteView):
    """Delete an Author, then return to the author list."""
    model = Author
    success_url = reverse_lazy('authors')
    permission_required = 'catalog.can_mark_returned'
|
[
"mmcbean@fixedearthenterprises.com"
] |
mmcbean@fixedearthenterprises.com
|
f4dc2c684a4a39d9c4d888e91ee360bccd665276
|
92c0dd6e8f182a3cb907bf8279f09065222f53a8
|
/data_util/COCO/image_process.py
|
137db3db85da30d8f14a7aacd248aee20158a432
|
[] |
no_license
|
hukim1112/lab4
|
d91a6d24de5df126927c9eb7631248b5cd1e2ba8
|
e15ac6ab833e68fc204b9a9ba178a7896b4116a5
|
refs/heads/master
| 2020-09-06T08:07:00.283240
| 2019-12-27T01:01:19
| 2019-12-27T01:01:19
| 220,371,574
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,612
|
py
|
import numpy as np
import cv2
import random
import os
import tensorflow as tf
from config.coco_config import config
def get_dir(src_point, rot_rad):
    """Rotate 2-D point *src_point* counter-clockwise by *rot_rad* radians."""
    sin_r = np.sin(rot_rad)
    cos_r = np.cos(rot_rad)
    # Standard 2x2 rotation applied component-wise.
    rotated_x = src_point[0] * cos_r - src_point[1] * sin_r
    rotated_y = src_point[0] * sin_r + src_point[1] * cos_r
    return [rotated_x, rotated_y]
def get_3rd_point(a, b):
    """Return the point completing a right angle at *b* for segment a-b."""
    delta = a - b
    # Rotate the (a - b) direction by 90 degrees and offset it from b.
    return b + np.array([-delta[1], delta[0]], dtype=np.float32)
def affine_transform(pt, t):
    """Apply 2x3 affine matrix *t* to 2-D point *pt*; return the mapped (x, y)."""
    # Lift the point to homogeneous coordinates, then project back to 2-D.
    homogeneous = np.array([pt[0], pt[1], 1.]).T
    mapped = np.dot(t, homogeneous)
    return mapped[:2]
def get_affine_transform(center,
                         scale,
                         rot,
                         output_size,
                         shift=np.array([0, 0], dtype=np.float32),
                         inv=0):
    """Build the 2x3 affine matrix mapping a (center, scale, rot) crop to output_size.

    Parameters
    ----------
    center : np.ndarray
        (x, y) center of the source region.
    scale : float | list | np.ndarray
        Source-region size; a scalar is expanded to (scale, scale).
    rot : float
        Rotation in degrees.
    output_size : sequence
        (width, height) of the destination image.
    shift : np.ndarray
        Fractional shift of the source center, in units of ``scale``.
    inv : int
        If truthy, return the inverse mapping (dst -> src).
    """
    if not isinstance(scale, np.ndarray) and not isinstance(scale, list):
        # Scalar scale: expand to (w, h).  (Removed a leftover debug
        # ``print(scale)`` that polluted stdout in library use.)
        scale = np.array([scale, scale])
    src_w = scale[0]
    dst_w = output_size[0]
    dst_h = output_size[1]
    rot_rad = np.pi * rot / 180
    # Direction to a second reference point in source and destination space.
    src_dir = get_dir([0, src_w * -0.5], rot_rad)
    dst_dir = np.array([0, dst_w * -0.5], np.float32)
    src = np.zeros((3, 2), dtype=np.float32)
    dst = np.zeros((3, 2), dtype=np.float32)
    src[0, :] = center + scale * shift
    src[1, :] = center + src_dir + scale * shift
    dst[0, :] = [dst_w * 0.5, dst_h * 0.5]
    dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5]) + dst_dir
    # A third, perpendicular point makes the triangle non-degenerate for
    # cv2.getAffineTransform.
    src[2:, :] = get_3rd_point(src[0, :], src[1, :])
    dst[2:, :] = get_3rd_point(dst[0, :], dst[1, :])
    if inv:
        trans = cv2.getAffineTransform(np.float32(dst), np.float32(src))
    else:
        trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))
    return trans
def render_gaussian_heatmap(coord, output_shape, config=config()):
    """Render per-keypoint Gaussian heatmaps on an ``output_shape`` grid.

    NOTE(review): the ``config=config()`` default is evaluated once at import
    time and the parameter shadows the imported ``config`` — confirm intended.
    """
    # Pixel-coordinate grids, reshaped to (*output_shape, 1) so they
    # broadcast against the (1, 1, num_kps) keypoint centers.
    x = tf.constant([i for i in range(output_shape[1])], tf.float32)
    y = tf.constant([i for i in range(output_shape[0])], tf.float32)
    xx,yy = tf.meshgrid(x,y)
    xx = tf.reshape(xx, (*output_shape,1))
    yy = tf.reshape(yy, (*output_shape,1))
    # Keypoint centers rescaled from input resolution to heatmap resolution.
    x = tf.floor(tf.reshape(coord[:,0],[1,1,config.num_kps]) / config.input_shape[1] * output_shape[1] + 0.5)
    y = tf.floor(tf.reshape(coord[:,1],[1,1,config.num_kps]) / config.input_shape[0] * output_shape[0] + 0.5)
    # Unnormalized Gaussian per keypoint, scaled to a 0-255 range.
    heatmap = tf.exp(-(((xx-x)/config.sigma)**2)/2 -(((yy-y)/config.sigma)**2)/2)
    return heatmap * 255.
def cropped_image_and_pose_coord(file_path, bbox, joints, config=config()):
    """Load an image, crop/augment around *bbox*, and transform *joints*.

    Returns ``[cropped RGB image, integer keypoint coordinates]``.
    NOTE(review): ``target_valid`` is computed but never returned — confirm.
    NOTE(review): the ``config=config()`` default is evaluated at import time.
    """
    file_path = file_path.numpy().decode("utf-8")  # tf string tensor -> str
    img = cv2.imread(os.path.join(config.image_path, file_path), cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)
    if img is None:
        print('cannot read ' + os.path.join(config.image_path, str(file_path)))
        assert 0
    x, y, w, h = bbox
    aspect_ratio = config.input_shape[1]/config.input_shape[0]
    center = np.array([x + w * 0.5, y + h * 0.5])
    # Expand the bbox to the network-input aspect ratio, keeping its center.
    if w > aspect_ratio * h:
        h = w / aspect_ratio
    elif w < aspect_ratio * h:
        w = h * aspect_ratio
    scale = np.array([w,h]) * 1.25  # pad the crop region by 25%
    rotation = 0
    joints = np.array(joints).reshape(config.num_kps, 3).astype(np.float32)
    # Data augmentation: random scale, random rotation, random horizontal flip.
    scale = scale * np.clip(np.random.randn()*config.scale_factor + 1, 1-config.scale_factor, 1+config.scale_factor)
    rotation = np.clip(np.random.randn()*config.rotation_factor, -config.rotation_factor*2, config.rotation_factor*2)\
            if random.random() <= 0.6 else 0
    if random.random() <= 0.5:
        # Horizontal flip: mirror image, center, and keypoints, then swap
        # left/right symmetric keypoint pairs.
        img = img[:, ::-1, :]
        center[0] = img.shape[1] - 1 - center[0]
        joints[:,0] = img.shape[1] - 1 - joints[:,0]
        for (q, w) in config.kps_symmetry:
            joints_q, joints_w = joints[q,:].copy(), joints[w,:].copy()
            joints[w,:], joints[q,:] = joints_q, joints_w
    trans = get_affine_transform(center, scale, rotation, (config.input_shape[1], config.input_shape[0]))
    cropped_img = cv2.warpAffine(img, trans, (config.input_shape[1], config.input_shape[0]), flags=cv2.INTER_LINEAR)
    for i in range(config.num_kps):
        if joints[i,2] > 0:
            joints[i,:2] = affine_transform(joints[i,:2], trans)
            # Zero the visibility flag for keypoints that fell outside the crop.
            joints[i,2] *= ((joints[i,0] >= 0) & (joints[i,0] < config.input_shape[1]) & (joints[i,1] >= 0) & (joints[i,1] < config.input_shape[0]))
    target_coord = joints[:,:2].astype(np.int16)
    target_valid = joints[:,2]  # NOTE(review): unused in the return value
    return [cropped_img[:,:,::-1], target_coord]
def normalize_input(self, img):
    """Subtract the fixed per-channel mean from *img*.

    NOTE(review): the stray ``self`` parameter looks like a leftover from a
    class method — confirm how callers invoke this before removing it.
    """
    mean = np.array([[[123.68, 116.78, 103.94]]])
    return img - mean
def denormalize_input(self, img):
    """Add back the fixed per-channel mean removed by normalize_input.

    NOTE(review): the stray ``self`` parameter looks like a leftover from a
    class method — confirm how callers invoke this before removing it.
    """
    mean = np.array([[[123.68, 116.78, 103.94]]])
    return img + mean
|
[
"hyounguk1112@gmail.com"
] |
hyounguk1112@gmail.com
|
8e99cddfa1ea4686427cb27459e45b7386c3b75c
|
1a38e02f8af17171fad250a5b8ba68b3c7ccf79c
|
/test.py
|
4f940e8654cbb75d1b843478fea63ba879127291
|
[] |
no_license
|
theinvisible/nautilus-advacl
|
216b12cd02ebd39dda2423d624e9bb34ca66841b
|
d1e1e02ac67d1ecd20b987928fc8618ade7b5664
|
refs/heads/master
| 2020-03-10T18:50:33.285349
| 2018-04-14T17:04:04
| 2018-04-14T17:04:04
| 129,535,143
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,466
|
py
|
'''
Created on 19.01.2013
@author: rene
'''
#!/usr/bin/python2
import sys
import os
from gi.repository import Nautilus, GObject, Gtk
#
# Make the bundled helper package importable before importing from it.
sys.path.append(os.path.dirname(os.path.realpath(__file__)) + "/nautilus-advacl")
import nautilusadvacllib
from nautiluspropaddacl import NautilusWindowAddACL
print sys.path  # debug: show the effective import path (Python 2 syntax)
def tvObjects_sel_changed(sel):
    # Selection-changed handler for the objects tree view: display the
    # permissions of the newly selected ACL entry.
    global tvObjects
    tv, iter = sel.get_selected()
    if not iter:
        return
    model = tvObjects.get_model()
    objACL = model.get_value(iter, 0)  # column 0 holds the ACL object itself
    #print "selected", model.get_value(iter, 1)
    tvPermissions_set_permision(objACL.perm)
def tvPermissions_set_permision(objPerm):
    # Push the read/write/execute flags of *objPerm* into the three fixed
    # rows of the permissions list store (read, write, execute in order).
    global tvPermissions
    model = tvPermissions.get_model()
    model[0][1] = objPerm.read
    model[1][1] = objPerm.write
    model[2][1] = objPerm.execute
def on_cell_toggled(widget, path):
    # Invert the boolean "allow" cell (column 1) when its checkbox is clicked.
    global tvPermissions
    model = tvPermissions.get_model()
    iter = model.get_iter(path)
    state = model.get_value(iter, 1)
    if state == True:
        model.set_value(iter, 1, False)
    elif state == False:
        model.set_value(iter, 1, True)
def btnObjAdd_clicked(button):
    # "Add" button handler: load the add-ACL dialog from its Glade file and
    # show it as a modal window.
    # NOTE(review): hard-coded absolute development path.
    builder.add_from_file("/home/rene/DEV/eclipse/nautilus-advacl/nautilus-advacl/nautilus-prop-add-acl.glade")
    #bbox = builder.get_objects()
    bbox = builder.get_object("boxMain")
    win_add_acl = NautilusWindowAddACL()
    win_add_acl.set_modal(True)
    win_add_acl.add(bbox)
    win_add_acl.show()
# --- UI construction (module-level test script, Python 2 / GTK3) ---
builder = Gtk.Builder()
#builder.add_objects_from_file("/home/rene/DEV/eclipse/nautilus-advacl/nautilus-prop.glade", ["boxMain"])
builder.add_from_file("/home/rene/DEV/eclipse/nautilus-advacl/nautilus-advacl/nautilus-prop.glade")
#bbox = builder.get_objects()
bbox = builder.get_object("window1")
bbox.connect("destroy", Gtk.main_quit)
bbox.set_position(Gtk.WindowPosition.CENTER)
bbox.show()
# Objects tree view: column 1 shows the ACL object name.
#store = Gtk.ListStore(str)
#store.append(["test1"])
#store.append(["test2"])
#store.append(["test3"])
tvObjects = builder.get_object("tvObjects")
#tvObjects.set_model(store)
renderer = Gtk.CellRendererText()
column = Gtk.TreeViewColumn("Objekt", renderer, text=1)
tvObjects.append_column(column)
selection = tvObjects.get_selection()
selection.connect("changed", tvObjects_sel_changed)
# Permissions tree view: one row per permission (read/write/execute).
store2 = Gtk.ListStore(str, bool)
store2.append(["Lesen", False])
store2.append(["Schreiben", False])
store2.append(["Ausfuehren", False])
tvPermissions = builder.get_object("tvPermissions")
tvPermissions.set_model(store2)
renderer2 = Gtk.CellRendererText()
# NOTE(review): this column reuses 'renderer' rather than the freshly created
# 'renderer2' — confirm which is intended.
column2 = Gtk.TreeViewColumn("Objekt", renderer, text=0)
column2.set_min_width(250)
tvPermissions.append_column(column2)
renderer_toggle = Gtk.CellRendererToggle()
renderer_toggle.connect("toggled", on_cell_toggled)
column_toggle = Gtk.TreeViewColumn("Zulassen", renderer_toggle, active=1)
tvPermissions.append_column(column_toggle)
#renderer_toggle2 = Gtk.CellRendererToggle()
#column_toggle2 = Gtk.TreeViewColumn("Verweigern", renderer_toggle2, active=2)
#tvPermissions.append_column(column_toggle2)
btnObjAdd = builder.get_object("btnObjAdd")
btnObjAdd.connect("clicked", btnObjAdd_clicked)
# Load ACL entries for a test path and populate the objects view.
lib = nautilusadvacllib.AdvACLLibrary()
perms = lib.get_permissions("/home/rene/tmp/test")
store = Gtk.ListStore(GObject.TYPE_PYOBJECT, str)
for perm in perms:
    print perm.realm, perm.object, perm.perm  # debug output (Python 2)
    store.append([perm, perm.object])
tvObjects.set_model(store)
Gtk.main()
|
[
"rene.hadler@iteas.at"
] |
rene.hadler@iteas.at
|
d722c85ab95a8494dc14a237f678c295b45a9b00
|
32e015e596843a0f32864a9023cee77bf8867e7c
|
/lambdata_alfaroqueIslam/lambdata_test.py
|
ae6ce67acc4ebbc478a9f58c7a76d595e555dfc1
|
[
"MIT"
] |
permissive
|
Simon-Minchk/lambdata-1
|
2d90070f3a77184f2fbabdcd019d92d59a5632be
|
329f3f385e98ef076ee4a406c13b4e0d75dd4538
|
refs/heads/master
| 2022-11-04T21:52:34.551759
| 2020-07-10T23:58:15
| 2020-07-10T23:58:15
| 278,754,621
| 0
| 0
|
MIT
| 2020-07-10T23:48:37
| 2020-07-10T23:48:36
| null |
UTF-8
|
Python
| false
| false
| 1,266
|
py
|
#!/usr/bin/env python
"""Tests for lambdata modules."""
import unittest
# unittest supports a type of tests called unit tests
# A unit is the smallest cohesive piece of code we can test
# (usually something like a function or method)
# Other types of tests (you won't write now, just to be aware):
# - Integration: testing multiple pieces working together
# - End to end: testing a full "flow"/use case
# There are also manual/non-code tests that are common
# - User acceptance testing: show it to a user, get feedback
# - Manual running and checking
from example_module import increment
class ExampleModuleTests(unittest.TestCase):
    """Verify that the example module behaves as documented."""
    def test_increment(self):
        """increment() must add exactly one to its argument."""
        # Exercise the unit with a positive and a negative input, then
        # compare each result against the known expected value.
        cases = [(7, 8), (-10, -9)]
        for given, expected in cases:
            self.assertEqual(increment(given), expected)
if __name__ == '__main__':
    # Entry point when the file is executed directly from the command line.
    unittest.main()
|
[
"noreply@github.com"
] |
Simon-Minchk.noreply@github.com
|
cf1a16e1a3643d810cd62c5b1087a5c9ef9da00c
|
f2624b34d0b064210b040041e8473ddbb4abe00c
|
/docker/distill/distill/algorithms/graphs/graph.py
|
3c4473098589e3f35e49636f1c5a6182100df154
|
[
"Apache-2.0"
] |
permissive
|
99Kies/incubator-flagon-tap
|
a28bb3284f58fd7f867810c402504ebcfb9965d3
|
e76e8649e0ee1cce2a3e114ccba7dd5297e40b50
|
refs/heads/master
| 2022-04-28T21:49:00.249241
| 2020-04-29T18:01:08
| 2020-04-29T18:01:08
| 259,998,859
| 0
| 0
|
Apache-2.0
| 2020-04-29T17:45:06
| 2020-04-29T17:45:05
| null |
UTF-8
|
Python
| false
| false
| 960
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class GraphAnalytics(object):
    """Distill's graph analytics package.

    Apply graph algorithms to User Ale log data segmented with Stout.
    """
    @staticmethod
    def foo():
        # Placeholder — no analytics implemented yet.
        pass
|
[
"arthivez@gmail.com"
] |
arthivez@gmail.com
|
421fa1e8c1e66c99b5597376a9d4fbb9187514fb
|
5fea39b14a341fe139805e4052d3819e7f1202ad
|
/symbol_net3.py
|
555f5b5566ad4bfd07d2f06fa9a5f21a4299f5ee
|
[] |
no_license
|
lkct/CV_DL-ResNet
|
29c4632b552e3fd6793634b00353738411fd0649
|
e0c77431f86a542d337510a443374b1919afa35f
|
refs/heads/master
| 2022-10-19T06:44:08.796160
| 2018-07-03T02:20:28
| 2018-07-03T02:20:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,515
|
py
|
'''
Reproducing paper:
Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun. "Identity Mappings in Deep Residual Networks"
'''
import mxnet as mx
def residual_unit(data, num_filter, stride, dim_match, name, bottle_neck=True, bn_mom=0.9, workspace=512):
    """Return a pre-activation bottleneck unit symbol (BN -> ReLU -> Conv x3).

    Follows "Identity Mappings in Deep Residual Networks".
    NOTE(review): no shortcut/identity branch is added here — the output is
    just conv3, and ``dim_match`` is unused; confirm this is intended for the
    DenseNet-style concatenation done in net3().

    Parameters
    ----------
    data : Symbol
        Input symbol
    num_filter : int
        Number of output channels (the two inner convs use num_filter/4)
    stride : tuple
        Stride used in the middle 3x3 convolution
    dim_match : Boolean
        True means channel number between input and output is the same
        (currently unused — see note above)
    name : str
        Base name of the operators
    bottle_neck : bool
        Must be True; only the bottleneck variant is implemented
    bn_mom : float
        BatchNorm momentum
    workspace : int
        Workspace used in convolution operator
    """
    if bottle_neck:
        # the same as https://github.com/facebook/fb.resnet.torch#notes, a bit difference with origin paper
        bn1 = mx.sym.BatchNorm(data=data, fix_gamma=False,
                               eps=2e-5, momentum=bn_mom, name=name + '_bn1')
        act1 = mx.sym.Activation(
            data=bn1, act_type='relu', name=name + '_relu1')
        # 1x1 reduce to num_filter/4 channels.
        conv1 = mx.sym.Convolution(data=act1, num_filter=int(num_filter*0.25),
                                   kernel=(1, 1), stride=(1, 1), pad=(0, 0),
                                   no_bias=True, workspace=workspace, name=name + '_conv1')
        bn2 = mx.sym.BatchNorm(data=conv1, fix_gamma=False,
                               eps=2e-5, momentum=bn_mom, name=name + '_bn2')
        act2 = mx.sym.Activation(
            data=bn2, act_type='relu', name=name + '_relu2')
        # 3x3 spatial convolution (carries the stride).
        conv2 = mx.sym.Convolution(data=act2, num_filter=int(num_filter*0.25),
                                   kernel=(3, 3), stride=stride, pad=(1, 1),
                                   no_bias=True, workspace=workspace, name=name + '_conv2')
        bn3 = mx.sym.BatchNorm(data=conv2, fix_gamma=False,
                               eps=2e-5, momentum=bn_mom, name=name + '_bn3')
        act3 = mx.sym.Activation(
            data=bn3, act_type='relu', name=name + '_relu3')
        # 1x1 expand back to num_filter channels.
        conv3 = mx.sym.Convolution(data=act3, num_filter=int(num_filter),
                                   kernel=(1, 1), stride=(1, 1), pad=(0, 0),
                                   no_bias=True, workspace=workspace, name=name + '_conv3')
        return conv3
    else:
        raise ValueError("must have bottleneck structure")
def transition_block(num_stage, data, num_filter, stride, name, bn_mom=0.9, workspace=512):
    """Return a DenseNet-style transition block: BN -> ReLU -> 1x1 conv -> 2x2 avg-pool.

    Parameters
    ----------
    num_stage : int
        Stage index (only used to name the pooling operator)
    data : Symbol
        Input symbol
    num_filter : int
        Number of output channels of the 1x1 convolution
    stride : tuple
        Stride used in the 1x1 convolution
    name : str
        Base name of the operators
    bn_mom : float
        BatchNorm momentum
    workspace : int
        Workspace used in convolution operator
    """
    bn1 = mx.sym.BatchNorm(data=data, fix_gamma=False,
                           eps=2e-5, momentum=bn_mom, name=name + '_bn1')
    act1 = mx.sym.Activation(data=bn1, act_type='relu', name=name + '_relu1')
    conv1 = mx.sym.Convolution(data=act1, num_filter=int(num_filter),
                               kernel=(1, 1), stride=stride, pad=(0, 0),
                               no_bias=True, workspace=workspace, name=name + '_conv1')
    # Halve the spatial resolution with 2x2 average pooling.
    return mx.sym.Pooling(conv1, global_pool=False,
                          kernel=(2, 2), stride=(2, 2),
                          pool_type='avg', name=name + '_pool%d' % (num_stage + 1))
def conv(data, name, num_filter=12, bn_mom=0.9, workspace=1024):  # need beautify
    """1x1 convolution used to tap features for the dense concatenation.

    NOTE(review): bn1/act1 are computed but conv1 takes ``data=data`` rather
    than ``data=act1`` — the BN+ReLU pre-activation is effectively dead code
    here. Confirm whether this is intentional before changing it.
    """
    name = name + 'conv'
    bn1 = mx.sym.BatchNorm(data=data, fix_gamma=False,
                           eps=2e-5, momentum=bn_mom, name=name + '_bn1')
    act1 = mx.sym.Activation(data=bn1, act_type='relu', name=name + '_relu1')
    conv1 = mx.sym.Convolution(data=data, num_filter=num_filter,
                               kernel=(1, 1), stride=(1, 1), pad=(0, 0),
                               no_bias=True, workspace=workspace, name=name + '_conv1')
    return conv1
def net3(units, num_stage, filter_list, num_class, bottle_neck=True, bn_mom=0.9, workspace=512):
"""Return ResNet symbol of cifar10 and imagenet
Parameters
----------
units : list
Number of units in each stage
num_stage : int
Number of stage
filter_list : list
Channel size of each stage
num_class : int
Ouput size of symbol
workspace : int
Workspace used in convolution operator
"""
num_unit = len(units)
assert(num_unit == num_stage)
data = mx.sym.Variable(name='data')
data = mx.sym.BatchNorm(data=data, fix_gamma=True,
eps=2e-5, momentum=bn_mom, name='bn_data')
body = mx.sym.Convolution(data=data, num_filter=filter_list[0],
kernel=(3, 3), stride=(1, 1), pad=(1, 1),
no_bias=True, name="conv0", workspace=workspace)
for i in range(num_stage):
if i != 0:
body = transition_block(i, body, filter_list[i + 1], stride=(
1, 1), name='stage%d_trans' % (i + 1), bn_mom=bn_mom, workspace=workspace)
con = conv(body, name='stage%d_trans' % (i + 1))
body = residual_unit(body, filter_list[i + 1], (1, 1), False,
name='stage%d_unit%d' % (i + 1, 1), bottle_neck=bottle_neck, workspace=workspace)
con = mx.sym.Concat(
con, conv(body, name='stage%d_unit%d' % (i + 1, 1)))
for j in range(units[i] - 1):
body = residual_unit(body, filter_list[i + 1], (1, 1), True, name='stage%d_unit%d' % (i + 1, j + 2),
bottle_neck=bottle_neck, workspace=workspace)
con = mx.sym.Concat(
con, conv(body, name='stage%d_unit%d' % (i + 1, j + 2)))
body = con
bn1 = mx.sym.BatchNorm(data=body, fix_gamma=False,
eps=2e-5, momentum=bn_mom, name='bn1')
relu1 = mx.sym.Activation(data=bn1, act_type='relu', name='relu1')
# Although kernel is not used here when global_pool=True, we should put one
pool1 = mx.sym.Pooling(data=relu1, global_pool=True, kernel=(7, 7),
pool_type='avg', name='pool1')
flat = mx.sym.Flatten(data=pool1)
fc1 = mx.sym.FullyConnected(data=flat, num_hidden=num_class, name='fc1')
return mx.sym.SoftmaxOutput(data=fc1, name='softmax')
|
[
"liangkct@yahoo.com"
] |
liangkct@yahoo.com
|
38eb06772fe9695d641d05efc1b30e49c510dc4c
|
38346ccf93e0c0d49a378b2532fe215669018829
|
/nipype/pipeline/plugins/tests/test_pbs.py
|
51b0ed20e2473ee5ea45242b2ba0ef6b4adef897
|
[
"BSD-3-Clause"
] |
permissive
|
swederik/nipype
|
de509c2605bc83448240c7c3c68ee8d220d48ef3
|
872720a6fc00b00e029fb67742deedee524b2a9f
|
refs/heads/master
| 2020-12-25T10:08:44.268742
| 2014-05-22T14:05:58
| 2014-05-22T14:05:58
| 1,421,176
| 3
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,552
|
py
|
import os
from shutil import rmtree
from tempfile import mkdtemp
from time import sleep
import nipype.interfaces.base as nib
from nipype.testing import assert_equal, skipif
import nipype.pipeline.engine as pe
class InputSpec(nib.TraitedSpec):
input1 = nib.traits.Int(desc='a random int')
input2 = nib.traits.Int(desc='a random int')
class OutputSpec(nib.TraitedSpec):
output1 = nib.traits.List(nib.traits.Int, desc='outputs')
class TestInterface(nib.BaseInterface):
input_spec = InputSpec
output_spec = OutputSpec
def _run_interface(self, runtime):
runtime.returncode = 0
return runtime
def _list_outputs(self):
outputs = self._outputs().get()
outputs['output1'] = [1, self.inputs.input1]
return outputs
@skipif(True)
def test_run_pbsgraph():
cur_dir = os.getcwd()
temp_dir = mkdtemp(prefix='test_engine_')
os.chdir(temp_dir)
pipe = pe.Workflow(name='pipe')
mod1 = pe.Node(interface=TestInterface(),name='mod1')
mod2 = pe.MapNode(interface=TestInterface(),
iterfield=['input1'],
name='mod2')
pipe.connect([(mod1,mod2,[('output1','input1')])])
pipe.base_dir = os.getcwd()
mod1.inputs.input1 = 1
execgraph = pipe.run(plugin="PBSGraph")
names = ['.'.join((node._hierarchy,node.name)) for node in execgraph.nodes()]
node = execgraph.nodes()[names.index('pipe.mod1')]
result = node.get_output('output1')
yield assert_equal, result, [1, 1]
os.chdir(cur_dir)
rmtree(temp_dir)
|
[
"satra@mit.edu"
] |
satra@mit.edu
|
0c322885b6dff55190cb9d78585f345cebcb7934
|
9513a84cf1b7f263e119ce5b07740c753016430f
|
/venv/Swap in list.py
|
d8d3f7bd2803580a324688e1510ac623d2ac9db4
|
[] |
no_license
|
AkhilRaja003/Assignment1
|
8941321adbc2e5b302278918e9221357c11ff8b1
|
b69986322133f681d13a65bac091d29e47ecb90d
|
refs/heads/master
| 2022-10-17T01:41:28.500722
| 2020-06-10T02:33:14
| 2020-06-10T02:33:14
| 271,158,347
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 276
|
py
|
a = int(input("enter 1st position number:"))
b = int(input("enter 1st position number:"))
list = ["Akhil","Sunny","Abhi","Sisi","Naveen","Navya","Rakesh","Rahul","Nani"]
list[a],list[b] = list[b],list[a]
print(list)
print("**********************************************")
|
[
"akhilraja.3@gmail.com"
] |
akhilraja.3@gmail.com
|
bddb6b3e4b40c1815827ed490e17d5d4a23faf90
|
951ccc44913ea0d1ca274f34b87e880d6aa0632d
|
/traffic_duplication/results/multistream/plot_loss.py
|
632b7fd194cd0658960e3df7db911018f71376fe
|
[] |
no_license
|
datwelk/thesis
|
67d641a6ebed948ca15c11e13da8de88de844e4d
|
2bd1f0d4df8afe12a7de7a1fcee5254c1265bae5
|
refs/heads/master
| 2021-05-01T04:19:30.876191
| 2017-01-16T23:30:58
| 2017-01-16T23:30:58
| 58,924,236
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,021
|
py
|
from matplotlib import mlab
import matplotlib.pyplot as plt
import numpy as np
import math
import argparse, sys
from scipy.stats import norm
# Provide result of 1 of the streams, others should be in same directory
parser = argparse.ArgumentParser(description='Plot out of order packets')
parser.add_argument('c', help='Controller output')
parser.add_argument('B', help='Input filename')
parser.add_argument('n', help='No streams',default=7,type=int)
args = parser.parse_args(sys.argv[1:])
tableau20 = [(31, 119, 180), (255, 127, 14), (44, 160, 44), (214, 39, 40),
(148, 103, 189), (140, 86, 75), (23, 190, 207)]
for i in range(len(tableau20)):
r, g, b = tableau20[i]
tableau20[i] = (r / 255., g / 255., b / 255.)
plt.figure(figsize=(12, 9))
ax = plt.subplot(111)
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.grid(True)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
#ax.set_xlim([0, 50])
z = 0
for j in range(0, args.n):
sent = []
received = []
loss = []
with open (args.c) as f:
for line in f:
components = line.split(' ')
sent.append(int(components[j]))
with open (args.B[:-5] + str(j) + '.txt') as f:
for line in f:
components = line.split(' ')
received.append(int(components[0]))
assert(len(sent) == len(received) + 1)
for i in range(0, len(received)):
count_lost = max(0, sent[i] - received[i])
pc = count_lost / float(sent[i]) * 100
if pc >= 20:
z += 1
print "Loss: " + str(pc) + " Received: " + str(received[i]) + " Sent: " + str(sent[i])
loss.append(pc)
weights = np.ones_like(loss)/float(len(loss))
#binwidth = 0.2
plt.hist(loss, weights=weights,bins=200,color=tableau20[j], alpha=0.4)
print "No measurements >= 20 percent loss: " + str(z)
plt.xlabel('Percentage of packets lost', fontsize=16)
plt.ylabel('Relative frequency', fontsize=16)
plt.show()
|
[
"datwelk@me.com"
] |
datwelk@me.com
|
c5960e6fe2804953fb8cfe13379870fb9720d5d2
|
43213f687f4f7eed8f9ecd810aa252fe863cd3ef
|
/deeplab/core/DataParallelExecutorGroup.py
|
32ad6179acc841c663bd32d0ebaa1f704f2893a3
|
[] |
no_license
|
eglrp/DRN
|
e5611e65f093084ccfb3f6131e5d1a6cbdcf7f05
|
daef466b21b34a1824a9163e1193ac9322b5bf4e
|
refs/heads/master
| 2020-04-10T05:06:38.912915
| 2018-06-14T07:06:39
| 2018-06-14T07:06:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 25,726
|
py
|
# --------------------------------------------------------
# Deformable Convolutional Networks
# Copyright (c) 2016 by Contributors
# Copyright (c) 2017 Microsoft
# Licensed under The Apache-2.0 License [see LICENSE for details]
# Modified by Zheng Zhang
# --------------------------------------------------------
import logging
import numpy as np
import mxnet as mx
from mxnet import context as ctx
from mxnet import ndarray as nd
from mxnet.io import DataDesc
from mxnet.executor_manager import _split_input_slice
def _load_general(data, targets, major_axis):
"""Load a list of arrays into a list of arrays specified by slices"""
for d_src, d_targets in zip(data, targets):
if isinstance(d_targets, nd.NDArray):
d_src.copyto(d_targets)
elif isinstance(d_src, (list, tuple)):
for src, dst in zip(d_src, d_targets):
src.copyto(dst)
else:
raise NotImplementedError
def _load_data(batch, targets, major_axis):
"""Load data into sliced arrays"""
_load_general(batch.data, targets, major_axis)
def _load_label(batch, targets, major_axis):
"""Load label into sliced arrays"""
_load_general(batch.label, targets, major_axis)
def _merge_multi_context(outputs, major_axis):
"""Merge outputs that lives on multiple context into one, so that they look
like living on one context.
"""
rets = []
for tensors, axis in zip(outputs, major_axis):
if axis >= 0:
rets.append(nd.concatenate(tensors, axis=axis, always_copy=False))
else:
# negative axis means the there is no batch_size axis, and all the
# results should be the same on each device. We simply take the
# first one, without checking they are actually the same
rets.append(tensors[0])
return rets
class DataParallelExecutorGroup(object):
"""DataParallelExecutorGroup is a group of executors that lives on a group of devices.
This is a helper class used to implement data parallelization. Each mini-batch will
be split and run on the devices.
Parameters
----------
symbol : Symbol
The common symbolic computation graph for all executors.
contexts : list
A list of contexts.
workload : list
If not `None`, could be a list of numbers that specify the workload to be assigned
to different context. Larger number indicate heavier workload.
data_shapes : list
Should be a list of (name, shape) tuples, for the shapes of data. Note the order is
important and should be the same as the order that the `DataIter` provide the data.
label_shapes : list
Should be a list of (name, shape) tuples, for the shapes of label. Note the order is
important and should be the same as the order that the `DataIter` provide the label.
param_names : list
A list of strings, indicating the names of parameters (e.g. weights, filters, etc.)
in the computation graph.
for_training : bool
Indicate whether the executors should be bind for training. When not doing training,
the memory for gradients will not be allocated.
inputs_need_grad : bool
Indicate whether the gradients for the input data should be computed. This is currently
not used. It will be useful for implementing composition of modules.
shared_group : DataParallelExecutorGroup
Default is `None`. This is used in bucketing. When not `None`, it should be a executor
group corresponding to a different bucket. In other words, it will correspond to a different
symbol but with the same set of parameters (e.g. unrolled RNNs with different lengths).
In this case, many memory will be shared.
logger : Logger
Default is `logging`.
fixed_param_names: list of str
Indicate parameters to be fixed during training. Parameters in this list will not allocate
space for gradient, nor do gradient calculation.
grad_req : str, list of str, dict of str to str
Requirement for gradient accumulation. Can be 'write', 'add', or 'null'
(default to 'write').
Can be specified globally (str) or for each argument (list, dict).
"""
def __init__(self, symbol, contexts, workload, data_shapes, label_shapes, param_names,
for_training, inputs_need_grad, shared_group=None, logger=logging,
fixed_param_names=None, grad_req='write', state_names=None):
self.param_names = param_names
self.arg_names = symbol.list_arguments()
self.aux_names = symbol.list_auxiliary_states()
self.symbol = symbol
self.contexts = contexts
self.workload = workload
self.for_training = for_training
self.inputs_need_grad = inputs_need_grad
self.logger = logger
#In the future we should have a better way to profile memory per device (haibin)
# self._total_exec_bytes = 0
self.fixed_param_names = fixed_param_names
if self.fixed_param_names is None:
self.fixed_param_names = []
self.state_names = state_names
if self.state_names is None:
self.state_names = []
if not for_training:
grad_req = 'null'
# data_shapes = [x if isinstance(x, DataDesc) else DataDesc(*x) for x in data_shapes]
# if label_shapes is not None:
# label_shapes = [x if isinstance(x, DataDesc) else DataDesc(*x) for x in label_shapes]
data_names = [x.name for x in data_shapes[0]]
if isinstance(grad_req, str):
self.grad_req = {}
for k in self.arg_names:
if k in self.param_names:
self.grad_req[k] = 'null' if k in self.fixed_param_names else grad_req
elif k in data_names:
self.grad_req[k] = grad_req if self.inputs_need_grad else 'null'
else:
self.grad_req[k] = 'null'
elif isinstance(grad_req, (list, tuple)):
assert len(grad_req) == len(self.arg_names)
self.grad_req = dict(zip(self.arg_names, grad_req))
elif isinstance(grad_req, dict):
self.grad_req = {}
for k in self.arg_names:
if k in self.param_names:
self.grad_req[k] = 'null' if k in self.fixed_param_names else 'write'
elif k in data_names:
self.grad_req[k] = 'write' if self.inputs_need_grad else 'null'
else:
self.grad_req[k] = 'null'
self.grad_req.update(grad_req)
else:
raise ValueError("grad_req must be one of str, list, tuple, or dict.")
if shared_group is not None:
self.shared_data_arrays = shared_group.shared_data_arrays
else:
self.shared_data_arrays = [{} for _ in contexts]
# initialize some instance variables
self.batch_size = len(data_shapes)
self.slices = None
self.execs = []
self._default_execs = None
self.data_arrays = None
self.label_arrays = None
self.param_arrays = None
self.state_arrays = None
self.grad_arrays = None
self.aux_arrays = None
self.input_grad_arrays = None
self.data_shapes = None
self.label_shapes = None
self.data_layouts = None
self.label_layouts = None
self.output_layouts = [DataDesc.get_batch_axis(self.symbol[name].attr('__layout__'))
for name in self.symbol.list_outputs()]
self.bind_exec(data_shapes, label_shapes, shared_group)
def decide_slices(self, data_shapes):
"""Decide the slices for each context according to the workload.
Parameters
----------
data_shapes : list
list of (name, shape) specifying the shapes for the input data or label.
"""
assert len(data_shapes) > 0
major_axis = [DataDesc.get_batch_axis(x.layout) for x in data_shapes]
for (name, shape), axis in zip(data_shapes, major_axis):
if axis == -1:
continue
batch_size = shape[axis]
if self.batch_size is not None:
assert batch_size == self.batch_size, ("all data must have the same batch size: "
+ ("batch_size = %d, but " % self.batch_size)
+ ("%s has shape %s" % (name, shape)))
else:
self.batch_size = batch_size
self.slices = _split_input_slice(self.batch_size, self.workload)
return major_axis
def _collect_arrays(self):
"""Collect internal arrays from executors."""
# convenient data structures
# self.data_arrays = [[(self.slices[i], e.arg_dict[name]) for i, e in enumerate(self.execs)]
# for name, _ in self.data_shapes]
self.data_arrays = [[e.arg_dict[name] for name, _ in self.data_shapes[0]] for e in self.execs]
self.state_arrays = [[e.arg_dict[name] for e in self.execs]
for name in self.state_names]
if self.label_shapes is not None:
# self.label_arrays = [[(self.slices[i], e.arg_dict[name])
# for i, e in enumerate(self.execs)]
# for name, _ in self.label_shapes]
self.label_arrays = [[e.arg_dict[name] for name, _ in self.label_shapes[0]] for e in self.execs]
else:
self.label_arrays = None
self.param_arrays = [[exec_.arg_arrays[i] for exec_ in self.execs]
for i, name in enumerate(self.arg_names)
if name in self.param_names]
if self.for_training:
self.grad_arrays = [[exec_.grad_arrays[i] for exec_ in self.execs]
for i, name in enumerate(self.arg_names)
if name in self.param_names]
else:
self.grad_arrays = None
data_names = [x[0] for x in self.data_shapes]
if self.inputs_need_grad:
self.input_grad_arrays = [[exec_.grad_arrays[i] for exec_ in self.execs]
for i, name in enumerate(self.arg_names)
if name in data_names]
else:
self.input_grad_arrays = None
self.aux_arrays = [[exec_.aux_arrays[i] for exec_ in self.execs]
for i in range(len(self.aux_names))]
def bind_exec(self, data_shapes, label_shapes, shared_group=None, reshape=False):
"""Bind executors on their respective devices.
Parameters
----------
data_shapes : list
label_shapes : list
shared_group : DataParallelExecutorGroup
reshape : bool
"""
assert reshape or not self.execs
# self.batch_size = None
# calculate workload and bind executors
# self.data_layouts = self.decide_slices(data_shapes)
# if label_shapes is not None:
# # call it to make sure labels has the same batch size as data
# self.label_layouts = self.decide_slices(label_shapes)
for i in range(len(data_shapes)):
# data_shapes_i = self._sliced_shape(data_shapes, i, self.data_layouts)
data_shapes_i = data_shapes[i]
if label_shapes is not None:
label_shapes_i = label_shapes[i]
# label_shapes_i = self._sliced_shape(label_shapes, i, self.label_layouts)
else:
label_shapes_i = []
if reshape:
self.execs[i] = self._default_execs[i].reshape(
allow_up_sizing=True, **dict(data_shapes_i + label_shapes_i))
else:
self.execs.append(self._bind_ith_exec(i, data_shapes_i, label_shapes_i,
shared_group))
self.data_shapes = data_shapes
self.label_shapes = label_shapes
self._collect_arrays()
def reshape(self, data_shapes, label_shapes):
"""Reshape executors.
Parameters
----------
data_shapes : list
label_shapes : list
"""
if self._default_execs is None:
self._default_execs = [i for i in self.execs]
for i in range(len(self.contexts)):
self.execs[i] = self._default_execs[i].reshape(
allow_up_sizing=True, **dict(data_shapes[i] + (label_shapes[i] if label_shapes is not None else []))
)
self.data_shapes = data_shapes
self.label_shapes = label_shapes
self._collect_arrays()
def set_params(self, arg_params, aux_params,allow_extra=False):
"""Assign, i.e. copy parameters to all the executors.
Parameters
----------
arg_params : dict
A dictionary of name to `NDArray` parameter mapping.
aux_params : dict
A dictionary of name to `NDArray` auxiliary variable mapping.
"""
for exec_ in self.execs:
exec_.copy_params_from(arg_params, aux_params,allow_extra)
def get_params(self, arg_params, aux_params):
""" Copy data from each executor to `arg_params` and `aux_params`.
Parameters
----------
arg_params : list of NDArray
target parameter arrays
aux_params : list of NDArray
target aux arrays
Notes
-----
- This function will inplace update the NDArrays in arg_params and aux_params.
"""
for name, block in zip(self.param_names, self.param_arrays):
weight = sum(w.copyto(ctx.cpu()) for w in block) / len(block)
weight.astype(arg_params[name].dtype).copyto(arg_params[name])
for name, block in zip(self.aux_names, self.aux_arrays):
weight = sum(w.copyto(ctx.cpu()) for w in block) / len(block)
weight.astype(aux_params[name].dtype).copyto(aux_params[name])
def forward(self, data_batch, is_train=None):
"""Split `data_batch` according to workload and run forward on each devices.
Parameters
----------
data_batch : DataBatch
Or could be any object implementing similar interface.
is_train : bool
The hint for the backend, indicating whether we are during training phase.
Default is `None`, then the value `self.for_training` will be used.
Returns
-------
"""
_load_data(data_batch, self.data_arrays, self.data_layouts)
if is_train is None:
is_train = self.for_training
if self.label_arrays is not None:
assert not is_train or data_batch.label
if data_batch.label:
_load_label(data_batch, self.label_arrays, self.label_layouts)
for exec_ in self.execs:
exec_.forward(is_train=is_train)
def get_outputs(self, merge_multi_context=True):
"""Get outputs of the previous forward computation.
Parameters
----------
merge_multi_context : bool
Default is `True`. In the case when data-parallelism is used, the outputs
will be collected from multiple devices. A `True` value indicate that we
should merge the collected results so that they look like from a single
executor.
Returns
-------
If `merge_multi_context` is `True`, it is like `[out1, out2]`. Otherwise, it
is like `[[out1_dev1, out1_dev2], [out2_dev1, out2_dev2]]`. All the output
elements are `NDArray`.
"""
outputs = [[exec_.outputs[i] for exec_ in self.execs]
for i in range(len(self.execs[0].outputs))]
if merge_multi_context:
outputs = _merge_multi_context(outputs, self.output_layouts)
return outputs
def get_states(self, merge_multi_context=True):
"""Get states from all devices
Parameters
----------
merge_multi_context : bool
Default is `True`. In the case when data-parallelism is used, the states
will be collected from multiple devices. A `True` value indicate that we
should merge the collected results so that they look like from a single
executor.
Returns
-------
If `merge_multi_context` is `True`, it is like `[out1, out2]`. Otherwise, it
is like `[[out1_dev1, out1_dev2], [out2_dev1, out2_dev2]]`. All the output
elements are `NDArray`.
"""
assert not merge_multi_context, \
"merge_multi_context=True is not supported for get_states yet."
return self.state_arrays
def set_states(self, states=None, value=None):
"""Set value for states. Only one of states & value can be specified.
Parameters
----------
states : list of list of NDArrays
source states arrays formatted like [[state1_dev1, state1_dev2],
[state2_dev1, state2_dev2]].
value : number
a single scalar value for all state arrays.
"""
if states is not None:
assert value is None, "Only one of states & value can be specified."
_load_general(states, self.state_arrays, (0,)*len(states))
else:
assert value is not None, "At least one of states & value must be specified."
assert states is None, "Only one of states & value can be specified."
for d_dst in self.state_arrays:
for dst in d_dst:
dst[:] = value
def get_input_grads(self, merge_multi_context=True):
"""Get the gradients with respect to the inputs of the module.
Parameters
----------
merge_multi_context : bool
Default is `True`. In the case when data-parallelism is used, the outputs
will be collected from multiple devices. A `True` value indicate that we
should merge the collected results so that they look like from a single
executor.
Returns
-------
If `merge_multi_context` is `True`, it is like `[grad1, grad2]`. Otherwise, it
is like `[[grad1_dev1, grad1_dev2], [grad2_dev1, grad2_dev2]]`. All the output
elements are `NDArray`.
"""
assert self.inputs_need_grad
if merge_multi_context:
return _merge_multi_context(self.input_grad_arrays, self.data_layouts)
return self.input_grad_arrays
def backward(self, out_grads=None):
"""Run backward on all devices. A backward should be called after
a call to the forward function. Backward cannot be called unless
`self.for_training` is `True`.
Parameters
----------
out_grads : NDArray or list of NDArray, optional
Gradient on the outputs to be propagated back.
This parameter is only needed when bind is called
on outputs that are not a loss function.
"""
assert self.for_training, 're-bind with for_training=True to run backward'
if out_grads is None:
out_grads = []
# for i, (exec_, islice) in enumerate(zip(self.execs, self.slices)):
for i, exec_ in enumerate(self.execs):
out_grads_slice = []
exec_.backward(out_grads=out_grads_slice)
def update_metric(self, eval_metric, labels):
"""Accumulate the performance according to `eval_metric` on all devices.
Parameters
----------
eval_metric : EvalMetric
The metric used for evaluation.
labels : list of NDArray
Typically comes from `label` of a `DataBatch`.
"""
for texec, labels in zip(self.execs, labels):
eval_metric.update(labels, texec.outputs)
def _bind_ith_exec(self, i, data_shapes, label_shapes, shared_group):
"""Internal utility function to bind the i-th executor.
"""
shared_exec = None if shared_group is None else shared_group.execs[i]
context = self.contexts[i]
shared_data_arrays = self.shared_data_arrays[i]
input_shapes = dict(data_shapes)
if label_shapes is not None:
input_shapes.update(dict(label_shapes))
arg_shapes, _, aux_shapes = self.symbol.infer_shape(**input_shapes)
assert arg_shapes is not None, "shape inference failed"
input_types = {x.name: x.dtype for x in data_shapes}
if label_shapes is not None:
input_types.update({x.name: x.dtype for x in label_shapes})
arg_types, _, aux_types = self.symbol.infer_type(**input_types)
assert arg_types is not None, "type inference failed"
arg_arrays = []
grad_arrays = {} if self.for_training else None
def _get_or_reshape(name, shared_data_arrays, arg_shape, arg_type, context, logger):
"""Internal helper to get a memory block or re-use by re-shaping"""
if name in shared_data_arrays:
arg_arr = shared_data_arrays[name]
if np.prod(arg_arr.shape) >= np.prod(arg_shape):
# nice, we can directly re-use this data blob
assert arg_arr.dtype == arg_type
arg_arr = arg_arr.reshape(arg_shape)
else:
logger.warning(('bucketing: data "%s" has a shape %s' % (name, arg_shape)) +
(', which is larger than already allocated ') +
('shape %s' % (arg_arr.shape,)) +
('. Need to re-allocate. Consider putting ') +
('default_bucket_key to') +
(' be the bucket taking the largest input for better ') +
('memory sharing.'))
arg_arr = nd.zeros(arg_shape, context, dtype=arg_type)
# replace existing shared array because the new one is bigger
shared_data_arrays[name] = arg_arr
else:
arg_arr = nd.zeros(arg_shape, context, dtype=arg_type)
shared_data_arrays[name] = arg_arr
return arg_arr
# create or borrow arguments and gradients
for j in range(len(self.arg_names)):
name = self.arg_names[j]
if name in self.param_names: # model parameters
if shared_exec is None:
arg_arr = nd.zeros(arg_shapes[j], context, dtype=arg_types[j])
if self.grad_req[name] != 'null':
grad_arr = nd.zeros(arg_shapes[j], context, dtype=arg_types[j])
grad_arrays[name] = grad_arr
else:
arg_arr = shared_exec.arg_dict[name]
if name.endswith('state'):
arg_arr= mx.nd.zeros(arg_shapes[j],arg_arr.context)
assert arg_arr.shape == arg_shapes[j]
assert arg_arr.dtype == arg_types[j]
if self.grad_req[name] != 'null':
grad_arrays[name] = shared_exec.grad_dict[name]
else: # data, label, or states
arg_arr = _get_or_reshape(name, shared_data_arrays, arg_shapes[j], arg_types[j],
context, self.logger)
# data might also need grad if inputs_need_grad is True
if self.grad_req[name] != 'null':
grad_arrays[name] = _get_or_reshape('grad of ' + name, shared_data_arrays,
arg_shapes[j], arg_types[j], context,
self.logger)
arg_arrays.append(arg_arr)
# create or borrow aux variables
if shared_exec is None:
aux_arrays = [nd.zeros(s, context, dtype=t) for s, t in zip(aux_shapes, aux_types)]
else:
for j, arr in enumerate(shared_exec.aux_arrays):
assert aux_shapes[j] == arr.shape
assert aux_types[j] == arr.dtype
aux_arrays = shared_exec.aux_arrays[:]
executor = self.symbol.bind(ctx=context, args=arg_arrays,
args_grad=grad_arrays, aux_states=aux_arrays,
grad_req=self.grad_req, shared_exec=shared_exec)
# Get the total bytes allocated for this executor
# self._total_exec_bytes += int(executor.debug_str().split('\n')[-3].split()[1])
return executor
def _sliced_shape(self, shapes, i, major_axis):
"""Get the sliced shapes for the i-th executor.
Parameters
----------
shapes : list of (str, tuple)
The original (name, shape) pairs.
i : int
Which executor we are dealing with.
"""
sliced_shapes = []
for desc, axis in zip(shapes, major_axis):
shape = list(desc.shape)
if axis >= 0:
shape[axis] = self.slices[i].stop - self.slices[i].start
sliced_shapes.append(DataDesc(desc.name, tuple(shape), desc.dtype, desc.layout))
return sliced_shapes
def install_monitor(self, mon):
"""Install monitor on all executors"""
for exe in self.execs:
mon.install(exe)
|
[
"9200374@qq.com"
] |
9200374@qq.com
|
7582d45ef182c34a0a120d45cdf49178783bf540
|
d922b02070c11c19ba6104daa3a1544e27a06e40
|
/Hw_1_2/weighted_Quick_union.py
|
f55fcb1fa7f8b7cde7c996f66af3ffce3fe652b0
|
[] |
no_license
|
viharivnv/DSA
|
2ca393a8e304ee7b4d540ff435e832d94ee4b2a7
|
777c7281999ad99a0359c44291dddaa868a2525c
|
refs/heads/master
| 2022-10-15T15:26:59.045698
| 2020-06-17T15:55:33
| 2020-06-17T15:55:33
| 273,020,116
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,353
|
py
|
#The code was run on PYCHARM IDE on WINDOWS python version 3.x
'''
Steps to recreate:
1)Open PYCHARM
2)Create a new project
3) Add a new python file and paste the code
4) Run the code
'''
import time
file=input("enter the file name excluding '.txt' extension for example 8pair:\n")
file=file+".txt"
# referred "https://stackoverflow.com/questions/47872237/how-to-read-an-input-file-of-integers-separated-by-a-space-using-readlines-in-py/47872327" for splitting
try:
# stores each line in the file as a string in the array of strings text
with open(file, 'r') as f:
text = f.read()
text = text.split("\n")
i = 0
arr = []
a = []
b = []
p = []
q = []
count = 0
# Stores the two strings sepersted by whitespace as seperate elements of the array
for i in range(0, len(text) - 1):
left = text[i].split()
for x in left:
arr.append(x)
# stores the numbers read to p and q
for i in range(0, len(arr)):
if i % 2 == 0:
p.append(arr[i])
else:
q.append(arr[i])
for x in p:
t = int(x)
a.append(t)
for y in q:
t = int(y)
b.append(t)
id = []
sz = []
# referred "https://stackoverflow.com/questions/5998245/get-current-time-in-milliseconds-in-python" for getting time in milliseconds
start = time.time_ns()
# initialization of the array
for i in range(0, 8192):
id.append(i)
sz.append(1)
c = 0
# defining union function
def un(o, l):
i = root(o)
j = root(l)
if sz[i] < sz[j]:
id[i] = j
sz[j] += sz[i]
else:
id[j] = i
sz[i] += sz[j]
# defining find function
def root(i):
global c1
while i != id[i]:
i = id[i]
return i
count = 0
# Weighted Quick-Union Algorithm
for i in range(0, len(p)):
f = a[i]
g = b[i]
if root(a[i]) == root(b[i]):
continue
else:
c += 1
un(f, g)
print('The pairs are :', a[i], b[i],'with root',root(f),root(g))
stop = time.time_ns()
runtime = stop - start
print("The Number of instructions executed", c)
print('time taken to execute', runtime, 'ns')
except:
print('File Not Found')
|
[
"52350934+viharivnv@users.noreply.github.com"
] |
52350934+viharivnv@users.noreply.github.com
|
32473d354374867fc45f1c267f78b7e410396cf4
|
2f4f036cf9074c1efe240ea392dadbad650396ec
|
/advanced python/oop/employee.py
|
73db7d69f40c3825673a2dd74e08ad64e4d72e16
|
[] |
no_license
|
Arunnithyanand/luminarpython
|
a37c304ab475af43794726459439a5bab46a331a
|
0a671700d279e5ac7dd0b5f8d9227af7952cbaf7
|
refs/heads/master
| 2023-04-26T04:09:40.750216
| 2021-05-19T03:38:02
| 2021-05-19T03:38:02
| 368,733,081
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 336
|
py
|
class Employee:
company="luminar"
def setval(self,name,id):
self.name=name
self.id=id
def printval(self):
print("name",self.name)
print("id",self.id)
print("company",Employee.company)
st=Employee()
st.setval("arun",55)
st.printval()
st=Employee()
st.setval("bijoy",66)
st.printval()
|
[
"arunnithyanandkz777@gmail.com"
] |
arunnithyanandkz777@gmail.com
|
113d0493eabf421bc48a810da3dfdf024e72469f
|
21fddc001e85465405211501a730de698be0e07a
|
/utils/password_utils.py
|
ed7df235cf711c727abe28fade9f82546704db96
|
[] |
no_license
|
khalilbenayed/starfront
|
7fa9302f928a0a8c7929d52c1b863798fed34969
|
50a306a8c0153eeee4f15c1860a8a05216e37172
|
refs/heads/master
| 2022-02-21T15:43:44.976188
| 2019-10-14T00:18:16
| 2019-10-14T00:18:16
| 210,495,332
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 383
|
py
|
import hashlib
import binascii
import os
def hash_password(password):
"""Hash a password for storing."""
salt = hashlib.sha256(os.urandom(60)).hexdigest().encode('ascii')
pwdhash = hashlib.pbkdf2_hmac('sha512', password.encode('utf-8'),
salt, 100000)
pwdhash = binascii.hexlify(pwdhash)
return (salt + pwdhash).decode('ascii')
|
[
"kbenayed@edu.uwaterloo.ca"
] |
kbenayed@edu.uwaterloo.ca
|
22b832295b8c616e01f8a5afae0fbfe8f016fe5b
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03241/s122967177.py
|
c84c86fe77e36fd71fbba3c22193bc424ab2481b
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 963
|
py
|
from math import ceil,floor,factorial,gcd,sqrt,log2,cos,sin,tan,acos,asin,atan,degrees,radians,pi,inf
from itertools import accumulate,groupby,permutations,combinations,product,combinations_with_replacement
from collections import deque,defaultdict,Counter
from bisect import bisect_left,bisect_right
from operator import itemgetter
from heapq import heapify,heappop,heappush
from queue import Queue,LifoQueue,PriorityQueue
from copy import deepcopy
from time import time
import string
import sys
sys.setrecursionlimit(10 ** 7)
def input() : return sys.stdin.readline().strip()
def INT() : return int(input())
def MAP() : return map(int,input().split())
def LIST() : return list(MAP())
def divisor(n):
i = 1
table = []
while i * i <= n:
if n%i == 0:
table.append(i)
table.append(n//i)
i += 1
table = list(set(table))
return table
n, m = MAP()
a = sorted(divisor(m))
print(m//a[bisect_left(a,n)])
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
d291f1caf6260285aec3cc8cf260a9b13241a12e
|
6579de78fc908519d98f87d15ec0188818fe89b6
|
/resize_img.py
|
4b0992451a918c158e3343d2524c5fc2cfc25e8d
|
[] |
no_license
|
WingGao/SmallPyTools
|
fa720a7d7c01a9eb6dd17e68839260145aabe35e
|
3bd2ee8af194a22a59c350ae572e7e40d3042802
|
refs/heads/master
| 2021-01-02T09:38:16.405196
| 2016-11-04T10:47:39
| 2016-11-04T10:47:39
| 14,706,146
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,037
|
py
|
# coding=utf-8
import os
from PIL import Image
dir = '/Users/wing/Documents/Temp/icon'
des_dir = '/Users/wing/Documents/Temp/icon2'
def reszie():
w = 55
h = 55
for i in os.listdir(dir):
img = Image.open(os.path.join(dir, i))
img.thumbnail((w, h), Image.ANTIALIAS)
# whr = float(w) / h
# iwhr = float(img.size[0]) / img.size[1]
# iw = w
# ih = h
# if iwhr > whr:
# # 太宽,以长为标准
# iw = int(iwhr * ih)
# else:
# ih = int(iw / iwhr)
# img = img.resize((iw, ih))
# left = int((iw - w) / 2)
# upper = int((iwhr / ih) / 2)
# box = (left, upper, w + left, h + upper)
# img = img.crop(box)
img.save(os.path.join(des_dir, i), quality=100)
print i
def rename():
if not os.path.exists(des_dir):
os.mkdir(des_dir)
for i in os.listdir(dir):
r = i
print i
os.rename(os.path.join(dir, i), os.path.join(des_dir, i))
reszie()
|
[
"wing.gao@live.com"
] |
wing.gao@live.com
|
a186971e2560437030c33dd0c302cba82b16534d
|
cf80e9b43fbddebfe6020582da6662e1b5c3b862
|
/debug_test.py
|
40e80b8a8d4636e610b51c1400b342cb0d502861
|
[] |
no_license
|
TonnyQ/PythonDev
|
cd057d150a693dbda4f0fae98993353b8d0ea208
|
c3bd02a5b6ab805d9e8e18ec568c3762dd8b65ef
|
refs/heads/master
| 2020-12-24T20:10:47.818113
| 2016-04-27T17:22:31
| 2016-04-27T17:22:31
| 56,743,597
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,002
|
py
|
# -*- encoding:utf-8 -*-
#错误处理
try:
print('try...')
r = 10/0
print('result',r)
except ZeroDivisionError as e:
print('except:',e)
finally:
print('finally...')
print('End')
#show:对可能出现错误的代码try,如果真的发生错误,错误发生后的代码将不再执行
#将进入except处理错误,最后如果存在finally,则进入finally。如果没有发生错误
#将不会执行except语句,还可以针对不同错误类型进入不同的except语句块处理。此外
#还可以在except后面加一个else,当没有发生错误时,会自动执行else语句。
try:
a = 3 / 2
except ValueError as e:
print('valueerror:',e)
else:
print('no happen error')
#python的错误其实也是class,所有的错误类型都继承自BaseException,所以使用except时需要注意的是
#它不但能捕获该类型错误,也能处理其子类。
try:
a = int('a')
except ValueError as e:
print('value error')
except UnicodeError as e:
print('UnicodeError') #no execute,because UnicodeError is ValueError subclass
#调用堆栈
#如果错误没有被捕获,就会一直往上抛,最后被python的解释器捕获,打印一个错误信息,然后程序退出
#调试错误时,查看错误堆栈,应该从上往下看,错误的最终原因就在堆栈的最底层
#记录错误
#如果不捕获错误,自然可以让python解释器打印错误堆栈,但程序也被结束了。我们可以自己捕获错误,
#并打印错误堆栈信息,然后分析错误原因,同时,让程序继续执行。
import logging #python内置的log模块
def foo(s):
return 10 / int(s)
def bar(s):
return foo(s) * 2
def main():
try:
bar('0')
except Exception as e:
logging.exception(e)
main()
print('end======')
#抛出错误
#因为错误是class,捕获一个错误就是捕获到该class的一个实例。因此,错误并不是凭空产生的,而是有意创建
#并抛出的。python内置函数会抛出很多类型的错误,我们也能自定义抛出错误
class FooError(ValueError):
pass
def foos(s):
n = int(s)
if n == 0:
raise FooError('invalid value : %s' % s)
return 10 / n
#只有必要时才需要定义自己的错误类,否则尽量使用系统提供的错误类型
try:
foos('0')
except FooError as e:
print('FooError :',e)
#捕获错误,然后继续向上层抛出错误,raise语句如果不带参数,则会把当前错误原样抛出
def bar():
try:
foo('0')
except ValueError as e:
print('ValueError:',e)
raise #继续向上抛出错误
#断言assert,需要小心的使用assert,因为它会让程序错误,停止执行。可以在python解释器时使用-o参数关闭assert
def hoo(s):
n = int(s)
#assert n != 0,'n is zero' #assert将判断表达式是否为true,如果不为true,则根据运行逻辑抛出AssertionError
return 10 / n
def mains():
foo('0')
#mains()
#logging模块,logging不会抛出错误,终止程序,而且可以统一的关闭,允许指定记录信息的级别
logging.basicConfig(Level=logging.error)
logging.exception('test loggin')
#pdb,启动python的调试器pdb,让程序以单步方式执行,可以随时查看运行时状态
#python3 -m pdb err.py
#输入命令:l,来查看代码
#输入命令:n,可以单步执行代码
#输入命令:p ‘变量名’,来查看变量的value
#输入命令:q,结束调试
#pdb.set_trace(),使用p命令查看变量,或者命令c继续执行
import pdb
s = '0'
n = int(s)
pdb.set_trace() #运行到这里自动的暂停
print(1/n)
#punit单元测试:Test-Driven Development
#单元测试是用来对一个模块、一个函数、或者一个类来进行正确性检验的测试工作
#如果单元测试能够通过,说明我们测试的模块能够正常工作,如果单元测试不通过,那么函数存在bug
#python提供了单元测试的模块unit
|
[
"tonny_2014@yeah.net"
] |
tonny_2014@yeah.net
|
608e19bedd581e824ba02b16d8940bbf839bed21
|
467fd7524849df98d24ca36553f9e7fc88aecfea
|
/apps/groups/models.py
|
efeb8ea9618faa55742454f30442d93fdd87b9c7
|
[] |
no_license
|
django-social/django-social
|
62e8010eee41d1a630bca939484059dc10f6344c
|
26efdf7502861fd914cd4a95866bcd4ab71e0261
|
refs/heads/master
| 2020-04-13T14:15:36.175757
| 2011-04-19T08:23:30
| 2011-04-19T08:23:30
| 4,367,095
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 47
|
py
|
# -*- coding: utf-8 -*-
from documents import *
|
[
"dgk@dgk.su"
] |
dgk@dgk.su
|
48c1be2d3d3665ee76ffa1ca186127b0b0d6017e
|
603c77e09f29f5355e4d1266b2852285224084cb
|
/venv/bin/pyreverse
|
bfe3eafdeb6ae0e1c2030ee16deb1a55303bb077
|
[] |
no_license
|
damodharn/FundooApp
|
6e9b66a069379feee6c6d5a9d85085b16d55b452
|
e0e0fa718dfcbca9aca28db992bef909a5f9151d
|
refs/heads/master
| 2022-12-08T20:04:49.677148
| 2019-09-12T08:46:28
| 2019-09-12T08:46:28
| 208,002,941
| 0
| 0
| null | 2022-12-08T01:22:56
| 2019-09-12T08:36:12
|
Python
|
UTF-8
|
Python
| false
| false
| 265
|
#!/home/admin1/PycharmProjects/FundooProj/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pylint import run_pyreverse
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(run_pyreverse())
|
[
"damodharn21@gmail.com"
] |
damodharn21@gmail.com
|
|
a36e04ad8db1d6ae12228d1d9223da4e0d55536e
|
e99e48aaa88ca87e7d6a89ab03f82f45c66f9981
|
/edx6.0.1x/ndigits.py
|
814fb97409ea99e2b429f14ca3cc1ba6229c331c
|
[] |
no_license
|
shivsharma07/pyexamples
|
dbb7f3e4d9eb1351210b96836ce0ed87fb1b337f
|
89b7916125f5bddb2b3f75003811600fc43c3a62
|
refs/heads/master
| 2021-01-19T11:57:17.984867
| 2016-09-22T20:16:45
| 2016-09-22T20:16:45
| 68,950,962
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 203
|
py
|
def ndigits(x):
if x < 0:
x = -x
if x > -10 and x < 10:
return 1
if x/10 > 0:
numOfDigits = ndigits(x/10)
return numOfDigits+1
print ndigits(-1322132)
|
[
"shivsharma07@gmail.com"
] |
shivsharma07@gmail.com
|
4d4deec840fd2d2fd8c95557d85d18c1b19d4d77
|
3491f5c2ef9c31ca7e8a83c2998dba76195d43ec
|
/rpmlv1/urls.py
|
b854b5b8629485347c877f371a0869c5fea52d0f
|
[] |
no_license
|
edgardegantea/rpmlv1
|
1819f32ac11eb9fe07422522bfa2fc619c7456de
|
0d99c72cbbbeecfd7f6d27d7e19c7ba1b8dfe326
|
refs/heads/master
| 2023-04-27T18:50:57.583725
| 2021-04-21T01:17:23
| 2021-04-21T01:17:23
| 359,995,334
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 748
|
py
|
"""rpmlv1 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
urlpatterns = [
path('admin/', admin.site.urls),
]
|
[
"edgar.degante.a@gmail.com"
] |
edgar.degante.a@gmail.com
|
80f823f41263b1271b589cbbf909eee322c3eee1
|
00d2862c4913bf2a323d43e95f19c1beac67e062
|
/if_else/if_else6.py
|
f397f5ec5eb9ee4d005a7d6946c13617befb2424
|
[] |
no_license
|
kamonchat26/workshop2
|
671fbf074b7e85dcae9783adfc410bbf1b8f30de
|
2e34c0b402797bc2970f89e7d9eaff731af5f845
|
refs/heads/master
| 2023-03-09T22:20:31.409693
| 2021-02-20T17:05:28
| 2021-02-20T17:05:28
| 328,288,492
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 121
|
py
|
a = 200
b = 33
c = 500
if a > b and C > a:
print("Both conditions are True")
# Output : "Both conditions are True"
|
[
"kamonchat2607@gmail.com"
] |
kamonchat2607@gmail.com
|
02e58d3a5fe7125e81acbc8394b3c6ce5aa475b8
|
dc1da2ea8d495db2edb9c84941e7947dcf77ba86
|
/HackerRank/solution/practice/data-structures/linked-lists/print-the-elements-of-a-linked-list/solution.py
|
f60744f0aefd600fd1c04f7c8e6e7575f9b2460c
|
[
"MIT"
] |
permissive
|
dschinzo/Competitive-Programming
|
ea3428b63a732ce87c74392445a8dfe65dc9b7ba
|
3100e083076a571b1896667277dc8cc6b855c18d
|
refs/heads/master
| 2023-03-01T12:59:05.699865
| 2021-01-25T10:25:00
| 2021-01-25T10:25:00
| 272,390,168
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 263
|
py
|
# Complete the printLinkedList function below.
#
# For your reference:
#
# SinglyLinkedListNode:
# int data
# SinglyLinkedListNode next
#
#
def printLinkedList(head):
if head is not None:
print(head.data)
printLinkedList(head.next)
|
[
"ds.chinzo@gmail.com"
] |
ds.chinzo@gmail.com
|
7cf85dc88b8f0f002e7fbe9b1b983c1f59d4bb40
|
6c1527b2dc3f944b8907d0de5bda6cdfbaeb1f7f
|
/traveler_dilemma/views.py
|
438d490932fa5b6907f65c2fdddf4b8f05dc0b20
|
[
"MIT"
] |
permissive
|
dcthomas4679/otree
|
f0a9204b12cd395e55fd9b77ac90584c2cd3c049
|
363a05d2f70f9225628e4857473dedcb449018dc
|
refs/heads/master
| 2021-06-23T20:07:02.499724
| 2020-11-18T15:32:30
| 2020-11-18T15:32:30
| 37,225,765
| 1
| 1
|
NOASSERTION
| 2021-06-10T23:28:55
| 2015-06-10T22:22:33
|
Python
|
UTF-8
|
Python
| false
| false
| 2,435
|
py
|
# -*- coding: utf-8 -*-
from __future__ import division
from . import models
from ._builtin import Page, WaitPage
from otree.common import Currency as c, currency_range
from .models import Constants
def vars_for_all_templates(self):
return {'total_q': 1, 'instructions': 'traveler_dilemma/Instructions.html'}
class Introduction(Page):
template_name = 'global/Introduction.html'
def vars_for_template(self):
return {'max_amount': Constants.max_amount,
'min_amount': Constants.min_amount,
'reward': Constants.reward,
'penalty': Constants.penalty}
class Question1(Page):
template_name = 'global/Question.html'
form_model = models.Player
form_fields = ['training_answer_mine', 'training_answer_others']
question = '''Suppose that you claim the antiques are worth 50 points and the other traveler claims they are worth 100 points. What would you and the other traveler receive in compensation from the airline?'''
def is_displayed(self):
return self.subsession.round_number == 1
def vars_for_template(self):
return {'num_q': 1, 'question': self.question}
class Feedback(Page):
def is_displayed(self):
return self.subsession.round_number == 1
def vars_for_template(self):
return {
'num_q': 1}
class Claim(Page):
form_model = models.Player
form_fields = ['claim']
class ResultsWaitPage(WaitPage):
def after_all_players_arrive(self):
for p in self.group.get_players():
p.set_payoff()
class Results(Page):
def vars_for_template(self):
other = self.player.other_player().claim
if self.player.claim < other:
reward = Constants.reward
penalty = c(0)
elif self.player.claim > other:
reward = c(0)
penalty = Constants.penalty
else:
reward = c(0)
penalty = c(0)
return {
'reward': reward,
'penalty': penalty,
'payoff_before_bonus': self.player.payoff - Constants.bonus,
'amount_paid_to_both': self.player.payoff - Constants.bonus - reward,
}
page_sequence = [Introduction,
Question1,
Feedback,
Claim,
ResultsWaitPage,
Results]
|
[
"dcthomas@gmail.com"
] |
dcthomas@gmail.com
|
403ebec7bc3a882608e0d98f4fd5b785fc8a8038
|
5308070c42185ab61d69f7b72450b09b11eea124
|
/class/run.py
|
9ea7246e229d9a272fdad7e43068faf8fd832196
|
[] |
no_license
|
costaxu/swig-test
|
f96b6593486b857a1a0664e3ffb58ab37989d089
|
30af29b18c68f11596acc638fe76e433cffee0f2
|
refs/heads/master
| 2018-12-28T00:53:15.810232
| 2015-01-23T05:08:41
| 2015-01-23T05:08:41
| 29,717,616
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 318
|
py
|
#!/usr/bin/python
#coding: utf-8
#file: run.py
import foo
from foo import *
if __name__=='__main__':
fo = Foo()
print Foo_woo()
print fo.woo()
print cvar.Foo__woo
fosub = FooSub()
print isinstance(fosub,Foo)
print issubclass(FooSub,Foo)
print foo(fo)
print foo(fosub)
|
[
"xxb.sklse@gmail.com"
] |
xxb.sklse@gmail.com
|
764143021b116a61b77d2160eb9c038de319c5a8
|
c32d1be401253ac045fbe54dd8ea0900080f2831
|
/inputfunctionangela.py
|
679ce387d80fbc5260293b09c40365d87829c3fc
|
[] |
no_license
|
EzikeChris/Band-Name-Generator
|
0b616b08bb849dfd9cb825e5a557e99435244d1b
|
b78e07394e5796c8663c265f64a144c7076c6394
|
refs/heads/master
| 2023-01-20T06:49:20.468407
| 2020-11-30T16:26:41
| 2020-11-30T16:26:41
| 317,202,700
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 85
|
py
|
print(len(input("what is your name")))
# USE THONNY.COM TO CHECK HOW THE CODE RUNS #
|
[
"christopherezikeu2@gmail.com"
] |
christopherezikeu2@gmail.com
|
a6de66059610b312518db486303f0562580c7410
|
0b24cc5973be51154ddc4d3679ae41b001cc668f
|
/usuarios/migrations/0001_initial.py
|
d3ac74404fb03921c4a517de996f435d19405f88
|
[] |
no_license
|
slacker17/clinica
|
1bc5772ab9d04736cb7210395d4d0340e85d8a7d
|
b3661fe881f5d6472241611bbe397d41608ce44c
|
refs/heads/master
| 2021-01-10T04:13:00.326029
| 2019-01-17T05:18:10
| 2019-01-17T05:18:10
| 47,507,323
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,213
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Paciente',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('nombre', models.CharField(max_length=25)),
('apellido_paterno', models.CharField(max_length=25)),
('apellido_materno', models.CharField(max_length=25)),
('curp', models.CharField(max_length=20)),
('fecha_nacimiento', models.DateField()),
('fecha_ingreso', models.DateField()),
('edad', models.IntegerField()),
('sexo', models.CharField(max_length=10)),
('direccion', models.CharField(max_length=40)),
('peso', models.FloatField()),
('estatura', models.FloatField()),
('diagnostico', models.CharField(max_length=100)),
],
options={
'ordering': ['nombre'],
},
),
]
|
[
"vale@darkstar.example.net"
] |
vale@darkstar.example.net
|
0db31016f513306ab1665e3149cafb72e7baae7b
|
90af4fb4c23ea4ca72d0d47223abbde14662e550
|
/Stark_panel/supp_acco_views.py
|
6dfdb71f9451d6f7a4b55a87426f9e5009716121
|
[] |
no_license
|
saeedrezaghazanfari/stark_panel
|
3ce0232d87da460dfcc1ec3e97e4b07f73d3b3d2
|
15a7466bfc51a30edae8c3fee6a42b28f52d8f41
|
refs/heads/main
| 2023-05-08T22:35:38.931495
| 2021-06-06T08:14:02
| 2021-06-06T08:14:02
| 372,149,739
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,690
|
py
|
from django.contrib import messages
from datetime import date, datetime
from django.shortcuts import render, redirect
from django.utils.translation import gettext_lazy as _
from django.views.generic import ListView
from django.utils.translation import get_language
from Stark_account.models import User
from django.views.generic import UpdateView
from django.contrib.messages.views import SuccessMessageMixin
from django.contrib.auth.mixins import LoginRequiredMixin
from .st_modules import get_new_data_id
from .models import (
RobotSubscription, BuyAndSell, Ticket, UserStoke, WalletOrder, UserWallet, ChartTokenPrice
)
from .forms import (
TicketResponseForm, AddToChart, SendTicket_OneUser, UserStoke_Form, BuyAndSell_Form, WalletOrderAddForm
)
from .mixins import (
AcountantPermision, acountants_required_decorator, suppurt_required_decorator, active_required_decorator
)
from django.template.loader import render_to_string
from django.core.mail import EmailMessage
from django.conf import settings
# define get absoulute url handle
def get_url_absolute():
lang = get_language()
if lang == 'fa':
return '/fa/acountants/'
elif lang == 'en':
return '/en/acountants/'
elif lang == 'ar':
return '/ar/acountants/'
# list of users
@active_required_decorator(login_url='/sign-in')
@suppurt_required_decorator(login_url='/')
def suppurt_home_page(request):
return render(request, 'is_supporter/supporter_panel.html', {
'unread_tickets': Ticket.objects.filter(is_suppurt=False, is_seen=False).order_by('-id'),
})
@active_required_decorator(login_url='/sign-in')
@suppurt_required_decorator(login_url='/')
def suppurt_set_useraccount_page(request):
if request.POST:
payment_total = request.POST.get('payment-total')
account_total = request.POST.get('account-total')
user_codee = request.POST.get('user-code')
impre_total = request.POST.get('impre-total')
robot_sub_total = request.POST.get('robot-sub-total')
if not account_total:
account_total = 0
if not payment_total:
payment_total = 0
if not impre_total:
impre_total = 0
if not robot_sub_total:
robot_sub_total = 0
if not user_codee:
messages.info(request, _('مشکلی رخ داده است.') )
return redirect('pannel:su_home')
if account_total and payment_total and impre_total and user_codee:
sel_user = User.objects.filter(user_code=user_codee).first()
sel_user.stoke = account_total
sel_user.payment_total = payment_total
sel_user.impression_total = impre_total
sel_user.robot_sub_total = robot_sub_total
sel_user.save()
messages.info(request, _('اطلاعات کاربر با موفقیت ذخیره شد.') )
return redirect('pannel:su_useraccount')
return render(request, 'is_supporter/support_set_user_acc.html', {
'users': User.objects.filter(is_active=True).order_by('-id'),
})
# send ticket to user
@active_required_decorator(login_url='/sign-in')
@suppurt_required_decorator(login_url='/')
def ticket_of_user_page(request, ticketID):
thisTicket = Ticket.objects.get(id=ticketID)
# seened
if request.user.is_suppurt:
thisTicket.is_seen = True
thisTicket.save()
ticket_form = TicketResponseForm(request.POST or None)
# send ticket to user
if ticket_form.is_valid():
res = ticket_form.save(commit=False)
res.id = get_new_data_id(modelname='Ticket')
res.user = thisTicket.user
res.title = thisTicket.title
res.is_suppurt = True
res.is_seen = True
res.date = datetime.now()
res.save()
# send mail
mail_subject = _('استارک | پاسخ تیکت')
if get_language() == 'fa':
messagee = render_to_string('is_supporter/send-email/send-touser-fa.html', {
'date': datetime.now(),
'username': res.user.username,
'link': 'https://panel.st4w.net/fa/ticket/all/',
'meessage': ticket_form.cleaned_data.get('message'),
})
elif get_language() == 'en':
messagee = render_to_string('is_supporter/send-email/send-touser-en.html', {
'date': datetime.now(),
'username': res.user.username,
'link': 'https://panel.st4w.net/en/ticket/all/',
'meessage': ticket_form.cleaned_data.get('message'),
})
elif get_language() == 'ar':
messagee = render_to_string('is_supporter/send-email/send-touser-ar.html', {
'date': datetime.now(),
'username': res.user.username,
'link': 'https://panel.st4w.net/ar/ticket/all/',
'meessage': ticket_form.cleaned_data.get('message'),
})
to_email = res.user.email
msg_EMAIL = EmailMessage(
mail_subject, messagee, from_email=settings.EMAIL_HOST_USER, to=[to_email]
)
msg_EMAIL.content_subtype = "html"
msg_EMAIL.send()
messages.info(request, _('پیام شما با موفقیت ارسال شد.') )
return redirect('pannel:su_home')
return render(request, 'is_supporter/support_response.html', {
'ticket_form': ticket_form,
'ticket': thisTicket,
})
# all of user tickets
@active_required_decorator(login_url='/sign-in')
@suppurt_required_decorator(login_url='/')
def tickets_all_user_page(request):
return render(request, 'is_supporter/support_all_ticket.html', {
'tickets': Ticket.objects.filter(is_suppurt=False).order_by('-id'),
})
# send a ticket to one user
@active_required_decorator(login_url='/sign-in')
@suppurt_required_decorator(login_url='/')
def send_ticket_one_user_page(request):
sendticket_form = SendTicket_OneUser(request.POST or None)
context = {
'sendticket_form': sendticket_form
}
if sendticket_form.is_valid():
obj_ticket = sendticket_form.save(commit=False)
obj_ticket.id = get_new_data_id(modelname='Ticket')
obj_ticket.date = datetime.now()
obj_ticket.is_suppurt = True
obj_ticket.save()
# send mail
mail_subject = _('استارک | دریافت تیکت')
if get_language() == 'fa':
messagee = render_to_string('is_supporter/send-email/send-touser-fa.html', {
'date': datetime.now(),
'username': sendticket_form.cleaned_data.get('user').username,
'link': 'https://panel.st4w.net/fa/ticket/all/',
'meessage': sendticket_form.cleaned_data.get('message'),
'a_user': True,
})
elif get_language() == 'en':
messagee = render_to_string('is_supporter/send-email/send-touser-en.html', {
'date': datetime.now(),
'username': sendticket_form.cleaned_data.get('user').username,
'link': 'https://panel.st4w.net/en/ticket/all/',
'meessage': sendticket_form.cleaned_data.get('message'),
'a_user': True,
})
elif get_language() == 'ar':
messagee = render_to_string('is_supporter/send-email/send-touser-ar.html', {
'date': datetime.now(),
'username': sendticket_form.cleaned_data.get('user').username,
'link': 'https://panel.st4w.net/ar/ticket/all/',
'meessage': sendticket_form.cleaned_data.get('message'),
'a_user': True,
})
theUser = User.objects.filter(user_code=sendticket_form.cleaned_data.get('user')).first()
to_email = theUser.email
msg_EMAIL = EmailMessage(
mail_subject, messagee, from_email=settings.EMAIL_HOST_USER, to=[to_email]
)
msg_EMAIL.content_subtype = "html"
msg_EMAIL.send()
messages.info(request, _('پیام با موفقیت ارسال شد.') )
return redirect('pannel:su_home')
return render(request, 'is_supporter/support_sendticket_a_user.html', context)
# ############ end suppurt ############ #
# ################# is_acountants pages ################# #
@active_required_decorator(login_url='/sign-in')
@acountants_required_decorator(login_url='/')
def acountants_home_page(request):
addtocharrt_form = AddToChart(request.POST or None)
context = {
'addtocharrt_form': addtocharrt_form,
# counters of tables
'user_counter': User.objects.all().count(),
'walletaddr_counter': UserWallet.objects.all().count(),
'user_stoke_counter': UserStoke.objects.all().count(),
'bots_counter': RobotSubscription.objects.all().count(),
'buysells_counter': BuyAndSell.objects.all().count(),
'walletorder_counter': WalletOrder.objects.all().count(),
}
if addtocharrt_form.is_valid():
newprice = addtocharrt_form.save(commit=False)
newprice.date = datetime.now()
newprice.id = get_new_data_id(modelname='ChartTokenPrice')
newprice.save()
messages.info(request, _('با موفقیت اضافه شد.') )
return redirect('pannel:ac_home')
return render(request, 'is_acountants/accountant_panel.html', context)
# user list
class AccountantUsers(AcountantPermision, LoginRequiredMixin, ListView):
template_name='is_acountants/acountants_user.html'
model = User
queryset = User.objects.order_by('-id')
# accountant token prices
class AccountantTokenPrice(AcountantPermision, LoginRequiredMixin, ListView):
template_name='is_acountants/acountant_chart_token_price.html'
model = ChartTokenPrice
queryset = ChartTokenPrice.objects.order_by('-id')
class AccountantUserWalletAddress(AcountantPermision, LoginRequiredMixin, ListView):
template_name='is_acountants/accountant_wallet_addr.html'
model = UserWallet
queryset = UserWallet.objects.order_by('-id')
# bot list
@active_required_decorator(login_url='/sign-in')
@acountants_required_decorator(login_url='/')
def accountant_bots_page(request):
context = {
'object_list': RobotSubscription.objects.order_by('-id')
}
return render(request, 'is_acountants/accountant_bots.html', context)
# Wallet AddForm
@active_required_decorator(login_url='/sign-in')
@acountants_required_decorator(login_url='/')
def accountant_walletuser_page(request):
WalletOrderAddForm_form = WalletOrderAddForm(request.POST or None)
context = {
'add_form': WalletOrderAddForm_form,
'object_list': WalletOrder.objects.order_by('-id')
}
if WalletOrderAddForm_form.is_valid():
WalletOrderAddForm_obj = WalletOrderAddForm_form.save(commit=False)
WalletOrderAddForm_obj.date = datetime.now()
WalletOrderAddForm_obj.id = get_new_data_id(modelname='WalletOrder')
WalletOrderAddForm_obj.save()
messages.info(request, _('عملیات کیف پول اضافه شد.') )
return redirect('pannel:ac_walletorders')
return render(request, 'is_acountants/accountant_walletusers.html', context)
# buy sell list
@active_required_decorator(login_url='/sign-in')
@acountants_required_decorator(login_url='/')
def accountant_buy_sells_page(request):
buyandsell_form = BuyAndSell_Form(request.POST or None)
context = {
'add_form': buyandsell_form,
'object_list': BuyAndSell.objects.order_by('-id'),
}
if buyandsell_form.is_valid():
buyandsell_form_obj = buyandsell_form.save(commit=False)
buyandsell_form_obj.date = datetime.now()
buyandsell_form_obj.id = get_new_data_id(modelname='BuyAndSell')
buyandsell_form_obj.save()
messages.info(request, _('خرید و فروش توکن کاربر ذخیره شد.') )
return redirect('pannel:ac_buysells')
return render(request, 'is_acountants/accountant_buy_sells.html', context)
# user stokes
@active_required_decorator(login_url='/sign-in')
@acountants_required_decorator(login_url='/')
def accountant_userStokes_page(request):
userstoke_form = UserStoke_Form(request.POST or None)
context = {
'add_form': userstoke_form,
'object_list': UserStoke.objects.order_by('-id'),
}
if userstoke_form.is_valid():
user_get = userstoke_form.cleaned_data.get('user')
token_get = userstoke_form.cleaned_data.get('token')
count_get = userstoke_form.cleaned_data.get('count')
user_ex = UserStoke.objects.filter(user=user_get, token=token_get).first()
if user_ex:
user_ex.count = count_get
user_ex.date = datetime.now()
user_ex.save()
messages.info(request, _('توکن کاربر ذخیره شد.') )
return redirect('pannel:ac_userStoke')
else:
userstoke_form_obj = userstoke_form.save(commit=False)
userstoke_form_obj.date = datetime.now()
userstoke_form_obj.save()
messages.info(request, _('توکن کاربر ذخیره شد.') )
return redirect('pannel:ac_userStoke')
return render(request, 'is_acountants/accountant_user_stokes.html', context)
################### EDIT THE LISTS #####################
### edit userStoke
class AccountantUserStokes_Edit(LoginRequiredMixin, AcountantPermision, SuccessMessageMixin, UpdateView):
template_name = 'is_acountants/list-editor/audit-user-stoke.html'
model = UserStoke
success_message = _('تغییرات اعمال شد.')
def get_context_data(self, ** kwargs):
context = super().get_context_data(** kwargs)
context ['object'] = UserStoke.objects.get(id=self.kwargs['pk'])
return context
success_url = get_url_absolute()
fields = ['user', 'token', 'count', 'date']
# edit token prices
class AccountantTokenPrice_Edit(LoginRequiredMixin, AcountantPermision, SuccessMessageMixin, UpdateView):
template_name = 'is_acountants/list-editor/audit-token-price.html'
model = ChartTokenPrice
success_message = _('تغییرات اعمال شد.')
def get_context_data(self, ** kwargs):
context = super().get_context_data(** kwargs)
context ['object'] = ChartTokenPrice.objects.get(id=self.kwargs['pk'])
return context
success_url = get_url_absolute()
fields = ['token', 'price_dollar', 'date']
### edit user wallets
class AccountantWalletuser_Edit(LoginRequiredMixin, AcountantPermision, SuccessMessageMixin, UpdateView):
template_name = 'is_acountants/list-editor/audit-user-wallets.html'
model = WalletOrder
success_message = _('تغییرات اعمال شد.')
def get_context_data(self, ** kwargs):
context = super().get_context_data(** kwargs)
context ['object'] = WalletOrder.objects.get(id=self.kwargs['pk'])
return context
success_url = get_url_absolute()
fields = ['user', 'price', 'type_order', 'wallet_address', 'date', 'is_paid']
### edit user wallets
class AccountantBots_Edit(LoginRequiredMixin, AcountantPermision, SuccessMessageMixin, UpdateView):
template_name = 'is_acountants/list-editor/audit-bot-subs.html'
model = RobotSubscription
success_message = _('تغییرات اعمال شد.')
def get_context_data(self, ** kwargs):
context = super().get_context_data(** kwargs)
context ['object'] = RobotSubscription.objects.get(id=self.kwargs['pk'])
return context
success_url = get_url_absolute()
fields = ['bot_code', 'user', 'time_subscription', 'date', 'is_paid', 'is_active', 'last_date']
### edit user wallets
class AccountantBuySells_Edit(LoginRequiredMixin, AcountantPermision, SuccessMessageMixin, UpdateView):
template_name = 'is_acountants/list-editor/audit-buy-sells.html'
model = BuyAndSell
success_message = _('تغییرات اعمال شد.')
def get_context_data(self, ** kwargs):
context = super().get_context_data(** kwargs)
context ['object'] = BuyAndSell.objects.get(id=self.kwargs['pk'])
return context
success_url = get_url_absolute()
fields = ['user', 'token', 'count', 'buy_sell', 'date', 'is_paid']
|
[
"saeedreza.gh.1397@gmail.com"
] |
saeedreza.gh.1397@gmail.com
|
a189c6c81b68b77f59003ca143c4866d74aaa515
|
2434cc9f60b6203196f81e2bc02ebe8283db0230
|
/src/dataPreprocess/RegressionDataPreprocessor.py
|
5951eea8a41338731e8ad2ab49a3e5e0ffb6a3b0
|
[] |
no_license
|
ys10/GCIDetection
|
99e8d8aa225b4a157144796a1c199cd9bfe203e6
|
f79c03f739e486f67a4af6489406129991631802
|
refs/heads/master
| 2021-07-22T23:44:04.281381
| 2017-10-27T07:19:19
| 2017-10-27T07:19:19
| 104,042,312
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,416
|
py
|
from src import *
class RegressionDataPreprocessor(DataPreprocessor):
def __init__(self, dataFilePath, frameSize=1, frameStride=1, waveDirPath="data/wav/", waveExtension=".wav",
markDirPath="data/mark/",
markExtension=".mark"):
DataPreprocessor.__init__(self, dataFilePath, frameSize, frameStride, waveDirPath, waveExtension, markDirPath, markExtension)
self.gciCriticalDistance = None
pass
def setGCICriticalDistance(self, gciCriticalDistance=400):
self.gciCriticalDistance = gciCriticalDistance
pass
def getGCICriticalDistance(self):
if self.gciCriticalDistance is None:
self.gciCriticalDistance = 400
pass
return self.gciCriticalDistance
def getAmendDistance(self, distance):
if distance > self.getGCICriticalDistance():
distance = self.getGCICriticalDistance()
pass
return distance
# Transform GCI locations to label(binary classification) sequence.
def transLocations2LabelSeq(self, locations, labelSeqLength, samplingRate):
forward = numpy.zeros(shape=(labelSeqLength, 1), dtype=numpy.float32)
backward = numpy.zeros(shape=(labelSeqLength, 1), dtype=numpy.float32)
labelSeq = numpy.reshape(numpy.asarray([forward, backward]).transpose(), [labelSeqLength, 2])
logging.debug("mark data shape:" + str(labelSeq.shape))
labelLocations = list()
for location in locations:
labelLocation = self.getLabelIndex(location, samplingRate, labelSeqLength)
# logging.debug("Time:" + str(labelLocation))
labelLocations.append(labelLocation)
pass
for i in range(labelLocations.__len__()):
currentLocation = labelLocations[i]
labelSeq[currentLocation][0] = 0
labelSeq[currentLocation][1] = 0
# Do with the first GCI
if i == 0:
for j in range(currentLocation):
labelSeq[j][0] = self.getGCICriticalDistance()
labelSeq[j][1] = self.getAmendDistance(currentLocation - j)
pass
pass
# Do with the last GCI
if i == labelLocations.__len__() - 1:
for j in range(currentLocation + 1, labelSeq.__len__()):
labelSeq[j][0] = self.getAmendDistance(j - currentLocation)
labelSeq[j][1] = self.getGCICriticalDistance()
pass
pass
# Other location
else:
nextLocation = labelLocations[i + 1]
for j in range(currentLocation + 1, nextLocation):
labelSeq[j][0] = self.getAmendDistance(j - currentLocation)
labelSeq[j][1] = self.getAmendDistance(nextLocation - j)
pass
pass
pass
print("labelSeq:"+str(labelSeq))
return labelSeq
def transLocations2GCIMask(self, locations, samplingRate):
    """Placeholder for a GCI mask transform; not implemented yet, returns None."""
    return None
def main():
    """Run the regression preprocessor over the APLAWDW test dataset."""
    preprocessor = RegressionDataPreprocessor("data/hdf5/APLAWDW_test.hdf5", frameSize=1)
    preprocessor.process()
# Script entry point: run the preprocessing pipeline only when executed
# directly, not when imported as a module.
if __name__ == '__main__':
    main()
    pass
|
[
"yangshuai@pachiratech.com"
] |
yangshuai@pachiratech.com
|
6cfc8a547aa6241224ee19d9e6f5ab5c52083cb2
|
d7390fea6c7f712ee32be6d3478835d965d795e0
|
/py26_20day/reuquests模块的学习/07使用requests如何请求token鉴权的接口.py
|
faad5c7acd6e7ce96c28980b45ec1a7bed958197
|
[] |
no_license
|
luwenchun/Automated_Test
|
2f424655d80127e3ed98657869021a775beca868
|
79b9937cfc0841b0a80d4fd45d8ff467654b5b55
|
refs/heads/master
| 2021-02-10T15:23:08.446463
| 2020-03-26T10:39:38
| 2020-03-26T10:39:38
| 244,393,626
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,892
|
py
|
"""
============================
Author:柠檬班-木森
Time:2020/2/19 21:27
E-mail:3247119728@qq.com
Company:湖南零檬信息技术有限公司
============================
"""
import requests
# 请求头
headers = {
"X-Lemonban-Media-Type": "lemonban.v2"
}
# # 登录的请求
# url = "http://api.lemonban.com/futureloan/member/login"
# data = {
# "mobile_phone": "13367899876",
# "pwd": "lemonban"
# }
# res = requests.post(url=url, json=data, headers=headers)
# print(res.json())
# 充值的请求
# print('-------------------充值--------------------------')
# re_url = "http://api.lemonban.com/futureloan/member/recharge"
# re_data = {
# "member_id": 74711,
# "amount": 2000
# }
# res = requests.post(url=re_url, json=re_data, headers=headers)
# print(res.json())
# # ----------------------上面方式无法通过鉴权,下面是正确的操作方法--------------------------------------------
headers = {
"X-Lemonban-Media-Type": "lemonban.v2"
}
# 登录的请求
url = "http://api.lemonban.com/futureloan/member/login"
data = {
"mobile_phone": "13367899876",
"pwd": "lemonban"
}
res = requests.post(url=url, json=data, headers=headers)
print(res.json())
# 重登录返回的数据中,提取token
data = res.json()
token = data["data"]["token_info"]["token"]
token_type = data["data"]["token_info"]["token_type"]
token_value = token_type + " " + token
print('token_value',token_value)
#
# # 在请求头中添加token
headers["Authorization"] = token_value
print('headers',headers)
#
# # 充值的请求
print('-------------------充值--------------------------')
re_url = "http://api.lemonban.com/futureloan/member/recharge"
re_data = {
"member_id": 74711,
"amount": 2000
}
res2 = requests.post(url=re_url, json=re_data, headers=headers)
print(res2.json())
|
[
"luwenchun@users.noreply.github.com"
] |
luwenchun@users.noreply.github.com
|
c7e82d2f0ac6909c2faecba83848021518939a3f
|
34df06e8f0a482127c73406259b4ed66f863cefa
|
/evictions_map/main.py
|
8f0e61f567ca724fbfdf104830663832d9af040e
|
[] |
no_license
|
DataWorks-NC/2019-DataPlus-Evictions-Visualizations
|
198f03c1e745400d1de61c8af26cc11f62db5d5c
|
499bb074f17427348af7fd17375db500493958fc
|
refs/heads/master
| 2022-12-10T11:03:56.517365
| 2021-04-06T14:59:45
| 2021-04-06T14:59:45
| 233,906,619
| 0
| 0
| null | 2022-12-08T07:47:54
| 2020-01-14T18:18:46
|
Python
|
UTF-8
|
Python
| false
| false
| 6,686
|
py
|
from bokeh.io import curdoc
from bokeh.layouts import row, column
from bokeh.models import ColumnDataSource, FixedTicker, LogColorMapper, ColorBar, HoverTool, \
WheelZoomTool
from bokeh.models.widgets import Slider, Paragraph, Button
from bokeh.plotting import figure
from bokeh.palettes import YlGnBu5
from bokeh.tile_providers import get_provider, CARTODBPOSITRON_RETINA
# Reverse the color palette so it runs from lighter to darker.
# Consumed by the LogColorMapper that colors the choropleth.
palette = YlGnBu5[::-1]
# Filters the evictions dataframe by year and month.
def filter_evictions(evictions_dataset, year, month):
    """Return the rows of *evictions_dataset* whose 'year' and 'month' match."""
    year_match = evictions_dataset['year'] == year
    month_match = evictions_dataset['month'] == month
    return evictions_dataset[year_match & month_match]
# setup time range
months_names = ['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October',
                'November',
                'December']
# Number of most-recent months the slider is meant to step through.
num_unique_months = 24
# Import data from server_context (populated by the app lifecycle hooks;
# presumably a pandas DataFrame of eviction counts -- confirm).
server_context = curdoc().session_context.server_context
evictions_count = server_context.evictions_count
# Find unique dates from dataset
dates = evictions_count.groupby(['year', 'month'])
dates = dates['year'].unique().keys()
# NOTE(review): the slice (-num_unique_months-1):-2 yields num_unique_months-1
# elements (23 here), one fewer than the slider's range suggests -- confirm.
update = dates[(-1*num_unique_months-1):-2] # (year, month) pairs for most recent 18 months in dataset, but cut most recent 2 months because server tends to report empty data there.
update = [{'year': d[0], 'month': d[1], 'name': f'{months_names[d[1] - 1]} {d[0]}'} for d in update]
cur_date = update[-1]
# Pull latest data for initial display
initial_filtered_evictions = filter_evictions(evictions_count, cur_date['year'], cur_date['month'])
# NOTE(review): `source` appears unused -- `evictions_data` below is what
# actually feeds the choropleth.
source = ColumnDataSource(
    data=dict(
        xs=list(initial_filtered_evictions['xs']),
        ys=list(initial_filtered_evictions['ys']),
        evics=list(initial_filtered_evictions['evictions_per_rental_unit']),
        evics_raw=list(initial_filtered_evictions['evictions']),
        fips=list(initial_filtered_evictions.fips),
        tract=list(initial_filtered_evictions.tract),
        blockgroup=list(initial_filtered_evictions.blockgroup))
)
# ---------------------------------------------------------------#
# Palette Setup / ColorBar
color_bar_height = 650 + 11
color_bar_width = 120
# Log color scale with fixed low end 0.25 and data-driven high end.
color_mapper = LogColorMapper(palette=palette, low=0.25, high=evictions_count['evictions_per_rental_unit'].max())
color_bar = ColorBar(color_mapper=color_mapper, label_standoff=8, width=20, ticker=FixedTicker(ticks=[0, 2, 5, 10, 50]),
                     major_tick_line_color='#000000', major_tick_out=5,
                     height=500, location=(0, 0))
# A separate minimal figure exists only to host the color bar beside the map.
color_bar_plot = figure(title="Evictions per 100 Rental Units", title_location="right",
                        height=color_bar_height, width=color_bar_width,
                        toolbar_location=None, min_border=0,
                        outline_line_color=None)
color_bar_plot.add_layout(color_bar, 'right')
color_bar_plot.title.align = "center"
color_bar_plot.title.text_font_size = '12pt'
# ---------------------------------------------------------------#
# Figures
hover = HoverTool(tooltips=[('Tract', '@tract'), ('Block Group', '@blockgroup'), ('Evictions per 100 rental units', '@evics'), ('Total evictions', '@evics_raw')])
wheel_zoom = WheelZoomTool()
# Live data source driving the choropleth; update_data() swaps its contents.
evictions_data = ColumnDataSource(dict(
    xs=list(initial_filtered_evictions['xs']),
    ys=list(initial_filtered_evictions['ys']),
    evics=list(initial_filtered_evictions['evictions_per_rental_unit']),
    evics_raw=list(initial_filtered_evictions['evictions']),
    fips=list(initial_filtered_evictions.fips),
    tract=list(initial_filtered_evictions.tract),
    blockgroup=list(initial_filtered_evictions.blockgroup))
)
# The x/y ranges are Web-Mercator coordinates bounding the Durham area.
evictions_map = figure(plot_height=650, plot_width=500, title='Evictions per 100 Rental Units per Block group, Durham',
                       tools=[hover, wheel_zoom, 'pan', 'save', 'reset'],
                       toolbar_location='above', x_range=(-8785000, -8775000), y_range=(4280000, 4335000),
                       x_axis_type='mercator', y_axis_type='mercator')
# ---------------------------------------------------------------#
# Map Setup
evictions_map.axis.visible = False
evictions_map.grid.grid_line_color = None
evictions_map.add_tile(get_provider(CARTODBPOSITRON_RETINA))
evictions_map.grid.grid_line_color = None
evictions_map.toolbar.active_scroll = wheel_zoom
# ---------------------------------------------------------------#
# Glyphs
choropleth_layer = evictions_map.patches('xs', 'ys',
                                         source=evictions_data,
                                         fill_color={'field': 'evics', 'transform': color_mapper},
                                         line_width=0.3,
                                         line_color='black',
                                         fill_alpha=0.9)
# ---------------------------------------------------------------#
# Widgets Setup
# Slider indexes into `update`; the label is rendered by `paragraph` instead.
year = Slider(title='', value=num_unique_months - 1, start=0, end=num_unique_months - 1, step=1)
year.show_value = False
sliderLabel = Paragraph(text='Select a month to view using the slider')
paragraph = Paragraph(text=cur_date['name'], width=200, height=8) # TODO: This initial value also needs to update dynamically
paragraph.default_size = 500
opacity = Button(label='Show Streets')
# ---------------------------------------------------------------#
# Set Up Callbacks
def update_data(attrname, old, new):
    """Slider callback: re-filter the dataset to the selected month and push
    it into the map's data source.

    Bokeh callback signature is (attrname, old, new); the slider position is
    read from `year.value` rather than from `new`.
    NOTE(review): `update` may hold one fewer entry than the slider's max
    index allows (see the slice above) -- confirm `update[index]` is in range
    at the top slider position.
    """
    # Transition Sliders
    index = year.value
    # Pull just evictions data for this month/year.
    filtered_evictions = filter_evictions(evictions_count, update[index]['year'], update[index]['month'])
    # Inject new dataset
    evictions_data.data = dict(
        xs=list(filtered_evictions['xs']),
        ys=list(filtered_evictions['ys']),
        evics=list(filtered_evictions['evictions_per_rental_unit']),
        evics_raw=list(filtered_evictions['evictions']),
        fips=list(filtered_evictions.fips),
        tract=list(filtered_evictions.tract),
        blockgroup=list(filtered_evictions.blockgroup)
    )
    # Keep the visible month label in sync with the selection.
    paragraph.text = update[index]['name']


# Throttled: fire on slider release, not on every drag tick.
year.on_change('value_throttled', update_data)
# NOTE(review): re-running update_data when paragraph.text changes looks
# redundant (update_data itself sets paragraph.text) -- confirm intent.
paragraph.on_change('text', update_data)
def update_opacity():
    """Button callback: toggle the choropleth between opaque and
    semi-transparent so the basemap streets show through."""
    if opacity.label == 'Show Streets':
        opacity.label = 'Hide Streets'
        choropleth_layer.glyph.fill_alpha = 0.5
    else:
        opacity.label = 'Show Streets'
        # NOTE(review): restores alpha to 1, but the glyph starts at 0.9.
        choropleth_layer.glyph.fill_alpha = 1


opacity.on_click(update_opacity)
# ---------------------------------------------------------------#
# Create Layout
layout = column(row(evictions_map, color_bar_plot), sliderLabel, paragraph, year, opacity, width=800)
curdoc().add_root(layout)
|
[
"tim@rad.cat"
] |
tim@rad.cat
|
7254385c8d174a5fa574996da37c9dc8ad75aa79
|
d4f4bff5d4412abbb73ce534fae0c87ea9a62362
|
/model/rest2/emv_certificate.py
|
d3201f7e38f0799745324acbb996904f10478a11
|
[] |
no_license
|
icorso/wn_api
|
4f023905bcf83fd19eb7826191a6fcf66345e38f
|
b7e558b30d57b62ed3333cbfb7a9359bf954e320
|
refs/heads/master
| 2023-05-25T11:05:02.203211
| 2021-05-22T15:10:57
| 2021-05-22T15:10:57
| 366,672,359
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 20,971
|
py
|
# coding: utf-8
"""
Merchant API
# Introduction The Merchant API enables you to connect seamlessly and securely to our [Omni-Channel Payments Platform](https://www.worldnetpayments.com/). Our APIs are built around [REST](https://en.wikipedia.org/wiki/Representational_state_transfer) principles and [OpenAPI Specification](https://www.openapis.org/) definitions. Complying to such industry standards means that we can offer developers a much better experience by exposing predictable resource-oriented URL's as well as a comprehensive range of HTTP response codes and verbs. Moreover, you have the possibility to enable and take full advantage of [HATEOAS](https://en.wikipedia.org/wiki/HATEOAS) controls to provide out-of-the-box `Discoverability` and `Functional-Awareness` for your integrations. Get started on building full-featured payment applications and join us in the Revolution of Intelligent Retail. # Authentication The Merchant API uses a combination of API Keys and [Java Web Tokens (JWT)](https://jwt.io/) to authenticate requests. API Key's hold all the necessary information for issuing JWT access tokens which in turn are required to access protected resources and operations. Therefore, before you can start making any calls, you must generate an API Key and use it to obtain access tokens. Please, make yourself familiar with the following security schemes before proceeding: <!-- ReDoc-Inject: <security-definitions> --> ## Generating an API Key In order to generate your first API Key you must [sign up](#) for a developer account and follow the steps below: 1. [Log into the SelfCare System](#) with the credentials you received in the welcome email. 2. Under *Settings*, navigate to *API Keys*, and then click the `NEW API KEY` button. 4. Enter an alias and set the permission modes for each Sub-API. 5. Select the terminals that you want the API Key to be allowed to operate. 6. Back on the list, choose the action `View Authentication Key` to be able to see your API Key. 
## Obtaining an Access Token In order to obtain an access token you must use the [authenticate](#operation/authenticate) operation passing your API Key in the `HTTP Authorization` header with `Basic` authentication scheme. In the snippet bellow we show how to achieve that using [cURL](https://github.com/curl/curl). However, if you are not familiar with command line tools we recommend [Postman](https://www.getpostman.com/). ``` curl https://testpayments.worldnettps.com/merchant/api/v1/account/authenticate \\ -H \"Authorization: Basic <Merchant API Key>\" ``` For every successful request you should receive a response just like the one bellow containing the information associated with your crendentials, such as hours to expiry and privileges. Include the JWT Token from the `token` property in the `Authorization` header with `Bearer` authentication scheme for following requests to prove your identity and access protected resources. ``` { \"audience\": \"testpayments.worldnettps.com\", \"boundTo\": \"My API Key\", \"tokenType\": \"Bearer\", \"token\": \"<JWT Access Token>\", \"issuedAt\": \"2020-03-27T10:06:04.891+0000\", \"expiresIn\": 1, \"enableHypermedia\": true, \"roles\": [], \"allowedTerminals\": [] } ``` For security reasons, access tokens expire after a certain amount of time. Therefore, your application must implement a mechanism to keep track of `issuedAt` and `expiresIn` values in order to decide the right moment to automatically request new tokens. **Note:** Your application must not hard-code the lifespan of a token as the value of `expiresIn` property is subject to change without prior notice. ## Making Authenticated Calls Apart from the [authenticate](#operation/authenticate) operation, the entire API requires `Bearer` authentication scheme and expects a valid JWT token as proof of identity. 
The [cURL](https://github.com/curl/curl) snippet bellow is an example of how to use your access token, in this specific case, to request the list of available terminals in your account. ``` curl https://testpayments.worldnettps.com/merchant/api/v1/account/terminals?pageSize=10 \\ -H \"Content-Type: application/json\" \\ -H \"Authorization: Bearer <JWT Token>\" ``` **Note:** The API will issue a response with status `401 Unauthorized` for requests carrying an expired JWT. # API Requests We provide developers looking to integrate with our solutions with a full-featured **Sandbox**. - Sandbox URL: https://testpayments.worldnettps.com/merchant/ In order to perform actions on the API's resources you must combine your requests with the proper [HTTP Request Method](https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods). The Merchant API supports the following HTTP Methods which are sometimes referred as *HTTP Verbs*: HTTP Method | Description ------------ | ------------- [GET](https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods/GET) | It requests a representation of the specified resource. Requests using `GET` are read-only. [POST](https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods/POST) | It is used to submit an entity to the specified resource, often causing a change in state on the server. [PATCH](https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods/PATCH) | It is used to apply partial modifications to a resource. [DELETE](https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods/DELETE) | It deletes / cancels / reverts the specified resource. ## Request Identifiers The Merchant API assigns a unique identifier for every request that comes in. You can find your requests' identifiers either in the `X-Request-Id` header or in the Error field `debugIdentifier`. Request ID's are part of an effort to speed troubleshooting by facilitating communication between clients and our support team. 
Since we keep track of all request identifiers in our log files, you just need to inform the request's identifier when asking us to assist you with problems that might come up during your integrations. ## Customer Account Payloads Client applications need to be able to send the customers' account details when generating payments, initiating unreferenced refunds and registering secure credentials. This information is expected in the form of payloads which varies based on the mechanism used to capture the account/card details. For instance, when the card details are manually inputted, a `KEYED` payload is expected. However, an `EMV` payload is always expected for contact and contactless EMV transactions. It is worth mentioning that the proper use of payloads also depend on the channel over which your terminal is operating. In the table below we show the supported payloads for each of the three available channels: Channel | Supported Payloads ---------------------------- | ------------------------- WEB (eCommerce) | `KEYED`, `SECURE_CREDENTIALS`, `DIGITAL_WALLET` POS (Cardholder Present) | `KEYED`, `EMV`, `MAG_STRIPE` MOTO (Mail/Telephone Order) | `KEYED`, `SECURE_CREDENTIALS` ## Request Headers HTTP Header | Description ------------ | ------------- [Accept](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Accept) | The response format expected by your application.<br />The Merchant API only produces `application/json` response format. [Accept-Language](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Accept-Language) | It advertises which languages the client is able to understand, and which locale variant is preferred.<br />The Merchant API fully supports English `en` and French `fr` languages. [Content-Type](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Type) | The body format of the request your application is sending to the API.<br />The Merchant API only consumes `application/json` content type. 
[Authorization](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Authorization) | It must contain contain the credentials (API Key or JWT Access Token) to authenticate your application.<br />The API will issue a `401 Unauthorized` response with the `WWW-Authenticate` header attached if your application fails to use this header properly. ## Partial Updates Partial update requests are signaled with the HTTP method `PATCH`. To perform partial updates, clients must specify only the properties that have changed. **Note:** To clear the content of a property, supply an empty value. ## Testing Requests Eventually it will be necessary to perform some transactions. For resources such as testing credit cards and simulated responses, see [Testing Resources](https://docs.worldnettps.com/doku.php?id=developer:integration_docs:testing-guide#testing_resources). # API Responses Client applications must be able to handle JSON body format as well as a range of [HTTP status codes](https://developer.mozilla.org/en-US/docs/Web/HTTP/Status) when processing responses. Some resources might also include contextual hypermedia links. We strongly recommend that clients use these links to request more information or perform additional actions on a given resource. ## HTTP Status Codes The Merchant API has adopted a comprehensive range of status codes where `2XX` statuses are returned for successful requests and `4XX` or `5XX` for failed requests. The full range of status codes supported by this API: HTTP Status Code | Description ----------------- | ------------- [200 OK](https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/200) | Indicates that the request has succeeded. [201 Created](https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/201) | Indicates that the request has succeeded and has led to the creation of a resource. 
[204 No Content](https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/204) | Indicates that the server successfully executed the method but returns no response body.<br />This status is sent especifically to respond to `DELETE` requests. [400 Bad Request](https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/400) | Indicates that the server cannot or will not process the request due to malformed request syntax or schema violation. [401 Unauthorized](https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/401) | Indicates that the request has not been applied because it lacks valid authentication credentials.<br />This status is sent with a `WWW-Authenticate` header that contains information on how to authorize correctly. [403 Forbidden](https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/403) | Indicates that the server understood the request but refuses to authorize it due to the lack of permissions.<br />Re-authenticating will make no difference until the proper permissions and terminals are added to the API Key. [404 Not Found](https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/404) | Indicates that the server cannot find the requested resource. [405 Method Not Allowed](https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/405) | Indicates that the request method is known by the server but is not supported by the target resource. [406 Not Acceptable](https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/406) | Indicates that the server cannot produce a response matching the value from `Accept` header. [415 Unsupported Media Type](https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/415) | Indicates that the server refuses to accept the request because the payload format described by the `Content-Type` is unsupported. 
[422 Unprocessable Entity](https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/422) | Indicates that the server understands the content type of the request entity, and the syntax of the request entity is correct, but it was unable to proceed due to semantic errors or failed business validations. [500 Internal Server Error](https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/500) | Indicates that the server encountered an unexpected condition that prevented it from fulfilling the request. [501 Not Implemented](https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/501) | Indicates that the server does not yet support the functionality required to fulfill the request, but might in the future. ## Error Handling In the event of a failure, the Merchant API returns an error response body that includes additional details in the format below: ``` { \"debugIdentifier\": \"ae6d75eb-381b-4a01-9f49-fdff12e3848b\", \"details\": [ { \"errorCode\": \"X_400_002\", \"errorMessage\": \"Unable to deserialize value\", \"source\": { \"location\": \"BODY\", \"resource\": \"TipType\", \"property\": \"type\", \"value\": \"VARIABLE\", \"expected\": \"Acceptable values: [PERCENTAGE, FIXED_AMOUNT]\" } } ] } ``` Error messages are intented to help developers to fix any problems that may come up during integration.<br />However, if you ever have a hard time troubleshooting an issue or even wish to make a suggestion, do not hesitate to [contact us](https://worldnetpayments.com/contact/). Do not forget to send us the `debugIdentifier` along with your inquiries. ## HATEOAS (Hypermedia Links) [HATEOAS](https://en.wikipedia.org/wiki/HATEOAS) is a powerful mechanism when it comes to enabling self-discoverability, reducing invalid state transition calls and protecting your application against unexpected changes on resources URL's. 
This snippet from a sample `payments` response shows the list of hypermedia controls that represent the operations available for the newly created payment resource. ``` \"links\": [ { \"rel\": \"capture\", \"method\": \"PATCH\" \"href\": \"https://testpayments.worldnettps.com/merchant/api/v1/transaction/payments/GH2AERQEJS/capture\" }, { \"rel\": \"refund\", \"method\": \"POST\" \"href\": \"https://testpayments.worldnettps.com/merchant/api/v1/transaction/payments/GH2AERQEJS/refunds\" }, { \"rel\": \"update\", \"method\": \"PATCH\" \"href\": \"https://testpayments.worldnettps.com/merchant/api/v1/transaction/payments/GH2AERQEJS\" }, { \"rel\": \"self\", \"method\": \"GET\" \"href\": \"https://testpayments.worldnettps.com/merchant/api/v1/transaction/payments/GH2AERQEJS\" }, { \"rel\": \"reverse\", \"method\": \"DELETE\" \"href\": \"https://testpayments.worldnettps.com/merchant/api/v1/transaction/payments/GH2AERQEJS\" } ] ``` # Pagination The Merchant API features a cursor-based pagination which is sometimes referred as continuation token pagination. This pagination approach works by returning a pointer to a specific item in the dataset. On subsequent requests, the server returns results after the given pointer. Clients don't need to worry about implementing complex pagination mechanism in their applications as we return, for all paginated resources, the total count and a hypermedia link that can be used to load more results. It is important to mention that the response containing the last elements will not contain a `next` hyperlink. We do that so you know that there is no more elements to load. 
``` \"links\": [ { \"rel\": \"next\", \"method\": \"GET\" \"href\": \"https://testpayments.worldnettps.com/merchant/api/v1/account/terminals?next=CWY4XRGUUY\" } ] ``` The default number of elements per page is `10` and the maximum is `100`, but it can be changed by adding the query parameter `pageSize` to requests as follows: ``` curl https://testpayments.worldnettps.com/merchant/api/v1/account/terminals?pageSize=5 \\ -H \"Content-Type: application/json\" \\ -H \"Authorization: Bearer <JWT Token>\" ``` **Note:** For performance reasons, the elements inside of a paginated list only represent a compact version of the resource listed. To retrieve the full version of a given resource, client applications must make a subsequent request using the proper hypermedia link. # Versioning Versioning ensures that changes are backward compatible. The Merchant API uses a major and minor version nomenclature to manage changes. ## Major Versions Major version numbers will reflect in the REST URL, for example `/api/v1/transaction/payments`. Currently, **v1** is the only supported major version. ## Minor Versions Minor and backward-compatible changes will be advertised via `X-API-Version` response header, for example `X-API-Version: 2020-01-01`. Developers should use this header to keep track of new features and optimizations that might benefit their applications. # noqa: E501
OpenAPI spec version: v1
Contact: support@worldnettps.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class EmvCertificate(object):
    """Swagger model holding an EMV certificate entry (rid/exponent/certificate).

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    # Attribute name -> declared swagger type.
    swagger_types = {
        'rid': 'str',
        'exponent': 'str',
        'certificate': 'str'
    }
    # Attribute name -> JSON key in the API definition.
    attribute_map = {
        'rid': 'rid',
        'exponent': 'exponent',
        'certificate': 'certificate'
    }

    def __init__(self, rid=None, exponent=None, certificate=None):  # noqa: E501
        """EmvCertificate - a model defined in Swagger.

        :param rid: optional initial value for `rid`.
        :param exponent: optional initial value for `exponent`.
        :param certificate: optional initial value for `certificate`.
        """
        self._rid = None
        self._exponent = None
        self._certificate = None
        self.discriminator = None
        # Route supplied values through the property setters; None arguments
        # leave the backing fields at their None defaults.
        if rid is not None:
            self.rid = rid
        if exponent is not None:
            self.exponent = exponent
        if certificate is not None:
            self.certificate = certificate

    @property
    def rid(self):
        """str: the rid of this EmvCertificate."""
        return self._rid

    @rid.setter
    def rid(self, rid):
        """Set the rid of this EmvCertificate."""
        self._rid = rid

    @property
    def exponent(self):
        """str: the exponent of this EmvCertificate."""
        return self._exponent

    @exponent.setter
    def exponent(self, exponent):
        """Set the exponent of this EmvCertificate."""
        self._exponent = exponent

    @property
    def certificate(self):
        """str: the certificate of this EmvCertificate."""
        return self._certificate

    @certificate.setter
    def certificate(self, certificate):
        """Set the certificate of this EmvCertificate."""
        self._certificate = certificate

    def to_dict(self):
        """Return the model properties as a dict, serializing nested models."""
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [item.to_dict() if hasattr(item, "to_dict") else item
                                for item in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {key: (val.to_dict() if hasattr(val, "to_dict") else val)
                                for key, val in value.items()}
            else:
                result[attr] = value
        # Mirror dict contents when a generated model subclasses dict.
        if issubclass(EmvCertificate, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Return the pprint-formatted string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Return True when *other* is an EmvCertificate with identical state."""
        return isinstance(other, EmvCertificate) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Return True when the two objects are not equal."""
        return not (self == other)
|
[
"icorso@yandex.ru"
] |
icorso@yandex.ru
|
e383e3821b0b120b2412f93c06276104fd24052e
|
dbde9338e87117397c2a7c8969df614f4dd4eacc
|
/test/ux/components/graph/__init__.py
|
192fbd7f73a024e214c34cf121c00a7286ef8e2d
|
[
"Apache-2.0",
"MIT",
"Intel"
] |
permissive
|
leonardozcm/neural-compressor
|
9f83551007351e12df19e5fae3742696613067ad
|
4a49eae281792d987f858a27ac9f83dffe810f4b
|
refs/heads/master
| 2023-08-16T17:18:28.867898
| 2021-09-03T06:44:25
| 2021-09-03T06:54:30
| 407,043,747
| 0
| 0
|
Apache-2.0
| 2021-09-16T07:57:10
| 2021-09-16T06:12:32
| null |
UTF-8
|
Python
| false
| false
| 666
|
py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The ux package contains test for UX graph component."""
|
[
"tomasz.tybulewicz@intel.com"
] |
tomasz.tybulewicz@intel.com
|
bd56773bd22825306830983a2fd477c334b7bd1e
|
4b179a518fdfc05bbca5033607691c070c21f2dc
|
/itembased.py
|
af2c8201572c551d55ff83c3e52d826731ad5601
|
[] |
no_license
|
evanj354/Netflix-Recommendation-System
|
22501c6a1e20ae577539a4a34253a66b6934e881
|
4bdbe5d97b589b81b15397a96cc3c24a9e763a7d
|
refs/heads/master
| 2020-08-01T06:30:10.122375
| 2019-09-26T01:45:36
| 2019-09-26T01:45:36
| 210,899,685
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,998
|
py
|
import math
import sys
from scipy import spatial
import statistics
import numpy as np
from operator import itemgetter
# Per-experiment input/output file name pairs (5, 10 and 20 test files).
inputs = ['test5.txt', 'test10.txt', 'test20.txt']
outputs = ['item_result5.txt', 'item_result10.txt', 'item_result20.txt']
# Training matrix file, opened at import time (read by read_train()).
train = open('train.txt','r')
# Per-movie rating counts; start at 1, presumably to avoid log(200/0) in
# iuf() for never-rated movies -- confirm.
numRatings = [1]*1000
test_users = []
train_users = []
mean_trains_iuf = []
mean_trains = []
# (Old object-based row representations, superseded by the plain lists that
# read_train() appends to train_users.)
# class Users:
#     def __init__(self, id, movie, rating):
#         self.movies = []
#         self.id = id
#         self.movies.append((movie, rating))
# class Train_Users:
#     def __init__(self, id, rating):
#         self.id = id
#         self.movies = rating
def write(text):
    """Write the first three fields of *text*, space separated and newline
    terminated, to the module-level `output` file handle."""
    line = " ".join(str(field) for field in (text[0], text[1], text[2]))
    output.write(line + "\n")
def read_train():
    """Parse the global `train` file (one tab-separated row of int ratings
    per user) and populate the module-level training structures.

    Side effects on module globals:
      * `train_users`  - one list of int ratings appended per user row.
      * `numRatings`   - incremented at each index where a rating is non-zero.
      * `mean_trains`  - per-user mean rating appended (zeros included).

    Fixes: removed the unused `sum`/`count`/`mean` locals (`sum` also
    shadowed the builtin) and the builtin-shadowing loop variable `id`.
    """
    for text in train.readlines():
        ratings = list(map(int, text.strip().split("\t")))
        train_users.append(ratings)
        for movie_index, rating in enumerate(ratings):
            if rating != 0:
                numRatings[movie_index] += 1
        # NOTE(review): the mean includes unrated (zero) entries -- confirm
        # that is the intended definition of a user's mean rating.
        mean_trains.append(statistics.mean(ratings))
def compute_euclidean(vector_train, vector_test):
    """Euclidean distance between two equal-length numeric vectors."""
    squared_gaps = [(a - b) ** 2 for a, b in zip(vector_train, vector_test)]
    return math.sqrt(sum(squared_gaps))
def num_ratings(movieID):
    """Count how many training users rated movie *movieID* (1-based).

    Bug fix: `train_users` rows are plain lists of int ratings (populated by
    read_train), but this function indexed `each.movies[...]`, which would
    raise AttributeError on a list.  Index the row directly instead.
    """
    count = 0
    for user_ratings in train_users:
        if user_ratings[movieID - 1] != 0:
            count += 1
    return count
def iuf(vector, movieIDs):
    """Scale each rating by its movie's inverse-user-frequency weight.

    weight = log(200 / numRatings[movie]).  The constant 200 is
    presumably the number of training users -- TODO confirm.

    Args:
        vector: ratings, parallel to movieIDs.
        movieIDs: 1-based movie ids.

    Returns:
        list of IUF-weighted ratings.
    """
    new_vector = []
    for rating, ID in zip(vector, movieIDs):
        # numRatings is seeded at 1, so this never divides by zero.
        num_ratings = numRatings[ID-1]
        iuf_value = math.log(200/num_ratings)
        new_vector.append(rating*iuf_value)
    return new_vector
def get_column(movieID):
    """Return the ratings column for one movie across all training users.

    Args:
        movieID: 0-based movie index (callers pass movieID-1).

    Returns:
        list of one rating per user, in train_users order.
    """
    # Iterate over however many users were actually loaded instead of the
    # original hard-coded range(200), so smaller/larger training sets work.
    return [user_ratings[movieID] for user_ratings in train_users]
def compute_adj_cosine(movieTrainID, movieToFind, rating):
    """Adjusted-cosine similarity between two movies (0-based ids).

    Each user's ratings are centred on that user's overall mean, and
    only users who rated BOTH movies contribute.

    Returns:
        The string "0" (a sentinel checked by build_vectors) when no
        user co-rated the two movies; otherwise the tuple
        (similarity, movieTrainID, rating).
    """
    train_vector = get_column(movieTrainID)
    test_vector = get_column(movieToFind)
    ntrain_vector = []
    ntest_vector = []
    for userID in range(len(train_vector)):
        # Keep only co-raters; centre each rating on the user's mean.
        if(train_vector[userID] != 0 and test_vector[userID] != 0):
            adj_train = train_vector[userID] - mean_trains[userID]
            adj_test = test_vector[userID] - mean_trains[userID]
            ntrain_vector.append(adj_train)
            ntest_vector.append(adj_test)
    if(len(ntrain_vector) == 0):
        return "0"
    else:
        # (leftover experiments, kept as-is)
        # user_mean = mean_trains[movieTrainID]
        # test_mean = statistics.mean(ntest_vector)
        # ntrain_vector[:] = [x - user_mean for x in ntrain_vector]
        # ntest_vector[:] = [y - user_mean for y in ntest_vector]
        # train_vector[:] = [x - mean_train for x in iuf_train]
        # test_vector[:] = [y - mean_test for y in test_vector]
        # scipy's cosine() is a distance, so 1 - distance is similarity;
        # it can be NaN when a centred vector is all zeros.
        a = 1.0 - (spatial.distance.cosine(ntrain_vector, ntest_vector))
        return (a, movieTrainID, rating)
def build_vectors(user, movieToFind):
    """Score every movie the user rated against the target movie.

    Args:
        user: list of (movieID, rating) pairs, 1-based movie ids.
        movieToFind: 1-based id of the movie whose rating is predicted.

    Returns:
        list of (similarity, movieID-1, rating) tuples; entries with the
        "0" sentinel or NaN similarity are dropped.
    """
    scores = []
    # train_vector/test_vector/movieIDs are leftover scaffolding.
    train_vector = []
    test_vector = []
    movieIDs = []
    for movieID, rating in user:
        score = compute_adj_cosine(movieID-1, movieToFind-1, rating)
        # Skip pairs with no co-raters ("0" sentinel) or NaN similarity.
        if(score != "0" and math.isnan(score[0]) != True):
            scores.append(score)
    # (commented-out user-based variant, kept as-is)
    # mean_test = sum_test/count_test
    # if(len(train_vector) == 0):
    # score = 0.0
    # else:
    # score = compute_pearson(train_vector, test_vector, mean_trains_iuf[userID], mean_test, movieIDs)
    # if(math.isnan(score)):
    # scores.append((userID+1, 0.0, train_rating, mean_trains[userID], mean_test))
    # else:
    # scores.append((userID+1, score, train_rating, mean_trains[userID], mean_test))
    # scores.append(score)
    # print(score)
    return scores
def sort_abs(a):
    """Sort key: the magnitude of a tuple's first element."""
    magnitude = a[0]
    return -magnitude if magnitude < 0 else magnitude
def get_k(movieToFind, scores):
    """Return (up to) the 5 neighbours with the largest |similarity|.

    `movieToFind` is unused; it is kept for the caller's signature.

    Args:
        scores: list of (similarity, movieID, rating) tuples.

    Returns:
        The top-5 tuples, ordered by descending absolute similarity.
    """
    ranked = sorted(scores, reverse=True, key=sort_abs)
    return ranked[:5]
def getWeight(k_nearest, movieToFind):
    """Squared-weight vote over the k nearest neighbours.

    Computes sum(w**2 * rating) / sum(|w|); returns 0.0 when the
    denominator is zero.  `movieToFind` is unused.
    """
    numerator = 0.0
    denominator = 0.0
    for weight, _movie_id, rating in k_nearest:
        numerator += (weight ** 2) * int(rating)
        denominator += abs(weight)
    if denominator == 0.0:
        return 0.0
    return numerator / denominator
def read_test():
    """Stream the global `test` file and predict every 0-rated entry.

    Assumes lines are grouped by user id ("user movie rating", space
    separated).  Known ratings are buffered per user; each rating of 0
    triggers an item-based prediction, which is written to the global
    `output` file via write().
    """
    prev_userID = 0
    user = []  # (movieID, rating) pairs seen so far for the current user
    for text in test.readlines():
        # text = text.strip().split('\n')
        # line = text
        text = text.strip().split(" ")
        text = list(map(int, text)) #change text to list of ints
        userID = text[0];
        if(userID == prev_userID):
            if(text[2] != 0): #users rating
                pair = (text[1], text[2]) #movie id, rating
                user.append(pair)
            else: #movie hasn't been rated
                movieToFind = text[1] #movie id of which rating to guess
                scores = []
                scores = build_vectors(user, movieToFind)
                # scores.append( (score, train.id) )
                # print(str(scores) + "\n\n\n")
                # print("done scores")
                k_nearest = get_k(movieToFind, scores)
                # print("KNEAREST \n " + str(k_nearest) + "\n\n")
                new_rating = getWeight(k_nearest, movieToFind)
                # print("NEW WEIGHTS \n " + str(new_weights) + "\n\n")
                # print(new_rating)
                # if(math.isnan(new_rating)): new_rating = 3
                # Round, then clamp into the 1..5 scale; 0 means "no
                # signal" and falls back to the neutral rating 3.
                new_rating = int(round(new_rating))
                if(new_rating == 0):
                    new_rating = 3
                if(new_rating > 5):
                    new_rating = 5
                elif(new_rating < 0):
                    new_rating = 1
                text[2] = new_rating
                write(text)
        elif(userID != prev_userID):
            # print("NEW USER")
            # A new user's block begins: reset the ratings buffer.
            # NOTE(review): if a new user's FIRST line has rating 0 it is
            # buffered as a known rating of 0 instead of being predicted
            # -- confirm the input never starts a user with a query line.
            user = []
            pair = (text[1], text[2])
            user.append(pair)
            # print(user.id + " " + str(user.movies) + "\n")
            prev_userID = userID
def print_train():
    """Debug helper: print the rating-vector length of the second user."""
    # (earlier debug loops kept as comments)
    # for i in range(200):
    # for j in range(1000):
    # for i in range(10):
    print(len(train_users[1]))
    # print(total_users[1].id)
    # for user in total_users:
    # print(user.id + " " + str(len(user.movies)))
# Driver: build the training model once, then produce one prediction file
# per test input.
#
# Fixes two defects in the original loop:
#   * read_train() ran on every iteration, so train_users / numRatings /
#     mean_trains accumulated duplicate entries after the first pass;
#   * train.close() ran inside the loop, so the second iteration's
#     read_train() tried to read an already-closed file.
read_train()
train.close()
for inp, out in zip(inputs, outputs):
    test = open(inp, 'r')
    output = open(out, 'w')
    # print_train()
    # item_based()
    read_test()
    # print(train_users[0].movies)
    test.close()
    output.close()
|
[
"eejohnson@scu.edu"
] |
eejohnson@scu.edu
|
bf40419ee28d85261d37c37665164d3cd05beebd
|
a341e1a3dcf8225c9211bdb9ba78fef5b046db24
|
/Day9_KnotHash/KnotHashB.py
|
883267f597dc265e2193c133213b3eb7b9126df7
|
[] |
no_license
|
Sam-Hart/AdventOfCode2017
|
2b01d244c9b8412e198ee990b154015814cf2e65
|
5e24dacc439166f04e5f1b49924c409eab55bd47
|
refs/heads/master
| 2021-09-06T21:37:30.682987
| 2018-02-11T23:07:51
| 2018-02-11T23:07:51
| 113,002,435
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,565
|
py
|
import os
import sys
def calculate_sparse_hash(ascii_codes):
    """Run 64 rounds of the knot-twisting pass and return the sparse hash.

    Args:
        ascii_codes: sequence of input lengths (ASCII codes of the key).
            The caller's list is no longer mutated: the standard suffix
            [17, 31, 73, 47, 23] is appended to a private copy (the
            original extended the argument in place with `+=`).

    Returns:
        list[int]: a permutation of 0..255 (the sparse hash).
    """
    lengths = list(ascii_codes) + [17, 31, 73, 47, 23]
    number_ring = [i for i in range(0, 256)]
    ring_position = 0
    skip_size = 0
    # Perform the twisting operation 64 times, carrying ring_position and
    # skip_size across rounds (Advent of Code 2017 day 10, part two).
    for _ in range(0, 64):
        for length in lengths:
            # Indices of the span to reverse, wrapped around the ring.
            span = [i % len(number_ring)
                    for i in range(ring_position, ring_position + length)]
            ring_position = (ring_position + length + skip_size) \
                % (len(number_ring))
            # Reverse the span in place by swapping ends toward the middle.
            for k in range(0, len(span) // 2):
                former_index = span[k]
                latter_index = span[-(k + 1)]
                number_ring[former_index], number_ring[latter_index] = \
                    number_ring[latter_index], number_ring[former_index]
            skip_size += 1
    return number_ring
def calculate_dense_hash(number_ring):
    """Collapse a sparse hash into its dense hash.

    XORs each consecutive block of 16 numbers down to a single value, so
    a 256-entry sparse hash yields 16 values (the Knot Hash definition).

    Fixes two defects in the original:
      * the running XOR was appended after EVERY element instead of once
        per 16-element block, making the hash 16x too long;
      * the input list was consumed via `del` as a side effect -- it is
        now left untouched.

    Args:
        number_ring: list of ints; length should be a multiple of 16.

    Returns:
        list[int]: one XOR-reduced value per 16-element block.
    """
    dense = []
    for start in range(0, len(number_ring), 16):
        block_xor = 0
        for value in number_ring[start:start + 16]:
            block_xor ^= value
        dense.append(block_xor)
    return dense
def calculate_hash(clear_text):
    """Return the hexadecimal Knot Hash of `clear_text`."""
    codes = [ord(ch) for ch in clear_text]
    dense = calculate_dense_hash(calculate_sparse_hash(codes))
    # Two lowercase hex digits per dense value, concatenated.
    return ''.join('{0:02x}'.format(value) for value in dense)
if __name__ == '__main__':
    # Hash every line of the puzzle input next to this script and print
    # the results, one hash per line.
    data_file_name = os.path.join(os.path.dirname(sys.argv[0]), 'input.txt')
    with open(data_file_name, 'r') as data_file:
        # `with` closes the file; the original also called
        # data_file.close() redundantly inside the block.
        challenge_data = data_file.read()
    # The original copied split()'s list through an identity
    # comprehension; iterate it directly instead.
    for clear_text_input in challenge_data.split('\n'):
        hash_output = calculate_hash(clear_text_input)
        print(hash_output)
|
[
"sam@samhart.me"
] |
sam@samhart.me
|
4c3a43a3ae4f589fcd7fe42f0ff9e6a3e9fbaf13
|
ff18e8408da80bfd4fe36e4645a1fb60d690e337
|
/pid.py
|
7960237301151390501eeb4172766d840f989551
|
[] |
no_license
|
uncodead/biabrewex-micropython
|
0ea53929b3ffba7628346e8213014d07e3b01577
|
6aca0c83a7fb30e4513e85f24e1731df73896629
|
refs/heads/master
| 2020-04-09T15:11:42.527301
| 2018-12-05T10:50:07
| 2018-12-05T10:50:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,137
|
py
|
from time import time
import logging
# Based on Arduino PID Library
# See https://github.com/br3ttb/Arduino-PID-Library
class PIDArduino(object):
    """A proportional-integral-derivative controller.

    Port of the Arduino PID Library
    (see https://github.com/br3ttb/Arduino-PID-Library).

    Args:
        sampletime (float): The interval between calc() calls.
        kp (float): Proportional coefficient.
        ki (float): Integral coefficient.
        kd (float): Derivative coefficient.
        out_min (float): Lower output limit.
        out_max (float): Upper output limit.
        time (function): A function which returns the current time in seconds.
    """
    def __init__(self, sampletime, kp, ki, kd, out_min=float('-inf'),
                 out_max=float('inf'), time=time):
        # Validate gains first, then timing/limits -- same check order as
        # before, so the same error surfaces first.
        for gain_name, gain in (('kp', kp), ('ki', ki), ('kd', kd)):
            if gain is None:
                raise ValueError('%s must be specified' % gain_name)
        if sampletime <= 0:
            raise ValueError('sampletime must be greater than 0')
        if out_min >= out_max:
            raise ValueError('out_min must be less than out_max')
        self._logger = logging.getLogger(type(self).__name__)
        # Pre-scale the integral/derivative gains by the sample time so
        # calc() can apply them directly.
        self._Kp = kp
        self._Ki = ki * sampletime
        self._Kd = kd / sampletime
        self._sampletime = sampletime * 1000  # milliseconds
        self._out_min = out_min
        self._out_max = out_max
        self._integral = 0
        self._last_input = 0
        self._last_output = 0
        self._last_calc_timestamp = 0
        self._time = time

    def calc(self, input_val, setpoint):
        """Adjusts and holds the given setpoint.

        Args:
            input_val (float): The input value.
            setpoint (float): The target value.

        Returns:
            A value between `out_min` and `out_max`.
        """
        now = self._time() * 1000
        # Hold the previous output until a full sample period has passed.
        if (now - self._last_calc_timestamp) < self._sampletime:
            return self._last_output

        err = setpoint - input_val
        input_diff = input_val - self._last_input

        # Anti-windup: only accumulate the integral term while the output
        # is not pinned at either limit, and keep it inside the limits.
        if self._out_min < self._last_output < self._out_max:
            self._integral = max(self._out_min,
                                 min(self._integral + self._Ki * err,
                                     self._out_max))

        p_term = self._Kp * err
        i_term = self._integral
        d_term = -(self._Kd * input_diff)

        # Combine and clamp the PID output.
        output = max(self._out_min, min(p_term + i_term + d_term, self._out_max))
        self._last_output = output

        self._logger.debug('P: {0}'.format(p_term))
        self._logger.debug('I: {0}'.format(i_term))
        self._logger.debug('D: {0}'.format(d_term))
        self._logger.debug('output: {0}'.format(output))

        # Remember state for the next sample.
        self._last_input = input_val
        self._last_calc_timestamp = now
        return output
|
[
"uncodead@gmail.com"
] |
uncodead@gmail.com
|
9f581f91b0bbbef006042ed4256ddd73d291a0d9
|
8fbf7054bc8676eb6754e80ead566ac10277af76
|
/desafio/desafio076.py
|
7ed84fa43cdb12fcf3e2e8326729ba4c88cb3da6
|
[
"MIT"
] |
permissive
|
henriquekirchheck/Curso-em-Video-Python
|
5eb4c97ed6320fcd100030bda718de732430244a
|
1a29f68515313af85c8683f626ba35f8fcdd10e7
|
refs/heads/main
| 2023-06-06T16:25:48.018420
| 2021-07-04T17:46:28
| 2021-07-04T17:46:28
| 379,697,706
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 583
|
py
|
# Challenge: build a tuple of product names and a parallel tuple of their
# prices, then print a tabular price listing.
product = ('Lápis', 'Borracha', 'Caderno', 'Estojo', 'Transferidor', 'Compasso', 'Mochila', 'Canetas', 'Livro', 'Computador')
prices = ('1.75', '2.00', '15.90', '25.00', '4.20', '9.99', '120.32', '22.30', '34.90', '10000.00')
print('-' * 30)
print('Listagem de preços'.center(30))
print('-' * 30)
# zip() walks both tuples in lockstep -- clearer than the original
# index-based loop `for x in range(0, len(product))`.
for item, price in zip(product, prices):
    print(item.ljust(18), f'R$ {price.rjust(8)}')
|
[
"86362827+henriquekirchheck@users.noreply.github.com"
] |
86362827+henriquekirchheck@users.noreply.github.com
|
23e1fd48376d16e55529e81407fa1c5a97a646a1
|
20ace38b89c0ebaa0738753fcd11b0fdd4ed21cd
|
/CMSSW_8_0_24/src/HeavyIonsAnalysis/JetAnalysis/python/jets/akPuSoftDrop4PFJetSequence_pp_mc_cff.py
|
01731d69572ab31c9c6055da2a975c4490b8c326
|
[] |
no_license
|
ssanders50/pPb_2016_v0
|
3c32c2920067a2f8a0a7a7fadba6225babf9a905
|
9fc4ae61cf4343c88ce6666f55c0738f963754a3
|
refs/heads/master
| 2020-12-12T16:30:41.253014
| 2020-02-14T21:51:17
| 2020-02-14T21:51:17
| 234,162,163
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,434
|
py
|
import FWCore.ParameterSet.Config as cms
from HeavyIonsAnalysis.JetAnalysis.patHeavyIonSequences_cff import patJetGenJetMatch, patJetPartonMatch, patJetCorrFactors, patJets
from HeavyIonsAnalysis.JetAnalysis.inclusiveJetAnalyzer_cff import *
from HeavyIonsAnalysis.JetAnalysis.bTaggers_cff import *
from RecoJets.JetProducers.JetIDParams_cfi import *
from RecoJets.JetProducers.nJettinessAdder_cfi import Njettiness
# --- akPuSoftDrop4PF: gen matching, parton matching and JEC setup -------
# Generator-style CMSSW configuration: each statement clones a template
# module and points its InputTags at this jet collection.

# Match reconstructed jets to ak4 generator jets within dR < 0.4.
akPuSoftDrop4PFmatch = patJetGenJetMatch.clone(
    src = cms.InputTag("akPuSoftDrop4PFJets"),
    matched = cms.InputTag("ak4GenJets"),
    resolveByMatchQuality = cms.bool(False),
    maxDeltaR = 0.4
    )

# Same matching, but for the groomed (soft-drop) gen-jet collection.
akPuSoftDrop4PFmatchGroomed = patJetGenJetMatch.clone(
    src = cms.InputTag("akSoftDrop4GenJets"),
    matched = cms.InputTag("ak4GenJets"),
    resolveByMatchQuality = cms.bool(False),
    maxDeltaR = 0.4
    )

# NOTE(review): this parton-match clone is overwritten a few lines below
# by the clone that adds matched = "genParticles"; this first assignment
# is effectively dead.
akPuSoftDrop4PFparton = patJetPartonMatch.clone(src = cms.InputTag("akPuSoftDrop4PFJets")
    )

# Jet-energy-correction factors (L2Relative + L3Absolute, AKPu4PF payload).
akPuSoftDrop4PFcorr = patJetCorrFactors.clone(
    useNPV = cms.bool(False),
    useRho = cms.bool(False),
    # primaryVertices = cms.InputTag("hiSelectedVertex"),
    levels = cms.vstring('L2Relative','L3Absolute'),
    src = cms.InputTag("akPuSoftDrop4PFJets"),
    payload = "AKPu4PF_offline"
    )

# Jet ID computed from the matching calo-jet collection.
akPuSoftDrop4PFJetID= cms.EDProducer('JetIDProducer', JetIDParams, src = cms.InputTag('akPuSoftDrop4CaloJets'))

#akPuSoftDrop4PFclean = heavyIonCleanedGenJets.clone(src = cms.InputTag('ak4GenJets'))

# b-tagging helper bundle for this algorithm (cone radius R = 0.4).
akPuSoftDrop4PFbTagger = bTaggers("akPuSoftDrop4PF",0.4)

#create objects locally since they dont load properly otherwise
#akPuSoftDrop4PFmatch = akPuSoftDrop4PFbTagger.match

# Effective parton match: uses genParticles as the matched collection.
akPuSoftDrop4PFparton = patJetPartonMatch.clone(src = cms.InputTag("akPuSoftDrop4PFJets"), matched = cms.InputTag("genParticles"))
akPuSoftDrop4PFPatJetFlavourAssociationLegacy = akPuSoftDrop4PFbTagger.PatJetFlavourAssociationLegacy
akPuSoftDrop4PFPatJetPartons = akPuSoftDrop4PFbTagger.PatJetPartons
akPuSoftDrop4PFJetTracksAssociatorAtVertex = akPuSoftDrop4PFbTagger.JetTracksAssociatorAtVertex
akPuSoftDrop4PFJetTracksAssociatorAtVertex.tracks = cms.InputTag("highPurityTracks")
akPuSoftDrop4PFSimpleSecondaryVertexHighEffBJetTags = akPuSoftDrop4PFbTagger.SimpleSecondaryVertexHighEffBJetTags
akPuSoftDrop4PFSimpleSecondaryVertexHighPurBJetTags = akPuSoftDrop4PFbTagger.SimpleSecondaryVertexHighPurBJetTags
akPuSoftDrop4PFCombinedSecondaryVertexBJetTags = akPuSoftDrop4PFbTagger.CombinedSecondaryVertexBJetTags
akPuSoftDrop4PFCombinedSecondaryVertexV2BJetTags = akPuSoftDrop4PFbTagger.CombinedSecondaryVertexV2BJetTags
akPuSoftDrop4PFJetBProbabilityBJetTags = akPuSoftDrop4PFbTagger.JetBProbabilityBJetTags
akPuSoftDrop4PFSoftPFMuonByPtBJetTags = akPuSoftDrop4PFbTagger.SoftPFMuonByPtBJetTags
akPuSoftDrop4PFSoftPFMuonByIP3dBJetTags = akPuSoftDrop4PFbTagger.SoftPFMuonByIP3dBJetTags
akPuSoftDrop4PFTrackCountingHighEffBJetTags = akPuSoftDrop4PFbTagger.TrackCountingHighEffBJetTags
akPuSoftDrop4PFTrackCountingHighPurBJetTags = akPuSoftDrop4PFbTagger.TrackCountingHighPurBJetTags
akPuSoftDrop4PFPatJetPartonAssociationLegacy = akPuSoftDrop4PFbTagger.PatJetPartonAssociationLegacy
akPuSoftDrop4PFImpactParameterTagInfos = akPuSoftDrop4PFbTagger.ImpactParameterTagInfos
akPuSoftDrop4PFImpactParameterTagInfos.primaryVertex = cms.InputTag("offlinePrimaryVertices")
akPuSoftDrop4PFJetProbabilityBJetTags = akPuSoftDrop4PFbTagger.JetProbabilityBJetTags
akPuSoftDrop4PFSecondaryVertexTagInfos = akPuSoftDrop4PFbTagger.SecondaryVertexTagInfos
akPuSoftDrop4PFSimpleSecondaryVertexHighEffBJetTags = akPuSoftDrop4PFbTagger.SimpleSecondaryVertexHighEffBJetTags
akPuSoftDrop4PFSimpleSecondaryVertexHighPurBJetTags = akPuSoftDrop4PFbTagger.SimpleSecondaryVertexHighPurBJetTags
akPuSoftDrop4PFCombinedSecondaryVertexBJetTags = akPuSoftDrop4PFbTagger.CombinedSecondaryVertexBJetTags
akPuSoftDrop4PFCombinedSecondaryVertexV2BJetTags = akPuSoftDrop4PFbTagger.CombinedSecondaryVertexV2BJetTags
akPuSoftDrop4PFSecondaryVertexNegativeTagInfos = akPuSoftDrop4PFbTagger.SecondaryVertexNegativeTagInfos
akPuSoftDrop4PFNegativeSimpleSecondaryVertexHighEffBJetTags = akPuSoftDrop4PFbTagger.NegativeSimpleSecondaryVertexHighEffBJetTags
akPuSoftDrop4PFNegativeSimpleSecondaryVertexHighPurBJetTags = akPuSoftDrop4PFbTagger.NegativeSimpleSecondaryVertexHighPurBJetTags
akPuSoftDrop4PFNegativeCombinedSecondaryVertexBJetTags = akPuSoftDrop4PFbTagger.NegativeCombinedSecondaryVertexBJetTags
akPuSoftDrop4PFPositiveCombinedSecondaryVertexBJetTags = akPuSoftDrop4PFbTagger.PositiveCombinedSecondaryVertexBJetTags
akPuSoftDrop4PFNegativeCombinedSecondaryVertexV2BJetTags = akPuSoftDrop4PFbTagger.NegativeCombinedSecondaryVertexV2BJetTags
akPuSoftDrop4PFPositiveCombinedSecondaryVertexV2BJetTags = akPuSoftDrop4PFbTagger.PositiveCombinedSecondaryVertexV2BJetTags
akPuSoftDrop4PFSoftPFMuonsTagInfos = akPuSoftDrop4PFbTagger.SoftPFMuonsTagInfos
akPuSoftDrop4PFSoftPFMuonsTagInfos.primaryVertex = cms.InputTag("offlinePrimaryVertices")
akPuSoftDrop4PFSoftPFMuonBJetTags = akPuSoftDrop4PFbTagger.SoftPFMuonBJetTags
akPuSoftDrop4PFSoftPFMuonByIP3dBJetTags = akPuSoftDrop4PFbTagger.SoftPFMuonByIP3dBJetTags
akPuSoftDrop4PFSoftPFMuonByPtBJetTags = akPuSoftDrop4PFbTagger.SoftPFMuonByPtBJetTags
akPuSoftDrop4PFNegativeSoftPFMuonByPtBJetTags = akPuSoftDrop4PFbTagger.NegativeSoftPFMuonByPtBJetTags
akPuSoftDrop4PFPositiveSoftPFMuonByPtBJetTags = akPuSoftDrop4PFbTagger.PositiveSoftPFMuonByPtBJetTags
akPuSoftDrop4PFPatJetFlavourIdLegacy = cms.Sequence(akPuSoftDrop4PFPatJetPartonAssociationLegacy*akPuSoftDrop4PFPatJetFlavourAssociationLegacy)
#Not working with our PU sub, but keep it here for reference
#akPuSoftDrop4PFPatJetFlavourAssociation = akPuSoftDrop4PFbTagger.PatJetFlavourAssociation
#akPuSoftDrop4PFPatJetFlavourId = cms.Sequence(akPuSoftDrop4PFPatJetPartons*akPuSoftDrop4PFPatJetFlavourAssociation)
akPuSoftDrop4PFJetBtaggingIP = cms.Sequence(akPuSoftDrop4PFImpactParameterTagInfos *
(akPuSoftDrop4PFTrackCountingHighEffBJetTags +
akPuSoftDrop4PFTrackCountingHighPurBJetTags +
akPuSoftDrop4PFJetProbabilityBJetTags +
akPuSoftDrop4PFJetBProbabilityBJetTags
)
)
akPuSoftDrop4PFJetBtaggingSV = cms.Sequence(akPuSoftDrop4PFImpactParameterTagInfos
*
akPuSoftDrop4PFSecondaryVertexTagInfos
* (akPuSoftDrop4PFSimpleSecondaryVertexHighEffBJetTags+
akPuSoftDrop4PFSimpleSecondaryVertexHighPurBJetTags+
akPuSoftDrop4PFCombinedSecondaryVertexBJetTags+
akPuSoftDrop4PFCombinedSecondaryVertexV2BJetTags
)
)
akPuSoftDrop4PFJetBtaggingNegSV = cms.Sequence(akPuSoftDrop4PFImpactParameterTagInfos
*
akPuSoftDrop4PFSecondaryVertexNegativeTagInfos
* (akPuSoftDrop4PFNegativeSimpleSecondaryVertexHighEffBJetTags+
akPuSoftDrop4PFNegativeSimpleSecondaryVertexHighPurBJetTags+
akPuSoftDrop4PFNegativeCombinedSecondaryVertexBJetTags+
akPuSoftDrop4PFPositiveCombinedSecondaryVertexBJetTags+
akPuSoftDrop4PFNegativeCombinedSecondaryVertexV2BJetTags+
akPuSoftDrop4PFPositiveCombinedSecondaryVertexV2BJetTags
)
)
akPuSoftDrop4PFJetBtaggingMu = cms.Sequence(akPuSoftDrop4PFSoftPFMuonsTagInfos * (akPuSoftDrop4PFSoftPFMuonBJetTags
+
akPuSoftDrop4PFSoftPFMuonByIP3dBJetTags
+
akPuSoftDrop4PFSoftPFMuonByPtBJetTags
+
akPuSoftDrop4PFNegativeSoftPFMuonByPtBJetTags
+
akPuSoftDrop4PFPositiveSoftPFMuonByPtBJetTags
)
)
akPuSoftDrop4PFJetBtagging = cms.Sequence(akPuSoftDrop4PFJetBtaggingIP
*akPuSoftDrop4PFJetBtaggingSV
*akPuSoftDrop4PFJetBtaggingNegSV
# *akPuSoftDrop4PFJetBtaggingMu
)
akPuSoftDrop4PFpatJetsWithBtagging = patJets.clone(jetSource = cms.InputTag("akPuSoftDrop4PFJets"),
genJetMatch = cms.InputTag("akPuSoftDrop4PFmatch"),
genPartonMatch = cms.InputTag("akPuSoftDrop4PFparton"),
jetCorrFactorsSource = cms.VInputTag(cms.InputTag("akPuSoftDrop4PFcorr")),
JetPartonMapSource = cms.InputTag("akPuSoftDrop4PFPatJetFlavourAssociationLegacy"),
JetFlavourInfoSource = cms.InputTag("akPuSoftDrop4PFPatJetFlavourAssociation"),
trackAssociationSource = cms.InputTag("akPuSoftDrop4PFJetTracksAssociatorAtVertex"),
useLegacyJetMCFlavour = True,
discriminatorSources = cms.VInputTag(cms.InputTag("akPuSoftDrop4PFSimpleSecondaryVertexHighEffBJetTags"),
cms.InputTag("akPuSoftDrop4PFSimpleSecondaryVertexHighPurBJetTags"),
cms.InputTag("akPuSoftDrop4PFCombinedSecondaryVertexBJetTags"),
cms.InputTag("akPuSoftDrop4PFCombinedSecondaryVertexV2BJetTags"),
cms.InputTag("akPuSoftDrop4PFJetBProbabilityBJetTags"),
cms.InputTag("akPuSoftDrop4PFJetProbabilityBJetTags"),
#cms.InputTag("akPuSoftDrop4PFSoftPFMuonByPtBJetTags"),
#cms.InputTag("akPuSoftDrop4PFSoftPFMuonByIP3dBJetTags"),
cms.InputTag("akPuSoftDrop4PFTrackCountingHighEffBJetTags"),
cms.InputTag("akPuSoftDrop4PFTrackCountingHighPurBJetTags"),
),
jetIDMap = cms.InputTag("akPuSoftDrop4PFJetID"),
addBTagInfo = True,
addTagInfos = True,
addDiscriminators = True,
addAssociatedTracks = True,
addJetCharge = False,
addJetID = False,
getJetMCFlavour = True,
addGenPartonMatch = True,
addGenJetMatch = True,
embedGenJetMatch = True,
embedGenPartonMatch = True,
# embedCaloTowers = False,
# embedPFCandidates = True
)
# N-subjettiness (tau1/tau2/tau3) with R0 matching the jet radius.
akPuSoftDrop4PFNjettiness = Njettiness.clone(
    src = cms.InputTag("akPuSoftDrop4PFJets"),
    R0 = cms.double( 0.4)
    )
# Attach the tau values as userFloats on the PAT jets produced above.
akPuSoftDrop4PFpatJetsWithBtagging.userData.userFloats.src += ['akPuSoftDrop4PFNjettiness:tau1','akPuSoftDrop4PFNjettiness:tau2','akPuSoftDrop4PFNjettiness:tau3']
akPuSoftDrop4PFJetAnalyzer = inclusiveJetAnalyzer.clone(jetTag = cms.InputTag("akPuSoftDrop4PFpatJetsWithBtagging"),
genjetTag = 'ak4GenJets',
rParam = 0.4,
matchJets = cms.untracked.bool(False),
matchTag = 'patJetsWithBtagging',
pfCandidateLabel = cms.untracked.InputTag('particleFlow'),
trackTag = cms.InputTag("generalTracks"),
fillGenJets = True,
isMC = True,
doSubEvent = True,
useHepMC = cms.untracked.bool(False),
genParticles = cms.untracked.InputTag("genParticles"),
eventInfoTag = cms.InputTag("generator"),
doLifeTimeTagging = cms.untracked.bool(True),
doLifeTimeTaggingExtras = cms.untracked.bool(False),
bTagJetName = cms.untracked.string("akPuSoftDrop4PF"),
jetName = cms.untracked.string("akPuSoftDrop4PF"),
genPtMin = cms.untracked.double(5),
hltTrgResults = cms.untracked.string('TriggerResults::'+'HISIGNAL'),
doTower = cms.untracked.bool(False),
doSubJets = cms.untracked.bool(True),
doGenSubJets = cms.untracked.bool(True),
subjetGenTag = cms.untracked.InputTag("akSoftDrop4GenJets"),
doGenTaus = True
)
# --- Execution sequences ------------------------------------------------
# MC chain: gen matching -> parton matching -> JEC -> legacy flavour ID ->
# track association -> b-tagging -> N-subjettiness -> PAT jets -> analyzer.
akPuSoftDrop4PFJetSequence_mc = cms.Sequence(
    #akPuSoftDrop4PFclean
    #*
    akPuSoftDrop4PFmatch
    #*
    #akPuSoftDrop4PFmatchGroomed
    *
    akPuSoftDrop4PFparton
    *
    akPuSoftDrop4PFcorr
    *
    #akPuSoftDrop4PFJetID
    #*
    akPuSoftDrop4PFPatJetFlavourIdLegacy
    #*
    #akPuSoftDrop4PFPatJetFlavourId # Use legacy algo till PU implemented
    *
    akPuSoftDrop4PFJetTracksAssociatorAtVertex
    *
    akPuSoftDrop4PFJetBtagging
    *
    akPuSoftDrop4PFNjettiness #No constituents for calo jets in pp. Must be removed for pp calo jets but I'm not sure how to do this transparently (Marta)
    *
    akPuSoftDrop4PFpatJetsWithBtagging
    *
    akPuSoftDrop4PFJetAnalyzer
    )

# Data chain: same as MC but without the gen-level matching/flavour steps.
akPuSoftDrop4PFJetSequence_data = cms.Sequence(akPuSoftDrop4PFcorr
    *
    #akPuSoftDrop4PFJetID
    #*
    akPuSoftDrop4PFJetTracksAssociatorAtVertex
    *
    akPuSoftDrop4PFJetBtagging
    *
    akPuSoftDrop4PFNjettiness
    *
    akPuSoftDrop4PFpatJetsWithBtagging
    *
    akPuSoftDrop4PFJetAnalyzer
    )

# JEC and minimum-bias variants are aliases of the MC sequence.
akPuSoftDrop4PFJetSequence_jec = cms.Sequence(akPuSoftDrop4PFJetSequence_mc)
akPuSoftDrop4PFJetSequence_mb = cms.Sequence(akPuSoftDrop4PFJetSequence_mc)

# Default sequence imported by users of this cff file.
akPuSoftDrop4PFJetSequence = cms.Sequence(akPuSoftDrop4PFJetSequence_mc)
|
[
"ssanders@ku.edu"
] |
ssanders@ku.edu
|
2deacb02106665e9fdd4ebd3606e12b06c4c6ebf
|
1c6db771456c0ad1c09d7aebf8c202cdd3f20cb8
|
/src/test/test_parser.py
|
db4c2f0520407da4de356585d4dd928b2d41150b
|
[] |
no_license
|
dicebattle/DynamicCrawler
|
e964badf57d127e0d0b536e92c5810c9184aad93
|
3c9e0b490c44280ffb61e598b14761d237de2334
|
refs/heads/master
| 2021-01-23T01:08:32.730729
| 2017-06-09T17:43:07
| 2017-06-09T17:43:07
| 85,880,316
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,905
|
py
|
from context.TestRuntimeContext import TestRuntimeContext
from task.task import Task
from task.task_builder import parse_object
from newspaper import *
import re
import yaml
ctx = TestRuntimeContext()
class DummyTask(Task):
    # Minimal Task stand-in used by the tests: simply echoes its input.
    # NOTE(review): the parameter order is (ctx, self, ...) -- `self` is
    # SECOND, so when called as a bound method the instance lands in
    # `ctx`.  Looks like a bug; confirm against Task.execute's signature.
    def execute(ctx, self, input_value, result_set: dict):
        return input_value

    @classmethod
    def get_task(cls, command: str, option):
        # Factory hook: ignores `command`, builds a DummyTask from `option`.
        return DummyTask(option)
def test_task():
    """End-to-end check: build a task pipeline from test.yaml, run it
    against a Naver news search URL, then extract each found article."""
    url = "https://search.naver.com/search.naver?sm=tab_hty.top&where=news&oquery=%ED%85%8C%EC%8A%A4%ED%8A%B8&ie=utf8&query=%EB%AC%B8%EC%9E%AC%EC%9D%B8"
    res_set = {
        "inp_url": url
    }
    context = TestRuntimeContext()
    task_source = None
    # NOTE(review): yaml.load without an explicit Loader is deprecated and
    # unsafe on untrusted input -- consider yaml.safe_load here.
    with open("../../test.yaml", 'r') as stream:
        try:
            task_source = yaml.load(stream)
        except yaml.YAMLError as exc:
            print(exc)
    print(task_source)
    task = parse_object(task_source)
    # The task is expected to populate res_set['result'] with articles.
    task.execute(context, "", res_set)
    for article in res_set['result']:
        extracted_article = article_extract(article['url'], article['title'])
        # article.extracted = extracted_article
    print(res_set)
def article_extract(url, title):
    """Download and parse one news article with newspaper (Korean) and
    print its metadata.

    Args:
        url: article URL to fetch.
        title: currently unused; the parsed title is printed instead.
    """
    # url = "http://news.joins.com/article/21587110"
    a = Article(url, language='ko')
    a.download()
    a.parse()
    a.nlp()
    res_set = {
        "input_article_url": url
    }
    # (planned structured return, kept for reference)
    # res_set.title = title or a.title
    # res_set.author = str(a.authors)
    # res_set.publish_date = str(a.publish_date)
    # res_set.text = a.text
    # res_set.keywords = a.title
    # res_set.quotes = str(re.findall(u'(?:\u201c(.*?)\u201d)', a.text))
    # return res_set
    # The labels below are user-facing Korean output; the regex pulls out
    # text inside curly double quotes (U+201C/U+201D).
    print("제목: " + a.title)
    print("작성자: " + str(a.authors))
    print("일시: " + str(a.publish_date))
    print("본문: " + a.text)
    print("키워드: " + str(a.keywords))
    print("발언들: " + str(re.findall(u'(?:\u201c(.*?)\u201d)', a.text)))
|
[
"dicebattle@gmail.com"
] |
dicebattle@gmail.com
|
233351f9312bc41292736fd99cf4e9fb9bc342fd
|
e23310bc376838651b999232c7533116e881ce7f
|
/test05/test05/comments/templatetags/__init__.py
|
e81de517f567fb10aea83a41a273fadaa3da472f
|
[] |
no_license
|
lllwqqq/django
|
7630b9489c178daead1753e4e8319ba026d00338
|
9abb022a7fe0882955599862dcd0e2d4e9ce9fc6
|
refs/heads/master
| 2020-09-14T08:24:15.760543
| 2020-06-04T07:42:08
| 2020-06-04T07:42:08
| 223,076,835
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 141
|
py
|
#!/usr/bin/env python
# _*_coding:utf-8_*_
"""
@Time : 2019/12/20 下午5:58
@Author: Aroma
@File: __init__.py
@Software: PyCharm
"""
|
[
"liwq@spacesforce.com"
] |
liwq@spacesforce.com
|
2f6cd45f795f752ee7d0ac99bc7863dd99c4b465
|
fd221efb1165d56ff7007a3b82aa84b1019883e0
|
/AI/pythonProject/main.py
|
8e3679ba5b0717720a0aede8ea70798d76725755
|
[] |
no_license
|
CyanoFresh/KPI-Labs
|
822a8057a1db8f4df04e0b71b498f80dc42fd281
|
894332df2cc5a6eb32ce08938f7ebecf21e0dc02
|
refs/heads/master
| 2023-01-09T06:50:03.303627
| 2021-12-06T18:14:40
| 2021-12-06T18:14:40
| 253,018,181
| 0
| 1
| null | 2023-01-07T05:54:00
| 2020-04-04T14:28:25
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 2,047
|
py
|
import numpy as np
from operator import itemgetter
import random
# Reward matrix for a 16-state environment: R[s][a] is the immediate
# reward for moving from state s to state a.  -1 marks an impossible
# transition; 100 rewards moves into state 0 (the goal, see finish_state).
R = [
    [100, 0, -1, 0, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
    [100, -1, 0, 0, 0, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
    [-1, 0, -1, -1, 0, 0, -1, -1, -1, -1, -1, -1, -1, -1, 0, 0],
    [100, 0, -1, -1, 0, -1, 0, 0, -1, -1, -1, -1, -1, -1, -1, -1],
    [100, 0, 0, 0, -1, 0, 0, 0, 0, -1, -1, -1, -1, -1, -1, -1],
    [-1, 0, 0, -1, 0, -1, -1, 0, 0, -1, -1, -1, -1, 0, 0, 0],
    [-1, -1, -1, 0, 0, -1, -1, 0, -1, 0, 0, -1, -1, -1, -1, -1],
    [-1, -1, -1, 0, 0, 0, 0, -1, 0, 0, 0, 0, -1, -1, -1, -1],
    [-1, -1, -1, -1, 0, 0, -1, 0, -1, -1, 0, 0, 0, 0, 0, -1],
    [-1, -1, -1, -1, -1, -1, 0, 0, -1, -1, 0, -1, -1, -1, -1, -1],
    [-1, -1, -1, -1, -1, -1, 0, 0, 0, 0, -1, 0, -1, -1, -1, -1],
    [-1, -1, -1, -1, -1, -1, -1, 0, 0, -1, 0, -1, 0, 0, -1, -1],
    [-1, -1, -1, -1, -1, -1, -1, -1, 0, -1, -1, 0, -1, 0, -1, -1],
    [-1, -1, -1, -1, -1, 0, -1, -1, 0, -1, -1, 0, 0, -1, 0, -1],
    [-1, -1, 0, -1, -1, 0, -1, -1, 0, -1, -1, -1, -1, 0, -1, 0],
    [-1, -1, 0, -1, -1, 0, -1, -1, -1, -1, -1, -1, -1, -1, 0, -1],
]
# Discount factor for future rewards in the Bellman update.
gamma = 0.8
N = len(R)
states = np.arange(N)
# Episodes terminate upon reaching this state.
finish_state = 0
# Learned action-value table, trained by the loop below.
Q = np.zeros((N, N))
def get_next_action(state, matrix):
    """Pick the next state from `matrix[state]`.

    Entries equal to -1 are blocked.  When every reachable entry sums to
    zero (no information yet) a reachable state is chosen uniformly at
    random; otherwise the first highest-valued one is taken.
    """
    candidates = [(value, action)
                  for action, value in enumerate(matrix[state])
                  if value != -1]
    if sum(value for value, _ in candidates) == 0:
        # No signal yet -- explore at random.
        _, action = random.choice(candidates)
        return action
    best_value, best_action = max(candidates, key=lambda pair: pair[0])
    return best_action
# Q-learning training: 50 episodes from random start states.  Each step
# follows get_next_action over the reward matrix R and applies the
# Bellman update with discount gamma; an episode ends once the current
# state is the goal.
for i in range(50):
    state = np.random.choice(states)
    while True:
        next_state = get_next_action(state, R)
        Q[state][next_state] = R[state][next_state] + gamma * max(Q[next_state])
        if state == finish_state:
            break
        state = next_state

# Pretty-print the learned Q table, one fixed-width row per state.
print('\n'.join([''.join(['{:6.0f}'.format(item) for item in row])
                 for row in Q]))
# Test: greedily follow the learned Q table from state 10 to the goal,
# printing each transition along the way.
state = 10
while state != finish_state:
    next_action = get_next_action(state, Q)
    print(state, '->', next_action)
    state = next_action
|
[
"cyanofresh@gmail.com"
] |
cyanofresh@gmail.com
|
13c84dfbeee8deb2dadd511883f8edcf9cb503d5
|
727f1bc2205c88577b419cf0036c029b8c6f7766
|
/out-bin/py/google/fhir/models/model_test.runfiles/pypi__tensorflow_1_12_0/tensorflow-1.12.0.data/purelib/tensorflow/python/ops/gen_spectral_ops.py
|
ac8b6d9e122ce43b5b1663825791a40ec56b0e91
|
[
"Apache-2.0"
] |
permissive
|
rasalt/fhir
|
55cf78feed3596a3101b86f9e9bbf6652c6ed4ad
|
d49883cc4d4986e11ca66058d5a327691e6e048a
|
refs/heads/master
| 2020-04-13T00:16:54.050913
| 2019-01-15T14:22:15
| 2019-01-15T14:22:15
| 160,260,223
| 0
| 0
|
Apache-2.0
| 2018-12-03T22:07:01
| 2018-12-03T22:07:01
| null |
UTF-8
|
Python
| false
| false
| 182
|
py
|
/home/rkharwar/.cache/bazel/_bazel_rkharwar/c4bcd65252c8f8250f091ba96375f9a5/external/pypi__tensorflow_1_12_0/tensorflow-1.12.0.data/purelib/tensorflow/python/ops/gen_spectral_ops.py
|
[
"ruchika.kharwar@gmail.com"
] |
ruchika.kharwar@gmail.com
|
b603cf2a42b3c56884a71c71d9b01dbc4cd25815
|
a20cb5dfd6ae2e5067a822f3b828a7c72e55489a
|
/243_Shortest_Word_Distance.py
|
6be04312f419e66cbda347bf6256f372125717d4
|
[
"MIT"
] |
permissive
|
rpm1995/LeetCode
|
51f6325cf77be95bb1106d18de75974e03dba9b7
|
147d99e273bc398c107f2aef73aba0d6bb88dea0
|
refs/heads/master
| 2021-12-07T12:00:59.386002
| 2021-08-12T02:55:19
| 2021-08-12T02:55:19
| 193,178,028
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 537
|
py
|
class Solution:
    def shortestDistance(self, words: List[str], word1: str, word2: str) -> int:
        """Return the minimum index distance between word1 and word2.

        Single pass: remember the latest position of each target word and
        tighten the best distance whenever either one is seen.
        """
        last_seen = {word1: -1, word2: -1}
        best = float('inf')
        for position, token in enumerate(words):
            if token in last_seen:
                last_seen[token] = position
                other = word2 if token == word1 else word1
                if last_seen[other] != -1:
                    best = min(best, abs(position - last_seen[other]))
        return best
|
[
"31997276+rpm1995@users.noreply.github.com"
] |
31997276+rpm1995@users.noreply.github.com
|
a8629dcbf325e141ec9eaa323819b787e6317133
|
ed3910e0e14e01a14a472fa63795b9282226db5e
|
/ex40.py
|
d18a41a58b3d6459e830ba23caaafdff90f2f6ad
|
[] |
no_license
|
ereminmax/ISEME
|
87f5037486ecbf31b5f9bf99ad6313427d296d4f
|
49d19a5fc28e257b501a437be5a922b6d25cbb8a
|
refs/heads/master
| 2021-01-22T21:22:02.949272
| 2017-05-10T14:08:10
| 2017-05-10T14:08:10
| 85,415,223
| 0
| 0
| null | 2017-04-07T16:05:35
| 2017-03-18T16:22:45
| null |
UTF-8
|
Python
| false
| false
| 352
|
py
|
# NOTE: Python 2 source (`print line` statement); LPTHW exercise 40.
class Song(object):
    # Holds a song's lyric lines and prints them on demand.
    def __init__(self, lyrics):
        self.lyrics = lyrics

    def sing_me_a_song(self):
        # Print each lyric line on its own line (Python 2 print statement).
        for line in self.lyrics:
            print line

# Two sample songs, then sing both.
happy_bday = Song(["Happy",
                   "bday",
                   "dear",
                   "Nasty"])
bulls_on_parade=Song(["They",
                      "rally",
                      "around"])
happy_bday.sing_me_a_song()
bulls_on_parade.sing_me_a_song()
|
[
"eremin.max@gmail.com"
] |
eremin.max@gmail.com
|
8b1bf18eca0b9749a9fa7c8ee3ec4ce3861e6ffc
|
586c371fe1217b12ad95250220b6ff6bf478716d
|
/views.py
|
cc66db3dc144d66ddb3fd7ad350f0844a9158b00
|
[] |
no_license
|
emakuhin/python
|
b0380f70a13346c7ab25932a4dae89ef0efccea1
|
5f4cc9083be081402550d684b6a99c1ab8ebd2a7
|
refs/heads/master
| 2023-03-20T22:22:11.179754
| 2021-02-19T12:39:46
| 2021-02-19T12:39:46
| 339,050,882
| 0
| 0
| null | 2021-02-18T13:36:46
| 2021-02-15T11:08:17
| null |
UTF-8
|
Python
| false
| false
| 814
|
py
|
from functions import render
def index(request):
    """Render the home page, passing through the optional 'secret_key'
    request parameter as `secret`."""
    secret = request.get('secret_key', None)
    # Render through the template engine.
    return '200 OK', render('index.html', secret=secret)
def about(request):
    """Render the about page with the optional 'name' request parameter."""
    names = request.get('name', None)
    return '200 OK', render('about.html', names=names)
def contact_view(request):
# Проверка метода запроса
if request['method'] == 'POST':
data = request['data']
title = data['title']
text = data['text']
email = data['email']
print(f'Нам пришло сообщение от {email} с темой {title} и текстом {text}')
return '200 OK', render('contact_post.html', title=title, text=text, email=email)
else:
return '200 OK', render('contact.html')
|
[
"maku@mail.ru"
] |
maku@mail.ru
|
a3c61fd237bcbaef147e2bc5827ad0b9f91fa6c6
|
e1bd59225ecb84f4141407e2982ce4c8a5b8d99e
|
/src/script1.py
|
0f772ba041c994584eb7485b87cf3f86048ca5bf
|
[] |
no_license
|
ClaraGhabro/UntieNotsRecrutement
|
7176254e3dd4a00805262f1c9ceec64cfaf968a0
|
531b7211d4565935d87e6908b0fd8b30401c33ad
|
refs/heads/master
| 2022-04-10T17:06:34.104806
| 2020-03-29T20:57:53
| 2020-03-29T20:57:58
| 249,480,716
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,144
|
py
|
import json
from kafka import KafkaProducer
import os
CHAR_TO_REMOVE = ['.', ',', '(', ')', '"', "'",'!', '?', ';', ':', '«', '»', '\n']
def clean_line(line):
for c in CHAR_TO_REMOVE:
line = line.replace(c, " ")
return line
def remove_empty(words):
return list(filter(("").__ne__, words))
def read_corpus(dir_path):
data_paths = [os.path.join(pth, f) for pth, dirs, files in os.walk(dir_path) for f in files]
words_dico = {}
for p in data_paths:
f = open(p, "r")
lines = f.readlines()
words_list = []
for line in lines:
line = clean_line(line)
words = remove_empty(line.split(" "))
words_list.append(words)
flat_list = [w for word in words_list for w in word]
words_dico[p] = flat_list
return words_dico
if __name__ == "__main__":
producer = KafkaProducer(bootstrap_servers="localhost:9092")
data = read_corpus("../corpus/")
for file_name, words in data.items():
for w in words:
d = {"source": file_name, "word": w}
producer.send("sendWord", json.dumps(d).encode())
|
[
"clara.ghabro@epita.fr"
] |
clara.ghabro@epita.fr
|
20e1b133e2b412e19e153be019a0bf9f67c3fec2
|
2d227925231be797cc78b644358ecd3adf00fba7
|
/hr/numpy/dot_cross.py
|
515c172ecb47ecd14ec305361361f19f271cc0fd
|
[] |
no_license
|
egalli64/pythonesque
|
6bb107189d4556d832175d41366ea0b18ed6ea1d
|
154042c5ae5cf43a0ae2c03d509fc48d1dc19eb8
|
refs/heads/master
| 2023-07-07T05:50:14.711023
| 2023-07-01T10:52:24
| 2023-07-01T10:52:24
| 53,720,525
| 21
| 7
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 421
|
py
|
"""
HackerRank Python Numpy Dot and Cross
author: Manny egalli64@gmail.com
info: http://thisthread.blogspot.com/
https://www.hackerrank.com/challenges/np-dot-and-cross/problem
Given two NxN arrays, compute their matrix product
"""
import numpy as np
n = int(input())
left = np.array([input().split() for _ in range(n)], int)
right = np.array([input().split() for _ in range(n)], int)
print(np.dot(left, right))
|
[
"egalli64@gmail.com"
] |
egalli64@gmail.com
|
2d17de3711787a66d190699e94ddc0d1a3543ae5
|
d4412a81e17dddda5ed808a6cec83928488e61bd
|
/featuresExtraction.py
|
573c3149ca9e4e1de67937a562a042800915309d
|
[] |
no_license
|
chpplen/basketball
|
7a484aa1001bdadd080ccca049e72bb749f23c20
|
c0d07345cb229eb6fa306d1288954d30f9c30d76
|
refs/heads/master
| 2021-01-22T19:32:07.751280
| 2017-03-16T15:35:51
| 2017-03-16T15:35:51
| 85,212,211
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,844
|
py
|
# encoding: utf-8
import datetime
import csv
import pickle
class featuresExtraction:
def __init__(self):
self.dayLastFight = {}
self.historyAverage = {}
t = datetime.date(2016, 10, 24)
for i in range(30):
self.historyAverage.update({str(i):{"aveGet":0.0,"aveLost":0.0,"count":0}})
self.dayLastFight.update({str(i):t})
def updateAve(self, scoreGet, scoreLost, teamDict):
aveGet = teamDict["aveGet"]
aveLost = teamDict["aveLost"]
count = teamDict["count"]
totalGet = aveGet*count + scoreGet
totalLost = aveLost*count + scoreLost
count += 1
teamDict["count"] = count
teamDict["aveGet"] = totalGet/count
teamDict["aveLost"] = totalLost/count
return teamDict
def isLastdayFight(self, d, team):
today = datetime.datetime.strptime(d, "%Y%m%d").date()
lastFight = self.dayLastFight[team]
diff = (today - lastFight).days
self.dayLastFight[team] = today
if diff == 1:
return True
else:
return False
def scoreFrom70(self, team1Get,team2Lost):
if team1Get > team2Lost:
return (team1Get*2 + team2Lost)/3.0
elif (team1Get + 10) > team2Lost:
return team1Get
else:
return (team1Get + team2Lost)/2.0
def featuresGenerate(self, team1, team2):
x = []
# print team1
team1Index = team1[0]
scoreTeam1 = int(team1[2])
historyTeam1 = self.historyAverage[team1Index]
x.append(historyTeam1["aveGet"])
x.append(historyTeam1["aveLost"])
# x.append(scoreTeam1)
if self.isLastdayFight(team1[1],team1Index):
x.append(1)
else:
x.append(0)
team2Index = team2[0]
scoreTeam2 = int(team2[2])
historyTeam2 = self.historyAverage[team2Index]
x.append(historyTeam2["aveGet"])
x.append(historyTeam2["aveLost"])
# x.append(scoreTeam2)
if self.isLastdayFight(team2[1],team2Index):
x.append(1)
else:
x.append(0)
x.append(self.scoreFrom70(historyTeam1["aveGet"],historyTeam2["aveLost"]))
x.append(self.scoreFrom70(historyTeam2["aveGet"],historyTeam1["aveLost"]))
self.updateAve(scoreTeam1,scoreTeam2,self.historyAverage[team1Index])
self.updateAve(scoreTeam2,scoreTeam1,self.historyAverage[team2Index])
y = 1
if scoreTeam1 > scoreTeam2:
y = 0
return x, y
def extraction(self):
X = []
Y = []
spamreader = csv.reader(open('data/basketballRecord.csv', 'rb'))
index = 0
temp = []
for row in spamreader:
index += 1
if index%2 == 1:
temp = row
else:
try:
x, y = self.featuresGenerate(temp,row)
X.append(x)
Y.append(y)
except Exception:
print Exception
spamwriter = csv.writer(open('data/basketballFeatures.csv', 'wb'))
for i in range(len(Y)):
temp = [Y[i]]
spamwriter.writerow(temp+X[i])
pickle.dump(self, open('model/featuresExtraction.pkl', 'wb'))
|
[
"chpplen@gmail.com"
] |
chpplen@gmail.com
|
b3a9ab3df66920fc9c046b9dbf63ca9b4ca12c9e
|
534b542d9f244c1975b37a5605eb2a6d43a972d6
|
/navi_api/admin.py
|
c6793b04b78f97337a6941110bc4e50b9c954787
|
[] |
no_license
|
techforthepeople/ttp-backend
|
6941f25a136b7f2843692bbfae10c0b8ce512468
|
08a62be9848a1168033d28180cee83e460047a7d
|
refs/heads/master
| 2020-09-02T06:55:29.860151
| 2019-11-08T22:59:38
| 2019-11-08T22:59:38
| 218,837,463
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 224
|
py
|
from django.contrib import admin
from .models import (
EmtLevel,
EmtProfile,
StatusLog,
)
# Register your models here.
admin.site.register(EmtLevel)
admin.site.register(EmtProfile)
admin.site.register(StatusLog)
|
[
"droza0@users.noreply.github.com"
] |
droza0@users.noreply.github.com
|
f879aabd26c6b0138428de4060582290c0c60f06
|
20bbdaa317e3c4f9088b171fb49fc2021cad4525
|
/project_user_story_one/dict_main.py
|
a685c5ab65336b7fe594b86c997af5d06d6e365f
|
[] |
no_license
|
knarg/United_By_Music_User_Story_One
|
45a96fcc8a696230076891837eff402698eb000b
|
30bf66b4de907339bd7f16882a73f74922149061
|
refs/heads/master
| 2021-10-26T23:39:45.937407
| 2019-04-14T21:49:53
| 2019-04-14T21:49:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 319
|
py
|
def main():
# This file is just to create my list of questions and I will start with one question
#and then it will be dynamically updated by the user
import json
dict = {1: {"what is good instrument": "flute"}}
with open('my_dict.json', 'w') as f:
json.dump(dict, f)
main()
|
[
"noreply@github.com"
] |
knarg.noreply@github.com
|
a7bd11cdb444a3eb43fbb9a3cd0c661987a8924d
|
d0e792360812b42c34e0e3fed624fcd5df47c013
|
/Blog/migrations/0008_auto_20190809_1858.py
|
9b51695dda257899f8dd40a8111dc937f232aa8c
|
[] |
no_license
|
vlehra/intrepidgeeks
|
5f008a5f490f055dfff1321edd398486f5b1c7c6
|
ea9f5daa06f3617933c685baf0d45ae7381ee0ff
|
refs/heads/master
| 2020-12-04T22:59:16.677518
| 2020-01-05T15:25:57
| 2020-01-05T15:25:57
| 231,927,777
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 405
|
py
|
# Generated by Django 2.1.5 on 2019-08-09 13:28
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Blog', '0007_auto_20190809_1851'),
]
operations = [
migrations.AlterField(
model_name='blog',
name='pub_date',
field=models.DateTimeField(auto_now=True),
),
]
|
[
"noreply@github.com"
] |
vlehra.noreply@github.com
|
73186b4781f11432e18fa8c7ac6d96d471f4776a
|
714f1fb452fd11e22255d9fdd844c9773b604c6c
|
/Results-og/write_throughput/write_graph.py
|
1f528c43e2fe422821cef79bfa4a5f6da406e832
|
[] |
no_license
|
jadia/gvisor_analysis
|
1bdc21fb17fdf61d634933d730ee3cb3e2fb1971
|
998c3c490920b520949b630658f26f1110642897
|
refs/heads/master
| 2022-11-07T01:01:09.265937
| 2020-06-18T15:10:00
| 2020-06-18T15:10:00
| 263,347,173
| 0
| 0
| null | 2020-05-12T13:37:58
| 2020-05-12T13:37:57
| null |
UTF-8
|
Python
| false
| false
| 3,062
|
py
|
import numpy as np
import statistics
import sys
import matplotlib.pyplot as plt
import csv
results = {}
# Grab data and put into dictionary
with open(sys.argv[1]) as f:
csv_reader = csv.reader(f, delimiter=',')
for row in csv_reader:
if (row[0] not in results):
results[row[0]] = {}
if (row[1] not in results[row[0]]):
results[row[0]][row[1]] = []
results[row[0]][row[1]].append(float(row[2]))
# Calculate mean throughput for each
def throughput(data, size):
return int(size)/(data *1000000000) # GB/s
averages = {}
for platform in results:
if (platform not in averages):
averages[platform] = {}
for size in results[platform]:
averages[platform][size] = throughput(statistics.mean(results[platform][size]), size)
# Sort keys inorder of size
def sort_keys(mydict):
mylist = []
keylist = sorted(mydict.keys(), key=int)
for key in keylist:
mylist.append(mydict[key])
return mylist
for platform in averages:
averages[platform] = sort_keys(averages[platform])
if (sys.argv[2] == "bar"):
n_groups = 5
print(averages)
# create plot
plt.rc('font', family='serif')
plt.rc('xtick', labelsize='x-small')
plt.rc('ytick', labelsize='x-small')
fig = plt.figure(figsize=(3.5, 3))
ax = fig.add_subplot(1, 1, 1)
index = np.arange(n_groups)
bar_width = 0.2
opacity = 0.8
plt.rcParams["figure.figsize"] = [3.5,2]
rects1 = plt.bar(index + 0*bar_width, averages['tmpfs_bare'], bar_width,
edgecolor='0.8',
color='0.8',
alpha=opacity,
label='bare')
rects2 = plt.bar(index + 1*bar_width, averages['tmpfs_runc'], bar_width,
edgecolor='0.3',
color='0.3',
alpha=opacity,
label='runc')
rect3 = plt.bar(index + 2*bar_width, averages['tmpfs_runsc_kvm'], bar_width,
alpha=opacity,
edgecolor='0.6',
color='0.6',
label='internal')
rects4 = plt.bar(index + 3*bar_width, averages['vol_tmpfs_kvm'], bar_width,
alpha=opacity,
edgecolor='0.1',
color='0.1',
label='external')
# Add text boxes (userspace_exit)
'''
ax.text(0.43,0.7,'47K',fontsize=10) #tmpfs 4k
ax.text(1.43,2.15,'41K',fontsize=10) #tmpfs 16K
ax.text(2.43,4.15,'28K',fontsize=10) #tmpfs 64K
ax.text(3.43,5.4,'11K',fontsize=10) #tmpfs 256K
ax.text(4.43,9.4,'0.7K',fontsize=10) #tmpfs 1M
ax.text(0.63,0.4,'100K',fontsize=10) #vol 4K
ax.text(1.63,1.43,'100K',fontsize=10) #vol 16K
ax.text(2.63,2.33,'100K',fontsize=10) #vol 64K
ax.text(3.63,3.43,'100K',fontsize=10) #vol 256K
ax.text(4.63,4.0,'100K',fontsize=10) #vol 1MB
'''
plt.xlabel('Size of Write', fontsize=10)
plt.ylabel('Throughput (GB/s)', fontsize=10)
#plt.title('Throughput of Read')
plt.xticks(index + 2*bar_width, ("4KB", "16KB", "64KB", "256KB", "1MB"))
plt.xlim(left=-1*bar_width)
plt.legend(loc = 'upper left', frameon=False, prop={'size':10}, ncol=2)
ax.tick_params(axis=u'both', which=u'both',length=0)
#plt.ylim(top=13)
plt.tight_layout()
plt.savefig('./write_throughput.eps', format='eps', dpi=1000)
plt.show()
|
[
"eyoung8@wisc.edu"
] |
eyoung8@wisc.edu
|
d2caba708b1b7d5cd603cdd367b489d738322716
|
84a0fe2380a0061e9bc86a78b4fb193bf7665bea
|
/emsdjango/urls.py
|
316a4d4a698a8c70ea5bca4396130858b5725eec
|
[] |
no_license
|
RUPAYAN10/employeeManagemntsystem
|
b0bd6f2a7ab360bc4b680dd49f40de616ab3e7ca
|
81bc03d4c90d94d3f011d51cda29a9bbbeb2042a
|
refs/heads/master
| 2023-06-04T09:56:46.788979
| 2021-06-21T20:30:31
| 2021-06-21T20:30:31
| 378,921,116
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 635
|
py
|
from django.contrib import admin
from django.urls import path
from ems import views
from django.conf.urls import url
from django.views.static import serve
from django.conf import settings
urlpatterns = [
path('emp', views.emp),
path('', views.emp),
path('show', views.show),
path('edit/<int:id>', views.edit),
path('update/<int:id>', views.update),
path('delete/<int:id>', views.delete),
path('admin/', admin.site.urls),
url(r'^media/(?P<path>.*)$', serve,{'document_root': settings.MEDIA_ROOT}),
url(r'^static/(?P<path>.*)$', serve,{'document_root': settings.STATIC_ROOT}),
]
|
[
"86109345+RUPAYAN10@users.noreply.github.com"
] |
86109345+RUPAYAN10@users.noreply.github.com
|
6cb9680afa00d7490425d67c9b5813e791dd18ba
|
ae4c35cf4b79592153b14f105d197afbfbd6d02d
|
/ReplayService/parse.py
|
e44f1f4660c725f49f261d8ed8065c430e62f633
|
[] |
no_license
|
mheap/riot-hackathon
|
bd7aae03ce1b587005026c0610ece686d88920e2
|
fa073fa9023a1e17cab965675835e59480124596
|
refs/heads/master
| 2020-04-05T06:33:37.158895
| 2018-11-09T21:16:47
| 2018-11-09T21:16:47
| 156,642,045
| 0
| 0
| null | 2018-11-09T07:31:02
| 2018-11-08T02:59:44
|
C#
|
UTF-8
|
Python
| false
| false
| 3,721
|
py
|
import struct
import json
try:
from cStringIO import StringIO
except:
from StringIO import StringIO
ROFL_MAGIC = "RIOT" + chr(0) * 2
class Struct(object):
format = None
extradata = None
@classmethod
def get_extradata(cls, fileobj):
return [None] * len(cls.get_format(fileobj, None))
@classmethod
def get_format(cls, fileobj, extradata):
return cls.format
@classmethod
def read(cls, fh, fileobj, extradata=None):
format = cls.get_format(fileobj, extradata=extradata)
f_str = fh.read(struct.calcsize(format))
res = struct.unpack(format, f_str)
me = cls()
me.unpack_tuple(res, fileobj, extradata)
return me
def unpack_tuple(self, res, fileobj, extradata):
for field_name, field_value in zip(self.fields, res):
custom_func = getattr(self, "unpack_{}".format(field_name), None)
if custom_func is not None:
custom_func(field_name, field_value, fileobj, extradata)
else:
setattr(self, field_name, field_value)
class CompositeStruct(Struct):
@classmethod
def read(cls, fh, fileobj, extradata=None):
self = cls()
for clazz, field in zip(cls.get_format(fileobj), cls.fields):
setattr(self, field, clazz.read(fh, self, extradata=extradata))
return self
class CompositeStructList(Struct):
@classmethod
def read(cls, fh, fileobj, extradata=None):
self = cls()
self.outer = fileobj
self.data = []
for clazz, ed in zip(
cls.get_format(fileobj, extradata=extradata), cls.get_extradata(fileobj)
):
self.data.append(clazz.read(fh, self, extradata=ed))
return self
class RoflHeader(Struct):
format = "6s256sHIIIIII"
fields = [
"magic",
"signature",
"header_len",
"file_len",
"metadata_offset",
"metadata_len",
"payload_header_offset",
"payload_header_len",
"payload_offset",
]
class RoflMetadata(Struct):
fields = ["json"]
@classmethod
def get_format(cls, fileobj, extradata):
return "{}s".format(fileobj.header.metadata_len)
def unpack_json(self, field_name, field_value, fileobj, extradata):
self.json = json.loads(field_value)
self.json["statsJson"] = json.loads(self.json["statsJson"])
return self.json
def as_json(self):
return json.dumps(self.json, indent=4)
class RoflPayloadHeader(Struct):
format = "QIIIIIIH"
fields = [
"game_id",
"game_length",
"keyframe_count",
"chunk_count",
"end_startup_chunk_id",
"start_game_chunk_id",
"keyframe_interval",
"encryption_key_length",
]
def __str__(self):
return (
"<RoflPayloadHeader - game ID: {} - game length: {} - "
+ "keyframe count: {} - chunk count: {}>".format(
self.game_id, self.game_length, self.keyframe_count, self.chunk_count
)
)
class RoflFile(object):
@classmethod
def read(cls, fh):
self = cls()
self.header = RoflHeader.read(fh, self)
if self.header.magic != ROFL_MAGIC:
raise Exception("Decoding error - magic invalid")
self.metadata = RoflMetadata.read(fh, self)
self.payload_header = RoflPayloadHeader.read(fh, self)
return self
def __str__(self):
x = json.loads(self.metadata.as_json())
x['MatchId'] = self.payload_header.game_id
return json.dumps(x)
def process_rofl(rofl_file):
with open(rofl_file, "rb") as f:
return RoflFile.read(f)
|
[
"m@michaelheap.com"
] |
m@michaelheap.com
|
fd8485ee71ed3cb66a763b309e957fc5f125b11e
|
5615f555acea4dba64f7d1b68c0d499982dfd05c
|
/hr_holidays_auto/models/hr_holdays.py
|
269772c9ebd225a0d0cb679187fdc73cd6a2ae23
|
[] |
no_license
|
soulbadguy00/modules
|
9020dd352f32e1756bf447060d55989bd2842e7c
|
79a9b13da9cf0eeb210acbed7c948bdc82962bcf
|
refs/heads/master
| 2023-06-28T20:40:33.352485
| 2021-07-27T20:48:46
| 2021-07-27T20:48:46
| 390,121,743
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 564
|
py
|
# -*- coding:utf-8 -*-
from odoo import api, fields, models
class HrHolidays(models.Model):
_inherit = 'hr.leave'
def createHolidays(self, employee_id, number_of_days):
type= self.env['hr.holidays.status'].search([('code', '=', 'CONG')], limit=1)
if type:
vals = {
'holidays_type' : 'employee',
'employee_id': employee_id,
'holidays_status_id': type.id,
'number_of_days_temps': number_of_days,
}
self.create(vals)
|
[
"pierrerodolpheagnero@gmail.com"
] |
pierrerodolpheagnero@gmail.com
|
6c0fd52f7332928dc13674944615c189d5b4b3fc
|
930df8fab4c21f5b77d9e8f071f85c772b00653f
|
/mysite/blog/migrations/0003_auto_20180625_1215.py
|
4ddda39d4231f64f21827eee5f712d2322c6f9ca
|
[] |
no_license
|
Newone3/big_project1
|
6182f8aa5b097c196a2ac22b581a87e3014fe38a
|
dd28ad79a85531da3781822e5bc491762412ad18
|
refs/heads/master
| 2020-03-21T13:16:46.513714
| 2018-06-25T13:14:59
| 2018-06-25T13:14:59
| 138,597,303
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 780
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2018-06-25 12:15
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
import django.utils.timezone
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('blog', '0002_auto_20180625_1150'),
]
operations = [
migrations.AlterField(
model_name='comment',
name='created_date',
field=models.DateTimeField(default=datetime.datetime(2018, 6, 25, 12, 15, 35, 386541, tzinfo=utc)),
),
migrations.AlterField(
model_name='post',
name='created_date',
field=models.DateTimeField(default=django.utils.timezone.now),
),
]
|
[
"los.haldos@seznam.cz"
] |
los.haldos@seznam.cz
|
3dce8aafc4f011e58739b49d996a5f30507d623e
|
fd394f07e0d0b1a242d5f20a712f8175c04d48f5
|
/gxformat2/interface.py
|
e113ddf5818174e7327c05821ffb4426a58712d6
|
[
"AFL-3.0",
"CC-BY-2.5",
"AFL-2.1",
"CC-BY-3.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
hmenager/gxformat2
|
1d6a54104d67897f33c34ce393fb34209b63a552
|
f68e787f218531dc3d106b81b7d296bf1822d125
|
refs/heads/master
| 2020-04-25T09:55:44.158083
| 2018-12-17T08:12:47
| 2018-12-17T09:10:46
| 172,691,358
| 0
| 0
|
NOASSERTION
| 2019-02-26T10:38:53
| 2019-02-26T10:38:53
| null |
UTF-8
|
Python
| false
| false
| 2,684
|
py
|
"""This module contains an interface and implementation describing Galaxy interactions used by gxformat2.
The interface is :class:`ImporterGalaxyInterface` and the default
implementation based on `BioBlend <https://bioblend.readthedocs.io/>`__
is :class:`BioBlendImporterGalaxyInterface`.
"""
import abc
import bioblend
import six
@six.add_metaclass(abc.ABCMeta)
class ImporterGalaxyInterface(object):
"""An abstract interface describing Galaxy operations used by gxformat2.
Specifically containing definitions of operations required to load
workflows into Galaxy.
"""
@abc.abstractmethod
def import_workflow(self, workflow, **kwds):
"""Import a workflow via POST /api/workflows or comparable interface into Galaxy."""
def import_tool(self, tool):
"""Import a new dynamically defined tool.
Not yet implemented in vanilla Galaxy - used only in the cwl branch of Galaxy.
"""
raise NotImplementedError()
class BioBlendImporterGalaxyInterface(object):
"""Implementation of :class:`ImporterGalaxyInterface` using bioblend."""
def __init__(self, **kwds):
"""Build a :class:`bioblend.GalaxyInstance` from supplied arguments."""
url = None
admin_key = None
admin_gi = None
if "admin_gi" in kwds:
admin_gi = kwds["admin_gi"]
elif "gi" in kwds:
admin_gi = kwds["gi"]
elif "url" in kwds and "admin_key" in kwds:
url = kwds["url"]
admin_key = kwds["admin_key"]
if admin_gi is None:
assert url is not None
assert admin_key is not None
admin_gi = bioblend.GalaxyInstance(url=url, key=admin_key)
user_key = None
user_gi = None
if "user_gi" in kwds:
user_gi = kwds["user_gi"]
elif "gi" in kwds:
user_gi = kwds["gi"]
elif "url" in kwds and "user_key" in kwds:
url = kwds["url"]
user_key = kwds["user_key"]
if user_gi is None:
assert url is not None
assert user_key is not None
user_gi = bioblend.GalaxyInstance(url=url, key=user_key)
self._admin_gi = admin_gi
self._user_gi = user_gi
def import_workflow(self, workflow, **kwds):
"""Import Galaxy workflow using instance :class:`bioblend.GalaxyInstance` object."""
return self._user_gi.workflows.import_workflow_json(
workflow,
**kwds
)
def import_tool(self, tool_representation):
"""Import Galaxy tool using instance :class:`bioblend.GalaxyInstance` object."""
raise NotImplementedError()
|
[
"jmchilton@gmail.com"
] |
jmchilton@gmail.com
|
5eaaaf7a891fe628366957175dac812bb10f7455
|
38ddab707ebb9291868338c19f989a5f4c7129ad
|
/剑指offer/17.树的子结构.py
|
4a0d055b7cfbdbe93453b0721c77278ab3c73527
|
[] |
no_license
|
hugechuanqi/Algorithms-and-Data-Structures
|
ae552c407210fa39e2f309ff079b4aca10fa3362
|
4e4f739402b95691f6c91411da26d7d3bfe042b6
|
refs/heads/master
| 2021-06-26T02:26:59.495057
| 2020-11-26T14:06:18
| 2020-11-26T14:06:18
| 174,640,536
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,104
|
py
|
# -*- coding:utf-8 -*-
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class BinaryTree(object):
""" 二叉树结构类
"""
def build_tree(self, List):
""" 构建一棵平衡二叉树,数组必须为排序好地数组,才能使得是平衡二叉树
前提:输入中序遍历,该列表必须满足一棵满二叉树,才能取中间结点为根结点,然后左右子树递归
"""
l=0
r=len(List)-1
if(l>r): # 数组为空
return None
if(l==r): # 数组大小为1
return TreeNode(List[l])
mid = int((l+r)/2)
root=TreeNode(List[mid]) #构造成根结点,然后左右子树递归
root.left=self.build_tree(List[:mid])
root.right=self.build_tree(List[mid+1:])
return root
def PrintFromTopToBottom(self, root): #利用队列的先入先出,将左右孩子结点顺序弹出
""" 从上往下打印二叉树——层序遍历
"""
if not root:
return []
queue = []
result = []
queue.append(root)
while len(queue)>0: #while(len(queue)>0):不知道为什么就错了
node = queue.pop(0)
result.append(node.val)
if node.left:
queue.append(node.left)
if node.right:
queue.append(node.right)
return result
class Solution:
def HasSubtree(self, pRoot1, pRoot2):
""" 递归实现:判断二叉树B是否为二叉树A的子结构,首先找到相同根结点
"""
result = False
if pRoot1 != None and pRoot2 != None:
if pRoot1.val == pRoot2.val:
result = self.same(pRoot1, pRoot2)
if not result: #如果根结点不相同,则从树的左右子结点继续寻找
result = self.HasSubtree(pRoot1.left, pRoot2)
if not result:
result = self.HasSubtree(pRoot1.right, pRoot2)
return result
def same(self, pRoot1, pRoot2):
""" 如果根结点相同,则分别判断左右子结点是否相同,直到二叉树B的子节点为空
"""
if pRoot2 == None:
return True
if pRoot1 == None or pRoot1.val != pRoot2.val:
return False
return self.same(pRoot1.left, pRoot2.left) and self.same(pRoot1.right, pRoot2.right)
def HasSubtree2(self, pRoot1, pRoot2):
""" 非递归实现:判断二叉树B是否为二叉树A的子结构,首先找到相同根结点
"""
if not pRoot1 or not pRoot2:
return False
queue = []
queue.append(pRoot1)
while(queue):
node = queue.pop(0)
if node.val==pRoot2.val and self.checkSame(node, pRoot2):
return True
if node.left:
queue.append(node.left)
if node.right:
queue.append(node.right)
return False
def checkSame(self, pRoot1, pRoot2):
""" 如果根结点相同,则分别判断左右子节点是否相同,直到二叉树B的子节点为空
"""
if not pRoot2:
return True
if not pRoot1 or pRoot1.val!=pRoot2.val:
return False
return self.checkSame(pRoot1.left, pRoot2.left) and self.checkSame(pRoot1.right, pRoot2.right)
if __name__ == "__main__":
## 测试用例
# 输入树结构为:
# 8
# 8 7
# 9 2 '#' '#'
# '#' '#' 4 7 '#' '#' '#' '#'
A = ['#', 9, '#', 8, 4, 2, 7, 8, '#', '#', '#', 7, '#', '#', '#'] # 中序遍历
B = [9, 8, 2]
# B1 = [9,8,3]
BT = BinaryTree()
pRootA = BT.build_tree(A)
pRootB = BT.build_tree(B)
print("二叉树A结构(层序遍历)为:", BT.PrintFromTopToBottom(pRootA))
print("二叉树B结构(层序遍历)为:", BT.PrintFromTopToBottom(pRootB))
a = Solution()
print(a.HasSubtree2(pRootA, pRootB))
|
[
"1498509746@qq.com"
] |
1498509746@qq.com
|
330ffe6ee7fc13129d2f9b1decb0666998494cfd
|
653c30261f06a68e6bd67e9bc220599b223a95de
|
/projecteuler38.py
|
f6cf70229e01d2b3bf6427405d9df41a0fc3c295
|
[] |
no_license
|
michaelcjoseph/ProjectEuler
|
9d08fde0865e28a3a0af650d5b88dec54f215a6c
|
9f1be74ef6a26269c641221875fed7f095c4c175
|
refs/heads/master
| 2016-09-01T17:29:30.757688
| 2015-06-09T02:23:15
| 2015-06-09T02:23:15
| 37,105,070
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 765
|
py
|
# Project Euler 38
# Pandigital Multiples
import math
def main():
num = 1
for i in range(1, 10000, 1):
x = ConcatProducts(i)
if x >= num:
if IsPandigital(x):
num = x
print num
def ConcatProducts(n):
concat = str(n)
count = 2
while len(concat) < 9 and count < 10:
concat += str(n*count)
count += 1
return concat
def IsPandigital(n):
if len(str(n)) == 9:
n_digits = []
for i in range(0, len(str(n)), 1):
n_digits.append(int(str(n)[i]))
if 1 in n_digits:
if 2 in n_digits:
if 3 in n_digits:
if 4 in n_digits:
if 5 in n_digits:
if 6 in n_digits:
if 7 in n_digits:
if 8 in n_digits:
if 9 in n_digits:
return True
return False
if __name__ == '__main__':
main()
|
[
"mjoseph.cm@gmail.com"
] |
mjoseph.cm@gmail.com
|
3ca4f12cfde07191d7276f95d1acc1cdf4fc71a4
|
9bd564a1b571158d58ebaf34f0b7a91e268c643e
|
/pureButter_project/pureButter_project/wsgi.py
|
81460ed78aa0639f9ccda280f0fa10b4b5c9bef4
|
[] |
no_license
|
elmasta/Pure_butter
|
82a9c2925e9e29a20ee5855cdcf4d0c67d563080
|
c7fae66dcb712ce7c828f05007689624731b727f
|
refs/heads/master
| 2022-12-09T22:27:26.441347
| 2022-06-14T07:38:57
| 2022-06-14T07:38:57
| 203,661,339
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 413
|
py
|
"""
WSGI config for pureButter_project project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'pureButter_project.settings')
application = get_wsgi_application()
|
[
"valentinrobin1@gmail.com"
] |
valentinrobin1@gmail.com
|
4a24d9022a64534cf0afbc6bd6084e4c67d23f43
|
80423e48d7d2f6a92cb57a46d62f160f7c2bb042
|
/OrthDatasetAnalyzer/test/Crab/run.py
|
92cdf3baacdc41ef848c146a45126907666c9a7b
|
[] |
no_license
|
khaosmos93/OrthDataset
|
f73a46151dced5a98d51c415518ea9237fe987f9
|
0539edf1bac93b0b437cc2c00a7c08efb5901ce7
|
refs/heads/master
| 2021-01-01T15:44:07.698904
| 2017-07-20T11:59:03
| 2017-07-20T11:59:03
| 97,363,375
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,710
|
py
|
import FWCore.ParameterSet.Config as cms
process = cms.Process( "MSAnalyser" )
process.source = cms.Source( "PoolSource",
fileNames = cms.untracked.vstring(
#'file:1CFA8097-8AEA-E611-980F-001E67E6F855.root', #/JetHT/Run2016H-03Feb2017_ver3-v1/MINIAOD
#'file:16F28614-84EA-E611-8083-A0369F310374.root' #SingleMuon
#'/store/data/Run2016H/SingleMuon/MINIAOD/03Feb2017_ver3-v1/80000/52C02EA9-7EEA-E611-BA67-A0000420FE80.root'
),
inputCommands = cms.untracked.vstring(
'keep *'
)
)
import FWCore.PythonUtilities.LumiList as LumiList
#process.source.lumisToProcess = LumiList.LumiList(filename = '/afs/cern.ch/cms/CAF/CMSCOMM/COMM_DQM/certification/Collisions16/13TeV/ReReco/Final/Cert_271036-284044_13TeV_23Sep2016ReReco_Collisions16_JSON_MuonPhys.txt').getVLuminosityBlockRange()
#process.source.lumisToProcess = LumiList.LumiList(filename = '/afs/cern.ch/cms/CAF/CMSCOMM/COMM_DQM/certification/Collisions16/13TeV/ReReco/Final/Cert_271036-284044_13TeV_23Sep2016ReReco_Collisions16_JSON.txt').getVLuminosityBlockRange()
process.source.lumisToProcess = LumiList.LumiList(filename = '../Cert_271036-284044_13TeV_23Sep2016ReReco_Collisions16_JSON.txt').getVLuminosityBlockRange()
process.load("FWCore.MessageService.MessageLogger_cfi")
process.MessageLogger.cerr.INFO.limit = 0
process.MessageLogger.cout.threshold = cms.untracked.string('WARNING')
process.MessageLogger.cerr.FwkSummary = cms.untracked.PSet(
reportEvery = cms.untracked.int32(10000),
limit = cms.untracked.int32(10000000)
)
process.MessageLogger.cerr.FwkReport = cms.untracked.PSet(
reportEvery = cms.untracked.int32(10000),
limit = cms.untracked.int32(10000000)
)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32( -1 )
# input = cms.untracked.int32( 20 )
# input = cms.untracked.int32( 100 )
# input = cms.untracked.int32( 2000 )
# input = cms.untracked.int32( 50000 )
# input = cms.untracked.int32( 100000 )
# input = cms.untracked.int32( 300000 )
)
process.OrthDataset = cms.EDAnalyzer('OrthDatasetAnalyzer',
#Verbose = cms.bool(True),
Verbose = cms.bool(False),
MinMass = cms.double(900),
)
#### Standard Configurations
#process.load('Configuration.StandardSequences.Services_cff')
#process.load('Configuration.StandardSequences.Geometry_cff')
#process.load('Configuration.StandardSequences.Reconstruction_cff')
#process.load('Configuration.StandardSequences.MagneticField_cff')
#
#### conditions
GT='GlobalTagReplace'
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_condDBv2_cff")
process.GlobalTag.globaltag = GT
#process.GlobalTag.globaltag = '80X_dataRun2_2016LegacyRepro_v3' #Data 80X
#process.GlobalTag.globaltag = '80X_dataRun2_Prompt_v16' #Feb RunHv3
##from Configuration.AlCa.GlobalTag import GlobalTag
##process.GlobalTag = GlobalTag(process.GlobalTag, 'auto:run2_mc', '')
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.MagneticField_AutoFromDBCurrent_cff')
process.load('TrackingTools.TransientTrack.TransientTrackBuilder_cfi')
PD = 'PDReplace'
Period = 'PeriodReplayce'
OUTPUT='OrthDatasetTree_'+ PD +'_' + Period +'.root'
process.TFileService = cms.Service("TFileService",
fileName = cms.string(OUTPUT)
)
process.p = cms.Path(process.OrthDataset)
process.options = cms.untracked.PSet(
SkipEvent = cms.untracked.vstring('ProductNotFound'),
numberOfThreads = cms.untracked.uint32(8)
)
|
[
"khaosmos93@gmail.com"
] |
khaosmos93@gmail.com
|
65d36252234af52fc4ad69ff244e41567969bb88
|
1635e722e7ede72f4877671f36bbbc4199abae81
|
/sqp-addons/rml_reports/account/account_print_invoice.py
|
5d6f8e1179befc117b270026e8990c4d530253d1
|
[] |
no_license
|
ecosoft-odoo/sqp
|
7c09617048091ac6de4b25a33ad88127d36de452
|
7a7fc6b88087d98d536dd4ec39f9fb572918090e
|
refs/heads/master
| 2023-08-08T00:07:48.405000
| 2023-08-04T15:47:43
| 2023-08-04T15:47:43
| 40,047,976
| 3
| 9
| null | 2023-08-02T08:38:53
| 2015-08-01T13:48:54
|
Python
|
UTF-8
|
Python
| false
| false
| 1,524
|
py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.report import report_sxw
class sqp_account_invoice(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(sqp_account_invoice, self).__init__(cr, uid, name, context=context)
self.localcontext.update({
'time': time,
})
report_sxw.report_sxw(
'report.sqp.account.invoice',
'account.invoice',
'sqp-addons/rml_reports/account/account_print_invoice.rml',
parser=sqp_account_invoice
)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
[
"kittiu@gmail.com"
] |
kittiu@gmail.com
|
f02c980a16873b8ad5353fdf70b81d5fd92ddef1
|
a15107a9426fb587b1fff12659012b4fbaed9017
|
/crm/urls.py
|
0202699562bc6348a8a38222e834065999e6dad7
|
[
"MIT"
] |
permissive
|
goplannr-samim/manager-app
|
f93ec74211f2dd5927d9f37c0a14f77bc3a45d61
|
cd5bf7f1fea28d51dea55e48fa69cc461520a878
|
refs/heads/master
| 2022-12-10T13:16:45.788301
| 2019-05-28T11:55:18
| 2019-05-28T11:55:18
| 187,600,786
| 0
| 0
|
MIT
| 2022-12-08T05:11:22
| 2019-05-20T08:34:57
|
CSS
|
UTF-8
|
Python
| false
| false
| 960
|
py
|
from django.contrib.auth import views
from django.urls import include, path
from common.views import handler404, handler500
app_name = 'crm'
urlpatterns = [
path('', include('common.urls', namespace="common")),
path('', include('django.contrib.auth.urls')),
path('m/', include('marketing.urls', namespace="marketing")),
path('accounts/', include('accounts.urls', namespace="accounts")),
path('leads/', include('leads.urls', namespace="leads")),
path('contacts/', include('contacts.urls', namespace="contacts")),
path('opportunities/',
include('opportunity.urls', namespace="opportunities")),
path('cases/', include('cases.urls', namespace="cases")),
path('emails/', include('emails.urls', namespace="emails")),
# path('planner/', include('planner.urls', namespace="planner")),
path('logout/', views.LogoutView, {'next_page': '/login/'}, name="logout"),
]
handler404 = handler404
handler500 = handler500
|
[
"samim@goplannr.com"
] |
samim@goplannr.com
|
377a3ba0170c01a9fbd9fb9264a082be80f76179
|
d9fb009fc72bb1313471981952fa0294f73b0995
|
/books/users/views.py
|
0f7892332956dd074676e587f25b442c6a17bbbc
|
[] |
no_license
|
andreytp/django_for_professionals
|
da1e4d09f5eaffb0c8f3f43773c18fdd3c299789
|
01f8e4ee43913504a37ec142f85d66d31c30be3c
|
refs/heads/master
| 2023-04-26T16:00:58.308284
| 2021-05-18T14:01:24
| 2021-05-18T14:01:24
| 360,420,493
| 0
| 0
| null | 2021-05-18T14:01:25
| 2021-04-22T06:51:36
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 275
|
py
|
from django.urls import reverse_lazy
from django.views import generic
from .forms import CustomUserCreationForm
class SignupPageView(generic.CreateView):
form_class = CustomUserCreationForm
success_url = reverse_lazy('login')
template_name = 'signup.html'
|
[
"andreytp@gmail.com"
] |
andreytp@gmail.com
|
33ad41188abda71028d7183b04d01f13cc25d200
|
4cd1427ceec3038dd9b21052dc0f0112c4edc0bf
|
/venetia-build/tls/client.py
|
d6d9b9085e05c72c3d7603880bcda72d9ee4e527
|
[] |
no_license
|
VenetiaIO/venetia-cli
|
f9255f25ab544959c02765b1bbc2673a0b3ac1f4
|
7d572c1917e28752f2cb01bed56ef4910c930e78
|
refs/heads/master
| 2023-08-17T01:13:59.353630
| 2021-10-10T20:39:36
| 2021-10-10T20:39:36
| 292,083,004
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,499
|
py
|
from ctypes import (
cdll
, c_int
, Structure
, c_int32
, c_char_p
, c_uint32
, CFUNCTYPE
, pythonapi
, sizeof
)
import base64
import json
import time
from enum import Enum
from .utils import ClientError, GoString, GoClient
from .utils import Fingerprint, Lib, Response
class Session:
loaded = False
is_listening = False
lib = None
client_id = 0
BROWSER_DICT = dict(chrome=0, firefox=1, ff=1)
@classmethod
def _get_clients(cls):
return
def __del__(self):
if not self.client_id:
return
self.lib.lib.delete_client(self.client_id)
def __init__(self, proxy=None, browser=Fingerprint.CHROME, ja3=None, timeout=20):
self.lib = Lib()
self.client_id, self.fingerprint, self.proxy = self.lib.new_client(proxy, browser, ja3, timeout=timeout)
self.ja3 = ja3
def request(self, method, url, **kw):
return self.lib.request(self.client_id, method, url, **kw)
def get(self, url, **kw):
return self.request(url, "GET", **kw)
def post(self, url, **kw):
return self.request(url, "POST", **kw)
def patch(self, url, **kw):
return self.request(url, "PATCH", **kw)
def delete(self, url, **kw):
return self.request(url, "DELETE", **kw)
def put(self, url, **kw):
return self.request(url, "PUT", **kw)
|
[
"charliebottomley11@gmail.com"
] |
charliebottomley11@gmail.com
|
f958b6a09eca7e9a420cecd152efe5665c7ddde1
|
4162b07bca93cbc52da79cd7216369ffa5fb6853
|
/app/routes.py
|
ad0be74a823e7d9ef763b085aa9fb18b92844718
|
[] |
no_license
|
YasinVeliyev/Mastering_Flask
|
8149226dc8efd07e1d195eddfd71347ed05a32f2
|
39c86d14dcd1c8dabbde2d12f4a745feea75cdf3
|
refs/heads/master
| 2023-04-09T17:21:31.806467
| 2021-04-25T09:11:31
| 2021-04-25T09:11:31
| 361,345,992
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 84
|
py
|
from app import app
@app.route("/")
def index():
return '<h1>Hello docker</h1>'
|
[
"veliyev.yasin@gmail.com"
] |
veliyev.yasin@gmail.com
|
81b390458f34318f9d2cd513c076734aa66901b6
|
99bab2b11bc4c56428b235579a5865c2efc447be
|
/delete list.py
|
3ef3ea620830d1f7cf8ba30d3ed20f669bfa60e1
|
[] |
no_license
|
Anbumani-Sekar/python-codings
|
dc8ecee19dc916363020f3e8d660f54eb4bcd85e
|
6bae1617716c6148107c71080d9aebbd18c01d9a
|
refs/heads/main
| 2023-08-01T01:26:37.228683
| 2021-09-21T02:20:52
| 2021-09-21T02:20:52
| 408,660,620
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 126
|
py
|
list=[0,9,8,7,6,6,3,9,4,4,3,3,2,2,1]
r=len(list)
print(r)
t=r-5
print(t)
del list[:]
print("delete the number", list)
|
[
"noreply@github.com"
] |
Anbumani-Sekar.noreply@github.com
|
97852c4d02f44c79de41b50062887c8ef3244111
|
f3b82ef97e6a26d2fb3c2132efc394b4e026affd
|
/keras2/keras72_1_vgg19_cifar.py
|
6a3b9899279ed69c9b249f3b7b4566943941be77
|
[] |
no_license
|
dwg920302/study_tensorflow
|
f50c559486deb5f0087ff430134a23ffa0c13123
|
0075c87484766fa37949e411266d90b7933b45f7
|
refs/heads/main
| 2023-07-20T12:53:31.592629
| 2021-09-02T08:50:50
| 2021-09-02T08:50:50
| 383,744,279
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,490
|
py
|
# 실습
# 시파10과 시파100으로 모델 만들것
# Trainable = True or False 비교
# FC vs GlobalAvgPool 비교
# 같은 방법으로 Xception, Resnet50, 101, InceptionV3, InceptionResNetV2, DenseNet121, MobileNetV2, NasNetMobile, EfficientNetB0
from tensorflow.keras.applications import VGG19
from tensorflow.keras.layers import Dense, Dropout, Flatten, GlobalAvgPool2D
from tensorflow.keras.models import Sequential
from sklearn.preprocessing import OneHotEncoder, MaxAbsScaler
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.callbacks import EarlyStopping
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
encoder = OneHotEncoder(sparse=False)
y_train = encoder.fit_transform(y_train.reshape(-1, 1))
y_test = encoder.transform(y_test.reshape(-1, 1))
scaler = MaxAbsScaler()
x_train = scaler.fit_transform(x_train.reshape(x_train.shape[0], x_train.shape[1] * x_train.shape[2] * x_train.shape[3])).reshape(
x_train.shape[0], x_train.shape[1], x_train.shape[2], x_train.shape[3])
x_test = scaler.transform(x_test.reshape(x_test.shape[0], x_test.shape[1] * x_test.shape[2] * x_test.shape[3])).reshape(
x_test.shape[0], x_test.shape[1], x_test.shape[2], x_test.shape[3])
def model_1(pre_model):
model = Sequential()
model.add(pre_model)
model.add(Flatten())
model.add(Dropout(3/8))
model.add(Dense(64, activation='relu'))
model.add(Dropout(3/8))
model.add(Dense(10, activation='softmax'))
return model
def model_2(pre_model):
model = Sequential()
model.add(pre_model)
model.add(GlobalAvgPool2D())
model.add(Dropout(3/8))
model.add(Dense(64, activation='relu'))
model.add(Dropout(3/8))
model.add(Dense(10, activation='softmax'))
return model
trainables = [True, False]
model_names = [[model_1, 'Flatten'], [model_2, 'GlobalAvgPool']]
es = EarlyStopping(patience=5, verbose=1, restore_best_weights=True)
for trainable in trainables:
for loop in model_names:
model = loop[0]
bc = loop[1]
pre_model = VGG19(weights='imagenet', include_top=False, input_shape=(32, 32, 3), classifier_activation='softmax')
ad = ''
if trainable == True:
pre_model.trainable = True
ad = 'Trainable'
model = model_1(pre_model)
else:
pre_model.trainable = False
ad = 'Non-Trainable'
model = model_2(pre_model)
model.compile(loss="categorical_crossentropy", optimizer='adam', metrics=['accuracy'])
model.fit(x_train, y_train, batch_size=128, epochs=25, verbose=1, validation_split=1/8, shuffle=True, callbacks=es)
loss = model.evaluate(x_test, y_test)
print('[Condition : ', ad, ' ', bc, ']')
print('loss = ', loss[0])
print('accuracy = ', loss[1])
'''
[Condition : Trainable Flatten ]
loss = 2.302602767944336
accuracy = 0.10000000149011612
[Condition : Trainable GlobalAvgPool ]
loss = 0.7797176837921143
accuracy = 0.7730000019073486
[Condition : Non-Trainable Flatten ]
loss = 1.3100067377090454
accuracy = 0.5450000166893005
[Condition : Non-Trainable GlobalAvgPool ]
loss = 1.3116893768310547
accuracy = 0.5453000068664551
'''
|
[
"dwg920302@gmail.com"
] |
dwg920302@gmail.com
|
c17395dcc9f451821d1cb230236f481b80f0080a
|
2c234a7eeb7609e753fb01cf756ab26caf5bd5a1
|
/env/Scripts/django-admin.py
|
a42f641ff6ec13f2135cb079d8f0a88c3d94d323
|
[] |
no_license
|
AprilDDay/myProject_python
|
747d6865cf52ee0fa6b4274ae6fe1b87f99d473a
|
26f344457d6fe23207a044a4f9e0cf7c96753490
|
refs/heads/main
| 2023-07-18T15:09:17.061943
| 2021-09-07T15:08:35
| 2021-09-07T15:08:35
| 404,008,810
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 702
|
py
|
#!c:\users\user\python_development\myproject\env\scripts\python.exe
# When the django-admin.py deprecation ends, remove this script.
import warnings
from django.core import management
try:
from django.utils.deprecation import RemovedInDjango40Warning
except ImportError:
raise ImportError(
'django-admin.py was deprecated in Django 3.1 and removed in Django '
'4.0. Please manually remove this script from your virtual environment '
'and use django-admin instead.'
)
if __name__ == "__main__":
warnings.warn(
'django-admin.py is deprecated in favor of django-admin.',
RemovedInDjango40Warning,
)
management.execute_from_command_line()
|
[
"april.day@gmail.com"
] |
april.day@gmail.com
|
9b8f412e85abf873517f0758e3b4787fb2a02a24
|
26deb4a36da77b76bb546755e1f7a456066bbab2
|
/examples/NodeBox-Site/blines1.py
|
b7fbfba2fb9030a250eb267c7fb013bf2e177c74
|
[
"MIT"
] |
permissive
|
karstenw/nodebox-pyobjc
|
36cfd441f24b38d47975e642bf6e63b8e65e2246
|
cd648d5ea44b223f999cfa1f7986fa93533f593e
|
refs/heads/master
| 2023-08-03T10:47:42.663701
| 2023-07-24T08:07:55
| 2023-07-24T08:07:55
| 12,832,922
| 9
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 632
|
py
|
# You'll need the Boids and Cornu libraries.
boids = ximport("boids")
cornu = ximport("cornu")
size(550, 550)
background(0.1, 0.1, 0.0)
nofill()
flock = boids.flock(10, 0, 0, WIDTH, HEIGHT)
n = 70
for i in range(n):
flock.update(shuffled=False)
# Each flying boid is a point.
points = []
for boid in flock:
points.append((boid.x, boid.y))
# Relativise points for Cornu.
for i in range(len(points)):
x, y = points[i]
x /= 1.0 * WIDTH
y /= 1.0 * HEIGHT
points[i] = (x,y)
t = float(i) / n
stroke(0.9, 0.9, 4*t, 0.6*t)
cornu.drawpath(points, tweaks=0)
|
[
"karstenwo@web.de"
] |
karstenwo@web.de
|
ff51a2a7d4fde46093f1f1c019c23339e9806b1a
|
b1519cb8a16631f607a0dd10aa647bf094830387
|
/2018/day3.py
|
8657a2aefac9144fabc99f96ec572af28effe6f8
|
[] |
no_license
|
OpportunV/adventofcode
|
786b83b34f82eaac0125ad2522393cb59ed1e06d
|
4238996cb53c25c26d8840d9fedd0433229a7ff3
|
refs/heads/master
| 2023-01-23T14:38:41.041073
| 2023-01-07T23:22:54
| 2023-01-07T23:22:54
| 227,684,529
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 950
|
py
|
import re
from collections import defaultdict
def part_one(inp):
claims = defaultdict(int)
for ind, x, y, w, h in inp:
for i in range(w):
for j in range(h):
claims[(x + i, y + j)] += 1
return len([i for i in claims.values() if i > 1])
def part_two(inp):
variants = defaultdict(set)
claims = defaultdict(int)
for ind, x, y, w, h in inp:
for i in range(w):
for j in range(h):
claims[(x + i, y + j)] += 1
variants[ind].add((x + i, y + j))
for k, v in variants.items():
if all(map(lambda a: claims[a] == 1, v)):
return k
def main():
with open(r'input\day3.txt') as fin:
inp = fin.read().splitlines()
inp = [tuple(map(int, re.findall(r'\d+', line))) for line in inp]
print(part_one(inp))
print(part_two(inp))
if __name__ == '__main__':
main()
|
[
"RsTGear@gmail.com"
] |
RsTGear@gmail.com
|
09fe1c3df1ed49f792c91915b3fe463f0e398a79
|
c46c41b6fac5b99dff48eaeed66ff6bba7e038fb
|
/Lab/CodeChallenge/main.py
|
5a4c0d58527c5778f55c5ab85499e599a2b75d64
|
[] |
no_license
|
Tidesun/BigDataSummer2018
|
71bf2c4e364dd9bcad41d2eb16dda083cd4aaf4b
|
1809568778737a6a44976824b0fc7d5ab13be5f3
|
refs/heads/master
| 2020-03-22T16:28:20.181997
| 2018-07-27T16:17:29
| 2018-07-27T16:17:29
| 140,329,199
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 292
|
py
|
from pyspark import SparkContext
sc=SparkContext()
input=sc.textFile("input.txt").map(lambda x:filter(lambda item:item!='',x.split(" "))).map(lambda x:(x[0],x[1]))
RevInput=input.map(lambda x:(x[1],x[0]))
res=RevInput.join(input).values().collect()
for item in res:
print item[0],item[1]
|
[
"lihaoran9836@gmail.com"
] |
lihaoran9836@gmail.com
|
75d78160410e7efd37098c9720a470a3996749e0
|
e42a61b7be7ec3412e5cea0ffe9f6e9f34d4bf8d
|
/a10sdk/core/interface/interface_ethernet_lldp.py
|
779792505bc0d96a03e8daebbc04320579a4aa85
|
[
"Apache-2.0"
] |
permissive
|
amwelch/a10sdk-python
|
4179565afdc76cdec3601c2715a79479b3225aef
|
3e6d88c65bd1a2bf63917d14be58d782e06814e6
|
refs/heads/master
| 2021-01-20T23:17:07.270210
| 2015-08-13T17:53:23
| 2015-08-13T17:53:23
| 40,673,499
| 0
| 0
| null | 2015-08-13T17:51:35
| 2015-08-13T17:51:34
| null |
UTF-8
|
Python
| false
| false
| 5,406
|
py
|
from a10sdk.common.A10BaseClass import A10BaseClass
class TxDot1Cfg(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param link_aggregation: {"default": 0, "type": "number", "description": "Interface link aggregation information", "format": "flag"}
:param vlan: {"default": 0, "type": "number", "description": "Interface vlan information", "format": "flag"}
:param tx_dot1_tlvs: {"default": 0, "type": "number", "description": "Interface lldp tx IEEE 802.1 Organizationally specific TLVs configuration", "format": "flag"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "tx-dot1-cfg"
self.DeviceProxy = ""
self.link_aggregation = ""
self.vlan = ""
self.tx_dot1_tlvs = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class NotificationCfg(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param notification: {"default": 0, "type": "number", "description": "Interface lldp notification configuration", "format": "flag"}
:param notif_enable: {"default": 0, "type": "number", "description": "Interface lldp notification enable", "format": "flag"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "notification-cfg"
self.DeviceProxy = ""
self.notification = ""
self.notif_enable = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class TxTlvsCfg(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param system_capabilities: {"default": 0, "type": "number", "description": "Interface lldp system capabilities", "format": "flag"}
:param system_description: {"default": 0, "type": "number", "description": "Interface lldp system description", "format": "flag"}
:param management_address: {"default": 0, "type": "number", "description": "Interface lldp management address", "format": "flag"}
:param tx_tlvs: {"default": 0, "type": "number", "description": "Interface lldp tx TLVs configuration", "format": "flag"}
:param exclude: {"default": 0, "type": "number", "description": "Configure which TLVs excluded. All basic TLVs will be included by default", "format": "flag"}
:param port_description: {"default": 0, "type": "number", "description": "Interface lldp port description", "format": "flag"}
:param system_name: {"default": 0, "type": "number", "description": "Interface lldp system name", "format": "flag"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "tx-tlvs-cfg"
self.DeviceProxy = ""
self.system_capabilities = ""
self.system_description = ""
self.management_address = ""
self.tx_tlvs = ""
self.exclude = ""
self.port_description = ""
self.system_name = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class EnableCfg(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param rx: {"default": 0, "type": "number", "description": "Enable lldp rx", "format": "flag"}
:param tx: {"default": 0, "type": "number", "description": "Enable lldp tx", "format": "flag"}
:param rt_enable: {"default": 0, "type": "number", "description": "Interface lldp enable/disable", "format": "flag"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "enable-cfg"
self.DeviceProxy = ""
self.rx = ""
self.tx = ""
self.rt_enable = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class Lldp(A10BaseClass):
"""Class Description::
Interface lldp configuration.
Class lldp supports CRUD Operations and inherits from `common/A10BaseClass`.
This class is the `"PARENT"` class for this module.`
:param uuid: {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/interface/ethernet/{ifnum}/lldp`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.required=[]
self.b_key = "lldp"
self.a10_url="/axapi/v3/interface/ethernet/{ifnum}/lldp"
self.DeviceProxy = ""
self.tx_dot1_cfg = {}
self.notification_cfg = {}
self.tx_tlvs_cfg = {}
self.enable_cfg = {}
self.uuid = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
|
[
"doug@parksidesoftware.com"
] |
doug@parksidesoftware.com
|
72aa38144b6802415587b485917195c4518f607e
|
533994be9ac790809db20de95cbeaef2095e9563
|
/Py-practice/0331-1.py
|
47069ae56d407c62ad1f5f472e07e2f32816f50f
|
[] |
no_license
|
kaitlynning/Py-practice
|
29a85784a533cfcabd7dbed0f3d5763f9edd67cf
|
3bada17bbab49b4d1b5d4482adb24b75914c0809
|
refs/heads/master
| 2021-02-12T07:29:55.262943
| 2020-06-23T15:59:14
| 2020-06-23T15:59:14
| 244,573,260
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 558
|
py
|
def delete_starting_evens(lst):
#While loops 符合條件了才認為是真,才可以執行以下程式碼
#check at least 1 element by len(lst); check 1 element is ood by mod(%)
while (len(lst) > 0 and lst[0] % 2 == 0):
#if both are True, slice off 1 element by lst = lst[1:]
lst = lst[1:]
return lst
print(delete_starting_evens([4, 8, 10, 11, 12, 15]))
print(delete_starting_evens([4, 8, 10]))
'''
#if顧名思義就是如果怎樣,那就怎樣
#while True表示永遠為真,不管是什麼條件都會向下執行
[11, 12, 15]
[]
'''
|
[
"noreply@github.com"
] |
kaitlynning.noreply@github.com
|
98e1ad02ddffabfd2110beaf42a1b334e02b0259
|
c29511d996d1780f68cf4512c2cf05ef3148e833
|
/face.py
|
3f5144a1909ad4071740d6ed02e4edb090e90b70
|
[] |
no_license
|
manav014/face_recognition_live
|
23ef373b82a5c55820e642793a45a37a19759de1
|
5cfdcaf74c2db2678029ec08279be96da6580e38
|
refs/heads/master
| 2020-12-22T03:18:50.643211
| 2020-10-20T12:10:23
| 2020-10-20T12:10:23
| 236,654,775
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,321
|
py
|
import face_recognition
import cv2
import numpy as np
# This is a demo of running face recognition on live video from your webcam. It's a little more complicated than the
# other example, but it includes some basic performance tweaks to make things run a lot faster:
# 1. Process each video frame at 1/4 resolution (though still display it at full resolution)
# 2. Only detect faces in every other frame of video.
# PLEASE NOTE: This example requires OpenCV (the `cv2` library) to be installed only to read from your webcam.
# OpenCV is *not* required to use the face_recognition library. It's only required if you want to run this
# specific demo. If you have trouble installing it, try any of the other demos that don't require it instead.
# Get a reference to webcam #0 (the default one)
video_capture = cv2.VideoCapture(0)
# Load a sample picture and learn how to recognize it.
obama_image = face_recognition.load_image_file("/home/manav/Desktop/pics/v.jpeg")
obama_face_encoding = face_recognition.face_encodings(obama_image)[0]
# Load a second sample picture and learn how to recognize it.
biden_image = face_recognition.load_image_file("/home/manav/Desktop/pics/m.jpeg")
biden_face_encoding = face_recognition.face_encodings(biden_image)[0]
# Load a second sample picture and learn how to recognize it.
gul_image = face_recognition.load_image_file("/home/manav/Desktop/pics/g.jpeg")
gul_face_encoding = face_recognition.face_encodings(gul_image)[0]
# Create arrays of known face encodings and their names
known_face_encodings = [
obama_face_encoding,
biden_face_encoding,
gul_face_encoding,
]
known_face_names = [
"vanshita",
"Manav",
"Gul",
]
# Initialize some variables
face_locations = []
face_encodings = []
face_names = []
process_this_frame = True
while True:
# Grab a single frame of video
ret, frame = video_capture.read()
# Resize frame of video to 1/4 size for faster face recognition processing
small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
# Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
rgb_small_frame = small_frame[:, :, ::-1]
# Only process every other frame of video to save time
if process_this_frame:
# Find all the faces and face encodings in the current frame of video
face_locations = face_recognition.face_locations(rgb_small_frame)
face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
face_names = []
for face_encoding in face_encodings:
# See if the face is a match for the known face(s)
matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
name = "Unknown"
# # If a match was found in known_face_encodings, just use the first one.
# if True in matches:
# first_match_index = matches.index(True)
# name = known_face_names[first_match_index]
# Or instead, use the known face with the smallest distance to the new face
face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
best_match_index = np.argmin(face_distances)
if matches[best_match_index]:
name = known_face_names[best_match_index]
face_names.append(name)
process_this_frame = not process_this_frame
# Display the results
for (top, right, bottom, left), name in zip(face_locations, face_names):
# Scale back up face locations since the frame we detected in was scaled to 1/4 size
top *= 4
right *= 4
bottom *= 4
left *= 4
# Draw a box around the face
cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
# Draw a label with a name below the face
cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
font = cv2.FONT_HERSHEY_DUPLEX
cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
# Display the resulting image
cv2.imshow('Video', frame)
# Hit 'q' on the keyboard to quit!
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# Release handle to the webcam
video_capture.release()
cv2.destroyAllWindows()
|
[
"noreply@github.com"
] |
manav014.noreply@github.com
|
5fa425dcb8d840ca75b7c20735717559a369b325
|
f1cb02057956e12c352a8df4ad935d56cb2426d5
|
/LeetCode/742. Closest Leaf in a Binary Tree/Solution.py
|
44ea63c0c3e48cd0ae96c1c13cf6390f4cbc23c6
|
[] |
no_license
|
nhatsmrt/AlgorithmPractice
|
191a6d816d98342d723e2ab740e9a7ac7beac4ac
|
f27ba208b97ed2d92b4c059848cc60f6b90ce75e
|
refs/heads/master
| 2023-06-10T18:28:45.876046
| 2023-05-26T07:46:42
| 2023-05-26T07:47:10
| 147,932,664
| 15
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,389
|
py
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
def is_leaf(node: TreeNode):
return node and not (node.left or node.right)
class Solution:
def findClosestLeaf(self, root: TreeNode, k: int) -> int:
# Time and Space Complexity: O(N)
adj_lists = {}
leaves = set()
self.dfs(root, None, adj_lists, leaves)
in_deque = set([k])
to_check = deque()
to_check.append((k, 0))
while len(to_check) > 0:
node, dist = to_check.popleft()
if node in leaves:
return node
for neighbor in adj_lists[node]:
if neighbor not in in_deque:
in_deque.add(neighbor)
to_check.append((neighbor, dist + 1))
def dfs(self, node: TreeNode, par: TreeNode, adj_lists: dict, leaves: dict):
if is_leaf(node):
leaves.add(node.val)
adj_lists[node.val] = []
if par:
adj_lists[node.val].append(par.val)
if node.left:
adj_lists[node.val].append(node.left.val)
self.dfs(node.left, node, adj_lists, leaves)
if node.right:
adj_lists[node.val].append(node.right.val)
self.dfs(node.right, node, adj_lists, leaves)
|
[
"nhatsmrt@uw.edu"
] |
nhatsmrt@uw.edu
|
35504dd41d6667322da8f129698cb14cb7b0760b
|
c22933fe03ccf42b16c219c0f32cdcc7dacf816b
|
/monday/search/views.py
|
f4ebed02639e90cdd2791924f58fd563f2911334
|
[] |
no_license
|
sabyasachi61roy/monday_combo
|
0a41f91113ced39fe59dbba3506318dec414d4b9
|
ca0a625bc9807d861426b6285ea4ebf139a7da77
|
refs/heads/master
| 2022-11-13T22:20:53.206452
| 2020-06-23T12:56:56
| 2020-06-23T12:56:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,429
|
py
|
from django.shortcuts import render, redirect
from django.views.generic import ListView
from products.models import Combo, Addon
class SearchProductView(ListView):
template_name = "search/view.html"
def get_context_data(self, *args, **kwargs):
context = super(SearchProductView, self).get_context_data(*args, **kwargs)
query = self.request.GET.get('q')
combo = Combo.objects.filter(title__icontains=query)
addon = Addon.objects.filter(name__icontains=query)
context['query'] = query
context['combo'] = combo
context['addon'] = addon
# SearchQuery.objects.create(query=query)
return context
def get_queryset(self, *args, **kwargs):
request = self.request
method_dict = request.GET
query = method_dict.get('q', None) # method_dict['q']
print("q",query)
combo = Combo.objects.filter(title__icontains=query)
addon = Addon.objects.filter(name__icontains=query)
if query is not None:
print("1-c",combo)
if addon.exists():
print("1-a",addon)
return Addon.objects.filter(name__icontains=query)
print("2-c",combo)
return Combo.objects.filter(title__icontains=query)
return redirect("/")
'''
__icontains = field contains this
__iexact = fields is exactly this
'''
|
[
"debopriyo09@outlook.com"
] |
debopriyo09@outlook.com
|
b02584594651c72b4f66f4bc79cb227083074d45
|
54e93c632100af4b88383b12283d7cda248b87d7
|
/test-keras.py
|
871edacdce6452d4f89011890894d7b8e14ced8b
|
[] |
no_license
|
loulidanyl/RESEAU-NEURONES---Projet
|
f40db9aefc26435a497ce1713a1ab865edb7a190
|
c99e4bd7bb5891362bb36031fafb38330c217965
|
refs/heads/master
| 2020-04-08T18:51:13.649115
| 2018-11-29T07:52:30
| 2018-11-29T07:52:30
| 159,627,879
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 842
|
py
|
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.optimizers import RMSprop
model.add(Dense(units=64, activation='relu', input_dim=100))
model.add(Dense(units=10, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
model.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.SGD(lr=0.01, momentum=0.9, nesterov=True))
# x_train and y_train are Numpy arrays --just like in the Scikit-Learn API.
model.fit(x_train, y_train, epochs=5, batch_size=32)
model.train_on_batch(x_batch, y_batch)
loss_and_metrics = model.evaluate(x_test, y_test, batch_size=128)
classes = model.predict(x_test, batch_size=128)
|
[
"noreply@github.com"
] |
loulidanyl.noreply@github.com
|
d78a2caec6929491b61eee56c75f8265213f0a5d
|
781e2692049e87a4256320c76e82a19be257a05d
|
/all_data/exercism_data/python/word-count/d4cadeee34e24f3b87cf23eedaeb8115.py
|
8167678efb593ef506769482683415933331d5d4
|
[] |
no_license
|
itsolutionscorp/AutoStyle-Clustering
|
54bde86fe6dbad35b568b38cfcb14c5ffaab51b0
|
be0e2f635a7558f56c61bc0b36c6146b01d1e6e6
|
refs/heads/master
| 2020-12-11T07:27:19.291038
| 2016-03-16T03:18:00
| 2016-03-16T03:18:42
| 59,454,921
| 4
| 0
| null | 2016-05-23T05:40:56
| 2016-05-23T05:40:56
| null |
UTF-8
|
Python
| false
| false
| 192
|
py
|
from collections import Counter
def word_count(a_string):
words = a_string.split()
word_count = Counter()
for word in words:
word_count[word] += 1
return word_count
|
[
"rrc@berkeley.edu"
] |
rrc@berkeley.edu
|
43884a3f9a30ff09938668f864f634fdb797befa
|
613b72e6286a170f304bca088197057f76d1d289
|
/data_structures/trie_applications.py
|
e66679c6af243bf2c02c12529fe34c86dd1e4775
|
[] |
no_license
|
counterjack/Python--ds-algo-more
|
cb532dd30b6f9000f63f63a17eda9d1ed4b6a364
|
ced096d5a38763bf976259798f343b2485ede99e
|
refs/heads/master
| 2021-06-30T02:29:47.117945
| 2020-09-04T05:06:36
| 2020-09-04T05:06:36
| 142,243,153
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,919
|
py
|
# /bin.python
"""
Refer : https://www.geeksforgeeks.org/trie-insert-and-search/
Strings used essentially as in
1. Search Engines
2. Genome Analytics
3. Data Analytics
4. Mobile Name Searching
5.
"""
class TrieNode(object):
# node that can have maximum 26 childrens { alphabets a-z }
def __init__(self):
self.children = [None]*26
self.is_end_of_word = False
class Trie(object):
def __init__(self):
"""
Init method creating root node
"""
self.root = self.get_node()
def get_node(self):
"""
will return new node
"""
return TrieNode()
def get_index(self, char):
"""
will return index of the given char in alphabet series.
{ a:1, b:2, ... z=26}
"""
return ord(char)-ord('a')
def insert(self, key):
parent_node = self.root
length = len(key)
for item in key:
index = self.get_index(item)
if parent_node.children[index] is None:
# insert a new node at that index
parent_node.children[index] = self.get_node()
parent_node = parent_node.children[index]
# mark leaf node as last node
parent_node.is_end_of_word = True
def search(self, key):
"""
search the given key in tree.
"""
parent_node = self.root
length = len(key)
for item in key:
index = self.get_index(item)
if parent_node.children[index] is None:
return False
parent_node = parent_node.children[index]
return parent_node is not None and parent_node.is_end_of_word
def main():
    """Build a small trie and report membership for a few probe words."""
    trie = Trie()
    keys = ["the", "a", "there", "anaswe", "any",
            "by", "their"]
    # Indexed by the boolean returned from Trie.search
    # (False -> 0 -> absent, True -> 1 -> present).
    output = ["Not present in trie",
              "Present in trie"]  # fixed typo: was "Present in tire"
    # Plain loop instead of the original side-effect-only list
    # comprehension ``[trie.insert(key) for key in keys]``.
    for key in keys:
        trie.insert(key)
    for probe in ("the", "these", "their", "thaw"):
        print("{} ---- {}".format(probe, output[trie.search(probe)]))

main()
|
[
"=ankur.agrawal@doctorinsta.com"
] |
=ankur.agrawal@doctorinsta.com
|
4f4872d3605a801de0509b029c87572632fd9cc3
|
eecc2e979ab124f835b8f9e2a11f811b27033299
|
/products/migrations/0005_auto_20201120_1854.py
|
801596aab31ef85e3335820f7c70b269d4ef2831
|
[] |
no_license
|
MonicaVizechi/ludevelops
|
4e682ee45a4e1e6bc4a29df70cdafeaa4876ab75
|
37503e125f31495b27989df6602ab99c81b3f352
|
refs/heads/master
| 2023-01-14T11:22:19.559142
| 2020-11-21T13:04:03
| 2020-11-21T13:04:03
| 313,788,346
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 503
|
py
|
# Generated by Django 3.1.3 on 2020-11-20 21:54
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: narrows Product.status to a
    # one-character choice field ('A' = Active / 'I' = Inactive) and
    # drops the standalone Status model (presumably superseded by the
    # char field — confirm against model history before editing).
    dependencies = [
        ('products', '0004_auto_20201118_2335'),
    ]
    operations = [
        migrations.AlterField(
            model_name='product',
            name='status',
            field=models.CharField(choices=[('A', 'Active'), ('I', 'Inactive')], max_length=1),
        ),
        migrations.DeleteModel(
            name='Status',
        ),
    ]
|
[
"jaquelineandradenogueira@gmail.com"
] |
jaquelineandradenogueira@gmail.com
|
e3cd5ff07962fe2a7cd18a157eba7b5404e9f3dc
|
abf7262d573780d90471e9bc64ad25194008e418
|
/shakhandar_davrisheva_midterm.py
|
418cfe8d41f39fa31c4b30aaf741b3a0f1236078
|
[] |
no_license
|
shdrvs/shakhandar_davrisheva_Midterm
|
3320f02e510290b9e19ae72b66e295a080b1806a
|
8cb8914bdeeff472d5a619b13d8544f403db6303
|
refs/heads/master
| 2020-04-07T19:54:19.959226
| 2018-11-22T08:42:16
| 2018-11-22T08:42:16
| 158,667,920
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,383
|
py
|
# -*- coding: utf-8 -*-
"""Shakhandar_davrisheva_MIDTERM.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1bOkIhLHHl-FPUwBxOSkB3tDwJe3Rizcy
*Hello*, please change the file name as your name_surname_MIDTERM.ipynb and upload to the folder 'DROP MIDTERMS HERE' on
Drive.
ex: atay_ilgun_MIDTERM.ipynb
Also, create all TD related tasks on one TD file and upload it.
ex: atay_ilgun_MIDTERM.tox
"""
"""Describe what an algorithm is.
Algorithms is a stated solution of a problem in order and in simple way .
Explain in what ways algorithmic design is different than the brute-force regular design approach. [50 words]
Brute force means that you will go through all possible solutions . For example, as we talked in lesson aboout a chess game, , the brute force will go through all possible combination of moves, without taking anything in consideration. But when it comes to algorithmic design is method which used to create mathematical processing to solve problems.
In 100 words meditate on how an new approach on design such as one that is algorithmic could affect your work practice.
Even nowadays every fıeld has an impact of algorithmic design, also architecture .In architecture even it has own term as parametric design . It is a process, which is based on algorithmic thinking that help us to the expression of parameters and rules that, together, define, encode and clarify the relationship between design intent and design response. Algorithmic design now goes together with architecture,the main reason to my point of view is an identical design , mostly buildings which are designed by slgorithmic design are outstanding , they became a landmark of that country , for instance Zaha Hadids Heydar Alieyev cultural center in Baku . Now algorithmic design has affected my practice and in the future it will increase its influence on us . It forcing to change even our education system , architecture students will start to learn coding as software engineers maybe . In the futere as in all fields it will be not enough to have one specialization .
Create a python dictionary for your name surname and year of birth.
"""
# Name/surname/birth record (renamed from ``dict``, which shadowed the builtin).
student = {'name': 'shakhnandar', 'surname': 'davrisheva', 'birth': '1997'}
student['name']

# Task: if the first letter of the name is 'a', print 'my initial is a!'.
if 'shakhandar'[0] == 'a':
    print('my initial is a!')
else:
    print('my initial is s!')

# Task: divisibility of the surname's length by 3.  A length is
# divisible exactly when len % 3 == 0; the original truthiness test
# (``if len(...) % 3:``) had the branches inverted.
if len('davrisheva') % 3 == 0:
    print('it is divisible')
else:
    print('not divisible')

# Task: change the year of birth to 2678.  The original never updated
# the record; mutate it, then verify via the stored value.
student['birth'] = '2678'
if student['birth'][0] == '2':
    print('birth date is 2678')
else:
    print('wrong')

# TOUCHDESIGNER TIME (non-Python tasks, kept for reference):
# - Create a red box [in geo comp] emitting particles, birth rate 2500.
# - Import a moving image and a sound file in TD; add at least two
#   filters to the image and make 2 filter parameters audio-responsive.
# - Create a torus and modify its x-size with an LFO of frequency 0.75.
# - Create a sphere and create an amorph moving shape using 'noise'.
# - Create a box and apply an image texture on it.
|
[
"noreply@github.com"
] |
shdrvs.noreply@github.com
|
091e4dd55804a4b69079d9b7c1066d23f0b9ada6
|
5a38fa0ba35d50d526777428d9447f9f5ff60768
|
/apps/base/auth.py
|
4e9bbbdd16f4036a323026c45f9385af5d867ff3
|
[
"MIT"
] |
permissive
|
sachazyto/nbproject
|
52eae7440a35548317657273a57d5c00e0439b51
|
d3c8c4f345f858ad22dcf890f6c16ff714e4ff45
|
refs/heads/master
| 2020-12-25T12:07:51.132233
| 2012-11-16T02:07:32
| 2012-11-16T02:07:32
| 3,731,961
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,549
|
py
|
"""
utils_auth.py - Authentication and per-user rights-check routines
License
Copyright (c) 2010 Massachusetts Institute of Technology.
MIT License (cf. MIT-LICENSE.txt or http://www.opensource.org/licenses/mit-license.php)
$ Id: $
"""
import models as M
import random, string
def confirmInvite(id):
    """Look up the invite whose key is *id*; if found, ensure the
    invited user has a Membership in the invited ensemble (creating one
    with the invite's admin flag if missing) and return the invite.
    Returns None when no invite matches the key."""
    invite = M.Invite.objects.filter(key=id)
    if len(invite) == 0:
        return None
    invite = invite[0]
    membership = M.Membership.objects.filter(user=invite.user_id, ensemble=invite.ensemble_id)
    if len(membership) == 0:
        # First confirmation: materialize the membership the invite promises.
        membership = M.Membership()
        membership.user = invite.user
        membership.ensemble = invite.ensemble
        membership.admin = invite.admin
        membership.save()
    return invite
def invite2uid(id):
    """Return the id of the user attached to invite key *id*, or None
    if no such invite exists."""
    matches = M.Invite.objects.filter(key=id)
    if not matches:
        return None
    return matches[0].user.id
def canReadFile(uid, id_source, req=None):
try:
id_source = int(id_source)
except ValueError:
return False
o = M.Membership.objects.filter(ensemble__in=M.Ensemble.objects.filter(ownership__in=M.Ownership.objects.filter(source__id=id_source, deleted=False))).filter(user__id=uid, deleted=False, guest=False)
return len(o)>0 or canGuestReadFile(uid, id_source, req)
def canDownloadPDF(uid, id_source):
try:
id_source = int(id_source)
except ValueError:
return False
o = M.Membership.objects.filter(ensemble__in=M.Ensemble.objects.filter(ownership__in=M.Ownership.objects.filter(source__id=id_source))).filter(user__id=uid)
return (len(o)>0 and (o[0].admin or o[0].ensemble.allow_download)) or canGuestDownloadPDF(id_source)
def canGuestReadFile(uid, id_source, req=None):
    """Return whether the ensemble owning *id_source* allows guests.

    Side effect: if guests are allowed and *uid* has no membership yet,
    silently enroll them as a guest member — optionally restoring their
    previous section via the ``pgid`` cookie, otherwise picking a random
    section when the ensemble uses random section assignment."""
    o = M.Ownership.objects.get(source__id=id_source)
    e = M.Ensemble.objects.get(pk=o.ensemble_id)
    if o.ensemble.allow_guest and len(M.Membership.objects.filter(user__id=uid, ensemble=e))==0:
        #add membership for guest user:
        m = M.Membership()
        m.user_id = uid
        m.ensemble_id = e.id
        m.guest = True
        if e.section_assignment == M.Ensemble.SECTION_ASSGT_RAND:
            #assign guest to a random section if there are sections, unless we
            #find a pgid cookie that corresponded to an existing section
            sections = M.Section.objects.filter(ensemble=e)
            if sections:
                if req is not None and "pgid" in req.COOKIES:
                    prev_sections = M.Section.objects.filter(membership__user__id=int(req.COOKIES.get("pgid")), membership__ensemble__id=e.id)
                    if len(prev_sections):
                        m.section = prev_sections[0]
                if m.section is None:
                    m.section = random.choice(sections)
        m.save()
    return o.ensemble.allow_guest
def canGuestDownloadPDF(id_source):
o = M.Ownership.objects.get(source__id=id_source)
return o.ensemble.allow_guest and o.ensemble.allow_download
def getGuest(ckey=None):
if ckey is None:
return createGuest()
o = None
try:
o = M.User.objects.get(confkey=ckey)
except M.User.DoesNotExist:
pass
return o if o is not None else createGuest()
def getCkeyInfo(ckey):
    """Resolve confirmation key *ckey* to its User, or None.

    Side effect: the first time a non-guest user is seen with
    ``valid == False``, mark them valid (first real login confirms
    the account)."""
    if ckey is None:
        return None
    o = None
    try:
        o = M.User.objects.get(confkey=ckey)
    except M.User.DoesNotExist:
        pass
    if o is not None and o.valid is False and o.guest is False:
        #first login as a non-guest: mark that user as valid
        o.valid = True
        o.save()
    return o
def canAnnotate(uid, eid):
"""Need to be a either a member of a group or a registered user for a public group """
o = M.Membership.objects.filter(ensemble__id=eid, user__id=uid)
if len(o)>0:
return True
#TODO registered user and public group ?
e = M.Ensemble.objects.get(pk=eid)
if e.allow_guest:
u = M.User.objects.get(pk=uid)
return not u.guest
return False
def addUser(email, password, conf, valid=0, guest=0):
    """Create and persist a new User row; guest users also get a
    GuestHistory entry.  Returns the saved user."""
    user = M.User()
    user.email = email
    user.password = password
    user.confkey = conf
    user.valid = valid
    user.guest = guest
    user.save()
    if user.guest:
        # Track guest-account creation for later auditing.
        M.GuestHistory(user=user).save()
    return user
def addInvite(key, id_user, id_ensemble, admin):
o = M.Invite(key=key, user_id=id_user, ensemble_id=id_ensemble, admin=admin)
o.save()
def createGuest():
    """Create a fresh guest user with a random 20-character
    confirmation key and a random 4-character password.

    ``range`` replaces ``xrange`` — identical here under Python 2 and
    a NameError avoided under Python 3.
    NOTE(review): ``random`` is not cryptographically secure; if these
    keys gate anything sensitive, switch to a stronger source."""
    alphabet = string.ascii_letters + string.digits
    key = "".join(random.choice(alphabet) for _ in range(20))
    email = "guest_%s@nb.test" % (key, )
    passwd = "".join(random.choice(alphabet) for _ in range(4))
    return addUser(email, passwd, key, 0, 1)
def getGuestCkey():
return createGuest().confkey
def user_from_email(email):
users = M.User.objects.filter(email=email)
return users[0] if len(users)==1 else None
def checkUser(email, password):
    """Return the single valid, non-guest user matching *email* and
    *password*, or None.  Email matching is case-insensitive.
    NOTE(review): this compares the stored password field directly —
    looks like plaintext storage; confirm and consider hashing."""
    matches = M.User.objects.filter(
        email=email.strip().lower(), password=password, valid=1, guest=0)
    if len(matches) == 1:
        return matches[0]
    return None
def canAddFolder(uid, id_ensemble, id_parent=None):
return canInsertFile(uid, id_ensemble, id_parent)
def canInsertFile(uid, eid, id_folder=None):
"""need to be an admin on that membership, and the folder (if not None) needs to be in this membership"""
m = M.Membership.objects.get(ensemble__id=eid, user__id=uid)
if id_folder is None:
return m.admin
else:
f = M.Folder.objects.get(pk=id_folder)
return f.ensemble_id == int(eid) and m.admin
def canRenameFile(uid, id):
"""need to be an admin on the ensemble that contains that file"""
o = M.Ownership.objects.filter(source__id=id)
e = M.Ensemble.objects.filter(ownership__in=o)
m = M.Membership.objects.filter(user__id=uid, ensemble__in=e)
return m.count()>0 and m[0].admin
def canRenameFolder(uid, id):
"""need to be an admin on the ensemble that contains that folder"""
e = M.Folder.objects.get(pk=id).ensemble
m = M.Membership.objects.filter(user__id=uid, ensemble=e)
return m.count()>0 and m[0].admin
def canEditAssignment(uid, id):
return canRenameFile(uid, id)
def canDeleteFile(uid, id):
return canRenameFile(uid, id)
def canDeleteFolder(uid, id):
    """
    - Need to be an admin on the ensemble that contains that folder.
    - Can't contain any file that's not already deleted
    - Can't contain any folder
    """
    e = M.Folder.objects.get(pk=id).ensemble
    m = M.Membership.objects.filter(user__id=uid, ensemble=e)
    # Live (non-deleted) files still inside the folder.
    o = M.Ownership.objects.filter(deleted=False, folder__id=id)
    # Direct subfolders.
    f = M.Folder.objects.filter(parent__id=id)
    return m.count()>0 and m[0].admin and o.count()==0 and f.count()==0
def canMoveFile(uid, id, id_dest=None):
return canRenameFile(uid, id)
def __isDirOrParent(id_a, id_b):
    # Returns True if a == b or a is an ancestor (parent, grandparent,
    # ...) of b.  Used to forbid moving a folder into itself/its subtree.
    d = M.Folder.objects.get(pk=id_b)
    while d.parent_id is not None:
        if d.id == id_a:
            return True
        d = d.parent
    # d is now the root of b's chain; a matches either b itself (when b
    # is a root) or the topmost ancestor.
    return id_a == d.id
def canMoveFolder(uid, id, id_dest):
"""need to be an admin on the ensemble that contains that folder, and folder dest not to be the same or a subfolder of id"""
e = M.Folder.objects.get(pk=id).ensemble
m = M.Membership.objects.filter(user__id=uid, ensemble=e)
return m.count()>0 and m[0].admin and not __isDirOrParent(id_dest, id)
def canUpdateFile(uid, id):
return canRenameFile(uid, id)
def canSendInvite(uid, eid):
"""need to be an admin on that membership"""
m = M.Membership.objects.filter(user__id=uid, ensemble__id=eid)
return m.count() > 0 and m[0].admin
def canEditEnsemble(uid, eid):
return canSendInvite(uid, eid)
def canSeeGrades(uid, eid):
return canSendInvite(uid, eid)
def canGrade(uid, id_source, id_student):
"""Need to be admin on ensemble that contains file and student needs to be a member of that ensemble"""
o = M.Ownership.objects.filter(source__id=id_source)
e = M.Ensemble.objects.filter(ownership__in=o)
m = M.Membership.objects.filter(user__id=uid, ensemble__in=e)
m2 = M.Membership.objects.filter(user__id=id_student, ensemble__in=e)
return m.count()>0 and m[0].admin and m2.count()>0
def isMember(user_id, ensemble_id):
return M.Membership.objects.filter(user__id=user_id, ensemble__id=ensemble_id).count() != 0
def canEdit(uid, id_ann):
#uid need to be comment owner and there need to be no dependent non-deleted comment
o = M.Comment.objects.get(pk=id_ann)
return o.author_id==uid and M.Comment.objects.filter(parent=o, deleted=False).count()==0
def canDelete(uid, id_ann):
return canEdit(uid, id_ann)
def canMarkThread(uid, id_location):
#user needs to be able to read root comment in that location
location = M.Location.objects.get(pk=id_location)
root_comment = M.Comment.objects.get(parent=None, location=location)
if root_comment.author_id == uid:
return True
m = M.Membership.objects.filter(ensemble = location.ensemble, user__id=uid)
return m.count()>0 and (root_comment.type>2 or (m[0].admin and root_comment.type>1))
def log_guest_login(ckey, id_user):
    """Best-effort audit record: note that the guest with confirmation
    key *ckey* logged in as user *id_user*.

    Failures are deliberately swallowed so logging can never break the
    login flow; ``except Exception`` replaces the original bare
    ``except:``, which also swallowed KeyboardInterrupt/SystemExit."""
    try:
        guest = M.User.objects.get(confkey=ckey)
        glh = M.GuestLoginHistory(user_id=id_user, guest=guest)
        glh.save()
    except Exception:
        pass
|
[
"sachazyto@gmail.com"
] |
sachazyto@gmail.com
|
a91118efeb4df4a6f72f674758bdc107d05cfd6b
|
983cfdaf18bf550c118488d224396ca2b7833743
|
/pytest_commander/watcher.py
|
090aa8b56c579afed3c709609458dee04702add5
|
[
"MIT"
] |
permissive
|
simplifysupport/pytest_commander
|
1355de9d5d11df22b7fe22735eb875e7c0a63247
|
11681fea458de1761e808684f578e183bddc40ef
|
refs/heads/master
| 2023-07-10T06:08:39.224184
| 2021-08-17T21:47:33
| 2021-08-17T21:47:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,560
|
py
|
"""
Watch for filesystem events in the background. Watchdog is not compatible
with the eventlet concurrency model so this needs to run in a separate OS
thread or process.
"""
import logging
import multiprocessing
import os
import time
import traceback
from typing import Callable
from watchdog import events # type: ignore
from watchdog import observers # type: ignore
LOGGER = logging.getLogger(__name__)
READY = 0xFEED
def watch_filesystem(
    root_dir: str, events_queue: multiprocessing.Queue, log_level: int
):
    """Process entry point: watch *root_dir* recursively and push
    filesystem events onto *events_queue*.

    Runs in its own process because watchdog is incompatible with the
    eventlet concurrency model (see module docstring)."""
    logging.basicConfig(level=log_level)
    LOGGER.debug("initiating filesystem watcher")
    try:
        _watch_filesystem(root_dir, events_queue)
    except Exception:
        # Print the traceback in the child process — it would otherwise
        # be lost — then re-raise so the process exits with failure.
        traceback.print_exc()
        raise
def _watch_filesystem(root_dir: str, events_queue: multiprocessing.Queue):
    """Run the watchdog observer loop until KeyboardInterrupt.

    Puts the READY sentinel onto *events_queue* once the observer is
    scheduled, so the parent process knows watching has started."""
    event_handler = FileSystemEventHandler(events_queue)
    observer = observers.Observer()
    observer.schedule(event_handler, root_dir, recursive=True)
    observer.start()
    events_queue.put(READY)
    LOGGER.debug("filesystem watcher is ready")
    try:
        # The observer runs on its own thread; just keep this one alive.
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
class FileSystemEventHandler(events.FileSystemEventHandler):
    """Handles file system events by forwarding every one of them to a
    multiprocessing queue for consumption in the parent process."""
    def __init__(self, events_queue: multiprocessing.Queue):
        # Queue shared with the parent process.
        self._events_queue = events_queue
    def on_any_event(self, event: events.FileSystemEvent):
        # Invoked by watchdog for every event type (create/modify/...).
        LOGGER.debug("caught filesystem event %s", event)
        self._events_queue.put(event)
|
[
"ryan@tokencard.io"
] |
ryan@tokencard.io
|
575ec372fbe7d56fc532a4a23056fea4cccb7595
|
dcfef881ca6c3aee094ffe04ad726b5713f96fbb
|
/TwitterStreamService.py
|
235c3d015fba76c60a109f081a41779d48308cdc
|
[] |
no_license
|
hs2873/cloudandbigdata
|
3d97358bea3b85c78fe1d89987c21c1b712ac156
|
bcb64807bc65d0569382e4fd5c6db99ab896b70a
|
refs/heads/master
| 2023-01-11T19:00:35.702505
| 2016-03-06T02:43:00
| 2016-03-06T02:43:00
| 53,233,960
| 0
| 0
| null | 2022-12-26T20:22:38
| 2016-03-06T02:36:40
|
HTML
|
UTF-8
|
Python
| false
| false
| 890
|
py
|
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
import json
from textwrap import TextWrapper
from tweepy import Stream
from elasticsearch import Elasticsearch
consumer_key="kSCEKl0lVyAWRXpNFRNk8VXpL"
consumer_key_secret="zP4DBYsUbwlnTRbtY8wj5cbKCsl7IEXccv74rjZ6I0OPNWQdgM"
access_token="2601074616-d94JMfuZPthDZW4VIUGTCDKXrJFs9SLVwOjIXsn"
access_token_secret="gilQbRAi75s0K0twpQU3w9z9anDhaMySBPz7ej7NkJtRB"
es=Elasticsearch()
class MyStreamListener(StreamListener):
    """Tweepy stream listener that indexes each incoming status into
    the local Elasticsearch 'idx_tmp' index (doc type 'twitter_twp').
    NOTE: Python 2 syntax (print statement)."""
    def on_status(self,status):
        # Persist the raw tweet JSON, then echo it to stdout.
        es.create(index='idx_tmp',doc_type='twitter_twp',body=status._json)
        print status._json
if __name__=='__main__':
listener=MyStreamListener()
auth = OAuthHandler(consumer_key, consumer_key_secret)
auth.set_access_token(access_token,access_token_secret)
stream=Stream(auth,listener)
stream.filter(track=['google'])
|
[
"hs2873@columbia.edu"
] |
hs2873@columbia.edu
|
b24dc0cbe09ff2870788c59b9f1d920daf67045b
|
184398a9bf671af7e26c29255c272ee675cfcf90
|
/revno..py
|
45aef1ad7821c26b04be1d21d9c3022eb4d4c8b6
|
[] |
no_license
|
deepika-jaiswal/hands_on_python
|
09ac6c40302e9b80033786cdb0f3841d47634208
|
6328176b4ff47722d2f469890e310c9cd2ff130b
|
refs/heads/master
| 2020-05-30T11:45:25.510240
| 2019-06-01T09:37:51
| 2019-06-01T09:37:51
| 189,713,185
| 0
| 1
| null | 2019-10-15T12:19:54
| 2019-06-01T09:14:31
|
Python
|
UTF-8
|
Python
| false
| false
| 90
|
py
|
# Read a non-negative integer and print its decimal digits in reverse
# order on one line (e.g. 1234 -> 4321).  Negative input is not handled.
n=int(input())
while(n//10!=0):
    print(n%10,end="")
    n=n//10
# The loop stops before the most significant digit; emit it last.
print(n,end="")
|
[
"noreply@github.com"
] |
deepika-jaiswal.noreply@github.com
|
ea96bed0458952f2892aa0495a072e7623b515ac
|
0de7538a16f54dfc5d2d6fbf078af1558d30086d
|
/read_airport_csv.py
|
c8c9ed64e52c87a7ff444b18d76cac9a4de22132
|
[] |
no_license
|
feleHaile/20181127EliLilly
|
c15535a60d66c42998fb5b14e5f3a6f2e13a3fc2
|
27390c5af4e4bd70e0a3a6e338a26c7963f4f8fe
|
refs/heads/master
| 2020-05-15T22:31:38.551113
| 2018-11-30T02:14:25
| 2018-11-30T02:14:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 178
|
py
|
#!/usr/bin/env python
import csv
with open("DATA/airport_boardings.csv") as airports_in:
    rdr = csv.reader(airports_in)
    # Print the second and fourth field of every row; no header is
    # skipped, so the column labels (if any) print too.
    for line in rdr:
        print(line[1], line[3])
|
[
"jstrickler@gmail.com"
] |
jstrickler@gmail.com
|
69c992b34cb5d5d16eb50889d30d2965b7a49161
|
2f05b019bda19e27fabdfdcb5a4ddf43fa0e88e6
|
/tests/test_logMonitor.py
|
177fc7487098bbbcedd537a7860a7181e7402a74
|
[] |
no_license
|
Dholness2/http-monitor
|
1845093c59e139c39fbc3a0d0ab74a49e8f0d08b
|
9afc00e44c8d1e4b2ea45f841de615396b1e5373
|
refs/heads/master
| 2021-06-15T10:50:49.316845
| 2020-04-24T20:18:35
| 2020-04-24T20:18:35
| 184,966,881
| 0
| 0
| null | 2021-04-20T19:53:08
| 2019-05-05T02:17:03
|
Python
|
UTF-8
|
Python
| false
| false
| 636
|
py
|
import time
from collections import namedtuple
from queue import PriorityQueue
from unittest.mock import Mock
from src.logmonitor import LogMonitor
def test_run_appends_rows_to_q_as_log():
    """LogMonitor should drain the priority queue and forward each
    queued Log entry to the window's put_log()."""
    Log = namedtuple('Log', 'date logList')
    reader = [["10.0.0.4", "-", "apache", 1549573860, "GET /api/user HTTP/1.0", 200, 1234]]
    test_log = Log(1549573860, reader[0])
    test_q = PriorityQueue()
    test_q.put(test_log)
    mock_window = Mock()
    test_monitor = LogMonitor(test_q, mock_window, mock_window)
    test_monitor.start()
    # Give the monitor's worker time to consume the queued entry.
    time.sleep(2)
    test_monitor.stop()
    mock_window.put_log.assert_called_with(test_log)
|
[
"dholness2@gmail.com"
] |
dholness2@gmail.com
|
c735abfc9abb4a78ccccb4150f9e0570f2a5dd77
|
e82a5480b960abc154025168a27742149ae74de3
|
/Leetcode/Trees/Medium/337_house_robber_3.py
|
63477f1241624124aa42d76be4f5554059b97fb7
|
[] |
no_license
|
harshsodi/DSA
|
8e700f0284f5f3c5559a7e385b82e0a6c96d3363
|
18f82f9b17a287abe3f318118691b62607e61ff9
|
refs/heads/master
| 2021-07-07T23:42:50.750471
| 2020-09-11T03:16:41
| 2020-09-11T03:16:41
| 186,679,663
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,126
|
py
|
# Runtime: 68 ms, faster than 15.48% of Python online submissions for House Robber III.
# Memory Usage: 19 MB, less than 5.34% of Python online submissions for House Robber III.
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution(object):
    def rob(self, root):
        """
        Return the maximum total value collectible from the binary tree
        *root* without taking two directly connected nodes
        (LeetCode 337, House Robber III).

        :type root: TreeNode
        :rtype: int

        Fix: the original tested the cache with ``self.mem.get(...)``
        truthiness, so a legitimately cached 0 was recomputed every
        time; a membership test makes every cached value a hit.
        """
        # Memo keyed by (node, may_take_this_node).
        self.mem = {}

        def best(node, can_take):
            if node is None:
                return 0
            if (node, can_take) in self.mem:
                return self.mem[(node, can_take)]
            # Option 1: skip this node; both children become takeable.
            ans = best(node.left, True) + best(node.right, True)
            if can_take:
                # Option 2: take this node; children must be skipped.
                ans = max(ans, node.val + best(node.left, False) + best(node.right, False))
            self.mem[(node, can_take)] = ans
            return ans

        return best(root, True)
|
[
"harshsodi@gmail.com"
] |
harshsodi@gmail.com
|
0b42d70bfd7ad2f97b218aa5602996e030c2b7de
|
32e1987ad11ff1bd8f722a5a80fc52cfe0700427
|
/classes/marker_class.py
|
86a56b73d47c23a31cf48c2b59563c094d1eb902
|
[
"MIT",
"BSD-3-Clause"
] |
permissive
|
tum-phoenix/drive_ros_marker_detection
|
e9ff6e66c2ecf6ba8abf4284d0f0cc4ded5d98da
|
63ca42b87499d530ab91a0ee812e55faa47ffb14
|
refs/heads/master
| 2020-03-30T16:42:09.214916
| 2018-12-09T10:33:39
| 2018-12-09T10:33:39
| 151,422,131
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,097
|
py
|
# -*- coding: utf-8 -*-
# mapping: id (as int) to sign description (string)
import pickle
# all carolo cup signs currently in use
# Maps integer marker/class id -> human-readable sign name.
marker_name_dict = {
    0: '10_speed_limit',
    1: '20_speed_limit',
    2: '30_speed_limit',
    3: '40_speed_limit',
    4: '50_speed_limit',
    5: '60_speed_limit',
    6: '70_speed_limit',
    7: '80_speed_limit',
    8: '90_speed_limit',
    9: 'end_10_speed_limit',
    10: 'end_20_speed_limit',
    11: 'end_30_speed_limit',
    12: 'end_40_speed_limit',
    13: 'end_50_speed_limit',
    14: 'end_60_speed_limit',
    15: 'end_70_speed_limit',
    16: 'end_80_speed_limit',
    17: 'end_90_speed_limit',
    18: 'right_arrow',
    19: 'left_arrow',
    20: 'startline',
    21: 'broken_crossing_line',
    22: 'continuous_crossing_line',
    23: 'left_crossing_turning_line',
    24: 'right_crossing_turning_line',
    25: 'startline',  # NOTE(review): duplicate of id 20 — confirm intended
    26: 'zebra_crossing'
}
# Serialize the mapping so consumers can load it without importing this module.
with open('marker_name_dict.pkl', 'wb') as f:
    pickle.dump(marker_name_dict, f)
|
[
"mykyta.denysov@gmail.com"
] |
mykyta.denysov@gmail.com
|
6ef31234008ebc69344e8230d1d081f560ab3f6b
|
ae5f318b1fbbd6170a231f8ec1fef7bf86261f64
|
/gzip_files.py
|
48eb09c9913b1f44003983e7490f0d107bf54535
|
[] |
no_license
|
elijabesu/cron_gzip
|
42becef3ebf16ed11712ab633d29bef8f0a355d7
|
21f70eba10189f5e0208c81033bddd77906498e3
|
refs/heads/master
| 2022-12-10T06:47:55.022934
| 2020-09-08T07:18:07
| 2020-09-08T07:18:07
| 293,447,062
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 577
|
py
|
import gzip
import os
def main():
    """Gzip every ``*.log`` file under /var/log into ``<name>.log.gz``,
    then truncate the original so logging continues in an empty file."""
    path = "/var/log"
    for file in os.listdir(path):
        if not file.endswith(".log"):
            continue
        # paths[0] is the source log, paths[1] its .gz destination.
        paths = get_paths(path, file)
        with open(paths[0], "r") as f:
            with gzip.open(paths[1], "wt") as fgz:
                fgz.writelines(f)
        # Empty the source only after the compressed copy is written.
        clear_file(paths[0])
def clear_file(path):
    """Truncate the file at *path* to zero bytes."""
    # Opening in "w" mode truncates; the context manager closes it.
    with open(path, "w"):
        pass
def get_paths(path, file):
    """Return ``[source, destination]`` for *file* inside directory
    *path*: the plain file path and its ``.gz`` counterpart."""
    source = path + "/" + file
    return [source, source + ".gz"]
if __name__ == "__main__":
main()
|
[
"ellie@saurich.com"
] |
ellie@saurich.com
|
21737601c03b1aabeef438e22c86804b39eaaf09
|
66cab93c26cc252f412860778131b208c6f120be
|
/parts/newproject/pyramid/events.py
|
577f8138a0ad54caab7368f0ebc609f8275ab753
|
[] |
no_license
|
marcogarzini/Zodiac
|
3332733f6ae8d64924557ff022f44c835aeac0a9
|
06e8ad0c709189dc65a26fb7d6c17a9ee2bc9112
|
refs/heads/master
| 2016-09-11T03:18:12.805299
| 2014-01-17T12:50:03
| 2014-01-17T12:50:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 67
|
py
|
/home/user1/newproject/eggs/pyramid-1.4-py2.7.egg/pyramid/events.py
|
[
"user1@user1-VirtualBox.(none)"
] |
user1@user1-VirtualBox.(none)
|
4ae075cb2f1bf1fd470672c465657dadb8015ffd
|
0fc3aa32601333baf5b18deeb54505b054900972
|
/blog_api/tests/test_posts.py
|
0b72d49e5359ba21c36e55752ac881cb19f8238a
|
[
"MIT"
] |
permissive
|
DenMaslov/fastapi_blog
|
b6dd74e40d7e1393ed0f88887b6b50a8ee92c557
|
4f75e03f8e0bf4946b52f49014d1a15d764f5a32
|
refs/heads/master
| 2023-07-18T23:29:51.893740
| 2021-09-22T08:55:26
| 2021-09-22T08:55:26
| 384,967,756
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,101
|
py
|
from fastapi.testclient import TestClient
import pytest
from main import app
client = TestClient(app)
@pytest.fixture
def valid_data():
valid_d = {
"userId": 1,
"title": "string",
"body": "string"
}
return valid_d
@pytest.fixture
def invalid_data():
data = {
"userId": "sds",
"title": 12,
"body": "string"
}
return data
@pytest.fixture
def post():
post = {
"id": 1,
"title": "sunt aut facere repellat provident occaecati excepturi optio reprehenderit",
"body": "quia et suscipit\nsuscipit recusandae consequuntur expedita et cum\nreprehenderit molestiae ut ut quas totam\nnostrum rerum est autem sunt rem eveniet architecto",
"author": {
"id": 1,
"name": "Leanne Graham",
"username": "Bret",
"email": "Sincere@april.biz",
"phone": "1-770-736-8031 x56442"
}
}
return post
def test_post_list(post):
response = client.get("/posts")
assert response.status_code == 200
resp = response.json()[0]
assert resp["id"] == post['id']
assert resp["author"] == post['author']
assert resp["title"] == post['title']
def test_create_post(valid_data):
response = client.post("/posts/", json=valid_data)
assert response.status_code == 201
resp = response.json()
assert resp["title"] == valid_data['title']
def test_invalid_creation_post(invalid_data):
response = client.post("/posts/", json=invalid_data)
assert response.status_code == 422
resp = response.json()
assert "author" not in resp
def test_get_detail_post(post):
response = client.get("/posts/1")
assert response.status_code == 200
resp = response.json()
assert resp["id"] == post['id']
assert resp["author"] == post['author']
assert resp["title"] == post['title']
def test_update_post():
data = {"title": "string", "body": "string"}
response = client.put("/posts/3", json=data)
assert response.status_code == 200
resp = response.json()
for key in data.keys():
assert data[key] == resp[key]
|
[
"20denismaslov@gmail.com"
] |
20denismaslov@gmail.com
|
eda641b32b24f0c39682196c6801eb801ca618c0
|
c231ade3d7ce59527090e345cc80e41e1310dd2c
|
/python/eit/kernels/c_kernel.py
|
d7c5c6e72299d244c1bf05e0f36ae79c72dd1f14
|
[] |
no_license
|
jcockayne/bayesian_eit
|
2f3ed8b90a9d345db1d38a62b0b5b37ce62e8a85
|
b64336e5d8a34addc6a963b376a5081e2f16466a
|
refs/heads/master
| 2021-03-24T13:52:39.219926
| 2018-12-18T10:33:46
| 2018-12-18T10:33:46
| 91,784,100
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,226
|
py
|
import numpy as np
from .. import collocate, simulate
from .shared import theta_to_a
def construct_posterior(locations, grid, theta, collocate_args, proposal_dot_mat, debug=False):
    """Collocate at *locations* given parameters *theta*, returning
    ``(mu_mult, Sigma)`` from the compiled backend — presumably a
    matrix mapping boundary/stimulation data to the posterior mean,
    and the posterior covariance (TODO confirm against backend docs).
    NOTE: *debug* is accepted but unused here."""
    a_int, a_bdy, a_sensor, a_x, a_y = theta_to_a(theta,
                                                  grid,
                                                  proposal_dot_mat
                                                  )
    assert a_int.shape[0] == grid.interior.shape[0]
    assert a_x.shape[0] == grid.interior.shape[0]
    assert a_y.shape[0] == grid.interior.shape[0]
    # Bundle point coordinates with the field values; interior points
    # also carry x/y derivatives, boundary/sensor points get NaN fill.
    augmented_int = np.column_stack([grid.interior, a_int, a_x, a_y])
    augmented_bdy = np.column_stack([grid.boundary, a_bdy, np.nan * np.zeros((a_bdy.shape[0], 2))])
    augmented_sens = np.column_stack([grid.sensors, a_sensor, np.nan * np.zeros((a_sensor.shape[0], 2))])
    # The compiled backend requires Fortran-ordered arrays.
    mu_mult, Sigma = collocate.collocate_no_obs(
        np.asfortranarray(locations),
        np.asfortranarray(augmented_int),
        np.asfortranarray(augmented_bdy),
        np.asfortranarray(augmented_sens),
        np.asfortranarray(collocate_args)
    )
    return mu_mult, Sigma
def phi(grid, theta, likelihood_variance, pattern, data, collocate_args, proposal_dot_mat, bayesian=True, debug=False):
    """Negative log-likelihood (phi) of *data* given parameters
    *theta*, delegating to the compiled collocation backend.  All
    arrays are passed Fortran-ordered as the backend requires."""
    return -collocate.log_likelihood(
        np.asfortranarray(grid.interior),
        np.asfortranarray(grid.boundary),
        np.asfortranarray(grid.sensors),
        np.asfortranarray(theta),
        np.asfortranarray(proposal_dot_mat),
        np.asfortranarray(collocate_args),
        np.asfortranarray(pattern.stim_pattern),
        np.asfortranarray(pattern.meas_pattern),
        np.asfortranarray(data),
        likelihood_variance,
        bayesian=bayesian,
        debug=debug
    )
def phi_tempered(grid, theta, likelihood_variance, pattern, data_1, data_2, temp, collocate_args, proposal_dot_mat, bayesian=True, debug=False):
return -collocate.log_likelihood_tempered(
np.asfortranarray(grid.interior),
np.asfortranarray(grid.boundary),
np.asfortranarray(grid.sensors),
np.asfortranarray(theta),
np.asfortranarray(proposal_dot_mat),
np.asfortranarray(collocate_args),
np.asfortranarray(pattern.stim_pattern),
np.asfortranarray(pattern.meas_pattern),
np.asfortranarray(data_1),
np.asfortranarray(data_2),
temp,
likelihood_variance,
bayesian=bayesian,
debug=debug
)
class PCNKernel_C(object):
def __init__(self, beta, prior_mean, sqrt_prior_cov, grid, likelihood_variance, pattern, data, collocate_args, proposal_dot_mat):
self.__beta__ = beta
self.__prior_mean__ = prior_mean
self.__sqrt_prior_cov__ = sqrt_prior_cov
self.__grid__ = grid
self.__likelihood_variance__ = likelihood_variance
self.__pattern__ = pattern
self.__data__ = data
self.collocate_args = collocate_args
self.__proposal_dot_mat__ = proposal_dot_mat
def phi(self, theta, collocate_args=None, bayesian=True, debug=False):
return phi(
self.__grid__,
theta,
self.__likelihood_variance__,
self.__pattern__,
self.__data__,
self.collocate_args if collocate_args is None else collocate_args,
self.__proposal_dot_mat__,
bayesian=bayesian,
debug=debug
)
def get_posterior(self, theta, locations, stim=None):
mu_mult, cov = construct_posterior(
locations,
self.__grid__,
theta,
self.collocate_args,
self.__proposal_dot_mat__
)
if stim is None:
return mu_mult, cov
mu = np.dot(mu_mult, np.r_[
np.zeros(len(self.__grid__.interior_plus_boundary)),
stim
])
return mu, cov
def apply(self, kappa_0, n_iter, n_threads=1, beta=None, bayesian=True):
if len(kappa_0.shape) == 1:
kappa_0 = np.copy(kappa_0[None, :])
return simulate.run_pcn_parallel(
n_iter,
self.__beta__ if beta is None else beta,
np.asfortranarray(kappa_0),
np.asfortranarray(self.__prior_mean__),
np.asfortranarray(self.__sqrt_prior_cov__),
np.asfortranarray(self.__grid__.interior),
np.asfortranarray(self.__grid__.boundary),
np.asfortranarray(self.__grid__.sensors),
np.asfortranarray(self.__proposal_dot_mat__),
np.asfortranarray(self.collocate_args),
np.asfortranarray(self.__pattern__.stim_pattern),
np.asfortranarray(self.__pattern__.meas_pattern),
np.asfortranarray(self.__data__),
self.__likelihood_variance__,
n_threads,
bayesian=bayesian
)
class PCNTemperingKernel_C(object):
    """Tempered MCMC kernel backed by a compiled sampler.

    Holds two data sets and a temperature, and delegates the heavy lifting
    to the module-level helpers ``phi_tempered`` / ``construct_posterior``
    and the compiled ``simulate.run_pcn_parallel_tempered``.  The name
    suggests a preconditioned Crank-Nicolson proposal — the proposal itself
    lives in the compiled backend, not here.
    """

    def __init__(self, beta, prior_mean, sqrt_prior_cov, grid, likelihood_variance, pattern, data_1, data_2, temp, collocate_args, proposal_dot_mat):
        # Proposal step size.
        self.__beta__ = beta
        # Gaussian prior: mean vector and square root of the covariance.
        self.__prior_mean__ = prior_mean
        self.__sqrt_prior_cov__ = sqrt_prior_cov
        # Discretization object exposing interior/boundary/sensor nodes.
        self.__grid__ = grid
        self.__likelihood_variance__ = likelihood_variance
        # Stimulation/measurement pattern object.
        self.__pattern__ = pattern
        # The two data sets being tempered between.
        self.__data_1__ = data_1
        self.__data_2__ = data_2
        # Tempering parameter mixing the two likelihoods.
        self.__temp__ = temp
        self.__collocate_args__ = collocate_args
        self.__proposal_dot_mat__ = proposal_dot_mat

    def phi(self, theta, bayesian=True, debug=False):
        """Evaluate the tempered negative log-likelihood at ``theta``."""
        return phi_tempered(
            self.__grid__,
            theta,
            self.__likelihood_variance__,
            self.__pattern__,
            self.__data_1__,
            self.__data_2__,
            self.__temp__,
            self.__collocate_args__,
            self.__proposal_dot_mat__,
            bayesian=bayesian,
            debug=debug
        )

    def get_posterior(self, theta, locations):
        """Return ``construct_posterior``'s output at ``locations`` for ``theta``."""
        return construct_posterior(
            locations,
            self.__grid__,
            theta,
            self.__collocate_args__,
            self.__proposal_dot_mat__
        )

    def apply(self, kappa_0, n_iter, n_threads=1, beta=None, bayesian=True):
        """Run the compiled tempered sampler from starting state ``kappa_0``.

        A 1-D ``kappa_0`` is promoted to a (1, d) batch.  ``beta`` overrides
        the configured step size when given.  The argument order must match
        the compiled backend's signature exactly.
        """
        if len(kappa_0.shape) == 1:
            kappa_0 = np.copy(kappa_0[None, :])
        return simulate.run_pcn_parallel_tempered(
            n_iter,
            self.__beta__ if beta is None else beta,
            np.asfortranarray(kappa_0),
            np.asfortranarray(self.__prior_mean__),
            np.asfortranarray(self.__sqrt_prior_cov__),
            np.asfortranarray(self.__grid__.interior),
            np.asfortranarray(self.__grid__.boundary),
            np.asfortranarray(self.__grid__.sensors),
            np.asfortranarray(self.__proposal_dot_mat__),
            np.asfortranarray(self.__collocate_args__),
            np.asfortranarray(self.__pattern__.stim_pattern),
            np.asfortranarray(self.__pattern__.meas_pattern),
            np.asfortranarray(self.__data_1__),
            np.asfortranarray(self.__data_2__),
            self.__temp__,
            self.__likelihood_variance__,
            n_threads,
            bayesian=bayesian
        )
|
[
"benorn@gmail.com"
] |
benorn@gmail.com
|
a47c341782889d88d240edf4eb265e4d86767f98
|
619e29b858647f1bde30f70a0e647840850ce68f
|
/src/kitchenrock_api/serializers/food_recipe.py
|
41242bf640d1b5c9650c5f75fad67811cc926846
|
[] |
no_license
|
thqbop/kitchenrock
|
121dc33111cd768d0fd5b0041616c5abe57b7de1
|
b5c5bd25fb05965621615d09439bf79fa1b8d5e8
|
refs/heads/master
| 2021-08-08T11:46:51.326225
| 2017-11-10T08:47:48
| 2017-11-10T08:47:48
| 103,954,622
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 227
|
py
|
from rest_framework import serializers
from kitchenrock_api.models.food_recipe import FoodRecipe
class FoodRecipeSerializer(serializers.ModelSerializer):
    """Model serializer exposing every field of ``FoodRecipe``."""

    class Meta:
        model = FoodRecipe
        # '__all__' keeps the serialized fields in sync with the model;
        # any new model field is exposed automatically.
        fields = '__all__'
|
[
"thqbop@gmail.com"
] |
thqbop@gmail.com
|
86f6b6f0e810c0e76daee88e4a100df12ea034b4
|
be3bc396b580975970a7f323b91229ed5d4aad1c
|
/dft_workflow/run_slabs/setup_jobs_from_oh/local_methods.py
|
fbef2e2b7ae32cb4fa0f8dcf3fc45e1f49c05147
|
[
"MIT"
] |
permissive
|
raulf2012/PROJ_IrOx_OER
|
813ee91139b45f47acb980d1ebfacdf87c364996
|
b79fc490f598a48e405819bd6a788ca6d4af440e
|
refs/heads/master
| 2023-06-23T22:48:25.695679
| 2023-06-09T22:34:41
| 2023-06-09T22:34:41
| 269,264,743
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,738
|
py
|
# #########################################################
# Local methods for setup_jobs_from_oh
# #########################################################
#| - Import Modules
import os
import sys
import copy
from methods import get_df_coord
#__|
def _get_h_and_neighbors(name_tuple, porous_adjustment):
    """Locate the single *H atom in the relaxed *OH slab.

    Reads the post-DFT coordination data for ``name_tuple`` and returns
    ``(h_index, nn_info)``: the H atom's structure index and its
    nearest-neighbor info list.  Asserts there is exactly one H row.
    """
    #| - _get_h_and_neighbors
    df_coord_i = get_df_coord(
        slab_id=None,
        bulk_id=None,
        mode="post-dft",  # 'bulk', 'slab', 'post-dft'
        slab=None,
        post_dft_name_tuple=name_tuple,
        porous_adjustment=porous_adjustment,
        )

    row_coord_i = df_coord_i[df_coord_i.element == "H"]
    # Original message was gibberish ("isdjfisdif"); state the real invariant.
    assert row_coord_i.shape[0] == 1, "Expected exactly one *H atom in *OH slab"
    row_coord_i = row_coord_i.iloc[0]

    return(row_coord_i.structure_index, row_coord_i.nn_info)
    #__|

def _remove_atoms(atoms, indices_to_remove):
    """Return a deep copy of ``atoms`` with the given indices deleted."""
    #| - _remove_atoms
    atoms_new = copy.deepcopy(atoms)
    # Boolean-mask deletion; ASE Atoms supports `del atoms[mask]`.
    mask = [atom.index in indices_to_remove for atom in atoms_new]
    del atoms_new[mask]
    return(atoms_new)
    #__|

def get_bare_o_from_oh(
    compenv=None,
    slab_id=None,
    active_site=None,
    att_num=None,
    atoms=None,
    ):
    """Derive the bare (*) and *O slabs from a relaxed *OH slab.

    Finds the H atom and the O atom it is bonded to, then builds:
      * ``atoms_bare`` — the slab with both that O and H removed,
      * ``atoms_O``    — the slab with only the H removed.

    Parameters
    ----------
    compenv, slab_id, active_site, att_num : identifiers naming the *OH job.
    atoms : ASE-style Atoms object of the relaxed *OH slab — TODO confirm.

    Returns
    -------
    dict with keys "atoms_bare" and "atoms_O".
    """
    #| - get_bare_o_from_oh
    name_i = (compenv, slab_id, "oh", active_site, att_num, )

    # First pass with the porous adjustment enabled; if the H atom does not
    # come back with exactly one neighbor, retry with it disabled.
    h_index_i, nn_info_i = _get_h_and_neighbors(name_i, porous_adjustment=True)

    #| - Reading df_coord with porous_adjustment turned off
    if len(nn_info_i) != 1:
        h_index_i, nn_info_i = _get_h_and_neighbors(
            name_i, porous_adjustment=False)

        mess_i = "Should only be 1 *O atom attached to *H here"
        assert len(nn_info_i) == 1, mess_i
    #__|

    nn_info_j = nn_info_i[0]
    site_j = nn_info_j["site"]
    elem_j = site_j.specie.as_dict()["element"]

    mess_i = "Must be an *O atom that *H is attached to"
    assert elem_j == "O", mess_i

    site_index_j = nn_info_j["site_index"]

    # #####################################################
    # Bare slab: strip the O-H pair; *O slab: strip only the H.
    atoms_bare = _remove_atoms(atoms, [site_index_j, h_index_i])
    atoms_O = _remove_atoms(atoms, [h_index_i, ])

    # #####################################################
    out_dict = dict()
    # #####################################################
    out_dict["atoms_bare"] = atoms_bare
    out_dict["atoms_O"] = atoms_O
    # #####################################################
    return(out_dict)
    #__|
|
[
"raulf2012@gmail.com"
] |
raulf2012@gmail.com
|
79d8adb955b793b268bef50550806f80266e7dc5
|
c5c4873b721e5f7b3a1bae9d38d578f40a96aaf5
|
/quantumclient/quantum/v2_0/nvp_qos_queue.py
|
386b8879e0c31998d1bbfc8a96def88d26df51fd
|
[
"Apache-2.0"
] |
permissive
|
yacchin1205/python-quantumclient
|
4bf1bb993d06936a4f006edeb6d402b1631702ba
|
8ed38707b12ae6e77480ae8d8542712d63b7fc70
|
refs/heads/master
| 2020-12-25T08:43:13.265493
| 2013-07-01T19:42:23
| 2013-07-01T19:42:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,878
|
py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Nicira Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from quantumclient.quantum import v2_0 as quantumv20
class ListQoSQueue(quantumv20.ListCommand):
    """List queues that belong to a given tenant."""

    # REST resource name used by the base ListCommand.
    resource = 'qos_queue'
    log = logging.getLogger(__name__ + '.ListQoSQueue')
    # Columns shown in the tabular output.
    list_columns = ['id', 'name', 'min', 'max',
                    'qos_marking', 'dscp', 'default']
class ShowQoSQueue(quantumv20.ShowCommand):
    """Show information of a given queue."""

    resource = 'qos_queue'
    log = logging.getLogger(__name__ + '.ShowQoSQueue')
    # Let the queue be addressed by name as well as by ID.
    allow_names = True
class CreateQoSQueue(quantumv20.CreateCommand):
    """Create a queue."""

    resource = 'qos_queue'
    log = logging.getLogger(__name__ + '.CreateQoSQueue')

    def add_known_arguments(self, parser):
        """Register the qos-queue-specific CLI arguments on ``parser``.

        The original had stray trailing commas after several
        ``parser.add_argument(...)`` calls, turning each statement into a
        discarded one-element tuple; they are removed here.
        """
        parser.add_argument(
            'name', metavar='NAME',
            help='Name of queue')
        parser.add_argument(
            '--min',
            help='min-rate')
        parser.add_argument(
            '--max',
            help='max-rate')
        parser.add_argument(
            '--qos-marking',
            help='qos marking untrusted/trusted')
        # NOTE(review): --default takes a free-form value, so any string the
        # user types (including "false") is truthy downstream — confirm
        # whether this should be a store_true flag before changing it.
        parser.add_argument(
            '--default',
            default=False,
            help=('If true all ports created will be the size of this queue'
                  ' if queue is not specified'))
        parser.add_argument(
            '--dscp',
            help='Differentiated Services Code Point')

    def args2body(self, parsed_args):
        """Build the request body from parsed CLI arguments.

        ``name`` and ``default`` are always sent; the optional attributes
        are included only when the user supplied a truthy value.
        """
        params = {'name': parsed_args.name,
                  'default': parsed_args.default}
        for attr in ('min', 'max', 'qos_marking', 'dscp', 'tenant_id'):
            value = getattr(parsed_args, attr, None)
            if value:
                params[attr] = value
        return {'qos_queue': params}
class DeleteQoSQueue(quantumv20.DeleteCommand):
    """Delete a given queue."""

    log = logging.getLogger(__name__ + '.DeleteQoSQueue')
    resource = 'qos_queue'
    # Let the queue be addressed by name as well as by ID.
    allow_names = True
|
[
"arosen@nicira.com"
] |
arosen@nicira.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.