| blob_id (stringlengths 40-40) | directory_id (stringlengths 40-40) | path (stringlengths 2-616) | content_id (stringlengths 40-40) | detected_licenses (listlengths 0-69) | license_type (stringclasses, 2 values) | repo_name (stringlengths 5-118) | snapshot_id (stringlengths 40-40) | revision_id (stringlengths 40-40) | branch_name (stringlengths 4-63) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 2.91k-686M, nullable) | star_events_count (int64, 0-209k) | fork_events_count (int64, 0-110k) | gha_license_id (stringclasses, 23 values) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (stringclasses, 213 values) | src_encoding (stringclasses, 30 values) | language (stringclasses, 1 value) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 2-10.3M) | extension (stringclasses, 246 values) | content (stringlengths 2-10.3M) | authors (listlengths 1-1) | author_id (stringlengths 0-212) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0c1f81f9a662f0a04390fb702ff7088ed12059d5
|
292c6f367da29b6980b04516ed7afff6dc0e31d4
|
/08-arrays-and-strings.py
|
53ea6b0c8e486fc2ddeb2209a082eb538d308b12
|
[] |
no_license
|
ChrisCummins/cstp
|
f846e4f444b8e9420fa1e4df220f8d8f2984e87a
|
c7f7bda5688ed1ed6e1f8d19695dc412c8a6c564
|
refs/heads/master
| 2016-09-06T10:28:00.695687
| 2014-09-19T12:58:20
| 2014-09-19T12:58:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,945
|
py
|
#!/usr/bin/env python
#
## Arrays and Strings:
#
# Solutions for the code questions posed in Cracking the Coding
# Interview, Chapter 1, page 73.
#
# Exercise 1.1:
#
# Implement an algorithm to determine if a string has all unique
# characters. What if you cannot use additional data structures?
#
# This is a variation on the "count the number of times each character
# appears in a string" problem, except that we only need to store two
# possible values: character present, or character not present. On the
# first recurrence of a character, we can return false.
#
# The solution we've implemented operates in O(n) time, with a best
# case time of O(1) (when string length > 256). It operates with O(1)
# space complexity.
#
def characters_are_unique(string):
# This is a crafty optimisation: since we know the character space
# is 256 (the number of characters in the ASCII character set),
# then by definition any string that is longer than this *must*
# include duplicate characters:
if len(string) > 256:
return False
    # We need 256 entries in order to store our set of character
    # occurrences. If we were interested in bit twiddling, we could
    # reduce the memory footprint by 7/8 by using individual bits,
    # rather than whole bytes, to represent each character (a sketch of
    # that variant follows this function):
characters = [False] * 256
for c in string:
val = ord(c)
        if characters[val]:
            return False
        characters[val] = True
return True
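# A minimal sketch of the bit-twiddling variant mentioned above (an
# illustration, not part of the original exercise solution): a single
# Python int serves as a 256-entry bit set.
def characters_are_unique_bitset(string):
    if len(string) > 256:
        return False
    seen = 0
    for c in string:
        bit = 1 << ord(c)
        # If this character's bit is already set, we have a duplicate.
        if seen & bit:
            return False
        seen |= bit
    return True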
# Exercise 1.3:
#
# Given two strings, write a method to decide if one is a
# permutation of the other.
#
# A permutation of a string must contain the exact same characters,
# but may contain them in any order. To check whether one string is a
# permutation of another, we can sort the characters within both
# strings in the same way, and check whether these sorted character
# arrays match. The efficiency of this algorithm will depend on the
# efficiency of the sorting algorithm used, as will the memory
# footprint (depends on whether the strings are sorted in place).
#
# An alternative implementation would be to check whether the two
# strings have identical character counts, but this requires a priori
# knowledge of the size of the character set (see the sketch following
# this function).
def is_permutation(a, b):
if len(a) != len(b):
return False
# Depending on how efficient this comparison is, we may want to
# skip it.
if a == b:
return True
return sorted(list(a)) == sorted(list(b))
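# A sketch of the character-counting alternative described above,
# assuming an ASCII character set (that assumption is ours, not the
# exercise's):
def is_permutation_counts(a, b):
    if len(a) != len(b):
        return False
    counts = [0] * 256
    for c in a:
        counts[ord(c)] += 1
    for c in b:
        counts[ord(c)] -= 1
        # b contains this character more times than a does.
        if counts[ord(c)] < 0:
            return False
    return True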
# Exercise 1.4:
#
# Write a method to replace all spaces in a string with '%20'. You
# may assume that the string has sufficient space at the end to hold
# the additional characters, and that you are given the "true" length
# of the string.
#
# First off, let's get the obvious way over and done with:
def escape_spaces_regexp(string, strlen):
return string.replace(' ', '%20')
# Of course, this misses the purpose of the question by operating on a
# string, not a character array. Implementing a proper character array
# solution requires two passes, and operates in O(n) time, with O(1)
# space complexity (it operates in place):
def escape_spaces(string, strlen):
# The first pass is to ascertain the number of ' ' characters
# which need escaping, which can then be used to calculate the new
# length of the escaped string.
spaces_count = 0
for c in list(string[:strlen]):
if c == ' ':
spaces_count += 1
new_strlen = strlen + 2 * spaces_count
    # Now that we know the new string length, we work from back to
    # front, copying original string characters into their new
    # positions. If we come across a ' ' character, it is replaced
    # with the escaped equivalent.
#
# We can make a cheeky optimisation because we know that if the
# escaped string length and the original string length are equal,
# then there are no characters which need escaping, so we don't
# need to do anything.
if new_strlen != strlen:
for i in range(strlen - 1, -1, -1):
new_strlen -= 1
if string[i] == ' ':
string[new_strlen - 2] = '%'
string[new_strlen - 1] = '2'
string[new_strlen] = '0'
new_strlen -= 2
else:
string[new_strlen] = string[i]
return string
if __name__ == "__main__":
# Exercise 1.1
assert characters_are_unique("abcdefg") == True
assert characters_are_unique("abcdefga") == False
# Exercise 1.3
assert is_permutation("abc", "abc") == True
assert is_permutation("abc", "abcd") == False
assert is_permutation("abc", "cab") == True
# Exercise 1.4
assert escape_spaces_regexp("Hello, the World!", 17) == "Hello,%20the%20World!"
assert (''.join(escape_spaces(list("Hello, the World! "), 17)) ==
"Hello,%20the%20World! ")
|
[
"chrisc.101@gmail.com"
] |
chrisc.101@gmail.com
|
f46f1d3c2abf05bf55e3c91b1316976bab1b5f12
|
b3e5931420805b22d1e6b1b9497d876129ff8b23
|
/membership_inference_attack-master/experience_mnist.py
|
df9523b79c9d29826254b94215748407697251a7
|
[] |
no_license
|
MezereonXP/dp-project
|
faa4dac42a23908e884cfe9a04fa9559864725b1
|
0b95681a293aa8965f67a65140b42c1472c2713a
|
refs/heads/master
| 2022-12-07T18:57:36.689574
| 2020-08-24T16:43:07
| 2020-08-24T16:43:07
| 289,939,706
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,212
|
py
|
import torch
import torch.nn as nn
import torch.optim as optim
from dataloaders import *
from utils import config
import numpy as np
from model import *
from torch.optim import lr_scheduler
from trainer import *
from sklearn.utils import shuffle
from sklearn.metrics import precision_recall_fscore_support, accuracy_score
import lightgbm as lgb
from torchdp import PrivacyEngine
def experience_mnist(config, path, param):
print("START MNIST")
use_cuda = config.general.use_cuda and torch.cuda.is_available()
torch.manual_seed(config.general.seed)
device = torch.device("cuda" if use_cuda else "cpu")
print("START TRAINING TARGET MODEL")
data_train_target = custum_MNIST(True, 0, config, '../data', train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
]))
data_test_target = custum_MNIST(True, 0, config, '../data', train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
]))
criterion = nn.CrossEntropyLoss()
train_loader_target = torch.utils.data.DataLoader(data_train_target, batch_size=config.learning.batch_size, shuffle=True)
test_loader_target = torch.utils.data.DataLoader(data_test_target, batch_size=config.learning.batch_size, shuffle=True)
dataloaders_target = {"train": train_loader_target, "val": test_loader_target}
dataset_sizes_target = {"train": len(data_train_target), "val": len(data_test_target)}
print("TAILLE dataset", dataset_sizes_target)
model_target = Net_mnist().to(device)
optimizer = optim.SGD(model_target.parameters(), lr=config.learning.learning_rate, momentum=config.learning.momentum)
# Add DP noise!
privacy_engine = PrivacyEngine(
model_target,
batch_size=config.learning.batch_size,
sample_size=len(train_loader_target.dataset),
alphas=[1 + x / 10.0 for x in range(1, 100)] + list(range(12, 64)),
noise_multiplier=1.0, # sigma
max_grad_norm=1.0, # Clip per-sample gradients to this norm
)
privacy_engine.attach(optimizer)
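    # (Descriptive note, not in the original code: `alphas` are the
    # Renyi-divergence orders tracked by torchdp's RDP privacy accountant,
    # and `noise_multiplier` scales the Gaussian noise added to the
    # clipped per-sample gradients.)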
exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=config.learning.decrease_lr_factor, gamma=config.learning.decrease_lr_every)
model_target, best_acc_target, data_test_set, label_test_set, class_test_set = train_model(model_target, criterion, optimizer, exp_lr_scheduler,dataloaders_target,dataset_sizes_target,
num_epochs=config.learning.epochs)
np.save(path + "/res_train_target_"+str(param)+".npy", best_acc_target)
print("START TRAINING SHADOW MODEL")
all_shadow_models = []
all_dataloaders_shadow = []
data_train_set = []
label_train_set = []
class_train_set = []
    for num_model_shadow in range(config.general.number_shadow_model):
criterion = nn.CrossEntropyLoss()
        data_train_shadow = custum_MNIST(False, num_model_shadow, config, '../data', train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
]))
        data_test_shadow = custum_MNIST(False, num_model_shadow, config, '../data', train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
]))
train_loader_shadow = torch.utils.data.DataLoader(data_train_shadow, batch_size=config.learning.batch_size, shuffle=True)
test_loader_shadow = torch.utils.data.DataLoader(data_test_shadow, batch_size=config.learning.batch_size, shuffle=True)
dataloaders_shadow = {"train": train_loader_shadow, "val": test_loader_shadow}
dataset_sizes_shadow = {"train": len(data_train_shadow), "val": len(data_test_shadow)}
print("TAILLE dataset", dataset_sizes_shadow)
model_shadow = Net_mnist().to(device)
optimizer = optim.SGD(model_shadow.parameters(), lr=config.learning.learning_rate, momentum=config.learning.momentum)
exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=config.learning.decrease_lr_factor, gamma=config.learning.decrease_lr_every)
        model_shadow, best_acc_sh, data_train_set_unit, label_train_set_unit, class_train_set_unit = train_model(model_shadow, criterion, optimizer, exp_lr_scheduler, dataloaders_shadow, dataset_sizes_shadow,
num_epochs=config.learning.epochs)
data_train_set.append(data_train_set_unit)
label_train_set.append(label_train_set_unit)
class_train_set.append(class_train_set_unit)
        np.save(path + "/res_train_shadow_"+str(num_model_shadow)+"_"+str(param)+".npy", best_acc_sh)
all_shadow_models.append(model_shadow)
all_dataloaders_shadow.append(dataloaders_shadow)
print("START GETTING DATASET ATTACK MODEL")
data_train_set = np.concatenate(data_train_set)
label_train_set = np.concatenate(label_train_set)
class_train_set = np.concatenate(class_train_set)
#data_test_set, label_test_set, class_test_set = get_data_for_final_eval([model_target], [dataloaders_target], device)
#data_train_set, label_train_set, class_train_set = get_data_for_final_eval(all_shadow_models, all_dataloaders_shadow, device)
data_train_set, label_train_set, class_train_set = shuffle(data_train_set, label_train_set, class_train_set, random_state=config.general.seed)
data_test_set, label_test_set, class_test_set = shuffle(data_test_set, label_test_set, class_test_set, random_state=config.general.seed)
print("Taille dataset train", len(label_train_set))
print("Taille dataset test", len(label_test_set))
print("START FITTING ATTACK MODEL")
model = lgb.LGBMClassifier(objective='binary', reg_lambda=config.learning.ml.reg_lambd, n_estimators=config.learning.ml.n_estimators)
model.fit(data_train_set, label_train_set)
y_pred_lgbm = model.predict(data_test_set)
precision_general, recall_general, _, _ = precision_recall_fscore_support(y_pred=y_pred_lgbm, y_true=label_test_set, average = "macro")
accuracy_general = accuracy_score(y_true=label_test_set, y_pred=y_pred_lgbm)
precision_per_class, recall_per_class, accuracy_per_class = [], [], []
for idx_class, classe in enumerate(data_train_target.classes):
all_index_class = np.where(class_test_set == idx_class)
precision, recall, _, _ = precision_recall_fscore_support(y_pred=y_pred_lgbm[all_index_class], y_true=label_test_set[all_index_class], average = "macro")
accuracy = accuracy_score(y_true=label_test_set[all_index_class], y_pred=y_pred_lgbm[all_index_class])
precision_per_class.append(precision)
recall_per_class.append(recall)
accuracy_per_class.append(accuracy)
print("END MNIST")
return (precision_general, recall_general, accuracy_general, precision_per_class, recall_per_class, accuracy_per_class)
|
[
"mezereonxp@gmail.com"
] |
mezereonxp@gmail.com
|
48c9e68734e6c9c358b5cdfb2941ffc5f4e94408
|
ef41d36bd614f2bd0b4e84d30f7a6bd95a996c04
|
/alphaBetaPruning.py
|
d042a7423a9906eb40b6c023300fecc5649392f4
|
[] |
no_license
|
sixou/Four_in_a_Row
|
97e55989e29782fb958202f18a43beabe50ceb8d
|
6b2ec519773aa5584df9823a9a96d93cacf7e2e5
|
refs/heads/main
| 2023-02-04T21:14:52.788318
| 2020-12-28T09:43:56
| 2020-12-28T09:43:56
| 324,967,614
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,392
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Sat May 16 22:44:10 2020
@author: USER
"""
import game
DEPTH=2
def go(gm):
print("In go of game ", gm.board)
if game.isHumTurn(gm):
print("Turn of human")
obj= abmin(gm, DEPTH, game.LOSS-1, game.VICTORY+1)[1]
print("object board: ",obj.board)
return obj
else:
print("Turn of agent")
obj= abmax(gm, DEPTH, game.LOSS-1, game.VICTORY+1)[1]
print("object board: ",obj.board)
return obj
#s = the state (max's turn)
#d = max. depth of search
#a,b = alpha and beta
#returns [v, ns]: v = state s's value. ns = the state after the recommended move.
# if s is a terminal state ns=0.
def abmax(gm, d, a, b):
print("now calculate abmax")
print("d=",d)
print("alpha=",a)
print("beta=",b)
if d==0 or game.isFinished(gm):
print("returns ", [game.value(gm), gm])
return [game.value(gm),gm]
v=float("-inf")
ns=game.getNext(gm)
print("next moves:", len(ns), " possible moves ")
bestMove=0
for st in ns:
tmp=abmin(st,d-1,a,b)
if tmp[0]>v:
v=tmp[0]
bestMove=st
if v>=b:
return [v,st]
if v>a:
a=v
return [v,bestMove]
#s = the state (min's turn)
#d = max. depth of search
#a,b = alpha and beta
#returns [v, ns]: v = state s's value. ns = the state after the recommended move.
# if s is a terminal state ns=0.
def abmin(gm, d, a, b):
print("now calculate abmin")
print("d=",d)
print("a=",a)
print("b=",b)
if d==0 or game.isFinished(gm):
print("returns ", [game.value(gm), gm])
return [game.value(gm),0]
v=float("inf")
ns=game.getNext(gm)
print("next moves:", len(ns), " possible moves ")
bestMove=0
for st in ns:
tmp = abmax(st, d - 1, a, b)
if tmp[0]<v:
v = tmp[0]
bestMove = st
if v <= a:
return [v,st]
if v < b:
b = v
return [v, bestMove]
'''
s=game.create()
game.makeMove(s,1,1)
print(s)
game.makeMove(s,0,0)
game.makeMove(s,0,1)
game.makeMove(s,0,2)
game.makeMove(s,1,0)
game.makeMove(s,1,1)
game.makeMove(s,1,2)
game.makeMove(s,2,1)
game.makeMove(s,2,0)
game.printState(s)
print(go(s))
'''
|
[
"noreply@github.com"
] |
sixou.noreply@github.com
|
419bdb1f567d2f6601df1bbb329c1740bcfe8433
|
78b0c743a59cd78f4efe819197fb77b9af5e394b
|
/pessoal_quadro/migrations/0048_auto_20190516_0245.py
|
c512b5e8abcf74fb5a3a2c75860715d57a7f6b04
|
[] |
no_license
|
ismaely/sigrh_cpl
|
6c741cdb496385c7a52cdd362ec53b7bef3d53ad
|
2cb5b3f3b083e2d50184b220043bcdfcab04a754
|
refs/heads/master
| 2021-06-24T01:11:21.832832
| 2021-01-13T10:54:50
| 2021-01-13T10:54:50
| 183,949,281
| 5
| 0
| null | 2021-01-13T10:54:51
| 2019-04-28T19:21:06
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 744
|
py
|
# Generated by Django 2.2.1 on 2019-05-16 02:45
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pessoal_quadro', '0047_auto_20190516_0245'),
]
operations = [
migrations.AlterField(
model_name='despromocao',
name='suspensao',
field=models.CharField(choices=[('3 Mes de Cadeia', '3 Mes de Cadeia'), ('1 Ano de Cadeia', '1 Ano de Cadeia'), ('Prestar serviço de Limpeza', 'Prestar serviço de Limpeza'), ('2 Mes de Cadeia', '2 Mes de Cadeia'), ('Retirada da Patente', 'Retirada da Patente'), ('2 Ano de Cadeia', '2 Ano de Cadeia'), ('outro', 'outro'), ('1 Mes de Cadeia', '1 Mes de Cadeia')], max_length=500),
),
]
|
[
"7ilipe@gmail.com"
] |
7ilipe@gmail.com
|
dcf0096c49621c2910baa817fd7b4e4d6fa6e7bc
|
62be5e4190cc1c239671c72187c8feaf9ab23d90
|
/一致性哈希算法.py
|
567c593943609c8ffed530cb362ca60a0fbc5927
|
[] |
no_license
|
zhangbo2008/Consistent-hashing-By-python-with-explanation
|
bc1aeb45418f2ea7d9f60750b79c4110007cc8fe
|
1d77437c9166b84b46854b99fa400feec834e3e0
|
refs/heads/main
| 2023-04-03T19:39:49.198438
| 2021-04-06T03:03:17
| 2021-04-06T03:03:17
| 355,039,454
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,817
|
py
|
'''
Consistent hashing algorithm:
'''
# -*- coding: utf-8 -*-
import hashlib
class YHash(object):
def __init__(self, nodes=None, n_number=3):
"""
:param nodes: 所有的节点
:param n_number: 一个节点对应多少个虚拟节点
:return:
"""
self._n_number = n_number # 每一个节点对应多少个虚拟节点,这里默认是3个
self._node_dict = dict() # 用于将虚拟节点的hash值与node的对应关系
self._sort_list = [] # 用于存放所有的虚拟节点的hash值,这里需要保持排序
if nodes:
for node in nodes:
self.add_node(node)
def add_node(self, node):
"""
添加node,首先要根据虚拟节点的数目,创建所有的虚拟节点,并将其与对应的node对应起来
当然还需要将虚拟节点的hash值放到排序的里面
这里在添加了节点之后,需要保持虚拟节点hash值的顺序
:param node:
:return:
"""
for i in range(self._n_number): # 对于每一个节点, 我们添加3个虚拟节点.
node_str = "%s%s" % (node, i) # 所以哈希的子节点就是节点名字后面加上0,1,2
key = self._gen_key(node_str)
self._node_dict[key] = node # 这个是哈希和node的对应表.-------注意这里面使用了3个虚拟节点,他们返回的服务器名字都是node!!!!!!!!!!!!
self._sort_list.append(key) # 这个排序数组在get算法中使用.
self._sort_list.sort()
def remove_node(self, node):
"""
这里一个节点的退出,需要将这个节点的所有的虚拟节点都删除
:param node:
:return:
"""
for i in range(self._n_number):
node_str = "%s%s" % (node, i)
key = self._gen_key(node_str) # 核心操作就是下面2个删除功能就够了.
del self._node_dict[key]
self._sort_list.remove(key)
def get_node(self, key_str):
"""
返回这个字符串应该对应的node,这里先求出字符串的hash值,然后找到第一个小于等于的虚拟节点,然后返回node
如果hash值大于所有的节点,那么用第一个虚拟节点
:param :
:return:
"""
if self._sort_list: # 如果有node
key = self._gen_key(key_str)
for node_key in self._sort_list:
if key <= node_key:
return self._node_dict[node_key] # 根据node_key返回真实服务器地址即可.# 从这里面可以看出虚拟节点的使用效果.不适用虚拟节点之前.的图是这样的
'''
2
1 3
4
4个服务器绕城一圈. 这样比如删除4之后, 那么1 上覆盖的弧长就是2,3的2倍了.
但是使用虚拟节点之后.我们的图变成这样. 比如每个服务器对应虚拟节点是2个.
41
20 21
10 30
40 11
31 比如绕城这样一圈,因为哈斯算法的随机性.所以等分布的.
这时我们删除4这个节点. 那么 40,41 都没了.
所以40那部分的数据给10了.41那部分的给21了.所以还是均衡的.
不会像上面那个都给1. 所以虚拟节点多,是一个好方法!!!!!!!!!!!!!
'''
return self._node_dict[self._sort_list[0]] # 如果大于所有的哈希值,就用第一个就行了.
else:
return None
@staticmethod
def _gen_key(key_str):
"""
通过key,返回当前key的hash值,这里采用md5
:param key:
:return:
"""
input_name = hashlib.md5()
input_name.update(key_str.encode("utf-8"))
return input_name.hexdigest()
fjs = YHash(["127.0.0.1", "192.168.1.1"])
fjs.add_node('127.0.0.2')
fjs.add_node('127.0.0.3')
fjs.add_node('127.0.0.4')
fjs.remove_node('127.0.0.1')
print(fjs.get_node("fjs32121")) # 输入一个字符串表示数据,然后 get_node返回应该用什么服务器来处理这个数据.
print(fjs.get_node("12"))
|
[
"noreply@github.com"
] |
zhangbo2008.noreply@github.com
|
4afad2aa64020a390beb1483101cf70b55493038
|
c678dd2e00277a83fddd89063cecd75179e95063
|
/doc/listings/bnf/ex01.py
|
329861d887665420d0463957afd09a1cf582ea08
|
[] |
no_license
|
fantao963/pybnf
|
d07fc550986346f0ca9ef16f30f578e81b471a5e
|
a59cd3629100927c9e472b0d887ee21c18849bb8
|
refs/heads/master
| 2021-01-16T22:53:56.840718
| 2010-08-16T14:38:17
| 2010-08-16T14:38:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 237
|
py
|
#!/usr/bin/env python
from bnf import Context
from bnf import Literal
if __name__ == '__main__':
context = Context('filename')
# language ::= "Hello, World!"
language = Literal('Hello, World!')
language.parse(context)
|
[
"hotgloupi@trollteam.com"
] |
hotgloupi@trollteam.com
|
f4fc6e462b660b83289d61c769113ee873b187fb
|
4216d56042a9d92f8396d44022a1198d408d3c67
|
/colordetection.py
|
a22934054e753745560b496dcefe9f342c71c607
|
[
"MIT"
] |
permissive
|
mohamedsayed18/JDE_follow_line
|
c0008efec7e3ec3b65ce50f59ad99214b6bd603e
|
86d5910a6ee7ea81c187a5b81032fecd82a64a4d
|
refs/heads/master
| 2020-12-09T07:58:44.503833
| 2020-01-24T05:14:13
| 2020-01-24T05:14:13
| 233,243,030
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 886
|
py
|
import numpy as np
import cv2
image = cv2.imread("Russian_flag.svg") #load image
# define the lower and upper bound of our color in BGR
lower_red = np.array([0,60,255]) #lower limit
upper_red = np.array([40,0,255]) #upper limit
# this function return image with values of (0 or 1) for each pixel
# 0 if out of range, 255 if in the range
mask = cv2.inRange(image, lower_red, upper_red) # filter the red color only
# Apply bitwise and operation which will output only the filterd color
output = cv2.bitwise_and(image, image, mask = mask)
# show the images
cv2.imshow("output", output)
cv2.waitKey(0)
# Color detection Tutorials
#https://github.com/atduskgreg/opencv-processing-book/blob/master/book/filters/in_range.md
#https://www.pyimagesearch.com/2014/08/04/opencv-python-color-detection/
#TODO find centre
#https://www.learnopencv.com/find-center-of-blob-centroid-using-opencv-cpp-python/
|
[
"mohamed95.a.s@gmail.com"
] |
mohamed95.a.s@gmail.com
|
10d3e77b82dc4994a6bde5ebe6467d46c720eb53
|
73827a2c82033af0a4a9e39b505bc0ef27599d6f
|
/Chapter 4/bookmarks/account/urls.py
|
37a78e427814fe6af0bc2398fe1547be0c642ecc
|
[] |
no_license
|
tehmeerali786/Django-by-Example-2
|
3c3d34518c021b7bee2454854020c0c20901c00b
|
af2976a0a4d11ba514415c3a4032d8b5bc8618b6
|
refs/heads/master
| 2020-05-01T14:50:04.393973
| 2019-04-06T21:17:14
| 2019-04-06T21:17:14
| 177,530,706
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,088
|
py
|
from django.urls import path, include
from django.contrib.auth import views as auth_views
from . import views
urlpatterns = [
# post views
# path('login/', views.user_login, name='login'),
path('login/', auth_views.LoginView.as_view(), name='login'),
path('logout/', auth_views.LogoutView.as_view(), name='logout'),
path('', views.dashboard, name='dashboard'),
# Change password urls
path('password_change/', auth_views.PasswordChangeView.as_view(), name='password_change'),
path('password_change/done/', auth_views.PasswordChangeDoneView.as_view(), name='password_change_done'),
# Reset password urls
path('password_reset/', auth_views.PasswordResetView.as_view(), name='password_reset'),
path('password_reset/done/', auth_views.PasswordResetDoneView.as_view(), name='password_reset_done'),
path('reset/<uidb64>/<token>/', auth_views.PasswordResetConfirmView.as_view(), name='password_reset_confirm'),
path('reset/done/', auth_views.PasswordResetCompleteView.as_view(), name='password_reset_complete'),
path('register/', views.register, name='register' ),
]
|
[
"freelanceali786@gmail.com"
] |
freelanceali786@gmail.com
|
3a5f8eb70364316d0ea93d9c69236e97208d4ac5
|
83783b0e157a96dd0e6a4ee5590342918162d6b9
|
/core/admin.py
|
6587634111c1e682ff589f3f38a3ad550a8dbacb
|
[] |
no_license
|
ianAraujj/grupos
|
7a158fc5231139d03d8167c9931c0cbb1938a7fa
|
b8f486b4fedfa688086c36aa4dbb28c7dfeb601d
|
refs/heads/master
| 2022-07-15T18:07:35.695349
| 2020-05-13T20:15:04
| 2020-05-13T20:15:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 483
|
py
|
from django.contrib import admin
from .models import Idioma, Tema, Sobre, Pesquisador, Instituicao, Linha, Publicacao, Premiacao, Projeto, Informacao, Grupo
admin.site.register(Idioma)
admin.site.register(Tema)
admin.site.register(Sobre)
admin.site.register(Pesquisador)
admin.site.register(Instituicao)
admin.site.register(Linha)
admin.site.register(Publicacao)
admin.site.register(Premiacao)
admin.site.register(Projeto)
admin.site.register(Informacao)
admin.site.register(Grupo)
|
[
"gabriel.costa.campos.13@gmail.com"
] |
gabriel.costa.campos.13@gmail.com
|
a1be79a17ca5bb5dfe6898fb7ef7b66d44cb6d7b
|
f8f2536fa873afa43dafe0217faa9134e57c8a1e
|
/aliyun-python-sdk-facebody/aliyunsdkfacebody/request/v20191230/DeleteFaceDbRequest.py
|
c205a9d5028e5151028ee5ea4c11b69a59902eae
|
[
"Apache-2.0"
] |
permissive
|
Sunnywillow/aliyun-openapi-python-sdk
|
40b1b17ca39467e9f8405cb2ca08a85b9befd533
|
6855864a1d46f818d73f5870da0efec2b820baf5
|
refs/heads/master
| 2022-12-04T02:22:27.550198
| 2020-08-20T04:11:34
| 2020-08-20T04:11:34
| 288,944,896
| 1
| 0
|
NOASSERTION
| 2020-08-20T08:04:01
| 2020-08-20T08:04:01
| null |
UTF-8
|
Python
| false
| false
| 1,402
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkfacebody.endpoint import endpoint_data
class DeleteFaceDbRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'facebody', '2019-12-30', 'DeleteFaceDb')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_Name(self):
return self.get_body_params().get('Name')
def set_Name(self,Name):
self.add_body_params('Name', Name)
|
[
"sdk-team@alibabacloud.com"
] |
sdk-team@alibabacloud.com
|
ece1502f78e665a12e5199ee02205d6070c94441
|
176052add237168bc2cf36fc9083db2c27b4a5b5
|
/chimera_visualizations.py
|
256d2b3150c56d94542ae2a85d68ed69182393c1
|
[] |
no_license
|
Buddyboy201/top_pro_pack_scripts
|
15512ed487993904b66a869775f680036fe8c310
|
bea3c768ace4b0ffde4a932083754d3aeb049b7e
|
refs/heads/master
| 2023-07-16T03:24:10.010091
| 2021-09-02T18:54:48
| 2021-09-02T18:54:48
| 402,527,272
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,134
|
py
|
from pathlib import Path
from subprocess import Popen, PIPE, STDOUT, check_call, call
from threading import Thread
import argparse
def _display_chimera(row, pdb_directory, chimera_path):
#chimera_path = Path(r"C:\Program Files\Chimera 1.15rc\bin\chimera.exe")
chimera_path = Path(chimera_path)
residues = ",".join(row[1].split(";"))
path_to_cmd = Path.cwd() / Path("{}.cmd".format(row[0]))
path_to_pdb = Path(pdb_directory) / Path("{}.pdb".format(row[0]))
with open(path_to_cmd, "w") as file:
file.writelines(["select: {}\n".format(residues), "display: {}\n".format(residues), "focus: {}\n".format(residues)])
    if not path_to_pdb.exists():
        raise FileNotFoundError("pdb file does not exist: {}".format(path_to_pdb))
p = check_call([str(chimera_path), str(path_to_pdb), str(path_to_cmd)], shell=True)
path_to_cmd.unlink()
def display_chimera(conn, sql_id, pdb_directory, chimera_path):
stmt_id = "SELECT pdbname, oldresid FROM cliques WHERE id={}".format(sql_id)
row = list(conn.execute(stmt_id))[0]
t = Thread(target=_display_chimera, args=(row, pdb_directory, chimera_path,))
t.start()
|
[
"aprakriya201@gmail.com"
] |
aprakriya201@gmail.com
|
bdd9ada6e3e25ff8707eaf6d31bc474afc410051
|
5223a4a2d4a093c18f923e9ffaab1429f0bac64e
|
/Musicmp3/urls.py
|
f765cdbdfe1f5a482a3739299e2036f85c8ea7d9
|
[] |
no_license
|
rajeshbhagure/MyMusicMp3
|
2f86128114179c48f284255ff5bd0793f36c5ecc
|
05182cbb25e7ed76d300e52bcea5b199f716300d
|
refs/heads/master
| 2022-06-20T08:29:04.648698
| 2020-05-04T02:41:07
| 2020-05-04T02:41:07
| 261,064,605
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,783
|
py
|
"""Musicmp3 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path
from Musicmp3 import settings
from app import views
urlpatterns = [
path('admin/', admin.site.urls),
path('',views.Index.as_view(),name="index"),
path('upload/',views.Upload.as_view(),name="upload"),
path('view_all/',views.View_all.as_view(),name="view_all"),
path('albums/',views.Albums.as_view(),name="albums"),
path('artists/',views.Artists.as_view(),name="artists"),
path('one_album/',views.One_album.as_view(),name="one_album"),
path('one_song/',views.One_song.as_view(),name="one_song"),
path('one_artist/',views.One_artist.as_view(),name="one_artist"),
path('search/',views.search,name="search"),
path('o_delete/',views.OpenDelete.as_view(),name="o_delete"),
path('delete/',views.Delete.as_view(),name="delete"),
path('a_login/',views.AdminLogin.as_view(),name='a_login'),
path('logincheck/',views.logincheck,name='logincheck'),
path('Logout/',views.Logout,name='Logout')
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL,document_root = settings.MEDIA_ROOT)
|
[
"61898509+rajeshbhagure@users.noreply.github.com"
] |
61898509+rajeshbhagure@users.noreply.github.com
|
6a41e0259d6541981609052ac07f75fbaa7d1f87
|
2fb3177fbc779dfff1a588e4bdc92d3525f79124
|
/Mysite/poll/migrations/0001_initial.py
|
6fe414800e68a819fb1b26eabf23855d7fb288e7
|
[] |
no_license
|
AmosChenZixuan/DjangoExercise
|
22ead667cec422366279155d1b4a06e524c6b632
|
56d406fecfb62f0a9828f0c18c071ab9f4b8a7ae
|
refs/heads/main
| 2023-08-21T17:25:23.069241
| 2021-10-15T10:19:46
| 2021-10-15T10:19:46
| 416,677,120
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,074
|
py
|
# Generated by Django 3.1.7 on 2021-03-23 09:36
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Question',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('question_text', models.CharField(max_length=200)),
('pub_date', models.DateTimeField(verbose_name='date published')),
],
),
migrations.CreateModel(
name='Choice',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('choice_text', models.CharField(max_length=200)),
('votes', models.IntegerField(default=0)),
('question', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='poll.question')),
],
),
]
|
[
"czxvvv@foxmail.com"
] |
czxvvv@foxmail.com
|
198be27cf7220ab00377679a79bc9c6081767406
|
dcfcd43b4c00fe897866a1e1d530b0f50ed56116
|
/05-高级数据类型/hm_02_del关键字.py
|
22d4bd7b69452b56a9f994dd9ba59ab8930e05ca
|
[] |
no_license
|
Jeremyljm/new
|
8a93f4422b26272fe4a2b68bf806c40879db2d51
|
8a34245c10923a09b9e1cb178ba7fb5368b01f53
|
refs/heads/master
| 2020-09-22T18:06:57.789590
| 2019-12-02T05:41:10
| 2019-12-02T05:41:10
| 225,294,099
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 197
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
name_list = ["张三", "李四", "王五"]
del name_list[0]
# The del keyword is essentially used to delete a variable from memory
print(name_list)
|
[
"178747324@qq.com"
] |
178747324@qq.com
|
dfb9caed7c2ed3c5596ed79389df148b00ca4253
|
f80d19d58816c14cf51b6457d017ccb1b01918ea
|
/pythonTests/perlinNoise.py
|
9a76e437829e54f20ce8f4452533a5823e6da75d
|
[] |
no_license
|
piochelepiotr/minecraftClone
|
378a277d88d35ab36f1ef517598800b99355e6c5
|
c4389f5f4f7a8164658e7943050119a0508dcd00
|
refs/heads/master
| 2021-05-11T02:53:44.298323
| 2018-11-03T02:24:20
| 2018-11-03T02:24:20
| 117,897,006
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,107
|
py
|
#! /usr/bin/python3
from PIL import Image
import math
import random
def length(a):
(x, y) = a
return math.sqrt(x*x+y*y)
def scale(a):
(x, y) = a
l = math.sqrt(x*x+y*y)
if l == 0:
return a
return (x/l, y/l)
def vec(a, b):
"""returns vector from a to b"""
(xa, ya) = a
(xb, yb) = b
return (xb-xa,yb-ya)
def dot(a, b):
(xa, ya) = a
(xb, yb) = b
return xa*xb + ya*yb
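# Perlin's quintic fade curve 6t^5 - 15t^4 + 10t^3 (descriptive note, not
# in the original): it maps [0, 1] onto [0, 1] with zero first and second
# derivatives at both endpoints, which smooths the interpolation between
# neighbouring grid gradients.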
def fade(x):
return 6*pow(x, 5)-15*pow(x, 4)+10*pow(x,3)
def lerp(t, a, b):
return (1-t)*a + t*b
def noise(x, y, g):
X = int(x)
Y = int(y)
x = x - X
y = y - Y
x = fade(x)
y = fade(y)
gaa = g[X][Y]
gab = g[X][Y+1]
gba = g[X+1][Y]
gbb = g[X+1][Y+1]
paa = (0,0)
pab = (0,0+1)
pba = (0+1,0)
pbb = (0+1,0+1)
p = (x,y)
daa = vec(paa,p)
dab = vec(pab,p)
dba = vec(pba,p)
dbb = vec(pbb,p)
maa = dot(daa,gaa)
mab = dot(dab,gab)
mba = dot(dba,gba)
mbb = dot(dbb,gbb)
v1 = lerp(x, maa, mba)
v2 = lerp(x, mab, mbb)
av = lerp(y, v1, v2)
return (av/2) + 0.5
def randg():
r = random.randint(0,3)
(x,y) = (0,0)
if r == 0:
(x,y) = (1,1)
elif r == 1:
(x,y) = (-1,1)
elif r == 2:
(x,y) = (1,-1)
elif r == 3:
(x,y) = (-1,-1)
#elif r == 4:
# (x,y) = (0,1)
#elif r == 5:
# (x,y) = (0,-1)
#elif r == 6:
# (x,y) = (1,0)
#elif r == 7:
# (x,y) = (-1,0)
else:
print("Erreur")
return (x,y)
def randg2():
return scale((random.randint(-10,10), random.randint(-10,10)))
def main():
c = 6
sizeOne = 16
s = sizeOne*c
g = [[randg() for x in range(c+1)] for i in range(c+1)]
img = Image.new('RGB', (s, s), "black")
t = []
pixels = img.load()
for x in range(s):
for y in range(s):
color = noise(x/sizeOne, y/sizeOne, g)
t.append(color)
print(color)
            v = int(color*255)
            pixels[x,y] = (v, v, v)  # write the grayscale value to all three RGB channels
print("max: ", max(t))
print("min: ", min(t))
img.save("image.png")
img.show()
main()
|
[
"piotr.wolski@telecom-paristech.fr"
] |
piotr.wolski@telecom-paristech.fr
|
db1a4cfb543246788a5916186c15318d03d8f7bf
|
c0f1400dc19eaa1db79a4d3f8c425f5c6f4a166b
|
/Exam 1/Hi-Ho Cherry-O.py
|
36ba5c3d7f7afb8cbbb1b1f68c1ef20024996b68
|
[
"MIT"
] |
permissive
|
leking6176/PHYS-3211
|
5ecf7cfcf15b9f36bd0da1261d5112b9249c66cc
|
cd3aca75a3c41a57e6525e344d933e6f7c21e3fe
|
refs/heads/master
| 2020-07-08T06:46:56.610320
| 2019-12-11T21:02:10
| 2019-12-11T21:02:10
| 203,597,129
| 0
| 0
|
MIT
| 2019-08-21T14:05:37
| 2019-08-21T14:05:37
| null |
UTF-8
|
Python
| false
| false
| 2,942
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 26 17:55:52 2019
Exam 1 Hi-Ho Cherry-O
@author: Lauren King
"""
import numpy as np
import random as rand  # rand.random() is used for the spinner below
tree=10
basket=0
w=0
q=0
spin=rand.random()*7
print("New Game: Equal Probabilities")
while basket < 10:
if spin <=1:
basket +=1
tree -= 1
spin=rand.random()*7
w+=1
print("1 Cherry")
elif spin <=2:
basket +=2
tree -= 2
spin=rand.random()*7
w+=1
print("2 Cherries")
elif spin <=3:
basket +=3
tree -= 3
spin=rand.random()*7
w+=1
print("3 Cherries")
elif spin <=4:
basket +=4
tree -= 4
spin=rand.random()*7
w+=1
print("4 Cherries")
elif spin <=5:
if basket == 0:
spin=rand.random()*7
elif basket == 1:
basket= 0
tree= 10
spin=rand.random()*7
elif basket >= 2:
basket -=2
tree+=2
spin=rand.random()*7
w+=1
print("Dog")
elif spin <=6:
if basket == 0:
spin=rand.random()*7
elif basket == 1:
basket= 0
tree= 10
spin=rand.random()*7
elif basket >= 2:
basket -=2
tree+=2
spin=rand.random()*7
w+=1
print("Bird")
elif spin <= 7:
basket=0
tree=10
spin=rand.random()*7
w+=1
print("Spilled Basket")
print(w)
tree=10
basket=0
spin=rand.random()*11
print(" ")
print("Old Game: Not Equal Probabilities")
while basket < 10:
if spin <=2:
basket +=1
tree -= 1
spin=rand.random()*11
q+=1
print("1 Cherry")
elif spin <=4:
basket +=2
tree -= 2
spin=rand.random()*11
q+=1
print("2 Cherries")
elif spin <=6:
basket +=3
tree -= 3
spin=rand.random()*11
q+=1
print("3 Cherries")
elif spin <=8:
basket +=4
tree -= 4
spin=rand.random()*11
q+=1
print("4 Cherries")
elif spin <=9:
if basket == 0:
spin=rand.random()*11
elif basket == 1:
basket= 0
tree= 10
spin=rand.random()*11
elif basket >= 2:
basket -=2
tree+=2
spin=rand.random()*11
q+=1
print("Dog")
elif spin <=10:
if basket == 0:
spin=rand.random()*11
elif basket == 1:
basket= 0
tree= 10
spin=rand.random()*11
elif basket >= 2:
basket -=2
tree+=2
spin=rand.random()*11
q+=1
print("Bird")
elif spin <= 11:
basket=0
tree=10
spin=rand.random()*11
q+=1
print("Spilled Basket")
print(q)
|
[
"leking6176@ung.edu"
] |
leking6176@ung.edu
|
e5e3067412f5c4a01bdbed5af97c62c9916d777c
|
2178081be880bff976c63a14a21ad03da3c0ccba
|
/hmparse.py
|
725d7ca44a8b88979db0b69800d67b854c71edb8
|
[] |
no_license
|
Zorro666/HMTOOL
|
05a4a3a5f14afb1494a9cceb27cbd943cfa55f7b
|
c6bb7673fe3b679df9c083aa2b46bfefda55510a
|
refs/heads/master
| 2016-09-03T06:41:09.433119
| 2011-07-30T12:46:07
| 2011-07-30T12:46:07
| 2,098,001
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,789
|
py
|
#! /usr/bin/python
import xml.etree.ElementTree
import os.path
from hmgamestate import GameStateInfo
# Example file for parsing
#<HM_GameState>
# <net_entities>
# <e AHI="000" C="SpawnPoint" CC="0" F="0x0" N="ResultViewPlayerSpawn_2" NI="1" ORI="0.00,0.00,0.93,0.37" P="" POS="102.4,83.0,40.9" />
# <e AHI="010" C="BasicEntity" CC="1" F="0x0" N="Helicopter_body1" NI="2" ORI="-0.22,-0.07,0.59,0.77" P="BasicEntity2" POS="319.0,55.0,-15.7">
# <c N="Sound_Helicopter" />
# </e>
# </net_entities>
# <net_actors>
# <a A="125" AC="0" BC="100" CI="137" D="0" H="125" IC="1" MA="125" MH="125" N="zorro" NI="129">
# <i NI="135" />
# <b C="100" N="ammo_pack" />
# </a>
# </net_actors>
# <global numNetAct="3" numNetEnt="135" />
#</HM_GameState>
class HMParse():
def __init__(self):
self.serverXML = None
self.clientXMLs = []
self.serverGameStateInfo = GameStateInfo()
self.clientGameStateInfos = []
def clearClientXMLs(self):
self.clientGameStateInfos = []
def loadClientXMLs(self, clientFileNames):
self.clientGameStateInfos = []
for clientFileName in clientFileNames:
self.loadClientXML(clientFileName)
def loadClientXML(self, clientFileName):
print "loadClientXML:", clientFileName
if os.path.isfile(clientFileName) == False:
print "file does't exist:", clientFileName
return
        # "GameState_2011_07_29_15_41_zorro_2_10.16.5.161_31544.xml"
        tokens = clientFileName.split("_")
        nickname = "Client " + "_".join(tokens[6:-2])
        print "Tokens:", tokens
        print "File:", clientFileName, "nickname:", nickname
        clientXML = xml.etree.ElementTree.parse(clientFileName).getroot()
clientXMLstring = xml.etree.ElementTree.tostring(clientXML)
self.addClientXML(nickname, clientXMLstring)
def addClientXML(self, nickname, clientXMLstring):
print "addClientXML"
clientXML = xml.etree.ElementTree.XML(clientXMLstring)
self.parseClientXML(nickname, clientXML)
def parseClientXML(self, nickname, clientXML):
result = self.parseHMGameStateXML(nickname, clientXML)
if result[0] == False:
print "parseServerXML failed"
return
print "parseClientXML succeeded"
self.clientXMLs.append(clientXML)
clientGameStateInfo = result[1]
self.clientGameStateInfos.append(clientGameStateInfo)
print clientGameStateInfo.output()
def loadServerXML(self, serverFileName):
print "loadServerXML:", serverFileName
if os.path.isfile(serverFileName) == False:
print "file does't exist:", serverFileName
return
serverXML = xml.etree.ElementTree.parse(serverFileName).getroot()
serverXMLstring = xml.etree.ElementTree.tostring(serverXML)
self.setServerXML(serverXMLstring)
def setServerXML(self, serverXMLstring):
print "serServerXML"
self.serverXML = xml.etree.ElementTree.XML(serverXMLstring)
self.parseServerXML()
def parseServerXML(self):
result = self.parseHMGameStateXML("Server", self.serverXML)
if result[0] == False:
print "parseServerXML failed"
self.serverXML = None
return
print "parseServerXML succeeded"
self.serverGameStateInfo = result[1]
print self.serverGameStateInfo.output()
def parseHMGameStateXML(self, nickname, gameStateXML):
gameStateInfo = GameStateInfo()
if gameStateXML.tag != "HM_GameState":
print "ERROR can't find HM_GameState:"+gameStateXML.tag
return [False, gameStateInfo]
success = gameStateInfo.parse(nickname, gameStateXML)
return [success, gameStateInfo]
def compareServerToAllClients(self):
clientsDifferent = []
for clientGameStateInfo in self.clientGameStateInfos:
if self.compareServerToClient(clientGameStateInfo) == False:
clientsDifferent.append(clientGameStateInfo.nickname)
if len(clientsDifferent) > 0:
return [False, clientsDifferent]
return [True, clientsDifferent]
def compareServerToClient(self, clientGameStateInfo):
res = self.serverGameStateInfo.compare(clientGameStateInfo)
print res[1]
return res[0]
def compareServerToClientByNickname(self, nickname):
for clientGameStateInfo in self.clientGameStateInfos:
if clientGameStateInfo.nickname == nickname:
res = self.serverGameStateInfo.compare(clientGameStateInfo)
return res
print "ERRRO: Client nickname:", nickname, " not found"
return [False, ""]
def runTest():
this = HMParse()
this.loadServerXML("Jake.xml")
this.loadServerXML("GameState_2011_07_29_15_41_Server_localhost_31415.xml")
this.loadClientXMLs(["GameState_2011_07_29_15_41_zorro_2_10.16.5.161_31544.xml"])
print ""
print "############# Comparing server to all clients #####################"
print ""
res = this.compareServerToAllClients()
if res[0] == False:
print "Server different to clients:", res[1]
else:
print "Server identical to clients"
if __name__ == '__main__':
runTest()
|
[
"jake@evansturner.co.uk"
] |
jake@evansturner.co.uk
|
08dfd545561dcc856b61ad326c14fe994437f260
|
bd5582a6316f18615391ce495511fa7d72eff9e4
|
/SMS_2017/main/migrations/0001_initial.py
|
896a4cf0138d718d53b99e8e2fb725a9e3c11627
|
[] |
no_license
|
anirudhk686/SMS_2017
|
026e7d2451a73c7ca11bf0134c1cea3d8ed8e313
|
4bb2a98a65373a196f86796631cb8d8750e16fe3
|
refs/heads/master
| 2021-01-15T10:41:57.869485
| 2017-08-07T15:23:23
| 2017-08-07T15:23:23
| 99,592,276
| 1
| 0
| null | 2017-08-07T15:24:45
| 2017-08-07T15:24:45
| null |
UTF-8
|
Python
| false
| false
| 753
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-21 11:24
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Teaminfo',
fields=[
('team_no', models.PositiveSmallIntegerField(primary_key=True, serialize=False)),
('password', models.PositiveSmallIntegerField()),
('member1', models.CharField(max_length=8)),
('member2', models.CharField(default='NONE', max_length=8)),
('money', models.PositiveIntegerField(default=1000000)),
],
),
]
|
[
"anirudhk686@gmail.com"
] |
anirudhk686@gmail.com
|
a26e3688d0b024a6b2a0c96c2de2b79ce13fe12c
|
5d1e43c40b78d55cb9ff9a191f02bbe15991b54a
|
/assignment2/cs231n/datasets/.env/bin/cygdb
|
6b6f385c1e113c70275684a7e8e46bd7a6248d0b
|
[] |
no_license
|
nicky4/CS231N
|
a70b31b013e4d2ad5f3d2b1dc6e8ac6b467ea698
|
bee27e32596d75306870621a11c8f11a009358bc
|
refs/heads/master
| 2021-01-10T06:22:41.800077
| 2016-02-06T10:01:00
| 2016-02-06T10:01:00
| 51,063,053
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 298
|
#!/Users/nic/Documents/Nic/Work/Winter2016/CS231N/assignment2/cs231n/datasets/.env/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from Cython.Debugger.Cygdb import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"nic.zhang4@gmail.com"
] |
nic.zhang4@gmail.com
|
|
5dd6e1266ba6e7a680ca730a91f43ac4d55d7227
|
7146e1722d6055199f40df2c6bb4c403ce1a617c
|
/utils.py
|
c0fa87367f43127983d208df232a0d3c98058f41
|
[
"MIT"
] |
permissive
|
kingster996/Web2Executable
|
6baca3f3319811ad37542d132c846f60db356401
|
5486419a9f585db66551e242346668119e93b1d2
|
refs/heads/master
| 2020-05-20T05:56:03.124977
| 2018-02-12T03:08:53
| 2018-02-12T03:08:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,297
|
py
|
"""Utility functions for Web2Executable
This module holds utility functions that are useful to both the command line
and GUI modules, but aren't related to either module.
"""
from __future__ import print_function
import os
import zipfile
import io
import platform
import urllib.request as request
import tempfile
import codecs
import shutil
import subprocess
from appdirs import AppDirs
import validators
import traceback
import logging
from PySide import QtCore
logger = logging.getLogger(__name__)
def url_exists(path):
if validators.url(path):
return True
return False
def format_exc_info(exc_info):
"""Return exception string with traceback"""
exc_format = traceback.format_exception(exc_info[0],
exc_info[1],
exc_info[2])
error = ''.join([x for x in exc_format])
return error
def load_last_project_path():
"""Load the last open project.
Returns:
string: the last opened project path
"""
proj_path = ''
proj_file = get_data_file_path(config.LAST_PROJECT_FILE)
if os.path.exists(proj_file):
with codecs.open(proj_file, encoding='utf-8') as f:
proj_path = f.read().strip()
if not proj_path:
proj_path = QtCore.QDir.currentPath()
return proj_path
def load_recent_projects():
"""Load the most recent projects opened.
Returns:
list: project files sorted by most recent
"""
files = []
history_file = get_data_file_path(config.RECENT_FILES_FILE)
if not os.path.exists(history_file):
return files
with codecs.open(history_file, encoding='utf-8') as f:
for line in f:
line = line.strip()
if line and os.path.exists(line):
files.append(line)
files.reverse()
return files
def save_project_path(path):
"""Save the last open project path."""
proj_file = get_data_file_path(config.LAST_PROJECT_FILE)
with codecs.open(proj_file, 'w+', encoding='utf-8') as f:
f.write(path)
def save_recent_project(proj):
"""Save the most recent projects to a text file."""
recent_file_path = get_data_file_path(config.RECENT_FILES_FILE)
max_length = config.MAX_RECENT
recent_files = []
if os.path.exists(recent_file_path):
file_contents = codecs.open(recent_file_path, encoding='utf-8').read()
recent_files = file_contents.split('\n')
try:
recent_files.remove(proj)
except ValueError:
pass
recent_files.append(proj)
with codecs.open(recent_file_path, 'w+', encoding='utf-8') as f:
for recent_file in recent_files[-max_length:]:
if recent_file and os.path.exists(recent_file):
f.write('{}\n'.format(recent_file))
def replace_right(source, target, replacement, replacements=None):
"""
String replace rightmost instance of a string.
Args:
source (string): the source to perform the replacement on
target (string): the string to search for
replacement (string): the replacement string
        replacements (int or None): if an integer, only replaces N occurrences;
            otherwise all occurrences are replaced
"""
return replacement.join(source.rsplit(target, replacements))
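# For example (an illustration, not from the original module):
#   replace_right('a.b.b', '.b', '!', 1)  ->  'a.b!'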
def is_windows():
return platform.system() == 'Windows'
def get_temp_dir():
return tempfile.gettempdir()
## File operations ------------------------------------------------------
# These are overridden because shutil gets Windows directories confused
# and cannot write to them even if they are valid in cmd.exe
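# (Descriptive note, not in the original: the '\\?\' prefix used by several
# functions below opts into Windows extended-length paths, lifting the
# legacy MAX_PATH limit.)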
def path_join(base, *rest):
new_rest = []
for r in rest:
new_rest.append(str(r))
rpath = '/'.join(new_rest)
if not os.path.isabs(rpath):
rpath = base + '/' + rpath
if is_windows():
rpath = rpath.replace('/', '\\')
rpath = os.path.normpath(rpath)
return rpath
def get_data_path(dir_path):
parts = dir_path.split('/')
if config.TESTING:
data_path = path_join(config.CWD, 'tests', 'test_data', *parts)
else:
dirs = AppDirs('Web2Executable', 'Web2Executable')
data_path = path_join(dirs.user_data_dir, *parts)
if is_windows():
data_path = data_path.replace('\\', '/')
if not os.path.exists(data_path):
os.makedirs(data_path)
return data_path
def abs_path(file_path):
path = os.path.abspath(file_path)
if is_windows():
path = path.replace('/', '\\')
return path
def get_data_file_path(file_path):
parts = file_path.split('/')
data_path = get_data_path('/'.join(parts[:-1]))
return path_join(data_path, parts[-1])
def rmtree(path, **kwargs):
if is_windows():
if os.path.isabs(path):
path = '\\\\?\\'+path.replace('/', '\\')
shutil.rmtree(path, **kwargs)
def copy(src, dest, **kwargs):
if is_windows():
if os.path.isabs(src):
src = '\\\\?\\'+src.replace('/', '\\')
if os.path.isabs(dest):
dest = '\\\\?\\'+dest.replace('/', '\\')
shutil.copy2(src, dest, **kwargs)
def move(src, dest, **kwargs):
if is_windows():
if os.path.isabs(src):
src = '\\\\?\\'+src.replace('/', '\\')
if os.path.isabs(dest):
dest = '\\\\?\\'+dest.replace('/', '\\')
shutil.move(src, dest, **kwargs)
def copytree(src, dest, **kwargs):
if is_windows():
if os.path.isabs(src) and not src.startswith('\\\\'):
src = '\\\\?\\'+src.replace('/', '\\')
if os.path.isabs(dest) and not dest.startswith('\\\\'):
dest = '\\\\?\\'+dest.replace('/', '\\')
shutil.copytree(src, dest, **kwargs)
## ------------------------------------------------------------
def log(*args):
"""Print logging information or log it to a file."""
if config.DEBUG:
print(*args)
logger.info(', '.join(args))
def open_folder_in_explorer(path):
"""Cross platform open folder window."""
if platform.system() == "Windows":
os.startfile(path)
elif platform.system() == "Darwin":
subprocess.Popen(["open", path])
else:
subprocess.Popen(["xdg-open", path])
def zip_files(zip_file_name, project_dir, *args, **kwargs):
"""
Zip files into an archive programmatically.
Args:
zip_file_name (string): the name of the resulting zip file
args: the files to zip
kwargs: Options
verbose (bool): if True, gives verbose output
exclude_paths (list): a list of paths to exclude
"""
zip_file = zipfile.ZipFile(zip_file_name, 'w', config.ZIP_MODE)
verbose = kwargs.pop('verbose', False)
old_path = os.getcwd()
os.chdir(project_dir)
for arg in args:
if os.path.exists(arg):
file_loc = arg
if verbose:
log(file_loc)
try:
zip_file.write(file_loc)
except ValueError:
os.utime(file_loc, None)
zip_file.write(file_loc)
os.chdir(old_path)
zip_file.close()
def join_files(destination, *args, **kwargs):
"""
Join any number of files together by stitching bytes together.
This is used to take advantage of NW.js's ability to execute a zip file
contained at the end of the exe file.
Args:
destination (string): the name of the resulting file
args: the files to stitch together
"""
with io.open(destination, 'wb') as dest_file:
for arg in args:
if os.path.exists(arg):
with io.open(arg, 'rb') as file:
while True:
bytes = file.read(4096)
if len(bytes) == 0:
break
dest_file.write(bytes)
def urlopen(url):
"""
    Call urllib.request.urlopen with a modified SSL context to prevent
    "SSL: CERTIFICATE_VERIFY_FAILED" errors when no verification is
    actually needed.
"""
return request.urlopen(url, context=config.SSL_CONTEXT)
# To avoid a circular import, config is imported at the bottom of the
# file; the functions above reference the module-level name at call time.
import config
|
[
"jyapayne@gmail.com"
] |
jyapayne@gmail.com
|
c7ca295ae615e470f14bce12ffeb05f9e2443ba5
|
4569d707a4942d3451f3bbcfebaa8011cc5a128d
|
/tracbibplugin/trunk/tracbib/bibtexparse.py
|
f91e665f26a36e46d8463dea2875e15ae25048b4
|
[] |
no_license
|
woochica/trachacks
|
28749b924c897747faa411876a3739edaed4cff4
|
4fcd4aeba81d734654f5d9ec524218b91d54a0e1
|
refs/heads/master
| 2021-05-30T02:27:50.209657
| 2013-05-24T17:31:23
| 2013-05-24T17:31:23
| 13,418,837
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 21,101
|
py
|
#!/usr/bin/env python
"""
tracbib/bibtexparser.py
Copyright (C) 2011 Roman Fenkhuber
Tracbib is a trac plugin hosted on trac-hacks.org. It brings support for
citing from bibtex files in the Trac wiki from different sources.
This file is mostly based on the version offered by Vidar Bronken Gundersen
and Sara Sprenkle. See the copyright notices below. I just provide the
function authors(data) which returns correctly split author strings, as
described on http://artis.imag.fr/~Xavier.Decoret/resources/xdkbibtex\
/bibtex_summary.html.
tracbib is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
tracbib is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with tracbib. If not, see <http://www.gnu.org/licenses/>.
"""
"""
Yet another Parser for bibtex files
Usage: python bibtexparse.py bibfile.bib
output will be bibfile.xml
Reuse approved as long as this notification is kept.
Licence: GPL.
This is a version cannibalized from bibtexml.py by Vidar Bronken Gundersen
and modified by Sara Sprenkle (See Original copyright note below.)
1. The main change is that we parse the bibtex file and put the list in
a python dictionary, making any post-processing very easy. For an example
look at the main routine, where authors are selected.
It can also be imported as a module from a script for a more complex
processing or conversion
2. Conversion to xml is simply a write of a python object (dictionary)
3. Added handling of accents and some other latex tags (incomplete) through
a translation table
4. Handling of a few LaTeX math constructions.
5. There were some small bugs when one of the fields (I found them in
url or abstract) has an equal sign "=" in the data.
6. Entries like:
author = {{Author1}, R.~O. and {Author2}, J. and {Author3}, P.},
were not correctly handled.
Points 3 and 4 arose when bibtex entries were downloaded from
http://adsabs.harvard.edu/physics_service.html
7. Added a tag <bibxml:authors> around all <bibxml:author> and the same for
----------------------------------------------------------------------
----------------------------------------------------------------------
----------------------------------------------------------------------
Original copyright notice
----------------------------------------------------------------------
(c)2002-06-23 Vidar Bronken Gundersen
http://bibtexml.sf.net/
Reuse approved as long as this notification is kept.
Licence: GPL.
Contributions/thanks to:
Egon Willighagen, http://sf.net/projects/jreferences/
Richard Mahoney (for providing a test case)
Edited by Sara Sprenkle to be more robust and handle more bibtex features.
(c) 2003-01-15
1. Changed bibtex: tags to bibxml: tags.
2. Use xmlns:bibxml="http://bibtexml.sf.net/"
3. Allow spaces between @type and first {
4. "author" fields with multiple authors split by " and "
are put in separate xml "bibxml:author" tags.
5. Option for Titles: words are capitalized
only if first letter in title or capitalized inside braces
6. Removes braces from within field values
7. Ignores comments in bibtex file (including @comment{ or % )
8. Replaces some special latex tags, e.g., replaces ~ with ' '
9. Handles bibtex @string abbreviations
--> includes bibtex's default abbreviations for months
--> does concatenation of abbr # " more " and " more " # abbr
10. Handles @type( ... ) or @type{ ... }
11. The keywords field is split on , or ; and put into separate xml
"bibxml:keywords" tags
12. Ignores @preamble
Known Limitations
1. Does not transform Latex encoding like math mode and special latex
symbols.
2. Does not parse author fields into first and last names.
E.g., It does not do anything special to an author whose name is in the
form LAST_NAME, FIRST_NAME
In "author" tag, will show up as
<bibxml:author>LAST_NAME, FIRST_NAME</bibxml:author>
3. Does not handle "crossref" fields other than to print
<bibxml:crossref>...</bibxml:crossref>
4. Does not inform user of the input's format errors. You just won't be
able to transform the file later with XSL
You will have to manually edit the XML output if you need to handle
these (and unknown) limitations.
----------------------------------------------------------------------
"""
import sys
import string
import re
from helper import match_pair, replace_tags, def_strings
# set of valid name characters
valid_name_chars = r'[\w\-:]'
#
# define global regular expression variables
#
author_rex = re.compile(r'\s+and\s+')
rembraces_rex = re.compile(r'[{}]')
capitalize_rex = re.compile(r'({[^}]*})')
# used by bibtexkeywords(data)
keywords_rex = re.compile(r'[,;]')
# split on {, }, or " in verify_out_of_braces
delimiter_rex = re.compile(r'([{}"])', re.I)
field_rex = re.compile(r'\s*(\w*)\s*=\s*(.*)')
data_rex = re.compile(r'\s*(\w*)\s*=\s*([^,]*),?')
def handle_math(str):
mathexp = [(r'\^([^{]+){', r'<sup>\1</sup>'),
(r'\^{([^{]+)}', r'<sup>\1</sup>'),
(r'_([^{]+){', r'<sub>\1</sub>'),
(r'_{([^{]+)}', r'<sub>\1</sub>')
]
# mathmarker= ('<math>','</math>')
mathmarker = ('', '')
# Find math substrings
p = re.compile(r'\$([^\$]+)\$')
if p.search(str):
ini = 0
linecontent = ''
iterator = p.finditer(str)
for match in iterator:
strmath = match.group()[1:-1]
linecontent += str[ini:match.start()]
for i, o in mathexp:
strmath = re.sub(i, o, strmath)
linecontent += mathmarker[0] + strmath + mathmarker[1]
ini = match.end()
linecontent += str[ini:]
else:
return str
return linecontent
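# Illustrative examples (with the empty math markers configured above):
#   handle_math('energy $E_{0}$')  ->  'energy E<sub>0</sub>'
#   handle_math('mass $m^{2}$')    ->  'mass m<sup>2</sup>'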
# return the string parameter without braces
#
def removebraces(str):
return rembraces_rex.sub('', str)
# fix author so that it creates multiple authors,
# split by "and"
def bibtexauthor(data):
bibtex = []
author_list = author_rex.split(data)
for author in author_list:
author = author.strip()
bibtex.append(removebraces(author).strip())
return bibtex
# @return the bibtex for the title
# @param data --> title string
# braces are removed from title
def bibtextitle(data):
# title = removebraces(data).strip()
title = data.strip()
return title
# @return the bibtex for the keyword
# keywords are assumed to be delimited by , or ;
def bibtexkeyword(data):
bibtex = []
keyword_list = keywords_rex.split(data)
for keyword in keyword_list:
keyword = keyword.strip()
bibtex.append(removebraces(keyword).strip())
return bibtex
# @return an array containing a dictionary for each author, split into 'first',
# 'last', 'von' and 'jr'
# @param data -> The 'authors' BibTex field
def authors(data):
tokenized = []
a = []
sticky = (None, "")
#determine the case of the word
for i in re.finditer("(?P<caseless>[{\\\][^,\s]*)|(?P<separator>,)"
"|(?P<word>[^\s,]+)|(?P<space>\s)", data):
if not sticky[0] and re.search("{", i.group(0)) \
and not match_pair(i.group(0)): # brace not closed?
if i.group("caseless"):
sticky = ("caseless", i.group(0))
elif i.group("word"):
sticky = ("word", i.group(0))
continue
elif sticky[0] and not match_pair(sticky[1] + i.group(0)):
sticky = (sticky[0], sticky[1] + i.group(0))
continue
if sticky[0]:
match = sticky[1] + i.group(0)
token = sticky[0]
sticky = (None, "")
else:
match = i.group(0)
if i.group("caseless"):
token = "caseless"
if i.group("word"):
token = "word"
if i.group("separator"):
a.append("separator")
token = "separator"
if i.group("space"):
token = "space"
if token == "caseless":
m = (0, 0)
caseless = match
while m:
m = match_pair(caseless)
if m and m[0] == 0:
caseless = caseless[m[1]:]
else:
break
w = re.search("[\w]", caseless)
if len(caseless) > 0 and w:
if w.group(0).islower() or w.group(0).isdigit():
a.append(("lowercase", match))
else:
a.append(("uppercase", match))
else:
a.append(("caseless", match))
elif token == "word":
if match == "and":
tokenized.append(a)
a = []
elif match[0].islower() or match[0].isdigit():
a.append(("lowercase", match))
else:
a.append(("uppercase", match))
if sticky[0]:
pass
#raise Exception("Brace error!")
tokenized.append(a)
#determine the cite structure
ret = []
for author in tokenized:
count = author.count("separator")
a = {"first": "", "von": "", "last": "", "jr": ""}
#First von Last
if count == 0:
index = 0
#first
for index, word in enumerate(author):
if index + 1 < len(author) and word[0] != "lowercase":
a["first"] += " " + word[1]
else:
author = author[index:]
break
#von
caseless = []
for index, word in enumerate(author):
if index + 1 < len(author) and word[0] != "uppercase":
if word[0] == "caseless":
caseless.append(word[1])
elif word[0] == "lowercase":
for w in caseless:
a["von"] += " " + w
caseless = []
a["von"] += " " + word[1]
else:
author = author[index:]
#last
for word in caseless:
a["last"] += " " + word
for index, word in enumerate(author):
a["last"] += " " + word[1]
#von Last, [jr ,] First
elif count > 0:
#von
upper = []
for index, word in enumerate(author):
if author[index + 1] == "separator":
upper.append(word[1])
author = author[index + 2:]
break
if word == "uppercase":
upper.append(word)
elif word != "separator":
for w in upper:
a["von"] += " " + w
upper = []
a["von"] += " " + word[1]
else:
author = author[index + 1:]
break
#last
for word in upper:
a["last"] += " " + word
#jr
if count > 1:
for index, word in enumerate(author):
if word != "separator":
a["jr"] += " " + word[1]
else:
author = author[index + 1:]
break
#first
for index, word in enumerate(author):
if word != "separator":
a["first"] += " " + word[1]
else:
a["first"] += ","
elif count > 1:
pass
b = {}
for k in a:
if len(a[k]) > 0:
b[k] = a[k]
b[k] = b[k].lstrip()
ret.append(b)
return ret
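# Intended behavior (sketch; some edge cases may parse differently):
#   authors('de la Cruz, Juan and Smith, A.')
#   ->  [{'von': 'de la', 'last': 'Cruz', 'first': 'Juan'},
#        {'last': 'Smith', 'first': 'A.'}]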
# data = title string
# @return the capitalized title (first letter is capitalized), rest are
# capitalized only if capitalized inside braces
def capitalizetitle(data):
title_list = capitalize_rex.split(data)
title = ''
count = 0
for phrase in title_list:
#print phrase
check = string.lstrip(phrase)
# keep phrase's capitalization the same
if check.find('{') == 0:
title = title + removebraces(phrase)
else:
# first word --> capitalize first letter (after spaces)
if count == 0:
title = title + check.capitalize()
else:
title = title + phrase.lower()
count = count + 1
return title
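# Example:
#   capitalizetitle('The {BibTeX} Format Explained')  ->  'The BibTeX format explained'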
def no_outer_parens(filecontents):
# do checking for open parens
# will convert to braces
paren_split = re.split('([(){}])', filecontents)
open_paren_count = 0
open_type = 0
look_next = 0
# rebuild filecontents
filecontents = ''
at_rex = re.compile('@\w*')
for phrase in paren_split:
if look_next == 1:
if phrase == '(':
phrase = '{'
open_paren_count = open_paren_count + 1
else:
open_type = 0
look_next = 0
if phrase == '(':
open_paren_count = open_paren_count + 1
elif phrase == ')':
open_paren_count = open_paren_count - 1
if open_type == 1 and open_paren_count == 0:
phrase = '}'
open_type = 0
elif at_rex.search(phrase):
open_type = 1
look_next = 1
filecontents = filecontents + phrase
return filecontents
def get_fields(strng):
f = strng.find('=')
braces_rex = re.compile(r'\s*[{]')
comilla_rex = re.compile(r'\s*["]')
start = 0
fields = []
end = len(strng)
# start holds the current position in the strng
# f : position of equal sign
# s : position of {, opening " or first line after the equal sign
# e : position of closing }, " or next comma
while f != -1 and start < end:
name = string.strip(strng[start:f]).lower()
if name != '':
ss = strng[f + 1:]
if braces_rex.match(ss):
s, e = match_pair(ss)
data = ss[s + 1:e - 1].strip()
elif comilla_rex.match(ss):
s = string.find(ss, r'"')
e = string.find(ss, r'"', s + 1)
data = ss[s + 1:e].strip()
else:
s = 1
e = ss.find(',')
data = ss[s:e].strip()
fields.append((name, data))
            # advance past the trailing comma, if any
e = ss.find(',', e) + 1
start = f + e + 2
f = string.find(strng, '=', start)
return fields
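# Illustrative example (assuming helper.match_pair returns the index of the
# opening brace and the position just past its matching close):
#   get_fields('title = {A Title}, year = "2002", pages = 1--10,')
#   ->  [('title', 'A Title'), ('year', '2002'), ('pages', '1--10')]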
# make all whitespace into just one space
# format the bibtex file into a usable form.
# Bug: does not handle "=" inside field data (for instance in url)
def bibtexload(filecontents_source):
space_rex = re.compile('\s+')
pub_rex = re.compile('\W?@(\w*)\s*{')
filecontents = []
    # remove trailing and excessive whitespace
    # (comments are effectively skipped later: only @type{...} blocks are parsed)
    for line in filecontents_source:
        line = string.strip(line)
        line = space_rex.sub(' ', line)
        filecontents.append(' ' + line)
filecontents = string.join(filecontents, '')
# the file is in one long string
filecontents = no_outer_parens(filecontents)
# character encoding, reserved latex characters
filecontents = re.sub('{\\\&}', '&', filecontents)
filecontents = re.sub('\\\&', '&', filecontents)
filecontents = filecontents.strip()
#
# Find entries
#
strings = []
entries = {}
s = 0
e = 0
start = 0
final = len(filecontents) - 1
while start < final:
entry = {}
m = pub_rex.search(filecontents[start:])
if m:
start += m.start()
arttype = string.lower(pub_rex.sub('\g<1>', m.group()))
d = match_pair(filecontents[start:])
if d:
s, e = d
s += start + 1
e += (start - 1)
# current has the currently analyzed entry
current = filecontents[s:e]
if arttype == 'string':
name, defin = string.split(current, "=")
defin = defin.replace('"', '').replace(' ', ' ')
strings.append((name.strip(), defin.strip()))
elif arttype == 'comment' or arttype == 'preamble':
pass
# print '# '+ arttype
else:
p = re.match('([^,]+),', current)
artid = p.group()[:-1]
entry['type'] = arttype
entry['id'] = artid
current = current[p.end():]
ff = get_fields(current)
for n, d in ff:
entry[n] = d
entries[artid] = entry
start = e
else:
return strings, entries
return strings, entries
def bibtex_to_xml(bibtexlist, xmlhead=None, xmlfoot=None):
if not xmlhead:
xmlhead = """<?xml version="1.0" encoding="iso-8859-1"?>
<!DOCTYPE bibxml:file SYSTEM "bibtexml-strict.dtd" >
<bibxml:file xmlns:bibxml="http://bibtexml.sf.net/">\n
"""
if not xmlfoot:
xmlfoot = "\n</bibxml:file>"
sp = 1
spd = ' '
blist = bibtexlist.copy()
entry = ''
for Id, bib in blist.iteritems():
tipo = bib['type']
entry += sp * spd + '<bibxml:entry id="' + Id + '">\n'
sp += 1
entry += sp * spd + '<bibxml:' + tipo + '>\n'
del(bib['id'])
del(bib['type'])
sp += 1
for k, e in bib.iteritems():
if k == 'author' or k == 'keywords':
entry += sp * spd + '<bibxml:' + k + 's>\n'
if k == 'author':
e = e.replace(',', '')
e = string.split(e, ' and ')
else:
e = string.split(e, ',')
field = k
sp += 1
for val in e:
v = replace_tags(val, 'xml')
v = handle_math(v)
v = removebraces(v)
v = replace_tags(v, 'accents')
v = replace_tags(v, 'other')
entry += sp * spd + '<bibxml:' + \
field + '>' + v + '</bibxml:' + field + '>\n'
sp -= 1
entry += sp * spd + '</bibxml:' + k + 's>\n'
else:
v = replace_tags(e, 'xml')
v = handle_math(v)
v = removebraces(v)
v = replace_tags(v, 'accents')
v = replace_tags(v, 'other')
entry += sp * spd + '<bibxml:' + k + '>' + \
v + '</bibxml:' + k + '>\n'
sp -= 1
entry += sp * spd + '</bibxml:' + tipo + '>\n'
sp -= 1
entry += sp * spd + '</bibxml:entry>\n\n'
return xmlhead + entry + xmlfoot
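# Illustrative output fragment for a single entry (values are placeholders):
#   <bibxml:entry id="key1">
#     <bibxml:article>
#       <bibxml:authors>
#         <bibxml:author>A. Author</bibxml:author>
#       </bibxml:authors>
#       <bibxml:title>Some Title</bibxml:title>
#     </bibxml:article>
#   </bibxml:entry>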
def replace_abbrev(bibtexentry, strings):
for k, v in bibtexentry.iteritems():
for s, d in strings:
if s in v:
if s == v.strip() or '#' in v:
v = v.replace(s, d)
if '#' in v:
ss = v.split('#')
v = string.join(ss, ' ')
bibtexentry[k] = v
def bibteximport(filepath):
# Reads a BibTeX File and returns a dictionary where each entry is a
# dictionary
try:
fd = open(filepath, 'r')
filecontents_source = fd.readlines()
fd.close()
except:
print 'Could not open file:', filepath
sys.exit(2)
strings, outdata = bibtexload(filecontents_source)
for k, bib in outdata.iteritems():
replace_abbrev(bib, def_strings)
replace_abbrev(bib, strings)
return outdata
def filehandler(filepath):
outdata = bibteximport(filepath)
xmldata = bibtex_to_xml(outdata)
return len(outdata.keys()), xmldata
# main program
def main():
if sys.argv[1:]:
filepath = sys.argv[1]
else:
print "No input file"
print "USAGE: " + sys.argv[0] + \
" FILE.bib\n\n It will output the XML file: FILE.xml"
sys.exit(2)
nentries, xmldata = filehandler(filepath)
outfile = filepath[:filepath.rfind('.')] + '.xml'
mensaje = 'Written ' + str(nentries) + ' entries to ' + outfile + '\n'
sys.stderr.write(mensaje)
fo = open(outfile, 'w')
fo.write(xmldata)
fo.close()
if __name__ == "__main__":
main()
# end python script
|
[
"Amfortas@7322e99d-02ea-0310-aa39-e9a107903beb"
] |
Amfortas@7322e99d-02ea-0310-aa39-e9a107903beb
|
c71741e43596ace6c69363a7cdcdabca3bb45e03
|
1625b7bb8b04fba0803015da581f4baf8a16bf13
|
/apps/front/models.py
|
daf46503cc180bd8c2e854a814b6f75cb8e0724c
|
[
"Apache-2.0"
] |
permissive
|
liuxianshengqwertyuiop/index
|
0199ca2224652e51f9709d0783a189707df942a4
|
17d5ae1d44a9c4ba41cf7983e9cf5a3c43cdd927
|
refs/heads/master
| 2020-03-29T08:45:21.614908
| 2018-10-08T10:51:17
| 2018-10-08T10:51:17
| 149,726,046
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,534
|
py
|
from ext import db
import shortuuid
import datetime
from enum import Enum
from werkzeug.security import generate_password_hash, check_password_hash
# gender codes: 1 = male, 2 = female, 3 = secret, 4 = unknown
class GenderEnum(Enum) :
MALE = 1
FEMALE = 2
SECRET = 3
UNKNOW = 4
class FrontUser(db.Model):
__tablename__ = "front_user"
id = db.Column(db.String(100), primary_key=True, default=shortuuid.uuid)
telephone = db.Column(db.String(11), nullable=False, unique=True)
username = db.Column(db.String(30), nullable=False)
_password = db.Column(db.String(100), nullable=False)
email = db.Column(db.String(50), unique=True)
realname = db.Column(db.String(50))
    avatar = db.Column(db.String(100))  # avatar
    signature = db.Column(db.String(100))  # signature
gender = db.Column(db.Enum(GenderEnum), default=GenderEnum.UNKNOW)
join_time = db.Column(db.DateTime, default=datetime.datetime.now)
    # password needs special handling, hence the custom __init__
def __init__(self, password, **kwargs):
self.password = password
kwargs.pop('password', None)
super(FrontUser, self).__init__(**kwargs)
@property
def password(self):
return self._password
@password.setter
def password(self, frontpwd):
        # 1. the plain password should not be accessible from outside
        # 2. avoid circular references
self._password = generate_password_hash(frontpwd)
def checkPwd(self, frontpwd):
# return self.password == generate_password_hash(frontpwd)
return check_password_hash(self._password, frontpwd)
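# Usage sketch (assumes the Flask-SQLAlchemy `db` from ext is configured;
# the credential values below are placeholders):
#   user = FrontUser(telephone='13800000000', username='alice', password='s3cret')
#   user.checkPwd('s3cret')   # -> True; only the hash is stored in _password
#   user.password             # returns the stored hash, never the plain text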
|
[
"43464705+liuxianshengqwertyuiop@users.noreply.github.com"
] |
43464705+liuxianshengqwertyuiop@users.noreply.github.com
|
5e3cd93cb6a1a2db496f515ca70cfe22984f79b7
|
74dbd8a17c21b4a31672936360726485430869fe
|
/scripts/travel_gobear.py
|
37984af8e7102bcd5f2c6f03ed7015cc3071830f
|
[] |
no_license
|
vanthuannguyen/travel_insurance_release
|
574f627d463d1fede5bf636b16edbec6a56441ac
|
15b8d7b001bc10070bb5b237451a5866dc7a4d0b
|
refs/heads/master
| 2020-04-29T01:01:50.195387
| 2019-03-15T02:20:35
| 2019-03-15T02:20:35
| 175,716,581
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,511
|
py
|
# -*- coding: utf-8 -*-
import time, os
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as ec
from selenium.common.exceptions import *
from logging import getLogger
logger = getLogger(__name__)
driverLocation = 'C:\\Python27\\chromedriver.exe'
os.environ['webdriver.chrome.driver'] = driverLocation
home_page = 'https://www.gobear.com/ph?x_session_type=UAT'
class Travel_application():
def goto_result_page(self, url):
'''
Arguments:
1. url: link to travel insurance homepage
'''
logger.info('Test start here')
#Init a chrome session
driver = webdriver.Chrome(driverLocation)
        # Create an explicit wait handler with a 10 second timeout and a 1 second polling frequency
wait = WebDriverWait(driver, 10, poll_frequency=1)
# Maximize current browser window
driver.maximize_window()
# Access to homepage
driver.get(url)
        # Wait until the homepage is loaded and the Insurance tab is clickable
insurance_tab = wait.until(ec.element_to_be_clickable((By.XPATH, "//a[@href='#Insurance' and @aria-controls='Insurance']")))
# Click tab Insurance
insurance_tab.click()
# Wait until insurance form is loaded, then click Travel tab
travel_tab = wait.until(ec.visibility_of_element_located((By.XPATH, "//a[@href='#Travel' and @aria-controls='Travel']")))
travel_tab.click()
# Wait until Show my results is clickable
show_result_bt = wait.until(ec.element_to_be_clickable((By.XPATH, "//button[@name='product-form-submit']")))
show_result_bt.click()
time.sleep(3)
collepseFilter_btn = wait.until(ec.visibility_of_element_located((By.XPATH, "//h5[@id='collapseFilterBtn']")))
collepseFilter_btn.click()
        expanded_element = None
        try:
            expanded_element = wait.until(ec.invisibility_of_element_located((By.XPATH, "//div[@id='collapseFilter' and @aria-expanded='false']")))
        except Exception as e:
            print ('Test expanded Filter failed')
            print ('Exception: {}'.format(e))
        if expanded_element:
            print ('Test expanded Filter pass')
        else:
            print ('Test expanded Filter failed')
# Expand Filter form
collepseFilter_btn.click()
time.sleep(1)
# Test radio button
promos_only = driver.find_element_by_xpath("//div[@data-filter-name='Promos Only']")
promos_only.click()
promos_only_radio = driver.find_element_by_xpath("//div[@data-filter-name='Promos Only']/input[@type='radio' and @class='gb-radio__original']")
time.sleep(1)
if promos_only_radio.is_selected():
print ('Test Promotion pass')
else:
print ('Test Promotion failed')
# Test checkbox
insurers_pacific_cross = driver.find_element_by_xpath("//div[@data-filter-name='Pacific Cross']")
insurers_pacific_cross.click()
pacific_cross_checkbox = driver.find_element_by_xpath("//div[@data-filter-name='Pacific Cross']/input[@type='checkbox' and @class='gb-checkbox__original']")
time.sleep(1)
if pacific_cross_checkbox.is_selected():
print ('Test Insurers pass')
else:
print ('Test Insurers failed')
travel = Travel_application()
travel.goto_result_page(home_page)
|
[
"nvthuan154@gmail.com"
] |
nvthuan154@gmail.com
|
bbffa112789032a599ec62a8bd35a62ca1b97de2
|
af06ad698af6c23efa32e48e09d151170cb34bb8
|
/Semester_1/InstantHacking/InstantHacking5.py
|
e6dda8d16f0b3ac31f53091f7cdbb83ed36822df
|
[] |
no_license
|
lxndrblz/DHBW-Programmierung
|
5581ab501a3aba730ecc8c2d70f5714ca952094e
|
316767563b0dfbce457e904d283b76f06cfc114f
|
refs/heads/master
| 2020-12-24T12:32:59.666056
| 2017-06-06T10:48:29
| 2017-06-06T10:48:29
| 72,984,385
| 4
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 340
|
py
|
def ganzZahl(nummer):
    ganzZahl = 0
    while ganzZahl <= nummer:
        ganzZahl = ganzZahl + 1
    ganzZahl = ganzZahl - 1
    return ganzZahl
nummer = input("Bitte eine Nummer eingeben: ")
# Manual computation
print "Der ganzahlige Anteil ist: " + str(ganzZahl(nummer))
# Built-in function
print "Der ganzahlige Anteil ist (system): " + str(int(nummer))
|
[
"mail@alexbilz.com"
] |
mail@alexbilz.com
|
3441df9d520fb3aa1947667ed801fc5194a3f894
|
bcf4b09c7569d18a42294c6dbf46dd0545bd0130
|
/configs/lunarlander_v2/distillation_dqn.py
|
2630a8f47bb17e3c11d8a73f146a671571f31319
|
[
"MIT"
] |
permissive
|
SHITIANYU-hue/rl_algorithms
|
fd1574da612b796f70f48cb9421673083c14858b
|
1f73371e2dc9d2193140dcbbf8acc5b61d238b8e
|
refs/heads/master
| 2022-12-22T08:19:40.539389
| 2020-09-25T04:15:13
| 2020-09-25T04:15:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,590
|
py
|
"""Config for DQN on LunarLander-v2.
- Author: Kyunghwan Kim
- Contact: kh.kim@medipixel.io
"""
from rl_algorithms.common.helper_functions import identity
agent = dict(
type="DistillationDQN",
hyper_params=dict(
gamma=0.99,
tau=5e-3,
update_starts_from=int(1e4), # openai baselines: int(1e4)
multiple_update=1, # multiple learning updates
train_freq=1, # in openai baselines, train_freq = 4
gradient_clip=10.0, # dueling: 10.0
n_step=3,
w_n_step=1.0,
w_q_reg=1e-7,
per_alpha=0.6, # openai baselines: 0.6
per_beta=0.4,
per_eps=1e-6,
# Epsilon Greedy
max_epsilon=1.0,
min_epsilon=0.01, # openai baselines: 0.01
epsilon_decay=1e-5, # openai baselines: 1e-7 / 1e-1
# Distillation
epochs=20, # epoch of student training
buffer_size=int(50000), # distillation buffer size
batch_size=64, # distillation batch size
n_frame_from_last=int(5e4), # number of frames to save from the end of training
),
learner_cfg=dict(
type="DQNLearner",
loss_type=dict(type="C51Loss"),
backbone=dict(),
head=dict(
type="C51DuelingMLP",
configs=dict(
hidden_sizes=[128, 64],
use_noisy_net=False,
v_min=-300,
v_max=300,
atom_size=1530,
output_activation=identity,
),
),
optim_cfg=dict(lr_dqn=1e-4, weight_decay=1e-7, adam_eps=1e-8),
),
)
|
[
"noreply@github.com"
] |
SHITIANYU-hue.noreply@github.com
|
4cdd6c7856a9920371957248d4e87d94942fc11c
|
2dd26e031162e75f37ecb1f7dd7f675eeb634c63
|
/examples/nlp/token_classification/data/get_libritts_data.py
|
86a5d01eb9dcfb99c97bed3a3d07bd5d804fb64c
|
[
"Apache-2.0"
] |
permissive
|
NVIDIA/NeMo
|
1b001fa2ae5d14defbfd02f3fe750c5a09e89dd1
|
c20a16ea8aa2a9d8e31a98eb22178ddb9d5935e7
|
refs/heads/main
| 2023-08-21T15:28:04.447838
| 2023-08-21T00:49:36
| 2023-08-21T00:49:36
| 200,722,670
| 7,957
| 1,986
|
Apache-2.0
| 2023-09-14T18:49:54
| 2019-08-05T20:16:42
|
Python
|
UTF-8
|
Python
| false
| false
| 4,822
|
py
|
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script downloads and unpacks LibriTTS data and prepares it for the punctuation and capitalization lexical audio model.
Data is downloaded from www.openslr.org and then extracted via tar.
The script gathers the text from every *.normalized.txt file inside the archive into a single text file, plus a companion file of audio filepaths.
"""
import argparse
import glob
import os
import re
import shutil
import subprocess
import tarfile
from tqdm import tqdm
from nemo.collections.nlp.data.token_classification.token_classification_utils import create_text_and_labels
from nemo.utils import logging
URL = {
'train_clean_100': "https://www.openslr.org/resources/60/train-clean-100.tar.gz",
'train_clean_360': "https://www.openslr.org/resources/60/train-clean-360.tar.gz",
'train_other_500': "https://www.openslr.org/resources/60/train-other-500.tar.gz",
'dev_clean': "https://www.openslr.org/resources/60/dev-clean.tar.gz",
'dev_other': "https://www.openslr.org/resources/60/dev-other.tar.gz",
'test_clean': "https://www.openslr.org/resources/60/test-clean.tar.gz",
'test_other': "https://www.openslr.org/resources/60/test-other.tar.gz",
}
def __extract_file(filepath, data_dir):
try:
tar = tarfile.open(filepath)
tar.extractall(data_dir)
tar.close()
except Exception:
print(f"Error while extracting {filepath}. Already extracted?")
def __maybe_download_file(destination: str, source: str):
"""
    Downloads source to destination if it does not already exist;
    if it exists, the download is skipped.
Args:
destination: local filepath
source: url of resource
"""
source = URL[source]
if not os.path.exists(destination):
logging.info(f'Downloading {source} to {destination}')
subprocess.run(['wget', '-O', destination, source])
return 1
else:
logging.info(f'{destination} found. Skipping download')
return 0
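# e.g. __maybe_download_file('/data/dev_clean.tar.gz', 'dev_clean') fetches the
# archive with wget and returns 1 on the first call; on later calls the file
# already exists, so it returns 0 and the extraction step below is skipped.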
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Prepare LibriTTS dataset for punctuation capitalization lexical audio model training/evaluating.'
)
parser.add_argument("--data_sets", default="dev_clean", type=str, help="List of subsets separated by comma")
parser.add_argument("--data_dir", required=True, type=str, help="Path to dir where data will be stored")
parser.add_argument(
"--clean", "-c", action="store_true", help="If set to True will delete all files except produced .txt and .wav"
)
args = parser.parse_args()
data_dir = args.data_dir
if not os.path.exists(data_dir):
os.makedirs(data_dir)
for subset in args.data_sets.split(','):
logging.info(f'Downloading {subset} subset')
if __maybe_download_file(data_dir + f'/{subset}.tar.gz', subset):
logging.info(f'Extracting {subset} subset')
__extract_file(data_dir + f'/{subset}.tar.gz', data_dir)
logging.info(f'Processing data')
splits = set([split.split('_')[0] for split in args.data_sets.split(',')])
for split in splits:
os.makedirs(f'{data_dir}/audio/{split}', exist_ok=True)
with open(f'{data_dir}/{split}.txt', 'w') as text_data, open(
f'{data_dir}/audio_{split}.txt', 'w'
) as audio_data:
for file in tqdm(glob.glob(f'{data_dir}/LibriTTS/{split}*/*/*/*.wav'), desc=f'Processing {split}'):
with open(file[:-4] + '.normalized.txt', 'r') as source_file:
lines = source_file.readlines()
text = lines[0]
text = re.sub(r"[^a-zA-Z\d,?!.']", ' ', text)
text = re.sub(' +', ' ', text)
shutil.copy(file.strip(), (f'{data_dir}/audio/{split}/' + file.split('/')[-1]).strip())
text_data.write(text.strip() + "\n")
audio_data.write((f'{data_dir}/audio/{split}/' + file.split('/')[-1]).strip() + "\n")
create_text_and_labels(f'{data_dir}/', f'{data_dir}/{split}.txt')
logging.info(f'Processed {split} subset')
if args.clean:
shutil.rmtree(f'{data_dir}/LibriTTS')
for tar in glob.glob(f'{data_dir}/**.tar.gz'):
os.remove(tar)
|
[
"noreply@github.com"
] |
NVIDIA.noreply@github.com
|
31b431fe415810691d171a52f289ac961c0fb559
|
29d79d1630fd09889f75c52c9c3f5a6f05599048
|
/evennia/evennia/server/portal/mssp.py
|
5442ccc32082bfe6643573f186da034312e69c21
|
[
"BSD-3-Clause"
] |
permissive
|
Descyndis/mud
|
103584ed102f18acd1108b1c3a5d982fb676c589
|
87aebba215465e9deef1c24a3b222865cdf8e5e1
|
refs/heads/master
| 2022-12-27T21:19:56.287387
| 2019-04-15T03:34:40
| 2019-04-15T03:34:40
| 181,402,703
| 0
| 1
| null | 2022-12-17T23:35:55
| 2019-04-15T03:04:16
|
Python
|
UTF-8
|
Python
| false
| false
| 6,434
|
py
|
"""
MSSP - Mud Server Status Protocol
This implements the MSSP telnet protocol as per
http://tintin.sourceforge.net/mssp/. MSSP allows web portals and
listings to have their crawlers find the mud and automatically
extract relevant information about it, such as genre, how many
active players and so on.
"""
from builtins import object
from django.conf import settings
from evennia.utils import utils
MSSP = b'\x46'
MSSP_VAR = b'\x01'
MSSP_VAL = b'\x02'
# try to get the customized mssp info, if it exists.
MSSPTable_CUSTOM = utils.variable_from_module(settings.MSSP_META_MODULE, "MSSPTable", default={})
class Mssp(object):
"""
Implements the MSSP protocol. Add this to a variable on the telnet
protocol to set it up.
"""
def __init__(self, protocol):
"""
initialize MSSP by storing protocol on ourselves and calling
the client to see if it supports MSSP.
Args:
protocol (Protocol): The active protocol instance.
"""
self.protocol = protocol
self.protocol.will(MSSP).addCallbacks(self.do_mssp, self.no_mssp)
def get_player_count(self):
"""
Get number of logged-in players.
Returns:
count (int): The number of players in the MUD.
"""
return str(self.protocol.sessionhandler.count_loggedin())
def get_uptime(self):
"""
Get how long the portal has been online (reloads are not counted).
Returns:
uptime (int): Number of seconds of uptime.
"""
return str(self.protocol.sessionhandler.uptime)
def no_mssp(self, option):
"""
Called when mssp is not requested. This is the normal
operation.
Args:
option (Option): Not used.
"""
self.protocol.handshake_done()
def do_mssp(self, option):
"""
Negotiate all the information.
Args:
option (Option): Not used.
"""
self.mssp_table = {
# Required fields
"NAME": "Evennia",
"PLAYERS": self.get_player_count,
"UPTIME": self.get_uptime,
# Generic
"CRAWL DELAY": "-1",
"HOSTNAME": "", # current or new hostname
"PORT": ["4000"], # most important port should be last in list
"CODEBASE": "Evennia",
"CONTACT": "", # email for contacting the mud
"CREATED": "", # year MUD was created
"ICON": "", # url to icon 32x32 or larger; <32kb.
"IP": "", # current or new IP address
"LANGUAGE": "", # name of language used, e.g. English
"LOCATION": "", # full English name of server country
"MINIMUM AGE": "0", # set to 0 if not applicable
"WEBSITE": "www.evennia.com",
# Categorisation
"FAMILY": "Custom", # evennia goes under 'Custom'
"GENRE": "None", # Adult, Fantasy, Historical, Horror, Modern, None, or Science Fiction
"GAMEPLAY": "None", # Adventure, Educational, Hack and Slash, None,
# Player versus Player, Player versus Environment,
# Roleplaying, Simulation, Social or Strategy
"STATUS": "Open Beta", # Alpha, Closed Beta, Open Beta, Live
"GAMESYSTEM": "Custom", # D&D, d20 System, World of Darkness, etc. Use Custom if homebrew
"SUBGENRE": "None", # LASG, Medieval Fantasy, World War II, Frankenstein,
# Cyberpunk, Dragonlance, etc. Or None if not available.
# World
"AREAS": "0",
"HELPFILES": "0",
"MOBILES": "0",
"OBJECTS": "0",
"ROOMS": "0", # use 0 if room-less
"CLASSES": "0", # use 0 if class-less
"LEVELS": "0", # use 0 if level-less
"RACES": "0", # use 0 if race-less
"SKILLS": "0", # use 0 if skill-less
            # Protocols (set to 1 or 0)
"ANSI": "1",
"GMCP": "0",
"ATCP": "0",
"MCCP": "0",
"MCP": "0",
"MSDP": "0",
"MSP": "0",
"MXP": "0",
"PUEBLO": "0",
"SSL": "1",
"UTF-8": "1",
"ZMP": "0",
"VT100": "0",
"XTERM 256 COLORS": "0",
            # Commercial (set to 1 or 0)
"PAY TO PLAY": "0",
"PAY FOR PERKS": "0",
            # Hiring (set to 1 or 0)
"HIRING BUILDERS": "0",
"HIRING CODERS": "0",
# Extended variables
# World
"DBSIZE": "0",
"EXITS": "0",
"EXTRA DESCRIPTIONS": "0",
"MUDPROGS": "0",
"MUDTRIGS": "0",
"RESETS": "0",
# Game (set to 1, 0 or one of the given alternatives)
"ADULT MATERIAL": "0",
"MULTICLASSING": "0",
"NEWBIE FRIENDLY": "0",
"PLAYER CITIES": "0",
"PLAYER CLANS": "0",
"PLAYER CRAFTING": "0",
"PLAYER GUILDS": "0",
"EQUIPMENT SYSTEM": "None", # "None", "Level", "Skill", "Both"
"MULTIPLAYING": "None", # "None", "Restricted", "Full"
"PLAYERKILLING": "None", # "None", "Restricted", "Full"
"QUEST SYSTEM": "None", # "None", "Immortal Run", "Automated", "Integrated"
"ROLEPLAYING": "None", # "None", "Accepted", "Encouraged", "Enforced"
"TRAINING SYSTEM": "None", # "None", "Level", "Skill", "Both"
"WORLD ORIGINALITY": "None", # "All Stock", "Mostly Stock", "Mostly Original", "All Original"
}
# update the static table with the custom one
if MSSPTable_CUSTOM:
self.mssp_table.update(MSSPTable_CUSTOM)
varlist = ''
for variable, value in self.mssp_table.items():
if callable(value):
value = value()
if utils.is_iter(value):
for partval in value:
varlist += MSSP_VAR + str(variable) + MSSP_VAL + str(partval)
else:
varlist += MSSP_VAR + str(variable) + MSSP_VAL + str(value)
# send to crawler by subnegotiation
self.protocol.requestNegotiation(MSSP, varlist)
self.protocol.handshake_done()
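# Hookup sketch (per the class docstring): store an instance on the telnet
# protocol once a connection exists, e.g. in the protocol's setup code:
#   self.mssp = Mssp(self)   # negotiation proceeds via the will(MSSP) callbacks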
|
[
"elodis.descyndis@gmail.com"
] |
elodis.descyndis@gmail.com
|
230be5fdf1999d9c2dac6c255a85f6a123d9ef50
|
a3f92620614de57abd0c517f2681a33e67cead5d
|
/main.py
|
b4134db5e9926c510bf87f8d65b98c0de8d927ab
|
[] |
no_license
|
Abhinav-Chauhan1/Jarvis
|
598ecb2bdc02bdf7ced56b13e334876473b43ed4
|
da6608132a7b13a577d172872ebb32dfbe1ea71d
|
refs/heads/main
| 2023-02-14T23:57:18.701170
| 2021-01-03T14:15:50
| 2021-01-03T14:15:50
| 321,596,658
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,204
|
py
|
import pyttsx3 # pip install pyttsx3
import speech_recognition as sr # pip install speechRecognition
import datetime
import wikipedia # pip install wikipedia
import webbrowser
import os
import smtplib
import pywhatkit
import pyjokes # pip install pyjokes
engine = pyttsx3.init('sapi5')
voices = engine.getProperty('voices')
# print(voices[1].id)
engine.setProperty('voice', voices[0].id)
def speak(audio):
engine.say(audio)
engine.runAndWait()
def wishMe():
hour = int(datetime.datetime.now().hour)
if hour >= 0 and hour < 12:
speak("Good Morning!")
elif hour >= 12 and hour < 18:
speak("Good Afternoon!")
else:
speak("Good Evening!")
speak("I am Jarvis Sir. Please tell me how may I help you")
def takeCommand():
# It takes microphone input from the user and returns string output
r = sr.Recognizer()
with sr.Microphone() as source:
print("Listening...")
r.pause_threshold = 1
audio = r.listen(source)
try:
print("Recognizing...")
query = r.recognize_google(audio, language='en-in')
print(f"User said: {query}\n")
if 'jarvis' in query:
pass
else:
return query
except Exception as e:
print("Say that again please...")
return "None"
return query
if __name__ == "__main__":
wishMe()
while True:
# if 1:
query = takeCommand().lower()
# Logic for executing tasks based on query
if 'info about' in query:
speak('Searching Wikipedia...')
query = query.replace("info about", "")
results = wikipedia.summary(query, sentences=1)
speak("According to Wikipedia")
print(results)
speak(results)
elif 'open youtube' in query:
webbrowser.open("youtube.com")
elif 'open google' in query:
webbrowser.open("google.com")
elif 'open stackoverflow' in query:
webbrowser.open("stackoverflow.com")
elif 'play' in query:
song = query.replace('play', '')
speak('playing ' + song)
pywhatkit.playonyt(song)
elif 'the time' in query:
strTime = datetime.datetime.now().strftime("%I:%M:%p")
speak(f"Sir, the time is {strTime}")
elif 'your father' in query:
speak("my creator is Abhinav Chauhan")
elif 'creator' in query:
speak("my creator is Abhinav Chauhan")
elif 'joke' in query:
speak(pyjokes.get_joke())
elif 'how are you' in query:
speak("Awesome and Ready To help You")
elif 'what are you doing' in query:
speak("Just Planing To Destroy humanity")
elif 'what can you do for me' in query:
speak("whatever you want sir")
elif 'what can you not do for me' in query:
speak("Sir, I can not Cook food For you")
elif 'what are your limitations' in query:
speak("Sir, I can not Cook food For you")
|
[
"noreply@github.com"
] |
Abhinav-Chauhan1.noreply@github.com
|
6d2800c68e6aa1f895b7a76a664d5c8dcfbf3195
|
5c00b0626b4ec2bc428e565c97b4afc355198cc4
|
/torchsim/core/eval/doc_generator/document.py
|
5c40c8ca769c494edd4bb17a30a8e9a12773a360
|
[
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
andreofner/torchsim
|
8cff778a324d4f7dc040f11a12d0dc8cd66375b7
|
81d72b82ec96948c26d292d709f18c9c77a17ba4
|
refs/heads/master
| 2021-10-24T05:25:33.740521
| 2019-03-22T10:20:00
| 2019-03-22T10:20:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,501
|
py
|
from torchsim.core.eval.doc_generator.element import XmlElement
from typing import List, Any, Iterable, Dict, Optional
class Document:
"""An HTML document used to record experiment output."""
_doc_header: str
_html: XmlElement
_body: XmlElement
_elements: List[XmlElement]
def __init__(self):
self._doc_header = "<!DOCTYPE html>"
self._body = XmlElement('body')
self._html = XmlElement('html')
self._elements = [self._html.add(self._body)]
def add(self, element: XmlElement):
"""Adds an XML element or string to the document."""
self._body.add(element)
return self
def as_text(self):
return '\n'.join([self._doc_header, self._html.as_text()])
def write_file(self, path: str):
with open(path, 'w') as document_file:
document_file.write(self.as_text())
def add_table(self, headers: Iterable[str], values: Iterable[Iterable[Any]], attribs: Optional[Dict[str, str]] = None):
table = XmlElement('table', attribs)
# header
header = XmlElement('tr')
for h in headers:
header.add(XmlElement('th', text=h))
table.add(header)
# rows
for row_values in values:
row = XmlElement('tr')
for cell in row_values:
str_value = str(cell)
row.add(XmlElement('td', {'style': 'text-align: right;'}, text=str_value))
table.add(row)
self.add(table)
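# Usage sketch (the file name and table values are placeholders):
#   doc = Document()
#   doc.add_table(['metric', 'value'], [['accuracy', 0.93], ['loss', 0.12]])
#   doc.write_file('report.html')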
|
[
"jan.sinkora@goodai.com"
] |
jan.sinkora@goodai.com
|
fc2d99d99f3a268a0529bac950ee9a1e8e1cafea
|
43a80ed4a70bb71285790766251e4ae7d8807126
|
/slicing.py
|
de257e7552df0a134a94b1c07d0a42a18cda50f2
|
[] |
no_license
|
crisuvas/AdvancedTopics
|
677ebca8625890ae114f26ec1fecefa858637061
|
92b96cf7ebd35f2db2b010f633222ae05d7ce99e
|
refs/heads/master
| 2020-04-07T09:37:13.983711
| 2018-11-19T16:46:42
| 2018-11-19T16:46:42
| 158,259,282
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 109
|
py
|
to_21 = list(range(1, 22))
odd = to_21[::2]
middle_third = to_21[7:14:1]
print(odd)
print(middle_third)
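# Expected output:
#   [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21]
#   [8, 9, 10, 11, 12, 13, 14]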
|
[
"noreply@github.com"
] |
crisuvas.noreply@github.com
|
0beff6e4958a0376b2e17d074c1ec6db700ca442
|
3708088db561212ca57af6f22b273fc24e7de538
|
/LiDarAnalysis/LiDar_DemBasedClassification_FB_part0f3basins2.py
|
b382eab781030db0744a14418415d4e859bb331f
|
[] |
no_license
|
hhs732/chapter2
|
5310ed5975239c19f8be85194dec747c8872ee9a
|
d6872b39f82f0c4e2f70f4a58933bfeba741e141
|
refs/heads/master
| 2020-05-15T20:19:31.768295
| 2020-02-20T23:27:09
| 2020-02-20T23:27:09
| 182,478,984
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 30,107
|
py
|
import laspy as ls
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from osgeo import gdal, ogr, osr
from mpl_toolkits.mplot3d import Axes3D
import csv
import os
class K_Means:
def __init__(self, numOfClusters=2, init_centroids=None):
self.numOfClusters = numOfClusters
self.centroids={}
for i in range(self.numOfClusters):
self.centroids[i] = init_centroids[i]
def fit(self,data,cols,cole):
self.classifications = {}
for i in range(self.numOfClusters):
self.classifications[i] = []
for featureset in data:
distances = [np.linalg.norm(featureset[cols:cole]-self.centroids[centroid]) for centroid in self.centroids]
classification = distances.index(min(distances))
self.classifications[classification].append(featureset)
def predict(self,data):
distances = [np.linalg.norm(data-self.centroids[centroid]) for centroid in self.centroids]
classification = distances.index(min(distances))
return classification
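# Usage sketch (mirrors the commented-out calls further below; `dem_points`
# and `lidar_points` are placeholder arrays): assign every LiDAR point to its
# nearest DEM centroid, comparing only the x, y columns:
#   cents = dem_points[:, 0:2]
#   clf = K_Means(numOfClusters=len(cents), init_centroids=cents)
#   clf.fit(lidar_points, 0, 2)
#   classes = clf.classifications   # dict: centroid index -> list of points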
def readPlotDEM(filename,elevationMissNo,pathName):
demset = gdal.Open(filename)
band = demset.GetRasterBand(1)
elevation = band.ReadAsArray()
elevation[elevation == -9999.] = elevationMissNo
elevation[elevation > 100000] = elevationMissNo
x0, dx, dxdy, y0, dydx, dy = demset.GetGeoTransform()
nrows, ncols = elevation.shape
x1 = x0 + dx * ncols
y1 = y0 + dy * nrows
extent=[x0, x1, y1, y0]
plt.figure(figsize=(30,20))
plt.imshow(elevation, cmap='gist_earth', extent=extent)
plt.savefig(pathName)
return elevation
def readDEMt0findBoundray(filename,elevationMissNo,pathName):
demset = gdal.Open(filename)
band = demset.GetRasterBand(1)
elevation = band.ReadAsArray()
elevation[elevation == -9999.] = elevationMissNo
elevation[elevation > 100000] = elevationMissNo
x0, dx, dxdy, y0, dydx, dy = demset.GetGeoTransform()
nrows, ncols = elevation.shape
x1 = x0 + dx * ncols
y1 = y0 + dy * nrows
extent=[x0, x1, y1, y0]
return extent
def creatingCentroidGroundpointsFromDem(tiffFilename,elevationMissNo):#,pathNameforDemImage):
demset = gdal.Open(tiffFilename)
band = demset.GetRasterBand(1)
elevation = band.ReadAsArray()
elevation[elevation == -9999.] = elevationMissNo
elevation[elevation > 10000.] = elevationMissNo
x0, dx, dxdy, y0, dydx, dy = demset.GetGeoTransform()
nrows, ncols = elevation.shape
latitude =[]
for x in range (ncols):
latitude.append(x+x0)
longitude = []
for y in range (nrows):
longitude.append(y0-y)
latitude_rp = np.tile(latitude, nrows)
longitude_rp = np.repeat(longitude, ncols)
elevation_rp = np.reshape(elevation,(nrows*ncols)).T
dem_groundPoints = np.vstack([latitude_rp,longitude_rp,elevation_rp]).T
return dem_groundPoints
# Grab just the X dimension from the file, and scale it.
def scaled_x_dimension(las_file):
x_dimension = las_file.X
scale = las_file.header.scale[0]
offset = las_file.header.offset[0]
return(x_dimension*scale + offset)
def lidarDiffGrndPoints(classes,dem_groundPoints):
upGroundPoints = []
classes_rplc = [] #returns filled class with [0,0,0]
pureDiff = []
for clss in range (len (classes)):
if len(classes[clss])==0:
nokhale = [np.array([0,0,0])]
classes_rplc.append(nokhale)
else: classes_rplc.append(classes[clss])
pureDiff.append(classes_rplc[clss]-dem_groundPoints[clss])
eachpoint = []
for ep in range(len(classes_rplc[clss])):
height = classes_rplc[clss][ep][2]-dem_groundPoints[clss][2]
eachpoint.append(np.vstack([classes_rplc[clss][ep][0],classes_rplc[clss][ep][1],height]).T)
upGroundPoints.append(eachpoint)
return upGroundPoints, classes_rplc, pureDiff
def classificationTest(pureDiff):
failclass=[]
for xyidx in range (len(pureDiff)):
for xycl in range (len(pureDiff[xyidx])):
if ((abs(pureDiff[xyidx][xycl][0])>0.5) or (abs(pureDiff[xyidx][xycl][1])>0.5)):
failclass.append(xyidx)
break
return failclass
def defineSpecificClassGreater(classesG, specific0bjectHeightG):
specificClassG = []
numSpecificClassG = []
for vgcl in range (len (classesG)):
for pnt in range (len (classesG[vgcl])):
if classesG[vgcl][pnt][0][2]>specific0bjectHeightG:
numSpecificClassG.append(vgcl)
specificClassG.append(classesG[vgcl])
break
return specificClassG, numSpecificClassG
def defineSpecificClassLess (classesL, specific0bjectHeightL):
specificClassL = []
numSpecificClassL = []
for vgcl in range (len (classesL)):
for pnt in range (len (classesL[vgcl])):
if classesL[vgcl][pnt][0][2]<specific0bjectHeightL:
numSpecificClassL.append(vgcl)
specificClassL.append(classesL[vgcl])
break
return specificClassL, numSpecificClassL
def defineLowVegClass(classes):
lowVegClass = []
lowVegNumClass = []
for lvgcl in range (len (classes)):
for crdnt in range (len (classes[lvgcl])):
if classes[lvgcl][crdnt][0][2]<2 and classes[lvgcl][crdnt][0][2]>0.15:
lowVegNumClass.append(lvgcl)
lowVegClass.append(classes[lvgcl])
break
return lowVegClass,lowVegNumClass
def differenceBetwee2classes (primaryClass,secondNumClass): #to define nolowVegClass and openClass
primaryNumClass = list(np.arange(len(primaryClass)))
cleanNumClass = list(set(primaryNumClass)-set(secondNumClass))
cleanClass = []
for indx in cleanNumClass:
cleanClass.append(primaryClass[indx])
return cleanClass, cleanNumClass
#%%example
A= pd.DataFrame(np.array([[2,22,1],[3,23,1],[1,21,0],[4,24,9]]),columns=['x','y','z'])
B= pd.DataFrame(np.array([[1,21,2],[2,22,3],[3,23,3],[10,200,3],[4,24,2]]),columns=['x','y','z'])
#B= np.array([[1,21,2],
# [2,22,3],
# [10,200,3],
# [3,23,3],
# [4,24,2]
# ])
C=B.copy()
R,c2=B.shape
indx = []
for i in range(R):
row=B[['x','y']].iloc[i]
R=np.where(A[['x','y']]==row)[0]
print (R)
if len(R)>0:
if R[0]==R[1]:
print ("row %i of B is the same as row %i of A" %(i,R[0]))
C['z'][i]=A['z'][R[0]]
indx.append(i)
else:
C['z'][i]=-1
print (C)
#%% DEM file (.tif) reading and creating centroid (ground points) from Dem files for sagehen
elevationMissNoS0f = 1900.
filenameS0fSc = "W:\_users\hsafa\Chapter2_snow_forest\LIDAR_analysis\Sagehen\sc_veg_dem.tif" #path to raster
pathNameS0fSc = "W:\_users\hsafa\Chapter2_snow_forest\LIDAR_analysis\Sagehen\dem_snow0ff_sc.png"
elevationVegSc = readPlotDEM(filenameS0fSc,elevationMissNoS0f,pathNameS0fSc)
dem_groundPointsVegSc = creatingCentroidGroundpointsFromDem(filenameS0fSc,elevationMissNoS0f)#,pathNameS)
dem_groundPointsVegSc_df = pd.DataFrame(dem_groundPointsVegSc,columns=['x','y','z'])
dem_groundPointsVegSc_intg = pd.concat([dem_groundPointsVegSc_df[['x','y']].astype(int),dem_groundPointsVegSc_df['z']],axis=1)
dem_groundPointsVegSc_intg.sort_values(by=['x'])
# LiDAR data reading: grab the scaled x, y, and z dimensions and stack them into an nx3 numpy array
infileVegSc = ls.file.File("W:\_users\hsafa\Chapter2_snow_forest\LIDAR_analysis\Sagehen\sc_snw0ff_points.las", mode="r")
coordsVegSc = np.vstack((infileVegSc.x, infileVegSc.y, infileVegSc.z)).T
coordsVegSc_df = pd.DataFrame(coordsVegSc,columns=['x','y','z'])
coordsVegSc_intg = pd.concat([coordsVegSc_df[['x','y']].astype(int),coordsVegSc_df['z']],axis=1)
coordsVegSc_intg.sort_values(by=['x','y'])
#lidar data for whole sagehen, snow on 2016
from LiDar_DemBasedClassification_scSnow0n import coordsnow0nSc26M
coordsnow0nSc26M_df = pd.DataFrame(coordsnow0nSc26M,columns=['x','y','z'])
coordsnow0nSc26M_intg = pd.concat([coordsnow0nSc26M_df[['x','y']].astype(int),coordsnow0nSc26M_df['z']],axis=1)
coordsnow0nSc26M_intg.sort_values(by=['x','y'])
from LiDar_DemBasedClassification_scSnow0n import coordsnow0nSc18A
coordsnow0nSc18M_df = pd.DataFrame(coordsnow0nSc18A,columns=['x','y','z'])
coordsnow0nSc18M_intg = pd.concat([coordsnow0nSc18M_df[['x','y']].astype(int),coordsnow0nSc18M_df['z']],axis=1)
coordsnow0nSc18M_intg.sort_values(by=['x','y'])
#%%
upGroundPointsVegSc=coordsVegSc_intg.copy()
R,c2=coordsVegSc_intg.shape
indx = []
for i in range(R):
row=coordsVegSc_intg[['x','y']].iloc[i]
R=np.where(dem_groundPointsVegSc_intg[['x','y']]==row)[0]
#print (R)
if len(R)>0:
if R[0]==R[1]:
#print ("row %i of B is the same as row %i of A" %(i,R[0]))
upGroundPointsVegSc['z'][i]=coordsVegSc_intg['z'][i]-dem_groundPointsVegSc_intg['z'][R[0]]
indx.append(i)
else:
upGroundPointsVegSc['z'][i]=-9999
#upGroundPointsVegSc, classes_rplcVegSc, pureVegClassSc = lidarDiffGrndPoints(classesVegSc,dem_groundPointsVegSc_intg)
#%% some part of data
#minXSc = 736400.
#maxXSc = 736500.
#minYSc = 4368100.
#maxYSc = 4368200.
#
#dem_groundPointsVegSc_int = dem_groundPointsVegSc_df[(dem_groundPointsVegSc_df['x'] >= minXSc) & (dem_groundPointsVegSc_df['x'] <= maxXSc)]
#dem_groundPointsVegSc_sp0 = dem_groundPointsVegSc_int[(dem_groundPointsVegSc_int['y'] >= minYSc) & (dem_groundPointsVegSc_int['y'] <= maxYSc)]
#dem_groundPointsVegSc_sp = dem_groundPointsVegSc_sp0.values
#coordsVegSc_df_int = coordsVegSc_df[(coordsVegSc_df['x'] >= minXSc) & (coordsVegSc_df['x'] <= maxXSc)]
#coordsVegSc_sp0 = coordsVegSc_df_int[(coordsVegSc_df_int['y'] >= minYSc) & (coordsVegSc_df_int['y'] <= maxYSc)]
#coordsVegSc_sp = coordsVegSc_sp0.values
##finding border of las file veg T1 based on all vegDem
#maxX = coordsVegT1_df.iloc[coordsVegT1_df['x'].idxmax()][0:1]
#minX = coordsVegT1_df.iloc[coordsVegT1_df['x'].idxmin()][0:1]
#maxY = coordsVegT1_df.iloc[coordsVegT1_df['y'].idxmax()][1:2]
#minY = coordsVegT1_df.iloc[coordsVegT1_df['y'].idxmin()][1:2]
#borderT1_init = pd.DataFrame([maxX,maxY,minX,minY])
#borderT1 = pd.DataFrame([[borderT1_init['x'].max(),borderT1_init['y'].min()],[borderT1_init['x'].max(),borderT1_init['y'].max()],
# [borderT1_init['x'].min(),borderT1_init['y'].min()],[borderT1_init['x'].min(),borderT1_init['y'].max()]])
#lidar data for sagehen, snow on 2016, 26 March
#coordsnow0nSc26M_int = coordsnow0nSc26M[(coordsnow0nSc26M['x'] >= minXSc) & (coordsnow0nSc26M['x'] <= maxXSc)]
#coordSnwSc26M0 = coordsnow0nSc26M_int[(coordsnow0nSc26M_int['y'] >= minYSc) & (coordsnow0nSc26M_int['y'] <= maxYSc)]
#coordsSnwSc26M = coordSnwSc26M0.values
#lidar data for sagehen, snow on 2016, 18 May
#coordsnow0nSc18M_int = coordsnow0nSc18A[(coordsnow0nSc18A['x'] >= minXSc) & (coordsnow0nSc18A['x'] <= maxXSc)]
#coordSnwSc18M0 = coordsnow0nSc18M_int[(coordsnow0nSc18M_int['y'] >= minYSc) & (coordsnow0nSc18M_int['y'] <= maxYSc)]
#coordsSnwSc18M = coordSnwSc18M0.values
#%% classification with veg ground points from veg dem file: T1
#centroids_newT1=dem_groundPointsVegSc_sp[:,0:2]
#kT1 = np.size(dem_groundPointsVegSc_sp[:,0])
## instantiate a class
#clf1T1 = K_Means(numOfClusters=kT1,init_centroids=centroids_newT1)
## fit kmean class to data
#clf1T1.fit(coordsVegSc_sp,0,2)
## get classification
#classesVegSc = clf1T1.classifications
#
#upGroundPointsVegSc, classes_rplcVegSc, pureVegClassSc = lidarDiffGrndPoints(classesVegSc,dem_groundPointsVegSc_sp)
#
##%% classification of snow las file with veg ground points
#clfsT1 = K_Means(numOfClusters=kT1,init_centroids=centroids_newT1)
## classification for sagehen 26 March
#clfsT1.fit(coordsSnwSc26M,0,2)
#classeSnwSc26M = clfsT1.classifications
#upGroundPointsSnwVegSc26M, classes_rplcVSSc26M, pureVegsnowClassSc26M = lidarDiffGrndPoints(classeSnwSc26M,dem_groundPointsVegSc_sp)
## classification for sagehen 18 May
#clfsT1.fit(coordsSnwSc18M,0,2)
#classeSnwSc18M = clfsT1.classifications
#upGroundPointsSnwVegSc18M, classes_rplcVSSc18M, pureVegsnowClassSc18M = lidarDiffGrndPoints(classeSnwSc18M,dem_groundPointsVegSc_sp)
##%% vegetation classification from DEM2014 and las2014
#vegClassSc = upGroundPointsVegSc[:]
##all tree classification
#allTreeClassSc, treeNumClassSc = defineSpecificClassGreater (vegClassSc, 2)
#
#negVegClassSc, negVegNumClassSc = defineSpecificClassLess (vegClassSc, 0)
#
## trees with low branches
#lowVegTreeClassSc, lowVegNumClassSc = defineLowVegClass(allTreeClassSc)
#
## trees with no low branches
## "*************tall canopy no snow class*****************"
#nolowVegTreeClassSc, nolowVegTreeNumClassSc = differenceBetwee2classes (allTreeClassSc,lowVegNumClassSc)
#
## open space (no trees, no return between 0.15 to 2)
## all low veg
#allLowVegClassSc, allLowVegNumClassSc = defineLowVegClass(vegClassSc)
#
##open places
#notOpenNumClassSc = list(set(allLowVegNumClassSc).union(set(treeNumClassSc)))
## "******************open no snow class*******************"
#allOpenClassSc, allOpenNumClassSc = differenceBetwee2classes (vegClassSc,notOpenNumClassSc)
#
##%% snow classification for sagehen 26 March
#vegSnowClassSc26M, vegSnowNumClassSc26M = defineSpecificClassGreater (upGroundPointsSnwVegSc26M, -1)
#
##no snow on the ground
#nosnowClassSc26M, noSnowNumClassSc26M = defineSpecificClassLess (vegSnowClassSc26M, 0.15)
##snow on the ground or on the trees
#allSnowClassSc26M, allSnowNumClassSc26M = differenceBetwee2classes (vegSnowClassSc26M,noSnowNumClassSc26M)
## "******************tall canopy snow class*******************"
##snow on the tall canopy >2m
#treeSnowClassSc26M, treeSnowNumClassSc26M = defineSpecificClassGreater (allSnowClassSc26M, 2)
## "******************open snow class**************************"
##snow on the ground
#groundSnowClassSc26M, groundSnowNumClassSc26M = differenceBetwee2classes (allSnowClassSc26M,treeSnowNumClassSc26M)
#
##%% snow classification for sagehen 18 May
#vegSnowClassSc18M, vegSnowNumClassSc18M = defineSpecificClassGreater (upGroundPointsSnwVegSc18M, -1)
#
##no snow on the ground
#nosnowClassSc18M, noSnowNumClassSc18M = defineSpecificClassLess (vegSnowClassSc18M, 0.15)
##snow on the ground or on the trees
#allSnowClassSc18M, allSnowNumClassSc18M = differenceBetwee2classes (vegSnowClassSc18M,noSnowNumClassSc18M)
## "******************tall canopy snow class*******************"
##snow on the tall canopy >2m
#treeSnowClassSc18M, treeSnowNumClassSc18M = defineSpecificClassGreater (allSnowClassSc18M, 2)
## "******************open snow class**************************"
##snow on the ground
#groundSnowClassSc18M, groundSnowNumClassSc18M = differenceBetwee2classes (allSnowClassSc18M,treeSnowNumClassSc18M)
#
##%% plotting
#fig3 = plt.figure(figsize=(20,15))
#ax3 = Axes3D(fig3)
#ax3.scatter(dem_groundPointsVegSc_sp[:, 0], dem_groundPointsVegSc_sp[:, 1], dem_groundPointsVegSc_sp[:, 2])
##ax3.scatter(coordsVegSc_sp[:, 0], coordsVegSc_sp[:, 1], coordsVegSc_sp[:, 2])
##ax3.scatter(coordsSnwSc26M[:, 0], coordsSnwSc26M[:, 1], coordsSnwSc26M[:, 2])
#ax3.scatter(coordsSnwSc18M[:, 0], coordsSnwSc18M[:, 1], coordsSnwSc18M[:, 2])
#
#ax3.legend()
#plt.title('snow Lidar data sagehen May 18',fontsize=30)
#
##for flcl in failclass2:
## ax3.scatter([x[0] for x in classes_rplc2[flcl]], [x[1] for x in classes_rplc2[flcl]])#, [x[2] for x in classesnow[flcl]])
#plt.savefig('H:\Chapter2_snow_forest\LIDAR_analysis\Sagehen\dem_groundPoints_sc_Veg&dem.png')
#
##%%
##%%
##%%
##%%
##%%
##%% dem snow off (veg) for vcm
#minXVcm = 362000.
#maxXVcm = 362100.
#minYVcm = 3972900.
#maxYVcm = 3973000.
#
#filenameS0fvcm = "W:/_users/hsafa/Chapter2_snow_forest/LIDAR_analysis/JemezVcm/vcm_snw0ff_dem.tif" #path to raster
#pathNameS0fvcm = "W:/_users/hsafa/Chapter2_snow_forest/LIDAR_analysis/JemezVcm/dem_snow0ff_vcm.png"
#elevationVegvcm = readPlotDEM(filenameS0fvcm,elevationMissNoS0f,pathNameS0fvcm)
#dem_groundPointsVegVcm = creatingCentroidGroundpointsFromDem(filenameS0fvcm,elevationMissNoS0f)#,pathNameS)
#
#dem_groundPointsVegVcm_df = pd.DataFrame(dem_groundPointsVegVcm,columns=['x','y','z'])
#dem_groundPointsVegVcm_int = dem_groundPointsVegVcm_df[(dem_groundPointsVegVcm_df['x'] >= minXVcm) & (dem_groundPointsVegVcm_df['x'] <= maxXVcm)]
#dem_groundPointsVegVcm_sp0 = dem_groundPointsVegVcm_int[(dem_groundPointsVegVcm_int['y'] >= minYVcm) & (dem_groundPointsVegVcm_int['y'] <= maxYVcm)]
#dem_groundPointsVegVcm_sp = dem_groundPointsVegVcm_sp0.values
#
##dem snow on (snw) for vcm
##filenameS0nVcm = "W:/_users/hsafa/Chapter2_snow_forest/LIDAR_analysis/JemezVcm/vcm_snw0n_dem.tif" #path to raster
##pathNameS0nVcm = "W:/_users/hsafa/Chapter2_snow_forest/LIDAR_analysis/JemezVcm/dem_snow0n_vcm.png"
##elevationSnowVcm = readPlotDEM(filenameS0nVcm,elevationMissNoS0f,pathNameS0nVcm)
##dem_groundPointsSnowVcm = creatingCentroidGroundpointsFromDem(filenameS0nVcm,elevationMissNoS0f)#,pathNameS)
#
##las file snow off (snw) for vcm
#infileVegVcm = ls.file.File("W:/_users/hsafa/Chapter2_snow_forest/LIDAR_analysis/JemezVcm/jemezSnow0ffpoints.las", mode="r")
#coordsVegVcm = np.vstack((infileVegVcm.x, infileVegVcm.y, infileVegVcm.z)).T
#
#coordsVegVcm_df = pd.DataFrame(coordsVegVcm,columns=['x','y','z'])
#coordsVegVcm_df_int = coordsVegVcm_df[(coordsVegVcm_df['x'] >= minXVcm) & (coordsVegVcm_df['x'] <= maxXVcm)]
#coordsVegVcm_sp0 = coordsVegVcm_df_int[(coordsVegVcm_df_int['y'] >= minYVcm) & (coordsVegVcm_df_int['y'] <= maxYVcm)]
#coordsVegVcm_sp = coordsVegVcm_sp0.values
#
##las file snow on (snw) for vcm
#infileSnwVcm = ls.file.File("W:/_users/hsafa/Chapter2_snow_forest/LIDAR_analysis/JemezVcm/jemezSnow0npoints.las", mode="r")
#coordsSnwVcm = np.vstack((infileSnwVcm.x, infileSnwVcm.y, infileSnwVcm.z)).T
#
#coordsSnwVcm_df = pd.DataFrame(coordsSnwVcm,columns=['x','y','z'])
#coordsnow0nVcm_int = coordsSnwVcm_df[(coordsSnwVcm_df['x'] >= minXVcm) & (coordsSnwVcm_df['x'] <= maxXVcm)]
#coordSnwVcm0 = coordsnow0nVcm_int[(coordsnow0nVcm_int['y'] >= minYVcm) & (coordsnow0nVcm_int['y'] <= maxYVcm)]
#coordsSnwVcm_sp = coordSnwVcm0.values
##%% classification with ground points from veg dem file: VCM
#centroids_newVcm=dem_groundPointsVegVcm_sp[:,0:2]
#kVcm = np.size(dem_groundPointsVegVcm_sp[:,0])
## instantiate a class
#clf1Vcm = K_Means(numOfClusters=kVcm,init_centroids=centroids_newVcm)
## fit kmean class to data
#clf1Vcm.fit(coordsVegVcm_sp,0,2)
## get classification
#classesVegVcm = clf1Vcm.classifications
#
#upGroundPointsVegVcm, classes_rplcVegVcm, pureVegClassVcm = lidarDiffGrndPoints(classesVegVcm,dem_groundPointsVegVcm_sp)
#
##%% classification of snow las file with veg ground points
#clfsVcm = K_Means(numOfClusters=kVcm,init_centroids=centroids_newVcm)
## fit kmean class to data
#clfsVcm.fit(coordsSnwVcm_sp,0,2)
## get classification
#classeSnwVcm = clfsVcm.classifications
#
#upGroundPointsSnwVegVcm, classes_rplcVSVcm, pureVegsnowClassVcm = lidarDiffGrndPoints(classeSnwVcm,dem_groundPointsVegVcm_sp)
##%% vegetation classification from DEM2014 and las2014
#vegClassVcm = upGroundPointsVegVcm[:]
##all tree classification
#allTreeClassVcm, treeNumClassVcm = defineSpecificClassGreater (vegClassVcm, 2)
#
#negVegClassVcm, negVegNumClassVcm = defineSpecificClassLess (vegClassVcm, 0)
#
## trees with low branches
#lowVegTreeClassVcm, lowVegNumClassVcm = defineLowVegClass(allTreeClassVcm)
#
## trees with no low branches
## "*************tall canopy no snow class*****************"
#nolowVegTreeClassVcm, nolowVegTreeNumClassVcm = differenceBetwee2classes (allTreeClassVcm,lowVegNumClassVcm)
#
## open space (no trees, no return between 0.15 to 2)
## all low veg
#allLowVegClassVcm, allLowVegNumClassVcm = defineLowVegClass(vegClassVcm)
#
##open places
#notOpenNumClassVcm = list(set(allLowVegNumClassVcm).union(set(treeNumClassVcm)))
## "******************open no snow class*******************"
#allOpenClassVcm, allOpenNumClassVcm = differenceBetwee2classes (vegClassVcm,notOpenNumClassVcm)
#
##%% snow classification
#vegSnowClassVcm, vegSnowNumClassVcm = defineSpecificClassGreater (upGroundPointsSnwVegVcm, -1)
#
##no snow on the ground
#nosnowClassVcm, noSnowNumClassVcm = defineSpecificClassLess (vegSnowClassVcm, 0.15)
##snow on the ground or on the trees
#allSnowClassVcm, allSnowNumClassVcm = differenceBetwee2classes (vegSnowClassVcm,noSnowNumClassVcm)
## "******************tall canopy snow class*******************"
##snow on the tall canopy >2m
#treeSnowClassVcm, treeSnowNumClassVcm = defineSpecificClassGreater (allSnowClassVcm, 2)
## "******************open snow class**************************"
##snow on the ground
#groundSnowClassVcm, groundSnowNumClassVcm = differenceBetwee2classes (allSnowClassVcm,treeSnowNumClassVcm)
##%% plotting
#figVcm = plt.figure(figsize=(20,15))
#axVcm = Axes3D(figVcm)
#axVcm.scatter(dem_groundPointsVegVcm_sp[:, 0], dem_groundPointsVegVcm_sp[:, 1], dem_groundPointsVegVcm_sp[:, 2])
#axVcm.scatter(coordsSnwVcm_sp[:, 0], coordsSnwVcm_sp[:, 1], coordsSnwVcm_sp[:, 2])
##axVcm.scatter(coordsVegVcm_sp[:, 0], coordsVegVcm_sp[:, 1], coordsVegVcm_sp[:, 2])
#axVcm.legend()
#plt.title('snow Lidar data JemezVcm',fontsize=30)
#
##for flcl in failclass2:
## ax3.scatter([x[0] for x in classes_rplc2[flcl]], [x[1] for x in classes_rplc2[flcl]])#, [x[2] for x in classesnow[flcl]])
#plt.savefig('H:\Chapter2_snow_forest\LIDAR_analysis\JemezVcm\dem_groundPointsVeg&coordsVegSnwVcm.png')
##%%
##%%
##%%
##%%
##%%
##%% dem snow off (veg) for NR1 Niwot
#
#minXNr1 = 454900.
#maxXNr1 = 455000.
#minYNr1 = 4430500.
#maxYNr1 = 4430600.
#
#filenameS0fNr1 = "W:/_users/hsafa/Chapter2_snow_forest/LIDAR_analysis/Niwot/nr1_snw0ff_dem.tif" #path to raster
#pathNameS0fNr1 = "W:/_users/hsafa/Chapter2_snow_forest/LIDAR_analysis/Niwot/dem_snow0ff_Nr1.png"
#elevationVegNr1 = readPlotDEM(filenameS0fNr1,elevationMissNoS0f,pathNameS0fNr1)
#dem_groundPointsVegNr1 = creatingCentroidGroundpointsFromDem(filenameS0fNr1,elevationMissNoS0f)#,pathNameS)
#
#dem_groundPointsVegNr1_df = pd.DataFrame(dem_groundPointsVegNr1,columns=['x','y','z'])
#dem_groundPointsVegNr1_int = dem_groundPointsVegNr1_df[(dem_groundPointsVegNr1_df['x'] >= minXNr1) & (dem_groundPointsVegNr1_df['x'] <= maxXNr1)]
#dem_groundPointsVegNr1_sp0 = dem_groundPointsVegNr1_int[(dem_groundPointsVegNr1_int['y'] >= minYNr1) & (dem_groundPointsVegNr1_int['y'] <= maxYNr1)]
#dem_groundPointsVegNr1_sp = dem_groundPointsVegNr1_sp0.values
#
## dem snow on (snw) for NR1
##filenameS0nNr1 = "W:/_users/hsafa/Chapter2_snow_forest/LIDAR_analysis/Niwot/nr1_snw0n9_dem.tif" #path to raster
##pathNameS0nNr1 = "W:/_users/hsafa/Chapter2_snow_forest/LIDAR_analysis/Niwot/dem_snow0n_Nr1.png"
##elevationSnowNr1 = readPlotDEM(filenameS0nNr1,elevationMissNoS0f,pathNameS0nNr1)
##dem_groundPointsSnowNr1 = creatingCentroidGroundpointsFromDem(filenameS0nNr1,elevationMissNoS0f)#,pathNameS)
#
##las file snow off (veg) for NR1
#infileVegNr1 = ls.file.File("W:/_users/hsafa/Chapter2_snow_forest/LIDAR_analysis/Niwot/niwotSnow0ffpoints.las", mode="r")
#coordsVegNr1 = np.vstack((infileVegNr1.x, infileVegNr1.y, infileVegNr1.z)).T
#
#coordsVegNr1_df = pd.DataFrame(coordsVegNr1,columns=['x','y','z'])
#coordsVegNr1_df_int = coordsVegNr1_df[(coordsVegNr1_df['x'] >= minXNr1) & (coordsVegNr1_df['x'] <= maxXNr1)]
#coordsVegNr1_sp0 = coordsVegNr1_df_int[(coordsVegNr1_df_int['y'] >= minYNr1) & (coordsVegNr1_df_int['y'] <= maxYNr1)]
#coordsVegNr1_sp = coordsVegNr1_sp0.values
#
##las file snow on (snw) for NR1
#infileSnwNr1 = ls.file.File("W:/_users/hsafa/Chapter2_snow_forest/LIDAR_analysis/Niwot/niwotSnowOn09may2010points.las", mode="r")
#coordsSnwNr1 = np.vstack((infileSnwNr1.x, infileSnwNr1.y, infileSnwNr1.z)).T
#
#coordsSnwNr1_df = pd.DataFrame(coordsSnwNr1,columns=['x','y','z'])
#coordsnow0nNr1_int = coordsSnwNr1_df[(coordsSnwNr1_df['x'] >= minXNr1) & (coordsSnwNr1_df['x'] <= maxXNr1)]
#coordSnwNr10 = coordsnow0nNr1_int[(coordsnow0nNr1_int['y'] >= minYNr1) & (coordsnow0nNr1_int['y'] <= maxYNr1)]
#coordsSnwNr1_sp = coordSnwNr10.values
##%% classification with ground points from the veg dem file: NR1
#centroids_newNr1=dem_groundPointsVegNr1_sp[:,0:2]
#kNr1 = np.size(dem_groundPointsVegNr1_sp[:,0])
## instantiate a class
#clf1Nr1 = K_Means(numOfClusters=kNr1,init_centroids=centroids_newNr1)
## fit kmean class to data
#clf1Nr1.fit(coordsVegNr1_sp,0,2)
## get classification
#classesVegNr1 = clf1Nr1.classifications
#
#upGroundPointsVegNr1, classes_rplcVegNr1, pureVegClassNr1 = lidarDiffGrndPoints(classesVegNr1,dem_groundPointsVegNr1_sp)
#
##%% classification of snow las file with veg ground points
#clfsNr1 = K_Means(numOfClusters=kNr1,init_centroids=centroids_newNr1)
## fit kmean class to data
#clfsNr1.fit(coordsSnwNr1_sp,0,2)
## get classification
#classeSnwNr1 = clfsNr1.classifications
#
#upGroundPointsSnwVegNr1, classes_rplcVSNr1, pureVegsnowClassNr1 = lidarDiffGrndPoints(classeSnwNr1,dem_groundPointsVegNr1_sp)
#
##%% vegetation classification from DEM2014 and las2014
#vegClassNr1 = upGroundPointsVegNr1[:]
##all tree classification
#allTreeClassNr1, treeNumClassNr1 = defineSpecificClassGreater (vegClassNr1, 2)
#
#negVegClassNr1, negVegNumClassNr1 = defineSpecificClassLess (vegClassNr1, 0)
#
## trees with low branches
#lowVegTreeClassNr1, lowVegNumClassNr1 = defineLowVegClass(allTreeClassNr1)
#
## trees with no low branches
## "*************tall canopy no snow class*****************"
#nolowVegTreeClassNr1, nolowVegTreeNumClassNr1 = differenceBetwee2classes (allTreeClassNr1,lowVegNumClassNr1)
#
## open space (no trees, no return between 0.15 to 2)
## all low veg
#allLowVegClassNr1, allLowVegNumClassNr1 = defineLowVegClass(vegClassNr1)
#
##open places
#notOpenNumClassNr1 = list(set(allLowVegNumClassNr1).union(set(treeNumClassNr1)))
## "******************open no snow class*******************"
#allOpenClassNr1, allOpenNumClassNr1 = differenceBetwee2classes (vegClassNr1,notOpenNumClassNr1)
#
##%% snow classification
#vegSnowClassNr1, vegSnowNumClassNr1 = defineSpecificClassGreater (upGroundPointsSnwVegNr1, -1)
#
##no snow on the ground
#nosnowClassNr1, noSnowNumClassNr1 = defineSpecificClassLess (vegSnowClassNr1, 0.15)
##snow on the ground or on the trees
#allSnowClassNr1, allSnowNumClassNr1 = differenceBetwee2classes (vegSnowClassNr1,noSnowNumClassNr1)
## "******************tall canopy snow class*******************"
##snow on the tall canopy >2m
#treeSnowClassNr1, treeSnowNumClassNr1 = defineSpecificClassGreater (allSnowClassNr1, 2)
## "******************open snow class**************************"
##snow on the ground
#groundSnowClassNr1, groundSnowNumClassNr1 = differenceBetwee2classes (allSnowClassNr1,treeSnowNumClassNr1)
##%%
#figNr1 = plt.figure(figsize=(20,15))
#axNr1 = Axes3D(figNr1)
#axNr1.scatter(dem_groundPointsVegNr1_sp[:, 0], dem_groundPointsVegNr1_sp[:, 1], dem_groundPointsVegNr1_sp[:, 2])
#axNr1.scatter(coordsSnwNr1_sp[:, 0], coordsSnwNr1_sp[:, 1], coordsSnwNr1_sp[:, 2])
##axNr1.scatter(coordsVegNr1_sp[:, 0], coordsVegNr1_sp[:, 1], coordsVegNr1_sp[:, 2])
#axNr1.legend()
#plt.title('snow Lidar data NiwotNr1',fontsize=30)
#
##for flcl in failclass2:
## ax3.scatter([x[0] for x in classes_rplc2[flcl]], [x[1] for x in classes_rplc2[flcl]])#, [x[2] for x in classesnow[flcl]])
#plt.savefig("H:/Chapter2_snow_forest/LIDAR_analysis/Niwot/dem_groundPointsVegT1&coordsVegNr1.png")
##%%
#
#def choosingFirstArray0fEachClassAndSettingValueForThatClass(classList,value):
# class_1stArr = []
# for arr in classList:
# class_1stArr.append(arr[0][0])
# class_1stArr_df = pd.DataFrame(class_1stArr, columns = ['x','y','z'])
# class_1stArr_df['z'] = value
# return class_1stArr_df
#
#Nosnow0nGroundNr1_df = choosingFirstArray0fEachClassAndSettingValueForThatClass(nosnowClassVcm,0)
#Nosnow0nGroundNr1_df_int = Nosnow0nGroundNr1_df.astype(int)
#
#Snow0nTallTreeNr1_df = choosingFirstArray0fEachClassAndSettingValueForThatClass(treeSnowClassVcm,0.2)
#Snow0nTallTreeNr1_df_int = Snow0nTallTreeNr1_df.astype(int)
#
#Snow0nGroundNr1_df = choosingFirstArray0fEachClassAndSettingValueForThatClass(groundSnowClassVcm,0.4)
#Snow0nGroundNr1_df_int = Snow0nGroundNr1_df.astype(int)
#
#figNr1 = plt.figure(figsize=(20,15))
#axNr1 = Axes3D(figNr1)
#axNr1.scatter(Nosnow0nGroundNr1_df_int['x'], Nosnow0nGroundNr1_df_int['y'], Nosnow0nGroundNr1_df_int['z'])
#axNr1.scatter(Snow0nGroundNr1_df_int['x'], Snow0nGroundNr1_df_int['y'], Snow0nGroundNr1_df_int['z'])
#axNr1.scatter(Snow0nTallTreeNr1_df_int['x'], Snow0nTallTreeNr1_df_int['y'], Snow0nTallTreeNr1_df_int['z'])
#plt.legend()
#plt.title('snow on the ground vs. on the tall canopy in Jemez_VCM',fontsize=30)
#
#plt.savefig("H:/Chapter2_snow_forest/LIDAR_analysis/Sagehen/snowDepthSc26M.png")
#
##SnowDemNR1 = pd.concat([Nosnow0nGroundNr1_df_int, Snow0nGroundNr1_df_int, Snow0nTallTreeNr1_df_int], axis=0, ignore_index=True)
##SnowDemNR1.sort_values(by=['x'])
##SnowDemNR1_drop = SnowDemNR1.drop_duplicates(subset=['x','y','z'], keep = "first")
##SnowDemNR1_drop.set_index([np.arange(0,len(SnowDemNR1_drop))],inplace=True)
##
##DemNr1 = dem_groundPointsVegNr1_sp0.copy()
##DemNr1_int = DemNr1.astype(int)
##DemNr1_int.set_index([np.arange(0,len(DemNr1_int))],inplace=True)
#
##for demPnt in range(len(DemNr1_int)):
## for snwPnt in range(len(SnowDemNR1_drop)):
## if (DemNr1_int['x'][demPnt] == SnowDemNR1_drop['X'][snwPnt]) & (DemNr1_int['y'][demPnt] == SnowDemNR1_drop['Y'][snwPnt]):
## DemNr1_int['z'][demPnt] = SnowDemNR1_drop['Z'][snwPnt]
## else:DemNr1_int['z'][demPnt] = -1
#
##Check = DemNr1_int['z'].replace(SnowDemNR1_drop['z'])
#
##DemNr1_int.loc[SnowDemNR1_drop['x'].isin(DemNr1_int.x) & SnowDemNR1_drop['y'].isin(DemNr1_int.y),'z']=SnowDemNR1_drop['z']
#
##DemNr1_int.loc[SnowDemNR1_drop['x']==DemNr1_int.x & SnowDemNR1_drop['y']==DemNr1_int.y,'z']=SnowDemNR1_drop['z']
#
##SameIndexNr1 = DemNr1_int[~(DemNr1_int['x'].isin(SnowDemNR1_drop['x']))].index# | (df1['A'].isnull())
##a = SnowDemNR1_drop[SnowDemNR1_drop['x'].isin(DemNr1_int['x'])].index
##df3['D'][a]=3
#
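# ---------------------------------------------------------------------------
# Illustrative sketch only: the commented-out pipeline above assigns each
# LiDAR return to a DEM ground cell (via the custom K_Means class) and then
# differences elevations (lidarDiffGrndPoints). A minimal stand-in for that
# height-above-ground step, using a KD-tree instead of the custom classes
# (function and variable names here are hypothetical):
#
# import numpy as np
# from scipy.spatial import cKDTree
#
# def height_above_ground(returns_xyz, ground_xyz):
#     """returns_xyz, ground_xyz: (N, 3) arrays of x, y, z."""
#     tree = cKDTree(ground_xyz[:, :2])          # index ground cells by x/y
#     _, idx = tree.query(returns_xyz[:, :2])    # nearest ground cell per return
#     return returns_xyz[:, 2] - ground_xyz[idx, 2]
#
# # Returns more than 2 m above ground would fall in the "tall canopy" class
# # used above; 0.15-2 m corresponds to the "low vegetation" band.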
|
[
"safa.hamideh@gmail.com"
] |
safa.hamideh@gmail.com
|
d421d8a6cb77af09a0b88d475ce0a1924f8cbbd4
|
5f0c1597fc4a31e4b5e57013a36d7eb946478f00
|
/setup.py
|
59aee57af603496aeb774fce64a9976d2a05a0db
|
[] |
no_license
|
g-delrieu/final_project
|
7d8b0614fe7a0064452aa8873aa260603a8ab1bf
|
b51c6d0954cf7ca28c057f235b871f6da29bc341
|
refs/heads/master
| 2022-12-04T13:10:58.053975
| 2020-08-17T13:21:08
| 2020-08-17T13:21:08
| 288,180,634
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 508
|
py
|
from setuptools import find_packages
from setuptools import setup
with open('requirements.txt') as f:
content = f.readlines()
requirements = [x.strip() for x in content if 'git+' not in x]
setup(name='final_project',
      version="1.0",
      description="Project Description",
      packages=find_packages(),
      install_requires=requirements,  # the parsed requirements.txt was previously unused
      test_suite='tests',
      # include_package_data: to install data from MANIFEST.in
      include_package_data=True,
      scripts=['scripts/final_project-run'],
      zip_safe=False)
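# Usage sketch (assuming a requirements.txt next to this file):
#   pip install -e .    # editable install for development
#   pip install .       # regular install
# The 'git+' filter above is a common workaround: raw VCS URLs are not valid
# install_requires specifiers, so such lines are dropped before setup() sees them.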
|
[
"georges.delrieu@laposte.net"
] |
georges.delrieu@laposte.net
|
fc4c0d1db4eaeead8e1eb8cff28e33ab6b3f4f39
|
89ac4818b73dcc84687cbd83c45f403a79cc8f2c
|
/profile_dao.py
|
50b09d807f1b5af48c2efc1d019ca246b00d4377
|
[] |
no_license
|
thedanielzhang/quarantine-profile-service
|
1939f8a3072b9cc01b544e42f6665eb3e562e76c
|
6fb582063ca7892c3dc8ecca3acc884fb6bf8dcf
|
refs/heads/master
| 2021-05-23T14:16:10.434475
| 2020-04-05T23:57:28
| 2020-04-05T23:57:28
| 253,333,002
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 178
|
py
|
from flask_restful import Resource, fields, marshal_with
class ProfileDao(object):
def __init__(self, user_id, name):
self.user_id = user_id
self.name = name
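# The flask_restful imports above are otherwise unused, which suggests this
# DAO is meant to be serialized with marshal_with. A minimal, hypothetical
# sketch (the field types and the resource below are assumptions):
profile_fields = {
    'user_id': fields.String,
    'name': fields.String,
}

class ProfileResource(Resource):
    @marshal_with(profile_fields)
    def get(self, user_id):
        # illustrative lookup; a real service would query a datastore
        return ProfileDao(user_id=user_id, name='Jane Doe')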
|
[
"daniel.zhang@berkeley.edu"
] |
daniel.zhang@berkeley.edu
|
ddf0c274cf4d14eafea505c4824a1e8f29fbb4e7
|
9ae3f2c497935b0e7733eef7000e630c86ea7afd
|
/src/EmailClient.py
|
4bdf3501d311fce584300383a5e3f173a6231372
|
[] |
no_license
|
bearhockey/tyrra
|
0e25af719a1ff9e542b99391f711c73a6859ec83
|
7877c9551909e5e635501dd4e8eadde6a1e94160
|
refs/heads/master
| 2021-01-17T14:30:54.732060
| 2018-08-01T16:53:22
| 2018-08-01T16:53:22
| 44,580,223
| 0
| 0
| null | 2018-08-01T16:53:23
| 2015-10-20T03:38:03
|
Python
|
UTF-8
|
Python
| false
| false
| 1,921
|
py
|
import pygame
import Color
from src.components.text.TextBox import TextBox
class EmailClient(object):
def __init__(self):
self.emails = []
class Email(object):
def __init__(self, screen_width, screen_height, subject=None, message=None, font=None, small_font=None):
self.big_font_size = 24
self.small_font_size = 16
self.large_font = font
self.small_font = small_font
if not self.large_font:
self.large_font = pygame.font.Font(pygame.font.match_font('kaiti'), self.big_font_size)
        if not self.small_font:
            # bug fix: this branch previously overwrote large_font by mistake
            self.small_font = pygame.font.Font(pygame.font.match_font('kaiti'), self.small_font_size)
self.subject = subject
self.message = message
self.body_width = screen_width
self.body_height = screen_height
        self.email_screen = pygame.Surface((self.body_width, self.body_height))  # Surface takes its size positionally
self.subject_box = TextBox(pygame.Rect(10, 10, 10, 50), box_color=Color.blue, border_color=Color.gray,
highlight_color=Color.white, active_color=Color.gray, text_color=Color.white,
text_outline=Color.white, font=self.large_font)
self.message_box = TextBox(pygame.Rect(10, 40, 10, 50), text_color=Color.white,
text_outline=Color.white, font=self.small_font)
if self.subject:
self.subject_box.message = self.subject
else:
self.subject_box.message = 'FW: Get big peen now!'
if self.message:
self.message_box.message = self.message
else:
self.message_box.message = 'FW: Get big peen now!'
def draw(self, screen):
self.email_screen.fill(Color.d_blue)
self.subject_box.draw(self.email_screen)
self.message_box.draw(self.email_screen)
screen.blit(self.email_screen, (0, 0))
|
[
"bobmanworld@yahoo.com"
] |
bobmanworld@yahoo.com
|
85bd61efaca702a1523de934ab4d8f65ceca6037
|
24d97f3af69d9664b5555aeb81e37ba7aa833690
|
/utility_functions.py
|
4f4b03949064d713aaa38468da7cb80aeb385e45
|
[
"MIT"
] |
permissive
|
Filippo95/SensorNetwork
|
579c4fdf86ba554392feb6eed8eaa13d69eb5108
|
f5129aefd6e12d078627628556ba666eb6aac67e
|
refs/heads/main
| 2023-06-08T20:05:53.922402
| 2021-06-25T11:12:07
| 2021-06-25T11:12:07
| 369,548,450
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,637
|
py
|
import os
import sys
import pprint
from math import sin, cos, sqrt, atan2, radians
from deprecated import deprecated
# helper class used to control how much
# output is printed to the console
class Verbosity:
def __init__(self, quiet, verbose, more_verbose):
self.quiet = quiet
        self.verbose = verbose or more_verbose  # more_verbose output implies verbose output as well
self.more_verbose = more_verbose
# Unused.
# Originally written to compute the distance in 2D; a more precise
# method that accounts for the Earth's curvature is now used instead.
@deprecated(reason="Use distance(), which accounts for the Earth's curvature")
def distance_in_2d(sens_one, sens_two):
x_0 = sens_one.longitudine
y_0 = sens_one.latitudine
x_1 = sens_two.longitudine
y_1 = sens_two.latitudine
return sqrt((y_0 - y_1) ** 2 + (x_0 - x_1) ** 2)
def find_sensor_by_id(sensor):
for sen in get_global_sensors():
if sen.id == sensor:
return sen
return None
# Takes two coordinate tuples and returns their distance over the Earth's surface
def distance_by_coord(node_one, node_two):
    # Approximate Earth radius in km
raggio_terra = 6373.0
lat1 = radians(node_one[0])
lon1 = radians(node_one[1])
lat2 = radians(node_two[0])
lon2 = radians(node_two[1])
diff_lon = lon2 - lon1
diff_lat = lat2 - lat1
a = sin(diff_lat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(diff_lon / 2) ** 2
c = 2 * atan2(sqrt(a), sqrt(1 - a))
distanza = raggio_terra * c * 1000
return distanza
# Takes two sensors and returns
# their distance over the Earth's surface
def distance(sens_one, sens_two):
return distance_by_coord((sens_one.latitudine, sens_one.longitudine),
(sens_two.latitudine, sens_two.longitudine))
def print_scenario(a_dict, order_by):
print("\n\n\n\n\n---------------------------------------------------\n\n\n\n\n")
    print("SCENARIO - ORDERED BY: " + order_by)
for temp_sens in a_dict.keys():
        print("\nSensor " + str(temp_sens.id) + ":")
temp_val = a_dict[temp_sens]
temp_sens_list = temp_val["senders"]
temp_tot_cap = temp_val["tot_capacita"]
temp_rapp_cap_costo = temp_val["rapp_cap_costo"]
temp_rapp_numsensori_costo = temp_val["rapp_numsensori_costo"]
print("Senders: ", end='')
for temp_sender in temp_sens_list:
print(str(temp_sender.id) + " ", end='')
        print("\nTotal capacity: " + str(temp_tot_cap))
        print("Capacity/cost ratio: " + str(temp_rapp_cap_costo))
        print("Sensor-count/cost ratio: " + str(temp_rapp_numsensori_costo))
print("\n\n")
def print_greedy_result(result):
if get_verbosity().verbose:
print("\n\n\n")
        print("Devices installed by the greedy:\n")
pp = pprint.PrettyPrinter(indent=3)
pp.pprint(result)
    elif not get_verbosity().quiet:  # with "normal" verbosity, print only the first 3
print("\n\n\n")
        print("Devices installed by the greedy (partial):\n")
pp = pprint.PrettyPrinter(indent=3)
pp.pprint(dict(list(result.items())[:3]))
print("\t\t.\n\t\t.\n\t\t.\n")
def print_mst_result(mst):
if get_verbosity().verbose:
        print("\n\n\nEdges selected for the MST:\n")
for edge in mst:
            print(f"{edge['node_one']} - {edge['node_two']} - Cost {edge['costo']}")
    elif not get_verbosity().quiet:  # with "normal" verbosity, print only the first 3
        print("\n\n\nEdges selected for the MST (partial):\n")
        for edge in mst[:3]:
            print(f"{edge['node_one']} - {edge['node_two']} - Cost {edge['costo']}")
print("\t.\n\t.\n\t.\n")
def prepara_cartelle_e_file(num_sensori, order_by, pack_by, num_iter, no_display):
if not os.path.isdir("./solutions"):
os.mkdir("./solutions")
intestazione_csv = "seed,numsensori,order_by,pack_by,num_iter_ls," + \
"greedy_cost,mst_cost,first_tot,first_ls_tot,second_ls_tot," + \
"num_gw_class_1,fattore_riduzione"
    # If the --no-display option is passed, only the run's result is appended
    # to the .csv file (for analysis and chart generation)
if no_display:
text_output_path_grafici = f"./solutions/graph_data.csv"
if not os.path.isfile(text_output_path_grafici):
with open(text_output_path_grafici, 'w') as f:
original_stdout = sys.stdout
sys.stdout = f
print(intestazione_csv)
sys.stdout = original_stdout
return None, None, None, text_output_path_grafici
saving_path = f"./solutions/{num_sensori}/{get_seed()}/{order_by}+{pack_by}+{num_iter}/"
saving_path_ls = saving_path + "localsearch/"
text_output_path = saving_path + "output.txt"
text_output_path_grafici = f"./solutions/graph_data.csv"
if not os.path.isdir(f"./solutions/{num_sensori}"):
os.mkdir(f"./solutions/{num_sensori}")
if not os.path.isdir(f"./solutions/{num_sensori}/{get_seed()}"):
os.mkdir(f"./solutions/{num_sensori}/{get_seed()}")
if not os.path.isdir(saving_path):
os.mkdir(saving_path)
if not os.path.isdir(saving_path_ls):
os.mkdir(saving_path_ls)
if os.path.isfile(text_output_path):
os.remove(text_output_path)
if not os.path.isfile(text_output_path_grafici):
with open(text_output_path_grafici, 'w') as f:
original_stdout = sys.stdout
sys.stdout = f
print(intestazione_csv)
sys.stdout = original_stdout
return saving_path, saving_path_ls, text_output_path, text_output_path_grafici
verbosity = Verbosity(False, False, False)
def get_verbosity():
return verbosity
def set_verbosity(quiet=False, verbose=False, more_verbose=False):
global verbosity
verbosity = Verbosity(quiet, verbose, more_verbose)
random_seed = 12345  # for reproducibility of the examples
# the original seed was 1625
def get_seed():
return random_seed
def set_seed(new_seed):
global random_seed
random_seed = new_seed
gateway_classes = []
def get_gateways_classes():
return gateway_classes
def set_gateways_classes(new_gateway_classes):
global gateway_classes
gateway_classes = new_gateway_classes
sensors = []
def get_global_sensors():
return sensors
def set_global_sensors(new_sensors):
global sensors
sensors = new_sensors
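# Quick sanity check for distance_by_coord (coordinates are illustrative):
# Milan (45.4642 N, 9.1900 E) to Turin (45.0703 N, 7.6869 E) is roughly
# 126 km over the Earth's surface, so the result should be ~1.26e5 metres.
if __name__ == '__main__':
    d = distance_by_coord((45.4642, 9.1900), (45.0703, 7.6869))
    print(f"Milan-Turin: {d / 1000:.1f} km")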
|
[
"bettoide@gmail.com"
] |
bettoide@gmail.com
|
6c05152ea75bc93635a3d36a0bafa88e0bdf729c
|
73921d824c55cc976a6d59b1fe3353760f27386f
|
/json2yaml.py
|
7e5250f6ebe03d550fa6b385580e637cd5aa046e
|
[] |
no_license
|
mocanug/yq
|
eb00771df013c8cddd6bd604389628d5900a44c8
|
4edde41062fb4b6a3a06add0820027e43572d8bf
|
refs/heads/master
| 2021-06-17T12:09:48.620767
| 2019-05-30T05:10:05
| 2019-05-30T06:18:04
| 189,212,510
| 1
| 0
| null | 2021-03-25T22:40:02
| 2019-05-29T11:30:30
|
Shell
|
UTF-8
|
Python
| false
| false
| 506
|
py
|
#!/usr/bin/env python3
import sys
import json
import yaml
def main():
std_input = ""
while True:
try:
line = input()
except EOFError:
break
std_input += line
try:
json_in = json.loads(std_input)
print(yaml.dump(json_in))
except json.JSONDecodeError as e:
print("No valid json: {}".format(e.msg))
    except Exception as e:
        print("Error: {}".format(e))  # generic exceptions have no .msg attribute
if __name__ == '__main__':
sys.exit(main())
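# Usage sketch (JSON on stdin, YAML on stdout; PyYAML >= 5.1 emits block
# style by default):
#   echo '{"name": "yq", "tags": ["cli", "yaml"]}' | ./json2yaml.py
# Expected output:
#   name: yq
#   tags:
#   - cli
#   - yaml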
|
[
"mocanu.georgel@gmail.com"
] |
mocanu.georgel@gmail.com
|
3fc4ede212e38d6330eaee78fa49f2c2513c49e1
|
27705074a8125aa8379663d4b57621ab9fe41da0
|
/DEA_2nd_order_stats_tas.py
|
756463ccd099e85b472f997b715578e7c379e3e4
|
[] |
no_license
|
taslandcover/DEA_testing
|
98572af682d50b3b59eabab1ae53b2b61d66be9d
|
19d9526e8d7a9327ddbe7fa2d21c42b03bede135
|
refs/heads/master
| 2022-05-05T20:41:35.833716
| 2022-04-11T04:13:42
| 2022-04-11T04:13:42
| 153,358,933
| 2
| 0
| null | 2021-10-21T22:23:02
| 2018-10-16T21:51:33
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,914
|
py
|
import warnings; warnings.simplefilter('ignore')
import fnmatch
import os
import pandas as pd
import geopandas as gpd
from datacube_stats.statistics import GeoMedian
from datacube.helpers import ga_pq_fuser
from datacube.storage import masking
from datacube.helpers import write_geotiff
import xarray as xr
#get the DEA version of the plotting functions
import sys
sys.path.append(os.path.abspath('/g/data/r78/DPIPWE_lm/dea-notebooks/10_Scripts'))
sys.path.append(os.path.abspath('/g/data/r78/DPIPWE_lm/datacube-2nd-order-stats'))
import DEAPlotting
import DEADataHandling
from model import SMAD, BCMAD, EMAD, TernaryMAD
import datacube
dc = datacube.Datacube(app='stats_2nd_testing')
print("modules loaded...")
###############################################################################
outputdir = '/g/data/r78/DPIPWE_lm/output_data'
if not os.path.exists(outputdir):
print("output directory doesn't exist")
exit()
#x, y = (1385000.0, 1390000.0), (-4570000.0, -4575000.0)
sensors = ['ls8', 'ls7', 'ls5'] #take or remove as needed
deriv = 'nbart'
#product = 'nbart' #
time = ('2010-01-01', '2015-12-31')
resolution = (-25,25)
bands = ['red', 'green', 'blue', 'nir', 'swir1', 'swir2']
#epoch = ('2016', '2017') # time query for datacube function can be just years
query = {'x': (1300000.0, 1400000.0),
'y': (-4700000.0, -4800000.0),
'time': time,
'resolution': resolution,
'crs': 'EPSG:3577'}
###############################################################################
print("...loading clear landsat.")
dsma = DEADataHandling.load_clearlandsat(dc=dc, query=query,
#product=product,
masked_prop=0,
sensors = sensors,
bands_of_interest = bands,
mask_pixel_quality=True,
ls7_slc_off=True)
dsma = dsma.drop('data_perc')
# compute geomedian
#dsma_gm = GeoMedian().compute(dsma)
##############################################################################
print("...computing TernaryMAD")
dsma_tmad = TernaryMAD().compute(dsma)
ds=xr.Dataset({'smad': (['y','x'], dsma_tmad.sdev),
'emad': (['y','x'], dsma_tmad.edev),
'bcmad': (['y','x'], dsma_tmad.bcdev)},
coords={'x': dsma.x, 'y':dsma.y}, attrs=dsma.attrs)
print("...writing output")
#datacube.storage.storage.write_dataset_to_netcdf(dsma_smad, '/g/data/r78/DPIPWE_LM/output_data/ls8_smad_test.nc')
datacube.helpers.write_geotiff(filename='/g/data/r78/DPIPWE_lm/output_data/lsX_TMAD_2010_2015_v2.tif', dataset=ds)
#datacube.storage.storage.write_dataset_to_netcdf(dsma_gm, '/g/data/r78/DPIPWE_lm/output_data/lsX_pcm_2016.nc')
#DEADataHandling.dataset_to_geotiff('dsma_smad_netcdf_test.nc', dsma_smad)
|
[
"noreply@github.com"
] |
taslandcover.noreply@github.com
|
aa0dfa44d4fa20c94bc9ea9a88d7d006a8ea3f3e
|
99ba551645dc9beed36f0478b396977c50c3e7ef
|
/leetcode-vscode/3.无重复字符的最长子串.py
|
96731b4198c1e46ad3c2adcb2621ff3ca7a1600c
|
[] |
no_license
|
wulinlw/leetcode_cn
|
57381b35d128fb3dad027208935d3de3391abfd0
|
b0f498ebe84e46b7e17e94759dd462891dcc8f85
|
refs/heads/master
| 2021-08-09T17:26:45.688513
| 2021-07-15T14:38:30
| 2021-07-15T14:38:30
| 134,419,586
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,565
|
py
|
#
# @lc app=leetcode.cn id=3 lang=python3
#
# [3] Longest Substring Without Repeating Characters
#
# https://leetcode-cn.com/problems/longest-substring-without-repeating-characters/description/
#
# algorithms
# Medium (33.08%)
# Likes: 3551
# Dislikes: 0
# Total Accepted: 455.9K
# Total Submissions: 1.3M
# Testcase Example: '"abcabcbb"'
#
# Given a string, find the length of the longest substring without repeating characters.
#
# Example 1:
#
# Input: "abcabcbb"
# Output: 3
# Explanation: the longest substring without repeating characters is "abc", so its length is 3.
#
#
# Example 2:
#
# Input: "bbbbb"
# Output: 1
# Explanation: the longest substring without repeating characters is "b", so its length is 1.
#
#
# Example 3:
#
# Input: "pwwkew"
# Output: 3
# Explanation: the longest substring without repeating characters is "wke", so its length is 3.
# Note that the answer must be the length of a substring; "pwke" is a subsequence, not a substring.
#
#
#
# @lc code=start
class Solution:
def lengthOfLongestSubstring(self, s: str) -> int:
h = {}
maxlen, curlen = 0, 0
start = 0
for i in range(len(s)):
            if s[i] in h and h[s[i]] >= start:  # repeat inside the window: move the left boundary past the previous occurrence
                start = h[s[i]] + 1
            curlen = i - start + 1  # current window length, used to update the maximum
h[s[i]] = i
maxlen = max(maxlen, curlen)
return maxlen
# @lc code=end
s = "abcabcbb"
o = Solution()
print(o.lengthOfLongestSubstring(s))
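# Worked trace of the sliding window on s = "abcabcbb":
#   i=0..2: window "abc", maxlen=3
#   i=3 'a': h['a']=0 >= start=0 -> start=1, window "bca"
#   i=4 'b': start=2, window "cab"
#   i=5 'c': start=3, window "abc"
#   i=6 'b': h['b']=4 >= start=3 -> start=5, window "cb"
#   i=7 'b': start=7, window "b"
# maxlen stays 3, matching the printed result above.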
|
[
"wulinlw@gmail.com"
] |
wulinlw@gmail.com
|
ff2ab21a6816a602b9b8428959d31059be5f74ce
|
48ca4bbdc231e02c7c107bdcff2110e829674b8b
|
/fishserve/migrations/0004_auto_20170919_0117.py
|
bf0a64bbce168fafae3fb30a2157b353703f2b9a
|
[] |
no_license
|
fisherieslogistics/py3_django_reporting_rest_api
|
a42b77761568e991ce15776b0a0fb675ed15be15
|
26cacc84ecc0626dcdba5aa06b3fcc8490b08775
|
refs/heads/master
| 2022-12-11T00:55:36.160647
| 2018-07-09T05:56:39
| 2018-07-09T05:56:39
| 100,552,776
| 0
| 0
| null | 2022-05-25T00:29:36
| 2017-08-17T02:29:33
|
Python
|
UTF-8
|
Python
| false
| false
| 473
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-09-19 01:17
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('fishserve', '0003_auto_20170918_2319'),
]
operations = [
migrations.AlterField(
model_name='fishserveevents',
name='status',
field=models.CharField(max_length=20, null=True),
),
]
|
[
"30398335+martin-fll@users.noreply.github.com"
] |
30398335+martin-fll@users.noreply.github.com
|
3f0f76bb849e5dab384a6de2abf6d49e8355b2b8
|
4eecb0ccaaf0ec865d75a0b70d7afcb9e1824543
|
/Python/Analysis/PID.py
|
637b937c57842c52d0d6b1b9a5de3f1e497ccd6c
|
[
"Apache-2.0"
] |
permissive
|
garethnisbet/T-BOTS
|
8a82c15cd248610b82b7f4a77f83dec7dcac797a
|
60fdb85faefffcbb2314f74fdd19b81f37313c8f
|
refs/heads/master
| 2023-02-07T06:32:22.962540
| 2023-02-05T11:30:59
| 2023-02-05T11:30:59
| 136,524,216
| 27
| 10
| null | 2020-05-10T11:38:52
| 2018-06-07T19:52:32
|
Python
|
UTF-8
|
Python
| false
| false
| 1,673
|
py
|
import numpy as np
import matplotlib.pyplot as plt
plt.ion()
################## Functions #########################
def integrate(setpoint, Ki, ydata, dt):
integ = np.cumsum(Ki*(setpoint-ydata)*dt)
return integ
def derivative(setpoint, Kd, ydata, dt):
deriv = Kd*((setpoint-ydata[:-1])-(setpoint-ydata[1:]))/dt
return deriv
############### Create data ########################
xdata = np.linspace(0,360*3.25,360)
noise = (1+((np.random.rand(xdata.shape[0])-0.5)*0.1))
ydata = ((np.cos(xdata*np.pi/180))*(xdata))*noise
#ydata = np.cos(xdata*np.pi/180)
setpoint = 0
Kp, Ki, Kd = 0.5,0.01,10
dt = xdata[1] - xdata[0]
py = Kp*(setpoint - ydata)
iiy = (setpoint - ydata)
iy = integrate(setpoint, Ki, ydata, dt)
dy = derivative(setpoint, Kd, ydata, dt)
#upid = py[2:]+iy[2:]+dy[1:]
#upi = py[2:]+iy[2:]
############### Plot data ########################
plt.figure(figsize=(10, 4))
plt.title('PID')
plt.plot(xdata, ydata, c=(91/255.,111/255.,189/255.), label = 'Signal')
plt.plot(xdata, py, c=(56/255.,192/255.,255/255.),label = 'Proportional Kp = '+str(Kp))
plt.fill_between(xdata, 0, iiy,facecolor=(191/255.,211/255.,255/255.),edgecolor=(191/255.,211/255.,255/255.), alpha = 0.6,label = 'Error')
plt.plot(xdata, iy, c=(255/255.,0/255.,0/255.),label = 'Integral Ki = '+str(Ki))
plt.plot(xdata[2:], dy[1:], c=(255/255.,150/255.,0/255.),label = 'Derivative Kd = '+str(Kd))
#plt.plot(xdata[2:], upid, 'g',label = 'u PID')
#plt.plot(xdata[2:], upi, 'b',label = 'u PI')
plt.legend(loc = 'best',prop={ 'size': 8})
plt.xlabel('t (s)')
plt.ylabel('Deviation (Arb. Units)')
plt.axis('tight')
plt.subplots_adjust(bottom=0.15)
plt.savefig('PID.svg')
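# The commented-out upid/upi lines above hint at the combined controller
# output u = Kp*e + Ki*integral(e) + Kd*de/dt. A minimal sketch restoring it
# (slice offsets follow the original so the three terms align; the output
# filename is illustrative):
upid = py[2:] + iy[2:] + dy[1:]
plt.figure(figsize=(10, 4))
plt.plot(xdata[2:], upid, 'g', label='u PID')
plt.legend(loc='best')
plt.xlabel('t (s)')
plt.savefig('PID_combined.svg')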
|
[
"garethnisbet@gmail.com"
] |
garethnisbet@gmail.com
|
07909fdf081f870cae5483021ef5489b39e4d9f9
|
0fc6ae6e50ce9125d67b0d5cb526ecf816e1d8f5
|
/department-app/views/__init__.py
|
889ce0f3e11b273e20442d230b9b1fe31d8d2076
|
[] |
no_license
|
Polina-Prakhova/Final-Project
|
3fa39dbb1e70e8f3a730a45555286aef81c7eaa7
|
866f42d741dc33985619db92ca63fc7db71d725f
|
refs/heads/main
| 2023-02-23T23:07:49.165458
| 2021-01-25T14:23:25
| 2021-01-25T14:23:25
| 312,890,102
| 0
| 0
| null | 2020-12-10T20:03:33
| 2020-11-14T19:46:31
|
Python
|
UTF-8
|
Python
| false
| false
| 276
|
py
|
""" Creating Blueprints for views. """
from flask import Blueprint
TEMPLATE_FOLDER = 'templates'
employee_page = Blueprint(
'employee', __name__, template_folder=TEMPLATE_FOLDER)
department_page = Blueprint(
'department', __name__, template_folder=TEMPLATE_FOLDER)
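# Hypothetical registration sketch (the real app factory lives elsewhere in
# the project; the url_prefix values are assumptions):
#
# from flask import Flask
#
# def create_app():
#     app = Flask(__name__)
#     app.register_blueprint(employee_page, url_prefix='/employees')
#     app.register_blueprint(department_page, url_prefix='/departments')
#     return app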
|
[
"polinaprakhova@gmail.com"
] |
polinaprakhova@gmail.com
|
82742f8a5aa21fc09608fb3003d59f346ff7acf6
|
09d96841ed10eb30a8ed8ec3c45af693c5f0d3da
|
/PyPoll/main.py
|
6654d2f2fc0bf5b66ce517270213b235280e86f8
|
[] |
no_license
|
Shandilya/Python-Challenge
|
a0f9bc713aa005c75929bfbb1655c71ba3d1cab8
|
117b42cc2405570458127cf9aa8d59a6961f1e37
|
refs/heads/master
| 2020-03-22T20:23:23.194864
| 2018-11-26T17:54:52
| 2018-11-26T17:54:52
| 140,595,552
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,406
|
py
|
# import dependencies
import os
import csv
import sys
poll_data_csv = os.path.join("C:/Users/Deepti/washu/WASHSTL201806DATA4-Class-Repository-DATA/Week 3 - Python/Homework/PyPoll/Resources","election_data.csv")
# open and read csv commands
with open(poll_data_csv, newline ="") as electiondatacsv:
csvreader = csv.reader(electiondatacsv, delimiter = ',')
    csvheader = next(csvreader)  # skip the header row
totalvotes = 0
    allcandname = []
    kv = 0
    cv = 0
    lv = 0
    ov = 0
for row in csvreader:
totalvotes = totalvotes + 1
allcandname.append(row[2])
# print total no of votes
#print(totalvotes)
# looping
    for i in range(0, totalvotes):
        if allcandname[i] == "Khan":
            kv = kv + 1
        elif allcandname[i] == "Correy":
            cv = cv + 1
        elif allcandname[i] == "Li":
            lv = lv + 1
        elif allcandname[i] == "O'Tooley":
            ov = ov + 1
    # percentages are computed once, after the counts are final
    perck = kv / totalvotes * 100
    percc = cv / totalvotes * 100
    percl = lv / totalvotes * 100
    perco = ov / totalvotes * 100
    # the winner is simply the candidate with the most votes; the original
    # chain of pairwise comparisons could pick the wrong name
    results = {"Khan": kv, "Correy": cv, "Li": lv, "O'Tooley": ov}
    wname = max(results, key=results.get)
    winner = results[wname]
    print("winner " + str(wname))
print("Election Results")
print("-------------------------------------")
print("Total Votes :" + str(totalvotes) )
print("-------------------------------------")
# two ways of printing output
#print("Khan : " + str( perck ) + "%" + "( " + str(kv)+ " )")
print(f" Khan : {perck:.3f} % ({kv})")
print(f" Correy : {percc:.3f} % ({cv})")
print(f" Li : {percl:.3f} % ({lv})")
print(f" O'Tooley : {perco:.3f} % ({ov})")
print("--------------------------------------")
print(f" Winner : {wname}")
print("--------------------------------------")
# Below are samples to check results
#print("Khan Total " + str(kv))
#print("Khan % " + str(perck))
#print("Correy Total " + str(cv))
#print("Correy % " + str(percc))
#print("Li Total " + str(lv))
#print("Li % " + str(percl))
#print("otooley Total " + str(ov))
#print("otoo % " + str(perco))
# Printing the output to the txt file created.
# Export the results to text file
fileoutput = "C:/Users/Deepti/Python-Challenge/PyPoll/pypoll.txt"
with open(fileoutput, "w") as txt_file:
sys.stdout = txt_file
# Generate Output Summary
print("Election Results")
print("-------------------------------------")
print("Total Votes :" + str(totalvotes) )
print("-------------------------------------")
# two ways of printing output
#print("Khan : " + str( perck ) + "%" + "( " + str(kv)+ " )")
print(f" Khan : {perck:.3f} % ({kv})")
print(f" Correy : {percc:.3f} % ({cv})")
print(f" Li : {percl:.3f} % ({lv})")
print(f" O'Tooley : {perco:.3f} % ({ov})")
print("--------------------------------------")
print(f" Winner : {wname}")
print("--------------------------------------")
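# A generalized version of the tally above, sketched with collections.Counter
# so candidate names need not be hard-coded (kept as comments because stdout
# is redirected to the results file at this point):
#
# from collections import Counter
# tally = Counter(allcandname)
# total = sum(tally.values())
# for cand, votes in tally.most_common():
#     print(f" {cand} : {votes / total * 100:.3f} % ({votes})")
# print(f" Winner : {tally.most_common(1)[0][0]}")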
|
[
"deeptis.shandilya@gmail.com"
] |
deeptis.shandilya@gmail.com
|
437818b6f7b3ef1a207bae3f05b3169e8504b5f4
|
86ac26428d84993ce6cd09770f99ed4f05aa0001
|
/excel_read.py
|
f687e135a63f8c828bd29c20396bf7e2c77cbcb5
|
[] |
no_license
|
sha1painkiller/excel-extract
|
4dff4246936083b5c6716aaf0386087bb7384745
|
c01483c42374bab00198208b976f4c0f0eaca7b7
|
refs/heads/master
| 2021-04-26T13:14:37.546729
| 2017-02-03T05:15:17
| 2017-02-03T05:15:17
| 77,613,953
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,958
|
py
|
# Excel (xlsx) information retrieval
# (c) 2016, Burt Lien <burt.lien@gmail.com>
#
# used to generate a report for specific topic
import sys
import pandas as pd
#import numpy as np
def excel_find_relevant(ef, kw, complete):
df = pd.read_excel(ef, header=None, parse_cols="B:H", names=['ID','Desc','Project','Owner','Status','Severity','Note'])
#df is a type of DataFrame now.
#strip redundant space
desc = 'Desc'
df[desc] = df[desc].str.strip()
#find tuples that contain specific key words
query_df = df[df['Project'].str.contains(kw, case=False)]
if query_df.empty:
query_df = df[df['Owner'].str.contains(kw, case=False)]
if query_df.empty:
print('No result found!!')
return
#drop some fields to make the report simpler
if not complete:
query_df = query_df.drop(['Project', 'Status', 'Note'], axis=1)
#do not show the preceding index
#left justified the Description column
#use df['Description'].str.len().max() to compute the length of the longest string in df['Description'], and use that number, N, in a left-justified formatter '{:<Ns}'.format
#the formatting might fail if data is not string type.. to be fixed
print(query_df.to_string(formatters={desc:'{{:<{}s}}'.format(df[desc].str.len().max()).format}, index=False))
if __name__ == '__main__':
if len(sys.argv) < 3:
print('usage: python <this script> <excel file name> <keyword to be searched>')
        print('ex: python excel_read.py weekly_report.xlsx Burt')
sys.exit()
excel_f = sys.argv[1]
keyword = sys.argv[2]
complete = False
    # the value of the 3rd argument is ignored; its meaning is not defined for the moment
if len(sys.argv) == 4:
complete = True
print('---------- looking for \"' + keyword + '\" in \"' + excel_f + '\" ----------')
excel_find_relevant(excel_f, keyword, complete)
print('---------- end of query ----------')
|
[
"burt.lien@gmail.com"
] |
burt.lien@gmail.com
|
a76dd791d73a7a5c02e78172d5512eb1b90c0ba3
|
734698c9f0f26c323da62f25b4c8c99427efb534
|
/ATM.py
|
38a6612f78c09f7b5a05395522a21051ffe764dd
|
[] |
no_license
|
yusuf17-10/C100Pro
|
f794dd540b85e3df7f37138f441e2a2e931faca4
|
0dc874f0ce70986d83320e2a1811ca59d2ee697b
|
refs/heads/main
| 2023-06-24T14:25:46.939563
| 2021-07-23T15:08:00
| 2021-07-23T15:08:00
| 388,840,028
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,022
|
py
|
class ATM(object):
    def __init__(self, cardnumber, pin, balance=100):
        self.cardnumber = cardnumber
        self.pin = pin
        self.balance = balance  # opening balance; the original hard-coded $50 and $100 inconsistently

    def balance_enquiry(self):
        print("Your balance is: $" + str(self.balance))

    def cash_withdrawal(self, amount):
        self.balance = self.balance - amount
        print("You withdrew: $" + str(amount) + ". Your remaining balance is: $" + str(self.balance))

def main():
    name = input("Hello, what's your name: ")
    print("Hello, " + name)
    cardnumber = input("Input your card number: ")
    pin = input("Insert your PIN please: ")
    new_user = ATM(cardnumber, pin)
    print("Choose your activity")
    print("1. Balance Inquiry")
    print("2. Cash Withdrawal")
    activity = int(input("Enter activity choice: "))
    if activity == 1:
        new_user.balance_enquiry()
    elif activity == 2:
        amount = int(input("Enter the amount: "))
        new_user.cash_withdrawal(amount)
    else:
        print("Enter a valid number")

if __name__ == "__main__":
    main()
|
[
"noreply@github.com"
] |
yusuf17-10.noreply@github.com
|
1192e617df7a4aa0c398c5b2ccc03bf74536c2c1
|
c31713418c56a386b4911ff628ea5914b67dad03
|
/zootropolis/wsgi.py
|
be033be94d6635cea18e8d652df2a5161c92eda9
|
[] |
no_license
|
Code-Institute-Submissions/zootropolis-stars
|
348bfbb1cf4853d6e1be2d74ba8cc913c05d793d
|
d4839645ece69112f44f7c5e9a95dc4a46a939fb
|
refs/heads/master
| 2023-02-27T02:54:41.131894
| 2021-02-05T21:14:13
| 2021-02-05T21:14:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 399
|
py
|
"""
WSGI config for zootropolis project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'zootropolis.settings')
application = get_wsgi_application()
|
[
"emal_fiza@hotmail.com"
] |
emal_fiza@hotmail.com
|
ee70d6fe255fa158a1c716a32c09f4a712128c92
|
9d04fa366ba7b7ab7a5bca38058f1a731af61c0b
|
/giris_app/urls.py
|
eba2c9fabd5fb7f963c418339804f23cf132dd70
|
[] |
no_license
|
ayasargithub/unv_yerles
|
d2bbcd03af7013c928bfd3444b5f533d64fc93df
|
aa468886cc6a5034c8e1d83e73b19686733db11e
|
refs/heads/master
| 2022-09-21T10:51:05.742660
| 2020-05-31T21:36:35
| 2020-05-31T21:36:35
| 265,931,918
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 318
|
py
|
from django.urls import path, include
from . import views
from django.contrib import admin
urlpatterns = [
path('',views.index, name="index"),
path('unv_yerles_giris/',views.unv_yerles_giris, name="unv_yerles_giris"),
]
|
[
"ayse.yasar.iu@gmail.com"
] |
ayse.yasar.iu@gmail.com
|
d50a3d5191673cef0a00d5534f2e869693d871d5
|
2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02
|
/PyTorch/contrib/cv/detection/SOLOv2/mmdet/__init__.py
|
eb5cc9b4489898143d3dc5835255d73f4a3d6927
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-proprietary-license",
"GPL-1.0-or-later"
] |
permissive
|
Ascend/ModelZoo-PyTorch
|
4c89414b9e2582cef9926d4670108a090c839d2d
|
92acc188d3a0f634de58463b6676e70df83ef808
|
refs/heads/master
| 2023-07-19T12:40:00.512853
| 2023-07-17T02:48:18
| 2023-07-17T02:48:18
| 483,502,469
| 23
| 6
|
Apache-2.0
| 2022-10-15T09:29:12
| 2022-04-20T04:11:18
|
Python
|
UTF-8
|
Python
| false
| false
| 677
|
py
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .version import __version__, short_version
__all__ = ['__version__', 'short_version']
|
[
"wangjiangben@huawei.com"
] |
wangjiangben@huawei.com
|
d12991a835cebea5697ad448c30115bb551da7b1
|
cd04a80acfeb7c7133d44b264af4ed6b5b71838d
|
/resource/lib/python2.7/site-packages/Crypto/PublicKey/ElGamal.py
|
c15e577a5a53265f99daf84661d6a8877825a697
|
[
"MIT"
] |
permissive
|
claudiopastorini/geofire-python
|
d443eb918cce7da556b66d820b7a0778be961676
|
274e1b1d733a1158e4f36de40f0349dbc1ff6c34
|
refs/heads/master
| 2023-08-02T18:45:16.465099
| 2021-10-05T18:53:48
| 2021-10-05T18:53:48
| 413,938,148
| 0
| 0
|
MIT
| 2021-10-05T18:53:49
| 2021-10-05T18:37:49
|
Python
|
UTF-8
|
Python
| false
| false
| 11,681
|
py
|
#
# ElGamal.py : ElGamal encryption/decryption and signatures
#
# Part of the Python Cryptography Toolkit
#
# Originally written by: A.M. Kuchling
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""ElGamal public-key algorithm (randomized encryption and signature).
Signature algorithm
-------------------
The security of the ElGamal signature scheme is based (like DSA) on the discrete
logarithm problem (DLP_). Given a cyclic group, a generator *g*,
and an element *h*, it is hard to find an integer *x* such that *g^x = h*.
The group is the largest multiplicative sub-group of the integers modulo *p*,
with *p* prime.
The signer holds a value *x* (*0<x<p-1*) as private key, and its public
key (*y* where *y=g^x mod p*) is distributed.
The ElGamal signature is twice as big as *p*.
Encryption algorithm
--------------------
The security of the ElGamal encryption scheme is based on the computational
Diffie-Hellman problem (CDH_). Given a cyclic group, a generator *g*,
and two integers *a* and *b*, it is difficult to find
the element *g^{ab}* when only *g^a* and *g^b* are known, and not *a* and *b*.
As before, the group is the largest multiplicative sub-group of the integers
modulo *p*, with *p* prime.
The receiver holds a value *a* (*0<a<p-1*) as private key, and its public key
(*b* where *b=g^a mod p*) is given to the sender.
The ElGamal ciphertext is twice as big as *p*.
Domain parameters
-----------------
For both signature and encryption schemes, the values *(p,g)* are called
*domain parameters*.
They are not sensitive but must be distributed to all parties (senders and
receivers).
Different signers can share the same domain parameters, as can
different recipients of encrypted messages.
Security
--------
Both DLP and CDH problem are believed to be difficult, and they have been proved
such (and therefore secure) for more than 30 years.
The cryptographic strength is linked to the magnitude of *p*.
In 2012, a sufficient size for *p* is deemed to be 2048 bits.
For more information, see the most recent ECRYPT_ report.
Even though ElGamal algorithms are in theory reasonably secure for new designs,
in practice there are no real good reasons for using them.
The signature is four times larger than the equivalent DSA, and the ciphertext
is two times larger than the equivalent RSA.
Functionality
-------------
This module provides facilities for generating new ElGamal keys and for constructing
them from known components. ElGamal keys allows you to perform basic signing,
verification, encryption, and decryption.
>>> from Crypto import Random
>>> from Crypto.PublicKey import ElGamal
>>> from Crypto.Hash import SHA
>>> from Crypto.Math import Numbers
>>>
>>> message = "Hello"
>>> key = ElGamal.generate(1024, Random.new().read)
>>> h = SHA.new(message).digest()
>>> while 1:
>>> k = Numbers.random_range(min_inclusive=1, min_exclusive=key.p-1)
>>> if k.gcd(key.p-1)==1: break
>>> sig = key.sign(h,k)
>>> ...
>>> if key.verify(h,sig):
>>> print "OK"
>>> else:
>>> print "Incorrect signature"
.. _DLP: http://www.cosic.esat.kuleuven.be/publications/talk-78.pdf
.. _CDH: http://en.wikipedia.org/wiki/Computational_Diffie%E2%80%93Hellman_assumption
.. _ECRYPT: http://www.ecrypt.eu.org/documents/D.SPA.17.pdf
"""
__all__ = ['generate', 'construct', 'ElGamalKey']
from Crypto import Random
from Crypto.Math.Primality import ( generate_probable_safe_prime,
test_probable_prime, COMPOSITE )
from Crypto.Math.Numbers import Integer
# Generate an ElGamal key with N bits
def generate(bits, randfunc):
"""Randomly generate a fresh, new ElGamal key.
The key will be safe for use for both encryption and signature
(although it should be used for **only one** purpose).
:Parameters:
bits : int
Key length, or size (in bits) of the modulus *p*.
Recommended value is 2048.
randfunc : callable
Random number generation function; it should accept
a single integer N and return a string of random data
N bytes long.
:attention: You should always use a cryptographically secure random number generator,
such as the one defined in the ``Crypto.Random`` module; **don't** just use the
current time and the ``random`` module.
:Return: An ElGamal key object (`ElGamalKey`).
"""
obj=ElGamalKey()
# Generate a safe prime p
# See Algorithm 4.86 in Handbook of Applied Cryptography
obj.p = generate_probable_safe_prime(exact_bits=bits, randfunc=randfunc)
q = (obj.p - 1) >> 1
# Generate generator g
# See Algorithm 4.80 in Handbook of Applied Cryptography
# Note that the order of the group is n=p-1=2q, where q is prime
while 1:
# We must avoid g=2 because of Bleichenbacher's attack described
        # in "Generating ElGamal signatures without knowing the secret key",
# 1996
#
obj.g = Integer.random_range(min_inclusive=3,
max_exclusive=obj.p,
randfunc=randfunc)
safe = 1
if pow(obj.g, 2, obj.p)==1:
safe=0
if safe and pow(obj.g, q, obj.p)==1:
safe=0
# Discard g if it divides p-1 because of the attack described
# in Note 11.67 (iii) in HAC
if safe and (obj.p-1) % obj.g == 0:
safe=0
# g^{-1} must not divide p-1 because of Khadir's attack
# described in "Conditions of the generator for forging ElGamal
# signature", 2011
ginv = obj.g.inverse(obj.p)
if safe and (obj.p-1) % ginv == 0:
safe=0
if safe:
break
# Generate private key x
obj.x = Integer.random_range(min_inclusive=2,
max_exclusive=obj.p-1,
randfunc=randfunc)
# Generate public key y
obj.y = pow(obj.g, obj.x, obj.p)
return obj
def construct(tup):
"""Construct an ElGamal key from a tuple of valid ElGamal components.
The modulus *p* must be a prime.
The following conditions must apply:
- 1 < g < p-1
- g^{p-1} = 1 mod p
- 1 < x < p-1
- g^x = y mod p
:Parameters:
tup : tuple
A tuple of long integers, with 3 or 4 items
in the following order:
1. Modulus (*p*).
2. Generator (*g*).
3. Public key (*y*).
4. Private key (*x*). Optional.
:Raise PublicKey.ValueError:
When the key being imported fails the most basic ElGamal validity checks.
:Return: An ElGamal key object (`ElGamalKey`).
"""
obj=ElGamalKey()
if len(tup) not in [3,4]:
raise ValueError('argument for construct() wrong length')
for i in range(len(tup)):
field = obj._keydata[i]
setattr(obj, field, Integer(tup[i]))
fmt_error = test_probable_prime(obj.p) == COMPOSITE
fmt_error |= obj.g<=1 or obj.g>=obj.p
fmt_error |= pow(obj.g, obj.p-1, obj.p)!=1
fmt_error |= obj.y<1 or obj.y>=obj.p
if len(tup)==4:
fmt_error |= obj.x<=1 or obj.x>=obj.p
fmt_error |= pow(obj.g, obj.x, obj.p)!=obj.y
if fmt_error:
raise ValueError("Invalid ElGamal key components")
return obj
class ElGamalKey(object):
"""Class defining an ElGamal key.
:undocumented: __getstate__, __setstate__, __repr__, __getattr__
"""
#: Dictionary of ElGamal parameters.
#:
#: A public key will only have the following entries:
#:
#: - **y**, the public key.
#: - **g**, the generator.
#: - **p**, the modulus.
#:
#: A private key will also have:
#:
#: - **x**, the private key.
_keydata=['p', 'g', 'y', 'x']
def __init__(self, randfunc=None):
if randfunc is None:
randfunc = Random.new().read
self._randfunc = randfunc
def _encrypt(self, M, K):
a=pow(self.g, K, self.p)
b=( pow(self.y, K, self.p)*M ) % self.p
return map(int, ( a,b ))
def _decrypt(self, M):
if (not hasattr(self, 'x')):
raise TypeError('Private key not available in this object')
r = Integer.random_range(min_inclusive=2,
max_exclusive=self.p-1,
randfunc=self._randfunc)
a_blind = (pow(self.g, r, self.p) * M[0]) % self.p
ax=pow(a_blind, self.x, self.p)
plaintext_blind = (ax.inverse(self.p) * M[1] ) % self.p
plaintext = (plaintext_blind * pow(self.y, r, self.p)) % self.p
return int(plaintext)
def _sign(self, M, K):
if (not hasattr(self, 'x')):
raise TypeError('Private key not available in this object')
p1=self.p-1
K = Integer(K)
if (K.gcd(p1)!=1):
raise ValueError('Bad K value: GCD(K,p-1)!=1')
a=pow(self.g, K, self.p)
t=(Integer(M)-self.x*a) % p1
while t<0: t=t+p1
b=(t*K.inverse(p1)) % p1
return map(int, (a, b))
def _verify(self, M, sig):
sig = map(Integer, sig)
if sig[0]<1 or sig[0]>self.p-1:
return 0
v1=pow(self.y, sig[0], self.p)
v1=(v1*pow(sig[0], sig[1], self.p)) % self.p
v2=pow(self.g, M, self.p)
if v1==v2:
return 1
return 0
def has_private(self):
if hasattr(self, 'x'):
return 1
else:
return 0
def can_encrypt(self):
return True
def can_sign(self):
return True
def publickey(self):
return construct((self.p, self.g, self.y))
def __eq__(self, other):
if bool(self.has_private()) != bool(other.has_private()):
return False
result = True
for comp in self._keydata:
            result = result and (getattr(self, comp, None) ==
                                 getattr(other, comp, None))
return result
def __ne__(self, other):
return not self.__eq__(other)
def __getstate__(self):
        # ElGamal key is not picklable
from pickle import PicklingError
raise PicklingError
# Methods defined in PyCrypto that we don't support anymore
def sign(self, M, K):
raise NotImplementedError
def verify(self, M, signature):
raise NotImplementedError
def encrypt(self, plaintext, K):
raise NotImplementedError
def decrypt(self, ciphertext):
raise NotImplementedError
def blind(self, M, B):
raise NotImplementedError
def unblind(self, M, B):
raise NotImplementedError
    def size(self):
raise NotImplementedError
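# Minimal, illustrative round-trip with tiny textbook parameters
# (p=23, g=5, x=6, so y = 5**6 mod 23 = 8). Real keys need ~2048-bit p.
# The underscore-prefixed methods are used only because the public
# encrypt()/decrypt() wrappers above are intentionally disabled.
if __name__ == '__main__':
    toy_key = construct((23, 5, 8, 6))
    ciphertext = list(toy_key._encrypt(9, 3))   # message M=9, ephemeral K=3
    assert toy_key._decrypt(ciphertext) == 9
    print('ElGamal toy round-trip OK: ' + str(ciphertext))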
|
[
"ininex@umich.edu"
] |
ininex@umich.edu
|
ee5342e80feb8c88b5d41d519851dca3b2602f4a
|
bde24afd6af173d489484b6a0677bd1ec12a320e
|
/src/msds510/util.py
|
e6c7b73e681aaf84646164cf1c9518dadc6c9288
|
[] |
no_license
|
Pandelov/msds510
|
dd209a207c9d9cedf9969341d6ad90ae7639666a
|
7603d0e52b3a4d6b15dc92dc8271d6832054221d
|
refs/heads/master
| 2020-03-12T08:16:32.478483
| 2018-06-04T02:27:40
| 2018-06-04T02:27:40
| 130,524,085
| 0
| 0
| null | 2018-06-04T02:27:41
| 2018-04-22T01:06:14
|
Python
|
UTF-8
|
Python
| false
| false
| 3,927
|
py
|
from datetime import date, datetime
import re
import csv
def get_month(dateString):
months = ["jan", "feb", "mar", "apr", "may", "jun",
"jul", "aug", "sep", "oct", "nov", "dec"]
for i in range(0, len(months)):
if months[i] in dateString.lower():
return i + 1
return ''
def get_date_joined(yearString, dateString):
    # note: use the imported `date` class directly; `datetime.date` is an
    # instance method under the import above and cannot be called this way
    return date(int(yearString), get_month(dateString), 1)

def days_since_joined(yearString, dateString):
    rval = date.today() - get_date_joined(yearString, dateString)
    return rval.days
# function that converts an input value into an integer
def to_int(value):
try:
return int(value)
except:
return None
# function that takes as args a collection of items and a value.
# For a list or tuple it returns the integer index of the value;
# for a dict it returns the entry stored under that key.
# Returns None when the lookup fails.
def get_value(items, value):
    try:
        return items.index(value)   # list/tuple lookup
    except AttributeError:
        return items.get(value)     # dict lookup (None if the key is missing)
    except ValueError:
        return None                 # value not present in the sequence
# to boolean
def to_bool(value):
if not value.strip():
return None
else:
return True if value == 'YES' else False
# cleaning up the notes
def clean_notes(value):
return value.strip()
# making nice names
def make_nice_name(name):
newString = name.replace(" ", "_")
newString = newString.replace("/", "_")
newString = newString.strip("?").strip().lower()
newString = re.sub(r'[^0-9a-z_\_]', '', newString)
return newString
# reading the processed csv file
def readProcessedCSVFile(infile):
with open(infile, 'r') as csvfile:
reader = csv.DictReader(csvfile, delimiter=',')
return list(reader)
# sending the below to markdown
def printMarkdown(records, outfile):
with open(outfile, 'w') as ofile:
for idx, rc in enumerate(records):
ofile.write("# " + str(idx + 1) + ". " + rc["name_alias"] + "\n\n")
ofile.write("* Number of Appearances: " + rc["appearances"] + "\n")
ofile.write("* Year Joined: " + rc["year"] + "\n")
ofile.write("* Years Since Joining: "
+ rc["years_since_joining"] + "\n")
ofile.write("* URL: " + rc["url"] + "\n\n")
ofile.write("## Notes \n\n")
ofile.write(rc["notes"] + "\n\n")
def readCSVFile(infile):
with open(infile, 'rU') as csvfile: # open file
reader = csv.DictReader(csvfile, delimiter=',')
return list(reader)
def readFieldNames(infile):
# open file
fieldnames = []
with open(infile, 'rU') as csvfile1:
fieldreader = csv.reader(csvfile1, delimiter=',')
fielddata = list(fieldreader)[0]
fieldnames = [make_nice_name(field) for field in fielddata]
return fieldnames
def cleanFieldNames(data):
retdata = []
for d in data:
newRow = {}
for key, val in d.items():
newRow[make_nice_name(key)] = d[key]
newRow = transform_record(newRow)
retdata.append(newRow)
return retdata
def transform_record(rdict):
rdict["appearances"] = to_int(rdict["appearances"])
rdict["current"] = to_bool(rdict["current"])
rdict["year"] = to_int(rdict["year"])
rdict['years_since_joining'] = date.today().year - rdict['year']
rdict["notes"] = clean_notes(rdict["notes"])
    rdict["month_joined"] = get_month(rdict["full_reserve_avengers_intro"])
for key, val in rdict.items():
if (key.startswith('death') or key.startswith('return')):
rdict[key] = to_bool(rdict[key])
return rdict
def writeCSVFile(outfile, writedata, fieldnames):
with open(outfile, 'w') as csvfile:
writer = csv.DictWriter(csvfile,
fieldnames=fieldnames,
lineterminator='\n')
writer.writeheader()
for d in writedata:
writer.writerow(d)
|
[
"spandelov@my.bellevue.edu"
] |
spandelov@my.bellevue.edu
|
ed998aa5a580d6c24b8a4ee855914be86bd5091f
|
888830190c874f666ff4bc49d0400951cdefface
|
/tests/tests_measurement.py
|
f9cfc5654b291d3e7fe3b38bbdb6bcb7d3314ef2
|
[] |
no_license
|
cogeorg/abm_template
|
8b5e9d6ba076382b6d524a104ebd0dfcf0ade93b
|
b09cde92388ce9de0d5ff22f17260cce784befd9
|
refs/heads/master
| 2020-12-29T02:32:32.264861
| 2017-04-10T17:02:11
| 2017-04-10T17:02:11
| 45,737,598
| 5
| 7
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 43,586
|
py
|
#!/usr/bin/env python
# [SublimeLinter pep8-max-line-length:150]
# -*- coding: utf-8 -*-
"""
black_rhino is a multi-agent simulator for financial network analysis
Copyright (C) 2016 Co-Pierre Georg (co-pierre.georg@keble.ox.ac.uk)
Pawel Fiedor (pawel@fiedor.eu)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, version 3 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import logging
from src.helper import Helper
# -------------------------------------------------------------------------
# class Tests
# -------------------------------------------------------------------------
class TestsMeasurement(object):
#
# VARIABLES
#
#
# METHODS
#
# -------------------------------------------------------------------------
# __init__
# -------------------------------------------------------------------------
def __init__(self):
pass
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# print_info(text)
# -------------------------------------------------------------------------
def print_info(self, text):
print('##############################################################################\n')
print(text)
print('##############################################################################\n')
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# TESTS FOR MEASUREMENT.PY
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# measurement__get_identifier
# -------------------------------------------------------------------------
def measurement__get_identifier(self, args):
import os
from src.bank import Bank
from src.household import Household
from src.firm import Firm
from src.environment import Environment
from src.transaction import Transaction
from src.market import Market
from src.runner import Runner
from src.measurement import Measurement
text = "This test checks measurement.get_identifier \n"
self.print_info(text)
#
# INITIALIZATION
#
environment_directory = str(args[0])
identifier = str(args[1])
log_directory = str(args[2])
# Configure logging parameters so we get output while the program runs
logging.basicConfig(format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %H:%M:%S',
filename=log_directory + identifier + ".log", level=logging.INFO)
logging.info('START logging for test measurement__get_identifier in run: %s',
environment_directory + identifier + ".xml")
# Construct household filename
environment = Environment(environment_directory, identifier)
# Construct a runner
runner = Runner(environment)
# generate a bank
bank = Bank()
bank.identifier = "test_bank"
environment.banks.append(bank)
# generate a firm
firm = Firm()
firm.identifier = "test_firm"
environment.firms.append(firm)
# generate a household
household = Household()
household.identifier = "test_household"
environment.households.append(household)
#
# TESTING
#
measurement = Measurement(environment, runner)
print("Measurement's ID:")
print(measurement.get_identifier())
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# measurement__set_identifier
# -------------------------------------------------------------------------
def measurement__set_identifier(self, args):
import os
from src.bank import Bank
from src.household import Household
from src.firm import Firm
from src.environment import Environment
from src.transaction import Transaction
from src.market import Market
from src.runner import Runner
from src.measurement import Measurement
text = "This test checks measurement.set_identifier \n"
self.print_info(text)
#
# INITIALIZATION
#
environment_directory = str(args[0])
identifier = str(args[1])
log_directory = str(args[2])
# Configure logging parameters so we get output while the program runs
logging.basicConfig(format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %H:%M:%S',
filename=log_directory + identifier + ".log", level=logging.INFO)
logging.info('START logging for test measurement__set_identifier in run: %s',
environment_directory + identifier + ".xml")
        # Construct the environment
environment = Environment(environment_directory, identifier)
# Construct a runner
runner = Runner(environment)
# generate a bank
bank = Bank()
bank.identifier = "test_bank"
environment.banks.append(bank)
# generate a firm
firm = Firm()
firm.identifier = "test_firm"
environment.firms.append(firm)
# generate a household
household = Household()
household.identifier = "test_household"
environment.households.append(household)
#
# TESTING
#
measurement = Measurement(environment, runner)
print("Measurement's ID:")
print(measurement.get_identifier())
measurement.set_identifier("new_measurement_id")
print("Measurement's ID:")
print(measurement.get_identifier())
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# measurement__get_config
# -------------------------------------------------------------------------
def measurement__get_config(self, args):
import os
from src.bank import Bank
from src.household import Household
from src.firm import Firm
from src.environment import Environment
from src.transaction import Transaction
from src.market import Market
from src.runner import Runner
from src.measurement import Measurement
text = "This test checks measurement.get_config \n"
self.print_info(text)
#
# INITIALIZATION
#
environment_directory = str(args[0])
identifier = str(args[1])
log_directory = str(args[2])
# Configure logging parameters so we get output while the program runs
logging.basicConfig(format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %H:%M:%S',
filename=log_directory + identifier + ".log", level=logging.INFO)
logging.info('START logging for test measurement__get_config in run: %s',
environment_directory + identifier + ".xml")
        # Construct the environment
environment = Environment(environment_directory, identifier)
# Construct a runner
runner = Runner(environment)
# generate a bank
bank = Bank()
bank.identifier = "test_bank"
environment.banks.append(bank)
# generate a firm
firm = Firm()
firm.identifier = "test_firm"
environment.firms.append(firm)
# generate a household
household = Household()
household.identifier = "test_household"
environment.households.append(household)
#
# TESTING
#
measurement = Measurement(environment, runner)
print("Measurement's config:")
print(measurement.get_config())
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# measurement__set_config
# -------------------------------------------------------------------------
def measurement__set_config(self, args):
import os
from src.bank import Bank
from src.household import Household
from src.firm import Firm
from src.environment import Environment
from src.transaction import Transaction
from src.market import Market
from src.runner import Runner
from src.measurement import Measurement
text = "This test checks measurement.set_config \n"
self.print_info(text)
#
# INITIALIZATION
#
environment_directory = str(args[0])
identifier = str(args[1])
log_directory = str(args[2])
# Configure logging parameters so we get output while the program runs
logging.basicConfig(format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %H:%M:%S',
filename=log_directory + identifier + ".log", level=logging.INFO)
logging.info('START logging for test measurement__set_config in run: %s',
environment_directory + identifier + ".xml")
        # Construct the environment
environment = Environment(environment_directory, identifier)
# Construct a runner
runner = Runner(environment)
# generate a bank
bank = Bank()
bank.identifier = "test_bank"
environment.banks.append(bank)
# generate a firm
firm = Firm()
firm.identifier = "test_firm"
environment.firms.append(firm)
# generate a household
household = Household()
household.identifier = "test_household"
environment.households.append(household)
#
# TESTING
#
measurement = Measurement(environment, runner)
print("Measurement's config:")
print(measurement.get_config())
measurement.set_config({"test": ["config"]})
print("Measurement's config:")
print(measurement.get_config())
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# measurement__get_environment
# -------------------------------------------------------------------------
def measurement__get_environment(self, args):
import os
from src.bank import Bank
from src.household import Household
from src.firm import Firm
from src.environment import Environment
from src.transaction import Transaction
from src.market import Market
from src.runner import Runner
from src.measurement import Measurement
text = "This test checks measurement.get_environment \n"
self.print_info(text)
#
# INITIALIZATION
#
environment_directory = str(args[0])
identifier = str(args[1])
log_directory = str(args[2])
# Configure logging parameters so we get output while the program runs
logging.basicConfig(format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %H:%M:%S',
filename=log_directory + identifier + ".log", level=logging.INFO)
logging.info('START logging for test measurement__get_environment in run: %s',
environment_directory + identifier + ".xml")
        # Construct the environment
environment = Environment(environment_directory, identifier)
# Construct a runner
runner = Runner(environment)
# generate a bank
bank = Bank()
bank.identifier = "test_bank"
environment.banks.append(bank)
# generate a firm
firm = Firm()
firm.identifier = "test_firm"
environment.firms.append(firm)
# generate a household
household = Household()
household.identifier = "test_household"
environment.households.append(household)
#
# TESTING
#
measurement = Measurement(environment, runner)
print("Measurement's environment:")
print(measurement.get_environment())
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# measurement__set_environment
# -------------------------------------------------------------------------
def measurement__set_environment(self, args):
import os
from src.bank import Bank
from src.household import Household
from src.firm import Firm
from src.environment import Environment
from src.transaction import Transaction
from src.market import Market
from src.runner import Runner
from src.measurement import Measurement
text = "This test checks measurement.set_environment \n"
self.print_info(text)
#
# INITIALIZATION
#
environment_directory = str(args[0])
identifier = str(args[1])
log_directory = str(args[2])
# Configure logging parameters so we get output while the program runs
logging.basicConfig(format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %H:%M:%S',
filename=log_directory + identifier + ".log", level=logging.INFO)
logging.info('START logging for test measurement__set_environment in run: %s',
environment_directory + identifier + ".xml")
        # Construct the environment
environment = Environment(environment_directory, identifier)
# Construct a runner
runner = Runner(environment)
# generate a bank
bank = Bank()
bank.identifier = "test_bank"
environment.banks.append(bank)
# generate a firm
firm = Firm()
firm.identifier = "test_firm"
environment.firms.append(firm)
# generate a household
household = Household()
household.identifier = "test_household"
environment.households.append(household)
#
# TESTING
#
measurement = Measurement(environment, runner)
print("Measurement's environment:")
print(measurement.get_environment())
        # construct a second environment so the setter demonstrably swaps the object
        environment_new = Environment(environment_directory, identifier)
        measurement.set_environment(environment_new)
print("Measurement's environment:")
print(measurement.get_environment())
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# measurement__get_runner
# -------------------------------------------------------------------------
def measurement__get_runner(self, args):
import os
from src.bank import Bank
from src.household import Household
from src.firm import Firm
from src.environment import Environment
from src.transaction import Transaction
from src.market import Market
from src.runner import Runner
from src.measurement import Measurement
text = "This test checks measurement.get_runner \n"
self.print_info(text)
#
# INITIALIZATION
#
environment_directory = str(args[0])
identifier = str(args[1])
log_directory = str(args[2])
# Configure logging parameters so we get output while the program runs
logging.basicConfig(format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %H:%M:%S',
filename=log_directory + identifier + ".log", level=logging.INFO)
logging.info('START logging for test measurement__get_runner in run: %s',
environment_directory + identifier + ".xml")
        # Construct the environment
environment = Environment(environment_directory, identifier)
# Construct a runner
runner = Runner(environment)
# generate a bank
bank = Bank()
bank.identifier = "test_bank"
environment.banks.append(bank)
# generate a firm
firm = Firm()
firm.identifier = "test_firm"
environment.firms.append(firm)
# generate a household
household = Household()
household.identifier = "test_household"
environment.households.append(household)
#
# TESTING
#
measurement = Measurement(environment, runner)
print("Measurement's runner:")
print(measurement.get_runner())
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# measurement__set_runner
# -------------------------------------------------------------------------
def measurement__set_runner(self, args):
import os
from src.bank import Bank
from src.household import Household
from src.firm import Firm
from src.environment import Environment
from src.transaction import Transaction
from src.market import Market
from src.runner import Runner
from src.measurement import Measurement
text = "This test checks measurement.set_runner \n"
self.print_info(text)
#
# INITIALIZATION
#
environment_directory = str(args[0])
identifier = str(args[1])
log_directory = str(args[2])
# Configure logging parameters so we get output while the program runs
logging.basicConfig(format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %H:%M:%S',
filename=log_directory + identifier + ".log", level=logging.INFO)
logging.info('START logging for test measurement__set_runner in run: %s',
environment_directory + identifier + ".xml")
        # Construct the environment
environment = Environment(environment_directory, identifier)
# Construct a runner
runner = Runner(environment)
# generate a bank
bank = Bank()
bank.identifier = "test_bank"
environment.banks.append(bank)
# generate a firm
firm = Firm()
firm.identifier = "test_firm"
environment.firms.append(firm)
# generate a household
household = Household()
household.identifier = "test_household"
environment.households.append(household)
#
# TESTING
#
measurement = Measurement(environment, runner)
print("Measurement's runner:")
print(measurement.get_runner())
        # construct a second runner so the setter demonstrably swaps the object
        runner_new = Runner(environment)
        measurement.set_runner(runner_new)
print("Measurement's runner:")
print(measurement.get_runner())
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# measurement__get_filename
# -------------------------------------------------------------------------
def measurement__get_filename(self, args):
import os
from src.bank import Bank
from src.household import Household
from src.firm import Firm
from src.environment import Environment
from src.transaction import Transaction
from src.market import Market
from src.runner import Runner
from src.measurement import Measurement
text = "This test checks measurement.get_filename \n"
self.print_info(text)
#
# INITIALIZATION
#
environment_directory = str(args[0])
identifier = str(args[1])
log_directory = str(args[2])
# Configure logging parameters so we get output while the program runs
logging.basicConfig(format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %H:%M:%S',
filename=log_directory + identifier + ".log", level=logging.INFO)
logging.info('START logging for test measurement__get_filename in run: %s',
environment_directory + identifier + ".xml")
        # Construct the environment
environment = Environment(environment_directory, identifier)
# Construct a runner
runner = Runner(environment)
# generate a bank
bank = Bank()
bank.identifier = "test_bank"
environment.banks.append(bank)
# generate a firm
firm = Firm()
firm.identifier = "test_firm"
environment.firms.append(firm)
# generate a household
household = Household()
household.identifier = "test_household"
environment.households.append(household)
#
# TESTING
#
measurement = Measurement(environment, runner)
print("Measurement's filename:")
print(measurement.get_filename())
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# measurement__set_filename
# -------------------------------------------------------------------------
def measurement__set_filename(self, args):
import os
from src.bank import Bank
from src.household import Household
from src.firm import Firm
from src.environment import Environment
from src.transaction import Transaction
from src.market import Market
from src.runner import Runner
from src.measurement import Measurement
text = "This test checks measurement.set_filename \n"
self.print_info(text)
#
# INITIALIZATION
#
environment_directory = str(args[0])
identifier = str(args[1])
log_directory = str(args[2])
# Configure logging parameters so we get output while the program runs
logging.basicConfig(format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %H:%M:%S',
filename=log_directory + identifier + ".log", level=logging.INFO)
logging.info('START logging for test measurement__set_filename in run: %s',
environment_directory + identifier + ".xml")
        # Construct the environment
environment = Environment(environment_directory, identifier)
# Construct a runner
runner = Runner(environment)
# generate a bank
bank = Bank()
bank.identifier = "test_bank"
environment.banks.append(bank)
# generate a firm
firm = Firm()
firm.identifier = "test_firm"
environment.firms.append(firm)
# generate a household
household = Household()
household.identifier = "test_household"
environment.households.append(household)
#
# TESTING
#
measurement = Measurement(environment, runner)
print("Measurement's filename:")
print(measurement.get_filename())
measurement.set_filename("TestFilename.csv")
print("Measurement's filename:")
print(measurement.get_filename())
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# measurement__get_file
# -------------------------------------------------------------------------
def measurement__get_file(self, args):
import os
from src.bank import Bank
from src.household import Household
from src.firm import Firm
from src.environment import Environment
from src.transaction import Transaction
from src.market import Market
from src.runner import Runner
from src.measurement import Measurement
text = "This test checks measurement.get_file \n"
self.print_info(text)
#
# INITIALIZATION
#
environment_directory = str(args[0])
identifier = str(args[1])
log_directory = str(args[2])
# Configure logging parameters so we get output while the program runs
logging.basicConfig(format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %H:%M:%S',
filename=log_directory + identifier + ".log", level=logging.INFO)
logging.info('START logging for test measurement__get_file in run: %s',
environment_directory + identifier + ".xml")
        # Construct the environment
environment = Environment(environment_directory, identifier)
# Construct a runner
runner = Runner(environment)
# generate a bank
bank = Bank()
bank.identifier = "test_bank"
environment.banks.append(bank)
# generate a firm
firm = Firm()
firm.identifier = "test_firm"
environment.firms.append(firm)
# generate a household
household = Household()
household.identifier = "test_household"
environment.households.append(household)
#
# TESTING
#
measurement = Measurement(environment, runner)
print("Measurement's file:")
print(measurement.get_file())
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# measurement__set_file
# -------------------------------------------------------------------------
def measurement__set_file(self, args):
import os
from src.bank import Bank
from src.household import Household
from src.firm import Firm
from src.environment import Environment
from src.transaction import Transaction
from src.market import Market
from src.runner import Runner
from src.measurement import Measurement
text = "This test checks measurement.set_file \n"
self.print_info(text)
#
# INITIALIZATION
#
environment_directory = str(args[0])
identifier = str(args[1])
log_directory = str(args[2])
# Configure logging parameters so we get output while the program runs
logging.basicConfig(format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %H:%M:%S',
filename=log_directory + identifier + ".log", level=logging.INFO)
logging.info('START logging for test measurement__set_file in run: %s',
environment_directory + identifier + ".xml")
        # Construct the environment
environment = Environment(environment_directory, identifier)
# Construct a runner
runner = Runner(environment)
# generate a bank
bank = Bank()
bank.identifier = "test_bank"
environment.banks.append(bank)
# generate a firm
firm = Firm()
firm.identifier = "test_firm"
environment.firms.append(firm)
# generate a household
household = Household()
household.identifier = "test_household"
environment.households.append(household)
#
# TESTING
#
        # open an arbitrary existing file (read-only) just to demonstrate the setter
        file_new = open("__init__.py", "r")
measurement = Measurement(environment, runner)
print("Measurement's file:")
print(measurement.get_file())
measurement.set_file(file_new)
print("Measurement's file:")
print(measurement.get_file())
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# measurement__get_csv_writer
# -------------------------------------------------------------------------
def measurement__get_csv_writer(self, args):
import os
from src.bank import Bank
from src.household import Household
from src.firm import Firm
from src.environment import Environment
from src.transaction import Transaction
from src.market import Market
from src.runner import Runner
from src.measurement import Measurement
text = "This test checks measurement.get_csv_writer \n"
self.print_info(text)
#
# INITIALIZATION
#
environment_directory = str(args[0])
identifier = str(args[1])
log_directory = str(args[2])
# Configure logging parameters so we get output while the program runs
logging.basicConfig(format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %H:%M:%S',
filename=log_directory + identifier + ".log", level=logging.INFO)
logging.info('START logging for test measurement__get_csv_writer in run: %s',
environment_directory + identifier + ".xml")
        # Construct the environment
environment = Environment(environment_directory, identifier)
# Construct a runner
runner = Runner(environment)
# generate a bank
bank = Bank()
bank.identifier = "test_bank"
environment.banks.append(bank)
# generate a firm
firm = Firm()
firm.identifier = "test_firm"
environment.firms.append(firm)
# generate a household
household = Household()
household.identifier = "test_household"
environment.households.append(household)
#
# TESTING
#
measurement = Measurement(environment, runner)
print("Measurement's csv_writer:")
print(measurement.get_csv_writer())
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# measurement__set_csv_writer
# -------------------------------------------------------------------------
def measurement__set_csv_writer(self, args):
import os
from src.bank import Bank
from src.household import Household
from src.firm import Firm
from src.environment import Environment
from src.transaction import Transaction
from src.market import Market
from src.runner import Runner
from src.measurement import Measurement
text = "This test checks measurement.set_csv_writer \n"
self.print_info(text)
#
# INITIALIZATION
#
environment_directory = str(args[0])
identifier = str(args[1])
log_directory = str(args[2])
# Configure logging parameters so we get output while the program runs
logging.basicConfig(format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %H:%M:%S',
filename=log_directory + identifier + ".log", level=logging.INFO)
logging.info('START logging for test measurement__set_csv_writer in run: %s',
environment_directory + identifier + ".xml")
        # Construct the environment
environment = Environment(environment_directory, identifier)
# Construct a runner
runner = Runner(environment)
# generate a bank
bank = Bank()
bank.identifier = "test_bank"
environment.banks.append(bank)
# generate a firm
firm = Firm()
firm.identifier = "test_firm"
environment.firms.append(firm)
# generate a household
household = Household()
household.identifier = "test_household"
environment.households.append(household)
#
# TESTING
#
        import csv
        # the writer wraps an arbitrary read-only handle; it is never written to
        # and only serves to demonstrate the setter
        file_new = open("__init__.py", "r")
        csv_writer = csv.writer(file_new, lineterminator='\n')
measurement = Measurement(environment, runner)
print("Measurement's csv_writer:")
print(measurement.get_csv_writer())
measurement.set_csv_writer(csv_writer)
print("Measurement's csv_writer:")
print(measurement.get_csv_writer())
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# measurement__init
# -------------------------------------------------------------------------
def measurement__init(self, args):
import os
from src.bank import Bank
from src.household import Household
from src.firm import Firm
from src.environment import Environment
from src.transaction import Transaction
from src.market import Market
from src.runner import Runner
from src.measurement import Measurement
text = "This test checks measurement.init \n"
self.print_info(text)
#
# INITIALIZATION
#
environment_directory = str(args[0])
identifier = str(args[1])
log_directory = str(args[2])
# Configure logging parameters so we get output while the program runs
logging.basicConfig(format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %H:%M:%S',
filename=log_directory + identifier + ".log", level=logging.INFO)
logging.info('START logging for test measurement__init in run: %s',
environment_directory + identifier + ".xml")
        # Construct the environment
environment = Environment(environment_directory, identifier)
# Construct a runner
runner = Runner(environment)
# generate a bank
bank = Bank()
bank.identifier = "test_bank"
environment.banks.append(bank)
# generate a firm
firm = Firm()
firm.identifier = "test_firm"
environment.firms.append(firm)
# generate a household
household = Household()
household.identifier = "test_household"
environment.households.append(household)
#
# TESTING
#
measurement = Measurement(environment, runner)
print(measurement)
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# measurement__open_file
# -------------------------------------------------------------------------
def measurement__open_file(self, args):
import os
from src.bank import Bank
from src.household import Household
from src.firm import Firm
from src.environment import Environment
from src.transaction import Transaction
from src.market import Market
from src.runner import Runner
from src.measurement import Measurement
text = "This test checks measurement.open_file \n"
self.print_info(text)
#
# INITIALIZATION
#
environment_directory = str(args[0])
identifier = str(args[1])
log_directory = str(args[2])
# Configure logging parameters so we get output while the program runs
logging.basicConfig(format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %H:%M:%S',
filename=log_directory + identifier + ".log", level=logging.INFO)
logging.info('START logging for test measurement__open_file in run: %s',
environment_directory + identifier + ".xml")
        # Construct the environment
environment = Environment(environment_directory, identifier)
# Construct a runner
runner = Runner(environment)
# generate a bank
bank = Bank()
bank.identifier = "test_bank"
environment.banks.append(bank)
# generate a firm
firm = Firm()
firm.identifier = "test_firm"
environment.firms.append(firm)
# generate a household
household = Household()
household.identifier = "test_household"
environment.households.append(household)
#
# TESTING
#
measurement = Measurement(environment, runner)
measurement.open_file()
print(measurement.get_file())
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# measurement__write_to_file
# -------------------------------------------------------------------------
def measurement__write_to_file(self, args):
import os
from src.bank import Bank
from src.household import Household
from src.firm import Firm
from src.environment import Environment
from src.transaction import Transaction
from src.market import Market
from src.runner import Runner
from src.measurement import Measurement
text = "This test checks measurement.write_to_file \n"
self.print_info(text)
#
# INITIALIZATION
#
environment_directory = str(args[0])
identifier = str(args[1])
log_directory = str(args[2])
# Configure logging parameters so we get output while the program runs
logging.basicConfig(format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %H:%M:%S',
filename=log_directory + identifier + ".log", level=logging.INFO)
logging.info('START logging for test measurement__write_to_file in run: %s',
environment_directory + identifier + ".xml")
        # Construct the environment
environment = Environment(environment_directory, identifier)
# Construct a runner
runner = Runner(environment)
# generate a bank
bank = Bank()
bank.identifier = "test_bank"
environment.banks.append(bank)
# generate a firm
firm = Firm()
firm.identifier = "test_firm"
environment.firms.append(firm)
# generate a household
household = Household()
household.identifier = "test_household"
environment.households.append(household)
#
# TESTING
#
measurement = Measurement(environment, runner)
measurement.open_file()
print(measurement.get_file())
print("The current size of the file: ")
print(measurement.file.tell())
measurement.write_to_file()
print(measurement.get_file())
print("The current size of the file: ")
print(measurement.file.tell())
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# measurement__close_file
# -------------------------------------------------------------------------
def measurement__close_file(self, args):
import os
from src.bank import Bank
from src.household import Household
from src.firm import Firm
from src.environment import Environment
from src.transaction import Transaction
from src.market import Market
from src.runner import Runner
from src.measurement import Measurement
text = "This test checks measurement.close_file \n"
self.print_info(text)
#
# INITIALIZATION
#
environment_directory = str(args[0])
identifier = str(args[1])
log_directory = str(args[2])
# Configure logging parameters so we get output while the program runs
logging.basicConfig(format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %H:%M:%S',
filename=log_directory + identifier + ".log", level=logging.INFO)
logging.info('START logging for test measurement__close_file in run: %s',
environment_directory + identifier + ".xml")
        # Construct the environment
environment = Environment(environment_directory, identifier)
# Construct a runner
runner = Runner(environment)
# generate a bank
bank = Bank()
bank.identifier = "test_bank"
environment.banks.append(bank)
# generate a firm
firm = Firm()
firm.identifier = "test_firm"
environment.firms.append(firm)
# generate a household
household = Household()
household.identifier = "test_household"
environment.households.append(household)
#
# TESTING
#
measurement = Measurement(environment, runner)
measurement.open_file()
print("Is the file closed?")
print(measurement.file.closed)
measurement.close_file()
print("Is the file closed?")
print(measurement.file.closed)
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# measurement__read_xml_config_file
# -------------------------------------------------------------------------
def measurement__read_xml_config_file(self, args):
import os
from src.bank import Bank
from src.household import Household
from src.firm import Firm
from src.environment import Environment
from src.transaction import Transaction
from src.market import Market
from src.runner import Runner
from src.measurement import Measurement
text = "This test checks measurement.read_xml_config_file \n"
self.print_info(text)
#
# INITIALIZATION
#
environment_directory = str(args[0])
identifier = str(args[1])
log_directory = str(args[2])
# Configure logging parameters so we get output while the program runs
logging.basicConfig(format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %H:%M:%S',
filename=log_directory + identifier + ".log", level=logging.INFO)
logging.info('START logging for test measurement__read_xml_config_file in run: %s',
environment_directory + identifier + ".xml")
        # Construct the environment
environment = Environment(environment_directory, identifier)
# Construct a runner
runner = Runner(environment)
# generate a bank
bank = Bank()
bank.identifier = "test_bank"
environment.banks.append(bank)
# generate a firm
firm = Firm()
firm.identifier = "test_firm"
environment.firms.append(firm)
# generate a household
household = Household()
household.identifier = "test_household"
environment.households.append(household)
#
# TESTING
#
measurement = Measurement(environment, runner)
measurement.read_xml_config_file(environment.measurement_config)
print("Identifier read from the config:")
print(measurement.identifier)
# -------------------------------------------------------------------------
# coding=utf-8
# Copyright 2021 The Fairseq Authors and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch Wav2Vec2 model. """
from typing import Optional, Tuple
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
from torch import nn
from ...activations import ACT2FN
from ...file_utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...modeling_outputs import BaseModelOutput, MaskedLMOutput
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_wav2vec2 import Wav2Vec2Config
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "Wav2Vec2Config"
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST = [
"facebook/wav2vec2-base-960h"
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
]
class Wav2Vec2NoLayerNormConvLayer(nn.Module):
def __init__(self, config, layer_id=0):
super().__init__()
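        # layer 0 consumes the 1-channel raw waveform; deeper layers take
        # config.conv_dim channels (indexing at layer_id assumes uniform widths)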
self.in_conv_dim = config.conv_dim[layer_id] if layer_id > 0 else 1
self.out_conv_dim = config.conv_dim[layer_id]
self.conv = nn.Conv1d(
self.in_conv_dim,
self.out_conv_dim,
kernel_size=config.conv_kernel[layer_id],
stride=config.conv_stride[layer_id],
bias=config.conv_bias,
)
self.dropout = nn.Dropout(config.feat_extract_dropout)
self.activation = ACT2FN[config.feat_extract_activation]
def forward(self, hidden_states):
hidden_states = self.conv(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.activation(hidden_states)
return hidden_states
class Wav2Vec2LayerNormConvLayer(nn.Module):
def __init__(self, config, layer_id=0):
super().__init__()
self.in_conv_dim = config.conv_dim[layer_id] if layer_id > 0 else 1
self.out_conv_dim = config.conv_dim[layer_id]
self.conv = nn.Conv1d(
self.in_conv_dim,
self.out_conv_dim,
kernel_size=config.conv_kernel[layer_id],
stride=config.conv_stride[layer_id],
bias=config.conv_bias,
)
self.dropout = nn.Dropout(config.feat_extract_dropout)
self.layer_norm = nn.LayerNorm(self.out_conv_dim, elementwise_affine=True)
self.activation = ACT2FN[config.feat_extract_activation]
def forward(self, hidden_states):
hidden_states = self.conv(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = hidden_states.transpose(-2, -1)
hidden_states = self.layer_norm(hidden_states)
hidden_states = hidden_states.transpose(-2, -1)
hidden_states = self.activation(hidden_states)
return hidden_states
class Wav2Vec2GroupNormConvLayer(nn.Module):
def __init__(self, config, layer_id=0):
super().__init__()
self.in_conv_dim = config.conv_dim[layer_id] if layer_id > 0 else 1
self.out_conv_dim = config.conv_dim[layer_id]
self.conv = nn.Conv1d(
self.in_conv_dim,
self.out_conv_dim,
kernel_size=config.conv_kernel[layer_id],
stride=config.conv_stride[layer_id],
bias=config.conv_bias,
)
self.dropout = nn.Dropout(config.feat_extract_dropout)
self.activation = ACT2FN[config.feat_extract_activation]
self.layer_norm = nn.GroupNorm(num_groups=self.out_conv_dim, num_channels=self.out_conv_dim, affine=True)
def forward(self, hidden_states):
hidden_states = self.conv(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.layer_norm(hidden_states)
hidden_states = self.activation(hidden_states)
return hidden_states
class Wav2Vec2PositionalConvEmbedding(nn.Module):
def __init__(self, config):
super().__init__()
self.conv = nn.Conv1d(
config.hidden_size,
config.hidden_size,
kernel_size=config.num_conv_pos_embeddings,
padding=config.num_conv_pos_embeddings // 2,
groups=config.num_conv_pos_embedding_groups,
)
self.conv = nn.utils.weight_norm(self.conv, name="weight", dim=2)
self.padding = Wav2Vec2SamePadLayer(config.num_conv_pos_embeddings)
self.activation = ACT2FN[config.feat_extract_activation]
def forward(self, hidden_states):
hidden_states = hidden_states.transpose(1, 2)
hidden_states = self.conv(hidden_states)
hidden_states = self.padding(hidden_states)
hidden_states = self.activation(hidden_states)
hidden_states = hidden_states.transpose(1, 2)
return hidden_states
class Wav2Vec2SamePadLayer(nn.Module):
def __init__(self, num_conv_pos_embeddings):
super().__init__()
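        # the positional conv pads by kernel_size // 2 on both sides; for an even
        # kernel this produces one frame too many, which is trimmed off in forward()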
self.num_pad_remove = 1 if num_conv_pos_embeddings % 2 == 0 else 0
def forward(self, hidden_states):
if self.num_pad_remove > 0:
hidden_states = hidden_states[:, :, : -self.num_pad_remove]
return hidden_states
class Wav2Vec2FeatureExtractor(nn.Module):
"""Construct the featurs from raw audio waveform"""
def __init__(self, config):
super().__init__()
if config.feat_extract_norm == "group":
conv_layers = [Wav2Vec2GroupNormConvLayer(config, layer_id=0)] + [
Wav2Vec2NoLayerNormConvLayer(config, layer_id=i + 1) for i in range(config.num_feat_extract_layers - 1)
]
elif config.feat_extract_norm == "layer":
conv_layers = [
Wav2Vec2LayerNormConvLayer(config, layer_id=i) for i in range(config.num_feat_extract_layers)
]
else:
raise ValueError(
f"`config.feat_extract_norm` is {config.feat_extract_norm}, but has to be one of ['group', 'layer']"
)
self.conv_layers = nn.ModuleList(conv_layers)
def forward(self, input_values):
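        # add a channel dimension: (batch_size, seq_len) -> (batch_size, 1, seq_len)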
hidden_states = input_values[:, None]
for conv_layer in self.conv_layers:
hidden_states = conv_layer(hidden_states)
return hidden_states
class Wav2Vec2FeatureProjection(nn.Module):
def __init__(self, config):
super().__init__()
self.layer_norm = nn.LayerNorm(config.conv_dim[-1], eps=config.layer_norm_eps)
self.projection = nn.Linear(config.conv_dim[-1], config.hidden_size)
self.dropout = nn.Dropout(config.feat_extract_dropout)
def forward(self, hidden_states):
hidden_states = hidden_states.transpose(1, 2)
hidden_states = self.layer_norm(hidden_states)
hidden_states = self.projection(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
# Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->Wav2Vec2
class Wav2Vec2Attention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(
self,
embed_dim: int,
num_heads: int,
dropout: float = 0.0,
is_decoder: bool = False,
bias: bool = True,
):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
assert (
self.head_dim * num_heads == self.embed_dim
), f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {num_heads})."
self.scaling = self.head_dim ** -0.5
self.is_decoder = is_decoder
self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
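        # (bsz, seq_len, embed_dim) -> (bsz, num_heads, seq_len, head_dim)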
return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
def forward(
self,
hidden_states: torch.Tensor,
key_value_states: Optional[torch.Tensor] = None,
past_key_value: Optional[Tuple[torch.Tensor]] = None,
attention_mask: Optional[torch.Tensor] = None,
layer_head_mask: Optional[torch.Tensor] = None,
output_attentions: bool = False,
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
# if key_value_states are provided this layer is used as a cross-attention layer
# for the decoder
is_cross_attention = key_value_states is not None
bsz, tgt_len, embed_dim = hidden_states.size()
# get query proj
query_states = self.q_proj(hidden_states) * self.scaling
# get key, value proj
if is_cross_attention and past_key_value is not None:
# reuse k,v, cross_attentions
key_states = past_key_value[0]
value_states = past_key_value[1]
elif is_cross_attention:
# cross_attentions
key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
elif past_key_value is not None:
# reuse k, v, self_attention
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
key_states = torch.cat([past_key_value[0], key_states], dim=2)
value_states = torch.cat([past_key_value[1], value_states], dim=2)
else:
# self_attention
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
if self.is_decoder:
# if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
# Further calls to cross_attention layer can then reuse all cross-attention
# key/value_states (first "if" case)
# if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
# all previous decoder key/value_states. Further calls to uni-directional self-attention
# can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
# if encoder bi-directional self-attention `past_key_value` is always `None`
past_key_value = (key_states, value_states)
proj_shape = (bsz * self.num_heads, -1, self.head_dim)
query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
key_states = key_states.view(*proj_shape)
value_states = value_states.view(*proj_shape)
src_len = key_states.size(1)
attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
assert attn_weights.size() == (
bsz * self.num_heads,
tgt_len,
src_len,
), f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {attn_weights.size()}"
if attention_mask is not None:
assert attention_mask.size() == (
bsz,
1,
tgt_len,
src_len,
), f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
attn_weights = F.softmax(attn_weights, dim=-1)
if layer_head_mask is not None:
assert layer_head_mask.size() == (
self.num_heads,
), f"Head mask for a single layer should be of size {(self.num_heads,)}, but is {layer_head_mask.size()}"
attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
if output_attentions:
            # this operation is a bit awkward, but it is required to make sure
            # that attn_weights keeps its gradient. To achieve this, attn_weights
            # has to be reshaped twice and reused in the computation below
attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
else:
attn_weights_reshaped = None
attn_probs = F.dropout(attn_weights, p=self.dropout, training=self.training)
attn_output = torch.bmm(attn_probs, value_states)
assert attn_output.size() == (
bsz * self.num_heads,
tgt_len,
self.head_dim,
), f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is {attn_output.size()}"
attn_output = (
attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
.transpose(1, 2)
.reshape(bsz, tgt_len, embed_dim)
)
attn_output = self.out_proj(attn_output)
return attn_output, attn_weights_reshaped, past_key_value
class Wav2Vec2FeedForward(nn.Module):
def __init__(self, config):
super().__init__()
self.intermediate_dropout = nn.Dropout(config.hidden_dropout_prob)
self.intermediate_dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
self.output_dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.output_dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states):
hidden_states = self.intermediate_dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
hidden_states = self.intermediate_dropout(hidden_states)
hidden_states = self.output_dense(hidden_states)
hidden_states = self.output_dropout(hidden_states)
return hidden_states
class Wav2Vec2Output(nn.Module):
def __init__(self, config):
super().__init__()
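        # NOTE: this module appears to be a no-op placeholder; forward() returns
        # hidden_states unchanged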
def forward(self, hidden_states, input_tensor):
return hidden_states
class Wav2Vec2EncoderLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.attention = Wav2Vec2Attention(
embed_dim=config.hidden_size,
num_heads=config.num_attention_heads,
dropout=config.hidden_dropout_prob,
is_decoder=False,
)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.feed_forward = Wav2Vec2FeedForward(config)
self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states, output_attentions=False):
attn_residual = hidden_states
hidden_states, attn_weights, _ = self.attention(hidden_states, output_attentions=output_attentions)
hidden_states = self.dropout(hidden_states)
hidden_states = attn_residual + hidden_states
hidden_states = self.layer_norm(hidden_states)
hidden_states = hidden_states + self.feed_forward(hidden_states)
hidden_states = self.final_layer_norm(hidden_states)
return hidden_states, attn_weights
class Wav2Vec2EncoderLayerStableLayerNorm(nn.Module):
def __init__(self, config):
super().__init__()
self.attention = Wav2Vec2Attention(
embed_dim=config.hidden_size,
num_heads=config.num_attention_heads,
dropout=config.hidden_dropout_prob,
is_decoder=False,
)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.feed_forward = Wav2Vec2FeedForward(config)
self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states, output_attentions=False):
attn_residual = hidden_states
hidden_states = self.layer_norm(hidden_states)
hidden_states, attn_weights, _ = self.attention(hidden_states, output_attentions=output_attentions)
hidden_states = self.dropout(hidden_states)
hidden_states = attn_residual + hidden_states
hidden_states = hidden_states + self.feed_forward(self.final_layer_norm(hidden_states))
return hidden_states, attn_weights
class Wav2Vec2Encoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.pos_conv_embed = Wav2Vec2PositionalConvEmbedding(config)
self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        # IMPORTANT: the dropout parameter used here is probably wrong
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.layers = nn.ModuleList([Wav2Vec2EncoderLayer(config) for _ in range(config.num_hidden_layers)])
def forward(
self,
hidden_states,
output_attentions=False,
output_hidden_states=False,
return_dict=True,
):
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
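        # add convolutional positional embeddings before the transformer layers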
position_embeddings = self.pos_conv_embed(hidden_states)
hidden_states = hidden_states + position_embeddings
hidden_states = self.layer_norm(hidden_states)
hidden_states = self.dropout(hidden_states)
for layer in self.layers:
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
hidden_states, attn_weights = layer(hidden_states, output_attentions=output_attentions)
if output_attentions:
all_self_attentions = all_self_attentions + (attn_weights,)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
)
class Wav2Vec2EncoderStableLayerNorm(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.pos_conv_embed = Wav2Vec2PositionalConvEmbedding(config)
self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        # IMPORTANT: the dropout parameter used here is probably wrong
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.layers = nn.ModuleList(
[Wav2Vec2EncoderLayerStableLayerNorm(config) for _ in range(config.num_hidden_layers)]
)
def forward(
self,
hidden_states,
output_attentions=False,
output_hidden_states=False,
return_dict=True,
):
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
position_embeddings = self.pos_conv_embed(hidden_states)
hidden_states = hidden_states + position_embeddings
hidden_states = self.dropout(hidden_states)
for layer in self.layers:
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
hidden_states, attn_weights = layer(hidden_states, output_attentions=output_attentions)
if output_attentions:
all_self_attentions = all_self_attentions + (attn_weights,)
hidden_states = self.layer_norm(hidden_states)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
)
class Wav2Vec2PreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = Wav2Vec2Config
base_model_prefix = "wav2vec2"
_keys_to_ignore_on_load_missing = [r"position_ids"]
def _init_weights(self, module):
""" Initialize the weights """
if isinstance(module, nn.Linear):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
elif isinstance(module, (nn.LayerNorm, nn.GroupNorm)):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
elif isinstance(module, nn.Conv1d):
torch.nn.init.kaiming_normal_(module.weight.data)
if isinstance(module, (nn.Linear, nn.Conv1d)) and module.bias is not None:
module.bias.data.zero_()
WAV_2_VEC_2_START_DOCSTRING = r"""
Wav2Vec2 was proposed in `wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations
<https://arxiv.org/abs/2006.11477>`__ by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli.
This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
    methods the library implements for all its models (such as downloading or saving, etc.).
This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config (:class:`~transformers.Wav2Vec2Config`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
WAV_2_VEC_2_INPUTS_DOCSTRING = r"""
Args:
input_values (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`):
Float values of input raw speech waveform. Values can be obtained by loading a `.flac` or `.wav` audio file
into an array of type `List[float]` or a `numpy.ndarray`, *e.g.* via the soundfile library (`pip install
soundfile`). To prepare the array into `input_values`, the :class:`~transformers.Wav2Vec2Tokenizer` should
be used for padding and conversion into a tensor of type `torch.FloatTensor`. See
:meth:`transformers.Wav2Vec2Tokenizer.__call__` for details.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
@add_start_docstrings(
"The bare Wav2Vec2 Model transformer outputting raw hidden-states without any specific head on top.",
WAV_2_VEC_2_START_DOCSTRING,
)
class Wav2Vec2Model(Wav2Vec2PreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.config = config
self.feature_extractor = Wav2Vec2FeatureExtractor(config)
self.feature_projection = Wav2Vec2FeatureProjection(config)
if config.do_stable_layer_norm:
self.encoder = Wav2Vec2EncoderStableLayerNorm(config)
else:
self.encoder = Wav2Vec2Encoder(config)
self.init_weights()
@add_start_docstrings_to_model_forward(WAV_2_VEC_2_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_values,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
"""
Returns:
Example::
>>> from transformers import Wav2Vec2Tokenizer, Wav2Vec2Model
>>> from datasets import load_dataset
>>> import soundfile as sf
>>> tokenizer = Wav2Vec2Tokenizer.from_pretrained("facebook/wav2vec2-base-960h")
>>> model = Wav2Vec2Model.from_pretrained("facebook/wav2vec2-base-960h")
>>> def map_to_array(batch):
>>> speech, _ = sf.read(batch["file"])
>>> batch["speech"] = speech
>>> return batch
>>> ds = load_dataset("patrickvonplaten/librispeech_asr_dummy", "clean", split="validation")
>>> ds = ds.map(map_to_array)
>>> input_values = tokenizer(ds["speech"][0], return_tensors="pt").input_values # Batch size 1
>>> hidden_states = model(input_values).last_hidden_state
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
hidden_states = self.feature_extractor(input_values)
hidden_states = self.feature_projection(hidden_states)
encoder_outputs = self.encoder(
hidden_states,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = encoder_outputs[0]
if not return_dict:
return (hidden_states,) + encoder_outputs[1:]
return BaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
)
@add_start_docstrings("""Wav2Vec2 Model with a `language modeling` head on top. """, WAV_2_VEC_2_START_DOCSTRING)
class Wav2Vec2ForMaskedLM(Wav2Vec2PreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.wav2vec2 = Wav2Vec2Model(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size)
self.init_weights()
@add_start_docstrings_to_model_forward(WAV_2_VEC_2_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_values,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
labels=None,
):
r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
TODO(PVP): Fill out when adding training
Returns:
Example::
>>> from transformers import Wav2Vec2Tokenizer, Wav2Vec2Model
>>> from datasets import load_dataset
>>> import soundfile as sf
>>> tokenizer = Wav2Vec2Tokenizer.from_pretrained("facebook/wav2vec2-base-960h")
>>> model = Wav2Vec2ForMaskedLM.from_pretrained("facebook/wav2vec2-base-960h")
>>> def map_to_array(batch):
>>> speech, _ = sf.read(batch["file"])
>>> batch["speech"] = speech
>>> return batch
>>> ds = load_dataset("patrickvonplaten/librispeech_asr_dummy", "clean", split="validation")
>>> ds = ds.map(map_to_array)
>>> input_values = tokenizer(ds["speech"][0], return_tensors="pt").input_values # Batch size 1
>>> logits = model(input_values).logits
>>> predicted_ids = torch.argmax(logits, dim=-1)
>>> transcription = tokenizer.decode(predicted_ids[0])
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.wav2vec2(
input_values,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = outputs[0]
hidden_states = self.dropout(hidden_states)
logits = self.lm_head(hidden_states)
if not return_dict:
output = (logits,) + outputs[1:]
return output
return MaskedLMOutput(logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
import numpy as np
import torch
# from GeoPlot import *
from scipy.linalg import expm, logm
import warnings
import SimpleITK as sitk
import matplotlib.pyplot as plt
from IPython.core.debugger import set_trace
import scipy
# Refer to martin-bauer-GSEVP[5852].pdf, Section 3 ("Relevant properties of M and K"), and the accompanying MATLAB code.
def trKsquare(B, A):
G = torch.cholesky(B)
inv_G = torch.inverse(G)
W = torch.einsum("...ij,...jk,...lk->...il", inv_G, A, inv_G)
    # GPU variant (commented out below):
# lamda = torch.symeig(W.to(device=torch.device('cpu')), eigenvectors=True)[
# 0] # symeig is super slow on GPU due to Magma
# result = torch.sum(torch.log(lamda.to(device=torch.device('cuda'))) ** 2, (-1))
    # CPU variant (used):
lamda = torch.symeig(W, eigenvectors=True)[0]
result = torch.sum(torch.log(lamda) ** 2, (-1))
return result
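# Illustration (added, not in the original module): W = G^{-1} A G^{-T} is
# similar to B^{-1} A, so the sum of squared logs of its eigenvalues equals
# tr(log(B^{-1} A)^2). E.g. for B = I and A = e * I (batched as (1, 3, 3)),
# trKsquare(B, A) returns tensor([3.], dtype=torch.float64).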
# the squared distance for the split Ebin metric
def Squared_distance_Ebin(g0, g1, a, mask):
# inputs: g0.shape, g1.shape = [h, w, d, 3, 3]
# output: scalar
# a = 1/dim
inv_g0_g1 = torch.einsum("...ik,...kj->...ij", torch.inverse(g0), g1)
    trK0square = trKsquare(g0, g1) - torch.log(torch.det(inv_g0_g1)) ** 2 * a  # a replaces the previously hard-coded 1/3
    # trK0square is the tr(K_0^2) term in Ebin_metric.pdf Eq. (3)
theta = torch.min((trK0square / a + 1e-40).sqrt() / 4., torch.tensor(np.pi))
alpha, beta = torch.det(g0).pow(1. / 4.), torch.det(g1).pow(1. / 4.)
E = 16 * a * (alpha ** 2 - 2 * alpha * beta * torch.cos(theta) + beta ** 2)
return torch.einsum("ijk,ijk->", E, mask.squeeze())
# return torch.einsum("ijk,ijk->",[E, mask.squeeze()])
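# Geometric reading (added): each metric is represented by a point with radius
# (det g)^{1/4} on a cone, theta is the (capped) angle between the two points,
# and E is the squared cone distance
#   16*a*(alpha^2 - 2*alpha*beta*cos(theta) + beta^2);
# the einsum then sums E over the voxels selected by `mask`.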
# the inverse Riemannian exponential map
def logm_invB_A(B, A):
# inputs: A/B.shape = [h, w, d, 3, 3]
# output: shape = [h, w, d, 3, 3]
G = torch.cholesky(B)
inv_G = torch.inverse(G)
W = torch.einsum("...ij,...jk,...lk->...il", inv_G, A, inv_G)
lamda, Q = torch.symeig(W.to(device=torch.device('cpu')), eigenvectors=True)
# log_lamda = torch.zeros((*lamda.shape, lamda.shape[-1]))#,dtype=torch.double
# for i in range(lamda.shape[-1]):
# log_lamda[:, i, i] = torch.log(lamda[:, i])
log_lamda = torch.diag_embed(torch.log(lamda.to(device=torch.device('cuda'))))
V = torch.einsum('...ji,...jk->...ik', inv_G, Q.to(device=torch.device('cuda')))
inv_V = torch.inverse(V)
return torch.einsum('...ij,...jk,...kl->...il', V, log_lamda, inv_V)
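# Usage note (added): logm_invB_A(B, A) returns log(B^{-1} A), computed through
# the Cholesky factor of B for stability, so torch.matrix_exp(logm_invB_A(B, A))
# approximately equals torch.inverse(B) @ A (shapes (..., 3, 3); note the
# implementation runs the eigendecomposition on CPU and moves results to CUDA).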
# 2 without for loops using Kyle's method
def inv_RieExp(g0, g1, a): # g0,g1: two tensors of size (s,t,...,3,3), where g0\neq 0
    '''Compute the inverse Riemannian exponential of g1, assuming g1 lies in the
    image of the maximal domain of the Riemannian exponential at g0.
    '''
n = g1.size(-1)
# matrix multiplication
inv_g0_g1 = torch.einsum("...ik,...kj->...ij", torch.inverse(g0), g1) # (s,t,...,3,3)
def get_u_g0direction(g0, inv_g0_g1): # (-1,3,3) first reshape g0,g1,inv_g..
# permute
inv_g0_g1 = torch.einsum("...ij->ij...", inv_g0_g1) # (3,3,-1)
s = inv_g0_g1[0, 0] # (-1)
u = 4 / n * (s ** (n / 4) - 1) * torch.einsum("...ij->ij...", g0) # (-1)@(3,3,-1) -> (3,3,-1)
return u.permute(2, 0, 1) # (-1,3,3)
def get_u_ng0direction(g0, g1, inv_g0_g1, a): # (-1,3,3) first reshape g0,g1,inv_g..
K = logm_invB_A(g0, g1)
KTrless = K - torch.einsum("...ii,kl->...kl", K, torch.eye(n, dtype=torch.double)) / n # (-1,3,3)
# AA^T
theta = (1 / a * torch.einsum("...ik,...ki->...", KTrless, KTrless)).sqrt() / 4 # (-1)
gamma = torch.det(g1).pow(1 / 4) / (torch.det(g0).pow(1 / 4)) # (-1)
A = 4 / n * (gamma * torch.cos(theta) - 1) # (-1)
B = 1 / theta * gamma * torch.sin(theta)
u = A * torch.einsum("...ij->ij...", g0) + B * torch.einsum("...ik,...kj->ij...",
g0, KTrless) # (-1)@(3,3,-1) -> (3,3,-1)
return u.permute(2, 0, 1) # (-1,3,3)
inv_g0_g1_trless = inv_g0_g1 - torch.einsum("...ii,kl->...kl",
inv_g0_g1, torch.eye(n, dtype=torch.double)) / n # (s,t,...,3,3)
norm0 = torch.einsum("...ij,...ij->...", inv_g0_g1_trless, inv_g0_g1_trless).reshape(-1) # (-1)
# find the indices for which the entries are 0s and non0s
Ind0 = (norm0 <= 1e-12).nonzero().reshape(-1) # using squeeze results in [1,1]->[]
Indn0 = (norm0 > 1e-12).nonzero().reshape(-1)
u = torch.zeros(g0.reshape(-1, n, n).size(), dtype=torch.double) # (-1,3,3)
if len(Indn0) == 0:
u = get_u_g0direction(g0.reshape(-1, n, n), inv_g0_g1.reshape(-1, n, n))
elif len(Ind0) == 0:
u = get_u_ng0direction(g0.reshape(-1, n, n), g1.reshape(-1, n, n), inv_g0_g1.reshape(-1, n, n), a)
else:
u[Ind0] = get_u_g0direction(g0.reshape(-1, n, n)[Ind0], inv_g0_g1.reshape(-1, n, n)[Ind0])
u[Indn0] = get_u_ng0direction(g0.reshape(-1, n, n)[Indn0], g1.reshape(-1, n, n)[Indn0],
inv_g0_g1.reshape(-1, n, n)[Indn0], a)
return u.reshape(g1.size())
# The Riemannian exponential map
from scipy.linalg import expm, logm
def torch_expm(g):
return torch.from_numpy(expm(g.detach().numpy()))
def Rie_Exp(g0, u, a): # here g0 is of size (s,t,...,3,3) and u is of size (s,t,...,3,3), where g0\neq 0
    '''Compute the Riemannian exponential of u within the maximal domain of the
    Riemannian exponential at g0.
    '''
n = g0.size(-1)
U = torch.einsum("...ik,...kj->...ij", torch.inverse(g0), u) # (s,t,...,3,3)
trU = torch.einsum("...ii->...", U) # (s,t,...)
UTrless = U - torch.einsum("...,ij->...ij", trU, torch.eye(n, n, dtype=torch.double)) / n # (s,t,...,3,3)
# in g0 direction:K_0=0
def get_g1_g0direction(g0, trU): # first reshape g0 (-1,3,3) and trU (-1)
g1 = (trU / 4 + 1).pow(4 / n) * torch.einsum("...ij->ij...", g0) # (3,3,-1)
return g1.permute(2, 0, 1) # (-1,3,3)
# not in g0 direction SplitEbinMetric.pdf Theorem 1 :K_0\not=0
def get_g1_ng0direction(g0, trU, UTrless, a): # first reshape g0,UTrless (-1,3,3) and trU (-1)
# if len((trU < -4).nonzero().reshape(-1)) != 0:
# warnings.warn('The tangent vector u is out of the maximal domain of the Riemannian exponential.',
# DeprecationWarning)
q = trU / 4 + 1 # (-1)
r = (1 / a * torch.einsum("...ik,...ki->...", UTrless, UTrless)).sqrt() / 4 # (-1)
ArctanUtrless = torch.atan2(r, q) * torch.einsum("...ij->ij...", UTrless) / r # use (3,3,-1) for computation
# ExpArctanUtrless = torch.zeros(ArctanUtrless.size(), dtype=torch.double) # (3,3,-1)
# for i in range(q.size(-1)):
# ExpArctanUtrless[:, :, i] = torch_expm(ArctanUtrless[:, :, i])
ExpArctanUtrless = torch.matrix_exp(ArctanUtrless.permute(2, 0, 1))
g1 = (q ** 2 + r ** 2).pow(2 / n) * torch.einsum("...ik,...kj->ij...", g0, ExpArctanUtrless) # (3,3,-1)
return g1.permute(2, 0, 1) # (-1,3,3)
# ArctanUtrless = torch.einsum('k,k...,k->k...', torch.atan2(r, q), UTrless, 1./r) # use (-1,3,3) for computation
# ExpArctanUtrless = torch.matrix_exp(ArctanUtrless)
#
# g1 = (q ** 2 + r ** 2).pow(2 / n) * torch.einsum("...ik,...kj->...ij", g0, ExpArctanUtrless) # (3,3,-1)
# return g1 # (-1,3,3)
# pointwise multiplication Tr(U^TU)
norm0 = torch.einsum("...ij,...ij->...", UTrless, UTrless).reshape(-1) # (-1)
# find the indices for which the entries are 0s and non0s
# k_0=0 or \not=0
Ind0 = (norm0 <= 1e-12).nonzero().reshape(-1)
Indn0 = (norm0 > 1e-12).nonzero().reshape(-1)
g1 = torch.zeros(g0.reshape(-1, n, n).size()) # , dtype=torch.double # (-1,3,3)
if len(Indn0) == 0:
g1 = get_g1_g0direction(g0.reshape(-1, n, n), trU.reshape(-1))
elif len(Ind0) == 0:
g1 = get_g1_ng0direction(g0.reshape(-1, n, n), trU.reshape(-1), UTrless.reshape(-1, n, n), a)
else:
g1[Ind0] = get_g1_g0direction(g0.reshape(-1, n, n)[Ind0], trU.reshape(-1)[Ind0])
g1[Indn0] = get_g1_ng0direction(g0.reshape(-1, n, n)[Indn0], trU.reshape(-1)[Indn0],
UTrless.reshape(-1, n, n)[Indn0], a)
return g1.reshape(g0.size())
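# Consistency note (added): Rie_Exp and inv_RieExp are mutually inverse on the
# maximal domain, so for positive-definite g0 and in-range g1 one expects
# Rie_Exp(g0, inv_RieExp(g0, g1, a), a) to recover g1 up to numerical error;
# the *_extended variants below relax both maps to allow g0 = 0.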
######################
''' The following Riemannian exponential and inverse Riemannian exponential are extended to the case g0=0
'''
def Rie_Exp_extended(g0, u, a): # here g0 is of size (s,t,...,3,3) and u is of size (s,t,...,3,3)
size = g0.size()
g0, u = g0.reshape(-1, *size[-2:]), u.reshape(-1, *size[-2:]) # (-1,3,3)
detg0 = torch.det(g0)
Ind_g0_is0 = (detg0 == 0).nonzero().reshape(-1)
Ind_g0_isnot0 = (detg0 != 0).nonzero().reshape(-1)
if len(Ind_g0_isnot0) == 0: # g0x are 0s for all x
g1 = u * g0.size(-1) / 4
elif len(Ind_g0_is0) == 0: # g0x are PD for all x
g1 = Rie_Exp(g0, u, a)
else:
g1 = torch.zeros(g0.size(), dtype=torch.double)
g1[Ind_g0_is0] = u[Ind_g0_is0] * g0.size(-1) / 4
g1[Ind_g0_isnot0] = Rie_Exp(g0[Ind_g0_isnot0], u[Ind_g0_isnot0], a)
return g1.reshape(size)
def inv_RieExp_extended(g0, g1, a): # g0, g1: (s,t,...,3,3)
size = g0.size()
g0, g1 = g0.reshape(-1, *size[-2:]), g1.reshape(-1, *size[-2:]) # (-1,3,3)
detg0 = torch.det(g0)
Ind_g0_is0 = (detg0 == 0).nonzero().reshape(-1)
Ind_g0_isnot0 = (detg0 != 0).nonzero().reshape(-1)
if len(Ind_g0_isnot0) == 0: # g0x are 0s for all x
u = g1 * 4 / g0.size(-1)
elif len(Ind_g0_is0) == 0: # g0x are PD for all x
u = inv_RieExp(g0, g1, a)
else:
u = torch.zeros(g0.size(), dtype=torch.double)
u[Ind_g0_is0] = g1[Ind_g0_is0] * 4 / g0.size(-1)
u[Ind_g0_isnot0] = inv_RieExp(g0[Ind_g0_isnot0], g1[Ind_g0_isnot0], a)
return u.reshape(size)
##############################
# Compute geodesics
def get_Geo(g0, g1, a, Tpts): # (s,t,...,,3,3)
    '''
    Use an odd number of time points Tpts, since the geodesic may pass through
    the zero matrix, which then serves as the midpoint of the geodesic.
    '''
size = g0.size()
g0, g1 = g0.reshape(-1, *size[-2:]), g1.reshape(-1, *size[-2:]) # (-1,3,3)
Time = torch.arange(Tpts, out=torch.DoubleTensor()) / (Tpts - 1) # (Tpts)
U = logm_invB_A(g0, g1)
UTrless = U - torch.einsum("...ii,kl->...kl", U, torch.eye(g1.size(-1), dtype=torch.double)) / g1.size(
-1) # (...,3,3)
theta = ((1 / a * torch.einsum("...ik,...ki->...", UTrless, UTrless)).sqrt() / 4 - np.pi)
Ind_inRange = (theta < 0).nonzero().reshape(-1)
Ind_notInRange = (theta >= 0).nonzero().reshape(-1)
def geo_in_range(g0, g1, a, Tpts):
u = inv_RieExp_extended(g0, g1, a) # (-1,3,3)
geo = torch.zeros(Tpts, *g0.size(), dtype=torch.double) # (Tpts,-1,3,3)
geo[0], geo[-1] = g0, g1
for i in range(1, Tpts - 1):
geo[i] = Rie_Exp_extended(g0, u * Time[i], a)
return geo # (Tpts,-1,3,3)
def geo_not_in_range(g0, g1, a, Tpts): # (-1,3,3)
m0 = torch.zeros(g0.size(), dtype=torch.double)
u0 = inv_RieExp_extended(g0, m0, a)
u1 = inv_RieExp_extended(g1, m0, a)
geo = torch.zeros(Tpts, *g0.size(), dtype=torch.double) # (Tpts,-1,3,3)
geo[0], geo[-1] = g0, g1
for i in range(1, int((Tpts - 1) / 2)):
geo[i] = Rie_Exp_extended(g0, u0 * Time[i], a)
for j in range(-int((Tpts - 1) / 2), -1):
geo[j] = Rie_Exp_extended(g1, u1 * (1 - Time[j]), a)
return geo # (Tpts,-1,3,3)
# If g1 = 0, len(Ind_notInRange) and len(Ind_inRange) are both zero. In this case we say that g1 is in the range
if len(Ind_notInRange) == 0: # all in the range
geo = geo_in_range(g0, g1, a, Tpts)
elif len(Ind_inRange) == 0: # all not in range
geo = geo_not_in_range(g0, g1, a, Tpts)
else:
geo = torch.zeros(Tpts, *g0.size(), dtype=torch.double) # (Tpts,-1,3,3)
geo[:, Ind_inRange] = geo_in_range(g0[Ind_inRange], g1[Ind_inRange], a, Tpts)
geo[:, Ind_notInRange] = geo_not_in_range(g0[Ind_notInRange], g1[Ind_notInRange], a, Tpts)
return geo.reshape(Tpts, *size)
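# Hypothetical usage (shapes assumed, not from the original code):
#   g0, g1: fields of symmetric matrices of shape (h, w, d, 3, 3)
#   geo = get_Geo(g0, g1, a=1./3., Tpts=5)   # geo.shape == (5, h, w, d, 3, 3)
# Tpts should be odd so a geodesic passing through the zero matrix has a
# well-defined midpoint (see geo_not_in_range above).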
####################################
# Karcher mean
def ptPick_notInRange(g0, g1, i): # (-1,3,3)
alpha = torch.det(g1).pow(1 / 4) / torch.det(g0).pow(1 / 4) # (-1)
Ind_close_to_g0 = (alpha <= i).nonzero().reshape(-1)
Ind_close_to_g1 = (alpha > i).nonzero().reshape(-1)
def get_gm_inLine_0g0(alpha, g0, i):
kn_over4 = -(1 + alpha) / (i + 1) # (-1)
gm = (1 + kn_over4) ** (4 / g0.size(-1)) * torch.einsum("...ij->ij...", g0) # (3,3,-1)
return gm.permute(2, 0, 1) # (-1,3,3)
def get_gm_inLine_0g1(alpha, g1, i):
kn_over4 = -i * (1 + 1 / alpha) / (i + 1) # (-1)
gm = (1 + kn_over4) ** (4 / g1.size(-1)) * torch.einsum("...ij->ij...", g1) # (3,3,-1)
return gm.permute(2, 0, 1)
if len(Ind_close_to_g1) == 0: # all are close to g0
gm = get_gm_inLine_0g0(alpha, g0, i)
elif len(Ind_close_to_g0) == 0:
gm = get_gm_inLine_0g1(alpha, g1, i)
else:
gm = torch.zeros(g0.size(), dtype=torch.double)
gm[Ind_close_to_g0] = get_gm_inLine_0g0(alpha[Ind_close_to_g0], g0[Ind_close_to_g0], i)
gm[Ind_close_to_g1] = get_gm_inLine_0g1(alpha[Ind_close_to_g1], g1[Ind_close_to_g1], i)
return gm
def get_KarcherMean(G, a):
size = G.size()
G = G.reshape(size[0], -1, *size[-2:]) # (T,-1,3,3)
gm = G[0]
for i in range(1, G.size(0)):
U = logm_invB_A(gm, G[i])
UTrless = U - torch.einsum("...ii,kl->...kl", U, torch.eye(size[-1], dtype=torch.double)) / size[
-1] # (...,3,3)
theta = ((torch.einsum("...ik,...ki->...", UTrless, UTrless) / a).sqrt() / 4 - np.pi)
Ind_inRange = (theta < 0).nonzero().reshape(-1) ## G[i] is in the range of the exponential map at gm
Ind_notInRange = (theta >= 0).nonzero().reshape(-1) ## G[i] is not in the range
# when g1 = 0, len(Ind_notInRange) and len(Ind_inRange) are both zero. So check len(Ind_notInRange) first
if len(Ind_notInRange) == 0: # all in the range
gm = Rie_Exp_extended(gm, inv_RieExp_extended(gm, G[i], a) / (i + 1), a)
elif len(Ind_inRange) == 0: # all not in range
gm = ptPick_notInRange(gm, G[i], i)
else:
gm[Ind_inRange] = Rie_Exp_extended(gm[Ind_inRange],
inv_RieExp_extended(gm[Ind_inRange], G[i, Ind_inRange], a) / (i + 1),
a) # stop here
gm[Ind_notInRange] = ptPick_notInRange(gm[Ind_notInRange], G[i, Ind_notInRange], i)
return gm.reshape(*size[1:])
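# Algorithmic note (added): this is the standard incremental Karcher mean --
# after i samples the running mean gm is pulled toward the next sample G[i]
# by 1/(i+1) of the connecting geodesic:
#   gm <- Exp_gm( Log_gm(G[i]) / (i+1) )
# with a separate branch (ptPick_notInRange) when G[i] lies outside the range
# of the exponential map at gm.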
if __name__ == "__main__":
torch.set_default_tensor_type('torch.cuda.DoubleTensor')
A = torch.rand(145, 174, 145, 3, 3) + 0.001
A[:, :, :, 0, 1] = 0
A[:, :, :, 0, 2] = 0
A[:, :, :, 1, 2] = 0
A = torch.einsum("...ij,...kj->...ik", A, A)
B = torch.rand(145, 174, 145, 3, 3) + 0.001
B[:, :, :, 0, 1] = 0
B[:, :, :, 0, 2] = 0
B[:, :, :, 1, 2] = 0
B = torch.einsum("...ij,...kj->...ik", B, B)
G = torch.stack((A, B)).to(device=torch.device('cuda'))
gm = get_KarcherMean(G, 1. / 3.)
|
[
"hdai@cibcgpu1.sci.utah.edu"
] |
hdai@cibcgpu1.sci.utah.edu
|
44b3a1fc8afd6f521d75ab0c25b1a1169eab1245
|
bcb8337b488f6acb91b700a2312a5b2018855413
|
/federatedml/logistic_regression/hetero_logistic_regression/__init__.py
|
01fda0015f92697629eb11e65fe48dd613d46685
|
[
"Apache-2.0"
] |
permissive
|
freeeedooom/FATE
|
d17a4729f059cfec6bc07c3142bebcd3b470dc3c
|
7bbce8ee037543f280791378681742b40a300b0a
|
refs/heads/master
| 2020-08-19T12:15:22.517131
| 2019-10-17T07:09:22
| 2019-10-17T07:09:22
| 215,918,890
| 1
| 0
|
Apache-2.0
| 2019-10-18T01:45:43
| 2019-10-18T01:45:43
| null |
UTF-8
|
Python
| false
| false
| 985
|
py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from federatedml.logistic_regression.hetero_logistic_regression.hetero_lr_arbiter import HeteroLRArbiter
from federatedml.logistic_regression.hetero_logistic_regression.hetero_lr_host import HeteroLRHost
from federatedml.logistic_regression.hetero_logistic_regression.hetero_lr_guest import HeteroLRGuest
__all__ = ["HeteroLRHost", "HeteroLRGuest", "HeteroLRArbiter"]
|
[
"jicezeng@gmail.com"
] |
jicezeng@gmail.com
|
b99ff972f4904f5b43dfec23c0932d4e5973d2b0
|
1aa7b0160e419945bb224df76c3dda26b98ea17a
|
/kujiuapp/migrations/0003_auto_20190829_2338.py
|
8a943eb1e6cbbf0fff5dbf501086a9a47e335c23
|
[] |
no_license
|
laosipm/django-kujiuapp
|
63261a891d8f15490f0294bb6c999ccc59d21f7d
|
f71ad2ce45f36f2bbb2b9981db9cc3595533223e
|
refs/heads/master
| 2021-06-13T20:30:05.246476
| 2019-09-08T13:07:26
| 2019-09-08T13:07:26
| 206,112,950
| 0
| 0
| null | 2021-06-10T21:56:12
| 2019-09-03T15:40:17
|
CSS
|
UTF-8
|
Python
| false
| false
| 455
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('kujiuapp', '0002_category'),
]
operations = [
migrations.DeleteModel(
name='animetype',
),
migrations.DeleteModel(
name='author',
),
migrations.DeleteModel(
name='category',
),
]
|
[
"ledongba@163.com"
] |
ledongba@163.com
|
af249fb2c93c46228f83e0e3c250cac55a83c4b1
|
67d99b4761a5e86408a9e4bac67e48cbe720555c
|
/fundamental3_tipe_data_dictonary.py
|
2cf5a33e663a83a54f9d1b8d5337141c0d5bff3d
|
[] |
no_license
|
Yuwana-Patriawarman/fundamental-python-2021
|
da81f67a4847877c43dc7b08c8dceb5c062352d2
|
e3f9bda18685321e74f78211a123bbb27a9c3e62
|
refs/heads/main
| 2023-06-24T03:47:02.330519
| 2021-07-20T22:59:34
| 2021-07-20T22:59:34
| 386,840,983
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 729
|
py
|
"""
The dictionary data type simply links a KEY to a VALUE
KVP = Key Value Pair
Dictionary = Kamus (Indonesian for "dictionary")
"""
from typing import Dict
kamus_eng_in = {"anak": "son", "istri": "wife", "bapak": "father", "ibu": "mother"}
print(kamus_eng_in)
print(kamus_eng_in['bapak'])
print(kamus_eng_in['ibu'])
print("\nData dikirimkan oleh server Gojek, untuk memberikan info driver disekitar pemakai aplikasi")
data_dari_server_gojek = {
"tanggal" : "2021-07-21",
"driver_list" : ["Eko", "Dwi", "Tri", "Catur"]
}
print(data_dari_server_gojek)
print(f"Driver sekitar sini {data_dari_server_gojek['driver_list']}")
print(f"Driver #1 {data_dari_server_gojek['driver_list'][0]}")
print(f"Driver #4 {data_dari_server_gojek['driver_list'][3]}")
|
[
"pyoewono@gmail.com"
] |
pyoewono@gmail.com
|
5b96bd6940bab8293320a4495a070969a68a6521
|
9391b8caee1a764829ae5c3d23b06a302a1eee3f
|
/TCPClienteZ.py
|
6a57057f5066c7810c3b29627837465a52e82b99
|
[] |
no_license
|
vgs2/security2
|
7ca07ea77693adf9898d1fb9b89604a169c5880a
|
2b6051db7c64a74da66aaf760aeb290f06251144
|
refs/heads/master
| 2023-07-27T07:16:21.092621
| 2021-08-22T21:52:08
| 2021-08-22T21:52:08
| 398,400,942
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 332
|
py
|
import socket
clientSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
clientSocket.connect((socket.gethostname(),5100))
sentence = input('lowercase sentence:')
clientSocket.send(bytes(sentence, "utf-8"))
modifiedSentence = clientSocket.recv(1024)
print('From Server:', modifiedSentence.decode("utf-8"))
clientSocket.close()
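# A minimal counterpart server sketch (an assumption about the matching course
# program, not part of this file):
#   import socket
#   serverSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   serverSocket.bind((socket.gethostname(), 5100))
#   serverSocket.listen(1)
#   conn, addr = serverSocket.accept()
#   conn.send(conn.recv(1024).upper())
#   conn.close()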
|
[
"vgs2@cin.ufpe.br"
] |
vgs2@cin.ufpe.br
|
d5a40ea1e317a34ecf931c8eff9c9d5ae34facb9
|
49e87feb9b716972a636b72a97ee18be0f1f3f0d
|
/src/oidcservice/oidc/access_token.py
|
174d095e1e1c6f8ae0a13df826952f6d34d467c7
|
[
"Apache-2.0"
] |
permissive
|
pi-lar/JWTConnect-Python-OidcService
|
b1d6cb1338861c9d7e771a1a67f4003a81319dab
|
cfd34fe88300a53f8f3ea748275f6f714c4571ca
|
refs/heads/master
| 2023-03-29T11:17:05.762172
| 2020-10-05T12:57:48
| 2020-10-05T12:57:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,707
|
py
|
import logging
from oidcmsg import oidc
from oidcmsg.oidc import verified_claim_name
from oidcmsg.time_util import time_sans_frac
from oidcservice.oauth2 import access_token
from oidcservice.exception import ParameterError
from oidcservice.oidc import IDT2REG
__author__ = 'Roland Hedberg'
LOGGER = logging.getLogger(__name__)
class AccessToken(access_token.AccessToken):
msg_type = oidc.AccessTokenRequest
response_cls = oidc.AccessTokenResponse
error_msg = oidc.ResponseMessage
def __init__(self, service_context, client_authn_factory=None,
conf=None):
access_token.AccessToken.__init__(self, service_context,
client_authn_factory=client_authn_factory, conf=conf)
def gather_verify_arguments(self):
"""
Need to add some information before running verify()
:return: dictionary with arguments to the verify call
"""
_ctx = self.service_context
# Default is RS256
kwargs = {
'client_id': _ctx.get('client_id'), 'iss': _ctx.get('issuer'),
'keyjar': _ctx.keyjar, 'verify': True,
'skew': _ctx.clock_skew,
}
if 'registration_response' in _ctx:
_reg_resp = _ctx.get('registration_response')
for attr, param in IDT2REG.items():
try:
kwargs[attr] = _reg_resp[param]
except KeyError:
pass
try:
kwargs['allow_missing_kid'] = self.service_context.allow['missing_kid']
except KeyError:
pass
if 'behaviour' in _ctx:
_verify_args = _ctx.get('behaviour').get("verify_args")
if _verify_args:
kwargs.update(_verify_args)
return kwargs
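    # Added note: the kwargs returned above feed ID Token verification --
    # client registration parameters are remapped onto verify() arguments via
    # IDT2REG, and deployment-specific overrides can be supplied through
    # behaviour['verify_args'].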
def update_service_context(self, resp, key='', **kwargs):
try:
_idt = resp[verified_claim_name('id_token')]
except KeyError:
pass
else:
try:
if self.get_state_by_nonce(_idt['nonce']) != key:
raise ParameterError('Someone has messed with "nonce"')
except KeyError:
raise ValueError('Invalid nonce value')
self.store_sub2state(_idt['sub'], key)
if 'expires_in' in resp:
resp['__expires_at'] = time_sans_frac() + int(
resp['expires_in'])
self.store_item(resp, 'token_response', key)
def get_authn_method(self):
try:
return self.service_context.get('behaviour')[
'token_endpoint_auth_method']
except KeyError:
return self.default_authn_method
|
[
"roland@catalogix.se"
] |
roland@catalogix.se
|
00ed0f98404c2d5528999c3919798c3f8538be63
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02850/s576857061.py
|
98f13279c663bc9d8332c53c1eae7af84cfd5d50
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,083
|
py
|
import sys
import itertools
sys.setrecursionlimit(1000000000)
from heapq import heapify,heappop,heappush,heappushpop
import math
import collections
import copy
import bisect
INF = 10**9+7
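# Added summary: this is a greedy tree edge-coloring. The minimum number of
# colors K equals the maximum vertex degree; a BFS from vertex 0 gives each
# edge the smallest color that differs from the color of the edge into the
# parent (color_num skips color[p]).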
if __name__ == "__main__":
n = int(input())
edge = [[] for i in range(n)]
side = []
for i in range(n-1):
a,b = map(int,input().split())
a-=1
b-=1
edge[a].append(b)
edge[b].append(a)
side.append([a,b])
I = collections.defaultdict(lambda : 0)
k = 0
for i in edge:
k = max(k,len(i))
print(k)
color = [INF]*(n)
d = [INF]*(n)
d[0] = 0
q = collections.deque([0])
while q:
p = q.popleft()
color_num = 1
for v in edge[p]:
if d[v] == INF:
q.append(v)
d[v] = d[p] + 1
if color_num == color[p]:
color_num+=1
color[v] = color_num
I[(p,v)] = color_num
I[(v,p)] = color_num
color_num+=1
for i,j in side:
print(I[(i,j)])
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
ec7aeafa8f17df6e88c702453a4a43ad6103af3e
|
ac952320fd3b5ebc30f981d5400d4f55fae341cd
|
/animate_diffusion.py
|
0f80759a04f7dd1b5429266d86e66d2168ef24c5
|
[] |
no_license
|
Lehotzky/finite-difference-3D-diffusion-equation-solver
|
a15b1713d06af50bfd889e8773df192122d82630
|
1e1bfc2dfcf16be85addd8091991599be92a90de
|
refs/heads/main
| 2023-07-16T21:38:04.033453
| 2021-08-17T17:26:55
| 2021-08-17T17:26:55
| 397,338,812
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,325
|
py
|
import matplotlib.pyplot as plt
from matplotlib import animation
import numpy as np
class AnimatedDiffusion():
def __init__(self, U, ind_excluded, N_x, N_r, del_x, del_r, del_t, u_max, coord_fixed, n_levels):
N_tot = N_x * N_r ** 2
N_pt = U.shape[1]
U_full = np.zeros((N_tot, N_pt))
mask = np.ones(N_tot, dtype=bool)
mask[np.array(ind_excluded)] = False
U_full[mask, :] = U
U_full[np.logical_not(mask), :] = np.nan
self.U = U_full
self.ind_excluded = ind_excluded
self.N_x = N_x
self.N_r = N_r
self.N_tot = N_tot
self.N_pt = N_pt
self.del_t = del_t
self.del_x = del_x * 10 ** 4
self.del_r = del_r * 10 ** 4
self.u_max = u_max
self.coord_fixed = coord_fixed
self.n_levels = n_levels
ind_fixed = np.zeros(3, dtype=bool)
for ind, coord in enumerate(['i', 'j', 'k']):
if list(coord_fixed.keys())[0] == coord:
ind_fixed[ind] = True
self.ind_varied = np.logical_not(ind_fixed)
self.ind_fixed = ind_fixed
self.scale_global = np.array([1, N_x, N_x*N_r])
self.fixed_value = list(coord_fixed.values())[0]
if list(coord_fixed.keys())[0] == 'i':
self.N_max_1 = N_r
self.del_cr1 = self.del_r
else:
self.N_max_1 = N_x
self.del_cr1 = self.del_x
# Set up the figure and axes
self.fig = plt.figure(figsize=(5, 3), dpi=300)
self.ax = self.fig.add_subplot()
u_section = np.zeros((self.N_max_1, self.N_r))
i_var = np.zeros(3)
for c1 in range(self.N_max_1):
for c2 in range(self.N_r):
i_var[self.ind_varied] = np.array([c1, c2])
q = self.fixed_value * np.dot(self.ind_fixed, self.scale_global) + np.dot(i_var, self.scale_global)
u_section[c1, c2] = self.U[int(q), 0]
self.cr1, self.cr2 = np.meshgrid(np.arange(self.N_max_1), np.arange(self.N_r))
if list(coord_fixed.keys())[0] == 'i':
self.coord1 = (self.cr1 - int(self.N_r / 2) + 1 / 2) * self.del_r
else:
self.coord1 = self.cr1 * self.del_cr1
self.coord2 = (self.cr2 - int(self.N_r / 2) + 1 / 2) * self.del_r
self.cf = self.ax.contourf(self.coord1, self.coord2, u_section.transpose(),
cmap=plt.cm.get_cmap('jet'), vmin=0, vmax=u_max, levels=self.n_levels)
self.cmap = plt.get_cmap("tab10")
self.fig.colorbar(self.cf, ticks=[i/10 for i in range(11)])
self.ax.set_title('$t={:.0f}$'.format(0*self.del_t) + ' [sec]')
# Set plot range
self.ax.set_aspect('equal')
self.labels = ['x', 'y', 'z']
self.lind_varied = np.arange(3)[self.ind_varied]
self.ax.set_xlabel('$' + self.labels[self.lind_varied[0]] + '(\mu \mathrm{m})$')
self.ax.set_ylabel('$' + self.labels[self.lind_varied[1]] + '(\mu \mathrm{m})$')
self.fig.tight_layout()
# Set up FuncAnimation
self.ani = animation.FuncAnimation(self.fig, self.update, frames=self.N_pt, interval=self.N_pt)
def update(self, t: int):
u_section = np.zeros((self.N_max_1, self.N_r))
i_var = np.zeros(3)
for c1 in range(self.N_max_1):
for c2 in range(self.N_r):
i_var[self.ind_varied] = np.array([c1, c2])
q = self.fixed_value * np.dot(self.ind_fixed, self.scale_global) + np.dot(i_var, self.scale_global)
u_section[c1, c2] = self.U[int(q), t]
# Update coordinate data in PathCollection
self.ax.clear()
self.cf = self.ax.contourf(self.coord1, self.coord2, u_section.transpose(), cmap=plt.cm.get_cmap('jet'),
vmin=0, vmax=self.u_max, levels=self.n_levels)
# Update title
self.ax.set_title('$t={:.2f}$'.format(t*self.del_t/60/60) + ' [hour]')
self.ax.set_aspect('equal')
self.ax.set_xlabel('$' + self.labels[self.lind_varied[0]] + '(\mu \mathrm{m})$')
self.ax.set_ylabel('$' + self.labels[self.lind_varied[1]] + '(\mu \mathrm{m})$')
return self.cf
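# Hypothetical usage sketch (argument values assumed, not from this module):
#   anim = AnimatedDiffusion(U, ind_excluded, N_x, N_r, del_x, del_r, del_t,
#                            u_max=1.0, coord_fixed={'i': 0}, n_levels=20)
#   anim.ani.save('diffusion.mp4')  # or plt.show() for interactive display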
|
[
"noreply@github.com"
] |
Lehotzky.noreply@github.com
|
63a7929a3f1d5347f8cf3419feaaadfb7c8144df
|
7d200b5af6a3573e1c72a21bb791fabb5040d04f
|
/eventex/teste42/apps.py
|
14b0b99faca146cc4d35d51eba2beb485de801ee
|
[] |
no_license
|
lscosta90br/eventex-lc
|
38a6bed56bd37d88edbe85d5b6f3fa70ea7b335a
|
43bf5edfba257cd7fa1822b0bfaa79321ba48726
|
refs/heads/master
| 2022-05-13T22:42:41.752411
| 2019-07-12T01:25:47
| 2019-07-12T01:25:47
| 193,165,901
| 0
| 0
| null | 2022-04-22T21:37:39
| 2019-06-21T22:15:30
|
CSS
|
UTF-8
|
Python
| false
| false
| 89
|
py
|
from django.apps import AppConfig
class Teste42Config(AppConfig):
name = 'teste42'
|
[
"lscosta@gmail.com"
] |
lscosta@gmail.com
|
edef85a4c9634f236ca7b35ae3b2175b51e69589
|
afbf981c90603eedee0e6a997402b09ebc110b16
|
/node_modules/node-sass/build/config.gypi
|
a81f964290d9b272bd8696ea1b5a27f3e0781ca7
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
Prakhar19999/Realest_Estate
|
67be4254ed3c953685f75c2152ea9148b45109fa
|
6dfb1e42dcb474d92fe3435b82893b2ee700a9e2
|
refs/heads/main
| 2023-06-20T14:29:43.107183
| 2021-07-16T11:29:10
| 2021-07-16T11:29:10
| 386,613,069
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,049
|
gypi
|
# Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"build_v8_with_gn": "false",
"coverage": "false",
"dcheck_always_on": 0,
"debug_nghttp2": "false",
"debug_node": "false",
"enable_lto": "false",
"enable_pgo_generate": "false",
"enable_pgo_use": "false",
"error_on_warn": "false",
"force_dynamic_crt": 0,
"gas_version": "2.30",
"host_arch": "x64",
"icu_data_in": "../../deps/icu-tmp/icudt67l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_path": "deps/icu-small",
"icu_small": "false",
"icu_ver_major": "67",
"is_debug": 0,
"llvm_version": "0.0",
"napi_build_version": "7",
"node_byteorder": "little",
"node_debug_lib": "false",
"node_enable_d8": "false",
"node_install_npm": "true",
"node_module_version": 83,
"node_no_browser_globals": "false",
"node_prefix": "/",
"node_release_urlbase": "https://nodejs.org/download/release/",
"node_section_ordering_info": "",
"node_shared": "false",
"node_shared_brotli": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_nghttp2": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_target_type": "executable",
"node_use_bundled_v8": "true",
"node_use_dtrace": "false",
"node_use_etw": "false",
"node_use_node_code_cache": "true",
"node_use_node_snapshot": "true",
"node_use_openssl": "true",
"node_use_v8_platform": "true",
"node_with_ltcg": "false",
"node_without_node_options": "false",
"openssl_fips": "",
"openssl_is_fips": "false",
"ossfuzz": "false",
"shlib_suffix": "so.83",
"target_arch": "x64",
"v8_enable_31bit_smis_on_64bit_arch": 0,
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_enable_inspector": 1,
"v8_enable_lite_mode": 0,
"v8_enable_object_print": 1,
"v8_enable_pointer_compression": 0,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 1,
"v8_promise_internal_field_count": 1,
"v8_random_seed": 0,
"v8_trace_maps": 0,
"v8_use_siphash": 1,
"want_separate_host_toolset": 0,
"nodedir": "/home/dell/.node-gyp/14.15.0",
"standalone_static_library": 1,
"libsass_ext": "",
"libsass_cflags": "",
"libsass_ldflags": "",
"libsass_library": "",
"user_agent": "npm/7.10.0 node/v14.15.0 linux x64",
"noproxy": "",
"userconfig": "/home/dell/.npmrc",
"metrics_registry": "https://registry.npmjs.org/",
"prefix": "/usr/local",
"cache": "/home/dell/.npm",
"node_gyp": "/usr/local/lib/node_modules/npm/node_modules/node-gyp/bin/node-gyp.js",
"globalconfig": "/usr/local/etc/npmrc",
"init_module": "/home/dell/.npm-init.js"
}
}
|
[
"prakhar.srivastava19999@gmail.com"
] |
prakhar.srivastava19999@gmail.com
|
ce417e51519ba96cf01bccb4bb5296821255a68e
|
b81db9f784ce21aacf57ba246a906aa232b81764
|
/reminders/migrations/0002_initial.py
|
860581d0102e774b02fa56a7760b99c4118e1afc
|
[] |
no_license
|
kmvit/prosbi
|
51c0b4e88fc645ccad68435d3cf8ad375ebac1f0
|
019b55a77e56fe53e1c524301ae95bf72fd8390c
|
refs/heads/master
| 2021-01-22T13:38:14.056683
| 2014-12-05T18:14:06
| 2014-12-05T18:14:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,164
|
py
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Reminder'
db.create_table(u'reminders_reminder', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('account', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['account.Account'])),
('title', self.gf('django.db.models.fields.CharField')(max_length=150)),
('type', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=3)),
('permanent', self.gf('django.db.models.fields.BooleanField')(default=False)),
('active', self.gf('django.db.models.fields.BooleanField')(default=True)),
))
db.send_create_signal(u'reminders', ['Reminder'])
# Adding model 'ReminderItem'
db.create_table(u'reminders_reminderitem', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('reminder', self.gf('django.db.models.fields.related.ForeignKey')(related_name='reminder_items', to=orm['reminders.Reminder'])),
('name', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['names.Name'])),
('comment', self.gf('django.db.models.fields.TextField')(blank=True)),
('date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('active', self.gf('django.db.models.fields.BooleanField')(default=True)),
))
db.send_create_signal(u'reminders', ['ReminderItem'])
# Adding model 'RequestReminderLink'
db.create_table(u'reminders_requestreminderlink', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('reminder', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['reminders.Reminder'])),
('request', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['requests.Request'])),
('date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('active', self.gf('django.db.models.fields.BooleanField')(default=True)),
))
db.send_create_signal(u'reminders', ['RequestReminderLink'])
def backwards(self, orm):
# Deleting model 'Reminder'
db.delete_table(u'reminders_reminder')
# Deleting model 'ReminderItem'
db.delete_table(u'reminders_reminderitem')
# Deleting model 'RequestReminderLink'
db.delete_table(u'reminders_requestreminderlink')
models = {
u'account.account': {
'Meta': {'object_name': 'Account'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'anonym': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'confession': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'confession_accounts'", 'null': 'True', 'to': u"orm['account.Confession']"}),
'dignity': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'dignity_accounts'", 'null': 'True', 'to': u"orm['account.Dignity']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'name_accounts'", 'null': 'True', 'to': u"orm['names.Name']"}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True', 'null': 'True', 'blank': 'True'})
},
u'account.confession': {
'Meta': {'object_name': 'Confession'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'account.dignity': {
'Meta': {'object_name': 'Dignity'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'names.name': {
'Meta': {'ordering': "['nominative']", 'object_name': 'Name'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'genitive': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moderation': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'nominative': ('django.db.models.fields.CharField', [], {'max_length': '150'})
},
u'reminders.reminder': {
'Meta': {'ordering': "['-permanent', 'title']", 'object_name': 'Reminder'},
'account': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['account.Account']"}),
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'permanent': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'requests': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'request_reminders'", 'symmetrical': 'False', 'through': u"orm['reminders.RequestReminderLink']", 'to': u"orm['requests.Request']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
'type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '3'})
},
u'reminders.reminderitem': {
'Meta': {'object_name': 'ReminderItem'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['names.Name']"}),
'reminder': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reminder_items'", 'to': u"orm['reminders.Reminder']"})
},
u'reminders.requestreminderlink': {
'Meta': {'object_name': 'RequestReminderLink'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reminder': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['reminders.Reminder']"}),
'request': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['requests.Request']"})
},
u'requests.category': {
'Meta': {'object_name': 'Category'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'requests.request': {
'Meta': {'ordering': "['-date']", 'object_name': 'Request'},
'account': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'account_requests'", 'to': u"orm['account.Account']"}),
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'category_requests'", 'to': u"orm['requests.Category']"}),
'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'names': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'name_requests'", 'null': 'True', 'to': u"orm['names.Name']"}),
'old_names': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'old_prayer_count': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
}
}
complete_apps = ['reminders']
|
[
"kmv-it@yandex.ru"
] |
kmv-it@yandex.ru
|
0730c36f35fd16df8b1036e9c139052096299020
|
698c44c9f0f3238db8b07dbee740c4d5cffd7ea5
|
/example_game/bullet.py
|
81dadaceedd9c127ae8a7979ceeb557857449015
|
[] |
no_license
|
Yashwant94308/games
|
9ecffb99050fdb1231c5babda42eb5ef790960f9
|
9440baf1fcc5623fe769e7695a8c074a32e5e4c0
|
refs/heads/master
| 2022-11-07T03:31:08.314317
| 2020-06-20T06:40:11
| 2020-06-20T06:40:11
| 269,345,513
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 833
|
py
|
import pygame
from pygame.sprite import Sprite
class Bullet(Sprite):
def __init__(self, eg_settings, screen, virus):
super(Bullet, self).__init__()
self.virus = virus
self.screen = screen
self.screen_rect = screen.get_rect()
self.eg_settings = eg_settings
self.bullet_image = pygame.image.load("bullet.bmp")
self.rect = self.bullet_image.get_rect()
self.rect.centery = self.virus.rect.centery
self.rect.right = self.virus.rect.right
self.speed_factor = self.eg_settings.bullet_speed_factor
"""adjusting decimal value of bullet"""
self.x = float(self.rect.x)
def update(self):
self.x += self.speed_factor
self.rect.x = self.x
def draw_bullet(self):
self.screen.blit(self.bullet_image, self.rect)
|
[
"yashwant94308@gmail.com"
] |
yashwant94308@gmail.com
|
f275a8e46539605b5248dd2e21134aed90b5a415
|
34fafe0ce4a28a0b2ace024858868ed030d5e432
|
/configs/0HSCParticleProducer_cfg_V3.py
|
3e4bd053e6750695ca41b2fd2ae8710826a5de12
|
[] |
no_license
|
CMS-HSCP/SUSYBSMAnalysis-HSCP
|
d40fe64ece600fc0909a2c32eb07b4204784948c
|
809271e1dacc98d34992561b5fcb502c01cc1172
|
refs/heads/master
| 2023-07-25T00:46:38.870643
| 2023-07-11T20:25:12
| 2023-07-11T20:25:12
| 164,119,298
| 3
| 16
| null | 2023-07-11T20:25:14
| 2019-01-04T14:47:05
|
Python
|
UTF-8
|
Python
| false
| false
| 6,257
|
py
|
import sys, os
import FWCore.ParameterSet.Config as cms
from FWCore.ParameterSet.VarParsing import VarParsing
options = VarParsing('analysis')
# defaults
options.outputFile = 'HSCP.root'
options.maxEvents = -1 # -1 means all events
options.register('GTAG', '106X_dataRun2_v20',
VarParsing.multiplicity.singleton,
VarParsing.varType.string,
"Global Tag"
)
options.register('SAMPLE', 'isData',
VarParsing.multiplicity.singleton,
VarParsing.varType.string,
"Sample Type. Use: isSignal or isBckg or isData"
)
options.register('isSkimmedSample', False,
VarParsing.multiplicity.singleton,
VarParsing.varType.bool,
"is sample Skimmed? True or False"
)
options.register('LUMITOPROCESS', 'Cert_294927-306462_13TeV_UL2017_Collisions17_GoldenJSON.txt',
VarParsing.multiplicity.singleton,
VarParsing.varType.string,
"Lumi to process"
)
options.parseArguments()
from Configuration.Eras.Era_Run2_2017_cff import Run2_2017
process = cms.Process('HSCPAnalysis',Run2_2017)
process.load("FWCore.MessageService.MessageLogger_cfi")
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.MagneticField_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.load('Configuration.StandardSequences.Services_cff')
process.options = cms.untracked.PSet(
wantSummary = cms.untracked.bool(True),
# SkipEvent = cms.untracked.vstring('ProductNotFound'),
)
process.MessageLogger.cerr.FwkReport.reportEvery = 1000
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(options.maxEvents) )
process.source = cms.Source("PoolSource",
#fileNames = cms.untracked.vstring(options.inputFiles),
fileNames = cms.untracked.vstring("file:2018_SingleMuon_AOD_100.root"),
#fileNames = cms.untracked.vstring("file:/afs/cern.ch/work/t/tvami/public/HSCP/PrivateReAOD/CMSSW_10_6_27/src/2018_SingleMuon_AOD.root"),
#inputCommands = cms.untracked.vstring("keep *", "drop *_MEtoEDMConverter_*_*")
)
#The duplicateCheckMode works only if we submit with Condor - not with Crab - checks process history, run number, lumi number
#process.source.duplicateCheckMode = cms.untracked.string("checkAllFilesOpened")
from Configuration.AlCa.GlobalTag_condDBv2 import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, options.GTAG, '')
process.HSCPTuplePath = cms.Path()
########################################################################
#Run the Skim sequence if necessary
if(not options.isSkimmedSample):
process.nEventsBefSkim = cms.EDProducer("EventCountProducer")
process.load('HLTrigger.HLTfilters.hltHighLevel_cfi')
process.HSCPTrigger = process.hltHighLevel.clone()
process.HSCPTrigger.TriggerResultsTag = cms.InputTag( "TriggerResults", "", "HLT" )
process.HSCPTrigger.andOr = cms.bool( True ) #OR
process.HSCPTrigger.throw = cms.bool( False )
if(options.SAMPLE=='isData'):
process.HSCPTrigger.HLTPaths = [ #check triggers
"HLT_PFMET120_PFMHT120_IDTight_v*",
"HLT_Mu50_v*",
]
process.HSCPTuplePath += process.nEventsBefSkim + process.HSCPTrigger
########################################################################
#Run the HSCP EDM-tuple Sequence on skimmed sample
process.nEventsBefEDM = cms.EDProducer("EventCountProducer")
process.load("SUSYBSMAnalysis.HSCP.HSCParticleProducer_cff")
process.HSCPTuplePath += process.nEventsBefEDM + process.HSCParticleProducerSeq
#make the pool output
process.Out = cms.OutputModule("PoolOutputModule",
outputCommands = cms.untracked.vstring(
"drop *",
"keep EventAux_*_*_*",
"keep LumiSummary_*_*_*",
"keep edmMergeableCounter_*_*_*",
"keep GenRunInfoProduct_*_*_*",
"keep GenEventInfoProduct_generator_*_*",
"keep *_genParticlesSkimmed_*_*",
"keep *_genParticlePlusGeant_*_*",
"keep *_offlinePrimaryVertices_*_*",
"keep recoTracks_generalTracks_*_*",
"keep recoTracks_standAloneMuons_*_*",
"keep recoTrackExtras_standAloneMuons_*_*",
"keep TrackingRecHitsOwned_standAloneMuons_*_*",
"keep recoTracks_globalMuons_*_*",
"keep recoTrackExtras_globalMuons_*_*",
"keep recoMuons_muons_*_*",
"keep recoMuonTimeExtraedmValueMap_muons_*_*",
"keep edmTriggerResults_TriggerResults_*_*",
"keep *_ak4PFJetsCHS__*",
"keep recoPFMETs_pfMet__*",
"keep *_HSCParticleProducer_*_*",
"keep *_HSCPIsolation*_*_*",
"keep *_dedxHitInfo*_*_*",
"keep triggerTriggerEvent_hltTriggerSummaryAOD_*_*",
"keep *_offlineBeamSpot_*_*",
"keep *_MuonSegmentProducer_*_*",
"keep *_g4SimHits_StoppedParticles*_*",
"keep PileupSummaryInfos_addPileupInfo_*_*",
"keep *_dt4DSegments__*",
"keep *_cscSegments__*",
),
fileName = cms.untracked.string(options.outputFile),
SelectEvents = cms.untracked.PSet(
SelectEvents = cms.vstring('*')
),
)
if(options.SAMPLE=='isData' and len(options.LUMITOPROCESS)>0):
import FWCore.PythonUtilities.LumiList as LumiList
# process.source.lumisToProcess = LumiList.LumiList(filename = options.LUMITOPROCESS).getVLuminosityBlockRange()
#process.source.lumisToProcess = LumiList.LumiList(url = https://cms-service-dqm.web.cern.ch/cms-service-dqm/CAF/certification/Collisions17/13TeV/ReReco/Cert_294927-306462_13TeV_EOY2017ReReco_Collisions17_JSON.txt).getVLuminosityBlockRange()
if(options.SAMPLE=='isBckg' or options.SAMPLE=='isData'):
process.Out.SelectEvents.SelectEvents = cms.vstring('HSCPTuplePath') #take just the skimmed ones
process.Out.outputCommands.extend(["drop triggerTriggerEvent_hltTriggerSummaryAOD_*_*"])
else:
process.Out.SelectEvents = cms.untracked.PSet()
########################################################################
process.tsk = cms.Task()
for mod in process.producers_().itervalues():
process.tsk.add(mod)
for mod in process.filters_().itervalues():
process.tsk.add(mod)
#schedule the sequence
process.endPath1 = cms.EndPath(process.Out)
process.schedule = cms.Schedule(process.HSCPTuplePath, process.endPath1)
|
[
"noreply@github.com"
] |
CMS-HSCP.noreply@github.com
|
c10023fbd1c5181e1c360c78de33a3745f8b5d7d
|
62fba3c7fe99d29985f12d2915853c4269f5d2fc
|
/test/functional/p2p_segwit.py
|
cf89a54b053a9be26881f6c7f0b2417a589464d0
|
[
"MIT"
] |
permissive
|
cnccdev/wes_coind
|
fe88222d5f5ac9f0b91b54d2b97e34700175c912
|
a15d64caa24dec050f997fe2031d518ee1d76836
|
refs/heads/master
| 2020-05-22T23:49:06.733413
| 2019-05-22T01:53:20
| 2019-05-22T01:53:20
| 186,550,211
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 95,628
|
py
|
#!/usr/bin/env python3
# Copyright (c) 2016-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test segwit transactions and blocks on P2P network."""
from binascii import hexlify
import math
import random
import struct
import time
from test_framework.blocktools import create_block, create_coinbase, add_witness_commitment, get_witness_script, WITNESS_COMMITMENT_HEADER
from test_framework.key import CECKey, CPubKey
from test_framework.messages import (
BIP125_SEQUENCE_NUMBER,
CBlock,
CBlockHeader,
CInv,
COutPoint,
CTransaction,
CTxIn,
CTxInWitness,
CTxOut,
CTxWitness,
MAX_BLOCK_BASE_SIZE,
MSG_WITNESS_FLAG,
NODE_NETWORK,
NODE_WITNESS,
msg_block,
msg_getdata,
msg_headers,
msg_inv,
msg_tx,
msg_witness_block,
msg_witness_tx,
ser_uint256,
ser_vector,
sha256,
uint256_from_str,
)
from test_framework.mininode import (
P2PInterface,
mininode_lock,
wait_until,
)
from test_framework.script import (
CScript,
CScriptNum,
CScriptOp,
MAX_SCRIPT_ELEMENT_SIZE,
OP_0,
OP_1,
OP_16,
OP_2DROP,
OP_CHECKMULTISIG,
OP_CHECKSIG,
OP_DROP,
OP_DUP,
OP_ELSE,
OP_ENDIF,
OP_EQUAL,
OP_EQUALVERIFY,
OP_HASH160,
OP_IF,
OP_RETURN,
OP_TRUE,
SIGHASH_ALL,
SIGHASH_ANYONECANPAY,
SIGHASH_NONE,
SIGHASH_SINGLE,
SegwitVersion1SignatureHash,
SignatureHash,
hash160,
)
from test_framework.test_framework import WescoinTestFramework
from test_framework.util import (
assert_equal,
bytes_to_hex_str,
connect_nodes,
disconnect_nodes,
get_bip9_status,
hex_str_to_bytes,
sync_blocks,
sync_mempools,
)
# The versionbit bit used to signal activation of SegWit
VB_WITNESS_BIT = 1
VB_PERIOD = 144
VB_TOP_BITS = 0x20000000
MAX_SIGOP_COST = 80000
class UTXO():
"""Used to keep track of anyone-can-spend outputs that we can use in the tests."""
def __init__(self, sha256, n, value):
self.sha256 = sha256
self.n = n
self.nValue = value
def get_p2pkh_script(pubkeyhash):
"""Get the script associated with a P2PKH."""
return CScript([CScriptOp(OP_DUP), CScriptOp(OP_HASH160), pubkeyhash, CScriptOp(OP_EQUALVERIFY), CScriptOp(OP_CHECKSIG)])
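# (Added illustration) get_p2pkh_script builds the canonical P2PKH locking script:
#   OP_DUP OP_HASH160 <20-byte pubkey hash> OP_EQUALVERIFY OP_CHECKSIG
# e.g. get_p2pkh_script(hash160(pubkey)) for a compressed public key `pubkey`.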
def sign_p2pk_witness_input(script, tx_to, in_idx, hashtype, value, key):
"""Add signature for a P2PK witness program."""
tx_hash = SegwitVersion1SignatureHash(script, tx_to, in_idx, hashtype, value)
signature = key.sign(tx_hash) + chr(hashtype).encode('latin-1')
tx_to.wit.vtxinwit[in_idx].scriptWitness.stack = [signature, script]
tx_to.rehash()
def get_virtual_size(witness_block):
"""Calculate the virtual size of a witness block.
Virtual size is base + witness/4."""
base_size = len(witness_block.serialize(with_witness=False))
total_size = len(witness_block.serialize(with_witness=True))
# the "+3" is so we round up
vsize = int((3 * base_size + total_size + 3) / 4)
return vsize
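# Worked example (added): per BIP141, weight = 3*base_size + total_size and
# vsize = ceil(weight / 4), e.g. base_size=200, total_size=300 gives
# weight = 900 and vsize = (900 + 3) // 4 = 225.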
def test_transaction_acceptance(node, p2p, tx, with_witness, accepted, reason=None):
"""Send a transaction to the node and check that it's accepted to the mempool
- Submit the transaction over the p2p interface
- use the getrawmempool rpc to check for acceptance."""
tx_message = msg_tx(tx)
if with_witness:
tx_message = msg_witness_tx(tx)
p2p.send_message(tx_message)
p2p.sync_with_ping()
assert_equal(tx.hash in node.getrawmempool(), accepted)
if (reason is not None and not accepted):
# Check the rejection reason as well.
with mininode_lock:
assert_equal(p2p.last_message["reject"].reason, reason)
def test_witness_block(node, p2p, block, accepted, with_witness=True, reason=None):
"""Send a block to the node and check that it's accepted
- Submit the block over the p2p interface
- use the getbestblockhash rpc to check for acceptance."""
if with_witness:
p2p.send_message(msg_witness_block(block))
else:
p2p.send_message(msg_block(block))
p2p.sync_with_ping()
assert_equal(node.getbestblockhash() == block.hash, accepted)
if (reason is not None and not accepted):
# Check the rejection reason as well.
with mininode_lock:
assert_equal(p2p.last_message["reject"].reason, reason)
class TestP2PConn(P2PInterface):
def __init__(self):
super().__init__()
self.getdataset = set()
def on_getdata(self, message):
for inv in message.inv:
self.getdataset.add(inv.hash)
def announce_tx_and_wait_for_getdata(self, tx, timeout=60, success=True):
with mininode_lock:
self.last_message.pop("getdata", None)
self.send_message(msg_inv(inv=[CInv(1, tx.sha256)]))
if success:
self.wait_for_getdata(timeout)
else:
time.sleep(timeout)
assert not self.last_message.get("getdata")
def announce_block_and_wait_for_getdata(self, block, use_header, timeout=60):
with mininode_lock:
self.last_message.pop("getdata", None)
self.last_message.pop("getheaders", None)
msg = msg_headers()
msg.headers = [CBlockHeader(block)]
if use_header:
self.send_message(msg)
else:
self.send_message(msg_inv(inv=[CInv(2, block.sha256)]))
self.wait_for_getheaders()
self.send_message(msg)
self.wait_for_getdata()
def request_block(self, blockhash, inv_type, timeout=60):
with mininode_lock:
self.last_message.pop("block", None)
self.send_message(msg_getdata(inv=[CInv(inv_type, blockhash)]))
self.wait_for_block(blockhash, timeout)
return self.last_message["block"].block
class SegWitTest(WescoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
# This test tests SegWit both pre and post-activation, so use the normal BIP9 activation.
self.extra_args = [["-whitelist=127.0.0.1", "-vbparams=segwit:0:999999999999"], ["-whitelist=127.0.0.1", "-acceptnonstdtxn=0", "-vbparams=segwit:0:999999999999"], ["-whitelist=127.0.0.1", "-vbparams=segwit:0:0"]]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_network(self):
self.setup_nodes()
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[0], 2)
self.sync_all()
# Helper functions
def build_next_block(self, version=4):
"""Build a block on top of node0's tip."""
tip = self.nodes[0].getbestblockhash()
height = self.nodes[0].getblockcount() + 1
block_time = self.nodes[0].getblockheader(tip)["mediantime"] + 1
block = create_block(int(tip, 16), create_coinbase(height), block_time)
block.version = version
block.rehash()
return block
def update_witness_block_with_transactions(self, block, tx_list, nonce=0):
"""Add list of transactions to block, adds witness commitment, then solves."""
block.vtx.extend(tx_list)
add_witness_commitment(block, nonce)
block.solve()
def run_test(self):
# Setup the p2p connections
# self.test_node sets NODE_WITNESS|NODE_NETWORK
self.test_node = self.nodes[0].add_p2p_connection(TestP2PConn(), services=NODE_NETWORK | NODE_WITNESS)
# self.old_node sets only NODE_NETWORK
self.old_node = self.nodes[0].add_p2p_connection(TestP2PConn(), services=NODE_NETWORK)
# self.std_node is for testing node1 (fRequireStandard=true)
self.std_node = self.nodes[1].add_p2p_connection(TestP2PConn(), services=NODE_NETWORK | NODE_WITNESS)
assert self.test_node.nServices & NODE_WITNESS != 0
# Keep a place to store utxo's that can be used in later tests
self.utxo = []
# Segwit status 'defined'
self.segwit_status = 'defined'
self.test_non_witness_transaction()
self.test_unnecessary_witness_before_segwit_activation()
self.test_v0_outputs_arent_spendable()
self.test_block_relay()
self.advance_to_segwit_started()
# Segwit status 'started'
self.test_getblocktemplate_before_lockin()
self.advance_to_segwit_lockin()
# Segwit status 'locked_in'
self.test_unnecessary_witness_before_segwit_activation()
self.test_witness_tx_relay_before_segwit_activation()
self.test_block_relay()
self.test_standardness_v0()
self.advance_to_segwit_active()
# Segwit status 'active'
self.test_p2sh_witness()
self.test_witness_commitments()
self.test_block_malleability()
self.test_witness_block_size()
self.test_submit_block()
self.test_extra_witness_data()
self.test_max_witness_push_length()
self.test_max_witness_program_length()
self.test_witness_input_length()
self.test_block_relay()
self.test_tx_relay_after_segwit_activation()
self.test_standardness_v0()
self.test_segwit_versions()
self.test_premature_coinbase_witness_spend()
self.test_uncompressed_pubkey()
self.test_signature_version_1()
self.test_non_standard_witness_blinding()
self.test_non_standard_witness()
self.test_upgrade_after_activation()
self.test_witness_sigops()
# Individual tests
def subtest(func): # noqa: N805
"""Wraps the subtests for logging and state assertions."""
def func_wrapper(self, *args, **kwargs):
self.log.info("Subtest: {} (Segwit status = {})".format(func.__name__, self.segwit_status))
# Assert segwit status is as expected
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], self.segwit_status)
func(self, *args, **kwargs)
# Each subtest should leave some utxos for the next subtest
assert self.utxo
sync_blocks(self.nodes)
# Assert segwit status is as expected at end of subtest
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], self.segwit_status)
return func_wrapper
@subtest
def test_non_witness_transaction(self):
"""See if sending a regular transaction works, and create a utxo to use in later tests."""
# Mine a block with an anyone-can-spend coinbase,
# let it mature, then try to spend it.
block = self.build_next_block(version=1)
block.solve()
self.test_node.send_message(msg_block(block))
self.test_node.sync_with_ping() # make sure the block was processed
txid = block.vtx[0].sha256
self.nodes[0].generate(99) # let the block mature
# Create a transaction that spends the coinbase
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(txid, 0), b""))
tx.vout.append(CTxOut(49 * 100000000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
tx.calc_sha256()
# Check that serializing it with or without witness is the same
# This is a sanity check of our testing framework.
assert_equal(msg_tx(tx).serialize(), msg_witness_tx(tx).serialize())
self.test_node.send_message(msg_witness_tx(tx))
self.test_node.sync_with_ping() # make sure the tx was processed
assert(tx.hash in self.nodes[0].getrawmempool())
# Save this transaction for later
self.utxo.append(UTXO(tx.sha256, 0, 49 * 100000000))
self.nodes[0].generate(1)
@subtest
def test_unnecessary_witness_before_segwit_activation(self):
"""Verify that blocks with witnesses are rejected before activation."""
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, CScript([OP_TRUE])))
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)])]
# Verify the hash with witness differs from the txid
# (otherwise our testing framework must be broken!)
tx.rehash()
assert(tx.sha256 != tx.calc_sha256(with_witness=True))
# Construct a segwit-signaling block that includes the transaction.
block = self.build_next_block(version=(VB_TOP_BITS | (1 << VB_WITNESS_BIT)))
self.update_witness_block_with_transactions(block, [tx])
# Sending witness data before activation is not allowed (anti-spam
# rule).
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
wait_until(lambda: 'reject' in self.test_node.last_message and self.test_node.last_message["reject"].reason == b"unexpected-witness")
# But it should not be permanently marked bad...
# Resend without witness information.
self.test_node.send_message(msg_block(block))
self.test_node.sync_with_ping()
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
# Update our utxo list; we spent the first entry.
self.utxo.pop(0)
self.utxo.append(UTXO(tx.sha256, 0, tx.vout[0].nValue))
@subtest
def test_block_relay(self):
"""Test that block requests to NODE_WITNESS peer are with MSG_WITNESS_FLAG.
This is true regardless of segwit activation.
Also test that we don't ask for blocks from unupgraded peers."""
blocktype = 2 | MSG_WITNESS_FLAG
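        # (2 is MSG_BLOCK; OR'ing in MSG_WITNESS_FLAG requests the witness
        # serialization of the block.)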
# test_node has set NODE_WITNESS, so all getdata requests should be for
# witness blocks.
# Test announcing a block via inv results in a getdata, and that
# announcing a version 4 or random VB block with a header results in a getdata
block1 = self.build_next_block()
block1.solve()
self.test_node.announce_block_and_wait_for_getdata(block1, use_header=False)
assert(self.test_node.last_message["getdata"].inv[0].type == blocktype)
test_witness_block(self.nodes[0], self.test_node, block1, True)
block2 = self.build_next_block(version=4)
block2.solve()
self.test_node.announce_block_and_wait_for_getdata(block2, use_header=True)
assert(self.test_node.last_message["getdata"].inv[0].type == blocktype)
test_witness_block(self.nodes[0], self.test_node, block2, True)
block3 = self.build_next_block(version=(VB_TOP_BITS | (1 << 15)))
block3.solve()
self.test_node.announce_block_and_wait_for_getdata(block3, use_header=True)
assert(self.test_node.last_message["getdata"].inv[0].type == blocktype)
test_witness_block(self.nodes[0], self.test_node, block3, True)
# Check that we can getdata for witness blocks or regular blocks,
# and the right thing happens.
if self.segwit_status != 'active':
# Before activation, we should be able to request old blocks with
# or without witness, and they should be the same.
chain_height = self.nodes[0].getblockcount()
# Pick 10 random blocks on main chain, and verify that getdata's
# for MSG_BLOCK, MSG_WITNESS_BLOCK, and rpc getblock() are equal.
all_heights = list(range(chain_height + 1))
random.shuffle(all_heights)
all_heights = all_heights[0:10]
for height in all_heights:
block_hash = self.nodes[0].getblockhash(height)
rpc_block = self.nodes[0].getblock(block_hash, False)
block_hash = int(block_hash, 16)
block = self.test_node.request_block(block_hash, 2)
wit_block = self.test_node.request_block(block_hash, 2 | MSG_WITNESS_FLAG)
assert_equal(block.serialize(True), wit_block.serialize(True))
assert_equal(block.serialize(), hex_str_to_bytes(rpc_block))
else:
# After activation, witness blocks and non-witness blocks should
# be different. Verify rpc getblock() returns witness blocks, while
# getdata respects the requested type.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [])
# This gives us a witness commitment.
assert(len(block.vtx[0].wit.vtxinwit) == 1)
assert(len(block.vtx[0].wit.vtxinwit[0].scriptWitness.stack) == 1)
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Now try to retrieve it...
rpc_block = self.nodes[0].getblock(block.hash, False)
non_wit_block = self.test_node.request_block(block.sha256, 2)
wit_block = self.test_node.request_block(block.sha256, 2 | MSG_WITNESS_FLAG)
assert_equal(wit_block.serialize(True), hex_str_to_bytes(rpc_block))
assert_equal(wit_block.serialize(False), non_wit_block.serialize())
assert_equal(wit_block.serialize(True), block.serialize(True))
# Test size, vsize, weight
rpc_details = self.nodes[0].getblock(block.hash, True)
assert_equal(rpc_details["size"], len(block.serialize(True)))
assert_equal(rpc_details["strippedsize"], len(block.serialize(False)))
weight = 3 * len(block.serialize(False)) + len(block.serialize(True))
assert_equal(rpc_details["weight"], weight)
        # An upgraded node should not ask for blocks from an unupgraded peer.
block4 = self.build_next_block(version=4)
block4.solve()
self.old_node.getdataset = set()
# Blocks can be requested via direct-fetch (immediately upon processing the announcement)
# or via parallel download (with an indeterminate delay from processing the announcement)
# so to test that a block is NOT requested, we could guess a time period to sleep for,
# and then check. We can avoid the sleep() by taking advantage of transaction getdata's
# being processed after block getdata's, and announce a transaction as well,
# and then check to see if that particular getdata has been received.
# Since 0.14, inv's will only be responded to with a getheaders, so send a header
# to announce this block.
msg = msg_headers()
msg.headers = [CBlockHeader(block4)]
self.old_node.send_message(msg)
self.old_node.announce_tx_and_wait_for_getdata(block4.vtx[0])
assert(block4.sha256 not in self.old_node.getdataset)
@subtest
def test_v0_outputs_arent_spendable(self):
"""Test that v0 outputs aren't spendable before segwit activation.
~6 months after segwit activation, the SCRIPT_VERIFY_WITNESS flag was
backdated so that it applies to all blocks, going back to the genesis
block.
Consequently, version 0 witness outputs are never spendable without
witness, and so can't be spent before segwit activation (the point at which
blocks are permitted to contain witnesses)."""
# node2 doesn't need to be connected for this test.
        # (If it's connected, node0 may propagate an invalid block to it over
# compact blocks and the nodes would have inconsistent tips.)
disconnect_nodes(self.nodes[0], 2)
# Create two outputs, a p2wsh and p2sh-p2wsh
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
p2sh_pubkey = hash160(script_pubkey)
p2sh_script_pubkey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])
value = self.utxo[0].nValue // 3
tx = CTransaction()
tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b'')]
tx.vout = [CTxOut(value, script_pubkey), CTxOut(value, p2sh_script_pubkey)]
tx.vout.append(CTxOut(value, CScript([OP_TRUE])))
tx.rehash()
txid = tx.sha256
# Add it to a block
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
# Verify that segwit isn't activated. A block serialized with witness
# should be rejected prior to activation.
test_witness_block(self.nodes[0], self.test_node, block, accepted=False, with_witness=True, reason=b'unexpected-witness')
# Now send the block without witness. It should be accepted
test_witness_block(self.nodes[0], self.test_node, block, accepted=True, with_witness=False)
# Now try to spend the outputs. This should fail since SCRIPT_VERIFY_WITNESS is always enabled.
p2wsh_tx = CTransaction()
p2wsh_tx.vin = [CTxIn(COutPoint(txid, 0), b'')]
p2wsh_tx.vout = [CTxOut(value, CScript([OP_TRUE]))]
p2wsh_tx.wit.vtxinwit.append(CTxInWitness())
p2wsh_tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([OP_TRUE])]
p2wsh_tx.rehash()
p2sh_p2wsh_tx = CTransaction()
p2sh_p2wsh_tx.vin = [CTxIn(COutPoint(txid, 1), CScript([script_pubkey]))]
p2sh_p2wsh_tx.vout = [CTxOut(value, CScript([OP_TRUE]))]
p2sh_p2wsh_tx.wit.vtxinwit.append(CTxInWitness())
p2sh_p2wsh_tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([OP_TRUE])]
p2sh_p2wsh_tx.rehash()
for tx in [p2wsh_tx, p2sh_p2wsh_tx]:
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
# When the block is serialized with a witness, the block will be rejected because witness
# data isn't allowed in blocks that don't commit to witness data.
test_witness_block(self.nodes[0], self.test_node, block, accepted=False, with_witness=True, reason=b'unexpected-witness')
# When the block is serialized without witness, validation fails because the transaction is
# invalid (transactions are always validated with SCRIPT_VERIFY_WITNESS so a segwit v0 transaction
# without a witness is invalid).
# Note: The reject reason for this failure could be
# 'block-validation-failed' (if script check threads > 1) or
# 'non-mandatory-script-verify-flag (Witness program was passed an
# empty witness)' (otherwise).
# TODO: support multiple acceptable reject reasons.
test_witness_block(self.nodes[0], self.test_node, block, accepted=False, with_witness=False)
connect_nodes(self.nodes[0], 2)
self.utxo.pop(0)
self.utxo.append(UTXO(txid, 2, value))
@subtest
def advance_to_segwit_started(self):
"""Mine enough blocks for segwit's vb state to be 'started'."""
height = self.nodes[0].getblockcount()
# Will need to rewrite the tests here if we are past the first period
assert(height < VB_PERIOD - 1)
# Advance to end of period, status should now be 'started'
self.nodes[0].generate(VB_PERIOD - height - 1)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'started')
self.segwit_status = 'started'
@subtest
def test_getblocktemplate_before_lockin(self):
# Node0 is segwit aware, node2 is not.
for node in [self.nodes[0], self.nodes[2]]:
gbt_results = node.getblocktemplate()
block_version = gbt_results['version']
            # Even if we aren't indicating segwit support in the request, a
            # segwit-aware node (node0) still signals the witness version bit,
            # while the non-segwit node (node2) does not.
assert_equal((block_version & (1 << VB_WITNESS_BIT) != 0), node == self.nodes[0])
# If we don't specify the segwit rule, then we won't get a default
# commitment.
assert('default_witness_commitment' not in gbt_results)
# Workaround:
# Can either change the tip, or change the mempool and wait 5 seconds
# to trigger a recomputation of getblocktemplate.
txid = int(self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1), 16)
# Using mocktime lets us avoid sleep()
sync_mempools(self.nodes)
self.nodes[0].setmocktime(int(time.time()) + 10)
self.nodes[2].setmocktime(int(time.time()) + 10)
for node in [self.nodes[0], self.nodes[2]]:
gbt_results = node.getblocktemplate({"rules": ["segwit"]})
block_version = gbt_results['version']
if node == self.nodes[2]:
# If this is a non-segwit node, we should still not get a witness
# commitment, nor a version bit signalling segwit.
assert_equal(block_version & (1 << VB_WITNESS_BIT), 0)
assert('default_witness_commitment' not in gbt_results)
else:
# For segwit-aware nodes, check the version bit and the witness
# commitment are correct.
assert(block_version & (1 << VB_WITNESS_BIT) != 0)
assert('default_witness_commitment' in gbt_results)
witness_commitment = gbt_results['default_witness_commitment']
                # Check that default_witness_commitment is correct.
witness_root = CBlock.get_merkle_root([ser_uint256(0),
ser_uint256(txid)])
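                # (The coinbase's wtxid is defined to be all zeros, hence the
                # ser_uint256(0) leaf in the witness merkle tree.)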
script = get_witness_script(witness_root, 0)
assert_equal(witness_commitment, bytes_to_hex_str(script))
# undo mocktime
self.nodes[0].setmocktime(0)
self.nodes[2].setmocktime(0)
@subtest
def advance_to_segwit_lockin(self):
"""Mine enough blocks to lock in segwit, but don't activate."""
height = self.nodes[0].getblockcount()
# Advance to end of period, and verify lock-in happens at the end
self.nodes[0].generate(VB_PERIOD - 1)
height = self.nodes[0].getblockcount()
assert((height % VB_PERIOD) == VB_PERIOD - 2)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'started')
self.nodes[0].generate(1)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'locked_in')
self.segwit_status = 'locked_in'
@subtest
def test_witness_tx_relay_before_segwit_activation(self):
# Generate a transaction that doesn't require a witness, but send it
# with a witness. Should be rejected for premature-witness, but should
# not be added to recently rejected list.
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [b'a']
tx.rehash()
tx_hash = tx.sha256
tx_value = tx.vout[0].nValue
# Verify that if a peer doesn't set nServices to include NODE_WITNESS,
# the getdata is just for the non-witness portion.
self.old_node.announce_tx_and_wait_for_getdata(tx)
assert(self.old_node.last_message["getdata"].inv[0].type == 1)
        # Since we haven't delivered the tx yet, inv'ing the same tx from
        # a witness-enabled peer should not result in a getdata.
self.test_node.announce_tx_and_wait_for_getdata(tx, timeout=2, success=False)
        # Delivering this transaction with a witness should fail (no matter
        # who it's from).
assert_equal(len(self.nodes[0].getrawmempool()), 0)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
test_transaction_acceptance(self.nodes[0], self.old_node, tx, with_witness=True, accepted=False)
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=True, accepted=False)
# But eliminating the witness should fix it
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=False, accepted=True)
# Cleanup: mine the first transaction and update utxo
self.nodes[0].generate(1)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
self.utxo.pop(0)
self.utxo.append(UTXO(tx_hash, 0, tx_value))
@subtest
def test_standardness_v0(self):
"""Test V0 txout standardness.
V0 segwit outputs and inputs are always standard.
        V0 segwit inputs may only be mined after segwit activation."""
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
p2sh_pubkey = hash160(witness_program)
p2sh_script_pubkey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])
# First prepare a p2sh output (so that spending it will pass standardness)
p2sh_tx = CTransaction()
p2sh_tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")]
p2sh_tx.vout = [CTxOut(self.utxo[0].nValue - 1000, p2sh_script_pubkey)]
p2sh_tx.rehash()
# Mine it on test_node to create the confirmed output.
test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_tx, with_witness=True, accepted=True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# Now test standardness of v0 P2WSH outputs.
# Start by creating a transaction with two outputs.
tx = CTransaction()
tx.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_program]))]
tx.vout = [CTxOut(p2sh_tx.vout[0].nValue - 10000, script_pubkey)]
tx.vout.append(CTxOut(8000, script_pubkey)) # Might burn this later
tx.vin[0].nSequence = BIP125_SEQUENCE_NUMBER # Just to have the option to bump this tx from the mempool
tx.rehash()
# This is always accepted, since the mempool policy is to consider segwit as always active
# and thus allow segwit outputs
test_transaction_acceptance(self.nodes[1], self.std_node, tx, with_witness=True, accepted=True)
        # Now create something that looks like a P2WPKH output. This won't be spendable.
script_pubkey = CScript([OP_0, hash160(witness_hash)])
tx2 = CTransaction()
# tx was accepted, so we spend the second output.
tx2.vin = [CTxIn(COutPoint(tx.sha256, 1), b"")]
tx2.vout = [CTxOut(7000, script_pubkey)]
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
tx2.rehash()
test_transaction_acceptance(self.nodes[1], self.std_node, tx2, with_witness=True, accepted=True)
# Now update self.utxo for later tests.
tx3 = CTransaction()
        # tx and tx2 were both accepted. Don't bother trying to reclaim the
        # P2WPKH output; just send tx's first output back to an anyone-can-spend.
sync_mempools([self.nodes[0], self.nodes[1]])
tx3.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")]
tx3.vout = [CTxOut(tx.vout[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE]))]
tx3.wit.vtxinwit.append(CTxInWitness())
tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
tx3.rehash()
if self.segwit_status != 'active':
# Just check mempool acceptance, but don't add the transaction to the mempool, since witness is disallowed
# in blocks and the tx is impossible to mine right now.
assert_equal(self.nodes[0].testmempoolaccept([bytes_to_hex_str(tx3.serialize_with_witness())]), [{'txid': tx3.hash, 'allowed': True}])
# Create the same output as tx3, but by replacing tx
tx3_out = tx3.vout[0]
tx3 = tx
tx3.vout = [tx3_out]
tx3.rehash()
assert_equal(self.nodes[0].testmempoolaccept([bytes_to_hex_str(tx3.serialize_with_witness())]), [{'txid': tx3.hash, 'allowed': True}])
test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=True, accepted=True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
self.utxo.pop(0)
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
assert_equal(len(self.nodes[1].getrawmempool()), 0)
@subtest
def advance_to_segwit_active(self):
"""Mine enough blocks to activate segwit."""
height = self.nodes[0].getblockcount()
self.nodes[0].generate(VB_PERIOD - (height % VB_PERIOD) - 2)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'locked_in')
self.nodes[0].generate(1)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'active')
self.segwit_status = 'active'
@subtest
def test_p2sh_witness(self):
"""Test P2SH wrapped witness programs."""
# Prepare the p2sh-wrapped witness output
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
p2wsh_pubkey = CScript([OP_0, witness_hash])
p2sh_witness_hash = hash160(p2wsh_pubkey)
script_pubkey = CScript([OP_HASH160, p2sh_witness_hash, OP_EQUAL])
script_sig = CScript([p2wsh_pubkey]) # a push of the redeem script
# Fund the P2SH output
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey))
tx.rehash()
# Verify mempool acceptance and block validity
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=False, accepted=True)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True, with_witness=True)
sync_blocks(self.nodes)
# Now test attempts to spend the output.
spend_tx = CTransaction()
spend_tx.vin.append(CTxIn(COutPoint(tx.sha256, 0), script_sig))
spend_tx.vout.append(CTxOut(tx.vout[0].nValue - 1000, CScript([OP_TRUE])))
spend_tx.rehash()
# This transaction should not be accepted into the mempool pre- or
# post-segwit. Mempool acceptance will use SCRIPT_VERIFY_WITNESS which
# will require a witness to spend a witness program regardless of
        # segwit activation. Note that older wescoind versions that are not
        # segwit-aware would also reject this for failing CLEANSTACK.
test_transaction_acceptance(self.nodes[0], self.test_node, spend_tx, with_witness=False, accepted=False)
        # Try to put the witness script in the scriptSig; this should also fail.
        spend_tx.vin[0].scriptSig = CScript([p2wsh_pubkey, b'a'])
spend_tx.rehash()
test_transaction_acceptance(self.nodes[0], self.test_node, spend_tx, with_witness=False, accepted=False)
# Now put the witness script in the witness, should succeed after
# segwit activates.
spend_tx.vin[0].scriptSig = script_sig
spend_tx.rehash()
spend_tx.wit.vtxinwit.append(CTxInWitness())
spend_tx.wit.vtxinwit[0].scriptWitness.stack = [b'a', witness_program]
# Verify mempool acceptance
test_transaction_acceptance(self.nodes[0], self.test_node, spend_tx, with_witness=True, accepted=True)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [spend_tx])
# If we're after activation, then sending this with witnesses should be valid.
# This no longer works before activation, because SCRIPT_VERIFY_WITNESS
# is always set.
# TODO: rewrite this test to make clear that it only works after activation.
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Update self.utxo
self.utxo.pop(0)
self.utxo.append(UTXO(spend_tx.sha256, 0, spend_tx.vout[0].nValue))
@subtest
def test_witness_commitments(self):
"""Test witness commitments.
This test can only be run after segwit has activated."""
# First try a correct witness commitment.
block = self.build_next_block()
add_witness_commitment(block)
block.solve()
# Test the test -- witness serialization should be different
assert(msg_witness_block(block).serialize() != msg_block(block).serialize())
# This empty block should be valid.
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Try to tweak the nonce
block_2 = self.build_next_block()
add_witness_commitment(block_2, nonce=28)
block_2.solve()
# The commitment should have changed!
assert(block_2.vtx[0].vout[-1] != block.vtx[0].vout[-1])
# This should also be valid.
test_witness_block(self.nodes[0], self.test_node, block_2, accepted=True)
# Now test commitments with actual transactions
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
# Let's construct a witness program
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey))
tx.rehash()
# tx2 will spend tx1, and send back to a regular anyone-can-spend address
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, witness_program))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
tx2.rehash()
block_3 = self.build_next_block()
self.update_witness_block_with_transactions(block_3, [tx, tx2], nonce=1)
# Add an extra OP_RETURN output that matches the witness commitment template,
# even though it has extra data after the incorrect commitment.
# This block should fail.
block_3.vtx[0].vout.append(CTxOut(0, CScript([OP_RETURN, WITNESS_COMMITMENT_HEADER + ser_uint256(2), 10])))
block_3.vtx[0].rehash()
block_3.hashMerkleRoot = block_3.calc_merkle_root()
block_3.rehash()
block_3.solve()
test_witness_block(self.nodes[0], self.test_node, block_3, accepted=False)
# Add a different commitment with different nonce, but in the
# right location, and with some funds burned(!).
# This should succeed (nValue shouldn't affect finding the
# witness commitment).
add_witness_commitment(block_3, nonce=0)
block_3.vtx[0].vout[0].nValue -= 1
block_3.vtx[0].vout[-1].nValue += 1
block_3.vtx[0].rehash()
block_3.hashMerkleRoot = block_3.calc_merkle_root()
block_3.rehash()
        assert(len(block_3.vtx[0].vout) == 4)  # the coinbase output plus 3 OP_RETURN outputs
block_3.solve()
test_witness_block(self.nodes[0], self.test_node, block_3, accepted=True)
# Finally test that a block with no witness transactions can
# omit the commitment.
block_4 = self.build_next_block()
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
tx3.vout.append(CTxOut(tx.vout[0].nValue - 1000, witness_program))
tx3.rehash()
block_4.vtx.append(tx3)
block_4.hashMerkleRoot = block_4.calc_merkle_root()
block_4.solve()
test_witness_block(self.nodes[0], self.test_node, block_4, with_witness=False, accepted=True)
# Update available utxo's for use in later test.
self.utxo.pop(0)
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
@subtest
def test_block_malleability(self):
# Make sure that a block that has too big a virtual size
# because of a too-large coinbase witness is not permanently
# marked bad.
block = self.build_next_block()
add_witness_commitment(block)
block.solve()
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.append(b'a' * 5000000)
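        # 5M witness bytes add ~1.25M to the virtual size (witness data gets
        # a 4x discount), pushing the block over the base-size limit.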
assert(get_virtual_size(block) > MAX_BLOCK_BASE_SIZE)
# We can't send over the p2p network, because this is too big to relay
# TODO: repeat this test with a block that can be relayed
self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
assert(self.nodes[0].getbestblockhash() != block.hash)
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.pop()
assert(get_virtual_size(block) < MAX_BLOCK_BASE_SIZE)
self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
assert(self.nodes[0].getbestblockhash() == block.hash)
# Now make sure that malleating the witness reserved value doesn't
# result in a block permanently marked bad.
block = self.build_next_block()
add_witness_commitment(block)
block.solve()
# Change the nonce -- should not cause the block to be permanently
# failed
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ser_uint256(1)]
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Changing the witness reserved value doesn't change the block hash
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ser_uint256(0)]
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
@subtest
def test_witness_block_size(self):
# TODO: Test that non-witness carrying blocks can't exceed 1MB
# Skipping this test for now; this is covered in p2p-fullblocktest.py
# Test that witness-bearing blocks are limited at ceil(base + wit/4) <= 1MB.
block = self.build_next_block()
assert(len(self.utxo) > 0)
# Create a P2WSH transaction.
# The witness program will be a bunch of OP_2DROP's, followed by OP_TRUE.
# This should give us plenty of room to tweak the spending tx's
# virtual size.
NUM_DROPS = 200 # 201 max ops per script!
NUM_OUTPUTS = 50
witness_program = CScript([OP_2DROP] * NUM_DROPS + [OP_TRUE])
witness_hash = uint256_from_str(sha256(witness_program))
script_pubkey = CScript([OP_0, ser_uint256(witness_hash)])
prevout = COutPoint(self.utxo[0].sha256, self.utxo[0].n)
value = self.utxo[0].nValue
parent_tx = CTransaction()
parent_tx.vin.append(CTxIn(prevout, b""))
child_value = int(value / NUM_OUTPUTS)
for i in range(NUM_OUTPUTS):
parent_tx.vout.append(CTxOut(child_value, script_pubkey))
parent_tx.vout[0].nValue -= 50000
assert(parent_tx.vout[0].nValue > 0)
parent_tx.rehash()
child_tx = CTransaction()
for i in range(NUM_OUTPUTS):
child_tx.vin.append(CTxIn(COutPoint(parent_tx.sha256, i), b""))
child_tx.vout = [CTxOut(value - 100000, CScript([OP_TRUE]))]
for i in range(NUM_OUTPUTS):
child_tx.wit.vtxinwit.append(CTxInWitness())
child_tx.wit.vtxinwit[-1].scriptWitness.stack = [b'a' * 195] * (2 * NUM_DROPS) + [witness_program]
child_tx.rehash()
self.update_witness_block_with_transactions(block, [parent_tx, child_tx])
vsize = get_virtual_size(block)
additional_bytes = (MAX_BLOCK_BASE_SIZE - vsize) * 4
i = 0
while additional_bytes > 0:
# Add some more bytes to each input until we hit MAX_BLOCK_BASE_SIZE+1
extra_bytes = min(additional_bytes + 1, 55)
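            # The "+ 1" deliberately overshoots, leaving the block exactly one
            # virtual byte over MAX_BLOCK_BASE_SIZE (asserted below).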
block.vtx[-1].wit.vtxinwit[int(i / (2 * NUM_DROPS))].scriptWitness.stack[i % (2 * NUM_DROPS)] = b'a' * (195 + extra_bytes)
additional_bytes -= extra_bytes
i += 1
block.vtx[0].vout.pop() # Remove old commitment
add_witness_commitment(block)
block.solve()
vsize = get_virtual_size(block)
assert_equal(vsize, MAX_BLOCK_BASE_SIZE + 1)
# Make sure that our test case would exceed the old max-network-message
# limit
assert(len(block.serialize(True)) > 2 * 1024 * 1024)
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Now resize the second transaction to make the block fit.
cur_length = len(block.vtx[-1].wit.vtxinwit[0].scriptWitness.stack[0])
block.vtx[-1].wit.vtxinwit[0].scriptWitness.stack[0] = b'a' * (cur_length - 1)
block.vtx[0].vout.pop()
add_witness_commitment(block)
block.solve()
assert(get_virtual_size(block) == MAX_BLOCK_BASE_SIZE)
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Update available utxo's
self.utxo.pop(0)
self.utxo.append(UTXO(block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue))
@subtest
def test_submit_block(self):
"""Test that submitblock adds the nonce automatically when possible."""
block = self.build_next_block()
# Try using a custom nonce and then don't supply it.
        # This can't possibly work.
add_witness_commitment(block, nonce=1)
block.vtx[0].wit = CTxWitness() # drop the nonce
block.solve()
self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
assert(self.nodes[0].getbestblockhash() != block.hash)
# Now redo commitment with the standard nonce, but let wescoind fill it in.
add_witness_commitment(block, nonce=0)
block.vtx[0].wit = CTxWitness()
block.solve()
self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
# This time, add a tx with non-empty witness, but don't supply
# the commitment.
block_2 = self.build_next_block()
add_witness_commitment(block_2)
block_2.solve()
# Drop commitment and nonce -- submitblock should not fill in.
block_2.vtx[0].vout.pop()
block_2.vtx[0].wit = CTxWitness()
self.nodes[0].submitblock(bytes_to_hex_str(block_2.serialize(True)))
# Tip should not advance!
assert(self.nodes[0].getbestblockhash() != block_2.hash)
@subtest
def test_extra_witness_data(self):
"""Test extra witness data in a transaction."""
block = self.build_next_block()
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
# First try extra witness data on a tx that doesn't require a witness
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 2000, script_pubkey))
tx.vout.append(CTxOut(1000, CScript([OP_TRUE]))) # non-witness output
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([])]
tx.rehash()
self.update_witness_block_with_transactions(block, [tx])
# Extra witness data should not be allowed.
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Try extra signature data. Ok if we're not spending a witness output.
block.vtx[1].wit.vtxinwit = []
block.vtx[1].vin[0].scriptSig = CScript([OP_0])
block.vtx[1].rehash()
add_witness_commitment(block)
block.solve()
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Now try extra witness/signature data on an input that DOES require a
# witness
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b"")) # witness output
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 1), b"")) # non-witness
tx2.vout.append(CTxOut(tx.vout[0].nValue, CScript([OP_TRUE])))
tx2.wit.vtxinwit.extend([CTxInWitness(), CTxInWitness()])
tx2.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)]), CScript([CScriptNum(1)]), witness_program]
tx2.wit.vtxinwit[1].scriptWitness.stack = [CScript([OP_TRUE])]
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2])
# This has extra witness data, so it should fail.
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Now get rid of the extra witness, but add extra scriptSig data
tx2.vin[0].scriptSig = CScript([OP_TRUE])
tx2.vin[1].scriptSig = CScript([OP_TRUE])
tx2.wit.vtxinwit[0].scriptWitness.stack.pop(0)
tx2.wit.vtxinwit[1].scriptWitness.stack = []
tx2.rehash()
add_witness_commitment(block)
block.solve()
# This has extra signature data for a witness input, so it should fail.
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Now get rid of the extra scriptsig on the witness input, and verify
# success (even with extra scriptsig data in the non-witness input)
tx2.vin[0].scriptSig = b""
tx2.rehash()
add_witness_commitment(block)
block.solve()
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Update utxo for later tests
self.utxo.pop(0)
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
@subtest
def test_max_witness_push_length(self):
"""Test that witness stack can only allow up to 520 byte pushes."""
block = self.build_next_block()
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey))
tx.rehash()
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, CScript([OP_TRUE])))
tx2.wit.vtxinwit.append(CTxInWitness())
# First try a 521-byte stack element
tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a' * (MAX_SCRIPT_ELEMENT_SIZE + 1), witness_program]
tx2.rehash()
self.update_witness_block_with_transactions(block, [tx, tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Now reduce the length of the stack element
tx2.wit.vtxinwit[0].scriptWitness.stack[0] = b'a' * (MAX_SCRIPT_ELEMENT_SIZE)
add_witness_commitment(block)
block.solve()
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Update the utxo for later tests
self.utxo.pop()
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
@subtest
def test_max_witness_program_length(self):
"""Test that witness outputs greater than 10kB can't be spent."""
MAX_PROGRAM_LENGTH = 10000
# This program is 19 max pushes (9937 bytes), then 64 more opcode-bytes.
long_witness_program = CScript([b'a' * 520] * 19 + [OP_DROP] * 63 + [OP_TRUE])
assert(len(long_witness_program) == MAX_PROGRAM_LENGTH + 1)
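        # (Each 520-byte push serializes as OP_PUSHDATA2 + 2 length bytes +
        # 520 data bytes = 523 bytes; 19 * 523 = 9937, plus 64 one-byte
        # opcodes gives 10001 bytes -- one over the limit.)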
long_witness_hash = sha256(long_witness_program)
long_script_pubkey = CScript([OP_0, long_witness_hash])
block = self.build_next_block()
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, long_script_pubkey))
tx.rehash()
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, CScript([OP_TRUE])))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a'] * 44 + [long_witness_program]
tx2.rehash()
self.update_witness_block_with_transactions(block, [tx, tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Try again with one less byte in the witness program
witness_program = CScript([b'a' * 520] * 19 + [OP_DROP] * 62 + [OP_TRUE])
assert(len(witness_program) == MAX_PROGRAM_LENGTH)
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
tx.vout[0] = CTxOut(tx.vout[0].nValue, script_pubkey)
tx.rehash()
tx2.vin[0].prevout.hash = tx.sha256
tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a'] * 43 + [witness_program]
tx2.rehash()
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx, tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
self.utxo.pop()
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
@subtest
def test_witness_input_length(self):
"""Test that vin length must match vtxinwit length."""
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
# Create a transaction that splits our utxo into many outputs
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
value = self.utxo[0].nValue
for i in range(10):
tx.vout.append(CTxOut(int(value / 10), script_pubkey))
tx.vout[0].nValue -= 1000
assert(tx.vout[0].nValue >= 0)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Try various ways to spend tx that should all break.
# This "broken" transaction serializer will not normalize
# the length of vtxinwit.
class BrokenCTransaction(CTransaction):
def serialize_with_witness(self):
flags = 0
if not self.wit.is_null():
flags |= 1
r = b""
r += struct.pack("<i", self.nVersion)
if flags:
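                        # ser_vector of an empty list emits a single 0x00
                        # byte, which doubles as the BIP144 segwit marker;
                        # the flag byte (0x01) is then written explicitly.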
dummy = []
r += ser_vector(dummy)
r += struct.pack("<B", flags)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
if flags & 1:
r += self.wit.serialize()
r += struct.pack("<I", self.nLockTime)
return r
tx2 = BrokenCTransaction()
for i in range(10):
tx2.vin.append(CTxIn(COutPoint(tx.sha256, i), b""))
tx2.vout.append(CTxOut(value - 3000, CScript([OP_TRUE])))
# First try using a too long vtxinwit
for i in range(11):
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[i].scriptWitness.stack = [b'a', witness_program]
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Now try using a too short vtxinwit
tx2.wit.vtxinwit.pop()
tx2.wit.vtxinwit.pop()
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Now make one of the intermediate witnesses be incorrect
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[-1].scriptWitness.stack = [b'a', witness_program]
tx2.wit.vtxinwit[5].scriptWitness.stack = [witness_program]
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Fix the broken witness and the block should be accepted.
tx2.wit.vtxinwit[5].scriptWitness.stack = [b'a', witness_program]
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
self.utxo.pop()
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
@subtest
def test_tx_relay_after_segwit_activation(self):
"""Test transaction relay after segwit activation.
After segwit activates, verify that mempool:
- rejects transactions with unnecessary/extra witnesses
- accepts transactions with valid witnesses
and that witness transactions are relayed to non-upgraded peers."""
# Generate a transaction that doesn't require a witness, but send it
# with a witness. Should be rejected because we can't use a witness
# when spending a non-witness output.
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [b'a']
tx.rehash()
tx_hash = tx.sha256
# Verify that unnecessary witnesses are rejected.
self.test_node.announce_tx_and_wait_for_getdata(tx)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=True, accepted=False)
# Verify that removing the witness succeeds.
self.test_node.announce_tx_and_wait_for_getdata(tx)
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=False, accepted=True)
# Now try to add extra witness data to a valid witness tx.
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx_hash, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, script_pubkey))
tx2.rehash()
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
tx3.wit.vtxinwit.append(CTxInWitness())
        # Add a witness that is too large for IsStandard and check that it
        # does not enter the reject filter.
p2sh_program = CScript([OP_TRUE])
p2sh_pubkey = hash160(p2sh_program)
witness_program2 = CScript([b'a' * 400000])
tx3.vout.append(CTxOut(tx2.vout[0].nValue - 1000, CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])))
tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program2]
tx3.rehash()
# Node will not be blinded to the transaction
self.std_node.announce_tx_and_wait_for_getdata(tx3)
test_transaction_acceptance(self.nodes[1], self.std_node, tx3, True, False, b'tx-size')
self.std_node.announce_tx_and_wait_for_getdata(tx3)
test_transaction_acceptance(self.nodes[1], self.std_node, tx3, True, False, b'tx-size')
# Remove witness stuffing, instead add extra witness push on stack
tx3.vout[0] = CTxOut(tx2.vout[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE]))
tx3.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)]), witness_program]
tx3.rehash()
test_transaction_acceptance(self.nodes[0], self.test_node, tx2, with_witness=True, accepted=True)
test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=True, accepted=False)
# Get rid of the extra witness, and verify acceptance.
tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
# Also check that old_node gets a tx announcement, even though this is
# a witness transaction.
self.old_node.wait_for_inv([CInv(1, tx2.sha256)]) # wait until tx2 was inv'ed
test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=True, accepted=True)
self.old_node.wait_for_inv([CInv(1, tx3.sha256)])
# Test that getrawtransaction returns correct witness information
# hash, size, vsize
raw_tx = self.nodes[0].getrawtransaction(tx3.hash, 1)
assert_equal(int(raw_tx["hash"], 16), tx3.calc_sha256(True))
assert_equal(raw_tx["size"], len(tx3.serialize_with_witness()))
weight = len(tx3.serialize_with_witness()) + 3 * len(tx3.serialize_without_witness())
vsize = math.ceil(weight / 4)
assert_equal(raw_tx["vsize"], vsize)
assert_equal(raw_tx["weight"], weight)
assert_equal(len(raw_tx["vin"][0]["txinwitness"]), 1)
assert_equal(raw_tx["vin"][0]["txinwitness"][0], hexlify(witness_program).decode('ascii'))
assert(vsize != raw_tx["size"])
# Cleanup: mine the transactions and update utxo for next test
self.nodes[0].generate(1)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
self.utxo.pop(0)
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
@subtest
def test_segwit_versions(self):
"""Test validity of future segwit version transactions.
Future segwit version transactions are non-standard, but valid in blocks.
Can run this before and after segwit activation."""
        NUM_SEGWIT_VERSIONS = 17  # will test OP_0, OP_1, ..., OP_16
if len(self.utxo) < NUM_SEGWIT_VERSIONS:
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
split_value = (self.utxo[0].nValue - 4000) // NUM_SEGWIT_VERSIONS
for i in range(NUM_SEGWIT_VERSIONS):
tx.vout.append(CTxOut(split_value, CScript([OP_TRUE])))
tx.rehash()
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
self.utxo.pop(0)
for i in range(NUM_SEGWIT_VERSIONS):
self.utxo.append(UTXO(tx.sha256, i, split_value))
sync_blocks(self.nodes)
temp_utxo = []
tx = CTransaction()
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
for version in list(range(OP_1, OP_16 + 1)) + [OP_0]:
# First try to spend to a future version segwit script_pubkey.
script_pubkey = CScript([CScriptOp(version), witness_hash])
tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")]
tx.vout = [CTxOut(self.utxo[0].nValue - 1000, script_pubkey)]
tx.rehash()
test_transaction_acceptance(self.nodes[1], self.std_node, tx, with_witness=True, accepted=False)
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=True, accepted=True)
self.utxo.pop(0)
temp_utxo.append(UTXO(tx.sha256, 0, tx.vout[0].nValue))
self.nodes[0].generate(1) # Mine all the transactions
sync_blocks(self.nodes)
assert(len(self.nodes[0].getrawmempool()) == 0)
# Finally, verify that version 0 -> version 1 transactions
# are non-standard
script_pubkey = CScript([CScriptOp(OP_1), witness_hash])
tx2 = CTransaction()
tx2.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")]
tx2.vout = [CTxOut(tx.vout[0].nValue - 1000, script_pubkey)]
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
tx2.rehash()
        # Gets accepted to test_node, because standardness of outputs isn't
        # checked when fRequireStandard is false.
test_transaction_acceptance(self.nodes[0], self.test_node, tx2, with_witness=True, accepted=True)
test_transaction_acceptance(self.nodes[1], self.std_node, tx2, with_witness=True, accepted=False)
temp_utxo.pop() # last entry in temp_utxo was the output we just spent
temp_utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
# Spend everything in temp_utxo back to an OP_TRUE output.
tx3 = CTransaction()
total_value = 0
for i in temp_utxo:
tx3.vin.append(CTxIn(COutPoint(i.sha256, i.n), b""))
tx3.wit.vtxinwit.append(CTxInWitness())
total_value += i.nValue
tx3.wit.vtxinwit[-1].scriptWitness.stack = [witness_program]
tx3.vout.append(CTxOut(total_value - 1000, CScript([OP_TRUE])))
tx3.rehash()
# Spending a higher version witness output is not allowed by policy,
# even with fRequireStandard=false.
test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=True, accepted=False)
self.test_node.sync_with_ping()
with mininode_lock:
assert(b"reserved for soft-fork upgrades" in self.test_node.last_message["reject"].reason)
        # A block containing the transaction must still be valid, however.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2, tx3])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
sync_blocks(self.nodes)
# Add utxo to our list
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
@subtest
def test_premature_coinbase_witness_spend(self):
block = self.build_next_block()
# Change the output of the block to be a witness output.
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
block.vtx[0].vout[0].scriptPubKey = script_pubkey
# This next line will rehash the coinbase and update the merkle
# root, and solve.
self.update_witness_block_with_transactions(block, [])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
spend_tx = CTransaction()
spend_tx.vin = [CTxIn(COutPoint(block.vtx[0].sha256, 0), b"")]
spend_tx.vout = [CTxOut(block.vtx[0].vout[0].nValue, witness_program)]
spend_tx.wit.vtxinwit.append(CTxInWitness())
spend_tx.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
spend_tx.rehash()
# Now test a premature spend.
self.nodes[0].generate(98)
sync_blocks(self.nodes)
block2 = self.build_next_block()
self.update_witness_block_with_transactions(block2, [spend_tx])
test_witness_block(self.nodes[0], self.test_node, block2, accepted=False)
# Advancing one more block should allow the spend.
self.nodes[0].generate(1)
block2 = self.build_next_block()
self.update_witness_block_with_transactions(block2, [spend_tx])
test_witness_block(self.nodes[0], self.test_node, block2, accepted=True)
sync_blocks(self.nodes)
@subtest
def test_uncompressed_pubkey(self):
"""Test uncompressed pubkey validity in segwit transactions.
Uncompressed pubkeys are no longer supported in default relay policy,
but (for now) are still valid in blocks."""
# Segwit transactions using uncompressed pubkeys are not accepted
# under default policy, but should still pass consensus.
key = CECKey()
key.set_secretbytes(b"9")
key.set_compressed(False)
pubkey = CPubKey(key.get_pubkey())
assert_equal(len(pubkey), 65) # This should be an uncompressed pubkey
utxo = self.utxo.pop(0)
# Test 1: P2WPKH
# First create a P2WPKH output that uses an uncompressed pubkey
pubkeyhash = hash160(pubkey)
script_pkh = CScript([OP_0, pubkeyhash])
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(utxo.sha256, utxo.n), b""))
tx.vout.append(CTxOut(utxo.nValue - 1000, script_pkh))
tx.rehash()
# Confirm it in a block.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Now try to spend it. Send it to a P2WSH output, which we'll
# use in the next test.
witness_program = CScript([pubkey, CScriptOp(OP_CHECKSIG)])
witness_hash = sha256(witness_program)
script_wsh = CScript([OP_0, witness_hash])
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, script_wsh))
script = get_p2pkh_script(pubkeyhash)
sig_hash = SegwitVersion1SignatureHash(script, tx2, 0, SIGHASH_ALL, tx.vout[0].nValue)
signature = key.sign(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [signature, pubkey]
tx2.rehash()
# Should fail policy test.
test_transaction_acceptance(self.nodes[0], self.test_node, tx2, True, False, b'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
# But passes consensus.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Test 2: P2WSH
        # Try to spend the P2WSH output created in the last test.
# Send it to a P2SH(P2WSH) output, which we'll use in the next test.
p2sh_witness_hash = hash160(script_wsh)
script_p2sh = CScript([OP_HASH160, p2sh_witness_hash, OP_EQUAL])
script_sig = CScript([script_wsh])
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
tx3.vout.append(CTxOut(tx2.vout[0].nValue - 1000, script_p2sh))
tx3.wit.vtxinwit.append(CTxInWitness())
sign_p2pk_witness_input(witness_program, tx3, 0, SIGHASH_ALL, tx2.vout[0].nValue, key)
# Should fail policy test.
test_transaction_acceptance(self.nodes[0], self.test_node, tx3, True, False, b'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
# But passes consensus.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx3])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Test 3: P2SH(P2WSH)
# Try to spend the P2SH output created in the last test.
# Send it to a P2PKH output, which we'll use in the next test.
script_pubkey = get_p2pkh_script(pubkeyhash)
tx4 = CTransaction()
tx4.vin.append(CTxIn(COutPoint(tx3.sha256, 0), script_sig))
tx4.vout.append(CTxOut(tx3.vout[0].nValue - 1000, script_pubkey))
tx4.wit.vtxinwit.append(CTxInWitness())
sign_p2pk_witness_input(witness_program, tx4, 0, SIGHASH_ALL, tx3.vout[0].nValue, key)
# Should fail policy test.
test_transaction_acceptance(self.nodes[0], self.test_node, tx4, True, False, b'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx4])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Test 4: Uncompressed pubkeys should still be valid in non-segwit
# transactions.
tx5 = CTransaction()
tx5.vin.append(CTxIn(COutPoint(tx4.sha256, 0), b""))
tx5.vout.append(CTxOut(tx4.vout[0].nValue - 1000, CScript([OP_TRUE])))
(sig_hash, err) = SignatureHash(script_pubkey, tx5, 0, SIGHASH_ALL)
signature = key.sign(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL
tx5.vin[0].scriptSig = CScript([signature, pubkey])
tx5.rehash()
# Should pass policy and consensus.
test_transaction_acceptance(self.nodes[0], self.test_node, tx5, True, True)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx5])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
self.utxo.append(UTXO(tx5.sha256, 0, tx5.vout[0].nValue))
@subtest
def test_signature_version_1(self):
key = CECKey()
key.set_secretbytes(b"9")
pubkey = CPubKey(key.get_pubkey())
witness_program = CScript([pubkey, CScriptOp(OP_CHECKSIG)])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
# First create a witness output for use in the tests.
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey))
tx.rehash()
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=True, accepted=True)
# Mine this transaction in preparation for following tests.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
sync_blocks(self.nodes)
self.utxo.pop(0)
# Test each hashtype
prev_utxo = UTXO(tx.sha256, 0, tx.vout[0].nValue)
for sigflag in [0, SIGHASH_ANYONECANPAY]:
for hashtype in [SIGHASH_ALL, SIGHASH_NONE, SIGHASH_SINGLE]:
hashtype |= sigflag
block = self.build_next_block()
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(prev_utxo.sha256, prev_utxo.n), b""))
tx.vout.append(CTxOut(prev_utxo.nValue - 1000, script_pubkey))
tx.wit.vtxinwit.append(CTxInWitness())
# Too-large input value
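                # (The BIP143 sighash commits to the amount being spent, so
                # signing with the wrong value must invalidate the signature.)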
sign_p2pk_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue + 1, key)
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Too-small input value
sign_p2pk_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue - 1, key)
block.vtx.pop() # remove last tx
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Now try correct value
sign_p2pk_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue, key)
block.vtx.pop()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
prev_utxo = UTXO(tx.sha256, 0, tx.vout[0].nValue)
# Test combinations of signature hashes.
# Split the utxo into a lot of outputs.
# Randomly choose up to 10 to spend, sign with different hashtypes, and
# output to a random number of outputs. Repeat NUM_SIGHASH_TESTS times.
# Ensure that we've tested a situation where we use SIGHASH_SINGLE with
# an input index > number of outputs.
NUM_SIGHASH_TESTS = 500
temp_utxos = []
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(prev_utxo.sha256, prev_utxo.n), b""))
split_value = prev_utxo.nValue // NUM_SIGHASH_TESTS
for i in range(NUM_SIGHASH_TESTS):
tx.vout.append(CTxOut(split_value, script_pubkey))
tx.wit.vtxinwit.append(CTxInWitness())
sign_p2pk_witness_input(witness_program, tx, 0, SIGHASH_ALL, prev_utxo.nValue, key)
for i in range(NUM_SIGHASH_TESTS):
temp_utxos.append(UTXO(tx.sha256, i, split_value))
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
block = self.build_next_block()
used_sighash_single_out_of_bounds = False
for i in range(NUM_SIGHASH_TESTS):
# Ping regularly to keep the connection alive
if (not i % 100):
self.test_node.sync_with_ping()
# Choose random number of inputs to use.
num_inputs = random.randint(1, 10)
# Create a slight bias for producing more utxos
num_outputs = random.randint(1, 11)
random.shuffle(temp_utxos)
assert(len(temp_utxos) > num_inputs)
tx = CTransaction()
total_value = 0
for i in range(num_inputs):
tx.vin.append(CTxIn(COutPoint(temp_utxos[i].sha256, temp_utxos[i].n), b""))
tx.wit.vtxinwit.append(CTxInWitness())
total_value += temp_utxos[i].nValue
split_value = total_value // num_outputs
for i in range(num_outputs):
tx.vout.append(CTxOut(split_value, script_pubkey))
for i in range(num_inputs):
# Now try to sign each input, using a random hashtype.
anyonecanpay = 0
if random.randint(0, 1):
anyonecanpay = SIGHASH_ANYONECANPAY
hashtype = random.randint(1, 3) | anyonecanpay
sign_p2pk_witness_input(witness_program, tx, i, hashtype, temp_utxos[i].nValue, key)
if (hashtype == SIGHASH_SINGLE and i >= num_outputs):
used_sighash_single_out_of_bounds = True
tx.rehash()
for i in range(num_outputs):
temp_utxos.append(UTXO(tx.sha256, i, split_value))
temp_utxos = temp_utxos[num_inputs:]
block.vtx.append(tx)
# Test the block periodically, if we're close to maxblocksize
if (get_virtual_size(block) > MAX_BLOCK_BASE_SIZE - 1000):
self.update_witness_block_with_transactions(block, [])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
block = self.build_next_block()
if (not used_sighash_single_out_of_bounds):
self.log.info("WARNING: this test run didn't attempt SIGHASH_SINGLE with out-of-bounds index value")
# Test the transactions we've added to the block
if (len(block.vtx) > 1):
self.update_witness_block_with_transactions(block, [])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
# Now test witness version 0 P2PKH transactions
pubkeyhash = hash160(pubkey)
script_pkh = CScript([OP_0, pubkeyhash])
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(temp_utxos[0].sha256, temp_utxos[0].n), b""))
tx.vout.append(CTxOut(temp_utxos[0].nValue, script_pkh))
tx.wit.vtxinwit.append(CTxInWitness())
sign_p2pk_witness_input(witness_program, tx, 0, SIGHASH_ALL, temp_utxos[0].nValue, key)
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue, CScript([OP_TRUE])))
script = get_p2pkh_script(pubkeyhash)
sig_hash = SegwitVersion1SignatureHash(script, tx2, 0, SIGHASH_ALL, tx.vout[0].nValue)
signature = key.sign(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL
# Check that we can't have a scriptSig
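        # (BIP141 requires the scriptSig of a native witness-program input to
        # be empty, so carrying the signature there should invalidate the
        # block.)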
tx2.vin[0].scriptSig = CScript([signature, pubkey])
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx, tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
# Move the signature to the witness.
block.vtx.pop()
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [signature, pubkey]
tx2.vin[0].scriptSig = b""
tx2.rehash()
self.update_witness_block_with_transactions(block, [tx2])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
temp_utxos.pop(0)
# Update self.utxos for later tests by creating two outputs
# that consolidate all the coins in temp_utxos.
output_value = sum(i.nValue for i in temp_utxos) // 2
tx = CTransaction()
index = 0
# Just spend to our usual anyone-can-spend output
tx.vout = [CTxOut(output_value, CScript([OP_TRUE]))] * 2
for i in temp_utxos:
# Use SIGHASH_ALL|SIGHASH_ANYONECANPAY so we can build up
# the signatures as we go.
tx.vin.append(CTxIn(COutPoint(i.sha256, i.n), b""))
tx.wit.vtxinwit.append(CTxInWitness())
sign_p2pk_witness_input(witness_program, tx, index, SIGHASH_ALL | SIGHASH_ANYONECANPAY, i.nValue, key)
index += 1
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
for i in range(len(tx.vout)):
self.utxo.append(UTXO(tx.sha256, i, tx.vout[i].nValue))
@subtest
def test_non_standard_witness_blinding(self):
"""Test behavior of unnecessary witnesses in transactions does not blind the node for the transaction"""
# Create a p2sh output -- this is so we can pass the standardness
# rules (an anyone-can-spend OP_TRUE would be rejected, if not wrapped
# in P2SH).
p2sh_program = CScript([OP_TRUE])
p2sh_pubkey = hash160(p2sh_program)
script_pubkey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])
# Now check that unnecessary witnesses can't be used to blind a node
        # to a transaction, e.g. by violating standardness checks.
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey))
tx.rehash()
test_transaction_acceptance(self.nodes[0], self.test_node, tx, False, True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# We'll add an unnecessary witness to this transaction that would cause
# it to be non-standard, to test that violating policy with a witness
# doesn't blind a node to a transaction. Transactions
# rejected for having a witness shouldn't be added
# to the rejection cache.
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), CScript([p2sh_program])))
tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, script_pubkey))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a' * 400]
tx2.rehash()
# This will be rejected due to a policy check:
        # no witness is allowed here, since the output being spent is a plain
        # P2SH script, not a witness program
test_transaction_acceptance(self.nodes[1], self.std_node, tx2, True, False, b'bad-witness-nonstandard')
# If we send without witness, it should be accepted.
test_transaction_acceptance(self.nodes[1], self.std_node, tx2, False, True)
# Now create a new anyone-can-spend utxo for the next test.
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), CScript([p2sh_program])))
tx3.vout.append(CTxOut(tx2.vout[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
tx3.rehash()
test_transaction_acceptance(self.nodes[0], self.test_node, tx2, False, True)
test_transaction_acceptance(self.nodes[0], self.test_node, tx3, False, True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# Update our utxo list; we spent the first entry.
self.utxo.pop(0)
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
@subtest
def test_non_standard_witness(self):
"""Test detection of non-standard P2WSH witness"""
pad = chr(1).encode('latin-1')
# Create scripts for tests
scripts = []
scripts.append(CScript([OP_DROP] * 100))
scripts.append(CScript([OP_DROP] * 99))
scripts.append(CScript([pad * 59] * 59 + [OP_DROP] * 60))
scripts.append(CScript([pad * 59] * 59 + [OP_DROP] * 61))
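        # scripts[0]/[1] drop all but one truthy item and are used below to
        # probe the 100-element witness stack and 80-byte stack-element
        # policy limits; scripts[2]/[3] serialize to exactly 3600 and 3601
        # bytes, just at and just over the standard witnessScript size limit.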
p2wsh_scripts = []
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
# For each script, generate a pair of P2WSH and P2SH-P2WSH output.
outputvalue = (self.utxo[0].nValue - 1000) // (len(scripts) * 2)
for i in scripts:
p2wsh = CScript([OP_0, sha256(i)])
p2sh = hash160(p2wsh)
p2wsh_scripts.append(p2wsh)
tx.vout.append(CTxOut(outputvalue, p2wsh))
tx.vout.append(CTxOut(outputvalue, CScript([OP_HASH160, p2sh, OP_EQUAL])))
tx.rehash()
txid = tx.sha256
test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=False, accepted=True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# Creating transactions for tests
p2wsh_txs = []
p2sh_txs = []
for i in range(len(scripts)):
p2wsh_tx = CTransaction()
p2wsh_tx.vin.append(CTxIn(COutPoint(txid, i * 2)))
p2wsh_tx.vout.append(CTxOut(outputvalue - 5000, CScript([OP_0, hash160(hex_str_to_bytes(""))])))
p2wsh_tx.wit.vtxinwit.append(CTxInWitness())
p2wsh_tx.rehash()
p2wsh_txs.append(p2wsh_tx)
p2sh_tx = CTransaction()
p2sh_tx.vin.append(CTxIn(COutPoint(txid, i * 2 + 1), CScript([p2wsh_scripts[i]])))
p2sh_tx.vout.append(CTxOut(outputvalue - 5000, CScript([OP_0, hash160(hex_str_to_bytes(""))])))
p2sh_tx.wit.vtxinwit.append(CTxInWitness())
p2sh_tx.rehash()
p2sh_txs.append(p2sh_tx)
# Testing native P2WSH
# Witness stack size, excluding witnessScript, over 100 is non-standard
p2wsh_txs[0].wit.vtxinwit[0].scriptWitness.stack = [pad] * 101 + [scripts[0]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[0], True, False, b'bad-witness-nonstandard')
# Non-standard nodes should accept
test_transaction_acceptance(self.nodes[0], self.test_node, p2wsh_txs[0], True, True)
# Stack element size over 80 bytes is non-standard
p2wsh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 81] * 100 + [scripts[1]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[1], True, False, b'bad-witness-nonstandard')
# Non-standard nodes should accept
test_transaction_acceptance(self.nodes[0], self.test_node, p2wsh_txs[1], True, True)
# Standard nodes should accept if element size is not over 80 bytes
p2wsh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 80] * 100 + [scripts[1]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[1], True, True)
# witnessScript size at 3600 bytes is standard
p2wsh_txs[2].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, scripts[2]]
test_transaction_acceptance(self.nodes[0], self.test_node, p2wsh_txs[2], True, True)
test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[2], True, True)
# witnessScript size at 3601 bytes is non-standard
p2wsh_txs[3].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, pad, scripts[3]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[3], True, False, b'bad-witness-nonstandard')
# Non-standard nodes should accept
test_transaction_acceptance(self.nodes[0], self.test_node, p2wsh_txs[3], True, True)
# Repeating the same tests with P2SH-P2WSH
p2sh_txs[0].wit.vtxinwit[0].scriptWitness.stack = [pad] * 101 + [scripts[0]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[0], True, False, b'bad-witness-nonstandard')
test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_txs[0], True, True)
p2sh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 81] * 100 + [scripts[1]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[1], True, False, b'bad-witness-nonstandard')
test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_txs[1], True, True)
p2sh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 80] * 100 + [scripts[1]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[1], True, True)
p2sh_txs[2].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, scripts[2]]
test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_txs[2], True, True)
test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[2], True, True)
p2sh_txs[3].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, pad, scripts[3]]
test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[3], True, False, b'bad-witness-nonstandard')
test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_txs[3], True, True)
self.nodes[0].generate(1) # Mine and clean up the mempool of non-standard node
# Valid but non-standard transactions in a block should be accepted by standard node
sync_blocks(self.nodes)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
self.utxo.pop(0)
@subtest
def test_upgrade_after_activation(self):
"""Test the behavior of starting up a segwit-aware node after the softfork has activated."""
# Restart with the new binary
self.stop_node(2)
self.start_node(2, extra_args=["-vbparams=segwit:0:999999999999"])
connect_nodes(self.nodes[0], 2)
sync_blocks(self.nodes)
# Make sure that this peer thinks segwit has activated.
assert(get_bip9_status(self.nodes[2], 'segwit')['status'] == "active")
# Make sure this peer's blocks match those of node0.
height = self.nodes[2].getblockcount()
while height >= 0:
block_hash = self.nodes[2].getblockhash(height)
assert_equal(block_hash, self.nodes[0].getblockhash(height))
assert_equal(self.nodes[0].getblock(block_hash), self.nodes[2].getblock(block_hash))
height -= 1
@subtest
def test_witness_sigops(self):
"""Test sigop counting is correct inside witnesses."""
# Keep this under MAX_OPS_PER_SCRIPT (201)
witness_program = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKMULTISIG] * 5 + [OP_CHECKSIG] * 193 + [OP_ENDIF])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
sigops_per_script = 20 * 5 + 193 * 1
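        # Under witness sigop counting each OP_CHECKMULTISIG here costs 20
        # sigops and each OP_CHECKSIG costs 1, so one spend of this program
        # contributes 293 sigops.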
# We'll produce 2 extra outputs, one with a program that would take us
# over max sig ops, and one with a program that would exactly reach max
# sig ops
outputs = (MAX_SIGOP_COST // sigops_per_script) + 2
extra_sigops_available = MAX_SIGOP_COST % sigops_per_script
# We chose the number of checkmultisigs/checksigs to make this work:
assert(extra_sigops_available < 100) # steer clear of MAX_OPS_PER_SCRIPT
# This script, when spent with the first
# N(=MAX_SIGOP_COST//sigops_per_script) outputs of our transaction,
# would push us just over the block sigop limit.
witness_program_toomany = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKSIG] * (extra_sigops_available + 1) + [OP_ENDIF])
witness_hash_toomany = sha256(witness_program_toomany)
script_pubkey_toomany = CScript([OP_0, witness_hash_toomany])
# If we spend this script instead, we would exactly reach our sigop
# limit (for witness sigops).
witness_program_justright = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKSIG] * (extra_sigops_available) + [OP_ENDIF])
witness_hash_justright = sha256(witness_program_justright)
script_pubkey_justright = CScript([OP_0, witness_hash_justright])
# First split our available utxo into a bunch of outputs
split_value = self.utxo[0].nValue // outputs
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
for i in range(outputs):
tx.vout.append(CTxOut(split_value, script_pubkey))
tx.vout[-2].scriptPubKey = script_pubkey_toomany
tx.vout[-1].scriptPubKey = script_pubkey_justright
tx.rehash()
block_1 = self.build_next_block()
self.update_witness_block_with_transactions(block_1, [tx])
test_witness_block(self.nodes[0], self.test_node, block_1, accepted=True)
tx2 = CTransaction()
# If we try to spend the first n-1 outputs from tx, that should be
# too many sigops.
total_value = 0
for i in range(outputs - 1):
tx2.vin.append(CTxIn(COutPoint(tx.sha256, i), b""))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[-1].scriptWitness.stack = [witness_program]
total_value += tx.vout[i].nValue
tx2.wit.vtxinwit[-1].scriptWitness.stack = [witness_program_toomany]
tx2.vout.append(CTxOut(total_value, CScript([OP_TRUE])))
tx2.rehash()
block_2 = self.build_next_block()
self.update_witness_block_with_transactions(block_2, [tx2])
test_witness_block(self.nodes[0], self.test_node, block_2, accepted=False)
# Try dropping the last input in tx2, and add an output that has
# too many sigops (contributing to legacy sigop count).
checksig_count = (extra_sigops_available // 4) + 1
script_pubkey_checksigs = CScript([OP_CHECKSIG] * checksig_count)
tx2.vout.append(CTxOut(0, script_pubkey_checksigs))
tx2.vin.pop()
tx2.wit.vtxinwit.pop()
tx2.vout[0].nValue -= tx.vout[-2].nValue
tx2.rehash()
block_3 = self.build_next_block()
self.update_witness_block_with_transactions(block_3, [tx2])
test_witness_block(self.nodes[0], self.test_node, block_3, accepted=False)
# If we drop the last checksig in this output, the tx should succeed.
block_4 = self.build_next_block()
tx2.vout[-1].scriptPubKey = CScript([OP_CHECKSIG] * (checksig_count - 1))
tx2.rehash()
self.update_witness_block_with_transactions(block_4, [tx2])
test_witness_block(self.nodes[0], self.test_node, block_4, accepted=True)
# Reset the tip back down for the next test
sync_blocks(self.nodes)
for x in self.nodes:
x.invalidateblock(block_4.hash)
# Try replacing the last input of tx2 to be spending the last
# output of tx
block_5 = self.build_next_block()
tx2.vout.pop()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, outputs - 1), b""))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[-1].scriptWitness.stack = [witness_program_justright]
tx2.rehash()
self.update_witness_block_with_transactions(block_5, [tx2])
test_witness_block(self.nodes[0], self.test_node, block_5, accepted=True)
# TODO: test p2sh sigop counting
if __name__ == '__main__':
SegWitTest().main()
|
[
"cncc.dev@gmail.com"
] |
cncc.dev@gmail.com
|
a25af1fae40ebeacc3692e36155f2a30c316c55f
|
d4e986cae639e4824ea7256fa727f4e8f183c516
|
/pys/66.py
|
7c7bbf638f50eb0af22813ccc5384703c4898aef
|
[] |
no_license
|
yinzhuoqun/yzq
|
bc28410f5a9aee2193fb95679b93ff8133ac0f6d
|
972b4b7ec23a6440910ed63f8734c96e01dcc780
|
refs/heads/master
| 2022-12-20T16:24:28.177467
| 2019-11-12T03:34:38
| 2019-11-12T03:34:38
| 36,655,612
| 0
| 0
| null | 2022-12-08T00:00:20
| 2015-06-01T11:09:22
|
Python
|
UTF-8
|
Python
| false
| false
| 1,035
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'yinzhuoqun'
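# Brute-force count of all assignments of the digits 1-9 (repetition allowed)
# to the nine blanks of the puzzle
#     a + 13b/c + d + 12e - f - 11 + g*h/i - 10 = 66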
import itertools

solutions = []
for a, b, c, d, e, f, g, h, i in itertools.product(range(1, 10), repeat=9):
    if a + 13 * b / c + d + 12 * e - f - 11 + g * h / i - 10 == 66:
        # equivalently: a + 13*b/c + d + 12*e - f + g*h/i - 21 == 66
        # print(a, b, c, d, e, f, g, h, i)
        solutions.append('%s-%s-%s-%s-%s-%s-%s-%s-%s' % (a, b, c, d, e, f, g, h, i))
print(len(solutions))
|
[
"zhuoqun527@qq.com"
] |
zhuoqun527@qq.com
|
b9a04280a76f7384059d8027d067c702f2d429fe
|
11d5cc15a4f03c51f7875066b1f27eded6fab0e9
|
/list/account.py
|
e1a5325ed5714a56817dc1aedc974fee699e37b5
|
[] |
no_license
|
pororobro/phython-oop2
|
512ef39a9831f5b77172cc7c901d33641b7baeb4
|
e5e089527782eee08053257d7eaa983a6ef74ea0
|
refs/heads/master
| 2023-07-13T04:30:12.674657
| 2021-08-15T12:40:36
| 2021-08-15T12:40:36
| 368,100,036
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,659
|
py
|
import random
class Account(object):
def __init__(self, holder,account_number, balance):
        self.bank = 'SC제일은행'  # 'SC제일은행' (SC First Bank, Korea)
self.holder = holder
self.balance = balance
self.account_number = account_number
def get_account(self):
        return f'Bank: {self.bank} Holder: {self.holder} Account number: {self.account_number} Balance: {self.balance}'
@staticmethod
def random_number():
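        # Build a random account number in the zero-padded form 'NNN-NN-NNNNNN'.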
first = random.randint(0, 999)
second = random.randint(0, 99)
third = random.randint(0, 999999)
first = str(first).zfill(3)
second = str(second).zfill(2)
third = str(third).zfill(6)
return first + '-' + second + '-' + third
@staticmethod
def main():
account_howmany = 0
ls = []
while 1:
            menu = input('0. Cancel 1. Create account 2. View 3. Remove')
if menu == '0':
break
elif menu == '1':
                ls.append(Account(input('Enter the account holder: '), Account.random_number(), int(input('Enter the initial balance: '))))
                account_howmany += 1
                print(f'Accounts created: {account_howmany}')
elif menu == '2':
for i in ls:
print(i.get_account())
            elif menu == '3':
                del_number = input('Account number to remove: ')
                # Rebuild the list rather than deleting entries while
                # iterating over it, which skips elements.
                ls = [acct for acct in ls if acct.account_number != del_number]
            else:
                print('Invalid input')
                continue
Account.main()
|
[
"pororobro@naver.com"
] |
pororobro@naver.com
|
adb391fa107985065a1f01441d28fda4de68f385
|
38f507b9b9e492bcdde1680b4eedb154ccf3476f
|
/anosql/__init__.py
|
b3f509063dfcd8066945987d0cb31ca7d2af644f
|
[
"BSD-2-Clause-Views",
"BSD-3-Clause"
] |
permissive
|
rodo/anosql
|
a5c192f09042297ec9538f4895794ca1b936090c
|
ba2acc01eff36f0a96f784698917604a3f5b2ff7
|
refs/heads/master
| 2021-01-15T22:38:41.070581
| 2014-08-31T20:17:08
| 2014-08-31T20:17:08
| 42,585,031
| 1
| 0
| null | 2015-09-16T12:13:21
| 2015-09-16T12:13:20
| null |
UTF-8
|
Python
| false
| false
| 30
|
py
|
from core import load_queries
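# NB: this is a Python 2-style implicit relative import; under Python 3 it
# would need to be `from .core import load_queries`.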
|
[
"me@honza.ca"
] |
me@honza.ca
|
4ef41b22b07660f2ff7f21dbf1f9daf9ee65a518
|
e88cc7e7379c99efad64fbaa5038aede2d3dcdf7
|
/ESCEQ/Equinos/views.py
|
8e7532642e0fcd905bef326911be3379fe954a9d
|
[] |
no_license
|
yricardo79/ESCEQ
|
4fffa8c8a359601bf9905d2477962511f5061206
|
b3a5a5338e69c6ebf5fc585659def65ae8ac5154
|
refs/heads/master
| 2023-04-29T08:40:21.434166
| 2021-05-23T20:14:11
| 2021-05-23T20:14:11
| 361,932,643
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,010
|
py
|
# from django.shortcuts import render_to_response, RequestContext
from django.urls import reverse_lazy
from django.views.generic import CreateView, ListView, UpdateView, DeleteView
from ESCEQ.Equinos.forms import RazaForm, ComportamientoForm, DisciplinaDeportivaForm, EquDiscForm, EquinoForm
from registro.models import Raza, Comportamiento, Disciplina_Deportiva, Equ_Disc, Equino
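# Each model below gets the same four generic class-based views (CreateView,
# ListView, UpdateView, DeleteView), each wired to a template and, where a
# form is needed, a form class, with success_url resolved lazily by URL name.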
# @login_required
# def upload_image_view(request):
# if request.method == 'POST':
# form = UploadImageForm(request.POST, request.FILES)
# if form.is_valid():
# form.save()
# message = "Image uploaded succesfully!"
# else:
# form = UploadImageForm()
#
# return render_to_response('crear-foto-equ.html', locals(), context_instance=RequestContext(request))
#
#
# def home_view(request):
# return render_to_response('index.html')
# Raza table
class CrearRaza(CreateView):
model = Raza
template_name = 'crear-raza.html'
form_class = RazaForm
success_url = reverse_lazy('listar-razas')
class ListarRazas(ListView):
model = Raza
template_name = 'listar-razas.html'
context_object_name = 'razas'
queryset = Raza.objects.all()
class ActualizarRaza(UpdateView):
model = Raza
template_name = 'crear-raza.html'
form_class = RazaForm
success_url = reverse_lazy('listar-razas')
    # To display the update elsewhere, override the post() method
class EliminarRaza(DeleteView):
model = Raza
template_name = 'raza_confirm_delete.html'
success_url = reverse_lazy('listar-razas')
# Comportamiento table
class CrearComportamiento(CreateView):
model = Comportamiento
template_name = 'crear-comportamiento.html'
form_class = ComportamientoForm
success_url = reverse_lazy('listar-comportamientos')
class ListarComportamientos(ListView):
model = Comportamiento
template_name = 'listar-comportamientos.html'
context_object_name = 'comportamientos'
queryset = Comportamiento.objects.all()
class ActualizarComportamiento(UpdateView):
model = Comportamiento
    template_name = 'crear-comportamiento.html'
form_class = ComportamientoForm
success_url = reverse_lazy('listar-comportamientos')
    # To display the update elsewhere, override the post() method
class EliminarComportamiento(DeleteView):
model = Comportamiento
template_name = 'comportamiento_confirm_delete.html'
success_url = reverse_lazy('listar-comportamientos')
# Disciplina_Deportiva table
class CrearDisDep(CreateView):
model = Disciplina_Deportiva
template_name = 'crear-dis-dep.html'
form_class = DisciplinaDeportivaForm
success_url = reverse_lazy('listar-dis-deps')
class ListarDisDeps(ListView):
model = Disciplina_Deportiva
template_name = 'listar-dis-deps.html'
context_object_name = 'disdeps'
queryset = Disciplina_Deportiva.objects.all()
class ActualizarDisDep(UpdateView):
model = Disciplina_Deportiva
template_name = 'crear-dis-dep.html'
form_class = DisciplinaDeportivaForm
success_url = reverse_lazy('listar-dis-deps')
    # To display the update elsewhere, override the post() method
class EliminarDisDep(DeleteView):
model = Disciplina_Deportiva
template_name = 'dis-dep_confirm_delete.html'
success_url = reverse_lazy('listar-dis-deps')
# Equ_Disc table
class CrearEquDisc(CreateView):
model = Equ_Disc
template_name = 'crear-equ-dis.html'
form_class = EquDiscForm
success_url = reverse_lazy('listar-equ-dis')
class ListarEquDiscs(ListView):
model = Equ_Disc
template_name = 'listar-equ-dis.html'
context_object_name = 'equdiscs'
queryset = Equ_Disc.objects.all()
class ActualizarEquDisc(UpdateView):
model = Equ_Disc
template_name = 'crear-equ-dis.html'
form_class = EquDiscForm
success_url = reverse_lazy('listar-equ-dis')
    # To display the update elsewhere, override the post() method
class EliminarEquDisc(DeleteView):
model = Equ_Disc
template_name = 'equ-dis_confirm_delete.html'
    success_url = reverse_lazy('listar-equ-dis')
# Equino table
class CrearEquino(CreateView):
model = Equino
    template_name = 'crear-equino.html'
form_class = EquinoForm
success_url = reverse_lazy('listar-equinos')
class ListarEquinos(ListView):
model = Equino
template_name = 'listar-equinos.html'
context_object_name = 'equinos'
queryset = Equino.objects.all()
class ActualizarEquino(UpdateView):
model = Equino
template_name = 'crear-equino.html'
form_class = EquinoForm
success_url = reverse_lazy('listar-equinos')
    # To display the update elsewhere, override the post() method
class EliminarEquino(DeleteView):
model = Equino
template_name = 'equino_confirm_delete.html'
success_url = reverse_lazy('listar-equinos')
# # Foto_Equino table
# class CrearFotoEquino(CreateView):
# model = Foto_Equino
# form_class = FotoEquinoForm
# template_name = 'crear-foto-equ.html'
# success_url = reverse_lazy('listar-fotos-equinos')
#
#
# class ListarFotoEquinos(ListView):
# model = Foto_Equino
# template_name = 'listar-fotos-equinos.html'
# context_object_name = 'fotosequinos'
# queryset = Foto_Equino.objects.all()
#
#
# class ActualizarFotoEquino(UpdateView):
# model = Foto_Equino
# form_class = FotoEquinoForm
# template_name = 'crear-foto-equ.html'
# success_url = reverse_lazy('listar-fotos-equinos')
# #     # To display the update elsewhere, override the post() method
#
#
# class EliminarFotoEquino(DeleteView):
# model = Foto_Equino
# template_name = 'foto-equino_confirm_delete.html'
# success_url = reverse_lazy('listar-fotos-equinos')
# NOOOO
# Foto_Equino table
# class CrearRaza(CreateView):
# model = Foto_Equino
# template_name = 'crear-raza.html'
# form_class = FotoEquinoForm
# success_url = reverse_lazy('listar-razas')
#
#
# class ListarRazas(ListView):
# model = Raza
# template_name = 'listar-razas.html'
# context_object_name = 'razas'
# queryset = Raza.objects.all()
#
#
# class ActualizarRaza(UpdateView):
# model = Raza
# template_name = 'crear-raza.html'
# form_class = RazaForm
# success_url = reverse_lazy('listar-razas')
# #     # To display the update elsewhere, override the post() method
#
#
# class EliminarRaza(DeleteView):
# model = Raza
# template_name = 'raza_confirm_delete.html'
# success_url = reverse_lazy('listar-razas')
# def bus_ser_equ(request):
#     # check the form input
#     if request.GET["IDENRAZA"]:
#         # mensaje = "Horse searched for: %r" % request.GET["txt_caballo"]
#         texto_caballo = request.GET["IDENRAZA"]
#         if len(texto_caballo) > 20:
#             mensaje = "Search term too long"
#         else:
#             # texto_caballo: object = request.GET["txt_caballo"]
#             equinos = Equino.objects.filter(nom_equino__icontains=texto_caballo)
#             return render(request, "prueba.html", {"equinos": equinos, "query": equinos})
#     else:
#         mensaje = "No search was performed!"
#         return HttpResponse(mensaje)
# Disciplina Deportiva table
# class CrearDisDep(CreateView):
# model = Disciplina_Deportiva
# template_name = 'crear-dis-dep.html'
# form_class = DisDepForm
# success_url = reverse_lazy('listar-dis-deps')
#
#
# class ListarDisDeps(ListView):
# model = Disciplina_Deportiva
# template_name = 'listar-dis-deps.html'
# context_object_name = 'disDeps'
# queryset = Disciplina_Deportiva.objects.all()
#
#
# class ActualizarDisDep(UpdateView):
# model = Disciplina_Deportiva
# template_name = 'crear-dis-dep.html'
# form_class = DisDepForm
# success_url = reverse_lazy('listar-dis-deps')
# #     # To display the update elsewhere, override the post() method
# class EliminarDisDep(DeleteView):
# model = Disciplina_Deportiva
# template_name = 'dis-dep_confirm_delete.html'
# success_url = reverse_lazy('listar-dis-deps')
#
#
# # Comp Nutricional table
# class CrearComNut(CreateView):
# model = Comps_Nutricionales
# template_name = 'crear-com-nut.html'
# form_class = ComNutForm
# success_url = reverse_lazy('listar-com-nuts')
#
#
# class ListarComNuts(ListView):
# model = Comps_Nutricionales
# template_name = 'listar-com-nuts.html'
# context_object_name = 'comNuts'
# queryset = Comps_Nutricionales.objects.all()
#
#
# class ActualizarComNut(UpdateView):
# model = Comps_Nutricionales
# template_name = 'crear-com-nut.html'
# form_class = ComNutForm
# success_url = reverse_lazy('listar-com-nuts')
# #     # To display the update elsewhere, override the post() method
#
#
# class EliminarComNut(DeleteView):
# model = Comps_Nutricionales
# template_name = 'com-nut_confirm_delete.html'
# success_url = reverse_lazy('listar-com-nuts')
|
[
"you@example.com"
] |
you@example.com
|
e0eee403d172e9aaa4cc79ecd8ddb1a7fcecbbb7
|
43a0980f95d071bd119abdc972b15304fc252c18
|
/crypto.py
|
b8a9b8099eeee3319e522b2c694a3edbfd68c8b6
|
[] |
no_license
|
Agnimandur/Red-Crab-Inn-Bot
|
e7406f12fd108703d5bc14b154ae3b9be4d27a1b
|
a0401a7b178d7c44c86fc877724f2e91d4dc21f0
|
refs/heads/main
| 2023-07-16T15:08:05.893963
| 2021-09-04T18:45:41
| 2021-09-04T18:45:41
| 323,823,312
| 8
| 1
| null | 2020-12-31T04:11:22
| 2020-12-23T06:36:47
|
Python
|
UTF-8
|
Python
| false
| false
| 9,661
|
py
|
import discord
import math
import time
from datetime import timedelta
from replit import db
from conversion import get_conversion
from conversion import networth
from leaderboard import leaderboard
from leaderboard import leaderboardEmbed
from help import crypto_help
from help import make_embed
AGNIMANDUR = 482581806143766529
async def transaction(params,key,kind):
ret = {'btc':0,'eth':0,'success':True,'h':24}
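    # Parse params of the form 'btc=<amount>'/'eth=<amount>', 'btc=$<dollars>'
    # (converted at the current exchange rate), or 'btc=all'/'eth=all';
    # 'h=<hours>' sets the contract duration for shorts. Any malformed or
    # negative value flips ret['success'] to False.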
r = await get_conversion()
for p in params:
if p.startswith('btc='):
try:
if p=='btc=all':
ret['btc'] = db[key][0]/r[0] if kind=='buy' else db[key][1]
elif p.startswith('btc=$'):
ret['btc'] = float(p[5:])/r[0]
else:
ret['btc'] = float(p[4:])
except:
ret['success']=False
elif p.startswith('eth='):
try:
if p=='eth=all':
ret['eth'] = db[key][0]/r[1] if kind=='buy' else db[key][2]
elif p.startswith('eth=$'):
ret['eth'] = float(p[5:])/r[1]
else:
ret['eth'] = float(p[4:])
except:
ret['success']=False
elif p.startswith('h=') and kind=='short':
try:
ret['h'] = float(p[2:])
except:
                ret['success'] = False
else:
ret['success']=False
for k in ret.keys():
if math.isnan(ret[k]) or ret[k] < 0:
ret['success'] = False
return ret
async def crypto(message):
server = str(message.guild.id)
key = "CRYPTO " + server + " " + str(message.author.id) #db[key] = (dollars,bitcoins,ethereums)
text = message.content.lower()
response = ""
if text == 'join':
if key not in db.keys():
db[key] = (1000000.0,0,0)
response = "Hi {user}, welcome to the Red Crab Inn's cryptocurrency simulator, where you can trade Bitcoin and Ethereum! You start with $1,000,000.".format(user=message.author.mention)
else:
response = "Hi {user}, you are already in the simulator!".format(user=message.author.mention)
return response
elif text.startswith('help'):
embed = crypto_help(text)
await message.channel.send(embed=embed)
return 200
if key not in db.keys():
return ""
if text=='exchange rate':
r = await get_conversion()
response = "The current Bitcoin exchange rate is ${btc}. The current Ethereum exchange rate is ${eth}.".format(btc=round(r[0]),eth=round(r[1]))
elif text.startswith('buy '):
params = text[4:].split(' ')
r = await get_conversion()
ret = await transaction(params,key,'buy')
if not ret['success']:
return "Invalid use of the `buy` command!"
cost = ret['btc']*r[0]+ret['eth']*r[1]
if cost > db[key][0]:
response = "Hi {user}, you do not have enough money to buy that much cryptocurrency!".format(user=message.author.mention)
else:
db[key] = (db[key][0]-cost,db[key][1]+ret['btc'],db[key][2]+ret['eth'])
response = "Hi {user}, your transaction was successful! You now have ${cash} and ฿{btc} and Ξ{eth}.".format(user=message.author.mention,cash=round(db[key][0]),btc=db[key][1],eth=db[key][2])
elif text.startswith('sell '):
params = text[5:].split(' ')
r = await get_conversion()
ret = await transaction(params,key,'sell')
if not ret['success']:
return "Invalid use of the `sell` command!"
profit = ret['btc']*r[0]+ret['eth']*r[1]
if ret['btc'] > db[key][1] or ret['eth'] > db[key][2]:
response = "Hi {user}, you do not have enough cryptocurrency to make that sale!".format(user=message.author.mention)
else:
db[key] = (db[key][0]+profit,db[key][1]-ret['btc'],db[key][2]-ret['eth'])
response = "Hi {user}, your transaction was successful! You now have ${cash} and ฿{btc} and Ξ{eth}.".format(user=message.author.mention,cash=round(db[key][0]),btc=db[key][1],eth=db[key][2])
elif text.startswith('short'):
params = text[6:].split(' ')
r = await get_conversion()
ret = await transaction(params,key,'short')
if not ret['success'] or (ret['eth'] > 0 and ret['btc'] > 0):
return "Invalid use of the `short` command!"
contracts = "CONTRACT "+key
if ret['btc']*r[0] > db[key][0] or ret['eth']*r[1] > db[key][0]:
response = "Hi {user}, you do not have enough dollars for this contract!".format(user=message.author.mention)
elif ret['h'] < 0.1 or ret['h'] > 10000:
response = "Hi {user}, the duration of the contract must be between 0.1 and 10000 hours.".format(user=message.author.mention)
elif contracts in db.keys() and len(db[contracts])==5:
response = "Hi {user}, you can only have a maximum of 5 ongoing contracts at a time! Use the `contracts` command to view them.".format(user=message.author.mention)
else:
#place the contract ('short',currency-name ('btc'/'eth'),amt,val at open, end_time)
endTime = round(time.time())+3600*ret['h']
if ret['btc'] > 0:
contract = ('short','btc',ret['btc'],r[0],endTime)
db[key] = (db[key][0]-ret['btc']*r[0],db[key][1],db[key][2])
else:
contract = ('short','eth',ret['eth'],r[1],endTime)
db[key] = (db[key][0]-ret['eth']*r[1],db[key][1],db[key][2])
if contracts not in db.keys():
db[contracts] = []
temp = db[contracts]
temp.append(contract)
db[contracts] = temp
response = "Hi {user}, your CFD contract for {symbol}{amt} lasting {h} hours was successfully placed! Check back on its status with the `contracts` command!".format(user=message.author.mention,symbol='฿' if contract[1]=='btc' else 'Ξ',amt=contract[2],h=ret['h'])
elif text=='contracts':
contracts = "CONTRACT "+key
if contracts not in db.keys() or len(db[contracts])==0:
response = "You have no active contracts!"
else:
r = await get_conversion()
temp = db[contracts]
temp2 = []
currentTime = round(time.time())
cash = db[key][0]
amts = ""
ends = ""
profits = ""
for contract in temp:
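            # A short position profits when the price falls:
            # profit = (price at open - current price) * amount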
if contract[1]=='btc':
profit = (contract[3]-r[0])*contract[2]
else:
profit = (contract[3]-r[1])*contract[2]
if contract[4] <= currentTime:
end = "none"
cash += (contract[2]*contract[3]+profit)
else:
end = str(timedelta(seconds=contract[4]-currentTime))
temp2.append(contract)
amts += "`{symbol}{amt}`\n".format(symbol='฿' if contract[1]=='btc' else 'Ξ',amt=contract[2])
ends += "`{end}`\n".format(end=end)
profits += "`${profit}`\n".format(profit=round(profit))
db[key] = (cash,db[key][1],db[key][2])
db[contracts] = temp2
embed = make_embed(title="**{user}'s Contracts**".format(user=message.author.name),description="A list of your ongoing contracts.").add_field(name="**Amount**",value=amts,inline=True).add_field(name="**Time Until End**",value=ends[:-1],inline=True).add_field(name="**Projected Profit**",value=profits[:-1],inline=True)
await message.channel.send(embed=embed)
response = 200
elif text=='leaderboard':
admin = False
for role in message.author.roles:
if role.name=='reaper-admin':
admin = True
break
if admin:
embed = await leaderboardEmbed(message.guild,"CRYPTO " + server,'crypto')
try:
await message.channel.send(embed=embed)
except:
await message.channel.send("The leaderboard is empty!")
wait = "WAIT "+str(message.author.id)
db[wait] = round(time.time())
response = 200
else:
response = "This command is only available to reaper admins."
elif text=='net worth':
embed = make_embed(title="**{user}'s Net Worth**".format(user=message.author.name),description="A list of your liquid assets. Use `contracts` to view your current contracts.").add_field(name="**US Dollars**",value='$'+str(round(db[key][0])),inline=True).add_field(name="**Bitcoin**",value='฿'+str(db[key][1]),inline=True).add_field(name="**Ethereum**",value='Ξ'+str(db[key][2]),inline=True)
await message.channel.send(embed=embed)
response = 200
elif text=='rank':
#board = await leaderboard("CRYPTO "+server,'crypto')
#rankList = [x[1] for x in board]
#rank = rankList.index(int(message.author.id))+1
#wait = "WAIT "+str(message.author.id)
#db[wait] = round(time.time())
#response = "Hi {user}, you have ${cash} and ฿{btc} and Ξ{eth}. Your current net worth is ${net}. Your rank in the simulation is {r} out of {t} investors.".format(user=message.author.mention,cash=round(db[key][0]),btc=db[key][1],eth=db[key][2],net=round(nw),r=rank,t=len(rankList))
nw = await networth(key)
response = "Hi {user}, you have ${cash} and ฿{btc} and Ξ{eth}. Your current net worth is ${net}.".format(user=message.author.mention,cash=round(db[key][0]),btc=db[key][1],eth=db[key][2],net=round(nw))
elif text.startswith('rank=') and len(text)>8:
try:
search = message.content[5:]
#all members whose names start with "search"
members = await message.guild.query_members(search,limit=5)
if len(members)>0:
for member in members:
hisInfo = "CRYPTO " + server + " " + str(member.id)
hisName = member.name if member.nick==None else member.nick
#check if they're in the game or not
try:
nw = await networth(hisInfo)
response += f"{hisName}'s current net worth is ${round(nw)}.\n"
except:
response += hisName + " has not entered the simulation yet.\n"
else:
response = search+" is not in this server."
except:
response = "Invalid use of the rank command. Type in help for documentation."
return response
|
[
"noreply@github.com"
] |
Agnimandur.noreply@github.com
|
9a6da703715dae0bf305cbf2fe8f307eb4975295
|
6640a76eb4c224da136da61eebb7f9e71cda21cd
|
/examples/basic/user/user_pb2.py
|
31bc84d87a752dee7438701c146df8293ecd9d43
|
[] |
no_license
|
kevindrosendahl/ProtoDB
|
47251c4657b940a0adc5476592103c2f39b0c038
|
40e8ad8f61f367e4c863d614b34150b7861c36e8
|
refs/heads/master
| 2020-04-15T12:43:10.802403
| 2018-12-18T01:02:56
| 2018-12-18T01:02:56
| 67,671,184
| 6
| 0
| null | 2018-10-12T05:39:31
| 2016-09-08T05:25:03
|
Rust
|
UTF-8
|
Python
| false
| true
| 3,532
|
py
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: user.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='user.proto',
package='protodb.examples.user',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n\nuser.proto\x12\x15protodb.examples.user\"]\n\x04User\x12\n\n\x02id\x18\x01 \x01(\x04\x12\x12\n\nfirst_name\x18\x02 \x01(\t\x12\x11\n\tlast_name\x18\x03 \x01(\t\x12\x0b\n\x03\x61ge\x18\x04 \x01(\r\x12\x15\n\remail_address\x18\x05 \x01(\tb\x06proto3')
)
_USER = _descriptor.Descriptor(
name='User',
full_name='protodb.examples.user.User',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='protodb.examples.user.User.id', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='first_name', full_name='protodb.examples.user.User.first_name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='last_name', full_name='protodb.examples.user.User.last_name', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='age', full_name='protodb.examples.user.User.age', index=3,
number=4, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='email_address', full_name='protodb.examples.user.User.email_address', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=37,
serialized_end=130,
)
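# For reference, a reconstruction of the message this descriptor encodes (the
# original user.proto is not part of this file; field types 4/9/13 are
# uint64/string/uint32):
#
#   syntax = "proto3";
#   package protodb.examples.user;
#
#   message User {
#     uint64 id = 1;
#     string first_name = 2;
#     string last_name = 3;
#     uint32 age = 4;
#     string email_address = 5;
#   }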
DESCRIPTOR.message_types_by_name['User'] = _USER
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
User = _reflection.GeneratedProtocolMessageType('User', (_message.Message,), dict(
DESCRIPTOR = _USER,
__module__ = 'user_pb2'
# @@protoc_insertion_point(class_scope:protodb.examples.user.User)
))
_sym_db.RegisterMessage(User)
# @@protoc_insertion_point(module_scope)
|
[
"kevindrosendahl@gmail.com"
] |
kevindrosendahl@gmail.com
|
01bb1e2e9d8361c3a698196c14c88c08326b0c3a
|
d2d2b8c217a4cb728c1ce04d545522ee5f3bff19
|
/venv/Source/Homework1/Homework1_2.py
|
c906649f979edcf9199bcdb15bf40a33e95e0229
|
[] |
no_license
|
AlspachS/MTH451-Numerical-Methods
|
ebbddc557fbce011c0faaa984757a4d124f15381
|
c7fba58bc70a02d04d1bc4950aa85dea35f4855b
|
refs/heads/master
| 2020-04-06T20:08:54.815992
| 2018-11-15T19:36:51
| 2018-11-15T19:36:51
| 157,762,840
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 167
|
py
|
# Problem 1.2:
# Use big O notation to describe the behavior of the function:
# f(θ) = 1 − cos(θ)
# as θ becomes small. Make a table supporting your claim.
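
# A sketch of one possible answer (not part of the original assignment): from
# the Maclaurin series cos(θ) = 1 - θ²/2 + θ⁴/24 - ..., we get
#     f(θ) = 1 - cos(θ) = θ²/2 - θ⁴/24 + ... = O(θ²)  as θ → 0,
# so f(θ)/θ² → 1/2. The table below supports the claim numerically.
import math

for k in range(1, 6):
    theta = 10.0 ** -k
    f = 1.0 - math.cos(theta)
    print("theta = %.0e   f(theta) = %.3e   f(theta)/theta^2 = %.6f"
          % (theta, f, f / theta ** 2))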
|
[
"steven.alspach@outlook.com"
] |
steven.alspach@outlook.com
|
dcbc8ff87031215c546183ab94b57b73caba4972
|
8e4ad77c242ad897eda01ba665c33cce83aeeadf
|
/Solutions/1686.石子游戏-vi.py
|
025c16398a936e0f3d6145f6d5e10f041b0f205e
|
[] |
no_license
|
targeton/LeetCode-cn
|
b7f4b833a7c79ce18b103b1c06ce53707a1b8c7b
|
d57c5ff23199c560ce3a0908e25d8aa76bbf2f49
|
refs/heads/master
| 2021-12-13T22:47:48.071321
| 2021-12-08T07:35:41
| 2021-12-08T07:35:41
| 195,835,667
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,558
|
py
|
#
# @lc app=leetcode.cn id=1686 lang=python3
#
# [1686] Stone Game VI
#
# https://leetcode-cn.com/problems/stone-game-vi/description/
#
# algorithms
# Medium (46.30%)
# Likes: 25
# Dislikes: 0
# Total Accepted: 2.2K
# Total Submissions: 4.7K
# Testcase Example: '[1,3]\n[2,1]'
#
# Alice and Bob take turns playing a game, with Alice going first.
#
# There are n stones in a pile. On their turn, a player removes one stone
# and earns that stone's value. Alice and Bob value the stones differently,
# and each knows the other's valuation.
#
# You are given two integer arrays of length n, aliceValues and bobValues.
# aliceValues[i] and bobValues[i] are the values that Alice and Bob,
# respectively, assign to the i-th stone.
#
# After all stones have been taken, the player with the higher score wins;
# equal scores mean a draw. Both players play optimally.
#
# Determine the result of the game:
#
# If Alice wins, return 1.
# If Bob wins, return -1.
# If the game is a draw, return 0.
#
#
# Example 1:
#
# Input: aliceValues = [1,3], bobValues = [2,1]
# Output: 1
# Explanation:
# If Alice takes stone 1 (0-indexed), she scores 3 points.
# Bob can then only take stone 0, scoring 2 points.
# Alice wins.
#
#
# Example 2:
#
# Input: aliceValues = [1,2], bobValues = [3,1]
# Output: 0
# Explanation:
# Alice takes stone 0 and Bob takes stone 1; both score 1 point.
# The game is a draw.
#
#
# Example 3:
#
# Input: aliceValues = [2,4,3], bobValues = [1,6,7]
# Output: -1
# Explanation:
# However Alice plays, Bob can finish with the higher score.
# For example, if Alice takes stone 1, Bob takes stone 2, and Alice takes
# stone 0, Alice scores 6 while Bob scores 7.
# Bob wins.
#
#
# Constraints:
#
# n == aliceValues.length == bobValues.length
# 1 <= n <= 10^5
# 1 <= aliceValues[i], bobValues[i] <= 100
#
#
#
# @lc code=start
from typing import List


class Solution:
def stoneGameVI(self, aliceValues: List[int], bobValues: List[int]) -> int:
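        # Greedy argument: taking stone i both earns your own value and
        # denies the opponent theirs, so each stone's total swing is
        # aliceValues[i] + bobValues[i]. Both players therefore take the
        # remaining stone with the largest combined value; Alice ends up with
        # the stones at even positions of that order, Bob with the odd ones.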
vals, N = [], len(aliceValues)
for i in range(N):
vals.append((aliceValues[i] + bobValues[i], i))
vals.sort(reverse=True)
sum1, sum2 = 0, 0
for i in range(N):
if i%2:
sum2 += bobValues[vals[i][1]]
else:
sum1 += aliceValues[vals[i][1]]
return 1 if sum1 > sum2 else -1 if sum1 < sum2 else 0
# @lc code=end
|
[
"pzhou@mail.ie.ac.cn"
] |
pzhou@mail.ie.ac.cn
|
0b6d48549906aa14f18cbfda4b5d13d3d04db4f9
|
4ff611caf56aa799ee8aa164a625bb71220ea67c
|
/forumdb.py
|
ec9659fad960fc91b993ca0cd77b14976c4c039c
|
[] |
no_license
|
princekurt/Forum_Server
|
e844cd11bd89e41db22b85d35102077e27e9916b
|
e3eb5d980035ae3292f1bf00055e20703796edf8
|
refs/heads/master
| 2021-01-21T10:38:30.302456
| 2017-05-18T14:57:18
| 2017-05-18T14:57:18
| 91,701,182
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 609
|
py
|
# "Database code" for the DB Forum.
import datetime
import psycopg2
import bleach
DBNAME = "forum"
def get_posts():
    """Return all posts from the 'database', most recent first."""
    db = psycopg2.connect(database=DBNAME)
    c = db.cursor()
    c.execute("select content, time from posts order by time desc")
    posts = c.fetchall()
    db.close()  # close the connection before returning
    return posts
def add_post(content):
"""Add a post to the 'database' with the current timestamp."""
db = psycopg2.connect(database=DBNAME)
c = db.cursor()
output = bleach.clean(content)
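    # bleach.clean strips unsafe HTML so stored posts can't inject script into
    # pages that render them; the %s placeholder below lets psycopg2 quote the
    # value, preventing SQL injection.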
c.execute("insert into posts values (%s)", (output,))
db.commit()
db.close()
|
[
"Kurtanderson234@gmail.com"
] |
Kurtanderson234@gmail.com
|
d04bfd06874a40c5e9dafedc0dd0cd109be0ef18
|
abc6265d1201b6769de5ddffcabe0ba86b25aa1a
|
/dictonaries.py
|
82911d1416c2a144426456dfb4ba0ce38179e4ad
|
[] |
no_license
|
himanshugotra31/Day_38
|
06f3c34ce5ae976d5a3e395c6e6c66239ae055b8
|
dc9713fb8fa6edbd540480dfe698b68a3c5d163a
|
refs/heads/master
| 2023-07-20T22:51:42.324294
| 2021-09-04T08:03:30
| 2021-09-04T08:03:30
| 402,999,256
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 410
|
py
|
# ages={}
# print(type(ages))
# ages["himanshu"]=22
# ages["sahil"]=23
# ages["virat"]=20
# print(ages)
# print(ages["himanshu"])
# print(ages.keys())
# print(ages.values())
# for t in ages.items():
# print(t)
# print(type(t))
# r=(5,6,7)
# print(type(r))
# print(r)
ages={}
ages=dict()
ages={}
ages={'hi':45, 'bye': 56}
ages['ji']=89
ages[77]='io'
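# Note: dict keys only need to be hashable, so the str and int keys above
# coexist in the same dictionary.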
print(ages)
print(ages.keys())
print(ages.values())
|
[
"himanshugotra31@gmail.com"
] |
himanshugotra31@gmail.com
|
bfb3f0286894b88b3901e5811acc574ff7b3c880
|
738f664fa3a749ab61156f996bc53564f9bf5988
|
/entity_level_sentiment.py
|
3a17f6a5f0b2c9d6f5cf23ad54f6301d590dff6f
|
[] |
no_license
|
webclinic017/Stock-Prediction-5
|
a77d4b20159d15ed097bc371eb29556ace16d5ae
|
f7cee5f7cbb67e9c1f1b7ba9e7eb712eac000124
|
refs/heads/main
| 2023-04-25T03:40:56.637510
| 2021-05-06T09:58:10
| 2021-05-06T09:58:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,103
|
py
|
# To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %%
import pandas as pd
from textblob import TextBlob
import re
# %%
articles = pd.read_csv("feeds/articles.csv", index_col=0, parse_dates=["published"])
articles
# %%
# Dictionary mapping each bank to the various names it can appear under
bank = {
"sbi": ["sbi", "state bank of india", "state bank"],
"axis": ["axis", "axis bank"],
"icici": ["icici"],
}
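# Entity-level sentiment: only the sentences that mention one of a bank's
# aliases will contribute to that bank's polarity score below.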
# %%
one_article = articles["entry_text"][4]
one_article
# %%
one_article = re.sub(r"\r", "", one_article) # remove \r
one_article = re.sub(r"\n", " ", one_article) # replace \n with space
one_article = one_article.lower() # convert all words to lowercase
one_article
# %%
# Protect dots that are not sentence boundaries (inside numbers like 12.9,
# in "i.e." and "vs.", and in ".com") by replacing them with "[PROTECTED_DOT]"
string1_protected = re.sub(r"(\d)\.(\d)", r"\1[PROTECTED_DOT]\2", one_article)
string1_protected = re.sub(r"(i)\.(e)\.", r"\1[PROTECTED_DOT]\2", string1_protected)
string1_protected = re.sub(r"(v)(s)\.", r"\1[PROTECTED_DOT]\2", string1_protected)
string1_protected = re.sub(r"([a-z])\.(com)", r"\1[PROTECTED_DOT]\2", string1_protected)
# now split (and remove empty lines)
lines_protected = [line + "." for line in string1_protected.split(".") if line]
# now re-replace all "[PROTECTED_DOT]"s
lines = [line.replace("[PROTECTED_DOT]", ".") for line in lines_protected]
lines
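# The "[PROTECTED_DOT]" trick above is a small sentence splitter: dots that
# are not sentence boundaries are masked before splitting on '.', then
# restored afterwards, so abbreviations and decimals stay in one piece.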
# %%
# Input: the bank name in its lowercase abbreviated form (here "icici")
input_given = "icici"
bank_names = bank[input_given]
pol = 0
print("All lines having the given bank name:")
for i in range(len(lines)):
for j in range(len(bank_names)):
if bank_names[j] in lines[i]:
print("Line " + str(i) + ": ")
print(lines[i])
print("Line polarity: " + str(TextBlob(lines[i]).sentiment.polarity))
print("\n")
pol = pol + TextBlob(lines[i]).sentiment.polarity
print("Overall Polarity: " + str(pol))
if pol > 0:
print("Sentiment: 1")
elif pol == 0:
print("Sentiment: 0")
else:
print("Sentiment: -1")
# %%
|
[
"rsarthakgupta@gmail.com"
] |
rsarthakgupta@gmail.com
|
85ffb2c97dc6bdf86485222b35936034f786c4b5
|
c19bcbc98555ef06276f9f0dcffc9ac35942a7c4
|
/jc/parsers/upower.py
|
a659717fc6aa1e6872df753ef485daa2ab7fe47f
|
[
"MIT"
] |
permissive
|
kellyjonbrazil/jc
|
4e81a5421cd20be5965baf375f4a5671c2ef0410
|
4cd721be8595db52b620cc26cd455d95bf56b85b
|
refs/heads/master
| 2023-08-30T09:53:18.284296
| 2023-07-30T17:08:39
| 2023-07-30T17:08:39
| 215,404,927
| 6,278
| 185
|
MIT
| 2023-09-08T14:52:22
| 2019-10-15T22:04:52
|
Python
|
UTF-8
|
Python
| false
| false
| 14,113
|
py
|
"""jc - JSON Convert `upower` command output parser
The `updated_epoch` calculated timestamp field is naive. (i.e. based on the
local time of the system the parser is run on)
The `updated_epoch_utc` calculated timestamp field is timezone-aware and is
only available if the timezone field is UTC.
Usage (cli):
$ upower -d | jc --upower
or
$ jc upower -d
Usage (module):
import jc
result = jc.parse('upower', upower_command_output)
Schema:
[
{
"type": string,
"device_name": string,
"native_path": string,
"power_supply": boolean,
"updated": string,
"updated_epoch": integer, # [0]
"updated_epoch_utc": integer, # [0]
"updated_seconds_ago": integer,
"has_history": boolean,
"has_statistics": boolean,
"detail": {
"type": string,
"warning_level": string, # null if none
"online": boolean,
"icon_name": string
"present": boolean,
"rechargeable": boolean,
"state": string,
"energy": float,
"energy_unit": string,
"energy_empty": float,
"energy_empty_unit": string,
"energy_full": float,
"energy_full_unit": string,
"energy_full_design": float,
"energy_full_design_unit": string,
"energy_rate": float,
"energy_rate_unit": string,
"voltage": float,
"voltage_unit": string,
"time_to_full": float,
"time_to_full_unit": string,
"percentage": float,
"capacity": float,
"technology": string
},
"history_charge": [
{
"time": integer,
"percent_charged": float,
"status": string
}
],
"history_rate":[
{
"time": integer,
"percent_charged": float,
"status": string
}
],
"daemon_version": string,
"on_battery": boolean,
"lid_is_closed": boolean,
"lid_is_present": boolean,
"critical_action": string
}
]
[0] null if date-time conversion fails
Examples:
$ upower -i /org/freedesktop/UPower/devices/battery | jc --upower -p
[
{
"native_path": "/sys/devices/LNXSYSTM:00/device:00/PNP0C0A:00/p...",
"vendor": "NOTEBOOK",
"model": "BAT",
"serial": "0001",
"power_supply": true,
"updated": "Thu 11 Mar 2021 06:28:08 PM UTC",
"has_history": true,
"has_statistics": true,
"detail": {
"type": "battery",
"present": true,
"rechargeable": true,
"state": "charging",
"energy": 22.3998,
"energy_empty": 0.0,
"energy_full": 52.6473,
"energy_full_design": 62.16,
"energy_rate": 31.6905,
"voltage": 12.191,
"time_to_full": 57.3,
"percentage": 42.5469,
"capacity": 84.6964,
"technology": "lithium-ion",
"energy_unit": "Wh",
"energy_empty_unit": "Wh",
"energy_full_unit": "Wh",
"energy_full_design_unit": "Wh",
"energy_rate_unit": "W",
"voltage_unit": "V",
"time_to_full_unit": "minutes"
},
"history_charge": [
{
"time": 1328809335,
"percent_charged": 42.547,
"status": "charging"
},
{
"time": 1328809305,
"percent_charged": 42.02,
"status": "charging"
}
],
"history_rate": [
{
"time": 1328809335,
"percent_charged": 31.691,
"status": "charging"
}
],
"updated_seconds_ago": 441975,
"updated_epoch": 1615516088,
"updated_epoch_utc": 1615487288
}
]
$ upower -i /org/freedesktop/UPower/devices/battery | jc --upower -p -r
[
{
"native_path": "/sys/devices/LNXSYSTM:00/device:00/PNP0C0A:00/p...",
"vendor": "NOTEBOOK",
"model": "BAT",
"serial": "0001",
"power_supply": "yes",
"updated": "Thu 11 Mar 2021 06:28:08 PM UTC (441975 seconds ago)",
"has_history": "yes",
"has_statistics": "yes",
"detail": {
"type": "battery",
"present": "yes",
"rechargeable": "yes",
"state": "charging",
"energy": "22.3998 Wh",
"energy_empty": "0 Wh",
"energy_full": "52.6473 Wh",
"energy_full_design": "62.16 Wh",
"energy_rate": "31.6905 W",
"voltage": "12.191 V",
"time_to_full": "57.3 minutes",
"percentage": "42.5469%",
"capacity": "84.6964%",
"technology": "lithium-ion"
},
"history_charge": [
{
"time": "1328809335",
"percent_charged": "42.547",
"status": "charging"
},
{
"time": "1328809305",
"percent_charged": "42.020",
"status": "charging"
}
],
"history_rate": [
{
"time": "1328809335",
"percent_charged": "31.691",
"status": "charging"
}
]
}
]
"""
import jc.utils
class info():
"""Provides parser metadata (version, author, etc.)"""
version = '1.4'
description = '`upower` command parser'
author = 'Kelly Brazil'
author_email = 'kellyjonbrazil@gmail.com'
compatible = ['linux']
magic_commands = ['upower']
tags = ['command']
__version__ = info.version
def _process(proc_data):
"""
Final processing to conform to the schema.
Parameters:
proc_data: (List of Dictionaries) raw structured data to process
Returns:
List of Dictionaries. Structured data to conform to the schema.
"""
for entry in proc_data:
# time conversions
if 'updated' in entry:
updated_list = entry['updated'].replace('(', '').replace(')', '').split()
entry['updated'] = ' '.join(updated_list[:-3])
entry['updated_seconds_ago'] = jc.utils.convert_to_int(updated_list[-3])
if entry['updated']:
hints = (1000, 2000, 3000, 4000, 5000, 8000, 8100)
ts = jc.utils.timestamp(entry['updated'], format_hint=hints)
entry['updated_epoch'] = ts.naive
entry['updated_epoch_utc'] = ts.utc
# top level boolean conversions
bool_list = [
'power_supply', 'has_history', 'has_statistics', 'on_battery', 'lid_is_closed',
'lid_is_present'
]
for key in entry:
if key in bool_list:
entry[key] = jc.utils.convert_to_bool(entry[key])
# detail level boolean conversions
bool_list = ['online', 'present', 'rechargeable']
if 'detail' in entry:
for key in entry['detail']:
if key in bool_list:
entry['detail'][key] = jc.utils.convert_to_bool(entry['detail'][key])
# detail level convert warning to null if value is none
if 'detail' in entry:
if 'warning_level' in entry['detail']:
if entry['detail']['warning_level'] == 'none':
entry['detail']['warning_level'] = None
# detail level convert energy readings to float and add unit keys
if 'detail' in entry:
add_items = []
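            # New '*_unit' keys are collected here and merged in after the
            # loop; inserting them while iterating entry['detail'].items()
            # would raise "dictionary changed size during iteration".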
for key, value in entry['detail'].items():
if value and isinstance(value, str):
if len(value.split()) == 2:
entry['detail'][key] = jc.utils.convert_to_float(value.split()[0])
add_items.append({
key + '_unit': value.split()[1]
})
if add_items:
for item in add_items:
for key, value in item.items():
entry['detail'][key] = value
# detail level fix percentages
if 'detail' in entry:
for key, value in entry['detail'].items():
if value and isinstance(value, str):
if value[-1] == '%':
entry['detail'][key] = jc.utils.convert_to_float(value)
# detail level fix quoted values
if 'detail' in entry:
for key, value in entry['detail'].items():
if value and isinstance(value, str):
if value.startswith("'") and value.endswith("'"):
entry['detail'][key] = value[1:-1]
# history_charge and history_rate level convert floats and ints
histories = []
if 'history_charge' in entry:
histories.append('history_charge')
if 'history_rate' in entry:
histories.append('history_rate')
if histories:
for history_obj_list in histories:
new_history_list = []
for history_obj in entry[history_obj_list]:
new_history_obj = {}
for key, value in history_obj.items():
if key == 'time':
new_history_obj[key] = jc.utils.convert_to_int(value)
elif key == 'percent_charged':
new_history_obj[key] = jc.utils.convert_to_float(value)
else:
new_history_obj[key] = value
new_history_list.append(new_history_obj)
entry[history_obj_list] = new_history_list
return proc_data
def parse(data, raw=False, quiet=False):
"""
Main text parsing function
Parameters:
data: (string) text data to parse
raw: (boolean) unprocessed output if True
quiet: (boolean) suppress warning messages if True
Returns:
List of Dictionaries. Raw or processed structured data.
"""
jc.utils.compatibility(__name__, info.compatible, quiet)
jc.utils.input_type_check(data)
raw_output = []
device_obj = {}
device_name = None
history_key = ''
history_list = []
history_list_obj = {}
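    # The loop below is a small state machine keyed off indentation:
    # 'Device:'/'Daemon:' lines open a new object, two-space-indented lines
    # are top-level fields, and four-space-indented lines belong to the
    # current detail or history block.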
if jc.utils.has_data(data):
for line in filter(None, data.splitlines()):
if line.startswith('Device:') or line.startswith('Daemon:'):
if device_obj:
raw_output.append(device_obj)
device_obj = {}
if line.startswith('Device:'):
device_name = line.split(':', maxsplit=1)[1].strip()
device_obj = {
'type': 'Device',
"device_name": device_name
}
elif line.startswith('Daemon:'):
device_obj = {
'type': 'Daemon'
}
continue
# history detail lines
            if line.startswith('    ') and ':' not in line:
line_list = line.strip().split()
history_list_obj = {
'time': line_list[0],
'percent_charged': line_list[1],
'status': line_list[2]
}
history_list.append(history_list_obj)
device_obj[history_key] = history_list
continue
# general detail lines
            if line.startswith('    ') and ':' in line:
key = line.split(':', maxsplit=1)[0].strip().lower().replace('-', '_')\
.replace(' ', '_').replace('(', '')\
.replace(')', '')
val = line.split(':', maxsplit=1)[1].strip()
device_obj['detail'][key] = val
continue
# history detail lines are a special case of detail lines
# set the history detail key
            if line.startswith('  History (charge):') or line.startswith('  History (rate):'):
                if line.startswith('  History (charge):'):
                    history_key = 'history_charge'
                elif line.startswith('  History (rate):'):
history_key = 'history_rate'
device_obj[history_key] = {}
history_list = []
continue
# top level lines
            if line.startswith('  ') and ':' in line:
key = line.split(':', maxsplit=1)[0].strip().lower().replace('-', '_')\
.replace(' ', '_').replace('(', '')\
.replace(')', '')
val = line.split(':', maxsplit=1)[1].strip()
device_obj[key] = val
continue
# set the general detail object
            if line.startswith('  ') and ':' not in line:
detail_type = line.strip()
device_obj['detail'] = {
'type': detail_type
}
continue
if device_obj:
raw_output.append(device_obj)
if raw:
return raw_output
else:
return _process(raw_output)
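# A minimal usage sketch (not part of the original module). The sample is an
# abbreviated, hypothetical `upower -i <device>` output; raw=True is used so
# the demo does not depend on the post-processing schema:
if __name__ == '__main__':
    _sample = (
        'Device: /org/freedesktop/UPower/devices/battery_BAT0\n'
        '  native-path:          BAT0\n'
        '  power supply:         yes\n'
        '  battery\n'
        '    state:               charging\n'
        '    percentage:          42%\n'
    )
    for _device in parse(_sample, raw=True, quiet=True):
        print(_device)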
|
[
"kellyjonbrazil@gmail.com"
] |
kellyjonbrazil@gmail.com
|
a01335789376d4ea6ccf0c9557d841c67111b5dd
|
3320c8368a663eb8a5519ebd8ccbbe988efc7a20
|
/contacts/migrations/0001_initial.py
|
d7e23c6759d6d454035dbd9b5108d3024d8283e7
|
[] |
no_license
|
ineesalmeida/DjangoProject
|
55622c8c6fe23641604082af55227c40d0b00450
|
b9b7b1792b71ab4079098e1d53f57c9f1af3c491
|
refs/heads/master
| 2022-12-01T01:00:58.562954
| 2022-01-25T09:32:23
| 2022-01-25T09:32:23
| 214,301,790
| 0
| 0
| null | 2022-11-22T09:47:33
| 2019-10-10T23:12:51
|
CSS
|
UTF-8
|
Python
| false
| false
| 989
|
py
|
# Generated by Django 2.2.6 on 2019-10-18 19:14
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Contact',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('listing', models.CharField(max_length=200)),
('listing_id', models.IntegerField()),
('name', models.CharField(max_length=200)),
('email', models.CharField(max_length=100)),
('phone', models.CharField(max_length=100)),
('message', models.TextField(blank=True)),
('contact_date', models.DateField(blank=True, default=datetime.datetime(2019, 10, 18, 20, 14, 59, 349119))),
('user_id', models.IntegerField(blank=True)),
],
),
]
|
[
"ineesalmeida@gmail.com"
] |
ineesalmeida@gmail.com
|
b92e8fb0c2f2f42f1afb07d2a15c4a2920349c8c
|
9b523a5844de9722fcefa47b25a9577134d712a1
|
/producer.py
|
fe8a760624e0e7c901d1c21ca55f263cb038da95
|
[] |
no_license
|
ucakmak02/rabbitmq
|
28563c18d8174aaca7a4d16a5b99b8b29f0c8453
|
9bcaebb26c87a0f6d2cbcf7e77ec16f1558ab9ce
|
refs/heads/master
| 2020-04-08T08:01:44.809662
| 2018-11-26T11:54:46
| 2018-11-26T11:54:46
| 159,161,960
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 609
|
py
|
import pika
import mysql.connector
# give the MySQL connection its own name so the pika connection below
# does not shadow it
db_connection = mysql.connector.connect(host='localhost',
                                        database='mail_db',
                                        user='root',
                                        password='1234')
connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
channel = connection.channel()
channel.queue_declare(queue='selam')
# publish a job to the queue declared above
channel.basic_publish(exchange='',
                      routing_key='selam',
                      body='Merhaba!')
print("Selam message sent to the queue.")
connection.close()
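# A minimal sketch of the receiving side of the 'selam' queue (an assumption:
# this would normally live in a separate companion script such as consumer.py).
# It uses the pika 1.x callback signature and is wrapped in a function so the
# producer above does not start a blocking consume loop:
def example_consumer():
    consumer_connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
    consumer_channel = consumer_connection.channel()
    consumer_channel.queue_declare(queue='selam')  # declaring again is idempotent

    def on_message(channel, method, properties, body):
        print("Received:", body)

    consumer_channel.basic_consume(queue='selam',
                                   on_message_callback=on_message,
                                   auto_ack=True)
    consumer_channel.start_consuming()  # blocks until interrupted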
|
[
"u.s.cakmak02@gmail.com"
] |
u.s.cakmak02@gmail.com
|
480f481173c4a34614549fce5ad5ab1931552caa
|
226cd94f3f3b82caf1022e847cb5eaabfaf8ed98
|
/Scraps/Python/py_scraps/leetcode/ZigZagLevelOrder.py
|
8938b968a7ce2cc6fc86166b556e946958eb8dc5
|
[
"MIT"
] |
permissive
|
rajdeepslather/Scraps
|
015c49b4f26c5fedbf5500bdd1fd9e366c64f52a
|
214729529e3583ab168f36498106a6d3ba2c1a43
|
refs/heads/dev
| 2023-01-04T18:51:36.464892
| 2022-12-23T04:45:15
| 2022-12-23T04:45:15
| 148,342,773
| 0
| 0
|
MIT
| 2019-10-28T18:28:54
| 2018-09-11T15:57:06
|
Java
|
UTF-8
|
Python
| false
| false
| 995
|
py
|
from typing import List
import collections
# Definition for a binary tree node.
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Node:
def __init__(self, treeNode, level):
self.treeNode = treeNode
self.level = level
class Solution:
def zigzagLevelOrder(self, root: TreeNode) -> List[List[int]]:
        if not root:
            return []  # an empty tree yields an empty traversal
nodes = []
queue = [(root, 0)]
while queue:
node, level = queue.pop(0)
if not node:
continue
if len(nodes) > level:
if level % 2 == 1:
nodes[level].appendleft(node.val)
else:
nodes[level].append(node.val)
else:
nodes.append(collections.deque([node.val]))
queue.append((node.left, level + 1))
queue.append((node.right, level + 1))
        return [list(level_values) for level_values in nodes]  # deques to plain lists
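# A quick usage sketch (not part of the original solution): build the tree
# with root 3, children (9, 20), and 20's children (15, 7), then print the
# zigzag traversal:
if __name__ == '__main__':
    root = TreeNode(3)
    root.left = TreeNode(9)
    root.right = TreeNode(20)
    root.right.left = TreeNode(15)
    root.right.right = TreeNode(7)
    print(Solution().zigzagLevelOrder(root))  # [[3], [20, 9], [15, 7]]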
|
[
"rajdeepslather@gmail.com"
] |
rajdeepslather@gmail.com
|
29edb95f18b2c32f7967358a4c9a50bee7f64b2d
|
9d1238fb0e4a395d49a7b8ff745f21476c9d9c00
|
/framework/Tests/PAS/PAS/DomainAccount/AddDomainAccount/API/test_add_account_without_account_permission.py
|
2a99ebeceaeb81c4be8adf69220455c062d7d377
|
[] |
no_license
|
jaspalsingh92/TestAutomation-1
|
a48ee1d3b73386f1bf8f53328a5b55444238e054
|
e631c67255b10f150e0012991fb1474ede904417
|
refs/heads/master
| 2023-04-18T14:52:08.836221
| 2021-04-07T12:01:07
| 2021-04-07T12:01:07
| 357,175,690
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,129
|
py
|
from Shared.API.infrastructure import ResourceManager
import logging
import pytest
logger = logging.getLogger("test")
@pytest.mark.api
@pytest.mark.pas
@pytest.mark.pasapi
@pytest.mark.bhavna
def test_add_account_without_add_account_permission(core_session, domain_config_data, create_domain, cleanup_accounts):
"""
TC:- C1325 Add account without Add Account permission
Steps for this scenario using API:
1) Add Domain Accounts without Add Account Permission
System configuration received through test_pas_bat_domain.yaml stored in config/tests/PAS/PAS_BAT
"""
user_details = core_session.__dict__
domain_id = create_domain
account_list = cleanup_accounts[0]
assert domain_id, f'failed to create domain with response {domain_id}'
conf = domain_config_data
data = conf['pas_scenario1_new_accounts'][0]
permissions = "Grant,View,Edit,Delete"
result, add_domain_account_success = ResourceManager.set_domain_account_permissions(core_session, permissions,
user_details["auth_details"]["User"],
user_details["auth_details"]["UserId"],
pvid=domain_id)
assert add_domain_account_success, f'Failed to set add account permission in the domain {result}'
logger.info(f"add account permission set successfully in the Domain.")
new_account_id, add_account_success = ResourceManager.add_account(core_session, data['User_name'],
data['Password'],
domainid=domain_id)
assert add_account_success is False, (f"Successfully added domain account: {data['User_name']}")
account_list.append(new_account_id)
logger.info(f"Failed to add Account in the Domain, Add Account permission for this domain is required: {new_account_id}")
|
[
"singh.jaspal92@gmail.com"
] |
singh.jaspal92@gmail.com
|
094e22379764b34aafa78ac1a8d8d3f198e1ac03
|
f00fc0e009811a33145ab02ed050fe176db5212a
|
/diccionary.py
|
e309b80d2267b07c7e56a84e94970deecf7361af
|
[] |
no_license
|
egarciago/Katas
|
f664d49c8ee8aa12a4df70fb21cdc46f4c33abe1
|
343900a19c30ce25c66a46b6eb9fc28d14d04a0d
|
refs/heads/master
| 2020-04-11T01:08:47.901151
| 2018-05-12T03:04:07
| 2018-05-12T03:04:07
| 124,316,583
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 69
|
py
|
dict1 = {}
dict2 = {'user':'jPerez', 'pass':'132465'}
print(dict2)
|
[
"eliezer.ggz@gmail.com"
] |
eliezer.ggz@gmail.com
|
ccdbe5086d288f031a86899f2e692f349f629806
|
15e174cd088f78f72a903f661e62e944a7c93908
|
/sudoku/sudoku_solver/migrations/0013_auto_20200210_0205.py
|
43494dcb62671835f2e5399fec060df26685fc6e
|
[] |
permissive
|
Sudhir22/python-sudoku-generator-solver
|
f0fff58a7aeec440e2cb5ce5600ca4052136048f
|
7bd61e6e7861aab9fdb8776c9e831ee917e0d16d
|
refs/heads/master
| 2020-11-26T19:27:42.441544
| 2020-09-03T07:02:00
| 2020-09-03T07:02:00
| 229,184,985
| 0
| 0
|
MIT
| 2019-12-20T03:44:19
| 2019-12-20T03:44:18
| null |
UTF-8
|
Python
| false
| false
| 1,229
|
py
|
# Generated by Django 3.0.1 on 2020-02-09 15:05
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sudoku_solver', '0012_auto_20200116_1641'),
]
operations = [
migrations.AddField(
model_name='results',
name='age_2',
field=models.CharField(max_length=10, null=True),
),
migrations.AddField(
model_name='results',
name='favorite_2',
field=models.CharField(max_length=10, null=True),
),
migrations.AddField(
model_name='results',
name='gender_2',
field=models.CharField(max_length=10, null=True),
),
migrations.AddField(
model_name='results',
name='standard_2',
field=models.CharField(max_length=10, null=True),
),
migrations.AddField(
model_name='results',
name='subjects_2',
field=models.CharField(max_length=10, null=True),
),
migrations.AddField(
model_name='results',
name='token',
field=models.CharField(max_length=10, null=True),
),
]
|
[
"u6415626@cbenet.anu.edu.au"
] |
u6415626@cbenet.anu.edu.au
|
643e2a7399641fe1b7560dd285c4ac4e48ec7bf1
|
9a5661b2dbc5d1b829211ad8e50ed5764fa8048e
|
/project/views.py
|
3e64214c09a5c28dc7f114a2549f2eadb88684b8
|
[] |
no_license
|
khoury-ds-hub/khoury-ds-hub.github.io
|
9d8b29132869567e6beac9f23fe86991614acf69
|
15c537df90fb0bf656c085ca2e140785a4f1e2bc
|
refs/heads/master
| 2020-11-24T10:06:30.242639
| 2020-02-11T02:52:28
| 2020-02-11T02:52:28
| 228,099,537
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 601
|
py
|
from flask import render_template
from app import app, pages
@app.route('/')
def home():
posts = [page for page in pages if 'date' in page.meta]
#Sort pages by date
sorted_posts = sorted(posts, reverse=True,
key=lambda page: page.meta['date'])
return render_template('index.html', pages=sorted_posts)
@app.route('/<path:path>/')
def page(path):
# `path` is the filename of a page, without the file extension
# e.g. "first-post"
page = pages.get_or_404(path)
return render_template('page.html', page=page)
# The page(path) view retrieves a page object and passes it to the template
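# For reference, an assumed Flask-FlatPages source file (e.g. pages/first-post.md)
# that these views would serve: YAML-style meta at the top, then a blank line,
# then the page body:
#
#   title: First Post
#   date: 2020-02-10
#
#   Hello, world!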
|
[
"49109653+stanley-c-yu@users.noreply.github.com"
] |
49109653+stanley-c-yu@users.noreply.github.com
|
a16e487551648e699b5ad9047e67dabe3e34480e
|
6b69dfaa8688b662f763e8fa8504276102f2559b
|
/py05.py
|
2a4c420fa910984c96f8f34053ad6f00085a1421
|
[] |
no_license
|
LightStar0/Python-
|
a936184bc939933c59f3d2226e5be5cecb43efe9
|
5ebd4195cfc130f8349c9408554fac6c5404bc54
|
refs/heads/master
| 2020-06-03T20:17:00.407688
| 2019-06-13T07:54:45
| 2019-06-13T07:54:45
| 191,716,485
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 302
|
py
|
# -*- coding: utf-8 -*-
x=5
x='test' # strings may use either ' or " quotes
x="test"
print(x)
a=49/3
print(a)
print(type(a))
a = int(a) # convert to integer
print(a)
print(type(a))
print(id(a)) # show the object's identity
del a # delete the variable a
#print(a) # a no longer exists; this would raise NameError
x=int(2.5)
print(x/3)
print(x)
print("x=%.15f"%(x/3))
print("x=%.5f"%(x))
|
[
"noreply@github.com"
] |
LightStar0.noreply@github.com
|
427be4b57c3258d6b6614e3d6dbca933bc2c194d
|
6341ef1974236ca2bfe6c6044a7c8c34bba4632f
|
/dvhb_hybrid/user_action_log/amodels.py
|
5ac283f8c93d91def7f90a6d2125f503bd85636b
|
[
"MIT"
] |
permissive
|
dvhb/dvhb-hybrid
|
f1db73fa8eeb2a5d3d4bcb9aa2979e8bff7a99ce
|
643a45976b33eb914a86a691dba510cb8e508ec9
|
refs/heads/master
| 2023-01-23T04:37:41.793953
| 2022-01-25T14:15:07
| 2022-01-25T14:15:07
| 80,511,300
| 28
| 17
|
MIT
| 2023-01-06T11:01:13
| 2017-01-31T10:37:14
|
Python
|
UTF-8
|
Python
| false
| false
| 326
|
py
|
from .base_amodels import BaseUserActionLogEntry
from . import models
class UserActionLogEntry(BaseUserActionLogEntry):
"""
Concrete action log entry async model class to be used when utilizing hybrid's user_action_log app
"""
table = BaseUserActionLogEntry.get_table_from_django(models.UserActionLogEntry)
|
[
"aamalev@users.noreply.github.com"
] |
aamalev@users.noreply.github.com
|
71cd0a8b51e18f7eb4769731a658d93eac652ed2
|
06d41f4c61f7bd71605a9d6cd9c5e66a8e2be23c
|
/music/admin.py
|
64fd91d32779afec4cdabeaff3cacc115083ad8f
|
[] |
no_license
|
nikhil16-bhaskar/Viberr
|
0d30001bef42efada850d0ce7ee53d60bc5cca0a
|
61d16ba9375004cd7909839badd51c370e70236b
|
refs/heads/master
| 2020-04-27T20:28:41.218888
| 2019-03-15T04:55:44
| 2019-03-15T04:55:44
| 174,659,504
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 150
|
py
|
from django.contrib import admin
# Register your models here.
from .models import Album, Song
admin.site.register(Album)
admin.site.register(Song)
|
[
"nikhilbhaskar1697@gmail.com"
] |
nikhilbhaskar1697@gmail.com
|
fe434bd65b08f94459de732c1a5ce40de13dd36f
|
133f480f4d3071b78fb3e2fde7b36bedcbf567ff
|
/attic/VodafoneMobileConnectCard/build/opt/vmc/lib/python2.5/site-packages/Vodafone_Mobile_Connect_Card_driver_for_Linux-1.99.19-py2.5.egg/vmc/test/__init__.py
|
a90eab0f887e310ef1bfbfd243dd5aa66e5b4ffb
|
[] |
no_license
|
Croosi/vodafone-mobile-connect
|
5a8fe61763c37ad64e3925400bf52bd8fe6e92da
|
bf7a8d53b9cade9e76a6c1543812596e166900c5
|
refs/heads/master
| 2021-05-28T01:17:34.588541
| 2010-04-20T20:39:16
| 2010-07-18T16:43:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 833
|
py
|
# -*- coding: utf-8 -*-
# Copyright (C) 2006-2007 Vodafone España, S.A.
# Author: Pablo Martí
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
__version__ = "$Rev: 1172 $"
|
[
"rgalonso@gmv.es"
] |
rgalonso@gmv.es
|
f77dd68f34a3fd571ba7604afe7e4a1950f07c5a
|
ec1d00385ff1c7295c6f0da8901364886cc83bbe
|
/loginradius/views.py
|
9104981dd2486f097c09e436c8029488d9a7315a
|
[] |
no_license
|
op9494/Django_LoginRadius_Demo
|
412dfbd6ccace0663305043a8a952122f9256e9b
|
911e06ce4a94f77f7cf479eeffba98fe2b00f8a4
|
refs/heads/main
| 2023-04-30T05:48:37.355662
| 2021-05-27T07:17:12
| 2021-05-27T07:17:12
| 371,024,618
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,012
|
py
|
from django.shortcuts import redirect, render
from django.http import HttpResponse
from .lr import loginradius
from django.views.generic.base import RedirectView
# Create your views here.
def login(request):
return render(request,"loginradius/index.html",{
})
def dahboard(request):
return render(request,"loginradius/dashboard.html",{
})
def auth(request,action):
if action=="register":
return render(request,"loginradius/auth.html",{
"msg":"Registration Sucessfull You can login now"
})
elif action=="login":
return render(request,"loginradius/auth.html",{
"msg":"Login Sucessfull"
})
elif action=="fp":
return render(request,"loginradius/auth.html",{
})
elif action=="logout":
return render(request,"loginradius/auth.html",{
"msg":"logout Sucessfull"
})
else:
return HttpResponse("Invalid URL")
|
[
"o.pradeep64@gmail.com"
] |
o.pradeep64@gmail.com
|
c39a410ca2372d80328c35f88327d65bd577b0e3
|
134da7d3797b82e0e0c753f89f08663d2c9aee7f
|
/aps/__init__.py
|
6812fc97c1d4168eb9db139739e8f535fd37ad6d
|
[] |
no_license
|
kakiezhang/pyaps
|
98f066f5d26e06cea8ff5fbfe6cae6f28464ee65
|
7d9e7ff4478568c9a3a0b1217447cfe29a1694c2
|
refs/heads/master
| 2021-01-16T18:38:29.733615
| 2014-04-23T06:46:07
| 2014-04-23T06:46:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 77
|
py
|
# -*- coding: utf-8 -*-
from aps.client import APS
__all__ = [
'APS'
]
|
[
"chenlei@anjuke.com"
] |
chenlei@anjuke.com
|
d72a7776c5c7e045333f31c54fd2079d151931cd
|
afd9f3b7e7e5af52c3123eaa68296e5f4378b2aa
|
/PruebaFrameTkinter.py
|
ffc6efb8d169e012c171957a66900666e489b1c1
|
[] |
no_license
|
loqum/Curso-Python
|
8e7d32e4c7356f78115d283d267b1c182256d224
|
5442e6da383613c4ba79cc46887675e0c8f7c37c
|
refs/heads/master
| 2020-09-13T21:17:12.090886
| 2019-12-16T19:50:40
| 2019-12-16T19:50:40
| 222,905,278
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 492
|
py
|
from tkinter import *
class App:
def __init__(self, master):
frame = Frame(master)
frame.height = 10
frame.width = 10
frame.pack()
self.hello = Button(frame, text = "Hello", command = self.say_hello)
self.hello.pack(side = LEFT)
        self.label_hello = Label(frame, text = self.say_hello())  # call the method so the label shows its return value, not the bound-method repr
self.label_hello.pack(side = LEFT)
def say_hello(self):
return "Hello everyone!"
root = Tk()
app = App(root)
root.mainloop()
|
[
"loqum@users.github.com"
] |
loqum@users.github.com
|
182f61a71014117ff7ac3fb9d5fa0885ba103181
|
157d53570ffc165a78c48c3c25764d8e60ff2329
|
/sagedining/core.py
|
b6626f007ac8c2c743ffe9f19dedda82e71df9a9
|
[] |
no_license
|
morzack/python-sagedining
|
b4a35108839b4492d1f5938ccf435d587416bd3f
|
bc37529628c7c7cce7eff43b92401288813bf20f
|
refs/heads/master
| 2020-06-19T21:16:50.184112
| 2019-08-20T01:15:20
| 2019-08-20T01:15:20
| 196,876,590
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,281
|
py
|
from time import time
from urllib.request import urlopen
from json import loads
import datetime
from .exceptions import *
class Meal:
"""Meal serving time"""
BREAKFAST, LUNCH, SNACK, DINNER = range(4)
class Day:
"""Sage days"""
SUNDAY, MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY = range(7)
class MenuCategory:
"""Sections of Sage menu"""
STOCK_EXCHANGE, IMPROVISATIONS, CLASSIC_CUTS, MAIN_INGREDIENT, SEASONINGS, CROSSROADS, MANGIA_MANGIA, TRANSIT_FARE, PS, SPLASHES, VARIABLE, PAQUITOS, PACHIFIC_THYME, VEGITAS = range(14)
class HealthDot:
"""Sage nutrition dot rating"""
RED = "red"
YELLOW = "yellow"
GREEN = "green"
ALL = "all"
NIL = "none"
RATING_SCALE = {
1 : RED,
2 : YELLOW,
3 : GREEN,
6 : ALL,
}
def __init__(self, i):
self.rating = HealthDot.get_dot_rating(i)
def __str__(self):
return self.rating
    @staticmethod
    def get_dot_rating(i):
"""
get a health dot given a numerical rating from Sage json
:param i: sage rating as stored in json (1, 2, 3 == red, yellow, green)
"""
return HealthDot.RATING_SCALE[i] if i in HealthDot.RATING_SCALE else HealthDot.NIL
def construct_query_url(school_id, cardinality=0):
"""
Generate a URL to access a Sage menu
:param school_id: id of menu/school to get data from
:param cardinality=0:
"""
return "https://www.sagedining.com/intranet/apps/mb/pubasynchhandler.php?unitId={}&mbMenuCardinality={}&_={}".format(school_id, cardinality, int(time()))
class SageMenuItem:
def __init__(self, sage_data):
"""
:param sage_data: data for the menu item structured in the sage format
"""
self.name = sage_data["t"]
self.health_rating = HealthDot(sage_data["d"])
def __str__(self):
return self.name
class Sage:
"""Object to interface with Sage menu"""
def __init__(self, school_id):
"""
:param school_id: id of school menu to access
"""
self.school_id = school_id
self.menu_data = None
self.menu_name = None
self.meals_served = None
self.first_date = None
def update(self):
"""update the cached data"""
with urlopen(construct_query_url(self.school_id)) as request:
request_data = loads(request.read().decode("utf-8"))
if "menu" not in request_data:
raise NoMenusFound
self.menu_name = request_data["unit"]["name"]
self.first_date = datetime.datetime.fromtimestamp(int(request_data["menuList"][0]["menuFirstDate"]))
raw_menu_data = request_data["menu"]
self.meals_served = raw_menu_data["config"]["grid"]["mealsServed"]
self.menu_data = raw_menu_data["menu"]["items"]
def get_menu_date(self, date : datetime.datetime, meal):
"""
get the menu for a given date and meal
:param date: the date to get the menu for
:param meal: the meal to get data for
"""
self._ensure_updated()
if meal not in range(0, 4):
raise MealNotValid
day_of_week = (date.weekday()+1)%7
days_from_first = (date-self.first_date).days+1
if days_from_first < 0:
raise DateNotValid
week = (days_from_first+((self.first_date.weekday()+1)%7)) // 7
if week >= len(self.menu_data):
raise DateNotValid
return self.menu_data[week][day_of_week][meal]
def get_categories_date(self, date : datetime.datetime, meal, categories):
"""
get data for categories passed in
:param date: the date to get the menu for
:param meal: the meal to get data for
:param categories: a list of categories to get data for
"""
self._ensure_updated()
menu = self.get_menu_date(date, meal)
r = []
for i in categories:
if i >= len(menu):
raise CategoryNotValid
r.append([SageMenuItem(j) for j in menu[i]])
return r
def _ensure_updated(self):
"""make sure that the cache exists/is up to date"""
        if self.menu_data is None:
raise MenuCacheNotPresent
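# A minimal usage sketch (not part of the original module); 'S0000' is a
# placeholder rather than a real Sage school id, and update() performs a
# live network request:
if __name__ == '__main__':
    sage = Sage('S0000')
    sage.update()
    categories = sage.get_categories_date(datetime.datetime.now(), Meal.LUNCH,
                                          [MenuCategory.CLASSIC_CUTS])
    for category in categories:
        for menu_item in category:
            print(menu_item, menu_item.health_rating)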
|
[
"john_kesler@caryacademy.org"
] |
john_kesler@caryacademy.org
|
2fd3c1fc630a1c7d3726d871f7a3504999d77e45
|
eb99769b7c9e0eb1cf3b88878934a400ba42f0bf
|
/kennels/api/serializers.py
|
dd79a917e4f82fe112e0bbf912035aa6d9af62c4
|
[] |
no_license
|
Levalife/petsterr2.0
|
3657b200b9e236b81896f4ac104932e85517ceb3
|
43d20e65362596d72942fe624c29fd4f84d90f9a
|
refs/heads/master
| 2023-01-13T04:58:23.496527
| 2018-09-13T09:50:48
| 2018-09-13T09:50:48
| 203,134,329
| 0
| 0
| null | 2023-01-05T21:55:18
| 2019-08-19T08:48:32
|
Python
|
UTF-8
|
Python
| false
| false
| 2,090
|
py
|
# -*- coding: utf-8 -*-
from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers
from kennels.KennelsHandler import KennelsHandler
from kennels.models import Kennel
class KennelSerializer(serializers.ModelSerializer):
url = serializers.SerializerMethodField(read_only=True)
owner = serializers.SerializerMethodField(read_only=True)
owner_name = serializers.ReadOnlyField(source='owner.username')
# country_club = serializers.SerializerMethodField(source='country_club.id')
handler = KennelsHandler()
class Meta:
model = Kennel
fields = ['id',
'url',
'title',
'type',
'reg_number',
'about',
'cover',
'slug',
'address',
'timezone',
'skype',
'facebook',
'site',
'owner',
'owner_name',
'country_club',
                  'country']
        read_only_fields = ['id']
def get_url(self, obj):
request = self.context.get('request')
return obj.get_api_url(request=request)
def validate_slug(self, value):
slug = value.replace(' ', '_')
kennels = self.handler.get_by_slug(slug)
if kennels and self.instance and kennels.id != int(self.instance.id):
raise serializers.ValidationError(_(u'This page address is already in use. Try another one'))
return slug
def create(self, validated_data):
"""
Create and return a new `Kennel` instance, given the validated data.
"""
validated_data['slug'] = self.handler.make_slug(validated_data.get('title'))
return Kennel.objects.create(**validated_data)
def get_owner(self, obj):
request = self.context.get('request')
if request and request.user.is_authenticated:
if obj.owner == request.user:
return True
return False
|
[
"levushka14@gmail.com"
] |
levushka14@gmail.com
|
5eb22afee1b11d76798aa863c12c951168ce4baf
|
8c985a3471ec5ff477869a22cee320d4be39727d
|
/Backend(Django)/farcast/manage.py
|
fc83d230b1ed5db3b16f3539c14e1201430164ab
|
[] |
no_license
|
max-dmytryshyn/farcast-site
|
4bf74b243fcedbbb5d66e55f24d88573dc7787f4
|
4e994b3a0df6e57db2dfe1fef9401fa901037795
|
refs/heads/main
| 2023-07-23T10:01:55.955464
| 2021-08-03T10:30:31
| 2021-08-03T10:30:31
| 359,403,884
| 0
| 0
| null | 2021-08-03T10:30:32
| 2021-04-19T09:35:16
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 663
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'farcast.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"danilpatana1905@gmail.com"
] |
danilpatana1905@gmail.com
|
acd750dafe428564e0a711ba19ae364d354d9875
|
8068e14332d3a1cbf977f0099eae2756b4d5c8c6
|
/ad_manager/urls.py
|
08ce7ad34e175a1986cc260d9d3b80ca3d1598b9
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
mhulse/django-ad-manager
|
7ac665bff46ce21c2faeaef274d41d7751403363
|
c1335d2e304e20dd84ad766d57fcccbfbae60dd8
|
refs/heads/master
| 2021-01-25T09:59:22.349355
| 2015-03-28T19:41:23
| 2015-03-28T19:41:23
| 33,046,627
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 533
|
py
|
from django.conf.urls.defaults import *
from ad_manager.views import Api
# https://bitbucket.org/chris1610/satchmo/src/1730bf912bc1/satchmo/apps/product/urls/category.py?at=default
# http://stackoverflow.com/a/9492349/922323
urlpatterns = patterns('',
# Possible examples:
# "/grand-parent:parent:child/page-type" OR "/parent:child/page-type" OR "/child/page-type" OR "/child"
url(
r'^(?P<hierarchy>[-\w:]+)/?(?P<page>[-\w]+)?/$',
Api.as_view(),
name='ad_manager_target_api',
),
)
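# A quick check (not part of the original urls.py) of what the route pattern
# captures for the example paths in the comment above:
if __name__ == '__main__':
    import re
    pattern = re.compile(r'^(?P<hierarchy>[-\w:]+)/?(?P<page>[-\w]+)?/$')
    print(pattern.match('grand-parent:parent:child/page-type/').groupdict())
    # {'hierarchy': 'grand-parent:parent:child', 'page': 'page-type'}
    print(pattern.match('child/').groupdict())
    # {'hierarchy': 'child', 'page': None}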
|
[
"mickyhulse@gmail.com"
] |
mickyhulse@gmail.com
|
a005f00519d05ed8dcc4174258ff08973c071747
|
5524489fd69786605a0dc7109abe78dc599470cb
|
/backend/config.example.py
|
663e8a86cf83d605284a9522ab2de65c8322e172
|
[
"MIT"
] |
permissive
|
rishkarajgi/flask-boilerplate
|
26d5c2a1d9829a1a47f9bc81567b573b00ec7b53
|
584da59e3eb3c39261b5b435ef7eaf92bf127aeb
|
refs/heads/master
| 2023-04-10T02:12:50.682961
| 2019-08-22T11:36:09
| 2019-08-22T11:36:09
| 203,781,041
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,620
|
py
|
import os
from datetime import timedelta
import redis
from appdirs import AppDirs
APP_NAME = 'flask-base'
app_dirs = AppDirs(APP_NAME)
APP_CACHE_FOLDER = app_dirs.user_cache_dir
APP_DATA_FOLDER = app_dirs.user_data_dir
PROJECT_ROOT = os.path.abspath(os.path.join(
os.path.dirname(__file__), os.pardir))
TEMPLATE_FOLDER = os.path.join(PROJECT_ROOT, 'backend', 'templates')
STATIC_FOLDER = os.environ.get('FLASK_STATIC_FOLDER',
os.path.join(PROJECT_ROOT, 'static'))
STATIC_URL_PATH = '/static' # serve asset files in static/ at /static/
# list of bundle modules to register with the app, in dot notation
BUNDLES = [
'backend.database'
]
# ordered list of extensions to register before the bundles
# syntax is import.name.in.dot.module.notation:extension_instance_name
EXTENSIONS = [
'backend.extensions.jwt:jwt_manager',
'backend.extensions:db',
'backend.extensions:alembic', # must come after db
'backend.extensions.celery:celery',
'backend.extensions.mail:mail',
'backend.extensions.marshmallow:ma', # must come after db
]
# list of extensions to register after the bundles
# syntax is import.name.in.dot.module.notation:extension_instance_name
DEFERRED_EXTENSIONS = [
'backend.extensions.api:api',
'backend.extensions.docs:docs',
]
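# Illustration only (this helper is hypothetical, not part of the boilerplate):
# how an 'import.path:attribute' entry such as 'backend.extensions:db' would
# typically be resolved at registration time.
def _resolve_dotted(entry):
    """Import 'pkg.module:attr' and return the attribute."""
    import importlib
    module_path, attr_name = entry.split(':')
    return getattr(importlib.import_module(module_path), attr_name)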
def get_boolean_env(name, default):
default = 'true' if default else 'false'
return os.getenv(name, default).lower() in ['true', 'yes', '1']
class BaseConfig(object):
##########################################################################
# flask #
##########################################################################
DEBUG = get_boolean_env('FLASK_DEBUG', False)
SECRET_KEY = os.environ.get('FLASK_SECRET_KEY', 'not-secret-key') # FIXME
STRICT_SLASHES = False
BUNDLES = BUNDLES
##########################################################################
# database #
##########################################################################
SQLALCHEMY_TRACK_MODIFICATIONS = False
ALEMBIC = {
'script_location': os.path.join(PROJECT_ROOT, 'migrations'),
}
##########################################################################
# celery #
##########################################################################
CELERY_BROKER_URL = 'redis://{host}:{port}/0'.format(
host=os.getenv('FLASK_REDIS_HOST', '127.0.0.1'),
port=os.getenv('FLASK_REDIS_PORT', 6379),
)
CELERY_RESULT_BACKEND = CELERY_BROKER_URL
CELERY_ACCEPT_CONTENT = ('json', 'pickle')
##########################################################################
# mail #
##########################################################################
MAIL_ADMINS = ('admin@example.com',) # FIXME
MAIL_SERVER = os.environ.get('FLASK_MAIL_HOST', 'localhost')
MAIL_PORT = int(os.environ.get('FLASK_MAIL_PORT', 25))
MAIL_USE_TLS = get_boolean_env('FLASK_MAIL_USE_TLS', False)
MAIL_USE_SSL = get_boolean_env('FLASK_MAIL_USE_SSL', False)
MAIL_USERNAME = os.environ.get('FLASK_MAIL_USERNAME', None)
MAIL_PASSWORD = os.environ.get('FLASK_MAIL_PASSWORD', None)
MAIL_DEFAULT_SENDER = (
os.environ.get('FLASK_MAIL_DEFAULT_SENDER_NAME', 'Flask Base'),
os.environ.get('FLASK_MAIL_DEFAULT_SENDER_EMAIL',
f"noreply@{os.environ.get('FLASK_DOMAIN', 'localhost')}")
)
##########################################################################
# jwt #
##########################################################################
JWT_BLACKLIST_ENABLED = True
JWT_BLACKLIST_TOKEN_CHECKS = ('access', 'refresh')
JWT_SECRET_KEY = 'super-secret'
class ProdConfig(BaseConfig):
##########################################################################
# flask #
##########################################################################
ENV = 'prod'
DEBUG = get_boolean_env('FLASK_DEBUG', False)
##########################################################################
# database #
##########################################################################
SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://{user}:{password}@{host}:{port}/{db_name}'.format(
user=os.environ.get('FLASK_DATABASE_USER', 'flask_api'),
password=os.environ.get('FLASK_DATABASE_PASSWORD', 'flask_api'),
host=os.environ.get('FLASK_DATABASE_HOST', '127.0.0.1'),
port=os.environ.get('FLASK_DATABASE_PORT', 5432),
db_name=os.environ.get('FLASK_DATABASE_NAME', 'flask_api'),
)
class DevConfig(BaseConfig):
##########################################################################
# flask #
##########################################################################
ENV = 'dev'
DEBUG = get_boolean_env('FLASK_DEBUG', True)
# EXPLAIN_TEMPLATE_LOADING = True
##########################################################################
# database #
##########################################################################
SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://{user}:{password}@{host}:{port}/{db_name}'.format(
user=os.environ.get('FLASK_DATABASE_USER', 'flask_api'),
password=os.environ.get('FLASK_DATABASE_PASSWORD', 'flask_api'),
host=os.environ.get('FLASK_DATABASE_HOST', '127.0.0.1'),
port=os.environ.get('FLASK_DATABASE_PORT', 5432),
db_name=os.environ.get('FLASK_DATABASE_NAME', 'flask_api'),
)
# SQLALCHEMY_ECHO = True
##########################################################################
# mail #
##########################################################################
MAIL_PORT = 1025 # MailHog
MAIL_DEFAULT_SENDER = ('Flask Base', 'noreply@localhost')
class TestConfig(BaseConfig):
TESTING = True
DEBUG = True
SQLALCHEMY_DATABASE_URI = 'sqlite://' # :memory:
|
[
"rishabh.karajgi@capitalfloat.com"
] |
rishabh.karajgi@capitalfloat.com
|
eee9e4376fbb39b7af890185d892489b63b2f5ed
|
25bf04731fdaf917485427332171e36446bc1a89
|
/Resnet_SpeakReco.py
|
4118897a9217215252e47cdd3b0086c9519cd8d0
|
[] |
no_license
|
Ming0818/Multiregion-Speaker-Recognition
|
7bb9556902adfd3aa8cec344c10c8cb2aa9921d9
|
71c95f1f5f7be8058f59890b457be6785f37961a
|
refs/heads/master
| 2020-05-16T15:26:30.517567
| 2019-03-23T05:52:02
| 2019-03-23T05:52:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,934
|
py
|
import numpy as np
from keras import layers
from keras.layers import Input, Add, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D, AveragePooling2D, MaxPooling2D, GlobalMaxPooling2D
from keras.models import Model, load_model
from keras.preprocessing import image
from keras.utils import layer_utils
from keras.utils.data_utils import get_file
from keras.applications.imagenet_utils import preprocess_input
import pydot
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
from keras.utils import plot_model
from resnets_utils import *
from keras.initializers import glorot_uniform
import scipy.misc
from create_speak_reco_ds import dataset_train, dataset_test
from matplotlib.pyplot import imshow
import keras.backend as K
K.set_image_data_format('channels_last')
K.set_learning_phase(1)
def identity_block(X, f, filters, stage, block):
    # ResNet identity block: three convolutions with a shortcut that adds
    # the unchanged input back in before the final activation
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
F1, F2, F3 = filters
X_shortcut = X
X = Conv2D(filters = F1, kernel_size = (1, 1), strides = (1,1), padding = 'valid', name = conv_name_base + '2a', kernel_initializer = glorot_uniform(seed=0))(X)
X = BatchNormalization(axis = 3, name = bn_name_base + '2a')(X)
X = Activation('relu')(X)
X = Conv2D(filters = F2, kernel_size = (f, f), strides = (1,1), padding = 'same', name = conv_name_base + '2b', kernel_initializer = glorot_uniform(seed=0))(X)
X = BatchNormalization(axis = 3, name = bn_name_base + '2b')(X)
X = Activation('relu')(X)
X = Conv2D(filters = F3, kernel_size = (1, 1), strides = (1,1), padding = 'valid', name = conv_name_base + '2c', kernel_initializer = glorot_uniform(seed=0))(X)
X = BatchNormalization(axis = 3, name = bn_name_base + '2c')(X)
X = Add()([X, X_shortcut])
X = Activation('relu')(X)
return X
def convolutional_block(X, f, filters, stage, block, s = 2):
    # ResNet convolutional block: like the identity block, but the shortcut
    # path is also convolved (stride s) so its shape matches the main path
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
F1, F2, F3 = filters
X_shortcut = X
X = Conv2D(F1, (1, 1), strides = (s,s), name = conv_name_base + '2a', kernel_initializer = glorot_uniform(seed=0))(X)
X = BatchNormalization(axis = 3, name = bn_name_base + '2a')(X)
X = Activation('elu')(X)
X = Conv2D(F2, (f, f), strides = (1,1), padding='same', name = conv_name_base + '2b', kernel_initializer = glorot_uniform(seed=0))(X)
X = BatchNormalization(axis = 3, name = bn_name_base + '2b')(X)
X = Activation('elu')(X)
X = Conv2D(F3, (1, 1), strides = (1,1), name = conv_name_base + '2c', kernel_initializer = glorot_uniform(seed=0))(X)
X = BatchNormalization(axis = 3, name = bn_name_base + '2c')(X)
X_shortcut = Conv2D(F3, (1, 1), strides = (s,s), name = conv_name_base + '1', kernel_initializer = glorot_uniform(seed=0))(X_shortcut)
X_shortcut = BatchNormalization(axis = 3, name = bn_name_base + '1')(X_shortcut)
X = Add()([X, X_shortcut])
X = Activation('elu')(X)
return X
def ResNet50(input_shape = (480, 640, 4), classes = 15):
    # standard ResNet-50 stage layout (3, 4, 6, 3 blocks) adapted to
    # 4-channel inputs and 15 speaker classes
X_input = Input(input_shape)
X = ZeroPadding2D((3, 3))(X_input)
X = Conv2D(64, (7, 7), strides = (2, 2), name = 'conv1', kernel_initializer = glorot_uniform(seed=0))(X)
X = BatchNormalization(axis = 3, name = 'bn_conv1')(X)
X = Activation('elu')(X)
X = MaxPooling2D((3, 3), strides=(2, 2))(X)
X = convolutional_block(X, f = 4, filters = [64, 64, 256], stage = 2, block='a', s = 1)
X = identity_block(X, 4, [64, 64, 256], stage=2, block='b')
X = identity_block(X, 4, [64, 64, 256], stage=2, block='c')
X = convolutional_block(X, f = 4, filters = [128, 128, 512], stage = 3, block='a', s = 2)
X = identity_block(X, 4, [128, 128, 512], stage=3, block='b')
X = identity_block(X, 4, [128, 128, 512], stage=3, block='c')
X = identity_block(X, 4, [128, 128, 512], stage=3, block='d')
X = convolutional_block(X, f = 4, filters = [256, 256, 1024], stage = 4, block='a', s = 2)
X = identity_block(X, 4, [256, 256, 1024], stage=4, block='b')
X = identity_block(X, 4, [256, 256, 1024], stage=4, block='c')
X = identity_block(X, 4, [256, 256, 1024], stage=4, block='d')
X = identity_block(X, 4, [256, 256, 1024], stage=4, block='e')
X = identity_block(X, 4, [256, 256, 1024], stage=4, block='f')
X = convolutional_block(X, f = 4, filters = [512, 512, 2048], stage = 5, block='a', s = 2)
X = identity_block(X, 4, [512, 512, 2048], stage=5, block='b')
X = identity_block(X, 4, [512, 512, 2048], stage=5, block='c')
X = AveragePooling2D(pool_size=(2, 2),name = 'avg_pool')(X)
X = Flatten()(X)
X = Dense(classes, activation='softmax', name='fc' + str(classes), kernel_initializer = glorot_uniform(seed=0))(X)
model = Model(inputs = X_input, outputs = X, name='ResNet50')
return model
model = ResNet50(input_shape = (480, 640, 4), classes = 15)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
X_train_orig, Y_train_orig = dataset_train()
X_test_orig, Y_test_orig = dataset_test()
classes=15
# Normalize image vectors
X_train = X_train_orig/255.
X_test = X_test_orig/255.
# Convert training and test labels to one hot matrices
Y_train = convert_to_one_hot(Y_train_orig, 15).T
Y_test = convert_to_one_hot(Y_test_orig, 15).T
print ("number of training examples = " + str(X_train.shape[0]))
print ("number of test examples = " + str(X_test.shape[0]))
print ("X_train shape: " + str(X_train.shape))
print ("Y_train shape: " + str(Y_train.shape))
print ("X_test shape: " + str(X_test.shape))
print ("Y_test shape: " + str(Y_test.shape))
model.fit(X_train, Y_train, epochs = 30, batch_size = 32)
preds = model.evaluate(X_test, Y_test)
print ("Loss = " + str(preds[0]))
print ("Test Accuracy = " + str(preds[1]))
|
[
"Archit@Archits-MacBook-Pro.local"
] |
Archit@Archits-MacBook-Pro.local
|
1a87da88dc433cc3a98c815f50879c6aadb07708
|
ee8541d4e8e5a41a28f9bdd5e16dc7db16e39742
|
/RentBike_test.py
|
007fe541876ea39c832cd2cfb8f52aa93f4c2ad8
|
[] |
no_license
|
vovojtee/Simple-bike-rental
|
737f8bc8b4fbcab9d6f90cbadf9353914cdb365b
|
edee4d21ac6558e25ef54eefa26ed9c54532ef4a
|
refs/heads/master
| 2022-08-28T08:59:14.964123
| 2020-05-28T13:40:25
| 2020-05-28T13:40:25
| 267,594,641
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,801
|
py
|
import unittest
from datetime import datetime, timedelta
from pujcovnaKol import BikeRental, Customer
class BikeRentalTest(unittest.TestCase):
    def test_Bike_Rental_displays_correct_stock(self):
shop1 = BikeRental()
shop2 = BikeRental(10)
self.assertEqual(shop1.displaystock(), 0)
self.assertEqual(shop2.displaystock(), 10)
def test_rentBikeOnHourlyBasis_for_negative_number_of_bikes(self):
shop = BikeRental(10)
self.assertEqual(shop.rentBikeOnHourlyBasis(-1), None)
def test_rentBikeOnHourlyBasis_for_zero_number_of_bikes(self):
shop = BikeRental(10)
self.assertEqual(shop.rentBikeOnHourlyBasis(0), None)
def test_rentBikeOnHourlyBasis_for_valid_positive_number_of_bikes(self):
shop = BikeRental(10)
hour = datetime.now().hour
self.assertEqual(shop.rentBikeOnHourlyBasis(2).hour, hour)
def test_rentBikeOnHourlyBasis_for_invalid_positive_number_of_bikes(self):
shop = BikeRental(10)
self.assertEqual(shop.rentBikeOnHourlyBasis(11), None)
def test_rentBikeOnDailyBasis_for_negative_number_of_bikes(self):
shop = BikeRental(10)
self.assertEqual(shop.rentBikeOnDailyBasis(-1), None)
def test_rentBikeOnDailyBasis_for_zero_number_of_bikes(self):
shop = BikeRental(10)
self.assertEqual(shop.rentBikeOnDailyBasis(0), None)
def test_rentBikeOnDailyBasis_for_valid_positive_number_of_bikes(self):
shop = BikeRental(10)
hour = datetime.now().hour
self.assertEqual(shop.rentBikeOnDailyBasis(2).hour, hour)
def test_rentBikeOnDailyBasis_for_invalid_positive_number_of_bikes(self):
shop = BikeRental(10)
self.assertEqual(shop.rentBikeOnDailyBasis(11), None)
def test_rentBikeOnWeeklyBasis_for_negative_number_of_bikes(self):
shop = BikeRental(10)
self.assertEqual(shop.rentBikeOnWeeklyBasis(-1), None)
def test_rentBikeOnWeeklyBasis_for_zero_number_of_bikes(self):
shop = BikeRental(10)
self.assertEqual(shop.rentBikeOnWeeklyBasis(0), None)
def test_rentBikeOnWeeklyBasis_for_valid_positive_number_of_bikes(self):
shop = BikeRental(10)
hour = datetime.now().hour
self.assertEqual(shop.rentBikeOnWeeklyBasis(2).hour, hour)
def test_rentBikeOnWeeklyBasis_for_invalid_positive_number_of_bikes(self):
shop = BikeRental(10)
self.assertEqual(shop.rentBikeOnWeeklyBasis(11), None)
def test_returnBike_for_invalid_rentalTime(self):
# create a shop and a customer
shop = BikeRental(10)
customer = Customer()
        # the customer has not rented a bike; try to return one anyway
request = customer.returnBike()
self.assertIsNone(shop.returnBike(request))
# manually check return function with error values
self.assertIsNone(shop.returnBike((0, 0, 0)))
def test_returnBike_for_invalid_rentalBasis(self):
# create a shop and a customer
shop = BikeRental(10)
customer = Customer()
# create valid rentalTime and bikes
customer.rentalTime = datetime.now()
customer.bikes = 3
# create invalid rentalbasis
customer.rentalBasis = 7
request = customer.returnBike()
self.assertEqual(shop.returnBike(request), 0)
def test_returnBike_for_invalid_numOfBikes(self):
# create a shop and a customer
shop = BikeRental(10)
customer = Customer()
# create valid rentalTime and rentalBasis
customer.rentalTime = datetime.now()
customer.rentalBasis = 1
# create invalid bikes
customer.bikes = 0
request = customer.returnBike()
self.assertIsNone(shop.returnBike(request))
def test_returnBike_for_valid_credentials(self):
        # create a shop and various customers
shop = BikeRental(50)
customer1 = Customer()
customer2 = Customer()
customer3 = Customer()
customer4 = Customer()
customer5 = Customer()
customer6 = Customer()
# create valid rentalBasis for each customer
customer1.rentalBasis = 1 # hourly
customer2.rentalBasis = 1 # hourly
customer3.rentalBasis = 2 # daily
customer4.rentalBasis = 2 # daily
customer5.rentalBasis = 3 # weekly
customer6.rentalBasis = 3 # weekly
# create valid bikes for each customer
customer1.bikes = 1
customer2.bikes = 5 # eligible for family discount 30%
customer3.bikes = 2
customer4.bikes = 8
customer5.bikes = 15
customer6.bikes = 30
# create past valid rental times for each customer
customer1.rentalTime = datetime.now() + timedelta(hours=-4)
customer2.rentalTime = datetime.now() + timedelta(hours=-23)
customer3.rentalTime = datetime.now() + timedelta(days=-4)
customer4.rentalTime = datetime.now() + timedelta(days=-13)
customer5.rentalTime = datetime.now() + timedelta(weeks=-6)
customer6.rentalTime = datetime.now() + timedelta(weeks=-12)
# make all customers return their bikes
request1 = customer1.returnBike()
request2 = customer2.returnBike()
request3 = customer3.returnBike()
request4 = customer4.returnBike()
request5 = customer5.returnBike()
request6 = customer6.returnBike()
# check if all of them get correct bill
self.assertEqual(shop.returnBike(request1), 20)
self.assertEqual(shop.returnBike(request2), 402.5)
self.assertEqual(shop.returnBike(request3), 160)
self.assertEqual(shop.returnBike(request4), 2080)
self.assertEqual(shop.returnBike(request5), 5400)
self.assertEqual(shop.returnBike(request6), 21600)
class CustomerTest(unittest.TestCase):
def test_return_Bike_with_valid_input(self):
# create a customer
customer = Customer()
# create valid rentalTime, rentalBasis, bikes
now = datetime.now()
customer.rentalTime = now
customer.rentalBasis = 1
customer.bikes = 4
self.assertEqual(customer.returnBike(), (now, 1, 4))
def test_return_Bike_with_invalid_input(self):
# create a customer
customer = Customer()
# create valid rentalBasis and bikes
customer.rentalBasis = 1
customer.bikes = 0
# create invalid rentalTime
customer.rentalTime = 0
self.assertEqual(customer.returnBike(), (0, 0, 0))
if __name__ == '__main__':
unittest.main()
|
[
"noreply@github.com"
] |
vovojtee.noreply@github.com
|
fb909d916602d47b30f99cff6741b55c2ef27840
|
933d03d91112fd516d82752b93f38fe330c07ad2
|
/ReadTemp.py
|
4e5b9f4fddabe1972108b4f0766bf855cd6c1382
|
[] |
no_license
|
jdryden572/TempSensor
|
c7e13c0c747e0e3c2dca1feca075c19d681f3133
|
88c8326689c844ade333a70e8a5261ed647bd1c9
|
refs/heads/master
| 2021-01-21T21:48:18.300642
| 2016-05-01T18:49:48
| 2016-05-01T18:49:48
| 20,930,117
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,863
|
py
|
"""
ReadTemp for Raspberry Pi
James Dryden and Brett Nelson
OSIsoft
Reads temperature values from a DS18B20 sensor and writes the timestamp
and values to a .txt file
"""
import os
import glob
import time
import sys
import re
baseDir = '/sys/bus/w1/devices/'
writeFile = '/opt/LightingSystem/webServer/TempReadings.txt'
def findSensorDir():
try:
deviceFolder = glob.glob(baseDir + '28-*')[0]
return deviceFolder
except IndexError:
print('DS18B20 temperature sensor not detected. Retrying in 10 seconds.')
time.sleep(10)
return findSensorDir()
def writeToFile(tempC, tempF):
with open(writeFile, mode='w') as f:
f.write(time.strftime("%m/%d/%Y %H:%M:%S", time.localtime()) +
' tempC=' + str(tempC) + ', tempF=' + str(tempF) + '\n')
def initializeSensor():
os.system('sudo modprobe w1-gpio')
os.system('sudo modprobe w1-therm')
deviceFolder = findSensorDir()
return TempSensor(deviceFolder + '/w1_slave')
class TempSensor(object):
yesPattern = re.compile(r'YES')
    tempPattern = re.compile(r't=(-?\d+)')  # allow negative readings (sub-zero temperatures)
def __init__(self, path):
self.path = path
self._ready = False
self._temp = []
def _getData(self):
with open(self.path) as f:
data = f.read()
if self.yesPattern.search(data):
self._ready = True
else:
self._ready = False
return data
@property
def temp(self):
while not self._ready:
data = self._getData()
self._ready = False
tempString = self.tempPattern.search(data).group(1)
tempC = float(tempString) / 1000.0
tempF = tempC * 9.0 / 5.0 + 32.0
self._temp = [tempC, tempF]
return self._temp
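# A self-contained sketch (not part of the original script) of the two-line
# w1_slave format the class parses; it writes a sample to a temporary file
# instead of touching a real sensor:
def _demo_parse():
    import tempfile
    sample = ('73 01 4b 46 7f ff 0d 10 41 : crc=41 YES\n'
              '73 01 4b 46 7f ff 0d 10 41 t=23187\n')
    with tempfile.NamedTemporaryFile('w', suffix='_w1_slave', delete=False) as f:
        f.write(sample)
    sensor = TempSensor(f.name)
    print(sensor.temp)  # approximately [23.187, 73.7366]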
if __name__ == '__main__':
try:
DEBUG = sys.argv[1] == '-d'
except IndexError:
DEBUG = False
sensor = initializeSensor()
while True:
[tempC, tempF] = sensor.temp
if DEBUG: print('tempC=' + str(tempC), 'tempF=' + str(tempF))
# writeToFile(tempC, tempF)
time.sleep(2)
|
[
"jdryden572@gmail.com"
] |
jdryden572@gmail.com
|
5e88e5d73b668e61060687eb006553a3d7a0657b
|
09fd4eb68ed59de9608ee2917af5eaece0de123f
|
/ait/core/server/broker.py
|
ba4dbee2cb05d9e4e931ddef0ec3db538f18a1a9
|
[
"MIT"
] |
permissive
|
seanlu99/AIT-Core
|
9540450bf3a772e4c996bfa2c23b31a2cb67c9e6
|
d746079bcff574d930f633bee59337eabf54e99c
|
refs/heads/master
| 2020-06-02T15:37:40.496968
| 2019-08-20T17:33:56
| 2019-08-20T17:33:56
| 191,211,543
| 1
| 0
|
MIT
| 2019-06-10T17:11:23
| 2019-06-10T17:11:22
| null |
UTF-8
|
Python
| false
| false
| 3,048
|
py
|
import zmq.green as zmq
import gevent
import gevent.monkey; gevent.monkey.patch_all()
import ait.core
import ait.core.server
from ait.core import log
class Broker(gevent.Greenlet):
"""
This broker contains the ZeroMQ context and proxy that connects all
streams and plugins to each other through publish-subscribe sockets.
This broker subscribes all ZMQ clients to their input topics.
"""
inbound_streams = [ ]
outbound_streams = [ ]
servers = [ ]
plugins = [ ]
def __init__(self):
self.context = zmq.Context()
self.XSUB_URL = ait.config.get('server.xsub',
ait.SERVER_DEFAULT_XSUB_URL)
self.XPUB_URL = ait.config.get('server.xpub',
ait.SERVER_DEFAULT_XPUB_URL)
gevent.Greenlet.__init__(self)
def _run(self):
self._setup_proxy()
self._subscribe_all()
log.info("Starting broker...")
while True:
gevent.sleep(0)
socks = dict(self.poller.poll())
if socks.get(self.frontend) == zmq.POLLIN:
message = self.frontend.recv_multipart()
self.backend.send_multipart(message)
if socks.get(self.backend) == zmq.POLLIN:
message = self.backend.recv_multipart()
self.frontend.send_multipart(message)
def _setup_proxy(self):
self.frontend = self.context.socket(zmq.XSUB)
self.frontend.bind(self.XSUB_URL)
self.backend = self.context.socket(zmq.XPUB)
self.backend.bind(self.XPUB_URL)
self.poller = zmq.Poller()
self.poller.register(self.frontend, zmq.POLLIN)
self.poller.register(self.backend, zmq.POLLIN)
def _subscribe_all(self):
"""
Subscribes all streams to their input.
Subscribes all plugins to all their inputs.
Subscribes all plugin outputs to the plugin.
"""
for stream in (self.inbound_streams + self.outbound_streams):
for input_ in stream.inputs:
if not type(input_) is int and input_ is not None:
self._subscribe(stream, input_)
for plugin in self.plugins:
for input_ in plugin.inputs:
self._subscribe(plugin, input_)
for output in plugin.outputs:
# Find output stream instance
subscriber = next((x for x in self.outbound_streams
if x.name == output), None)
if subscriber is None:
log.warn('The outbound stream {} does not '
'exist so will not receive messages '
'from {}'.format(output, plugin))
else:
self._subscribe(subscriber, plugin.name)
def _subscribe(self, subscriber, publisher):
log.info('Subscribing {} to topic {}'.format(subscriber, publisher))
subscriber.sub.setsockopt(zmq.SUBSCRIBE, str(publisher))
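# A standalone sketch (not part of the original module) of how a publisher and
# a subscriber attach to an XSUB/XPUB proxy like the one above; the URLs are
# placeholders standing in for the configured defaults:
def _example_pub_sub(xsub_url='tcp://127.0.0.1:5559', xpub_url='tcp://127.0.0.1:5560'):
    import zmq

    ctx = zmq.Context.instance()
    pub = ctx.socket(zmq.PUB)
    pub.connect(xsub_url)   # publishers connect to the proxy's XSUB side
    sub = ctx.socket(zmq.SUB)
    sub.connect(xpub_url)   # subscribers connect to the proxy's XPUB side
    sub.setsockopt(zmq.SUBSCRIBE, b'telemetry')  # topic prefix filter
    return pub, sub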
|
[
"anna.waldron@jpl.nasa.gov"
] |
anna.waldron@jpl.nasa.gov
|