Column schema (one record per source file; type and length/value range per column):

| column | type | range / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 2 to 616 |
| content_id | string | length 40 |
| detected_licenses | list | length 0 to 69 |
| license_type | string | 2 classes |
| repo_name | string | length 5 to 118 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | length 4 to 63 |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 2.91k to 686M, nullable |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 213 classes |
| src_encoding | string | 30 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 2 to 10.3M |
| extension | string | 246 classes |
| content | string | length 2 to 10.3M |
| authors | list | length 1 to 1 |
| author_id | string | length 0 to 212 |
---
repo: ajdillhoff/3dhpe-udd | path: /model/PartCapsuleNet.py | license: MIT (permissive) | branch: refs/heads/master | revision date: 2020-09-03T17:22:35
github_id: 225,428,163 | stars: 1 | forks: 0 | language: Python (UTF-8) | size: 4,519 bytes | extension: py
blob_id: c8596050613ddd18b404c32750a3793b9e419fc6
import torch
import torch.nn as nn
import torch.nn.functional as F
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=dilation, groups=groups, bias=False, dilation=dilation)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.stride = stride
self.downsample = downsample
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.relu(out)
out = self.conv2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Capsule(nn.Module):
def __init__(self, conv_block, num_pose_features, num_point_features):
super(Capsule, self).__init__()
self.num_pose_features = num_pose_features
self.num_point_features = num_point_features
# Pose encoder
self.conv_block = conv_block
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.pose_fc = nn.Linear(512, self.num_pose_features)
# Point map decoder
up = nn.Upsample(mode='bilinear', scale_factor=2)
u1 = nn.Conv2d(512, 128, kernel_size=3, stride=1, padding=1)
u2 = nn.Conv2d(128, 1, kernel_size=3, stride=1, padding=1)
self.map_decoder = nn.Sequential(*[
up,
u1,
nn.ReLU(inplace=True),
up,
u2,
nn.ReLU(inplace=True),
nn.Flatten(),
nn.Linear(28*28, num_point_features),
nn.ReLU(inplace=True)
])
def forward(self, x):
conv_out = self.conv_block(x)
pose_out = self.avgpool(conv_out)
pose_out = pose_out.squeeze(-1).squeeze(-1)
pose_out = self.pose_fc(pose_out)
map_out = self.map_decoder(conv_out)
return torch.cat((pose_out, map_out), -1)
class PartCapsuleNet(nn.Module):
def __init__(self, layers, num_parts=1, num_features=4,
point_features=1024):
super(PartCapsuleNet, self).__init__()
self.num_parts = num_parts
self.num_features = num_features
self.point_features = point_features
block = BasicBlock
self.inplanes = 64
self.conv1 = nn.Conv2d(1, self.inplanes, kernel_size=7,
stride=2, padding=3, bias=False)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self.make_layer(block, 64, layers[0])
self.layer2 = self.make_layer(block, 128, layers[1], stride=2)
self.layer3 = self.make_layer(block, 256, layers[2], stride=2)
self.capsules = nn.ModuleList([self.create_capsule(block)
for i in range(num_parts)])
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
def create_capsule(self, block):
self.inplanes = 256
b1 = self.make_layer(block, 512, 1, stride=2)
capsule = Capsule(b1, self.num_features, self.point_features)
return capsule
def make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes, stride)
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
output = [capsule(x).unsqueeze(1) for capsule in self.capsules]
output = torch.cat(output, 1)
return output
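A minimal smoke-test sketch, added for illustration and not part of the original file; the 224x224 single-channel input and layers=[2, 2, 2] are assumptions chosen so the capsule feature map is 7x7 and matches the decoder's 28x28 flatten.

# Hypothetical usage sketch (assumed shapes, not from the repository).
if __name__ == "__main__":
    model = PartCapsuleNet(layers=[2, 2, 2], num_parts=6)
    dummy = torch.randn(2, 1, 224, 224)   # (batch, channels, height, width)
    out = model(dummy)
    # each capsule emits a 4-dim pose vector plus 1024 point-map features
    print(out.shape)                       # expected: torch.Size([2, 6, 1028])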
author: ajdillhoff@gmail.com

---
repo: teamchz/pythonProject | path: /PG17.py | license: none detected | branch: refs/heads/master | revision date: 2020-11-02T09:39:03
github_id: 306,906,496 | stars: 0 | forks: 0 | language: Python (UTF-8) | size: 152 bytes | extension: py
blob_id: 46578a876b572f705fd240a207db106194fe2d29
def progress(n):
    global money
    if n == 0:
        return money
    else:
        return progress(n-1) * 1.05

money = 10000
print(progress(30))
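For reference (an added note, not part of the original file): the recursion applies 5% growth thirty times, so it matches the closed-form expression below.

# Closed-form check of the recursive calculation above (same result, roughly 43219.42).
print(10000 * 1.05 ** 30)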
author: teamchz@gmail.com

---
repo: bryant/pyblake2 | path: /pyblake2/bench.py | license: MIT (permissive) | branch: refs/heads/master | revision date: 2013-04-06T08:18:23
github_id: none | stars: 0 | forks: 0 | language: Python (UTF-8) | size: 110 bytes | extension: py
blob_id: 27e43e9d33a28d66d39c58401890ea2ae4378934
from pyblake2 import Blake2b
b = Blake2b()
payload = ''.join(chr(i) for i in xrange(256)) * 4000
b(payload)
author: bryant.wong@stonybrook.edu

---
repo: pepebonet/bbgMod | path: /setup.py | license: none detected | branch: refs/heads/master | revision date: 2020-05-05T07:18:15
github_id: 252,757,969 | stars: 0 | forks: 0 | language: Python (UTF-8) | size: 873 bytes | extension: py
blob_id: e6f5c41e185e736d4772e6ccc91ba53bfe5ba936
from os import path
from setuptools import setup, find_packages
VERSION = "0.1"
DESCRIPTION = "Deeplearning tool"
directory = path.dirname(path.abspath(__file__))
# Get requirements from the requirements.txt file
with open(path.join(directory, 'requirements.txt')) as f:
required = f.read().splitlines()
# Get the long description from the README file
with open(path.join(directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='bbgMod',
version=VERSION,
description=DESCRIPTION,
long_description=long_description,
long_description_content_type='text/markdown',
url="",
author="Miguel Grau & Jose Bonet",
author_email="-",
packages=find_packages(),
install_requires=required,
entry_points={
'console_scripts': [
'bbgMod = bbgmod.bbgMod:cli',
]
}
)
author: jose.bonet@irbbarcelona.org

---
repo: AlexandreLouzada/Pyquest | path: /envExemplo/Lista09/Lista09ExPOO12.py | license: none detected | branch: refs/heads/master | revision date: 2023-08-21T19:04:39
github_id: 248,549,732 | stars: 10 | forks: 4 | language: Python (UTF-8) | size: 1,538 bytes | extension: py
blob_id: a49f3d3e0d40c24c70d0cb23ce2a51ac5fd2af44
class Produto:
def __init__(self, codigo, nome, quantidade):
self.codigo = codigo
self.nome = nome
self.quantidade = quantidade
def mostrar_detalhes(self):
print("Detalhes do Produto:")
print(f"Código: {self.codigo}")
print(f"Nome: {self.nome}")
print(f"Quantidade: {self.quantidade}")
def adicionar_estoque(self, quantidade):
self.quantidade += quantidade
def __str__(self):
return f"Código: {self.codigo}\nNome: {self.nome}\nQuantidade: {self.quantidade}"
def __eq__(self, other):
if isinstance(other, Produto):
return self.codigo == other.codigo
return False
def __lt__(self, other):
if isinstance(other, Produto):
return self.quantidade < other.quantidade
raise TypeError("Incompatível com a comparação")
def __len__(self):
return self.quantidade
# Exemplo de uso dos métodos e atributos da classe Produto
produto1 = Produto(1, "Camiseta", 10)
produto2 = Produto(2, "Calça", 5)
produto1.mostrar_detalhes() # Exibe os detalhes do produto1
produto2.mostrar_detalhes() # Exibe os detalhes do produto2
print(len(produto1)) # Retorna a quantidade do produto1
produto1.adicionar_estoque(5) # Adiciona 5 unidades ao estoque do produto1
print(produto1) # Exibe a representação em string do produto1
print(produto1 == produto2) # Compara se o produto1 é igual ao produto2
print(produto1 < produto2) # Compara se o produto1 é menor que o produto2
author: professorlouzada@gmail.com

---
repo: luozhouyang/deepseg | path: /deepseg/dataset.py | license: Apache-2.0 (permissive) | branch: refs/heads/master | revision date: 2021-01-16T13:02:58
github_id: 155,837,694 | stars: 28 | forks: 8 | language: Python (UTF-8) | size: 6,528 bytes | extension: py
gha: license Apache-2.0 | event created 2023-03-25T00:52:11 | repo created 2018-11-02T08:40:19 | language Python
blob_id: 05a4111cad8b7254f2fa80e1235f86b814ba4854
import logging
import os
import re
import tensorflow as tf
def read_vocab_file(vocab_file):
words = []
with open(vocab_file, mode='rt', encoding='utf8') as fin:
for line in fin:
line = line.rstrip('\n').strip()
if not line:
continue
words.append(line)
vocab = {}
for i, v in enumerate(words):
vocab[v] = i
return vocab
def build_tags(words):
tokens, tags = [], []
for word in words:
if len(word) == 1:
tokens.append(word)
tags.append('O')
continue
for i, c in enumerate(word):
if i == 0:
tokens.append(c)
tags.append('B')
else:
tokens.append(c)
tags.append('I')
return tokens, tags
def read_files(input_files, callback=None):
if isinstance(input_files, str):
input_files = [input_files]
for f in input_files:
if not os.path.exists(f):
logging.warning('File %s does not exist.', f)
continue
with open(f, mode='rt', encoding='utf8') as fin:
for line in fin:
line = line.rstrip('\n')
if not line:
continue
if callback:
callback(line)
logging.info('Read file %s finished.', f)
logging.info('Read all files finished.')
def read_train_files(input_files, sep=' '):
features, labels = [], []
def collect_fn(line):
tokens, tags = build_tags(re.split(sep, line))
if len(tokens) != len(tags):
return
features.append(tokens)
labels.append(tags)
read_files(input_files, callback=collect_fn)
return features, labels
def read_predict_files(input_files):
features = []
def collect_fn(line):
tokens = [w.strip() for w in line if w.strip()]
features.append(tokens)
read_files(input_files, callback=collect_fn)
return features
class LabelMapper:
def __init__(self):
self.label2id = {
'O': 0,
'B': 1,
'I': 2
}
self.id2label = {v: k for k, v in self.label2id.items()}
def encode(self, labels):
ids = [self.label2id.get(label, 0) for label in labels]
return ids
def decode(self, ids):
labels = [self.id2label.get(_id, 'O') for _id in ids]
return labels
class TokenMapper:
def __init__(self, vocab_file, unk_token='[UNK]', pad_token='[PAD]'):
self.token2id = read_vocab_file(vocab_file)
self.id2token = {v: k for k, v in self.token2id.items()}
assert len(self.token2id) == len(self.id2token)
self.unk_token = unk_token
self.unk_id = self.token2id[self.unk_token]
self.pad_token = pad_token
self.pad_id = self.token2id[self.pad_token]
def encode(self, tokens):
ids = [self.token2id.get(token, self.unk_id) for token in tokens]
return ids
def decode(self, ids):
tokens = [self.id2token.get(_id, self.unk_token) for _id in ids]
return tokens
class DatasetBuilder:
def __init__(self, token_mapper, label_mapper, **kwargs):
self.token_mapper = token_mapper
self.label_mapper = label_mapper
self.feature_pad_id = self.token_mapper.pad_id
self.label_pad_id = self.label_mapper.label2id['O']
def build_train_dataset(
self,
input_files,
batch_size=32,
buffer_size=1000000,
sequence_maxlen=None,
bucket_boundaries=[50, 100, 150, 200, 250, 300, 350, 400, 450, 500],
repeat=1,
**kwargs):
features, labels = read_train_files(input_files, sep=kwargs.get('sep', ' '))
features = [self.token_mapper.encode(x) for x in features]
labels = [self.label_mapper.encode(x) for x in labels]
features = tf.ragged.constant(features, dtype=tf.int32)
labels = tf.ragged.constant(labels, dtype=tf.int32)
x_dataset = tf.data.Dataset.from_tensor_slices(features)
# convert ragged tensor to tensor
x_dataset = x_dataset.map(lambda x: x)
y_dataset = tf.data.Dataset.from_tensor_slices(labels)
y_dataset = y_dataset.map(lambda y: y)
dataset = tf.data.Dataset.zip((x_dataset, y_dataset))
if sequence_maxlen is not None and sequence_maxlen > 0:
dataset = dataset.filter(lambda x, y: tf.size(x) < sequence_maxlen)
dataset = dataset.repeat(repeat)
dataset = dataset.shuffle(buffer_size=buffer_size, reshuffle_each_iteration=True)
bucket_batch_sizes = [batch_size] * (len(bucket_boundaries) + 1)
dataset = dataset.apply(tf.data.experimental.bucket_by_sequence_length(
element_length_func=lambda x, _: tf.size(x[0]),
bucket_boundaries=bucket_boundaries,
bucket_batch_sizes=bucket_batch_sizes,
padded_shapes=([None], [None]),
padding_values=(self.token_mapper.pad_id, self.label_mapper.label2id['O'])
))
# dataset = dataset.padded_batch(
# batch_size=batch_size,
# padded_shapes=([None], [None]),
# padding_values=(self.token_mapper.pad_id, self.label_mapper.label2id['O'])
# )
return dataset
def build_valid_dataset(
self,
input_files,
batch_size=32,
buffer_size=100000,
sequence_maxlen=None,
bucket_boundaries=[50, 100, 150, 200, 250, 300, 350, 400, 450, 500],
repeat=1,
**kwargs):
return self.build_train_dataset(
input_files,
batch_size,
buffer_size,
sequence_maxlen=sequence_maxlen,
bucket_boundaries=bucket_boundaries,
repeat=repeat,
**kwargs)
def build_predict_dataset(self, input_files, batch_size, **kwargs):
features = read_predict_files(input_files)
features = [self.token_mapper.encode(x) for x in features]
features = tf.ragged.constant(features, dtype=tf.int32)
dataset = tf.data.Dataset.from_tensor_slices(features)
dataset = dataset.map(lambda x: x)
dataset = dataset.padded_batch(
batch_size=batch_size,
padded_shapes=[None],
padding_values=self.token_mapper.pad_id
)
# dataset = dataset.map(lambda x: (x, None))
return dataset
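A minimal usage sketch, added for illustration; 'vocab.txt' and 'train.txt' are placeholder paths, not files shipped with the project (the vocab file must contain the [PAD] and [UNK] tokens, and training files hold whitespace-segmented sentences, one per line).

# Hypothetical wiring of the classes above; file paths are placeholders.
token_mapper = TokenMapper('vocab.txt')
label_mapper = LabelMapper()
builder = DatasetBuilder(token_mapper, label_mapper)
train_dataset = builder.build_train_dataset('train.txt', batch_size=32)
for batch_x, batch_y in train_dataset.take(1):
    print(batch_x.shape, batch_y.shape)   # padded (batch, seq_len) int32 tensors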
author: zhouyang.luo@gmail.com

---
repo: Nash2325138/DL_contest2_codes_viewing | path: /simple-cnn/util/data_processor.py | license: none detected | branch: refs/heads/master | revision date: 2017-11-29T14:58:02
github_id: 112,488,710 | stars: 0 | forks: 0 | language: Python (UTF-8) | size: 903 bytes | extension: py
blob_id: 205adc6a7e58f4313d71b4cfc42c1bba51e03ec8
#!/usr/bin/env python3
import numpy as np
import pickle as pkl
import tensorflow as tf
from PIL import Image
from train.params import *
from util.bbox_transform import *
def preprocess_train_data(df):
width = par_img_width()
height = par_img_height()
boxes_resize = df['boxes'].copy()
for img in range(len(boxes_resize)):
image = Image.open('/home/Public/JPEGImages/' + df['image_name'][img])
w = image.size[0]
h = image.size[1]
boxes = boxes_resize[img]
boxes[:, [0, 2]] = boxes[:, [0, 2]] * (width / w)
boxes[:, [1, 3]] = boxes[:, [1, 3]] * (height / h)
boxes_resize[img] = np.array(
[df['gt_classes'][img][0]] + bbox_transform(
np.array([0, 0, width - 1, height - 1]),
boxes[0]).tolist())
new_df = df.copy()
new_df['one_gt'] = boxes_resize
return new_df
author: j2325138@gmail.com

---
repo: SafonovMikhail/python_000577 | path: /000562HeadFirstPy/000562_01_01_p079_Range_20200225.py | license: Apache-2.0 (permissive) | branch: refs/heads/master | revision date: 2022-12-07T09:09:51
github_id: 204,713,341 | stars: 0 | forks: 0 | language: Python (UTF-8) | size: 265 bytes | extension: py
blob_id: 013d119c7e6c94b89935d95ed0ebb30cce151760
# range
print('range(5):\t\t\t\t', range(5))
print('list(range(0, 5)):\t\t', list(range(0, 5)))
print('list(range(5, 10)):\t\t', list(range(5, 10)))
print('list(range(0, 10, 2)):\t', list(range(0, 10, 2)))
print('list(range(10, 0, -2)):\t', list(range(10, 0, -2)))
author: ms33@inbox.ru

---
repo: sharmaprateek/sos | path: /sos_app/login.py | license: none detected | branch: refs/heads/master | revision date: 2014-12-14T20:44:41
github_id: none | stars: 0 | forks: 0 | language: Python (UTF-8) | size: 1,822 bytes | extension: py
blob_id: ac2b7e9aab22ce7a5f8cf75c707dfeacedc7298e
from django.http import HttpResponse, HttpResponseRedirect
from django.template import RequestContext, loader
from django.views.decorators.csrf import csrf_exempt
from django.contrib.auth.decorators import login_required
from django.contrib.auth import authenticate, login, logout
@csrf_exempt
def user_login(request):
post = request.POST
if request.user:
logout(request)
print request
if post == {}:
template = loader.get_template('sos_app/login.html')
context = RequestContext(request, {})
return HttpResponse(template.render(context))
else:
username = post['username']
password = post['password']
# do this way for now
# update_database(None)
user = authenticate(username=username, password=password)
if user:
if user.is_active:
# If the account is valid and active, we can log the user in.
# We'll send the user back to the homepage.
login(request, user)
template = loader.get_template('sos_app/login.html')
context = RequestContext(request, {})
return HttpResponse(template.render(context))
else:
# An inactive account was used - no logging in!
return HttpResponse("Your account is disabled.")
else:
# Bad login details were provided. So we can't log the user in.
print "Invalid login details: {0}, {1}".format(username, password)
return HttpResponse("Invalid login details supplied.")
@login_required
def user_logout(request):
logout(request)
nextURL = "/clusters/"
if ('next' in request.GET):
nextURL = request.GET['next']
return HttpResponseRedirect(nextURL)
author: prateek.sharma@delphix.com

---
repo: bipulhstu/Data-Structures-and-Algorithms-Through-Python-in-Depth | path: /1. Single Linked List/1. Simple Linked List.py | license: none detected | branch: refs/heads/master | revision date: 2018-11-12T05:29:20
github_id: 157,160,846 | stars: 2 | forks: 0 | language: Python (UTF-8) | size: 365 bytes | extension: py
blob_id: e54ed7f655645acc151f4ce37ea3e07d86ced540
class Node:
    def __init__(self, data):
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self):
        self.head = None


if __name__ == "__main__":
    llist = LinkedList()
    llist.head = Node(6)
    second = Node(3)
    third = Node(4)

    # link the nodes together
    llist.head.next = second
    second.next = third
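The file ends after linking the nodes; a short traversal loop, added here as an illustration, shows how the list is walked.

# Walk the list from head to tail and print each node's data (6, 3, 4).
node = llist.head
while node is not None:
    print(node.data)
    node = node.next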
author: bipulhstu@gmail.com

---
repo: firstvan/Python | path: /hf/3hf/10rejtettuzi.py | license: none detected | branch: refs/heads/master | revision date: 2014-12-01T16:51:33
github_id: none | stars: 0 | forks: 0 | language: Python (UTF-8) | size: 556 bytes | extension: py
blob_id: c0563edd5ffa2d7bea4dd77f4ffab00fc1caba0f
#!/usr/bin/env python2
import sys
szoveg ='''Cbcq Dgyk!
Dmeybh kce cew yrwyg hmrylyaqmr:
rylsjb kce y Nwrfml npmepykmxyqg lwcjtcr!
Aqmimjjyi:
Ynyb'''
def main():
szoveg1=''
for ch in szoveg:
a = chr(ord(ch) + 2)
if a == '{':
szoveg1 += 'a'
elif a =='[':
szoveg1 += 'A'
elif a =='<':
szoveg1 += ':'
elif ch =='!' or ch == ' ' or ch == '\n':
szoveg1 += ch
else:
szoveg1 += a
print szoveg1
if __name__ == "__main__":
main();
author: firstvan@live.com

---
repo: hieuqtran/cugraph | path: /python/cugraph/centrality/katz_centrality.py | license: Apache-2.0 (permissive) | branch: refs/heads/master | revision date: 2019-10-17T17:56:36
github_id: none | stars: 0 | forks: 0 | language: Python (UTF-8) | size: 3,831 bytes | extension: py
blob_id: e272f1bdf83a3217db0d3d28ac63b47e6ed78a94
# Copyright (c) 2019, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cugraph.centrality import katz_centrality_wrapper
def katz_centrality(G,
alpha=0.1,
max_iter=100,
tol=1.0e-6,
nstart=None,
normalized=True):
"""
Compute the Katz centrality for the nodes of the graph G. cuGraph does not
currently support the 'beta' and 'weight' parameters as seen in the
corresponding networkX call. This implementation is based on a relaxed
version of Katz defined by Foster with a reduced computational complexity
of O(n+m)
Foster, K.C., Muth, S.Q., Potterat, J.J. et al.
Computational & Mathematical Organization Theory (2001) 7: 275.
https://doi.org/10.1023/A:1013470632383
Parameters
----------
G : cuGraph.Graph
cuGraph graph descriptor with connectivity information. The graph can
contain either directed or undirected edges where undirected edges are
represented as directed edges in both directions.
alpha : float
Attenuation factor with a default value of 0.1. If alpha is not less
than 1/(lambda_max) where lambda_max is the maximum degree
GDF_CUDA_ERROR is returned
max_iter : int
The maximum number of iterations before an answer is returned. This can
be used to limit the execution time and do an early exit before the
solver reaches the convergence tolerance.
If this value is lower or equal to 0 cuGraph will use the default
value, which is 100.
tol : float
Set the tolerance of the approximation; this parameter should be a small
magnitude value.
The lower the tolerance the better the approximation. If this value is
0.0f, cuGraph will use the default value which is 1.0e-6.
Setting too small a tolerance can lead to non-convergence due to
numerical roundoff. Usually values between 1e-2 and 1e-6 are
acceptable.
nstart : cudf.Dataframe
GPU Dataframe containing the initial guess for katz centrality.
nstart['vertex'] : cudf.Series
Contains the vertex identifiers
nstart['values'] : cudf.Series
Contains the katz centrality values of vertices
normalized : bool
If True normalize the resulting katz centrality values
Returns
-------
df : cudf.DataFrame
GPU data frame containing two cudf.Series of size V: the vertex
identifiers and the corresponding katz centrality values.
df['vertex'] : cudf.Series
Contains the vertex identifiers
df['katz_centrality'] : cudf.Series
Contains the katz centrality of vertices
Examples
--------
>>> M = cudf.read_csv('datasets/karate.csv', delimiter=' ',
>>> dtype=['int32', 'int32', 'float32'], header=None)
>>> sources = cudf.Series(M['0'])
>>> destinations = cudf.Series(M['1'])
>>> G = cugraph.Graph()
>>> G.add_edge_list(sources, destinations, None)
>>> kc = cugraph.katz_centrality(G)
"""
df = katz_centrality_wrapper.katz_centrality(G.graph_ptr, alpha, max_iter,
tol, nstart, normalized)
return df
author: kaatish@nvidia.com

---
repo: DmitryVakhrushev/Python | path: /PythonProjects/Python3EssentialTraining/10_InheritanceAndPolymorphism.py | license: none detected | branch: refs/heads/master | revision date: 2020-06-28T02:29:17
github_id: 243,847,823 | stars: 0 | forks: 0 | language: Python (UTF-8) | size: 1,815 bytes | extension: py
blob_id: f83ef861fff3787e8ddea902f0faa23e23325946
#-------------------------------------------------------------------
# Lesson 10: Greater reusability with inheritance and polymorphism
#-------------------------------------------------------------------
# Each class (Duck, Person, Dog) inherits from the "AnimalActions" class
# Class AnimalActions has 4 methods
# The other classes have dictionaries with different strings
# Each method of the AnimalActions class becomes available to each subclass
class AnimalActions:
def quack(self): return self.strings['quack']
def feathers(self): return self.strings['feathers']
def bark(self): return self.strings['bark']
def fur(self): return self.strings['fur']
class Duck(AnimalActions):
strings = dict(
quack = "Quaaaaak!",
feathers = "The duck has grey and white feathers.",
bark = "The duck cannot bark.",
fur = "The duck has no fur."
)
class Person(AnimalActions):
strings = dict(
quack = "The person imitates a duck.",
feathers = "The person takes a feather from the ground and shows it.",
bark = "The person says woof!",
fur = "The person puts on a fur coat."
)
class Dog(AnimalActions):
strings = dict(
quack = "The dog cannot quack.",
feathers = "The dog has no feathers.",
bark = "Arf!",
fur = "The dog has white fur with black spots."
)
def in_the_doghouse(dog):
print(dog.bark())
print(dog.fur())
def in_the_forest(duck):
print(duck.quack())
print(duck.feathers())
def main():
    donald = Duck()
    john = Person()
    fido = Dog()
    print("- In the forest:")
    for o in (donald, john, fido):
        in_the_forest(o)
    print("- In the doghouse:")
    for o in (donald, john, fido):
        in_the_doghouse(o)
if __name__ == "__main__":
main()
author: dm.vakhrushev@gmail.com

---
repo: gist-ailab/mmdetection | path: /others/SSIM/configs/_base_/datasets/coco_detection.py | license: Apache-2.0 (permissive) | branch: refs/heads/master | revision date: 2023-07-18T04:18:36
github_id: 583,538,372 | stars: 0 | forks: 1 | language: Python (UTF-8) | size: 1,774 bytes | extension: py
gha: license Apache-2.0 | event created 2023-04-11T18:42:27 | repo created 2022-12-30T04:35:47 | language Python
blob_id: f8906983156698e016b2c1b42eaacae61431e5bd
# dataset settings
dataset_type = 'CocoDataset'
data_root = '/SSDb/sung/dataset/coco/'
# data_root = '/home/jovyan/sung/dataset/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=2,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
evaluation = dict(interval=1, metric='bbox')
author: hogili89@gist.ac.kr

---
repo: YuhangSong/Arena-Baselines | path: /arena/rollout_worker.py | license: MIT, Apache-2.0 (permissive) | branch: refs/heads/master | revision date: 2021-04-05T21:45:52
github_id: 178,899,988 | stars: 36 | forks: 8 | language: Python (UTF-8) | size: 16,894 bytes | extension: py
gha: license Apache-2.0 | event created 2019-08-15T02:21:19 | repo created 2019-04-01T16:08:45 | language ASP
blob_id: c9a5215f2c1b60e591fdd67e7e6bf37c9b83b335
from ray.rllib.evaluation.rollout_worker import *
from ray.rllib.evaluation.rollout_worker import _validate_env, _validate_and_canonicalize, _has_tensorflow_graph
from gym import wrappers
class ArenaRolloutWorker(RolloutWorker):
"""arena-spec, support monitor for MultiAgentEnv
"""
@DeveloperAPI
def __init__(self,
env_creator,
policy,
policy_mapping_fn=None,
policies_to_train=None,
tf_session_creator=None,
batch_steps=100,
batch_mode="truncate_episodes",
episode_horizon=None,
preprocessor_pref="deepmind",
sample_async=False,
compress_observations=False,
num_envs=1,
observation_filter="NoFilter",
clip_rewards=None,
clip_actions=True,
env_config=None,
model_config=None,
policy_config=None,
worker_index=0,
monitor_path=None,
log_dir=None,
log_level=None,
callbacks=None,
input_creator=lambda ioctx: ioctx.default_sampler_input(),
input_evaluation=frozenset([]),
output_creator=lambda ioctx: NoopOutput(),
remote_worker_envs=False,
remote_env_batch_wait_ms=0,
soft_horizon=False,
no_done_at_end=False,
seed=None,
_fake_sampler=False):
"""Initialize a rollout worker.
Arguments:
env_creator (func): Function that returns a gym.Env given an
EnvContext wrapped configuration.
policy (class|dict): Either a class implementing
Policy, or a dictionary of policy id strings to
(Policy, obs_space, action_space, config) tuples. If a
dict is specified, then we are in multi-agent mode and a
policy_mapping_fn should also be set.
policy_mapping_fn (func): A function that maps agent ids to
policy ids in multi-agent mode. This function will be called
each time a new agent appears in an episode, to bind that agent
to a policy for the duration of the episode.
policies_to_train (list): Optional whitelist of policies to train,
or None for all policies.
tf_session_creator (func): A function that returns a TF session.
This is optional and only useful with TFPolicy.
batch_steps (int): The target number of env transitions to include
in each sample batch returned from this worker.
batch_mode (str): One of the following batch modes:
"truncate_episodes": Each call to sample() will return a batch
of at most `batch_steps * num_envs` in size. The batch will
be exactly `batch_steps * num_envs` in size if
postprocessing does not change batch sizes. Episodes may be
truncated in order to meet this size requirement.
"complete_episodes": Each call to sample() will return a batch
of at least `batch_steps * num_envs` in size. Episodes will
not be truncated, but multiple episodes may be packed
within one batch to meet the batch size. Note that when
`num_envs > 1`, episode steps will be buffered until the
episode completes, and hence batches may contain
significant amounts of off-policy data.
episode_horizon (int): Whether to stop episodes at this horizon.
preprocessor_pref (str): Whether to prefer RLlib preprocessors
("rllib") or deepmind ("deepmind") when applicable.
sample_async (bool): Whether to compute samples asynchronously in
the background, which improves throughput but can cause samples
to be slightly off-policy.
compress_observations (bool): If true, compress the observations.
They can be decompressed with rllib/utils/compression.
num_envs (int): If more than one, will create multiple envs
and vectorize the computation of actions. This has no effect if
if the env already implements VectorEnv.
observation_filter (str): Name of observation filter to use.
clip_rewards (bool): Whether to clip rewards to [-1, 1] prior to
experience postprocessing. Setting to None means clip for Atari
only.
clip_actions (bool): Whether to clip action values to the range
specified by the policy action space.
env_config (dict): Config to pass to the env creator.
model_config (dict): Config to use when creating the policy model.
policy_config (dict): Config to pass to the policy. In the
multi-agent case, this config will be merged with the
per-policy configs specified by `policy`.
worker_index (int): For remote workers, this should be set to a
non-zero and unique value. This index is passed to created envs
through EnvContext so that envs can be configured per worker.
monitor_path (str): Write out episode stats and videos to this
directory if specified.
log_dir (str): Directory where logs can be placed.
log_level (str): Set the root log level on creation.
callbacks (dict): Dict of custom debug callbacks.
input_creator (func): Function that returns an InputReader object
for loading previous generated experiences.
input_evaluation (list): How to evaluate the policy performance.
This only makes sense to set when the input is reading offline
data. The possible values include:
- "is": the step-wise importance sampling estimator.
- "wis": the weighted step-wise is estimator.
- "simulation": run the environment in the background, but
use this data for evaluation only and never for learning.
output_creator (func): Function that returns an OutputWriter object
for saving generated experiences.
remote_worker_envs (bool): If using num_envs > 1, whether to create
those new envs in remote processes instead of in the current
process. This adds overheads, but can make sense if your envs
remote_env_batch_wait_ms (float): Timeout that remote workers
are waiting when polling environments. 0 (continue when at
least one env is ready) is a reasonable default, but optimal
value could be obtained by measuring your environment
step / reset and model inference perf.
soft_horizon (bool): Calculate rewards but don't reset the
environment when the horizon is hit.
no_done_at_end (bool): Ignore the done=True at the end of the
episode and instead record done=False.
seed (int): Set the seed of both np and tf to this value to
to ensure each remote worker has unique exploration behavior.
_fake_sampler (bool): Use a fake (inf speed) sampler for testing.
"""
global _global_worker
_global_worker = self
policy_config = policy_config or {}
if (tf and policy_config.get("eager")
and not policy_config.get("no_eager_on_workers")):
tf.enable_eager_execution()
if log_level:
logging.getLogger("ray.rllib").setLevel(log_level)
if worker_index > 1:
disable_log_once_globally() # only need 1 worker to log
elif log_level == "DEBUG":
enable_periodic_logging()
env_context = EnvContext(env_config or {}, worker_index)
self.policy_config = policy_config
self.callbacks = callbacks or {}
self.worker_index = worker_index
model_config = model_config or {}
policy_mapping_fn = (policy_mapping_fn
or (lambda agent_id: DEFAULT_POLICY_ID))
if not callable(policy_mapping_fn):
raise ValueError(
"Policy mapping function not callable. If you're using Tune, "
"make sure to escape the function with tune.function() "
"to prevent it from being evaluated as an expression.")
self.env_creator = env_creator
self.sample_batch_size = batch_steps * num_envs
self.batch_mode = batch_mode
self.compress_observations = compress_observations
self.preprocessing_enabled = True
self.last_batch = None
self._fake_sampler = _fake_sampler
# arena-spec
self.env = _validate_env(env_creator(env_context).unwrapped)
# arena-spec
if isinstance(self.env, MultiAgentEnv) or \
isinstance(self.env, BaseEnv):
# if isinstance(self.env, BaseEnv):
def wrap(env):
return env # we can't auto-wrap these env types
elif is_atari(self.env) and \
not model_config.get("custom_preprocessor") and \
preprocessor_pref == "deepmind":
# Deepmind wrappers already handle all preprocessing
self.preprocessing_enabled = False
if clip_rewards is None:
clip_rewards = True
def wrap(env):
env = wrap_deepmind(
env,
dim=model_config.get("dim"),
framestack=model_config.get("framestack"))
if monitor_path:
env = gym.wrappers.Monitor(env, monitor_path, resume=True)
return env
else:
def wrap(env):
if monitor_path:
env = gym.wrappers.Monitor(env, monitor_path, resume=True)
return env
self.env = wrap(self.env)
def make_env(vector_index):
return wrap(
env_creator(
env_context.copy_with_overrides(
vector_index=vector_index, remote=remote_worker_envs)))
self.tf_sess = None
# arena-spec
policy_dict = _validate_and_canonicalize(policy, self.env.unwrapped)
self.policies_to_train = policies_to_train or list(policy_dict.keys())
# set numpy and python seed
if seed is not None:
np.random.seed(seed)
random.seed(seed)
if not hasattr(self.env, "seed"):
raise ValueError("Env doesn't support env.seed(): {}".format(
self.env))
self.env.seed(seed)
try:
import torch
torch.manual_seed(seed)
except ImportError:
logger.info("Could not seed torch")
if _has_tensorflow_graph(policy_dict) and not (tf and
tf.executing_eagerly()):
if (ray.is_initialized()
and ray.worker._mode() != ray.worker.LOCAL_MODE
and not ray.get_gpu_ids()):
logger.debug("Creating policy evaluation worker {}".format(
worker_index) +
" on CPU (please ignore any CUDA init errors)")
if not tf:
raise ImportError("Could not import tensorflow")
with tf.Graph().as_default():
if tf_session_creator:
self.tf_sess = tf_session_creator()
else:
self.tf_sess = tf.Session(
config=tf.ConfigProto(
gpu_options=tf.GPUOptions(allow_growth=True)))
with self.tf_sess.as_default():
# set graph-level seed
if seed is not None:
tf.set_random_seed(seed)
self.policy_map, self.preprocessors = \
self._build_policy_map(policy_dict, policy_config)
else:
self.policy_map, self.preprocessors = self._build_policy_map(
policy_dict, policy_config)
self.multiagent = set(self.policy_map.keys()) != {DEFAULT_POLICY_ID}
# arena-spec
if self.multiagent:
if not ((isinstance(self.env.unwrapped, MultiAgentEnv)
or isinstance(self.env.unwrapped, ExternalMultiAgentEnv))
or isinstance(self.env.unwrapped, BaseEnv)):
raise ValueError(
"Have multiple policies {}, but the env ".format(
self.policy_map) +
"{} is not a subclass of BaseEnv, MultiAgentEnv or "
"ExternalMultiAgentEnv?".format(self.env))
self.filters = {
policy_id: get_filter(observation_filter,
policy.observation_space.shape)
for (policy_id, policy) in self.policy_map.items()
}
if self.worker_index == 0:
logger.info("Built filter map: {}".format(self.filters))
# Always use vector env for consistency even if num_envs = 1
self.async_env = BaseEnv.to_base_env(
self.env,
make_env=make_env,
num_envs=num_envs,
remote_envs=remote_worker_envs,
remote_env_batch_wait_ms=remote_env_batch_wait_ms)
self.num_envs = num_envs
if self.batch_mode == "truncate_episodes":
unroll_length = batch_steps
pack_episodes = True
elif self.batch_mode == "complete_episodes":
unroll_length = float("inf") # never cut episodes
pack_episodes = False # sampler will return 1 episode per poll
else:
raise ValueError("Unsupported batch mode: {}".format(
self.batch_mode))
self.io_context = IOContext(log_dir, policy_config, worker_index, self)
self.reward_estimators = []
for method in input_evaluation:
if method == "simulation":
logger.warning(
"Requested 'simulation' input evaluation method: "
"will discard all sampler outputs and keep only metrics.")
sample_async = True
elif method == "is":
ise = ImportanceSamplingEstimator.create(self.io_context)
self.reward_estimators.append(ise)
elif method == "wis":
wise = WeightedImportanceSamplingEstimator.create(
self.io_context)
self.reward_estimators.append(wise)
else:
raise ValueError(
"Unknown evaluation method: {}".format(method))
if sample_async:
self.sampler = AsyncSampler(
self.async_env,
self.policy_map,
policy_mapping_fn,
self.preprocessors,
self.filters,
clip_rewards,
unroll_length,
self.callbacks,
horizon=episode_horizon,
pack=pack_episodes,
tf_sess=self.tf_sess,
clip_actions=clip_actions,
blackhole_outputs="simulation" in input_evaluation,
soft_horizon=soft_horizon,
no_done_at_end=no_done_at_end)
self.sampler.start()
else:
self.sampler = SyncSampler(
self.async_env,
self.policy_map,
policy_mapping_fn,
self.preprocessors,
self.filters,
clip_rewards,
unroll_length,
self.callbacks,
horizon=episode_horizon,
pack=pack_episodes,
tf_sess=self.tf_sess,
clip_actions=clip_actions,
soft_horizon=soft_horizon,
no_done_at_end=no_done_at_end)
self.input_reader = input_creator(self.io_context)
assert isinstance(self.input_reader, InputReader), self.input_reader
self.output_writer = output_creator(self.io_context)
assert isinstance(self.output_writer, OutputWriter), self.output_writer
logger.debug(
"Created rollout worker with env {} ({}), policies {}".format(
self.async_env, self.env, self.policy_map))
author: yuhangsong.china@gmail.com

---
repo: yeaagreed/Math-Program | path: /math_progam_2.3.py | license: none detected | branch: refs/heads/main | revision date: 2021-09-14T23:54:59
github_id: 399,730,727 | stars: 0 | forks: 0 | language: Python (UTF-8) | size: 6,230 bytes | extension: py
blob_id: 95e35d4976bbcfc3afac960c917a0777e0d4dda7
##
# math_teacher.py
# 18/08/2021
# SF
def student_menu():
"""Student Module"""
print("""\n\n---------------Welcome to Who Wants to be a Mathionaire!---------------
Who Wants to be a Mathionaire will test your ability at maths
while rewarding you with money. Questions will progressively get harder,
increasing in age level until you get one wrong
As you get more questions correct, the reward will increase,
but if you get one wrong you lose your whole reward.
""")
print("\n##### Lets go, first question #####")
reward = 10
answer = 0
i = 0
grade = 1
while True:
# Steps the grade up every 3 questions
i += 1
if i % 3 == 0:
grade += 1
answer, operation = question_generation(grade, i)
# If the user got it correct
if answer == True:
print("Good job, you got it correct!!")
# Adds random reward on
reward *= random.uniform(1, 3)
print("Your reward is now ${:.0f}".format(reward))
# Checks if user still wants to play
quit_YN = input("\nTo continue play press enter or N to quit: ").upper().strip()
while quit_YN != "" and quit_YN != "N":
quit_YN = input("Please press enter or N" +
"\nTo continue play press enter or N to quit: ").upper().strip()
if quit_YN == "N":
print("Congratulations you won {:.0f}$ in total\n".format(reward))
break
# If user did not get it correct
else:
print("Unlucky that is incorrect.\n" +
"You lost all your money\n\n")
break
return grade, operation
def question_generation(grade, i):
"""Generates questions"""
# Only do all operators if above grade two
if grade > 2:
operations = ["/", "x", "+", "-"]
operation = random.choice(operations)
number_1, number_2 = number_picker(grade)
# Checks if plus or minus operator
answer = plus_or_minus(operation, number_1, number_2)
if operation == "x":
answer = number_1*number_2
# Need to make sure division gives a int answer
elif operation == "/":
while True:
if number_1%number_2 == 0:
answer = number_1/number_2
break
else:
number_1, number_2 = number_picker(grade)
# If not above grade two, only do plus or minus
else:
operations = ["+", "-"]
operation = random.choice(operations)
number_1, number_2 = number_picker(grade)
# Checks if plus or minus operator
answer = plus_or_minus(operation, number_1, number_2)
while True:
try:
user_answer = int(input("\nWhat is {} {} {}: ".format(number_1, operation, number_2)))
break
except:
print("Please enter an whole number")
question_info[i] = [operation, number_1, number_2, answer, user_answer, grade]
# Check if answer is correct
if answer == user_answer:
operation = False
return True, operation
else:
return False, operation
def number_picker(grade):
"""Picks two random numbers"""
number_1 = random.randint(0, grade * 10)
number_2 = random.randint(1, grade * 10)
return number_1, number_2
def plus_or_minus(operation, number_1, number_2):
"Checks if plus or minus operator then executes the operator"
if operation == "+":
answer = number_1 + number_2
return answer
elif operation == "-":
answer = number_1 - number_2
return answer
def teacher_menu(grade, operation):
"""Reads the teachers options"""
while True:
print("""\n#####Teacher Description#####
(G) for grade summary
(T) for students weaknessed
(S) to see questions and answers of last student
(Q) to quit""")
mode = input("Please enter your option: ").lower()
if mode == "g":
grade_summary(grade)
elif mode == "t":
weakness(operation)
elif mode == "s":
question_print()
elif mode == "q":
print("\nExiting to main menu\n")
break
def grade_summary(grade):
"""Gives Teacher a grade summary"""
if grade == None:
print("Math program has not been run yet by a student")
return
print("\nYour student is working comfortably " +
"at grade number {}.".format(grade-1) +
"\nGrade {} is challenging for them".format(grade))
def weakness(operation):
"""Gives the students weak operator"""
if operation == False:
print("We have not identified a weak operator for your students")
return
print("\nYour students are struggling with {}".format(operation))
def question_print():
"""Prints the questions that have been"""
for question in question_info:
print("\nGrade: {}".format(question_info[question][5]) +
"\nWhat is {} {} {}: ".format(question_info[question][1], question_info[question][0], question_info[question][2]) +
"\nStudent Answer: {}".format(question_info[question][4]) +
"\nCorrect answer: {}\n".format(question_info[question][3]))
def main_menu():
grade = None
operation = False
while True:
print("""Welcome to Who wants to be a mathionaire!!"
(P) to play
(T) for teacher menu
(Q) to quit""")
mode = input("Please enter your option: ").lower()
if mode == "p":
grade, operation = student_menu()
elif mode == "t":
teacher_menu(grade, operation)
elif mode == "q":
print("Thank you for playing")
break
else:
print("Please enter either P, T or Q")
if __name__ == "__main__":
import random
question_info = {}
main_menu()
author: noreply@github.com (author_id: yeaagreed.noreply@github.com)

---
repo: XinliYu/PyModels | path: /embeds/embed_common.py | license: none detected | branch: refs/heads/master | revision date: 2019-05-21T18:30:29
github_id: 186,359,076 | stars: 2 | forks: 0 | language: Python (UTF-8) | size: 510 bytes | extension: py
blob_id: bc1927651feb362fb11038e4a1b6b12a240f8b33
def find_analogies(w1, w2, w3):
    for w in (w1, w2, w3):
        if w not in word2vec:
            print("%s not in dictionary" % w)
            return

    king = word2vec[w1]
    man = word2vec[w2]
    woman = word2vec[w3]
    v0 = king - man + woman

    distances = pairwise_distances(v0.reshape(1, D), embedding, metric=metric).reshape(V)
    idxs = distances.argsort()[:4]
    for idx in idxs:
        word = idx2word[idx]
        if word not in (w1, w2, w3):
            best_word = word
            break
    print(w1, "-", w2, "=", best_word, "-", w3)
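The function depends on module-level names defined elsewhere in the project (word2vec, embedding, idx2word, D, V, metric) and on a pairwise-distance routine. The sketch below shows one way those globals could be set up, using scikit-learn's pairwise_distances and toy random vectors; it is an assumption for illustration, not code from the original repository.

# Hypothetical setup for find_analogies; names mirror the globals used above.
import numpy as np
from sklearn.metrics import pairwise_distances

word2vec = {w: np.random.rand(50) for w in ("king", "man", "woman", "queen")}  # toy vectors
idx2word = list(word2vec.keys())
embedding = np.stack([word2vec[w] for w in idx2word])  # (V, D) matrix of word vectors
V, D = embedding.shape
metric = "euclidean"

find_analogies("king", "man", "woman")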
author: xyu350@gatech.edu

---
repo: scott-gordon72/python_crash_course | path: /chapter_4/slices.py | license: none detected | branch: refs/heads/master | revision date: 2020-04-25T01:13:48
github_id: 255,947,783 | stars: 0 | forks: 0 | language: Python (UTF-8) | size: 539 bytes | extension: py
blob_id: cf8ec172f833dec2b1f41077e0893166865d5c4f
import math
cubes_list = []
for i in range(1, 11):
cubes_list.append(i)
cubes_list_comprehension = [cube ** 3 for cube in cubes_list]
print(cubes_list_comprehension)
print(
f"The first three items in the list of cubes are:{cubes_list_comprehension[:3]}")
cube_middle = int((len(cubes_list_comprehension) / 2) - 1)
print(cube_middle)
print(
f"The items from the middle of the cube list are: {cubes_list_comprehension[cube_middle-1:cube_middle+2]}")
print(f"The last three items in a list are: {cubes_list_comprehension[-3:]}")
author: scott.gordon72@outlook.com

---
repo: zhouxy666/huaweicloud-sdk-python-v3 | path: /huaweicloud-sdk-vas/huaweicloudsdkvas/v2/model/stop_task_request.py | license: Apache-2.0 (permissive) | branch: refs/heads/master | revision date: 2021-11-12T03:20:11
github_id: none | stars: 0 | forks: 0 | language: Python (UTF-8) | size: 3,639 bytes | extension: py
blob_id: 5d1b4ee00dd806c681b392173175892dcc7fa2b7
# coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class StopTaskRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'service_name': 'str',
'task_id': 'str'
}
attribute_map = {
'service_name': 'service_name',
'task_id': 'task_id'
}
def __init__(self, service_name=None, task_id=None):
"""StopTaskRequest - a model defined in huaweicloud sdk"""
self._service_name = None
self._task_id = None
self.discriminator = None
self.service_name = service_name
self.task_id = task_id
@property
def service_name(self):
"""Gets the service_name of this StopTaskRequest.
服务名称
:return: The service_name of this StopTaskRequest.
:rtype: str
"""
return self._service_name
@service_name.setter
def service_name(self, service_name):
"""Sets the service_name of this StopTaskRequest.
服务名称
:param service_name: The service_name of this StopTaskRequest.
:type: str
"""
self._service_name = service_name
@property
def task_id(self):
"""Gets the task_id of this StopTaskRequest.
指定的服务作业ID
:return: The task_id of this StopTaskRequest.
:rtype: str
"""
return self._task_id
@task_id.setter
def task_id(self, task_id):
"""Sets the task_id of this StopTaskRequest.
指定的服务作业ID
:param task_id: The task_id of this StopTaskRequest.
:type: str
"""
self._task_id = task_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, StopTaskRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
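A small usage sketch, added for illustration; the service name and task id are placeholder values.

# Build a request and inspect its dictionary form (placeholder values).
req = StopTaskRequest(service_name='my-service', task_id='task-001')
print(req.to_dict())   # {'service_name': 'my-service', 'task_id': 'task-001'}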
author: hwcloudsdk@huawei.com

---
repo: dmancy/Euler-1D-Solver | path: /main.py | license: none detected | branch: refs/heads/master | revision date: 2021-03-08T22:19:42
github_id: none | stars: 0 | forks: 0 | language: Python (UTF-8) | size: 949 bytes | extension: py
blob_id: 04c1467478361ba2732edb5915c0b5761c73dc42
#Python libraries
import matplotlib.pyplot as plt
import numpy as np
import tikzplotlib
from Grid import Grid
from State import State
from Solver import Solver
from Riemann import Riemann
from Euler import Euler
from plot import plot
#Number of cells
N_cells = 100
#Grid step
delta_x = 1
#Number of faces
N_faces = N_cells + 1
#Generation of face positions
faces = np.arange(-N_cells//2, N_cells//2+1, delta_x)
#Grid generation
grid = Grid(faces)
#Specific heat ratio
gamma = 1.4
#Initial condition
U_initial = [State("Left", gamma, 1, 0, 2) if grid.cell_position[i] <= 0 else State("Right", gamma, 1, 0, 1) for i in range(N_cells)]
#Time parameters
t0 = 0
t_final = 25
#Exact solution
Riemann_problem = Riemann(1., 0., 2., 1., 0., 1., 1.4)
Courant_number = 0.7
solution = Euler(U_initial, grid, Courant_number, t0, t_final, "Steger_Warming")
plot(Riemann_problem, solution, grid, t_final, Courant_number)
#Plot Figures
plt.show()
author: mancydamien@gmail.com

---
repo: ahmetkucuk/flare-prediction | path: /augmentation.py | license: none detected | branch: refs/heads/master | revision date: 2017-04-30T05:12:27
github_id: 86,946,432 | stars: 0 | forks: 0 | language: Python (UTF-8) | size: 2,679 bytes | extension: py
blob_id: 50aa2c4e21a87d6584cca67efa345d843c5d8901
import numpy as np
from collections import deque
NO_AUGMENTATION = -1
STRETCH_AUGMENTATION = 1
SQUEEZE_AUGMENTATION = 2
SHIFT_AUGMENTATION = 3
MIRROR_AUGMENTATION = 4
FLIP_AUGMENTATION = 5
REVERSE_AUGMENTATION = 6
def mirror_augmentation(data, labels):
new_labels = list(labels)
new_data = (np.array(data).astype(np.float32)*-1).tolist()
return new_data, new_labels
def flip_augmentation(data, labels):
new_labels = list(labels)
mid_point = int(len(data)/2)
new_data = data[mid_point:] + data[:mid_point]
return new_data, new_labels
def reverse_augmentation(data, labels):
new_labels = list(labels)
new_data = list(data)
new_data.reverse()
return new_data, new_labels
def stretch_augmentation(data, labels):
new_data = []
new_labels = []
for (single_record, label) in zip(data, labels):
double_record = double_array(single_record)
ts_length = len(double_record)
new_single_record = []
for i in range(ts_length // 2):  # floor division so the range bound is an int under Python 3
new_single_record.append(double_record[i])
new_data.append(new_single_record)
new_labels.append(label)
new_single_record = []
for i in range(ts_length // 2, ts_length, 1):  # floor division so the range bound is an int under Python 3
new_single_record.append(double_record[i])
new_data.append(new_single_record)
new_labels.append(label)
return new_data, new_labels
def double_array(data):
if len(data) == 0:
return data
new_data = []
prev = np.array(data[0], dtype=float)
for i in range(len(data)):
d_array = np.array(data[i], dtype=float)
new_data.append(((d_array + prev) / 2.0).tolist())
new_data.append(data[i])
prev = np.array(data[i], dtype=float)
return new_data
def squeeze_augmentation(data, labels):
new_data = []
new_labels = []
for (single_record, label) in zip(data, labels):
ts_length = len(single_record)
new_single_record = []
for i in range(0, ts_length, 2):
new_single_record.append(single_record[i])
for i in range(1, ts_length, 2):
new_single_record.append(single_record[i])
new_data.append(new_single_record)
new_labels.append(label)
return new_data, new_labels
def shift_2d_list(list2d, rotate=1):
d1_len = len(list2d)
new_list2d = []
for i in range(d1_len):
data = list2d[i]
items = deque(data)
items.rotate(rotate)
new_list2d.append(list(items))
return new_list2d
def t_list(list):
return np.asarray(list).T.tolist()
def shift_augmentation(data, labels, rotate):
new_data = []
new_labels = []
for (single_record, label) in zip(data, labels):
single_record = t_list(single_record)
single_record = shift_2d_list(single_record, rotate=rotate)
single_record = t_list(single_record)
new_data.append(single_record)
new_labels.append(label)
return new_data, new_labels
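# A minimal usage sketch (the shapes are my assumption: each record is a list
# of per-timestep feature vectors, and labels is a parallel list):
#   data = [[[0.1, 0.2], [0.3, 0.4]], [[0.5, 0.6], [0.7, 0.8]]]
#   labels = [0, 1]
#   mirrored, mirrored_labels = mirror_augmentation(data, labels)
#   shifted, shifted_labels = shift_augmentation(data, labels, rotate=1)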
|
[
"ahmetkucuk92@gmail.com"
] |
ahmetkucuk92@gmail.com
|
d2bace887ae8d6e8f98a445073d1abb6dba3225e
|
27a9b251f4cda1c94f7aafd8a7b3a47be38fcd55
|
/scripts/create_task_database_psych.py
|
8a1be30522ec903f599dd38dc0385e87e5cf951f
|
[] |
no_license
|
openstax/research-wrst
|
f6ccd0e700f965dc2d356584dbeb4a88a4c16ede
|
a984a48e59d9bb2457edef5ac38506ef5836a7da
|
refs/heads/master
| 2023-07-19T23:46:32.477195
| 2021-09-02T19:09:12
| 2021-09-02T19:09:12
| 238,274,285
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,422
|
py
|
import pandas as pd
import itertools
import numpy as np
import re
import sys
sys.path.append('..')
from wrst.app import create_app
from wrst.database import db
from wrst.database.models import Tasks
from wrst.logic.experiment import Experiment
sentences_file = "../textbook_data/book/psychology_14_1_final.csv"
terms_file = "../textbook_data/terms/processed/term_psychology_14_1.csv"
def extract_rex_ch_sec(rex_link):
pattern = "^\d{,2}\-\d{,2}"
tmp = rex_link.split("/")[-1]
chsec = "".join(re.findall(pattern, tmp)).split("-")
# chsec = rex_link.split('/')[-1][0:3].split('-')
chsec = [int(c) for c in chsec]
return chsec
def get_term_list(text, all_terms):
text_lower = text.lower().replace('.', '').replace(',', '').replace('?', '').replace(';', '').replace('(', '').replace(')', '')
words = set(text_lower.split())
term_list = set(all_terms)
compound_terms = set([t for t in term_list if " " in t])
# Extract simple single-word matches as well as full text matches
terms_simple = all_terms & words
terms_compound = set([c for c in compound_terms if c in text_lower])
# Get all the simple terms that are substrings of the compound terms
terms_simple_dup = set(
[t for t in terms_simple if any([t in t2 for t2 in terms_compound])]
)
# Get the "final" term list
final_term_list = list(terms_simple | (terms_compound - terms_simple_dup))
    # Finally, check each term to see if it is a substring of any other term in the list. If so, kill it with fire!
occ_count = [
(t1, np.sum([t1 in t2 for t2 in final_term_list])) for t1 in final_term_list
]
final_term_list = [t[0] for t in occ_count if t[1] == 1]
return final_term_list
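# Illustrative sketch (my own toy example, not from the original data): with
# all_terms = {"memory", "working memory", "brain"} and the sentence
# "Working memory relies on the brain.", the single word "memory" is dropped
# because it is a substring of the matched compound term, so get_term_list
# returns ["brain", "working memory"] (order may vary, since sets are involved).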
def create_book_dataframe(sentences_file, all_terms):
df_book = pd.read_csv(sentences_file)
# Do term extraction, etc
df_book["terms"] = df_book["sentence"].apply(lambda x: get_term_list(x, all_terms))
df_book["N_terms"] = df_book["terms"].apply(lambda x: len(x))
df_book_tmp = df_book.copy()
df_book = df_book[df_book["N_terms"] >= 2]
df_book["sentence_id"] = list(range(0, len(df_book)))
return df_book, df_book_tmp
def get_base_term(term, df_terms):
    # Find the term in the dataframe, return its base term
term = term.lower()
dft = df_terms[df_terms["term"]==term]
return dft["base_term"].iloc[0]
app = create_app()
app.app_context().push()
# Read out the sentences and term files
# Weave everything together to create the task db table, which contains each possible exercise for participants
# First, purge whatever is in the Task table currently
db.session.query(Tasks).delete()
db.session.commit()
# Get the terms dataframe and the set of all terms
df_terms = pd.read_csv(
terms_file
)
df_terms["term"] = df_terms["term"].apply(lambda x: x.lower())
all_terms = set(df_terms["term"].unique().tolist())
print("I have foune {} total terms".format(df_terms.shape[0]))
# Get the book dataframe, filtered down to sentences allowed in the current experiment
df_book, df_book_full = create_book_dataframe(sentences_file, all_terms)
# Now go through each sentence and assemble a task for each pairwise combination of terms found therein
task_count = 0
task_list = []
for ii in range(0, df_book.shape[0]):
sentence = df_book.iloc[ii]
terms = sentence["terms"]
for term_combination in itertools.combinations(terms, 2):
task = Tasks(
task_id=task_count,
paragraph_id=sentence["paragraph_idx"],
sentence_id=sentence["sentence_id"],
sentence=sentence["sentence"],
term_1=term_combination[0],
term_2=term_combination[1],
type_1=df_terms[df_terms["term"]==term_combination[0]].iloc[0]["type"],
type_2=df_terms[df_terms["term"] == term_combination[1]].iloc[0]["type"],
base_term_1=get_base_term(term_combination[0], df_terms),
base_term_2=get_base_term(term_combination[1], df_terms)
)
task_list.append(task)
task_count += 1
db.session.bulk_save_objects(task_list)
db.session.commit()
# print("Finished doing Ch. {} Sec. {}".format(chapter, section))
print("I found {} valid sentences having at least two terms".format(df_book.shape[0]))
print("I found {} total tasks that can be completed".format(task_count))
print("I wrote {} tasks to the db".format(len(task_list)))
|
[
"andrew.e.waters@gmail.com"
] |
andrew.e.waters@gmail.com
|
ef36e1c1a2b4b5b64e1e2ba1f028e6f8b4fe565b
|
275135c409644b46a551ded2165649145f95a032
|
/BotosanLayer.py
|
2fde92550b06df6a68d18e1534fe5ebbf77f072f
|
[
"MIT"
] |
permissive
|
gokomo/BotoSan
|
1094dda9eac70e191767bd1ce665b506170c068e
|
8869f8016b9ab07d4cbaed1170ae12c329dcdb30
|
refs/heads/master
| 2021-06-21T06:48:00.887990
| 2017-08-14T21:40:18
| 2017-08-14T21:40:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,669
|
py
|
# coding=utf-8
import random
import time
import logging
import RegexMatcher
from DateHelper import DateHelper
from yowsup.layers.interface import YowInterfaceLayer, ProtocolEntityCallback
from yowsup.layers.protocol_messages.protocolentities import TextMessageProtocolEntity
from yowsup.layers.protocol_presence.protocolentities import *
from yowsup.layers.protocol_chatstate.protocolentities import *
from yowsup.common.tools import Jid
from yowsup.layers.protocol_media.protocolentities import *
class BotosanLayer(YowInterfaceLayer):
def __init__(self):
super(BotosanLayer, self).__init__()
self.regex = RegexMatcher.RegexMatcher()
self.logger = logging.getLogger("botosan.logger")
@ProtocolEntityCallback("message")
def onMessage(self, messageProtocolEntity):
# Ack immediately then take a while to respond
self.toLower(messageProtocolEntity.ack())
self.toLower(messageProtocolEntity.ack(True))
if DateHelper.determineIfBotosanShouldRespond(messageProtocolEntity.getTimestamp()):
self.sleepBotosan()
if messageProtocolEntity.getType() == 'text':
if messageProtocolEntity.isGroupMessage():
self.onGroupMessage(messageProtocolEntity)
else:
self.onTextMessage(messageProtocolEntity)
elif messageProtocolEntity.getType() == 'media':
self.onMediaMessage(messageProtocolEntity)
if messageProtocolEntity.getType() == 'text' and self.regex.doesMatchAPattern(messageProtocolEntity):
message_to_send = self.regex.matchPatterns(messageProtocolEntity)
if message_to_send is not None:
self.simulateBotosanPreparingAnswer(messageProtocolEntity, messageToSend=message_to_send)
else:
self.logger.info("No pattern match.")
@ProtocolEntityCallback("receipt")
def onReceipt(self, entity):
self.toLower(entity.ack())
def setBotosanOnline(self):
self.toLower(AvailablePresenceProtocolEntity())
def setBotosanDisconnect(self):
self.toLower(UnavailablePresenceProtocolEntity())
def setBotosanPresence(self):
self.toLower(PresenceProtocolEntity(name="Botosan"))
def setBotosanTyping(self, message):
self.toLower(OutgoingChatstateProtocolEntity(
OutgoingChatstateProtocolEntity.STATE_TYPING,
message.getFrom()
))
def stopBotosanTyping(self, message):
self.toLower(OutgoingChatstateProtocolEntity(
OutgoingChatstateProtocolEntity.STATE_PAUSED,
message.getFrom()
))
def simulateBotosanPreparingAnswer(self, messageProtocol, messageToSend):
self.setBotosanPresence()
self.setBotosanOnline()
self.sleepBotosan()
self.setBotosanTyping(messageProtocol)
self.sleepBotosan(minTime=2.0, maxTime=4.0)
self.stopBotosanTyping(messageProtocol)
self.sleepBotosan()
self.toLower(messageToSend)
@staticmethod
def sleepBotosan(minTime=0.2, maxTime=0.6):
"""
Sleeps botosan for a random amount of time between min and max, defaults to 0.2 and 0.6 if none are provided.
:param minTime: Minimum amount of time to sleep
:param maxTime: Maximum amount of time to sleep
"""
time.sleep(random.uniform(minTime, maxTime))
def onTextMessage(self, messageProtocolEntity):
"""Log the body and phone from which it originated"""
self.logger.info("Echoing %s to %s" % (messageProtocolEntity.getBody(), messageProtocolEntity.getFrom(True)))
def onMediaMessage(self, messageProtocolEntity):
if messageProtocolEntity.getMediaType() == "image":
self.logger.info("Echoing image %s to %s" % (messageProtocolEntity.url, messageProtocolEntity.getFrom(True)))
elif messageProtocolEntity.getMediaType() == "location":
self.logger.info("Echoing location (%s, %s) to %s" % (
messageProtocolEntity.getLatitude(), messageProtocolEntity.getLongitude(),
messageProtocolEntity.getFrom(False)))
elif messageProtocolEntity.getMediaType() == "vcard":
self.logger.info("Echoing vcard (%s, %s) to %s" % (
messageProtocolEntity.getName(), messageProtocolEntity.getCardData(),
messageProtocolEntity.getFrom(True)))
def onGroupMessage(self, message):
self.logger.info("Group Message: [%s] === [%s]-[%s]\t%s" % (
message.getAuthor(), message.getParticipant(), message.getFrom(), message.getBody()))
|
[
"freddyloher@gmail.com"
] |
freddyloher@gmail.com
|
b7c5da7117ce9730e6e185f03a1ba51ec604b2da
|
d1808d8cc5138489667b7845466f9c573591d372
|
/notebooks/Ambient Seismic Noise/Probabilistic Power Spectral Densities.py
|
ea3a14af430a40fdedd483965e6a4b0dd1899483
|
[] |
no_license
|
krischer/seismo_live
|
e140777900f6246a677bc28b6e68f0a168ec41ab
|
fcc615aee965bc297e8d53da5692abb2ecd6fd0c
|
refs/heads/master
| 2021-10-20T22:17:42.276096
| 2019-11-27T23:21:16
| 2019-11-28T10:44:21
| 44,953,995
| 69
| 59
| null | 2020-05-22T11:00:52
| 2015-10-26T08:00:42
|
Python
|
UTF-8
|
Python
| false
| false
| 5,442
|
py
|
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.2.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <div style='background-image: url("../share/images/header.svg") ; padding: 0px ; background-size: cover ; border-radius: 5px ; height: 250px'>
# <div style="float: right ; margin: 50px ; padding: 20px ; background: rgba(255 , 255 , 255 , 0.7) ; width: 50% ; height: 150px">
# <div style="position: relative ; top: 50% ; transform: translatey(-50%)">
# <div style="font-size: xx-large ; font-weight: 900 ; color: rgba(0 , 0 , 0 , 0.8) ; line-height: 100%">Noise</div>
# <div style="font-size: large ; padding-top: 20px ; color: rgba(0 , 0 , 0 , 0.5)">Lab: Probabilistic Power Spectral Densities</div>
# </div>
# </div>
# </div>
#
#
# Seismo-Live: http://seismo-live.org
#
# ##### Authors:
# * Tobias Megies ([@megies](https://github.com/megies))
#
# ---
# %matplotlib inline
import matplotlib.pyplot as plt
plt.style.use("bmh")
plt.rcParams['figure.figsize'] = 10, 6
# * read waveform data from file `data/GR.FUR..BHN.D.2015.361` (station `FUR`, [LMU geophysical observatory in Fürstenfeldbruck](https://www.geophysik.uni-muenchen.de/observatory/seismology))
# * read corresponding station metadata from file `data/station_FUR.stationxml`
# * print info on both waveforms and station metadata
# +
from obspy import read, read_inventory
st = read("data/GR.FUR..BHN.D.2015.361")
inv = read_inventory("data/station_FUR.stationxml")
print(st)
print(inv)
inv.plot(projection="ortho");
# -
# * compute probabilistic power spectral densities using `PPSD` class from obspy.signal, see http://docs.obspy.org/tutorial/code_snippets/probabilistic_power_spectral_density.html (but use the inventory you read from StationXML as metadata)
# * plot the processed `PPSD` (`plot()` method attached to `PPSD` object)
# +
from obspy.signal import PPSD
tr = st[0]
ppsd = PPSD(stats=tr.stats, metadata=inv)
ppsd.add(tr)
ppsd.plot()
# -
# Since longer-term stacks would need too much waveform data and take far too long to compute, we prepared one year of preprocessed continuous data for a single channel of station `FUR` to play with.
#
# * load long term pre-computed PPSD from file `PPSD_FUR_HHN.npz` using `PPSD`'s `load_npz()` staticmethod (i.e. it is called directly from the class, not an instance object of the class)
# * plot the PPSD (default is the full time range; depending on how much data and spread there is, adjust the `max_percentage` option of `plot()`) (might take a couple of minutes!)
# * do a cumulative plot (which is good to judge non-exceedance percentage dB thresholds)
# +
from obspy.signal import PPSD
ppsd = PPSD.load_npz("data/PPSD_FUR_HHN.npz")
# -
ppsd.plot(max_percentage=10)
ppsd.plot(cumulative=True)
# * do different stacks of the data using the [`calculate_histogram()` (see docs!)](http://docs.obspy.org/packages/autogen/obspy.signal.spectral_estimation.PPSD.calculate_histogram.html) method of `PPSD` and visualize them
# * compare differences in different frequency bands qualitatively (anthropogenic vs. "natural" noise)..
# * nighttime stack, daytime stack
# * advanced exercise: Use the `callback` option and use some crazy custom callback function in `calculate_histogram()`, e.g. stack together all data from birthdays in your family.. or all German holidays + Sundays in the time span.. or from dates of some bands' concerts on a tour.. etc.
ppsd.calculate_histogram(time_of_weekday=[(-1, 0, 2), (-1, 22, 24)])
ppsd.plot(max_percentage=10)
ppsd.calculate_histogram(time_of_weekday=[(-1, 8, 16)])
ppsd.plot(max_percentage=10)
# * do different stacks of the data using the [`calculate_histogram()` (see docs!)](http://docs.obspy.org/packages/autogen/obspy.signal.spectral_estimation.PPSD.calculate_histogram.html) method of `PPSD` and visualize them
# * compare differences in different frequency bands qualitatively (anthropogenic vs. "natural" noise)..
# * weekdays stack, weekend stack
ppsd.calculate_histogram(time_of_weekday=[(1, 0, 24), (2, 0, 24), (3, 0, 24), (4, 0, 24), (5, 0, 24)])
ppsd.plot(max_percentage=10)
ppsd.calculate_histogram(time_of_weekday=[(6, 0, 24), (7, 0, 24)])
ppsd.plot(max_percentage=10)
# * do different stacks of the data using the [`calculate_histogram()` (see docs!)](http://docs.obspy.org/packages/autogen/obspy.signal.spectral_estimation.PPSD.calculate_histogram.html) method of `PPSD` and visualize them
# * compare differences in different frequency bands qualitatively (anthropogenic vs. "natural" noise)..
# * seasonal stacks (e.g. northern hemisphere autumn vs. spring/summer, ...)
ppsd.calculate_histogram(month=[10, 11, 12, 1])
ppsd.plot(max_percentage=10)
ppsd.calculate_histogram(month=[4, 5, 6, 7])
ppsd.plot(max_percentage=10)
# * do different stacks of the data using the [`calculate_histogram()` (see docs!)](http://docs.obspy.org/packages/autogen/obspy.signal.spectral_estimation.PPSD.calculate_histogram.html) method of `PPSD` and visualize them
# * compare differences in different frequency bands qualitatively (anthropogenic vs. "natural" noise)..
# * stacks by specific month
# * maybe even combine several of above restrictions.. (e.g. only nighttime on weekends)
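# A possible sketch for the combined case (my own example, not part of the original notebook):
# only nighttime on weekends, reusing the weekday codes and night hours from the cells above.
ppsd.calculate_histogram(time_of_weekday=[(6, 0, 2), (6, 22, 24), (7, 0, 2), (7, 22, 24)])
ppsd.plot(max_percentage=10)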
|
[
"lion.krischer@gmail.com"
] |
lion.krischer@gmail.com
|
026f2b1121016680fce1de335e8f080d67989dce
|
bc9ad33b3a6e89b3b6d68ec4e2ef25e35b90d5a0
|
/students_api/serializers.py
|
4db1e71ba39fc1c397d2155c49117a4d1c014353
|
[] |
no_license
|
AndreasPatakis/students-rest-api
|
d56db8a478a0e117a004f200e4b5871087b30ccb
|
480dc64e7ae9927b0c84d4d3b633e90919de6de6
|
refs/heads/main
| 2023-07-15T02:12:16.048230
| 2021-08-31T13:55:01
| 2021-08-31T13:55:01
| 387,763,957
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,745
|
py
|
from rest_framework import serializers
from students_api import models
class StudentInfoSerializer(serializers.ModelSerializer):
class Meta:
model = models.StudentInfo
fields = ['id','first_name','last_name','email','home_university','outgoing_university','created_on']
extra_kwargs = {
'created_on':{
'read_only':True
}
}
class UserStudentSerializer(serializers.ModelSerializer):
"""Serializer for UserStudent model"""
class Meta:
model = models.UserStudent
fields = ['id','name', 'email', 'password']
extra_kwargs = {
'password':{
'write_only':True,
'style':{'input_type': 'password'}
}
}
def create(self, validated_data):
"""We overwrite create method so that we use our custom create_user from UserStudent model"""
user = models.UserStudent.objects.create_user(
email=validated_data['email'],
name=validated_data['name'],
password=validated_data['password']
)
return user
def update(self, instance, validated_data):
"""We overwrite update method so it hashes the password"""
if 'password' in validated_data:
password = validated_data.pop('password')
instance.set_password(password)
return super().update(instance, validated_data)
class StudentNotesSerializer(serializers.ModelSerializer):
"""Serializer for the StudentNotes model"""
class Meta:
model = models.StudentNotes
fields = ('id','user_student', 'note_text', 'created_on')
extra_kwargs = {
'user_student':{'read_only':True}
}
|
[
"andreaspatakis@gmail.com"
] |
andreaspatakis@gmail.com
|
abaab192266b62fecf1d3ed43812d90398d0cb17
|
5526613640c53bd4193d5c08dc5b192e359271d9
|
/phase_2-3/ML/Models.py
|
5e26467f8be473f11872366d1447bb06b1300182
|
[
"MIT"
] |
permissive
|
Coke-Zhang/Deep-Docking-NonAutomated
|
4451234defbc002362b3600756425d728bdc7741
|
a55178dd910e7827dafcc3e0ffaed12277df8307
|
refs/heads/main
| 2023-07-05T05:50:11.236359
| 2021-08-17T20:16:32
| 2021-08-17T20:16:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,733
|
py
|
"""
Version 1.1.0
The model to be used in deep docking
James Gleave
"""
import keras
from ML.lasso_regularizer import Lasso
from ML.DDMetrics import *
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import (Input, Dense, Activation, BatchNormalization, Dropout, LSTM,
Conv2D, MaxPool2D, Flatten, Embedding, MaxPooling1D,
Conv1D)
from tensorflow.keras.regularizers import *
import warnings
warnings.filterwarnings('ignore')
# TODO Give user option to make their own model
class Models:
def __init__(self, hyperparameters, output_activation, name="model"):
"""
Class to hold various NN architectures allowing for cleaner code when determining which architecture
is best suited for DD.
:param hyperparameters: a dictionary holding the parameters for the models:
('bin_array', 'dropout_rate', 'learning_rate', 'num_units')
"""
self.hyperparameters = hyperparameters
self.output_activation = output_activation
self.name = name
def original(self, input_shape):
x_input = Input(input_shape, name="original")
x = x_input
for j, i in enumerate(self.hyperparameters['bin_array']):
if i == 0:
x = Dense(self.hyperparameters['num_units'], name="Hidden_Layer_%i" % (j + 1))(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
else:
x = Dropout(self.hyperparameters['dropout_rate'])(x)
x = Dense(1, activation=self.output_activation, name="Output_Layer")(x)
model = Model(inputs=x_input, outputs=x, name='Progressive_Docking')
return model
def dense_dropout(self, input_shape):
"""This is the most simple neural architecture.
Four dense layers, batch normalization, relu activation, and dropout.
"""
# The model input
x_input = Input(input_shape, name='dense_dropout')
x = x_input
# Model happens here...
x = Dense(self.hyperparameters['num_units'], name="Hidden_Layer_1")(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = Dropout(self.hyperparameters['dropout_rate'])(x)
x = Dense(self.hyperparameters['num_units'], name="Hidden_Layer_2")(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = Dropout(self.hyperparameters['dropout_rate'])(x)
x = Dense(self.hyperparameters['num_units'], name="Hidden_Layer_3")(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = Dropout(self.hyperparameters['dropout_rate'])(x)
x = Dense(self.hyperparameters['num_units'], name="Hidden_Layer_4")(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = Dropout(self.hyperparameters['dropout_rate'])(x)
# output
x = Dense(1, activation=self.output_activation, name="Output_Layer")(x)
model = Model(inputs=x_input, outputs=x, name='Progressive_Docking')
return model
def wide_net(self, input_shape):
"""
A simple square model
"""
# The model input
x_input = Input(input_shape, name='wide_net')
x = x_input
# The width coefficient
width_coef = len(self.hyperparameters['bin_array'])//2
for i, layer in enumerate(self.hyperparameters['bin_array']):
if layer == 0:
layer_name = "Hidden_Dense_Layer_" + str(i//2)
x = Dense(self.hyperparameters['num_units'] * width_coef, name=layer_name)(x)
x = Activation('relu')(x)
else:
x = BatchNormalization()(x)
x = Dropout(self.hyperparameters['dropout_rate'])(x)
# output
x = Dense(1, activation=self.output_activation, name="Output_Layer")(x)
model = Model(inputs=x_input, outputs=x, name='Progressive_Docking')
return model
def shared_layer(self, input_shape):
"""This Model uses a shared layer"""
# The model input
x_input = Input(input_shape, name="shared_layer")
# Here is a layer that will be shared
shared_layer = Dense(input_shape[0], name="Shared_Hidden_Layer")
# Apply the layer twice
x = shared_layer(x_input)
x = shared_layer(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = Dropout(self.hyperparameters['dropout_rate'])(x)
# Apply dropout and normalization
x = Dense(self.hyperparameters['num_units'], name="Hidden_Layer")(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = Dropout(self.hyperparameters['dropout_rate'])(x)
for i, layer in enumerate(self.hyperparameters['bin_array']):
if layer == 0:
layer_name = "Hidden_Layer_" + str(i)
                x = Dense(self.hyperparameters['num_units'] // max(i, 1), name=layer_name)(x)  # max() guards against division by zero when i == 0
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = Dropout(self.hyperparameters['dropout_rate'])(x)
# output
x = Dense(1, activation=self.output_activation, name="output_layer")(x)
model = Model(inputs=x_input, outputs=x, name='Progressive_Docking')
return model
@staticmethod
def get_custom_objects():
return {"Lasso": Lasso}
@staticmethod
def get_available_modes():
modes = []
for attr in Models.__dict__:
if attr[0] != '_' and attr != "get_custom_objects" and attr != "get_available_modes":
modes.append(attr)
return modes
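# A minimal usage sketch (my own illustration; the hyperparameter values are
# assumptions, only the keys follow the docstring above):
#   hyperparameters = {'bin_array': [0, 1, 0, 1], 'dropout_rate': 0.5,
#                      'learning_rate': 0.0001, 'num_units': 256}
#   models = Models(hyperparameters, output_activation='sigmoid')
#   model = models.dense_dropout(input_shape=(1024,))
#   model.compile(optimizer='adam', loss='binary_crossentropy')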
class TunerModel:
def __init__(self, input_shape):
self.input_shape = input_shape
def build_tuner_model(self, hp):
"""
This method should be used with keras tuner.
"""
# Create the hyperparameters
num_hidden_layers = hp.Int('hidden_layers', min_value=1, max_value=4, step=1)
num_units = hp.Int("num_units", min_value=128, max_value=1024)
dropout_rate = hp.Float("dropout_rate", min_value=0.0, max_value=0.8)
learning_rate = hp.Float('learning_rate', min_value=0.00001, max_value=0.001)
epsilon = hp.Float('epsilon', min_value=1e-07, max_value=1e-05)
kernel_reg_func = [None, Lasso, l1, l2][hp.Choice("kernel_reg", values=[0, 1, 2, 3])]
reg_amount = hp.Float("reg_amount", min_value=0.0, max_value=0.001, step=0.0001)
# Determine how the layer(s) are shared
share_layer = hp.Boolean("shared_layer")
if share_layer:
share_all = hp.Boolean("share_all")
shared_layer_units = hp.Int("num_units", min_value=128, max_value=1024)
shared_layer = Dense(shared_layer_units, name="shared_hidden_layer")
if not share_all:
where_to_share = set()
layer_connections = hp.Int("num_shared_layer_connections", min_value=1, max_value=num_hidden_layers)
for layer in range(layer_connections):
where_to_share.add(hp.Int("where_to_share", min_value=0, max_value=num_hidden_layers, step=1))
# Build the model according to the hyperparameters
inputs = Input(shape=self.input_shape, name="input")
x = inputs
# Determine number of hidden layers
for layer_num in range(num_hidden_layers):
# If we are not using a kernel regulation function or not...
if kernel_reg_func is None:
x = Dense(num_units, name="dense_" + str(layer_num))(x)
else:
x = Dense(num_units, kernel_regularizer=kernel_reg_func(reg_amount), name="dense_" + str(layer_num))(x)
# If we are using a common shared layer, then connect it.
if (share_layer and share_all) or (share_layer and layer_num in where_to_share):
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = Dropout(dropout_rate)(x)
x = shared_layer(x)
# Apply these to every layer
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = Dropout(dropout_rate)(x)
outputs = Dense(1, activation='sigmoid', name="output_layer")(x)
model = Model(inputs=inputs, outputs=outputs)
model.compile(optimizer=keras.optimizers.Adam(learning_rate=learning_rate, epsilon=epsilon),
loss=keras.losses.BinaryCrossentropy(),
metrics=['accuracy', "AUC", "Precision", "Recall", DDMetrics.scaled_performance])
print(model.summary())
return model
|
[
"j.gleave.james@gmail.com"
] |
j.gleave.james@gmail.com
|
6c47c751b632674e2e0ad4d9a57dc7dea040b04d
|
b02eeeeef39a55e89c4cb0abfd19df903f3bd41a
|
/gen/pb_python/flyteidl/service/flyteadmin/test/test_admin_pager_duty_notification.py
|
79062cc22e761815e110c4f9c4667a88a215871a
|
[
"Apache-2.0"
] |
permissive
|
nuclyde-io/flyteidl
|
af0b81c621c45c05120468ccdfdbcac8e776b5e6
|
376cfc2990f610dfc9f7a63e5bef84785241fb6b
|
refs/heads/master
| 2023-07-16T19:02:34.683059
| 2021-02-03T03:07:16
| 2021-02-03T03:07:16
| 336,040,693
| 0
| 0
|
Apache-2.0
| 2021-08-20T05:16:37
| 2021-02-04T18:06:03
| null |
UTF-8
|
Python
| false
| false
| 1,044
|
py
|
# coding: utf-8
"""
flyteidl/service/admin.proto
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: version not set
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import flyteadmin
from flyteadmin.models.admin_pager_duty_notification import AdminPagerDutyNotification # noqa: E501
from flyteadmin.rest import ApiException
class TestAdminPagerDutyNotification(unittest.TestCase):
"""AdminPagerDutyNotification unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testAdminPagerDutyNotification(self):
"""Test AdminPagerDutyNotification"""
# FIXME: construct object with mandatory attributes with example values
# model = flyteadmin.models.admin_pager_duty_notification.AdminPagerDutyNotification() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
[
"kumare@lyft.com"
] |
kumare@lyft.com
|
bdfac2cd61e44faa51a89c9a34c72d969a59e1f1
|
bac982f419e9160176c979c15d733870039cf083
|
/genThumbnail/pages/tests.py
|
aa8380c963c04db37d3f30b263c8965b2659126b
|
[] |
no_license
|
mukuls9971/generateThumbnail
|
cfcf6ff8e62f80f6425e3917c0b7b85758224733
|
e49e2be0b28227fe10112d56461f398f099f8a89
|
refs/heads/master
| 2022-12-26T06:09:46.283747
| 2019-08-25T09:29:37
| 2019-08-25T09:29:37
| 204,172,131
| 1
| 0
| null | 2022-12-08T06:04:08
| 2019-08-24T14:58:15
|
Python
|
UTF-8
|
Python
| false
| false
| 256
|
py
|
from django.urls import reverse
from django.test import TestCase
class GenThumbnail(TestCase):
    def test_server_up(self):
response = self.client.get(reverse('pages:generateThumbnailView'))
self.assertEqual(response.status_code, 200)
|
[
"mukul9971@gmail.com"
] |
mukul9971@gmail.com
|
90eb24001dac503770a3e79bbaf167297f1ef847
|
6741d2d956ecaa84f7bac4205de7ac6f3e78d5b1
|
/bin/libs/DataStruc.py
|
24b69bad7ed8374684a4c5ec57db8f5c5a6cd33c
|
[] |
no_license
|
huohongjian/stock3
|
dee9e1a8730bcbe9999ef14addc2d7861fcb13e9
|
57b3f6f298693309252cb3c34c0ec64f450f4ce9
|
refs/heads/master
| 2021-05-12T09:38:31.946794
| 2019-04-11T07:29:59
| 2019-04-11T07:29:59
| 117,325,435
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,876
|
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# author: HuoHongJian
# date : 2018-01-22
class LinkedDict():
def __init__(self, maxLength=10):
self.maxLength = maxLength
self.length = 0
self.buffer = {}
self.head = None
self.last = None
    def clear(self):
        self.buffer = {}
        self.length = 0
        self.head = None
        self.last = None
def size(self):
return self.length
def has(self, key):
return key in self.buffer
def isHead(self, key):
return key == self.head
def isLast(self, key):
return key == self.last
def remove(self, key):
if self.has(key):
prevKey = self.buffer.get(key).get('prev')
nextKey = self.buffer.get(key).get('next')
if self.isHead(key):
self.head = nextKey
self.buffer[nextKey]['prev'] = None
elif self.isLast(key):
self.last = prevKey
self.buffer[prevKey]['next'] = None
else:
                self.buffer[prevKey]['next'] = nextKey
                self.buffer[nextKey]['prev'] = prevKey
del self.buffer[key]
self.length -= 1
def add(self, key, data):
        if self.length >= self.maxLength:  # evict the oldest entry once the cache is full
self.remove(self.head)
node = {
'data': data,
'prev': self.last,
'next': None,
}
if self.head is None:
self.head = key
else:
self.buffer[self.last]['next'] = key
self.last = key
self.buffer[key] = node
self.length += 1
def get(self, key):
if self.has(key):
node = self.buffer[key]
if not self.isLast(key):
prevKey = node.get('prev')
nextKey = node.get('next')
if self.isHead(key):
self.head = nextKey
self.buffer[nextKey]['prev'] = None
else:
self.buffer[prevKey]['next'] = nextKey
self.buffer[nextKey]['prev'] = prevKey
self.buffer[self.last]['next'] = key
node['prev'] = self.last
node['next'] = None
self.last = key
print('get from cache')
return node.get('data')
def getHead(self):
return self.get(self.head)
def getLast(self):
return self.get(self.last)
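# A minimal usage sketch (not in the original module), treating the class as a
# small LRU-style cache: get() moves the accessed key to the tail, so the last
# node is always the most recently used one.
if __name__ == '__main__':
    cache = LinkedDict(maxLength=3)
    cache.add('a', 1)
    cache.add('b', 2)
    print(cache.get('a'))      # 1; 'a' is moved to the tail (most recent)
    print(cache.getLast())     # 1, since 'a' is now the last node
    print(cache.has('c'))      # False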
|
[
"hhj@cwork.FreeBSD.cn"
] |
hhj@cwork.FreeBSD.cn
|
982f0da4ad46edb7ce2b9cdb7e87f69614147d69
|
33c2d31fa51ff47507d40bcf50308ee1c36637c7
|
/listings/views.py
|
65a7229a4b47faef2603d31450403bf4d1dc9d6b
|
[] |
no_license
|
hataung/Django-Python-Backend-Project-for-MaasDev
|
cc1aa4cf6e96be5f9f72fdd3a5ee3a1d0689ed45
|
93e32cf1fff0c0a44a18a12773522afc783e3d63
|
refs/heads/master
| 2020-12-02T11:37:11.806004
| 2019-12-30T23:26:51
| 2019-12-30T23:26:51
| 230,993,337
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 391
|
py
|
from django.shortcuts import render
from .models import Listing
def index(request):
listings = Listing.objects.all()
context = {
'listings': listings
}
return render(request, 'listings/listings.html', context)
def listing(request):
return render(request, 'listings/listing.html')
def search(request):
return render(request, 'listings/search.html')
|
[
"hataung@gmail.com"
] |
hataung@gmail.com
|
57b0621f9b678f50a2a4a7b4115417ccddb6ec5b
|
55c250525bd7198ac905b1f2f86d16a44f73e03a
|
/Python/Flask/Book_evaluator/venv/Lib/encodings/iso8859_7.py
|
548579285f31d0307074b640f799b13507d8cd4f
|
[] |
no_license
|
NateWeiler/Resources
|
213d18ba86f7cc9d845741b8571b9e2c2c6be916
|
bd4a8a82a3e83a381c97d19e5df42cbababfc66c
|
refs/heads/master
| 2023-09-03T17:50:31.937137
| 2023-08-28T23:50:57
| 2023-08-28T23:50:57
| 267,368,545
| 2
| 1
| null | 2022-09-08T15:20:18
| 2020-05-27T16:18:17
| null |
UTF-8
|
Python
| false
| false
| 130
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:b352eca3b819488f64fb3338fd93f39c1e30f32bb13f2f9c577925e58f2960e4
size 12844
|
[
"nateweiler84@gmail.com"
] |
nateweiler84@gmail.com
|
5a89fbeca9a6f09ba1a9cb2766d6401f3cfe9c37
|
ff2adf0c96aee0e76f145ecb148969c295a7684a
|
/BionsMLenv/bin/pip
|
f8ee14b098b870153f3cdf50177c000b3227c073
|
[] |
no_license
|
Romitha/BionsFlask
|
ff305a5c8e8ca352c0aed526397431e10dcd340b
|
b07cee56c656535386afa19723ca90ac177af570
|
refs/heads/master
| 2023-05-31T19:13:17.355540
| 2019-12-05T10:43:22
| 2019-12-05T10:43:22
| 226,051,566
| 0
| 0
| null | 2023-05-22T22:34:47
| 2019-12-05T08:21:45
|
C++
|
UTF-8
|
Python
| false
| false
| 283
|
#!/home/janith/Documents/Python/Bions/BionsRealTimeMLFlask/BionsMLenv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"janith@datampowered.com.au"
] |
janith@datampowered.com.au
|
|
f4290df45807ae3421e74367c8c802d41d3876cd
|
cca89a7bbe2da907a38eb00e9a083f57597273f0
|
/38. 外观数列/pythonCode.py
|
4e6c06fc944b194ff8272cb5d208bd2633070cec
|
[] |
no_license
|
xerprobe/LeetCodeAnswer
|
cc87941ef2a25c6aa1366e7a64480dbd72750670
|
ea1822870f15bdb1a828a63569368b7cd10c6ab8
|
refs/heads/master
| 2022-09-23T09:15:42.628793
| 2020-06-06T16:29:59
| 2020-06-06T16:29:59
| 270,215,362
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,339
|
py
|
class Solution:
def countAndSay(self, n: int) -> str:
if(n > 1):
s = self.countAndSay(n-1)
cache = 1
res = ''
for i in range(1,len(s)):
if(s[i-1] == s[i]):
cache += 1
else:
res = res + str(cache) + s[i-1]
cache = 1
return res + str(cache) + s[-1]
else:
return '1'
'''
The "count-and-say" sequence is a sequence of integers starting with the digit 1,
in which every term describes the previous one. The first five terms are:
1.     1
2.     11
3.     21
4.     1211
5.     111221
1 is read as "one 1", i.e. 11.
11 is read as "two 1s", i.e. 21.
21 is read as "one 2", "one 1", i.e. 1211.
Given a positive integer n (1 <= n <= 30), output the n-th term of the count-and-say sequence.
Note: every term of the sequence is represented as a string.
Example 1:
Input: 1
Output: "1"
Explanation: this is the base case.
Example 2:
Input: 4
Output: "1211"
Explanation: for n = 3 the term is "21", which has the two groups "2" and "1"; "2" is read as "12" (count = 1, value = 2) and likewise "1" is read as "11". Concatenating "12" and "11" gives "1211".
'''
|
[
"changwenhao1@qq.com"
] |
changwenhao1@qq.com
|
9493d35e5d07b571524679c86f2b3ec92f2cf783
|
a414569f6e54d8b208301515e6e3ed2e71f71264
|
/linkedlist/ck189/delete-middle-node.py
|
7142d2969300961003faa8da17b247b574861b21
|
[] |
no_license
|
MoranLi/algoritm-and-data-structure
|
ea17778f208d4a158e6649765521b95b5ee5a6ba
|
7a38253d99e4a18809649aaa15506d01bf1d9846
|
refs/heads/master
| 2020-03-30T06:42:45.620795
| 2018-10-23T02:12:05
| 2018-10-23T02:12:05
| 150,883,361
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 193
|
py
|
import random
def delete_middle(listx):
    del listx[len(listx)//2]
return listx
list1 = []
for x in range(0,10):
list1.append(random.randint(0,5))
print(list1)
print(delete_middle(list1))
|
[
"moran.li@usask.ca"
] |
moran.li@usask.ca
|
5503154045f9544e51b2e9b4ceda3ff597290bee
|
a50180753b613c3bbdc2b25b31b6cd344152e086
|
/http_server/modified_http_server.py
|
dfc22291784420eada96ad69797a6a434e9700b2
|
[] |
no_license
|
add-skill/API-Development-Guide
|
38affc41ecb8af27622d813e4627ea114ac94d2d
|
6f9f97bbaf1bb28d1b8cc7b24714e6a698a000f2
|
refs/heads/master
| 2023-03-18T16:45:16.979959
| 2020-04-22T10:12:40
| 2020-04-22T10:12:40
| 257,858,173
| 1
| 0
| null | 2021-03-20T03:32:57
| 2020-04-22T09:46:57
|
Python
|
UTF-8
|
Python
| false
| false
| 1,248
|
py
|
from http.server import BaseHTTPRequestHandler, HTTPServer, SimpleHTTPRequestHandler
from urllib.parse import urlparse
from urllib.parse import parse_qs
class MyHttpRequestHandler(SimpleHTTPRequestHandler):
def do_GET(self):
if self.path == '/':
self.path = 'addskillwebpage.html'
return SimpleHTTPRequestHandler.do_GET(self)
# Sending an '200 OK' response
self.send_response(200)
# Setting the header
self.send_header("Content-type", "text/html")
# Whenever using 'send_header', you also have to call 'end_headers'
self.end_headers()
# Extract query param
name = 'World'
query_components = parse_qs(urlparse(self.path).query)
if 'name' in query_components:
name = query_components["name"][0]
# Some custom HTML code, possibly generated by another function
html = f"<html><head></head><body><h1>Hello {name}!</h1></body></html>"
# Writing the HTML contents with UTF-8
self.wfile.write(bytes(html, "utf8"))
return
PORT = 8000
my_server = HTTPServer(('localhost', PORT), MyHttpRequestHandler)
print("Serving on port ", PORT)
# Start the server
my_server.serve_forever()
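# Rough usage sketch (assuming addskillwebpage.html exists in the working directory):
#   http://localhost:8000/              -> serves addskillwebpage.html
#   http://localhost:8000/hi?name=Bob   -> responds with "<h1>Hello Bob!</h1>"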
|
[
"akdeswa@microsoft.com"
] |
akdeswa@microsoft.com
|
a4dccc8a1ad8197482bc566d88feabfacd628ce4
|
e91011e80191d415cdec4b155a721559b5f0c2fd
|
/NeuralNetworks-tensorflow/CNN/transfer_learning/simple_transfer_learning.py
|
a26e378111014657fc4a3538bae8fd50df079acd
|
[] |
no_license
|
zhaocc1106/machine_learn
|
1cb70366f61d38251715094e07152e0d04e3be01
|
d1b70b2a954f4665b628ba252b03c1a74b95559f
|
refs/heads/master
| 2023-05-31T14:24:27.467600
| 2023-05-12T02:12:11
| 2023-05-12T02:12:11
| 125,951,974
| 15
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,066
|
py
|
#
# Copyright (c) 2019 Baidu.com, Inc. All Rights Reserved
#
"""
This is a simple transfer learning model built via TF Hub (https://tfhub.dev/).
Copy the code into Google Colab to run it if TF Hub cannot be reached locally.
tf-hub needs to be installed first:
pip install -U --pre tf-hub-nightly
Authors: zhaochaochao(zhaochaochao@baidu.com)
Date: 2019/6/19 8:32 PM
"""
# Common libs.
import os
import shutil
import math
# 3rd-part libs.
import numpy as np
import tensorflow_hub as tf_hub
import tensorflow.keras as keras
import matplotlib.pyplot as plt
IMAGE_SHAPE = (224, 224)
PRE_TRAINED_MODEL_URL = "https://tfhub.dev/google/tf2-preview/mobilenet_v2" \
"/feature_vector/2"
IMAGE_SIZE = 3670 * 0.8  # 80% of the data is used for training.
NUM_CLASS = 5
EPOCH = 10
BATCH_SIZE = 100
MODEL_PATH = "/tmp/simple_transfer_learning/"
WEIGHTS_PATH = "/tmp/simple_transfer_learning/model_weights.h5"
def load_image_data():
"""Load the flower image datas.
Returns:
The image class names and image data generator of training and
validation.
"""
data_root = keras.utils.get_file(
'flower_photos',
'https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz',
untar=True)
# Use 20% image to validate.
datagen_kwargs = dict(rescale=1. / 255, validation_split=.20)
image_generator = keras.preprocessing.image.ImageDataGenerator(
**datagen_kwargs)
# Load training image data set.
train_image_data = image_generator.flow_from_directory(str(data_root),
subset="training",
target_size=IMAGE_SHAPE,
batch_size=BATCH_SIZE)
# Load test image data set.
vali_image_data = image_generator.flow_from_directory(str(data_root),
subset="validation",
target_size=IMAGE_SHAPE,
batch_size=BATCH_SIZE)
class_names = sorted(train_image_data.class_indices.items(),
key=lambda pair: pair[1])
# {0: 'daisy', 1: 'dandelion', 2: 'roses', 3: 'sunflowers', 4: 'tulips'}
class_names = {value: key for key, value in class_names}
return class_names, train_image_data, vali_image_data
def build_model(pre_trained_model_url):
"""Build the transfer learning network model.
Args:
        pre_trained_model_url: The pre-trained model url of tensorflow hub
(https://tfhub.dev/s?module-type=image-feature-vector).
Returns:
The transfer learning network model.
"""
# Construct the feature extractor layer via tf hub model.
feature_extractor_layer = tf_hub.KerasLayer(handle=pre_trained_model_url,
trainable=False,
input_shape=[224, 224, 3])
model = keras.Sequential([
feature_extractor_layer,
keras.layers.Dropout(rate=0.2),
# Add softmax layer.
keras.layers.Dense(units=NUM_CLASS,
activation="softmax",
kernel_regularizer=keras.regularizers.l2(0.0001))
])
model.compile(optimizer=keras.optimizers.Adam(),
loss=keras.losses.categorical_crossentropy,
metrics=["accuracy"])
model.summary()
return model
def train(model, train_image_data, vali_image_data, epoch):
"""Train the model.
Args:
model: The network model.
        train_image_data: The training image data generator.
        vali_image_data: The validation image data generator.
epoch: The epoch number.
"""
# Define fit callbacks.
callbacks = [
# Tensorboard callback.
keras.callbacks.TensorBoard(log_dir=MODEL_PATH)
]
if os.path.exists(MODEL_PATH):
shutil.rmtree(MODEL_PATH)
step_per_epoch = math.ceil(IMAGE_SIZE / BATCH_SIZE)
model.fit_generator(
generator=train_image_data,
epochs=epoch,
steps_per_epoch=step_per_epoch,
validation_data=vali_image_data,
validation_steps=1,
callbacks=callbacks
)
model.save_weights(filepath=WEIGHTS_PATH, overwrite=True)
def predict(model, image_data, image_labels, class_names):
"""Predict the classes of image_data.
Args:
model: The model.
image_data: The test image data.
image_labels: The true image labels.
class_names: The class names directory.
"""
# Predict the labels.
predict_labels = model.predict(image_data)
predict_labels = np.argmax(predict_labels, axis=1)
image_labels = np.argmax(image_labels, axis=1)
print("Predict labels:\n", str(predict_labels))
print("True labels:\n", str(image_labels))
# Plot the image classification results.
plt.figure(figsize=(10, 9))
plt.subplots_adjust(hspace=0.5)
for n in range(30):
plt.subplot(6, 5, n + 1)
plt.imshow(image_data[n])
color = "green" if predict_labels[n] == image_labels[n] else "red"
plt.title(class_names[predict_labels[n]], color=color)
plt.axis('off')
_ = plt.suptitle("Model predictions (green: correct, red: incorrect)")
plt.show()
if __name__ == "__main__":
class_names, train_image_data, vali_image_data = load_image_data()
print("class names:\n", str(class_names))
# Build network model.
model = build_model(PRE_TRAINED_MODEL_URL)
# Train the model.
train(model, train_image_data, vali_image_data, EPOCH)
# Test the model
# Load model
model_loaded = build_model(PRE_TRAINED_MODEL_URL)
model_loaded.load_weights(WEIGHTS_PATH)
# Get test image data.
test_image_data, test_image_labels = vali_image_data.next()
test_image_data = test_image_data[: 30]
test_image_labels = test_image_labels[: 30]
# Predict.
predict(model_loaded, test_image_data, test_image_labels, class_names)
|
[
"zhaochaochao@baidu.com"
] |
zhaochaochao@baidu.com
|
30ff18bf4236dcf8755197d6ade87af5518874be
|
6238dc5b5818f54295547cf4cb1afa5553ddfb94
|
/taobao/top/api/rest/SimbaAdgroupAdgroupcatmatchsGetRequest.py
|
24e2de0cbe6edc91e0df8f31993d210f8c2e1e8c
|
[] |
no_license
|
liaosiwei/guagua
|
8208bb82b1df5506dcb86c1a7094c849ea5576a6
|
ee6025813e83568dc25beb52279c86f8bd33f1a4
|
refs/heads/master
| 2016-09-06T16:45:00.798633
| 2013-05-03T04:02:35
| 2013-05-03T04:02:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 367
|
py
|
'''
Created by auto_sdk on 2013-04-14 16:35:32
'''
from top.api.base import RestApi
class SimbaAdgroupAdgroupcatmatchsGetRequest(RestApi):
def __init__(self,domain='gw.api.taobao.com',port=80):
RestApi.__init__(self,domain, port)
self.adgroup_ids = None
self.nick = None
def getapiname(self):
return 'taobao.simba.adgroup.adgroupcatmatchs.get'
|
[
"liaosiweiorxiaowei@gmail.com"
] |
liaosiweiorxiaowei@gmail.com
|
0406509ee9f8401da8685b1a85441948fddffe9d
|
8e216c3e3d4e9887d02311c82b2a7a747f0e093a
|
/py/2021/day1/part1.py
|
b004e3a8ae628084fc64631811bad16a358c2888
|
[] |
no_license
|
Antetokounpo/Advent-of-Code
|
9f07f09a6c08f69db4a8dc3559e354549e4aa215
|
607d5754cdf27f52858214563dccbc23f8f1b5a9
|
refs/heads/master
| 2023-07-21T19:30:15.845750
| 2023-07-10T01:32:11
| 2023-07-10T01:32:11
| 224,746,966
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 140
|
py
|
from AOC import read_input
ds = read_input().splitlines()
ds = list(map(int, ds))
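# Count how many entries are larger than the previous one by comparing each
# consecutive pair (i, j) of the list.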
print([j>i for i, j in zip(ds[:-1], ds[1:])].count(True))
|
[
"antor.232@outlook.com"
] |
antor.232@outlook.com
|
88780208078048052dcd6df32f8d029888d6a13c
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_minesweeper.py
|
058fff249bf41fa4e8d0d821acb7b0d187f2bb06
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 384
|
py
|
# class header
class _MINESWEEPER():
def __init__(self,):
self.name = "MINESWEEPER"
self.definitions = [u'a ship that is used to discover if mines (= bombs) are present and to remove them from the sea']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
8a789384c12f9a48619251e8bc9e1a677abad00a
|
e3bd7f1175997e68424484c8f782a018192104a3
|
/tweets/migrations/0002_tweet_parent.py
|
d01d2b59df2196fff742cc40629e66def344de7c
|
[] |
no_license
|
GolouHamady/tweet
|
a0495a3d97b5a7a260892ea9e00b9d4f5462627f
|
efc9f79568e806eb530806cf713bbdcf88b5e83b
|
refs/heads/master
| 2023-05-10T10:26:10.096892
| 2020-12-06T02:15:32
| 2020-12-06T02:15:32
| 266,257,008
| 0
| 0
| null | 2021-06-10T23:41:36
| 2020-05-23T03:40:50
|
Python
|
UTF-8
|
Python
| false
| false
| 477
|
py
|
# Generated by Django 2.1.7 on 2019-04-21 11:57
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('tweets', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='tweet',
name='parent',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='tweets.Tweet'),
),
]
|
[
"BARRY@dev.groupe.adg"
] |
BARRY@dev.groupe.adg
|
02e308e90fc57cec8266fc26f0efd6dcd6c50976
|
f12e0cb6bedec951db6e39574176534902bb8e66
|
/tcs.py
|
ee3f0705e2b5493ff4128784df8436513221913c
|
[] |
no_license
|
Denni007/PythonLearning
|
3827c43c0560f0fd5e2ac3029948e52c0b98882d
|
5b4db5c2703991bfecda881e116f7c411957e80f
|
refs/heads/master
| 2020-04-12T16:11:53.145822
| 2019-08-07T18:00:42
| 2019-08-07T18:00:42
| 162,605,345
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 442
|
py
|
a=input()
a=a[::-1]
l=[]
sum = 0
for i in range(len(a)):
if(a[i]=='A'):
l.append(10)
elif(a[i]=='B'):
l.append(11)
elif(a[i]=='C'):
l.append(12)
elif(a[i]=='D'):
l.append(13)
elif(a[i]=='E'):
l.append(14)
elif(a[i]=='F'):
l.append(15)
elif(a[i]=='G'):
l.append(16)
else:
c=a[i]
l.append(c)
sum=sum+(17**i)*int(l[i])
print(sum)
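# Worked example (my own illustration, not part of the original submission):
# the loop treats the input as a base-17 number with digits 0-9 and A-G
# (A=10 ... G=16), so an input of "1A" yields 1*17 + 10 = 27.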
|
[
"kukadiyadenish@gmail.com"
] |
kukadiyadenish@gmail.com
|
6feb5e761a4ee0937e105cf7bfd8ca6c3a35b827
|
1fc94118f1a088109abaf6ca2ffa531d43f4b005
|
/src/Computational Photography/HDR/HDR/HDR.py
|
e35e27617d7c01da4c1c32faa40d83cff23d27fe
|
[] |
no_license
|
lealzhan/opencv_python
|
b2e805228a57082079ea4317e078cf9f4b713840
|
4ac3c8bcc8a2fe3d259c3a5d2c4f4d7c216f820e
|
refs/heads/master
| 2021-01-01T04:02:34.672980
| 2017-07-15T06:43:41
| 2017-07-15T06:43:41
| 97,108,231
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,524
|
py
|
#http://docs.opencv.org/3.2.0/d2/df0/tutorial_py_hdr.html
import cv2
import numpy as np
def Test():
#################################
#### Exposure_sequence_HDR() ####
#################################
# Loading exposure images into a list
img_fn = ["img0.jpg", "img1.jpg", "img2.jpg", "img3.jpg"]
img_list = [cv2.imread(fn) for fn in img_fn]
exposure_times = np.array([15.0, 2.5, 0.25, 0.0333], dtype=np.float32)
# Merge exposures to HDR image
merge_debvec = cv2.createMergeDebevec()
hdr_debvec = merge_debvec.process(img_list, times=exposure_times.copy())
merge_robertson = cv2.createMergeRobertson()
hdr_robertson = merge_robertson.process(img_list, times=exposure_times.copy())
# Tonemap HDR image
tonemap1 = cv2.createTonemapDurand(gamma=2.2)
res_debvec = tonemap1.process(hdr_debvec.copy())
tonemap2 = cv2.createTonemapDurand(gamma=1.3)
res_robertson = tonemap2.process(hdr_robertson.copy())
    # Exposure fusion using Mertens (no exposure times or tone mapping needed; it already gives a result in [0,1])
merge_mertens = cv2.createMergeMertens()
res_mertens = merge_mertens.process(img_list)
# Convert datatype to 8-bit and save
res_debvec_8bit = np.clip(res_debvec*255, 0, 255).astype('uint8')
res_robertson_8bit = np.clip(res_robertson*255, 0, 255).astype('uint8')
res_mertens_8bit = np.clip(res_mertens*255, 0, 255).astype('uint8')
cv2.imwrite("ldr_debvec.jpg", res_debvec_8bit)
cv2.imwrite("ldr_robertson.jpg", res_robertson_8bit)
cv2.imwrite("fusion_mertens.jpg", res_mertens_8bit)
#################################################
#### Estimate camera response function (CRF) ####
#################################################
#The camera response function (CRF) gives us the
#connection between the scene radiance to the measured intensity values.
cal_debvec = cv2.createCalibrateDebevec()
crf_debvec = cal_debvec.process(img_list, times=exposure_times)
hdr_debvec = merge_debvec.process(img_list, times=exposure_times.copy(), response=crf_debvec.copy())
cal_robertson = cv2.createCalibrateRobertson()
crf_robertson = cal_robertson.process(img_list, times=exposure_times)
hdr_robertson = merge_robertson.process(img_list, times=exposure_times.copy(), response=crf_robertson.copy())
#robertson is similar to Debevec
def HDRDebevec(img_fn, exposure_times, if_crf=False):
img_list = [cv2.imread(fn) for fn in img_fn]
if if_crf :
merge_debvec = cv2.createMergeDebevec()
cal_debvec = cv2.createCalibrateDebevec()
crf_debvec = cal_debvec.process(img_list, times=exposure_times)
hdr_debvec = merge_debvec.process(img_list, times=exposure_times.copy(), response=crf_debvec.copy())
else:
merge_debvec = cv2.createMergeDebevec()
hdr_debvec = merge_debvec.process(img_list, times=exposure_times.copy())
tonemap1 = cv2.createTonemapDurand(gamma=2.2)
res_debvec = tonemap1.process(hdr_debvec.copy())
res_debvec_8bit = np.clip(res_debvec*255, 0, 255).astype('uint8')
cv2.imwrite("ldr_debvec_0.jpg", res_debvec_8bit)
def HDRMertens(img_fn, exposure_times):
img_list = [cv2.imread(fn) for fn in img_fn]
merge_mertens = cv2.createMergeMertens()
res_mertens = merge_mertens.process(img_list)
res_mertens_8bit = np.clip(res_mertens*255, 0, 255).astype('uint8')
cv2.imwrite("fusion_mertens_0.jpg", res_mertens_8bit)
if __name__ == '__main__':
img_fn = ["img0.jpg", "img1.jpg", "img2.jpg", "img3.jpg"]
exposure_times = np.array([15.0, 2.5, 0.25, 0.0333], dtype=np.float32)
HDRDebevec(img_fn, exposure_times, False)
HDRMertens(img_fn, exposure_times)
|
[
"lealzhan@126.com"
] |
lealzhan@126.com
|
0e534268fd1c901e0b8d1118a09ec70546910e4d
|
fc772efe3eccb65e4e4a8da7f2b2897586b6a0e8
|
/Controller/glance/registry/client/v2/api.py
|
9d3854fd49122f539d7ee02cd2e0203bbd79972e
|
[] |
no_license
|
iphonestack/Openstack_Kilo
|
9ae12505cf201839631a68c9ab4c041f737c1c19
|
b0ac29ddcf24ea258ee893daf22879cff4d03c1f
|
refs/heads/master
| 2021-06-10T23:16:48.372132
| 2016-04-18T07:25:40
| 2016-04-18T07:25:40
| 56,471,076
| 0
| 2
| null | 2020-07-24T02:17:46
| 2016-04-18T02:32:43
|
Python
|
UTF-8
|
Python
| false
| false
| 3,640
|
py
|
# Copyright 2013 Red Hat, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Registry's Client V2
"""
import os
from oslo.config import cfg
from glance.common import exception
from glance import i18n
import glance.openstack.common.log as logging
from glance.registry.client.v2 import client
LOG = logging.getLogger(__name__)
_ = i18n._
CONF = cfg.CONF
_registry_client = 'glance.registry.client'
CONF.import_opt('registry_client_protocol', _registry_client)
CONF.import_opt('registry_client_key_file', _registry_client)
CONF.import_opt('registry_client_cert_file', _registry_client)
CONF.import_opt('registry_client_ca_file', _registry_client)
CONF.import_opt('registry_client_insecure', _registry_client)
CONF.import_opt('registry_client_timeout', _registry_client)
CONF.import_opt('use_user_token', _registry_client)
CONF.import_opt('admin_user', _registry_client)
CONF.import_opt('admin_password', _registry_client)
CONF.import_opt('admin_tenant_name', _registry_client)
CONF.import_opt('auth_url', _registry_client)
CONF.import_opt('auth_strategy', _registry_client)
CONF.import_opt('auth_region', _registry_client)
_CLIENT_CREDS = None
_CLIENT_HOST = None
_CLIENT_PORT = None
_CLIENT_KWARGS = {}
def configure_registry_client():
"""
Sets up a registry client for use in registry lookups
"""
global _CLIENT_KWARGS, _CLIENT_HOST, _CLIENT_PORT
try:
host, port = CONF.registry_host, CONF.registry_port
except cfg.ConfigFileValueError:
msg = _("Configuration option was not valid")
LOG.error(msg)
raise exception.BadRegistryConnectionConfiguration(msg)
except IndexError:
msg = _("Could not find required configuration option")
LOG.error(msg)
raise exception.BadRegistryConnectionConfiguration(msg)
_CLIENT_HOST = host
_CLIENT_PORT = port
_CLIENT_KWARGS = {
'use_ssl': CONF.registry_client_protocol.lower() == 'https',
'key_file': CONF.registry_client_key_file,
'cert_file': CONF.registry_client_cert_file,
'ca_file': CONF.registry_client_ca_file,
'insecure': CONF.registry_client_insecure,
'timeout': CONF.registry_client_timeout,
}
if not CONF.use_user_token:
configure_registry_admin_creds()
def configure_registry_admin_creds():
global _CLIENT_CREDS
if CONF.auth_url or os.getenv('OS_AUTH_URL'):
strategy = 'keystone'
else:
strategy = CONF.auth_strategy
_CLIENT_CREDS = {
'user': CONF.admin_user,
'password': CONF.admin_password,
'username': CONF.admin_user,
'tenant': CONF.admin_tenant_name,
'auth_url': CONF.auth_url,
'strategy': strategy,
'region': CONF.auth_region,
}
def get_registry_client(cxt):
global _CLIENT_CREDS, _CLIENT_KWARGS, _CLIENT_HOST, _CLIENT_PORT
kwargs = _CLIENT_KWARGS.copy()
if CONF.use_user_token:
kwargs['auth_token'] = cxt.auth_token
if _CLIENT_CREDS:
kwargs['creds'] = _CLIENT_CREDS
return client.RegistryClient(_CLIENT_HOST, _CLIENT_PORT, **kwargs)
|
[
"wwang@linx-info.com"
] |
wwang@linx-info.com
|
94da3b5459ec2a866efc488c0c16c7c18ce2774a
|
b76116a922d8fc33c610738a59919669bf85535a
|
/Roam Research (Second Brain)/todoist_activity.py
|
4a105696f715ec16acdc4c4096d39697d459766d
|
[] |
no_license
|
hjneuhaus/eyecreality-public
|
05505fea202b64abbbf2f1cb2d8fb155acdd5441
|
388b3c8a17abb663de472c192958b491fa41bbe4
|
refs/heads/main
| 2023-05-27T08:43:17.619209
| 2021-05-25T21:43:36
| 2021-05-25T21:43:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,305
|
py
|
from todoist.api import TodoistAPI
import json
import matplotlib.pyplot as plt
import config
from datetime import datetime
api = TodoistAPI(config.todoistAPI)
outputJSON = api.activity.get(page=1,limit=100)
outputDirectory = "Roam Research (Second Brain)/Roam Import/"
projects = []
dates = []
project_counts = []
all_activity = []
complete = []
api.sync()
#Creating a Callable Function to import Tasks
def contentToRoam(data):
export = ""
for d in data:
dateFormatted = custom_strftime('[[%B {S}, %Y]]', datetime.strptime(d["date"], '%Y-%m-%d'))
export = export + f"- {dateFormatted}\n"
for t in d["proj_tasks"]:
export = export + f" - [[{t['project']}]]\n"
for st in t["tasks"]:
doneCheckbox = "{{DONE}}"
export = export + f" - {doneCheckbox} {st} \n"
outputFile = open(outputDirectory+"Todoist Todos.md", "w")
outputFile.write(export)
outputFile.close()
# Helper functions for formatting dates for Roam
def suffix(d):
return 'th' if 11<=d<=13 else {1: 'st', 2: 'nd', 3: 'rd'}.get(d%10, 'th')
def custom_strftime(format, t):
return t.strftime(format).replace('{S}', str(t.day) + suffix(t.day))
#Beginning of Code
for p in api.state['projects']:
projName = p['name']
# Substituting ProjectNames for More Readable Resources
    # projectSubstitution = [{"old": "OriginalProjectName", "new": "ReadableName"}, ...]
for re in config.projectSubstitution:
if re["old"] == projName:
projName = re["new"]
proj = {"id": p["id"], "name": projName}
projects.append(proj)
#Get All Activity
for x in outputJSON["events"]:
print(x)
all_activity.append({"project": x['parent_project_id'], "event_type": x['event_type'], "id": x['object_id'], "eventDate": x['event_date'][0:10], "task": x['extra_data']['content'] })
#Build Content for Graphs and Exports
for ip in projects:
added_count = 0
updated_count = 0
completed_count = 0
updated_items = []
for aa in all_activity:
if aa["project"] == ip["id"]:
if aa["event_type"] == "added":
added_count = added_count + 1
if aa["event_type"] == "updated":
updated_count = updated_count + 1
updated_items.append(aa["id"])
if aa["event_type"] == "completed":
completed_count = completed_count + 1
dates.append(aa["eventDate"][0:10])
event = {"date": aa['eventDate'][0:10], "name": aa['task'], "project":aa['project'], "id": aa['id'] }
complete.append(event)
#Only Adding Projects that have Item Counts
if added_count != 0 and updated_count != 0 and completed_count != 0:
project_counts.append({"project": ip["name"], "added_items": added_count, "updated_count": updated_count, "completed_count": completed_count, "updated_items": updated_items })
#MatPlotLib Creation
barChartAxes =[]
barChartCompletedValues = []
barChartUniqueUpdatedCount = []
dateAxes = []
dateCompletedValues=[]
for pp in project_counts:
barChartAxes.append(pp["project"])
barChartCompletedValues.append(pp["completed_count"])
barChartUniqueUpdatedCount.append(len(set(pp["updated_items"])))
#print(pp["project"] + "-" + str(pp["updated_items"]) + " - " + str(len(set(pp["updated_items"]))))
fig, ax = plt.subplots()
ax.bar(barChartAxes, barChartCompletedValues, .35, label='Completed Tasks')
ax.bar(barChartAxes, barChartUniqueUpdatedCount, .35, bottom=barChartCompletedValues, label='Unique Delayed Tasks')
ax.set_title('Velocity for Week')
ax.legend()
plt.savefig('Roam Research (Second Brain)/Roam Import/todoist_barchart.png')
#Building Content for Roam
todoExport = []
for d in sorted(set(dates)):
print(d)
publish =[]
for p in projects:
roamProject = p["name"]
taskList = []
for c in complete:
if p["id"] == c["project"] and c["date"] == d:
taskList.append(c["name"])
if len(taskList) > 0:
publish.append({"project" : roamProject, "tasks": taskList})
todoExport.append({"date": d, "proj_tasks" : publish})
contentToRoam(todoExport)
|
[
"andrewcrider@brillada.com"
] |
andrewcrider@brillada.com
|
a5cac3550aff9d6295a3027084f78fff9f8ac6f0
|
94dd6607af2184dce5115fcdce2b0b330cad052f
|
/regatta.py
|
1f918022a8df2d332c8ccc6397c5da1d4a56fe10
|
[] |
no_license
|
Already-inst/testA
|
02dfc1df5f750d5552f55a673af2f79f4ac9d1f5
|
91c83a4a2bb6d4d7c872dbf8dd4103952020b75c
|
refs/heads/master
| 2022-11-24T03:59:07.394474
| 2022-11-09T11:52:42
| 2022-11-09T11:52:42
| 240,494,743
| 0
| 0
| null | 2020-02-20T14:09:13
| 2020-02-14T11:36:40
|
Ruby
|
UTF-8
|
Python
| false
| false
| 54
|
py
|
SLACK_TOKEN_LAC = "xoxb-54123987-asdfglttzzekhrnemo"
|
[
"elacaille0+restricted@gmail.com"
] |
elacaille0+restricted@gmail.com
|
c8dbdaceaddbb40c066ca040a9c2ee8c28e4fa69
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/196/usersdata/265/78476/submittedfiles/atividade.py
|
3765fcc073afbf3178b15cab93d01bb6cdb42c43
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 185
|
py
|
# -*- coding: utf-8 -*-
import math
n=int(input('digite o valor de n: '))
i=1
soma=0
if (n<0):
n=n*(-1)
else:
n=n
while (i<n):
soma=(i/(n))
n=n-1
i=i+1
print(soma)
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
b57e43d52ac04a1a6aafbe6d416506910a590a71
|
37e87b3d5e1ee9009f0ea0671bc0c6edf0e233b7
|
/787_3.py
|
194ee7b81f126d774550c6d0683ab09e69646e87
|
[] |
no_license
|
Jane11111/Leetcode2021
|
d9f4987792938597bf89ff72ba6bbcb4a3f9d081
|
a95b871578aae0103066962c33b8c0f4ec22d0f2
|
refs/heads/master
| 2023-07-14T21:29:41.196752
| 2021-08-23T03:28:02
| 2021-08-23T03:28:02
| 344,804,297
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 718
|
py
|
# -*- coding: utf-8 -*-
# @Time : 2021-08-15 10:49
# @Author : zxl
# @FileName: 787_3.py
class Solution:
def findCheapestPrice(self, n: int, flights , src: int, dst: int, k: int) -> int:
dis = [float('inf') for i in range(n)]
dis[src] = 0
while k+1:
old_dis = [item for item in dis]
for f,t,p in flights:
if dis[f]!=float('inf'):
dis[t] = min(old_dis[f]+p,dis[t])
k -=1
if dis[dst] == float('inf'):
return -1
return int(dis[dst])
obj = Solution()
n = 4
flights = [[0,1,1],[0,2,5],[1,2,1],[2,3,1]]
src = 0
dst = 3
k = 1
ans = obj.findCheapestPrice(n,flights,src,dst,k)
print(ans)
|
[
"791057615@qq.com"
] |
791057615@qq.com
|
aebbed9f4be141876d5534da14664d51688d3af2
|
d6ad73da7d5087882aea80f500057e7201675a94
|
/10966_물놀이를가자/s1.py
|
a3e6f6322a60c41468b0f0a39344cd6aced85663
|
[] |
no_license
|
Ysh096/swea
|
8bcd6038c96705f8a8d83bb5391d5f061bf98e52
|
3d018e5d3e6cea69268a025a117542f9e4a96702
|
refs/heads/master
| 2023-04-15T05:17:52.933799
| 2021-04-22T11:44:00
| 2021-04-22T11:44:00
| 340,567,861
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,054
|
py
|
import sys
from collections import deque
sys.stdin = open('input.txt')
def bfs():
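    # Multi-source BFS: start from every water cell at distance 0 and expand outwards,
    # so each land cell gets its distance to the nearest water; the answer is the sum
    # of those distances over all reachable land cells.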
result = 0
deq = deque(w_pos)
while deq:
i, j = deq.popleft()
for k in range(4):
nr = i + dr[k]
nc = j + dc[k]
if not (0 <= nr < N and 0 <= nc < M):
continue
# 이동한 곳이 땅이고 방문하지 않았으면
if visited[nr][nc] < 0:
visited[nr][nc] = visited[i][j] + 1
result += visited[nr][nc]
deq.append((nr, nc))
return result
T = int(input())
for tc in range(1, T+1):
N, M = map(int, input().split())
visited = [[-1] * M for _ in range(N)]
jido = []
w_pos = []
for i in range(N):
temp = list(input())
jido.append(temp)
for j in range(M):
if temp[j] == 'W':
w_pos.append((i, j))
visited[i][j] = 0 # 물임을 표시
dr = [-1, 1, 0, 0]
dc = [0, 0, -1, 1]
result = bfs()
print('#{} {}'.format(tc, result))
|
[
"skk7541@gmail.com"
] |
skk7541@gmail.com
|
ee5739a0303713f51d6c14a2cd5201480f0e6f37
|
7b51d315d9d60f6c1ec0bad7caf4de1c690bae61
|
/stanford-nlp-uses/pos-tagging.py
|
35ab6c8ca6c4973caf5f4f01ec24a0aeeb99e7f7
|
[] |
no_license
|
ritamnrg/absa
|
1758b3f847ca4cf8ee97661f738105994b9ce691
|
503cd89a5ff70cbd5ef738aed56d7cf4bab2d061
|
refs/heads/master
| 2021-04-26T23:28:28.670229
| 2018-03-07T04:41:47
| 2018-03-07T04:41:47
| 124,001,080
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 704
|
py
|
# -*- coding: utf-8 -*-
from stanfordcorenlp import StanfordCoreNLP
nlp = StanfordCoreNLP("stanford-corenlp-full-2018-01-31")
# res = nlp.annotate("I love you. I hate him. You are nice. He is dumb",
# properties={
# 'annotators': 'sentiment',
# 'outputFormat': 'json',
# 'timeout': 1000,
# })
# for s in res["sentences"]:
# print ("%d: '%s': %s %s" % (s["index"]," ".join([t["word"] for t in s["tokens"]]), s["sentimentValue"], s["sentiment"]))
sentence = 'Guangdong University of Foreign Studies is located in Guangzhou.'
print(nlp.pos_tag(sentence))
print(nlp.dependency_parse(sentence))
nlp.close()  # release the connection to the CoreNLP server when finished
|
[
"if414007@students.del.ac.id"
] |
if414007@students.del.ac.id
|
5ea906c630642fc75e024f7419a93c0ffdfcec28
|
dca0254ddb5681d9ed8903d71f5d1cfec6cfd3f0
|
/hw4/task4_main.py
|
72df1239971428480fad8fea13c7647f8865bcb3
|
[] |
no_license
|
HectorLector/nn17
|
7d54a5e35d4e9b34d895ab135d1d3f24929bf601
|
f5f038cf8e0701cb97d8ca34c656816bf0eae211
|
refs/heads/master
| 2021-09-03T17:36:43.206153
| 2018-01-10T19:11:29
| 2018-01-10T19:11:29
| 109,736,230
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,118
|
py
|
#!/usr/bin/env python3
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from task4_data import generate_data
tf.reset_default_graph() # for iPython convenience
# Placeholder input and hidden size (assumed values, not given in this snippet,
# added so it runs on its own)
num_hidden = 32
X = tf.placeholder(tf.float32, [None, None, 1])  # (batch, time, features)
# how to define recurrent layers in tensorflow:
cell_type = 'simple'
#cell_type = 'gru'
#cell_type = 'lstm'
# define recurrent layer
if cell_type == 'simple':
cell = tf.nn.rnn_cell.BasicRNNCell(num_hidden)
elif cell_type == 'lstm':
cell = tf.nn.rnn_cell.LSTMCell(num_hidden)
elif cell_type == 'gru':
cell = tf.nn.rnn_cell.GRUCell(num_hidden)
else:
raise ValueError('bad cell type.')
# wrap this layer in a recurrent neural network
outputs, states = tf.nn.dynamic_rnn(cell, X, dtype=tf.float32)
# notes:
# - this tensor is unrolled in time (it contains the hidden states of all time
# points)
# - the recurrent weights are encapsuled, we do not need to define them
# - you should only use the outputs (not the hidden states) for creating the
# output neuron
# get the unit outputs at the last time step
last_outputs = outputs[:,-1,:]
# use on top of this a sigmoid neuron for sequence classification
# ...
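# A possible sketch of that final layer (an assumption, not the original solution):
# a single sigmoid unit on top of the last time step's outputs.
#
#   y = tf.placeholder(tf.float32, [None, 1])
#   logits = tf.layers.dense(last_outputs, 1)
#   prediction = tf.nn.sigmoid(logits)
#   loss = tf.reduce_mean(
#       tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=logits))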
|
[
"d.seywald@student.tugraz.at"
] |
d.seywald@student.tugraz.at
|
53c433d0713154b6c2857dacae4582305191deec
|
3a6e71c36e7d5a5b0b2e0d4aecb8de7c743efc5f
|
/url.py
|
52e84ac36917fc112a64513e50d5d4b0b68dbc6e
|
[] |
no_license
|
woowooh/12306auto
|
3c5be8e988d0ded65e18b69798a457c0cc64d869
|
ec31ea64136ce5cea35e861c61580e415f0a223b
|
refs/heads/master
| 2021-01-25T14:32:30.610168
| 2018-03-03T15:14:19
| 2018-03-03T15:14:19
| 123,702,514
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 43
|
py
|
https://kyfw.12306.cn/otn/leftTicket/query?
|
[
"woowooh@outlook.com"
] |
woowooh@outlook.com
|
ee0788af1f5d22772e783da7a2f03386ccdfa5cf
|
f061e90f3488b33608e672d5746fb5c5e10f4a2d
|
/weiboscrapy-1.1.0/mian.py
|
e88d774eb6748d922bc8fa4c58c7ca9c3a9559fc
|
[] |
no_license
|
Asenli/spider
|
617200502830a06782d88ab5b6cfcd7272532acb
|
753b15fdcbd3ca92e274da7c6df7d0bfae898d86
|
refs/heads/master
| 2020-03-22T12:44:55.140658
| 2018-08-12T01:57:20
| 2018-08-12T01:57:20
| 140,059,670
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 74
|
py
|
from scrapy import cmdline
cmdline.execute(['scrapy','crawl', 'weibocn'])
|
[
"634163114@qq.com"
] |
634163114@qq.com
|
5d524861359bf7aa6efd4cb5039526f1713f9996
|
c5746efe18a5406764c041d149d89c0e0564c5a5
|
/4. Python 爬虫工程师/1. 爬虫/Day7/selenium_qqmail.py
|
9e1ae747143cf548d202b8c5801fb50b6563f1ef
|
[] |
no_license
|
ShaoxiongYuan/PycharmProjects
|
fc7d9eeaf833d3711211cd2fafb81dd277d4e4a3
|
5111d4c0a7644c246f96e2d038c1a10b0648e4bf
|
refs/heads/master
| 2021-12-15T05:45:42.117000
| 2021-11-23T06:45:16
| 2021-11-23T06:45:16
| 241,294,858
| 3
| 1
| null | 2021-02-20T15:29:07
| 2020-02-18T07:06:08
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 454
|
py
|
from selenium import webdriver
driver = webdriver.Chrome()
driver.maximize_window()
driver.get('https://mail.qq.com')
i_node = driver.find_element_by_xpath('//*[@id="login_frame"]')
driver.switch_to.frame(i_node)
driver.find_element_by_xpath('//*[@id="switcher_plogin"]').click()
driver.find_element_by_id('u').send_keys('2379773801@qq.com')
driver.find_element_by_id('p').send_keys('65460035maomao')
driver.find_element_by_id('login_button').click()
|
[
"stevenpp123@sina.com"
] |
stevenpp123@sina.com
|
0eed086909dfa9ba0e05bf6c5182a0d96d5ea294
|
9d1a59da79df4eec9fd0abdc7bf00b8ff14e0111
|
/Part2/part2.py
|
d95a054f6c3e898035dcd3e4f94db8fa354c19be
|
[] |
no_license
|
2019-bgmp/demultiplexing-anniewly
|
f3c1fc1552e5d003ae30fb1c5539f096380a7f08
|
0297d4925619b2400b5faa00bc88ba1feb2aa674
|
refs/heads/master
| 2020-08-06T06:40:08.477702
| 2019-11-12T09:53:24
| 2019-11-12T09:53:24
| 212,875,403
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,346
|
py
|
import numpy as np
import gzip
def convert_phred(string):
"""Converts a string of characters into phred scores"""
return [ord(x)-33 for x in string]
complement={"A":"T","T":"A","G":"C","C":"G","N":"N"}
def rev_complement(string):
return ''.join([complement[x] for x in string[::-1]])
def cutoff(q1,q2,q3,q4):
"""Return True if the average score of the biological reads is below 25 or if any base of the indexes is below 10"""
if np.mean(convert_phred(q1))<25:
return True
if np.mean(convert_phred(q4))<25:
return True
if min(convert_phred(q2)) <10:
return True
if min(convert_phred(q3)) <10:
return True
return False
def check_match(r2,r3):
"""Return True if two things are the same otherwise return false"""
if r2 == r3:
return True
return False
## Read in indexes
barcodes=[]
with open('/projects/bgmp/shared/2017_sequencing/indexes.txt') as fb:
[barcodes.append(x.split()[4])for x in fb.readlines()[1:]]
print(barcodes)
#open all file that is going to be written in and make a dictionary
#dictionary that has barcodes as key and file as value
## start a dictionary to count reads for each index
filedict_R1={}
filedict_R2={}
barcounts = {}
for i in barcodes:
filedict_R1.update({i:open("R1_"+str(i)+".fq","a")})
filedict_R2.update({i:open("R2_"+str(i)+".fq","a")})
barcounts.update({i:0})
f1= open("unknown_R1.fq","a")
f2= open("unknown_R2.fq","a")
f3= open("hopped_R1.fq","a")
f4= open("hopped_R2.fq","a")
## start a counter for unknow, matched and hopped reads
counter_unknow = 0
counter_matched = 0
counter_hopped = 0
## Read in files
filenames = ['/projects/bgmp/shared/2017_sequencing/1294_S1_L008_R1_001.fastq.gz','/projects/bgmp/shared/2017_sequencing/1294_S1_L008_R2_001.fastq.gz','/projects/bgmp/shared/2017_sequencing/1294_S1_L008_R3_001.fastq.gz','/projects/bgmp/shared/2017_sequencing/1294_S1_L008_R4_001.fastq.gz']
with gzip.open(filenames[0],'rt') as R1, gzip.open(filenames[1],'rt') as R2, gzip.open(filenames[2],'rt') as R3, gzip.open(filenames[3],'rt') as R4:
##
while True:
record_R1=[]
record_R2=[]
record_R3=[]
record_R4=[]
for i in range(4):
record_R1 = np.append(record_R1,R1.readline().splitlines() )
record_R2 = np.append(record_R2,R2.readline().splitlines() )
record_R3 = np.append(record_R3,R3.readline().splitlines() )
record_R4 = np.append(record_R4,R4.readline().splitlines() )
        if len(record_R1) < 4 or record_R1[0] == '':
            break
record_R3[1] = rev_complement(record_R3[1])
## Check if indexes are one of our indexes, otherwise output in unknown
if record_R2[1] in barcodes and record_R3[1] in barcodes:
## quality check
if cutoff(record_R1[3],record_R2[3],record_R3[3],record_R4[3]):
record_R1[0] += str(record_R2[1])+ '_' + str(record_R3[1])
record_R4[0] += str(record_R2[1])+ '_' + str(record_R3[1])
# f1= open("unknown_R1.fq","a")
f1.write('\n'.join((record_R1))+'\n')
# f1.close()
# f2= open("unknown_R2.fq","a")
f2.write('\n'.join((record_R4))+'\n')
# f2.close()
counter_unknow+=1
## match check
elif check_match(record_R2[1],record_R3[1]):
record_R1[0] += str(record_R2[1])+ '_' + str(record_R3[1])
record_R4[0] += str(record_R2[1])+ '_' + str(record_R3[1])
# f1= open("R1_"+str(record_R2[1])+".fq","a")
filedict_R1[record_R2[1]].write('\n'.join(record_R1)+'\n')
# f1.close()
# f2= open("R2_"+str(record_R3[1])+".fq","a")
filedict_R2[record_R3[1]].write('\n'.join(record_R4)+'\n')
# f2.close()
counter_matched+=1
barcounts[record_R2[1]]+=1
## the rest is hopped reads
else:
record_R1[0] += str(record_R2[1])+ '_' + str(record_R3[1])
record_R4[0] += str(record_R2[1])+ '_' + str(record_R3[1])
# f1= open("hopped_R1.fq","a")
f3.write('\n'.join((record_R1))+'\n')
# f3.close()
# f2= open("hopped_R2.fq","a")
f4.write('\n'.join((record_R4))+'\n')
# f2.close()
counter_hopped+=1
else:
record_R1[0] += str(record_R2[1])+ '_' + str(record_R3[1])
record_R4[0] += str(record_R2[1])+ '_' + str(record_R3[1])
# print(record_R1)
# f1= open("unknown_R1.fq","a")
f1.write('\n'.join(record_R1)+'\n')
# f1.close()
# f2= open("unknown_R2.fq","a")
f2.write('\n'.join(record_R4)+'\n')
# f2.close()
counter_unknow+=1
print("The number of unknow reads is " + str(counter_unknow))
print("The number of matched reads is " + str(counter_matched))
print("The number of hopped reads is " + str(counter_hopped))
for i in barcodes:
print("The percentage of " + str(i) + "is" + str(barcounts[i]/counter_matched))
#Percentage of reads from each sample
#Overall amount of index swapping
#Any figures/any other relevant data your code output
|
[
"annie@Annies-MacBook.local"
] |
annie@Annies-MacBook.local
|
2bebdc36e6a65509c698c83f4d0b5a1a4db2c21a
|
255e19ddc1bcde0d3d4fe70e01cec9bb724979c9
|
/dockerized-gists/36834b68b7b4d6366f38/snippet.py
|
1d9db68c22a47a3e4ee4b7af2bcab5032498c11c
|
[
"MIT"
] |
permissive
|
gistable/gistable
|
26c1e909928ec463026811f69b61619b62f14721
|
665d39a2bd82543d5196555f0801ef8fd4a3ee48
|
refs/heads/master
| 2023-02-17T21:33:55.558398
| 2023-02-11T18:20:10
| 2023-02-11T18:20:10
| 119,861,038
| 76
| 19
| null | 2020-07-26T03:14:55
| 2018-02-01T16:19:24
|
Python
|
UTF-8
|
Python
| false
| false
| 3,281
|
py
|
#!/usr/bin/env python
import sqlite3
import threading
from time import time, sleep, gmtime, strftime
import serial
import requests
# global variables
# sqlite database location
dbname = 'templog.db'
# serial device
DEVICE = '/dev/ttyAMA0'
BAUD = 9600
ser = serial.Serial(DEVICE, BAUD)
# timeout in seconds for waiting to read temperature from sensors
TIMEOUT = 30
# weather underground data
WUKEY = ''
STATION = ''
# time between weather underground samples in seconds
SAMPLE = 30 * 60
def log_temperature(temp):
"""
Store the temperature in the database.
"""
conn = sqlite3.connect(dbname)
curs = conn.cursor()
curs.execute("INSERT INTO temps values(datetime('now', 'localtime'), '{0}', '{1}' )".format(temp['temperature'], temp['id']))
conn.commit()
conn.close()
def get_temp():
"""
Retrieves the temperature from the sensor.
Returns -100 on error, or the temperature as a float.
"""
global ser
tempvalue = -100
deviceid = '??'
voltage = 0
fim = time() + TIMEOUT
while (time() < fim) and (tempvalue == -100):
n = ser.inWaiting()
if n != 0:
data = ser.read(n)
            nb_msg = len(data) // 12  # each sensor message is 12 bytes
for i in range(0, nb_msg):
msg = data[i*12:(i+1)*12]
deviceid = msg[1:3]
if msg[3:7] == "TMPA":
tempvalue = msg[7:]
if msg[3:7] == "BATT":
voltage = msg[7:11]
if voltage == "LOW":
voltage = 0
else:
sleep(5)
return {'temperature':tempvalue, 'id':deviceid}
def get_temp_wu():
"""
Retrieves temperature(s) from weather underground (wu) and stores it to the database
"""
try:
conn = sqlite3.connect(dbname)
curs = conn.cursor()
query = "SELECT baudrate, port, id, active FROM sensors WHERE id like 'W_'"
curs.execute(query)
rows = curs.fetchall()
#print(rows)
conn.close()
if rows != None:
for row in rows[:]:
WUKEY = row[1]
STATION = row[0]
if int(row[3]) > 0:
try:
url = "http://api.wunderground.com/api/{0}/conditions/q/{1}.json".format(WUKEY, STATION)
r = requests.get(url)
data = r.json()
log_temperature({'temperature': data['current_observation']['temp_c'], 'id': row[2]})
except Exception as e:
raise
except Exception as e:
text_file = open("debug.txt", "a+")
text_file.write("{0} ERROR:\n{1}\n".format(strftime("%Y-%m-%d %H:%M:%S", gmtime()), str(e)))
text_file.close()
def main():
"""
Program starts here.
"""
get_temp_wu()
t = threading.Timer(SAMPLE, get_temp_wu)
t.start()
while True:
temperature = get_temp()
if temperature['temperature'] != -100:
log_temperature(temperature)
if t.is_alive() == False:
t = threading.Timer(SAMPLE, get_temp_wu)
t.start()
if __name__ == "__main__":
main()
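# Note: the script assumes a pre-existing SQLite schema along these lines
# (inferred from the queries above, not part of the original gist):
#
#   CREATE TABLE temps   (timestamp TEXT, temperature TEXT, id TEXT);
#   CREATE TABLE sensors (baudrate TEXT, port TEXT, id TEXT, active INTEGER);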
|
[
"gistshub@gmail.com"
] |
gistshub@gmail.com
|
8080b7c3c35e7127dba17d798bcb4d651b39886c
|
18693f41ef3c8600db5c6ffd35efbabd257cbbe3
|
/evaluation/trajectory/kitti-01/convert.py
|
343147f4fc8724420296116690d011e4ff1bcc7a
|
[] |
no_license
|
QinHarry/my_imu_slam
|
d1edb20cc264de1679143befda9a5ed30e2c5526
|
9ee6ac86e0363622756981fd51836be17d3eba5c
|
refs/heads/master
| 2020-07-30T03:19:07.559623
| 2019-11-04T00:04:57
| 2019-11-04T00:04:57
| 210,068,280
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 835
|
py
|
orb = []
gt = []
vins = []
with open('vins.kitti', 'r') as f:
vins = f.readlines()
with open('01.txt', 'r') as f:
gt = f.readlines()
with open('kitti-orb-origin.kitti', 'r') as f:
orb = f.readlines()
m = min(len(vins), len(gt), len(orb))
rate1 = 1.0 * len(vins) / m
rate2 = 1.0 * len(gt) / m
rate3 = 1.0 * len(orb) / m
orbStr = ''
gtStr = ''
vinsStr = ''
for i in range(m):
index = round(i * rate1)
if index < len(vins):
vinsStr = vinsStr + vins[index]
index = round(i * rate2)
if index < len(gt):
gtStr = gtStr + gt[index]
index = round(i * rate3)
if index < len(orb):
orbStr = orbStr + orb[index]
with open('vins-new.kitti', 'w') as f:
f.write(vinsStr)
with open('gt.kitti', 'w') as f:
f.write(gtStr)
with open('orb.kitti', 'w') as f:
f.write(orbStr)
|
[
"282588926@qq.com"
] |
282588926@qq.com
|
9050ae88bb42afcba500311172f766cff4c50560
|
17224a0e717291c31b5ec454dea5a27d4d1c6602
|
/feeder.py
|
a4313d67cd6abe2428e29f07334016104b8b4619
|
[] |
no_license
|
junggil/newswall
|
4ef6059750ddd78d5ac4f763d14225b3346f358a
|
057d9c307eeecde4c4455bb70f937fc91663f7dc
|
refs/heads/master
| 2021-05-26T21:10:10.494209
| 2011-09-08T00:58:39
| 2011-09-08T00:58:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,015
|
py
|
# -*- coding: utf-8 -*-
import feedparser
from random import choice
from pickle import dump, load
from time import time, localtime, asctime
class GoogleNews(object):
TEMPLATE = 'http://news.google.com/news?pz=1&cf=all&ned=%(territory)s&hl=%(language)s&topic=%(topic)s&output=rss'
FEED = {
'ko' : {
'ned' : 'kr',
'topics' : {
'w' : u'국제',
'b' : u'경제',
'y' : u'사회',
'l' : u'문화/생활',
'p' : u'정치',
't' : u'정보과학',
'e' : u'연예',
's' : u'스포츠',
'po' : u'인기뉴스',
},
'lang' : u'한국어'
},
'us' : {
'ned' : 'en',
'topics' : {
'w' : u'WORLD',
'm' : u'HEALTH',
'b' : u'BUSINESS',
'e' : u'ENTERTAIN',
'tc' : u'TECHNOLOGY',
'snc' : u'SCIENCE',
's' : u'SPORTS',
'ir' : u'SPOTLIGHT',
},
'lang' : u'English'
},
'zh-CN' : {
'ned' : 'cn',
'topics' : {
'b' : u'财经',
'y' : u'社会',
'w' : u'国际/港台',
't' : u'科技',
'e' : u'娱乐',
's' : u'体育',
'po' : u'热门报道',
},
'lang' : u'中文'
}
}
def __init__(self, locale):
self.locale = locale
def checkPickledFeed(self):
return False
def getRSS(self, topic):
if not self.checkPickledFeed():
return feedparser.parse(self.TEMPLATE % {'topic' : topic,
'territory' : self.FEED[self.locale]['ned'],
'language' : self.locale,
}).entries
def getAllTopics(self):
return self.FEED[self.locale]['topics']
@classmethod
def getAllLocales(self):
return self.FEED.keys()
@classmethod
def getLangFromLocale(cls, locale):
return cls.FEED[locale]['lang']
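# Quick usage sketch (not part of the original module): fetch and count the
# entries for every topic of the US edition.
if __name__ == '__main__':
    news = GoogleNews('us')
    for topic_id, topic_name in news.getAllTopics().items():
        entries = news.getRSS(topic_id)
        print(u'{}: {} entries'.format(topic_name, len(entries)))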
|
[
"sooshia@gmail.com"
] |
sooshia@gmail.com
|
f774dd4085a2eb26479a0d5c41795e3381d0bc21
|
70e97bd9ff4e09f740ec666fbf5ae09a93b1a12e
|
/UDP/UDP_Server.py
|
74e01e86ba6aafd8e69f01ee93708d001985b9a5
|
[] |
no_license
|
ayman-elkassas/Practical-Own
|
c241c8b5b6b4195d66a8bb47d0c753b84c05d27b
|
b303c6cc9c661362e5ba7cdc12a86c63d94d80da
|
refs/heads/master
| 2021-01-09T06:33:55.362618
| 2017-02-05T18:05:28
| 2017-02-05T18:05:28
| 81,011,019
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 264
|
py
|
import socket
# determine IP type (IPv4, IPv6) and transmission protocol type (TCP or UDP)
sock=socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
sock.bind(("127.0.0.1",3000)) #bind means join or connect
data,address=sock.recvfrom(1024)
text = data.decode('ascii')
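# A matching client sketch (an assumption, not part of this file) to exercise
# the server above from another process:
#
#   import socket
#   client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#   client.sendto("hello".encode('ascii'), ("127.0.0.1", 3000))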
|
[
"aymanelkassas88@gmail.com"
] |
aymanelkassas88@gmail.com
|
8c49138d00e10f5b67630ddf9bf794ad29d79d14
|
9a30ca725bdec0795093f5318eb28564afe88ffd
|
/sawtooth-module/validator/sawtooth_validator/server/core.py
|
7ead70e9ab2e303bfc58bee4a1614078ae46d12c
|
[
"CC-BY-4.0",
"Apache-2.0",
"Zlib",
"MIT"
] |
permissive
|
cylon56/Cross-chain-Cryptoasset
|
7d8b62cf4f2f4ca8150315d060048e7206700619
|
26d13ebeb16ec2b64cace31a61e265a2f3165557
|
refs/heads/master
| 2020-03-24T22:40:18.597344
| 2018-08-06T20:21:43
| 2018-08-06T20:21:43
| 143,096,979
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 19,287
|
py
|
# Copyright 2016, 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
import hashlib
import logging
import os
import signal
import time
import threading
from sawtooth_validator.concurrent.threadpool import \
InstrumentedThreadPoolExecutor
from sawtooth_validator.execution.context_manager import ContextManager
from sawtooth_validator.consensus.notifier import ConsensusNotifier
from sawtooth_validator.consensus.proxy import ConsensusProxy
from sawtooth_validator.database.indexed_database import IndexedDatabase
from sawtooth_validator.database.lmdb_nolock_database import LMDBNoLockDatabase
from sawtooth_validator.database.native_lmdb import NativeLmdbDatabase
from sawtooth_validator.journal.block_validator import BlockValidator
from sawtooth_validator.journal.publisher import BlockPublisher
from sawtooth_validator.journal.chain import ChainController
from sawtooth_validator.journal.genesis import GenesisController
from sawtooth_validator.journal.batch_sender import BroadcastBatchSender
from sawtooth_validator.journal.block_sender import BroadcastBlockSender
from sawtooth_validator.journal.block_store import BlockStore
from sawtooth_validator.journal.block_cache import BlockCache
from sawtooth_validator.journal.completer import Completer
from sawtooth_validator.journal.responder import Responder
from sawtooth_validator.journal.batch_injector import \
DefaultBatchInjectorFactory
from sawtooth_validator.networking.dispatch import Dispatcher
from sawtooth_validator.journal.chain_id_manager import ChainIdManager
from sawtooth_validator.execution.executor import TransactionExecutor
from sawtooth_validator.state.batch_tracker import BatchTracker
from sawtooth_validator.state.merkle import MerkleDatabase
from sawtooth_validator.state.settings_view import SettingsViewFactory
from sawtooth_validator.state.settings_cache import SettingsObserver
from sawtooth_validator.state.settings_cache import SettingsCache
from sawtooth_validator.state.identity_view import IdentityViewFactory
from sawtooth_validator.state.state_view import StateViewFactory
from sawtooth_validator.gossip.permission_verifier import PermissionVerifier
from sawtooth_validator.gossip.permission_verifier import IdentityCache
from sawtooth_validator.gossip.identity_observer import IdentityObserver
from sawtooth_validator.networking.interconnect import Interconnect
from sawtooth_validator.gossip.gossip import Gossip
from sawtooth_validator.server.events.broadcaster import EventBroadcaster
from sawtooth_validator.journal.receipt_store import TransactionReceiptStore
from sawtooth_validator.server import network_handlers
from sawtooth_validator.server import component_handlers
from sawtooth_validator.server import consensus_handlers
LOGGER = logging.getLogger(__name__)
class Validator:
def __init__(self,
bind_network,
bind_component,
bind_consensus,
endpoint,
peering,
seeds_list,
peer_list,
data_dir,
config_dir,
identity_signer,
scheduler_type,
permissions,
minimum_peer_connectivity,
maximum_peer_connectivity,
state_pruning_block_depth,
network_public_key=None,
network_private_key=None,
roles=None):
"""Constructs a validator instance.
Args:
bind_network (str): the network endpoint
bind_component (str): the component endpoint
endpoint (str): the zmq-style URI of this validator's
publically reachable endpoint
peering (str): The type of peering approach. Either 'static'
or 'dynamic'. In 'static' mode, no attempted topology
buildout occurs -- the validator only attempts to initiate
peering connections with endpoints specified in the
peer_list. In 'dynamic' mode, the validator will first
attempt to initiate peering connections with endpoints
specified in the peer_list and then attempt to do a
topology buildout starting with peer lists obtained from
endpoints in the seeds_list. In either mode, the validator
will accept incoming peer requests up to max_peers.
seeds_list (list of str): a list of addresses to connect
to in order to perform the initial topology buildout
peer_list (list of str): a list of peer addresses
data_dir (str): path to the data directory
config_dir (str): path to the config directory
identity_signer (str): cryptographic signer the validator uses for
signing
"""
# -- Setup Global State Database and Factory -- #
global_state_db_filename = os.path.join(
data_dir, 'merkle-{}.lmdb'.format(bind_network[-2:]))
LOGGER.debug(
'global state database file is %s', global_state_db_filename)
global_state_db = NativeLmdbDatabase(
global_state_db_filename,
indexes=MerkleDatabase.create_index_configuration())
state_view_factory = StateViewFactory(global_state_db)
# -- Setup Receipt Store -- #
receipt_db_filename = os.path.join(
data_dir, 'txn_receipts-{}.lmdb'.format(bind_network[-2:]))
LOGGER.debug('txn receipt store file is %s', receipt_db_filename)
receipt_db = LMDBNoLockDatabase(receipt_db_filename, 'c')
receipt_store = TransactionReceiptStore(receipt_db)
# -- Setup Block Store -- #
block_db_filename = os.path.join(
data_dir, 'block-{}.lmdb'.format(bind_network[-2:]))
LOGGER.debug('block store file is %s', block_db_filename)
block_db = IndexedDatabase(
block_db_filename,
BlockStore.serialize_block,
BlockStore.deserialize_block,
flag='c',
indexes=BlockStore.create_index_configuration())
block_store = BlockStore(block_db)
# The cache keep time for the journal's block cache must be greater
# than the cache keep time used by the completer.
base_keep_time = 1200
block_cache = BlockCache(
block_store,
keep_time=int(base_keep_time * 9 / 8),
purge_frequency=30)
# -- Setup Thread Pools -- #
component_thread_pool = InstrumentedThreadPoolExecutor(
max_workers=10,
name='Component')
network_thread_pool = InstrumentedThreadPoolExecutor(
max_workers=10,
name='Network')
client_thread_pool = InstrumentedThreadPoolExecutor(
max_workers=5,
name='Client')
sig_pool = InstrumentedThreadPoolExecutor(
max_workers=3,
name='Signature')
# -- Setup Dispatchers -- #
component_dispatcher = Dispatcher()
network_dispatcher = Dispatcher()
# -- Setup Services -- #
component_service = Interconnect(
bind_component,
component_dispatcher,
secured=False,
heartbeat=False,
max_incoming_connections=20,
monitor=True,
max_future_callback_workers=10)
zmq_identity = hashlib.sha512(
time.time().hex().encode()).hexdigest()[:23]
secure = False
if network_public_key is not None and network_private_key is not None:
secure = True
network_service = Interconnect(
bind_network,
dispatcher=network_dispatcher,
zmq_identity=zmq_identity,
secured=secure,
server_public_key=network_public_key,
server_private_key=network_private_key,
heartbeat=True,
public_endpoint=endpoint,
connection_timeout=120,
max_incoming_connections=100,
max_future_callback_workers=10,
authorize=True,
signer=identity_signer,
roles=roles)
# -- Setup Transaction Execution Platform -- #
context_manager = ContextManager(global_state_db)
batch_tracker = BatchTracker(block_store)
settings_cache = SettingsCache(
SettingsViewFactory(state_view_factory),
)
transaction_executor = TransactionExecutor(
service=component_service,
context_manager=context_manager,
settings_view_factory=SettingsViewFactory(state_view_factory),
scheduler_type=scheduler_type,
invalid_observers=[batch_tracker])
component_service.set_check_connections(
transaction_executor.check_connections)
event_broadcaster = EventBroadcaster(
component_service, block_store, receipt_store)
# -- Consensus Engine -- #
consensus_thread_pool = InstrumentedThreadPoolExecutor(
max_workers=3,
name='Consensus')
consensus_dispatcher = Dispatcher()
consensus_service = Interconnect(
bind_consensus,
consensus_dispatcher,
secured=False,
heartbeat=False,
max_incoming_connections=20,
monitor=True,
max_future_callback_workers=10)
consensus_notifier = ConsensusNotifier(consensus_service)
# -- Setup P2P Networking -- #
gossip = Gossip(
network_service,
settings_cache,
lambda: block_store.chain_head,
block_store.chain_head_state_root,
consensus_notifier,
endpoint=endpoint,
peering_mode=peering,
initial_seed_endpoints=seeds_list,
initial_peer_endpoints=peer_list,
minimum_peer_connectivity=minimum_peer_connectivity,
maximum_peer_connectivity=maximum_peer_connectivity,
topology_check_frequency=1
)
completer = Completer(
block_store,
gossip,
cache_keep_time=base_keep_time,
cache_purge_frequency=30,
requested_keep_time=300)
self._completer = completer
block_sender = BroadcastBlockSender(completer, gossip)
batch_sender = BroadcastBatchSender(completer, gossip)
chain_id_manager = ChainIdManager(data_dir)
identity_view_factory = IdentityViewFactory(
StateViewFactory(global_state_db))
id_cache = IdentityCache(identity_view_factory)
# -- Setup Permissioning -- #
permission_verifier = PermissionVerifier(
permissions,
block_store.chain_head_state_root,
id_cache)
identity_observer = IdentityObserver(
to_update=id_cache.invalidate,
forked=id_cache.forked)
settings_observer = SettingsObserver(
to_update=settings_cache.invalidate,
forked=settings_cache.forked)
# -- Setup Journal -- #
batch_injector_factory = DefaultBatchInjectorFactory(
block_cache=block_cache,
state_view_factory=state_view_factory,
signer=identity_signer)
block_publisher = BlockPublisher(
transaction_executor=transaction_executor,
block_cache=block_cache,
state_view_factory=state_view_factory,
settings_cache=settings_cache,
block_sender=block_sender,
batch_sender=batch_sender,
chain_head=block_store.chain_head,
identity_signer=identity_signer,
data_dir=data_dir,
config_dir=config_dir,
permission_verifier=permission_verifier,
batch_observers=[batch_tracker],
batch_injector_factory=batch_injector_factory)
block_validator = BlockValidator(
block_cache=block_cache,
state_view_factory=state_view_factory,
transaction_executor=transaction_executor,
identity_signer=identity_signer,
data_dir=data_dir,
config_dir=config_dir,
permission_verifier=permission_verifier)
chain_controller = ChainController(
block_store=block_store,
block_cache=block_cache,
block_validator=block_validator,
state_database=global_state_db,
chain_head_lock=block_publisher.chain_head_lock,
consensus_notifier=consensus_notifier,
state_pruning_block_depth=state_pruning_block_depth,
data_dir=data_dir,
observers=[
event_broadcaster,
receipt_store,
batch_tracker,
identity_observer,
settings_observer
])
genesis_controller = GenesisController(
context_manager=context_manager,
transaction_executor=transaction_executor,
completer=completer,
block_store=block_store,
state_view_factory=state_view_factory,
identity_signer=identity_signer,
data_dir=data_dir,
config_dir=config_dir,
chain_id_manager=chain_id_manager,
batch_sender=batch_sender)
responder = Responder(completer)
completer.set_on_block_received(chain_controller.queue_block)
completer.set_chain_has_block(chain_controller.has_block)
self._incoming_batch_sender = None
# -- Register Message Handler -- #
network_handlers.add(
network_dispatcher, network_service, gossip, completer,
responder, network_thread_pool, sig_pool,
chain_controller.has_block, self.has_batch,
permission_verifier, block_publisher, consensus_notifier)
component_handlers.add(
component_dispatcher, gossip, context_manager,
transaction_executor, completer, block_store, batch_tracker,
global_state_db, self.get_chain_head_state_root_hash,
receipt_store, event_broadcaster, permission_verifier,
component_thread_pool, client_thread_pool,
sig_pool, block_publisher,
identity_signer.get_public_key().as_hex())
# -- Store Object References -- #
self._component_dispatcher = component_dispatcher
self._component_service = component_service
self._component_thread_pool = component_thread_pool
self._network_dispatcher = network_dispatcher
self._network_service = network_service
self._network_thread_pool = network_thread_pool
consensus_proxy = ConsensusProxy(
block_cache=block_cache,
chain_controller=chain_controller,
block_publisher=block_publisher,
gossip=gossip,
identity_signer=identity_signer,
settings_view_factory=SettingsViewFactory(state_view_factory),
state_view_factory=state_view_factory)
consensus_handlers.add(
consensus_dispatcher,
consensus_thread_pool,
consensus_proxy,
consensus_notifier)
self._consensus_dispatcher = consensus_dispatcher
self._consensus_service = consensus_service
self._consensus_thread_pool = consensus_thread_pool
self._client_thread_pool = client_thread_pool
self._sig_pool = sig_pool
self._context_manager = context_manager
self._transaction_executor = transaction_executor
self._genesis_controller = genesis_controller
self._gossip = gossip
self._block_publisher = block_publisher
self._chain_controller = chain_controller
self._block_validator = block_validator
def start(self):
self._component_dispatcher.start()
self._component_service.start()
if self._genesis_controller.requires_genesis():
self._genesis_controller.start(self._start)
else:
self._start()
def _start(self):
self._consensus_dispatcher.start()
self._consensus_service.start()
self._network_dispatcher.start()
self._network_service.start()
self._gossip.start()
self._incoming_batch_sender = self._block_publisher.start()
self._chain_controller.start()
self._completer.set_on_batch_received(self._incoming_batch_sender.send)
signal_event = threading.Event()
signal.signal(signal.SIGTERM,
lambda sig, fr: signal_event.set())
# This is where the main thread will be during the bulk of the
# validator's life.
while not signal_event.is_set():
signal_event.wait(timeout=20)
def stop(self):
self._gossip.stop()
self._component_dispatcher.stop()
self._network_dispatcher.stop()
self._network_service.stop()
self._component_service.stop()
self._consensus_service.stop()
self._consensus_dispatcher.stop()
self._network_thread_pool.shutdown(wait=True)
self._component_thread_pool.shutdown(wait=True)
self._client_thread_pool.shutdown(wait=True)
self._sig_pool.shutdown(wait=True)
self._transaction_executor.stop()
self._context_manager.stop()
self._block_publisher.stop()
self._chain_controller.stop()
self._block_validator.stop()
threads = threading.enumerate()
# This will remove the MainThread, which will exit when we exit with
# a sys.exit() or exit of main().
threads.remove(threading.current_thread())
while threads:
if len(threads) < 4:
LOGGER.info(
"remaining threads: %s",
", ".join(
["{} ({})".format(x.name, x.__class__.__name__)
for x in threads]))
for t in threads.copy():
if not t.is_alive():
t.join()
threads.remove(t)
if threads:
time.sleep(1)
LOGGER.info("All threads have been stopped and joined")
def has_batch(self, batch_id):
if self._block_publisher.has_batch(batch_id):
return True
if self._incoming_batch_sender and \
self._incoming_batch_sender.has_batch(batch_id):
return True
return False
def get_chain_head_state_root_hash(self):
return self._chain_controller.chain_head.state_root_hash
|
[
"michaelnlewellen@gmail.com"
] |
michaelnlewellen@gmail.com
|
c840b0eca1e96539f3f005041cccea99ee8a46a8
|
36a2f42d887959f7819b0477a4f8c259f9f57c21
|
/utils/file_handler.py
|
cbea7614612e226dd12daf452f1023de45f26322
|
[] |
no_license
|
khaxis/image_tags
|
25cf57a649e8678558199051daf6c3e685b1501e
|
2765f48a268775fe53902723493988451990a7c1
|
refs/heads/master
| 2021-01-11T06:35:02.633260
| 2017-06-08T08:22:05
| 2017-06-08T08:22:05
| 71,862,274
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 641
|
py
|
"""
The idea of this module is to abstract file handling. Historically files have been stored on local disk; we are moving to the cloud now.
"""
import io
from boto import storage_uri
SCHEME = 'gs'
def get_file_stream(file_path):
content = storage_uri(file_path, SCHEME)
content_stream = io.BytesIO()
content.get_contents_to_file(content_stream)
content_stream.seek(0)
return content_stream
def upload_file_stream(file_path, fp):
content = storage_uri(file_path, SCHEME)
content.set_contents_from_file(fp)
if __name__ == '__main__':
with open('/tmp/a') as fp:
upload_file_stream('image_tags/tmp/a', fp)
|
[
"khaxis@gmail.com"
] |
khaxis@gmail.com
|
42d5447390d6c320fc2a9c365f222566a5e00d58
|
5c7089ceddd2503ec79a6d81eb2140ac8cd64209
|
/snuggle/web/routing/oauth.py
|
e3d4c9f1eda322e528eef85c4d6764990ebf19cd
|
[
"MIT"
] |
permissive
|
halfak/snuggle
|
a7e12ed0b82d6dca3c5a1154ef77e2a281306439
|
384818aaf8a783013b076ada3c74226f10e5dc18
|
refs/heads/master
| 2022-04-07T03:30:06.500679
| 2016-10-30T23:14:23
| 2016-10-30T23:14:23
| 72,371,739
| 2
| 0
|
MIT
| 2020-02-28T13:51:05
| 2016-10-30T19:52:01
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 451
|
py
|
import json
from bottle import post, get, request
from snuggle.web import processing
from snuggle.web.util import preprocessors
# /oauth/initiate/
@get("/oauth/initiate/")
@preprocessors.session
def oauth_initiate(session): return processing.processor.oauth.initiate(session)
# /oauth/callback/
@get("/oauth/callback/")
@preprocessors.session
def oauth_callback(session): return processing.processor.oauth.complete(session, request.query_string)
|
[
"aaron.halfaker@gmail.com"
] |
aaron.halfaker@gmail.com
|
60e4627fb8813e7e21791775e0fd18e5130e7bf0
|
975704b9d73e39275ef8f7b2cf70ce0f52e78bba
|
/Table.py
|
84b7a351807a9ee17f9857b2b32a3b6804ee28ec
|
[] |
no_license
|
kunu2804/PythonLab
|
680d9a787d47d9ea3edec0f21c32d2bd3c06f323
|
daeb2d1aa58eed746e9a6bbbf520e4f1934b81a3
|
refs/heads/main
| 2023-08-22T21:08:52.471494
| 2021-10-11T16:04:37
| 2021-10-11T16:04:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 581
|
py
|
# Q. Write a program to print multiplication table for a given number using for as well as while loop
# By - Kunal Baraniya
num = int(input("Enter Number: "))
print("Table of " + str(num) + " =")
while(num>0):
for i in range(1,11):
print(num, 'x', i, '=', num*i)
break
# Output: Enter Number: 2
# Table of 2 =
# 2 x 1 = 2
# 2 x 2 = 4
# 2 x 3 = 6
# 2 x 4 = 8
# 2 x 5 = 10
# 2 x 6 = 12
# 2 x 7 = 14
# 2 x 8 = 16
# 2 x 9 = 18
# 2 x 10 = 20
|
[
"noreply@github.com"
] |
kunu2804.noreply@github.com
|
9599c70352c7192bda1d9c5a4e92ced129563c66
|
c1ad83f59be8bce846a09fe2b4cc36269d130192
|
/project/data_prep/clean_scisumm.py
|
9e53d461e85b6178c141f86357fbe17bd891a983
|
[] |
no_license
|
heikalb/CMPT825
|
d498b8977d2a451c5673f16c549506bf7ea2a9b7
|
467abb2168babd518662df32f2674c877d4dec2f
|
refs/heads/master
| 2023-05-11T12:15:20.655939
| 2019-12-23T22:58:40
| 2019-12-23T22:58:40
| 220,340,940
| 0
| 0
| null | 2023-03-24T22:57:45
| 2019-11-07T22:32:52
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,186
|
py
|
"""
Clean article and abstract data in the Scisumm dataset and place them in separate files.
"""
# coding: utf-8
import os
from xml.dom import minidom
import xml
art_xmls = []
summary_xmls = []
human_summary_txts = []
articles = []
summaries = []
# Get paths of article XMLs
for r, d, f in os.walk('../data/scisumm-corpus'):
for f_ in f:
if '.xml' in f_:
art_xmls.append(os.path.join(r, f_))
elif 'human.txt' in f_:
human_summary_txts.append(os.path.join(r, f_))
# Get abstracts and article contents
for art_xml in art_xmls:
try:
ref_xml = minidom.parse(art_xml)
except xml.parsers.expat.ExpatError:
print('Parsing error: ', art_xml)
continue
# Get article contents
article_xml = ref_xml.getElementsByTagName('SECTION')
article_str = ''
for sect in article_xml:
sect = sect.getElementsByTagName('S')
sect_string = ''.join([s.childNodes[0].nodeValue.replace('\n', '').strip() for s in sect])
article_str += sect_string
if not article_str:
continue
# Get abstracts
try:
abstract = ref_xml.getElementsByTagName('ABSTRACT')[0]
abs_sents = abstract.getElementsByTagName('S')
except:
continue
abs_string = ''.join([s.childNodes[0].nodeValue.replace('\n', '').strip() for s in abs_sents])
summaries.append(abs_string)
articles.append(article_str)
# Get human-made summary if available, duplicate article if there is one
for human_sum in human_summary_txts:
if art_xml.split('/')[-1][:-4] in human_sum.split('/')[-1]:
articles.append(article_str)
            s = open(human_sum, 'rb').read().decode(errors='ignore').replace('\n', '').strip()  # decode bytes to text before stripping newlines
summaries.append(s)
# Save data into files
art_save_file = open('../data/scisumm-corpus/cleaned_data/articles_cleaned.txt', 'w')
art_save_file.write('\n'.join(articles))
art_save_file.close()
summary_save_file = open('../data/scisumm-corpus/cleaned_data/abstracts_cleaned.txt', 'w')
summary_save_file.write('\n'.join(summaries))
summary_save_file.close()
print('#Articles: ', len(articles))
print('#Summaries: ', len(summaries))
exit(0)
|
[
"heikal93@gmail.com"
] |
heikal93@gmail.com
|
3d7cdbb64b99d5a7c4a1331681e23069ff80eb8a
|
8475b07b53ec6de9ff92a65cad8d06d4e0732e34
|
/SinglyLinkedLists/python/DoublyLinked.py
|
4386d69dfc92d3dd7448321d508bfb747cedaaab
|
[] |
no_license
|
p-cap/algo-data-structures
|
2461293c9daf8d5e9af22e74e3979d8fd3ed1add
|
3512c81eef868bfb647a0fb386816ff1231913ae
|
refs/heads/main
| 2023-03-05T00:07:36.849024
| 2021-02-19T17:17:19
| 2021-02-19T17:17:19
| 336,418,559
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 861
|
py
|
class PcapDoublyLinkedList(object):
def __init__(self):
self.head = None
class PcapNode():
def __init__(self, value):
self.value = value
self.next = None
self.previous = None
# declare the list
pcapList = PcapDoublyLinkedList()
# declared the nodes
pcapHead = PcapNode("Uno")
pcapNode2 = PcapNode("Dos")
pcapNode3 = PcapNode("Tres")
# assign head to the list class
pcapList.head = pcapHead
# link all 3 nodes together
pcapHead.next = pcapNode2
pcapHead.previous = None
pcapNode2.next = pcapNode3
pcapNode2.previous = pcapHead
pcapNode3.next = None
pcapNode3.previous = pcapNode2
print(pcapHead.value)
print(pcapHead.next.value)
print(pcapHead.next.next.value)
print("#######################################")
print(pcapNode3.value)
print(pcapNode3.previous.value)
print(pcapNode3.previous.previous.value)
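# Illustrative traversal helper (a sketch, not in the original file): walk the
# list forward from the head, printing each value.
def pcap_print_forward(node):
    while node is not None:
        print(node.value)
        node = node.next

pcap_print_forward(pcapList.head)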
|
[
"slashedeye@gmail.com"
] |
slashedeye@gmail.com
|
7c1c6656e90b5178411fe365e4ba970c5ad541e1
|
d07307e15105fef3d9ba604f55af34835d7533e7
|
/dlapp/main/urls.py
|
9aeaeee5c0ea57e4452031f85886f6b6afecac8d
|
[] |
no_license
|
jun-16/animalai
|
ededda46b5e2b9901a2b5e3a1111732a3b777754
|
eb315bd1d33f3a210aa16b04586c026926751800
|
refs/heads/master
| 2020-03-08T08:20:59.611004
| 2019-04-14T06:25:05
| 2019-04-14T06:25:05
| 128,020,214
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 267
|
py
|
from django.conf.urls import url
from . import views
from main.views import Home, upload
#urlpatterns = [url(r'^$', views.index, name='index')]
urlpatterns = [
url(r'^$', Home.as_view(), name='home'),
url(r'^upload/$', upload, name='upload'),
]
|
[
"junichiro.sumitomo@e-gravity.co.jp"
] |
junichiro.sumitomo@e-gravity.co.jp
|
80d068801eeb173da337bf4ab7b7b3e4eda4576c
|
8d33ae757ce4e566f2525483f9b40579708de043
|
/healpix_to_mesh.py
|
fadb021b3e4b895da8b3f6f97773a4cd5cb82cb6
|
[] |
no_license
|
joezuntz/3dprint
|
2bab71bee8fea3506e632b19b3c9f1515db0b119
|
9836af734d5ab9689f4e5e6e1051c112cc028e04
|
refs/heads/master
| 2020-06-24T16:57:09.964409
| 2019-07-29T09:21:48
| 2019-07-29T09:21:48
| 199,022,921
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,802
|
py
|
import healpy
import stl
import numpy as np
def healpix_to_mesh(healpix_map, vmin=None, vmax=None, radius_variation=0.05, hemisphere='both'):
"""Make an STL mesh from a healpix map.
"""
npix = healpix_map.size
nside = healpy.npix2nside(npix)
    # We may wish to cap the maximum variation
if vmin is None:
vmin = healpix_map.min()
if vmax is None:
vmax = healpix_map.max()
# For this to be 3D we scale the point radius by this amount
scale_min = 1 - radius_variation
scale_max = 1 + radius_variation
pix = np.arange(npix, dtype=int)
# get the full list of pixel corners
pix_corners = healpy.boundaries(nside, pix)
    if hemisphere == 'north' or hemisphere == 'south':
_, _, z = healpy.pix2vec(nside, np.arange(npix))
if hemisphere == 'north':
w = z >= 0
elif hemisphere == 'south':
w = z < 0
pix = pix[w]
npix = len(pix)
elif hemisphere != 'both':
raise ValueError("hemisphere parameter must be 'north', 'south', or 'both'")
# Scale the pixel corners according to the surrounding pixel values,
# to make this a 3D thing
for i in pix:
# This is inefficient - vertices are shared between pixels, so
# we could avoid calculating this scale more than once for each pixel
# This is probably four times slower than it has to be.
for j in range(4):
xyz = pix_corners[i,:,j]
# Use healpy to interpolate the neighbouring pixels to this
# one to a single scalar value
theta, phi = healpy.vec2ang(xyz)
f = healpy.get_interp_val(healpix_map, theta, phi)
            # Convert from intensity to radius scaling
s = scale_min + (scale_max-scale_min)*(f-vmin)/(vmax-vmin)
# Avoid going beyond the maximum radius variation
s = np.clip(s, scale_min, scale_max)
# Scale the vertex
pix_corners[i,:,j] *= s
# Make an output mesh object
mesh = stl.mesh.Mesh(np.zeros(npix*2, dtype=stl.mesh.Mesh.dtype))
# Divide each pixel into two triangles and assign the vertices
# of those triangles to the mesh vectors
    for k, i in enumerate(pix):  # k indexes the (possibly filtered) faces, i the original pixel
        f = 2*k
        mesh.vectors[f,0] = pix_corners[i,:,0]
        mesh.vectors[f,1] = pix_corners[i,:,1]
        mesh.vectors[f,2] = pix_corners[i,:,2]
        f = 2*k + 1
        mesh.vectors[f,0] = pix_corners[i,:,2]
        mesh.vectors[f,1] = pix_corners[i,:,3]
        mesh.vectors[f,2] = pix_corners[i,:,0]
return mesh
def example():
T = healpy.read_map('wmap_ilc_9yr_v5.fits')
T = healpy.ud_grade(T, nside_out=32)
M = healpix_to_mesh(T, hemisphere='both')
M.save('wmap_ilc_9yr_v5.stl')
if __name__ == '__main__':
example()
|
[
"joezuntz@googlemail.com"
] |
joezuntz@googlemail.com
|
68dd71a34db06a3e7a917ed02689051115f6ad75
|
d138deda43e36f6c79c5e3a9ef1cc62c6a92e881
|
/python/paddle/fluid/dygraph/dygraph_to_static/program_translator.py
|
7c039efeb1d34b772c15206f8cc372cbd8f1884f
|
[
"Apache-2.0"
] |
permissive
|
seiriosPlus/Paddle
|
51afd6f5c85c3ce41dd72953ee659d1539c19f90
|
9602a182b2a4979247c09df1ec283fc39cb4a981
|
refs/heads/develop
| 2021-08-16T16:05:10.848535
| 2020-12-27T15:15:19
| 2020-12-27T15:15:19
| 123,257,829
| 2
| 0
|
Apache-2.0
| 2019-12-10T08:22:01
| 2018-02-28T08:57:42
|
C++
|
UTF-8
|
Python
| false
| false
| 41,809
|
py
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import collections
import gast
import inspect
import six
import textwrap
import threading
import warnings
import weakref
from paddle.fluid import framework
from paddle.fluid import in_dygraph_mode
from paddle.fluid.dygraph import layers
from paddle.fluid.data_feeder import check_type
from paddle.fluid.layers.utils import flatten
from paddle.fluid.dygraph.base import param_guard
from paddle.fluid.dygraph.base import switch_to_static_graph
from paddle.fluid.dygraph.dygraph_to_static import DygraphToStaticAst
from paddle.fluid.dygraph.dygraph_to_static import error
from paddle.fluid.dygraph.dygraph_to_static import logging_utils
from paddle.fluid.dygraph.dygraph_to_static.origin_info import attach_origin_info
from paddle.fluid.dygraph.dygraph_to_static.origin_info import create_and_update_origin_info_map
from paddle.fluid.dygraph.dygraph_to_static.origin_info import update_op_callstack_with_origin_info
from paddle.fluid.dygraph.dygraph_to_static.partial_program import partial_program_from
from paddle.fluid.dygraph.dygraph_to_static.utils import ast_to_func
from paddle.fluid.dygraph.dygraph_to_static.utils import ast_to_source_code
from paddle.fluid.dygraph.dygraph_to_static.utils import func_to_source_code
from paddle.fluid.dygraph.dygraph_to_static.utils import input_specs_compatible
from paddle.fluid.dygraph.dygraph_to_static.utils import type_name
from paddle.fluid.dygraph.dygraph_to_static.utils import unwrap
from paddle.fluid.dygraph.dygraph_to_static.utils import make_hashable
from paddle.fluid.dygraph.dygraph_to_static.function_spec import FunctionSpec
from paddle.fluid.dygraph.dygraph_to_static.function_spec import get_buffers, get_parameters
from paddle.fluid.wrapped_decorator import signature_safe_contextmanager
__all__ = ['ProgramTranslator', 'convert_to_static']
# For each traced function, we set `max_traced_program_count` = 10 to consider caching performance.
# Once exceeding the threshold, we will raise warning to users to make sure the conversion is as expected.
MAX_TRACED_PROGRAM_COUNT = 10
class FunctionCache(object):
"""
Caches the transformed functions to avoid redundant conversions of the same function.
"""
def __init__(self):
# Caches the converted static functions. {dygraph_func: static_func}
self._converted_static_func_caches = dict()
# Caches the converted ast node for same source code. {source_code: ast_root}
self._code_to_ast_caches = dict()
self._dygraph_to_static = DygraphToStaticAst()
def convert_with_cache(self, func):
"""
Returns the cached static function or converts it when first encounters the function.
"""
# If hit cache, return it directly.
static_func = self._converted_static_func_caches.get(func, None)
if static_func is None:
static_func = self._convert(func)
self._converted_static_func_caches[func] = static_func
return static_func
def _convert(self, func):
"""
        Converts a dygraph function into a static function. For two functions with the same
        dedented source code, the second one reuses the transformed AST node of the first.
For example:
# A.py
def foo(x, y):
z = x + y
return z
# B.py
def foo(x, y):
z = x + y
return z
If the conversion of A.foo happens after B.foo, it will reuse the transformed ast node of B.foo
to speed up the conversion.
"""
# Note: In Python2, it will raise OSError when inspect function
# with decorator directly and function.__wrapped__ holds the actual function.
func = unwrap(func)
source_code = func_to_source_code(func)
# TODO(liym27):
# Consider this case: source_code in self._code_to_ast_caches,
# but actually they are methods in different classes.
# Maybe use (__class__, source_code) as key
if source_code in self._code_to_ast_caches:
root_wrapper = self._code_to_ast_caches[source_code]
else:
root = gast.parse(source_code)
root = attach_origin_info(root, func)
root_wrapper = self._dygraph_to_static.get_static_ast(root)
self._code_to_ast_caches[source_code] = root_wrapper
# Get static function from AST
static_func, file_name = ast_to_func(root_wrapper.node, func)
create_and_update_origin_info_map(root_wrapper.node, static_func)
return static_func
def exist(self, func):
return func in self._converted_static_func_caches
_CACHE_LOCK = threading.Lock()
_FUNCTION_CACHE = FunctionCache()
def convert_to_static(function):
"""
Transforms function of dygraph into static function using the cache mechanism.
Args:
function(callable): The function with dygraph layers that will be converted into static layers.
"""
with _CACHE_LOCK:
static_func = _FUNCTION_CACHE.convert_with_cache(function)
return static_func
class CacheKey(object):
"""
Cached key for ProgramCache.
"""
__slots__ = [
'function_spec', 'input_args_with_spec', 'input_kwargs_with_spec',
'class_instance'
]
def __init__(self, function_spec, input_args_with_spec,
input_kwargs_with_spec, class_instance):
"""
Initializes a cache key.
Args:
functions_spec(FunctionSpec): a FunctionSpec instance of decorated function.
input_args_with_spec(list[InputSpec]): actual input args with some arguments replaced by InputSpec.
input_kwargs_with_spec(list[{string:InputSpec}]): actual input kwargs with some arguments replaced by InputSpec.
            class_instance(object): an instance of class `Layer`.
"""
self.function_spec = function_spec
self.input_args_with_spec = input_args_with_spec
self.input_kwargs_with_spec = input_kwargs_with_spec
self.class_instance = class_instance
@classmethod
def from_func_and_args(cls, function_spec, args, kwargs, class_instance):
"""
        Generates a CacheKey instance from the given inputs.
Args:
functions_spec(FunctionSpec): a FunctionSpec instance of decorated function.
args(tuple): tuple of actual inputs arguments.
kwargs(dict): dict of actual inputs keyword arguments.
            class_instance(object): an instance of class `Layer`.
"""
# 1. filter `self` in args
if args and isinstance(args[0], layers.Layer):
args = args[1:]
# 2. convert tensor and numpy array into InputSpec
_args, _kwargs = function_spec.unified_args_and_kwargs(args, kwargs)
input_args_with_spec, input_kwargs_with_spec = function_spec.args_to_input_spec(
_args, _kwargs)
# 3. check whether hit the cache or build a new program for the input arguments
return CacheKey(function_spec, input_args_with_spec,
input_kwargs_with_spec, class_instance)
def __hash__(self):
error_msg = "Arguments to a `@paddle.jit.to_static` must be a hashable Python objects (or nested structures of these types)."
return hash((id(self.function_spec),
make_hashable(self.input_args_with_spec, error_msg),
make_hashable(self.input_kwargs_with_spec, error_msg),
self.class_instance))
def __eq__(self, other):
return (type(self) is type(other)) and hash(self) == hash(other)
def __neq__(self, other):
return not self == other
def __repr__(self):
return "id(function_spec): {}, input_args_with_spec: {}, input_kwargs_with_spec: {}, class_instance: {}".format(
id(self.function_spec), self.input_args_with_spec,
self.input_kwargs_with_spec, self.class_instance)
def unwrap_decorators(func):
"""
Unwraps a decorated function and returns the decorator list and inner target.
"""
decorators = []
cur = func
while True:
if isinstance(cur, StaticFunction):
decorators.append(cur)
# Note: if `cur` is a method, keep it as bound method of class.
instance = cur._class_instance
if instance is not None:
cur = cur.dygraph_function.__get__(instance)
else:
cur = cur.dygraph_function
else:
break
return decorators, cur
class StaticFunction(object):
"""
    Wrapper class to manage program conversion of a decorated function.
"""
def __init__(self, function, input_spec=None):
"""
Initializes a `StaticFunction`.
Args:
function(callable): A function or method that will be converted into static program.
input_spec(list[InputSpec]): list of InputSpec to specify the `shape/dtype/name` information for each input argument, default None.
"""
# save the instance `self` while decorating a method of class.
if inspect.ismethod(function):
self._dygraph_function = getattr(function, '__func__')
self._class_instance = getattr(function, '__self__')
else:
self._dygraph_function = function
self._class_instance = None
self._input_spec = input_spec
self._function_spec = FunctionSpec(function, input_spec)
self._program_cache = ProgramCache()
self._descriptor_cache = weakref.WeakKeyDictionary()
# Note: Hold a reference to ProgramTranslator for switching `enable_to_static`.
self._program_trans = ProgramTranslator()
def __get__(self, instance, owner):
"""
Overrides this method to parse the class instance and call bound method correctly.
For example:
'''
class Net(Layer):
def __init__(self):
pass
@paddle.jit.to_static
def forward(self, x, y):
return x + y
net = Net()
out = net(x, y)
'''
In the above case, `net(x, y)` first calls `net.forward(x, y)`, which is a bound method
of the `Net` instance. After the method is decorated by `@paddle.jit.to_static`, `__get__`
is called first to parse the class instance correctly instead of the `StaticFunction` instance.
"""
if instance not in self._descriptor_cache:
if instance is None:
return self
# Note(Aurelius84): Construct a new StaticFunction instance when we first
# encounter the bound function of a layer, and cache it.
new_static_layer = self._clone()
new_static_layer._class_instance = instance
self._descriptor_cache[instance] = new_static_layer
return self._descriptor_cache[instance]
def _clone(self):
return self.__class__(self._dygraph_function, self._input_spec)
def __call__(self, *args, **kwargs):
"""
Supports to call the returned instance with input `args` and `kwargs` directly.
Args:
*args(tuple): tuple of all input arguments from original decorated function.
**kwargs(dict): dict of all input keyword arguments from original decorated function.
Return:
Outputs of decorated function.
"""
# 1. call dygraph function directly if not enable `declarative`
if not self._program_trans.enable_to_static:
# NOTE(liym27):
# Here calls `warnings.warn` but not `logging_utils.warn` because by default warnings.warn(message)
# will show up **only once**. StaticFunction.__call__ will run many times, it is appropriate to
# display this warning message only once.
warnings.warn(
"The decorator '@paddle.jit.to_static' does NOT work when setting ProgramTranslator.enable to False. "
"We will just return dygraph output. If you would like to get static graph output, please call API "
"ProgramTranslator.enable(True)")
return self._call_dygraph_function(*args, **kwargs)
if not in_dygraph_mode():
raise RuntimeError(
"Failed to run the callable object {} decorated by '@paddle.jit.to_static', "
"because it is NOT in dynamic mode. Please disable the static mode to enter dynamic mode with the "
"following API: paddle.disable_static().".format(
self.dygraph_function))
# 2. trace ops from dygraph layers and cache the generated program.
args, kwargs = self._function_spec.unified_args_and_kwargs(args, kwargs)
try:
concrete_program, partial_program_layer = self.get_concrete_program(
*args, **kwargs)
# 3. synchronize self.training attribute.
if isinstance(self._class_instance, layers.Layer):
partial_program_layer.training = self._class_instance.training
# 4. return outputs.
try:
return partial_program_layer(args)
except Exception as e:
if not hasattr(e, error.ERROR_DATA):
# runtime error
error.attach_error_data(e, in_runtime=True)
raise
except Exception as e:
error_data = getattr(e, error.ERROR_DATA, None)
if error_data:
error_data.raise_new_exception()
else:
logging_utils.warn(
"Please file an issue at 'https://github.com/PaddlePaddle/Paddle/issues'"
" if you can't handle this {} yourself.".format(type(e)))
raise e
def _call_dygraph_function(self, *args, **kwargs):
"""
Calls dygraph function directly and returns the outputs.
Args:
*args(tuple): tuple of all input arguments from original decorated function.
**kwargs(dict): dict of all input keyword arguments from original decorated function.
Return:
Outputs of dygraph function.
"""
if self._class_instance is not None:
dygraph_function = self._dygraph_function.__get__(
self._class_instance)
else:
dygraph_function = self._dygraph_function
return dygraph_function(*args, **kwargs)
def get_concrete_program(self, *args, **kwargs):
"""
Returns traced concrete program and inner executable partial layer.
Args:
*args(tuple): input arguments values or InputSpec
**kwargs(dict) : input kwargs values.
Returns:
Traced ConcreteProgram and executable translated Layer.
"""
# 1. unify args/kwargs and replace Tensor with InputSpec
if len(args) != len(self._function_spec.args_name):
args, kwargs = self._function_spec.unified_args_and_kwargs(args,
kwargs)
input_args_with_spec, input_kwargs_with_spec = self._function_spec.args_to_input_spec(
args, kwargs)
# 2. generate cache key
cache_key = CacheKey(self._function_spec, input_args_with_spec,
input_kwargs_with_spec, self._class_instance)
# 3. check whether hit the cache or build a new program for the input arguments
concrete_program, partial_program_layer = self._program_cache[cache_key]
return concrete_program, partial_program_layer
def get_traced_count(self):
"""
Returns the number of traced programs for the decorated function.
"""
return len(self._program_cache)
@property
def code(self):
"""
Returns the source code of transformed static function for debugging.
"""
static_func = convert_to_static(self._dygraph_function)
source_code = func_to_source_code(static_func)
return source_code
@property
def dygraph_function(self):
"""
Returns the original decorated function.
"""
return self._dygraph_function
@property
def concrete_program(self):
"""
Returns recent ConcreteProgram instance of decorated function.
Examples:
.. code-block:: python
import paddle
from paddle.jit import to_static
from paddle.static import InputSpec
paddle.disable_static()
def foo(x, y):
z = x + y
return z
# usage 1:
decorated_foo = to_static(foo, input_spec=[InputSpec([10], name='x'), InputSpec([10], name='y')])
print(decorated_foo.concrete_program)
# usage 2:
decorated_foo = to_static(foo)
out_foo = decorated_foo(paddle.rand([10]), paddle.rand([10]))
print(decorated_foo.concrete_program)
"""
return self.concrete_program_specify_input_spec(input_spec=None)
def concrete_program_specify_input_spec(self, input_spec=None):
"""
Returns the recent ConcreteProgram instance of the decorated function while
specifying input_spec. If self._function_spec already has an
input_spec, it checks the compatibility between the given input_spec and
self._function_spec.input_spec. If the given input_spec is None, then
this method uses self._function_spec.input_spec.
Args:
input_spec (list[InputSpec], optional): Describes the input of
the translated function.
"""
# If `input_spec` is specified, the length of program_cache will always be 1;
# otherwise, return the last one.
cached_program_len = len(self._program_cache)
# If `input_spec` is specified, apply the conversion from dygraph layers into a static Program.
if cached_program_len == 0:
if input_spec is None:
input_spec = self._function_spec.input_spec
elif self._function_spec.input_spec is not None:
if not input_specs_compatible(
flatten(input_spec),
flatten(self._function_spec.input_spec)):
raise ValueError(
"The `input_spec`: {} used to construct concrete_program is conflict with the `input_spec`: {} in `@paddle.jit.to_static`".
format(input_spec, self._function_spec.input_spec))
has_input_spec = (input_spec is not None)
if has_input_spec:
concrete_program, _ = self.get_concrete_program(*input_spec)
return concrete_program
else:
raise ValueError(
"No valid transformed program for {}.\n\t Please specific `input_spec` in `@paddle.jit.to_static` or feed input tensor to call the decorated function at once.\n".
format(self._function_spec))
# If more than one programs have been cached, return the recent converted program by default.
elif cached_program_len > 1:
logging_utils.warn(
"Current {} has more than one cached programs: {}, the last traced progam will be return by default.".
format(self._function_spec, cached_program_len))
cache_key, (concrete_program,
partial_layer) = self._program_cache.last()
return concrete_program
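# A hedged sketch of reaching this method directly (illustrative only; `foo` is a
# hypothetical single-argument user function and the InputSpec shape is an assumed
# example):
#
#     static_foo = paddle.jit.to_static(foo)
#     program = static_foo.concrete_program_specify_input_spec(
#         input_spec=[InputSpec(shape=[None, 10], dtype='float32', name='x')])
#     # With an explicit `input_spec`, exactly one program is traced and cached.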
@property
def inputs(self):
"""
Returns input tensors of recent converted static program.
"""
concrete_program = self.concrete_program
inputs = [
var for var in flatten(concrete_program.inputs)
if isinstance(var, framework.Variable)
]
return inputs
@property
def outputs(self):
"""
Returns output tensors of recent converted static program.
"""
concrete_program = self.concrete_program
outputs = [
var for var in flatten(concrete_program.outputs)
if isinstance(var, framework.Variable)
]
return outputs
@property
def main_program(self):
"""
Returns recent converted static main program.
"""
concrete_program = self.concrete_program
main_program = concrete_program.main_program
return main_program
@property
def program_cache(self):
return self._program_cache
@property
def function_spec(self):
return self._function_spec
# Flag that indicates whether running code under `@declarative`
_in_declarative_mode_ = False
def in_declarative_mode():
"""
Return a bool value that indicates whether running code under `@declarative`
"""
return _in_declarative_mode_
@signature_safe_contextmanager
def _switch_declarative_mode_guard_(is_declarative=True):
global _in_declarative_mode_
original_val = _in_declarative_mode_
_in_declarative_mode_ = is_declarative
yield
_in_declarative_mode_ = original_val
def _verify_init_in_dynamic_mode(class_instance):
"""
Verifies the instance is initialized in dynamic mode.
"""
if isinstance(class_instance, layers.Layer):
if not class_instance._init_in_dynamic_mode:
raise RuntimeError(
" `paddle.jit.to_static` is only available in dynamic mode. Please call `paddle.disable_static()` before "
"initializing your Layer class `{}` . Because parameters of Layer class should be initialized firstly "
"in dynamic mode while applying transformation.".format(
class_instance))
class ConcreteProgram(object):
__slots__ = [
'inputs', 'outputs', 'main_program', "startup_program", "parameters",
"function"
]
def __init__(self,
inputs,
outputs,
parameters,
function,
main_program,
startup_program=None):
self.inputs = inputs
self.outputs = outputs
self.main_program = main_program
self.startup_program = startup_program
self.parameters = parameters
self.function = function
@staticmethod
@switch_to_static_graph
def from_func_spec(func_spec, input_spec, input_kwargs_spec,
class_instance):
"""
Builds the main_program with specialized inputs and returns outputs
of program as fetch_list.
Args:
func_spec(FunctionSpec): A FunctionSpec instance for decorated function.
input_spec(list[InputSpec]):
"""
# verify the instance is initialized in imperative mode.
_verify_init_in_dynamic_mode(class_instance)
# Transforms dygraph function into static function and caches it.
dygraph_function = func_spec.dygraph_function
static_func = convert_to_static(dygraph_function)
main_program, startup_program = framework.Program(), framework.Program()
# Note: The random seed should be synchronized into cached program
# if set in `fluid.dygraph_guard` because some ops rely on it, such as
# `fluid.layers.dropout`.
main_program.random_seed = framework.default_main_program().random_seed
startup_program.random_seed = framework.default_startup_program(
).random_seed
with framework.program_guard(main_program, startup_program):
with _switch_declarative_mode_guard_(is_declarative=True):
# 1. Adds `fluid.data` layers for input if needed
inputs = func_spec.to_static_inputs_with_spec(input_spec,
main_program)
kwargs = func_spec.to_static_inputs_with_spec(input_kwargs_spec,
main_program)
if class_instance:
inputs = tuple([class_instance] + list(inputs))
# 2. Gets all ParamBases and buffered VarBases in the function
all_parameters_and_buffers = _extract_indeed_params_buffers(
class_instance)
# 3. Builds program only once and returns the output Variables.
with param_guard(get_parameters(
class_instance, False)), param_guard(
get_buffers(class_instance, False)):
try:
if kwargs:
outputs = static_func(*inputs, **kwargs)
else:
outputs = static_func(*inputs)
except BaseException as e:
# NOTE: If e is raised in compile time, e should be attached to ERROR_DATA here.
error.attach_error_data(e)
error_data = getattr(e, error.ERROR_DATA, None)
if error_data:
error_data.raise_new_exception()
raise
if outputs is not None:
need_wrap_into_list = not isinstance(outputs, (
tuple, list)) or len(outputs) == 1
if need_wrap_into_list:
outputs = [outputs]
main_program = update_op_callstack_with_origin_info(main_program)
return ConcreteProgram(
inputs=inputs,
outputs=outputs,
parameters=all_parameters_and_buffers,
function=dygraph_function,
main_program=main_program,
startup_program=startup_program)
def _extract_indeed_params_buffers(class_instance):
"""
Filters out buffers that are not initialized.
"""
params = list(get_parameters(class_instance).values())
buffers = list(get_buffers(class_instance).values())
buffers = [buffer for buffer in buffers if len(buffer.shape) != 0]
return params + buffers
class ProgramCache(object):
"""
Wrapper class for the program functions defined by dygraph function.
"""
def __init__(self):
self._caches = collections.OrderedDict()
def _build_once(self, cache_key):
concrete_program = ConcreteProgram.from_func_spec(
func_spec=cache_key.function_spec,
input_spec=cache_key.input_args_with_spec,
input_kwargs_spec=cache_key.input_kwargs_with_spec,
class_instance=cache_key.class_instance)
return concrete_program, partial_program_from(concrete_program)
def __getitem__(self, item):
if not isinstance(item, CacheKey):
raise ValueError('type(item) should be CacheKey, but received %s' %
type_name(item))
if item not in self._caches:
self._caches[item] = self._build_once(item)
# Note: raise a warning if the number of traced programs exceeds `max_tracing_count`
current_tracing_count = len(self._caches)
if current_tracing_count > MAX_TRACED_PROGRAM_COUNT:
logging_utils.warn(
"Current traced program number: {} > `max_tracing_count`:{}. Too much cached programs will bring expensive overhead. "
"The reason may be: (1) passing tensors with different shapes, (2) passing python objects instead of tensors.".
format(current_tracing_count, MAX_TRACED_PROGRAM_COUNT))
return self._caches[item]
def get_program(self, item):
if not isinstance(item, CacheKey):
raise ValueError(
"Input item's type should be FunctionSpec, but received %s" %
type_name(item))
if item not in self._caches:
raise RuntimeError(
"Failed to find program for input item, please decorate input function by `@paddle.jit.to_static`."
)
return self._caches[item]
def last(self):
assert len(
self._caches) >= 1, "No valid cached program in ProgramCache."
key = next(reversed(self._caches.keys()))
return key, self._caches[key]
def __len__(self):
return len(self._caches)
def concrete_programs(self):
return [cp for key, (cp, _) in six.iteritems(self._caches)]
def synchronized(func):
func.__lock__ = threading.Lock()
def lock_func(*args, **kwargs):
with func.__lock__:
return func(*args, **kwargs)
return lock_func
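# A hedged note (not part of the original module): `synchronized` serializes all
# calls to the wrapped function with a single per-function lock, e.g.
#
#     @synchronized
#     def build_resource():  # hypothetical name
#         ...                # only one thread executes this body at a time
#
# which is how ProgramTranslator.__new__ below keeps singleton creation thread-safe.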
class ProgramTranslator(object):
"""
Class to translate dygraph function into static graph function. The object
of this class is a singleton.
Args:
None.
Returns:
ProgramTranslator: the singleton object.
Examples:
.. code-block:: python
import paddle
# Two methods get same object because ProgramTranslator is a singleton
paddle.jit.ProgramTranslator()
paddle.jit.ProgramTranslator.get_instance()
"""
_singleton_lock = threading.Lock()
_instance = None
@synchronized
def __new__(cls, *args, **kwargs):
if cls._instance is None:
cls._instance = object.__new__(cls, *args, **kwargs)
cls._instance._initialized = False
return cls._instance
@classmethod
def get_instance(cls):
if cls._instance is None:
with cls._singleton_lock:
cls._instance = cls()
return cls._instance
@classmethod
def reset(cls):
if cls._instance is not None:
cls._instance._initialized = False
cls._instance.__init__()
def __init__(self):
# Make sure __init__ is called only once.
if self._initialized:
return
self._initialized = True
self._program_cache = ProgramCache()
self.enable_to_static = True
def enable(self, enable_to_static):
"""
Enable or disable the converting from imperative to static graph by
ProgramTranslator globally.
Args:
enable_to_static (bool): True or False to enable or disable converting to static.
Returns:
None.
Examples:
.. code-block:: python
import paddle
@paddle.jit.to_static
def func(x):
if paddle.mean(x) > 0:
x_v = x - 1
else:
x_v = x + 1
return x_v
prog_trans = paddle.jit.ProgramTranslator()
prog_trans.enable(False)
x = paddle.ones([1, 2])
# ProgramTranslator is disabled so the func is run in dygraph
print(func(x)) # [[0. 0.]]
"""
check_type(enable_to_static, "enable_to_static", bool,
"ProgramTranslator.enable")
self.enable_to_static = enable_to_static
def get_output(self, dygraph_func, *args, **kwargs):
"""
Returns the output dygraph Tensor for the dygraph function. The dygraph
function will be translated into a static graph function, so the
underlying numerical result will be calculated in static graph mode.
Args:
dygraph_func (callable): the dygraph function.
*args (tuple): the input argument of dygraph_func.
**kwargs (dict): the input argument of dygraph_func.
Returns:
Tensor or tuple of Tensors: the dygraph Tensor containing the numerical result.
Examples:
.. code-block:: python
import paddle
def func(x):
if paddle.mean(x) > 0:
x_v = x - 1
else:
x_v = x + 1
return x_v
prog_trans = paddle.jit.ProgramTranslator()
x = paddle.ones([1, 2])
x_v = prog_trans.get_output(func, x)
print(x_v) # [[0. 0.]]
"""
assert callable(
dygraph_func
), "Input dygraph_func is not a callable in ProgramTranslator.get_output"
if not self.enable_to_static:
# Here calls `warnings.warn` but not `logging_utils.warn` because by default warnings.warn(message)
# will show up **only once**.
warnings.warn(
"The ProgramTranslator.get_output doesn't work when setting ProgramTranslator.enable to False. "
"We will just return dygraph output. "
"Please call ProgramTranslator.enable(True) if you would like to get static output."
)
return dygraph_func(*args, **kwargs)
try:
function_spec = FunctionSpec(dygraph_func)
cache_key = CacheKey.from_func_and_args(function_spec, args, kwargs,
getattr(dygraph_func,
'__self__', None))
_, partial_program_layer = self._program_cache[cache_key]
if args and isinstance(args[0], layers.Layer):
# Synchronize self.training attribute.
partial_program_layer.training = args[0].training
args = args[1:]
try:
return partial_program_layer(args)
except BaseException as e:
# NOTE:
# 1. If e is raised in compile time, e should have been attached to ERROR_DATA before;
# 2. If e raised in runtime, e should be attached to ERROR_DATA here.
if not hasattr(e, error.ERROR_DATA):
# runtime error
error.attach_error_data(e, in_runtime=True)
raise
except BaseException as e:
error_data = getattr(e, error.ERROR_DATA, None)
if error_data:
error_data.raise_new_exception()
else:
logging_utils.warn(
"Please file an issue at 'https://github.com/PaddlePaddle/Paddle/issues'"
" if you can't handle this {} yourself.".format(type(e)))
raise e
def get_func(self, dygraph_func):
"""
Returns a callable function which converts imperative dygraph APIs of
the input dygraph_func into declarative net-building APIs, which means
it doesn't return an immediate numerical result as get_output does.
Users should handle Program and Executor by themselves.
Args:
dygraph_func (callable): the dygraph function.
Returns:
callable: converting imperative dygraph APIs into declarative
net-building APIs.
Examples:
.. code-block:: python
import paddle
def func(x):
if paddle.mean(x) > 0:
x_v = x - 1
else:
x_v = x + 1
return x_v
prog_trans = paddle.jit.ProgramTranslator()
static_func = prog_trans.get_func(func)
print(callable(static_func)) # True
"""
assert callable(
dygraph_func
), "Input dygraph_func is not a callable in ProgramTranslator.get_func"
if not self.enable_to_static:
logging_utils.warn(
"The ProgramTranslator.get_func doesn't work when setting ProgramTranslator.enable to False. We will "
"just return dygraph output. Please call ProgramTranslator.enable(True) if you would like to get static output."
)
return dygraph_func
static_func = convert_to_static(dygraph_func)
return static_func
def get_program(self, dygraph_func, *args, **kwargs):
"""
Returns the translated static program and input/output Tensors from
dygraph function. The users can use the program to run by executor.
Args:
dygraph_func (callable): the dygraph function.
*args (tuple): the input argument of dygraph_func.
**kwargs (dict): the input argument of dygraph_func.
Returns:
tuple of (main_program, startup_program, inputs, outputs) whose
types are (Program, Program, list of Tensors, list of Tensors).
main_program: the converted main program.
startup_program: the converted startup program.
inputs: list of input Tensors which need to be fed.
outputs: list of output Tensors which users can fetch.
Examples:
.. code-block:: python
import paddle
def func(x):
if paddle.mean(x) > 0:
x_v = x - 1
else:
x_v = x + 1
return x_v
prog_trans = paddle.jit.ProgramTranslator()
x = paddle.ones([1, 2])
main_prog, start_prog, inputs, outputs = prog_trans.get_program(func, x)
print([i.name for i in inputs])
# [u'generated_tensor_0'] the feed input Tensor name representing x
print([o.name for o in outputs])
# [u'_generated_var_4'] the fetch output Tensor name representing x_v
"""
assert callable(
dygraph_func
), "Input dygraph_func is not a callable in ProgramTranslator.get_program"
if not self.enable_to_static:
logging_utils.warn(
"The ProgramTranslator.get_program doesn't work when setting ProgramTranslator.enable to False."
"We will just return dygraph output. "
"Please call ProgramTranslator.enable(True) if you would like to get static output."
)
return dygraph_func(*args, **kwargs)
function_spec = FunctionSpec(dygraph_func)
cache_key = CacheKey.from_func_and_args(function_spec, args, kwargs,
getattr(dygraph_func,
'__self__', None))
concrete_program, partial_program_layer = self._program_cache[cache_key]
# Note: concrete_program hold all input/output infos include non-Variable
input_vars = [
var for var in concrete_program.inputs
if isinstance(var, framework.Variable)
]
output_vars = [
var for var in concrete_program.outputs
if isinstance(var, framework.Variable)
]
return concrete_program.main_program, \
concrete_program.startup_program, \
input_vars, \
output_vars
def get_code(self, dygraph_func):
"""
Returns the translated static function string code from dygraph function.
Args:
dygraph_func (callable): the dygraph function.
Returns:
str: the string code of translated static function.
Examples:
.. code-block:: python
import paddle
def func(x):
if paddle.mean(x) > 0:
x_v = x - 1
else:
x_v = x + 1
return x_v
prog_trans = paddle.jit.ProgramTranslator()
code = prog_trans.get_code(func)
print(type(code)) # <class 'str'>
"""
assert callable(
dygraph_func
), "Input dygraph_func is not a callable in ProgramTranslator.get_code"
# Gets AST from dygraph function
unwrap_func = unwrap(dygraph_func)
raw_code = inspect.getsource(unwrap_func)
code = textwrap.dedent(raw_code)
root = gast.parse(code)
# Transform AST
dygraph_to_static = DygraphToStaticAst()
root_wrapper = dygraph_to_static.get_static_ast(root)
# Get source_code
source_code = ast_to_source_code(root_wrapper.node)
return source_code
def get_program_cache(self):
"""
Returns the ProgramCache instance. This method is used by PaddlePaddle
developers to manage program cache in ProgramTranslator. Normal users
don't have to call this method.
Returns:
ProgramCache: ProgramCache instance of ProgramTranslator.
Examples:
.. code-block:: python
import paddle
prog_trans = paddle.jit.ProgramTranslator()
prog_cache = prog_trans.get_program_cache()
"""
return self._program_cache
|
[
"noreply@github.com"
] |
seiriosPlus.noreply@github.com
|
c6e3bcb4f3247eab5377cf6364e3705af4e21173
|
4d81a1065f1e487aebae794d5637d37423efd345
|
/PyTest_2/test_file_11.py
|
fad377384ac463f312929453ca675996dcfde478
|
[] |
no_license
|
amanoj319319319/EclipsePython
|
b10cc787125f681e8cb134301c20c450e52e5154
|
d366839e03c13daf052874e44aafe21d0f9d437d
|
refs/heads/master
| 2022-09-08T03:47:08.013223
| 2020-06-01T06:12:14
| 2020-06-01T06:12:14
| 267,015,649
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,363
|
py
|
##https://www.tutorialspoint.com/pytest/pytest_tutorial.pdf
#https://www.guru99.com/pytest-tutorial.html
import unittest
import pytest
from selenium import webdriver
import time
import pytest_ordering
import pytest_html
from selenium.webdriver.common.by import By
import pytest_rerunfailures
class Test_Testing2():
a=10
b=20
@pytest.yield_fixture(scope="module")
def setup(self):
print("it will be printed only once before execution of a module")
global driver
driver = webdriver.Chrome()
driver.maximize_window()
driver.implicitly_wait(6)
yield
print ("i will be printed after only once after execution of a module")
driver.close()
# @pytest.mark.skip("Dont want to excute this test in this batch")
#st @pytest.mark.skipif(a<b , reason="a is less than b")
# @pytest.mark.smoke
# @pytest.mark.flasky(reruns=2)
def test_method_1(self,setup):
driver.get("https://learn.letskodeit.com")
print ("Title of the page is:-",driver.titl)
# @pytest.mark.regression
# @pytest.mark.flaky(reruns=3, reruns_delay=6)
def test_method_2(self,setup):
driver.get("https://learn.letskodeit.com/p/practice")
print ("Title of the page is:-",driver.title)
ele=driver.find_element_by_id("name").send_keys("Manoj")
time.sleep(5)
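# Hedged usage note (not part of the original test file): with the plugins imported
# above, typical invocations could look like the shell commands below; the report
# file name `report.html` is an assumed example.
#
#   pytest test_file_11.py -v --html=report.html
#   pytest test_file_11.py -v --reruns 2 --reruns-delay 6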
|
[
"a.manoj16@gmail.com"
] |
a.manoj16@gmail.com
|
b8947586aa09a52a9513dc0465110568bb8d6273
|
53994cd9f5e7170190ad0cc92f206338f1d9ee51
|
/server.py
|
96075e32ee7b07068d7db1a04ff1cf3aa3377d57
|
[] |
no_license
|
onatc/SimpleSiri
|
02f446cc28a97cb72778adc7d7899aa1a5fa71a7
|
fd369104e4c3008c85ffafb801f65bc5b5f574df
|
refs/heads/master
| 2020-03-09T11:58:01.523389
| 2018-04-09T13:15:22
| 2018-04-09T13:15:22
| 128,773,829
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,952
|
py
|
#!/usr/bin/env python3
import wolframalpha
import serverKeys
import os
import sys
import socket
import pickle
import hashlib
from cryptography.fernet import Fernet
host = ''
port = int(sys.argv[2])
backlog = int(sys.argv[4])
size = int(sys.argv[6])
client = wolframalpha.Client(serverKeys.wolfram_alpha_appid)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((host,port))
print('[Checkpoint] Created socket at 0.0.0.0 on port',port,'\n')
print('[Checkpoint] Listening for client connections\n')
s.listen(backlog)
while 1:
cli, address = s.accept()
print('[Checkpoint] Accepted client connection from',address[0],'on port',port,'\n')
data = cli.recv(size)
print('[Checkpoint] Received data: ',data,'\n')
data_tuple = pickle.loads(data)
key = data_tuple[0]
question_enc = data_tuple[1]
checksum = data_tuple[2]
# Check checksum
if (hashlib.md5(question_enc).hexdigest() != checksum):
print('[Checkpoint] Checksum is INVALID\n')
else:
print('[Checkpoint] Checksum is VALID\n')
# Decrypt
f = Fernet(key)
question_dec = f.decrypt(question_enc)
print('[Checkpoint] Decrypt: Using Key:',key,'\nPlaintext:',question_dec,'\n')
question = question_dec.decode('utf-8')
print('[Checkpoint] Speaking:',question,'\n')
os.system('espeak "{}" 2>/dev/null'.format(question))
print('[Checkpoint] Sending question to Wolfram Alpha:',question,'\n')
res = client.query(question)
answer = next(res.results).text
print('[Checkpoint] Received answer from Wolfram Alpha: ',answer,'\n')
if answer:
# Encrypt
answer_enc = f.encrypt(str.encode(answer))
print('[Checkpoint] Encrypt: Generated Key:',key,'\nCiphertext:',answer_enc,'\n')
checksum = hashlib.md5(answer_enc).hexdigest()
print('[Checkpoint] Generated MD5 Checksum:',checksum,'\n')
data = (answer_enc, checksum)
payload = pickle.dumps(data)
print('[Checkpoint] Sending data:',payload,'\n')
cli.send(payload)
cli.close()
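# Hedged client-side sketch (not part of this file) of the payload this server
# expects: a pickled (key, encrypted_question, md5_checksum) tuple. The host and
# question text below are assumed examples.
#
#     key = Fernet.generate_key()
#     question_enc = Fernet(key).encrypt(b'What is the speed of light?')
#     payload = pickle.dumps((key, question_enc, hashlib.md5(question_enc).hexdigest()))
#     c = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#     c.connect(('localhost', port))
#     c.send(payload)
#     c.close()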
|
[
"noreply@github.com"
] |
onatc.noreply@github.com
|
bdc1abbcea60534d7378d0d3b011e9518c61bde7
|
f94c3f8ca0876444714518950fb3776cd32b3c56
|
/area/migrations/0001_initial.py
|
7e5bf70e96bba5ee0fc2227b469e11b25ac8db7b
|
[] |
no_license
|
dengshilong/test-django
|
13abe65c48e2eb5f83b3fbf1ddb2c8e66f392f29
|
853f342cb3d7958d120beafe54028c4e8fc2c822
|
refs/heads/master
| 2020-04-02T13:00:00.577137
| 2016-11-19T03:24:46
| 2016-11-19T03:24:46
| 62,448,833
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,529
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-02 12:28
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import django.db.models.manager
import mptt.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Area',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50, unique=True)),
('lft', models.PositiveIntegerField(db_index=True, editable=False)),
('rght', models.PositiveIntegerField(db_index=True, editable=False)),
('tree_id', models.PositiveIntegerField(db_index=True, editable=False)),
('level', models.PositiveIntegerField(db_index=True, editable=False)),
('parent', mptt.fields.TreeForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='children', to='area.Area', verbose_name='\u4e0a\u7ea7\u533a\u57df')),
],
options={
'db_table': 'area',
'verbose_name': '\u7701/\u5e02/\u5730\u533a(\u53bf)',
'verbose_name_plural': '\u7701/\u5e02/\u5730\u533a(\u53bf)',
},
managers=[
('_default_manager', django.db.models.manager.Manager()),
],
),
]
|
[
"dengshilong1988@gmail.com"
] |
dengshilong1988@gmail.com
|
4b3ef732583128e53e0513d15cfa521e048c6f99
|
1a6af3dae05e649f7106b4c487dc48b7bf2ee06d
|
/nlu/streamlit/08_sentence_embedding_manifolds.py
|
688d9f87822f56bbe1a30d5d3a73d2386eca4016
|
[
"Apache-2.0"
] |
permissive
|
JohnSnowLabs/spark-nlp-workshop
|
8911e11a82e2f5f6cd8774ed36753a0215528381
|
25531c9501219ee9874090d02c02985596e6f432
|
refs/heads/master
| 2023-08-31T03:37:10.689274
| 2023-08-30T18:40:50
| 2023-08-30T18:40:50
| 145,453,331
| 947
| 587
|
Apache-2.0
| 2023-09-14T13:01:23
| 2018-08-20T18:06:12
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 362
|
py
|
import nlu
text= """You can visualize any of the 100 + Sentence Embeddings
with 10+ dimension reduction algorithms
and view the results in 3D, 2D, and 1D
which can be colored by various classifier labels!
"""
nlu.enable_streamlit_caching() # Optional caching the models, recommended
nlu.load('embed_sentence.bert').viz_streamlit_sentence_embed_manifold(text)
|
[
"66001253+ahmedlone127@users.noreply.github.com"
] |
66001253+ahmedlone127@users.noreply.github.com
|
5a7772e786b1242b8206f85b33f5ba4a4d87888c
|
66bd051d9d2bbbdec5c7fdd4021bc73c6c7c7294
|
/Mission_to_Mars/Scraper.py
|
e6cc60696e2e21823069cabfc73f62ff2c7dafe1
|
[] |
no_license
|
Crobinson17/web-scraping-challenge
|
90527d8fba9e7e8907b7b102f7d5f7a3e06c6d06
|
2b1addaa2240e73d237970d492f74b6d3f811a03
|
refs/heads/main
| 2023-05-26T14:48:41.619007
| 2021-06-10T05:38:27
| 2021-06-10T05:38:27
| 371,876,919
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,600
|
py
|
from splinter import Browser
from bs4 import BeautifulSoup as soup
import pandas as pd
import requests
import pymongo
import os
from webdriver_manager.chrome import ChromeDriverManager
def init_browser():
executable_path = {'executable_path': ChromeDriverManager().install()}
return Browser('chrome', **executable_path, headless=False)
browser = init_browser()
def mars_news(browser):
# Mars News URL
news_url = "https://mars.nasa.gov/news/"
browser.visit(news_url)
html = browser.html
article = soup(html, 'html.parser')
# Most Recent Mars News
article = soup.find("div", class_="list_text")
news_title = article.find_all('div', class_='content_title')[0].text
news_p = article.find_all('div', class_='article_teaser_body')[0].text
news_date = article.find("div", class_="list_date").text
return news_title, news_p
def featured_image(browser):
# Mars Image
jpl_nasa_url = 'https://www.jpl.nasa.gov'
images_url = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars'
browser.visit(images_url)
html = browser.html
browser.visit(images_url)
#Full Image
full_image = browser.find_by_tag('button')[1]
full_image.click()
html = browser.html
images_soup = soup(html, 'html.parser')
# NOTE: "fancybox-image" is an assumed CSS class for the enlarged image on the JPL page.
img_url_rel = images_soup.find("img", class_="fancybox-image")["src"]
featured_image_url = jpl_nasa_url + img_url_rel
return featured_image_url
def Mars_Facts():
# Mars facts to be scraped, converted into html table
facts_url = 'http://space-facts.com/mars/'
tables = pd.read_html(facts_url)
Facts_df = tables[2]
Facts_df.columns = ["Description", "Value"]
mars_html_table = Facts_df.to_html()
mars_html_table = mars_html_table.replace('\n', '')
return mars_html_table
def hemisphere(browser):
hemisphere_url = "https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars"
browser.visit(hemisphere_url)
hemisphere_image_urls = []
# Get a List of All the Hemisphere
links = browser.find_by_css("a.product-item h3")
for item in range(len(links)):
hemisphere = {}
# Find Element on Each Loop
browser.find_by_css("a.product-item h3")[item].click()
# Find Sample Image Anchor Tag & Extract <href>
sample_element = browser.find_link_by_text("Sample").first
hemisphere["img_url"] = sample_element["href"]
# Get Hemisphere Title
hemisphere["title"] = browser.find_by_css("h2.title").text
# Append Hemisphere Object to List
hemisphere_image_urls.append(hemisphere)
# Navigate Backwards
browser.back()
return hemisphere_image_urls
def scrape_hemisphere(html_text):
hemisphere_soup = soup(html_text, "html.parser")
try:
title_element = hemisphere_soup.find("h2", class_="title").get_text()
sample_element = hemisphere_soup.find("a", text="Sample").get("href")
except AttributeError:
title_element = None
sample_element = None
hemisphere = {
"title": title_element,
"img_url": sample_element
}
return hemisphere
def scrape_all():
browser = init_browser()
news_title, news_paragraph = mars_news(browser)
data = {
"news_title": news_title,
"news_paragraph": news_paragraph,
"featured_image_url": featured_image(browser),
"fact_table": Mars_Facts(),
"hemisphere_images": hemisphere(browser)
}
return data
if __name__ == "__main__":
print(scrape_all())
|
[
"noreply@github.com"
] |
Crobinson17.noreply@github.com
|
d0a158d1a2258f7f6db2c3798c6bd1c3ba01b883
|
42f5eaf16bfd7076cb5a598cf2f239faa575f28b
|
/05-grpc-google-cloud-speech/python/google/ads/googleads/v1/common/ad_asset_pb2.py
|
ccd03074fc01282c95108b7ddb565cf08ab53d4e
|
[] |
no_license
|
jiriklepl/IMW-2019
|
ab0e1c791a794ccf8a6a8d8d4e732c29acee134c
|
921c85d3c8132114ad90db8deb52eb5ddc06c720
|
refs/heads/master
| 2020-08-28T13:29:15.087785
| 2019-12-15T17:12:24
| 2019-12-15T17:12:24
| 217,711,235
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| true
| 8,409
|
py
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads/v1/common/ad_asset.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.ads.googleads.v1.enums import served_asset_field_type_pb2 as google_dot_ads_dot_googleads_dot_v1_dot_enums_dot_served__asset__field__type__pb2
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from google.protobuf import wrappers_pb2 as google_dot_protobuf_dot_wrappers__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads/v1/common/ad_asset.proto',
package='google.ads.googleads.v1.common',
syntax='proto3',
serialized_options=b'\n\"com.google.ads.googleads.v1.commonB\014AdAssetProtoP\001ZDgoogle.golang.org/genproto/googleapis/ads/googleads/v1/common;common\242\002\003GAA\252\002\036Google.Ads.GoogleAds.V1.Common\312\002\036Google\\Ads\\GoogleAds\\V1\\Common\352\002\"Google::Ads::GoogleAds::V1::Common',
serialized_pb=b'\n-google/ads/googleads/v1/common/ad_asset.proto\x12\x1egoogle.ads.googleads.v1.common\x1a;google/ads/googleads/v1/enums/served_asset_field_type.proto\x1a\x1cgoogle/api/annotations.proto\x1a\x1egoogle/protobuf/wrappers.proto\"\x9d\x01\n\x0b\x41\x64TextAsset\x12*\n\x04text\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x62\n\x0cpinned_field\x18\x02 \x01(\x0e\x32L.google.ads.googleads.v1.enums.ServedAssetFieldTypeEnum.ServedAssetFieldType\";\n\x0c\x41\x64ImageAsset\x12+\n\x05\x61sset\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\";\n\x0c\x41\x64VideoAsset\x12+\n\x05\x61sset\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValue\"A\n\x12\x41\x64MediaBundleAsset\x12+\n\x05\x61sset\x18\x01 \x01(\x0b\x32\x1c.google.protobuf.StringValueB\xe7\x01\n\"com.google.ads.googleads.v1.commonB\x0c\x41\x64\x41ssetProtoP\x01ZDgoogle.golang.org/genproto/googleapis/ads/googleads/v1/common;common\xa2\x02\x03GAA\xaa\x02\x1eGoogle.Ads.GoogleAds.V1.Common\xca\x02\x1eGoogle\\Ads\\GoogleAds\\V1\\Common\xea\x02\"Google::Ads::GoogleAds::V1::Commonb\x06proto3'
,
dependencies=[google_dot_ads_dot_googleads_dot_v1_dot_enums_dot_served__asset__field__type__pb2.DESCRIPTOR,google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_protobuf_dot_wrappers__pb2.DESCRIPTOR,])
_ADTEXTASSET = _descriptor.Descriptor(
name='AdTextAsset',
full_name='google.ads.googleads.v1.common.AdTextAsset',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='text', full_name='google.ads.googleads.v1.common.AdTextAsset.text', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='pinned_field', full_name='google.ads.googleads.v1.common.AdTextAsset.pinned_field', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=205,
serialized_end=362,
)
_ADIMAGEASSET = _descriptor.Descriptor(
name='AdImageAsset',
full_name='google.ads.googleads.v1.common.AdImageAsset',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='asset', full_name='google.ads.googleads.v1.common.AdImageAsset.asset', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=364,
serialized_end=423,
)
_ADVIDEOASSET = _descriptor.Descriptor(
name='AdVideoAsset',
full_name='google.ads.googleads.v1.common.AdVideoAsset',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='asset', full_name='google.ads.googleads.v1.common.AdVideoAsset.asset', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=425,
serialized_end=484,
)
_ADMEDIABUNDLEASSET = _descriptor.Descriptor(
name='AdMediaBundleAsset',
full_name='google.ads.googleads.v1.common.AdMediaBundleAsset',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='asset', full_name='google.ads.googleads.v1.common.AdMediaBundleAsset.asset', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=486,
serialized_end=551,
)
_ADTEXTASSET.fields_by_name['text'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_ADTEXTASSET.fields_by_name['pinned_field'].enum_type = google_dot_ads_dot_googleads_dot_v1_dot_enums_dot_served__asset__field__type__pb2._SERVEDASSETFIELDTYPEENUM_SERVEDASSETFIELDTYPE
_ADIMAGEASSET.fields_by_name['asset'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_ADVIDEOASSET.fields_by_name['asset'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_ADMEDIABUNDLEASSET.fields_by_name['asset'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
DESCRIPTOR.message_types_by_name['AdTextAsset'] = _ADTEXTASSET
DESCRIPTOR.message_types_by_name['AdImageAsset'] = _ADIMAGEASSET
DESCRIPTOR.message_types_by_name['AdVideoAsset'] = _ADVIDEOASSET
DESCRIPTOR.message_types_by_name['AdMediaBundleAsset'] = _ADMEDIABUNDLEASSET
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
AdTextAsset = _reflection.GeneratedProtocolMessageType('AdTextAsset', (_message.Message,), {
'DESCRIPTOR' : _ADTEXTASSET,
'__module__' : 'google.ads.googleads.v1.common.ad_asset_pb2'
# @@protoc_insertion_point(class_scope:google.ads.googleads.v1.common.AdTextAsset)
})
_sym_db.RegisterMessage(AdTextAsset)
AdImageAsset = _reflection.GeneratedProtocolMessageType('AdImageAsset', (_message.Message,), {
'DESCRIPTOR' : _ADIMAGEASSET,
'__module__' : 'google.ads.googleads.v1.common.ad_asset_pb2'
# @@protoc_insertion_point(class_scope:google.ads.googleads.v1.common.AdImageAsset)
})
_sym_db.RegisterMessage(AdImageAsset)
AdVideoAsset = _reflection.GeneratedProtocolMessageType('AdVideoAsset', (_message.Message,), {
'DESCRIPTOR' : _ADVIDEOASSET,
'__module__' : 'google.ads.googleads.v1.common.ad_asset_pb2'
# @@protoc_insertion_point(class_scope:google.ads.googleads.v1.common.AdVideoAsset)
})
_sym_db.RegisterMessage(AdVideoAsset)
AdMediaBundleAsset = _reflection.GeneratedProtocolMessageType('AdMediaBundleAsset', (_message.Message,), {
'DESCRIPTOR' : _ADMEDIABUNDLEASSET,
'__module__' : 'google.ads.googleads.v1.common.ad_asset_pb2'
# @@protoc_insertion_point(class_scope:google.ads.googleads.v1.common.AdMediaBundleAsset)
})
_sym_db.RegisterMessage(AdMediaBundleAsset)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
|
[
"jiriklepl@seznam.cz"
] |
jiriklepl@seznam.cz
|
bf6ac9b7698ee3bb2912c54fec4693bfd0c6b505
|
7f57e37222fa7d97e6b9abc592e353f87de36025
|
/print_temp.py
|
c28cbf1fcc22d1c4a0b3e2ea6558ae1ebf8a1d04
|
[] |
no_license
|
asanchez78/DS18B20-temperature-log
|
db4dde404d62965ba59e765cff2e108a1aa73b1d
|
6059076d065038c99d58f9c3458ffbad2889696c
|
refs/heads/master
| 2021-05-09T23:29:09.760489
| 2018-01-24T16:13:14
| 2018-01-24T16:13:14
| 118,789,313
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 909
|
py
|
#!/usr/bin/python3
import os
import glob
import time
import datetime
from decimal import *
os.system('modprobe w1-gpio')
os.system('modprobe w1-therm')
base_dir = '/sys/bus/w1/devices/'
device_folder = glob.glob(base_dir + '28*')[0]
device_file = device_folder + '/w1_slave'
def read_temp_raw():
f = open(device_file, 'r')
lines = f.readlines()
f.close()
return lines
def read_temp():
lines = read_temp_raw()
while lines[0].strip()[-3:] != 'YES':
time.sleep(0.2)
lines = read_temp_raw()
equals_pos = lines[1].find('t=')
if equals_pos != -1:
temp_string = lines[1][equals_pos+2:]
temp_c = float(temp_string) / 1000.0
temp_f = temp_c * 9.0 / 5.0 + 32.0
# return temp_c, temp_f
return temp_f
while True:
temp = read_temp()
theTime = str(datetime.datetime.now())
print(theTime + "\n")
print(temp)
time.sleep(3)
|
[
"anthony@xebix.com"
] |
anthony@xebix.com
|
ce279df001f150b795732de3fb5d8ae56714c416
|
e34f21d6836d6837b6856968162148891c29390c
|
/processing.py
|
78cb92d6e2670f6205a6b8912ea97d5deb5b2b38
|
[] |
no_license
|
MuztaghAta/handwirtten-digit-recognization
|
f7dcd0e7333ec134e835e37295ac2e48bb79157a
|
e83e7522663be453333265c4074e816a4fa0193c
|
refs/heads/master
| 2022-12-14T19:00:13.858079
| 2020-02-01T14:42:59
| 2020-02-01T14:42:59
| 164,705,898
| 0
| 0
| null | 2022-11-21T22:43:49
| 2019-01-08T18:06:12
|
Python
|
UTF-8
|
Python
| false
| false
| 3,290
|
py
|
"""This program pre-processes self-captured images containing handwritten
digits so that the trained NN model can recognize efficiently. The idea is to
convert digits in the original image to digits that have the same format with
MNIST images that were used to train the NN model.
References:
http://www.hackevolve.com/recognize-handwritten-digits-1/
"""
import cv2
import imutils
import numpy as np
import matplotlib.image as mpimg
def processing(path):
"""Process image: resize, grey, remove noise, normalize
"""
# load image
i_orig = mpimg.imread(path)
# resize but keep the aspect ratio
i_resize = imutils.resize(i_orig, width=320)
# convert to grayscale image
i_gray = cv2.cvtColor(i_resize, cv2.COLOR_BGR2GRAY)
# Blackhat to reveal dark regions on a light background
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
i_black_hat = cv2.morphologyEx(i_gray, cv2.MORPH_BLACKHAT, kernel)
# threshold the image to further reduce noise
_, i_thresh = cv2.threshold(i_black_hat, 0, 255,
cv2.THRESH_BINARY | cv2.THRESH_OTSU)
# dilate the image to grow foreground pixels
i_dilate = cv2.dilate(i_thresh, None)
# check processing results
# plt.imshow(i_dilate) # i_resize, i_gray, i_black_hat, i_thresh, i_dilate
# plt.show()
return i_resize, i_dilate
def segmentation(i_resize, i_dilate, flat=False):
"""Segment the processed image and extract the digits in the image
"""
_, contours, _ = cv2.findContours(i_dilate.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
img = i_resize.copy()
digits = []
boxes = []
i = 0
for cnt in contours:
# Check the area of contour, if it is very small ignore it
if cv2.contourArea(cnt) < 10:
continue
# filtered contours are detected
(x, y, w, h) = cv2.boundingRect(cnt)
# take ROI (i.e. digits) of the contour
roi = i_dilate[y:y+h, x:x+w]
# resize to a size of MNIST data without border
roi = cv2.resize(roi, (20, 20))
# add border with black (background) pixels
black = [0, 0, 0]
roi = cv2.copyMakeBorder(roi, 4, 4, 4, 4,
cv2.BORDER_CONSTANT, value=black)
# resize to a size compatible with MNIST data
roi = cv2.resize(roi, (28, 28))
# save (before normalization) the digits as image files for checking
cv2.imwrite('roi' + str(i) + '.png', roi)
# normalize the image since the MNIST data were normalized
roi = roi / 255.0
if flat:
# reshape to a flat vector for fully connected network model
roi = np.reshape(roi, (784,))
else:
# reshape to a array for CNN model
roi = np.reshape(roi, (28, 28, 1))
# mark the digits on the image
cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), 2)
digits.append(roi) # or roi_array
boxes.append((x, y, w, h))
i += 1
# check if segmentation correct
# cv2.imshow('Image', img)
# cv2.waitKey(0)
digits = np.array(digits)
return digits, boxes
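# --- Hedged usage sketch (not part of the original module) ---
# Shows how processing() and segmentation() fit together. The image path below is an
# assumed example, and the prediction step is left as a comment because the trained
# model lives elsewhere in this project.
if __name__ == "__main__":
    resized, dilated = processing("my_digits.jpg")  # assumed sample image path
    digit_arrays, bounding_boxes = segmentation(resized, dilated, flat=False)
    print("Extracted {} digit candidates".format(len(digit_arrays)))
    # predictions = model.predict(digit_arrays)  # e.g. a CNN expecting (n, 28, 28, 1)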
|
[
"noreply@github.com"
] |
MuztaghAta.noreply@github.com
|
cdda453cc0a86c9b79620fc9b24b4a61db406d90
|
03922080543f7ef49ee594c7cd774ca7567cc57e
|
/algoritmos/exercicio.py
|
92df7cf8459adc8cc591944293ed06255ece0c0d
|
[] |
no_license
|
Elaineshield/aula-cc
|
ae3219a645a47b89ffcc9aedac7505ba0eeed497
|
8fb782d643a6f5c1b92c677718231ceec6ab1fec
|
refs/heads/main
| 2023-04-09T17:44:03.094971
| 2021-04-23T00:31:04
| 2021-04-23T00:31:04
| 360,719,046
| 0
| 0
| null | 2021-04-23T00:37:26
| 2021-04-23T00:37:26
| null |
UTF-8
|
Python
| false
| false
| 16
|
py
|
# qualquer coisa
|
[
"rainhadeles2000brokenbones@gmail.com"
] |
rainhadeles2000brokenbones@gmail.com
|
7e858475947aaf17e9d62c60f18dcbf653d58043
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/verbs/_wallowing.py
|
9ea44930d85d69937dbebfd471045b0321491801
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 242
|
py
|
from xai.brain.wordbase.verbs._wallow import _WALLOW
# class header
class _WALLOWING(_WALLOW, ):
def __init__(self,):
_WALLOW.__init__(self)
self.name = "WALLOWING"
self.specie = 'verbs'
self.basic = "wallow"
self.jsondata = {}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
dee3659b5ed35d15e0d69828401fdbc5fa5646f3
|
ff90afd185dddadd654d5b12ff385ccb8305aff4
|
/QuotesBot.py
|
30caa8610838c50b2d1da555c2e271b1971dfe02
|
[] |
no_license
|
MaicoLeberle/QuotesBot
|
ffee622d1f1d42fa1c05e1de7874515f790d003d
|
66541a71c85b04537400f02aa9e9cac5fbb03c78
|
refs/heads/main
| 2023-06-01T04:14:45.746652
| 2021-06-11T14:59:55
| 2021-06-11T14:59:55
| 375,852,454
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,331
|
py
|
import logging
import sys
from random import randint
import pandas
import datetime
import telegram
from telegram import Update, ForceReply, ParseMode
from telegram.ext import \
Updater, CommandHandler, MessageHandler, Filters, CallbackContext
# Enable logging, mostly for debugging
logging.basicConfig(format="%(asctime)s - %(message)s", level=logging.INFO)
logger = logging.getLogger(__name__)
""" Send our greetings to the user. """
def start_command(update: Update, context: CallbackContext) -> None:
user = update.effective_user
update.message.reply_markdown_v2(fr'Hi {user.mention_markdown_v2()}\!')
update.message.reply_text("Type /help to list commands.",\
parse_mode=ParseMode.MARKDOWN)
""" Clear all scheduled quotes. """
def finish_command(update: Update, context: CallbackContext) -> None:
for job in context.job_queue.jobs():
job.schedule_removal()
update.message.reply_text(
"The schedule has been *cleared*.", parse_mode=ParseMode.MARKDOWN)
""" List all commands and their specifications. """
def help_command(update: Update, context: CallbackContext) -> None:
msg = ""
msg += "*/start* - welcomes you.\n"
msg += "*/now* - prints the current date and time " \
"(in *YYYY:MM:DD - HH:MM:SS* format).\n"
msg += "*/random_quote* - immediately sends a quote.\n"
msg += "*/set_period* <PARAMETER> - schedule periodic quotes. " \
" Supported format for PARAMETER: *HH:MM:SS*.\n"
msg += "*/once* <PARAMETER> - schedules a one-time-only quote."\
"Supported format for PARAMETER: *HH:MM:SS*.\n"
msg += "*/set_quotes* <PARAMETER> - used to resume or pause the scheduler"\
" (note: pausing the scheduler does not remove the scheduled quotes)."\
"PARAMETER must be either *on* or *off*.\n"
msg += "*/finish* clears the schedule, removing all pending quotes.\n"
msg += "\n*/help* prints this message.\n"
update.message.reply_text(msg, parse_mode=ParseMode.MARKDOWN)
""" Non-command messages will be echoed. """
def echo(update: Update, context: CallbackContext) -> None:
update.message.reply_text(update.message.text)
""" Send date and time in YYYY:MM:DD - HH:MM:SS format. """
def now_command(update: Update, context: CallbackContext) -> None:
update.message.reply_text(\
datetime.datetime.now().strftime("%Y.%m.%d - %H:%M:%S"))
""" Enable or disable all scheduled jobs. """
def set_quotes_command(update: Update, context: CallbackContext) -> None:
params = context.args
if len(params) != 1:
update.message.reply_text(
"The set_quotes command takes a parameter, either \"on\" or "
"\"off\".")
else:
toggle = params[0].upper().strip()
if toggle == "ON":
for job in context.job_queue.jobs():
job.enabled = True
context.job_queue.start()
update.message.reply_text(\
"Schedule is *active*.", parse_mode=ParseMode.MARKDOWN)
elif toggle == "OFF":
for job in context.job_queue.jobs():
job.enabled = False
update.message.reply_text(\
"The whole schedule has been *paused*.",\
parse_mode=ParseMode.MARKDOWN)
else:
update.message.reply_text("The set_quotes command takes a \
parameter, either \"on\" or \"off\".")
""" Schedule a new job to be issued periodically. """
def set_period_command(update: Update, context: CallbackContext) -> None:
period_param = context.args
if len(period_param) != 1:
update.message.reply_text(\
"The set\_period command takes a parameter."\
" Supported format: *HH:MM:SS*, where 00 <= *HH* <= 99, "\
"00 <= *MM* <= 59 and 00 <= *SS* <= 59.",\
parse_mode=ParseMode.MARKDOWN)
else:
context.job_queue.start()
# Split the time parameter in hours, minutes and seconds
param = period_param[0].strip().split(':',3)
if not valid_time(param):
update.message.reply_text(\
"The set_period command takes a " \
"parameter representing the time between quotes. " \
"Supported format: *HH:MM:SS*, where 00 <= *HH* <= 99, " \
"00 <= *MM* <= 59 and 00 <= *SS* <= 59.",\
parse_mode=ParseMode.MARKDOWN)
else:
hours = int(param[0])
minutes = int(param[1])
seconds = int(param[2])
# Finally, we schedule the job.
period = datetime.timedelta(hours=hours,minutes=minutes,\
seconds=seconds)
context.job_queue.run_repeating(\
random_quote, period, context=update.message.chat_id)
update.message.reply_text(\
"A random quote will be issued every *" + \
period_param[0].strip() + "*.", parse_mode=ParseMode.MARKDOWN)
""" Schedule a one-time-only quote. """
def once_command(update: Update, context: CallbackContext) -> None:
period_param = context.args
if len(period_param) != 1:
update.message.reply_text(\
"The */once* command takes a time parameter. Supported format: "\
"*HH:MM:SS*, where 00 <= *HH* <= 99, 00 <= *MM* <= 59 and "\
"00 <= *SS* <= 59.", parse_mode=ParseMode.MARKDOWN)
else:
context.job_queue.start()
# Split the time parameter in hours, minutes and seconds
param = period_param[0].strip().split(':',3)
if not valid_time(param):
update.message.reply_text(\
"The once command takes a time parameter."\
" Supported format: HH:MM:SS, where 00 <= HH <= 99, "\
"00 <= MM <= 59 and 00 <= SS <= 59.")
else:
hours = int(param[0])
minutes = int(param[1])
seconds = int(param[2])
# Finally, we schedule the job.
period = datetime.timedelta(\
hours=hours, minutes=minutes,seconds=seconds)
context.job_queue.run_once(\
random_quote, period, context=update.message.chat_id)
update.message.reply_text(\
"A random quote will be issued in *" + \
period_param[0].strip() + "*.", parse_mode=ParseMode.MARKDOWN)
""" Auxiliary function for once_command and set_period_command. """
def random_quote(context: CallbackContext) -> None:
# Open the JSON file containing the quotes, and convert it to a Pandas
# DataFrame (iterable).
data_frame = pandas.read_json("quotes.json")
quote_number = randint(0, len(data_frame) - 1)
msg = '\"' + str(data_frame["quoteText"][quote_number]) + '\" - ' + \
str(data_frame["quoteAuthor"][quote_number])
context.bot.send_message(chat_id=context.job.context,text=msg)
""" Check that the input parameter has the right format. """
def valid_time(param: list) -> bool:
# First, check that we have exactly three elements (hours, minutes, seconds).
if not (len(param) == 3):
return False
# Second, check if these elements are made of digits.
if not (param[0].isdigit() and param[1].isdigit() and param[2].isdigit()):
return False
# Next, cast these elements to integers, and check that they belong in the
# appropriate range.
hours = int(param[0])
minutes = int(param[1])
seconds = int(param[2])
if 0 > hours or hours > 99 or \
0 > minutes or minutes > 59 \
or 0 > seconds or seconds > 59:
return False
return True
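# Illustrative behaviour of valid_time after splitting "HH:MM:SS" on ':'
# (comment only, not part of the original bot):
#     valid_time(["01", "30", "00"])  -> True
#     valid_time(["01", "75", "00"])  -> False  # minutes out of range
#     valid_time(["1h", "30", "00"])  -> False  # non-digit component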
""" Send a random quote. """
def random_quote_command(update: Update, context: CallbackContext) -> None:
params = context.args
if len(params) != 0:
update.message.reply_text("Discarding the provided parameters - the "
"random_quote command takes none.")
# Open the JSON file containing the quotes, and convert it to a Pandas
# DataFrame (iterable).
data_frame = pandas.read_json("quotes.json")
quote_number = randint(0, len(data_frame) - 1)
msg = '\"' + str(data_frame["quoteText"][quote_number]) + '\" - ' + \
str(data_frame["quoteAuthor"][quote_number])
update.message.reply_text(msg)
def main() -> None:
if len(sys.argv) != 2:
print("Only supported usage: BaseBot.py <TOKEN>")
sys.exit()
# Start the bot by creating the Updater and passing it the input token.
token = sys.argv[1]
try:
updater = Updater(token)
except telegram.error.InvalidToken:
print(\
"The given token does not correspond to any working Telegram bot.")
sys.exit()
# Get the dispatcher and register the handlers corresponding to the
# commands recognized by the bot.
dispatcher = updater.dispatcher
dispatcher.add_handler(
CommandHandler("start", start_command))
dispatcher.add_handler(
CommandHandler("finish", finish_command))
dispatcher.add_handler(
CommandHandler("help", help_command))
dispatcher.add_handler(
CommandHandler("now", now_command))
dispatcher.add_handler(
CommandHandler("set_quotes", set_quotes_command))
dispatcher.add_handler(
CommandHandler("set_period", set_period_command))
dispatcher.add_handler(
CommandHandler("once", once_command))
dispatcher.add_handler(
CommandHandler("random_quote", random_quote_command))
# Echo all non-command messages.
dispatcher.add_handler(
MessageHandler(Filters.text & ~Filters.command, echo))
# Start the Bot.
updater.start_polling()
# Run the bot until pressing Ctrl-C.
updater.idle()
if __name__ == '__main__':
main()
|
[
"maico.leberle@gmail.com"
] |
maico.leberle@gmail.com
|
14ec9fc3a79b66b285606d350d30ad3ab04e350f
|
64d89e2c0468ce6a7a55a6c3415b7fce4f049ea1
|
/Taller de Estructuras de Control Selectivas/Ejercicio_11.py
|
1006690172dd281146550084445d15a8a26d613a
|
[
"MIT"
] |
permissive
|
JRobayo99/Talleres-de-algotimos
|
16c544725de5aff3b6c6c804d8958837b7acc13f
|
684ad3a4a21ada2f8347a6bae6eaa845d5abb2dc
|
refs/heads/main
| 2023-07-31T07:29:08.939938
| 2021-09-19T21:14:41
| 2021-09-19T21:14:41
| 392,040,364
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 266
|
py
|
tf=int(input("Ingrese la tempratura en grados Fahrenheit: "))
if(tf > 85):
print("Natación")
elif(70 < tf and tf<=85):
print("Tenis")
elif(32 < tf and tf<=70):
    print("Golf")
elif(10 < tf and tf<=32):
print("Esquí")
elif(tf<=10):
print("Marcha")
|
[
"jramire23664@universidadean.edu.co"
] |
jramire23664@universidadean.edu.co
|
07da54663e83619ebf814394a3a05b0043f02146
|
0fa00ecf2dd671515dc001d4b14049ec6a0c1f1c
|
/custom_components/spook/services.py
|
827880ce6115fd6c994621ecc4451b0afda60ebd
|
[
"Unlicense"
] |
permissive
|
bacco007/HomeAssistantConfig
|
d91a5368344f50abbea881bd1e6dfc57a0e456ca
|
8548d9999ddd54f13d6a307e013abcb8c897a74e
|
refs/heads/master
| 2023-08-30T07:07:33.571959
| 2023-08-29T20:00:00
| 2023-08-29T20:00:00
| 230,585,631
| 98
| 16
|
Unlicense
| 2023-09-09T08:28:39
| 2019-12-28T09:05:02
|
Python
|
UTF-8
|
Python
| false
| false
| 10,941
|
py
|
"""Spook - Not your homie."""
from __future__ import annotations
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
import importlib
from pathlib import Path
from typing import TYPE_CHECKING, Any, final
import voluptuous as vol
from homeassistant.core import (
HomeAssistant,
Service,
ServiceCall,
ServiceResponse,
SupportsResponse,
callback,
)
from homeassistant.helpers.entity_component import DATA_INSTANCES, EntityComponent
from homeassistant.helpers.entity_platform import DATA_ENTITY_PLATFORM
from homeassistant.helpers.service import (
SERVICE_DESCRIPTION_CACHE,
_load_services_file,
async_register_admin_service,
async_set_service_schema,
)
from homeassistant.loader import async_get_integration
from .const import DOMAIN, LOGGER
if TYPE_CHECKING:
from homeassistant.helpers.entity import Entity
class AbstractSpookServiceBase(ABC):
"""Abstract base class to hold a Spook service."""
hass: HomeAssistant
domain: str
service: str
schema: dict[str | vol.Marker, Any] | None = None
def __init__(self, hass: HomeAssistant) -> None:
"""Initialize the service."""
self.hass = hass
@abstractmethod
@callback
def async_register(self) -> None:
"""Handle the service call."""
raise NotImplementedError
@final
@callback
def async_unregister(self) -> None:
"""Unregister the service from Home Assistant."""
LOGGER.debug(
"Unregistering Spook service: %s.%s",
self.domain,
self.service,
)
self.hass.services.async_remove(self.domain, self.service)
class ReplaceExistingService(AbstractSpookServiceBase):
"""Service replaces/may replace an existing service."""
overriden_service: Service | None = None
class AbstractSpookService(AbstractSpookServiceBase):
"""Abstract class to hold a Spook service."""
supports_response: SupportsResponse = SupportsResponse.NONE
@final
@callback
def async_register(self) -> None:
"""Register the service with Home Assistant."""
# Only register the service if the domain is the spook integration
# or if the target integration is loaded.
if self.domain != DOMAIN and self.domain not in self.hass.config.components:
LOGGER.debug(
"Not registering Spook %s.%s service, %s is not loaded",
self.domain,
self.service,
self.domain,
            )
            return
LOGGER.debug(
"Registering Spook service: %s.%s",
self.domain,
self.service,
)
self.hass.services.async_register(
domain=self.domain,
service=self.service,
service_func=self.async_handle_service,
schema=vol.Schema(self.schema) if self.schema else None,
supports_response=self.supports_response,
)
@abstractmethod
async def async_handle_service(self, call: ServiceCall) -> ServiceResponse:
"""Handle the service call."""
raise NotImplementedError
class AbstractSpookAdminService(AbstractSpookServiceBase):
"""Abstract class to hold a Spook admin service."""
@final
@callback
def async_register(self) -> None:
"""Register the service with Home Assistant."""
if self.domain != DOMAIN and self.domain not in self.hass.config.components:
LOGGER.debug(
"Not registering Spook %s.%s admin service, %s is not loaded",
self.domain,
self.service,
self.domain,
)
return
LOGGER.debug(
"Registering Spook admin service: %s.%s",
self.domain,
self.service,
)
async_register_admin_service(
hass=self.hass,
domain=self.domain,
service=self.service,
service_func=self.async_handle_service,
schema=vol.Schema(self.schema) if self.schema else None,
)
@abstractmethod
async def async_handle_service(self, call: ServiceCall) -> None:
"""Handle the service call."""
raise NotImplementedError
class AbstractSpookEntityService(AbstractSpookServiceBase):
"""Abstract class to hold a Spook entity service."""
platform: str
required_features: list[int] | None = None
supports_response: SupportsResponse = SupportsResponse.NONE
@final
@callback
def async_register(self) -> None:
"""Register the service with Home Assistant."""
LOGGER.debug(
"Registering Spook entity service: %s.%s for platform %s",
self.domain,
self.service,
self.platform,
)
if not (
platform := next(
platform
for platform in self.hass.data[DATA_ENTITY_PLATFORM][self.domain]
if platform.domain == self.platform
)
):
msg = (
f"Could not find platform {self.platform} for domain "
f"{self.domain} to register service: "
f"{self.domain}.{self.service}",
)
raise RuntimeError(msg)
platform.async_register_entity_service(
name=self.service,
func=self.async_handle_service,
schema=self.schema,
required_features=self.required_features,
supports_response=self.supports_response,
)
@abstractmethod
async def async_handle_service(
self,
entity: Entity,
call: ServiceCall,
) -> ServiceResponse:
"""Handle the service call."""
raise NotImplementedError
class AbstractSpookEntityComponentService(AbstractSpookServiceBase):
"""Abstract class to hold a Spook entity component service."""
required_features: list[int] | None = None
supports_response: SupportsResponse = SupportsResponse.NONE
@final
@callback
def async_register(self) -> None:
"""Register the service with Home Assistant."""
LOGGER.debug(
"Registering Spook entity component service: %s.%s",
self.domain,
self.service,
)
if self.domain not in self.hass.data[DATA_INSTANCES]:
msg = (
f"Could not find entity component {self.domain} to register "
f"service: {self.domain}.{self.service}",
)
raise RuntimeError(msg)
component: EntityComponent[Entity] = self.hass.data[DATA_INSTANCES][self.domain]
component.async_register_entity_service(
name=self.service,
func=self.async_handle_service,
schema=self.schema,
required_features=self.required_features,
supports_response=self.supports_response,
)
@abstractmethod
async def async_handle_service(
self,
entity: Entity,
call: ServiceCall,
) -> ServiceResponse:
"""Handle the service call."""
raise NotImplementedError
@dataclass
class SpookServiceManager:
"""Class to manage Spook services."""
hass: HomeAssistant
_services: set[AbstractSpookService] = field(default_factory=set)
_service_schemas: dict[str, Any] = field(default_factory=dict)
def __post_init__(self) -> None:
"""Post initialization."""
LOGGER.debug("Spook service manager initialized")
async def async_setup(self) -> None:
"""Set up the Spook services."""
LOGGER.debug("Setting up Spook services")
integration = await async_get_integration(self.hass, DOMAIN)
self._service_schemas = await self.hass.async_add_executor_job(
_load_services_file,
self.hass,
integration,
)
# Load all services
for module_file in Path(__file__).parent.rglob("ectoplasms/*/services/*.py"):
if module_file.name == "__init__.py":
continue
module_path = str(module_file.relative_to(Path(__file__).parent))[
:-3
].replace("/", ".")
module = importlib.import_module(f".{module_path}", __package__)
service = module.SpookService(self.hass)
if isinstance(
service,
ReplaceExistingService,
) and self.hass.services.has_service(service.domain, service.service):
LOGGER.debug(
"Unregistering service that will be overriden service: %s.%s",
service.domain,
service.service,
)
# pylint: disable=protected-access
service.overriden_service = (
self.hass.services._services[service.domain] # noqa: SLF001
).pop(service.service)
self.async_register_service(service)
@callback
def async_register_service(self, service: AbstractSpookService) -> None:
"""Register a Spook service."""
service.async_register()
self._services.add(service)
# Override service description with Spook's if the service is not
# for the Spook integration.
if service.domain != DOMAIN and (
service_schema := self._service_schemas.get(
f"{service.domain}_{service.service}",
)
):
LOGGER.debug(
"Injecting Spook service schema for: %s.%s",
service.domain,
service.service,
)
async_set_service_schema(
self.hass,
domain=service.domain,
service=service.service,
schema=service_schema,
)
@callback
def async_on_unload(self) -> None:
"""Tear down the Spook services."""
LOGGER.debug("Tearing down Spook services")
for service in self._services:
LOGGER.debug(
"Unregistering service: %s.%s",
service.domain,
service.service,
)
service.async_unregister()
if (
isinstance(service, ReplaceExistingService)
and service.overriden_service
):
LOGGER.debug(
"Restoring service that was overriden previously: %s.%s",
service.domain,
service.service,
)
# pylint: disable-next=protected-access
self.hass.services._services.setdefault( # noqa: SLF001
service.domain,
{},
)[service.service] = service.overriden_service
# Flush service description schema cache
self.hass.data.pop(SERVICE_DESCRIPTION_CACHE, None)
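# A minimal sketch (not part of Spook itself) of how a concrete service module
# could build on the abstract classes above; the domain and service names are
# purely illustrative.
#
# class SpookService(AbstractSpookAdminService):
#     """Hypothetical admin service: homeassistant.example_reload."""
#
#     domain = "homeassistant"    # hypothetical target domain
#     service = "example_reload"  # hypothetical service name
#     schema = {}                 # takes no parameters
#
#     async def async_handle_service(self, call: ServiceCall) -> None:
#         LOGGER.debug("example_reload called with data: %s", call.data)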
|
[
"thomas@thomasbaxter.info"
] |
thomas@thomasbaxter.info
|
812dd0904f388324462562aab2d50b93971aaeed
|
d243e017ad9248b7852359797453b8078ab055df
|
/bin/extract_bookings_single_attachment.py
|
a3953b7a46f7d6c469f2dd3e432ae5b904e893f3
|
[] |
no_license
|
capitolmuckrakr/lvmpd_emails
|
5d436907b694df5fde5472a970ca5e81cfbd6b97
|
ff0d0752819ed03a61954dac4a405c7f85d41a1a
|
refs/heads/master
| 2022-12-09T07:41:17.232160
| 2020-09-01T13:58:41
| 2020-09-01T13:58:41
| 109,310,617
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,796
|
py
|
from __future__ import print_function
from bs4 import BeautifulSoup
import re, pandas as pd, sys
def extract_booking(booking):
booking_page = open(booking)
soup = BeautifulSoup(booking_page,'html.parser')
table = soup.find_all('table')[-1]
table = [row for row in table][1]
ws_fixer = re.compile(r"\s+")
cols = [ws_fixer.sub("_",col.text) for col in [row for row in table.find_all('tr')][0].find_all('td')]
if cols[0] == '':
cols[0] = 'Row'
rows = table.find_all('tr')[1:]
t1 = pd.DataFrame(columns=cols, index = range(1,len(rows)+1))
row_marker = 0
for row in rows:
column_marker = 0
columns = row.find_all('td')
for column in columns:
t1.iat[row_marker,column_marker] = column.get_text()
column_marker+=1
row_marker+=1
columns_to_fill = ['Booking_Date','Arrest_Date','Time', 'ID_Number', 'First_Name','S', 'R', 'Age', 'Charge_Date','Charges', 'Type', 'Event_Number', 'Arrest_Officer','St','Last_Name', 'Middle_Name']# some values are not repeated across rows and need to be filled in
for col_name in columns_to_fill:
col = ''
previous = ''
new_col = []
if col_name in t1.columns:
for i,x in t1[col_name].iteritems():
if x == '':
if not col_name in ['Last_Name', 'Middle_Name']:
x = previous
else:
if not previous == '':
if t1.loc[i,'First_Name'] == t1.loc[i - 1,'First_Name']:
x = previous
col = x
previous = col
new_col.append(col)
t1[col_name] = new_col
t1['Time'] = pd.to_datetime(t1['Booking_Date'] + ' ' + t1['Time'])
t1['Booking_Date'] = pd.to_datetime(t1['Booking_Date'])
for option_col in ['Arrest_Date','Charge_Date']:
if option_col in t1.columns:
t1[option_col] = pd.to_datetime(t1[option_col])
t1.Charges.replace(ws_fixer," ",regex=True,inplace=True)
t1.Last_Name.replace(ws_fixer," ",regex=True,inplace=True)
t1.First_Name.replace(ws_fixer," ",regex=True,inplace=True)
t1.Middle_Name.replace(ws_fixer," ",regex=True,inplace=True)
if list(t1.Row.astype(int)) == list(t1.index):
del t1['Row']
col_names = {c:c.lower() for c in list(t1.columns)}
col_names['ID_Number'] = 'id'
col_names['S']='sex'
col_names['R']='race'
col_names['St']='state_'
col_names['Time']='booking_time'
t1.rename(columns=col_names,inplace=True)
t1['filename'] = booking.split('/')[-1]
return t1
if __name__ == '__main__':
booking_file = sys.argv[1]
booking = extract_booking(booking_file)
print(booking.to_csv())
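# Illustrative invocation (the file name is hypothetical):
#   python extract_bookings_single_attachment.py booking_report.html > bookings.csv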
|
[
"alex@capitolmuckraker.com"
] |
alex@capitolmuckraker.com
|
413efcc1e21b7e2d3f6ce520a53c0c72361966b3
|
bc06438b13fac1bf1f5e67689ab384b840fd1d66
|
/src/Repository.py
|
7b8ffafa04c81ff3fb35c40cd1335f393cacc9b2
|
[] |
no_license
|
vanpana/Vertebral-Check
|
2a68691687481e346abafd5d4e01632c2da5a1e5
|
20dd07c17380b368ad03cd5b0ca7ce1f3f7912c3
|
refs/heads/master
| 2020-03-14T14:01:30.462122
| 2018-05-03T11:16:32
| 2018-05-03T11:16:32
| 131,644,910
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 598
|
py
|
from random import shuffle
class Repository:
def __init__(self, filename):
self.data = {}
self.__load_data_from_file(filename)
def __load_data_from_file(self, filename):
with open(filename, 'r') as file:
for line in file:
line = line.strip("\n").split(",")
if not line[-1] in self.data:
self.data[line[-1]] = []
self.data[line[-1]].append([float(x) for x in line[:-1]])
def shuffle(self):
# Shuffles the data set
[shuffle(self.data[key]) for key in self.data]
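# Illustrative usage (the file name is hypothetical). Each line of the input is
# expected to hold comma-separated numeric features followed by a class label:
#   repo = Repository("vertebral_column.data")
#   repo.shuffle()
#   print({label: len(rows) for label, rows in repo.data.items()})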
|
[
"van.panaite@gmail.com"
] |
van.panaite@gmail.com
|
723b1623f4a7e4d3638578844dbe53a0e285fdaa
|
7da8b92ff8b128ae2cdb0c36af5a15c211ff070b
|
/toolutils/readconfig.py
|
acc14cda87b770896eba34dcfc9ecccca0a7cd82
|
[] |
no_license
|
XIAOJIEZO/auto_web
|
a30d6f8377dad93d04b8a953a4c5cfd8cc46bed9
|
4019597d1b455bf48cde418abe7d0159b6831550
|
refs/heads/main
| 2023-06-07T02:15:24.270390
| 2021-06-21T15:12:56
| 2021-06-21T15:12:56
| 372,169,117
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 286
|
py
|
import yaml
class ReadConfig(object):
def __init__(self):
with open(r'config/GlobalGonfig.yaml', 'r', encoding='utf8') as f:
self.__result = f.read()
def get_test_host(self):
return yaml.load(self.__result, Loader=yaml.FullLoader)["HOST"]["TEST"]
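# Illustrative usage, assuming config/GlobalGonfig.yaml holds something like:
#   HOST:
#     TEST: http://test.example.com
#
#   reader = ReadConfig()
#   host = reader.get_test_host()  # -> "http://test.example.com"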
|
[
"Liujie26018"
] |
Liujie26018
|
452c687b93acd18841ab408b2da5e5db7a0a0179
|
50a8f856343923b972f549d88ff3253c66e91229
|
/looppay300.py
|
c5a19484266b3e95bcca99d4ee52ac1a284029a9
|
[] |
no_license
|
yudhaprimadiansyah/MyRandomPythonScript
|
d34ec525a6f7180d0804340595310b5a57d840d6
|
82e1300362bcdb26b1cece2652f773b9a695da8e
|
refs/heads/master
| 2021-03-28T14:18:02.719209
| 2020-03-17T03:21:37
| 2020-03-17T03:21:37
| 247,869,486
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 206
|
py
|
file = open("payload.py", 'w')
data = ""
string = "0123456789ABCDEF"
payload = ""
for i in string:
for a in string:
payload += "(('"+a+"'*3)+('"+i+"'*4)+'\\x"+i+a+"')+"
file.write(payload)
file.close()
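# For reference, the first fragment written to payload.py (i = '0', a = '0') is
#   (('0'*3)+('0'*4)+'\x00')+
# i.e. each fragment concatenates a tripled character, a quadrupled character and
# the corresponding escaped byte.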
|
[
"yudhaprimadiansyah@gmail.com"
] |
yudhaprimadiansyah@gmail.com
|
5ccf71e79cc611640bbb4fcc4e86577cadc47f1f
|
0cbc02dd7d1efbe61de04dcf1c6eccb6496bf074
|
/month01/day16_adv/test/test01_generator.py
|
db25cdc6cf2900234371d72c41e67431d2e088a1
|
[] |
no_license
|
fsym-fs/Python_AID
|
0b1755c15e20b214940041e81bedb2d5ec99e3f9
|
f806bb02cdb1670cfbea6e57846abddf3972b73b
|
refs/heads/master
| 2021-03-20T06:57:45.441245
| 2020-05-27T14:13:45
| 2020-05-27T14:13:45
| 247,187,064
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 830
|
py
|
"""
Define a custom my_enumeration function modeled on the built-in enumerate.
Set the elements of list01 whose length is greater than 2 to the empty string.
"""
# list01 = ["唐僧", "猪八戒", "悟空"]
# dict01 = {"唐僧": 101, "猪八戒": 102, "悟空": 103}
# for i in enumerate(list01):
# print(i)
#
#
# def my_enumeration(a):
# index = 0
# for item in a:
# yield (index, item)
# index += 1
#
#
# for i in my_enumeration(list01):
# print(i)
# for i, k in my_enumeration(dict01.items()):
# print(i, k)
"自定义my_zip函数"
list01 = [
[1,2,3],
[4,5,6],
[7,8,9]
]
for i in zip(*list01):
print(i)
def my_zip(*args):
    for i in range(len(args)):
        li = []  # reset for each yielded tuple so the output matches zip()
        for j in range(len(args[i])):
            li.append(args[j][i])
        yield tuple(li)
for i in my_zip(*list01):
print(i)
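# With list01 above, both zip(*list01) and my_zip(*list01) yield
# (1, 4, 7), (2, 5, 8) and (3, 6, 9).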
|
[
"1085414029@qq.com"
] |
1085414029@qq.com
|
48cbdbd8b37d6f37e77826c455bf1a4d78b8583f
|
76b6c2b41ba11ca0ca86d3ae9baa5f189a9c6534
|
/lib/__init__.py
|
f2a59c20c32f71fcefe96e1e24b91e5df0c843c9
|
[] |
no_license
|
startagain2016/J2ExpSuite
|
8faea7ac777604c9a221299f9577ac0bf4261fd2
|
232d08fe1e274e328bbb8f3e982040f7ba4b4a72
|
refs/heads/master
| 2023-03-22T00:02:22.015037
| 2020-06-23T09:10:44
| 2020-06-23T09:10:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 61
|
py
|
from lib.Color import Vcolors
from lib.Urldeal import urldeal
|
[
"noreply@github.com"
] |
startagain2016.noreply@github.com
|
f76e1d26afef9c3db74abb87ed94e348cbea7826
|
0c497f3053a44ea12b6209b9343871ce5f967ea4
|
/Simulator/power_consum_measure.py
|
4e5f95416f6a0a7e5671872f70e16becfc6aed3f
|
[] |
no_license
|
9511batayan/Eclipse
|
fcab1aa9883348de51bd3a9016019f85a9ac71b6
|
564f4da014dcb36009a436baa9f03ca82c45699e
|
refs/heads/master
| 2022-08-17T16:57:26.448704
| 2018-08-07T16:10:14
| 2018-08-07T16:10:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 397
|
py
|
'''
Created on 2018/08/03
@author: 皓司
'''
from MobileSensorNode import MobileSensorNode
import math
def main():
robot=MobileSensorNode(5,5)
x=robot.getx()
y=robot.gety()
    # The original loop header ("for i :") was incomplete; sweeping i from the
    # node's x coordinate in steps of 2.5 until it reaches 10 is an assumption.
    i = x
    while True:
        dis = math.sqrt(i*i+y*y)
        output = robot.current_com_power_consum(dis)
        i += 2.5
        if i >= 10:
            break
    print(output)
if __name__ == '__main__':
main()
|
[
"14ec043@ms.dendai.ac.jp"
] |
14ec043@ms.dendai.ac.jp
|
fc6f327544eb21819c633b2eb0612a5fb4929879
|
62b51f02fb209cf036fdbcf00523efdaf318bb2d
|
/wuaiwow/models/__init__.py
|
98a8f6eac2f210d4cc56d4b16d37561d3ee5a95a
|
[
"BSD-2-Clause"
] |
permissive
|
NlxF/wuaiwow
|
58f916ac1855a671908350206f1bb1b1ed1395c9
|
6e4ddd4a2bd98f78aa53f05314ca02f72b579053
|
refs/heads/master
| 2020-04-23T22:30:03.784811
| 2019-04-10T12:03:49
| 2019-04-10T12:03:49
| 171,503,902
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 772
|
py
|
# coding:utf-8
from users import (User, UserIp, UserInvitation, Characters, Permission, Role, Message,
UserMessage, UserOnline, PermissionRole, permission_has_role, role_is_admin)
from info import Donate, GuildInfo, Agreement
from prompt import AlivePrompt, LevelPrompt, RacePrompt, JobPrompt, GenderPrompt, MoneyPrompt
from news import News, Sidebar
from tasks import TaskResult
__all__ = ['User', 'UserIp', 'UserOnline', 'UserInvitation', 'Characters',
'GuildInfo', 'Donate', 'Agreement', 'AlivePrompt', 'LevelPrompt', 'RacePrompt', 'JobPrompt',
'GenderPrompt', 'MoneyPrompt', 'News', 'Sidebar', 'Permission', 'Role', 'Message', 'UserMessage',
'PermissionRole', 'permission_has_role', 'role_is_admin','TaskResult']
|
[
"luxiaofei911@gmail.com"
] |
luxiaofei911@gmail.com
|
55f45f1d5d803ee3ba85a0e03a7b60a7cc2865db
|
0a2f6950f662a441e967af8009826c20444d16f9
|
/code_20201031/scrapy_20200529/scrapy_20200529/items.py
|
9b5944b512150ac85167ed6ef9e17a8d4fbf9c5b
|
[] |
no_license
|
nxhuy-github/code_20201031
|
5827028bc4c3842342055ba4224d8581cc56b770
|
b034827a293d9acfa1376e5f43e77e417c190985
|
refs/heads/main
| 2023-01-04T23:57:26.031054
| 2020-10-31T18:37:21
| 2020-10-31T18:37:21
| 308,937,305
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 295
|
py
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy
class Scrapy20200529Item(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
pass
|
[
"nxhuy2611@gmail.com"
] |
nxhuy2611@gmail.com
|
dcb6a27e3447c048454e9444d9290b87e271567a
|
e632216f8f6de621269a008ccef78b1a1d76e89e
|
/first/venv/bin/pip3.7
|
c7278d9940bc527b25a53a94316d0f6cec694a2d
|
[] |
no_license
|
Giuseppe-Meo/python-pi-example
|
d250114c156e6271c6994fda0a243efe33adec84
|
d84e921a5d100015ae8016d186edecff7f0477ee
|
refs/heads/master
| 2022-11-05T14:07:00.760047
| 2020-06-23T20:23:56
| 2020-06-23T20:23:56
| 274,478,206
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 257
|
7
|
#!/home/giusepps/PycharmProjects/first/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"gm17417@essex.ac.uk"
] |
gm17417@essex.ac.uk
|
15fee65ff908675945f192902b93b79cf9fb3874
|
587dda9745b55127e29fa7588dc501ce21c761a9
|
/scraper/findhome/pipelines.py
|
a4eda7de20f2d16ac859827922aa6f06f01e653b
|
[] |
no_license
|
tayfun/findhome
|
e7dd842c0e7683fa214a99b012fd6216db948d97
|
a0bd8f13f51ea043dfe50f838419846d75a9d2ee
|
refs/heads/master
| 2021-01-10T02:52:28.950045
| 2015-10-11T11:12:26
| 2015-10-11T11:12:26
| 44,049,094
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 288
|
py
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
class FindhomePipeline(object):
def process_item(self, item, spider):
return item
|
[
"totayfun@gmail.com"
] |
totayfun@gmail.com
|
99e7b2d501dbb03573e08ed3e6e37b95c7453772
|
bb375781e942087f7dae0a1c4d1da6584c5e6af3
|
/settings.py
|
f9854a3588fea27e284b56250709fcd8e32770ac
|
[] |
no_license
|
magmax/DjangoBasic
|
ed3a93a16188cb63e18339f71ec3961239f736ad
|
99f028664bc91457722b7e2eb439bec3750688e8
|
refs/heads/master
| 2021-01-23T13:28:48.766257
| 2011-08-15T07:44:51
| 2011-08-15T07:44:51
| 2,208,662
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,049
|
py
|
# Django settings for myblog project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'database.db', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'wsplw$5-^1vzvqnnw44u+kke#uhz%i5ut&(sgu6)aor8hr$-o)'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'myblog.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
# 'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
|
[
"miguelangel.garcia@gmail.com"
] |
miguelangel.garcia@gmail.com
|
07bda099dee874a5f2c802b1c82fb169bd649c23
|
7794e80641a611a77ae9d7d42bd2009edb1e8a47
|
/algoritmos_fabio01_parte02/f1_p2_q7_dias_horas_minutos.py
|
fd0de9b0a300711937af1f545b0fad305f6e4799
|
[] |
no_license
|
rogeriosilva-ifpi/ifpi_ads_algoritmos_2020
|
bec767ff82f4a9444d552f22017f26b2915e6357
|
bd194b8fcefbdf4b66245e216e546632e1db279e
|
refs/heads/master
| 2022-12-09T15:28:45.730262
| 2020-09-03T12:51:14
| 2020-09-03T12:51:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 346
|
py
|
# Read an integer number of minutes, then compute and print how many days, hours and minutes it corresponds to.
minuto = int(input('Digite o número em minutos: '))
dias = minuto // 1440
horas = minuto % 1440 // 60
minutos = minuto % 60
print(f'{minuto} minutos equivalem a {dias} dia(s) {horas} horas(s) e {minutos} minuto(s)')
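# Worked example: for minuto = 1500 the script prints
# "1500 minutos equivalem a 1 dia(s) 1 horas(s) e 0 minuto(s)",
# since 1500 // 1440 = 1, (1500 % 1440) // 60 = 1 and 1500 % 60 = 0.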
|
[
"noreply@github.com"
] |
rogeriosilva-ifpi.noreply@github.com
|
f2cd458719a1b2a6c45725aba38870be9cf8ff30
|
37181d99530026ddb3a58e2864a55096bf9003a5
|
/DataMining/HW1/Amazon/Utility.py
|
3c691e1753b5ff147492fe19c4b118bcbda7e946
|
[] |
no_license
|
ktapsyman/Nccucs_work
|
de0ddaf93a5063efbe19f2832a97e91e4618725e
|
4a09b08851a29f848555fa8a881e72e64f8f0dc3
|
refs/heads/master
| 2018-09-16T04:17:50.786908
| 2018-06-20T01:56:46
| 2018-06-20T01:56:46
| 105,001,391
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 144
|
py
|
import pandas as pd
def ReadTrainingData(Filepath):
return pd.read_csv(Filepath)
def ReadTestingData(Filepath):
return pd.read_csv(Filepath)
|
[
"ktapsyman@gmail.com"
] |
ktapsyman@gmail.com
|
eb6622db95220cd2835e245ac0a2705598911330
|
1a25d9db773325e0e9e53385208b629fe697c635
|
/Push/BrowserComponent.py
|
9ceb5cd1c004c55b2d0109b92e89939ea0128066
|
[] |
no_license
|
konszpic/AbletonLive9_RemoteScripts
|
d01f1b99c4d81cff48728c4e68e04a6ab170c40f
|
b80d4a4c4add6357f9c807dcdec47b24a2a708f8
|
refs/heads/master
| 2021-01-24T20:01:41.888125
| 2013-11-21T08:18:01
| 2013-11-21T08:18:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 33,575
|
py
|
#Embedded file name: /Users/versonator/Jenkins/live/Projects/AppLive/Resources/MIDI Remote Scripts/Push/BrowserComponent.py
from __future__ import with_statement
from functools import partial
from itertools import izip, chain, imap
import string
import re
import consts
import Live
FilterType = Live.Browser.FilterType
DeviceType = Live.Device.DeviceType
from _Framework import Task
from _Framework.CompoundComponent import CompoundComponent
from _Framework.Util import first, find_if, index_if, clamp, in_range, BooleanContext, nop, const, lazy_attribute, memoize
from _Framework.DisplayDataSource import DisplayDataSource
from _Framework.SubjectSlot import subject_slot, subject_slot_group, SlotManager, Subject
from ScrollableList import ActionListItem, ActionList, ListComponent, DefaultItemFormatter
class VirtualBrowserItem(object):
"""
Quacks like a Live.Browser.BrowserItem
"""
source = ''
is_device = False
is_loadable = False
def __init__(self, name = '', children_query = nop, is_folder = False):
self.name = name
self.is_folder = is_folder
self.children_query = children_query
@lazy_attribute
def children(self):
return self.children_query()
@property
def is_selected(self):
return find_if(lambda x: x.is_selected, self.children)
def __str__(self):
return self.name
class BrowserListItem(ActionListItem):
"""
List item representing a browser element
"""
def __str__(self):
import os
return os.path.splitext(self.content.name)[0] if self.content else ''
def action(self):
if self.container and self.container.browser:
self.container.browser.load_item(self.content)
@property
def supports_action(self):
return self.container and self.container.browser and self.content != None and self.content.is_loadable
class BrowserList(ActionList):
"""
Component for representing lists of browser items
"""
browser = None
item_type = BrowserListItem
def __init__(self, browser = None, *a, **k):
super(BrowserList, self).__init__(*a, **k)
self.browser = browser
class BrowserModel(Subject, SlotManager):
"""
A browser model provides the data to a browser component as a
sequence of BrowserLists.
The BrowserComponent will use equality to discard equivalent
models and prevent unnecessary updating, override it when
neccesary.
"""
__subject_events__ = ('content_lists', 'selection_updated')
empty_list_messages = []
def __init__(self, browser = None, *a, **k):
super(BrowserModel, self).__init__(*a, **k)
self._browser = browser
def can_be_exchanged(self, model):
return isinstance(model, BrowserModel)
def exchange_model(self, model):
"""
Tries to replace itself with the settings of a given
model. Returns true if it succeeds or false if the current
model can not represent the same set of values.
"""
if self.can_be_exchanged(model):
self._browser = model._browser
return True
return False
@property
def content_lists(self):
"""
Returns a set of ActionLists that hold the hierarchy of
content for the browser.
"""
return NotImplementedError
def update_content(self):
"""
Called when the browser contents have changed.
"""
raise NotImplementedError
def update_selection(self):
"""
Called when the browser selection might have changed.
"""
raise NotImplementedError
@property
def browser(self):
return self._browser
def make_content_list(self):
return BrowserList(browser=self._browser)
class EmptyBrowserModel(BrowserModel):
"""
A browser model that never returns anything, to be used for
hotswap targets that do not make sense in the L9C.
"""
empty_list_messages = ['Nothing to browse']
@property
def content_lists(self):
return tuple()
def update_content(self):
self.notify_content_lists()
def update_selection(self):
pass
def can_be_exchanged(self, model):
return isinstance(model, EmptyBrowserModel) and super(EmptyBrowserModel, self).can_be_exchanged(model)
class FullBrowserModel(BrowserModel):
"""
A browser model that provides an abstract hierarchical query model
for simpler implementation. Note that this can result in endless
nesting, which the BrowserComponent does not support so far.
It always provides at least two columns.
"""
empty_list_messages = ['<no tags>',
'<no devices>',
'<no presets>',
'<no presets>']
def __init__(self, *a, **k):
super(FullBrowserModel, self).__init__(*a, **k)
self._contents = []
self._num_contents = 0
self._push_content_list()
self._inside_item_activated_notification = BooleanContext()
def get_root_children(self):
"""
Query for the initial items.
"""
return self.browser.tags
def get_children(self, item, level):
"""
Query for children of node.
"""
return item.children
@property
def content_lists(self):
return map(first, self._contents[:self._num_contents])
def can_be_exchanged(self, model):
return isinstance(model, FullBrowserModel) and super(FullBrowserModel, self).can_be_exchanged(model)
def update_content(self):
root, _ = self._contents[0]
root.assign_items(self.get_root_children())
self.update_selection()
def update_selection(self):
target = self._browser.hotswap_target
last_seleced_list_index = None
if self._browser.hotswap_target != None:
if isinstance(target, Live.DrumPad.DrumPad) and (not target.chains or not target.chains[0].devices):
for content_list in self.content_lists:
content_list.select_item_index_with_offset(0, 0)
else:
list_index = 0
while list_index < self._num_contents:
content_list, _ = self._contents[list_index]
items = content_list.items
index = index_if(lambda x: x.content.is_selected, items)
if in_range(index, 0, len(items)):
content_list.select_item_index_with_offset(index, 2)
last_seleced_list_index = list_index
list_index += 1
if last_seleced_list_index != None:
self.notify_selection_updated(last_seleced_list_index)
def _push_content_list(self):
if self._num_contents < len(self._contents):
self._num_contents += 1
content = self._contents[self._num_contents - 1]
else:
            assert self._num_contents == len(self._contents)
content = self.make_content_list()
level = len(self._contents)
slot = self.register_slot(content, partial(self._on_item_activated, level), 'item_activated')
self._contents.append((content, slot))
self._num_contents = len(self._contents)
return content
def _pop_content_list(self):
        assert self._num_contents > 1
self._num_contents -= 1
def _fit_content_lists(self, requested_lists):
"""
Ensures that there are exactly 'request_lists' number of
content lists. Returns whether a change was needed or not.
"""
        assert requested_lists > 0
if requested_lists != self._num_contents:
while requested_lists < self._num_contents:
self._pop_content_list()
while requested_lists > self._num_contents:
self._push_content_list()
def _finalize_content_lists_change(self):
"""
After a series of push/pop/fit operations, this makes sure
that we only have as many content lists referenced as
necessary.
"""
while self._num_contents < len(self._contents):
_, slot = self._contents.pop()
self.disconnect_disconnectable(slot)
        assert self._num_contents == len(self._contents)
def _on_item_activated(self, level):
old_num_contents = self._num_contents
with self._inside_item_activated_notification():
contents, _ = self._contents[level]
selected = contents.selected_item
if selected != None:
is_folder = selected.content.is_folder
children = self.get_children(selected.content, level) if selected != None else []
                if children or is_folder or level < 1:
                    self._fit_content_lists(level + 2)
child_contents, _ = self._contents[level + 1]
child_contents.assign_items(children)
else:
self._fit_content_lists(level + 1)
if not self._inside_item_activated_notification:
self._finalize_content_lists_change()
if old_num_contents != self._num_contents:
self.notify_content_lists()
class BrowserQuery(object):
"""
Base class for browser queries. Is capable of creating a subfolder for wrapping
all results of the query.
"""
def __init__(self, subfolder = None, *a, **k):
self.subfolder = subfolder
def __call__(self, browser):
if self.subfolder:
return [VirtualBrowserItem(name=self.subfolder, children_query=partial(self.query, browser), is_folder=True)]
else:
return self.query(browser)
def query(self, browser):
raise NotImplementedError
class PathBrowserQuery(BrowserQuery):
"""
Includes the element for the given path.
"""
def __init__(self, path = tuple(), *a, **k):
super(PathBrowserQuery, self).__init__(*a, **k)
        assert path
self.path = path
def query(self, browser):
return self._find_item(self.path, browser.tags, browser) or []
def _find_item(self, path, items = None, browser = None):
name = path[0]
elem = find_if(lambda x: x.name == name, items)
if elem:
return [elem] if len(path) == 1 else self._find_item(path[1:], elem.children)
class TagBrowserQuery(BrowserQuery):
"""
Query that merges the contents of the specified subtrees of
the browser. It will first merge the contents of all the paths
specified in the 'include' list. A path is either the name of a
tag or a list with the name of the tag/folders that describe the
path. Then it drops the items that are in the 'exclude' list.
"""
def __init__(self, include = tuple(), exclude = tuple(), *a, **k):
super(TagBrowserQuery, self).__init__(*a, **k)
self.include = include
self.exclude = exclude
def query(self, browser):
return filter(lambda item: item.name not in self.exclude, sum(map(partial(self._extract_path, browser=browser), self.include), tuple()))
def _extract_path(self, path, items = None, browser = None):
if isinstance(path, (str, unicode)):
path = [path]
if items is None:
items = browser.tags
if path:
name = path[0]
elem = find_if(lambda x: x.name == name, items)
if elem:
items = self._extract_path(path[1:], elem.children)
return tuple(items)
class SourceBrowserQuery(TagBrowserQuery):
"""
Like TagBrowserQuery, but adds a top-level source selection.
"""
def __init__(self, *a, **k):
super(SourceBrowserQuery, self).__init__(*a, **k)
def query(self, browser):
root = super(SourceBrowserQuery, self).query(browser)
groups = dict()
for item in root:
groups.setdefault(item.source, []).append(item)
return map(lambda (k, g): VirtualBrowserItem(name=k, children_query=const(g)), sorted(groups.items(), key=first))
class PlacesBrowserQuery(BrowserQuery):
"""
Query that fetches all places of the browser
"""
def __init__(self, *a, **k):
super(PlacesBrowserQuery, self).__init__(*a, **k)
def query(self, browser):
return tuple(browser.packs) + tuple(browser.places)
class QueryingBrowserModel(FullBrowserModel):
"""
Browser model that takes query objects to build up the model hierarchy
"""
empty_list_messages = ['<no devices>',
'<no presets>',
'<no presets>',
'<no presets>']
def __init__(self, queries = [], *a, **k):
super(QueryingBrowserModel, self).__init__(*a, **k)
self.queries = queries
def get_root_children(self):
browser = self.browser
return chain(*imap(lambda q: q(browser), self.queries))
def can_be_exchanged(self, model):
return isinstance(model, QueryingBrowserModel) and super(QueryingBrowserModel, self).can_be_exchanged(model)
def exchange_model(self, model):
if super(QueryingBrowserModel, self).exchange_model(model):
self.queries = model.queries
return True
PLACES_LABEL = 'Places'
def make_midi_effect_browser_model(browser):
midi_effects = TagBrowserQuery(include=['MIDI Effects'])
max = TagBrowserQuery(include=[['Max for Live', 'Max MIDI Effect']], subfolder='Max for Live')
places = PlacesBrowserQuery(subfolder=PLACES_LABEL)
return QueryingBrowserModel(browser=browser, queries=[midi_effects, max, places])
def make_audio_effect_browser_model(browser):
audio_effects = TagBrowserQuery(include=['Audio Effects'])
max = TagBrowserQuery(include=[['Max for Live', 'Max Audio Effect']], subfolder='Max for Live')
places = PlacesBrowserQuery(subfolder=PLACES_LABEL)
return QueryingBrowserModel(browser=browser, queries=[audio_effects, max, places])
def make_instruments_browser_model(browser):
instrument_rack = PathBrowserQuery(path=['Instruments', 'Instrument Rack'])
drums = SourceBrowserQuery(include=['Drums'], exclude=['Drum Hits'], subfolder='Drum Rack')
instruments = TagBrowserQuery(include=['Instruments'], exclude=['Drum Rack', 'Instrument Rack'])
drum_hits = TagBrowserQuery(include=[['Drums', 'Drum Hits']], subfolder='Drum Hits')
max = TagBrowserQuery(include=[['Max for Live', 'Max Instrument']], subfolder='Max for Live')
places = PlacesBrowserQuery(subfolder=PLACES_LABEL)
return QueryingBrowserModel(browser=browser, queries=[instrument_rack,
drums,
instruments,
max,
drum_hits,
places])
def make_drum_pad_browser_model(browser):
drums = TagBrowserQuery(include=[['Drums', 'Drum Hits']])
samples = SourceBrowserQuery(include=['Samples'], subfolder='Samples')
instruments = TagBrowserQuery(include=['Instruments'])
max = TagBrowserQuery(include=[['Max for Live', 'Max Instrument']], subfolder='Max for Live')
places = PlacesBrowserQuery(subfolder=PLACES_LABEL)
return QueryingBrowserModel(browser=browser, queries=[drums,
samples,
instruments,
max,
places])
def make_fallback_browser_model(browser):
return EmptyBrowserModel(browser=browser)
def filter_type_for_hotswap_target(target):
"""
Returns the appropriate browser filter type for a given hotswap target.
"""
if isinstance(target, Live.Device.Device):
if target.type == DeviceType.instrument:
return FilterType.instrument_hotswap
elif target.type == DeviceType.audio_effect:
return FilterType.audio_effect_hotswap
elif target.type == DeviceType.midi_effect:
return FilterType.midi_effect_hotswap
else:
            return FilterType.disabled
elif isinstance(target, Live.DrumPad.DrumPad):
return FilterType.drum_pad_hotswap
elif isinstance(target, Live.Chain.Chain):
return filter_type_for_hotswap_target(target.canonical_parent) if target else FilterType.disabled
return FilterType.disabled
def make_browser_model(browser, filter_type = None):
"""
Factory that returns an appropriate browser model depending on the
browser filter type and hotswap target.
"""
factories = {FilterType.instrument_hotswap: make_instruments_browser_model,
FilterType.drum_pad_hotswap: make_drum_pad_browser_model,
FilterType.audio_effect_hotswap: make_audio_effect_browser_model,
FilterType.midi_effect_hotswap: make_midi_effect_browser_model}
if filter_type == None:
filter_type = filter_type_for_browser(browser)
return factories.get(filter_type, make_fallback_browser_model)(browser)
def filter_type_for_browser(browser):
filter_type = filter_type_for_hotswap_target(browser.hotswap_target)
if filter_type == FilterType.disabled:
filter_type = browser.filter_type
return filter_type
def make_stem_cleaner(stem):
""" Returns a function that can be used to remove the stem from a sentence """
if stem[-1] == 's':
stem = stem[:-1]
if len(stem) > 2:
return _memoized_stem_cleaner(stem)
return nop
@memoize
def _memoized_stem_cleaner(stem):
ellipsis = consts.CHAR_ELLIPSIS
rule1 = re.compile(u'([a-z])' + stem + u's?([ A-Z])')
rule2 = re.compile(u'[' + ellipsis + ' \\-]' + stem + u's?([\\-' + ellipsis + u' A-Z])')
rule3 = re.compile(u'' + stem + u's?$')
def cleaner(short_name):
short_name = ' ' + short_name
short_name = rule1.sub(u'\\1' + ellipsis + u'\\2', short_name)
short_name = rule2.sub(ellipsis + u'\\1', short_name)
short_name = rule3.sub(ellipsis, short_name)
return short_name.strip(' ')
return cleaner
def split_stem(sentence):
""" Splits camel cased sentence into words """
sentence = re.sub('([a-z])([A-Z])', u'\\1 \\2', sentence)
return sentence.split()
_stripper_double_spaces = re.compile(u' [\\- ]*')
_stripper_double_ellipsis = re.compile(consts.CHAR_ELLIPSIS + u'+')
_stripper_space_ellipsis = re.compile(u'[\\- ]?' + consts.CHAR_ELLIPSIS + u'[\\- ]?')
def full_strip(string):
""" Strip string for double spaces and dashes """
string = _stripper_double_spaces.sub(' ', string)
string = _stripper_double_ellipsis.sub(consts.CHAR_ELLIPSIS, string)
string = _stripper_space_ellipsis.sub(consts.CHAR_ELLIPSIS, string)
return string.strip(' ')
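# Rough illustration of the helpers above (assuming consts.CHAR_ELLIPSIS is a
# single ellipsis character): split_stem('MaxAudioEffect') returns
# ['Max', 'Audio', 'Effect'], and make_stem_cleaner('Effect')('Audio Effect Rack')
# replaces the stem with an ellipsis, which full_strip then tidies up by
# collapsing doubled spaces and ellipses.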
class BrowserComponent(CompoundComponent):
"""
Component for controlling the Live library browser. It has 4
browsing columns that are controlled by encoders and state
buttons. The contents of these lists are provided by a browser
model -- see BrowserModel and derivatives.
"""
__subject_events__ = ('load_item',)
NUM_COLUMNS = 4
COLUMN_SIZE = 4
def __init__(self, browser = None, *a, **k):
super(BrowserComponent, self).__init__(*a, **k)
self._browser = browser or self.application().browser
self._browser_model = make_fallback_browser_model(self._browser)
num_data_sources = self.NUM_COLUMNS * self.COLUMN_SIZE
self._data_sources = map(DisplayDataSource, ('',) * num_data_sources)
self._last_loaded_item = None
self._default_item_formatter = DefaultItemFormatter()
self._list_components = self.register_components(*[ ListComponent() for _ in xrange(self.NUM_COLUMNS) ])
for i, component in enumerate(self._list_components):
component.do_trigger_action = lambda item: self._do_load_item(item)
component.last_action_item = lambda : self._last_loaded_item
component.item_formatter = partial(self._item_formatter, i)
self._select_buttons = []
self._state_buttons = []
self._encoder_controls = []
self._enter_button = None
self._exit_button = None
self._shift_button = None
self._on_list_item_action.replace_subjects(self._list_components)
self._on_hotswap_target_changed.subject = self._browser
self._on_filter_type_changed.subject = self._browser
self._on_browser_full_refresh.subject = self._browser
self._scroll_offset = 0
self._max_scroll_offset = 0
self._max_hierarchy = 0
self._last_filter_type = None
self._skip_next_preselection = False
self._browser_model_dirty = True
self._on_content_lists_changed()
def set_display_line1(self, display):
self.set_display_line_with_index(display, 0)
def set_display_line2(self, display):
self.set_display_line_with_index(display, 1)
def set_display_line3(self, display):
self.set_display_line_with_index(display, 2)
def set_display_line4(self, display):
self.set_display_line_with_index(display, 3)
def set_enter_button(self, button):
self._enter_button = button
self._on_enter_value.subject = button
self._update_navigation_button_state()
def set_exit_button(self, button):
self._exit_button = button
self._on_exit_value.subject = button
self._update_navigation_button_state()
def set_display_line_with_index(self, display, index):
if display:
sources = self._data_sources[index::self.COLUMN_SIZE]
display.set_data_sources(sources)
def set_select_buttons(self, buttons):
for button in buttons or []:
if button:
button.reset()
self._on_select_matrix_value.subject = buttons or None
self._select_buttons = buttons
buttons = buttons or (None, None, None, None, None, None, None, None)
for component, button in izip(self._list_components, buttons[1::2]):
self._set_button_if_enabled(component, 'action_button', button)
for component, button in izip(self._list_components, buttons[::2]):
if self._shift_button and self._shift_button.is_pressed():
self._set_button_if_enabled(component, 'prev_page_button', button)
self._set_button_if_enabled(component, 'select_prev_button', None)
else:
self._set_button_if_enabled(component, 'prev_page_button', None)
self._set_button_if_enabled(component, 'select_prev_button', button)
def set_state_buttons(self, buttons):
for button in buttons or []:
if button:
button.reset()
self._on_state_matrix_value.subject = buttons or None
self._state_buttons = buttons
buttons = buttons or (None, None, None, None, None, None, None, None)
for component, button in izip(self._list_components, buttons[::2]):
if self._shift_button and self._shift_button.is_pressed():
self._set_button_if_enabled(component, 'next_page_button', button)
self._set_button_if_enabled(component, 'select_next_button', None)
else:
self._set_button_if_enabled(component, 'next_page_button', None)
self._set_button_if_enabled(component, 'select_next_button', button)
for button in buttons[1::2]:
if button and self.is_enabled():
button.set_light('DefaultButton.Disabled')
def set_shift_button(self, button):
self._shift_button = button
self._on_shift_button.subject = button
@subject_slot('value')
def _on_shift_button(self, value):
self.set_select_buttons(self._select_buttons)
self.set_state_buttons(self._state_buttons)
def _set_button_if_enabled(self, component, name, button):
setter = getattr(component, 'set_' + name)
if component.is_enabled(explicit=True):
setter(button)
else:
setter(None)
if button and self.is_enabled():
button.set_light('DefaultButton.Disabled')
def set_encoder_controls(self, encoder_controls):
if encoder_controls:
num_active_lists = len(self._browser_model.content_lists) - self._scroll_offset
num_assignable_lists = min(num_active_lists, len(encoder_controls) / 2)
index = 0
for component in self._list_components[:num_assignable_lists - 1]:
component.set_encoder_controls(encoder_controls[index:index + 2])
index += 2
self._list_components[num_assignable_lists - 1].set_encoder_controls(encoder_controls[index:])
else:
for component in self._list_components:
component.set_encoder_controls([])
self._encoder_controls = encoder_controls
def update(self):
if self.is_enabled():
self.set_state_buttons(self._state_buttons)
self.set_select_buttons(self._select_buttons)
self._update_browser_model()
self._update_navigation_button_state()
def reset_load_memory(self):
self._update_load_memory(None)
def _do_load_item(self, item):
self.do_load_item(item)
self._update_load_memory(item)
self._skip_next_preselection = True
def reset_skip_next_preselection():
self._skip_next_preselection = False
self._tasks.add(Task.run(reset_skip_next_preselection))
def _update_load_memory(self, item):
self._last_loaded_item = item
for component in self._list_components:
component.update()
def do_load_item(self, item):
item.action()
self.notify_load_item(item.content)
def back_to_top(self):
self._set_scroll_offset(0)
def _set_scroll_offset(self, offset):
self._scroll_offset = offset
self._on_content_lists_changed()
scrollable_list = self._list_components[-1].scrollable_list
if scrollable_list:
scrollable_list.request_notify_item_activated()
def _update_navigation_button_state(self):
if self._exit_button:
self._exit_button.set_light(self._scroll_offset > 0)
if self._enter_button:
self._enter_button.set_light(self._scroll_offset < self._max_scroll_offset)
def _shorten_item_name(self, shortening_limit, list_index, item_name):
"""
Creates the name of an item shortened by removing words from the parents name
"""
def is_short_enough(item_name):
return len(item_name) <= 9
content_lists = self._browser_model.content_lists
parent_lists = reversed(content_lists[max(0, list_index - 3):list_index])
for content_list in parent_lists:
if is_short_enough(item_name):
break
parent_name = unicode(content_list.selected_item)
stems = split_stem(parent_name)
for stem in stems:
short_name = make_stem_cleaner(stem)(item_name)
short_name = full_strip(short_name)
item_name = short_name if len(short_name) > 4 else item_name
if is_short_enough(item_name):
break
return item_name[:-1] if len(item_name) >= shortening_limit and item_name[-1] == consts.CHAR_ELLIPSIS else item_name
def _item_formatter(self, depth, index, item, action_in_progress):
display_string = ''
separator_length = len(self._data_sources[self.COLUMN_SIZE * depth].separator)
shortening_limit = 16 - separator_length
if item:
item_name = 'Loading...' if action_in_progress else self._shorten_item_name(shortening_limit, depth + self._scroll_offset, unicode(item))
display_string = consts.CHAR_SELECT if item and item.is_selected else ' '
display_string += item_name
if depth == len(self._list_components) - 1 and item.is_selected and self._scroll_offset < self._max_hierarchy:
display_string = string.ljust(display_string, consts.DISPLAY_LENGTH / 4 - 1)
shortening_limit += 1
display_string = display_string[:shortening_limit] + consts.CHAR_ARROW_RIGHT
if depth == 0 and self._scroll_offset > 0:
prefix = consts.CHAR_ARROW_LEFT if index == 0 else ' '
display_string = prefix + display_string
return display_string[:shortening_limit + 1]
@subject_slot('value')
def _on_enter_value(self, value):
if value:
self._set_scroll_offset(min(self._max_scroll_offset, self._scroll_offset + 1))
@subject_slot('value')
def _on_exit_value(self, value):
if value:
self._set_scroll_offset(max(0, self._scroll_offset - 1))
@subject_slot('hotswap_target')
def _on_hotswap_target_changed(self):
if not self._skip_next_preselection:
self._set_scroll_offset(0)
self._update_browser_model()
@subject_slot('filter_type')
def _on_filter_type_changed(self):
self._update_browser_model()
@subject_slot('full_refresh')
def _on_browser_full_refresh(self):
self._browser_model_dirty = True
def _update_browser_model(self):
if self.is_enabled():
self._do_update_browser_model()
def _do_update_browser_model(self):
filter_type = filter_type_for_browser(self._browser)
if filter_type != self._last_filter_type:
self._last_filter_type = filter_type
new_model = make_browser_model(self._browser, filter_type)
if self._browser_model and self._browser_model.can_be_exchanged(new_model) and new_model.can_be_exchanged(self._browser_model):
self._browser_model.exchange_model(new_model)
new_model.disconnect()
else:
self.disconnect_disconnectable(self._browser_model)
self._browser_model = self.register_slot_manager(new_model)
self._on_content_lists_changed.subject = self._browser_model
self._on_selection_updated.subject = self._browser_model
for contents in self._browser_model.content_lists:
contents.selected_item_index = 0
self._browser_model.update_content()
elif self._browser_model_dirty:
self._browser_model.update_content()
elif not self._skip_next_preselection:
self._browser_model.update_selection()
self._skip_next_preselection = False
self._browser_model_dirty = False
@subject_slot_group('item_action')
def _on_list_item_action(self, item, _):
self.notify_load_item(item.content)
@subject_slot('selection_updated')
def _on_selection_updated(self, index):
more_content_available = len(self._browser_model.content_lists) > self.NUM_COLUMNS + self._scroll_offset
required_scroll_offset = index - (self.NUM_COLUMNS - 1)
if more_content_available and required_scroll_offset > self._scroll_offset:
self._set_scroll_offset(self._scroll_offset + 1)
self._browser_model.update_selection()
@subject_slot('content_lists')
def _on_content_lists_changed(self):
components = self._list_components
contents = self._browser_model.content_lists[self._scroll_offset:]
messages = self._browser_model.empty_list_messages
scroll_depth = len(self._browser_model.content_lists) - len(self._list_components)
self._max_scroll_offset = max(0, scroll_depth + 2)
self._max_hierarchy = max(0, scroll_depth)
for component, content, message in map(None, components, contents, messages):
if component != None:
component.scrollable_list = content
component.empty_list_message = message
active_lists = len(contents)
num_head = clamp(active_lists - 1, 0, self.NUM_COLUMNS - 1)
head = components[:num_head]
last = components[num_head:]
def set_data_sources_with_separator(component, sources, separator):
for source in sources:
source.separator = separator
component.set_data_sources(sources)
component.set_enabled(True)
for idx, component in enumerate(head):
offset = idx * self.COLUMN_SIZE
sources = self._data_sources[offset:offset + self.COLUMN_SIZE]
set_data_sources_with_separator(component, sources, '|')
if last:
offset = num_head * self.COLUMN_SIZE
scrollable_list = last[0].scrollable_list
if scrollable_list and find_if(lambda item: item.content.is_folder, scrollable_list.items):
sources = self._data_sources[offset:offset + self.COLUMN_SIZE]
map(DisplayDataSource.clear, self._data_sources[offset + self.COLUMN_SIZE:])
else:
sources = self._data_sources[offset:]
set_data_sources_with_separator(last[0], sources, '')
for component in last[1:]:
component.set_enabled(False)
self.set_select_buttons(self._select_buttons)
self.set_state_buttons(self._state_buttons)
self.set_encoder_controls(self._encoder_controls)
self._update_navigation_button_state()
@subject_slot('value')
def _on_select_matrix_value(self, value, *_):
pass
@subject_slot('value')
def _on_state_matrix_value(self, value, *_):
pass
@subject_slot('value')
def _on_encoder_matrix_value(self, value, *_):
pass

authors: ["julien@julienbayle.net"]
author_id: julien@julienbayle.net

blob_id: 32c67e306f3164cb62c7f999d0694d5a526f9726
directory_id: ab2c7c93cac411cb7419e1813987c101931d3d5b
path: /boundary_checking/test_Range4.py
content_id: 2c9ae0d77bb1d3925112218795dd487982cbf492
detected_licenses: []
license_type: no_license
repo_name: vash-hsu/python
snapshot_id: 574ca2a4ef76e8a36f489010403c8e699ddf5b88
revision_id: ad3de1dafbe61d1180ab3e2283f758de3d4edd68
branch_name: refs/heads/master
visit_date: 2021-01-21T21:43:26.649602
revision_date: 2016-05-11T16:21:22
committer_date: 2016-05-11T16:21:22
github_id: 18483788
star_events_count: 1
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 6450
extension: py
content:
#!/usr/bin/env python
import unittest
from Range4 import Range
from Range4 import div_mod
def log_print(label, message):
print("%s: %s" % (label, message))
def log_title(message):
log_print('CASE', message)
def log_desc(message):
log_print('DESC', message)
class BoundaryTest4Range(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.my_start, cls.my_stop, cls.my_step = (3, 19, 3)
def setUp(self):
print('')
title = '.'.join(self.id().split('.')[1:])
log_title(title)
# def tearDown(self):
# print
def test_hardcode_happy_path_index(self):
my_start, my_stop, my_step = (3, 17, 4)
common_example = Range(my_start, my_stop, my_step)
'''
[0, 1, 2, 3] = [3, 7, 11, 15]
'''
self.assertEqual(common_example[0], 3)
self.assertEqual(common_example[1], 7)
self.assertEqual(common_example[2], 11)
self.assertEqual(common_example[3], 15)
with self.assertRaises(IndexError):
example = common_example[4]
def test_hardcode_happy_path_slice(self):
my_start, my_stop, my_step = (3, 17, 4)
common_example = Range(my_start, my_stop, my_step)
'''
[0, 1, 2, 3] = [3, 7, 11, 15]
[0:2] = [3, 7]
[2:4] = [11, 15]
[3:5] = [15, ]
'''
self.assertEqual(common_example[0:2], [3, 7])
self.assertEqual(common_example[2:4], [11, 15])
self.assertEqual(common_example[3:5], [15, ])
def test_RAT_happy_path_index(self):
my_start, my_stop, my_step = (self.my_start, self.my_stop, self.my_step)
common_example = Range(my_start, my_stop, my_step)
for i in xrange(1, (my_stop-my_start)/my_step):
result = common_example[i]
expected = i*my_step + my_start
msg = ['to validate', str(result), 'whose offset', "[%d]" % i,
'exists in', str(list(common_example)),
"equivalent to %d" % expected]
log_desc(' '.join(msg))
self.assertIn(result, common_example)
self.assertEqual(result, expected)
def test_RAT_happy_path_slice(self):
my_start, my_stop, my_step = (self.my_start, self.my_stop, self.my_step)
common_example = Range(my_start, my_stop, my_step)
for i in xrange(1, (my_stop-my_start)/my_step):
result = common_example[i:i+2]
for j in result:
msg = ['to validate', str(j), 'from', str(result),
'whose slice', "[%d:%d]" % (i, i+2),
'exists in', str(list(common_example))]
log_desc(' '.join(msg))
self.assertIn(j, common_example,
"%s, whose slice [%d:%d] is not defined in %s" %
(str(j), i, i+2, str(list(common_example)))
)
def test_FAST_upper_bound_slice(self):
my_start, my_stop, my_step = (self.my_start, self.my_stop, self.my_step)
common_example = Range(my_start, my_stop, my_step)
upper_bound = (my_stop - my_start)/my_step + 1
for i in xrange(upper_bound - 1, upper_bound):
result = common_example[i:i+2]
for j in result:
msg = ['to validate', str(j), 'from', str(result),
'whose slice', "[%d:%d]" % (i, i+2),
'exists in', str(list(common_example))]
log_desc(' '.join(msg))
self.assertIn(j, common_example,
"%s, whose slice [%d:%d] is not defined in %s" %
(str(j), i, i+2, str(list(common_example)))
)
def test_FAST_lower_bound_slice(self):
my_start, my_stop, my_step = (self.my_start, self.my_stop, self.my_step)
common_example = Range(my_start, my_stop, my_step)
i = 0
result = common_example[i:i+2]
for j in result:
msg = ['to validate', str(j), 'from', str(result),
'whose slice', "[%d:%d]" % (i, i+2),
'exists in', str(list(common_example))]
log_desc(' '.join(msg))
self.assertIn(j, common_example,
"%s, whose slice [%d:%d] is not defined in %s" %
(str(j), i, i+2, str(list(common_example)))
)
def test_hardcode_div_mod(self):
self.assertEqual(div_mod(10, 3), (3, 1))
self.assertEqual(div_mod(10, 2), (5, 0))
def test_hardcode_range_len_getitem(self):
example = Range(1)
self.assertEqual(len(example), 1)
self.assertEqual(example[0], 0)
example = Range(2)
self.assertEqual(len(example), 2)
self.assertEqual(example[0], 0)
self.assertEqual(example[1], 1)
def test_FET_range_empty_from0to0(self):
# empty set, where 0 ~ 0
parameter = 0
example = Range(parameter)
expected = 0
msg = ['to validate length of', str(list(example)),
'from Range(%d)' % parameter,
'which should be equal to %d' % expected]
log_desc(' '.join(msg))
self.assertEqual(len(example), expected)
def test_FET_range_minimal_set(self):
# minimal set, where 0 ~ 1
example = Range(0, 1)
expected = 1
msg = ['to validate length of', str(list(example)),
'from Range(0, 1)',
'which should be equal to %d' % expected]
log_desc(' '.join(msg))
self.assertEqual(len(example), expected)
def test_FET_range_large_step(self):
# minimal set, where step larger than distance between start and end
example = Range(1, 3, 4)
expected = 1
msg = ['to validate length of', str(list(example)),
'from Range(1, 3, 4)',
'which should be equal to %d' % expected]
log_desc(' '.join(msg))
self.assertEqual(len(example), expected)
def test_FET_range_out_of_order(self):
        # empty set, where arguments are out of order
example = Range(2, 1)
expected = 0
msg = ['to validate length of', str(list(example)),
'from Range(2, 1)',
'which should be equal to %d' % expected]
log_desc(' '.join(msg))
self.assertEqual(len(example), expected)
if __name__ == '__main__':
unittest.main()
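# The Range4 module exercised above is not part of this dump. As a point of
# reference only, a minimal, hypothetical implementation consistent with the
# assertions in these tests (Python 2, like the tests themselves) could look
# roughly like this:
#
#     def div_mod(dividend, divisor):
#         return dividend // divisor, dividend % divisor
#
#     class Range(object):
#         def __init__(self, start, stop=None, step=1):
#             if stop is None:
#                 start, stop = 0, start
#             quotient, remainder = div_mod(stop - start, step)
#             self._start, self._step = start, step
#             self._length = max(0, quotient + (1 if remainder else 0))
#
#         def __len__(self):
#             return self._length
#
#         def __getitem__(self, key):
#             # Slices return plain lists; out-of-range indexes raise IndexError,
#             # which also drives iteration and membership tests in the asserts.
#             if isinstance(key, slice):
#                 return [self[i] for i in xrange(*key.indices(self._length))]
#             if not 0 <= key < self._length:
#                 raise IndexError(key)
#             return self._start + key * self._step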

authors: ["vash.hsu@gmail.com"]
author_id: vash.hsu@gmail.com

blob_id: 6f24047a7dd29849aee3c48d35c190d56f99b757
directory_id: 16beed9ab443dee7cacefea9c3db8c8ca381d1ab
path: /scripts/BreakpointsSave.py
content_id: 16385c3128c5773ae81b923bcc07601e3454bae0
detected_licenses: []
license_type: no_license
repo_name: YuKnight/jeb2-samplecode
snapshot_id: 4cc010757345c6ec6f7fee0913fa0388686b0e4e
revision_id: a26b0597da66ff41eaff54c7a35aed0266512fdf
branch_name: refs/heads/master
visit_date: 2020-04-29T11:57:03.043105
revision_date: 2019-02-08T23:09:01
committer_date: 2019-02-08T23:09:01
github_id: null
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 2599
extension: py
content:
"""
JEB script to save (persist) the breakpoints (location and state) of the current debugging session to a file.
- Breakpoints file: [JEB]/bin/breakpoints.txt
- See converse script to reload breakpoints onto a debugging session: BreakpointsLoad.py
The breakpoints file is JSON formatted and can be edited manually as well. Structure:
ProjectName:
DebuggerName:
BreakpointsList: (= dictionary with "address" and "enabled" keys)
Example:
{
"/analysis/appcheck-debug.apk.jdb2": {
"VM": [
{
"address": "Lcom/xyz/appcheck/AppCheck$1;-><init>(Lcom/xyz/appcheck/AppCheck;)V+4h",
"enabled": true
},
{
"address": "Lcom/xyz/appcheck/AppCheck$1;->run()V+2h",
"enabled": true
}
]
  },
"/analysis/malware_yil1.apk.jdb2": {
"VM": [
{
"address": "Lcom/malyy/a/b/c;->f()V+0h",
"enabled": false
}
]
}
}
Refer to SCRIPTS.TXT for more information.
"""
import json
import os
import time
from com.pnfsoftware.jeb.client.api import IScript
from com.pnfsoftware.jeb.core import RuntimeProjectUtil
from com.pnfsoftware.jeb.core.units.code.debug import IDebuggerUnit
class BreakpointsSave(IScript):
def run(self, ctx):
engctx = ctx.getEnginesContext()
if not engctx:
print('Back-end engines not initialized')
return
projects = engctx.getProjects()
if not projects:
print('There is no opened project')
return
prj = projects[0]
prjname = prj.getName()
prgdir = ctx.getProgramDirectory()
bpfile = os.path.join(prgdir, 'breakpoints.txt')
        # Load any previously saved breakpoints; start from an empty dict if the
        # file does not exist yet or cannot be parsed.
        bpdict = {}
        if os.path.exists(bpfile):
            with open(bpfile, 'r') as f:
                try:
                    bpdict = json.load(f)
                except ValueError:
                    bpdict = {}
#print('Current breakpoints file:', bpdict)
units = RuntimeProjectUtil.findUnitsByType(prj, IDebuggerUnit, False)
if not units:
print('No unit available')
return
d = {}
cnt = 0
for dbg in units:
# may be null for a detached debugger
bplist = dbg.getBreakpoints()
if bplist:
a = []
for bp in bplist:
address = bp.getAddress()
enabled = bp.isEnabled()
#print('- Debugger: %s (for %s): %s (%s)' % (dbg.getName(), dbg.getPotentialDebuggees(), address, 'enabled' if enabled else 'disabled'))
a.append({'address': address, 'enabled': enabled})
cnt += 1
d[dbg.getName()] = a
bpdict[prjname] = d
with open(bpfile, 'w') as f:
try:
json.dump(bpdict, f, indent=True)
except Exception as e:
print('ERROR: Cannot save to breakpoints file: %s' % e)
print('Breakpoints saved: %d.' % cnt)

authors: ["nico@pnfsoftware.com"]
author_id: nico@pnfsoftware.com

blob_id: f760fe424b29472e41c81cbbd59ab91d8e8e2170
directory_id: d0701226f795ed1e4da862d9f51320fe04026336
path: /free-python/turtle1.py
content_id: d922ada07c916bb3f20cb45fe3916ce1b7815af1
detected_licenses: []
license_type: no_license
repo_name: gitdxb/learning-python
snapshot_id: 329a8250c56088c7d90a33ae20aa34188eec2a4a
revision_id: afdd4769f0b2f87eba3fdbd4d8995e201641fe68
branch_name: refs/heads/master
visit_date: 2023-02-11T10:21:32.756320
revision_date: 2021-01-05T19:54:40
committer_date: 2021-01-05T19:54:40
github_id: 288846055
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 174
extension: py
content:
from turtle import Turtle
tina = Turtle()
def draw(name, r, col):
name.color(col)
name.dot(r*2)
draw(tina, 150, "blue")
draw(tina, 100, "red")
draw(tina, 50, "yellow")
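# The dots are drawn largest-first (radius 150, then 100, then 50), so each
# smaller dot lands on top of the previous one and the result reads as three
# concentric circles.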

authors: ["trantambls@gmail.com"]
author_id: trantambls@gmail.com

blob_id: 479e5ba6b20dc98c79280fad0aef7d90f72fb86d
directory_id: 0a7d49300a547eecc823b78a891057f1017db1b2
path: /net_spider/dou_log.py
content_id: 31cba7e5c57b7bf034e514b7b4d8ad0eb22a1577
detected_licenses: []
license_type: no_license
repo_name: PeterZhangxing/codewars
snapshot_id: f315b2ce610207e84a2f0927bc47b4b1dd89bee4
revision_id: 8e4dfaaeae782a37f6baca4c024b1c2a1dc83cba
branch_name: refs/heads/master
visit_date: 2020-09-22T12:09:59.419919
revision_date: 2020-03-02T12:52:55
committer_date: 2020-03-02T12:52:55
github_id: 224330565
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 2327
extension: py
content:
from selenium import webdriver
import time
import requests
from lxml import etree
import random
class DouBanSpider(object):
def __init__(self,username,passwd,browser="Chrome",base_url="https://www.douban.com/"):
self.base_url = base_url
if hasattr(webdriver,browser):
self.driver = getattr(webdriver,browser)()
else:
raise NameError("no such browser!")
if not username or not passwd:
raise ValueError("username and password cannot be null")
self.username = username
self.passwd = passwd
self.get_logged_cookie()
def get_logged_cookie(self):
        # Request the Douban login page
self.driver.get("https://www.douban.com/")
        # Switch to the iframe that contains the login form
log_iframe = self.driver.find_element_by_xpath("//div[@class='login']/iframe")
self.driver.switch_to.frame(log_iframe)
        # Click to switch from SMS-code login to username/password login
self.driver.find_element_by_class_name("account-tab-account").click()
        # Fill in the username and password
self.driver.find_element_by_id("username").send_keys(self.username)
self.driver.find_element_by_id("password").send_keys(self.passwd)
        # Submit the login request
sleep_time = random.randint(2,5)
time.sleep(sleep_time)
try:
self.driver.find_element_by_link_text("登录豆瓣").click()
except Exception as e:
exit(str(e))
        # Wait for the login to complete, then grab the cookies and close the browser
time.sleep(6)
self.logged_cookie = {i['name']:i["value"] for i in self.driver.get_cookies()}
self.driver.quit()
def get_user_name(self):
        # Reuse the cookies captured with selenium to fetch the logged-in page via requests
dou_html_str = requests.get(
url="https://www.douban.com/",
cookies=self.logged_cookie,
).content.decode()
formated_html = etree.HTML(dou_html_str)
nickname = formated_html.xpath(
"//div[@class='top-nav-info']//li[@class='nav-user-account']/a[@class='bn-more']/span[1]/text()")[0]
return nickname.split("的")[0]
if __name__ == '__main__':
myspider = DouBanSpider("18687027119","zx20_05")
nick_name = myspider.get_user_name()
print(nick_name)

authors: ["964725349@qq.com"]
author_id: 964725349@qq.com

blob_id: c7bf08d78ff048e8a26e5a89ca4ffdf7a27c0bd8
directory_id: 5123ec1c7bee26de2810affc240802563a372777
path: /mapthree/venv/bin/runxlrd.py
content_id: c4ec9823dbbf7593699816cadd30b79d5dcbd54c
detected_licenses: ["MIT"]
license_type: permissive
repo_name: bitbyt3r/mapthree
snapshot_id: 7371fed9dbbc2b655129b35ec86e1acfcf76ec7f
revision_id: 8579330b0f5dcae8a3bb4dcd7658b084628e3b0f
branch_name: refs/heads/master
visit_date: 2020-12-03T03:56:48.074806
revision_date: 2017-08-01T21:39:06
committer_date: 2017-08-01T21:39:06
github_id: 95790480
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 16333
extension: py
content:
#!/extra/mark25/mapthree/mapthree/venv/bin/python3
# Copyright (c) 2005-2012 Stephen John Machin, Lingfo Pty Ltd
# This script is part of the xlrd package, which is released under a
# BSD-style licence.
from __future__ import print_function
cmd_doc = """
Commands:
2rows Print the contents of first and last row in each sheet
3rows Print the contents of first, second and last row in each sheet
bench Same as "show", but doesn't print -- for profiling
biff_count[1] Print a count of each type of BIFF record in the file
biff_dump[1] Print a dump (char and hex) of the BIFF records in the file
fonts hdr + print a dump of all font objects
hdr Mini-overview of file (no per-sheet information)
hotshot Do a hotshot profile run e.g. ... -f1 hotshot bench bigfile*.xls
labels Dump of sheet.col_label_ranges and ...row... for each sheet
name_dump Dump of each object in book.name_obj_list
names Print brief information for each NAME record
ov Overview of file
profile Like "hotshot", but uses cProfile
show Print the contents of all rows in each sheet
version[0] Print versions of xlrd and Python and exit
xfc Print "XF counts" and cell-type counts -- see code for details
[0] means no file arg
[1] means only one file arg i.e. no glob.glob pattern
"""
options = None
if __name__ == "__main__":
PSYCO = 0
import xlrd
import sys, time, glob, traceback, gc
from xlrd.timemachine import xrange, REPR
class LogHandler(object):
def __init__(self, logfileobj):
self.logfileobj = logfileobj
self.fileheading = None
self.shown = 0
def setfileheading(self, fileheading):
self.fileheading = fileheading
self.shown = 0
def write(self, text):
if self.fileheading and not self.shown:
self.logfileobj.write(self.fileheading)
self.shown = 1
self.logfileobj.write(text)
null_cell = xlrd.empty_cell
def show_row(bk, sh, rowx, colrange, printit):
if bk.ragged_rows:
colrange = range(sh.row_len(rowx))
if not colrange: return
if printit: print()
if bk.formatting_info:
for colx, ty, val, cxfx in get_row_data(bk, sh, rowx, colrange):
if printit:
print("cell %s%d: type=%d, data: %r, xfx: %s"
% (xlrd.colname(colx), rowx+1, ty, val, cxfx))
else:
for colx, ty, val, _unused in get_row_data(bk, sh, rowx, colrange):
if printit:
print("cell %s%d: type=%d, data: %r" % (xlrd.colname(colx), rowx+1, ty, val))
def get_row_data(bk, sh, rowx, colrange):
result = []
dmode = bk.datemode
ctys = sh.row_types(rowx)
cvals = sh.row_values(rowx)
for colx in colrange:
cty = ctys[colx]
cval = cvals[colx]
if bk.formatting_info:
cxfx = str(sh.cell_xf_index(rowx, colx))
else:
cxfx = ''
if cty == xlrd.XL_CELL_DATE:
try:
showval = xlrd.xldate_as_tuple(cval, dmode)
except xlrd.XLDateError as e:
showval = "%s:%s" % (type(e).__name__, e)
cty = xlrd.XL_CELL_ERROR
elif cty == xlrd.XL_CELL_ERROR:
showval = xlrd.error_text_from_code.get(cval, '<Unknown error code 0x%02x>' % cval)
else:
showval = cval
result.append((colx, cty, showval, cxfx))
return result
def bk_header(bk):
print()
print("BIFF version: %s; datemode: %s"
% (xlrd.biff_text_from_num[bk.biff_version], bk.datemode))
print("codepage: %r (encoding: %s); countries: %r"
% (bk.codepage, bk.encoding, bk.countries))
print("Last saved by: %r" % bk.user_name)
print("Number of data sheets: %d" % bk.nsheets)
print("Use mmap: %d; Formatting: %d; On demand: %d"
% (bk.use_mmap, bk.formatting_info, bk.on_demand))
print("Ragged rows: %d" % bk.ragged_rows)
if bk.formatting_info:
print("FORMATs: %d, FONTs: %d, XFs: %d"
% (len(bk.format_list), len(bk.font_list), len(bk.xf_list)))
if not options.suppress_timing:
print("Load time: %.2f seconds (stage 1) %.2f seconds (stage 2)"
% (bk.load_time_stage_1, bk.load_time_stage_2))
print()
def show_fonts(bk):
print("Fonts:")
for x in xrange(len(bk.font_list)):
font = bk.font_list[x]
font.dump(header='== Index %d ==' % x, indent=4)
def show_names(bk, dump=0):
bk_header(bk)
if bk.biff_version < 50:
print("Names not extracted in this BIFF version")
return
nlist = bk.name_obj_list
print("Name list: %d entries" % len(nlist))
for nobj in nlist:
if dump:
nobj.dump(sys.stdout,
header="\n=== Dump of name_obj_list[%d] ===" % nobj.name_index)
else:
print("[%d]\tName:%r macro:%r scope:%d\n\tresult:%r\n"
% (nobj.name_index, nobj.name, nobj.macro, nobj.scope, nobj.result))
def print_labels(sh, labs, title):
if not labs:return
for rlo, rhi, clo, chi in labs:
print("%s label range %s:%s contains:"
% (title, xlrd.cellname(rlo, clo), xlrd.cellname(rhi-1, chi-1)))
for rx in xrange(rlo, rhi):
for cx in xrange(clo, chi):
print(" %s: %r" % (xlrd.cellname(rx, cx), sh.cell_value(rx, cx)))
def show_labels(bk):
# bk_header(bk)
hdr = 0
for shx in range(bk.nsheets):
sh = bk.sheet_by_index(shx)
clabs = sh.col_label_ranges
rlabs = sh.row_label_ranges
if clabs or rlabs:
if not hdr:
bk_header(bk)
hdr = 1
print("sheet %d: name = %r; nrows = %d; ncols = %d" %
(shx, sh.name, sh.nrows, sh.ncols))
print_labels(sh, clabs, 'Col')
print_labels(sh, rlabs, 'Row')
if bk.on_demand: bk.unload_sheet(shx)
def show(bk, nshow=65535, printit=1):
bk_header(bk)
if 0:
rclist = xlrd.sheet.rc_stats.items()
rclist = sorted(rclist)
print("rc stats")
for k, v in rclist:
print("0x%04x %7d" % (k, v))
if options.onesheet:
try:
shx = int(options.onesheet)
except ValueError:
shx = bk.sheet_by_name(options.onesheet).number
shxrange = [shx]
else:
shxrange = range(bk.nsheets)
# print("shxrange", list(shxrange))
for shx in shxrange:
sh = bk.sheet_by_index(shx)
nrows, ncols = sh.nrows, sh.ncols
colrange = range(ncols)
anshow = min(nshow, nrows)
print("sheet %d: name = %s; nrows = %d; ncols = %d" %
(shx, REPR(sh.name), sh.nrows, sh.ncols))
if nrows and ncols:
# Beat the bounds
for rowx in xrange(nrows):
nc = sh.row_len(rowx)
if nc:
_junk = sh.row_types(rowx)[nc-1]
_junk = sh.row_values(rowx)[nc-1]
_junk = sh.cell(rowx, nc-1)
for rowx in xrange(anshow-1):
if not printit and rowx % 10000 == 1 and rowx > 1:
print("done %d rows" % (rowx-1,))
show_row(bk, sh, rowx, colrange, printit)
if anshow and nrows:
show_row(bk, sh, nrows-1, colrange, printit)
print()
if bk.on_demand: bk.unload_sheet(shx)
def count_xfs(bk):
bk_header(bk)
for shx in range(bk.nsheets):
sh = bk.sheet_by_index(shx)
nrows, ncols = sh.nrows, sh.ncols
print("sheet %d: name = %r; nrows = %d; ncols = %d" %
(shx, sh.name, sh.nrows, sh.ncols))
# Access all xfindexes to force gathering stats
type_stats = [0, 0, 0, 0, 0, 0, 0]
for rowx in xrange(nrows):
for colx in xrange(sh.row_len(rowx)):
xfx = sh.cell_xf_index(rowx, colx)
assert xfx >= 0
cty = sh.cell_type(rowx, colx)
type_stats[cty] += 1
print("XF stats", sh._xf_index_stats)
print("type stats", type_stats)
print()
if bk.on_demand: bk.unload_sheet(shx)
def main(cmd_args):
import optparse
global options, PSYCO
usage = "\n%prog [options] command [input-file-patterns]\n" + cmd_doc
oparser = optparse.OptionParser(usage)
oparser.add_option(
"-l", "--logfilename",
default="",
help="contains error messages")
oparser.add_option(
"-v", "--verbosity",
type="int", default=0,
help="level of information and diagnostics provided")
oparser.add_option(
"-m", "--mmap",
type="int", default=-1,
help="1: use mmap; 0: don't use mmap; -1: accept heuristic")
oparser.add_option(
"-e", "--encoding",
default="",
help="encoding override")
oparser.add_option(
"-f", "--formatting",
type="int", default=0,
help="0 (default): no fmt info\n"
"1: fmt info (all cells)\n"
)
oparser.add_option(
"-g", "--gc",
type="int", default=0,
help="0: auto gc enabled; 1: auto gc disabled, manual collect after each file; 2: no gc")
oparser.add_option(
"-s", "--onesheet",
default="",
help="restrict output to this sheet (name or index)")
oparser.add_option(
"-u", "--unnumbered",
action="store_true", default=0,
help="omit line numbers or offsets in biff_dump")
oparser.add_option(
"-d", "--on-demand",
action="store_true", default=0,
help="load sheets on demand instead of all at once")
oparser.add_option(
"-t", "--suppress-timing",
action="store_true", default=0,
help="don't print timings (diffs are less messy)")
oparser.add_option(
"-r", "--ragged-rows",
action="store_true", default=0,
help="open_workbook(..., ragged_rows=True)")
options, args = oparser.parse_args(cmd_args)
if len(args) == 1 and args[0] in ("version", ):
pass
elif len(args) < 2:
oparser.error("Expected at least 2 args, found %d" % len(args))
cmd = args[0]
xlrd_version = getattr(xlrd, "__VERSION__", "unknown; before 0.5")
if cmd == 'biff_dump':
xlrd.dump(args[1], unnumbered=options.unnumbered)
sys.exit(0)
if cmd == 'biff_count':
xlrd.count_records(args[1])
sys.exit(0)
if cmd == 'version':
print("xlrd: %s, from %s" % (xlrd_version, xlrd.__file__))
print("Python:", sys.version)
sys.exit(0)
if options.logfilename:
logfile = LogHandler(open(options.logfilename, 'w'))
else:
logfile = sys.stdout
mmap_opt = options.mmap
mmap_arg = xlrd.USE_MMAP
if mmap_opt in (1, 0):
mmap_arg = mmap_opt
elif mmap_opt != -1:
print('Unexpected value (%r) for mmap option -- assuming default' % mmap_opt)
fmt_opt = options.formatting | (cmd in ('xfc', ))
gc_mode = options.gc
if gc_mode:
gc.disable()
for pattern in args[1:]:
for fname in glob.glob(pattern):
print("\n=== File: %s ===" % fname)
if logfile != sys.stdout:
logfile.setfileheading("\n=== File: %s ===\n" % fname)
if gc_mode == 1:
n_unreachable = gc.collect()
if n_unreachable:
print("GC before open:", n_unreachable, "unreachable objects")
if PSYCO:
import psyco
psyco.full()
PSYCO = 0
try:
t0 = time.time()
bk = xlrd.open_workbook(fname,
verbosity=options.verbosity, logfile=logfile,
use_mmap=mmap_arg,
encoding_override=options.encoding,
formatting_info=fmt_opt,
on_demand=options.on_demand,
ragged_rows=options.ragged_rows,
)
t1 = time.time()
if not options.suppress_timing:
print("Open took %.2f seconds" % (t1-t0,))
except xlrd.XLRDError as e:
print("*** Open failed: %s: %s" % (type(e).__name__, e))
continue
except KeyboardInterrupt:
print("*** KeyboardInterrupt ***")
traceback.print_exc(file=sys.stdout)
sys.exit(1)
except BaseException as e:
print("*** Open failed: %s: %s" % (type(e).__name__, e))
traceback.print_exc(file=sys.stdout)
continue
t0 = time.time()
if cmd == 'hdr':
bk_header(bk)
elif cmd == 'ov': # OverView
show(bk, 0)
elif cmd == 'show': # all rows
show(bk)
elif cmd == '2rows': # first row and last row
show(bk, 2)
elif cmd == '3rows': # first row, 2nd row and last row
show(bk, 3)
elif cmd == 'bench':
show(bk, printit=0)
elif cmd == 'fonts':
bk_header(bk)
show_fonts(bk)
elif cmd == 'names': # named reference list
show_names(bk)
elif cmd == 'name_dump': # named reference list
show_names(bk, dump=1)
elif cmd == 'labels':
show_labels(bk)
elif cmd == 'xfc':
count_xfs(bk)
else:
print("*** Unknown command <%s>" % cmd)
sys.exit(1)
del bk
if gc_mode == 1:
n_unreachable = gc.collect()
if n_unreachable:
print("GC post cmd:", fname, "->", n_unreachable, "unreachable objects")
if not options.suppress_timing:
t1 = time.time()
print("\ncommand took %.2f seconds\n" % (t1-t0,))
return None
av = sys.argv[1:]
if not av:
main(av)
firstarg = av[0].lower()
if firstarg == "hotshot":
import hotshot, hotshot.stats
av = av[1:]
prof_log_name = "XXXX.prof"
prof = hotshot.Profile(prof_log_name)
# benchtime, result = prof.runcall(main, *av)
result = prof.runcall(main, *(av, ))
print("result", repr(result))
prof.close()
stats = hotshot.stats.load(prof_log_name)
stats.strip_dirs()
stats.sort_stats('time', 'calls')
stats.print_stats(20)
elif firstarg == "profile":
import cProfile
av = av[1:]
cProfile.run('main(av)', 'YYYY.prof')
import pstats
p = pstats.Stats('YYYY.prof')
p.strip_dirs().sort_stats('cumulative').print_stats(30)
elif firstarg == "psyco":
PSYCO = 1
main(av[1:])
else:
main(av)

authors: ["mark@hackafe.net"]
author_id: mark@hackafe.net