blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6b8fd04745c1d69c819cc2f7e2ca46f5d6b25e51 | de23310ac55a7e72e853ca43ebdbce28358a4bb9 | /models/residual_viz.py | 35bfe4f16c0edf9e74ad6977b2482c5ad2f72ede | [] | no_license | mehdidc/zoo | ea5f97b1402e9501db53cd418271614afe378dc0 | 194efb0098679c065de51b0f4d4864cb415b17f7 | refs/heads/master | 2020-04-28T02:28:37.576822 | 2016-10-31T06:06:21 | 2016-10-31T06:06:21 | 174,899,768 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,157 | py | #!/usr/bin/env python
"""
Lasagne implementation of CIFAR-10 examples from "Deep Residual Learning for Image Recognition" (http://arxiv.org/abs/1512.03385)
With n=5, i.e. 32-layer network from the paper, this achieves a validation error of 6.88% (vs 7.51% in the paper).
The accuracy has not yet been tested for the other values of n.
"""
from __future__ import print_function
import sys
import os
import time
import string
import random
import pickle
import numpy as np
import theano
import theano.tensor as T
import lasagne
# ##################### Load data from CIFAR-10 dataset #######################
# this code assumes the cifar dataset from 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'
# has been extracted in current working directory
def unpickle(file):
import cPickle
fo = open(file, 'rb')
dict = cPickle.load(fo)
fo.close()
return dict
def load_data():
xs = []
ys = []
for j in range(5):
d = unpickle('cifar-10-batches-py/data_batch_'+`j+1`)
x = d['data']
y = d['labels']
xs.append(x)
ys.append(y)
d = unpickle('cifar-10-batches-py/test_batch')
xs.append(d['data'])
ys.append(d['labels'])
x = np.concatenate(xs)/np.float32(255)
y = np.concatenate(ys)
x = np.dstack((x[:, :1024], x[:, 1024:2048], x[:, 2048:]))
x = x.reshape((x.shape[0], 32, 32, 3)).transpose(0,3,1,2)
# subtract per-pixel mean
pixel_mean = np.mean(x[0:50000],axis=0)
#pickle.dump(pixel_mean, open("cifar10-pixel_mean.pkl","wb"))
x -= pixel_mean
# create mirrored images
X_train = x[0:50000,:,:,:]
Y_train = y[0:50000]
X_train_flip = X_train[:,:,:,::-1]
Y_train_flip = Y_train
X_train = np.concatenate((X_train,X_train_flip),axis=0)
Y_train = np.concatenate((Y_train,Y_train_flip),axis=0)
# shuffle arrays
from random import shuffle
train_index = [i for i in range(100000)]
test_index = [i for i in range(10000)]
random.shuffle(train_index)
random.shuffle(test_index)
train_index = np.array(train_index)
test_index = np.array(test_index)
X_train = X_train[train_index,:,:,:]
Y_train = Y_train[train_index]
X_test = x[test_index+50000,:,:,:]
Y_test = y[test_index+50000]
return dict(
X_train=lasagne.utils.floatX(X_train),
Y_train=Y_train.astype('int32'),
X_test = lasagne.utils.floatX(X_test),
Y_test = Y_test.astype('int32'),)
# ##################### Build the neural network model #######################
#from lasagne.layers import Conv2DLayer as ConvLayer
from lasagne.layers.conv import Conv2DLayer as ConvLayer
from lasagne.layers import ElemwiseSumLayer
from lasagne.layers import InputLayer
from lasagne.layers import DenseLayer
from lasagne.layers import GlobalPoolLayer
from lasagne.layers import PadLayer
from lasagne.layers import Pool2DLayer
from lasagne.layers import NonlinearityLayer
from lasagne.nonlinearities import softmax, rectify
# NB! from pull request #461 : https://github.com/f0k/Lasagne/blob/98b5581fa830cda3d3f838506ef14e5811a35ef7/lasagne/layers/normalization.py
from normalization import batch_norm
def build_cnn(input_var=None, n=5):
# create a residual learning building block with two stacked 3x3 convlayers as in paper
def residual_block(l, increase_dim=False, projection=False):
input_num_filters = l.output_shape[1]
if increase_dim:
first_stride = (2,2)
out_num_filters = input_num_filters*2
else:
first_stride = (1,1)
out_num_filters = input_num_filters
stack_1 = batch_norm(ConvLayer(l, num_filters=out_num_filters, filter_size=(3,3), stride=first_stride, nonlinearity=rectify, pad='same', W=lasagne.init.HeNormal(gain='relu')))
stack_2 = batch_norm(ConvLayer(stack_1, num_filters=out_num_filters, filter_size=(3,3), stride=(1,1), nonlinearity=None, pad='same', W=lasagne.init.HeNormal(gain='relu')))
# add shortcut connections
if increase_dim:
if projection:
# projection shortcut, as option B in paper
projection = ConvLayer(l, num_filters=out_num_filters, filter_size=(1,1), stride=(2,2), nonlinearity=None, pad='same', b=None)
block = NonlinearityLayer(batch_norm(ElemwiseSumLayer([stack_2, projection])),nonlinearity=rectify)
else:
# identity shortcut, as option A in paper
# we use a pooling layer to get identity with strides, since identity layers with stride don't exist in Lasagne
identity = Pool2DLayer(l, pool_size=1, stride=(2,2), mode='average_exc_pad')
padding = PadLayer(identity, [out_num_filters/4,0,0], batch_ndim=1)
block = NonlinearityLayer(batch_norm(ElemwiseSumLayer([stack_2, padding])),nonlinearity=rectify)
else:
block = NonlinearityLayer(batch_norm(ElemwiseSumLayer([stack_2, l])),nonlinearity=rectify)
return block
# Building the network
l_in = InputLayer(shape=(None, 3, 32, 32), input_var=input_var)
# first layer, output is 16 x 32 x 32
l = batch_norm(ConvLayer(l_in, num_filters=16, filter_size=(3,3), stride=(1,1), nonlinearity=rectify, pad='same', W=lasagne.init.HeNormal(gain='relu')))
# first stack of residual blocks, output is 16 x 32 x 32
for _ in range(n):
l = residual_block(l)
# second stack of residual blocks, output is 32 x 16 x 16
l = residual_block(l, increase_dim=True)
for _ in range(1,n):
l = residual_block(l)
# third stack of residual blocks, output is 64 x 8 x 8
l = residual_block(l, increase_dim=True)
for _ in range(1,n):
l = residual_block(l)
# average pooling
l = GlobalPoolLayer(l)
# fully connected layer
network = DenseLayer(
l, num_units=10,
nonlinearity=softmax)
return network
# ############################# Batch iterator ###############################
def iterate_minibatches(inputs, targets, batchsize, shuffle=False, augment=False):
assert len(inputs) == len(targets)
if shuffle:
indices = np.arange(len(inputs))
np.random.shuffle(indices)
for start_idx in range(0, len(inputs) - batchsize + 1, batchsize):
if shuffle:
excerpt = indices[start_idx:start_idx + batchsize]
else:
excerpt = slice(start_idx, start_idx + batchsize)
if augment:
# as in paper :
# pad feature arrays with 4 pixels on each side
# and do random cropping of 32x32
padded = np.pad(inputs[excerpt],((0,0),(0,0),(4,4),(4,4)),mode='constant')
random_cropped = np.zeros(inputs[excerpt].shape, dtype=np.float32)
crops = np.random.random_integers(0,high=8,size=(batchsize,2))
for r in range(batchsize):
random_cropped[r,:,:,:] = padded[r,:,crops[r,0]:(crops[r,0]+32),crops[r,1]:(crops[r,1]+32)]
inp_exc = random_cropped
else:
inp_exc = inputs[excerpt]
yield inp_exc, targets[excerpt]
# ############################## Main program ################################
def main(n=5, num_epochs=82):
# Load the dataset
print("Loading data...")
data = load_data()
X_train = data['X_train']
Y_train = data['Y_train']
X_test = data['X_test']
Y_test = data['Y_test']
# Prepare Theano variables for inputs and targets
input_var = T.tensor4('inputs')
target_var = T.ivector('targets')
# Create neural network model
print("Building model and compiling functions...")
network = build_cnn(input_var, n)
print("number of parameters in model: %d" % lasagne.layers.count_params(network))
# Create a loss expression for training, i.e., a scalar objective we want
# to minimize (for our multi-class problem, it is the cross-entropy loss):
prediction = lasagne.layers.get_output(network)
loss = lasagne.objectives.categorical_crossentropy(prediction, target_var)
loss = loss.mean()
# add weight decay
all_layers = lasagne.layers.get_all_layers(network)
l2_penalty = lasagne.regularization.regularize_layer_params(all_layers, lasagne.regularization.l2) * 0.0001
loss = loss + l2_penalty
# Create update expressions for training
# Stochastic Gradient Descent (SGD) with momentum
params = lasagne.layers.get_all_params(network, trainable=True)
lr = 0.1
sh_lr = theano.shared(lasagne.utils.floatX(lr))
updates = lasagne.updates.momentum(
loss, params, learning_rate=sh_lr, momentum=0.9)
# Create a loss expression for validation/testing
test_prediction = lasagne.layers.get_output(network)
test_loss = lasagne.objectives.categorical_crossentropy(test_prediction,
target_var)
test_loss = test_loss.mean()
test_acc = T.mean(T.eq(T.argmax(test_prediction, axis=1), target_var),
dtype=theano.config.floatX)
# Compile a function performing a training step on a mini-batch (by giving
# the updates dictionary) and returning the corresponding training loss:
train_fn = theano.function([input_var, target_var], loss, updates=updates)
# Compile a second function computing the validation loss and accuracy:
val_fn = theano.function([input_var, target_var], [test_loss, test_acc])
# Finally, launch the training loop.
print("Starting training...")
# We iterate over epochs:
for epoch in range(num_epochs):
# In each epoch, we do a full pass over the training data:
train_err = 0
train_batches = 0
start_time = time.time()
for batch in iterate_minibatches(X_train, Y_train, 128, shuffle=True, augment=True):
inputs, targets = batch
train_err += train_fn(inputs, targets)
train_batches += 1
# And a full pass over the validation data:
val_err = 0
val_acc = 0
val_batches = 0
for batch in iterate_minibatches(X_test, Y_test, 500, shuffle=False):
inputs, targets = batch
err, acc = val_fn(inputs, targets)
val_err += err
val_acc += acc
val_batches += 1
# Then we print the results for this epoch:
print("Epoch {} of {} took {:.3f}s".format(
epoch + 1, num_epochs, time.time() - start_time))
print(" training loss:\t\t{:.6f}".format(train_err / train_batches))
print(" validation loss:\t\t{:.6f}".format(val_err / val_batches))
print(" validation accuracy:\t\t{:.2f} %".format(
val_acc / val_batches * 100))
# adjust learning rate as in paper
# 32k and 48k iterations should be roughly equivalent to 41 and 61 epochs
if (epoch+1) == 41 or (epoch+1) == 61:
new_lr = sh_lr.get_value() * 0.1
print("New LR:"+str(new_lr))
sh_lr.set_value(lasagne.utils.floatX(new_lr))
# After training, we compute and print the test error:
test_err = 0
test_acc = 0
test_batches = 0
for batch in iterate_minibatches(X_test, Y_test, 500, shuffle=False):
inputs, targets = batch
err, acc = val_fn(inputs, targets)
test_err += err
test_acc += acc
test_batches += 1
print("Final results:")
print(" test loss:\t\t\t{:.6f}".format(test_err / test_batches))
print(" test accuracy:\t\t{:.2f} %".format(
test_acc / test_batches * 100))
# dump the network weights to a file :
np.savez('cifar10_deep_residual_model.npz', *lasagne.layers.get_all_param_values(network))
#
# And load them again later on like this:
# with np.load('cifar10_deep_residual_model.npz') as f:
# param_values = [f['arr_%d' % i] for i in range(len(f.files))]
# lasagne.layers.set_all_param_values(network, param_values)
if __name__ == '__main__':
from lasagnekit.misc.draw_net import draw_to_file
from lasagne.layers import get_all_layers
import residual
import residualv2
import residualv3
import residualv4
from hp_toolkit.hp import instantiate_default
cnn = build_cnn(input_var=None, n=5)
layers = get_all_layers(cnn)
draw_to_file(layers, "residual_other.svg")
hp = instantiate_default(residual.params)
cnn = residual.build_model(**hp)
cnn = cnn.output_layers[0]
layers = get_all_layers(cnn)
draw_to_file(layers, "residual.svg")
hp = instantiate_default(residualv2.params)
cnn = residualv2.build_model(**hp)
cnn = cnn.output_layers[0]
layers = get_all_layers(cnn)
draw_to_file(layers, "residualv2.svg")
hp = instantiate_default(residualv3.params)
cnn = residualv3.build_model(**hp)
cnn = cnn.output_layers[0]
layers = get_all_layers(cnn)
draw_to_file(layers, "residualv3.svg")
hp = instantiate_default(residualv4.params)
cnn = residualv4.build_model(**hp)
cnn = cnn.output_layers[0]
layers = get_all_layers(cnn)
draw_to_file(layers, "residualv4.svg")
| [
"mehdi@cherti.name"
] | mehdi@cherti.name |
2e2462e1cee4dda266056451ce2d7caaf0cbdad7 | d77decc90e901a1ba7b1c20bb3058deb1f3fabe1 | /coba.py | 0b92d661fd2fff83d96e6c7cca49146900318a45 | [] | no_license | widhera/PerpusOnAir | 30689d2ffc1078fff6936fc6d4df41a03dcc5372 | 10d59d171e484c220cbfaf141e106651b200c7b1 | refs/heads/master | 2020-04-10T09:33:32.961446 | 2018-12-19T10:04:27 | 2018-12-19T10:04:27 | 160,940,554 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 195 | py | import pymysql
db = pymysql.connect("localhost", "root", "yoza", "perpusonair")
cursor = db.cursor()
query = "SELECT * from buku "
cursor.execute(query)
result = cursor.fetchall()
print result | [
"widheramahana@gmail.com"
] | widheramahana@gmail.com |
03600cc8214045434b642323a45c09a881382679 | 077c91b9d5cb1a6a724da47067483c622ce64be6 | /syn_mem_corruption_3switch_fuzzer_mcs/intermcs_5_/replay_config.py | 7aef30971cd0c4b996594cb00d2313e431ebf28b | [] | no_license | Spencerx/experiments | 0edd16398725f6fd9365ddbb1b773942e4878369 | aaa98b0f67b0d0c0c826b8a1565916bf97ae3179 | refs/heads/master | 2020-04-03T10:11:40.671606 | 2014-06-11T23:55:11 | 2014-06-11T23:55:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,318 | py |
from config.experiment_config_lib import ControllerConfig
from sts.topology import *
from sts.control_flow import Replayer
from sts.simulation_state import SimulationConfig
from sts.input_traces.input_logger import InputLogger
simulation_config = SimulationConfig(controller_configs=[ControllerConfig(start_cmd='./pox.py --verbose openflow.of_01 --address=__address__ --port=__port__ openflow.discovery forwarding.l2_multi_syn_mem_corruption', label='c1', address='127.0.0.1', cwd='pox')],
topology_class=MeshTopology,
topology_params="num_switches=4",
patch_panel_class=BufferedPatchPanel,
multiplex_sockets=False,
kill_controllers_on_exit=True)
control_flow = Replayer(simulation_config, "experiments/syn_mem_corruption_3switch_fuzzer_mcs/intermcs_5_/mcs.trace.notimeouts",
input_logger=InputLogger(),
wait_on_deterministic_values=False,
allow_unexpected_messages=False,
delay_flow_mods=False,
default_dp_permit=False,
pass_through_whitelisted_messages=False,
invariant_check_name='InvariantChecker.check_liveness',
bug_signature="c1")
| [
"b-github.com@wundsam.net"
] | b-github.com@wundsam.net |
c71e156f811307de345da807ee15cfe276b92a55 | f23c1741a63acd9d431077c4b2068e4072a72d51 | /permutation.py | a92b11d13ab718087d9f9ce651ba2472f6a711a6 | [] | no_license | Martin9527/LeetCodeTest | b188c997ab01a38201bd5ba792cdc104ca79d1d4 | 5f860c8fd2d7d7ff94eca6065d643cc4ea204abf | refs/heads/master | 2020-05-23T11:21:54.543063 | 2019-12-08T10:37:42 | 2019-12-08T10:37:42 | 186,735,271 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,301 | py | class Solution(object):
def permute(self,nums):
size = len(nums)
if not size :
return []
result = []
curAns = []
usedNums = set()
self.backTrack(nums,size,curAns,usedNums,result)
return result
def backTrack(self,nums,size,curAns,usedNums,result):
if size == len(curAns):
import copy
ans = copy.deepcopy(curAns)
result.append(ans)
return
for j in range(size):
if nums[j] not in usedNums:
usedNums.add(nums[j])
curAns.append(nums[j])
self.backTrack(nums,size,curAns,usedNums,result)
usedNums.remove(nums[j])
curAns.pop()
def permuteUnique(self,nums):
size = len(nums)
if size < 1:
return []
res = []
usedNums = set()
def backTrack(nums,begin,curAns,usedNums):
if len(curAns) == size:
res.append(curAns[:])
return
hashMap = set()
for j in xrange(size):
if nums[j] in hashMap:
continue
else:
hashMap.add(nums[j])
if nums[j] not in usedNums:
usedNums.add(nums[j])
curAns.append(nums[j])
self.backTrack(nums,size,curAns,usedNums)
usedNums.remove(nums[j])
curAns.pop()
nums.sort()
backTrack(nums,0,[],usedNums)
print 'length: ',len(res)
return res
if __name__ == '__main__':
s = Solution()
nums = [1,1,2]
ans = s.permute(nums)
print 'AA: ',len(ans),ans | [
"="
] | = |
94e70bf6deabed67dd9378651c4c5af909762b47 | 0d8486c1d55c40bebea7c5428930f18165d2d0e9 | /tests/asp/AllAnswerSets/tight/7-queens.asp.test.py | 58599c3c79d3455d9b87dcc51342b0dc08b3fe6f | [
"Apache-2.0"
] | permissive | bernardocuteri/wasp | 6f81bf6aa8fb273c91bbf68ecce4ecb195a55953 | 05c8f961776dbdbf7afbf905ee00fc262eba51ad | refs/heads/master | 2021-06-08T11:58:25.080818 | 2020-10-05T16:57:37 | 2020-10-05T16:57:37 | 124,245,808 | 0 | 0 | Apache-2.0 | 2018-03-07T14:13:16 | 2018-03-07T14:13:16 | null | UTF-8 | Python | false | false | 11,082 | py | input = """
1 2 0 0
1 3 0 0
1 4 0 0
1 5 0 0
1 6 0 0
1 7 0 0
1 8 0 0
1 9 0 0
1 10 7 6 11 12 13 14 15 16 17
1 11 7 6 10 12 13 14 15 16 17
1 12 7 6 10 11 13 14 15 16 17
1 13 7 6 10 11 12 14 15 16 17
1 14 7 6 10 11 12 13 15 16 17
1 15 7 6 10 11 12 13 14 16 17
1 16 7 6 10 11 12 13 14 15 17
1 17 0 0
1 18 7 6 19 20 21 22 23 24 25
1 19 7 6 18 20 21 22 23 24 25
1 20 7 6 18 19 21 22 23 24 25
1 21 7 6 18 19 20 22 23 24 25
1 22 7 6 18 19 20 21 23 24 25
1 23 7 6 18 19 20 21 22 24 25
1 24 7 6 18 19 20 21 22 23 25
1 25 0 0
1 26 7 6 27 28 29 30 31 32 33
1 27 7 6 26 28 29 30 31 32 33
1 28 7 6 26 27 29 30 31 32 33
1 29 7 6 26 27 28 30 31 32 33
1 30 7 6 26 27 28 29 31 32 33
1 31 7 6 26 27 28 29 30 32 33
1 32 7 6 26 27 28 29 30 31 33
1 33 0 0
1 34 7 6 35 36 37 38 39 40 41
1 35 7 6 34 36 37 38 39 40 41
1 36 7 6 34 35 37 38 39 40 41
1 37 7 6 34 35 36 38 39 40 41
1 38 7 6 34 35 36 37 39 40 41
1 39 7 6 34 35 36 37 38 40 41
1 40 7 6 34 35 36 37 38 39 41
1 41 0 0
1 42 7 6 43 44 45 46 47 48 49
1 43 7 6 42 44 45 46 47 48 49
1 44 7 6 42 43 45 46 47 48 49
1 45 7 6 42 43 44 46 47 48 49
1 46 7 6 42 43 44 45 47 48 49
1 47 7 6 42 43 44 45 46 48 49
1 48 7 6 42 43 44 45 46 47 49
1 49 0 0
1 50 7 6 51 52 53 54 55 56 57
1 51 7 6 50 52 53 54 55 56 57
1 52 7 6 50 51 53 54 55 56 57
1 53 7 6 50 51 52 54 55 56 57
1 54 7 6 50 51 52 53 55 56 57
1 55 7 6 50 51 52 53 54 56 57
1 56 7 6 50 51 52 53 54 55 57
1 57 0 0
1 58 7 6 59 60 61 62 63 64 65
1 59 7 6 58 60 61 62 63 64 65
1 60 7 6 58 59 61 62 63 64 65
1 61 7 6 58 59 60 62 63 64 65
1 62 7 6 58 59 60 61 63 64 65
1 63 7 6 58 59 60 61 62 64 65
1 64 7 6 58 59 60 61 62 63 65
1 65 0 0
1 1 2 0 64 56
1 1 2 0 64 48
1 1 2 0 64 40
1 1 2 0 64 32
1 1 2 0 64 24
1 1 2 0 64 16
1 1 2 0 63 55
1 1 2 0 63 47
1 1 2 0 63 39
1 1 2 0 63 31
1 1 2 0 63 23
1 1 2 0 63 15
1 1 2 0 62 54
1 1 2 0 62 46
1 1 2 0 62 38
1 1 2 0 62 30
1 1 2 0 62 22
1 1 2 0 62 14
1 1 2 0 61 53
1 1 2 0 61 45
1 1 2 0 61 37
1 1 2 0 61 29
1 1 2 0 61 21
1 1 2 0 61 13
1 1 2 0 60 52
1 1 2 0 60 44
1 1 2 0 60 36
1 1 2 0 60 28
1 1 2 0 60 20
1 1 2 0 60 12
1 1 2 0 59 51
1 1 2 0 59 43
1 1 2 0 59 35
1 1 2 0 59 27
1 1 2 0 59 19
1 1 2 0 59 11
1 1 2 0 58 50
1 1 2 0 58 42
1 1 2 0 58 34
1 1 2 0 58 26
1 1 2 0 58 18
1 1 2 0 58 10
1 1 2 0 56 64
1 1 2 0 56 48
1 1 2 0 56 40
1 1 2 0 56 32
1 1 2 0 56 24
1 1 2 0 56 16
1 1 2 0 55 63
1 1 2 0 55 47
1 1 2 0 55 39
1 1 2 0 55 31
1 1 2 0 55 23
1 1 2 0 55 15
1 1 2 0 54 62
1 1 2 0 54 46
1 1 2 0 54 38
1 1 2 0 54 30
1 1 2 0 54 22
1 1 2 0 54 14
1 1 2 0 53 61
1 1 2 0 53 45
1 1 2 0 53 37
1 1 2 0 53 29
1 1 2 0 53 21
1 1 2 0 53 13
1 1 2 0 52 60
1 1 2 0 52 44
1 1 2 0 52 36
1 1 2 0 52 28
1 1 2 0 52 20
1 1 2 0 52 12
1 1 2 0 51 59
1 1 2 0 51 43
1 1 2 0 51 35
1 1 2 0 51 27
1 1 2 0 51 19
1 1 2 0 51 11
1 1 2 0 50 58
1 1 2 0 50 42
1 1 2 0 50 34
1 1 2 0 50 26
1 1 2 0 50 18
1 1 2 0 50 10
1 1 2 0 48 64
1 1 2 0 48 56
1 1 2 0 48 40
1 1 2 0 48 32
1 1 2 0 48 24
1 1 2 0 48 16
1 1 2 0 47 63
1 1 2 0 47 55
1 1 2 0 47 39
1 1 2 0 47 31
1 1 2 0 47 23
1 1 2 0 47 15
1 1 2 0 46 62
1 1 2 0 46 54
1 1 2 0 46 38
1 1 2 0 46 30
1 1 2 0 46 22
1 1 2 0 46 14
1 1 2 0 45 61
1 1 2 0 45 53
1 1 2 0 45 37
1 1 2 0 45 29
1 1 2 0 45 21
1 1 2 0 45 13
1 1 2 0 44 60
1 1 2 0 44 52
1 1 2 0 44 36
1 1 2 0 44 28
1 1 2 0 44 20
1 1 2 0 44 12
1 1 2 0 43 59
1 1 2 0 43 51
1 1 2 0 43 35
1 1 2 0 43 27
1 1 2 0 43 19
1 1 2 0 43 11
1 1 2 0 42 58
1 1 2 0 42 50
1 1 2 0 42 34
1 1 2 0 42 26
1 1 2 0 42 18
1 1 2 0 42 10
1 1 2 0 40 64
1 1 2 0 40 56
1 1 2 0 40 48
1 1 2 0 40 32
1 1 2 0 40 24
1 1 2 0 40 16
1 1 2 0 39 63
1 1 2 0 39 55
1 1 2 0 39 47
1 1 2 0 39 31
1 1 2 0 39 23
1 1 2 0 39 15
1 1 2 0 38 62
1 1 2 0 38 54
1 1 2 0 38 46
1 1 2 0 38 30
1 1 2 0 38 22
1 1 2 0 38 14
1 1 2 0 37 61
1 1 2 0 37 53
1 1 2 0 37 45
1 1 2 0 37 29
1 1 2 0 37 21
1 1 2 0 37 13
1 1 2 0 36 60
1 1 2 0 36 52
1 1 2 0 36 44
1 1 2 0 36 28
1 1 2 0 36 20
1 1 2 0 36 12
1 1 2 0 35 59
1 1 2 0 35 51
1 1 2 0 35 43
1 1 2 0 35 27
1 1 2 0 35 19
1 1 2 0 35 11
1 1 2 0 34 58
1 1 2 0 34 50
1 1 2 0 34 42
1 1 2 0 34 26
1 1 2 0 34 18
1 1 2 0 34 10
1 1 2 0 32 64
1 1 2 0 32 56
1 1 2 0 32 48
1 1 2 0 32 40
1 1 2 0 32 24
1 1 2 0 32 16
1 1 2 0 31 63
1 1 2 0 31 55
1 1 2 0 31 47
1 1 2 0 31 39
1 1 2 0 31 23
1 1 2 0 31 15
1 1 2 0 30 62
1 1 2 0 30 54
1 1 2 0 30 46
1 1 2 0 30 38
1 1 2 0 30 22
1 1 2 0 30 14
1 1 2 0 29 61
1 1 2 0 29 53
1 1 2 0 29 45
1 1 2 0 29 37
1 1 2 0 29 21
1 1 2 0 29 13
1 1 2 0 28 60
1 1 2 0 28 52
1 1 2 0 28 44
1 1 2 0 28 36
1 1 2 0 28 20
1 1 2 0 28 12
1 1 2 0 27 59
1 1 2 0 27 51
1 1 2 0 27 43
1 1 2 0 27 35
1 1 2 0 27 19
1 1 2 0 27 11
1 1 2 0 26 58
1 1 2 0 26 50
1 1 2 0 26 42
1 1 2 0 26 34
1 1 2 0 26 18
1 1 2 0 26 10
1 1 2 0 24 64
1 1 2 0 24 56
1 1 2 0 24 48
1 1 2 0 24 40
1 1 2 0 24 32
1 1 2 0 24 16
1 1 2 0 23 63
1 1 2 0 23 55
1 1 2 0 23 47
1 1 2 0 23 39
1 1 2 0 23 31
1 1 2 0 23 15
1 1 2 0 22 62
1 1 2 0 22 54
1 1 2 0 22 46
1 1 2 0 22 38
1 1 2 0 22 30
1 1 2 0 22 14
1 1 2 0 21 61
1 1 2 0 21 53
1 1 2 0 21 45
1 1 2 0 21 37
1 1 2 0 21 29
1 1 2 0 21 13
1 1 2 0 20 60
1 1 2 0 20 52
1 1 2 0 20 44
1 1 2 0 20 36
1 1 2 0 20 28
1 1 2 0 20 12
1 1 2 0 19 59
1 1 2 0 19 51
1 1 2 0 19 43
1 1 2 0 19 35
1 1 2 0 19 27
1 1 2 0 19 11
1 1 2 0 18 58
1 1 2 0 18 50
1 1 2 0 18 42
1 1 2 0 18 34
1 1 2 0 18 26
1 1 2 0 18 10
1 1 2 0 16 64
1 1 2 0 16 56
1 1 2 0 16 48
1 1 2 0 16 40
1 1 2 0 16 32
1 1 2 0 16 24
1 1 2 0 15 63
1 1 2 0 15 55
1 1 2 0 15 47
1 1 2 0 15 39
1 1 2 0 15 31
1 1 2 0 15 23
1 1 2 0 14 62
1 1 2 0 14 54
1 1 2 0 14 46
1 1 2 0 14 38
1 1 2 0 14 30
1 1 2 0 14 22
1 1 2 0 13 61
1 1 2 0 13 53
1 1 2 0 13 45
1 1 2 0 13 37
1 1 2 0 13 29
1 1 2 0 13 21
1 1 2 0 12 60
1 1 2 0 12 52
1 1 2 0 12 44
1 1 2 0 12 36
1 1 2 0 12 28
1 1 2 0 12 20
1 1 2 0 11 59
1 1 2 0 11 51
1 1 2 0 11 43
1 1 2 0 11 35
1 1 2 0 11 27
1 1 2 0 11 19
1 1 2 0 10 58
1 1 2 0 10 50
1 1 2 0 10 42
1 1 2 0 10 34
1 1 2 0 10 26
1 1 2 0 10 18
1 1 2 0 63 56
1 1 2 0 62 55
1 1 2 0 62 48
1 1 2 0 61 54
1 1 2 0 61 47
1 1 2 0 61 40
1 1 2 0 60 53
1 1 2 0 60 46
1 1 2 0 60 39
1 1 2 0 60 32
1 1 2 0 59 52
1 1 2 0 59 45
1 1 2 0 59 38
1 1 2 0 59 31
1 1 2 0 59 24
1 1 2 0 58 51
1 1 2 0 58 44
1 1 2 0 58 37
1 1 2 0 58 30
1 1 2 0 58 23
1 1 2 0 58 16
1 1 2 0 55 48
1 1 2 0 54 47
1 1 2 0 54 40
1 1 2 0 53 46
1 1 2 0 53 39
1 1 2 0 53 32
1 1 2 0 52 45
1 1 2 0 52 38
1 1 2 0 52 31
1 1 2 0 52 24
1 1 2 0 51 44
1 1 2 0 51 37
1 1 2 0 51 30
1 1 2 0 51 23
1 1 2 0 51 16
1 1 2 0 50 43
1 1 2 0 50 36
1 1 2 0 50 29
1 1 2 0 50 22
1 1 2 0 50 15
1 1 2 0 47 40
1 1 2 0 46 39
1 1 2 0 46 32
1 1 2 0 45 38
1 1 2 0 45 31
1 1 2 0 45 24
1 1 2 0 44 37
1 1 2 0 44 30
1 1 2 0 44 23
1 1 2 0 44 16
1 1 2 0 43 36
1 1 2 0 43 29
1 1 2 0 43 22
1 1 2 0 43 15
1 1 2 0 42 35
1 1 2 0 42 28
1 1 2 0 42 21
1 1 2 0 42 14
1 1 2 0 39 32
1 1 2 0 38 31
1 1 2 0 38 24
1 1 2 0 37 30
1 1 2 0 37 23
1 1 2 0 37 16
1 1 2 0 36 29
1 1 2 0 36 22
1 1 2 0 36 15
1 1 2 0 35 28
1 1 2 0 35 21
1 1 2 0 35 14
1 1 2 0 34 27
1 1 2 0 34 20
1 1 2 0 34 13
1 1 2 0 31 24
1 1 2 0 30 23
1 1 2 0 30 16
1 1 2 0 29 22
1 1 2 0 29 15
1 1 2 0 28 21
1 1 2 0 28 14
1 1 2 0 27 20
1 1 2 0 27 13
1 1 2 0 26 19
1 1 2 0 26 12
1 1 2 0 23 16
1 1 2 0 22 15
1 1 2 0 21 14
1 1 2 0 20 13
1 1 2 0 19 12
1 1 2 0 18 11
1 1 2 0 64 55
1 1 2 0 64 46
1 1 2 0 64 37
1 1 2 0 64 28
1 1 2 0 64 19
1 1 2 0 64 10
1 1 2 0 63 54
1 1 2 0 63 45
1 1 2 0 63 36
1 1 2 0 63 27
1 1 2 0 63 18
1 1 2 0 62 53
1 1 2 0 62 44
1 1 2 0 62 35
1 1 2 0 62 26
1 1 2 0 61 52
1 1 2 0 61 43
1 1 2 0 61 34
1 1 2 0 60 51
1 1 2 0 60 42
1 1 2 0 59 50
1 1 2 0 56 47
1 1 2 0 56 38
1 1 2 0 56 29
1 1 2 0 56 20
1 1 2 0 56 11
1 1 2 0 55 46
1 1 2 0 55 37
1 1 2 0 55 28
1 1 2 0 55 19
1 1 2 0 55 10
1 1 2 0 54 45
1 1 2 0 54 36
1 1 2 0 54 27
1 1 2 0 54 18
1 1 2 0 53 44
1 1 2 0 53 35
1 1 2 0 53 26
1 1 2 0 52 43
1 1 2 0 52 34
1 1 2 0 51 42
1 1 2 0 48 39
1 1 2 0 48 30
1 1 2 0 48 21
1 1 2 0 48 12
1 1 2 0 47 38
1 1 2 0 47 29
1 1 2 0 47 20
1 1 2 0 47 11
1 1 2 0 46 37
1 1 2 0 46 28
1 1 2 0 46 19
1 1 2 0 46 10
1 1 2 0 45 36
1 1 2 0 45 27
1 1 2 0 45 18
1 1 2 0 44 35
1 1 2 0 44 26
1 1 2 0 43 34
1 1 2 0 40 31
1 1 2 0 40 22
1 1 2 0 40 13
1 1 2 0 39 30
1 1 2 0 39 21
1 1 2 0 39 12
1 1 2 0 38 29
1 1 2 0 38 20
1 1 2 0 38 11
1 1 2 0 37 28
1 1 2 0 37 19
1 1 2 0 37 10
1 1 2 0 36 27
1 1 2 0 36 18
1 1 2 0 35 26
1 1 2 0 32 23
1 1 2 0 32 14
1 1 2 0 31 22
1 1 2 0 31 13
1 1 2 0 30 21
1 1 2 0 30 12
1 1 2 0 29 20
1 1 2 0 29 11
1 1 2 0 28 19
1 1 2 0 28 10
1 1 2 0 27 18
1 1 2 0 24 15
1 1 2 0 23 14
1 1 2 0 22 13
1 1 2 0 21 12
1 1 2 0 20 11
1 1 2 0 19 10
0
10 q(6,1)
11 q(6,2)
12 q(6,3)
13 q(6,4)
14 q(6,5)
15 q(6,6)
16 q(6,7)
18 q(5,1)
19 q(5,2)
20 q(5,3)
21 q(5,4)
22 q(5,5)
23 q(5,6)
24 q(5,7)
26 q(4,1)
27 q(4,2)
28 q(4,3)
29 q(4,4)
30 q(4,5)
31 q(4,6)
32 q(4,7)
34 q(3,1)
35 q(3,2)
36 q(3,3)
37 q(3,4)
38 q(3,5)
39 q(3,6)
40 q(3,7)
42 q(2,1)
43 q(2,2)
44 q(2,3)
45 q(2,4)
46 q(2,5)
47 q(2,6)
48 q(2,7)
50 q(1,1)
51 q(1,2)
52 q(1,3)
53 q(1,4)
54 q(1,5)
55 q(1,6)
56 q(1,7)
58 q(0,1)
59 q(0,2)
60 q(0,3)
61 q(0,4)
62 q(0,5)
63 q(0,6)
64 q(0,7)
0
B+
0
B-
1
0
1
"""
output = """
{q(6,6), q(5,3), q(4,7), q(3,4), q(2,1), q(1,5), q(0,2)}
{q(6,6), q(5,3), q(4,1), q(3,4), q(2,7), q(1,5), q(0,2)}
{q(6,5), q(5,1), q(4,6), q(3,4), q(2,2), q(1,7), q(0,3)}
{q(6,3), q(5,1), q(4,6), q(3,4), q(2,2), q(1,7), q(0,5)}
{q(6,5), q(5,7), q(4,2), q(3,4), q(2,6), q(1,1), q(0,3)}
{q(6,2), q(5,5), q(4,7), q(3,4), q(2,1), q(1,3), q(0,6)}
{q(6,2), q(5,5), q(4,1), q(3,4), q(2,7), q(1,3), q(0,6)}
{q(6,3), q(5,7), q(4,2), q(3,4), q(2,6), q(1,1), q(0,5)}
{q(6,5), q(5,1), q(4,4), q(3,7), q(2,3), q(1,6), q(0,2)}
{q(6,5), q(5,2), q(4,6), q(3,3), q(2,7), q(1,4), q(0,1)}
{q(6,5), q(5,7), q(4,2), q(3,6), q(2,3), q(1,1), q(0,4)}
{q(6,5), q(5,3), q(4,1), q(3,6), q(2,4), q(1,2), q(0,7)}
{q(6,3), q(5,1), q(4,6), q(3,2), q(2,5), q(1,7), q(0,4)}
{q(6,6), q(5,1), q(4,3), q(3,5), q(2,7), q(1,2), q(0,4)}
{q(6,4), q(5,1), q(4,3), q(3,6), q(2,2), q(1,7), q(0,5)}
{q(6,4), q(5,1), q(4,5), q(3,2), q(2,6), q(1,3), q(0,7)}
{q(6,1), q(5,6), q(4,4), q(3,2), q(2,7), q(1,5), q(0,3)}
{q(6,7), q(5,2), q(4,4), q(3,6), q(2,1), q(1,3), q(0,5)}
{q(6,3), q(5,7), q(4,4), q(3,1), q(2,5), q(1,2), q(0,6)}
{q(6,2), q(5,4), q(4,1), q(3,7), q(2,5), q(1,3), q(0,6)}
{q(6,6), q(5,4), q(4,2), q(3,7), q(2,5), q(1,3), q(0,1)}
{q(6,3), q(5,6), q(4,2), q(3,5), q(2,1), q(1,4), q(0,7)}
{q(6,3), q(5,5), q(4,7), q(3,2), q(2,4), q(1,6), q(0,1)}
{q(6,4), q(5,7), q(4,3), q(3,6), q(2,2), q(1,5), q(0,1)}
{q(6,7), q(5,4), q(4,1), q(3,5), q(2,2), q(1,6), q(0,3)}
{q(6,6), q(5,2), q(4,5), q(3,1), q(2,4), q(1,7), q(0,3)}
{q(6,2), q(5,6), q(4,3), q(3,7), q(2,4), q(1,1), q(0,5)}
{q(6,2), q(5,4), q(4,6), q(3,1), q(2,3), q(1,5), q(0,7)}
{q(6,7), q(5,3), q(4,6), q(3,2), q(2,5), q(1,1), q(0,4)}
{q(6,2), q(5,5), q(4,3), q(3,1), q(2,7), q(1,4), q(0,6)}
{q(6,4), q(5,7), q(4,5), q(3,2), q(2,6), q(1,1), q(0,3)}
{q(6,7), q(5,5), q(4,3), q(3,1), q(2,6), q(1,4), q(0,2)}
{q(6,2), q(5,7), q(4,5), q(3,3), q(2,1), q(1,6), q(0,4)}
{q(6,1), q(5,3), q(4,5), q(3,7), q(2,2), q(1,4), q(0,6)}
{q(6,6), q(5,3), q(4,5), q(3,7), q(2,1), q(1,4), q(0,2)}
{q(6,4), q(5,6), q(4,1), q(3,3), q(2,5), q(1,7), q(0,2)}
{q(6,1), q(5,5), q(4,2), q(3,6), q(2,3), q(1,7), q(0,4)}
{q(6,4), q(5,2), q(4,7), q(3,5), q(2,3), q(1,1), q(0,6)}
{q(6,6), q(5,4), q(4,7), q(3,1), q(2,3), q(1,5), q(0,2)}
{q(6,1), q(5,4), q(4,7), q(3,3), q(2,6), q(1,2), q(0,5)}
"""
| [
"carminedodaro@gmail.com"
] | carminedodaro@gmail.com |
a7733a3cb52937e8a091ff034739072679920e13 | 7cb839566d9bc2a4cdc1da7af1044ab006642afa | /emojiconverter/facetoemoji/views.py | f09b689fca4b6d233f7ae9d1eb2ec82b8aeaa5b2 | [] | no_license | FalakChhikara/FaceEmoji | 84a4195791099fc0b9ca6e8ba4c38f224dfc4ed6 | ed14a2d03663eb10f594ede762d0b37a6cf3174b | refs/heads/master | 2022-06-07T19:24:47.221178 | 2020-05-07T03:24:11 | 2020-05-07T03:24:11 | 261,701,635 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,878 | py | import cv2
from django.shortcuts import render, redirect
import numpy as np
import pathlib
from tensorflow import keras
from tensorflow.keras import backend as K
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau
filepath='Model.{epoch:02d}-{val_acc:.4f}.hdf5'
checkpointer = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=False, mode='auto')
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=10, verbose=0, mode='auto', min_delta=0.0001, cooldown=0, min_lr=0)
early_stop = EarlyStopping(monitor='val_loss', min_delta=0, patience=0, verbose=0, mode='auto')
faceCascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
def fbeta(y_true, y_pred, threshold_shift=0):
beta = 1
y_pred = K.clip(y_pred, 0, 1)
y_pred_bin = K.round(y_pred + threshold_shift)
tp = K.sum(K.round(y_true * y_pred_bin), axis=1) + K.epsilon()
fp = K.sum(K.round(K.clip(y_pred_bin - y_true, 0, 1)), axis=1)
fn = K.sum(K.round(K.clip(y_true - y_pred, 0, 1)), axis=1)
precision = tp / (tp + fp)
recall = tp / (tp + fn)
beta_squared = beta ** 2
return K.mean((beta_squared + 1) * (precision * recall) / (beta_squared * precision + recall + K.epsilon()))
def homepage(request):
return render(request, 'facetoemoji/home.html')
def _locate_faces(image):
faces = faceCascade.detectMultiScale(
image
)
return faces # list of (x, y, w, h)
def find_faces(image):
faces_coordinates = _locate_faces(image)
cutted_faces = [image[y:y + h, x:x + w] for (x, y, w, h) in faces_coordinates]
normalized_faces = [_normalize_face(face) for face in cutted_faces]
return zip(normalized_faces, faces_coordinates)
def _normalize_face(face):
face = cv2.cvtColor(face, cv2.COLOR_BGR2GRAY)
face = cv2.resize(face, (350, 350))
return face
def expr(image,model):
# image = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
image = cv2.resize(image,(48,48))
image = np.stack((image,)*1, axis=-1)
image = np.expand_dims(image, axis=0)
arr = model.predict(image)
# print(arr)
result = arr[0].argmax()
return result
def webcam(request):
cap = None
video = cv2.VideoCapture(0)
video.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
video.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
cdir = str(pathlib.Path(__file__).parent.absolute())
modelpath = cdir + '\\' + 'weights.h5'
model = keras.models.load_model(modelpath, custom_objects={"fbeta": fbeta})
# emotions = ['anger', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral']
while True:
check, frame = video.read()
# frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
cv2.imshow('Face', frame)
print(type(frame))
print(modelpath)
for face, (x, y, w, h) in find_faces(frame):
prediction = expr(face, model)
# /content/4.png
idir = cdir + '\\' + 'graphics' + '\\' + str(prediction) + '.png'
print(idir)
em = cv2.imread(idir)
print(type(em))
# em = cv2.cvtColor(em, cv2.COLOR_RGB2BGR)
em = cv2.resize(em, (w, h))
frame[y:y + h, x:x + w] = em
font = cv2.FONT_HERSHEY_SIMPLEX
bottomLeftCornerOfText = (10, 50)
fontScale = 1
fontColor = (255, 255, 255)
lineType = 2
cv2.putText(frame, 'Press Q to quit',
bottomLeftCornerOfText,
font,
fontScale,
fontColor,
lineType)
cv2.imshow('emoji', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
video.release()
cv2.destroyAllWindows()
return redirect('facetoemoji:homepage')
| [
"falakchhikara2001@gmail.com"
] | falakchhikara2001@gmail.com |
086b967156eedb27a1230bbede5ad1ea56377365 | 4e01bb5dfd42e1a80ac63ec55e98c0ae11ceeaac | /exercises/implementation/append_and_delete.py | f9cf77884b59e05fefd23a2e891e56a215a43d1d | [] | no_license | 98sean98/hackerrank | 990c0e074180837586bbfce839712b784704ad31 | 0d633a2a2ff1ddbbab67d99ad7650020c214fb0b | refs/heads/master | 2022-04-16T22:34:24.724014 | 2020-04-12T21:25:47 | 2020-04-12T21:25:47 | 255,172,800 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,275 | py | #!/bin/python
import sys
s = "abaaaa"
t = "abaaaaaaa"
k = 5
# s = raw_input().strip()
# t = raw_input().strip()
# k = int(raw_input().strip())
count = 0
# remove the first letter
def remove_first_letter(oldstr):
if oldstr != "":
return oldstr[1:]
else:
return oldstr
# return first letter
def first_letter(string):
if string != "":
return string[0]
else:
return ""
# length of s is longer than k
if len(s) > k:
# compare the first letter of two strings and keep on removing until the first letters are different
while first_letter(s) == first_letter(t):
s = remove_first_letter(s)
t = remove_first_letter(t)
count = len(s) + len(t)
# length of s is equal to k
elif len(s) == k:
count = 0
elif len(s) == len(t) and k >= len(s):
count = k
elif len(s) < k:
print 's < k'
# compare the first letter of two strings and keep on removing until the first letters are different
while first_letter(s) == first_letter(t) and (len(s) != 0 and len(t) != 0):
print len(s)
s = remove_first_letter(s)
t = remove_first_letter(t)
if(len(t) % 2) and k >= len(t):
count = k
# elif():
print count
if (count == k):
print 'Yes'
else:
print 'No'
| [
"seanchok@gmail.com"
] | seanchok@gmail.com |
1abd82cd32e985e35728a631c81c33ef0fe62b70 | 481ce69bd3611715fef0be99c655c95d67f16d5f | /riopy/tests/test_symops.py | a44f6bd76716d46e50bf17f299cbedb403e45b81 | [
"BSD-3-Clause"
] | permissive | fsimkovic/riopy | 0ffed18c72573e824affa97d5c17ca462c5f2031 | 5dc4083d1c0919d94ceeac802d3fb40748b947f3 | refs/heads/master | 2021-03-24T10:14:25.904758 | 2018-08-30T13:16:02 | 2018-08-30T13:16:02 | 117,836,272 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 464 | py |
import unittest
from riopy.symops import SymmetryOperator
class SymmetryOperatorTest(unittest.TestCase):
def test___init___1(self):
symops = SymmetryOperator.ops("P1")
self.assertTrue(len(symops) == 1)
self.assertTupleEqual((0.0, 0.0, 0.0), symops[0].t().as_double())
self.assertTupleEqual((1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0), symops[0].r().as_double())
if __name__ == "__main__":
unittest.main(verbosity=2)
| [
"felixsimkovic@me.com"
] | felixsimkovic@me.com |
83a1bb3a2cdd1a52239b03c71eef467737b35324 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/synthetic/sieve-big-4096.py | 8f4fbd00afb059796231d50db43e5910e4bdb267 | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,757 | py | # A resizable list of integers
class Vector(object):
items: [int] = None
size: int = 0
def __init__(self:"Vector"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector", idx: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector") -> int:
return self.size
# A resizable list of integers
class Vector2(object):
items: [int] = None
items2: [int] = None
size: int = 0
size2: int = 0
def __init__(self:"Vector2"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector2") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector2") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector2") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector2") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector2", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector2", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector2", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector2", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector2", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector2", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector2", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector2", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector2") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector2") -> int:
return self.size
# A resizable list of integers
class Vector3(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
def __init__(self:"Vector3"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector3") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector3") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector3") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector3", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector3", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector3", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector3", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector3", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector3", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector3", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector3", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector3", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector3", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector3", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector3", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector3") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector3") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector3") -> int:
return self.size
# A resizable list of integers
class Vector4(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
items4: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
size4: int = 0
def __init__(self:"Vector4"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity4(self:"Vector4") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity4(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector4", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector4", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector4", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append4(self:"Vector4", item: int, item2: int, item3: int, item4: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector4", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector4", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all4(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector4", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector4", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector4", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector4", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector4", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector4", idx: int, idx2: int, idx3: int) -> $Type:
return self.items[idx]
# Retrieves an item at a given index
def get4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length4(self:"Vector4") -> int:
return self.size
# A resizable list of integers
class Vector5(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
items4: [int] = None
items5: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
size4: int = 0
size5: int = 0
def __init__(self:"Vector5"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity4(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity5(self:"Vector5") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity4(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity5(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector5", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector5", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector5", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append4(self:"Vector5", item: int, item2: int, item3: int, item4: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append5(self:"Vector5", item: int, item2: int, item3: int, item4: int, item5: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector5", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector5", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all4(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all5(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int], new_items5: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
item5:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector5", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector5", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector5", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector5", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector5", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector5", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length4(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length5(self:"Vector5") -> int:
return self.size
# A faster (but more memory-consuming) implementation of vector
class DoublingVector(Vector):
doubling_limit:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector2(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector2") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector2") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector3(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector4(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
doubling_limit4:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity4(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector5(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
doubling_limit4:int = 1000
doubling_limit5:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity4(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity5(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Makes a vector in the range [i, j)
def vrange(i:int, j:int) -> Vector:
v:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange2(i:int, j:int, i2:int, j2:int) -> Vector:
v:Vector = None
v2:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange3(i:int, j:int, i2:int, j2:int, i3:int, j3:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange4(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange5(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int, i5:int, j5:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v5:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
# Sieve of Eratosthenes (not really)
def sieve(v:Vector) -> object:
i:int = 0
j:int = 0
k:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve2(v:Vector, v2:Vector) -> object:
i:int = 0
i2:int = 0
j:int = 0
j2:int = 0
k:int = 0
k2:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve3(v:Vector, v2:Vector, v3:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
j:int = 0
j2:int = 0
j3:int = 0
k:int = 0
k2:int = 0
k3:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve4(v:Vector, v2:Vector, v3:Vector, v4:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
j:int = 0
j2:int = 0
j3:int = 0
j4:int = 0
k:int = 0
k2:int = 0
k3:int = 0
k4:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve5(v:Vector, v2:Vector, v3:Vector, v4:Vector, v5:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
j:int = 0
j2:int = 0
j3:int = 0
j4:int = 0
j5:int = 0
k:int = 0
k2:int = 0
k3:int = 0
k4:int = 0
k5:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
# Input parameter
n:int = 50
n2:int = 50
n3:int = 50
n4:int = 50
n5:int = 50
# Data
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v5:Vector = None
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
# Crunch
v = vrange(2, n)
v2 = vrange(2, n)
v3 = vrange(2, n)
v4 = vrange(2, n)
v5 = vrange(2, n)
sieve(v)
# Print
while i < v.length():
print(v.get(i))
i = i + 1
| [
"647530+Virtlink@users.noreply.github.com"
] | 647530+Virtlink@users.noreply.github.com |
944dd514a48ddc17daad3a0c927a6afa0d754240 | f99a71291f8b2acdeedf7bf8bc7e37ad3a4c79e1 | /backend/main/app/modules/mail.py | 599e6746e5afbdf05946c4035c8314cfef2d3de1 | [] | no_license | benson40111/SmartBike_Parking_Project | 8190ced843958be7dba639f58689d6194d19d420 | d94eab83d1d495ec5abffad651e7c338671b12e2 | refs/heads/master | 2021-09-24T09:17:53.763744 | 2018-10-06T14:12:18 | 2018-10-06T14:12:18 | 116,125,717 | 0 | 1 | null | 2018-10-06T14:12:06 | 2018-01-03T10:46:33 | HTML | UTF-8 | Python | false | false | 941 | py | import smtplib
from app.conf.config import gmail_user, gmail_password
from email.mime.text import MIMEText
from email.header import Header
from app import logger
class mail:
def __init__(self):
self.server = smtplib.SMTP_SSL('smtp.gmail.com', 465)
self.server.ehlo()
self.server.login(gmail_user, gmail_password)
logger.info("success connected")
self.me = "smart_park"
def send(self, to, subject, message):
msg = MIMEText(message, 'html', 'utf-8')
msg['Subject'] = Header(subject, 'utf-8')
msg['From'] = Header(self.me, 'utf-8')
msg['To'] = Header(to, 'utf-8')
try:
self.server.sendmail(self.me, to, msg.as_string())
logger.info("Email sent receiver: {}".format(to))
except smtplib.SMTPException as e:
self.server.ehlo()
self.server.login(gmail_user, gmail_password)
logger.error(e)
| [
"madness48596@gmail.com"
] | madness48596@gmail.com |
96aac0b4b4bb06d1a1361336110a66ef306f8784 | cbda89443b351bb2047180dad4e300c13dc3df7f | /Crystals/Morpurgo_sp_outer/Jobs/TIPS_Pc/TIPS_Pc_cation_neut_inner0_outer2/TIPS_Pc_cation_neut_inner0_outer2.py | a0c28b5d437cb4a23e82114742f6ee0128900f05 | [] | no_license | sheridanfew/pythonpolarisation | 080f52979f98d26360a46412a10c8e3f51ee4549 | 178e2684e9a239a8e60af5f7b1eb414ac5f31e92 | refs/heads/master | 2021-07-10T01:07:40.978790 | 2021-03-11T16:56:37 | 2021-03-11T16:56:37 | 96,101,351 | 0 | 0 | null | 2017-07-03T13:37:06 | 2017-07-03T10:54:52 | null | UTF-8 | Python | false | false | 6,693 | py | import sys
sys.path.append('../../../../../')
from BasicElements import *
from BasicElements.Register import GetRegister
from BasicElements.MoleculeFactory import ReadMoleculeType
from BasicElements.MoleculeFactory import GetMolecule
from BasicElements.Crystal import *
from Polarizability.GetDipoles import get_dipoles,split_dipoles_onto_atoms
from Polarizability import *
from Polarizability.GetEnergyFromDips import *
from Polarizability.JMatrix import JMatrix
import numpy as np
from math import *
from time import gmtime, strftime
import os
print strftime("%a, %d %b %Y %X +0000", gmtime())
name='TIPS_Pc_cation_neut_inner0_outer2'
#For crystals here, all cubic and centred at centre
insize=0
#number of TVs in each dir central mol is from edge of inner region
outsize=2
mols_cen=['TIPS_Pc_cation_aniso_cifstruct_chelpg.xyz']
mols_sur=['TIPS_Pc_neut_aniso_cifstruct_chelpg.xyz']
mols_outer=['sp_TIPS_Pc_neut.xyz']
#From cif:
'''
TIPS
data_k01029
_cell_length_a 7.5650(15)
_cell_length_b 7.7500(15)
_cell_length_c 16.835(3)
_cell_angle_alpha 89.15(3)
_cell_angle_beta 78.42(3)
_cell_angle_gamma 83.63(3)
_cell_volume 960.9(3)
'''
#Get translation vectors:
a=7.565015/0.5291772109217
b=7.750015/0.5291772109217
c=16.8353/0.5291772109217
alpha=89.153*(pi/180)
beta=78.423*(pi/180)
gamma=83.633*(pi/180)
cif_unit_cell_volume=960.9/(a*b*c*(0.5291772109217**3))
cell_volume=sqrt(1 - (cos(alpha)**2) - (cos(beta)**2) - (cos(gamma)**2) + (2*cos(alpha)*cos(beta)*cos(gamma)))
#Converts frac coords to carts
matrix_to_cartesian=np.matrix( [[a, b*cos(gamma), c*cos(beta)],
[0, b*sin(gamma), c*(cos(alpha) - cos(beta)*cos(gamma))/sin(gamma)],
[0, 0, c*cell_volume/sin(gamma)]])
#carts to frac
matrix_to_fractional=matrix_to_cartesian.I
#TVs, TV[0,1,2] are the three translation vectors.
TV=matrix_to_cartesian.T
cut=8.0
totsize=insize+outsize
#number of TVs in each dir nearest c inner mol is from edge of outer region
cenpos=[totsize,totsize,totsize]
length=[2*totsize+1,2*totsize+1,2*totsize+1]
maxTVs=insize
outer_maxTVs=insize+outsize
#for diamond outer, don't specify for cube and will fill to cube edges.
print 'name: ',name,'mols_cen: ', mols_cen,' mols_sur: ',mols_sur,' TVs: ', TV
# Place Molecules
prot_neut_cry=Crystal(name=name,mols_cen=mols_cen,mols_sur=mols_sur,cenpos=cenpos,length=length,TVs=TV,maxTVs=maxTVs,mols_outer=mols_outer,outer_maxTVs=outer_maxTVs)
#prot_neut_cry._mols contains all molecules.
#mols[0] contains a list of all molecules in position a, mols[1] all mols in pos'n b, etc.
#mols[0][x,y,z] contains molecule a in position x,y,z
#mols may as such be iterated over in a number of ways to consider different molecules.
prot_neut_cry().print_posns()
#Calculate Properties:
print strftime("%a, %d %b %Y %X +0000", gmtime())
E0 = np.matrix([0.,0.,0.])
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'Calc jm'
jm = JMatrix(cutoff=cut)
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'Calc dips:'
d = get_dipoles(E0=E0,jm=jm._m,cutoff=cut)
print strftime("%a, %d %b %Y %X +0000", gmtime())
Efield = get_electric_field(E0)
potential = get_potential()
print strftime("%a, %d %b %Y %X +0000", gmtime())
#print 'dips', d
print 'splitting dips onto atoms'
split_d = split_dipoles_onto_atoms(d)
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'summing dips:'
tot = np.matrix([0.,0.,0.])
for dd in split_d:
tot += dd
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'total dip moment', tot
Uqq = np.multiply(get_U_qq(potential=potential),27.211)
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'Uqq', Uqq
Uqd = np.multiply(get_U_qdip(dips=d,Efield=Efield),27.211)
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'Uqd', Uqd
Udd = np.multiply(get_U_dipdip(jm=jm._m,dips=d.T),27.211)
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'Udd', Udd
energyev = Udd+Uqd+Uqq
print 'energyev', energyev
energy=energyev/27.211
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'Making .dat cross sections for gnuplot'
# print TVs
if not os.path.exists('Dips_Posns_TVs'): os.makedirs('Dips_Posns_TVs')
f = open('Dips_Posns_TVs/%s_TVs.dat' % name, 'w')
TVstr=str(str(TV[0,0]) + ' ' + str(TV[0,1]) + ' ' + str(TV[0,2]) + '\n' + str(TV[1,0]) + ' ' + str(TV[1,1]) + ' ' + str(TV[1,2]) + '\n' + str(TV[2,0]) + ' ' + str(TV[2,1]) + ' ' + str(TV[2,2])+ '\n')
f.write(TVstr)
f.flush()
f.close()
# print dipoles
if not os.path.exists('Dips_Posns_TVs'): os.makedirs('Dips_Posns_TVs')
f = open('Dips_Posns_TVs/%s_dipoles.dat' % name, 'w')
for dd in split_d:
dstr=str(dd)
f.write(dstr)
f.write('\n')
f.flush()
f.close()
# print properties for charge in centrepos
time=strftime("%a, %d %b %Y %X +0000", gmtime())
f = open('%s_properties.csv' % name, 'w')
f.write ('time\tname\tmols_cen\tmols_sur\tmols_outer\tinsize\toutsize\tenergyev\tUqq\tUqd\tUdd\tTotdip_x\tTotdip_y\tTotdip_z')
f.write ('\n%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s' % (time,name,mols_cen,mols_sur,mols_outer,insize,outsize,energyev,Uqq,Uqd,Udd,tot[0,0],tot[0,1],tot[0,2]))
f.flush()
f.close()
# print header for reorgs
f = open('reorg_energies_%s_properties.csv' % name, 'w')
f.write ('time\tname\tmols_cen\tmols_sur\tmols_outer\tinsize\toutsize\ta\tb\tc\tmolincell\tReorg(eV)')
f.flush()
f.close()
# REORGANISATION ENERGIES
#Note that this assumes a cube, and values for which
for dist in range(0,(length[0]/2)+1,1):
print '\n\nDIST: ', dist, '\n'
for a in range(prot_neut_cry()._cenpos[0]-dist,prot_neut_cry()._cenpos[0]+dist+1,1):
for b in range(prot_neut_cry()._cenpos[1]-dist,prot_neut_cry()._cenpos[1]+dist+1,1):
for c in range(prot_neut_cry()._cenpos[2]-dist,prot_neut_cry()._cenpos[2]+dist+1,1):
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'a,b,c',a,b,c
for molincell in range(0,len(prot_neut_cry()._mols),1):
prot_neut_cry().calc_reorg(a1=prot_neut_cry()._cenpos[0],b1=prot_neut_cry()._cenpos[1],c1=prot_neut_cry()._cenpos[2],molincell1=0,a2=a,b2=b,c2=c,molincell2=molincell,dips=d,oldUqd=Uqd)
print 'Reorg: ', prot_neut_cry()._reorgs[molincell][a][b][c]
f = open('reorg_energies_%s_properties.csv' % name, 'a')
f.write ('\n%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s' % (time,name,mols_cen,mols_sur,mols_outer,insize,outsize,a,b,c,molincell,prot_neut_cry()._reorgs[molincell][a][b][c]))
f.flush()
f.close()
# Redo this and overwrite after each set to ensure we have some even if not all reorgs complete
prot_neut_cry().print_reorgs()
print 'Job Completed Successfully.'
| [
"sheridan.few@gmail.com"
] | sheridan.few@gmail.com |
4500182d81ed9e0c2cdaa86f3343436b856ccd07 | 325b1cef3e82013abbf9c8270c5ec7b44b9adc2f | /lab7/informatics/d/for/I.py | 17a518b58ea2b225e83fdac36cc7caef04cd1057 | [] | no_license | shagyrovmaksat/WDSpring2021 | adb4456d6ee456aab479c048f209f87031bd9842 | 7884ac7aac2603d5d1814bacc9e7293c48f2180f | refs/heads/main | 2023-04-02T06:32:36.712900 | 2021-04-13T07:51:22 | 2021-04-13T07:51:22 | 334,648,143 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 197 | py | import math
n = int(input())
cnt = 0
for i in range(1, math.floor(math.sqrt(n)) + 1):
if n % i == 0:
if(n // i == i):
cnt += 1
else:
cnt += 2
print(cnt) | [
"noreply@github.com"
] | noreply@github.com |
59e1363d026e1cf5c641f40106aba606a342065e | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_337/ch169_2020_06_21_16_48_03_433219.py | d50363959fd13d06ed505512e563e82d36dc80ab | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 390 | py | login = input('Login?')
lista = []
while login != 'fim':
if not login in lista:
lista.append(login)
else:
i = 1
k = True
while k:
login2 = login+str(i)
if not login2 in lista:
lista.append(login2)
k = False
i+=1
login = input('Login?')
for nome in lista:
print(nome) | [
"you@example.com"
] | you@example.com |
48ef9a81a9fa311571610eb4ba62f12c78f8c6f7 | 8fdffd0ef99fa16201c4e75d16e15ccf0c6698e3 | /assign3_01.py | 532e8623ab2afa47a29c0d6292bf11bb073286e5 | [] | no_license | dirtyfish/AI-2014 | 616d18f01a8efe39e0c0c6d022f0ffbdcf0e1a54 | 7ed6fd8519139b22c91eeee768b8703ac2eea3cc | refs/heads/master | 2021-01-02T08:22:00.514362 | 2014-04-20T17:35:06 | 2014-04-20T17:35:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,664 | py | #!/usr/bin/env python
"""This examples demonstrates a simplish water effect of an
image. It attempts to create a hardware display surface that
can use pageflipping for faster updates. Note that the colormap
from the loaded GIF image is copied to the colormap for the
display surface.
This is based on the demo named F2KWarp by Brad Graham of Freedom2000
done in BlitzBasic. I was just translating the BlitzBasic code to
pygame to compare the results. I didn't bother porting the text and
sound stuff, that's an easy enough challenge for the reader :]"""
import pygame, os, random
from pygame.locals import *
from math import sin
main_dir = os.path.split(os.path.abspath(__file__))[0]
black = (255,255,255) #almost white
letters= ['A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z']
imagenamelist=[]
bitmaplist=[]
def getrandomfilename():
    """Build a random image filename of the form '<letter><1-10>.jpg'.

    Draws one entry from the module-level ``letters`` list and a number
    between 1 and 10 inclusive.
    """
    letter = letters[random.randint(0, len(letters) - 1)]
    number = random.randint(1, 10)
    return "{}{}.jpg".format(letter, number)
def main():
    """Run the wavy-image pygame demo until a key/mouse/quit event arrives.

    Python 2 script: uses the `print` statement and integer division below.
    Frames < 150 draw five images with a sine-wave "water" warp; frames > 150
    draw the images undistorted plus a zoomed centre tile that cycles through
    the loaded bitmaps.
    """
    #initialize and setup screen
    pygame.init()
    mainClock = pygame.time.Clock()
    screen = pygame.display.set_mode((640, 480), HWSURFACE|DOUBLEBUF)
    #load image and quadruple
    for x in range(5):
        imagename= os.path.join(main_dir, 'RESIZED_30x30',getrandomfilename())
        bitmap = pygame.image.load(imagename)
        #bitmap = pygame.transform.scale2x(bitmap)
        bitmap = pygame.transform.scale2x(bitmap)
        imagenamelist.append(imagename)
        bitmaplist.append(bitmap)
    print imagenamelist
    #get the image and screen in the same format
    if screen.get_bitsize() == 8:
        screen.set_palette(bitmap.get_palette())
    else:
        bitmap = bitmap.convert()
    #prep some variables
    anim = 0.0
    #mainloop
    stopevents = QUIT, KEYDOWN, MOUSEBUTTONDOWN
    frame=0
    while 1:
        # 24px grid of source tiles to blit each frame.
        xblocks = range(00, 640, 24)
        yblocks = range(00, 480, 24)
        frame+=1
        # `adjust` slides the images down from y=100 to y=0 over the first
        # 100 frames, then stays 0.
        adjust=100-frame
        if adjust<0:adjust=0
        screen.fill(black)
        for e in pygame.event.get():
            if e.type in stopevents:
                return
        bitmapnr=-1
        if frame<150:
            # Warp phase: offset each tile's source rect by a sine of its
            # position and the animation time.
            for bitmap in bitmaplist:
                bitmapnr+=1
                anim = anim + 0.04
                for x in xblocks:
                    xpos = (x + (sin(anim+bitmapnr+adjust + x * .03) * 15)) + 0
                    for y in yblocks:
                        ypos = (y + (sin(anim+bitmapnr + y * .03) * 15)) + 0
                        screen.blit(bitmap, (x+130*bitmapnr, y+adjust), (xpos, ypos, 23,23))
        if frame>150:
            # Static phase: same tiling, but source rect equals destination
            # (no warp); the old warped expressions are kept commented out.
            for bitmap in bitmaplist:
                bitmapnr+=1
                anim = anim + 0.04
                for x in xblocks:
                    xpos = x#(x + (sin(anim+bitmapnr+adjust + x * .03) * 15)) + 0
                    for y in yblocks:
                        ypos = y#(y + (sin(anim+bitmapnr + y * .03) * 15)) + 0
                        screen.blit(bitmap, (x+130*bitmapnr, y+adjust), (xpos, ypos, 25,25))
            # Coarser 48px grid for the zoomed centre tile.
            xblocks = range(00, 640, 48)
            yblocks = range(00, 480, 48)
            # Cycle through the 5 bitmaps every 100 frames (Python 2 integer
            # division makes frame/100 an int).
            bitmap=bitmaplist[frame/100%5]
            #bitmap = pygame.transform.scale2x(bitmap)
            if 1:
                anim = anim + 0.04
                for x in xblocks:
                    xpos = x#(x + (sin(anim+bitmapnr+adjust + x * .03) * 15)) + 0
                    for y in yblocks:
                        ypos = y#(y + (sin(anim+bitmapnr + y * .03) * 15)) + 0
                        screen.blit(bitmap, (x+200, y+200), (xpos, ypos, 47,47))
        pygame.display.flip()
        mainClock.tick(30)
if __name__ == '__main__': main()
| [
"espenvh@gmail.com"
] | espenvh@gmail.com |
1825529811ad6388a02fe1b5071b9d69c47ea4b5 | 9259d9ede798102042f88177e94be765bb487929 | /tut/settings.py | bff22769574e4c9c236a30548dddd0f5dff3cc51 | [] | no_license | malcolms7/tut | 417335a8bb24606e9a8420fe1fd433f250c46444 | fecadae8f88786ddf237150473ba5890c1ad88ab | refs/heads/master | 2016-08-06T15:37:01.398099 | 2015-06-26T11:06:33 | 2015-06-26T11:06:33 | 38,108,341 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,764 | py | """
Django settings for tut project.
Generated by 'django-admin startproject' using Django 1.8.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
TEMPLATE_DIRS = [os.path.join(BASE_DIR, 'templates')]
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '_oofl8@ny!famm_+0r7fqroankf0brwqs*8!$f()9h&6us$clv'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'polls',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'tut.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'tut.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
# SECURITY NOTE(review): database credentials are committed here in plain
# text; they should be rotated and loaded from environment variables or a
# secrets store instead of the repository.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': 'tut',
        'USER': 'swordfish',
        'PASSWORD': 'theguru',
        'HOST': 'localhost',
        'PORT': '5432',
    }
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
| [
"malcolm@swordfish.co.za"
] | malcolm@swordfish.co.za |
4b4b0881f30e1b3c546fe8a0e886bdc18dba6a0d | 88ec9caf2c504f83bf192ca7fac6b712b6e1c2f7 | /New_Year_Candles.py | 2886ad4b9cfd32b66fe3f94eb42e610b6e0cb622 | [] | no_license | nitinverma99/Codeforces---1000 | 69ceb3fb0ee155e1e1574d884a49412bb0854d86 | f7f388cd2319e9425d63065717c0e612d46799dc | refs/heads/master | 2023-05-11T22:28:17.987429 | 2021-06-04T19:07:00 | 2021-06-04T19:07:00 | 373,936,780 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 145 | py | a,b = list(map(int, input().split()))
# print(a + (a-1)//(b-1))
total = a
left = 0
while(a>=b):
total += a//b
a = a//b + a%b
print(total) | [
"nitinv0504@gmail.com"
] | nitinv0504@gmail.com |
1a462c2c8ad88fb921bfca9717f9c097a1acc83c | 3fe53dceb5e2b66e4b4f16b8b2826bb622c814f4 | /api/settings.py | 1ef374bf50a898d5d14703cbf2becf1945877365 | [] | no_license | terror12/arrecs_backend | e4d2a7df484d5d7d5cec82fe95d1aaf4363a0db1 | e465627a2e72afb9337fb98e2750dcef232372de | refs/heads/master | 2022-12-07T02:45:17.660044 | 2020-09-05T11:28:27 | 2020-09-05T11:28:27 | 287,525,131 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,650 | py | """
Django settings for api project.
Generated by 'django-admin startproject' using Django 2.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
import django_heroku
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'x4!%n-^*l@d5=4x66-agft00l(g@x&qor&8(h1_t52s7&t624)'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework', # add this
'corsheaders', # add this
'core' # add this
]
MIDDLEWARE = [
'corsheaders.middleware.CorsMiddleware', # add this
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
CORS_ORIGIN_WHITELIST = (
'https://arrecs-frontend.herokuapp.com',
)
ROOT_URLCONF = 'api.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'api.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
# SECURITY NOTE(review): real-looking Heroku Postgres credentials are
# committed here in plain text; they should be rotated immediately and read
# from environment variables (e.g. dj_database_url / DATABASE_URL) instead.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': 'd6r2ijclhv9vpt',
        'USER': 'sagmjrlksvfryc',
        'PASSWORD': '9a56ac86db5b0cf924374ef19de0c5578cb231a3ee8cf3cd826ffa97b9c885bd',
        'HOST': 'ec2-52-202-66-191.compute-1.amazonaws.com',
        'PORT': '5432',
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/' # add this
MEDIA_ROOT = os.path.join(BASE_DIR, 'media') # add this
django_heroku.settings(locals()) | [
"ascerra@redhat.com"
] | ascerra@redhat.com |
c7eff140736aa3fb99e332946ccc1d762259cc03 | 4d9bdc1444ab73858a123b8273b72e1d74a9233d | /funNLearn/src/main/java/dsAlgo/hashmap/common_user_browsing_history.py | 178e2090b3ecca365c535fa19ff5e9caec2706a8 | [] | no_license | vishalpmittal/practice-fun | f7ca1389d758f93ddf2ddc3a58f2592b7caabab4 | 727dec2e23e765925a5e7e003fc99aeaf25111e9 | refs/heads/master | 2022-07-11T18:31:49.574410 | 2022-02-26T23:05:12 | 2022-02-26T23:05:12 | 51,132,794 | 0 | 1 | null | 2022-06-29T19:34:05 | 2016-02-05T07:34:32 | JavaScript | UTF-8 | Python | false | false | 3,593 | py | """
Tag: list, hashmap
We have some clickstream data that we gathered on our client's website.
Using cookies, we collected snippets of users' anonymized URL histories while they browsed the site.
The histories are in chronological order, and no URL was visited more than once per person.
Write a function that takes two users' browsing histories as input and returns the longest contiguous
sequence of URLs that appears in both.
Sample input:
user0 = ["/start", "/green", "/blue", "/pink", "/register", "/orange", "/one/two"]
user1 = ["/start", "/pink", "/register", "/orange", "/red", "a"]
user2 = ["a", "/one", "/two"]
user3 = ["/pink", "/orange", "/yellow", "/plum", "/blue", "/tan", "/red", "/amber", "/HotRodPink",
"/CornflowerBlue", "/LightGoldenRodYellow", "/BritishRacingGreen"]
user4 = ["/pink", "/orange", "/amber", "/BritishRacingGreen", "/plum", "/blue", "/tan", "/red",
"/lavender", "/HotRodPink", "/CornflowerBlue", "/LightGoldenRodYellow"]
user5 = ["a"]
user6 = ["/pink","/orange","/six","/plum","/seven","/tan","/red", "/amber"]
Sample output:
findContiguousHistory(user0, user1) => ["/pink", "/register", "/orange"]
findContiguousHistory(user0, user2) => [] (empty)
findContiguousHistory(user2, user1) => ["a"]
findContiguousHistory(user5, user2) => ["a"]
findContiguousHistory(user3, user4) => ["/plum", "/blue", "/tan", "/red"]
findContiguousHistory(user4, user3) => ["/plum", "/blue", "/tan", "/red"]
findContiguousHistory(user3, user6) => ["/tan", "/red", "/amber"]
"""
from typing import List
def get_current_match_path(pos1, pos2, l1, l2):
    """Return the run of equal elements starting at l1[pos1] and l2[pos2].

    Walks both lists in lockstep and stops at the first mismatch or when
    either list is exhausted.
    """
    run = []
    for left, right in zip(l1[pos1:], l2[pos2:]):
        if left != right:
            break
        run.append(left)
    return run
def findContiguousHistory(l1: List[str], l2: List[str]) -> List[str]:
    """Return the longest contiguous sequence of URLs common to both histories.

    Per the problem statement no URL repeats within one history, so mapping
    each URL of l1 to its (unique) index is lossless. Scans l2 left to right;
    on a tie, the first longest run found wins.

    Fixes: the original also built a position dict for l2 (`p2`) that was
    never used — dead code removed — and used a manual while/index loop.
    """
    # Index every URL of l1 by position for O(1) lookup while scanning l2.
    pos_in_l1 = {url: i for i, url in enumerate(l1)}
    longest: List[str] = []
    for j, url in enumerate(l2):
        i = pos_in_l1.get(url)
        if i is None:
            continue
        # Extend the match while the two histories stay in lockstep.
        run = []
        for left, right in zip(l1[i:], l2[j:]):
            if left != right:
                break
            run.append(left)
        if len(run) > len(longest):
            longest = run
    return longest
user0 = ["/start", "/green", "/blue", "/pink", "/register", "/orange", "/one/two"]
user1 = ["/start", "/pink", "/register", "/orange", "/red", "a"]
user2 = ["a", "/one", "/two"]
user3 = ["/pink", "/orange", "/yellow", "/plum", "/blue", "/tan", "/red", "/amber", "/HotRodPink",
"/CornflowerBlue", "/LightGoldenRodYellow", "/BritishRacingGreen"]
user4 = ["/pink", "/orange", "/amber", "/BritishRacingGreen", "/plum", "/blue", "/tan", "/red",
"/lavender", "/HotRodPink", "/CornflowerBlue", "/LightGoldenRodYellow"]
user5 = ["a"]
user6 = ["/pink","/orange","/six","/plum","/seven","/tan","/red", "/amber"]
assert(findContiguousHistory(user0, user1) == ["/pink", "/register", "/orange"])
assert(findContiguousHistory(user0, user2) == [])
assert(findContiguousHistory(user2, user1) == ["a"] )
assert(findContiguousHistory(user5, user2) == ["a"])
assert(findContiguousHistory(user3, user4) == ["/plum", "/blue", "/tan", "/red"])
assert(findContiguousHistory(user4, user3) == ["/plum", "/blue", "/tan", "/red"])
assert(findContiguousHistory(user3, user6) == ["/tan", "/red", "/amber"])
print("Tests PASSED!")
| [
"vmittal@barracuda.com"
] | vmittal@barracuda.com |
e613b4269825bc5de44e5ac692827adf17b711a4 | a5f6d1d089456196c8282bcdb31db44be3ebaeed | /testSet/testSubmitFeedback.py | 1fe219b5e496bb6766bab860dfff882b79f3d553 | [] | no_license | ChristianXu/BTCCQA_Mobi | 9fa9727d61715668bed6d4e64ddb87371acc98e1 | bc9fb3ce31478c801c8896b6b2c2580e9b2c1b89 | refs/heads/master | 2020-09-27T16:08:18.957108 | 2017-03-07T02:54:48 | 2017-03-07T02:54:48 | 66,329,331 | 0 | 0 | null | 2017-03-07T02:54:49 | 2016-08-23T03:19:30 | Python | UTF-8 | Python | false | false | 1,431 | py | __author__ = 'sara'
import unittest
import logging
from comm import get_element
from comm import ReadConfig
from comm import bsnsCommon
from time import sleep
from comm import common
from comm import Log
logger = logging.getLogger()
get_element = get_element
class TestSubmitFeedback(unittest.TestCase):
    """Appium/UI test: submit a feedback message from the "me" tab and check
    that the success popup appears.

    Relies on the project-level `get_element(screen, key)` locator helper and
    fixed `sleep` delays instead of explicit waits.
    """
    def setUp(self):
        # test Start
        logger.info("Test submit feedback!")
    def test_submit_feedback(self):
        """Navigate me -> Feedback, type a message, submit, then go back."""
        # Fixed sleeps assume each screen has finished loading by then.
        sleep(5)
        get_element("me", "me").click()
        get_element("me", "Feedback").click()
        sleep(5)
        get_element("me", "Feedback_context").send_keys('What is this,it is really,i believe that everything will turn out fine. ')
        sleep(3)
        get_element("common", "submit").click()
        sleep(3)
        self.check_submit_success('Feedback submit successful')
        sleep(2)
        # Tap the back button in the upper-left corner
        get_element("common", "upper_left_back").click()
    def check_submit_success(self,result_msg):
        """Log 'ok!' and dismiss the popup when the success element is found,
        otherwise log 'NG!'.

        NOTE(review): only logs the outcome — no unittest assertion is made,
        so a failed submission does not fail the test.
        """
        if get_element("me", "Feedback_submit_successful") is not None:
            logger.info(result_msg + 'ok!')
            sleep(2)
            # Click close popup
            get_element("me", "close_feedback_success_popup").click()
        else:
            logger.info(result_msg + 'NG!')
    def tearDown(self):
        # bsnsCommon.logout()
        # test end
        logger.info("Test submit feedback end")
"1638306719@qq.com"
] | 1638306719@qq.com |
c5834f02447506a5bf1c385dce52dce8b33f03af | 9d27a601c5418b20aaa93ee0e792d4c585843427 | /src/classify_emotion.py | 11e901e89bae8d60db8b0b3c150c40638f74fd38 | [
"MIT"
] | permissive | NathanHouwaart/EmotionRecognition | cbd6676a42872600052da0e9c60fb5b3a480eb2d | 4eb4478286a1dc925f8b4983923e31c421700338 | refs/heads/master | 2022-04-20T18:19:37.918100 | 2020-04-19T18:39:23 | 2020-04-19T18:39:23 | 257,060,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,026 | py | import os
import cv2
import time
import numpy as np
import onnxruntime
import sys
import matplotlib.pyplot as plt
class FERModel:
    """Facial-emotion-recognition wrapper around an ONNX model.

    Loads the model once in __init__ and classifies a face image file into
    one of eight emotion labels (neutral/happiness/surprise/sadness/anger/
    disgust/fear/contempt) via predict().
    """
    def __init__(self, model_path):
        # The model expects a 64x64 single-channel image.
        self.dimensions = (64, 64)
        self.model_path = model_path
        self.session = onnxruntime.InferenceSession(self.model_path, None)
        # NOTE(review): input/output indices (0, 1, 2) are specific to this
        # .onnx graph — confirm they match if the model file changes.
        self.input_data_name = self.session.get_inputs()[0].name
        self.input_emotion_name = self.session.get_inputs()[1].name
        self.output_name = self.session.get_outputs()[2].name
        # Identity label table fed as the model's second input.
        self.emotion_table = [[0, 1, 2, 3, 4, 5, 6, 7]]
        for x in self.session.get_inputs():
            print("Input: {}".format(x))
        for x in self.session.get_outputs():
            print("Output: {}".format(x))
    def predict(self, file):
        """Classify the image at path *file*.

        Returns a dict with the dominant emotion, all emotions ranked by
        probability, their probability strings, and per-stage wall-clock
        timings (seconds, from time.time() deltas).
        """
        image = cv2.imread(file)
        # Preprocess
        gray_start = time.time()
        gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        gray_end = time.time()
        resize_start = time.time()
        resized = cv2.resize(gray_image, self.dimensions,
                             interpolation=cv2.INTER_AREA)
        resize_end = time.time()
        # Transform data into the (1, 1, 64, 64) float32 layout the session expects.
        data = np.array(resized, dtype=np.float32)
        input_data = np.array([data]).reshape(
            [1] + [1] + list(self.dimensions))
        model_start = time.time()
        res = self.session.run([self.output_name], {
            self.input_data_name: input_data, self.input_emotion_name: self.emotion_table})
        model_end = time.time()
        processed, probability = self.postprocess(res[0])
        emotions = FERModel.emotion_map(processed, len(processed))
        return {"dominant emotion": emotions[0],
                "results": emotions,
                "probabilities": probability,
                "runtime": {
                    "grayscale": gray_end - gray_start,
                    "resize": resize_end - resize_start,
                    "model": model_end - model_start
                }}
    @staticmethod
    def softmax(x):
        """Compute softmax values (probabilities from 0 to 1) for each possible label."""
        x = x.reshape(-1)
        # Subtracting the max keeps exp() numerically stable.
        e_x = np.exp(x - np.max(x))
        return e_x / e_x.sum(axis=0)
    @staticmethod
    def postprocess(scores):
        """This function takes the scores generated by the network and
        returns the class IDs in decreasing order of probability."""
        prob = FERModel.softmax(scores)
        prob = np.squeeze(prob)
        classes = np.argsort(prob)
        # argsort is ascending; reverse for most-probable-first order.
        classes = classes[::-1]
        guesses = []
        for i in classes:
            # Human-readable percentage string, e.g. '97.35%'.
            guesses.append(str(round(float(np.format_float_positional(prob[i] * 100)), 2)) + "%")
        return classes, guesses
    @staticmethod
    def emotion_map(classes, N=1):
        """Take the most probable labels (output of postprocess) and returns the
        top N emotional labels that fit the picture."""
        emotion_table = {"neutral": 0, "happiness": 1, "surprise": 2, "sadness": 3,
                         "anger": 4, "disgust": 5, "fear": 6, "contempt": 7}
        # Dict preserves insertion order, so index i maps back to the label
        # whose table value is i.
        emotion_keys = list(emotion_table.keys())
        emotions = []
        for i in range(N):
            emotions.append(emotion_keys[classes[i]])
        return emotions
if __name__ == "__main__":
    # Redirect stdout so every report below lands in a log file.
    f = open("test_img/classify_emotion.log", 'w')
    sys.stdout = f
    def print_runtime(name, time):
        # Print mean/stdev of a list of timings. NOTE(review): the values come
        # from time.time() deltas (seconds), so the /1_000_000 scaling and the
        # "ms" label do not match — kept as-is to preserve the report format.
        print("Runtime {:10} avg: {: 10.6f}ms stdev: {: 10.6f}ms".format(
            name, mean(time) / 1000_000, stdev(time) / 1000_000))
    from statistics import stdev, mean
    model = FERModel("model.onnx")
    iterations = 10
    for root, dirs, files in os.walk(os.path.abspath("./test_img/")):
        for file in files:
            absfile = os.path.join(root, file)
            dominant_emotion = None
            emotions = None
            probability = None
            probabilies = None
            time_grayscale = list()
            time_resize = list()
            time_model = list()
            for _ in range(iterations):
                res = model.predict(absfile)
                dominant_emotion = res['dominant emotion'] # Shouldn't change between runs
                probability = res['probabilities'][0]
                emotions = res['results']
                probabilies = res['probabilities']
                time_grayscale.append(res['runtime']['grayscale'])
                time_resize.append(res['runtime']['resize'])
                time_model.append(res['runtime']['model'])
            print("\n")
            print("File: {}".format(file))
            print("Emotion: {}".format(dominant_emotion))
            print("Probability: {}".format(probability))
            print("All emotions: {}".format(emotions))
            print("All probabilities: {}".format(probabilies))
            print_runtime("grayscale", time_grayscale)
            # BUG FIX: the original passed time_grayscale here, so the
            # "resize" row reported the grayscale timings.
            print_runtime("resize", time_resize)
            print_runtime("model", time_model)
    f.close()
"nathan.houwaart@student.hu.nl"
] | nathan.houwaart@student.hu.nl |
fc260cf8fb0b600ddeb4d654025ef404f5827a97 | 6711cd9a995cefbcde18a83017a07c588d0294f5 | /accounts/urls.py | 18fadabd1e6f86d2918522574933576b7beb5c43 | [] | no_license | RohithSangati/AuthenticationApp | f68b3b0391c0c4f4f2f511d92674f1c923ca766f | fa09d15a5bd220027d152968d32a7150e29d924c | refs/heads/master | 2023-06-27T08:37:36.631520 | 2021-07-28T10:01:09 | 2021-07-28T10:01:09 | 389,277,731 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 247 | py | from django.urls import path
from . import views
urlpatterns = [
path('',views.home,name='home'),
path('register',views.register,name='register'),
path('login',views.login,name='login'),
path('logout',views.logout,name='logout')
] | [
"sangatirohith@gmail.com"
] | sangatirohith@gmail.com |
7e74abaeb0078b3ee92242a7cc866c13d76bc37f | 81982a278946fab96d74e3f711c937647faec036 | /Trabalhos/a1.py | 32584fb6bf8a53c7a44f632933f6fc2cdb41d8aa | [] | no_license | juanengml/Prog1UTFPR | 3f1b71888a0883a4e12922a0c09cce622ca27458 | aca289ffece71b4ca4339fa8779a1d2a9076aecc | refs/heads/master | 2021-06-23T09:58:37.167188 | 2019-06-14T01:21:51 | 2019-06-14T01:21:51 | 145,451,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 240 | py | #Escreva um programa que leia duas matrizes 3x3 e apresente na tela o resultado da multiplicacao destas matrizes.
import numpy as np
# Exercise: multiply two 3x3 matrices and print the result.
# NOTE(review): the matrices are hard-coded rather than read from input,
# and `print` is used as a Python 2 statement.
a = np.matrix('1 2 3 ; 4 5 6; 7 8 9')
b = np.matrix('1 2 3 ; 4 5 6; 7 8 9')
print np.dot(a,b)
| [
"juanengml@gmail.com"
] | juanengml@gmail.com |
d8e69b8e34aeccacee227ed7afc368d4d8ea68a0 | 7492136ed1c7ea853988d3a8487970a59eacc7b5 | /code/geopaparazzi/projects/migrations/0003_auto_20200115_1258.py | 2cdca5a526734e46dff293eb15e3dd9a29ad8d25 | [] | no_license | romanDj/server-geopaparazzi | 85e5184406ee0563e3be93dd8d4b824c9fc31895 | ce73cb18ef1d03525281847b8121f89cae13a8b7 | refs/heads/master | 2022-03-29T15:19:36.877833 | 2020-01-15T19:16:14 | 2020-01-15T19:16:14 | 229,759,623 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,422 | py | # Generated by Django 2.2.9 on 2020-01-15 12:58
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('projects', '0002_subdivision_participants'),
]
operations = [
migrations.AlterModelOptions(
name='project',
options={'verbose_name': 'Проект', 'verbose_name_plural': 'Проекты'},
),
migrations.AlterModelOptions(
name='subdivision',
options={'verbose_name': 'Подразделение', 'verbose_name_plural': 'Подразделения'},
),
migrations.AddField(
model_name='subdivision',
name='created_date',
field=models.DateTimeField(default=datetime.datetime(2020, 1, 15, 12, 58, 21, 576543), verbose_name='Дата создания'),
),
migrations.AlterField(
model_name='project',
name='owner',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='projects.Subdivision', verbose_name='подразделение'),
),
migrations.AlterField(
model_name='subdivision',
name='participants',
field=models.ManyToManyField(to=settings.AUTH_USER_MODEL, verbose_name='участники'),
),
]
| [
"true-rock99@ya.ru"
] | true-rock99@ya.ru |
285b5d35eb6f94c89715ad4fe68307437cf9ffc0 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/303/usersdata/302/92006/submittedfiles/testes.py | 8d4dc26344d08e3707ea45e11e79240ce3625d53 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,405 | py | lista1 = [1, 3, 4,]
lista1[len(lista1)-1]
print(len(lista1))
'''a = [8.0 , 5.0 , 10.0 , 5.0]
print(a)
print(len(a))
a.append(0.0)
print(len(a))
for i in range(len(a)-1, 0 , -1):
if i ==1:
a[1] = 2.0
else:
a[i] = a[i-1]
print(a)
print(len(a))
'''
'''
a = []
for i in range(1,5,1):
a.append(float(input('Digite o elemento: ')))
print(a)
print(sum(a))
print(len(a))
del a[1]
print(' a é igual: ', a)
print(len(a))
'''
'''
a = []
for i in range(1,11,1):
a.append(float(input('Digite o elemento: ')))
print(a)
for i in range(9, -1, -1):
print(a[i])
'''
'''
while(True):
n = int(input('DIgite o número de notas: '))
if n > 0:
break
notas = []
for i in range(0,n,1):
notas.append(float(input('Digite a nota%d: ' %(i+1))))
media = 0
for i in range(0,n,1):
media += notas[i]/n
print(notas)
print(media)
'''
'''
from minha_bib import primo
n = int(input('Digite n: '))
if primo(n):
print('Primo')
else:
print('Não é primo ')
'''
#exercício 15
'''
n = int(input('Digite o valor de n: '))
if n > 9999999 and n <=99999999:
soma = 0
while(n!=0):
resto = n%10
n = (n-resto)//10
soma = soma + resto
print(soma)
else:
print('Não Sei')
'''
#exercício 16
'''
while(True):
t1 = int(input('Digite o número de tomadas da T1: '))
t2 = int(input('Digite o número de tomadas da T2: '))
t3 = int(input('Digite o número de tomadas da T3: '))
t4 = int(input('Digite o número de tomadas da T4: '))
if t1 > 0 and t2 > 0 and t3 > 0 and t4 > 0:
n = t1 + (t2-1) + (t3-1) + (t4-1)
print(n)
break
else:
print("O NÚMERO DE TOMADAS TEM QUE SER MAIOR QUE 0, DIGITE NOVAMENTE\n")
'''
#Exercício 17
'''
a = int(input('Digite o primeiro número: '))
b = int(input('Digite o segundo número: '))
c = int(input('Digite o terceiro número: '))
d = int(input('Digite o quarto número: '))
if a > b and b < c and c > d:
print('S')
elif a < b and b > c and c > d:
print('S')
elif c > b and c > d and a < b:
print('S')
elif d > c and c > b and b > a:
print('S')
elif a > b and b == c and c == d:
print('S')
elif a > b and b < c and c == d:
print('S')
elif b > a and b > c and c == d:
print('S')
elif c > b and c > d and a == b:
print('S')
elif d > c and b == c and b == a:
print('S')
elif d > c and c < b and a == b:
print('S')
else:
print('N')
'''
#Exercício 20
'''
a = int(input('Digite o primeiro número: '))
b = int(input('Digite o segundo número: '))
for i in range(1000000,0,-1):
if a%i == 0 and b%i == 0:
print(i)
break
'''
#Exercício 21
'''
n = int(input('Digite n: '))
a = int(input('Digite a: '))
b = int(input('Digite b: '))
i = 2
while i <= n+1:
if i%a!=0 and i%b!=0:
n = n+1
if i%a == 0 or i%b == 0:
print(i)
i = i +1
'''
#Exercício 22
'''
while(True):
p = int(input(' Digite p: '))
q = int(input(' Digite q: '))
if q >= p :
break
if str(p) in str(q):
print('S')
else:
print('N')
'''
#Fatorial
'''
while(True):
while(True):
n = int(input('Digite um numero positivo: '))
if n >=0:
break
f = 1
for i in range(2,n+1,1):
f = f*i
print('%d!=%d' %(n,f))
opt = input('deseja continuar? [S ou N]')
if opt == 'N':
print('\n\nATE BREVE!')
break
'''
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
292d5ee0888ecac142f7b24a25d3c8b3cd5a498e | f18593e4501ba4c4d7c0f1453250905449aa5cdb | /test.py | ad4613a25e7a1d57061da5f9f24ad4933b917131 | [] | no_license | scottsfarley1993/python-neotoma | 9ebf770403cfaf4dad9c6f3af9099a6156f11eef | 8ef27dd9635056a63747080385a7b0532db49613 | refs/heads/master | 2021-01-21T16:53:57.912348 | 2016-02-23T00:57:55 | 2016-02-23T00:57:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 110 | py | import urllib2
from multiprocessing.dummy import Pool as ThreadPool
def dbInit(host, user, pw, database):
| [
"scott@firesphere.org"
] | scott@firesphere.org |
977ff947eef1978e0f1e2b0d8ecbe588b9c14a93 | d2798cd94a17d8c6d86ca336c9348fb0a34294b5 | /src/3/ngitung.py | b0bc75425a30463c60fc8ee861f74b7f91fc7a16 | [
"MIT"
] | permissive | dimasmaulana24/pemrograman2 | 9fd410d2c754483d42bb1189aa74d1fd28932fec | 8eabbd7172e66091511ae9b41dcfd2bcaca9805d | refs/heads/master | 2020-08-18T23:08:56.273475 | 2019-10-17T17:17:00 | 2019-10-17T17:17:00 | 215,845,712 | 0 | 0 | MIT | 2019-10-17T17:11:46 | 2019-10-17T17:11:46 | null | UTF-8 | Python | false | false | 425 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 11 06:24:23 2019
@author: awangga
"""
class Ngitung:
    """Two-operand arithmetic helper: add, subtract, multiply, divide."""

    def __init__(self, a, b):
        # Operands stay public attributes, matching the original API.
        self.a = a
        self.b = b

    def Penambahan(self):
        """Return the sum of the two operands."""
        return self.a + self.b

    def Pengurangan(self):
        """Return the difference ``a - b``."""
        return self.a - self.b

    def Perkalian(self):
        """Return the product of the two operands."""
        return self.a * self.b

    def Pembagian(self):
        """Return the quotient ``a / b`` (may raise ZeroDivisionError)."""
        return self.a / self.b
"rolly@awang.ga"
] | rolly@awang.ga |
e5af3a05af1c55f4de514b9b82f99141101c9200 | 8aa0d1d407bb1c66d01261f7e2c4e9832e856a2d | /experiments/experiments_gdsc/hyperparameter/plots/plot_nmtf_gibbs_hyperparameter.py | dd3218e5fb59f547aca48d1125e82075eea0af28 | [] | no_license | garedaba/BNMTF_ARD | 59e3ec1dbfd2a9ab9f4ec61368ec06e3783c3ee4 | 0a89e4b4971ff66c25010bd53ee2622aeaf69ae9 | refs/heads/master | 2022-01-16T06:57:12.581285 | 2018-06-10T10:22:12 | 2018-06-10T10:22:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,261 | py | """
Plot the performances of NMTF Gibbs for different hyperparameter values, for
three different sparsity levels.
"""
import matplotlib.pyplot as plt
import numpy
''' Plot settings. '''
MSE_min, MSE_max = 600, 1400
values_lambda = [0.0001, 0.001, 0.01, 0.1, 1., 10., 100.]
fractions_unknown = [0.2, 0.5, 0.8]
folder_plots = "./"
folder_results = "./../results/"
plot_file = folder_plots+"nmtf_gibbs_hyperparameter.png"
''' Load in the performances. '''
performances = eval(open(folder_results+'nmtf_gibbs.txt','r').read())
average_performances = {
fraction: [
numpy.mean(performances[fraction][lamb])
for lamb in values_lambda
]
for fraction in fractions_unknown
}
''' Plot the performances - one line per fraction. '''
fig = plt.figure(figsize=(2.5,1.9))
fig.subplots_adjust(left=0.17, right=0.98, bottom=0.17, top=0.98)
plt.xlabel('lambdaF, lambdaS, lambdaG', fontsize=8, labelpad=1)
plt.xscale("log")
plt.xticks(fontsize=6)
plt.ylabel('MSE', fontsize=8, labelpad=1)
plt.yticks(range(0,MSE_max+1,200),fontsize=6)
plt.ylim(MSE_min, MSE_max)
for fraction in fractions_unknown:
x = values_lambda
y = average_performances[fraction]
plt.plot(x, y, label='Fraction %s' % fraction)
plt.savefig(plot_file, dpi=600) | [
"tab43@cam.ac.uk"
] | tab43@cam.ac.uk |
6d89a4cdf4198eb91c73355a6b6c82e8c64fd3d0 | f0f8760fc044215b46d9f58357df2d85baba5bd4 | /qa/users/apps.py | 1db58ba3fa6d3c571d5fb327386b005409113f6d | [
"Apache-2.0"
] | permissive | xhh1105/django | e5f5691a343b96ca8551ce32c1e13650957bbdb4 | aad4b0c38ad5069aee1acf7fa173e9f1ec26d34f | refs/heads/master | 2022-11-23T18:19:17.302857 | 2019-07-18T11:52:07 | 2019-07-18T11:52:07 | 197,558,576 | 0 | 0 | Apache-2.0 | 2022-11-22T04:08:55 | 2019-07-18T09:43:09 | Python | UTF-8 | Python | false | false | 302 | py | from django.apps import AppConfig
from django.utils.translation import gettext_lazy as _
class UsersConfig(AppConfig):
    """Django application configuration for the users app."""
    name = "qa.users"
    verbose_name = _("Users")
    def ready(self):
        """Import signal handlers; tolerate their absence (e.g. during early setup)."""
        try:
            import qa.users.signals  # noqa F401
        except ImportError:
            pass
| [
"xhh1105@gmail.com"
] | xhh1105@gmail.com |
f76699d346192bb4d093ede35ffb7ae1693a1b0d | a266a3c9adf8be327593a6e3e8e49b6f68f32caf | /ArticleSpider/spiders/lagou_hr.py | 3a63d40cf073be95754fae00f4a5601f0dc1fee1 | [] | no_license | duyanyong2017/ArticleSpider | 28309017ee823b2cb6afacda4f174ce7631f9c27 | 25859f08852de350b091aa02b8ed04cf30268bac | refs/heads/master | 2022-08-03T14:18:35.515197 | 2020-05-28T14:40:56 | 2020-05-28T14:40:56 | 265,598,046 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,047 | py | # -*- coding: utf-8 -*-
import os
import pickle
import time
from datetime import datetime
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
import selenium
from settings import BASE_DIR
from items import LagouJobItemLoader, LagouJobItem
from utils.common import get_md5
class LagouHrSpider(scrapy.Spider):
    """Spider that scrapes a single Lagou job posting into a LagouJobItem."""
    name = 'lagou_hr'
    allowed_domains = ['www.lagou.com']
    start_urls = ['https://www.lagou.com/jobs/7146434.html']
    # -s USER_AGENT="Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:61.0) Gecko/20100101 Firefox/61.0"
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:61.0) Gecko/20100101 Firefox/61.0'}
    def parse(self, response):
        """Extract all job fields from the posting page and return one item."""
        itemloader = LagouJobItemLoader(item=LagouJobItem(), response=response)
        itemloader.add_css("title", ".job-name::attr(title)")
        itemloader.add_value("url", response.url)
        # url_object_id is the MD5 of the URL, used as the primary key downstream.
        itemloader.add_value('url_object_id', get_md5(response.url))
        itemloader.add_css("salary", ".job_request .salary::text")
        itemloader.add_xpath("job_city", "//*[@class='job_request']/h3/span[2]/text()")
        itemloader.add_xpath("work_years", "//*[@class='job_request']/h3/span[3]/text()")
        itemloader.add_xpath("degree_need", "//*[@class='job_request']/h3/span[4]/text()")
        itemloader.add_xpath("job_type", "//*[@class='job_request']/h3/span[5]/text()")
        itemloader.add_css("tags", '.position-label li::text')
        itemloader.add_css('publish_time', '.publish_time::text')
        itemloader.add_css('job_advantage', '.job-advantage p::text')
        itemloader.add_css('job_desc', '.job_bt div')
        itemloader.add_css('job_addr', '.work_addr')
        itemloader.add_css('company_name', '#job_company dt a img::attr(alt)')
        itemloader.add_css('company_url', '#job_company dt a::attr(href)')
        itemloader.add_value('crawl_time', datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
        job_item = itemloader.load_item()
        return job_item
| [
"18896738910@163.com"
] | 18896738910@163.com |
540a90e5ada5365bcdd02cc93f075cf3bbcc7940 | aba9b00edec394f1389a7ecf88a290112303414d | /semestr_8/analiza_obrazu/projekt/image_anal.py | 27ef23203928b6d0aa4647a3b8b98e00f6ab0559 | [] | no_license | torgiren/szkola | 2aca12807f0030f8e2ae2dfcb808bf7cae5e2e27 | 5ed18bed273ab25b8e52a488e28af239b8beb89c | refs/heads/master | 2020-12-25T18:18:36.317496 | 2014-04-27T23:43:21 | 2014-04-27T23:43:21 | 3,892,030 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 33,859 | py | #*-* coding: utf8 *-*
"""Moduł zawierający klasy używane do przetwarzania obrazu"""
import numpy as np
from scipy import misc
import itertools
from pprint import pprint
from glob import glob
import os
class NoImageError(Exception):
    """Raised when an operation is attempted before an image has been loaded."""
    pass
class NoSuchMethodError(Exception):
    """Raised when an unknown method name is passed to an operation."""
    pass
class FilterSizeError(Exception):
    """Raised when a convolution kernel does not have the expected 3x3 shape."""
    pass
def gen_filename(down, left, up, right):
    """Build the canonical 'DDDDDxLLLLLxUUUUUxRRRRR.png' segment file name."""
    coords = (down, left, up, right)
    stem = "x".join("%05d" % value for value in coords)
    return stem + ".png"
def find_left(tab, point, factor=1):
    """Return candidate boxes to the left of *point* that vertically overlap it.

    Boxes and *point* are (bottom, left, top, right) tuples/rows.  The result
    is sorted by the left coordinate in descending order (nearest neighbour
    first).  A 0-d ``np.array(None)`` signals "no candidate"; callers test
    ``.shape`` for truthiness.

    BUGFIX: the lazy ``filter`` objects are now materialised with ``list``
    (``np.array(filter(...))`` produced a 0-d object array on Python 3) and
    the leftover debug ``print`` statements were removed.
    """
    sizey = point[0] - point[2]
    tmp = np.array(list(filter(lambda x: x[1] < point[1], tab)))
    tmp = np.array(list(filter(
        lambda x: (x[0] > (point[0] - sizey * factor) and x[2] < (point[0] - sizey * factor)) or
                  (x[0] > (point[2] + sizey * factor) and x[2] < (point[2] + sizey * factor)),
        tmp)))
    if not len(tmp):
        return np.array(None)
    # Right-most (largest left coordinate) candidate first.
    indices = tmp[:, 1].argsort()[::-1]
    return tmp[indices]
def find_down(tab, point, factor=1):
    """Return candidate boxes below *point* that horizontally overlap it.

    Boxes and *point* are (bottom, left, top, right) tuples/rows.  The result
    is sorted by the bottom coordinate in ascending order (nearest first).
    A 0-d ``np.array(None)`` signals "no candidate"; callers test ``.shape``.

    BUGFIX: the lazy ``filter`` objects are now materialised with ``list``
    (``np.array(filter(...))`` produced a 0-d object array on Python 3).
    """
    sizex = point[3] - point[1]
    tmp = np.array(list(filter(lambda x: x[2] > point[0], tab)))
    tmp = np.array(list(filter(
        lambda x: ((x[1] - sizex * factor) < point[1] and (x[3] + sizex * factor) > point[1]) or
                  ((x[1] - sizex * factor) < point[3] and (x[3] + sizex * factor) > point[3]),
        tmp)))
    if not len(tmp):
        return np.array(None)
    return tmp[tmp[:, 0].argsort()]
class ImageAnal:
"""Klasa przetwarzająca obrazy"""
    def image_loaded(fn):
        """Decorator: raise NoImageError when the wrapped method is
        called before an image has been loaded."""
        def wrapped(self, *args, **kwargs):
            if self.__image is None:
                raise NoImageError()
            return fn(self, *args, **kwargs)
        return wrapped
    def __init__(self, path=None):
        """Create an analyzer; optionally load the image at *path* right away."""
        self.__image = None  # ndarray (H, W[, C]) once an image is loaded
        if path:
            self.load_image(path)
    def load_image(self, path):
        """Read the image file at *path* into a numpy array (scipy.misc.imread)."""
        self.__image = misc.imread(path)
    def open(self, path):
        """Alias for :meth:`load_image`."""
        self.load_image(path)
    @image_loaded
    def negative(self):
        """Invert the image in place (255 - value, per channel)."""
        self.__image = 255 - self.__image
@image_loaded
def grayscale(self, method=1):
"""Konwertuje do odcieni szarości.
method:
1 (default) wykorzystuje metodę wartości średniej kolorów
2 wykorzystuje wzór 0.3*R+0.59*G+0.11*B
Obsługa tylko formatu RGB"""
if method == 1:
self.__grayscale1()
elif method == 2:
self.__grayscale2()
else:
raise NoSuchMethodError()
# @image_loaded
# def convert(self, fmt):
# self.__image = self.__image.convert(fmt)
# """Konwertuje obraz do zadanego formatu"""
@image_loaded
def normalize(self):
data = self.__image
R = data[:, 0]
G = data[:, 1]
B = data[:, 2]
R = (R - R.min()) * 255 / R.max()
G = (G - G.min()) * 255 / G.max()
B = (B - B.min()) * 255 / B.max()
data[:, 0] = R
data[:, 1] = G
data[:, 2] = B
self.__image = data
    @image_loaded
    def scale(self, factor):
        """Resize by *factor*: < 1 downsamples (decimation), >= 1 upsamples
        (pixel replication)."""
        if factor < 1:
            self.__scale_down(factor)
        else:
            self.__scale_up(factor)
    @image_loaded
    def progowanie(self, method="global", otoczenie=5, odchylenie=15):
        """Threshold (binarize) the image after converting it to grayscale.

        methods:
            global -- single global threshold (mean intensity)
            local  -- per-pixel threshold from the local neighbourhood mean
            mixed  -- local threshold constrained around the global one
        parameters:
            otoczenie  -- neighbourhood radius in pixels
            odchylenie -- allowed deviation from the global mean (mixed only)

        NOTE(review): an unknown method name falls through silently instead
        of raising NoSuchMethodError like the other dispatchers here.
        """
        self.__grayscale1()
        if method == "global":
            self.__progowanie_globalne()
        elif method == "local":
            self.__progowanie_lokalne(otoczenie=otoczenie)
        elif method == "mixed":
            self.__progowanie_mieszane(
                otoczenie=otoczenie, odchylenie=odchylenie)
    @image_loaded
    def splot(self, filter):
        """Convolve the image with a 3x3 integer kernel, in place.

        Raises FilterSizeError when *filter* is not 3x3.  The result is
        divided by the kernel sum (NOTE(review): a zero-sum kernel divides
        by zero) and clamped to the 0-255 range.
        """
        filter = np.array(filter, dtype=np.int8)
        if filter.shape != (3, 3):
            raise(FilterSizeError)
        data = self.__image
        new = self.__expand(data, 1)  # pad edges by replication
        new = np.array(new, dtype=np.int32)
        # Sum of the nine shifted, weighted copies == 2-D convolution.
        new = (filter[0, 0] * new[:-2, :-2] + filter[0, 1] * new[:-2, 1:-1] +
               filter[0, 2] * new[:-2, 2:] + filter[1, 0] * new[1:-1, :-2] +
               filter[1, 1] * new[1:-1, 1:-1] + filter[1, 2] * new[1:-1, 2:] +
               filter[2, 0] * new[2:, :-2] + filter[2, 1] * new[2:, 1:-1] +
               filter[2, 2] * new[2:, 2:])
        new = new / (filter.sum())
        # Clamp to [0, 255] with the shift/mask trick used throughout this class.
        new -= 255
        new = new * (new < 0)
        new += 255
        new = new * (new > 0)
        data = np.array(new, dtype=np.uint8)
        self.__image = data
    @image_loaded
    def brightness(self, val):
        """Shift brightness by *val* (clamped to 0-255); alpha is untouched."""
        data = self.__image
        new = np.array(data[:, :, :3], dtype=np.int32)
        new += val
        new = self.__shrink_values(new)
        self.__image[:, :, :3] = new
    @image_loaded
    def roberts(self):
        """Edge detection with the Roberts cross operator (|d1| + |d2|)."""
        data = self.__image
        new = self.__expand(np.array(data, np.int32), 1)
        data[:, :] = self.__shrink_values(abs(new[1:-1, 1:-1] - new[2:, 2:]) +
                                          abs(new[2:, 1:-1] - new[1:-1, 2:]))
        self.__image = data
    @image_loaded
    def sobel(self):
        """Edge detection with a Sobel-style gradient magnitude.

        NOTE(review): the second gradient reuses ``new[1:-1, :-2]`` twice and
        mixes corner terms asymmetrically -- this does not match the textbook
        Sobel Gx kernel; verify against the intended operator.
        """
        data = self.__image
        new = self.__expand(np.array(data, np.int32), 1)
        new[1:-1, 1:-1] = (((new[2:, :-2] + 2 * new[2:, 1:-1] + new[2:, 2:]) -
                           (new[:-2, :-2] + 2 * new[:-2, 1:-1] + new[:-2, 2:])) ** 2 +
                           ((new[:-2, 2:] + 2 * new[1:-1, :-2] + new[2:, 2:]) -
                           (new[:-2, :-2] + 2 * new[1:-1, :-2] + new[2:, :-2])) ** 2) ** (0.5)
        new = self.__shrink_values(new)
        data = new[1:-1, 1:-1]
        self.__image = data
    @image_loaded
    def rotate(self, angle):
        """Rotate the image by *angle* degrees (forward mapping into a 3x
        canvas, then crop to the non-zero bounding box).

        NOTE(review): forward per-pixel mapping can leave unfilled holes for
        non-right angles -- confirm acceptable for the intended inputs.
        """
        angle = np.deg2rad(angle)
        data = self.__image
        px = data.shape[0] / 2
        py = data.shape[1] / 2
        new = np.zeros(
            (data.shape[0] * 3, data.shape[1] * 3, data.shape[2]), np.uint8)
        for i, j in itertools.product(np.arange(0, data.shape[0]), np.arange(0, data.shape[1]), repeat=1):
            new[np.cos(angle) * i - np.sin(angle) * j + px, np.sin(
                angle) * i + np.cos(angle) * j + py] = data[i, j]
        # Crop away the all-zero margin around the rotated content.
        horiz = np.nonzero(new.sum(axis=0) != 0)[0]
        vert = np.nonzero(new.sum(axis=1) != 0)[0]
        new = new[vert[0]:vert[-1], horiz[0]:horiz[-1]]
        self.__image = new
@image_loaded
def szum(self, prop, method):
if method == 'solpieprz':
self.__szum_solpieprz(prop)
elif method == 'rownomierny1':
self.__szum_rownomierny1(prop)
elif method == 'rownomierny2':
self.__szum_rownomierny1(prop)
    @image_loaded
    def odszumianie(self, method):
        """Denoise the image; raises NoSuchMethodError for unknown methods.

        NOTE(review): the target methods (__odszumianie_srednie,
        __odszumianie_medianowe, __odszymianie_medianowe2 -- note the typo)
        are not defined anywhere in this class, and ``self`` is also passed
        as an explicit argument; as written every branch raises
        AttributeError.  Needs the missing implementations before use.
        """
        if method == 'srednia':
            self.__odszumianie_srednie(self)
        elif method == 'mediana':
            self.__odszumianie_medianowe(self)
        elif method == 'mediana2':
            self.__odszymianie_medianowe2(self)
        else:
            raise NoSuchMethodError()
    @image_loaded
    def maska(self):
        """Extract a binary directional mask from the thresholded red channel
        and broadcast it over all three output channels.

        NOTE(review): the second and third clauses of the mask expression are
        identical -- most likely a copy-paste slip for a fourth direction.
        """
        data = self.__image
        data = data[:, :, 0]
        data = (data < 125) * 1
        tmp = np.zeros(data.shape)
        tmp[1:-1, 1:-1] = ((data[1:-1, :-2] == 0) & (data[1:-1, 1:-1] == 1) & (data[1:-1, 2:] == 1) & (data[:-2, 2:] == 1) & (data[2:, 2:] == 1) |
                           (data[2:, 1:-1] == 0) & (data[1:-1, 1:-1] == 1) & (data[:-2, 2:] == 1) & (data[:-2, 1:-1] == 1) & (data[:-2, :-2] == 1) |
                           (data[2:, 1:-1] == 0) & (data[1:-1, 1:-1] == 1) & (data[:-2, 2:] == 1) & (data[:-2, 1:-1] == 1) & (data[:-2, :-2] == 1) |
                           (data[:-2, 1:-1] == 0) & (data[1:-1, 1:-1] == 1) & (data[2:, :-2] == 1) & (data[2:, 1:-1] == 1) & (data[2:, 2:] == 1))
        self.__image = np.zeros((data.shape[0], data.shape[1], 3))
        self.__image[:, :, 0] = tmp
        self.__image[:, :, 1] = tmp
        self.__image[:, :, 2] = tmp
    @image_loaded
    def KKM(self):
        """Skeletonize the thresholded image with the KMM thinning algorithm
        (iterative contour marking and deletion until a fixed point).

        ``czworki`` and ``wyciecia`` are the algorithm's lookup tables of
        8-neighbourhood weight sums: corner configurations and deletable
        pixel configurations respectively.
        """
        czworki = [3, 6, 7, 12, 14, 15, 24, 28, 30, 48, 56, 60, 96,
                   112, 120, 129, 131, 135, 192, 193, 195, 224, 225, 240]
        wyciecia = [3, 5, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31, 48, 52, 53, 54, 55, 56, 60, 61, 62, 63, 65, 67, 69, 71, 77, 79, 80, 81, 83, 84, 85, 86, 87, 88, 89, 91, 92, 93, 94, 95, 97, 99, 101, 103, 109, 111, 112, 113, 115, 116, 117, 118, 119, 120, 121, 123, 124, 125, 126, 127, 131, 133, 135, 141, 143, 149, 151, 157, 159, 181, 183, 189, 191, 192, 193, 195, 197, 199, 205, 207, 208, 209, 211, 212, 213, 214, 215, 216, 217, 219, 220, 221, 222, 223, 224, 225, 227, 229, 231, 237, 239, 240, 241, 243, 244, 245, 246, 247, 248, 249, 251, 252, 253, 254, 255]
        data = self.__image
        print data.shape
        # Binarize on the red channel: foreground = dark pixels.
        data = data[:, :, 0]
        data = (data < 125) * 1
        old = np.zeros(data.shape)
        iter = 0
        verb = False  # verbose debug dumps of each step
        while not np.array_equal(old, data):
            print "iteracja: ", iter
            iter += 1
            old = data.copy()
            if verb:
                print "Poczatkowe"
                print data
            # Step I: mark foreground pixels with a 4-connected background neighbour (value 2).
            pion = np.zeros(data.shape)
            pion[1:-1, 1:-1] = (data[:-2, 1:-1] == 0) | (data[2:, 1:-1] == 0) |\
                (data[1:-1, :-2] == 0) | (data[1:-1, 2:] == 0)
            pion = (data == 1) * pion
            data = data + pion
            if verb:
                print "Po kroku I"
                print data
            # Step II: mark pixels with a diagonal background neighbour (value 3).
            pion = np.zeros(data.shape)
            pion[1:-1, 1:-1] = (data[:-2, :-2] == 0) | (data[:-2, 2:] == 0) |\
                (data[2:, :-2] == 0) | (data[2:, 2:] == 0)
            pion = (data == 1) * pion
            data = data + pion * 2
            if verb:
                print "Po kroku II"
                print data
            # Step III: promote corner configurations (czworki table) to value 4.
            tmp = np.zeros(data.shape)
            tmp[1:-1, 1:-1] = 1 * (data[:-2, :-2] > 0) +\
                2 * (data[1:-1, :-2] > 0) +\
                4 * (data[2:, :-2] > 0) +\
                128 * (data[:-2, 1:-1] > 0) +\
                8 * (data[2:, 1:-1] > 0) +\
                64 * (data[:-2, 2:] > 0) +\
                32 * (data[1:-1, 2:] > 0) +\
                16 * (data[2:, 2:] > 0)
            tmp = (data == 2) * tmp
            tmp2 = np.zeros(tmp.shape, dtype=np.bool)
            for i in czworki:
                tmp2 |= (tmp == i)
            data += (tmp2 * 2)
            if verb:
                print "Po kroku III"
                print data
            # Step IV: delete or keep value-4 pixels according to the wyciecia table.
            tmp = np.zeros(data.shape)
            tmp[1:-1, 1:-1] = 1 * (data[:-2, :-2] > 0) +\
                2 * (data[1:-1, :-2] > 0) +\
                4 * (data[2:, :-2] > 0) +\
                128 * (data[:-2, 1:-1] > 0) +\
                8 * (data[2:, 1:-1] > 0) +\
                64 * (data[:-2, 2:] > 0) +\
                32 * (data[1:-1, 2:] > 0) +\
                16 * (data[2:, 2:] > 0)
            tmp = (data == 4) * tmp
            tmp2 = np.zeros(tmp.shape, dtype=np.bool)
            for i in wyciecia:
                tmp2 |= (tmp == i)
            tmp = (tmp > 0) - tmp2
            data = data * (data != 4) + tmp * 1 + tmp2 * 0
            if verb:
                print "Po kroku IV"
                print data
            # Step V: same deletion pass for value-2 pixels.
            tmp = np.zeros(data.shape)
            tmp[1:-1, 1:-1] = 1 * (data[:-2, :-2] > 0) +\
                2 * (data[1:-1, :-2] > 0) +\
                4 * (data[2:, :-2] > 0) +\
                128 * (data[:-2, 1:-1] > 0) +\
                8 * (data[2:, 1:-1] > 0) +\
                64 * (data[:-2, 2:] > 0) +\
                32 * (data[1:-1, 2:] > 0) +\
                16 * (data[2:, 2:] > 0)
            tmp = (data == 2) * tmp
            tmp2 = np.zeros(tmp.shape, dtype=np.bool)
            for i in wyciecia:
                tmp2 |= (tmp == i)
            tmp = (tmp > 0) - tmp2
            data = data * (data != 2) + tmp * 1 + tmp2 * 0
            if verb:
                print "Po kroku V"
                print data
            # Step VI: same deletion pass for value-3 pixels.
            tmp = np.zeros(data.shape)
            tmp[1:-1, 1:-1] = 1 * (data[:-2, :-2] > 0) +\
                2 * (data[1:-1, :-2] > 0) +\
                4 * (data[2:, :-2] > 0) +\
                128 * (data[:-2, 1:-1] > 0) +\
                8 * (data[2:, 1:-1] > 0) +\
                64 * (data[:-2, 2:] > 0) +\
                32 * (data[1:-1, 2:] > 0) +\
                16 * (data[2:, 2:] > 0)
            tmp = (data == 3) * tmp
            tmp2 = np.zeros(tmp.shape, dtype=np.bool)
            for i in wyciecia:
                tmp2 |= (tmp == i)
            tmp = (tmp > 0) - tmp2
            data = data * (data != 3) + tmp * 1 + tmp2 * 0
            if verb:
                print "Po kroku VI"
                print data
        # Scale the 0/1 skeleton back to 0/255 and broadcast to RGB.
        data = data * 255
        print data.shape
        self.__image = np.zeros((data.shape[0], data.shape[1], 3))
        print self.__image.shape
        self.__image[:, :, 0] = data
        self.__image[:, :, 1] = data
        self.__image[:, :, 2] = data
        print self.__image.shape
    @image_loaded
    def save(self, path):
        """Write the image to *path*, forcing the alpha channel opaque first."""
        self.__clear_alpha()
        misc.imsave(path, self.__image)
    def __grayscale1(self):
        """Grayscale via per-pixel channel mean (summed in uint32 to avoid
        uint8 overflow), broadcast back over all three channels."""
        data = self.__image
        new = np.array(data, dtype=np.uint32)
        new[:, :, 0] += data[:, :, 1]
        new[:, :, 0] += data[:, :, 2]
        new[:, :, 0] /= 3
        data[:, :, 1] = data[:, :, 2] = data[:, :, 0] = new[:, :, 0]
        self.__image = data
    def __scale_down(self, factor):
        """Downsample by keeping every (1/factor)-th pixel (nearest neighbour)."""
        factor = (int)(factor ** (-1))
        data = self.__image
        data = np.array(data[::factor, ::factor, :])
        self.__image = data
    def __scale_up(self, factor):
        """Upsample by integer *factor* using pixel replication."""
        data = self.__image
        new = np.zeros(
            (data.shape[0] * factor, data.shape[1] * factor, data.shape[2]))
        for x in xrange(data.shape[0]):
            for y in xrange(data.shape[1]):
                new[x * factor:(x + 1) * factor, y *
                    factor:(y + 1) * factor, :] = data[x, y, :]
        self.__image = new
    def __progowanie_globalne(self, *args, **kwargs):
        """Binarize against the single global threshold (mean intensity)."""
        data = self.__image
        mean = self.__prog_globalny()
        data = (data > mean) * 255.
        self.__image = data
    def __progowanie_lokalne(self, otoczenie=5, *argx, **kwargs):
        """Binarize against the per-pixel local-mean threshold."""
        data = self.__image
        prog = self.__prog_lokalny(otoczenie)
        data = (data > prog) * 255
        self.__image = data
    def __progowanie_mieszane(self, otoczenie, odchylenie):
        """Binarize against the mixed (local, bounded by global) threshold."""
        data = self.__image
        prog = self.__prog_mieszany(otoczenie, odchylenie)
        data = (data > prog) * 255
        self.__image = data
    def __prog_globalny(self):
        """Global threshold: mean of the (grayscale) red channel."""
        data = self.__image
        return data[:, :, 0].mean()
    def __prog_lokalny(self, otoczenie):
        """Per-pixel threshold: mean over a (2*otoczenie+1)^2 neighbourhood
        of the edge-replicated image."""
        data = self.__image
        new = self.__expand(data, otoczenie)
        prog = np.zeros(data.shape)
        # Accumulate all shifted copies of the padded image, then average.
        for d in itertools.product(np.arange(0, 2 * otoczenie + 1), repeat=2):
            prog[:, :] += new[d[0]:new.shape[0] - 2 * otoczenie + d[0],
                              d[1]:new.shape[1] - 2 * otoczenie + d[1]]
        prog /= (2 * otoczenie + 1) ** 2
        return prog
def __prog_mieszany(self, otoczenie, odchylenie):
globa = self.__prog_globaalny()
prog = self.__prog_lokalny(otoczenie)
prog -= (globa + odchylenie)
prog = prog * (prog > 0)
prog -= 2 * odchylenie
prog = prog * (prog < 0)
prog += (globa + odchylenie)
return prog
    def __expand(self, src, otoczenie):
        """Return a copy of *src* padded by *otoczenie* pixels of edge
        replication on all four sides."""
        data = src.copy()
        # Replicate the first/last columns...
        left = data[:, 0, :]
        right = data[:, -1, :]
        for i in xrange(otoczenie - 1):
            left = np.column_stack((left, data[:, 0, :]))
            right = np.column_stack((right, data[:, -1, :]))
        left = left.reshape((data.shape[0], -1, data.shape[2]))
        right = right.reshape((data.shape[0], -1, data.shape[2]))
        data = np.column_stack((left, data, right))
        # ...then the first/last rows of the widened array.
        top = data[0, :, :]
        bottom = data[-1, :, :]
        for i in xrange(otoczenie - 1):
            top = np.column_stack((top, data[0, :, :]))
            bottom = np.column_stack((bottom, data[-1, :, :]))
        top = top.reshape((-1, data.shape[1], data.shape[2]))
        bottom = bottom.reshape((-1, data.shape[1], data.shape[2]))
        data = np.vstack((top, data, bottom))
        return data
    def __clear_alpha(self):
        """Force the alpha channel (if present) to fully opaque before saving."""
        if len(self.__image.shape) > 2:
            if self.__image.shape[2] == 4:
                self.__image[:, :, 3] = 255
        pass
    def __shrink_values(self, src):
        """Return a copy of *src* with values clamped to the [0, 255] range."""
        data = src.copy()
        data = data * (data > 0)  # negatives -> 0
        data -= 255
        data = data * (data < 0)  # values above 255 become 255 after shifting back
        data += 255
        return data
    def __szum_solpieprz(self, prop):
        """Salt-and-pepper noise: replace roughly *prop* of the pixels.

        NOTE(review): replacements are written as 0/1 rather than 0/255, and
        ``(1 - r)`` with r drawn from randint(100) is not a boolean mask
        (only r == 1 zeroes a pixel) -- verify the intended behaviour.
        """
        data = self.__image
        prop *= 100
        s = data.shape[0] * data.shape[1]
        s2 = (data.shape[0], data.shape[1])
        r = np.random.randint(100, size=s).reshape(s2)
        r2 = np.random.randint(2, size=s).reshape(s2)
        data = data * (1 - r).repeat(
            4).reshape(data.shape) + r2.repeat(4).reshape(data.shape)
        self.__image = data
    def __szum_rownomierny1(self, prop):
        """Uniform noise: shift ~prop of the pixels (all channels together)
        by a random offset pushed away from zero, then clamp to [0, 255]."""
        data = self.__image
        prop *= 100
        s2 = (data.shape[0], data.shape[1])
        r = np.random.randint(100, size=s2).reshape(s2)
        r = r < prop  # noise mask: True for roughly prop of the pixels
        tmp = np.array(data, dtype=np.int64)
        r2 = np.random.randint(20, size=s2).reshape(s2) - 10
        r2 = r2 + (r2 > 0) * 20 - (r2 < 0) * 20  # push offsets away from zero
        r2 = r2 * r
        r2 = r2.repeat(4).reshape(data.shape)
        tmp += r2
        # Clamp to [0, 255].
        tmp = tmp * (tmp > 0)
        tmp -= 255
        tmp = tmp * (tmp < 0)
        tmp += 255
        self.__image = tmp
    def __szum_rownomierny2(self, prop):
        """Uniform noise applied per channel (each channel masked and shifted
        independently) rather than per pixel; clamped to [0, 255]."""
        data = self.__image
        prop *= 100
        s = reduce(lambda x, y: x * y, data.shape)
        r = np.random.randint(100, size=s).reshape(s)
        r = r < prop
        tmp = np.array(data, dtype=np.int64)
        r2 = np.random.randint(20, size=s) - 10
        r2 = r2 * r
        r2 = r2 + (r2 > 0) * 20 - (r2 < 0) * 20
        r2 = r2.reshape(data.shape)
        tmp += r2
        # Clamp to [0, 255].
        tmp = tmp * (tmp > 0)
        tmp -= 255
        tmp = tmp * (tmp < 0)
        tmp += 255
        self.__image = tmp
    def segment1(self, directory):
        """Segment dark glyphs via row/column projection histograms.

        Each detected character is resized to 100x100 and written into
        *directory* as sequentially numbered PNG files; the binarized image
        is also dumped to 'binary.png'.
        """
        def ranges(val):
            # Return [start, end) intervals of consecutive non-zero histogram bins.
            # NOTE(review): the parameter *val* is ignored; the closure reads
            # the outer ``hist`` instead -- works only because callers rebind
            # ``hist`` before each call.
            lines = []
            tmp = 0
            combo = False
            for (i, j) in enumerate(hist):
                if j > 0 and not combo:
                    combo = True
                    tmp = i
                elif not j and combo:
                    combo = False
                    lines.append([tmp, i])
            if combo:
                lines.append([tmp, i])
            return lines
        data = (self.__image[:, :, 0] < 127) * 1
        misc.imsave('binary.png', data)
        hist = data.sum(axis=1)
        lines = ranges(hist)
        num = 0
        for l in lines:
            # For each text line, split into characters by column histogram,
            # then tighten each character's vertical extent.
            line = data[l[0]:l[1], :]
            hist = line.sum(axis=0)
            chars = ranges(hist)
            for c in chars:
                path = directory + '/%05d.png' % num
                c1 = data[l[0]:l[1], c[0]:c[1]]
                hist = c1.sum(axis=1)
                lines2 = ranges(hist)
                litera = misc.imresize(data[l[0] + lines2[0][0]:l[0] + lines2[
                    -1][1], c[0]:c[1]], size=(100, 100))
                litera = [litera, litera, litera]
                misc.imsave(path, litera)
                num += 1
    def segment2(self, directory):
        """Segment glyphs by 8-connected flood fill, then re-attach the dots
        of 'i'/'j' letters to the component directly below them.

        Each component is written to *directory* as
        '<down>x<left>x<up>x<right>.png' (see gen_filename); merged
        dot+letter pairs replace their two source files.
        """
        print "Segment2"
        def neighbour(data, p):
            # 8-connected non-zero neighbours of p (clamped away from the border).
            p = list(p)
            if p[0] == 0:
                p[0] = 1
            if p[1] == 0:
                p[1] = 1
            return set([tuple(i + p - (1, 1)) for i in np.transpose(data[p[0] - 1:p[0] + 2, p[1] - 1:p[1] + 2].nonzero())])
        all_chars = []
        pprint(self.__image[:, :, 0])
        data = (self.__image[:, :, 0] < 130) * 1
        misc.imsave('binary.png', data)
        buf = set()
        checked = set()
        num = 0
        pprint(data)
        licznik = 1
        while data.sum():
            # Flood-fill one connected component starting from the first set pixel.
            checked = set()
            buf.add(tuple(np.transpose(data.nonzero())[0]))
            while buf:
                p = buf.pop()
                n = neighbour(data, p)
                checked.add(p)
                buf = buf.union(n - checked)
            print licznik
            licznik += 1
            # Save the component's bounding-box crop and erase it from the image.
            checked = np.array(list(checked))
            minx = checked[:, 0].min()
            miny = checked[:, 1].min()
            maxx = checked[:, 0].max() + 1
            maxy = checked[:, 1].max() + 1
            tmp = np.zeros((1 + maxx - minx, 1 + maxy - miny))
            filename = gen_filename(maxx, miny, minx, maxy)
            path = directory + '/' + filename
            all_chars.append(
                np.array(filename.split('.')[0].split('x'), dtype=int))
            for i in checked:
                data[i[0], i[1]] = 0
                tmp[i[0] - minx, i[1] - miny] = 1
            misc.imsave(path, tmp)
            num += 1
        # Merge detached dots with the 'i'/'j' letter bodies.
        files = glob(directory + "/*.png")
        print "szukam kandydatów na kropki"
        i = files[4]  # NOTE(review): leftover debug -- crashes with < 5 files; *i* is rebound below anyway
        # Parse each file name back into (down, left, up, right) coordinates.
        poz = np.array([".".join(i.split(
            '/')[-1].split('.')[:-1]).split('x') for i in files], dtype=int)
        print poz
        # Append height (col 4) and width (col 5) to each record.
        poz = np.array([i.tolist() + [i[0] - i[2], i[3] - i[1]] for i in poz])
        poz.tofile("/tmp/poz.txt", sep="&")
        # Dot candidates: components much smaller than average in both dimensions.
        kropki = [tuple(i) for i in poz if i[4] < (poz[:, 4].mean() - 0.5 * poz[:, 4].std()) and i[5] < (poz[:, 4].mean() - 0.5 * poz[:, 4].std())]
        print kropki
        kropki = set(kropki)
        kropki_iter = kropki.copy()
        for k in kropki_iter:
            # Discard candidates that are really sentence-ending periods by
            # inspecting the glyph to their left.
            found = False
            print "Sprawdzam kropke:", k
            lista = find_left(poz, k)
            if not lista.shape:
                found = True
            while not found:
                if not len(lista):
                    found = True
                    break
                tmp = lista[0]
                lista = lista[1:]
                print "literka na lewo: ", tmp
                if (tmp[0] > (k[2] - k[4])) and (tmp[0] < k[0] + k[4]):
                    if tuple(tmp) in kropki_iter:
                        print "warunek mówi że na końcu, ale jest koło innej kropki więc to jest kropka!!!"
                    else:
                        print "kropka na końcu"
                        found = True
                        kropki.remove(k)
                else:
                    mid = (float(tmp[0]) + tmp[2]) / 2.0
                    top = float(tmp[2])
                    print "mid i top oraz k[0]:", mid, top, k[0]
                    print "mid - k[0], top - k[0]", mid - k[0], top - k[0]
                    if abs(mid - k[0]) < abs(top - k[0]):
                        print "Kropka na końcu. drugi warunek"
                        kropki.remove(k)
                        found = True
                    else:
                        print "Kropka do doklejenia", k
                        mid = float(k[1] + k[3]) / 2.0
                        print filter(lambda x: x[1] <= mid and x[3] >= mid, all_chars)
                        found = True
            print ""
        print "Kropki nad literami: ", kropki
        for i in kropki:
            # Paste the dot and the component below it into one combined
            # image, delete the two source files and save the merged one.
            print "Sklejam kropke", i
            doklejka = find_down(poz, i)
            if not doklejka.shape:
                continue
            doklejka = doklejka[0]
            print "doklejka: ", doklejka
            print doklejka[0]
            maxy = doklejka[0]
            miny = i[2]
            if doklejka[1] < i[1]:
                minx = doklejka[1]
            else:
                minx = i[1]
            if doklejka[3] > i[3]:
                maxx = doklejka[3]
            else:
                maxx = i[3]
            sizex = maxx - minx + 1
            sizey = maxy - miny + 1
            new = np.zeros((sizex , sizey )).T
            dx = i[1] - minx
            dy = i[2] - miny
            filename = gen_filename(i[0], i[1], i[2], i[3])
            path = directory + '/' + filename
            img = misc.imread(path)
            print filename
            os.remove(directory + '/' + filename)
            odx = dy
            dox = dy + i[0] - i[2] + 1
            ody = dx
            doy = dx + i[3] - i[1] + 1
            print "minx=%d, maxx=%d, miny=%d, maxy=%d"%(minx, maxx, miny, maxy)
            print "sizex=%d, sizey=%d"%(sizex, sizey)
            print "new.shape", new.shape
            print "img.shape", img.shape
            print ody,":", doy, ", ",odx,":", dox
            print "..."
            new[odx:dox, ody:doy] = img
            dx = doklejka[1] - minx
            dy = doklejka[2] - miny
            filename = gen_filename(doklejka[0], doklejka[1], doklejka[2], doklejka[3])
            path = directory + '/' + filename
            img = misc.imread(path)
            print filename
            os.remove(directory + '/' + filename)
            odx = dy
            dox = dy + doklejka[0] - doklejka[2] + 1
            ody = dx
            doy = dx + doklejka[3] - doklejka[1] + 1
            print "minx=%d, maxx=%d, miny=%d, maxy=%d"%(minx, maxx, miny, maxy)
            print "sizex=%d, sizey=%d"%(sizex, sizey)
            print "new.shape", new.shape
            print "img.shape", img.shape
            print ody,":", doy, ", ",odx,":", dox
            print "..."
            new[odx:dox, ody:doy] = img
            filename = gen_filename(maxy, minx, miny, maxx)
            misc.imsave(directory + '/' + filename, new)
def resize2(self, size):
self.__image = misc.imresize(self.__image__, size)
return self.__image__
def kkm2(self):
    # KMM thinning (skeletonization) of the wrapped image.
    # Binarizes the image, then repeatedly peels boundary pixels off the
    # foreground until a full pass changes nothing, leaving a (roughly)
    # one-pixel-wide skeleton. The result is written back to self.__image
    # as an RGB array and inverted via self.negative().
    #
    # 'czworki' ("fours"): 8-neighbourhood codes of contour pixels that get
    # re-marked as deletion candidates (value 4) in step 3.
    czworki = [3, 6, 7, 12, 14, 15, 24, 28, 30, 48, 56, 60, 96,
               112, 120, 129, 131, 135, 192, 193, 195, 224, 225, 240]
    # 'wyciecia' ("deletions"): neighbourhood codes whose centre pixel can be
    # removed without disconnecting the skeleton.
    wyciecia = [3, 5, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31, 48, 52, 53, 54, 55, 56, 60, 61, 62, 63, 65, 67, 69, 71, 77, 79, 80, 81, 83, 84, 85, 86, 87, 88, 89, 91, 92, 93, 94, 95, 97, 99, 101, 103, 109, 111, 112, 113, 115, 116, 117, 118, 119, 120, 121, 123, 124, 125, 126, 127, 131, 133, 135, 141, 143, 149, 151, 157, 159, 181, 183, 189, 191, 192, 193, 195, 197, 199, 205, 207, 208, 209, 211, 212, 213, 214, 215, 216, 217, 219, 220, 221, 222, 223, 224, 225, 227, 229, 231, 237, 239, 240, 241, 243, 244, 245, 246, 247, 248, 249, 251, 252, 253, 254, 255]
    #sprawdzarka = [[128, 64, 32], [1, 0, 16], [2, 4, 8]]

    def sprawdzarka(obj, p):
        # "checker": encode the 8-neighbourhood of pixel p as one integer
        # in [0, 255] by weighting each nonzero neighbour with a power of
        # two. `obj` is unused (self is passed by convention); `data` is
        # captured from the enclosing scope.
        tmp = 1 * ((data[p[0] - 1:p[0] + 2, p[1] - 1:p[1] + 2]) > 0)
        macierz = np.array([[128, 64, 32], [1, 0, 16], [2, 4, 8]])
        #macierz = np.array([[128, 1, 2], [64,0,4], [32,16,8]])
        suma = (tmp * macierz).sum()
        # print "DEBUG"
        # print p
        # pprint(data[p[0]-1:p[0]+2,p[1]-1:p[1]+2])
        # pprint(tmp)
        # print suma
        return suma

    # Pad the image with a 1-pixel border (via __expand), keep a single
    # channel, and binarize: dark pixels (< 127) become foreground (1).
    # Assumes __expand returns an (H, W, C) array -- the [:, :, 0] slice
    # requires 3 dimensions.
    data = self.__expand(self.__image, 1)[:, :, 0]
    data = 1 * (data < 127)
    # Force a background border so the 3x3 neighbourhood windows used by
    # sprawdzarka never run off the image.
    data[0, :] = 0
    data[-1, :] = 0
    data[:, 0] = 0
    data[:, -1] = 0
    old = np.zeros(data.shape)
    DEBUG = True  # NOTE(review): hard-coded; prints the full matrix every pass
    # Iterate until a complete pass leaves the image unchanged.
    while not (old == data).all():
        print "iteracja"
        old = data.copy()
        # Step 1 (krok 1): mark foreground pixels with a 4-connected
        # background neighbour as contour pixels (1 -> 2).
        sasiedzi = 1 * (data[1:-1, :-2] == 0) + 1 * (data[1:-1, 2:] == 0) +\
            1 * (data[:-2, 1:-1] == 0) + 1 * (data[2:, 1:-1] == 0)
        sasiedzi = (sasiedzi > 0)
        sasiedzi = (data[1:-1, 1:-1] == 1) * sasiedzi
        data[1:-1, 1:-1] = data[1:-1, 1:-1] + sasiedzi
        if DEBUG:
            print "Krok 1"
            pprint(data)
        # Step 2 (krok 2): mark remaining foreground pixels with only a
        # diagonal background neighbour (1 -> 3).
        sasiedzi = 1 * (data[:-2, :-2] == 0) + 1 * (data[2:, 2:] == 0) +\
            1 * (data[:-2, 2:] == 0) + 1 * (data[2:, :-2] == 0)
        sasiedzi = (sasiedzi > 0)
        sasiedzi = (data[1:-1, 1:-1] == 1) * sasiedzi
        data[1:-1, 1:-1] = data[1:-1, 1:-1] + sasiedzi * 2.0
        if DEBUG:
            print "Krok 2"
            pprint(data)
        # Step 3 (krok 3): contour pixels (2) whose neighbourhood code is in
        # 'czworki' become explicit deletion candidates (2 -> 4).
        # data2 = data.copy()
        tmp = np.transpose((data == 2).nonzero())
        for d in tmp:
            if sprawdzarka(self, d) in czworki:
                data[d[0], d[1]] = 4
        if DEBUG:
            print "Krok 3"
            pprint(data)
        # Step 4 (krok 4): delete candidates (4) whose code is in 'wyciecia';
        # otherwise restore them to plain foreground (1).
        #data2 = data.copy()
        tmp = np.transpose((data == 4).nonzero())
        for c in tmp:
            if sprawdzarka(self, c) not in wyciecia:
                data[c[0], c[1]] = 1
            else:
                data[c[0], c[1]] = 0
        if DEBUG:
            print "Krok 4"
            pprint(data)
        # Step 5 (krok 5): same deletion test for the remaining 4-connected
        # contour pixels (2).
        #data2 = data.copy()
        tmp = np.transpose((data == 2).nonzero())
        for c in tmp:
            if sprawdzarka(self, c) not in wyciecia:
                data[c[0], c[1]] = 1
            else:
                data[c[0], c[1]] = 0
        if DEBUG:
            print "Krok 5"
            pprint(data)
        # Step 6 (krok 6): same deletion test for the diagonal contour
        # pixels (3).
        #data2 = data.copy()
        tmp = np.transpose((data == 3).nonzero())
        for c in tmp:
            if sprawdzarka(self, c) not in wyciecia:
                data[c[0], c[1]] = 1
            else:
                data[c[0], c[1]] = 0
        if DEBUG:
            print "Krok 6"
            pprint(data)
    # print type(data)
    # print "Po kkm2"
    # Drop the 1-pixel border added above and rescale 0/1 -> 0/255.
    data = data[1:-1, 1:-1] * 255
    # Replicate the single channel into a 3-channel (RGB) image.
    wynik = []
    for i in data:
        tmp = []
        for j in i:
            tmp.append([j, j, j])
        wynik.append(tmp)
    self.__image = np.array(wynik)
    self.negative()
    print "A"
    pprint(data)
    pprint(self.__image)
    print "B"
def shape(self):
return self.__image.shape
| [
"torgiren@gmail.com"
] | torgiren@gmail.com |
317532f1b4fd432f8a028467d86425206e67c3a8 | c8fed2de2e73811c57cca397abd2e3323df33bba | /FeatureExtractor/correlation_identifier.py | c2ea4cec85d5cad26183ec9b56e3355ac658a1f1 | [] | no_license | jsonbao/MolecularFeatureEngineering | 267b29e85e82f6d2916157b858f8ecd7658ce09b | 8498cc487eca66a976aaf18f8f63918c29640684 | refs/heads/master | 2021-01-15T09:03:07.750254 | 2016-04-13T21:08:56 | 2016-04-13T21:08:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,309 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import config
import molecule_feature_matrix
import csv
import os
# from sklearn import linear_model
__author__="Aakash Ravi"
# def identify_uniform_features( feature_matrix, num_features ):
# "This function takes in a matrix of size (n x m), where n is the \
# number of fragments, and m the number of features. It then computes \
# features which do not differ a lot in the data set. \
# This is done by taking the diagonal values of the covariance \
# matrix, which correspond to the variation of a certain feature \
# and taking the square root, obtaining the standard deviation. \
# We then divide the standard deviation by the mean of the feature, \
# this way we have a normalized score that we can compare accross \
# features. We can then use this score to identify the 'best' features- \
# features with the lowest variance - and return their indices. \
# This score is also known as the 'Coefficient \
# of Variance'"
# # Avoid degenerate cases when our dataset is sometimes empty
# if feature_matrix == []:
# print("ERROR: empty feature matrix, couldn't identify \
# uniform features")
# return []
# cv_matrix = np.cov(feature_matrix, None, rowvar=0)
# # Take diagonal variance values and compute the standard deviation
# d = np.diag(cv_matrix)
# # Compute the standard deviation
# std_deviation = np.sqrt(d)
# # Divide by the mean for the feature
# mean_features = np.mean(feature_matrix, axis=0)
# # We need to take the absolute value of the mean since the mean may be
# # negative. We only care about the ratio between the mean and standard
# # deviation, so dividing by the absolute value suffices.
# variance_score = np.divide(std_deviation,np.absolute(mean_features))
# # Take the features with the lowest scores -
# # these correspond to features with the lowest variation
# indices = np.argpartition(variance_score,num_features)[0:num_features]
# return indices
# def get_top_features( feature_matrix, num_features ):
# "This function performs performs logistic regression on our sample fragment \
# data and finds coefficients for the features. Using these coefficients the \
# function will return the most important features that correspond to the active \
# molecules by choosing the features that correspond to the highest coefficient values."
# log_reg = linear_model.LogisticRegression(solver = 'liblinear')
# TRAINING_DATA = np.array(feature_matrix)[0:len(feature_matrix)*.8,0:len(feature_matrix[0])-1]
# TEST_DATA = np.array(feature_matrix)[len(feature_matrix)*.8:len(feature_matrix), \
# 0:len(feature_matrix[0])-1]
# TRAINING_RESULTS = np.array(feature_matrix)[0:len(feature_matrix)*.8,len(feature_matrix[0])-1]
# TEST_RESULTS = np.array(feature_matrix)[len(feature_matrix) *.8:len(feature_matrix), \
# len(feature_matrix[0])-1]
# print(log_reg.fit(TRAINING_DATA, TRAINING_RESULTS))
# def identify_correlated_features( feature_matrix, \
# num_features, threshold = .80):
# "This function takes as input the feature_matrix, and returns a subset of features that are \
# highly representative of all the features. This subset will be in the form of a vector containing \
# the indices of the subset of features. \
# This is done by finding features with a lot of 'neighbors' in the correlation matrix. A feature \
# i has neighbor feature j, if corr(i,j) >= threshold (so neighbors are highly correlated). We will \
# then identify num_features features with the highest amount of neighbors. Credits to this method \
# goes to Ondrej Micka."
# # Avoid degenerate cases when our dataset is sometimes empty
# if feature_matrix == []:
# print("ERROR: empty feature matrix, couldn't identify \
# uniform features")
# return []
# DATA_DIRECTORY = config.DATA_DIRECTORY
# if molecule_feature_matrix.DEBUG:
# with open(os.path.join(DATA_DIRECTORY,'all_descriptors.csv')) as f_handle:
# reader = csv.reader(f_handle)
# # Gets the first line
# all_descriptor_names = next(reader)
# cv_matrix = np.cov(feature_matrix, None, rowvar=0)
# neighbor_matrix = _get_neighbor_matrix(cv_matrix, threshold)
# # Vector holding the degree (number of neighbors) for every feature
# degree_vector = []
# for row in neighbor_matrix:
# deg = len(filter(lambda x: x == 1, row))
# # We subtract -1 since a feature is always perfectly correlated to itself
# degree_vector.append(deg - 1)
# if degree_vector == []:
# max_degree_feature = 0
# else:
# max_degree_feature = max(degree_vector)
# index_of_max_feature = degree_vector.index(max_degree_feature)
# # Keep track of all features that have some sort of correlation
# features_with_neighbors = [True]*len(degree_vector)
# # This vector will keep track of features that have been removed from consideration,
# # because they were heavily correlated with other features. It's usage will become
# # clear later.
# unecessary_features = []
# if molecule_feature_matrix.DEBUG:
# print "Correlated feature removing details: "
# if molecule_feature_matrix.DEBUG:
# neighborhood_filename = os.path.join(DATA_DIRECTORY,"Covariance_Neighborhoods")
# # While there are correlated features, we choose the feature with highest degree,
# # the one with the most neighbors, as a representant of some 'neighbor class'. We
# # then delete all features that are correlated with this representant (if it wasn't)
# # already chosen)
# significant_features = []
# while(max_degree_feature > 0):
# significant_features.append(index_of_max_feature)
# if molecule_feature_matrix.DEBUG:
# with open(neighborhood_filename,'w+') as f_handle:
# f_handle.write("\n\nNeighborhood for " + all_descriptor_names[index_of_max_feature] + "\n")
# # We start to clean up the neighbor matrix by making sure all neighbors of our
# # chosen representative no longer count as neighbors for other feaures since
# # they will be removed.
# for j in range(0,len(cv_matrix)):
# # Perform for every neighbor of our chosen 'max' feature
# if (j != index_of_max_feature) and \
# features_with_neighbors[j] and \
# (cv_matrix[index_of_max_feature][j] >= threshold):
# # First reduce the degree of all j's neighbors, since we will be removing it
# for k in range(0,len(cv_matrix)):
# if features_with_neighbors[k] and (cv_matrix[k][j]>=threshold):
# degree_vector[k] -= 1
# # Add the feature to the list of unecessary features
# unecessary_features.append(j)
# if molecule_feature_matrix.DEBUG:
# with open(neighborhood_filename,'a') as f_handle:
# f_handle.write(all_descriptor_names[j]+",")
# # Next, we finally remove all neighbors of i, since we already chose i as one of our features
# # and we don't want correlated features
# for j in range(0,len(cv_matrix)):
# if (j != index_of_max_feature) and \
# features_with_neighbors[j] and \
# (cv_matrix[index_of_max_feature][j] >= threshold):
# degree_vector[j] = 0
# features_with_neighbors[j] = False
# # Then move on to the next feature with neighbors, until we have chosen all of them
# max_degree_feature = max(degree_vector)
# index_of_max_feature = degree_vector.index(max_degree_feature)
# # Only keep the representants of each 'neighbor class' found from the previous
# # method, as well as features that are not correlated heavily with any other features.
# all_features = np.arange(len(feature_matrix[0]))
# non_redundant_features = np.delete(all_features, unecessary_features, 0)
# significant_features.extend(non_redundant_features)
# # Return the requested amount of significant features
# if (len(significant_features) <= num_features):
# return significant_features
# else:
# return significant_features[0:num_features-1]
# def _get_neighbor_matrix(covariance_matrix, threshold):
# "Returns a matrix M, where M(i,j)=M(j,i)=1 if cov(feature i, feature j)>=threshold, \
# and M(i,j)=M(j,i)=0 otherwise."
# neighbor_matrix = np.zeros(shape=(len(covariance_matrix),len(covariance_matrix)))
# for i in range(0, len(covariance_matrix)):
# for j in range(0, len(covariance_matrix[i])):
# if covariance_matrix[i][j] >= threshold:
# neighbor_matrix[i][j] =1
# return neighbor_matrix
# Look at the correlation matrix as a matrix of neighbours and count degrees for every feature
def _count_degrees(matrix,corr_threshold):
degs = []
for row in matrix:
deg = len(filter(lambda x: x >= corr_threshold, row))
degs.append(deg -1) #-1 is for the loop in every vertex
return degs
def identify_correlated_features( feature_matrix, \
        num_features, corr_threshold = .80):
    """Greedily select at most `num_features` mutually uncorrelated feature columns.

    Treats features as graph vertices with an edge between i and j whenever
    corr(i, j) >= corr_threshold (only positive correlation is tested).
    Repeatedly keeps the highest-degree vertex as the representative of its
    neighbourhood and discards all of its neighbours, until no edges remain.

    :param feature_matrix: (n_samples x n_features) matrix; columns are features.
    :param num_features: maximum number of feature indices to return.
    :param corr_threshold: Pearson correlation above which two features are
        considered redundant.
    :return: numpy array of selected column indices.
        NOTE(review): the final slice returns num_features-1 indices rather
        than num_features -- looks like an off-by-one; confirm before relying
        on the exact count.
    """
    # Avoid degenerate cases when our dataset is sometimes empty
    if feature_matrix == []:
        print("ERROR: empty feature matrix, couldn't identify \
        uniform features")
        return []

    DATA_DIRECTORY = config.DATA_DIRECTORY

    # In debug mode, load human-readable descriptor names so the
    # neighbourhood log written below can reference features by name.
    if molecule_feature_matrix.DEBUG:
        with open(os.path.join(DATA_DIRECTORY,'all_descriptors.csv')) as f_handle:
            reader = csv.reader(f_handle)
            # Gets the first line
            all_descriptor_names = next(reader)

    # Pearson correlation between feature columns (rowvar=0 -> each column
    # is a variable).
    corr_matrix = np.corrcoef(feature_matrix,None,rowvar=0)
    degrees = _count_degrees(corr_matrix,corr_threshold)
    # chosen[j] stays True while feature j is still a candidate; it is set
    # to False once j is discarded as redundant.
    chosen = [True]*len(degrees)
    isCorrelated = lambda i,j: corr_matrix[i][j] >= corr_threshold

    if degrees == []:
        m = 0
    else:
        m = max(degrees)
    # NOTE(review): if degrees were empty, .index(m) would raise ValueError;
    # unreachable in practice given the empty-matrix guard above, since
    # np.corrcoef then yields a non-empty square matrix.
    i = degrees.index(m)

    if molecule_feature_matrix.DEBUG:
        neighborhood_filename = os.path.join(DATA_DIRECTORY,"Covariance_Neighborhoods")
        # Truncate (or create) the log file before appending below.
        open(neighborhood_filename,'w+')

    # While there are still some correlated features, we choose feature with
    # highest degree as a representative and we remove all features that are
    # correlated with it (and weren't chosen yet already).
    while(m > 0):
        if molecule_feature_matrix.DEBUG:
            with open(neighborhood_filename,'a') as f_handle:
                f_handle.write("\n\nNeighborhood for " + all_descriptor_names[i] + "\n")

        for j in range(0,len(corr_matrix)):
            # For every still-active neighbour j of the chosen representative i
            if (j != i) and chosen[j] and isCorrelated(i,j):
                # Reduce the degree of all of j's neighbours, since we are
                # about to remove j from the graph.
                for k in range(0,len(corr_matrix)):
                    if chosen[k] and isCorrelated(k,j):
                        degrees[k] -= 1
                if molecule_feature_matrix.DEBUG:
                    with open(neighborhood_filename,'a') as f_handle:
                        f_handle.write(all_descriptor_names[j]+",")

        # Delete all neighbours of our chosen representative.
        # The neighbours can no longer be chosen in further iterations.
        for j in range(0,len(corr_matrix)):
            if (j != i) and chosen[j] and isCorrelated(i,j):
                degrees[j] = 0
                chosen[j] = False

        # Move on to the next highest-degree feature.
        m = max(degrees)
        i = degrees.index(m)

        if molecule_feature_matrix.DEBUG:
            with open(neighborhood_filename,'a') as f_handle:
                f_handle.write('\n')

    # Every feature still marked True is either a representative or was never
    # correlated with anything above the threshold.
    significant_features = np.where(np.array(chosen) == True)[0]

    # Return the requested amount of significant features
    if (len(significant_features) <= num_features):
        return significant_features
    else:
        return significant_features[0:num_features-1]
| [
"aakash_ravi@hotmail.com"
] | aakash_ravi@hotmail.com |
f72cc027f3c302262eb787439af41f96e2076840 | 8567618001fad9f083d2fa76a2f219fff6bb8c9b | /bigfile.py | fb0a84404d520f5ca4fa7afdc49590e1a6b5248a | [] | no_license | li-xirong/w2vvpp | 4490537a78f9e948ca4783e20fa133accccd9faf | 51d96e654ffbb7ce6cb2ec03e91d1ab5301f92c7 | refs/heads/master | 2022-07-15T01:02:18.436532 | 2020-01-01T04:16:40 | 2020-01-01T04:16:40 | 200,593,330 | 28 | 16 | null | 2022-06-21T22:59:55 | 2019-08-05T06:14:03 | Python | UTF-8 | Python | false | false | 5,142 | py | import os, sys, array
import numpy as np
class BigFile:
    """Random-access reader for a binary feature matrix stored on disk.

    Expected layout inside `datadir`:
      - shape.txt : one line "<n_rows> <n_dims>"
      - id.txt    : whitespace-separated row names, one per row, in file order
      - feature.bin (or `bin_file`): n_rows * n_dims float32 values, row-major

    Fixes over the original: `readall` was a copy-paste of `read` (with dead
    commented-out code) and now delegates to it; the unused `vecs` local was
    removed; the builtin-shadowing loop variable `next` was renamed; files
    opened in `__init__` are now closed deterministically.
    """

    def __init__(self, datadir, bin_file="feature.bin"):
        with open(os.path.join(datadir, 'shape.txt')) as f:
            self.nr_of_images, self.ndims = map(int, f.readline().split())
        with open(os.path.join(datadir, "id.txt")) as f:
            self.names = f.read().strip().split()
        assert(len(self.names) == self.nr_of_images)
        # name -> row index, for O(1) lookup in read()
        self.name2index = dict(zip(self.names, range(self.nr_of_images)))
        self.binary_file = os.path.join(datadir, bin_file)
        print ("[%s] %dx%d instances loaded from %s" % (self.__class__.__name__, self.nr_of_images, self.ndims, datadir))

    def readall(self, isname=True):
        """Read every row; returns (names, vectors) sorted by row index.

        `isname` is kept for interface compatibility; all rows are always
        addressed by name here.
        """
        return self.read(self.names, isname=True)

    def read(self, requested, isname=True):
        """Read a subset of rows.

        :param requested: iterable of row names (isname=True) or row indices
            (isname=False); duplicates and, for names, unknown entries are
            silently dropped.
        :param isname: whether `requested` holds names or integer indices.
        :return: (names, vectors) with rows sorted by their index in the file;
            each vector is a list of `ndims` floats.
        """
        requested = set(requested)
        if isname:
            index_name_array = [(self.name2index[x], x) for x in requested if x in self.name2index]
        else:
            assert(min(requested) >= 0)
            assert(max(requested) < len(self.names))
            index_name_array = [(x, self.names[x]) for x in requested]
        if len(index_name_array) == 0:
            return [], []

        # Sort by row index so the file can be read in a single forward pass.
        index_name_array.sort(key=lambda v: v[0])
        sorted_index = [x[0] for x in index_name_array]

        nr_of_images = len(index_name_array)
        offset = np.float32(1).nbytes * self.ndims  # bytes per row

        res = array.array('f')
        fr = open(self.binary_file, 'rb')
        try:
            fr.seek(index_name_array[0][0] * offset)
            res.fromfile(fr, self.ndims)
            previous = index_name_array[0][0]

            for idx in sorted_index[1:]:
                # After reading a row the file position is (previous+1)*offset,
                # so skip (idx - previous - 1) whole rows to reach row idx.
                move = (idx - 1 - previous) * offset
                fr.seek(move, 1)
                res.fromfile(fr, self.ndims)
                previous = idx
        finally:
            fr.close()

        return [x[1] for x in index_name_array], [res[i*self.ndims:(i+1)*self.ndims].tolist() for i in range(nr_of_images)]

    def read_one(self, name):
        """Read a single row by name; returns its vector as a list of floats."""
        renamed, vectors = self.read([name])
        return vectors[0]

    def shape(self):
        """Return [n_rows, n_dims]."""
        return [self.nr_of_images, self.ndims]
class StreamFile:
    """Sequential iterator over the same on-disk feature format as BigFile.

    Usage: open(), iterate (name, vector) pairs, close(). Iteration closes
    the file automatically when exhausted.

    Fixes over the original: the class only implemented the Python 2
    iterator protocol (`next`); a `__next__` alias is added so iteration
    also works on Python 3 (backward compatible). Files opened in
    `__init__` are closed deterministically.
    """

    def __init__(self, datadir):
        self.feat_dir = datadir
        with open(os.path.join(datadir, 'shape.txt')) as f:
            self.nr_of_images, self.ndims = map(int, f.readline().split())
        with open(os.path.join(datadir, "id.txt")) as f:
            self.names = f.read().strip().split()
        assert(len(self.names) == self.nr_of_images)
        # name -> row index (kept for parity with BigFile; not used below)
        self.name2index = dict(zip(self.names, range(self.nr_of_images)))
        self.binary_file = os.path.join(datadir, "feature.bin")
        print ("[%s] %dx%d instances loaded from %s" % (self.__class__.__name__, self.nr_of_images, self.ndims, datadir))
        self.fr = None
        self.current = 0

    def open(self):
        """Open the binary file and rewind the iteration cursor."""
        self.fr = open(os.path.join(self.feat_dir, 'feature.bin'), 'rb')
        self.current = 0

    def close(self):
        """Close the underlying file (idempotent)."""
        if self.fr:
            self.fr.close()
            self.fr = None

    def __iter__(self):
        return self

    def next(self):
        """Return the next (name, vector) pair; closes the file when done."""
        if self.current >= self.nr_of_images:
            self.close()
            raise StopIteration
        res = array.array('f')
        res.fromfile(self.fr, self.ndims)
        _id = self.names[self.current]
        self.current += 1
        return _id, res.tolist()

    # Python 3 iterator protocol; `next` is kept for Python 2 callers.
    __next__ = next
if __name__ == '__main__':
    # Smoke test against the toy dataset. Fix: the original used Python 2
    # `print name, vec` statements (a syntax error on Python 3); the
    # %s-formatting below produces byte-identical output on Python 2 and is
    # valid Python 3.
    feat_dir = 'toydata/FeatureData/f1'

    bigfile = BigFile(feat_dir)
    imset = str.split('b z a a b c')
    renamed, vectors = bigfile.read(imset)

    for name, vec in zip(renamed, vectors):
        print("%s %s" % (name, vec))

    bigfile = StreamFile(feat_dir)
    bigfile.open()
    for name, vec in bigfile:
        print("%s %s" % (name, vec))
    bigfile.close()
| [
"xirong_li@126.com"
] | xirong_li@126.com |
ccc220f2b8000f31ef1d3cf1ab310fba566c2af0 | b4691d2ef3249ad976ad476b68c5822058ba4cb3 | /LSTM_dynamic_padding.py | eebce7dfa28ad053b7a7e8279e8ecc18a65500b6 | [] | no_license | lucapierdicca/Train_Eval_ActivityRecoLSTM | 202bc401e8df5132195a62bb675fb9ad1f731d14 | faf722b9517f1b8fb950798f79ada305cce2bb75 | refs/heads/master | 2020-03-23T01:44:09.933072 | 2018-09-28T10:03:32 | 2018-09-28T10:03:32 | 140,934,241 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 25,365 | py | import tensorflow as tf
import numpy as np
import pickle
from pprint import pprint
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, classification_report
import os
import time
import datetime
#-----------------------------------------------------------------
#--------------------------FAKE DATASET---------------------------
#-----------------------------------------------------------------
'''
# ========fake parameters=======
max_class_id = 5 # y_true = activity
max_n_frame = 7 # max_n_frames
max_freq = 3 # obj_freq_in_frame
n_feature = 5 # bag-of-objects
n_video = 22
# ============creation==========
dataset_detection_video = []
for i in range(n_video):
current_class_id = np.random.randint(0,max_class_id)
current_n_frame = np.random.randint(10,10+max_n_frame)
#current_n_frame = 400
current_objs_in_frame = []
for j in range(current_n_frame):
current_n_objs_in_frame = np.random.randint(1,5)
current_frame = np.random.choice(n_feature, current_n_objs_in_frame, replace=True)
current_frame += 1
current_objs_in_frame.append({'obj_class_ids': current_frame,
'obj_rois':0})
dataset_detection_video.append({'frames_info':current_objs_in_frame,
'class_id':current_class_id,
'reduced_fps':4,
'final_nframes':len(current_objs_in_frame)})
pickle.dump(dataset_detection_video, open('dataset_detection_video.pickle', 'wb'))
dataset_detection_video = pickle.load(open('dataset_detection_video.pickle', 'rb'))
'''
#-----------------------------------------------------------------
#--------------------------TRUE DATASET---------------------------
#-----------------------------------------------------------------
#=============loading data==============
pickle_path = './PersonalCare/pickle'

# Load each per-video pickle ONCE (the original comprehension unpickled every
# file twice) and drop videos whose activity label contains 'face'.
dataset_detection_video = []
for video_pickle in os.listdir(pickle_path):
    with open(os.path.join(pickle_path, video_pickle), 'rb') as f_handle:
        video = pickle.load(f_handle)
    if 'face' not in video['class_id']:
        dataset_detection_video.append(video)

# Map normalized activity labels to integer ids and replace the string
# class_id on every video with its integer id (0-based, in encounter order).
classlbl_to_classid = {}
classid = 0
for i in dataset_detection_video:
    classlbl = i['class_id'].lower().replace(' ','')
    if classlbl not in classlbl_to_classid:
        classlbl_to_classid[classlbl] = classid
        classid += 1
    i['class_id'] = classlbl_to_classid[classlbl]

classid_to_classlbl = {value:key for key,value in classlbl_to_classid.items()}

# filtering data -> videos must be at least 5 s
dataset_detection_video = [i for i in dataset_detection_video if (i['final_nframes']//i['reduced_fps']) >= 5]

# classes distribution
# BUG FIX: the original reused the stale `classlbl` left over from the
# mapping loop above, so every video was counted under a single label.
class_statistics = {}
for i in dataset_detection_video:
    classlbl = classid_to_classlbl[i['class_id']]
    if classlbl not in class_statistics:
        class_statistics[classlbl] = 1
    else:
        class_statistics[classlbl] += 1

# Replace the raw counts with (count, percentage) pairs.
# BUG FIX: the original rebound the loop variable `activity` instead of
# updating the dict entry, leaving class_statistics unchanged.
for activity in list(class_statistics):
    count = class_statistics[activity]
    class_statistics[activity] = (count, count*100/len(dataset_detection_video))

print('Video: %d' % len(dataset_detection_video))
print('Activities:')
print(classlbl_to_classid)
print('Activity distribution:')
print(class_statistics)
#============true parameters==========
# Number of distinct activity classes (size of the one-hot label vector).
max_class_id = 7 # y_true = activity
# Size of the bag-of-objects vector: number of detectable object classes
# (detector ids are 1-based, so id k maps to column k-1).
n_feature = 33 # bag-of-objects
'''
#==================BAG-OF-TF-IDFSUBSETOBJS===============
dataset_boo_video = []
mapping = {5:0,6:1,7:2,8:3,10:4,12:5,33:6}
for video in dataset_detection_video:
video_boo_matrix = np.zeros((video['final_nframes'],n_feature), dtype=np.uint8)
for index, frame in enumerate(video['frames_info']) :
boo = {}
for obj in frame['obj_class_ids']:
if obj in list(mapping.keys()):
if obj not in boo:
boo[obj] = 1
else:
boo[obj] += 1
for class_id_index, obj_freq in boo.items():
video_boo_matrix[index][mapping[class_id_index]] = obj_freq
video_boo_matrix = video_boo_matrix[~np.all(video_boo_matrix == 0, axis=1)]
dataset_boo_video.append({'class_id': video['class_id'],
'final_nframes': video['final_nframes'],
'reduced_fps':video['reduced_fps'],
'sequence': video_boo_matrix})
# filtro i video che hanno una sequence length minore del batch length
dataset_boo_video = [i for i in dataset_boo_video if i['sequence'].shape[0]>=9]
'''
#==================BAG-OF-OBJS===============
# Per-frame bag-of-objects encoding: row f of a video's matrix holds, for
# each of the n_feature object classes, how many instances the detector
# reported in frame f (detector ids are 1-based, hence the -1 offset).
dataset_boo_video = []
for video in dataset_detection_video:
    boo_matrix = np.zeros((video['final_nframes'], n_feature), dtype=np.uint8)
    for frame_idx, frame in enumerate(video['frames_info']):
        # Count occurrences of each detected object id in this frame.
        counts = {}
        for obj_id in frame['obj_class_ids']:
            counts[obj_id] = counts.get(obj_id, 0) + 1
        for obj_id, freq in counts.items():
            boo_matrix[frame_idx][obj_id - 1] = freq
    dataset_boo_video.append({'class_id': video['class_id'],
                              'final_nframes': video['final_nframes'],
                              'reduced_fps': video['reduced_fps'],
                              'sequence': boo_matrix})
#==============BATCHED BAG-OF-OBJS============
# Collapse every non-overlapping window of 9 frames into a single row by
# summing the per-frame bag-of-objects counts. Trailing frames that do not
# fill a whole window are dropped.
dataset_batchedboo_video = []
for video in dataset_boo_video:
    n_frame = video['final_nframes']
    n_batch = 9
    n_windows = int(n_frame / n_batch)
    batched_matrix = np.zeros((n_windows, n_feature))
    for w in range(n_windows):
        start = n_batch * w
        window = video['sequence'][start:start + n_batch, :]
        batched_matrix[w] = np.sum(window, axis=0)
    dataset_batchedboo_video.append({'class_id': video['class_id'],
                                     'final_nframes': video['final_nframes'],
                                     'reduced_fps': video['reduced_fps'],
                                     'sequence': batched_matrix})
# l = []
# for video_b in dataset_batchedboo_video:
# n_b = video_b['sequence'].shape[0]*video_b['sequence'].shape[1]
# l.append([(n_b-np.count_nonzero(video_b['sequence']))*100/n_b])
'''
from sklearn.cluster import KMeans
sequences = dataset_batchedboo_video[0]['sequence']
for i in range(1,len(dataset_batchedboo_video)):
sequences = np.vstack((sequences,dataset_batchedboo_video[i]['sequence']))
print(sequences.shape)
print(np.unique(sequences,axis=0).shape)
kmeans = KMeans(n_clusters=200, random_state=0, n_jobs=-1).fit(sequences)
labels = list(kmeans.labels_)
codebook = list(kmeans.cluster_centers_)
for video in dataset_batchedboo_video:
curr_seq_len = video['sequence'].shape[0]
curr_labels = labels[:curr_seq_len]
for j in range(curr_seq_len):
video['sequence'][j,:] = codebook[curr_labels[j]]
labels = labels[curr_seq_len:]
'''
'''
#================AVG-SPEED and AVG-VELOCITY=========================
def inside(start, end, c_start, c_end):
frame_batch_range = set(range(start,end+1))
contiguous_range = set(range(c_start, c_end+1))
if len(frame_batch_range.intersection(contiguous_range)) > 0:
return 1
else:
return 0
def centroid_roi(roi):
return (roi[2]+roi[0])/2, (roi[3]+roi[1])/2
dataset_batchedvelocity_video, dataset_batchedspeed_video, prova = [], [], []
for video in dataset_detection_video:
# costruzione della struttura dati contenente i centroidi degli oggetti nei frame
centroids_list = []
for frame in video['frames_info']:
centroids_list.append([[] for _ in range(33)])
objs = frame['obj_class_ids']
rois = frame['obj_rois']
for i in range(objs.shape[0]):
curr_obj_roi = rois[i]
curr_obj_id = objs[i]-1
(x, y) = centroid_roi(curr_obj_roi)
centroids_list[-1][curr_obj_id].append((int(x),int(y)))
# encoding di centroids_list in una binary matrix
# da usare dopo per ottenere objid_to_contiguous_intervals
n = video['final_nframes']
all_objs = set({})
for i in range(n):
objs = video['frames_info'][i]['obj_class_ids']
all_objs = all_objs.union(set(objs))
all_objs = sorted(list(all_objs))
binary_sequence = np.zeros((len(centroids_list),33), dtype=np.uint8)
for i in all_objs:
for index,j in enumerate(centroids_list):
if len(j[i-1]) != 0: #basta che sia presente almeno una volta
binary_sequence[index,i-1] = 1
#img = Image.fromarray(binary_sequence.astype(np.uint8)*255)
#img.show()
# costruzione di objid_to_contiguous_intervals
binary_sequence = np.vstack([binary_sequence,np.repeat(2,33)])
objid_to_contiguous_intervals = {}
for i in all_objs:
contiguous_intervals = []
t_zero, t_uno = 2, 2
for index,curr_value in enumerate(binary_sequence[:,i-1]):
t_due = t_uno
t_uno = t_zero
t_zero = curr_value
if (t_due,t_uno,t_zero)==(0,1,1) or (t_due,t_uno,t_zero)==(2,1,1):
temp=[]
temp.append(index-1)
elif (t_due,t_uno,t_zero)==(1,1,0) or (t_due,t_uno,t_zero)==(1,1,2):
temp.append(index-1)
temp.append(temp[1]-temp[0]+1)
contiguous_intervals.append(list(temp))
objid_to_contiguous_intervals[i] = contiguous_intervals
# costruzione di objid_to_listavgspeedincontiguous
# calcolo della avg speed per ogni continguo sfruttando objid_to_contiguous_intervals
objid_to_listavgspeedincontiguous = {}
for i in objid_to_contiguous_intervals.keys():
if len(objid_to_contiguous_intervals[i])>0:
objid_to_listavgspeedincontiguous[i] = []
curr_obj_contiguous_list = objid_to_contiguous_intervals[i]
for j in curr_obj_contiguous_list:
coord_list = []
start_frame = j[0]
end_frame = j[1]
frame_length = j[2]
start_coord = (centroids_list[j[0]][i-1][0], 0) #se ce n'è più di uno seleziona il primo
coord_list.append(start_coord)
for k in range(start_frame+1,end_frame+1):
temp = []
for index,next_centroid in enumerate(centroids_list[k][i-1]): #se ce n'è più di uno seleziona quello più vicino
euc_dist = np.sqrt(np.power(next_centroid[0]-coord_list[-1][0][0], 2) + np.power(next_centroid[1]-coord_list[-1][0][1], 2))
#print(euc_dist)
temp.append((index, euc_dist))
temp.sort(key=lambda x: x[1])
coord_list.append((centroids_list[k][i-1][temp[0][0]], coord_list[-1][1]+temp[0][1]))
#print(coord_list)
objid_to_listavgspeedincontiguous[i].append((coord_list[0][0], coord_list[-1][0], coord_list[-1][1]/frame_length, frame_length))
# a questo punto abbiamo 2 strutture dati:
# 1. objid_to_contiguous_intervals (dict)
# .keys = objid (int)
# .value = start, end, length degli intervalli contigui (list of lists)
# 2. objid_to_listavgspeedincontiguous (dict)
# .keys = objid (int)
# .value = speed nel corrispettivo contiguo
# sfruttando queste due vengono costruite le speed features
n_frame = video['final_nframes']
n_batch = 9
video_batchedspeed_matrix = np.zeros((int(n_frame/n_batch),n_feature))
video_batchedvelocity_matrix = np.zeros((int(n_frame/n_batch),n_feature*2))
iteration = int(n_frame/n_batch)
for i in range(iteration):
temp = {}
start_frame_batch = n_batch*i
end_frame_batch = (n_batch*i)+n_batch
for objid, contiguous_list in objid_to_contiguous_intervals.items():
for c_index, contiguous in enumerate(contiguous_list):
if inside(start_frame_batch, end_frame_batch, contiguous[0], contiguous[1]):
temp[objid] = (np.subtract(objid_to_listavgspeedincontiguous[objid][c_index][1],objid_to_listavgspeedincontiguous[objid][c_index][0])/objid_to_listavgspeedincontiguous[objid][c_index][3], objid_to_listavgspeedincontiguous[objid][c_index][2]) # sostituisci sempre con l'ultimo
#prova.append([i, objid, start_frame_batch, end_frame_batch, contiguous[0], contiguous[1], objid_to_listavgspeedincontiguous[objid][c_index]])
for objid, values in temp.items():
video_batchedspeed_matrix[i][objid-1] = values[1]
video_batchedvelocity_matrix[i][objid-1] = values[0][0]
video_batchedvelocity_matrix[i][objid] = values[0][1]
dataset_batchedspeed_video.append({'class_id': video['class_id'],
'final_nframes': video['final_nframes'],
'reduced_fps':video['reduced_fps'],
'sequence': video_batchedspeed_matrix})
dataset_batchedvelocity_video.append({'class_id': video['class_id'],
'final_nframes': video['final_nframes'],
'reduced_fps':video['reduced_fps'],
'sequence': video_batchedvelocity_matrix})
# minimum_speed = 0.0
# maximum_speed = 100.0
# for video in dataset_batchedspeed_video:
# video['sequence'] = np.where(video['sequence']>maximum_speed,maximum_speed,video['sequence'])
s = b = np.zeros((1,33))
max_s = np.zeros((33,))
max_b = np.zeros((33,))
l = []
for video_s, video_b in zip(dataset_batchedspeed_video, dataset_batchedboo_video):
n_s = video_s['sequence'].shape[0]*video_s['sequence'].shape[1]
n_b = video_b['sequence'].shape[0]*video_b['sequence'].shape[1]
l.append([(n_s-np.count_nonzero(video_s['sequence']))*100/n_s, (n_b-np.count_nonzero(video_b['sequence']))*100/n_b])
s=s+np.count_nonzero(video_s['sequence'], axis=0)
b=b+np.count_nonzero(video_b['sequence'], axis=0)
for index,i in enumerate(np.max(video_s['sequence'], axis=0).astype(int)):
if i>=max_s[index]:
max_s[index] = i
for index,i in enumerate(np.max(video_b['sequence'], axis=0).astype(int)):
if i>=max_b[index]:
max_b[index] = i
'''
'''
#================BATCHED BOO & NORM-SPEED MULTIPL======================
# # speed normalizing and frequency weighting
# for video_s, video_b in zip(dataset_batchedspeed_video, dataset_batchedboo_video):
# #video_s['sequence'] = video_s['sequence']/maximum_speed
# video_s['sequence'] = np.concatenate((video_s['sequence'],video_b['sequence']), axis=1)
'''
'''
#==================CO-OCC FREQ OBJS================
dataset_cooc_video = []
for video in dataset_boo_video:
n_frame = video['final_nframes']
n_batch = 3*video['reduced_fps']
iteration = int(n_frame//(n_batch//2))
cooc_flat_seq_matrix = np.zeros((iteration, (n_feature-1)*(n_feature+1-1)//2), dtype=np.uint8)
for i in range(iteration):
if n_batch+((n_batch//2)*i) <= n_frame:
end = int(n_batch+((n_batch//2)*i))
else:
end = n_frame
frame_batch = video['sequence'][int(n_batch//2)*i:end,:]
frame_batch = np.where(frame_batch>0,1,0)
cooc_tri_upper = np.triu(frame_batch.T @ frame_batch, 1)
cooc_flat_index = 0
for j in range(n_feature-1):
for k in range((j+1),n_feature):
cooc_flat_seq_matrix[i, cooc_flat_index] = cooc_tri_upper[j,k]
cooc_flat_index+=1
dataset_cooc_video.append({'class_id': video['class_id'],
'final_nframes': video['final_nframes'],
'reduced_fps':video['reduced_fps'],
'sequence': cooc_flat_seq_matrix})#np.where(cooc_flat_seq_matrix>0,1,0)
from sklearn.metrics.pairwise import cosine_similarity
results,mean,percent = [],[],[]
for video in dataset_cooc_video:
results.append([cosine_similarity(video['sequence'][i+1].reshape(1,-1),video['sequence'][i].reshape(1,-1))[0][0] for i in range(video['sequence'].shape[0]-1)])
mean.append(sum(results[-1])/len(results[-1]))
nonzero = 0.0
for i in video['sequence']:
if np.count_nonzero(i) == 0:
nonzero += 1.0
percent.append(nonzero/video['sequence'].shape[0]*100)
'''
'''
dataset_cooc_video = []
for video in dataset_boo_video:
n_frame = video['final_nframes']
n_batch = 30
video_batchedboo_matrix = np.zeros((int(n_frame/n_batch),n_feature))
iteration = int(n_frame/n_batch)
cooc_flat_seq_matrix = np.zeros((iteration, (n_feature-1)*(n_feature+1-1)//2), dtype=np.uint8)
for i in range(iteration):
frame_batch = video['sequence'][(n_batch*i):((n_batch*i)+n_batch),:]
frame_batch = np.where(frame_batch>0,1,0)
cooc_tri_upper = np.triu(frame_batch.T @ frame_batch, 1)
cooc_flat_index = 0
for j in range(n_feature-1):
for k in range((j+1),n_feature):
cooc_flat_seq_matrix[i, cooc_flat_index] = cooc_tri_upper[j,k]
cooc_flat_index+=1
dataset_cooc_video.append({'class_id': video['class_id'],
'final_nframes': video['final_nframes'],
'reduced_fps':video['reduced_fps'],
'sequence': cooc_flat_seq_matrix})#np.where(cooc_flat_seq_matrix>0,1,0)
'''
#============final transformation (sequence and one_hot)===========
# Build the model inputs: X holds, per video, the list of per-timestep
# feature vectors; y holds one-hot class labels; seq_len the number of
# timesteps per video (needed later for sequence padding / dynamic_rnn).
X,y,seq_len=[],[],[]
for index,i in enumerate(dataset_batchedboo_video):
    X.append([frame_detection.tolist() for frame_detection in i['sequence']])
    # class_id is 1-based, hence the -1 when flipping the one-hot position
    one_hot = [0]*max_class_id
    one_hot[i['class_id']-1] = 1
    y.append(one_hot)
    seq_len.append(i['sequence'].shape[0])
#==========splitting==============
# Fixed-seed 80/20 split; stratification was tried and left disabled.
X_train, X_test, y_train, y_test, seq_len_train, seq_len_test = \
    train_test_split(X,y,seq_len,test_size=0.2, random_state=0)#, stratify=y)
print('Train len %d' % len(X_train))
print('Test len %d' % len(X_test))
# =====dataset statistics=====
# Histograms of sequence length (bins of 50 timesteps) for the whole set
# and for each split, to eyeball whether the split is representative.
min_n_frame = min(seq_len)
max_n_frame = max(seq_len)
print('Full')
print(np.histogram([i['sequence'].shape[0] for i in dataset_batchedboo_video], bins=range(min_n_frame,max_n_frame+50,50)))
print('Train')
print(np.histogram(seq_len_train, bins=range(min_n_frame,max_n_frame+50,50)))
print('Test')
print(np.histogram(seq_len_test, bins=range(min_n_frame,max_n_frame+50,50)))
#-----------------------------------------------------------------------------
#------------------------------------NETWORK----------------------------------
#-----------------------------------------------------------------------------
# NN params
lstm_in_cell_units=20 # design choice (hyperparameter): LSTM hidden-state size
# training params
n_epoch = 100
train_batch_size=32
# "fake" batch sizes == the whole train/test set; used only for the
# single-pass evaluation runs, never for optimisation.
train_fakebatch_size = len(X_train)
test_fakebatch_size = len(X_test)
learning_rate=0.0005
#learning_rate=0.05
# ********************************************************
# IMPORTANT: handling of the last batch remainder -- the integer division
# below drops the final partial batch, so up to train_batch_size-1 training
# examples are never visited within an epoch.
n_iteration = len(X_train)//train_batch_size
print(n_iteration)
# *********************************************************
zipped_train_data = list(zip(X_train,y_train,seq_len_train))
zipped_test_data = list(zip(X_test,y_test,seq_len_test))
#=========================graph===========================
#tf.set_random_seed(1234)
# Batch size is a runtime placeholder because training and the full-set
# evaluation passes use different sizes for the LSTM zero state.
lstmstate_batch_size = tf.placeholder(tf.int32, shape=[])
# dataset
train_data = tf.data.Dataset.from_generator(lambda: zipped_train_data, (tf.int32, tf.int32, tf.int32))
test_data = tf.data.Dataset.from_generator(lambda: zipped_test_data, (tf.int32, tf.int32, tf.int32))
# shuffle (whole) train_data
train_data = train_data.shuffle(buffer_size=len(X_train))
# obtain a padded_batch (recall that we are working with sequences!)
shape = ([None,len(X[0][0])],[max_class_id],[])
train_data_batch = train_data.padded_batch(train_batch_size, padded_shapes=shape)
# fake batches, they're the entire train and test dataset -> just needed to pad them!
# they will be used in the validation phase (not for training)
train_data_fakebatch = train_data.padded_batch(train_fakebatch_size, padded_shapes=shape)
test_data_fakebatch = test_data.padded_batch(test_fakebatch_size, padded_shapes=shape)
# iterator structure(s) - it is needed to make a reinitializable iterator (TF docs) -> dataset parametrization (without placeholders)
iterator = tf.data.Iterator.from_structure(train_data_batch.output_types, train_data_batch.output_shapes)
# this is the op that makes the magic -> dataset parametrization
train_iterator_init = iterator.make_initializer(train_data_batch)
faketrain_iterator_init = iterator.make_initializer(train_data_fakebatch)
faketest_iterator_init = iterator.make_initializer(test_data_fakebatch)
# so, to clarify, this is a "parameterized" op and its output depends on the particular iterator
# initialization op executed before it during the session
# therefore from now on all the ops in the graph are "parameterized" -> not specialized on train or test
# IN OTHER WORDS, THE DATASET NOW BECOMES A PARAMETER THAT WE CAN SET DURING THE SESSION PHASE
# THANKS TO THE EXECUTION OF THE OP train_iterator_init OR test_iterator_init BEFORE THE EXECUTION OF THE OP next_batch
next_batch = iterator.get_next()
# split the batch in X, y, seq_len
# they will be singularly used in different ops
current_X_batch = tf.cast(next_batch[0], dtype=tf.float32)
current_y_batch = next_batch[1]
current_seq_len_batch = tf.reshape(next_batch[2], (1,-1))[0]
# lstm
lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(lstm_in_cell_units, state_is_tuple=True)
#state_c, state_h = lstm_cell.zero_state(lstmstate_batch_size, tf.float32)
#initial_state = tf.nn.rnn_cell.LSTMStateTuple(tf.Variable(state_c, trainable=False), tf.Variable(state_h, trainable=False))
initial_state = lstm_cell.zero_state(lstmstate_batch_size, tf.float32)
_, states = tf.nn.dynamic_rnn(lstm_cell, current_X_batch, initial_state=initial_state, sequence_length=current_seq_len_batch, dtype=tf.float32)
# last_step_output done right (each instance will have it's own seq_len therefore the right last ouptut for each instance must be taken)
#last_step_output = tf.gather_nd(outputs, tf.stack([tf.range(tf.shape(current_X_batch)[0]), current_seq_len_batch-1], axis=1))
# logits
# hidden_state == output, so last_step_output is redundant: thanks to
# current_seq_len_batch, dynamic_rnn already returns the hidden state of the
# correct (last valid) timestep of each sequence.
# `states` is the (cell_state, hidden_state) tuple of that last timestep
# (according to current_seq_len_batch); states[1] is the hidden state.
logits = tf.layers.dense(states[1], units=max_class_id)
# loss
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=current_y_batch))
# optimization (only during training phase (OBVIOUSLY))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)
# ops for accuracy and confusion matrix
y_pred = tf.argmax(logits, 1)
y_true = tf.argmax(current_y_batch, 1)
correct_pred = tf.equal(y_pred, y_true)
accuracy = tf.reduce_mean(tf.cast(correct_pred, dtype=tf.float32))
init = tf.global_variables_initializer()
# debugging & training visualization
all_variables = tf.global_variables()
for i in all_variables:
    tf.summary.histogram(i.name.replace(':','_'), i)
summaries = tf.summary.merge_all()
losses = {
          'train_loss':[],
          'train_acc':[],
          'test_loss':[],
          'test_acc':[]
          }
#==========================session==========================
with tf.Session() as sess:
    sess.run(init)
    # TensorBoard writer receiving the per-epoch weight histograms.
    writer = tf.summary.FileWriter("variable_histograms")
    #***************** TRAINING ************************
    for i in range(n_epoch):
        writer.add_summary(sess.run(summaries), global_step=i)
        start_epoch_time = time.time()
        print('\nEpoch: %d/%d' % ((i+1), n_epoch))
        sess.run(train_iterator_init)
        for j in range(n_iteration):
            start_batch_time = time.time()
            _, batch_loss = sess.run((optimizer, loss), feed_dict={lstmstate_batch_size:train_batch_size})
            batch_time = str(datetime.timedelta(seconds=round(time.time()-start_batch_time, 2)))
            print('Batch: %d/%d - Loss: %f - Time: %s' % ((j+1), n_iteration, batch_loss, batch_time))
#            print('Batch')
#            results = sess.run((#current_X_batch,
#                                #current_y_batch,
#                                #current_seq_len_batch,
#                                states), feed_dict={lstmstate_batch_size:train_batch_size})
#            print(results[0][1])
        #****************** VALIDATION ******************
        epoch_time = str(datetime.timedelta(seconds=round(time.time()-start_epoch_time, 2)))
        print('Tot epoch time: %s' % (epoch_time))
        # End of every epoch: evaluate loss/accuracy on the full train and
        # test sets, each in a single "fake batch" forward pass.
        sess.run(faketrain_iterator_init)
        train_loss, train_acc = sess.run((loss, accuracy),feed_dict={lstmstate_batch_size:train_fakebatch_size})
        print('\nTrain_loss: %f' % train_loss)
        print('Train_acc: %f' % train_acc)
        sess.run(faketest_iterator_init)
        test_loss, test_acc = sess.run((loss, accuracy),feed_dict={lstmstate_batch_size:test_fakebatch_size})
        print('Test_loss: %f' % test_loss)
        print('Test_acc: %f' % test_acc)
        losses['train_loss'].append(train_loss)
        losses['train_acc'].append(train_acc)
        losses['test_loss'].append(test_loss)
        losses['test_acc'].append(test_acc)
    # Final evaluation: confusion matrix and per-class report on the test set.
    sess.run(faketest_iterator_init)
    test_y_true, test_y_pred = sess.run((y_true, y_pred),feed_dict={lstmstate_batch_size:test_fakebatch_size})
    print()
    print(classid_to_classlbl)
    print()
    print(confusion_matrix(test_y_true, test_y_pred))
    print()
    print(classification_report(test_y_true, test_y_pred))
    print()
    # NOTE(review): this indexes the *full-dataset* seq_len with positions
    # that refer to the test split only, and n_batch is defined solely inside
    # the commented-out sections above -- both look wrong; confirm intent.
    misclassified_nframe = [seq_len[i[0]]*n_batch for i in np.argwhere(np.equal(test_y_true,test_y_pred)==False)]
    print(misclassified_nframe)
    # BUGFIX: the original passed a bare open() handle to pickle.dump and
    # never closed it; a context manager guarantees the file is flushed/closed.
    with open('losses.pickle', 'wb') as losses_file:
        pickle.dump(losses, losses_file)
#TODO
# + shuffle the batch
# double check the class distro of the batch!!!! It must be similar to the whole dataset class distro!!!!
# + handle padding loss mask
# rifletti su dynamic vs static e sul fatto del padding
# tu l'hai fatto basandoti sulla sequenza che ha la lunghezza massima
# TRA TUTTE QUELLE PRESENTI NEL DATASET e non TRA TUTTE QUELLE ALL'INTERNO DI UN BATCH
# SEE GERON TEXTBOOK
# + tanh default inner LSTM state activation (known to be the best for LSTMs)
# check batch normalization ???
# GLOROT/HE weights initialization
# add accuracy op (look at the link)
# take a look at the graph
# add the Tensorboard loss function handler
# add the evaluation mode
#
# tune the hyperparameters?
| [
"luca.pierdicca@gmail.com"
] | luca.pierdicca@gmail.com |
47ee46aa5db53692f70c96faa5a2d6d1f3588a9d | 59611a23bae874752b349c913e23a007eb9ad718 | /controllers/default.py | 37e3f038e46567b87300b7ef53b4efcd743a6010 | [
"MIT"
] | permissive | vahidnouri/Parkinson_registry | b60504cd19901073c3727a0652f96a6ac9e4d378 | ecd993d456807f66e8e7b6ada0e07e2f8b14af23 | refs/heads/master | 2020-07-29T20:01:50.509872 | 2020-06-30T08:11:29 | 2020-06-30T08:11:29 | 209,942,661 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,425 | py | # -*- coding: utf-8 -*-
# NOTE(review): hard-coded internal network share; consider moving to config.
IP = '192.168.25.32\\Genetic Drive'


def permission_denied():
    """Standard payload returned when the logged-in user lacks the required role.

    Was a lambda assignment (PEP 8 E731); converted to a def with no change
    in behaviour or call sites.
    """
    return dict(msg='permission denied!')
@auth.requires_login()
def index():
    """Main patient list: a grid over principal_info with per-row links to
    every registry section (reception, patient, physician, lab, gene panels).
    """
    # Receptionists get unsigned grid URLs; everyone else gets signed
    # (tamper-proof) URLs.
    if permit('reception'):
        user_signature = False
    else:
        user_signature = True
    msg = None
    user = auth.user
    # Only admin users may delete rows from the grid.
    deletable = auth.user.admin_
    # CSV export button posting to default/output.csv (handled by output()).
    export = FORM(
        INPUT(_type='submit', _value='CSV', _class='btn btn-sm mt-1 btn-outline-secondary float-right'),
        _action=URL('default','output.csv')
    )
    if request.extension == 'csv':
        # NOTE(review): `csv` is not defined anywhere in this controller and
        # the stdlib module is never imported -- this branch looks like it
        # should call output() instead; confirm before relying on it.
        return csv()
    # One action link per registry section, labelled in Persian; each link
    # carries the row's id_code as the URL argument.
    links = [
        lambda r: A('پذیرش', _href=URL("default", "reception_section", args=[r.id_code])),
        lambda r: A('بیمار', _href=URL("default", "patient_section", args=[r.id_code])),
        lambda r: A('پزشک', _href=URL("default", "physician_section", args=[r.id_code])),
        lambda r: A('آزمایشگاه', _href=URL("default", "lab_section", args=[r.id_code])),
        lambda r: A('ژنها 1 تا 10', _href=URL("default", "genes_1_10", args=[r.id_code])),
        lambda r: A('ژنها 11 تا 20', _href=URL("default", "genes_11_20", args=[r.id_code])),
        lambda r: A('ژنها 21 تا 30', _href=URL("default", "genes_21_30", args=[r.id_code])),
        lambda r: A('ژنها 31 تا 40', _href=URL("default", "genes_31_40", args=[r.id_code])),
        lambda r: A('ژنها 41 تا 50', _href=URL("default", "genes_41_50", args=[r.id_code])),
        lambda r: A('ژنها 51 تا 60', _href=URL("default", "genes_51_60", args=[r.id_code])),
        lambda r: A('ژنها 61 تا 70', _href=URL("default", "genes_61_70", args=[r.id_code])),
        lambda r: A('ژنها 71 تا 80', _href=URL("default", "genes_71_80", args=[r.id_code])),
        lambda r: A('ژنها 81 تا 90', _href=URL("default", "genes_81_90", args=[r.id_code])),
        lambda r: A('ژنها 91 تا 100', _href=URL("default", "genes_91_100", args=[r.id_code])),
    ]
    db.principal_info.id.readable = False
    grid = SQLFORM.grid(
        db.principal_info,
        advanced_search = False,
        deletable=deletable,
        csv=False,
        user_signature = user_signature,
        links = links,
    )
    return locals()
@auth.requires_login()
def reception_section():
    """Reception-section form for the patient whose id_code is request.args(0)."""
    # The 'reception' role (or admin, see permit()) unlocks editing;
    # everyone else gets a read-only form.
    editable = permit('reception')
    msg = None
    tbl = db.reception_section
    record = db(tbl.id_code == request.args(0)).select().first()
    form = SQLFORM(tbl, record)
    form.vars.id_code = request.args(0)
    if editable:
        processed = form.process()
        if processed.accepted:
            msg = 'success'
            redirect(URL("default", "index"))
        elif processed.errors:
            msg = form.errors
    return locals()
@auth.requires_login()
def patient_section():
    """Patient-section form for the patient whose id_code is request.args(0)."""
    # Only users holding the 'patient' role (or admins) may submit changes.
    editable = permit('patient')
    msg = None
    tbl = db.patient_section
    record = db(tbl.id_code == request.args(0)).select().first()
    tbl.id.readable = False
    form = SQLFORM(tbl, record)
    form.vars.id_code = request.args(0)
    if editable:
        if form.process().accepted:
            # BUGFIX: removed leftover debug `print(form.vars)` that wrote
            # submitted form data to the server console on every save.
            msg = 'success'
            redirect(URL("default", "index"))
        elif form.errors:
            msg = form.errors
    return locals()
@auth.requires_login()
def physician_section():
    """Physician-section form for the patient whose id_code is request.args(0)."""
    # Only users holding the 'physician' role (or admins) may submit changes.
    editable = permit('physician')
    msg = None
    tbl = db.physician_section
    record = db(tbl.id_code == request.args(0)).select().first()
    tbl.id.readable = False
    # BUGFIX: the original tested `db.reception_section.gender == "مرد"`,
    # which compares the *Field object* to a string. In web2py that
    # expression builds a Query object, which is always truthy, so
    # check_gender was unconditionally True. Read the gender from the
    # patient's actual reception record instead.
    reception = db(db.reception_section.id_code == request.args(0)).select().first()
    check_gender = bool(reception and reception.gender == "مرد")
    form = SQLFORM(tbl, record)
    form.vars.id_code = request.args(0)
    if editable:
        if form.process().accepted:
            msg = 'success'
            redirect(URL("default", "index"))
        elif form.errors:
            msg = form.errors
    return locals()
@auth.requires_login()
def lab_section():
    """Laboratory-section form for the patient whose id_code is request.args(0)."""
    # The 'lab' role (or admin, see permit()) unlocks editing.
    editable = permit('lab')
    msg = None
    tbl = db.lab_section
    record = db(tbl.id_code == request.args(0)).select().first()
    tbl.id.readable = False
    form = SQLFORM(tbl, record)
    form.vars.id_code = request.args(0)
    if editable:
        processed = form.process()
        if processed.accepted:
            msg = 'success'
            redirect(URL("default", "index"))
        elif processed.errors:
            msg = form.errors
    return locals()
def _gene_section(tbl):
    """Shared implementation behind the ten genes_* controllers.

    The original file repeated this body verbatim ten times, differing only
    in the table used. web2py does not expose functions whose name starts
    with an underscore, so this helper is not reachable as an action.

    Args:
        tbl: one of the gene-panel tables (db.genes_1_10 ... db.genes_91_100).

    Returns:
        dict with the same keys the original ``return locals()`` exposed to
        the views: editable, msg, tbl, record, form.
    """
    # The 'genes' role (or admin, see permit()) unlocks editing.
    editable = permit('genes')
    msg = None
    # The patient identifier is passed as the first URL argument.
    record = db(tbl.id_code == request.args(0)).select().first()
    tbl.id.readable = False
    form = SQLFORM(tbl, record)
    form.vars.id_code = request.args(0)
    if editable:
        if form.process().accepted:
            msg = 'success'
            redirect(URL("default", "index"))
        elif form.errors:
            msg = form.errors
    return dict(editable=editable, msg=msg, tbl=tbl, record=record, form=form)


@auth.requires_login()
def genes_1_10():
    """Edit form for genes 1-10 of the patient in request.args(0)."""
    return _gene_section(db.genes_1_10)


@auth.requires_login()
def genes_11_20():
    """Edit form for genes 11-20 of the patient in request.args(0)."""
    return _gene_section(db.genes_11_20)


@auth.requires_login()
def genes_21_30():
    """Edit form for genes 21-30 of the patient in request.args(0)."""
    return _gene_section(db.genes_21_30)


@auth.requires_login()
def genes_31_40():
    """Edit form for genes 31-40 of the patient in request.args(0)."""
    return _gene_section(db.genes_31_40)


@auth.requires_login()
def genes_41_50():
    """Edit form for genes 41-50 of the patient in request.args(0)."""
    return _gene_section(db.genes_41_50)


@auth.requires_login()
def genes_51_60():
    """Edit form for genes 51-60 of the patient in request.args(0)."""
    return _gene_section(db.genes_51_60)


@auth.requires_login()
def genes_61_70():
    """Edit form for genes 61-70 of the patient in request.args(0)."""
    return _gene_section(db.genes_61_70)


@auth.requires_login()
def genes_71_80():
    """Edit form for genes 71-80 of the patient in request.args(0)."""
    return _gene_section(db.genes_71_80)


@auth.requires_login()
def genes_81_90():
    """Edit form for genes 81-90 of the patient in request.args(0)."""
    return _gene_section(db.genes_81_90)


@auth.requires_login()
def genes_91_100():
    """Edit form for genes 91-100 of the patient in request.args(0)."""
    return _gene_section(db.genes_91_100)
@auth.requires_login()
def output():
    """Export the whole registry as CSV: one row per patient in
    principal_info, columns joined from every section/gene table, matched
    on the shared id_code key. Admin-only.

    Cell values are sanitised by replacing ',', '،' and '-' with '_' so
    they cannot break the comma-separated format.
    """
    if not permit('admin_'):
        return permission_denied()
    # (table, first-exported-field-index): index 1 skips 'id'; index 2 skips
    # 'id' and 'id_code' so the shared key column appears only once.
    tables = [
        (db.principal_info, 1),
        (db.reception_section, 2),
        (db.patient_section, 2),
        (db.physician_section, 2),
        (db.lab_section, 2),
        (db.genes_1_10, 2),
        (db.genes_11_20, 2),
        (db.genes_21_30, 2),
        (db.genes_31_40, 2),
        (db.genes_41_50, 2),
        (db.genes_51_60, 2),
        (db.genes_61_70, 2),
        (db.genes_71_80, 2),
        (db.genes_81_90, 2),
        (db.genes_91_100, 2),
    ]
    field_name = [t[0].fields[t[1]:] for t in tables]
    labels = [[f.label for f in t[0]][t[1]:] for t in tables]
    # Accumulate rows in a list and join once at the end: O(n) instead of
    # the original quadratic repeated string concatenation.
    rows = [','.join(','.join(lbls) for lbls in labels)]
    for p in db(tables[0][0]).select():
        rec = []
        id_code = p.get('id_code')
        for t_index, (tbl, _skip) in enumerate(tables):
            r = db(tbl.id_code == id_code).select().first()
            for f in field_name[t_index]:
                if r:
                    v = r.get(f, '')
                    v = '' if v is None else str(v)
                    v = v.replace(',', '_').replace('،', '_').replace('-', '_')
                    rec.append(v)
                else:
                    # Patient has no record in this section: emit empty cells.
                    rec.append('')
        rows.append(','.join(rec))
    return '\n'.join(rows)
def user():
    """Expose web2py's built-in auth controller (login/logout/register/...)."""
    return {'form': auth()}
def permit(role):
    """Return True when the logged-in user holds *role* or has the admin_ flag."""
    account = db.auth_user(auth.user.get("id"))
    if account.get(role):
        return True
    # Admins are implicitly granted every role.
    return bool(account.get('admin_'))
@auth.requires_login()
def userman():
    """Admin-only management grid over the auth_user table."""
    if permit('admin_'):
        msg = None
        grid = SQLFORM.grid(db.auth_user)
        return locals()
    return permission_denied()
"noreply@github.com"
] | noreply@github.com |
4d56eaa289df490614282d92d296eda8adb8a58a | c43c88015f9498aed5f3b5a339d245c31781444e | /Free/plm/report/component_report.py | cbebbbcac4378084e142a272266da61b6cff36e8 | [] | no_license | mulaudzicalvin/perpul | 65106d41d5197fea17628ac1a7fa7e581d29d75e | 00e3a5ee1771d2e09a48460ca23c2e9c2ef507d6 | refs/heads/master | 2020-03-09T18:39:33.131420 | 2018-02-05T05:17:36 | 2018-02-05T05:17:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,346 | py | # -*- coding: utf-8 -*-
##############################################################################
#
# OmniaSolutions, Your own solutions
# Copyright (C) 2010 OmniaSolutions (<http://omniasolutions.eu>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from .book_collector import BookCollector
from .book_collector import packDocuments
from datetime import datetime
from dateutil import tz
import base64
from flectra import _
from flectra import api
from flectra import models
from flectra.exceptions import UserError
from flectra.addons.plm.report.book_collector import getBottomMessage
def getEmptyDocument():
    """Return the raw bytes of a minimal one-page placeholder PDF.

    Used when a product (and its traversed BoM children) has no linked
    documents to pack, so the report endpoint can still return a valid PDF
    payload. The literal below is the base64-encoded PDF file itself --
    treat it as opaque binary data.
    """
    return base64.b64decode(b"""JVBERi0xLjQKJcOkw7zDtsOfCjIgMCBvYmoKPDwvTGVuZ3RoIDMgMCBSL0ZpbHRlci9GbGF0ZURl
Y29kZT4+CnN0cmVhbQp4nG2NTQvCMBBE7/kVexYSZ2M2aSEErG0P3goBD+LNj5tgL/59t/QgiCzM
DjxoA8f0Ni8CQZu04jw1gV1D882cNvRcmd78MF01EhWlGFyieqXtyOQ91fs5gwtneOyK1b9mgCAi
lks9mqGa6a+Lgw7/uJKKBM1ibIv1GfulShHJ6EpKGQf0GDCiLzZkhmLm785EH25LLk8KZW5kc3Ry
ZWFtCmVuZG9iagoKMyAwIG9iagoxNTAKZW5kb2JqCgo1IDAgb2JqCjw8L0xlbmd0aCA2IDAgUi9G
aWx0ZXIvRmxhdGVEZWNvZGUvTGVuZ3RoMSAxMDU5Mj4+CnN0cmVhbQp4nOV5f1hb15XgvffptwR6
EkIICdCTH49fkhBG/LBjMM8gQAQMGIwjsAHJSPxwDMKS7MROGpM28Q9S1+40zSSTr60nm2nTjSd+
OJ4Jne7GpHG6O9O69X7pzkybeuLOZOfrbEOdzib90sSGPfdJYDuTdubb2e/bP/YhvXfOueece+65
55x7JkolDsaQAc0hBolj05FZsVzgEEI/QAibxw6luLPi/TzA1xEiqvHZieky/9u/Qoj5BUJq5cT+
w+M/P1z+EkK69xEyPz8Zi0SbXvrreoQKnwQddZNACK0cVgMO+lDx5HTqwT7Vww2AAz8K7o+PRa5X
/NdyhIo2AV4yHXlwdrfyUQbwMODcTGQ69j9KfgXzF80hpJmdjSdTb6DiVWCl8txsIja7f+FUFKFS
FuzbDjQMf/QyAKiiOGEUSpVao9XpDVnZRtaE/j+7lKdQLoorG5Exc7/rYs6hfHQBodX3KHb7vtK1
+vH/TSs08h2bsQu9iX6L/Zigh3EOGkJRFEcPo3nsv5Mbb8FdMPY59DMYn0GnsPqztWIXLsFZoGFI
5vscuoL+/jMZD6DX0Pt3zwG0p9Dz6Byl4zbQ9SR+A3fhKOigmrvgtuezVJF9cDsN3wfhPk1whnoD
MuZv0R7yGnkXnUEvZezLRu/hADw7wcJXMgo6Uf+/ULoIVujQBDqMjoG0fCkbb/4UaVf/F+i6F70O
hA70EDq1LvERludgdGh1nXbfuo1R8gTOwSXo6+gjFFCa8EWExNbB0MDO/r4dvT3d27s67+0Itre1
Blqat4lNWxsbttyzeVN9Xe3GKl+l11NWWiIU8xtcTpvFxBqzs/Q6rUatUioYWK2nlW8Lc1JJWFKU
8MGgl+J8BAiROwhhiQNS2908EheW2bi7OUXgHP8Up5jmFNc5Mcs1oAavh2vlOelKgOcW8dCOEMCn
AvwgJy3L8HYZVpTISBYgLhdIcK22yQAn4TDXKrUdmpxvDQdA34Je18K3xHReD1rQ6QHUAySV8bML
uGwrlgFS1nrPAkGaLDqtxAitkajUuyPUGnC4XINeT4eUzQfkIdQiq5RULZJaVslNUdPRE9yCZ2n+
i4ss2ht2G6J8NLInJDERkJ1nWufnj0smt1TOB6TyI+/aYOUxycMHWiU31drZtz5P5+0psaQUWJ6b
/xDBcvjl9+6mRDIUlcB+iCjYBu6dn2/jubb58HxkcXVuL8+x/PyCwTA/2woeRr0hkFpc/YsnHFLb
FwclNjyJ78kstq2vU8rZsTskEaGNm4wABT5NvGuTw2UaXOPp/V3DCBwB7gCfulx04U8simgvINLc
jlAa59BexwUk+tyDEgnTkaW1kdwBOjK3NrIuHuZhNzv7Q/OSQuiI8q3g4yci0txeiKd9dCt4Vsr+
jcPFz5tN3GbfoMzLgVUd0SlOUpaAW0DqTgGIFCoyz8pI9m/Sj2UHTFBiMnObeVBD9bTyreHM59Ck
DRRwXo8UdKe3fmdIEgMAiJHMHrUuVPlAIhKGLZoKyNsn+fhZycI3r+8nNat1qj8ki2TEJEuLhMJj
GSnJ1xqgM3Ot8+FA2gSqi98RehX5V68v1HCOl/2oBg0GKLO1BeKqpHU+FB2XnGFHFDJtnAs5XJI4
CBs8yIdigzTQwEPl12E6lzyjRFp2hjr7+c4dQ6FNGUPSA1SdQmj9lBo+5EirgZCTNIKGCxEHMwiM
LBC4NgD45ga4S2pBA18WHC5Taag2N3Ah7EBr3GCGVM61xgIZPorfpVRJw6kluKZNRVHQ0xJ0uAZd
6cvrITDMZSYGCQ11anBtiBGgEgCNgBqZRH1pozHPhfgYP8hPcpLYG6Jro+6RvZxxhuzzzF7tvAu7
w1ngJuSC4TWEOlNqczvudK7ULuPraPBTwx1rw9y8hu/sn6fK+YxCBJZ3SIiGsLjJ5JCzn+Yz3xaB
JIaMlvN5fkEUaS5P0rSd5zui83x/qEHmhgryOccROpcZdeLOnc1eDxSz5gUen9ixIOIT/UOhV1lo
oU7sDF0gmLSEmwcXimEs9CoHZ4VMJZRKiRThKEI19QGikfkdr4oIzcmjCpkg42OLGMk0zRoNo7FF
kqaxazQCNEWaJso0esEu2SbBx1C/W7ko3Z+HByfnw4M0xpEVPAIfLGF+K3iH37qAicog6fhYs6Tn
mym9idKb0nQVpashMrAVez1H5tlW/kObVz4cUYCekMoB6HjVqHIBI1/DBbUCLVcvqJQ/a7jAEADR
AkPJSkq+oFbhmw0XMKX7TS6T4DK5AoRbKcZPr0wqBz5+MaCgPQNG06vvKQqUXWgLekEsU/gsPlLj
xX4Ptniw0oPtZqw14yF2H0sYZ5FoMAaLijaOGBZXl8R2vSmIDKyBEIOhMdfX2NTY08hAZ1E2gnJx
7qBhykACZamyx8o+KFMYymzq0c9jnMK4FeN6jHFdDj96nxM7bQrU5B9ehi9cy+bNm4eHsW942e+7
Mmwyb/aNDruXq33uKxur0OgwHk5fOf68IsZfvZXU1lSqamvq/NVFJM/El1ZifkM2ybUUqXIt2YTf
UElKp0PeXLF3t697JuBsjM0dn4s1/voXG2PhUAlsXKI3ENlasDX66PFHo1s3HXnt+La55N4N+Nm/
sZVzZn7rfTUN3ZvcVVtHT4wtvKo2sJqV1xe5CkdVoKIuWFOxsWn0RGTv1+MNBos9C/ane/U95gLz
JjIhAZ0UK0+y+LgBM8c1+ASDFciCiBa1K8wWc7GZ0ZvNpYrSx0ovlzJNl0t/XEpKwZ8vb24M0qdY
UeYOXi+FjSsVS8OlS6VXS5XfKMViKS5oF3W9uqs6RpfXY2RdPUoralpuWqbuGh4+MHrA7U6MDIPP
RoaXR4Y3Vg0Pj4IvM46ordlK/NVWZU0lyfiIUAcyFzbu+9ah+HNT1dX7/iT5zn9bedvAbfJ66gp1
usI6j3cTZ8BvP7j4yDbxkVcffOCVh8Xf/jryB3urqvb+QWTvV6LV1dGvyA0jgncnJsL8FbKhr4k5
+VasseL8HKzJwUo2FwJHs7j6vpilNQQ1j6l/rCZqtd1OF1q8sSYYtmPC2kV7r50J22ftZ+ySfcl+
1a5CxnZkYS3EQj1TuCFIn6LNbA1acrttRqOl25ybjVa1WCuq51QsarJfsV8Zrq5uWq6mDnG7h5er
ZZeYNvuG/Rur3KNy4OCtzBYMgQIeUJvycl219abSWhe+Yt++e6K2btMWV3dXu/0Pb/3wyBH8BPmn
wu62qpVvP8I6XOytN23NzTbyka1ZXq9lpY15D95CeOISi/KN2JaFS3R1OlKirdOS+9QTaqJU56pJ
QImVsPQ/h5UHFFhBvdACsBXjIMJWNVarVfCOAYnap9oOSjFG2uz8bJKtBirWIqxBWWwW0WUJWnu+
vdzOGBh7fr4KU1esQiZilVAitAnHhO8JSqsM7hLGAf0mEP5R0KqA8NcC89Yu4Sn61ArlAvmtgH8p
4FeFvxTIiwI+LJwUyP0C7hAGBeIWsFbIFwiM/6XwE4E8K7woEJljt3C/QMqFzUKHwDgoF37un4Tf
CuRtAX9boLqYZwU8LhyCqRlB/MpTQb2AfyJQFuaKgPGLwncEclrAMFGPgIlR8AnyrUeIC6eF88KP
hBuCJuEUmoRR4ajwDeGS8I6gvg2uCipBjM0GkcAKosDUzwkYCZyYRxEAhLAwJ5wVloTrwvuCRi1Q
51gLy4LgxOKiXntOQZZGhbv1OgVjhELj9y9XQ5T4l00AYvfI8AFIGffwgREaLHDBLeFO0GskU2uG
D6xdlHYg4XP7/T7/6Mgw+72R4WrT5pFhUDV8nHW72cvscc0SFCpap9zwl76UGEIsV+1IP3A2I9el
WihhmGZgPf7hyolm/PPZa2/OYH9g5RTXvGOytaBcEKwN7mw+v7yx2m0rZOIQf/ZbHxEdPC03431f
nLgHYkz5z4/oDRXto7UQk0Or/4C/iX0oBzWJBYdMx0zkUNaxLHJYfVJNDjMnGWJ8alY5pyRK6iG9
LiuoVOZadM9S/mWTGW/2Qc7QogEWr9eLIkILKUZ8g9du9zbwGxrps7HwDngDHaM5UQpvf+fgXT0H
JcSWCTOO5WB1Tl5OaQ5zTIO1Gpxjxn0adY5aM2TOsZhzNGa1chih3N5cXGfGZu1wthIZRzXEzGiz
R3PUBjPdLchmecewD9KZlrZlP/v6cYWbxfL9MnU2TevhtHNNPL4jqxVfuIivnTt4882LK/y5c/j7
5MuKn9o7OuyflCjqblY6OjocikP2jpvfgtptX+lScFC7nciLnhPLbYXYZseKPEseOabHCvioHlPB
SxUsox1BI8Nxs9wcpzRz1JVecCXH+ZAPjmTWx/mYLb2+qz5S5RN9vb5Z31mf5FvyqcuRtf2GAzuo
RJ42O+go6C4yOqzdNoe7l82yCr1ICVXMv0w/mRoGJax6mMYcrNu9XtMzZQxqefqMKyktYgrx7VXL
BV6lNln91XX4GWttU1fl5Uv+fc/F6yY3YQbj+Vs3xifw53C0oKqJ99/n2hEa2sU8bHTkGP7xo8R3
Hu/IytaXuMuNV2i9U5yzNa/UTH95gLeyt7bkfk/uQ/qhX9ijPIUKkBudEQvy7FirP6kn8yqshU9+
uwXRTJ0VGLuci+3arKAgeJEXIy/r5bzgHu9VL6nyit5e76z3rFfyLnnVqBAXths1WNT0aq5qGE1+
d0FubnceKu8xZrEbwDnWdefIB94BOPFuuwZiZGOVnLACTa+SdPDmVTKNNC5oSGD5vKurN9EWQUWu
5XnrWyuv/qB66o/j03FCMA7jRydWnlo5VuRv5v3bS8qD3qFoLfXKjY8Si59vc+jLvB7jr2zNH4Nj
8NszXxksseWQy3rdm/J5YATHvKZ8DvH4nLjavgG3u7DSiTUqm4poGaiGq58I+KJwWSAO4QnhWYEx
Cvhd4QOBPCTQKsvsFPAWIV0nn6Rgp0AUgkUgP/y+8IlAnhcuCkQPguRdActVeacQpapoKb8s/JiW
8rQklHAFsHxfeFv4pcA8KTxPq/pu4SGQVcgKb0A1BVX3p0lumOhZ4P1EUDoFDGZOfZCefTet4+Fo
0CdgitwvG60UxJ2hYJNcwZ1QquNQoc8Lqs03BCyIrR3BqwK+REvzy2cEIu98s7sy+CM64VkBHxUw
rdPv06U7wXYHa7Qbi9qZqzzm+eIivtuViwp7GLvR0isaWSfrYxl2TovhXP/BsJ3uOz3ZTdjvG66G
rPD54VSnhZmWatr0wEEv1+0RuV7fUb7TdTuNZeAEzSOo34Dn8LX1a4UuL3etfyzE/lw+XUe+9qUv
1URODVp93pJssVDrzLMKduOlS8/cuj7OBFpLY2NfHatmlGrF9Wmt0dEYaTs5ectKE4fmCeSBclEZ
hNgwo0vikaA6pCb3qXCJqk1FFCqLipiyjazRqFCy5izookMGrFLjPgOrYg1DapUFeoNOFiOWZQlS
s2qigBurYBiNBtoiziJaei1zljMWyaKusswCeNZy1XLdomKyssNmkwmzSoXRoFaM6jAtpOlDD5IF
IJo9Jr/PnLc532fzgR/p4cWi148roaDiYfb40hI42uS3+QDaWCX33AJ1BnalywzjYjDjwudWJqP4
Gt6A3x6/9eKzc7duPYQfvYYvdtC6+u4nBbS+4t0rf6Kw3zov5wiCukH/c5uNVsStmwx4kx7L7ZKg
rqUtYZ6aCKpaFVGr8lQkRPAkfhATBmtwH2HIdq3GotVqdMiq/aaWaBdXr7+cDS8fNMqC+uwgg7RI
yzBKRfblbKLLZp9lv8O+zTIKtpgNsFH2MXBGBngeDul3WXV9DcCXKYcFyCnKwVJlJXxp8BMWUy4S
ZqEFZzlWZHtZhVqp0yCm26A0ajGhtcifdiqGZpu2lm7aRxw4wF6GdiBvcyN1LPSgJj+49rj7MoYO
dHStnxjWYv7OloD5TysnDq88QFuAX/8EWgBm182nmHH5uP8FscMTYqkZ3i1+zryOylAtelE8XG7F
OqvdSu6FY9uMK8xYb3aYSR3TxhAFY2GKGcauU7XP1s7VElTL1p6pvVqr0NbWWupRPe7srYd+Xazv
rQ/Xz9Vfr1eJMsBs4Hl6fhFfu2WDCDV7w4ai8m67HVXv0Bmtqm5tblE3YuX3Ddo9wUlMSy+cy8PQ
DcldlEl+8ziw7PevHVNwUNEOgqZSabrtocnWhGtrSqAIGzFfu5XJUWczuRZ6WNWTDds/P1IzsvJN
a46/ua+6Lx4oCiSf2XUk0F6/u6KkubJ/1+jhnR7RndtQVRXgmNftjdGOW1+zNQezuIKcis6Jhkiq
yUKYk/33OXMP/oPaoFOt5DHE4uu6Z6AXXhZoTsKrNHHA2aVFhSgl7rDc26s7oyNXdVinVtigQ8+D
F1PUDkex8qqSUSqLnKKz10nCzrNOyck4nVhyLjmvOxmfs8l52skY8535JL/fiMA9RqTsZnKRfHY3
LdMOEIoNdQl9EfFDJPz4QAIOKeGOg/p2g0Vdgy9OrJjH4Rha+e+2ik0ct6ncZiunzwrb0Nq7Bw7e
SQY2mldlsCarvKZRkX+G4GcwzrtrFTq9qO/Vk7D+rF7SM3oa54Xw5uXUY0m/pL+uZ3z6Jv1pPaPq
1qA7VkGX4F5bA2xpInGH8eczxsqmKX4ivxbRPgpynANbWFSB/kgsfbAYH+PwY06c5yx1EjgRH2cw
2oC1G9oRsHBoFs0hpVlO443QRSHkQR6MPKyH80Cb4LnqIVUe0dPrmfWc9UieJY86V9turMA3KlYr
SAWVMkEnVWHoKbXalL2FrKk7B8kR6k/3TweWf1f/tNYkpVtwuUmwqNS0q1rbkJJS8om9uqOyZKiQ
Gyiru9dnuXUU1qtUWhvb7+Wnnhz1bE78aSr84eP41+Mn+3mT6dZGjaZu6o+Yb+VtW/mWMFllLsjV
b05JDyb+88mu0kLwTdvqMhNi3oDIK0PTYvcD+cfzySH9MT0xFjuLCcueUWFVG1dcVSwWny2WipeK
VcXFFb6KporRinjF0YrzFZcqflRxo0LLq9uvQiOp6ikuRqasHVZrUU86MW9doQ0yliNO3rThap+8
ZJqFhdiVm+mBctdysS6P/ifAD87Av+o/NuJfydFvjA1umfXntfXv8R4+N1391l8VVzp0P1XmlDFv
lEX++HAf2/LQ6Cazfnt2QW6W+Mjig7/551hF10xLy0xXhVzjETb9kv8bVDZqbPgQOdO/n731Pwdm
bv9otNIGp+JzwKsBn2R+cEJI7Vppvf1LUObX0NuXmbyHAookmlYg1M2cQvXwtBA55NCQ8r+gUhiz
w7ef/EdkBNwC4wi+zWQz8sK3jI6DXBuiMXYVd+PvMdBhML3MS4qA4ofKv1WFVSvq5zQGTZ/murYv
Y4EZNWRshIMA+dAQQsyq4j3EyNQCvGvdzvC6zXBiA4YzUgoI8TTMQLinMrAC5aInM7ASzsNvZ2AV
9AyvZGA1OoK+n4E1yIJbM7AWZePBDKwHGybWfzmuxF/IwFkojhcycDbaSjiYHSu0gC2RkQyMEccU
ZWB4s2G2ZmAGiUxbBlagCuZYBlaiAkbKwCpUxvwgA6vRB8wHGVgDfv5FBtaiAnihScN6tEnpycAG
tEcZzcBZ6O+USxk4Gz2s+npLfPZwYmpiMsWVjZVz1VVV9VxfLMoFIykP1zEzVslt27+fkxmSXCKW
jCUOxaKVXFdHc2vftp0dPd3cVJKLcKlEJBqbjiTu5+Ljd8t3Te2NJSKpqfgM1x9LTI03x/dHtyXH
YjPRWILzcp8e5uj4ZxJ3xRJJStlYWVVfWXObRebwfkrsXzEKVjIxlUzFEkCcmuEGKvsrud5IKjaT
4iIzUW7numDP+PjUWEwmjsUSqQgwx1OTYPm+g4mpZHRqjM6WrFxfUEs8MRvP2JWKHYpx2yOpVCwZ
n5lMpWbv8fkeeOCBykiGeQx4K8fi077fN5Y6PBuLxpJTEzOw/MrJ1PT+LjBoJgmGH5RnBGvudGJb
fAY2an+ax8MlYzGOqk+C/vFYFEybTcT3xcZSlfHEhO+BqfunfGl9UzMTvttqqJbMPP8+adSC4pCP
h1ECTaEJNAn5yEEtHkPl8KxGVfBXD1AfiqEoPIMoAhwegDrQDHBVArQN7Yc/7g4NSRmLwTMGz0Oy
LOXsAqlm1AratqGdAPegbqBOyfwRuRYk4BkF/ml4JtD9QIuj8d87fxfI75XnoSNTwD8Do/0yZQpk
m4GyH2S3wSxjQJ2R9SeAxytb9PuluXX5fzvnLpmWXOfZCFZSL1aims/UcluH91+Z7d/nqfSeTMha
UrLuNOeUrHsAOPplrl5ZknoqJc82I3Pt/IwZe2DGcZCnfr3NOSbrTgGe1hwHeDLj833ooLzWJHBS
ubW1JWHmf7lDNDYTEJ3xT/mLWndInnO7TE/JsUbHJmVsFt0DJ5MPPSD/VQLP3ZrHMnorZWgaOP9P
5VKQObOyH2Pyjk8Ab3r3K2Wd07CbXRkPzcj5QD108I41pn3zuyKxTX6mM2r/XXroztInlV2zPpmx
f1yeJ+21WbjHwe8x2duVMnVCXuMU7OEUQHfaR3dsIkP7tDVrtty9nv+XczPpRmPVhd5An3GJG7V/
f73W+Y7/2sDf+X82UHWt99rcNema4hpmBn7GWJ3xt/DoWzfeIj1v4abvYud33/kuof3zf1jSZbX1
Xgpfmr3EvNZe4USL2PfK6CunXzn/yjuvKOOfYOfHNz4m8Y+PfkzEj3H8z7DxovMiiV/Ezpd7Xl59
mXnpXLPT+MLRF8j5F/DsC7jpBcw+zT1d9TQz+zT+w6cKnL6vNn2VfPnxqPP8l/AXe5xO9Hj4cXLm
cXzmC/jzgLIHuYMkFV51JkdXnbMwfxy+M+2rzny/bUDtZwZUzKqT2nl+pdLftrQXX4/g8GiNcxRk
nTd9N79xkzl/E6MRLI5os9qO7jm95xt7mN1DbqdvCKOh8BA5M/T+EHEO4Ry/eUAJrlCATiPjZJqY
HibOnGZUmv57Xc5eUBfvPtp9upvZ3s47723nnMYgFoN6Y1sbGGRsd7aTgqBjwOrPHTBh4wDrNw4Q
jAawHw34jKtGYjSOGo8a6Q8MiMxZsRIv4jMLO/vd7s5F9Wpfp6Tu3S3hE5LQT+/ijiFJdUJCA0O7
QwsYf2nw8VOnUHNhp1TdH5LChYOdUhQAkQJzALCFC1bUPJhMptzyhZNud8qN4OseScp4MnUQsFQy
hdzuZFLmgS8gKQw4UJPuJECQWVRJEidTFEiiJIyjJP2mgHaQSlNR2wjE0/8GPq6LbQplbmRzdHJl
YW0KZW5kb2JqCgo2IDAgb2JqCjY5MDUKZW5kb2JqCgo3IDAgb2JqCjw8L1R5cGUvRm9udERlc2Ny
aXB0b3IvRm9udE5hbWUvQkFBQUFBK0xpYmVyYXRpb25TZXJpZi1Cb2xkCi9GbGFncyA0Ci9Gb250
QkJveFstMTgyIC0zMDMgMTA4MyAxMDA3XS9JdGFsaWNBbmdsZSAwCi9Bc2NlbnQgODkxCi9EZXNj
ZW50IC0yMTYKL0NhcEhlaWdodCAxMDA3Ci9TdGVtViA4MAovRm9udEZpbGUyIDUgMCBSCj4+CmVu
ZG9iagoKOCAwIG9iago8PC9MZW5ndGggMzAxL0ZpbHRlci9GbGF0ZURlY29kZT4+CnN0cmVhbQp4
nF2Ry26DMBBF9/4KL9tFhE0S0kgIKSVBYtGHSvsBxB5SS8VYxlnw97Vn0lbqAnTGc689j6xuj601
IXv1k+og8MFY7WGerl4BP8PFWCZzro0Ktwj/auwdy6K3W+YAY2uHqSxZ9hZzc/ALvzvo6Qz3LHvx
GryxF373UXcx7q7OfcEINnDBqoprGOI9T7177kfI0LVqdUybsKyi5U/wvjjgOcaSSlGThtn1Cnxv
L8BKISpeNk3FwOp/Obkjy3lQn72PUhmlQmzXVeQcuWgSr5F3eeIN8T7xFjkXiQviU+Id8gb5gfSo
2dOdm8QH4iLxI+ll4pr0eH4kDZ6fiPHdhriOLAVxqk1S/cUWm711ldpOe/kZJ1dX7+MocXk4wzQ9
Y4H/LthNLtnw+wb8aJMuCmVuZHN0cmVhbQplbmRvYmoKCjkgMCBvYmoKPDwvVHlwZS9Gb250L1N1
YnR5cGUvVHJ1ZVR5cGUvQmFzZUZvbnQvQkFBQUFBK0xpYmVyYXRpb25TZXJpZi1Cb2xkCi9GaXJz
dENoYXIgMAovTGFzdENoYXIgMTcKL1dpZHRoc1szNjUgNTU2IDUwMCA0NDMgNTAwIDI1MCAyNTAg
NzIyIDU1NiA1NTYgMzMzIDcyMiA1MDAgNTAwIDI3NyAyNzcKNTU2IDQ0MyBdCi9Gb250RGVzY3Jp
cHRvciA3IDAgUgovVG9Vbmljb2RlIDggMCBSCj4+CmVuZG9iagoKMTAgMCBvYmoKPDwvRjEgOSAw
IFIKPj4KZW5kb2JqCgoxMSAwIG9iago8PC9Gb250IDEwIDAgUgovUHJvY1NldFsvUERGL1RleHRd
Cj4+CmVuZG9iagoKMSAwIG9iago8PC9UeXBlL1BhZ2UvUGFyZW50IDQgMCBSL1Jlc291cmNlcyAx
MSAwIFIvTWVkaWFCb3hbMCAwIDU5NSA4NDJdL0dyb3VwPDwvUy9UcmFuc3BhcmVuY3kvQ1MvRGV2
aWNlUkdCL0kgdHJ1ZT4+L0NvbnRlbnRzIDIgMCBSPj4KZW5kb2JqCgo0IDAgb2JqCjw8L1R5cGUv
UGFnZXMKL1Jlc291cmNlcyAxMSAwIFIKL01lZGlhQm94WyAwIDAgNTk1IDg0MiBdCi9LaWRzWyAx
IDAgUiBdCi9Db3VudCAxPj4KZW5kb2JqCgoxMiAwIG9iago8PC9UeXBlL0NhdGFsb2cvUGFnZXMg
NCAwIFIKL09wZW5BY3Rpb25bMSAwIFIgL1hZWiBudWxsIG51bGwgMF0KL0xhbmcoZW4tR0IpCj4+
CmVuZG9iagoKMTMgMCBvYmoKPDwvQXV0aG9yPEZFRkYwMDREMDA2MTAwNzQwMDc0MDA2NTAwNkYw
MDIwMDA0MjAwNkYwMDczMDA2MzAwNkYwMDZDMDA2Rj4KL0NyZWF0b3I8RkVGRjAwNTcwMDcyMDA2
OTAwNzQwMDY1MDA3Mj4KL1Byb2R1Y2VyPEZFRkYwMDRDMDA2OTAwNjIwMDcyMDA2NTAwNEYwMDY2
MDA2NjAwNjkwMDYzMDA2NTAwMjAwMDM1MDAyRTAwMzI+Ci9DcmVhdGlvbkRhdGUoRDoyMDE3MTAy
NDE3MTgwNiswMicwMCcpPj4KZW5kb2JqCgp4cmVmCjAgMTQKMDAwMDAwMDAwMCA2NTUzNSBmIAow
MDAwMDA4MTYzIDAwMDAwIG4gCjAwMDAwMDAwMTkgMDAwMDAgbiAKMDAwMDAwMDI0MCAwMDAwMCBu
IAowMDAwMDA4MzA2IDAwMDAwIG4gCjAwMDAwMDAyNjAgMDAwMDAgbiAKMDAwMDAwNzI1MCAwMDAw
MCBuIAowMDAwMDA3MjcxIDAwMDAwIG4gCjAwMDAwMDc0NzMgMDAwMDAgbiAKMDAwMDAwNzg0MyAw
MDAwMCBuIAowMDAwMDA4MDc2IDAwMDAwIG4gCjAwMDAwMDgxMDggMDAwMDAgbiAKMDAwMDAwODQw
NSAwMDAwMCBuIAowMDAwMDA4NTAyIDAwMDAwIG4gCnRyYWlsZXIKPDwvU2l6ZSAxNC9Sb290IDEy
IDAgUgovSW5mbyAxMyAwIFIKL0lEIFsgPEMzRDZBMzFBMTcxNkU1QjAyMjkxN0Y4QzkxQUM1MDk3
Pgo8QzNENkEzMUExNzE2RTVCMDIyOTE3RjhDOTFBQzUwOTc+IF0KL0RvY0NoZWNrc3VtIC8wQjMy
RjYxNzJGNDFCNzYwNjRBM0NDQjFEMTgxOTFCQgo+PgpzdGFydHhyZWYKODc0NwolJUVPRgo=""")
def commonInfos(env):
    """Return the (document filestore path, configured BookCollector) pair
    shared by all plm PDF reports for the given Odoo environment."""
    repository = env['plm.document']._get_filestore()
    current_user = env['res.users'].browse(env.uid)
    footer_message = getBottomMessage(current_user, env.context)
    collector = BookCollector(jumpFirst=False,
                              customTest=(False, footer_message),
                              bottomHeight=10)
    return repository, collector
class ReportProductPdf(models.AbstractModel):
    """Qweb report that bundles the PDF documents linked to products.

    Collects documents attached to the given products (and, when level > -1,
    to their BoM children), packs them into one PDF and returns it as a
    base64 ``data:`` URI string.
    """
    _name = 'report.plm.product_pdf'
    @api.model
    def render_qweb_pdf(self, products=None, level=0, checkState=False):
        # docRepository: filestore path; mainBookCollector: page/footer helper.
        docRepository, mainBookCollector = commonInfos(self.env)
        documents = []
        def getDocument(products, check):
            # Gather linked documents; with check=True only documents whose
            # state is 'released' or 'undermodify' are kept.
            # NOTE(review): callers pass a single record here — iterating a
            # single Odoo record yields itself, so this works; confirm.
            out = []
            for product in products:
                for doc in product.linkeddocuments:
                    if check:
                        if doc.state in ['released', 'undermodify']:
                            out.append(doc)
                        continue
                    out.append(doc)
            return out
        for product in products:
            documents.extend(getDocument(product, checkState))
            # level > -1 means "also descend into the BoM children".
            if level > -1:
                for childProduct in product._getChildrenBom(product, level):
                    documents.extend(getDocument(childProduct, checkState))
        if len(documents) == 0:
            # No attachments anywhere: fall back to the bundled empty PDF.
            content = getEmptyDocument()
        else:
            documentContent = packDocuments(docRepository,
                                            documents,
                                            mainBookCollector)
            content = documentContent[0]
        byteString = b"data:application/pdf;base64," + base64.b64encode(content)
        return byteString.decode('UTF-8')
    @api.model
    def get_report_values(self, docids, data=None):
        """Standard report entry point: expose records and the PDF renderer."""
        products = self.env['product.product'].browse(docids)
        return {'docs': products,
                'get_content': self.render_qweb_pdf}
# Report variants: same rendering logic as ReportProductPdf, registered under
# the report names that the plm module's report actions reference.
class ReportOneLevelProductPdf(ReportProductPdf):
    _name = 'report.plm.one_product_pdf'
class ReportAllLevelProductPdf(ReportProductPdf):
    _name = 'report.plm.all_product_pdf'
class ReportProductionProductPdf(ReportProductPdf):
    _name = 'report.plm.product_production_pdf_latest'
class ReportProductionOneProductPdf(ReportProductPdf):
    _name = 'report.plm.product_production_one_pdf_latest'
class ReportProductionAllProductPdf(ReportProductPdf):
    _name = 'report.plm.product_production_all_pdf_latest'
| [
"daniel.podvesker@perpul.co"
] | daniel.podvesker@perpul.co |
dfd2c9c84434876a23e1a72a9b5e5deb43e76f42 | de2cf38982336ba752dc83242d163b3aebfcac70 | /Python_BigPicture_1/mtsite/mtsite/wsgi.py | 91d111396bf6ee65e759c9e00797818f3572b62e | [] | no_license | goelg08/Python | 344b38edda9c459ce86cecf7de76699d84266924 | 596da249225cfdcb48360d45e644fc2c3205197f | refs/heads/master | 2021-10-07T17:46:29.295697 | 2021-09-25T11:41:54 | 2021-09-25T11:41:54 | 199,595,031 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 389 | py | """
WSGI config for mtsite project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Fall back to the project settings module unless one is already configured.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mtsite.settings")
# Module-level WSGI callable that servers (gunicorn, uWSGI, ...) look up.
application = get_wsgi_application()
| [
"Priyanshu.Goel@factset.com"
] | Priyanshu.Goel@factset.com |
3dc11225bc631940673ee0dc4948b04e54bc4460 | aaa05e97411070603dc87ff8e79eae2dc0856320 | /aws/projects/004-phonebook-web-application/phonebook-app.py | 9f3db5dcae4cabfba1298d9fe32edd5a71f9e5b3 | [] | no_license | devinlimit/Clarusway_aws_devops_workshop | 3e9b74b4ac31bc02564d1946edb0c89bacdef0d7 | 0dff381ae54dd4983191a37dd06b96934e8ed80a | refs/heads/master | 2023-01-23T20:45:21.964022 | 2020-11-21T10:47:57 | 2020-11-21T10:47:57 | 292,507,519 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,038 | py | # Import Flask modules
from flask import Flask, request, render_template
from flaskext.mysql import MySQL
# Create an object named app
app = Flask(__name__)
# The database endpoint is provisioned out-of-band and written to this file.
db_endpoint = open("/home/ec2-user/dbserver.endpoint", 'r', encoding='UTF-8')
# Configure mysql database
app.config['MYSQL_DATABASE_HOST'] = db_endpoint.readline().strip()
app.config['MYSQL_DATABASE_USER'] = 'admin'
# NOTE(review): credentials are hard-coded; move to env vars / a secret store.
app.config['MYSQL_DATABASE_PASSWORD'] = 'Clarusway_1'
app.config['MYSQL_DATABASE_DB'] = 'phonebook'
app.config['MYSQL_DATABASE_PORT'] = 3306
db_endpoint.close()
mysql = MySQL()
mysql.init_app(app)
# One module-level connection/cursor shared by every request handler.
# NOTE(review): not obviously safe under concurrent requests — confirm.
connection = mysql.connect()
connection.autocommit(True)
cursor = connection.cursor()
# Write a function named `init_todo_db` which initializes the todo db
# Create P table within sqlite db and populate with sample data
# Execute the code below only once.
def init_phonebook_db():
    """Recreate the phonebook table and seed it with sample rows.

    Called once at startup; any existing table (and its data) is dropped.
    """
    drop_table = 'DROP TABLE IF EXISTS phonebook.phonebook;'
    phonebook_table = """
    CREATE TABLE phonebook(
    id INT NOT NULL AUTO_INCREMENT,
    name VARCHAR(100) NOT NULL,
    number VARCHAR(100) NOT NULL,
    PRIMARY KEY (id)
    ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;
    """
    data = """
    INSERT INTO phonebook.phonebook (name, number)
    VALUES
        ("Callahan", "1234567890"),
        ("Sergio Taco", "67854"),
        ("Vincenzo Altobelli", "876543554");
    """
    # Run the three statements in order: drop, create, seed.
    for statement in (drop_table, phonebook_table, data):
        cursor.execute(statement)
# Write a function named `find_persons` which finds persons' record using the keyword from the phonebook table in the db,
# and returns result as list of dictionary
# `[{'id': 1, 'name':'XXXX', 'number': 'XXXXXX'}]`.
def find_persons(keyword):
    """Return phonebook entries whose name contains *keyword* (case-insensitive).

    Returns a list of dicts like ``[{'id': 1, 'name': 'Xxx', 'number': '123'}]``;
    when nothing matches, a single placeholder 'No Result' entry is returned.
    """
    # Security fix: the keyword is passed as a bound parameter instead of being
    # interpolated into the SQL text (prevents SQL injection).
    query = "SELECT * FROM phonebook WHERE name like %s;"
    cursor.execute(query, ('%' + keyword.strip().lower() + '%',))
    result = cursor.fetchall()
    persons = [{'id': row[0], 'name': row[1].strip().title(), 'number': row[2]} for row in result]
    if not persons:
        persons = [{'name': 'No Result', 'number': 'No Result'}]
    return persons
# Write a function named `insert_person` which inserts person into the phonebook table in the db,
# and returns text info about result of the operation
def insert_person(name, number):
    """Insert a new person, refusing duplicates by normalized name.

    Returns a human-readable status message in both cases.
    """
    # Security fix: bound parameters instead of f-string SQL interpolation.
    cursor.execute("SELECT * FROM phonebook WHERE name like %s;",
                   (name.strip().lower(),))
    row = cursor.fetchone()
    if row is not None:
        # Message typo fixed: 'exits' -> 'exists'.
        return f'Person with name {row[1].title()} already exists.'
    cursor.execute("INSERT INTO phonebook (name, number) VALUES (%s, %s);",
                   (name.strip().lower(), number))
    # The original fetched rows after the INSERT; that result was unused.
    return f'Person {name.strip().title()} added to Phonebook successfully'
# Write a function named `update_person` which updates the person's record in the phonebook table,
# and returns text info about result of the operation
def update_person(name, number):
    """Update the phone number of an existing person; report if absent."""
    # Security fix: bound parameters instead of f-string SQL interpolation.
    cursor.execute("SELECT * FROM phonebook WHERE name like %s;",
                   (name.strip().lower(),))
    row = cursor.fetchone()
    if row is None:
        return f'Person with name {name.strip().title()} does not exist.'
    # Only the number changes; the stored name is rewritten with its current
    # value, exactly as the original UPDATE did.
    cursor.execute("UPDATE phonebook SET name=%s, number=%s WHERE id=%s;",
                   (row[1], number, row[0]))
    return f'Phone record of {name.strip().title()} is updated successfully'
# Write a function named `delete_person` which deletes person record from the phonebook table in the db,
# and returns returns text info about result of the operation
def delete_person(name):
    """Delete a person by normalized name; report if absent."""
    # Security fix: bound parameters instead of f-string SQL interpolation.
    cursor.execute("SELECT * FROM phonebook WHERE name like %s;",
                   (name.strip().lower(),))
    row = cursor.fetchone()
    if row is None:
        return f'Person with name {name.strip().title()} does not exist, no need to delete.'
    cursor.execute("DELETE FROM phonebook WHERE id=%s;", (row[0],))
    return f'Phone record of {name.strip().title()} is deleted from the phonebook successfully'
# Write a function named `find_records` which finds phone records by keyword using `GET` and `POST` methods,
# using template files named `index.html` given under `templates` folder
# and assign to the static route of ('/')
@app.route('/', methods=['GET', 'POST'])
def find_records():
    """Render the search form (GET) or search the phonebook (POST)."""
    if request.method != 'POST':
        return render_template('index.html', show_result=False, developer_name='E2014_Devin')
    keyword = request.form['username']
    matches = find_persons(keyword)
    return render_template('index.html', persons=matches, keyword=keyword, show_result=True, developer_name='E2014_Devin')
# Write a function named `add_record` which inserts new record to the database using `GET` and `POST` methods,
# using template files named `add-update.html` given under `templates` folder
# and assign to the static route of ('add')
@app.route('/add', methods=['GET', 'POST'])
def add_record():
    """Render the add form (GET) or validate and insert a record (POST)."""
    if request.method != 'POST':
        return render_template('add-update.html', show_result=False, not_valid=False, action_name='save', developer_name='E2014_Devin')

    def invalid(message):
        # Re-render the form with a validation error message.
        return render_template('add-update.html', not_valid=True, message=message, show_result=False, action_name='save', developer_name='E2014_Devin')

    name = request.form['username']
    if name is None or name.strip() == "":
        return invalid('Invalid input: Name can not be empty')
    if name.isdecimal():
        return invalid('Invalid input: Name of person should be text')
    phone_number = request.form['phonenumber']
    if phone_number is None or phone_number.strip() == "":
        return invalid('Invalid input: Phone number can not be empty')
    if not phone_number.isdecimal():
        return invalid('Invalid input: Phone number should be in numeric format')
    result = insert_person(name, phone_number)
    return render_template('add-update.html', show_result=True, result=result, not_valid=False, action_name='save', developer_name='E2014_Devin')
# Write a function named `update_record` which updates the record in the db using `GET` and `POST` methods,
# using template files named `add-update.html` given under `templates` folder
# and assign to the static route of ('update')
@app.route('/update', methods=['GET', 'POST'])
def update_record():
    """Render the update form (GET) or validate and update a record (POST)."""
    if request.method != 'POST':
        return render_template('add-update.html', show_result=False, not_valid=False, action_name='update', developer_name='E2014_Devin')

    def invalid(message):
        # Re-render the form with a validation error message.
        return render_template('add-update.html', not_valid=True, message=message, show_result=False, action_name='update', developer_name='E2014_Devin')

    name = request.form['username']
    if name is None or name.strip() == "":
        return invalid('Invalid input: Name can not be empty')
    phone_number = request.form['phonenumber']
    if phone_number is None or phone_number.strip() == "":
        return invalid('Invalid input: Phone number can not be empty')
    if not phone_number.isdecimal():
        return invalid('Invalid input: Phone number should be in numeric format')
    result = update_person(name, phone_number)
    return render_template('add-update.html', show_result=True, result=result, not_valid=False, action_name='update', developer_name='E2014_Devin')
# Write a function named `delete_record` which updates the record in the db using `GET` and `POST` methods,
# using template files named `delete.html` given under `templates` folder
# and assign to the static route of ('delete')
@app.route('/delete', methods=['GET', 'POST'])
def delete_record():
    """Render the delete form (GET) or validate and delete a record (POST)."""
    if request.method != 'POST':
        return render_template('delete.html', show_result=False, not_valid=False, developer_name='E2014_Devin')
    name = request.form['username']
    if name is None or name.strip() == "":
        return render_template('delete.html', not_valid=True, message='Invalid input: Name can not be empty', show_result=False, developer_name='E2014_Devin')
    result = delete_person(name)
    return render_template('delete.html', show_result=True, result=result, not_valid=False, developer_name='E2014_Devin')
# Add a statement to run the Flask application which can be reached from any host on port 80.
if __name__== '__main__':
    # (Re)create and seed the phonebook table on every start.
    init_phonebook_db()
    #app.run(debug=True)
    # Serve on all interfaces at port 80 (requires elevated privileges).
    app.run(host='0.0.0.0', port=80)
| [
"devinlimit@gmail.com"
] | devinlimit@gmail.com |
cd7b1c735c48d2803238e6ba0ddab6a70ce5d66f | d0eccc58972cc3946b2a105551b8e48205d85c1d | /general/wit_clip.py | a85357da8f44f7db00bcd07879fd260bc71e3bcf | [
"MIT"
] | permissive | peternara/DALLE-datasets | 8848d0b3e1266dddee00a0ba15e85b2fd6eeb91b | 527e54aeac879bc4da669fa5c5b64c9354890728 | refs/heads/main | 2023-08-07T05:17:49.872327 | 2021-10-07T17:58:40 | 2021-10-07T17:58:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,463 | py | import os
import argparse
import time
import pickle
from tqdm import tqdm
import pandas as pd
from multiprocessing import cpu_count #, get_context
from helper_scripts.wit_url_downloader import download_wit_urls
from helper_scripts.wit_clip_class import CLIP
from helper_scripts.wit_dtype import DTYPE, DFLENGTH, DFLENGTH_ENGLISH
from helper_scripts.wit_image_downloader import wit_download_image
from concurrent.futures import ThreadPoolExecutor
os.environ['KMP_DUPLICATE_LIB_OK']='True'
ONLYENGLISH = True
MULTIPROCESSING = True
THREAD_COUNT = 2*cpu_count()+1
CHUNKSIZE = 10000
EMBEDDINGS_PER_PICKLE = 5000
SIMILARITIESFOLDER = './wit/witsimilarities'
EMBEDDINGSFOLDER = './wit/witembeddings'
WITURLFOLDER = './wit/witurls'
parser = argparse.ArgumentParser()
parser.add_argument('--wit_url_folder', type=str,
help='Download location for WIT urls.')
parser.add_argument('--onepercentsample',
dest='onepercentsample',
action='store_true',
help='Only download 1% sample file.')
parser.add_argument('--saveimages',
dest='saveimages',
action='store_true',
help='Save the images on the local drive.')
parser.add_argument('--saveembeddings',
dest='saveembeddings',
action='store_true',
help='Save the image embeddings on the local drive.')
parser.add_argument('--savewds',
dest='savewds',
action='store_true',
help='Save the images and best matching caption as WebDataset')
args = parser.parse_args()
wit_url_folder = args.wit_url_folder if args.wit_url_folder else WITURLFOLDER
clipper = CLIP()
os.makedirs(SIMILARITIESFOLDER, exist_ok=True)
if args.saveembeddings:
os.makedirs(EMBEDDINGSFOLDER, exist_ok=True)
dtv = list(DTYPE.keys())
caption_dict = {0:dtv[4], 1:dtv[5], 2:dtv[6], 3:dtv[7], 4:dtv[8], 5:dtv[15], 6:dtv[16]}
def task_done(future):
    """Return *future*'s result, or False if it raised an exception.

    Narrowed from a bare ``except:`` so that KeyboardInterrupt/SystemExit are
    no longer swallowed. (CancelledError on 3.8+ is a BaseException and will
    propagate; no visible call site cancels futures.)
    """
    try:
        result = future.result()
    except Exception:
        return False
    else:
        return result
def process_row(row):
    """Download one WIT image and score its candidate captions with CLIP.

    *row* is a plain tuple from ``DataFrame.itertuples(name=None)``: index 0
    is the frame index and the numeric positions below address specific WIT
    columns (see the inline ``row.<name>`` hints).
    Returns ``(frame_index, {caption_column: similarity}, embeddings)`` on
    success, or ``(False, False, False)`` when download/scoring failed.
    """
    saveembeddings = row[18]
    saveimages = row[19]
    image_url = row[3]
    captions = [
        row[5], # row.page_title,
        row[6], # row.section_title,
        row[7], # row.hierarchical_section_title,
        row[8], # row.caption_attribution_description,
        row[9], # row.caption_alt_text_description,
        row[16], # row.context_page_description,
        row[17] # row.context_section_description
    ]
    # Keep only the caption slots that actually hold text (NaN otherwise).
    available_captions = [True if isinstance(x, str) else False for x in captions]
    caption_tuples = [(i, x) for i, x in enumerate(captions) if available_captions[i]]
    # NOTE(review): if *no* caption is a string this unpacking raises
    # ValueError outside the try below — confirm every row has a caption.
    available_ids, captions = list(zip(*caption_tuples))
    try:
        image_request = wit_download_image(image_url, saveimages)
        similarities, embeddings = clipper.return_similarities(image_request, captions, image_url)
        # Re-key similarities by the original WIT column names.
        similarities = {caption_dict[j]: round(similarities[i], 4) for i, j in enumerate(available_ids) }
    except Exception as e:
        print('Exception while trying to download {}'.format(image_url))
        print(e)
        return False, False, False
    else:
        if not saveembeddings:
            # Drop the embeddings when they will not be persisted.
            embeddings = None
        return row[0], similarities, embeddings
if __name__ == '__main__':
    start = time.time()
    global_counter = 0
    # Fetch the WIT url TSVs (all shards, or just the 1% sample).
    download_wit_urls(urlfolder=wit_url_folder, onepercentsample=args.onepercentsample)
    fns = sorted([x for x in os.listdir(wit_url_folder) if x[0] != '.' and '.tsv.gz' in x])
    if not args.onepercentsample:
        fns = [x for x in fns if '1percent' not in x]
    for i, wit_filename in enumerate(fns):
        print('Processing {}. file: {}...'.format(i+1, wit_filename))
        # Pre-tabulated row counts drive the progress bar.
        if ONLYENGLISH:
            dflen = DFLENGTH_ENGLISH[wit_filename]
        else:
            dflen = DFLENGTH[wit_filename]
        pbar = tqdm(total=dflen)
        similarities_dict = {}
        embeddings_dict_counter = 0
        if args.saveembeddings:
            embeddings_dict = {}
        # Output-file prefix distinguishes the sample from the main shards.
        if '1percent' in wit_filename:
            prefix = "onepercent"
        else:
            prefix = 'main' + (wit_filename[-17])
        with pd.read_csv(
            os.path.join(wit_url_folder, wit_filename),
            sep="\t",
            compression="gzip",
            chunksize=CHUNKSIZE,
            quotechar='"',
            dtype=DTYPE,
            error_bad_lines=False
        ) as reader:
            # NOTE(review): this loop variable shadows the outer file index
            # 'i'; the chunk index is also used in the similarities filename.
            for i, df in enumerate(reader):
                if ONLYENGLISH:
                    df = df[df['language'] == 'en']
                # dflen = dflen - i*CHUNKSIZE
                # Extra columns let process_row see the save flags per row.
                df['saveembeddings'] = args.saveembeddings
                df['saveimages'] = args.saveimages
                embeddings_dict = {}
                results = []
                if MULTIPROCESSING:
                    with ThreadPoolExecutor() as executor:
                        for res in executor.map(process_row, df.itertuples(name=None)):
                            results.append(res)
                            pbar.update()
                else:
                    for row in tqdm(df.itertuples(name=None), total=dflen):
                        result = process_row(row)
                        results.append(result)
                        pbar.update()
                for result in results:
                    # process_row returns (False, False, False) on failure.
                    if result[0] != False:
                        index, sim, emb = result
                        similarities_dict[index] = sim
                        if args.saveembeddings:
                            embeddings_dict[index] = emb
                            # Flush a full embeddings batch to its own pickle.
                            if len(embeddings_dict.keys()) >= EMBEDDINGS_PER_PICKLE:
                                with open(os.path.join(
                                    EMBEDDINGSFOLDER,
                                    '{}_{:05d}_image_embeddings.pkl'.format(prefix, embeddings_dict_counter)
                                ), 'wb') as f:
                                    pickle.dump(embeddings_dict, f)
                                embeddings_dict_counter += 1
                                embeddings_dict = {}
                # Flush the remaining partial batch for this chunk.
                if len(embeddings_dict) > 0:
                    with open(os.path.join(
                        EMBEDDINGSFOLDER,
                        '{}_{:05d}_image_embeddings.pkl'.format(prefix, embeddings_dict_counter)
                    ), 'wb') as f:
                        pickle.dump(embeddings_dict, f)
                    embeddings_dict_counter += 1
                # Write the accumulated similarities, sorted by row index.
                similarity_df = pd.DataFrame.from_dict(similarities_dict, orient='index')
                similarity_df.index.name = 'index'
                similarity_df.index = similarity_df.index.astype(int)
                similarity_df = similarity_df.sort_index()
                similarity_df.to_csv(
                    os.path.join(
                        SIMILARITIESFOLDER,
                        wit_filename.replace('.tsv.gz', '') + '_with_similarities_{:05d}'.format(i) + '.tsv'
                    ), sep="\t")
        global_counter += DFLENGTH_ENGLISH[wit_filename] if ONLYENGLISH else DFLENGTH[wit_filename]
        pbar.close()
    end = time.time()
    elapsed = end - start
    print('Finished processing {} WIT-rows in {:.2f} hours!'.format(global_counter, elapsed/(60*60)))
"robvanvolt@gmail.com"
] | robvanvolt@gmail.com |
d64a94d04f433741f0b07047cfc4f842fc484967 | 285ef22aace9c4aa40c6311ba49444cf1f7b7cc6 | /tools/accu-dump-xar | 3895664952fa2ca0a8af922c57570f0ae806e90b | [] | no_license | russel/hugo-site | 9d61e115bc63ef7eec9a8396a6c0a5606de8144a | c32a6207bb18c6b45dcae4fcfd9d8eb3b8d9cf32 | refs/heads/master | 2020-04-18T06:02:07.872251 | 2020-03-30T10:53:27 | 2020-03-30T10:53:27 | 167,303,291 | 0 | 0 | null | 2019-01-24T04:31:18 | 2019-01-24T04:31:18 | null | UTF-8 | Python | false | false | 7,359 | #!/usr/bin/python3
#
# accu-dump-xar
#
# Dump Xaraya journal files to individal JSON files.
import argparse
import datetime
import json
import pathlib
import sys
import pymysql
def toutf8(s):
    """Repair a UTF-8 string that was mis-decoded as latin1.

    Returns *s* unchanged when the round-trip fails (already-correct text,
    non-encodable characters) or when *s* is not a string (e.g. None from a
    NULL column). Narrowed from a bare ``except:`` so KeyboardInterrupt and
    SystemExit are no longer swallowed.
    """
    try:
        return s.encode('latin1').decode('utf-8')
    except Exception:
        return s
def dump_articles(db, outputdir, pubtype, pubtypeid):
    """Dump all articles of one Xaraya publication type as JSON files.

    One file per article is written to <outputdir>/<pubtype>/<id>.json.
    On any database error a diagnostic is printed and the process exits
    with status 1.
    """
    # Dynamic-data property ids that carry extra article attributes.
    prop_names = { 96: "keywords", 97: "author", 98: "author-email",
                   99: "author2", 100: "author2-email" }
    article_cursor = db.cursor()
    article_sql = """\
select xar_aid, xar_title, xar_summary, xar_body, xar_pubdate
from xar_articles
where xar_pubtypeid={pubtypeid}""".format(pubtypeid=pubtypeid)
    try:
        article_cursor.execute(article_sql)
        for row in article_cursor.fetchall():
            article_id = row[0]
            article = {
                "id": article_id,
                "title": toutf8(row[1]),
                "summary": toutf8(row[2]),
                "body": toutf8(row[3]),
                "date": datetime.datetime.fromtimestamp(row[4]).isoformat()
            }
            detail_cursor = db.cursor()
            dyndata_sql = """\
select xar_dd_propid, xar_dd_value
from xar_dynamic_data
where xar_dd_itemid={}""".format(article_id)
            detail_cursor.execute(dyndata_sql)
            for prop_row in detail_cursor.fetchall():
                if prop_row[0] in prop_names:
                    article[prop_names[prop_row[0]]] = toutf8(prop_row[1])
            cat_sql = """\
select xar_name, xar_description from xar_categories join xar_categories_linkage on xar_categories_linkage.xar_cid = xar_categories.xar_cid where xar_categories_linkage.xar_iid = {}""".format(article_id)
            detail_cursor.execute(cat_sql)
            article["category-id"] = []
            article["category-name"] = []
            for cat_row in detail_cursor.fetchall():
                article["category-id"].append(cat_row[0])
                article["category-name"].append(cat_row[1])
            outfile = pathlib.Path(outputdir, pubtype, "{:05}.json".format(article_id))
            outfile.parent.mkdir(parents=True, exist_ok=True)
            with outfile.open('w') as f:
                json.dump(article, f, ensure_ascii=False, sort_keys=True, indent=4)
            detail_cursor.close()
    except Exception as err:
        print("No articles read: {}.".format(err), file=sys.stderr)
        sys.exit(1)
def dump_bookreviews(db, outputdir):
    """Dump every book review as a JSON file under <outputdir>/bookreviews/.

    Exits the process with status 1 when the database cannot be read.
    """
    cursor = db.cursor()
    review_sql = """\
select xar_rid, xar_title, xar_author, xar_isbn, xar_publisher, xar_pages, xar_price, xar_recommend, xar_rectext, xar_reviewer, xar_cvu, xar_subject, xar_review, xar_created, xar_modified
from xar_bookreviews"""
    try:
        cursor.execute(review_sql)
        for row in cursor.fetchall():
            review_id = row[0]
            # Text columns pass through toutf8 to repair latin1 mis-decoding.
            review = {
                "id": review_id,
                "title": toutf8(row[1]),
                "author": toutf8(row[2]),
                "isbn": toutf8(row[3]),
                "publisher": toutf8(row[4]),
                "pages": toutf8(row[5]),
                "price": toutf8(row[6]),
                "rating": row[7],
                "summary": toutf8(row[8]),
                "reviewer": toutf8(row[9]),
                "cvu": toutf8(row[10]),
                "subject": toutf8(row[11]),
                "review": toutf8(row[12]),
                "created": row[13].isoformat(),
                "modified": row[14].isoformat()
            }
            outfile = pathlib.Path(outputdir, 'bookreviews', "{:05}.json".format(review_id))
            outfile.parent.mkdir(parents=True, exist_ok=True)
            with outfile.open('w') as f:
                json.dump(review, f, ensure_ascii=False, sort_keys=True, indent=4)
    except Exception as err:
        print("No book reviews read: {}.".format(err), file=sys.stderr)
        sys.exit(1)
def dump_pages(db, outputdir, pagetype, pagetypeid):
    """Dump all ACTIVE xarpages of one item type as JSON files.

    One file per page is written to <outputdir>/<pagetype>/<id>.json.
    Exits the process with status 1 when the database cannot be read.
    """
    # Dynamic-data property ids used by the two page item types.
    prop_names = { 26: "body", 27: "page-title", 30: "menu-title",
                   28: "page-title", 29: "body", 31: "menu-title", 46: "block" }
    page_cursor = db.cursor()
    page_sql = """\
select xar_pid, xar_name, xar_desc
from xar_xarpages_pages
where xar_status = 'ACTIVE' and xar_itemtype={pagetypeid}""".format(pagetypeid=pagetypeid)
    try:
        page_cursor.execute(page_sql)
        for row in page_cursor.fetchall():
            page_id = row[0]
            page = {
                "id": page_id,
                "name": toutf8(row[1]),
                "description": toutf8(row[2])
            }
            detail_cursor = db.cursor()
            dyndata_sql = """\
select xar_dd_propid, xar_dd_value
from xar_dynamic_data
where xar_dd_itemid={}""".format(page_id)
            detail_cursor.execute(dyndata_sql)
            for prop_row in detail_cursor.fetchall():
                if prop_row[0] in prop_names:
                    page[prop_names[prop_row[0]]] = toutf8(prop_row[1])
            outfile = pathlib.Path(outputdir, pagetype, "{:05}.json".format(page_id))
            outfile.parent.mkdir(parents=True, exist_ok=True)
            with outfile.open('w') as f:
                json.dump(page, f, ensure_ascii=False, sort_keys=True, indent=4)
            detail_cursor.close()
    except Exception as err:
        # NOTE(review): message says "articles" (copied from dump_articles);
        # kept byte-identical to preserve behavior.
        print("No articles read: {}.".format(err), file=sys.stderr)
        sys.exit(1)
def main():
    """Parse the command line, connect to the Xaraya DB and dump one pubtype.

    Exits 0 on success, 1 when the database is unreachable or a dump fails.
    """
    # Publication / page type name -> Xaraya numeric type id.
    pubtypes = { "news": 1, "docs": 2, "weblinks": 6,
                 "pdf": 14, "epub": 16,
                 "blogs": 10, "journals": 13 }
    pagetypes = { "accupages": 3, "conferencepages": 4 }
    parser = argparse.ArgumentParser(description='dump Xaraya articles to JSON')
    parser.add_argument('--pubtype', dest='pubtype', action='store',
                        choices=['news', 'docs', 'weblinks', 'pdf', 'epub',
                                 'blogs', 'journals', 'accupages',
                                 'conferencepages', 'bookreviews'],
                        default='journals',
                        help='type of publication', metavar='PUBTYPE')
    parser.add_argument('--host', dest='host', action='store',
                        default='localhost',
                        help='database host', metavar='HOSTNAME')
    parser.add_argument('--port', dest='port', action='store',
                        type=int, default='3306',
                        help='database port', metavar='PORT')
    parser.add_argument('-o', '--output-dir', dest='outputdir', action='store',
                        default='.',
                        help='directory for output files', metavar='DIR')
    parser.add_argument('-p', '--password', dest='password', action='store',
                        required=True,
                        help='database password', metavar='PASSWORD')
    args = parser.parse_args()
    try:
        db = pymysql.connect(args.host, 'accuorg_xarad', args.password, 'accuorg_xar', port=args.port, charset='latin1')
        # Book reviews and pages have dedicated dumpers; everything else is
        # an article-style publication.
        if args.pubtype == 'bookreviews':
            dump_bookreviews(db, args.outputdir)
        elif args.pubtype.endswith('pages'):
            dump_pages(db, args.outputdir, args.pubtype, pagetypes[args.pubtype])
        else:
            dump_articles(db, args.outputdir, args.pubtype, pubtypes[args.pubtype])
    except Exception as err:
        print("Database access failed: {}".format(err), file=sys.stderr)
        sys.exit(1)
    sys.exit(0)
if __name__ == "__main__":
main()
# Local Variables:
# mode: Python
# End:
| [
"jim.hague@acm.org"
] | jim.hague@acm.org | |
488ec8cc2ff42ab4f1bb862fb0893e4eeb043557 | 6a242a855a413f11213f2a7e2ced95619ca087a2 | /flaskapp.py | fcb07b123633ca9e996d708a190b0ce1e82e6a18 | [] | no_license | Knugn/ACC-A3 | a28750703180c36ac638890b0f39f465e942d53a | f4d1251072c125d41bd3fe7aa874ce4ace0ea4df | refs/heads/master | 2021-01-10T16:49:29.963467 | 2015-10-28T19:35:26 | 2015-10-28T19:35:26 | 43,636,991 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,757 | py | from flask import Flask, jsonify, url_for
import sys
import swiftutil
from celeryapp import celery
from celery import group
import celerytasks
import time
import json
import timeit
from collections import Counter
from operator import add
flask = Flask(__name__)
sc = swiftutil.getswiftconnection()
def setupcontexttask(flaskapp, celeryapp):
    """Make every Celery task body run inside the Flask application context."""
    base_task = celeryapp.Task

    class ContextTask(base_task):
        # Abstract so Celery does not register this wrapper as a task itself.
        abstract = True

        def __call__(self, *args, **kwargs):
            # Push the Flask app context around the wrapped task body.
            with flaskapp.app_context():
                return base_task.__call__(self, *args, **kwargs)

    celeryapp.Task = ContextTask
    return
@flask.route('/')
def index():
    # Plain-text placeholder landing page.
    return 'Index Page'
@flask.route('/about')
def about():
    # Plain-text description of the service.
    return 'This site provides a web service to count swedish pronouns in tweets.'
@flask.route('/count_pronouns')
def count_pronouns_usage():
    """Describe how to use the pronoun-counting endpoints.

    Returns a small JSON object mapping a description to an example URL.
    """
    # Bug fix: url_for() takes an endpoint *name* plus keyword arguments; the
    # original called count_pronouns(...) directly and passed its return
    # value (a Response) to url_for, which fails at request time.
    return jsonify({
        'Count pronouns in file \'tweets/tweets_0.txt\' bucket':
            url_for('count_pronouns', bucket_name='tweets', file_name='tweets_0.txt'),
    })
@flask.route('/count_pronouns/')
@flask.route('/count_pronouns//<file_name>')
@flask.route('/count_pronouns/<bucket_name>/')
@flask.route('/count_pronouns/<bucket_name>/<file_name>')
def count_pronouns(bucket_name='tweets', file_name=None):
    """Count pronouns in a single object, or in every object of a bucket.

    With no file name the whole bucket is processed; either way the result
    is returned as JSON.
    """
    if not bucket_name:
        return 'Must specify a bucket.'
    if file_name:
        return jsonify(count_pronouns_in_bucket_file(bucket_name, file_name))
    return jsonify(count_pronouns_in_bucket(bucket_name))
#return json.dumps(pcountresults)
def count_pronouns_in_bucket(bucket_name):
    """Fan a pronoun-count Celery task out over every object in *bucket_name*.

    Blocks until all tasks finish, then returns a dict with the per-file
    partial results, their combined totals, and the wall-clock time taken.
    """
    t1 = timeit.default_timer()
    global sc
    (resp_header, obj_list) = sc.get_container(bucket_name)
    # Dispatch one task per object and wait for the whole group.
    taskgroup = group(celerytasks.count_pronouns.s(obj['name'], bucket_name) for obj in obj_list)()
    partialresults = taskgroup.get()
    return {
        'combined_results': {
            'bucket':bucket_name,
            # Merge the per-file Counter dicts; c.update(pc) returns None, so
            # 'or c' keeps threading the accumulator through reduce.
            'pronoun_counts':dict(reduce(lambda c, pc: c.update(pc) or c, (Counter(pr['pronoun_counts']) for pr in partialresults))),
            'computation_time':reduce(add, (pr['computation_time'] for pr in partialresults)),
            'line_count':reduce(add, (pr['line_count'] for pr in partialresults)),
            'tweet_count':reduce(add, (pr['tweet_count'] for pr in partialresults))
        },
        'partial_results':partialresults,
        'real_time_taken':timeit.default_timer()-t1
    }
    # Dead code below (earlier polling implementation), kept by the author.
    # pcounttasks = {}
    # for obj in obj_list:
    #     filename = obj['name']
    #     pcounttasks[filename] = (celerytasks.count_pronouns.delay(filename))
    # pcountresults = {}
    # for pctKey, pctVal in pcounttasks.iteritems():
    #     while not pctVal.ready():
    #         time.sleep(1)
    #     pcountresults[pctKey] = pctVal.get()
    # return pcountresults
def count_pronouns_in_bucket_file(bucket_name, file_name):
    """Run the pronoun-count Celery task for one object and block until done."""
    async_result = celerytasks.count_pronouns.apply_async([file_name, bucket_name])
    return async_result.wait()
@flask.route('/pronouncount/api/', methods=['GET'])
def pronoun_count():
    """Count pronouns in every object of the 'tweets' container.

    Dispatches one Celery task per object, polls until each finishes, and
    returns the per-file results as a JSON string.
    """
    global sc
    (resp_header, obj_list) = sc.get_container("tweets")
    pcounttasks = {}
    for obj in obj_list:
        filename = obj['name']
        pcounttasks[filename] = celerytasks.count_pronouns.delay(filename)
    pcountresults = {}
    # Bug fix: dict.iteritems() is Python 2 only; items() works on 2 and 3.
    for pctKey, pctVal in pcounttasks.items():
        # Poll with a 1s backoff until the task completes.
        while not pctVal.ready():
            time.sleep(1)
        pcountresults[pctKey] = pctVal.get()
    return json.dumps(pcountresults)
if __name__ == '__main__':
    # Install the Flask app-context wrapper on Celery tasks before serving.
    setupcontexttask(flask, celery)
    # Listen on all interfaces; debug=True enables the Werkzeug debugger.
    flask.run(host='0.0.0.0',debug=True)
| [
"david.ryman@hotmail.com"
] | david.ryman@hotmail.com |
dc5820795cb85edd778b49c8d81deb89ef8b37bd | f0202a8d38a859d79b720fbc1956ab4ba1c4ffb8 | /django_input_collection/migrations/0012_auto_20220421_2220.py | 9549cf228110d0a8b5075ba045ffaca124ceadd2 | [
"Apache-2.0"
] | permissive | pivotal-energy-solutions/django-input-collection | 534d7a2cdcba4da3922829c739917e2b13fd810c | 20ac7b0bc6a78a7c342ed319a2ac5522e28af976 | refs/heads/master | 2023-06-23T15:06:50.193333 | 2023-06-20T22:00:58 | 2023-06-20T22:00:58 | 146,794,774 | 0 | 0 | Apache-2.0 | 2022-10-12T21:52:07 | 2018-08-30T19:09:19 | Python | UTF-8 | Python | false | false | 2,981 | py | # -*- coding: utf-8 -*-
# Generated by Django 3.2.12 on 2022-04-21 22:20
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds CollectionInstrument.test_requirement_type
    # and widens every model's auto primary key to BigAutoField (Django 3.2's
    # DEFAULT_AUTO_FIELD change). Do not edit by hand.
    dependencies = [
        ("django_input_collection", "0011_auto_20190308_2259"),
    ]
    operations = [
        migrations.AddField(
            model_name="collectioninstrument",
            name="test_requirement_type",
            field=models.CharField(
                choices=[
                    ("all-pass", "All cases must pass"),
                    ("one-pass", "At least one case must pass"),
                    ("all-fail", "All cases must fail"),
                ],
                default="all-pass",
                max_length=20,
            ),
        ),
        migrations.AlterField(
            model_name="boundsuggestedresponse",
            name="id",
            field=models.BigAutoField(
                auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
            ),
        ),
        migrations.AlterField(
            model_name="case",
            name="id",
            field=models.BigAutoField(
                auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
            ),
        ),
        migrations.AlterField(
            model_name="collectedinput",
            name="id",
            field=models.BigAutoField(
                auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
            ),
        ),
        migrations.AlterField(
            model_name="collectioninstrument",
            name="id",
            field=models.BigAutoField(
                auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
            ),
        ),
        migrations.AlterField(
            model_name="collectionrequest",
            name="id",
            field=models.BigAutoField(
                auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
            ),
        ),
        migrations.AlterField(
            model_name="condition",
            name="id",
            field=models.BigAutoField(
                auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
            ),
        ),
        migrations.AlterField(
            model_name="conditiongroup",
            name="id",
            field=models.BigAutoField(
                auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
            ),
        ),
        migrations.AlterField(
            model_name="responsepolicy",
            name="id",
            field=models.BigAutoField(
                auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
            ),
        ),
        migrations.AlterField(
            model_name="suggestedresponse",
            name="id",
            field=models.BigAutoField(
                auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
            ),
        ),
    ]
| [
"steven@pointcircle.com"
] | steven@pointcircle.com |
28ec5ecaa812f96b31614edf419ad219348942f6 | 75f55944d023ebf2531c53d406d17f301e9a0ab8 | /animechat2.py | e53106c2e170d2c81c7d1fc14905e15c5635dd0b | [] | no_license | yugo-salem/helloneko | 3b5aec877c9f9ae22cfe6fb302cd3e9de24f9e41 | 49692fc18f72d6d34f5ce51ddbb5822061bc6e2c | refs/heads/master | 2021-01-11T22:27:02.906980 | 2017-04-05T17:19:10 | 2017-04-05T17:19:10 | 78,963,332 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,767 | py | #!/usr/bin/env python
# encoding: utf-8
# pylint: disable=bad-whitespace
# pylint: disable=missing-docstring
import sys
import os
from datetime import datetime
import time
import locale
import threading
import curses
import requests
from flask import Flask, request, redirect, url_for
from flask.templating import render_template_string
import animechat_templates
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import chatdata
from uimod import uiclass
from slackmod import slackclass
# Shared state between the Flask request handlers (served from a worker
# thread) and the Slack polling loop in main(). Access is unsynchronized;
# NOTE(review): relies on the GIL making single rebinds atomic — confirm.
exitflag=False
messages=[]
msgforpost=None
def hello():
    # Root URL just redirects to the chat page.
    return redirect(url_for("animechat"))
def msgs():
    """Render the message-list iframe; also stops the server on shutdown.

    Polled by the chat page. Once main() has set exitflag, the werkzeug
    development server is asked to shut down so the Flask thread exits.
    """
    global messages
    if exitflag:
        func=request.environ.get('werkzeug.server.shutdown')
        if func:
            func()
    # Annotate each Slack message with a datetime parsed from its 'ts' field.
    # NOTE(review): assumes 'ts' is numeric; Slack usually returns it as a
    # string — confirm slackmod.getmsg converts it.
    for message in messages:
        message["ts_datetime"]=datetime.fromtimestamp(message["ts"])
    return render_template_string(animechat_templates.MESSAGES_IFRAME_TEMPLATE,messages=messages)
def animechat():
    # 'global messages' is declared but messages is never reassigned here;
    # the statement is redundant (harmless).
    global messages
    return render_template_string(animechat_templates.ANIMECHAT_TEMPLATE)
def htmlpost():
global msgforpost
msgforpost=request.args["msg"]
print(request.args)
print(msgforpost)
#return redirect(request.referrer)
#return redirect(url_for("animechat"))
#return "posted"
return redirect("/animechat")
def main():
locale.setlocale(locale.LC_ALL,"")
app=Flask("nekochat")
app.add_url_rule('/', view_func=hello)
app.add_url_rule('/animechat', view_func=animechat)
app.add_url_rule('/msgs', view_func=msgs)
app.add_url_rule('/postmsg', view_func=htmlpost)
t=threading.Thread(target=app.run)
t.start()
sbot=slackclass()
sbot.token=chatdata.token
sbot.test()
print("username="+sbot.username)
print("teamname="+sbot.teamname)
print("userid="+sbot.user_id)
print("teamid="+sbot.team_id)
#sbot.post("@yugosalem","test123")
sbot.get_user_id()
friendid=sbot.userdict[chatdata.friendname]
print("friendid="+friendid)
friendchannel=sbot.get_im_id(friendid)
print("friendchannel="+friendchannel)
sbot.getmsg_print(friendchannel,3)
global exitflag
global messages
global msgforpost
try:
while not exitflag:
print("update")
if msgforpost:
sbot.post(friendchannel,msgforpost)
msgforpost=None
messages=sbot.getmsg(friendchannel,30)
time.sleep(3);
except KeyboardInterrupt:
exitflag=True
#time.sleep(1)
#ui=uiclass()
#ui.uichannel=friendchannel
#ui.cursesinit()
#ui.getmsgfunc=sbot.getmsg
#ui.postmsgfunc=sbot.post
#ui.mainloop()
#ui.cursesdone()
print("end")
if __name__ == '__main__':
main()
| [
"cutenekochan777@gmail.com"
] | cutenekochan777@gmail.com |
d70f60641b1e6d7ecea83be0cb6d27700661fb24 | fe2753d5f7638f50dedd847a434461b935b90235 | /trail/spiders/easy.py | e83661159bc1f90893191e0bac89812e510fb75d | [] | no_license | teamzot/Crawler | eab63af2fcedfd507281eb577dde54ceae0537e8 | 634c76809d4f38123d38e9c4342c40bb2d34cac3 | refs/heads/master | 2020-03-31T18:27:43.097802 | 2018-10-15T20:58:27 | 2018-10-15T20:58:27 | 152,459,597 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,682 | py | # -*- coding: utf-8 -*-
import scrapy
import datetime
import logging
from trail.items import TrailItem
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from scrapy.loader import ItemLoader
from scrapy.http import Request
class EasySpider(CrawlSpider):
name = 'easy'
allowed_domains = ['www.1point3acres.com']
start_urls = ['http://www.1point3acres.com/bbs/forum-237-1.html']
rules = (
Rule(LinkExtractor(restrict_xpaths='//a[@class = "nxt"]')),
Rule(LinkExtractor(restrict_xpaths='//a[@class = "s xst"]'),callback='parse_item'),
)
def parse_item(self, response):
item = TrailItem()
item['PostTitle'] = response.xpath('//*[@id="thread_subject"]/text()').extract()
item['PostUser'] = response.xpath('//*[@class="authi"]/a/text()').extract_first()
item['PostTime'] = response.xpath('//*[@class="authi"]//span/@title').extract_first()
item['URL'] = response.url
item['SpiderTime'] = datetime.datetime.now()
table = response.xpath('//table[@class="cgtl mbm"]//td/text()').extract()
if len(table) == 27:
item['Year'] = table[0].strip()
item['Season'] = table[1].strip()
item['Source'] = table[2].strip()
item['JobFunction'] = table[3].strip()
item['JobType'] = table[4].strip()
item['Degree'] = table[5].strip()
item['Experience'] = table[6].strip()
item['ExperienceLevel'] = table[7].strip()
# item['Group'] = table[8].strip()
# item['InterestPoint'] = table[9].strip()
# item['Title'] = table[10].strip()
item['Level'] = table[11].strip()
item['PositionType'] = table[12].strip()
item['CompanyName'] = table[13].strip()
item['CompanyAltName'] = table[14].strip()
item['Area'] = table[15].strip()
# item['BaseSalary'] = table[16].strip()
# item['Equity'] = table[17].strip()
item['EquitySchedule'] = table[18].strip()
# item['SignBonus'] = table[19].strip()
# item['YearlyBonus'] = table[20].strip()
item['RelocationFee'] = table[21].strip()
item['OtherOffer'] = table[22].strip()
item['GreenCard'] = table[23].strip()
item['Satisfaction'] = table[24].strip()
item['PromotionPkg'] = table[25].strip()
item['AnnualRefresh'] = table[26].strip()
return item
else:
logging.warning(response.body)
return item | [
"zhujiaqi.apply@gmail.com"
] | zhujiaqi.apply@gmail.com |
a5ea641c931a8768c01f47ceb5f09ed009af4204 | 633944f913050debf0764c2a29cf3e88f912670e | /v8/depot_tools/bootstrap-3.8.0b1.chromium.1_bin/python3/lib/python3.8/encodings/mbcs.py | 5f56e6e4b8e55e8e1447875202fe15cbdaa2a26d | [
"BSD-3-Clause",
"bzip2-1.0.6",
"SunPro",
"Apache-2.0"
] | permissive | bopopescu/V8-lgtm | 0474c2ff39baf754f556ef57619ceae93e7320fd | da307e2f7abfca5fa0e860a809de6cd07fd1b72b | refs/heads/master | 2022-02-16T19:10:54.008520 | 2019-09-25T07:51:13 | 2019-09-25T07:51:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 65 | py | ../../../../.cipd/pkgs/2/_current/lib/python3.8/encodings/mbcs.py | [
"jundong.xjd@antfin.com"
] | jundong.xjd@antfin.com |
ed3b09788c37f6f93130edadcb1bfe343d812d9b | 9d6faca0255f1772d065229cb8bd051fb3b4da9a | /model_mnist.py | 99b9536042a48781380498c33f5948d3c769f90e | [
"MIT"
] | permissive | NeuralNetworkingTechnologies/Conditional-Gans | 467b10a4e18511208fa89a393522621814c4828c | bd077c7bf3f193cebec02c9222b8147334133bc9 | refs/heads/master | 2021-08-18T16:32:31.751111 | 2017-11-23T08:50:00 | 2017-11-23T08:50:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,412 | py | from utils import load_mnist
from utils import save_images
from utils import vis_square
from utils import sample_label
import cv2
from ops import conv2d
from ops import lrelu
from ops import de_conv
from ops import fully_connect
from ops import conv_cond_concat
from ops import batch_normal
import tensorflow as tf
import numpy as np
learning_rate = 0.0002
batch_size = 64
EPOCH = 5
display_step = 1
sample_size = 100
y_dim = 10
channel = 1
def getNext_batch(input , data_y , batch_num):
return input[(batch_num)*batch_size : (batch_num + 1)*batch_size] \
, data_y[(batch_num)*batch_size : (batch_num + 1)*batch_size]
def shuffle_data(input , data_y):
random_permutation = np.random.permutation(len(input))
return input[random_permutation], data_y[random_permutation]
def dcgan(operation , data_name , output_size , sample_path , log_dir , model_path , visua_path , sample_num = 64):
if data_name == "mnist":
print("you use the mnist dataset")
data_array , data_y = load_mnist(data_name)
sample_z = np.random.uniform(-1 , 1 , size = [sample_num , 100])
y = tf.placeholder(tf.float32, [None , y_dim])
images = tf.placeholder(tf.float32, [batch_size, output_size, output_size, channel])
z = tf.placeholder(tf.float32, [None , sample_size])
z_sum = tf.summary.histogram("z", z)
fake_images = gern_net(batch_size, z , y , output_size)
G_image = tf.summary.image("G_out", fake_images)
sample_img = sample_net(sample_num , z , y , output_size)
##the loss of gerenate network
D_pro , D_logits = dis_net(images, y , weights, biases , False)
D_pro_sum = tf.summary.histogram("D_pro", D_pro)
G_pro, G_logits = dis_net(fake_images , y , weights, biases , True)
G_pro_sum = tf.summary.histogram("G_pro", G_pro)
D_fake_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.zeros_like(G_pro), logits=G_logits))
real_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(D_pro), logits=D_logits))
G_fake_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(G_pro),logits=G_logits))
loss = real_loss + D_fake_loss
loss_sum = tf.summary.scalar("D_loss", loss)
G_loss_sum = tf.summary.scalar("G_loss", G_fake_loss)
merged_summary_op_d = tf.summary.merge([loss_sum, D_pro_sum])
merged_summary_op_g = tf.summary.merge([G_loss_sum, G_pro_sum, G_image, z_sum])
t_vars = tf.trainable_variables()
d_var = [var for var in t_vars if 'dis' in var.name]
g_var = [var for var in t_vars if 'gen' in var.name]
saver = tf.train.Saver()
#if train
if operation == 0:
opti_D = tf.train.AdamOptimizer(learning_rate=learning_rate , beta1=0.5).minimize(loss , var_list=d_var)
opti_G = tf.train.AdamOptimizer(learning_rate=learning_rate , beta1=0.5).minimize(G_fake_loss , var_list=g_var)
init = tf.global_variables_initializer()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
sess.run(init)
summary_writer = tf.summary.FileWriter(log_dir , graph=sess.graph)
batch_num = 0
e = 0
step = 0
while e <= EPOCH:
data_array , data_y = shuffle_data(data_array, data_y)
while batch_num < len(data_array) / batch_size:
step = step + 1
realbatch_array , real_labels = getNext_batch(data_array , data_y , batch_num)
#Get the z
batch_z = np.random.uniform(-1 , 1 , size=[batch_size , sample_size])
#batch_z = np.random.normal(0 , 0.2 , size=[batch_size , sample_size])
_, summary_str = sess.run([opti_D, merged_summary_op_d],
feed_dict={images:realbatch_array, z:batch_z , y:real_labels})
summary_writer.add_summary(summary_str , step)
_, summary_str = sess.run([opti_G, merged_summary_op_g], feed_dict={z: batch_z , y:real_labels})
summary_writer.add_summary(summary_str , step)
batch_num += 1
# average_loss += loss_value
if step % display_step == 0:
D_loss = sess.run(loss , feed_dict = {images:realbatch_array , z:batch_z , y:real_labels})
fake_loss = sess.run(G_fake_loss , feed_dict = {z: batch_z , y:real_labels})
print("EPOCH %d step %d: D: loss = %.7f G: loss=%.7f " % (e , step , D_loss , fake_loss))
if np.mod(step , 50) == 1:
print("sample!")
sample_images = sess.run(sample_img , feed_dict={z:sample_z , y:sample_label()})
save_images(sample_images , [8 , 8] , './{}/train_{:02d}_{:04d}.png'.format(sample_path , e , step))
save_path = saver.save(sess, model_path)
e = e + 1
batch_num = 0
save_path = saver.save(sess , model_path)
print "Model saved in file: %s" % save_path
#test
elif operation == 1:
print("Test")
init = tf.initialize_all_variables()
with tf.Session() as sess:
sess.run(init)
saver.restore(sess , model_path)
sample_z = np.random.uniform(1 , -1 , size=[sample_num , 100])
output = sess.run(sample_img , feed_dict={z:sample_z , y:sample_label()})
save_images(output , [8 , 8] , './{}/test{:02d}_{:04d}.png'.format(sample_path , 0 , 0))
image = cv2.imread('./{}/test{:02d}_{:04d}.png'.format(sample_path , 0 , 0) , 0)
cv2.imshow( "test" , image)
cv2.waitKey(-1)
print("Test finish!")
#visualize
else:
print("Visualize")
init = tf.initialize_all_variables()
with tf.Session() as sess:
sess.run(init)
saver.restore(sess, model_path)
# visualize the weights 1 or you can change weights_2 .
conv_weights = sess.run([tf.get_collection('weight_2')])
vis_square(visua_path , conv_weights[0][0].transpose(3, 0, 1, 2), type=1)
# visualize the activation 1
ac = sess.run([tf.get_collection('ac_2')], feed_dict={images: data_array[:64], z:sample_z , y:sample_label()})
vis_square(visua_path , ac[0][0].transpose(3, 1, 2, 0), type=0)
print("the visualization finish!")
else:
print("other dataset!")
#####generate network
weights2 = {
'wd': tf.Variable(tf.random_normal([sample_size + y_dim , 1024] , stddev=0.02) , name='genw1') ,
'wc1': tf.Variable(tf.random_normal([1024 + y_dim , 7*7*2*64], stddev=0.02) , name='genw2'),
'wc2': tf.Variable(tf.random_normal([5 , 5 , 128 , 138], stddev=0.02) , name='genw3'),
'wc3': tf.Variable(tf.random_normal([5 , 5 , channel , 138], stddev=0.02) , name='genw4') ,
}
biases2 = {
'bd': tf.Variable(tf.zeros([1024]) , name='genb1') ,
'bc1': tf.Variable(tf.zeros([7*7*2*64]) , name='genb2'),
'bc2': tf.Variable(tf.zeros([128]) , name='genb3'),
'bc3': tf.Variable(tf.zeros([channel]) , name='genb4'),
}
def gern_net(batch_size , z , y , output_size):
yb = tf.reshape(y, shape=[batch_size, 1, 1, y_dim])
z = tf.concat([z , y] , 1)
c1 , c2 = output_size/4 , output_size/2
#10 stand for the num of labels
d1 = fully_connect(z , weights2['wd'] , biases2['bd'])
d1 = batch_normal(d1 , scope="genbn1")
d1 = tf.nn.relu(d1)
d1 = tf.concat([d1 , y] , 1)
d2 = fully_connect(d1 , weights2['wc1'] , biases2['bc1'])
d2 = batch_normal(d2 , scope="genbn2")
d2 = tf.nn.relu(d2)
d2 = tf.reshape(d2 , [batch_size , c1 , c1 , 64*2])
d2 = conv_cond_concat(d2 , yb)
d3 = de_conv(d2 , weights2['wc2'] , biases2['bc2'] , out_shape=[batch_size , c2 , c2 , 128])
d3 = batch_normal(d3 , scope="genbn3")
d3 = tf.nn.relu(d3)
d3 = conv_cond_concat(d3 , yb)
d4 = de_conv(d3 , weights2['wc3'] , biases2['bc3'] , out_shape=[batch_size , output_size , output_size , 1])
return tf.nn.sigmoid(d4)
def sample_net(batch_size , z , y, output_size):
yb = tf.reshape(y, shape=[batch_size, 1, 1, y_dim])
z = tf.concat([z, y], 1)
c1, c2 = output_size / 4, output_size / 2
# 10 stand for the num of labels
d1 = fully_connect(z, weights2['wd'], biases2['bd'])
d1 = batch_normal(d1, scope="genbn1" , reuse=True)
d1 = tf.nn.relu(d1)
d1 = tf.concat([d1, y], 1)
d2 = fully_connect(d1, weights2['wc1'], biases2['bc1'])
d2 = batch_normal(d2, scope="genbn2" , reuse=True)
d2 = tf.nn.relu(d2)
d2 = tf.reshape(d2, [batch_size, c1, c1, 64 * 2])
d2 = conv_cond_concat(d2, yb)
d3 = de_conv(d2, weights2['wc2'], biases2['bc2'], out_shape=[batch_size, c2, c2, 128])
d3 = batch_normal(d3, scope="genbn3" , reuse=True)
d3 = tf.nn.relu(d3)
d3 = conv_cond_concat(d3, yb)
d4 = de_conv(d3, weights2['wc3'], biases2['bc3'], out_shape=[batch_size, output_size, output_size, 1])
return tf.nn.sigmoid(d4)
######### discriminent_net
weights = {
'wc1': tf.Variable(tf.random_normal([5 , 5 , 11 , 10], stddev=0.02) , name='dis_w1'),
'wc2': tf.Variable(tf.random_normal([5 , 5 , 20 , 64], stddev=0.02) , name='dis_w2'),
'wc3' : tf.Variable(tf.random_normal([64*7*7 + y_dim , 1024] , stddev=0.02) , name='dis_w3') ,
'wd' : tf.Variable(tf.random_normal([1024 + y_dim , channel] , stddev=0.02) , name='dis_w4')
}
biases = {
'bc1': tf.Variable(tf.zeros([10]) , name = 'dis_b1') ,
'bc2': tf.Variable(tf.zeros([64]) , name = 'dis_b2'),
'bc3' : tf.Variable(tf.zeros([1024]) ,name = 'dis_b3') ,
'bd' : tf.Variable(tf.zeros([channel]) ,name= 'dis_b4')
}
def dis_net(data_array , y , weights , biases , reuse=False):
# mnist data's shape is (28 , 28 , 1)
yb = tf.reshape(y , shape=[batch_size, 1 , 1 , y_dim])
# concat
data_array = conv_cond_concat(data_array , yb)
conv1 = conv2d(data_array , weights['wc1'] , biases['bc1'])
tf.add_to_collection('weight_1', weights['wc1'])
conv1 = lrelu(conv1)
conv1 = conv_cond_concat(conv1 , yb)
tf.add_to_collection('ac_1' , conv1)
conv2 = conv2d(conv1 , weights['wc2'] , biases['bc2'])
conv2 = batch_normal(conv2 , scope="dis_bn1" , reuse=reuse)
conv2 = lrelu(conv2)
tf.add_to_collection('weight_2', weights['wc2'])
tf.add_to_collection('ac_2', conv2)
conv2 = tf.reshape(conv2 , [batch_size , -1])
conv2 = tf.concat([conv2 , y] , 1)
f1 = fully_connect(conv2 , weights['wc3'] , biases['bc3'])
f1 = batch_normal(f1 , scope="dis_bn2" , reuse=reuse)
f1 = lrelu(f1)
f1 = tf.concat([f1 , y] , 1)
out = fully_connect(f1 , weights['wd'] , biases['bd'])
return tf.nn.sigmoid(out) , out
| [
"noreply@github.com"
] | noreply@github.com |
f2d83f9c85e10655fd77903e001b57c1b6743ce8 | eeb06c4643850920a4b726631f2c5a8dadc3b920 | /consumer.py | 473f636e5e505ae4ecbdb417bf0413a405b94efd | [] | no_license | AlanFil/flask_project | 483fe3b94b3a6b658207616f3096c13906a6263f | 133be7e3320d57bdd5688909850d5bdc3e03af3b | refs/heads/master | 2023-03-29T18:45:59.596160 | 2021-04-08T19:28:13 | 2021-04-08T19:28:13 | 355,698,694 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,234 | py | import json
import pika
from main import Product, db
params = pika.URLParameters('amqps://zobyksdf:tEXN4vKXrQskmY9Lmi39zqk5IcdHzkSE@kangaroo.rmq.cloudamqp.com/zobyksdf')
connection = pika.BlockingConnection(params)
channel = connection.channel()
channel.queue_declare(queue='main')
def callback(ch, method, propreties, body):
print('Received in main')
data = json.loads(body)
print(data)
if propreties.content_type == 'product_created':
product = Product(id=data['id'], title=data['title'], image=data['image'])
db.session.add(product)
db.session.commit()
print('Product Created')
elif propreties.content_type == 'product_updated':
product = Product.query.get(data['id'])
product.title = data['title']
product.image = data['image']
db.session.commit()
print('Product Updated')
elif propreties.content_type == 'product_deleted':
product = Product.query.get(data['id'])
db.session.delete(product)
db.session.commit()
print('Product Deleted')
channel.basic_consume(queue='main', on_message_callback=callback, auto_ack=True)
print('Started Consuming')
channel.start_consuming()
channel.close()
| [
"maildoalana@gmail.com"
] | maildoalana@gmail.com |
9990f31750b3e822f4d86e256ebb9274b1118e58 | 02f4bd96aa7aba1e5e2734cd89b1db57afac0c51 | /ui/locker.py | c792beda4a65fb6cd2b5deeedee2553274d97429 | [] | no_license | jbadonai/file-encrypter | 6a72678279f4e452732da28eb90252599f11165d | b821daccce9748d2b8b543a767744395ff38cdd0 | refs/heads/main | 2023-04-09T10:36:50.960312 | 2021-04-11T10:14:03 | 2021-04-11T10:14:03 | 356,877,188 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,778 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'locker.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(659, 589)
MainWindow.setMinimumSize(QtCore.QSize(659, 589))
MainWindow.setMaximumSize(QtCore.QSize(659, 589))
MainWindow.setStyleSheet("background-color: rgb(69, 73, 74);\n"
"color: rgb(255, 255, 255);")
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setStyleSheet("background-color: rgb(69, 73, 74);")
self.centralwidget.setObjectName("centralwidget")
self.textInput = QtWidgets.QLineEdit(self.centralwidget)
self.textInput.setGeometry(QtCore.QRect(10, 10, 641, 61))
font = QtGui.QFont()
font.setPointSize(10)
self.textInput.setFont(font)
self.textInput.setStyleSheet("background-color: rgb(60, 63, 65);")
self.textInput.setText("")
self.textInput.setFrame(False)
self.textInput.setAlignment(QtCore.Qt.AlignCenter)
self.textInput.setObjectName("textInput")
self.labelInfo = QtWidgets.QLabel(self.centralwidget)
self.labelInfo.setGeometry(QtCore.QRect(10, 500, 641, 71))
self.labelInfo.setStyleSheet("background-color: rgb(60, 63, 65);")
self.labelInfo.setFrameShape(QtWidgets.QFrame.NoFrame)
self.labelInfo.setAlignment(QtCore.Qt.AlignCenter)
self.labelInfo.setWordWrap(True)
self.labelInfo.setObjectName("labelInfo")
self.progressBar = QtWidgets.QProgressBar(self.centralwidget)
self.progressBar.setGeometry(QtCore.QRect(10, 487, 641, 10))
self.progressBar.setMinimumSize(QtCore.QSize(0, 10))
self.progressBar.setMaximumSize(QtCore.QSize(16777215, 10))
self.progressBar.setProperty("value", 0)
self.progressBar.setAlignment(QtCore.Qt.AlignCenter)
self.progressBar.setObjectName("progressBar")
self.buttonStart = QtWidgets.QPushButton(self.centralwidget)
self.buttonStart.setGeometry(QtCore.QRect(530, 80, 121, 41))
self.buttonStart.setStyleSheet("background-color: rgb(60, 63, 65);")
self.buttonStart.setObjectName("buttonStart")
self.listView = QtWidgets.QTreeWidget(self.centralwidget)
self.listView.setGeometry(QtCore.QRect(10, 132, 641, 351))
self.listView.setStyleSheet("background-color: rgb(60, 63, 65);")
self.listView.setFrameShape(QtWidgets.QFrame.NoFrame)
self.listView.setObjectName("listView")
self.listView.headerItem().setText(0, "1")
self.frame = QtWidgets.QFrame(self.centralwidget)
self.frame.setGeometry(QtCore.QRect(10, 80, 341, 41))
self.frame.setStyleSheet("background-color: rgb(60, 63, 65);")
self.frame.setFrameShape(QtWidgets.QFrame.Box)
self.frame.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame.setObjectName("frame")
self.gridLayout = QtWidgets.QGridLayout(self.frame)
self.gridLayout.setObjectName("gridLayout")
self.radioEncryptDecrypt = QtWidgets.QRadioButton(self.frame)
self.radioEncryptDecrypt.setObjectName("radioEncryptDecrypt")
self.gridLayout.addWidget(self.radioEncryptDecrypt, 0, 2, 1, 1)
self.radioEncrypt = QtWidgets.QRadioButton(self.frame)
self.radioEncrypt.setObjectName("radioEncrypt")
self.gridLayout.addWidget(self.radioEncrypt, 0, 0, 1, 1)
self.radioDecrypt = QtWidgets.QRadioButton(self.frame)
self.radioDecrypt.setObjectName("radioDecrypt")
self.gridLayout.addWidget(self.radioDecrypt, 0, 1, 1, 1)
self.frame_2 = QtWidgets.QFrame(self.centralwidget)
self.frame_2.setGeometry(QtCore.QRect(360, 80, 121, 41))
self.frame_2.setStyleSheet("background-color: rgb(60, 63, 65);")
self.frame_2.setFrameShape(QtWidgets.QFrame.Box)
self.frame_2.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame_2.setObjectName("frame_2")
self.gridLayout_2 = QtWidgets.QGridLayout(self.frame_2)
self.gridLayout_2.setObjectName("gridLayout_2")
self.checkBoxEncryptOnlyFiles = QtWidgets.QCheckBox(self.frame_2)
self.checkBoxEncryptOnlyFiles.setChecked(True)
self.checkBoxEncryptOnlyFiles.setObjectName("checkBoxEncryptOnlyFiles")
self.gridLayout_2.addWidget(self.checkBoxEncryptOnlyFiles, 0, 0, 1, 1)
self.checkBoxRenameOutput = QtWidgets.QCheckBox(self.centralwidget)
self.checkBoxRenameOutput.setGeometry(QtCore.QRect(20, 570, 241, 20))
self.checkBoxRenameOutput.setObjectName("checkBoxRenameOutput")
self.checkBoxSetPassword = QtWidgets.QCheckBox(self.centralwidget)
self.checkBoxSetPassword.setGeometry(QtCore.QRect(300, 570, 241, 20))
self.checkBoxSetPassword.setObjectName("checkBoxSetPassword")
MainWindow.setCentralWidget(self.centralwidget)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.textInput.setPlaceholderText(_translate("MainWindow", "Drag and Drop Files / Folders to Lock or unlocked here...."))
self.labelInfo.setText(_translate("MainWindow", "Info"))
self.buttonStart.setText(_translate("MainWindow", "Start"))
self.radioEncryptDecrypt.setText(_translate("MainWindow", "Encrypt/Decrypt"))
self.radioEncrypt.setText(_translate("MainWindow", "Encrypt"))
self.radioDecrypt.setText(_translate("MainWindow", "Decrypt"))
self.checkBoxEncryptOnlyFiles.setText(_translate("MainWindow", "Files Only"))
self.checkBoxRenameOutput.setText(_translate("MainWindow", "Rename Output(random name)"))
self.checkBoxSetPassword.setToolTip(_translate("MainWindow", "<html><head/><body><p><span style=\" font-weight:600;\">Check to</span>: <span style=\" font-style:italic;\">Set Encryption/Decription Password for curent task.</span></p><p><span style=\" font-weight:600;\">Uncheck to</span>:<span style=\" font-style:italic;\"> Use default Password.</span></p><p><span style=\" font-weight:600; text-decoration: underline;\">Warning!</span></p><p><span style=\" font-style:italic;\">Don\'t forget the password used for encryption. Your file will not be able to be decrypted if you forget password.</span></p><p><br/></p></body></html>"))
self.checkBoxSetPassword.setText(_translate("MainWindow", "Set Encrypting/Decrypting Password"))
| [
"jba_onlinework@yahoo.com"
] | jba_onlinework@yahoo.com |
e4d38da92d86aa517c776e552be806858ea7e31e | 948d84d2e3fc04e353a11384d8570308174242f5 | /17-Numpy/numpy-indexing.py | 11653d3652d5b8b607738f0216cf7655bc401292 | [] | no_license | omerfarukcelenk/PythonMaster | a0084a800b8a41cd2ad538a7ca3687c26dc679ec | 0db8f8b0ea2e1c2d810c542068cfcf1a3615f581 | refs/heads/main | 2023-04-16T17:42:05.501904 | 2021-04-26T21:19:27 | 2021-04-26T21:19:27 | 361,896,109 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 604 | py | import numpy as np
numbers = np.array([0,5,10,15,20,25,50,75])
result = numbers[5]
result = numbers[-1]
result = numbers[0:3]
result = numbers[:3]
result = numbers[3:]
result = numbers[::]
result = numbers[::-1]
numbers2 = np.array([[0,5,10],[15,20,25],[50,75,85]])
result = numbers2[0]
result = numbers2[2]
result = numbers2[0,2]
result = numbers2[2,1]
result = numbers2[:,2]
result = numbers2[:,0]
result = numbers2[:,0:2]
result = numbers2[-1,:]
result = numbers2[:2,:2]
# print(result)
arr1 = np.arange(0,10)
# arr2 = arr1 # referans
arr2 = arr1.copy()
arr2[0] = 20
print(arr1)
print(arr2)
| [
"omerfar0133@gmail.com"
] | omerfar0133@gmail.com |
19ab53e2d0a45af4d68d2fe50e6b0e1e19b6ffd2 | 546199ff1eb5d4b5dbedf98cee9f8010857f4280 | /retrain_mixsvgp.py | a20ed065833547d877b5202e17435cd1c875e2a5 | [] | no_license | karltayeb/ipsc | 854db1e08fbb29cd97e38f288484d0e8a5d18200 | 3aa76431cda460c44baeea832b4897f5253053a6 | refs/heads/master | 2020-03-22T23:00:42.821625 | 2018-09-30T22:10:49 | 2018-09-30T22:10:49 | 140,784,292 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,139 | py | import numpy as np
import gpflow
import gpflow.multioutput.kernels as mk
import gpflow.multioutput.features as mf
from MixtureSVGP import MixtureSVGP, generate_updates
import pickle
from utils import load_data
import sys
import os
minibatch_size = 10000
grad_iters = 50
model_path = sys.argv[1]
params, assignments, elbos = pickle.load(open('output/models/mixsvgp_K2_L100_28369515', 'rb'))
pi, psi, rho = assignments['pi'], assignments['psi'], assignments['rho']
Phi, Lambda, Gamma = assignments['Phi'], assignments['Lambda'], assignments['Gamma']
N, K = Phi.shape
G, L = Lambda.shape
n_iters = 10
normalized_data_df, x, data_dict = load_data(
'data/quantile_normalized_no_projection.txt')
n_lines, n_samples, n_genes = x.shape
y = x.transpose(0, 2, 1)
T = n_samples
y = y[:N, :G, :]
x = np.tile(np.arange(T).astype(np.float64), (N, G, 1))
# create update functions
compute_weights, update_assignments = generate_updates(N, G, K, L, T)
mask = ~np.isnan(y.reshape(-1, 1)).squeeze()
num_data = mask.sum()
num_clusters = K * L + L
minibatch_size = np.minimum(num_data, minibatch_size)
X = x.reshape(-1, 1)[mask]
Y = y.reshape(-1, 1)[mask]
weights = compute_weights(Phi, Lambda, Gamma)
_, weight_idx, = np.unique(
np.tile(np.arange(N * G).reshape(
(N, G))[:, :, None], T).reshape(-1, 1)[mask], return_inverse=True)
# create model
kernel = mk.SharedIndependentMok(gpflow.kernels.RBF(1), num_clusters)
feature = mf.SharedIndependentMof(
gpflow.features.InducingPoints(np.arange(T).astype(
np.float64).reshape(-1, 1)))
m = MixtureSVGP(X, Y, weight_idx,
kern=kernel,
num_clusters=num_clusters, num_data=num_data,
likelihood=gpflow.likelihoods.Gaussian(),
feat=feature, minibatch_size=minibatch_size)
m.feature.feat.Z.trainable = False
m.assign(params)
# optimize model parameters
opt = gpflow.train.AdamOptimizer()
for _ in range(n_iters):
opt.minimize(m, maxiter=grad_iters, feed_dict={m.weights: weights})
out_path = 'model'
elbos.append(m.compute_log_likelihood(feed_dict={m.weights: weights}))
# save model
with open(model_path, 'wb') as f:
pickle.dump([m.read_trainables(), params, elbos], f)
for _ in range(n_iters):
# update assignments and mixture weights
Phi, Lambda, Gamma = update_assignments(
m, x, y, pi, psi, rho, Phi, Lambda, Gamma)
pi = Phi.sum(axis=0) / Phi.sum()
psi = Lambda.sum(axis=0) / Lambda.sum()
params = {'pi': pi, 'psi': psi, 'rho': rho,
'Phi': Phi, 'Lambda': Lambda, 'Gamma': Gamma}
# recompute weights
weights = compute_weights(Phi, Lambda, Gamma)
# reassign model data
elbos.append(m.compute_log_likelihood(feed_dict={m.weights: weights}))
print(elbos[-1])
# optimize gp parameters
opt = gpflow.train.AdamOptimizer()
opt.minimize(m, maxiter=grad_iters, feed_dict={m.weights: weights})
elbos.append(m.compute_log_likelihood(feed_dict={m.weights: weights}))
print(elbos[-1])
# save model
with open(model_path, 'wb') as f:
pickle.dump([m.read_trainables(), params, elbos], f)
| [
"karl.tayeb@gmail.com"
] | karl.tayeb@gmail.com |
4bee933c2f4a4be5274281cbf4d3a08f94853e73 | 2893069a3532da77c76c76c342c3b33d2c096f06 | /aoc2020/solvers/implementations/day20.py | 02865916888e3b15f96e97a211599283ba92c14e | [] | no_license | maxclaey/AoC-2020 | 0b26e5d46d54f569a31c4c653d642804a95d64db | 2ace7a478634af0ba0b4b264932cb233b3e57352 | refs/heads/master | 2023-02-04T20:53:24.355399 | 2020-12-25T08:15:54 | 2020-12-25T08:15:54 | 318,214,252 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,160 | py | import logging
from dataclasses import dataclass
from enum import Enum
from pathlib import Path
from typing import Dict, List, Optional, Tuple
import numpy as np
from aoc2020.solvers import PuzzleSolver, SolverFactory
logger = logging.getLogger("SolverDay20")
@dataclass
class Augmentation:
    """A rigid transform applied to a tile: ``rotation`` quarter-turns via
    ``np.rot90`` followed by optional left/right and up/down flips (see
    ``SolverDay20._augment_matrix`` for the exact application order).
    """

    rotation: int = 0  # number of quarter-turns, passed to np.rot90 as k
    fliplr: bool = False  # flip left/right after rotating
    flipud: bool = False  # flip up/down after rotating
class Direction(Enum):
    """Edge of a tile.

    Each value is the (row, col) index of that edge's neighbour inside a
    3x3 neighbour matrix whose centre (1, 1) represents the tile itself
    (see ``SolverDay20._find_neighbours``).
    """

    TOP = (0, 1)
    BOTTOM = (2, 1)
    LEFT = (1, 0)
    RIGHT = (1, 2)
@SolverFactory.register(day=20)
class SolverDay20(PuzzleSolver):
def __init__(self, input_file: Path):
super().__init__(input_file=input_file)
self.monster = np.array([
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1],
[0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0]
])
    @property
    def demo_result_1(self) -> Optional[int]:
        """Expected part-1 answer for the bundled demo puzzle input."""
        return 20899048083289
    @property
    def demo_result_2(self) -> Optional[int]:
        """Expected part-2 answer for the bundled demo puzzle input."""
        return 273
def _read_file(self) -> Dict[int, np.ndarray]:
tiles: Dict[int, np.ndarray] = {}
conv = {"#": 1, ".": 0}
with self._input_file.open(mode="r") as f:
cur_tile_id: int = 0
cur_lines: List[List[int]] = []
for line in f:
line = line.strip()
if len(line) == 0:
if len(cur_lines) > 0:
tiles[cur_tile_id] = np.asarray(cur_lines)
cur_lines = []
elif line.startswith("Tile "):
cur_tile_id = int(line.split(" ")[1][:-1])
else:
cur_lines.append(list(map(lambda x: conv[x], line)))
if len(cur_lines) > 0:
tiles[cur_tile_id] = np.asarray(cur_lines)
return tiles
def _find_neighbours(self) -> Dict[int, np.ndarray]:
tiles = self._input_data
neighbourmap: Dict[int, np.ndarray] = {}
for tile_id, target_tile in tiles.items():
borders = self._get_borders(tile=target_tile)
# Keep track of the neighbours
neighbours = np.zeros((3, 3), dtype=np.int)
# Loop over all other tiles
for pos_id, pos_tile in tiles.items():
if pos_id == tile_id:
continue
# Get all borders of the candidate tile
lines = list(self._get_borders(pos_tile).values())
# Also consider flipped lines
alllines = lines + list(map(np.flip, lines))
for line in alllines:
for direction, border in borders.items():
if np.array_equal(line, border):
assert neighbours[direction.value] == 0
neighbours[direction.value] = pos_id
neighbourmap[tile_id] = neighbours
return neighbourmap
@staticmethod
def _get_borders(
tile: np.ndarray, augmentation: Augmentation = Augmentation()
) -> Dict[Direction, np.ndarray]:
target_tile = SolverDay20._augment_matrix(
matrix=tile, augmentation=augmentation
)
return {
Direction.TOP: target_tile[0, :],
Direction.BOTTOM: target_tile[-1, :],
Direction.LEFT: target_tile[:, 0],
Direction.RIGHT: target_tile[:, -1]
}
@staticmethod
def _augment_matrix(
matrix: np.ndarray, augmentation: Augmentation = Augmentation()
) -> np.ndarray:
target_matrix = np.copy(matrix)
target_matrix = np.rot90(target_matrix, k=augmentation.rotation)
if augmentation.fliplr:
target_matrix = np.fliplr(target_matrix)
if augmentation.flipud:
target_matrix = np.flipud(target_matrix)
return target_matrix
@staticmethod
def _find_corners(neighbourmap: Dict[int, np.ndarray]) -> List[int]:
corners = []
for tile_id, neighbours in neighbourmap.items():
num_neigh = np.sum(np.minimum(neighbours, 1))
if num_neigh == 2:
corners.append(tile_id)
return corners
@staticmethod
def _find_augmentation(
tile: np.ndarray, line: np.ndarray, direction: Direction
) -> Augmentation:
for rotation in range(4):
for fliplr in range(2):
for flipud in range(2):
augmentation = Augmentation(
rotation=rotation,
fliplr=bool(fliplr),
flipud=bool(flipud)
)
borders = SolverDay20._get_borders(
tile=tile, augmentation=augmentation
)
if np.array_equal(borders[direction], line):
return augmentation
raise ValueError(f"Failed to find augmentation")
def _reconstruct(self) -> np.ndarray:
    """Assemble the full image from the input tiles.

    Picks an arbitrary corner as the top-left tile, then walks the grid,
    placing each tile's right and bottom neighbours in the orientation
    that makes the shared borders line up.  The one-pixel border of every
    tile is stripped before pasting into the output image.

    Raises:
        ValueError: if the number of tiles is not a perfect square.
    """
    tiles = self._input_data
    # Get the size of the (square) tile matrix
    matrix_size = int(np.sqrt(len(tiles)))
    if not matrix_size**2 == len(tiles):
        raise ValueError("Matrix is not square!")
    # Placeholders for reconstruction
    # NOTE: np.int was removed in NumPy 1.24; the builtin int dtype is used.
    ids = np.zeros((matrix_size, matrix_size), dtype=int)
    augmentations: Dict[int, Augmentation] = {}
    # Find the neighbours and corners
    neighbourmap = self._find_neighbours()
    corners = self._find_corners(neighbourmap)
    # Select one of the corners as top-left and define orientation:
    # flip so that its neighbours end up to the right and below.
    top_left_id = corners[0]
    ids[0, 0] = top_left_id
    augmentations[top_left_id] = Augmentation(
        rotation=0,
        fliplr=neighbourmap[top_left_id][Direction.LEFT.value] > 0,
        flipud=neighbourmap[top_left_id][Direction.TOP.value] > 0,
    )
    # Get placeholder for reconstructed image (each tile loses a 1px border)
    tile_size = tiles[top_left_id].shape[0] - 2
    image = np.zeros(
        (matrix_size * tile_size, matrix_size * tile_size), dtype=int
    )
    # Iterate over all tiles to find their bottom and right neighbours
    for r in range(matrix_size):
        for c in range(matrix_size):
            tile_id = ids[r, c]
            augmentation = augmentations[tile_id]
            image[
                r*tile_size:(r+1)*tile_size, c*tile_size:(c+1)*tile_size
            ] = self._augment_matrix(
                matrix=tiles[tile_id], augmentation=augmentation
            )[1:-1, 1:-1]
            # Get the oriented neighbours (the 3x3 neighbour map is rotated
            # and flipped exactly like the tile itself)
            neighbours = self._augment_matrix(
                matrix=neighbourmap[tile_id], augmentation=augmentation
            )
            bottom_neighbour = neighbours[2, 1]
            right_neighbour = neighbours[1, 2]
            # Get the bottom and right borders of the current tile
            borders = self._get_borders(
                tile=tiles[tile_id], augmentation=augmentation
            )
            # Store neighbour information
            if c+1 < matrix_size and ids[r, c+1] == 0:
                ids[r, c+1] = right_neighbour
                augmentations[right_neighbour] = self._find_augmentation(
                    tile=tiles[right_neighbour],
                    line=borders[Direction.RIGHT],
                    direction=Direction.LEFT,
                )
            if r+1 < matrix_size and ids[r+1, c] == 0:
                ids[r+1, c] = bottom_neighbour
                augmentations[bottom_neighbour] = self._find_augmentation(
                    tile=tiles[bottom_neighbour],
                    line=borders[Direction.BOTTOM],
                    direction=Direction.TOP,
                )
    return image
def _search_monsters(self, image: np.ndarray) -> Tuple[int, int]:
    """Count sea monsters in ``image``.

    Slides ``self.monster`` (a 0/1 pattern array) over ``image``; a
    position is a monster when every set monster pixel is also set in the
    image patch.  Overlapping monsters are allowed; covered pixels are
    counted once.

    Returns:
        Tuple (number of monsters found, number of set pixels in ``image``
        not covered by any monster).
    """
    rows = image.shape[0] - self.monster.shape[0] + 1
    cols = image.shape[1] - self.monster.shape[1] + 1
    subtract = np.zeros_like(image)
    monsters = 0
    # Hoisted loop invariant: number of set pixels in the monster pattern.
    monster_pixels = np.sum(self.monster)
    # Slide monster over the image
    for r in range(rows):
        for c in range(cols):
            patch = image[
                r:r+self.monster.shape[0], c:c+self.monster.shape[1]
            ]
            # Count matching monster pixels.
            # NOTE: np.bool was removed in NumPy 1.24; builtin bool is used.
            matched = np.sum(
                np.logical_and(
                    patch.astype(bool), self.monster.astype(bool)
                )
            )
            # If the monster is fully matched, record which pixels it uses
            if matched == monster_pixels:
                subtract[
                    r:r+self.monster.shape[0], c:c+self.monster.shape[1]
                ] += self.monster
                monsters += 1
    # Subtract all monster pixels from the image (each at most once)
    res = image - np.minimum(subtract, 1)
    # Return number of monsters and non-monster pixels
    return monsters, int(np.sum(res))
def solve_1(self) -> int:
    """Task 1: product of the four corner tile ids.

    Returns 0 (after logging an error) when the tiling does not expose
    exactly four corners.
    """
    neighbourmap: Dict[int, np.ndarray] = self._find_neighbours()
    corners = self._find_corners(neighbourmap)
    if len(corners) != 4:
        # Plain string: nothing is interpolated (was a pointless f-string).
        logger.error("No solution found!")
        return 0
    return int(np.prod(corners))
def solve_2(self) -> int:
    """Task 2: water roughness — set pixels not covered by any sea monster.

    Reconstructs the image, then tries orientations until one contains at
    least one monster; returns the remaining pixel count for that one.
    Returns 0 (after logging) when no orientation contains a monster.
    """
    image = self._reconstruct()
    # NOTE: the 4x2x2 grid enumerates the 8 distinct orientations twice;
    # the duplicates are redundant but harmless, so behaviour is kept.
    for rotation in range(4):
        for fliplr in range(2):
            for flipud in range(2):
                img = self._augment_matrix(
                    matrix=image,
                    augmentation=Augmentation(
                        rotation=rotation,
                        fliplr=bool(fliplr),
                        flipud=bool(flipud)
                    )
                )
                num_monsters, remaining_pixels = self._search_monsters(img)
                if num_monsters > 0:
                    return remaining_pixels
    # Plain string: nothing is interpolated (was a pointless f-string).
    logger.error("Could not find a solution for task 2")
    return 0
| [
"maxim.claeys@robovision.eu"
] | maxim.claeys@robovision.eu |
66721f5989f7f1546552f135b1514f55b1b361c8 | 25bc83cf1c829694c6a8fea271060218822bd2c0 | /credTweakAttack/pass2path_model.py | 8f6d6e2560391c9a93af0b4a14c2adb5f7a1187f | [] | no_license | Bijeeta/credtweak | 5608569b5590786e6cdb6d9e3d9dd576c03064cd | c604599bac3267eecce105d35c2b05e90605c3f5 | refs/heads/master | 2022-08-19T11:02:32.182796 | 2022-08-03T20:48:26 | 2022-08-03T20:48:26 | 187,112,821 | 19 | 10 | null | 2020-09-12T19:28:56 | 2019-05-16T23:08:32 | Jupyter Notebook | UTF-8 | Python | false | false | 28,086 | py | '''
pass2path
A variant of seq2seq Encoder-Decoder RNN model that learns pairs of
(password, transition path), where given a password and a transition path, a
new password is generated.
This model is based on JayPark's seq2seq model (Python 2): https://github.com/JayParks/tf-seq2seq
Number of parameters:
m - dimension of embeddings
n - number of hidden units
C_dict - size of characters dictionary
P_dict - size of transitions (paths) dictionary
For a single-stacked LSTM (including bias, each LSTM has 4 gates/layers):
# params = 4*(n*m + n^2 + n)
From input to embedings:
# params = C_dict * m
Softmax:
# params = n * P_dict
In our case:
1. Inputs -> Embeddings: 100 * 200 = 20000 (Encoder)
2. Embeddings -> Layer 1: 4*(200*128 + 128^2 + 128) = 168448 (Encoder)
3. Layer 1 -> Layer 2: 4*(128*128 + 128^2 +128) = 131584 (Encoder)
4. Layer 2 -> Layer 3: 131584 (Encoder)
5. Layer 3 -> Embeddings: 128 * 200 = 25600 (Decoder)
6. Embeddings -> Layer 1: 4*(200*128 + 128^2 + 128) = 168448 (Decoder)
7. Layer 1 -> Layer 2: 4*(128*128 + 128^2 +128) = 131584 (Decoder)
8. Layer 2 -> Layer 3: 131584 (Decoder)
9. Layer 3 -> Softmax: 128 * 12017 = 1538176 (Decoder)
Total # params: 2447008 (2.447M)
'''
# imports
import numpy as np
import math
import tensorflow as tf
import tensorflow.contrib.seq2seq as seq2seq
from tensorflow.python.ops.rnn_cell import GRUCell
from tensorflow.python.ops.rnn_cell import LSTMCell
from tensorflow.python.ops.rnn_cell import MultiRNNCell
from tensorflow.python.ops.rnn_cell import DropoutWrapper, ResidualWrapper, DeviceWrapper
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.layers.core import Dense
from tensorflow.python.util import nest
#from tensorflow.contrib.seq2seq.python.ops import attention_wrapper
from tensorflow.contrib.seq2seq.python.ops import beam_search_decoder
from tensorflow.contrib.seq2seq.python.ops import beam_search_ops
# Globals
# Special vocabulary symbols shared by the encoder/decoder dictionaries.
_GO = '_GO'
EOS = '_EOS' # also function as PAD
UNK = '_UNK'
EXTRA_TOKENS = [_GO, EOS, UNK]
# Integer ids of the special symbols (their positions in EXTRA_TOKENS).
START_TOKEN = EXTRA_TOKENS.index(_GO) # start_token = 0
END_TOKEN = EXTRA_TOKENS.index(EOS) # end_token = 1
UNK_TOKEN = EXTRA_TOKENS.index(UNK)
class Pass2PathModel():
def __init__(self, config, mode):
    '''
    Store hyper-parameters, create step counters, and build the graph.

    mode: 'train' or 'decode'
    config: dictionary consisting of network's parameters
            (cell type, depth, sizes, optimizer settings, beam width, ...);
            config uses tf's flags
    '''
    assert mode.lower() in ['train', 'decode']
    self.config = config
    self.mode = mode.lower()
    # Network architecture parameters
    self.cell_type = config['cell_type']
    self.hidden_units = config['hidden_units']
    self.depth = config['depth']
    #self.attention_type = config['attention_type']
    self.embedding_size = config['embedding_size']
    #self.bidirectional = config.bidirectional
    self.num_encoder_symbols = config['num_encoder_symbols'] # Embedding size
    self.num_decoder_symbols = config['num_decoder_symbols'] # Embedding size
    self.use_residual = config['use_residual']
    #self.attn_input_feeding = config['attn_input_feeding']
    self.use_dropout = config['use_dropout']
    self.keep_prob = 1.0 - config['dropout_rate']
    # Optimization parameters
    self.optimizer = config['optimizer']
    self.learning_rate = config['learning_rate']
    self.max_gradient_norm = config['max_gradient_norm']
    # Step/epoch counters live in the graph so checkpoints preserve them.
    self.global_step = tf.Variable(0, trainable=False, name='global_step')
    self.global_epoch_step = tf.Variable(0, trainable=False, name='global_epoch_step')
    self.global_epoch_step_op = \
        tf.assign(self.global_epoch_step, self.global_epoch_step + 1)
    self.dtype = tf.float16 if config['use_fp16'] else tf.float32 # for faster learning
    # Fed with keep_prob during training and 1.0 during eval/decode.
    self.keep_prob_placeholder = tf.placeholder(self.dtype, shape=[], name='keep_prob')
    # BeamSearch only needed for decoding
    self.use_beamsearch_decode = False
    if (self.mode == 'decode'):
        self.beam_width = config['beam_width']
        self.use_beamsearch_decode = True if self.beam_width > 1 else False
        self.max_decode_step = config['max_decode_step']
    self.build_model()
def build_model(self):
    """Assemble the full graph: placeholders, encoder, decoder, summaries."""
    print("building model..")
    # Building encoder and decoder networks
    self.init_placeholders()
    self.build_encoder()
    self.build_decoder()
    # Merge all the training summaries
    self.summary_op = tf.summary.merge_all()
def init_placeholders(self):
    """Create the input placeholders.

    In train mode, additionally derives the teacher-forcing sequences:
    decoder inputs with _GO prepended and targets with EOS appended.
    """
    # encoder_inputs: [batch_size, max_time_steps]
    self.encoder_inputs = tf.placeholder(dtype=tf.int32,
        shape=(None, None), name='encoder_inputs')
    # encoder_inputs_length: [batch_size]
    self.encoder_inputs_length = tf.placeholder(dtype=tf.int32, shape=(None,), name='encoder_inputs_length')
    # get dynamic batch_size
    self.batch_size = tf.shape(self.encoder_inputs)[0]
    if (self.mode == 'train'):
        # decoder_inputs: [batch_size, max_time_steps]
        self.decoder_inputs = tf.placeholder(dtype=tf.int32, shape=(None, None), name='decoder_inputs')
        # decoder_inputs_length: [batch_size]
        self.decoder_inputs_length = tf.placeholder(dtype=tf.int32, shape=(None,), name='decoder_inputs_length')
        decoder_start_token = tf.ones(shape=[self.batch_size, 1], dtype=tf.int32) * START_TOKEN
        decoder_end_token = tf.ones(shape=[self.batch_size, 1], dtype=tf.int32) * END_TOKEN
        # decoder_inputs_train: [batch_size , max_time_steps + 1]
        # insert _GO symbol in front of each decoder input
        self.decoder_inputs_train = tf.concat([decoder_start_token,
                                               self.decoder_inputs], axis=1)
        # decoder_inputs_length_train: [batch_size]
        self.decoder_inputs_length_train = self.decoder_inputs_length + 1
        # decoder_targets_train: [batch_size, max_time_steps + 1]
        # insert EOS symbol at the end of each decoder input
        self.decoder_targets_train = tf.concat([self.decoder_inputs,
                                                decoder_end_token], axis=1)
def build_encoder(self):
    """Build the encoder: embedding lookup -> input projection -> dynamic RNN."""
    print("building encoder..")
    with tf.variable_scope('encoder'):
        # Building encoder_cell
        self.encoder_cell = self.build_encoder_cell()
        # Initialize encoder_embeddings to have variance=1.
        sqrt3 = math.sqrt(3) # Uniform(-sqrt(3), sqrt(3)) has variance=1.
        initializer = tf.random_uniform_initializer(-sqrt3, sqrt3, dtype=self.dtype)
        self.encoder_embeddings = tf.get_variable(name='embedding',
            shape=[self.num_encoder_symbols, self.embedding_size],
            initializer=initializer, dtype=self.dtype)
        # Embedded_inputs: [batch_size, time_step, embedding_size]
        self.encoder_inputs_embedded = tf.nn.embedding_lookup(params=self.encoder_embeddings, ids=self.encoder_inputs)
        # Input projection layer to feed embedded inputs to the cell
        # ** Essential when use_residual=True to match input/output dims
        input_layer = Dense(self.hidden_units, dtype=self.dtype, name='input_projection')
        # Embedded inputs having gone through input projection layer
        self.encoder_inputs_embedded = input_layer(self.encoder_inputs_embedded)
        # Encode input sequences into context vectors:
        # encoder_outputs: [batch_size, max_time_step, cell_output_size]
        # encoder_state: [batch_size, cell_output_size]
        self.encoder_outputs, self.encoder_last_state = tf.nn.dynamic_rnn(cell=self.encoder_cell, inputs=self.encoder_inputs_embedded,
            sequence_length=self.encoder_inputs_length, dtype=self.dtype,
            time_major=False)
def build_decoder(self):
    """Build the decoder.

    In 'train' mode: a BasicDecoder fed with ground-truth inputs (teacher
    forcing), the masked sequence loss, and the optimizer ops.
    In 'decode' mode: a greedy decoder (beam_width == 1) or a
    BeamSearchDecoder (beam_width > 1) producing predicted token ids.
    """
    print("building decoder...")
    with tf.variable_scope('decoder'):
        # Building decoder_cell and decoder_initial_state
        self.decoder_cell, self.decoder_initial_state = self.build_decoder_cell()
        # Initialize decoder embeddings to have variance=1.
        sqrt3 = math.sqrt(3) # Uniform(-sqrt(3), sqrt(3)) has variance=1.
        initializer = tf.random_uniform_initializer(-sqrt3, sqrt3, dtype=self.dtype)
        self.decoder_embeddings = tf.get_variable(name='embedding',
            shape=[self.num_decoder_symbols, self.embedding_size],
            initializer=initializer, dtype=self.dtype)
        # Input projection layer to feed embedded inputs to the cell
        # ** Essential when use_residual=True to match input/output dims
        input_layer = Dense(self.hidden_units, dtype=self.dtype, name='input_projection')
        # Output projection layer to convert cell_outputs to logits
        output_layer = Dense(self.num_decoder_symbols, name='output_projection')
        if self.mode == 'train':
            # decoder_inputs_embedded: [batch_size, max_time_step + 1,
            # embedding_size]
            self.decoder_inputs_embedded = tf.nn.embedding_lookup(params=self.decoder_embeddings, ids=self.decoder_inputs_train)
            # Embedded inputs having gone through input projection layer
            self.decoder_inputs_embedded = input_layer(self.decoder_inputs_embedded)
            # Helper to feed inputs for training: read inputs from dense
            # ground truth vectors
            training_helper = seq2seq.TrainingHelper(inputs=self.decoder_inputs_embedded,
                sequence_length=self.decoder_inputs_length_train,
                time_major=False,
                name='training_helper')
            training_decoder = seq2seq.BasicDecoder(cell=self.decoder_cell,
                helper=training_helper,
                initial_state=self.decoder_initial_state,
                output_layer=output_layer)
                #output_layer=None)
            # Maximum decoder time_steps in current batch
            max_decoder_length = tf.reduce_max(self.decoder_inputs_length_train)
            (self.decoder_outputs_train, self.decoder_last_state_train,
             self.decoder_outputs_length_train) = (seq2seq.dynamic_decode(decoder=training_decoder,
                output_time_major=False,
                impute_finished=True,
                maximum_iterations=max_decoder_length))
            # More efficient to do the projection on the
            # batch-time-concatenated tensor
            # logits_train: [batch_size, max_time_step + 1,
            # num_decoder_symbols]
            # self.decoder_logits_train =
            # output_layer(self.decoder_outputs_train.rnn_output)
            self.decoder_logits_train = tf.identity(self.decoder_outputs_train.rnn_output)
            # Use argmax to extract decoder symbols to emit
            self.decoder_pred_train = tf.argmax(self.decoder_logits_train, axis=-1,
                name='decoder_pred_train')
            # masks: masking for valid and padded time steps, [batch_size,
            # max_time_step + 1]
            masks = tf.sequence_mask(lengths=self.decoder_inputs_length_train,
                maxlen=max_decoder_length, dtype=self.dtype, name='masks')
            # Computes per word average cross-entropy over a batch
            # Internally calls
            # 'nn_ops.sparse_softmax_cross_entropy_with_logits' by default
            self.loss = seq2seq.sequence_loss(logits=self.decoder_logits_train,
                targets=self.decoder_targets_train,
                weights=masks,
                average_across_timesteps=True,
                average_across_batch=True,)
            # Training summary for the current batch_loss
            tf.summary.scalar('loss', self.loss)
            # Construct graphs for minimizing loss
            self.init_optimizer()
        elif (self.mode == 'decode'):
            # Start_tokens: [batch_size,] `int32` vector
            start_tokens = tf.ones([self.batch_size,], tf.int32) * START_TOKEN
            end_token = END_TOKEN
            def embed_and_input_proj(inputs):
                # Embed predicted ids and project them to the cell input size.
                return input_layer(tf.nn.embedding_lookup(self.decoder_embeddings, inputs))
            if not self.use_beamsearch_decode:
                # Helper to feed inputs for greedy decoding: uses the
                # argmax of the output
                decoding_helper = seq2seq.GreedyEmbeddingHelper(start_tokens=start_tokens,
                    end_token=end_token,
                    embedding=embed_and_input_proj)
                # Basic decoder performs greedy decoding at each time step
                print("building greedy decoder..")
                inference_decoder = seq2seq.BasicDecoder(cell=self.decoder_cell,
                    helper=decoding_helper,
                    initial_state=self.decoder_initial_state,
                    output_layer=output_layer)
            else:
                # Beamsearch is used to approximately find the most likely
                # translation
                print("building beamsearch decoder..")
                inference_decoder = beam_search_decoder.BeamSearchDecoder(cell=self.decoder_cell,
                    embedding=embed_and_input_proj,
                    start_tokens=start_tokens,
                    end_token=end_token,
                    initial_state=self.decoder_initial_state,
                    beam_width=self.beam_width,
                    output_layer=output_layer,)
            (self.decoder_outputs_decode, self.decoder_last_state_decode,
             self.decoder_outputs_length_decode) = (seq2seq.dynamic_decode(decoder=inference_decoder,
                output_time_major=False,
                #impute_finished=True,   # error occurs
                maximum_iterations=self.max_decode_step))
            if not self.use_beamsearch_decode:
                # decoder_outputs_decode.sample_id: [batch_size,
                # max_time_step]
                # Or use argmax to find decoder symbols to emit:
                # self.decoder_pred_decode =
                # tf.argmax(self.decoder_outputs_decode.rnn_output,
                # axis=-1,
                # name='decoder_pred_decode')
                # Here, we use expand_dims to be compatible with the result
                # of the beamsearch decoder
                # decoder_pred_decode: [batch_size, max_time_step, 1]
                # (output_major=False)
                self.decoder_pred_decode = tf.expand_dims(self.decoder_outputs_decode.sample_id, -1)
            else:
                # Use beam search to approximately find the most likely
                # translation
                # decoder_pred_decode: [batch_size, max_time_step,
                # beam_width] (output_major=False)
                self.decoder_pred_decode = self.decoder_outputs_decode.predicted_ids
                self.decoder_pred_scores = self.decoder_outputs_decode.beam_search_decoder_output.scores
def build_single_cell(self):
    """Create one RNN cell (GRU or LSTM) with optional dropout/residual wrappers."""
    constructor = GRUCell if self.cell_type.lower() == 'gru' else LSTMCell
    cell = constructor(self.hidden_units)
    if self.use_dropout:
        cell = DropoutWrapper(cell, dtype=self.dtype,
                              output_keep_prob=self.keep_prob_placeholder,)
    if self.use_residual:
        cell = ResidualWrapper(cell)
    return cell
# Building encoder cell
def build_encoder_cell(self):
    """Stack `depth` single cells into the encoder's MultiRNNCell."""
    # ADD GPU SUPPORT
    stacked = [self.build_single_cell() for _ in range(self.depth)]
    return MultiRNNCell(stacked)
# Building decoder cell and attention. Also returns decoder_initial_state
def build_decoder_cell(self):
    """Build the decoder MultiRNNCell and its initial state.

    The encoder's last state seeds all decoder layers except the top one,
    whose state is reset to zeros.  For beam search, encoder tensors are
    tiled beam_width times along the batch dimension first.
    """
    encoder_outputs = self.encoder_outputs
    encoder_last_state = self.encoder_last_state
    encoder_inputs_length = self.encoder_inputs_length
    # To use BeamSearchDecoder, encoder_outputs, encoder_last_state,
    # encoder_inputs_length
    # needs to be tiled so that: [batch_size, .., ..] -> [batch_size x
    # beam_width, .., ..]
    if self.use_beamsearch_decode:
        print("use beamsearch decoding..")
        encoder_outputs = seq2seq.tile_batch(self.encoder_outputs, multiplier=self.beam_width)
        encoder_last_state = nest.map_structure(lambda s: seq2seq.tile_batch(s, self.beam_width), self.encoder_last_state)
        encoder_inputs_length = seq2seq.tile_batch(self.encoder_inputs_length, multiplier=self.beam_width)
    # Building decoder_cell
    self.decoder_cell_list = [
        self.build_single_cell() for i in range(self.depth)]
    # ADD GPU SUPPORT FOR DISTRIBUION
    decoder_initial_state = encoder_last_state
    # Also if beamsearch decoding is used, the batch_size argument in
    # .zero_state
    # should be ${decoder_beam_width} times to the original batch_size
    batch_size = self.batch_size if not self.use_beamsearch_decode \
        else self.batch_size * self.beam_width
    # Copy the (possibly tiled) encoder state, then zero the top layer.
    initial_state = [state for state in encoder_last_state]
    initial_state[-1] = self.decoder_cell_list[-1].zero_state(batch_size=batch_size, dtype=self.dtype)
    decoder_initial_state = tuple(initial_state)
    return MultiRNNCell(self.decoder_cell_list), decoder_initial_state
def init_optimizer(self):
    """Create the training op.

    Selects the optimizer named by self.optimizer (adadelta/adam/rmsprop,
    falling back to plain SGD), computes gradients of the loss, clips them
    elementwise to [-max_gradient_norm, max_gradient_norm], and applies
    them while incrementing global_step.
    """
    print("setting optimizer..")
    optimizer_name = self.optimizer.lower()
    if optimizer_name == 'adadelta':
        self.opt = tf.train.AdadeltaOptimizer(learning_rate=self.learning_rate)
    elif optimizer_name == 'adam':
        self.opt = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
    elif optimizer_name == 'rmsprop':
        self.opt = tf.train.RMSPropOptimizer(learning_rate=self.learning_rate)
    else:
        # Plain SGD as the fallback optimizer
        self.opt = tf.train.GradientDescentOptimizer(learning_rate=self.learning_rate)
    # Compute gradients of loss w.r.t. all trainable variables
    # (the unused `trainable_params = tf.trainable_variables()` local from
    # the pre-compute_gradients version has been removed)
    gradients = self.opt.compute_gradients(self.loss)
    # Clip each gradient by VALUE to +/- max_gradient_norm
    # (note: despite the parameter's name, this is not global-norm clipping)
    clip_gradients = [(tf.clip_by_value(grad, -self.max_gradient_norm, self.max_gradient_norm), var)
                      for grad, var in gradients if grad is not None]
    # Update the model
    self.updates = self.opt.apply_gradients(clip_gradients, global_step=self.global_step)
def save(self, sess, path, var_list=None, global_step=None):
    """Write a checkpoint of the given variables.

    var_list=None saves the list of all saveable variables.
    """
    checkpoint_path = tf.train.Saver(var_list).save(
        sess, save_path=path, global_step=global_step)
    print('model saved at %s' % checkpoint_path)
def restore(self, sess, path, var_list=None):
    """Load variables from a checkpoint at `path`.

    var_list=None restores the list of all saveable variables.
    """
    tf.train.Saver(var_list).restore(sess, save_path=path)
    print('model restored from %s' % path)
def train(self, sess, encoder_inputs, encoder_inputs_length,
          decoder_inputs, decoder_inputs_length):
    """Run a train step of the model feeding the given inputs.

    Args:
        sess: tensorflow session to use.
        encoder_inputs: a numpy int matrix of [batch_size, max_source_time_steps]
            to feed as encoder inputs
        encoder_inputs_length: a numpy int vector of [batch_size]
            to feed as sequence lengths for each element in the given batch
        decoder_inputs: a numpy int matrix of [batch_size, max_target_time_steps]
            to feed as decoder inputs
        decoder_inputs_length: a numpy int vector of [batch_size]
            to feed as sequence lengths for each element in the given batch
    Returns:
        A pair (loss, summary): the batch loss and the merged training
        summary for this step.
    Raises:
        ValueError: if the model was not built in 'train' mode.
    """
    # Check if the model is 'training' mode
    if self.mode.lower() != 'train':
        raise ValueError("train step can only be operated in train mode")
    input_feed = self.check_feeds(encoder_inputs, encoder_inputs_length,
                                  decoder_inputs, decoder_inputs_length, False)
    # Input feeds for dropout
    input_feed[self.keep_prob_placeholder.name] = self.keep_prob
    output_feed = [self.updates,    # Update Op that does optimization
                   self.loss,       # Loss for current batch
                   self.summary_op] # Training summary
    outputs = sess.run(output_feed, input_feed)
    return outputs[1], outputs[2]   # loss, summary
def eval(self, sess, encoder_inputs, encoder_inputs_length,
         decoder_inputs, decoder_inputs_length):
    """Run an evaluation step (forward pass only, dropout disabled).

    Args:
        sess: tensorflow session to use.
        encoder_inputs: a numpy int matrix of [batch_size, max_source_time_steps]
            to feed as encoder inputs
        encoder_inputs_length: a numpy int vector of [batch_size]
            to feed as sequence lengths for each element in the given batch
        decoder_inputs: a numpy int matrix of [batch_size, max_target_time_steps]
            to feed as decoder inputs
        decoder_inputs_length: a numpy int vector of [batch_size]
            to feed as sequence lengths for each element in the given batch
    Returns:
        A pair (loss, summary) for the given batch.
    """
    input_feed = self.check_feeds(encoder_inputs, encoder_inputs_length,
                                  decoder_inputs, decoder_inputs_length, False)
    # Input feeds for dropout: keep everything during evaluation
    input_feed[self.keep_prob_placeholder.name] = 1.0
    output_feed = [self.loss,       # Loss for current batch
                   self.summary_op] # Merged summary
    outputs = sess.run(output_feed, input_feed)
    return outputs[0], outputs[1] # loss, summary
def predict(self, sess, encoder_inputs, encoder_inputs_length):
    """Decode predicted token ids for a batch (no targets, dropout off).

    Returns:
        GreedyDecoder: [batch_size, max_time_step, 1]
        BeamSearchDecoder: [batch_size, max_time_step, beam_width]
    """
    # To reproduce results we have to keep a constant random seed:
    # tf.set_random_seed(1729)
    # np.random.seed(1729)
    input_feed = self.check_feeds(encoder_inputs, encoder_inputs_length,
                                  decoder_inputs=None, decoder_inputs_length=None,
                                  decode=True)
    # Input feeds for dropout
    input_feed[self.keep_prob_placeholder.name] = 1.0
    output_feed = [self.decoder_pred_decode]
    outputs = sess.run(output_feed, input_feed)
    # GreedyDecoder: [batch_size, max_time_step]
    # BeamSearchDecoder: [batch_size, max_time_step, beam_width]
    return outputs[0]
def predict_scores(self, sess, encoder_inputs, encoder_inputs_length):
    """Decode predictions together with their beam scores.

    Returns:
        A pair (predictions, scores): predictions as in predict(); scores
        is the per-beam sum over time of the beam-search step scores.
    """
    # To reproduce results we have to keep a constant random seed:
    # tf.set_random_seed(1729)
    # np.random.seed(1729)
    input_feed = self.check_feeds(encoder_inputs, encoder_inputs_length,
                                  decoder_inputs=None, decoder_inputs_length=None,
                                  decode=True)
    # Input feeds for dropout
    input_feed[self.keep_prob_placeholder.name] = 1.0
    output_feed = [self.decoder_pred_decode, self.decoder_pred_scores]
    outputs = sess.run(output_feed, input_feed)
    # Sum the step scores over the time axis to score each beam.
    #scores = np.exp(np.sum(outputs[1], axis=1))
    scores = np.sum(outputs[1], axis=1)
    #print(outputs[1].shape)
    #print(scores.shape)
    #print(scores)
    #sanity_check = np.sum(scores, axis=1)
    #print(sanity_check.shape)
    #print(sanity_check)
    # GreedyDecoder: [batch_size, max_time_step]
    # BeamSearchDecoder: [batch_size, max_time_step, beam_width]
    return outputs[0], scores
def check_feeds(self, encoder_inputs, encoder_inputs_length,
                decoder_inputs, decoder_inputs_length, decode):
    """
    Validate batch-size consistency of the inputs and build the feed dict.

    Args:
        encoder_inputs: a numpy int matrix of [batch_size, max_source_time_steps]
            to feed as encoder inputs
        encoder_inputs_length: a numpy int vector of [batch_size]
            to feed as sequence lengths for each element in the given batch
        decoder_inputs: a numpy int matrix of [batch_size, max_target_time_steps]
            to feed as decoder inputs (ignored when decode is True)
        decoder_inputs_length: a numpy int vector of [batch_size]
            to feed as sequence lengths for each element in the given batch
        decode: a scalar boolean that indicates decode mode
    Returns:
        A feed dict mapping placeholder names to the given numpy arrays;
        decoder entries are omitted when decode is True.
    Raises:
        ValueError: if any pair of batch sizes disagrees.
    """
    #print(encoder_inputs)
    input_batch_size = encoder_inputs.shape[0]
    if input_batch_size != encoder_inputs_length.shape[0]:
        raise ValueError("Encoder inputs and their lengths must be equal in their "
            "batch_size, %d != %d" % (input_batch_size, encoder_inputs_length.shape[0]))
    if not decode:
        target_batch_size = decoder_inputs.shape[0]
        if target_batch_size != input_batch_size:
            raise ValueError("Encoder inputs and Decoder inputs must be equal in their "
                "batch_size, %d != %d" % (input_batch_size, target_batch_size))
        if target_batch_size != decoder_inputs_length.shape[0]:
            raise ValueError("Decoder targets and their lengths must be equal in their "
                "batch_size, %d != %d" % (target_batch_size, decoder_inputs_length.shape[0]))
    input_feed = {}
    input_feed[self.encoder_inputs.name] = encoder_inputs
    input_feed[self.encoder_inputs_length.name] = encoder_inputs_length
    if not decode:
        input_feed[self.decoder_inputs.name] = decoder_inputs
        input_feed[self.decoder_inputs_length.name] = decoder_inputs_length
    return input_feed
| [
"bijeeta@deepsec.tech.cornell.edu"
] | bijeeta@deepsec.tech.cornell.edu |
0c44f570aac05f6528e1e1c49e0360d1afcfe04b | 2e153b94076937b230a152ad912f0c4b6810413e | /server/pylanchatd | 097853c5b753c3369ffa5c41a5045cb19db9ce89 | [] | no_license | saurav-malani/Chat-Application | 0784efbfff60f10d4874221f7bd2956b693101d1 | 19ab48215d82d0298f1aab2d1f204dfaffc4681f | refs/heads/master | 2020-03-30T09:49:38.908795 | 2018-10-01T13:36:38 | 2018-10-01T13:36:38 | 151,094,274 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,927 | #!/usr/bin/env python
import time
# Twisted
from twisted.internet.protocol import Protocol, Factory
from twisted.web import server, resource
from twisted.internet import reactor
# Basic Datastructures and Configuration Parser
from User import User
from Users import Users
from ConfigurationParser import Parser
# Encryption
from Crypto.PublicKey import RSA
from os import urandom
APP_NAME = "pylanchatd"
APP_VERSION = "1.2"
# LOG Variables: category codes passed to addText() when logging
LOG_INFO = 1
LOG_SEND = 2
LOG_RECV = 3
LOG_ERR = 4
LOG_CONN = 5
LOG_SERVER = 6
# BROADCAST Message variables: target groups passed to broadcast()
BROADCAST_EVERYBODY = 1
BROADCAST_CHAT = 2
BROADCAST_GAME = 3
BROADCAST_PM = 4
class RPG(Protocol):
    # Per-connection state. Class attributes serve as defaults and are
    # shadowed by instance attributes once assigned.
    init = True                  # still in handshake (USER not yet accepted)
    inGAME = False               # client joined the GAME service
    inCHAT = False               # client joined the CHAT service
    encrypted_messages = False   # client passed the RSA challenge
    encrypted_responses = False  # presumably: encrypt server replies — not referenced in this chunk, verify elsewhere
    protocol = "1.2"             # protocol version advertised to clients
    channel = ""                 # current chat channel name
    name = ""                    # user name set via the USER command
def connectionMade(self):
    """Handle a new TCP connection.

    Registers the user, drops blocked IPs, and sends the greeting
    (protocol version, RSA public key, challenge, and UID line).

    Fix: self.ID is assigned and the client is appended to factory.clients
    *before* any early return, so that connectionLost() (which reads
    self.ID and removes self from factory.clients) never raises
    AttributeError/ValueError when a blocked IP is dropped.
    """
    ID = self.factory.users.addUser()
    self.ID = ID
    onlineUsers = self.factory.users.numUser
    registeredUsers = self.factory.users.regUsers()
    IP = self.transport.getPeer().host
    addText("Established connection UID %d (%s)\n" % (ID, IP), LOG_CONN)
    if IP in self.factory.blockedip:
        # Register first so connectionLost() can unwind cleanly.
        self.factory.clients.append(self)
        self.transport.loseConnection()
        addText("Dropped connection with UID " + str(ID) + ". IP %s is blocked." % IP, LOG_ERR)
        return
    welcome = self.factory.welcome
    msg = "UID " + str(ID) + " " + str(registeredUsers) + " " + str(onlineUsers) + " " + IP
    publickey = ("PROTOCOL " + self.protocol +
                 "\r\nPUBLIC RSAKEY e " + str(self.factory.publickey['e']) +
                 "\r\nPUBLIC RSAKEY n " + str(self.factory.publickey['n']) +
                 "\r\nCHALLENGE " + self.factory.challenge)
    firstmsg = welcome + "\r\n\r\n" + publickey + "\r\n" + msg + "\r\n"
    self.transport.write(firstmsg)
    addText(firstmsg, LOG_SEND)
    self.factory.clients.append(self)
def dataReceived(self, data):
    """Handle raw data from the client.

    Messages are delimited by "\r\n\r\n"; a buffer holding several messages
    is split and each piece re-dispatched through this method (the
    delimiter itself is consumed by the split).  During the handshake
    (self.init) only CHALLENGE and USER are accepted; afterwards every
    message is passed to checkCommands().
    """
    if data == "":
        return
    data = data.split("\r\n\r\n")
    if len(data) != 1:
        # Several messages arrived in one buffer: handle each separately.
        for d in data:
            self.dataReceived(d)
        return
    else:
        data = data[0]
    if self.init:
        if data[0:9] == "CHALLENGE":
            # Client answers the RSA challenge sent in connectionMade().
            receivedkey = data[10:]
            if self.factory.challenge_answer == receivedkey:
                addText("UID %d is using RSA encryption to send messages" % self.ID, LOG_INFO)
                self.message("PUBLIC KEY RSA CHALLENGE PASSED\r")
                self.encrypted_messages = True
            else:
                addText("UID %d is trying to use RSA encryption, but failed to answer the challenge correctly" % self.ID, LOG_ERR)
                self.transport.write("PUBLIC KEY RSA CHALLENGE FAILED")
        elif data[0:4] != "USER":
            # Anything else before USER is a protocol violation: drop client.
            addText( "UID" + str(self.ID) + " : " + data, LOG_RECV)
            self.transport.loseConnection()
            addText( "Dropping user UID " +str( self.ID) + " unknown protocol", LOG_ERR)
        else:
            addText( "UID" + str(self.ID) + " : " + data, LOG_RECV)
            # First line only; name is the second space-separated token.
            data = data.split("\r\n")
            data = data[0].split(" ")
            try:
                name = data[1]
                if name == "":
                    self.transport.write("USER Fault"+ "\r\n")
                    addText( "UID" + str(self.ID) + " USER Fault", LOG_ERR)
                else:
                    if self.factory.users.addName(self.ID, name):
                        # NOTE(review): stray "'" appended after the name —
                        # confirm against the client-side protocol parser.
                        self.transport.write("USER OK " + name + "'\r\n")
                        self.name = name
                        addText( "UID" + str(self.ID) + " changed his/her name to " + name, LOG_SERVER)
                        self.init= False
                    else:
                        # Duplicate name: reject and disconnect.
                        addText( "UID" + str(self.ID) + " USER AlreadyExists " + name, LOG_ERR)
                        self.transport.write("USER AlreadyExists " + name + "\r\n")
                        self.transport.loseConnection()
            except:
                # Malformed USER line (missing name token).
                self.transport.write("USER Fault"+ "\r\n")
                addText( "UID" + str(self.ID) + " USER Fault")
    else:
        if not(self.encrypted_messages):
            addText( str(self.name) + " : " + data, LOG_RECV)
        self.checkCommands(data)
def connectionLost(self, reason):
    """Clean up a disconnecting client: leave the channel, unregister the
    user, broadcast EXIT to the others, and drop the connection object.

    NOTE(review): assumes connectionMade() already set self.ID and
    appended self to factory.clients; confirm that a connection dropped
    mid-handshake (e.g. blocked IP) cannot reach here without them.
    """
    self.leave_channel()
    self.factory.users.remUser(self.ID)
    ID = self.ID
    addText("Lost connection with %s because of %s" %(self.name, str(reason)), LOG_CONN)
    self.broadcast("EXIT UID " + str(ID))
    self.factory.clients.remove(self)
    def checkCommands(self, data, unencrypted = False):
        """Parse and execute one protocol command from a registered client.

        Dispatches on the first (upper-cased) word of the first line of
        *data*.  When nothing matches and the client negotiated RSA, the
        payload is decrypted once and re-parsed; unencrypted=True marks that
        second pass and prevents infinite recursion.  Returns False when the
        command is unknown, True after a successful decrypted re-parse; the
        individual handlers return None.
        """
        dataLineSplit = data.split("\r\n")
        dataLine = dataLineSplit[0]
        dataSpaceSplit = dataLine.split(" ")
        command = dataSpaceSplit[0].upper()
        if command == "QUIT":
            self.transport.loseConnection()
        elif command == "USERS":
            # Server statistics: name, UID, registered/online counts, peer IP.
            ID =self.ID
            onlineUsers = self.factory.users.numUser
            registeredUsers = self.factory.users.regUsers()
            IP = self.transport.getPeer()
            IP = IP.host
            msg = "USERS " + self.name + "UID " + str(ID) + " " + str(registeredUsers) + " " + str(onlineUsers) + " " + IP + "\r\n"
            self.transport.write(msg)
            addText( self.name + " " +msg, LOG_SEND)
        elif command == "USERLIST":
            # Tab-separated listing of chat users sharing this client's channel.
            msg = ""
            for c in self.factory.clients:
                if c.inCHAT and c.channel == self.channel:
                    msg += "USERLIST\t" + str( c.ID) +"\t" + c.name + "\t[Online]\t\r\n"
            self.transport.write(msg)
            addText( "%s requested user list\r\n" % self.name, LOG_SEND)
        # New in version 1.2
        elif command == "CHANNELLIST":
            # One "CHANNEL <name> <count>" line per configured channel.
            msg = ""
            for channel in self.factory.channels:
                msg += "CHANNEL %s %d\r\n" % (channel[0], channel[1])
            self.send_command(msg)
            addText( "%s requested channel list\r\n" % self.name, LOG_SEND)
        elif command == "SERV":
            msg = "SERV\r\nChat: main chat room and private messaging\r\nGame: retreive game maps"
            self.transport.write(msg + "\r\n\r\n")
            addText( self.name + " " +msg, LOG_SEND)
        elif command == "JOIN":
            # JOIN GAME | CHAT | CHANNEL <name>; a missing/unknown argument
            # raises and is reported via the except branch below.
            try:
                serv = dataSpaceSplit[1].upper()
                if serv == "GAME":
                    self.inGAME = True
                    self.transport.write("JOIN GAME OK 20 20")
                    self.broadcast("JOIN GAME UID " + str(self.ID) + " " +self.name, BROADCAST_GAME)
                    addText( self.name + " joined the game", LOG_SERVER)
                elif serv == "CHAT":
                    self.inCHAT = True
                    self.enter_channel(self.factory.default_channel)
                    self.broadcast("JOIN CHAT UID " + str(self.ID) + " " + self.name + " " + self.channel + "\r\n\r", BROADCAST_CHAT)
                    addText( self.name + " joined the chat", LOG_SERVER)
                elif serv == "CHANNEL":
                    self.inCHAT = True
                    self.broadcast("EXIT UID %d\r\n\r" % self.ID, BROADCAST_CHAT)
                    self.leave_channel()
                    self.enter_channel(dataSpaceSplit[2])
                    self.broadcast("JOIN CHAT UID " + str(self.ID) + " " + self.name + " " + self.channel + "\r\n\r", BROADCAST_CHAT)
                    addText("%s joined channel %s" % (self.name, self.channel))
            except:
                msg = "JOIN Fault ServiceNotKnown '%s'" % serv
                self.send_command(msg)
                addText( self.name + " " + msg, LOG_ERR)
        elif command == "PUBLIC":
            # Client sends its RSA public key (e, n) plus a challenge we must
            # answer by encrypting it with the reconstructed key.
            keys = {}
            for d in dataLineSplit:
                words = d.split(" ")
                if d[:15] == "PUBLIC RSAKEY e":
                    keys['e'] = long(words[3])
                elif d[:15] == "PUBLIC RSAKEY n":
                    keys['n'] = long(words[3])
                elif d[:11] == "MYCHALLENGE":
                    try:
                        challenge = words[1]
                        self.client_key = RSA.construct((keys['n'], keys['e']))
                        self.send_command("MYCHALLENGE " + self.client_key.encrypt(challenge, "")[0])
                        addText("Answering %s's RSA challenge" % self.name, LOG_INFO)
                    except:
                        addText("Couldn't encrypt %s's RSA challenge" % self.name )
        elif command == "MYCHALLENGE":
            # Client's verdict on our challenge answer; PASSED enables
            # encrypting everything we send to this client.
            if dataSpaceSplit[1] == "PASSED":
                addText("Passed RSA challenge. %s is using RSA to receive messages." % self.name)
                self.encrypted_responses = True
            elif dataSpaceSplit[1] == "FAILED":
                addText("Failed RSA challenge. %s is not using RSA to receive messages." % self.name, LOG_ERR)
            else:
                pass
        elif command == "MSG":
            # MSG GAME|CHAT <text>  or  MSG PM <to> <text>
            try:
                serv =dataSpaceSplit[1].upper()
                msg = dataSpaceSplit[2]
                if serv == "GAME":
                    if self.inGAME:
                        self.broadcast("MSG GAME UID " + str(self.ID) + " " + msg + "\r\n", BROADCAST_GAME)
                        addText( self.name + " is sending a GAME message: " + msg, LOG_INFO)
                    else:
                        self.transport.write("MSG Fault NotInGame\r\n")
                        addText( self.name + " tried to send a GAME message but wasn't in a GAME", LOG_ERR)
                elif serv == "CHAT":
                    if self.inCHAT:
                        self.broadcast_command("MSG CHAT UID " + str(self.ID) + " " + msg)
                        addText( self.name + " is sending a CHAT message: " + msg, LOG_INFO)
                    else:
                        self.transport.write("MSG Fault NotInChat\r\n")
                        addText( self.name + " tried to send a CHAT message but wasn't in a CHAT", LOG_ERR)
                elif serv == "PM":
                    addText( "PM message..", LOG_INFO)
                    if self.inCHAT:
                        to = dataSpaceSplit[2]
                        msg = dataSpaceSplit[3]
                        # Echo the PM back to the sender, then deliver to the target.
                        self.message("MSG PM " + self.name + " " + to + " " + msg)
                        self.broadcast("MSG PM " + self.name + " "+ to + " " + msg, BROADCAST_PM, to)
            except:
                self.transport.write("MSG Fault ServiceNotKnown\r\n")
                addText( self.name + " tried to send a message to unknown service (" + serv + ")", LOG_ERR)
        else:
            # Unknown command: maybe it is an RSA-encrypted frame -- decrypt
            # once and retry before reporting a FAULT.
            if self.encrypted_messages and not(unencrypted):
                addText( "Decrypting data received from %s" % self.name)
                realdata = self.get_decrypted_msg(data)
                if self.checkCommands(realdata, True) != False:
                    return True
            msg = "FAULT Unknown command " + command
            self.transport.write(msg + "\r\n")
            addText( self.name + " " +msg, LOG_SEND)
            return False
def enter_channel(self, channel):
for x in self.factory.channels:
if x[0] == channel:
x[1] += 1
self.channel = channel
def leave_channel(self):
for x in self.factory.channels:
if x[0] == self.channel:
x[1] -= 1
self.channel = ""
def get_decrypted_msg(self, data):
data = data.split("\r\n")
res = ""
for d in data:
res += self.factory.RSAkey.decrypt(d)
return res
def get_encrypted_msg(self, msg):
if self.encrypted_responses:
if len(msg) < self.client_key.size() / 8:
return self.client_key.encrypt(msg, "")[0]
else:
return self.client_key.encrypt(msg[:self.client_key.size() / 8], "")[0] + "\r\n" + \
self.get_encrypted_msg(msg[self.client_key.size() / 8:])
else:
return msg
def broadcast(self, msg, broadcast = BROADCAST_EVERYBODY, to = ""):
if broadcast == BROADCAST_EVERYBODY:
for c in self.factory.clients:
c.message(msg)
elif broadcast == BROADCAST_GAME:
for c in self.factory.clients:
if c.inGAME:
c.message(msg)
elif broadcast == BROADCAST_CHAT:
for c in self.factory.clients:
if c.inCHAT and c.channel == self.channel:
c.message(msg)
elif broadcast == BROADCAST_PM:
for c in self.factory.clients:
if c.name == to and c.inCHAT:
c.message(msg)
addText( msg, LOG_SERVER)
def broadcast_command(self, cmd):
for c in self.factory.clients:
if c.inCHAT and c.channel == self.channel:
c.send_command(cmd)
def message(self, msg):
addText(msg, LOG_SEND)
self.transport.write(msg + "\n")
def send_command(self, msg):
addText(msg, LOG_SEND)
self.transport.write(self.get_encrypted_msg(msg) + "\r\n\r\n")
class HTTPFrontEnd(resource.Resource):
    """Read-only HTTP status page listing online users and channels."""
    isLeaf = True
    template = "<html><head><title>pylanchatd 1.2 web-stats</title></head><body>%s<hr/><i>Copyright 2008, Bart Spaans</i></body></html>"
    def __init__(self, users, channels):
        self.users = users
        self.channels = channels
    def render_GET(self, request):
        """Render the stats page; *request* is unused."""
        user_items = ["<li>%d - %s</li>" % (user.ID, user.name)
                      for user in self.users.users]
        channel_items = ["<li>%s [%d]</li>" % (chan[0], chan[1])
                         for chan in self.channels]
        body = "".join(["<h1>pylanchatd 1.2 web-stats</h1><hr/>",
                        "<h2>Online users</h2>",
                        "<ul>" + "".join(user_items) + "</ul>",
                        "<h2>Channels</h2>",
                        "<ul>" + "".join(channel_items) + "</ul>"])
        return self.template % body
def addText(text, log = LOG_INFO):
    """Print one log line to stdout, tagged with the given LOG_* level.

    NOTE(review): if *log* is none of the known LOG_* constants, `identifier`
    is never assigned and the print below raises NameError -- consider adding
    an else branch with a default tag.
    """
    if log == LOG_INFO:
        identifier = "[INFO]"
    elif log == LOG_RECV:
        identifier = "[RECV]"
    elif log == LOG_SEND:
        identifier = "[SEND]"
    elif log == LOG_ERR:
        identifier = "[ERR]"
    elif log == LOG_CONN:
        identifier = "[CONN]"
    elif log == LOG_SERVER:
        identifier = "[SERVER]"
    # Python 2 print statement; trailing CR/LF are stripped before printing.
    print identifier, remove_whitespace_at_end(text)
def remove_whitespace_at_end(str):
    """Return *str* with all trailing newline and carriage-return characters removed.

    Fixes two defects in the original:
    - the second comparison tested '\n' twice instead of '\r', so trailing
      carriage returns (CRLF protocol frames) were never stripped;
    - an empty string crashed with IndexError on str[-1].
    """
    if str and (str[-1] == '\n' or str[-1] == '\r'):
        return remove_whitespace_at_end(str[:-1])
    return str
def startService():
    """Read server.conf, build the Twisted chat factory, and run the reactor.

    Generates the server RSA key pair, loads the welcome message and the
    block lists, configures channels, starts the chat listener on the
    configured port and the HTTP stats page on port 8080.  Blocks in
    reactor.run() until the process is stopped.
    """
    # Defaults used for any key missing from server.conf.
    p = Parser(
            {
             "port" : "2727",
             "RSAkeysize" : "2048",
             "maxclients" : "32",
             "welcome_msg" : "conf/welcome",
             "blockedip" : "conf/blocked.ip",
             "blockednames" : "conf/blocked.names",
             #todo
             "auth" : "conf/auth",
             "wordfilter" : "conf/wordfilter",
             "channels" : "main",
             "default_channel" : "main",
             # todo
             "msg_millisec_delay" : "50",
             "msg_per_minute" : "10",
            }, "=")
    options = p.parse_file("server.conf")
    if options == False:
        return
    port = int(options["port"])
    welcome = options["welcome_msg"]
    addText("%s version %s" % (APP_NAME, APP_VERSION), LOG_INFO)
    addText("Attempting to start server at port %s\n" % port, LOG_INFO)
    factory = Factory()
    factory.protocol = RPG
    factory.clients = []
    # Parse channel list
    factory.channels = []
    factory.default_channel = ""
    channels = options["channels"].split(",")
    for chan in channels:
        # Tolerate one leading space after the comma separator.
        if chan[0] == ' ':
            chan = chan[1:]
        addText("Adding channel %s" % chan, LOG_INFO)
        factory.channels.append([chan, 0])
        if chan == options["default_channel"]:
            addText("Set default channel to %s." % chan, LOG_INFO)
            factory.default_channel = chan
    # Fall back to the first configured channel when default_channel
    # did not name any of them.
    if factory.default_channel == "":
        factory.default_channel = factory.channels[0][0]
        addText("Set default channel to %s." % factory.default_channel, LOG_INFO)
    # Get welcome message
    try:
        addText("Getting welcome message from %s" % welcome)
        f = open(welcome)
        welcome = f.read()
        f.close()
    except:
        # NOTE(review): options["welcome"] is not a configured key (the
        # defaults use "welcome_msg"), so this line raises KeyError and the
        # fallback message below is never assigned -- confirm and fix.
        addText("Error reading contents of welcome message in %s" % options["welcome"], LOG_ERR)
        welcome = "Welcome to the server.\nRunning %s version %s." % (APP_NAME, APP_VERSION)
    factory.welcome = welcome
    # Get blocked names
    try:
        addText("Getting blocked usernames from %s" % options["blockednames"])
        f = open(options["blockednames"])
        blockednames = f.read().split("\n")
        f.close()
    except:
        addText("Error reading contents of %s" % options["blockednames"], LOG_ERR)
        blockednames = []
    # Get blocked IPs
    try:
        addText("Getting blocked IP's from %s" % options["blockedip"])
        f = open(options["blockedip"])
        factory.blockedip = f.read().split("\n")
        f.close()
    except:
        addText("Error reading blocked IP's from %s" % options["blockedip"], LOG_ERR)
        factory.blockedip = []
    factory.users = Users()
    factory.users.blockednames = blockednames
    addText("Generating new %s bit RSA public key" % options["RSAkeysize"], LOG_INFO)
    factory.RSAkey = RSA.generate(int(options["RSAkeysize"]), urandom)
    factory.publickey = factory.RSAkey.publickey().__getstate__()
    # The challenge string clients must encrypt to prove they hold our key;
    # the expected answer is precomputed once here.
    factory.challenge = "encrypt_this_string_if_you_want_to_use_RSA"
    factory.challenge_answer = factory.RSAkey.encrypt(factory.challenge, "")[0]
    addText("Listening for incoming connections...", LOG_INFO)
    reactor.listenTCP(port, factory)
    addText("Opening web server on port 8080...", LOG_INFO)
    site = server.Site(HTTPFrontEnd(factory.users, factory.channels))
    reactor.listenTCP(8080, site)
    reactor.run()
# Module entry point: the server starts as soon as this file is executed.
startService()
| [
"malani@localhost.localdomain"
] | malani@localhost.localdomain | |
9c2d4ee881706a7dad71ad011c33a475eab37bfd | e0c76ae542e3d37807b413eb725f716b88611948 | /core/views.py | 949619a01bef43fd025b723d26ef67458e2b866e | [] | no_license | nicholas-karimi/django-rest-todoAPI- | 713136b1fcab92db972c2e7ac5b5620f88c4d15e | 975aa3fc75ef649b2afe298bd2cb7828bd623da7 | refs/heads/master | 2023-08-25T03:00:56.803070 | 2021-10-13T21:56:22 | 2021-10-13T21:56:22 | 416,905,905 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,403 | py | from django.shortcuts import render
from django.http import JsonResponse
from rest_framework.decorators import api_view
from rest_framework.response import Response
from .models import Task
from .serializers import TaskSerializer
@api_view(['GET'])
def apiOverview(request):
    """Root endpoint: return a map of the human-readable API routes."""
    endpoints = {
        'List': '/task-list/',
        'Detail View': '/task-detail/<str:pk>/',
        'Create': '/task-create',
        'Update': '/task-update/<str:pk>/',
        'Delete': '/task-delete/<str:pk>/',
    }
    return Response(endpoints)
# list endpoint
@api_view(['GET'])
def listView(request):
    """Return every Task, serialized as a JSON list."""
    serializer = TaskSerializer(Task.objects.all(), many=True)
    return Response(serializer.data)
@api_view(['GET'])
def taskView(request, pk):
    """Return the single Task identified by *pk*."""
    task = Task.objects.get(pk=pk)
    return Response(TaskSerializer(task, many=False).data)
@api_view(['POST'])
def taskCreate(request):
    """Create a Task from the POSTed payload.

    Returns the serialized task on success.  On validation failure the
    serializer errors are returned; the original fell through and returned
    None, which makes DRF raise a 500 instead of reporting the problem.
    """
    serializer = TaskSerializer(data=request.data)
    if serializer.is_valid():
        serializer.save()
        return Response(serializer.data)
    return Response(serializer.errors)
@api_view(['POST'])
def taskUpdate(request, pk):
    """Update the Task identified by *pk* with the POSTed payload.

    Returns the serialized task on success.  On validation failure the
    serializer errors are returned; the original fell through and returned
    None, which makes DRF raise a 500 instead of reporting the problem.
    """
    task = Task.objects.get(pk=pk)
    serializer = TaskSerializer(instance=task, data=request.data)
    if serializer.is_valid():
        serializer.save()
        return Response(serializer.data)
    return Response(serializer.errors)
@api_view(['GET'])
def taskDelete(request, pk):
    """Delete the Task identified by *pk* and return a confirmation string."""
    Task.objects.get(pk=pk).delete()
    return Response("Item deleted successfully!")
"kariminic@gmail.com"
] | kariminic@gmail.com |
200a0c214acff2cccff7133ae68f381b0699de4b | d6265afea582ef9d0b282d0dbaf582ef2015a6f4 | /tests/satosa/metadata_creation/test_saml_metadata.py | 49cff97a4cadfb8c3cca7baeb70e08e9ac3e0e73 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | peppelinux/SATOSA | c94b0d2f7fa07b3b8a751f548b8166452e9e084f | 12d9f2532e334978e9a614946d77cc5b217b4383 | refs/heads/master | 2023-08-10T08:08:22.199322 | 2020-04-13T17:26:27 | 2020-04-13T17:26:27 | 180,346,947 | 3 | 0 | Apache-2.0 | 2021-08-24T08:23:33 | 2019-04-09T10:56:02 | Python | UTF-8 | Python | false | false | 18,113 | py | import copy
from base64 import urlsafe_b64encode
import pytest
from saml2.config import SPConfig, Config
from saml2.mdstore import InMemoryMetaData
from saml2.metadata import entity_descriptor
from saml2.sigver import security_context
from saml2.time_util import in_a_while
from satosa.metadata_creation.saml_metadata import create_entity_descriptors, create_signed_entities_descriptor, \
create_signed_entity_descriptor
from satosa.satosa_config import SATOSAConfig
from tests.conftest import BASE_URL
from tests.util import create_metadata_from_config_dict
class TestCreateEntityDescriptors:
    """Tests for create_entity_descriptors: SAML metadata generation for
    every frontend/backend module combination configured in SATOSA."""
    def assert_single_sign_on_endpoints_for_saml_frontend(self, entity_descriptor, saml_frontend_config, backend_names):
        """Assert the frontend IdP metadata exposes one SSO URL per backend/binding."""
        metadata = InMemoryMetaData(None, str(entity_descriptor))
        metadata.load()
        sso = metadata.service(saml_frontend_config["config"]["idp_config"]["entityid"], "idpsso_descriptor",
                               "single_sign_on_service")
        for backend_name in backend_names:
            for binding, path in saml_frontend_config["config"]["endpoints"]["single_sign_on_service"].items():
                sso_urls_for_binding = [endpoint["location"] for endpoint in sso[binding]]
                expected_url = "{}/{}/{}".format(BASE_URL, backend_name, path)
                assert expected_url in sso_urls_for_binding
    def assert_single_sign_on_endpoints_for_saml_mirror_frontend(self, entity_descriptors, encoded_target_entity_id,
                                                                 saml_mirror_frontend_config, backend_names):
        """Assert the mirrored IdP (entity id suffixed with the base64 target id)
        exposes SSO URLs embedding both the backend name and the target id."""
        expected_entity_id = saml_mirror_frontend_config["config"]["idp_config"][
                                 "entityid"] + "/" + encoded_target_entity_id
        metadata = InMemoryMetaData(None, None)
        for ed in entity_descriptors:
            metadata.parse(str(ed))
        sso = metadata.service(expected_entity_id, "idpsso_descriptor", "single_sign_on_service")
        for backend_name in backend_names:
            for binding, path in saml_mirror_frontend_config["config"]["endpoints"]["single_sign_on_service"].items():
                sso_urls_for_binding = [endpoint["location"] for endpoint in sso[binding]]
                expected_url = "{}/{}/{}/{}".format(BASE_URL, backend_name, encoded_target_entity_id, path)
                assert expected_url in sso_urls_for_binding
    def assert_assertion_consumer_service_endpoints_for_saml_backend(self, entity_descriptor, saml_backend_config):
        """Assert the backend SP metadata lists exactly the configured ACS endpoints."""
        metadata = InMemoryMetaData(None, str(entity_descriptor))
        metadata.load()
        acs = metadata.service(saml_backend_config["config"]["sp_config"]["entityid"], "spsso_descriptor",
                               "assertion_consumer_service")
        for url, binding in saml_backend_config["config"]["sp_config"]["service"]["sp"]["endpoints"][
            "assertion_consumer_service"]:
            assert acs[binding][0]["location"] == url
    def test_saml_frontend_with_saml_backend(self, satosa_config_dict, saml_frontend_config, saml_backend_config):
        """SAML frontend + SAML backend: both sides produce metadata."""
        satosa_config_dict["FRONTEND_MODULES"] = [saml_frontend_config]
        satosa_config_dict["BACKEND_MODULES"] = [saml_backend_config]
        satosa_config = SATOSAConfig(satosa_config_dict)
        frontend_metadata, backend_metadata = create_entity_descriptors(satosa_config)
        assert len(frontend_metadata) == 1
        assert len(frontend_metadata[saml_frontend_config["name"]]) == 1
        entity_descriptor = frontend_metadata[saml_frontend_config["name"]][0]
        self.assert_single_sign_on_endpoints_for_saml_frontend(entity_descriptor, saml_frontend_config,
                                                               [saml_backend_config["name"]])
        assert len(backend_metadata) == 1
        self.assert_assertion_consumer_service_endpoints_for_saml_backend(
            backend_metadata[saml_backend_config["name"]][0],
            saml_backend_config)
    def test_saml_frontend_with_oidc_backend(self, satosa_config_dict, saml_frontend_config, oidc_backend_config):
        """SAML frontend + OIDC backend: only the frontend produces SAML metadata."""
        satosa_config_dict["FRONTEND_MODULES"] = [saml_frontend_config]
        satosa_config_dict["BACKEND_MODULES"] = [oidc_backend_config]
        satosa_config = SATOSAConfig(satosa_config_dict)
        frontend_metadata, backend_metadata = create_entity_descriptors(satosa_config)
        assert len(frontend_metadata) == 1
        assert len(frontend_metadata[saml_frontend_config["name"]]) == 1
        entity_descriptor = frontend_metadata[saml_frontend_config["name"]][0]
        self.assert_single_sign_on_endpoints_for_saml_frontend(entity_descriptor, saml_frontend_config,
                                                               [oidc_backend_config["name"]])
        # OIDC backend does not produce any SAML metadata
        assert not backend_metadata
    def test_saml_frontend_with_multiple_backends(self, satosa_config_dict, saml_frontend_config, saml_backend_config,
                                                  oidc_backend_config):
        """SAML frontend + two backends: frontend advertises SSO URLs for both."""
        satosa_config_dict["FRONTEND_MODULES"] = [saml_frontend_config]
        satosa_config_dict["BACKEND_MODULES"] = [saml_backend_config, oidc_backend_config]
        satosa_config = SATOSAConfig(satosa_config_dict)
        frontend_metadata, backend_metadata = create_entity_descriptors(satosa_config)
        assert len(frontend_metadata) == 1
        assert len(frontend_metadata[saml_frontend_config["name"]]) == 1
        entity_descriptor = frontend_metadata[saml_frontend_config["name"]][0]
        self.assert_single_sign_on_endpoints_for_saml_frontend(entity_descriptor, saml_frontend_config,
                                                               [saml_backend_config["name"],
                                                                oidc_backend_config["name"]])
        # only the SAML backend produces SAML metadata
        assert len(backend_metadata) == 1
        self.assert_assertion_consumer_service_endpoints_for_saml_backend(
            backend_metadata[saml_backend_config["name"]][0],
            saml_backend_config)
    def test_saml_mirror_frontend_with_saml_backend_with_multiple_target_providers(self, satosa_config_dict, idp_conf,
                                                                                   saml_mirror_frontend_config,
                                                                                   saml_backend_config):
        """Mirror frontend: one mirrored entity descriptor per target IdP."""
        idp_conf2 = copy.deepcopy(idp_conf)
        idp_conf2["entityid"] = "https://idp2.example.com"
        satosa_config_dict["FRONTEND_MODULES"] = [saml_mirror_frontend_config]
        saml_backend_config["config"]["sp_config"]["metadata"] = {"inline": [create_metadata_from_config_dict(idp_conf),
                                                                             create_metadata_from_config_dict(
                                                                                 idp_conf2)]}
        satosa_config_dict["BACKEND_MODULES"] = [saml_backend_config]
        satosa_config = SATOSAConfig(satosa_config_dict)
        frontend_metadata, backend_metadata = create_entity_descriptors(satosa_config)
        assert len(frontend_metadata) == 1
        assert len(frontend_metadata[saml_mirror_frontend_config["name"]]) == 2
        entity_descriptors = frontend_metadata[saml_mirror_frontend_config["name"]]
        for target_entity_id in [idp_conf["entityid"], idp_conf2["entityid"]]:
            encoded_target_entity_id = urlsafe_b64encode(target_entity_id.encode("utf-8")).decode("utf-8")
            self.assert_single_sign_on_endpoints_for_saml_mirror_frontend(entity_descriptors, encoded_target_entity_id,
                                                                          saml_mirror_frontend_config,
                                                                          [saml_backend_config["name"]])
        assert len(backend_metadata) == 1
        self.assert_assertion_consumer_service_endpoints_for_saml_backend(
            backend_metadata[saml_backend_config["name"]][0],
            saml_backend_config)
    def test_saml_mirror_frontend_with_oidc_backend(self, satosa_config_dict, saml_mirror_frontend_config,
                                                    oidc_backend_config):
        """Mirror frontend + OIDC backend: mirrored entity id uses the OP issuer."""
        satosa_config_dict["FRONTEND_MODULES"] = [saml_mirror_frontend_config]
        satosa_config_dict["BACKEND_MODULES"] = [oidc_backend_config]
        satosa_config = SATOSAConfig(satosa_config_dict)
        frontend_metadata, backend_metadata = create_entity_descriptors(satosa_config)
        assert len(frontend_metadata) == 1
        assert len(frontend_metadata[saml_mirror_frontend_config["name"]]) == 1
        entity_descriptors = frontend_metadata[saml_mirror_frontend_config["name"]]
        target_entity_id = oidc_backend_config["config"]["provider_metadata"]["issuer"]
        encoded_target_entity_id = urlsafe_b64encode(target_entity_id.encode("utf-8")).decode("utf-8")
        self.assert_single_sign_on_endpoints_for_saml_mirror_frontend(entity_descriptors, encoded_target_entity_id,
                                                                      saml_mirror_frontend_config,
                                                                      [oidc_backend_config["name"]])
        # OIDC backend does not produce any SAML metadata
        assert not backend_metadata
    def test_saml_mirror_frontend_with_multiple_backends(self, satosa_config_dict, idp_conf,
                                                         saml_mirror_frontend_config,
                                                         saml_backend_config, oidc_backend_config):
        """Mirror frontend + two backends: one mirrored descriptor per target."""
        satosa_config_dict["FRONTEND_MODULES"] = [saml_mirror_frontend_config]
        saml_backend_config["config"]["sp_config"]["metadata"] = {
            "inline": [create_metadata_from_config_dict(idp_conf)]}
        satosa_config_dict["BACKEND_MODULES"] = [saml_backend_config, oidc_backend_config]
        satosa_config = SATOSAConfig(satosa_config_dict)
        frontend_metadata, backend_metadata = create_entity_descriptors(satosa_config)
        assert len(frontend_metadata) == 1
        assert len(frontend_metadata[saml_mirror_frontend_config["name"]]) == 2
        params = zip([idp_conf["entityid"], oidc_backend_config["config"]["provider_metadata"]["issuer"]],
                     [saml_backend_config["name"], oidc_backend_config["name"]])
        entity_descriptors = frontend_metadata[saml_mirror_frontend_config["name"]]
        for target_entity_id, backend_name in params:
            encoded_target_entity_id = urlsafe_b64encode(target_entity_id.encode("utf-8")).decode("utf-8")
            self.assert_single_sign_on_endpoints_for_saml_mirror_frontend(entity_descriptors, encoded_target_entity_id,
                                                                          saml_mirror_frontend_config,
                                                                          [backend_name])
        # only the SAML backend produces SAML metadata
        assert len(backend_metadata)
        self.assert_assertion_consumer_service_endpoints_for_saml_backend(
            backend_metadata[saml_backend_config["name"]][0],
            saml_backend_config)
    def test_two_saml_frontends(self, satosa_config_dict, saml_frontend_config, saml_mirror_frontend_config,
                                oidc_backend_config):
        """Both SAML frontends at once: each produces its own metadata set."""
        satosa_config_dict["FRONTEND_MODULES"] = [saml_frontend_config, saml_mirror_frontend_config]
        satosa_config_dict["BACKEND_MODULES"] = [oidc_backend_config]
        satosa_config = SATOSAConfig(satosa_config_dict)
        frontend_metadata, backend_metadata = create_entity_descriptors(satosa_config)
        assert len(frontend_metadata) == 2
        saml_entities = frontend_metadata[saml_frontend_config["name"]]
        assert len(saml_entities) == 1
        entity_descriptor = saml_entities[0]
        self.assert_single_sign_on_endpoints_for_saml_frontend(entity_descriptor, saml_frontend_config,
                                                               [oidc_backend_config["name"]])
        mirrored_saml_entities = frontend_metadata[saml_mirror_frontend_config["name"]]
        assert len(mirrored_saml_entities) == 1
        target_entity_id = oidc_backend_config["config"]["provider_metadata"]["issuer"]
        encoded_target_entity_id = urlsafe_b64encode(target_entity_id.encode("utf-8")).decode("utf-8")
        self.assert_single_sign_on_endpoints_for_saml_mirror_frontend(mirrored_saml_entities, encoded_target_entity_id,
                                                                      saml_mirror_frontend_config,
                                                                      [oidc_backend_config["name"]])
        # OIDC backend does not produce any SAML metadata
        assert not backend_metadata
    def test_create_mirrored_metadata_does_not_contain_target_contact_info(self, satosa_config_dict, idp_conf,
                                                                           saml_mirror_frontend_config,
                                                                           saml_backend_config):
        """Mirrored metadata must carry the proxy's own contact/organization
        info, not the target IdP's."""
        satosa_config_dict["FRONTEND_MODULES"] = [saml_mirror_frontend_config]
        saml_backend_config["config"]["sp_config"]["metadata"] = {
            "inline": [create_metadata_from_config_dict(idp_conf)]}
        satosa_config_dict["BACKEND_MODULES"] = [saml_backend_config]
        satosa_config = SATOSAConfig(satosa_config_dict)
        frontend_metadata, backend_metadata = create_entity_descriptors(satosa_config)
        assert len(frontend_metadata) == 1
        entity_descriptors = frontend_metadata[saml_mirror_frontend_config["name"]]
        metadata = InMemoryMetaData(None, str(entity_descriptors[0]))
        metadata.load()
        entity_info = list(metadata.values())[0]
        expected_entity_info = saml_mirror_frontend_config["config"]["idp_config"]
        assert len(entity_info["contact_person"]) == len(expected_entity_info["contact_person"])
        for i, contact in enumerate(expected_entity_info["contact_person"]):
            assert entity_info["contact_person"][i]["contact_type"] == contact["contact_type"]
            assert entity_info["contact_person"][i]["email_address"][0]["text"] == contact["email_address"][0]
            assert entity_info["contact_person"][i]["given_name"]["text"] == contact["given_name"]
            assert entity_info["contact_person"][i]["sur_name"]["text"] == contact["sur_name"]
        expected_org_info = expected_entity_info["organization"]
        assert entity_info["organization"]["organization_display_name"][0]["text"] == \
               expected_org_info["display_name"][0][0]
        assert entity_info["organization"]["organization_name"][0]["text"] == expected_org_info["name"][0][0]
        assert entity_info["organization"]["organization_url"][0]["text"] == expected_org_info["url"][0][0]
class TestCreateSignedEntitiesDescriptor:
    """Tests for create_signed_entities_descriptor (EntitiesDescriptor with
    several entities, XML-signed by the proxy key)."""
    @pytest.fixture
    def entity_desc(self, sp_conf):
        """EntityDescriptor built from the SP test configuration."""
        return entity_descriptor(SPConfig().load(sp_conf, metadata_construction=True))
    @pytest.fixture
    def verification_security_context(self, cert_and_key):
        """Security context holding only the certificate (verification side)."""
        conf = Config()
        conf.cert_file = cert_and_key[0]
        return security_context(conf)
    @pytest.fixture
    def signature_security_context(self, cert_and_key):
        """Security context holding certificate and private key (signing side)."""
        conf = Config()
        conf.cert_file = cert_and_key[0]
        conf.key_file = cert_and_key[1]
        return security_context(conf)
    def test_signed_metadata(self, entity_desc, signature_security_context, verification_security_context):
        """The produced metadata is signed, verifiable, and has no valid_until by default."""
        signed_metadata = create_signed_entities_descriptor([entity_desc, entity_desc], signature_security_context)
        md = InMemoryMetaData(None, security=verification_security_context)
        md.parse(signed_metadata)
        assert md.signed() is True
        assert md.parse_and_check_signature(signed_metadata) is True
        assert not md.entities_descr.valid_until
    def test_valid_for(self, entity_desc, signature_security_context):
        """valid_for hours are translated into the valid_until attribute."""
        valid_for = 4  # metadata valid for 4 hours
        expected_validity = in_a_while(hours=valid_for)
        signed_metadata = create_signed_entities_descriptor([entity_desc], signature_security_context,
                                                            valid_for=valid_for)
        md = InMemoryMetaData(None)
        md.parse(signed_metadata)
        assert md.entities_descr.valid_until == expected_validity
class TestCreateSignedEntityDescriptor:
    """Tests for create_signed_entity_descriptor (single signed EntityDescriptor)."""
    @pytest.fixture
    def entity_desc(self, sp_conf):
        """EntityDescriptor built from the SP test configuration."""
        return entity_descriptor(SPConfig().load(sp_conf, metadata_construction=True))
    @pytest.fixture
    def verification_security_context(self, cert_and_key):
        """Security context holding only the certificate (verification side)."""
        conf = Config()
        conf.cert_file = cert_and_key[0]
        return security_context(conf)
    @pytest.fixture
    def signature_security_context(self, cert_and_key):
        """Security context holding certificate and private key (signing side)."""
        conf = Config()
        conf.cert_file = cert_and_key[0]
        conf.key_file = cert_and_key[1]
        return security_context(conf)
    def test_signed_metadata(self, entity_desc, signature_security_context, verification_security_context):
        """The produced metadata is signed, verifiable, and has no valid_until by default."""
        signed_metadata = create_signed_entity_descriptor(entity_desc, signature_security_context)
        md = InMemoryMetaData(None, security=verification_security_context)
        md.parse(signed_metadata)
        assert md.signed() is True
        assert md.parse_and_check_signature(signed_metadata) is True
        assert not md.entity_descr.valid_until
    def test_valid_for(self, entity_desc, signature_security_context):
        """valid_for hours are translated into the valid_until attribute."""
        valid_for = 4  # metadata valid for 4 hours
        expected_validity = in_a_while(hours=valid_for)
        signed_metadata = create_signed_entity_descriptor(entity_desc, signature_security_context,
                                                          valid_for=valid_for)
        md = InMemoryMetaData(None)
        md.parse(signed_metadata)
        assert md.entity_descr.valid_until == expected_validity
| [
"rebecka.gulliksson@umu.se"
] | rebecka.gulliksson@umu.se |
cda1f89438ec06db8ca93501a5f2d7fd562a8b17 | 9aa834f603031a86236c457efe5ecbf168215da0 | /tweets/forms.py | 3ec807cc79cfe8b35bb0a211b0f8d465ea69b09f | [] | no_license | paragkulkarni/Django_mytweets | 814aac760c1c04d5b6cf0ac6c182a76f0846e3a1 | 984269dae48fe2c3f170dfc6de5d6796cb66d478 | refs/heads/master | 2021-09-05T05:02:57.466778 | 2018-01-24T08:23:25 | 2018-01-24T08:23:25 | 117,959,051 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 228 | py | from django import forms
class TweetForm(forms.Form):
    """Form for composing a tweet: the message body plus a hidden country field."""
    text = forms.CharField(
        max_length=160,
        widget=forms.Textarea(attrs={'rows': 1, 'cols': 85}),
    )
    country = forms.CharField(required=False, widget=forms.HiddenInput())
| [
"paragkk80@gmail.com"
] | paragkk80@gmail.com |
947176b90cbab11d9222c318374ee3befbaa72ad | bbff6cb11688224a5749037693614229d44915cf | /networks/network_utils.py | ca2ee4b4766d68f6128a95cf486e4c566cf349cc | [
"MIT"
] | permissive | yongliang-qiao/fscc | f4b02db2523cc0884816451b4788917ceba4506d | 9ca20128117f789433986971c5cc77f631c102e6 | refs/heads/master | 2023-04-15T13:58:11.619582 | 2020-07-18T07:17:12 | 2020-07-18T07:17:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,125 | py | import numpy as np
import torch
device = 'cuda' if torch.cuda.is_available() else 'cpu'
def forward_pass(network, _in, _tar, mode='validation', weights=None):
    """Run one forward pass of *network* on a single batch.

    Returns [output] in validation mode, [output, loss] otherwise.
    The input and (float, unsqueezed) target are moved to the module-level
    ``device`` first.
    """
    inputs = _in.to(device)
    targets = _tar.float().unsqueeze(0).to(device)
    output = network.network_forward(inputs, weights)
    if mode != 'validation':
        return [output, network.loss_function(output, targets)]
    return [output]
def evaluate(network, dataloader, mode='validation', weights=None):
    """Evaluate *network* over *dataloader*, accumulating count errors.

    Returns (loss, mae, mse) in training mode, else (mae, mse); ``mse`` is
    actually the root-mean-squared error (sqrt of the mean squared error).

    Bug fix: the per-batch target sum was moved with a hard-coded ``.cuda()``
    call, which crashed on CPU-only machines even though the module selects
    ``device = 'cpu'`` as a fallback; it now uses ``.to(device)``.
    """
    mae, mse, loss = 0.0, 0.0, 0.0
    for _in, _tar in dataloader:
        result = forward_pass(network, _in, _tar, mode, weights)
        # Error between predicted total count and ground-truth total count.
        difference = result[0].data.sum() - _tar.sum().type(torch.FloatTensor).to(device)
        mae += torch.abs(difference).item()
        mse += (difference ** 2).item()
        if mode == 'training':
            loss += result[1].item()
    mae /= len(dataloader)
    mse = np.sqrt(mse / len(dataloader))
    if mode == 'training':
        loss /= len(dataloader)
        return (loss, mae, mse)
    return mae, mse
| [
"maheshk2194@gmail.com"
] | maheshk2194@gmail.com |
da327466a9c5966169ed0e73790c57d204126c2b | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_74/244.py | 1aad3f70f9fbf37a7323274a79680d37008e458c | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,237 | py | #!/usr/bin/env python
# encoding: utf-8
import os
def parse_sequence(fileDescriptor):
    """Read one test case line "<n> <color> <button> ..." (Python 2).

    Returns (n, full_sequence, orange_sequence, blue_sequence) where each
    sequence item is a (button_position, color) tuple with color 'O' or 'B'.
    Exits the whole process on malformed input.
    """
    # Returns a sequence to complete
    f = fileDescriptor
    sequence = f.readline()
    sequence = sequence.split(' ')
    n = int(sequence[0])
    sequence_a = []
    sequence_b = []
    sequence_r = []
    have_color = False
    last_color = 'O'
    for i in xrange(1,len(sequence)):
        # Tokens alternate: a color letter followed by a button number.
        if not have_color and (sequence[i] == 'O' or sequence[i] == 'B'):
            have_color = True
            last_color = sequence[i]
        elif have_color and (sequence[i] != 'O' and sequence[i] != 'B'):
            t = (int(sequence[i]), last_color)
            if t[1] == 'O':
                sequence_a.append(t)
            else:
                sequence_b.append(t)
            sequence_r.append(t)
            have_color = False
        else:
            # Two colors or two numbers in a row: the line is malformed.
            print "Badformed Input"
            exit()
    return n, sequence_r, sequence_a, sequence_b
def min_time(n, sequence, seqO, seqB):
    """Total time for robots O and B (both starting at position 1) to press
    their buttons in the required global order.

    While one robot walks to and presses its next button, the other advances
    toward its own next target.  *seqO*/*seqB* are consumed (popped) in place.
    """
    pos = {'O': 1, 'B': 1}
    pending = {'O': seqO, 'B': seqB}
    total = 0
    for target, color in sequence:
        other = 'B' if color == 'O' else 'O'
        spent = timeToComplete(pos[color], target)
        total += spent
        pos[color] = target
        pending[color].pop(0)
        if pending[other]:
            # The idle robot keeps walking toward its next button meanwhile.
            pos[other] = newPosition(pos[other], pending[other][0][0], spent)
    return total
def timeToComplete(currPos, destPos):
    """Seconds to walk from currPos to destPos and press the button (1s press)."""
    return abs(destPos - currPos) + 1
def newPosition(currPos, destPos, time):
    """Position reached after walking *time* seconds from currPos toward destPos,
    never overshooting the destination."""
    advance = min(abs(destPos - currPos), time)
    if currPos < destPos:
        return currPos + advance
    return currPos - advance
def solve(fileName):
try:
f = open(fileName, "r")
except:
exit()
test_cases = int(f.readline())
for i in xrange(test_cases):
args = parse_sequence(f)
result = min_time(*args)
print "Case #%d: %d" %(i+1, result)
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
68d8399c5199cd6c1ac9a2c275edb439b8a5ab47 | c66955c6fc178955c2024e0318ec7a91a8386c2d | /programQuick/chapterEleven/mapIt.py | f66811ddbe725a952063e3f0d855d57f0bd18aa5 | [] | no_license | duheng18/python-study | a98642d6ee1b0043837c3e7c5b91bf1e28dfa588 | 13c0571ac5d1690bb9e615340482bdb2134ecf0e | refs/heads/master | 2022-11-30T17:36:57.060130 | 2019-11-18T07:31:40 | 2019-11-18T07:31:40 | 147,268,053 | 1 | 0 | null | 2022-11-22T03:36:51 | 2018-09-04T00:49:42 | Python | UTF-8 | Python | false | false | 353 | py | import webbrowser, sys, pyperclip
if len(sys.argv) > 1:
# Get address from command line.
address = ' '.join(sys.argv[1:])
else:
# Get address from clipboard.
pyperclip.copy('mapit 870 Valencia St, San Francisco, CA 94110')
address = pyperclip.paste()
print(address)
webbrowser.open('https://www.google.com/maps/place/'+address)
| [
"emaildh@163.com"
] | emaildh@163.com |
bff902fb61c941683b2b4738f0615a12188427e1 | 17b218cf872e203959062908d0f1ec58812d98c8 | /fabfile.py | c52a10f28cc2523f6c7419d7b7a877a22a7d8a71 | [
"Apache-2.0"
] | permissive | renanvicente/root_command_by_ssh | 5f706ef8f620bd3cc9ec059733926694133c36d8 | f34f10cd77625903b355115ae6102c3bfa579bec | refs/heads/master | 2016-09-01T17:05:09.825623 | 2013-11-21T13:38:02 | 2013-11-21T13:38:02 | 13,762,817 | 1 | 0 | null | null | null | null | UTF-8 | Python | true | false | 1,197 | py | from fabric.api import *
from getpass import getpass
cont = 0  # NOTE(review): never read or written elsewhere in this file
info = {}  # module-level cache of credentials/command, populated lazily by get_info()
def get_info(root=False):
    """Prompt for credentials and a command, caching the answers in the
    module-level `info` dict so repeated calls do not re-prompt.

    With root=True, additionally ensure the root password has been collected.
    Bug fix: previously `pass_root` was only requested when the cache was
    first filled, so a `root=True` call after a non-root call raised KeyError
    downstream; now the root password is requested whenever it is missing.
    Exits with status 1 on Ctrl-C during any prompt.
    """
    try:
        global info
        if not info:
            info['username'] = raw_input('username: ')
            info['password'] = getpass('password: ')
            if root:
                info['pass_root'] = getpass('root password: ')
            info['command'] = raw_input('Insert command to execute: ')
        elif root and 'pass_root' not in info:
            info['pass_root'] = getpass('root password: ')
    except KeyboardInterrupt:
        exit(1)
    return info
def run_as_root_with_su():
    """Run the cached command as root on every host, switching user via su."""
    details = get_info(True)
    su(details['username'], details['password'], details['command'], details['pass_root'])
def run_as_root_with_sudo():
    """Run the cached command through sudo, authenticating with the cached user."""
    details = get_info()
    env.user = details['username']
    env.password = details['password']
    sudo('%s' % details['command'])
def run_as_user_common():
    """Run the cached command with sudo inside a temporary fabric settings scope."""
    details = get_info()
    creds = settings(user='%s' % details['username'], password='%s' % details['password'])
    with creds:
        sudo('%s' % details['command'])
def tofile(filename):
    """Load fabric's target host list from `filename` (one host per line).

    Bug fixes: the file handle is now closed (context manager), and each
    hostname is stripped of its trailing newline/whitespace, which
    readlines() previously left attached; blank lines are skipped.
    """
    with open('%s' % filename, 'r') as host_file:
        env.hosts = [line.strip() for line in host_file if line.strip()]
def su(username,password,command,pass_root):
    """Log in over ssh as `username` and run `command` as root via `su`.

    The outer settings block authenticates the ssh session (the `whoami`
    confirms the login); the inner block reconfigures fabric's sudo
    machinery to issue `su -c` and answer its "Password:" prompt with the
    root password instead of using sudo.
    """
    with settings(user='%s' % username, password='%s' % password):
        run('whoami')
        with settings(user='root',password='%s' % pass_root, sudo_prefix="su -c", sudo_prompt="Password:"):
            sudo('%s' % command)
"reeh@reeh.linuxextreme.com.br"
] | reeh@reeh.linuxextreme.com.br |
66d4ca5d2985fb751978cbfda10806412206a09f | 1528868cc7bcf830cbe90e638d4e4862a07cb14c | /assistente.py | d4c227d184c40a9e0fae4b803507d04d39763f7d | [] | no_license | rcase31/tcc_vpi | b4502bbed893e476c8964172a3c8c4a25a7db07e | d65768eb3255c905960d1956531327c93cc17e2b | refs/heads/master | 2020-10-01T05:21:45.411286 | 2019-12-11T22:06:08 | 2019-12-11T22:06:08 | 227,467,390 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,759 | py | from reconhecimento_audio import *
from reconhecimento_video import *
from reproducao_audio import *
from objeto_avistado import *
from enum import Enum
class Estado(Enum):
    """States of the assistant's interaction loop, in chronological order."""

    ESPERA = 0
    LEITURA = 1
    AGUARDA_OBJETO = 2
    ORIENTACAO = 3
    OBJETO_ENCONTRADO = 4

    def proximo(self):
        """Return the state following this one (raises ValueError past the last)."""
        return Estado(1 + self.value)
class Assistente:
    """Voice-guided assistant: detects objects and the user's hand on camera
    and steers the hand toward a chosen object with audio cues."""

    def __init__(self):
        self.estado = Estado.ESPERA
        self.fala = PlayerAudio()
        self.olhos = ReconhecedorObjetos()
        self.objeto_em_mira = None
        # Bug fix: initialize detection results so queries made before
        # procura_objetos() succeed instead of raising AttributeError.
        self.mao = None
        self.objetos = []

    def procura_objetos(self):
        """Run one detection pass; True when the hand and at least one object
        were found. On success stores them as self.mao / self.objetos."""
        with self.olhos:
            # Received format: ((x1, y1, x2, y2, mao), (objeto_1, objeto_2,..., objeto_n))
            coordenadas_mao, coordenadas_objetos = self.olhos.procurar_c_insistencia(quantidade_maxima=2)
        if len(coordenadas_objetos) == 0 or coordenadas_mao is None:
            return False
        self.mao = ObjetoAvistado(coordenadas_mao)
        self.objetos = [ObjetoAvistado(obj) for obj in coordenadas_objetos]
        return True

    def reproduz_fala(self, audio: str):
        """Play a single audio cue."""
        self.fala.play(audio)

    def avanca_estado(self):
        """Advance the interaction state machine by one state."""
        self.estado = self.estado.proximo()

    def volta_para_estado_inicial(self):
        """Reset the state machine to the initial waiting state."""
        self.estado = Estado.ESPERA

    def mira_em_primeiro_objeto(self):
        """Target the first detected object (assumes a successful detection pass)."""
        self.objeto_em_mira = self.objetos[0]

    def direciona(self) -> bool:
        """
        Guide the hand toward the targeted object with audio directions.

        :return: True once the desired object is aligned with the hand.
        """
        # Refresh the hand position.
        coordenadas_mao = self.olhos.atualiza_pos_mao()
        if coordenadas_mao is None:
            self.fala.play(Audio.NAO_VEJO_MAO)
            return False
        self.mao = ObjetoAvistado(coordenadas_mao)
        # Check whether the hand now overlaps the object
        # (the object is assumed static here).
        if self.mao.sobrepoe(self.objeto_em_mira):
            return True
        # Cues name the direction the hand should move, hence the inversion
        # (hand left of object -> move right is NOT spoken; original mapping kept).
        if self.mao.esta_esquerda(self.objeto_em_mira):
            self.fala.play(Audio.ESQUERDA)
        else:
            self.fala.play(Audio.DIREITA)
        if self.mao.esta_acima(self.objeto_em_mira):
            self.fala.play(Audio.ABAIXO)
        else:
            self.fala.play(Audio.ACIMA)
        return False

    def retorna_objetos_vistos(self) -> list:
        """Names of the currently detected objects."""
        return [obj.nome for obj in self.objetos]

    @staticmethod
    def aguarda_fala(palavras, limite: int = -1) -> str:
        """Block until one of `palavras` is heard. Gives up after `limite`
        attempts (negative means wait forever) and then returns None."""
        contador = 0
        palavra_escutada = None
        while palavra_escutada is None:
            palavra_escutada = aguarda_audio(palavras)
            contador += 1
            if limite == contador:
                return None
        return palavra_escutada

    def encontrou_objetos(self) -> bool:
        """True when the last detection pass found at least one object."""
        return len(self.objetos) > 0

    def foca_em_objeto(self, objeto: str):
        """Target the detected object whose name matches `objeto` (no-op if absent)."""
        for o in self.objetos:
            if o.nome == objeto:
                self.objeto_em_mira = o

    def fala_objetos_vistos(self):
        """Announce how many objects were seen, then name each one."""
        fala = [Audio.EU_VEJO,
                Audio.numero(len(self.objetos)),
                Audio.OBJETOS]
        try:
            self.fala.play(fala)
        except Exception:
            # Bug fix: narrowed from a bare `except:` (which also swallowed
            # SystemExit/KeyboardInterrupt); beep when the count cannot be played.
            self.fala.beep()
        for objeto in self.objetos:
            print(str(objeto))
            self.fala.falar_objeto(str(objeto))
        print("Encontrei %1d objetos" % len(self.objetos))

    def fala_vai_pegar_primeiro(self):
        """Announce that the first detected object will be fetched."""
        self.fala.play([Audio.VAMOS_PEGAR])
        self.fala.falar_objeto(str(self.objetos[0]))
| [
"rafaelljc@gmail.com"
] | rafaelljc@gmail.com |
f54cf52d0cbde5c7faefa2abb24c20a99be5e0ee | bf40d6324345de28469e34f754bbcaac9e04a42a | /src/fracture_propagation_model.py | 1bd16f8ef38643147273d8a7cf8ce5adbc86dde1 | [
"MIT"
] | permissive | pmgbergen/Numerical-modelling-of-convection-driven-cooling-deformation-and-fracturing-of-thermo-poroelastic-m | 2bae87f18abb3f08c2c26a45d260ffb7789f14ad | dc258123d2481b4051760bf70075d7d4e69b3c2c | refs/heads/main | 2023-01-28T09:09:29.265893 | 2020-12-10T09:24:16 | 2020-12-10T09:24:16 | 320,229,565 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 73,487 | py | """
Model class to be used together with an existing/"physical" model to yield a full propagation
model.
Will also be combined with case specific parameters.
"""
import scipy.sparse as sps
import time
import numpy as np
import porepy as pp
import logging
from typing import Dict, Any
logger = logging.getLogger(__name__)
class TensilePropagation(pp.ConformingFracturePropagation):
    """
    One more round of cleaning remains for this and related classes!

    EK: On my reading, the only active function in this class is _candidate_faces(),
    which is a simplification of the corresponding method in the superclass.
    If correct, I suggest we try to integrate the present function as an option
    in the superclass, and drop this extra class.
    """

    def _sorted_propagation_faces(self, g_l: pp.Grid, d_l: Dict) -> np.ndarray:
        """Return the faces tagged for propagation, restricted to tip faces of
        g_l and sorted by decreasing equivalent stress intensity factor (SIF).

        Parameters
        ----------
        g_l : pp.Grid
            Lower-dimensional (fracture) grid.
        d_l : Dict
            Data dictionary of g_l. Must contain "propagate_faces" and
            "SIFs_equivalent" under the mechanics parameter key.

        Returns
        -------
        np.ndarray
            Tip face indices, most critical (highest SIF) first.
        """
        parameters_l = d_l[pp.PARAMETERS][self.mechanics_parameter_key]
        faces = parameters_l["propagate_faces"].nonzero()[0]
        # Only faces that are fracture tips may propagate.
        faces = faces[g_l.tags["tip_faces"][faces]]
        K_equivalent = d_l[pp.PARAMETERS][self.mechanics_parameter_key][
            "SIFs_equivalent"
        ]
        # argsort is ascending; reversed below to get descending SIF order.
        ind = np.argsort(K_equivalent[faces])
        faces = np.atleast_1d(faces[ind][::-1])
        return faces

    def _pick_propagation_face(
        self,
        g_h: pp.Grid,
        g_l: pp.Grid,
        data_h: Dict,
        data_l: Dict,
        data_edge: Dict,
        face_l,
        neighbor_threshold: int = 0,
        force_neighbors: bool = False,
    ) -> None:
        """
        Pick out which matrix face to split for a fracture faces tagged as propagating
        using the precomputed propagation angle.

        Workflow:
            Check that the face_l is permissible
            Identify the corresponding edges_h (= nodes if self.Nd==2)
            The edges' faces_h are candidates for propagation
            Pick the candidate based on the propagation angle

        Parameters
        ----------
        g_h : pp.Grid
            Higer-dimensional grid.
        g_l : pp.Grid
            Lower-dimensional grid.
        data_h : Dict
            Data dictionary corresponding to g_h.
        data_l : Dict
            Data dictionary corresponding to g_l.
        data_edge : Dict
            Data dictionary corresponding to the edge formed by g_h and g_l.

        Returns
        -------
        None
            DESCRIPTION.

        Stores the matrix "propagation_face_map" identifying pairs of
        lower- and higherdimensional faces. During grid updates, the former will receive
        a new neighbour cell and the latter will be split.

        NOTE(review): this method is believed dead (see breakpoint guard below);
        it also calls self._candidate_faces with nine arguments, whereas the
        _candidate_faces defined in this class takes four. Confirm before reuse.
        """
        nd = self.Nd
        # EK: I am almost sure this method is not used, and can be deleted.
        # Leave a breakpoint here, and take action if ever hit it.
        # NOTE: If we hit it, the signature of this method is likely wrong (at least it
        # is different from the corresponding method in the parent class), so we should
        # revise the implementation.
        print("The method was used after all. Remove breakpoint, do QC")
        breakpoint()
        # Restrict to tip faces; nothing to do if none remain.
        face_l: np.ndarray = face_l[g_l.tags["tip_faces"][face_l]]
        if face_l.size == 0:
            face_faces = sps.csr_matrix((g_l.num_faces, g_h.num_faces))
            data_edge["propagation_face_map"]: sps.spmatrix = face_faces
            return

        fracture_faces_h = g_h.tags["fracture_faces"].nonzero()[0]

        tip_faces_l = g_l.tags["tip_faces"].nonzero()[0]
        tip_edges_h = tip_faces_l_to_edges_h(g_l, tip_faces_l, g_h)
        tip_edges_h.sort(axis=0)
        # Collect the (sorted, deduplicated) edges of all fracture faces in g_h.
        fracture_edges_h = np.empty((g_l.dim, 0), dtype=int)
        for frac_face_h in g_h.tags["fracture_faces"].nonzero()[0]:
            for frac_e_h in np.sort(edges_of_face(g_h, frac_face_h), axis=0).T:
                frac_e_h = frac_e_h.reshape((g_l.dim, 1))
                is_found = np.isin(fracture_edges_h, frac_e_h)
                is_found = np.any(np.all(is_found))
                if not is_found or fracture_edges_h.size == 0:
                    fracture_edges_h = np.hstack((fracture_edges_h, frac_e_h))

        edge_h = tip_faces_l_to_edges_h(g_l, face_l, g_h)
        # NOTE(review): fracture_nodes_h is computed but never used below.
        fracture_nodes_h = np.unique(
            g_h.face_nodes[:, g_h.tags["fracture_faces"]].nonzero()[0]
        )
        faces_h_to_split = np.empty(0, dtype=int)
        faces_l_to_split = np.empty(0, dtype=int)
        candidate_faces_h, faces_l_loc = self._candidate_faces(
            g_h,
            edge_h,
            g_l,
            face_l,
            tip_edges_h,
            fracture_edges_h,
            fracture_faces_h,
            neighbor_threshold,
            force_neighbors,
        )
        if force_neighbors:
            face_h = candidate_faces_h
        else:
            faces_l_loc = np.empty(0, dtype=int)
            ## Pick the right candidate:
            # Direction of h-dim face centers from the tip
            tip_coords = np.reshape(g_l.face_centers[:nd, face_l], (nd, 1))
            face_center_vecs = g_h.face_centers[:nd, candidate_faces_h] - tip_coords
            face_center_vecs = face_center_vecs / np.linalg.norm(
                face_center_vecs, axis=0
            )
            # Propagation vector, with sign assuring a positive orientation
            # of the basis
            propagation_vector = self._propagation_vector(g_l, data_l, face_l)
            # Pick the candidate closest to the propagation point,
            # i.e. smallest angle between propagation vector and face center vector
            distances = pp.geometry.distances.point_pointset(
                propagation_vector, face_center_vecs
            )
            ind = np.argsort(distances)
            # There might be no candidate faces left after imposition of restriction
            # of permissible candidates
            if candidate_faces_h.size > 0:
                face_h = candidate_faces_h[ind[0]]
                edges_of_new_face = edges_of_face(g_h, face_h)
                edges_of_new_face.sort(axis=0)
                faces_l_loc = np.empty(0, dtype=int)
                # Update the bookkeeping of tip/fracture edges for the new face.
                for edge in edges_of_new_face.T:  # sort!
                    # Remove from tip edges if it was a tip, add if not
                    ind = np.all(np.isin(tip_edges_h, edge), axis=0)
                    if np.any(ind):
                        tip_edges_h = tip_edges_h[:, ~ind]
                        face_l_loc = tip_edge_h_to_face_l(g_l, g_h, edge)
                        if (
                            face_l_loc.size > 0
                        ):  # the else is a tip_edge_h arisen in this propagation step, and does not correspond to a tip to be opened
                            faces_l_loc = np.hstack((faces_l_loc, face_l_loc))
                    else:
                        tip_edges_h = np.hstack(
                            (tip_edges_h, edge.reshape((g_l.dim, 1)))
                        )
                        fracture_edges_h = np.hstack(
                            (fracture_edges_h, edge.reshape((g_l.dim, 1)))
                        )
        n_neigh = faces_l_loc.size
        if n_neigh > neighbor_threshold:
            faces_h_to_split = np.hstack((faces_h_to_split, np.tile(face_h, n_neigh)))
            faces_l_to_split = np.hstack((faces_l_to_split, faces_l_loc))
            fracture_faces_h = np.hstack((fracture_faces_h, face_h))

        face_faces = sps.csr_matrix(
            (np.ones(faces_l_to_split.shape), (faces_l_to_split, faces_h_to_split)),
            shape=(g_l.num_faces, g_h.num_faces),
        )
        data_edge["propagation_face_map"] = face_faces

    def _candidate_faces(
        self, g_h: pp.Grid, edge_h, g_l: pp.Grid, face_l: np.ndarray
    ) -> np.ndarray:
        """For a given edge (understood to be a fracture tip) in g_h, find the
        candidate faces that may be ready for a split.

        IMPLEMENTATION NOTE: This method is different from the identically named method
        in the parent class ConformingFracturePropagation in that fewer checks are done
        on the candidate faces. The present method is assumed to be used in a tensile
        fracturing regime, where the propagating fracture stays planar, and where the
        grid contains faces that fit this propagating geometry. In comparison, the method
        in the parent class aims at non-planar fractures, and thus needs to do much more
        checks to try to keep a reasonable fracture geometry also after propagation.
        """

        def faces_of_edge(g: pp.Grid, e: np.ndarray) -> np.ndarray:
            """
            Obtain indices of all faces sharing an edge.

            Parameters
            ----------
            g : pp.Grid
            e : np.ndarray
                The edge.

            Returns
            -------
            faces : np.ndarray
                Faces.
            """
            if g.dim == 1:
                faces = e
            elif g.dim == 2:
                faces = g.face_nodes[e].nonzero()[1]
            elif g.dim == 3:
                # In 3d an edge is a node pair; its faces are those containing both.
                f_0 = g.face_nodes[e[0]].nonzero()[1]
                f_1 = g.face_nodes[e[1]].nonzero()[1]
                faces = np.intersect1d(f_0, f_1)
            else:
                raise ValueError("Grid dimension should be 1, 2 or 3")
            return faces

        # Find all the edge's neighboring faces
        candidate_faces = faces_of_edge(g_h, edge_h)

        # Exclude faces that are on a fracture
        are_fracture = g_h.tags["fracture_faces"][candidate_faces]
        candidate_faces = candidate_faces[np.logical_not(are_fracture)]

        return candidate_faces
class THMPropagationModel(TensilePropagation):
    def __init__(self, params):
        super().__init__(params)
        # NOTE(review): pp.THM.__init__ is invoked explicitly on top of super();
        # presumably to set THM-specific attributes not covered by the
        # propagation superclass -- confirm against porepy's THM model.
        pp.THM.__init__(self, params)

        # Set additional case specific fields
        self.set_fields(params)
## THM + propagation specific methods
def _initialize_new_variable_values(
self, g: pp.Grid, d: Dict[str, Any], var: str, dofs: Dict[str, int]
) -> np.ndarray:
"""
Overwrite the corresponding method in superclasses: The pressure variable is
initialized to the atmospheric pressure. Apart from this, all other variables
are initialized to zero.
Parameters
----------
g : pp.Grid
Grid.
d : Dict
Data dictionary.
var : str
Name of variable.
dofs : int
Number of DOFs per cell (or face/node).
Returns
-------
vals : np.ndarray
Values for the new DOFs.
"""
cell_dof = dofs.get("cells")
n_new = d["cell_index_map"].shape[0] - d["cell_index_map"].shape[1]
if var == self.scalar_variable: # type: ignore
vals = (
np.ones(n_new * cell_dof) * pp.ATMOSPHERIC_PRESSURE / self.scalar_scale # type: ignore
)
else:
vals = np.zeros(n_new * cell_dof)
return vals
    def _map_variables(self, solution: np.ndarray) -> np.ndarray:
        """
        In addition to super's mapping an initialization of all primary variables,
        map the face values (darcy_fluxes and stored boundary conditions) and
        quantities to be exported.

        Parameters
        ----------
        solution : np.ndarray
            Solution vector from before propagation.

        Returns
        -------
        new_solution : np.ndarray
            Mapped solution vector with initialized new DOFs.
        """
        # Map solution, and initialize for newly defined dofs
        new_solution = super()._map_variables(solution)
        # Darcy fluxes and stored bc values live on faces and are not handled
        # by super; map them separately.
        self._map_face_values()
        return new_solution
    def _map_face_values(self) -> None:
        """
        Maps the following face values:
            old_bc_values, used by DivU
            darcy_fluxes, used by Upwind

        Returns
        -------
        None.
        """
        # g_h Darcy fluxes are first copied to both the split faces, then mapped
        # to the mortar grid and finally removed from d_h.
        # In d_l, we initialize zero fluxes on the new faces, since there was
        # no flux across fracture tips previous to propagation.
        t_key = self.temperature_parameter_key
        keys = (
            self.mechanics_parameter_key,
            self.mechanics_temperature_parameter_key,
        )
        gb = self.gb
        for g, d in gb:
            face_map: sps.spmatrix = d["face_index_map"]
            # Vector quantities (bc values) need the map expanded to Nd components.
            mapping = sps.kron(face_map, sps.eye(self.Nd))

            # Map darcy fluxes
            d[pp.PARAMETERS][t_key]["darcy_flux"] = (
                face_map * d[pp.PARAMETERS][t_key]["darcy_flux"]
            )
            if g.dim == self.Nd:
                # Duplicate darcy_fluxes for new faces ("other" side of new fracture)
                new_faces = d["new_faces"]
                old_faces = d["split_faces"]
                # Opposite sign: the new face has the opposite orientation.
                d[pp.PARAMETERS][t_key]["darcy_flux"][new_faces] = -d[pp.PARAMETERS][
                    t_key
                ]["darcy_flux"][old_faces]

            # Map bc values
            for key in keys:
                old_vals = d[pp.PARAMETERS][key]["bc_values"]
                new_vals = mapping * old_vals
                new_ind = pp.fvutils.expand_indices_nd(d["new_faces"], self.Nd)
                if new_ind.size > 0:
                    # Copy bc values from the split faces onto their new twins.
                    old_ind = pp.fvutils.expand_indices_nd(
                        d["split_faces"], self.Nd
                    )
                    new_vals[new_ind] = old_vals[old_ind]
                d[pp.STATE][key]["bc_values"] = new_vals

        for e, d in gb.edges():
            cell_map: sps.spmatrix = d["cell_index_map"]
            mg: pp.MortarGrid = d["mortar_grid"]
            d[pp.PARAMETERS][t_key]["darcy_flux"] = (
                cell_map * d[pp.PARAMETERS][t_key]["darcy_flux"]
            )
            g_l, g_h = gb.nodes_of_edge(e)
            d_h = gb.node_props(g_h)
            # NOTE(review): new_ind is computed but not used below.
            new_ind = self._new_dof_inds(cell_map)
            fluxes_h: np.ndarray = d_h[pp.PARAMETERS][t_key]["darcy_flux"]
            new_mortar_fluxes = mg.primary_to_mortar_int() * fluxes_h
            d[pp.PARAMETERS][t_key]["darcy_flux"] += new_mortar_fluxes

        # Zero the fluxes on the higher-dimensional fracture faces; these are
        # now carried by the mortar variables (the "removed from d_h" step).
        g = self._nd_grid()
        d = gb.node_props(g)
        d[pp.PARAMETERS][t_key]["darcy_flux"][g.tags["fracture_faces"]] = 0
def before_newton_loop(self):
self.convergence_status = False
self._iteration = 0
    def update_discretizations(self):
        """Rediscretize after a geometry change.

        For the moment, do a full rediscretization. A more targeted approach
        should be possible.
        """
        self._minimal_update_discretization()
    def before_newton_iteration(self) -> None:
        """Rediscretize non-linear terms.

        QUESTION: Should the parent be updated?
        """
        # First update parameters, then discretize all terms except those treated
        # by mpfa and mpsa in the highest dimension.
        # NOTE: We may end up unnecessarily rediscretizing a few terms, but the cost
        # of this is insignificant.
        self._iteration += 1

        ## First update parameters.
        # The Darcy fluxes were updated right after the previous Newton iteration
        # or in self.prepare_for_simulation(), thus no need to update these here.

        # Update apertures and specific volumes (e.g. compute from displacement jumps).
        # Store as iterate information.
        self.update_all_apertures(to_iterate=True)

        # Update parameters.
        # Depending on the implementation of set_parameters, this can for instance
        # update permeability as a function of aperture. Similarly, various other
        # quantities can be updated.
        self.set_parameters()

        ###
        # With updated parameters (including Darcy fluxes), we can now discretize
        # non-linear terms.

        # Discretize everything except terms relating to poro-elasticity and
        # diffusion (that is, discretize everything not handled by mpfa or mpsa).
        # NOTE: Accumulation terms in self.Nd could also have been excluded.
        term_list = [
            "!mpsa",
            "!stabilization",
            "!div_u",
            "!grad_p",
            "!diffusion",
        ]
        filt = pp.assembler_filters.ListFilter(term_list=term_list)
        # NOTE: No grid filter here; in practice, all terms on lower-dimensional grids
        # (apart from diffusion) are discretized here, so is everything on the mortars
        self.assembler.discretize(filt=filt)

        # Discretize diffusion terms on lower-dimensional grids.
        for dim in range(self.Nd):
            grid_list = self.gb.grids_of_dimension(dim)
            if len(grid_list) == 0:
                continue
            filt = pp.assembler_filters.ListFilter(
                grid_list=grid_list,
                term_list=["diffusion"],
            )
            self.assembler.discretize(filt=filt)
def after_propagation_loop(self):
"""
TODO: Purge.
Returns
-------
None.
"""
ValueError("should not call this")
    def after_newton_iteration(self, solution: np.ndarray) -> None:
        """Run super's post-iteration updates, then recompute Darcy fluxes."""
        super().after_newton_iteration(solution)
        # Update Darcy fluxes based on the newly converged pressure solution.
        # NOTE: For consistency between the discretization and solution, this is
        # done before updates to permeability or geometry (by fracture propagation).
        self.compute_fluxes()
    def after_newton_convergence(self, solution, errors, iteration_counter):
        """Propagate fractures if relevant. Update variables and parameters
        according to the newly calculated solution.
        """
        gb = self.gb

        # We export the converged solution *before* propagation:
        self.update_all_apertures(to_iterate=True)
        self.export_step()

        # NOTE: Darcy fluxes were updated in self.after_newton_iteration().
        # The fluxes are mapped to the new geometry (and fluxes are assigned for
        # newly formed faces) by the below call to self._map_variables().

        # Propagate fractures:
        #   i) Identify which faces to open in g_h
        #   ii) Split faces in g_h
        #   iii) Update g_l and the mortar grid. Update projections.
        self.evaluate_propagation()

        if self.propagated_fracture:
            # Update parameters and discretization

            for g, d in gb:
                if g.dim < self.Nd - 1:
                    # Should be really careful in this situation. Fingers crossed.
                    continue

                # Transfer information on new faces and cells from the format used
                # by self.evaluate_propagation to the format needed for update of
                # discretizations (see Discretization.update_discretization()).
                # TODO: This needs more documentation.
                # NOTE(review): np.int is deprecated in newer numpy; update_info
                # is currently unused (the assignment below is commented out).
                new_faces = d.get("new_faces", np.array([], dtype=np.int))
                split_faces = d.get("split_faces", np.array([], dtype=np.int))
                modified_faces = np.hstack((new_faces, split_faces))
                update_info = {
                    "map_cells": d["cell_index_map"],
                    "map_faces": d["face_index_map"],
                    "modified_cells": d.get("new_cells", np.array([], dtype=np.int)),
                    "modified_faces": d.get("new_faces", modified_faces),
                }
                # d["update_discretization"] = update_info

            # Map variables after fracture propagation. Also initialize variables
            # for newly formed cells, faces and nodes.
            # Also map darcy fluxes and time-dependent boundary values (advection
            # and the div_u term in poro-elasticity).
            new_solution = self._map_variables(solution)

            # Update apertures: Both state (time step) and iterate.
            self.update_all_apertures(to_iterate=False)
            self.update_all_apertures(to_iterate=True)

            # Set new parameters.
            self.set_parameters()
            # For now, update discretizations will do a full rediscretization
            # TODO: Replace this with a targeted rediscretization.
            # We may want to use some of the code below (after return), but not all of
            # it.
            self._minimal_update_discretization()
        else:
            # No updates to the solution
            new_solution = solution

        # Finally, use super's method to do updates not directly related to
        # fracture propagation
        super().after_newton_convergence(new_solution, errors, iteration_counter)

        self.adjust_time_step()

        # Done!
        return
    def _minimal_update_discretization(self):
        """Rediscretize after fracture propagation, avoiding a full mpfa/mpsa
        discretization of the highest-dimensional grid where possible."""
        # NOTE: Below here is an attempt at local updates of the discretization
        # matrices. For now, these are replaced by a full discretization at the
        # beginning of each time step.

        # EK: Discretization is a pain, because of the flux term.
        # The advective term needs an updated (expanded faces) flux term,
        # to compute this, we first need to expand discretization of the
        # pressure diffusion terms.
        # It should be possible to do something smarter here, perhaps compute
        # fluxes before splitting, then transfer numbers and populate with other
        # values. Or something else.
        gb = self.gb

        t_0 = time.time()

        g_max = gb.grids_of_dimension(gb.dim_max())[0]
        grid_list = gb.grids_of_dimension(gb.dim_max() - 1).tolist()
        grid_list.append(g_max)

        data = gb.node_props(g_max)[pp.DISCRETIZATION_MATRICES]

        # NOTE(review): the flow/mech copies below are never used afterwards;
        # possibly left over from an earlier save/restore scheme.
        flow = {}
        for key in data["flow"]:
            flow[key] = data["flow"][key].copy()

        mech = {}
        for key in data["mechanics"]:
            mech[key] = data["mechanics"][key].copy()

        self.discretize_biot(update_after_geometry_change=False)

        for e, _ in gb.edges_of_node(g_max):
            grid_list.append((e[0], e[1], e))

        filt = pp.assembler_filters.ListFilter(
            variable_list=[self.scalar_variable, self.mortar_scalar_variable],
            term_list=[self.scalar_coupling_term],
            grid_list=grid_list,
        )
        self.assembler.discretize(filt=filt)

        grid_list = gb.grids_of_dimension(gb.dim_max() - 1).tolist()
        filt = pp.assembler_filters.ListFilter(
            term_list=["diffusion", "mass", "source"],
            variable_list=[self.scalar_variable],
            grid_list=grid_list,
        )
        # self.assembler.update_discretization(filt=filt)
        self.assembler.discretize(filt=filt)

        # Now that both variables and discretizations for the flux term have been
        # updated, we can compute the fluxes on the new grid.
        # self.compute_fluxes()

        # Update biot. Should be cheap.
        self.copy_biot_discretizations()

        # No need to update source term

        # Then the temperature discretizations. These are updated, to avoid full mpfa
        # in g_max
        temperature_terms = ["source", "diffusion", "mass", self.advection_term]
        filt = pp.assembler_filters.ListFilter(
            grid_list=[self._nd_grid()],
            variable_list=[self.temperature_variable],
            term_list=temperature_terms,
        )
        # self.assembler.update_discretization(filt=filt)
        self.assembler.discretize(filt=filt)

        # Pressure-temperature coupling terms
        coupling_terms = [self.s2t_coupling_term, self.t2s_coupling_term]
        filt = pp.assembler_filters.ListFilter(
            grid_list=[self._nd_grid()],
            variable_list=[self.temperature_variable, self.scalar_variable],
            term_list=coupling_terms,
        )
        self.assembler.discretize(filt=filt)

        # Build a list of all edges, and all couplings
        edge_list = []
        for e, _ in self.gb.edges():
            edge_list.append(e)
            edge_list.append((e[0], e[1], e))
        if len(edge_list) > 0:
            filt = pp.assembler_filters.ListFilter(grid_list=edge_list)
            self.assembler.discretize(filt=filt)

        # Finally, discretize terms on the lower-dimensional grids. This can be done
        # in the traditional way, as there is no Biot discretization here.
        for dim in range(0, self.Nd):
            grid_list = self.gb.grids_of_dimension(dim)
            if len(grid_list) > 0:
                filt = pp.assembler_filters.ListFilter(grid_list=grid_list)
                self.assembler.discretize(filt=filt)

        logger.info("Rediscretized in {} s.".format(time.time() - t_0))
## Methods specific to this project, but common to (some of) the examples
def set_fields(self, params):
"""
Set various fields to be used in the model.
"""
# We operate on the temperature difference T-T_0, with T in Kelvin
self.T_0_Kelvin = 500
self.background_temp_C = pp.KELKIN_to_CELSIUS(self.T_0_Kelvin)
# Scaling coefficients
self.scalar_scale = 1e7
self.temperature_scale = 1e0
self.file_name = self.params["file_name"]
self.folder_name = self.params["folder_name"]
self.export_fields = [
"u_exp",
"p_exp",
"T_exp",
"traction_exp",
"aperture_exp",
"fluxes_exp",
"cell_centers",
]
# Geometry
def create_grid(self) -> None:
"""
Method that creates the GridBucket of a 2d or 3d domain.
The geometry is defined through the method self._fractures() and the
domain sizes stored in the dictionary self.box.
This method sets self.gb and self.Nd.
"""
# Define fractures
self._fractures()
x = self.box["xmax"] - self.box["xmin"]
y = self.box["ymax"] - self.box["ymin"]
nx = self.params.get("nx", 10)
ny = self.params.get("ny", nx)
ncells = [nx, ny]
dims = [x, y]
if "zmax" in self.box:
ncells.append(self.params.get("nz", nx))
dims.append(self.box["zmax"] - self.box["zmin"])
gb = pp.meshing.cart_grid(self.fracs, ncells, physdims=dims)
pp.contact_conditions.set_projections(gb)
self.gb = gb
self.Nd = self.gb.dim_max()
# Tag the wells
self._tag_well_cells()
self.n_frac = len(gb.grids_of_dimension(self.Nd - 1))
# Numerics
    def assign_discretizations(self) -> None:
        """
        For long time steps, scaling the diffusive interface fluxes in the non-default
        way turns out to actually be beneficial for the condition number.
        """
        # Call parent class for discretizations.
        super().assign_discretizations()

        # Flip the kinv scaling of the interface coupling laws: off for
        # temperature, on for the scalar (pressure) coupling.
        for e, d in self.gb.edges():
            d[pp.COUPLING_DISCRETIZATION][self.temperature_coupling_term][e][
                1
            ].kinv_scaling = False
            d[pp.COUPLING_DISCRETIZATION][self.scalar_coupling_term][e][
                1
            ].kinv_scaling = True
    def assemble_and_solve_linear_system(self, tol):
        """Assemble the global linear system and solve with a sparse direct
        solver.

        The `tol` argument is unused (direct solve). If self.report_A is set
        (default True), the per-block matrix maxima are logged at debug level.
        Returns the solution vector.
        """
        if getattr(self, "report_A", True):
            A, b = self.assembler.assemble_matrix_rhs(add_matrices=False)
            for key in A.keys():
                logger.debug("{:.2e} {}".format(np.max(np.abs(A[key])), key))

        A, b = self.assembler.assemble_matrix_rhs()
        # Optionally cast index arrays to 64 bit, as expected by umfpack.
        prepare_umfpack = self.params.get("prepare_umfpack", False)
        if prepare_umfpack:
            A.indices = A.indices.astype(np.int64)
            A.indptr = A.indptr.astype(np.int64)
        logger.debug("Max element in A {0:.2e}".format(np.max(np.abs(A))))
        logger.info(
            "Max {0:.2e} and min {1:.2e} A sum.".format(
                np.max(np.sum(np.abs(A), axis=1)), np.min(np.sum(np.abs(A), axis=1))
            )
        )
        t_0 = time.time()
        x = sps.linalg.spsolve(A, b)
        logger.info("Solved in {} s.".format(time.time() - t_0))
        return x
    def check_convergence(self, solution, prev_solution, init_solution, nl_params=None):
        """Check Newton convergence separately for matrix displacement,
        displacement jumps, temperature and pressure.

        Returns (error_uh, converged, diverged); divergence is never flagged.
        """
        g_max = self._nd_grid()
        uh_dof = self.assembler.dof_ind(g_max, self.displacement_variable)
        p_dof = np.array([], dtype=np.int)
        T_dof = np.array([], dtype=np.int)
        # NOTE(review): contact_dof is collected but not used in the criteria
        # below (see the commented-out converged_contact).
        contact_dof = np.array([], dtype=np.int)
        for g, _ in self.gb:
            p_dof = np.hstack((p_dof, self.assembler.dof_ind(g, self.scalar_variable)))
            T_dof = np.hstack(
                (T_dof, self.assembler.dof_ind(g, self.temperature_variable))
            )
            if g.dim == self.Nd - 1:
                contact_dof = np.hstack(
                    (
                        contact_dof,
                        self.assembler.dof_ind(g, self.contact_traction_variable),
                    )
                )

        # Also find indices for the contact variables
        uj_dof = np.array([], dtype=np.int)
        for e, _ in self.gb.edges():
            if e[0].dim == self.Nd:
                uj_dof = np.hstack(
                    (
                        uj_dof,
                        self.assembler.dof_ind(e, self.mortar_displacement_variable),
                    )
                )

        # Pick out the solution from current, previous iterates, as well as the
        # initial guess.
        def differences(dofs):
            # Size-normalized 2-norms of iterate/initial differences and of
            # the current solution restricted to `dofs`.
            sol_now = solution[dofs]
            sol_prev = prev_solution[dofs]
            sol_init = init_solution[dofs]
            diff_iterates = np.sqrt(np.sum((sol_now - sol_prev) ** 2)) / sol_now.size
            diff_init = np.sqrt(np.sum((sol_now - sol_init) ** 2)) / sol_now.size
            norm = np.sqrt(np.sum(sol_now ** 2)) / sol_now.size
            return diff_iterates, diff_init, norm

        iterate_diff_T, init_diff_T, norm_T = differences(T_dof)
        iterate_diff_p, init_diff_p, norm_p = differences(p_dof)
        iterate_diff_uh, init_diff_uh, norm_uh = differences(uh_dof)
        iterate_diff_uj, init_diff_uj, norm_uj = differences(uj_dof)

        tol_convergence = nl_params["nl_convergence_tol"]
        # Not sure how to use the divergence criterion
        # tol_divergence = nl_params["nl_divergence_tol"]

        diverged = False

        # Check absolute convergence criterion
        def convergence(val, ref, atol, rtol=None):
            # Absolute check first; otherwise relative to `ref`.
            if rtol is None:
                rtol = atol
            if val < atol:
                return True, val
            error = val / ref
            return error < rtol, error

        # Temperature is judged against a 100x looser tolerance.
        scaled_convergence = 100 * tol_convergence
        converged_uh, error_uh = convergence(iterate_diff_uh, norm_uh, tol_convergence)
        converged_T, error_T = convergence(iterate_diff_T, norm_T, scaled_convergence)
        converged_p, error_p = convergence(iterate_diff_p, norm_p, tol_convergence)
        converged_uj, error_uj = convergence(iterate_diff_uj, norm_uj, tol_convergence)
        converged = (
            converged_uj
            # and converged_contact
            and converged_uh
            and converged_T
            and converged_p
        )

        logger.info(
            "Errors: displacement jump {:.2e}, matrix displacement {:.2e}, temperature {:.2e} and pressure {:.2e}".format(
                error_uj, error_uh, error_T, error_p
            )
        )
        logger.info(
            "Difference: displacement jump {:.2e}, matrix displacement {:.2e}, temperature {:.2e} and pressure {:.2e}".format(
                iterate_diff_uj, iterate_diff_uh, iterate_diff_T, iterate_diff_p
            )
        )

        return error_uh, converged, diverged
def adjust_time_step(self):
"""
Adjust the time step so that smaller time steps are used when the driving forces
are changed. Also make sure to exactly reach the start and end time for
each phase.
"""
# Default is to just increase the time step somewhat
self.time_step = getattr(self, "time_step_factor", 1.0) * self.time_step
# We also want to make sure that we reach the end of each simulation phase
for dt, lim in zip(self.phase_time_steps, self.phase_limits):
diff = self.time - lim
if diff < 0 and -diff <= self.time_step:
self.time_step = -diff
if np.isclose(self.time, lim):
self.time_step = dt
# And that the time step doesn't grow too large after the equilibration phase
if self.time > 0:
self.time_step = min(self.time_step, self.max_time_step)
def compute_fluxes(self):
    """Compute Darcy fluxes (stored under the temperature parameter key).

    Delegates the flux computation to the parent class. For 3d runs
    (Nd == 3) the fluxes are damped from the fifth nonlinear iteration
    onwards by averaging with the previous iterate's fluxes.
    """
    use_smoothing = self.Nd == 3
    gb = self.gb
    # Stash the previous iterate's fluxes before they are overwritten by
    # the parent's computation.
    for g, d in gb:
        pa = d[pp.PARAMETERS][self.temperature_parameter_key]
        if self._iteration > 1:
            pa["darcy_flux_1"] = pa["darcy_flux"].copy()
    for e, d in gb.edges():
        pa = d[pp.PARAMETERS][self.temperature_parameter_key]
        if self._iteration > 1:
            pa["darcy_flux_1"] = pa["darcy_flux"].copy()
    super().compute_fluxes()
    if not use_smoothing or self._iteration < 5:
        return
    # Weights for (current, previous) fluxes; a == b gives a plain mean.
    a, b = 1, 1
    node_update, edge_update = 0, 0
    for g, d in gb:
        pa = d[pp.PARAMETERS][self.temperature_parameter_key]
        v1 = pa["darcy_flux_1"]
        v2 = pa["darcy_flux"]
        v_new = (a * v2 + b * v1) / (a + b)
        pa["darcy_flux"] = v_new
        # Relative l2-change induced by the smoothing, for logging only.
        node_update += np.sqrt(
            np.sum(np.power(v2 - v_new, 2)) / np.sum(np.power(v2, 2))
        )
    for e, d in gb.edges():
        pa = d[pp.PARAMETERS][self.temperature_parameter_key]
        v1 = pa["darcy_flux_1"]
        v2 = pa["darcy_flux"]
        v_new = (a * v2 + b * v1) / (a + b)
        pa["darcy_flux"] = v_new
        edge_update += np.sqrt(
            np.sum(np.power(v2 - v_new, 2)) / np.sum(np.power(v2, 2))
        )
    logger.info(
        "Smoothed fluxes by {:.2e} and edge {:.2e} at time {:.2e}".format(
            node_update, edge_update, self.time
        )
    )
# Initialization etc.
def initial_condition(self) -> None:
    """Set initial values for the Darcy fluxes, p, T and u.

    Creates the parameter dictionaries for all subdomains, initializes
    apertures for both state and iterate, and stores initial pressure,
    temperature and displacement fields in d[pp.STATE] / the iterate on
    both nodes and edges.
    """
    for g, d in self.gb:
        d[pp.PARAMETERS] = pp.Parameters()
        d[pp.PARAMETERS].update_dictionaries(
            [
                self.mechanics_parameter_key,
                self.mechanics_temperature_parameter_key,
                self.scalar_parameter_key,
                self.temperature_parameter_key,
            ]
        )
    # Apertures must exist (state and iterate) before the parent call.
    self.update_all_apertures(to_iterate=False)
    self.update_all_apertures()
    super().initial_condition()
    for g, d in self.gb:
        u0 = self.initial_displacement(g)
        # Zero initial Darcy flux on all faces.
        d[pp.PARAMETERS][self.temperature_parameter_key].update(
            {"darcy_flux": np.zeros(g.num_faces)}
        )
        p0 = self.initial_scalar(g)
        T0 = self.initial_temperature(g)
        state = {
            self.scalar_variable: p0,
            self.temperature_variable: T0,
        }
        iterate = {
            self.scalar_variable: p0,
            self.temperature_variable: T0,
            self.displacement_variable: u0,
        }
        pp.set_state(d, state)
        pp.set_iterate(d, iterate)
    for e, d in self.gb.edges():
        update = {self.mortar_displacement_variable: self.initial_displacement(e)}
        pp.set_state(d, update)
        pp.set_iterate(d, update)
def initial_scalar(self, g) -> np.ndarray:
    """Initial (scaled) pressure: hydrostatic, depending on _depth.

    _depth is set to 0 in example II, giving atmospheric pressure.
    """
    depth = self._depth(g.cell_centers)
    return self.hydrostatic_pressure(g, depth) / self.scalar_scale
def initial_temperature(self, g) -> np.ndarray:
    """Return the initial temperature field: uniformly zero.

    Overridden with a depth-dependent profile f(z) in example IV.
    """
    n_cells = g.num_cells
    return np.zeros(n_cells)
def initial_displacement(self, g):
    """Return the initial displacement for a subdomain or interface.

    ``g`` may be a grid (matrix displacement) or an edge tuple (mortar
    displacement). Defaults to zeros unless compute_initial_displacement
    has stored an "initial_displacement" field in the state.
    """
    if isinstance(g, tuple):
        # Edge: size from the mortar grid.
        d = self.gb.edge_props(g)
        nc = d["mortar_grid"].num_cells
    else:
        d = self.gb.node_props(g)
        nc = g.num_cells
    return d[pp.STATE].get("initial_displacement", np.zeros((self.Nd * nc)))
def compute_initial_displacement(self):
    """Compute a displacement consistent with BCs and initial p and T.

    Is run prior to a time-stepping scheme. A modified version of the full
    equation system is solved: p and T are fixed by only considering the
    implicit mass matrix, while the coupling contributions grad p and
    grad T are retained in the momentum balance (dropping them would make
    the matrix singular). The result is stored as "initial_displacement"
    in d[pp.STATE] on the matrix grid and on matrix-fracture edges, where
    it is picked up by initial_displacement().
    """
    self.prepare_simulation()
    var_d = self.displacement_variable
    # We need the source term for mechanics. Ensure no contribution for
    # p and T.
    for g, d in self.gb:
        d[pp.PARAMETERS][self.temperature_parameter_key]["source"] = np.zeros(
            g.num_cells
        )
        d[pp.PARAMETERS][self.scalar_parameter_key]["source"] = np.zeros(
            g.num_cells
        )
    # Terms to include. We have to retain the coupling terms to avoid a
    # singular matrix
    terms = [
        "mpsa",
        self.friction_coupling_term,
        "grad_p",
        "mass",
        "fracture_scalar_to_force_balance",
        self.advection_coupling_term,
        self.temperature_coupling_term,
        self.scalar_coupling_term,
        "empty",
        "source",
        # "matrix_temperature_to_force_balance",
        # "matrix_scalar_to_force_balance",
    ]
    filt = pp.assembler_filters.ListFilter(term_list=terms)
    A, b = self.assembler.assemble_matrix_rhs(filt=filt)
    if self.params.get("prepare_umfpack", False):
        # Cast index arrays to 64 bit (presumably required by the UMFPACK
        # solver backend -- TODO confirm).
        A.indices = A.indices.astype(np.int64)
        A.indptr = A.indptr.astype(np.int64)
    x = sps.linalg.spsolve(A, b)
    self.assembler.distribute_variable(x)
    # Store the initial displacement (see method initial_displacement)
    g = self._nd_grid()
    d = self.gb.node_props(g)
    d[pp.STATE]["initial_displacement"] = d[pp.STATE][var_d].copy()
    for e, d in self.gb.edges():
        if e[0].dim == self.Nd:
            d[pp.STATE]["initial_displacement"] = d[pp.STATE][
                self.mortar_displacement_variable
            ].copy()
def prepare_simulation(self):
    """Set up grid, parameters, variables, discretizations and exporter.

    Copy of the THM parent method which avoids overwriting self.gb and
    rediscretizing if the method is called a second time (the first call
    is made from self.compute_initial_displacement).
    """
    first = not hasattr(self, "gb") or self.gb is None
    if first:
        self.create_grid()
    self.update_all_apertures(to_iterate=False)
    self.update_all_apertures()
    self._set_time_parameters()
    self.set_rock_and_fluid()
    self.initial_condition()
    self.set_parameters()
    if first:
        # Variable/discretization assignment and discretization are only
        # done once (see docstring).
        self.assign_variables()
        self.assign_discretizations()
        self.discretize()
    # Initialize Darcy fluxes
    self.compute_fluxes()
    self.initialize_linear_solver()
    self.export_step()
def _tag_well_cells(self):
"""
Tag well cells with unitary values, positive for injection cells and negative
for production cells.
"""
pass
# Apertures and specific volumes
def aperture(self, g, from_iterate=True) -> np.ndarray:
    """Return the aperture field of subdomain ``g``.

    Values are maintained by update_all_apertures; ``from_iterate``
    selects the current iterate rather than the converged state.
    """
    props = self.gb.node_props(g)[pp.STATE]
    if from_iterate:
        return props[pp.ITERATE]["aperture"]
    return props["aperture"]
def specific_volumes(self, g, from_iterate=True) -> np.ndarray:
    """Return the specific volume field of subdomain ``g``.

    Values are maintained by update_all_apertures; ``from_iterate``
    selects the current iterate rather than the converged state.
    """
    props = self.gb.node_props(g)[pp.STATE]
    if from_iterate:
        return props[pp.ITERATE]["specific_volume"]
    return props["specific_volume"]
def update_all_apertures(self, to_iterate=True):
    """Update apertures and specific volumes on all subdomains.

    To better control the aperture computation, it is done for the entire
    gb by a single function call. This also allows us to ensure the
    fracture apertures are updated before the intersection apertures are
    inherited.

    The aperture of a fracture is

        initial aperture + || u_n ||

    Intersections (dim < Nd - 1) inherit the average aperture of their
    parent fractures; their specific volume is aperture**(Nd - dim).

    Parameters:
        to_iterate (bool): store results in the iterate (True) or the
            converged state (False).
    """
    gb = self.gb
    # First pass: matrix (aperture 1) and fractures.
    for g, d in gb:
        apertures = np.ones(g.num_cells)
        if g.dim == (self.Nd - 1):
            # Initial aperture
            apertures *= self.initial_aperture
            # Reconstruct the displacement solution on the fracture
            g_h = gb.node_neighbors(g)[0]
            data_edge = gb.edge_props((g, g_h))
            if pp.STATE in data_edge:
                u_mortar_local = self.reconstruct_local_displacement_jump(
                    data_edge,
                    d["tangential_normal_projection"],
                    from_iterate=to_iterate,
                )
                # Magnitudes of normal components
                # Absolute value to avoid negative volumes for non-converged
                # solution (if from_iterate is True above)
                apertures += np.absolute(u_mortar_local[-1])
        if to_iterate:
            pp.set_iterate(
                d,
                {"aperture": apertures.copy(), "specific_volume": apertures.copy()},
            )
        else:
            state = {
                "aperture": apertures.copy(),
                "specific_volume": apertures.copy(),
            }
            pp.set_state(d, state)
    # Second pass: intersections inherit apertures from parent fractures.
    for g, d in gb:
        parent_apertures = []
        num_parent = []
        if g.dim < (self.Nd - 1):
            for edges in gb.edges_of_node(g):
                e = edges[0]
                g_h = e[0]
                if g_h == g:
                    g_h = e[1]
                if g_h.dim == (self.Nd - 1):
                    d_h = gb.node_props(g_h)
                    if to_iterate:
                        a_h = d_h[pp.STATE][pp.ITERATE]["aperture"]
                    else:
                        a_h = d_h[pp.STATE]["aperture"]
                    # Parent aperture projected from cells to faces, then
                    # onto the intersection via the mortar grid.
                    a_h_face = np.abs(g_h.cell_faces) * a_h
                    mg = gb.edge_props(e)["mortar_grid"]
                    # Assumes g_h is primary
                    a_l = (
                        mg.mortar_to_secondary_avg()
                        * mg.primary_to_mortar_avg()
                        * a_h_face
                    )
                    parent_apertures.append(a_l)
                    num_parent.append(
                        np.sum(mg.mortar_to_secondary_int().A, axis=1)
                    )
                else:
                    raise ValueError("Intersection points not implemented in 3d")
            parent_apertures = np.array(parent_apertures)
            num_parents = np.sum(np.array(num_parent), axis=0)
            # Average over all parent fractures.
            apertures = np.sum(parent_apertures, axis=0) / num_parents
            specific_volumes = np.power(
                apertures, self.Nd - g.dim
            )  # Could also be np.product(parent_apertures, axis=0)
            if to_iterate:
                pp.set_iterate(
                    d,
                    {
                        "aperture": apertures.copy(),
                        "specific_volume": specific_volumes.copy(),
                    },
                )
            else:
                state = {
                    "aperture": apertures.copy(),
                    "specific_volume": specific_volumes.copy(),
                }
                pp.set_state(d, state)
    # NOTE(review): returns the apertures of the last subdomain visited;
    # the return value appears informational only.
    return apertures
# Parameter assignment
def set_mechanics_parameters(self):
    """Set mechanical parameters for all subdomains and interfaces.

    Note that we divide the momentum balance equation by self.scalar_scale.
    A homogeneous initial temperature is assumed.
    """
    gb = self.gb
    for g, d in gb:
        if g.dim == self.Nd:
            # Rock parameters
            rock = self.rock
            lam = rock.LAMBDA * np.ones(g.num_cells) / self.scalar_scale
            mu = rock.MU * np.ones(g.num_cells) / self.scalar_scale
            C = pp.FourthOrderTensor(mu, lam)
            bc = self.bc_type_mechanics(g)
            bc_values = self.bc_values_mechanics(g)
            sources = self.source_mechanics(g)
            # In the momentum balance, the coefficient hits the scalar, and should
            # not be scaled. Same goes for the energy balance, where we divide all
            # terms by T_0, hence the term originally beta K T d(div u) / dt becomes
            # beta K d(div u) / dt = coupling_coefficient d(div u) / dt.
            coupling_coefficient = self.biot_alpha(g)
            pp.initialize_data(
                g,
                d,
                self.mechanics_parameter_key,
                {
                    "bc": bc,
                    "bc_values": bc_values,
                    "source": sources,
                    "fourth_order_tensor": C,
                    "biot_alpha": coupling_coefficient,
                    "time_step": self.time_step,
                    "shear_modulus": self.rock.MU,
                    "poisson_ratio": self.rock.POISSON_RATIO,
                },
            )
            # Separate key for the temperature-mechanics coupling.
            pp.initialize_data(
                g,
                d,
                self.mechanics_temperature_parameter_key,
                {
                    "biot_alpha": self.biot_beta(g),
                    "bc_values": bc_values,
                },
            )
        elif g.dim == self.Nd - 1:
            # Fracture: contact mechanics and propagation parameters.
            K_crit = self.rock.SIF_crit * np.ones((self.Nd, g.num_faces))
            pp.initialize_data(
                g,
                d,
                self.mechanics_parameter_key,
                {
                    "friction_coefficient": self.rock.FRICTION_COEFFICIENT,
                    "contact_mechanics_numerical_parameter": 1e1,
                    "dilation_angle": np.radians(3),
                    "time": self.time,
                    "SIFs_critical": K_crit,
                },
            )
    for e, d in gb.edges():
        mg = d["mortar_grid"]
        # Parameters for the surface diffusion. Not used as of now.
        pp.initialize_data(
            mg,
            d,
            self.mechanics_parameter_key,
            {"mu": self.rock.MU, "lambda": self.rock.LAMBDA},
        )
def set_scalar_parameters(self):
    """Set parameters for the (scaled) pressure equation on all subdomains.

    Assigns boundary conditions, compressibility-based mass weights, the
    Biot coefficient, sources and the temperature-to-pressure coupling
    weight, then delegates to set_vector_source and
    set_permeability_from_aperture.
    """
    for g, d in self.gb:
        specific_volumes = self.specific_volumes(g)
        # Define boundary conditions for flow
        bc = self.bc_type_scalar(g)
        # Set boundary condition values
        bc_values = self.bc_values_scalar(g)
        biot_coefficient = self.biot_alpha(g)
        compressibility = self.fluid.COMPRESSIBILITY
        mass_weight = compressibility * self.porosity(g)
        if g.dim == self.Nd:
            # Add the rock-matrix compressibility contribution.
            mass_weight += (
                biot_coefficient - self.porosity(g)
            ) / self.rock.BULK_MODULUS
        mass_weight *= self.scalar_scale * specific_volumes
        pp.initialize_data(
            g,
            d,
            self.scalar_parameter_key,
            {
                "bc": bc,
                "bc_values": bc_values,
                "mass_weight": mass_weight,
                "biot_alpha": biot_coefficient,
                "time_step": self.time_step,
                "ambient_dimension": self.Nd,
                "source": self.source_scalar(g),
            },
        )
        # Temperature-to-scalar coupling weight.
        t2s_coupling = (
            self.scalar_temperature_coupling_coefficient(g)
            * specific_volumes
            * self.temperature_scale
        )
        pp.initialize_data(
            g,
            d,
            self.t2s_parameter_key,
            {"mass_weight": t2s_coupling, "time_step": self.time_step},
        )
    self.set_vector_source()
    self.set_permeability_from_aperture()
def set_temperature_parameters(self):
    """Set parameters for the (scaled) temperature equation.

    The entire equation is divided by the initial temperature in Kelvin.
    """
    for g, d in self.gb:
        T0 = self.T_0_Kelvin
        # Scaling applied to the conductive (diffusion) terms.
        div_T_scale = self.temperature_scale / self.length_scale ** 2 / T0
        kappa_f = self.fluid.thermal_conductivity() * div_T_scale
        kappa_s = self.rock.thermal_conductivity() * div_T_scale
        heat_capacity_s = (
            self.rock.specific_heat_capacity(self.background_temp_C)
            * self.rock.DENSITY
        )
        heat_capacity_f = self.fluid_density(g) * self.fluid.specific_heat_capacity(
            self.background_temp_C
        )
        # Aperture and cross sectional area
        specific_volumes = self.specific_volumes(g)
        # Define boundary conditions for flow
        bc = self.bc_type_temperature(g)
        # Set boundary condition values
        bc_values = self.bc_values_temperature(g)
        # and source values
        biot_coefficient = self.biot_beta(g)
        # Porosity-weighted effective heat capacity.
        mass_weight = (
            self._effective(g, heat_capacity_f, heat_capacity_s)
            * specific_volumes
            * self.temperature_scale
            / T0
        )
        thermal_conductivity = pp.SecondOrderTensor(
            self._effective(g, kappa_f, kappa_s) * specific_volumes
        )
        # darcy_fluxes are length scaled already
        advection_weight = heat_capacity_f * self.temperature_scale / T0
        pp.initialize_data(
            g,
            d,
            self.temperature_parameter_key,
            {
                "bc": bc,
                "bc_values": bc_values,
                "mass_weight": mass_weight,
                "second_order_tensor": thermal_conductivity,
                "advection_weight": advection_weight,
                "biot_alpha": biot_coefficient,
                "time_step": self.time_step,
                "source": self.source_temperature(g),
                "ambient_dimension": self.Nd,
            },
        )
        # Scalar-to-temperature coupling weight.
        s2t_coupling = (
            self.scalar_temperature_coupling_coefficient(g)
            * specific_volumes
            * self.scalar_scale
        )
        pp.initialize_data(
            g,
            d,
            self.s2t_parameter_key,
            {"mass_weight": s2t_coupling, "time_step": self.time_step},
        )
    # Interface conduction: 2 kappa_f / a across the half-aperture, scaled
    # by the higher-dimensional neighbor's specific volume on the mortar.
    for e, data_edge in self.gb.edges():
        g_l, g_h = self.gb.nodes_of_edge(e)
        mg = data_edge["mortar_grid"]
        # T0 = self.T_0_Kelvin + self._T(mg)
        div_T_scale = (
            self.temperature_scale / self.length_scale ** 2 / self.T_0_Kelvin
        )
        kappa_f = self.fluid.thermal_conductivity() * div_T_scale
        a_l = self.aperture(g_l)
        V_h = self.specific_volumes(g_h)
        a_mortar = mg.secondary_to_mortar_avg() * a_l
        kappa_n = 2 / a_mortar * kappa_f
        tr = np.abs(g_h.cell_faces)
        V_j = mg.primary_to_mortar_int() * tr * V_h
        kappa_n = kappa_n * V_j
        data_edge = pp.initialize_data(
            e,
            data_edge,
            self.temperature_parameter_key,
            {"normal_diffusivity": kappa_n},
        )
# BCs. Assumes _p_and_T_dir_faces
def bc_type_scalar(self, g) -> pp.BoundaryCondition:
    # Dirichlet on the faces selected by _p_and_T_dir_faces, Neumann elsewhere.
    return pp.BoundaryCondition(g, self._p_and_T_dir_faces(g), "dir")
def bc_type_temperature(self, g) -> pp.BoundaryCondition:
    # Same Dirichlet faces as for pressure (see _p_and_T_dir_faces).
    return pp.BoundaryCondition(g, self._p_and_T_dir_faces(g), "dir")
# Common parameters
def set_rock_and_fluid(self):
    """
    Set rock and fluid properties to those of granite and water.

    We ignore all temperature dependencies of the parameters. Granite and
    Water are the material classes defined at module level below.
    """
    self.rock = Granite()
    self.fluid = Water()
def porosity(self, g) -> float:
    """Porosity: 0.05 in the Nd-dimensional matrix, 1.0 in fractures."""
    return 0.05 if g.dim == self.Nd else 1.0
def _effective(self, g, param_f, param_s) -> float:
"""Compute effective thermal parameter as porosity weighted sum."""
phi = self.porosity(g)
return phi * param_f + (1 - phi) * param_s
def biot_alpha(self, g) -> float:
    """Biot coefficient for the pressure-mechanics coupling.

    Returns 0.8 in the Nd-dimensional matrix and 1.0 in lower-dimensional
    subdomains. (The previous annotation claimed ``np.ndarray``, but the
    method returns a scalar; the annotation is corrected here.)
    """
    if g.dim == self.Nd:
        return 0.8
    else:
        return 1.0
def biot_beta(self, g):
    """Coupling coefficient between temperature and mechanics/volume.

    For TM, the coefficient is the product of the bulk modulus (=inverse of
    the compressibility) and the volumetric thermal expansion coefficient.
    In fractures a solution-dependent coefficient is computed from the
    previous temperature iterate and the fluid density.
    """
    if g.dim == self.Nd:
        # Factor 3 for volumetric/linear, since the pp.Granite
        # thermal expansion coefficient is the linear one at 20 degrees C.
        return self.rock.BULK_MODULUS * 3 * self.rock.THERMAL_EXPANSION
    else:
        # Solution dependent coefficient computed from previous iterate,
        # see Eq. (xx)
        iterate = self.gb.node_props(g)[pp.STATE][pp.ITERATE]
        T_k = iterate[self.temperature_variable] * self.temperature_scale
        T0K = self.T_0_Kelvin
        return T_k / T0K * self.fluid_density(g)
def scalar_temperature_coupling_coefficient(self, g) -> float:
    """
    The temperature-pressure coupling coefficient is porosity times thermal
    expansion. The pressure and scalar scale must be accounted for wherever
    this coefficient is used.
    """
    b_f = self.fluid.thermal_expansion(self.background_temp_C)
    if g.dim < self.Nd:
        # Fractures: pure fluid (porosity 1).
        coeff = -b_f
    else:
        # Porosity-weighted fluid expansion plus solid expansion weighted
        # by (biot_alpha - porosity).
        b_s = self.rock.THERMAL_EXPANSION
        phi = self.porosity(g)
        coeff = -(phi * b_f + (self.biot_alpha(g) - phi) * b_s)
        # coeff = -self._effective(g, b_f, b_s)
    return coeff
def fluid_density(self, g, dp=None, dT=None) -> np.ndarray:
    r"""Density computed from current pressure and temperature solution, both
    taken from the previous iterate.

        \rho = \rho_0 * exp[ compressibility * (p - p_0)
                             - thermal_expansion * (T - T_0) ],

    with \rho_0 = 1000, p_0 = 1 atm, T_0 = 20 degrees C.

    Clipping of the solution to aid convergence. Should not affect the
    converged solution given the chosen bounds.
    """
    iterate = self.gb.node_props(g)[pp.STATE][pp.ITERATE]
    if dp is None:
        p_k = iterate[self.scalar_variable] * self.scalar_scale
        dp = np.clip(p_k, a_min=-1e10, a_max=1e10)
        # Use hydrostatic pressure as reference
        dp = dp - pp.ATMOSPHERIC_PRESSURE
    if dT is None:
        T_k = iterate[self.temperature_variable] * self.temperature_scale
        dT = np.clip(T_k, a_min=-self.T_0_Kelvin, a_max=self.T_0_Kelvin)
        # Use 20 degrees C as reference
        dT = dT - (20 - self.background_temp_C)
    rho_0 = 1e3 * (pp.KILOGRAM / pp.METER ** 3) * np.ones(g.num_cells)
    # Density decreases with temperature (minus sign) and increases with
    # pressure.
    rho = rho_0 * np.exp(
        dp * self.fluid.COMPRESSIBILITY - dT * self.fluid.thermal_expansion(dT)
    )
    return rho
def set_permeability_from_aperture(self):
    """
    Cubic law in fractures, rock permeability in the matrix; the normal
    (interface) permeability is inherited from the neighboring fracture.
    """
    # Viscosity has units of Pa s, and is consequently divided by the scalar scale.
    viscosity = self.fluid.dynamic_viscosity() / self.scalar_scale
    gb = self.gb
    key = self.scalar_parameter_key
    for g, d in gb:
        if g.dim < self.Nd:
            # Use cubic law in fractures. First compute the unscaled
            # permeability
            apertures = self.aperture(g, from_iterate=True)
            apertures_unscaled = apertures * self.length_scale
            k = np.power(apertures_unscaled, 2) / 12 / viscosity
            d[pp.PARAMETERS][key]["perm_nu"] = k
            # Multiply with the cross-sectional area, which equals the apertures
            # for 2d fractures in 3d
            specific_volumes = self.specific_volumes(g, True)
            k = k * specific_volumes
            # Divide by fluid viscosity and scale back
            kxx = k / self.length_scale ** 2
        else:
            # Use the rock permeability in the matrix
            kxx = (
                self.rock.PERMEABILITY
                / viscosity
                * np.ones(g.num_cells)
                / self.length_scale ** 2
            )
        K = pp.SecondOrderTensor(kxx)
        d[pp.PARAMETERS][key]["second_order_tensor"] = K
    # Normal permeability inherited from the neighboring fracture g_l
    for e, d in gb.edges():
        mg = d["mortar_grid"]
        g_l, g_h = gb.nodes_of_edge(e)
        data_l = gb.node_props(g_l)
        a = self.aperture(g_l, True)
        V = self.specific_volumes(g_l, True)
        V_h = self.specific_volumes(g_h, True)
        # We assume isotropic permeability in the fracture, i.e. the normal
        # permeability equals the tangential one
        k_s = data_l[pp.PARAMETERS][key]["second_order_tensor"].values[0, 0]
        # Division through half the aperture represents taking the (normal) gradient
        kn = mg.secondary_to_mortar_int() * np.divide(k_s, a * V / 2)
        tr = np.abs(g_h.cell_faces)
        # Scale by the higher-dimensional specific volume projected onto
        # the mortar grid.
        V_j = mg.primary_to_mortar_int() * tr * V_h
        kn = kn * V_j
        pp.initialize_data(mg, d, key, {"normal_diffusivity": kn})
def source_scalar(self, g) -> np.ndarray:
    """
    Source term for the scalar equation.

    In addition to regular source terms, we add a contribution compensating
    for the added volume in the conservation equation (cells newly created
    by fracture propagation, tagged in d["new_cells"]). For slightly
    compressible flow in the present formulation, this has units of m^3.

    Sources are handled by ScalarSource discretizations. The implicit
    scheme yields multiplication of the rhs by dt, but this is not
    incorporated in ScalarSource, hence we do it here.
    """
    rhs = np.zeros(g.num_cells)
    if g.dim < self.Nd:
        d = self.gb.node_props(g)
        # Fix: np.int was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin int is the documented replacement.
        new_cells = d.get("new_cells", np.array([], dtype=int))
        added_volume = self.initial_aperture * g.cell_volumes[new_cells]
        rhs[new_cells] -= added_volume
    return rhs
def source_mechanics(self, g) -> np.ndarray:
    """Body-force source for the momentum balance: gravity on the rock.

    Returns a flattened (Fortran-ordered) Nd x num_cells array; all zeros
    when gravity is switched off.
    """
    body_force = np.zeros((self.Nd, g.num_cells))
    if self.gravity_on:
        # Gravity acts along the last coordinate axis, scaled by cell
        # volume and the length/pressure scalings.
        body_force[self.Nd - 1] = (
            pp.GRAVITY_ACCELERATION
            * self.rock.DENSITY
            * g.cell_volumes
            * self.length_scale
            / self.scalar_scale
            * self.gravity_on
        )
    return body_force.ravel("F")
def set_vector_source(self):
    """Assign the gravity vector source for flow on nodes and edges.

    No-op when gravity is disabled.
    """
    # NOTE(review): getattr without a default raises AttributeError if
    # gravity_on is unset -- presumably always set by the setup; compare
    # the defaulted lookup of time_step_factor in adjust_time_step.
    if not getattr(self, "gravity_on"):
        return
    for g, d in self.gb:
        grho = (
            pp.GRAVITY_ACCELERATION
            * self.fluid_density(g)
            / self.scalar_scale
            * self.length_scale
        )
        gr = np.zeros((self.Nd, g.num_cells))
        # Gravity acts in the negative direction of the last coordinate.
        gr[self.Nd - 1, :] = -grho
        d[pp.PARAMETERS][self.scalar_parameter_key]["vector_source"] = gr.ravel("F")
    for e, data_edge in self.gb.edges():
        g1, g2 = self.gb.nodes_of_edge(e)
        params_l = self.gb.node_props(g1)[pp.PARAMETERS][self.scalar_parameter_key]
        mg = data_edge["mortar_grid"]
        # Project the secondary grid's vertical source component onto the
        # mortar grid.
        grho = (
            mg.secondary_to_mortar_avg()
            * params_l["vector_source"][self.Nd - 1 :: self.Nd]
        )
        a = mg.secondary_to_mortar_avg() * self.aperture(g1)
        gravity = np.zeros((self.Nd, mg.num_cells))
        # Half-aperture distance between fracture and matrix cell centers.
        gravity[self.Nd - 1, :] = grho * a / 2
        data_edge = pp.initialize_data(
            e,
            data_edge,
            self.scalar_parameter_key,
            {"vector_source": gravity.ravel("F")},
        )
# Solution storing and export
def _set_exporter(self):
    """Create the vtu exporter and the list of exported times.

    Called lazily from export_step on first use.
    """
    self.exporter = pp.Exporter(
        self.gb,
        self.file_name,
        folder_name=self.viz_folder_name + "_vtu",
        fixed_grid=False,  # the grid changes as fractures propagate
    )
    self.export_times = []
def export_step(self):
    """
    Export the current solution to vtu. The method sets the desired values
    in d[pp.STATE]. For some fields, it provides zeros in the dimensions
    where the variable is not defined, or pads the vector values with zeros
    so that they have three components, as required by ParaView.

    We use suffix _exp on all exported variables, to separate from scaled
    versions also stored in d[pp.STATE].
    """
    if "exporter" not in self.__dict__:
        self._set_exporter()
    for g, d in self.gb:
        iterate = d[pp.STATE][pp.ITERATE]
        d[pp.STATE]["cell_centers"] = g.cell_centers.copy()
        ## First export Darcy fluxes:
        dis = d[pp.PARAMETERS][self.temperature_parameter_key]["darcy_flux"]
        if g.dim == self.Nd:
            # Add mortar flux contributions on the internal (fracture)
            # boundary faces of the matrix grid.
            for e in self.gb.edges_of_node(g):
                d_e = self.gb.edge_props(e[0])
                mg = d_e["mortar_grid"]
                dis_e = d_e[pp.PARAMETERS][self.temperature_parameter_key][
                    "darcy_flux"
                ]
                faces_on_fracture_surface = (
                    mg.primary_to_mortar_int().tocsr().indices
                )
                sign = g.signs_and_cells_of_boundary_faces(
                    faces_on_fracture_surface
                )[0]
                dis = dis + mg.mortar_to_primary_int() * (sign * dis_e)
        # Face fluxes as vectors, then averaged onto cells.
        fluxes = g.face_normals * dis / g.face_areas
        scalar_div = g.cell_faces
        # Vector extension, convert to coo-format to avoid odd errors when one
        # grid dimension is 1 (this may return a bsr matrix)
        # The order of arguments to sps.kron is important.
        block_div = sps.kron(scalar_div, sps.eye(3)).tocsc()
        proj = np.abs(block_div.transpose().tocsr())
        cell_flux = proj * (fluxes.ravel("F"))
        d[pp.STATE]["fluxes_exp"] = cell_flux.reshape((3, g.num_cells), order="F")
        ## Then handle u and contact traction, which are dimension dependent
        if g.dim == self.Nd:
            pad_zeros = np.zeros((3 - g.dim, g.num_cells))
            u = iterate[self.displacement_variable].reshape(
                (self.Nd, -1), order="F"
            )
            u_exp = np.vstack((u * self.length_scale, pad_zeros))
            d[pp.STATE]["u_exp"] = u_exp
            # No contact traction on the matrix grid; export zeros.
            d[pp.STATE]["traction_exp"] = np.zeros(d[pp.STATE]["u_exp"].shape)
        elif g.dim == (self.Nd - 1):
            pad_zeros = np.zeros((2 - g.dim, g.num_cells))
            g_h = self.gb.node_neighbors(g)[0]
            data_edge = self.gb.edge_props((g, g_h))
            # Displacement jump across the fracture, in local coordinates.
            u_mortar_local = self.reconstruct_local_displacement_jump(
                data_edge, d["tangential_normal_projection"], from_iterate=True
            )
            u_exp = np.vstack((u_mortar_local * self.length_scale, pad_zeros))
            d[pp.STATE]["u_exp"] = u_exp
            # Tractions are integrated; divide by cell volumes before export.
            traction = (
                iterate[self.contact_traction_variable].reshape(
                    (self.Nd, -1), order="F"
                )
                / g.cell_volumes
            )
            d[pp.STATE]["traction_exp"] = (
                np.vstack((traction, pad_zeros)) * self.scalar_scale
            )
        ## Apertures, p and T
        d[pp.STATE]["aperture_exp"] = self.aperture(g) * self.length_scale
        d[pp.STATE]["p_exp"] = iterate[self.scalar_variable] * self.scalar_scale
        d[pp.STATE]["T_exp"] = (
            iterate[self.temperature_variable] * self.temperature_scale
        )
    self.exporter.write_vtk(self.export_fields, time_step=self.time, grid=self.gb)
    self.export_times.append(self.time)
    # Track the (unscaled) total fracture sizes over time.
    new_sizes = np.zeros(len(self.gb.grids_of_dimension(self.Nd - 1)))
    for i, g in enumerate(self.gb.grids_of_dimension(self.Nd - 1)):
        new_sizes[i] = np.sum(g.cell_volumes) * self.length_scale ** 2
    if hasattr(self, "fracture_sizes"):
        self.fracture_sizes = np.vstack((self.fracture_sizes, new_sizes))
    else:
        self.fracture_sizes = new_sizes
def export_pvd(self):
    """Write the pvd collection file referencing all exported time steps.

    Call once at the end of the simulation, after the final vtu export.
    """
    times = np.array(self.export_times)
    self.exporter.write_pvd(times)
def _update_iterate(self, solution_vector: np.ndarray) -> None:
    """
    Extract parts of the solution for current iterate.

    Calls the ContactMechanicsBiot version, and additionally stores the
    matrix displacement iterate in d[pp.STATE][pp.ITERATE], where it is
    picked up by export_step.

    Method is a tailored copy from assembler.distribute_variable.

    Parameters:
        solution_vector (np.array): solution vector for the current iterate.
    """
    super()._update_iterate(solution_vector)
    # HACK: This is one big hack to get the export working.
    # Ugly, but doesn't affect solution
    assembler = self.assembler
    variable_names = []
    for pair in assembler.block_dof.keys():
        variable_names.append(pair[1])
    # Global dof offset of each block.
    dof = np.cumsum(np.append(0, np.asarray(assembler.full_dof)))
    for var_name in set(variable_names):
        for pair, bi in assembler.block_dof.items():
            g = pair[0]
            name = pair[1]
            if name != var_name:
                continue
            if isinstance(g, tuple):
                # g is an edge; nothing to store.
                continue
            else:
                data = self.gb.node_props(g)
                # g is a node (not edge)
                # Save displacement for export. The export hacks are getting ugly!
                if name == self.displacement_variable:
                    u = solution_vector[dof[bi] : dof[bi + 1]]
                    data = self.gb.node_props(g)
                    data[pp.STATE][pp.ITERATE][
                        self.displacement_variable
                    ] = u.copy()
class Water:
    """Material constants for the fluid phase (water).

    Temperature dependence is ignored; all properties are constants at the
    reference temperature (20 C unless overridden).
    """

    def __init__(self, theta_ref=None):
        self.theta_ref = 20 * (pp.CELSIUS) if theta_ref is None else theta_ref
        self.VISCOSITY = 1 * pp.MILLI * pp.PASCAL * pp.SECOND
        self.COMPRESSIBILITY = 4e-10 / pp.PASCAL
        self.BULK_MODULUS = 1 / self.COMPRESSIBILITY

    def thermal_expansion(self, delta_theta):
        """Volumetric thermal expansion coefficient [m^3 / (m^3 K)]."""
        return 4e-4

    def thermal_conductivity(self, theta=None):  # theta in CELSIUS
        """Thermal conductivity [W / (m K)]."""
        if theta is None:
            theta = self.theta_ref
        return 0.6

    def specific_heat_capacity(self, theta=None):  # theta in CELSIUS
        """Specific heat capacity [J / (kg K)]."""
        return 4200

    def dynamic_viscosity(self, theta=None):  # theta in CELSIUS
        """Dynamic viscosity [Pa s]."""
        return 0.001

    def hydrostatic_pressure(self, depth, theta=None):
        """Hydrostatic pressure [Pa] at the given depth [m]."""
        density = 1e3 * (pp.KILOGRAM / pp.METER ** 3)
        return density * depth * pp.GRAVITY_ACCELERATION + pp.ATMOSPHERIC_PRESSURE
class Granite(pp.Granite):
    """
    Solid phase: granite, with several constants overridden from pp.Granite.
    """

    def __init__(self, theta_ref=None):
        super().__init__(theta_ref)
        self.BULK_MODULUS = pp.params.rock.bulk_from_lame(self.LAMBDA, self.MU)
        # Matrix permeability [m^2].
        self.PERMEABILITY = 1e-14
        self.SIF_crit = 5e5  # Obs changed for ex 1 from 1e5
        # Increases with T, see
        # https://link.springer.com/article/10.1007/s00603-020-02303-z
        self.THERMAL_EXPANSION = 5e-5
        self.FRICTION_COEFFICIENT = 0.8

    def thermal_conductivity(self, theta=None):
        """Thermal conductivity [W / (m K)]."""
        return 2.0  # Ranges approx 1.7 to 4 according to Wikipedia
# EK: My guess is we can delete functions below.
def tip_faces_l_to_edges_h(g_l, faces_l, g_h):
    """Map tip faces of the lower-dimensional grid to node pairs (edges) of
    the higher-dimensional grid, matched through global point indices.

    Returns an array of shape (g_l.dim, faces_l.size) of g_h node indices.
    """
    # Find the edges
    nodes_l, _, _ = sps.find(g_l.face_nodes[:, faces_l])
    # Obtain the global index of all nodes
    global_nodes = g_l.global_point_ind[nodes_l]
    # Prepare for checking intersection. ind_l is used to reconstruct non-unique
    # nodes later.
    global_nodes, ind_l = np.unique(global_nodes, return_inverse=True)
    # Find g_h indices of unique global nodes
    nodes_l, nodes_h, inds = np.intersect1d(
        g_h.global_point_ind, global_nodes, assume_unique=False, return_indices=True
    )
    # Reconstruct non-unique and reshape to edges (first dim is 2 if nd=3)
    edges_h = np.reshape(nodes_h[ind_l], (g_l.dim, faces_l.size), order="f")
    return edges_h
def tip_edge_h_to_face_l(g_l: pp.Grid, g_h: pp.Grid, edge_h: np.ndarray) -> np.ndarray:
    """
    Map an edge (node set) of the higher-dimensional grid to the face of the
    lower-dimensional grid having the same global nodes.

    Assumes all edges_h actually correspond to some face in g_l.

    Parameters
    ----------
    g_l : pp.Grid
        Lower-dimensional grid.
    g_h : pp.Grid
        Higher-dimensional grid.
    edge_h : np.ndarray
        Node indices (in g_h) defining the edge.

    Returns
    -------
    face_l : np.ndarray
        The matching face index in g_l, or an empty array if not all of the
        edge's nodes are found in g_l.
    """
    # Obtain the global index of all nodes
    global_nodes = g_h.global_point_ind[edge_h]
    # Find g_l indices of unique global nodes
    _, nodes_l, _ = np.intersect1d(
        g_l.global_point_ind, global_nodes, assume_unique=False, return_indices=True
    )
    if nodes_l.size == edge_h.size:
        face_l = faces_of_nodes(g_l, nodes_l)
        return face_l
    else:
        return np.empty(0, dtype=int)
def edges_of_face(g, face):
    """Return the node pairs (edges) delimiting a face of g.

    In 3d, the face's nodes are sorted in the face plane so that consecutive
    (wrapped) pairs form the edges; in lower dimensions the nodes themselves
    are returned as a 2d array.
    """
    local_nodes = g.face_nodes[:, face].nonzero()[0]
    pts = g.nodes[:, local_nodes]
    # Faces are defined by one node in 1d and two in 2d. This requires
    # dimension dependent treatment:
    if g.dim == 3:
        # Sort nodes clockwise (!)
        # ASSUMPTION: This assumes that the new cell is star-shaped with respect to the
        # local cell center. This should be okay.
        map_to_sorted = pp.utils.sort_points.sort_point_plane(
            pts, g.face_centers[:, face]
        )
        local_nodes = local_nodes[map_to_sorted]
        # Consecutive sorted nodes, wrapping around to the first, form edges.
        edges = np.vstack((local_nodes, np.hstack((local_nodes[1:], local_nodes[0]))))
    else:
        edges = np.atleast_2d(local_nodes)
    return edges
def faces_of_nodes(g: pp.Grid, e: np.ndarray) -> np.ndarray:
    """
    Obtain indices of all faces sharing one or two nodes.

    Parameters
    ----------
    g : pp.Grid
    e : np.ndarray
        The edge, given as one node index (grids below 3d) or two.

    Returns
    -------
    faces : np.ndarray
        For a single node, every face containing it; for a node pair, the
        faces containing both nodes.
    """
    n_nodes = e.size
    if n_nodes < 2:
        # A single node only identifies a face below 3d.
        assert g.dim < 3
        return g.face_nodes[e[0]].nonzero()[1]
    if n_nodes == 2:
        faces_a = g.face_nodes[e[0]].nonzero()[1]
        faces_b = g.face_nodes[e[1]].nonzero()[1]
        return np.intersect1d(faces_a, faces_b)
    raise NotImplementedError
def fracture_edges(g_h):
    """Collect the unique, axis-0-sorted edges of all fracture faces of g_h.

    Returns an array of shape (g_h.dim - 1, num_unique_edges).
    """
    fracture_edges = np.empty((g_h.dim - 1, 0), dtype=int)
    for frac_face in g_h.tags["fracture_faces"].nonzero()[0]:
        for frac_e in np.sort(edges_of_face(g_h, frac_face), axis=0).T:
            frac_e = frac_e.reshape((g_h.dim - 1, 1))
            # NOTE(review): np.isin tests elementwise membership rather
            # than column equality; with sorted edges this appears to
            # suffice, but distinct edges sharing both node values could
            # in principle collide -- verify if edge sets look wrong.
            is_found = np.isin(fracture_edges, frac_e)
            is_found = np.any(np.all(is_found, axis=0))
            if not is_found or fracture_edges.size == 0:
                fracture_edges = np.hstack((fracture_edges, frac_e))
    return fracture_edges
| [
"ivar.stefansson@uib.no"
] | ivar.stefansson@uib.no |
5d9ddcd5643b7d3a09a7a2df7d052784a9a314f5 | 30302d215a012a079edf05a4e14e932888385def | /ddht/v5_1/alexandria/typing.py | e73bd07c5b075b4ed6d6eac7f99be2677b7a8cae | [
"MIT"
] | permissive | NhlanhlaHasane/ddht | e54975a7fcf4e9bfa29771ee6b78c5e9a5991aff | 142911d134ff839f3f79ff8fe9e45d3fe5a58cd0 | refs/heads/master | 2023-05-31T05:09:06.371320 | 2021-06-03T22:31:22 | 2021-06-03T22:31:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 110 | py | from typing import NewType
# Distinct bytes-based alias types, so that content identifiers and content
# lookup keys cannot be interchanged silently under type checking.
ContentID = NewType("ContentID", bytes)
ContentKey = NewType("ContentKey", bytes)
| [
"pipermerriam@gmail.com"
] | pipermerriam@gmail.com |
435e90d2b0debc710dd2aca553b76e51ea39e688 | f4434c85e3814b6347f8f8099c081ed4af5678a5 | /sdk/synapse/azure-synapse-artifacts/azure/synapse/artifacts/operations/_big_data_pools_operations.py | 2b2366e730881713afa1086c0e769bf1a8d28656 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | yunhaoling/azure-sdk-for-python | 5da12a174a37672ac6ed8e3c1f863cb77010a506 | c4eb0ca1aadb76ad892114230473034830116362 | refs/heads/master | 2022-06-11T01:17:39.636461 | 2020-12-08T17:42:08 | 2020-12-08T17:42:08 | 177,675,796 | 1 | 0 | MIT | 2020-03-31T20:35:17 | 2019-03-25T22:43:40 | Python | UTF-8 | Python | false | false | 6,664 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
# NOTE: AutoRest-generated operation group -- regenerate from the swagger
# spec rather than hand-editing; code changes here will be overwritten.
class BigDataPoolsOperations(object):
    """BigDataPoolsOperations operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.synapse.artifacts.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    models = _models
    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    def list(
        self,
        **kwargs # type: Any
    ):
        # type: (...) -> "_models.BigDataPoolResourceInfoListResult"
        """List Big Data Pools.
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: BigDataPoolResourceInfoListResult, or the result of cls(response)
        :rtype: ~azure.synapse.artifacts.models.BigDataPoolResourceInfoListResult
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.BigDataPoolResourceInfoListResult"]
        # Map auth/conflict status codes to specific exception types;
        # callers may extend the mapping via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        # Service API version is pinned by the code generator.
        api_version = "2019-06-01-preview"
        accept = "application/json"
        # Construct URL
        url = self.list.metadata['url'] # type: ignore
        path_format_arguments = {
            'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # Anything other than 200 is deserialized into an ErrorContract and raised.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(_models.ErrorContract, response)
            raise HttpResponseError(response=response, model=error)
        deserialized = self._deserialize('BigDataPoolResourceInfoListResult', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    list.metadata = {'url': '/bigDataPools'} # type: ignore
    def get(
        self,
        big_data_pool_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> "_models.BigDataPoolResourceInfo"
        """Get Big Data Pool.
        :param big_data_pool_name: The Big Data Pool name.
        :type big_data_pool_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: BigDataPoolResourceInfo, or the result of cls(response)
        :rtype: ~azure.synapse.artifacts.models.BigDataPoolResourceInfo
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.BigDataPoolResourceInfo"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-06-01-preview"
        accept = "application/json"
        # Construct URL
        url = self.get.metadata['url'] # type: ignore
        path_format_arguments = {
            'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
            'bigDataPoolName': self._serialize.url("big_data_pool_name", big_data_pool_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(_models.ErrorContract, response)
            raise HttpResponseError(response=response, model=error)
        deserialized = self._deserialize('BigDataPoolResourceInfo', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/bigDataPools/{bigDataPoolName}'} # type: ignore
| [
"noreply@github.com"
] | noreply@github.com |
a3d39a3decdee7b76f6712fe6da82843068afeb4 | a631984152b1449df9ef156cf80a033f6a5692ed | /General Python/LTP - Introduction to Python [PluralSight]/LTP - 05 - Shopping.py | d6ac05ab4182b38f3b0c15993ae15e5b5f37d29c | [] | no_license | john-m-hanlon/Python | 2f58e20ba56b3dede3baf6f5ed259d741434108b | 56ebf291b2d8d15f47c942c49d9f40d0ae18741e | refs/heads/master | 2020-06-11T09:47:09.188169 | 2017-03-14T03:32:51 | 2017-03-14T03:32:51 | 75,688,794 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 398 | py | #
#
# A simple program to identify grocery items that user wants to purchase
# LTP - 05 - Shopping.py
#
__author__ = 'JohnHanlon'
import sales.shopping_cart
import sales.shopping_order
# One cart accumulates every order entered during the session.
cart = sales.shopping_cart.Cart()
# Prime the loop with the first order. get_input() prompts the user and
# presumably sets order.quit when they choose to stop -- confirm in
# sales.shopping_order.
order = sales.shopping_order.Order()
order.get_input()
while not order.quit:
    cart.process(order)
    # Start a fresh Order for the next round of input.
    order = sales.shopping_order.Order()
    order.get_input()
# Print the final cart contents (relies on Cart's string conversion).
print(cart)
"hanlon.johnm@gmail.com"
] | hanlon.johnm@gmail.com |
ee2f56938dfb533153bb1cedc2c37b2895664a90 | 15a68c799f107725a40ed9dd56345bcc09ae7365 | /test.py | 40ca14fe708e83935c49a74cba2a84bc7a351b7f | [] | no_license | gz51837844/GibbsLDApy | 484f2c2f10c90df07e82104f25d0a744f20fd362 | 4f062ca36977661840b627e0b1f3c19e7433e092 | refs/heads/master | 2022-11-29T19:10:09.588482 | 2020-08-06T01:56:20 | 2020-08-06T01:56:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,689 | py | #!/usr/bin/env python
# encoding: utf-8
'''
@author: Jasperyang
@license: (C) Copyright 2013-2017, Jasperyang Corporation Limited.
@contact: yiyangxianyi@gmail.com
@software: GibbsLDA
@file: test.py
@time: 3/6/17 8:41 PM
@desc: This is for testing!!! all the functions
'''
# Manual smoke-test scratchpad for the GibbsLDA port: each section below
# exercises one class. Uncomment the section you want to run; only the
# Model "-est" section at the bottom is active by default.
from DataSet import *
from Document import *
from Strtokenizer import *
from Utils import *
from Model import *
'''test of strtokenizer'''
# line = 'fasdf asd f dsaf ds af dsaf sdaf dsa fd saf s '
# strtok = Strtokenizer(line,' \r\t\n')
# for i in range(strtok.count_tokens()) :
#     print(str(i) + ":" + strtok.token(i) + '\n')
# print(strtok.next_token())
'''test of document'''
# dd = Document()
# line = 'adf'
# do = Document(dd,line)
# print(do.rawstr) # the Document class works
'''test of dataset''' # wordmap.txt must not contain two consecutive spaces, and the last line needs a trailing newline
wordmapfile = 'test_data/wordmap.txt'
da = DataSet(2)
# pword2id = {'nihao':1,'sa':2,'hahaha':3}
# da.write_wordmap(wordmapfile,pword2id)
# pword2id = {}
# da.read_wordmap1(wordmapfile,pword2id)
# for key,value in pword2id.items() :
#     print(key + str(value))
# pid2word = {}
# da.read_wordmap2(wordmapfile,pid2word)
# for key,value in pid2word.items() :
#     print(key + str(value))
# dfile = 'test_data/dfile'
# da.read_trndata(dfile,wordmapfile)
# for doc in da.docs :
#     print(doc.words)
# da.read_newdata('test_data/newdfile',wordmapfile)
# for doc in da.docs :
#     print(doc.words)
# da.read_newdata_withrawstrs('test_data/new2dfile',wordmapfile)
# for doc in da.docs :
#     print(doc.words)
'''test of Util'''
# argv = ['-estc', '-alpha', '0.5', '-beta', '0.1', '-ntopics', '100', '-niters',
#         '1000', '-savestep', '100', '-twords', '20', '-dfile', 'models/casestudy/trndocs.dat', '-dir', 'test_data',
#         '-model', 'model-01800']
# pmodel = Model()
# u = Utils()
# u.parse_args(len(argv), argv, pmodel)
# print(u.generate_model_name(80))
# probs = [2.4,54.23,1.4]
# words = [0,1,2]
# u.sort(probs,words)
# print(probs)
# print(words)
# vect = [{0:2.4},{1:54.23},{2:1.4}]
# u.quicksort(vect,0,2)
# print(vect)
'''test of model'''
# # sections that do NOT require load_model
argv = ['-est', '-alpha', '0.5', '-beta', '0.1', '-ntopics', '10', '-niters',
        '1000', '-savestep', '100', '-twords', '20', '-dfile', 'dfile', '-dir', 'test_data/',
        '-model', 'testmodel']
pmodel = Model()
pmodel.init(len(argv),argv) # tests init, including init_est
# print("nw:\n")
# print(pmodel.nw)
# print("nd:\n")
# print(pmodel.nd)
# print("nwsum:\n")
# print(pmodel.nwsum)
# print("ndsum:\n")
# print(pmodel.ndsum)
# print("z:\n")
# print(pmodel.z)
# pmodel.load_model('testmodel')
# print(pmodel.z)
# pmodel.save_model_tassign('test_data/testmodel.tassign')
# pmodel.save_model_theta('test_data/testmodel.theta')
# pmodel.save_model_phi('test_data/testmodel.phi')
# pmodel.save_model_twords('test_data/testmodel.twords')
# pmodel.save_model_others('test_data/testmodel.others')
# pmodel.save_model('testmodel')
# sections that DO require load_model: init_estc, init_inf
# argv = ['-inf', '-alpha', '0.5', '-beta', '0.1', '-ntopics', '10', '-niters',
#         '1000', '-savestep', '100', '-twords', '20', '-dfile', 'dfile', '-dir', 'test_data/',
#         '-model', 'testmodel']
# pmodel = Model()
# pmodel.init(len(argv),argv)
# pmodel.save_inf_model('test_inf_model')
# pmodel.save_inf_model_tassign('test_data/test_inf_model.tassign')
# pmodel.save_inf_model_newtheta('test_data/test_inf_model.theta')
# pmodel.save_inf_model_newphi('test_data/test_inf_model.phi')
# pmodel.save_inf_model_twords('test_data/test_inf_model.twords')
# pmodel.estimate()
# print(pmodel.z)
"472530484@qq.com"
] | 472530484@qq.com |
2e0fcab52725520f63a59093e6c11153b64ff890 | 2b20f132cbcb6f14f13ef3412302fb6926a506d9 | /metadata_analysis/plot_metadata_labels.py | 0e60a62ad3ce2104fa2041aa6fb7000fc15defc0 | [
"Apache-2.0"
] | permissive | nfrumkin/forecast-prometheus | a200a140cf7705fcb8a9acf4f7a1c3f13e679c75 | fae241ec4303992ed06df67cbbd8118622e9750b | refs/heads/master | 2020-03-26T09:18:37.156900 | 2018-08-23T20:33:00 | 2018-08-23T20:33:00 | 144,744,292 | 119 | 26 | null | null | null | null | UTF-8 | Python | false | false | 1,230 | py | import json
from datetime import datetime
import matplotlib.pyplot as plt
import matplotlib.dates as dt
import re
import string
import random
import numpy as np
import bz2
from matplotlib.backends.backend_pdf import PdfPages
pp = PdfPages('label_hists2.pdf')
import os
label = "instance"
folder = "kubelet_docker_operations_latency_microseconds/"
files = os.listdir(folder)
jsons = []
inc = 0
print(len(files))
md = []
for file in files:
inc += 1
print(inc)
filen = folder + file
try:
f = bz2.BZ2File(filen, 'rb')
jsonFile = json.load(f)
f.close()
except IsADirectoryError:
continue
for pkt in jsonFile:
metadata = pkt["metric"]
del metadata["__name__"]
md.append(metadata)
lbls = {}
for i in range(0, len(md)):
for key in md[i].keys():
if key in lbls.keys():
lbls[key].append(md[i][key])
else:
lbls[key] = [md[i][key]]
for key in lbls.keys():
vals = lbls[key]
plt.figure(figsize=(10,5))
plt.hist(vals)
#plt.gcf().autofmt_xdate()
#plt.legend(lbl)
plt.title(key)
plt.xlabel("Label Value")
plt.ylabel("Count")
plt.savefig(pp, format='pdf')
plt.close()
pp.close() | [
"nfrumkin@redhat.com"
] | nfrumkin@redhat.com |
58bbcb0b913a6f6d65e9cc3f765cf80b1e6d8d8d | f4b60f5e49baf60976987946c20a8ebca4880602 | /lib/python2.7/site-packages/acimodel-1.3_2j-py2.7.egg/cobra/modelimpl/fabric/rssnmppol.py | 4738ca78412d8a6382b312d0f46b6ee434811e5c | [] | no_license | cqbomb/qytang_aci | 12e508d54d9f774b537c33563762e694783d6ba8 | a7fab9d6cda7fadcc995672e55c0ef7e7187696e | refs/heads/master | 2022-12-21T13:30:05.240231 | 2018-12-04T01:46:53 | 2018-12-04T01:46:53 | 159,911,666 | 0 | 0 | null | 2022-12-07T23:53:02 | 2018-12-01T05:17:50 | Python | UTF-8 | Python | false | false | 8,086 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2016 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
# NOTE: machine-generated ACI model class ("written by zen warriors, do not
# modify!") -- comments below are for readers only; never hand-edit the tokens.
class RsSnmpPol(Mo):
    """
    A source relation to the SNMP policy.

    Named relation (N-to-1) from a fabric pod policy group to an snmp:Pol,
    resolved by the policy name stored in ``tnSnmpPolName``.
    """
    # --- class-level metadata describing the relation ---
    meta = NamedSourceRelationMeta("cobra.model.fabric.RsSnmpPol", "cobra.model.snmp.Pol")
    meta.targetNameProps["name"] = "tnSnmpPolName"
    meta.cardinality = SourceRelationMeta.N_TO_ONE
    meta.moClassName = "fabricRsSnmpPol"
    meta.rnFormat = "rssnmpPol"
    meta.category = MoCategory.RELATIONSHIP_TO_LOCAL
    meta.label = "SNMP Policy"
    meta.writeAccessMask = 0x8e700000001
    meta.readAccessMask = 0x8e700000001
    meta.isDomainable = False
    meta.isReadOnly = False
    meta.isConfigurable = True
    meta.isDeletable = False
    meta.isContextRoot = False
    meta.childClasses.add("cobra.model.fault.Inst")
    meta.childClasses.add("cobra.model.fault.Counts")
    meta.childClasses.add("cobra.model.health.Inst")
    meta.childNamesAndRnPrefix.append(("cobra.model.fault.Counts", "fltCnts"))
    meta.childNamesAndRnPrefix.append(("cobra.model.fault.Inst", "fault-"))
    meta.childNamesAndRnPrefix.append(("cobra.model.health.Inst", "health"))
    meta.parentClasses.add("cobra.model.fabric.PodPGrp")
    meta.superClasses.add("cobra.model.reln.Inst")
    meta.superClasses.add("cobra.model.reln.To")
    meta.superClasses.add("cobra.model.pol.NToRef")
    meta.rnPrefixes = [
        ('rssnmpPol', False),
        ]
    # --- generated property metadata (one PropMeta per MO attribute) ---
    prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("deleteAll", "deleteall", 16384)
    prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
    prop._addConstant("ignore", "ignore", 4096)
    meta.props.add("childAction", prop)
    prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
    prop.label = "None"
    prop.isDn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("dn", prop)
    prop = PropMeta("str", "forceResolve", "forceResolve", 107, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = True
    prop.defaultValueStr = "yes"
    prop._addConstant("no", None, False)
    prop._addConstant("yes", None, True)
    meta.props.add("forceResolve", prop)
    prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "local"
    prop._addConstant("implicit", "implicit", 4)
    prop._addConstant("local", "local", 0)
    prop._addConstant("policy", "policy", 1)
    prop._addConstant("replica", "replica", 2)
    prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
    meta.props.add("lcOwn", prop)
    prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "never"
    prop._addConstant("never", "never", 0)
    meta.props.add("modTs", prop)
    prop = PropMeta("str", "monPolDn", "monPolDn", 13999, PropCategory.REGULAR)
    prop.label = "Monitoring policy attached to this observable object"
    prop.isImplicit = True
    prop.isAdmin = True
    meta.props.add("monPolDn", prop)
    prop = PropMeta("str", "rType", "rType", 106, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 1
    prop.defaultValueStr = "mo"
    prop._addConstant("local", "local", 3)
    prop._addConstant("mo", "mo", 1)
    prop._addConstant("service", "service", 2)
    meta.props.add("rType", prop)
    prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
    prop.label = "None"
    prop.isRn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("rn", prop)
    prop = PropMeta("str", "state", "state", 103, PropCategory.REGULAR)
    prop.label = "State"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "unformed"
    prop._addConstant("cardinality-violation", "cardinality-violation", 5)
    prop._addConstant("formed", "formed", 1)
    prop._addConstant("invalid-target", "invalid-target", 4)
    prop._addConstant("missing-target", "missing-target", 2)
    prop._addConstant("unformed", "unformed", 0)
    meta.props.add("state", prop)
    prop = PropMeta("str", "stateQual", "stateQual", 104, PropCategory.REGULAR)
    prop.label = "State Qualifier"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "none"
    prop._addConstant("default-target", "default-target", 2)
    prop._addConstant("mismatch-target", "mismatch-target", 1)
    prop._addConstant("none", "none", 0)
    meta.props.add("stateQual", prop)
    prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("created", "created", 2)
    prop._addConstant("deleted", "deleted", 8)
    prop._addConstant("modified", "modified", 4)
    meta.props.add("status", prop)
    prop = PropMeta("str", "tCl", "tCl", 11558, PropCategory.REGULAR)
    prop.label = "Target-class"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 4571
    prop.defaultValueStr = "snmpPol"
    prop._addConstant("snmpPol", None, 4571)
    prop._addConstant("unspecified", "unspecified", 0)
    meta.props.add("tCl", prop)
    prop = PropMeta("str", "tContextDn", "tContextDn", 4990, PropCategory.REGULAR)
    prop.label = "Target-context"
    prop.isImplicit = True
    prop.isAdmin = True
    meta.props.add("tContextDn", prop)
    prop = PropMeta("str", "tDn", "tDn", 100, PropCategory.REGULAR)
    prop.label = "Target-dn"
    prop.isImplicit = True
    prop.isAdmin = True
    meta.props.add("tDn", prop)
    prop = PropMeta("str", "tRn", "tRn", 4989, PropCategory.REGULAR)
    prop.label = "Target-rn"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.range = [(0, 512)]
    meta.props.add("tRn", prop)
    prop = PropMeta("str", "tType", "tType", 4988, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "name"
    prop._addConstant("all", "all", 2)
    prop._addConstant("mo", "mo", 1)
    prop._addConstant("name", "name", 0)
    meta.props.add("tType", prop)
    # tnSnmpPolName is the only configurable property: the target policy name.
    prop = PropMeta("str", "tnSnmpPolName", "tnSnmpPolName", 11557, PropCategory.REGULAR)
    prop.label = "Name"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 64)]
    prop.regex = ['[a-zA-Z0-9_.:-]+']
    meta.props.add("tnSnmpPolName", prop)
    prop = PropMeta("str", "uid", "uid", 8, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    meta.props.add("uid", prop)
    # Deployment Meta
    meta.deploymentQuery = True
    meta.deploymentType = "Ancestor"
    def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
        # No naming properties: the RN is the fixed string "rssnmpPol".
        namingVals = []
        Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"collinsctk@qytang.com"
] | collinsctk@qytang.com |
8fd2c926a6858bc0b84c062a635d6b2b6be7ab79 | 8595ca431e2f367ed84b5d0ec1a62786b5c72fbd | /6.1/main.py | 09c87a7d090fd9b57d24c695d3f99f7e563c6589 | [] | no_license | bleungwpg/PythonNeoPixelTutorial6 | 80d5bbab736ed8266e1cde1d696174ae4584e2cd | cfcbbb4e795a35794294771e1ba8e3d4e0ccafe4 | refs/heads/master | 2020-03-30T00:03:57.910367 | 2018-11-23T02:55:24 | 2018-11-23T02:55:24 | 150,503,664 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,774 | py | import globalvariables
import time
import board
import neopixel
# --- NeoPixel strip configuration ---
pixpin = board.A3  # data pin the strip is wired to
numpix = 128  # number of addressable pixels on the strip
# auto_write=False: pixel assignments are buffered until strip.write().
strip = neopixel.NeoPixel(pixpin, numpix, brightness=0.1, auto_write=False)
def showMessage1():
    """Blank message 4's pixels, show message 1 in red for 1 s, queue message 2."""
    for pixel in (96, 103):          # clear the pair lit by showMessage4
        strip[pixel] = (0, 0, 0)
    red = (255, 0, 0)
    for pixel in (0, 7):             # light this message's pair
        strip[pixel] = red
    strip.write()                    # push the buffered colors to the LEDs
    time.sleep(1)
    globalvariables.messageID = 2    # hand control to message 2
def showMessage2(nextMessage):
    """Blank message 1's pixels, show message 2 in green for 2 s, queue *nextMessage*."""
    for pixel in (0, 7):             # clear the pair lit by showMessage1
        strip[pixel] = (0, 0, 0)
    green = (0, 255, 0)
    for pixel in (32, 39):           # light this message's pair
        strip[pixel] = green
    strip.write()                    # push the buffered colors to the LEDs
    time.sleep(2)
    globalvariables.messageID = nextMessage
def showMessage3(duration):
    """Blank message 2's pixels, show message 3 in pink for *duration* s, queue message 4."""
    for pixel in (32, 39):           # clear the pair lit by showMessage2
        strip[pixel] = (0, 0, 0)
    pink = (255, 50, 200)
    for pixel in (64, 71):           # light this message's pair
        strip[pixel] = pink
    strip.write()                    # push the buffered colors to the LEDs
    time.sleep(duration)
    globalvariables.messageID = 4    # hand control to message 4
def showMessage4(nextMessage, duration):
    """Blank message 3's pixels, show message 4 in blue for *duration* s, queue *nextMessage*."""
    for pixel in (64, 71):           # clear the pair lit by showMessage3
        strip[pixel] = (0, 0, 0)
    blue = (0, 0, 255)
    for pixel in (96, 103):          # light this message's pair
        strip[pixel] = blue
    strip.write()                    # push the buffered colors to the LEDs
    time.sleep(duration)
    globalvariables.messageID = nextMessage
# Main loop: endlessly dispatch on the shared message id. Each handler shows
# its message, sleeps, then sets globalvariables.messageID for the next round.
while True:
    if globalvariables.messageID == 1:
        # go to showMessage1()
        showMessage1()
    elif globalvariables.messageID == 2:
        # go to showMessage2(nextMessage)
        showMessage2(3)
    elif globalvariables.messageID == 3:
        # go to showMessage3(messageDuration)
        showMessage3(1)
    elif globalvariables.messageID == 4:
        # go to showMessage4(nextMessage,messageDuration)
        showMessage4(1,2)
"noreply@github.com"
] | noreply@github.com |
1e4126b28d998826ae65e2f07cc731f5c4d9a1ea | 994bf7ff0c5ea352fdd8e6ce4f87e915e2060ccd | /django-env/lib/python3.7/copy.py | d2e972d2a990fd7a0d877d0fd6a01f3bb999edd0 | [] | no_license | MhmdNadyMhmd/django-app | 7f4621dcfc2e6f083c3f3bae123f0f37590570ec | 29a4fc8b48a7640b99ad97d364b1c38e80535bcc | refs/heads/master | 2020-04-18T07:32:40.308180 | 2019-02-04T08:46:08 | 2019-02-04T08:46:08 | 167,356,386 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 42 | py | /home/mhmd/anaconda3/lib/python3.7/copy.py | [
"mohamed.nady.m.ali@gmail.com"
] | mohamed.nady.m.ali@gmail.com |
b065a739c64edc6b675bde1ed0ef25e4d06c8f1a | 934fe21f1cbbed6774b8bfb7f4ea198d06006a99 | /blog/migrations/0001_initial.py | 81232eb6c02fdcbdf098ba57fb8597de173cf63d | [] | no_license | malbori/my-first-blog | fe28ba67a7aaad3ec72938009e9d241e3b1a275a | ee7c4299aa420ba8f857b552b25e5e2ffdd655dc | refs/heads/master | 2021-05-30T23:00:33.181692 | 2016-03-02T23:51:16 | 2016-03-02T23:51:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,048 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-03-02 23:27
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the blog Post model.

    Applied migrations must not be edited by hand; add a new migration instead.
    """
    # First migration for this app.
    initial = True
    dependencies = [
        # Depends on whichever user model the project configures
        # (AUTH_USER_MODEL), since Post.author points at it.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('text', models.TextField()),
                # created_date defaults to "now"; published_date may stay empty.
                ('created_date', models.DateTimeField(default=django.utils.timezone.now)),
                ('published_date', models.DateTimeField(blank=True, null=True)),
                # Deleting the author cascades to their posts.
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"mahdialbori@gmail.com"
] | mahdialbori@gmail.com |
cb523c55fea4d9e3cf0e84a94f535a77406e83fc | 848511d0a5e4d2f71fe0fdfc799a132d7eddde28 | /1.introduction/write_a_function.py | 34ccb1106d53367f7c7aceae3ed778d711388746 | [] | no_license | KrishnaRaam-HackerRank/Python | dfda4882ef0143e203b1201cb822185ae86e4a10 | c5603826b0265dca7a7235c36da1925a4973dc76 | refs/heads/master | 2023-02-18T04:07:10.241935 | 2020-12-28T13:51:16 | 2020-12-28T13:51:16 | 281,044,728 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 175 | py | import calendar
#https://www.hackerrank.com/challenges/write-a-function/problem
def is_leap(year):
    """Return True if *year* is a leap year under the Gregorian rule.

    Equivalent to calendar.isleap(): divisible by 4, except century years
    that are not divisible by 400.
    """
    return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
year = int(input())
print(is_leap(year)) | [
"ramakrishna.shastri@target.com"
] | ramakrishna.shastri@target.com |
2dc8ec10581c621ff551465c0b0e09eca7adf210 | bc670588d1d15cf1ec99b8d30c1b80aaa408a76b | /017_command_line_arguments.py | 03da36414c932dafe85c2dd5e5802134bd85d8ab | [] | no_license | msgabor/Python3 | 21e2ef1fbdf1e3e5eba3d5bb8c1e19cda33c3e80 | 7bc7c254596f38dd849dfe16b5f587ca5ec8124e | refs/heads/master | 2020-12-15T16:56:12.451028 | 2020-10-26T19:59:21 | 2020-10-26T19:59:21 | 232,116,859 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 736 | py |
# Command Line Interface Creation Kit
# https://click.palletsprojects.com/en/7.x/
# option values can be pulled from environment variables
# on linux sometimes the following encoding setting is necessary:
# LC_ALL=en_US.UTF-8
#
# or in script:
#
# import locale
# locale.setlocale(locale.LC_ALL, 'UTF-8')
import click
@click.command()
@click.option('-c', '--count', default=1, help='Number of greetings.')
@click.option('-n', '--name', prompt='Your name',
help='The person to greet.')
def hello(count, name):
"""Simple program that greets NAME for a total of COUNT times."""
for index in range(count):
print('Hello %s!' % name)
if __name__ == '__main__':
hello()
| [
"noreply@github.com"
] | noreply@github.com |
8be72a52068001cc66bd59da148af82ea5b224a8 | db575f3401a5e25494e30d98ec915158dd7e529b | /BIO_Stocks/PMD.py | f9d9498e20f4a6d77b53ce8653cbb90641628f67 | [] | no_license | andisc/StockWebScraping | b10453295b4b16f065064db6a1e3bbcba0d62bad | 41db75e941cfccaa7043a53b0e23ba6e5daa958a | refs/heads/main | 2023-08-08T01:33:33.495541 | 2023-07-22T21:41:08 | 2023-07-22T21:41:08 | 355,332,230 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,134 | py | import requests
from lxml import html
from bs4 import BeautifulSoup
import os
from datetime import date, datetime
from ValidationTools import validateday
from Database_Connections import InsertData, Insert_Logging
def main(id_control=None):
    """Scrape the newest PMD (Psychemedics) press release and store it.

    Fetches the investor-news listing, takes the first article row, and if
    validateday() reports it was published today, inserts the ticker,
    description and URL into the database. Any failure is logged through
    Insert_Logging and swallowed so a batch run over many tickers keeps going.

    :param id_control: batch-run identifier forwarded to Insert_Logging on
        error. Defaults to None so the ``__main__`` guard's bare ``main()``
        call no longer raises TypeError.
    """
    try:
        url = 'https://investors.psychemedics.com/sec-filings-and-press-releases/news-releases/default.aspx'
        # A browser-like User-Agent; plain python-requests is often blocked.
        headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36'}
        result = requests.get(url, headers=headers)
        soup = BeautifulSoup(result.content.decode(), 'html.parser')
        # Each press release sits in its own 'irwTableRowItem' div; the page
        # presumably lists them newest-first, so index 0 is the latest.
        articles = soup.findAll('div', attrs={'class': 'irwTableRowItem'})
        first_article = articles[0]
        article_date = first_article.find('div', attrs={'class': 'irwPRDate'})
        article_desc = first_article.find('h4')
        v_article_date = article_date.text.strip()
        # Only store the article if it was published today.
        istoday, v_art_date = validateday(v_article_date)
        if istoday:
            # Ticker is derived from this file's name (PMD.py -> "PMD").
            v_ticker = os.path.basename(__file__).replace(".py", "")
            v_url = article_desc.a.get('href')
            v_description = article_desc.text.strip()
            now = datetime.now()
            print("URL: " + v_url)
            print("DESCRIPTION: " + v_description)
            print("ARTICLE_DATE: " + str(now))
            # Relative article links fall back to the listing-page URL.
            if "https://" in v_url:
                InsertData(v_ticker, v_description, v_url, v_art_date)
            else:
                InsertData(v_ticker, v_description, url, v_art_date)
    except Exception:
        # Best-effort scraper: log and carry on. ("Entrou na excepção ao
        # tratar" = "entered the exception handler while processing".)
        error_message = "Entrou na excepção ao tratar " + os.path.basename(__file__) + "..."
        print(error_message)
        Insert_Logging(id_control, 'Detail', error_message)
if __name__ == "__main__":
main()
| [
"andisc_3@hotmail.com"
] | andisc_3@hotmail.com |
6815c63e54dcd68f5f72a99f5025382635e0a9e8 | f46798fb3efca206e490450b8d85d2b1920abe35 | /vibez.py | e0d2e51f35f81a93c9a55e1ea2f22d38aff48294 | [
"MIT"
] | permissive | Zxayler/VibezPH | 43afe74c4e1d21760296f659286b14c44e970448 | 70b1f7d131897e6939954102d76111f26ef22fa3 | refs/heads/main | 2023-07-18T17:32:18.109777 | 2021-09-06T17:10:08 | 2021-09-06T17:10:08 | 403,698,579 | 0 | 1 | MIT | 2021-09-11T21:49:26 | 2021-09-06T16:57:55 | Python | UTF-8 | Python | false | false | 1,019 | py | from discord.ext.commands import Bot, when_mentioned_or
from discord import Intents, Activity, ActivityType, Status
from discord.ext.commands.core import command
import config
from pathlib import Path
class Vibez_PH_2_0(Bot):
    """Discord bot for Vibez PH: loads every cog from ./cogs and answers to "/"."""

    def __init__(self):
        # Hard-coded bot owners (Discord user ids).
        self.owner_ids = [481374570130046976, 817701164258689054]
        super().__init__(command_prefix=when_mentioned_or("/"), intents=Intents.all())

    def setup(self):
        """Load every cogs/<name>.py as extension 'cogs.<name>'."""
        for cog_path in Path(".").glob("./cogs/*.py"):
            self.load_extension(f'cogs.{cog_path.stem}')

    def run(self):
        """Load all extensions, then block on the Discord gateway connection."""
        self.setup()
        super().run(config.token, reconnect=True)

    async def on_ready(self):
        # Advertise a "Watching Vibez PH" presence once the gateway is up.
        presence = Activity(type=ActivityType.watching, name="Vibez PH")
        await self.change_presence(status=Status.online, activity=presence)
        print("Bot is ready.")
def main():
    """Entry point: build the bot and hand control to discord.py's event loop."""
    Vibez_PH_2_0().run()

if __name__ == '__main__':
    main()
"zachbotdavid@gmail.com"
] | zachbotdavid@gmail.com |
a41fbaec0c7870b206597745a26e289cb91943e7 | 4c9c2940ef3a07e2756fcceddf01acd384ebde01 | /Python/[5 kyu] emirps.py | 4550d94ea211e128c3446713211ba9db63e83b25 | [
"MIT"
] | permissive | KonstantinosAng/CodeWars | 7d3501a605f7ffecb7f0b761b5ffe414e2f1983a | 157818ece648454e882c171a71b4c81245ab0214 | refs/heads/master | 2023-04-11T09:44:27.480064 | 2023-03-26T21:37:07 | 2023-03-26T21:37:07 | 245,296,762 | 6 | 6 | null | null | null | null | UTF-8 | Python | false | false | 1,160 | py | # see https://www.codewars.com/kata/55a29405bc7d2efaff00007c/train/python
from TestFunction import Test
def is_prime(num):
    """Return True if *num* is a prime number.

    Trial division by 2 and by odd divisors up to sqrt(num).

    Fixes two standalone defects of the original: it returned False for 2
    and True for num < 2 (e.g. 1, where the empty loop fell through to the
    ``else`` clause). Callers in this file only pass odd values >= 3, so
    their behavior is unchanged.
    """
    if num < 2:
        return False
    if num == 2:
        return True
    if num % 2 == 0:
        return False
    for divisor in range(3, int(num ** 0.5) + 1, 2):
        if num % divisor == 0:
            return False
    return True
def is_emrip(num):
    """True if num's digit reversal is a *different* number that is prime."""
    mirrored = int(str(num)[::-1])
    if mirrored == num:
        return False  # palindromes are excluded by definition
    return is_prime(mirrored)
def primes(n):
    """Return the odd primes below n (2 is not included: the range starts at 3)."""
    return list(filter(is_prime, range(3, n, 2)))
def find_emirp(n):
    """Return [count, largest, sum] of the emirps strictly below n.

    An emirp is a non-palindromic prime whose digit reversal is also prime.
    Returns [0, 0, 0] when there are none below n.

    The original always generated every prime below a hard-coded 10**6 and
    then filtered to < n -- wasted work for small n, and silently dropped
    emirps >= 10**6 for larger n. Generating primes(n) directly yields
    identical results for n <= 10**6 and correct ones beyond.
    """
    emirps = [p for p in primes(n) if is_emrip(p)]
    if not emirps:
        return [0, 0, 0]
    return [len(emirps), max(emirps), sum(emirps)]
test = Test(None)
test.assert_equals(find_emirp(10), [0, 0, 0])
test.assert_equals(find_emirp(50), [4, 37, 98])
test.assert_equals(find_emirp(100), [8, 97, 418])
test.assert_equals(find_emirp(200), [15, 199, 1489])
test.assert_equals(find_emirp(500), [20, 389, 3232])
test.assert_equals(find_emirp(750), [25, 743, 6857])
test.assert_equals(find_emirp(915505), [9278, 915283, 3303565930])
test.assert_equals(find_emirp(530492), [6700, 399941, 1317845448])
| [
"kwstantinos.agelopoulos@outlook.com"
] | kwstantinos.agelopoulos@outlook.com |
a03452b310ca997663bc671a87eac2e20dd047ca | 32896488edb21273742e0add50405eed95f99b08 | /forms.py | 4a03e09386abee00fd7cd806a212c13c1b64823c | [
"BSD-2-Clause"
] | permissive | DuGites/django-simple-registration | 4bd0510e00d3e975fd1a8717bf84ce1d91ca7d53 | 9089e136270f51358c6c4eeeefed97e4a7c37eb0 | refs/heads/master | 2021-09-22T02:32:12.165751 | 2011-01-17T18:29:06 | 2011-01-17T18:29:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,725 | py | """
Forms and validation code for user registration. From django-registration.
Added SARRegistration
"""
from django.contrib.auth.models import User
from django import forms
from django.utils.translation import ugettext_lazy as _
from hashlib import md5
import random
# I put this on all required fields, because it's easier to pick up
# on them with CSS or JavaScript if they have a class of "required"
# in the HTML. Your mileage may vary. If/when Django ticket #3515
# lands in trunk, this will no longer be necessary.
attrs_dict = { 'class': 'required' }
class SARRegistrationForm(forms.Form):
    """Registration form that generates an opaque username automatically.

    Collects first/last name, a double-entered email address and password,
    and a terms-of-service checkbox. The username is an md5 hash of the
    submitted email plus a random salt, regenerated until it is unique.
    """

    first_name = forms.CharField(label=_("First Name"))
    last_name = forms.CharField(label=_("Last Name"))
    email = forms.EmailField(
        widget=forms.TextInput(attrs=dict(attrs_dict, maxlength=75)),
        label=_("Email address"))
    email_again = forms.EmailField(
        widget=forms.TextInput(attrs=dict(attrs_dict, maxlength=75)),
        label=_("Confirm Email address"))
    password1 = forms.CharField(
        widget=forms.PasswordInput(attrs=attrs_dict, render_value=False),
        label=_("Password"))
    password2 = forms.CharField(
        widget=forms.PasswordInput(attrs=attrs_dict, render_value=False),
        label=_("Password (again)"))
    tos = forms.BooleanField(
        widget=forms.CheckboxInput(attrs=attrs_dict),
        label=_(u'I have read and agree to the Terms of Service'),
        error_messages={'required': _("You must agree to the terms to register")})

    def clean_email(self):
        """
        Validate that the supplied email address is unique for the
        site.
        """
        if User.objects.filter(email__iexact=self.cleaned_data['email']):
            raise forms.ValidationError(_("This email address is already in use. Please supply a different email address."))
        return self.cleaned_data['email']

    def clean(self):
        """
        Generate a unique hashed username, then verify that the two
        password fields and the two email fields match. Cross-field errors
        land in ``non_field_errors()``.
        """
        # Keep hashing email + random salt until the username is unused.
        found_free_username = False
        while not found_free_username:
            self.cleaned_data['username'] = str(md5(str(self.data['email']) + str(random.random())).hexdigest())[0:30]
            try:
                User.objects.get(username__iexact=self.cleaned_data['username'])
            except User.DoesNotExist:
                found_free_username = True
        if not found_free_username:
            raise forms.ValidationError(_("A user with that username already exists."))
        if 'password1' in self.cleaned_data and 'password2' in self.cleaned_data:
            if self.cleaned_data['password1'] != self.cleaned_data['password2']:
                raise forms.ValidationError(_("The two password fields didn't match."))
        if 'email' in self.cleaned_data and 'email_again' in self.cleaned_data:
            if self.cleaned_data['email'] != self.cleaned_data['email_again']:
                raise forms.ValidationError(_("The two email fields didn't match"))
        return self.cleaned_data
class RegistrationForm(forms.Form):
    """
    Form for registering a new user account.

    Validates that the requested username is not already in use, and
    requires the password to be entered twice to catch typos.

    Subclasses should feel free to add any additional validation they
    need, but should avoid defining a ``save()`` method -- the actual
    saving of collected user data is delegated to the active
    registration backend.
    """
    username = forms.RegexField(regex=r'^\w+$',
                                max_length=30,
                                widget=forms.TextInput(attrs=attrs_dict),
                                label=_("Username"),
                                error_messages={ 'invalid': _("This value must contain only letters, numbers and underscores.") })
    email = forms.EmailField(widget=forms.TextInput(attrs=dict(attrs_dict,
                                                               maxlength=75)),
                             label=_("Email address"))
    password1 = forms.CharField(widget=forms.PasswordInput(attrs=attrs_dict, render_value=False),
                                label=_("Password"))
    password2 = forms.CharField(widget=forms.PasswordInput(attrs=attrs_dict, render_value=False),
                                label=_("Password (again)"))

    def clean_username(self):
        """
        Validate that the username is alphanumeric and is not already
        in use.
        """
        # EAFP: a missing user is the success case here.
        try:
            user = User.objects.get(username__iexact=self.cleaned_data['username'])
        except User.DoesNotExist:
            return self.cleaned_data['username']
        raise forms.ValidationError(_("A user with that username already exists."))

    def clean(self):
        """
        Verify that the values entered into the two password fields
        match. Note that an error here will end up in
        ``non_field_errors()`` because it doesn't apply to a single
        field.
        """
        if 'password1' in self.cleaned_data and 'password2' in self.cleaned_data:
            if self.cleaned_data['password1'] != self.cleaned_data['password2']:
                raise forms.ValidationError(_("The two password fields didn't match."))
        return self.cleaned_data
class RegistrationFormTermsOfService(RegistrationForm):
    """
    Subclass of ``RegistrationForm`` which adds a required checkbox
    for agreeing to a site's Terms of Service.
    """
    # An unchecked box fails validation with the custom 'required' message.
    tos = forms.BooleanField(widget=forms.CheckboxInput(attrs=attrs_dict),
                             label=_(u'I have read and agree to the Terms of Service'),
                             error_messages={ 'required': _("You must agree to the terms to register") })
class RegistrationFormUniqueEmail(RegistrationForm):
    """
    ``RegistrationForm`` variant that additionally rejects email
    addresses already present in the user table.
    """
    def clean_email(self):
        """
        Validate that the supplied email address is unique for the
        site (case-insensitive comparison).
        """
        email = self.cleaned_data['email']
        if User.objects.filter(email__iexact=email):
            raise forms.ValidationError(_("This email address is already in use. Please supply a different email address."))
        return email
class RegistrationFormNoFreeEmail(RegistrationForm):
    """
    Subclass of ``RegistrationForm`` which disallows registration with
    email addresses from popular free webmail services; moderately
    useful for preventing automated spam registrations.

    To change the list of banned domains, subclass this form and
    override the attribute ``bad_domains``.
    """
    # Entries must be lower-case: clean_email compares case-insensitively.
    bad_domains = ['aim.com', 'aol.com', 'email.com', 'gmail.com',
                   'googlemail.com', 'hotmail.com', 'hushmail.com',
                   'msn.com', 'mail.ru', 'mailinator.com', 'live.com',
                   'yahoo.com']

    def clean_email(self):
        """
        Check the supplied email address against a list of known free
        webmail domains.

        Raises ``ValidationError`` for banned domains; otherwise returns
        the address unchanged.
        """
        # Fix: domains are case-insensitive, so lower-case before the
        # lookup ("User@GMAIL.COM" previously slipped through); rsplit
        # takes the text after the *last* '@'.
        email_domain = self.cleaned_data['email'].rsplit('@', 1)[-1].lower()
        if email_domain in self.bad_domains:
            raise forms.ValidationError(_("Registration using free email addresses is prohibited. Please supply a different email address."))
        return self.cleaned_data['email']
| [
"ted.tieken@gmail.com"
] | ted.tieken@gmail.com |
4b1eb8934099b1c7799cf7e46a9df1e9b543d215 | 7dde613c3c074ca2fbacdf4ca934a34f60d7fcba | /wework/source/add_depart_page.py | ca7ce95c2d31aab4704d99b9189141e45f8e9ff5 | [] | no_license | hedyzhouhd/hw | b0b4771866b1e162bb4a6c7ee6da5ddd459c2c39 | f2af7b7b02c0e73eb9a0c8e377c8222903586c76 | refs/heads/master | 2023-04-24T10:33:42.173717 | 2021-04-22T08:54:18 | 2021-04-22T08:54:18 | 323,369,964 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,947 | py | from selenium.webdriver import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.wait import WebDriverWait
from wework.source.base_page import BasePage
import time
class AddDepartPage(BasePage):
    """Page object for the WeCom admin "add department" dialog."""

    # Input box for the new department's name inside the dialog.
    _depart_input_locator = (By.CSS_SELECTOR, ".inputDlg_item input")
    # Dialog confirm button (link text is Chinese for "OK").
    _submit_locator = (By.LINK_TEXT, '确定')

    # def add_sub_department(self, depart_name):
    #     """
    #     Add a sub-department.
    #     :return: the contact page after a successful add
    #     """
    #     from wework.source.contact_page import ContactPage
    #     self.wait_for_visible(self._depart_input_locator)
    #     self.driver.find_element(*self._depart_input_locator).send_keys(depart_name)
    #     self.driver.find_element(*self._submit_locator).click()
    #     return ContactPage(self.driver)

    def add_department(self, depart_name, parent_depart_name):
        """
        Add a department via the left-hand menu's "+" entry (multi-level
        menu nesting is not handled).

        :param depart_name: name of the department to create
        :param parent_depart_name: name of the parent department to select
        :return: NOTE(review): the docstring originally promised the
            contact page, but the return statement is commented out below,
            so this currently returns None.
        """
        from wework.source.contact_page import ContactPage
        self.driver.find_element(*self._depart_input_locator).send_keys(depart_name)
        self.wait_and_click((By.CLASS_NAME, "js_parent_party_name"))
        parent_locator = (By.XPATH, f"//*[@class='member_tag_dialog_inputDlg']//*[text()='{parent_depart_name}']")
        print(parent_locator)
        # self.wait_and_click(parent_locator)
        # Click via JavaScript; presumably a plain .click() was being
        # intercepted here (hence the commented-out wait_and_click).
        el = self.driver.find_element(*parent_locator)
        self.driver.execute_script("arguments[0].click();", el)
        # self.driver.find_element(*self._submit_locator).click()
        # r
        # locator = (By.LINK_TEXT, f'{parent_depart_name}')
        # els = self.driver.find_elements(*locator)
        # print(len(els))
| [
"2862514060@qq.com"
] | 2862514060@qq.com |
018b2906e7a41541d957764ddd1c47e355d03386 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_2464487_0/Python/CuteCube/ra1.py | dbc146df38875aae8ae187eac50411365e303fb4 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 747 | py | #!/usr/bin/env python
import math
def main():
    """
    Read test cases from input.txt: for each pair (r, t) find the largest
    integer k with need(k, r) <= t, i.e. 2k^2 + (2r-1)k <= t, and print
    "Case #i: k" for each.

    Python 2 code (xrange/long and print statements).
    """
    f = open('input.txt', 'r')
    total_T = int(f.readline())
    #print total_T
    for T in xrange(1,total_T+1):
        r,t = f.readline().split()
        r = long(r)
        t=long(t)
        # Estimate k by solving the quadratic 2k^2 + (2r - 1)k - t = 0
        # in floating point (positive root).
        b = 2*r -1.0
        a = 2.0
        c = -t
        k = (-b + math.sqrt(b*b - 4*a*c))/2/a
        # k = 1
        k = long(k)
        # The float root can be off for huge inputs; step k up or down
        # until need(k) <= t < need(k+1) holds exactly.
        while not (need(k ,r) <= t and need(k+1, r) > t):
            if need(k, r) < t:
                k += 1
            else:
                #k = max(long(k/2)+1, long(k*0.75))
                k -= 1
        print "Case #{}: {}".format(T, long(k))
def need(k, r):
    """Return 2*k**2 + (2*r - 1)*k, written in factored form."""
    return k * (2 * k + 2 * r - 1)
if __name__ == '__main__':
main() | [
"eewestman@gmail.com"
] | eewestman@gmail.com |
4a7f9b779862e39bed7fde83a238b96e4b69f2f1 | fe4c3905ec0e2d8fa5100454c49a863bda3d05ab | /Code/Mantid/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/IndirectResolution.py | 3fe3e42c49c3011afbab8d24a9adf8e2cf6fcb2b | [] | no_license | mkoennecke/mantid | 11f16fe573056d70c119c4d6fb6984b7008cb8e6 | c0a8e5d97cde6cc28abb8c7b1b5c056986a81fec | refs/heads/master | 2021-01-18T11:51:28.997458 | 2015-02-13T10:48:51 | 2015-02-13T10:48:51 | 11,472,662 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,587 | py | from mantid.simpleapi import *
from mantid.api import *
from mantid.kernel import *
from mantid import config, logger
class IndirectResolution(DataProcessorAlgorithm):
    """Workflow algorithm that reduces raw indirect-geometry runs into a
    resolution workspace (background-subtracted, rebinned, optionally
    scaled and smoothed)."""

    def category(self):
        return 'Workflow\\Inelastic;PythonAlgorithms;Inelastic'

    def summary(self):
        return 'Creates a resolution workspace'

    def PyInit(self):
        # Declare all input/output properties exposed to the user.
        self.declareProperty(StringArrayProperty(name='InputFiles'),
                             doc='Comma seperated list if input files')
        self.declareProperty(WorkspaceProperty('OutputWorkspace', '',
                             optional=PropertyMode.Optional,
                             direction=Direction.Output),
                             doc='Output resolution workspace (if left blank a name will be gernerated automatically)')

        self.declareProperty(name='Instrument', defaultValue='',
                             validator=StringListValidator(['IRIS', 'OSIRIS', 'TOSCA']),
                             doc='Instrument used during run')
        self.declareProperty(name='Analyser', defaultValue='',
                             validator=StringListValidator(['graphite', 'mica', 'fmica']),
                             doc='Analyser used during run')
        self.declareProperty(name='Reflection', defaultValue='',
                             validator=StringListValidator(['002', '004', '006']),
                             doc='Reflection used during run')

        self.declareProperty(IntArrayProperty(name='DetectorRange', values=[0, 1]),
                             doc='Range of detetcors to use in resolution calculation')
        self.declareProperty(FloatArrayProperty(name='BackgroundRange', values=[0.0, 0.0]),
                             doc='Energy range to use as background')

        self.declareProperty(name='RebinParam', defaultValue='', doc='Rebinning parameters (min,width,max)')
        self.declareProperty(name='ScaleFactor', defaultValue=1.0, doc='Factor to scale resolution curve by')
        self.declareProperty(name='Smooth', defaultValue=False, doc='Apply WienerSmooth to resolution')

        self.declareProperty(name='Plot', defaultValue=False, doc='Plot resolution curve')
        self.declareProperty(name='Save', defaultValue=False, doc='Save resolution workspace as a Nexus file')

    def PyExec(self):
        from IndirectCommon import StartTime, EndTime, getWSprefix
        # NOTE(review): imported for side effects only -- no name from it
        # is referenced below; confirm whether it registers the reducer.
        import inelastic_indirect_reducer

        StartTime('IndirectResolution')
        self._setup()

        # Reduce the raw run(s); summed files are grouped into one workspace.
        InelasticIndirectReduction(Instrument=self._instrument,
                                   Analyser=self._analyser,
                                   Reflection=self._reflection,
                                   Grouping='All',
                                   SumFiles=True,
                                   InputFiles=self._input_files,
                                   DetectorRange=self._detector_range,
                                   OutputWorkspace='__icon_ws_group')

        icon_ws = mtd['__icon_ws_group'].getItem(0).getName()

        # Auto-generate the output name from the reduced workspace prefix
        # when the caller left OutputWorkspace blank.
        if self._out_ws == "":
            self._out_ws = getWSprefix(icon_ws) + 'res'

        if self._scale_factor != 1.0:
            Scale(InputWorkspace=icon_ws, OutputWorkspace=icon_ws, Factor=self._scale_factor)

        # Subtract the flat background estimated over the given energy range.
        CalculateFlatBackground(InputWorkspace=icon_ws, OutputWorkspace=self._out_ws,
                                StartX=self._background[0], EndX=self._background[1],
                                Mode='Mean', OutputMode='Subtract Background')

        Rebin(InputWorkspace=self._out_ws, OutputWorkspace=self._out_ws, Params=self._rebin_string)

        if self._smooth:
            # Smooth into a temp workspace so the sample logs can be copied
            # back before renaming over the output.
            WienerSmooth(InputWorkspace=self._out_ws, OutputWorkspace='__smooth_temp')
            CopyLogs(InputWorkspace=self._out_ws, OutputWorkspace='__smooth_temp')
            RenameWorkspace(InputWorkspace='__smooth_temp', OutputWorkspace=self._out_ws)

        self._post_process()
        self.setProperty('OutputWorkspace', self._out_ws)

        EndTime('IndirectResolution')

    def _setup(self):
        """
        Gets algorithm properties.
        """
        self._input_files = self.getProperty('InputFiles').value
        self._out_ws = self.getPropertyValue('OutputWorkspace')

        self._instrument = self.getProperty('Instrument').value
        self._analyser = self.getProperty('Analyser').value
        self._reflection = self.getProperty('Reflection').value

        self._detector_range = self.getProperty('DetectorRange').value
        self._background = self.getProperty('BackgroundRange').value
        self._rebin_string = self.getProperty('RebinParam').value
        self._scale_factor = self.getProperty('ScaleFactor').value
        self._smooth = self.getProperty('Smooth').value

        self._plot = self.getProperty('Plot').value
        self._save = self.getProperty('Save').value

    def _post_process(self):
        """
        Handles adding logs, saving and plotting.
        """
        # NOTE(review): this flag is True when the factor equals the default
        # 1.0, so 'scale' logs True when NO scaling was applied and the
        # 'scale_factor' log is only written when it is trivially 1.0.
        # Looks inverted (should likely be != 1.0) -- confirm against the
        # upstream algorithm before changing.
        use_scale_factor = self._scale_factor == 1.0
        AddSampleLog(Workspace=self._out_ws, LogName='scale', LogType='String', LogText=str(use_scale_factor))
        if use_scale_factor:
            AddSampleLog(Workspace=self._out_ws, LogName='scale_factor', LogType='Number', LogText=str(self._scale_factor))

        AddSampleLog(Workspace=self._out_ws, LogName='res_smoothing_applied', LogType='String', LogText=str(self._smooth))

        AddSampleLog(Workspace=self._out_ws, LogName='back_start', LogType='Number', LogText=str(self._background[0]))
        AddSampleLog(Workspace=self._out_ws, LogName='back_end', LogType='Number', LogText=str(self._background[1]))

        # Only record rebin bounds when a full min,width,max triple was given.
        rebin_params = self._rebin_string.split(',')
        if len(rebin_params) == 3:
            AddSampleLog(Workspace=self._out_ws, LogName='rebin_low', LogType='Number', LogText=rebin_params[0])
            AddSampleLog(Workspace=self._out_ws, LogName='rebin_width', LogType='Number', LogText=rebin_params[1])
            AddSampleLog(Workspace=self._out_ws, LogName='rebin_high', LogType='Number', LogText=rebin_params[2])

        self.setProperty('OutputWorkspace', self._out_ws)

        if self._save:
            logger.information("Resolution file saved to default save directory.")
            SaveNexusProcessed(InputWorkspace=self._out_ws, Filename=self._out_ws + '.nxs')

        if self._plot:
            from IndirectImport import import_mantidplot
            mtd_plot = import_mantidplot()
            mtd_plot.plotSpectrum(self._out_ws, 0)
AlgorithmFactory.subscribe(IndirectResolution)
| [
"dan@dan-nixon.com"
] | dan@dan-nixon.com |
27a755114e9428830bf580c087c5298f47d7fec3 | 265392e81827a0489286c499afe85fac2f2eb664 | /blogs/views.py | ed0c470dc3aebb1fe30f9b8aa7b919dbb9281967 | [] | no_license | Girishiam/portfolio_website | 978bb75d1b9c53eb6aa7f02f91b6d8eabcd23d29 | 4d87007fee5a040ed28b5821bde8834c299db4c0 | refs/heads/main | 2023-03-01T17:30:52.332034 | 2021-02-17T15:53:43 | 2021-02-17T15:53:43 | 338,725,782 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 378 | py | from django.shortcuts import render , get_object_or_404
from .models import Blogging
# Create your views here.
def blogs(request):
    """Render the blog index with all posts, newest first."""
    all_posts = Blogging.objects.order_by('-date')
    context = {'blogs': all_posts}
    return render(request, 'blogs.html', context)
def details(request, blog_id):
    """Render one blog post; raise Http404 when the pk is unknown."""
    post = get_object_or_404(Blogging, pk=blog_id)
    return render(request, 'details.html', {'blog': post})
"girishmondal.28@gmail.com"
] | girishmondal.28@gmail.com |
9a26f041c16cbcec21210e5a86090f2c5e54013c | 13637748bbbed49f02ce7bcd87458f4c5d475776 | /other/systemd/monitorservice.py | 1c3ffd84a737ddbb45fb1141ca317ef553077958 | [
"MIT"
] | permissive | fizprof/solarthing | 7a29880c0c808a0604315e8efc9784bccea91a59 | 47c0a145f2e7c38e9aca03abdb4276a988df798b | refs/heads/master | 2023-07-11T13:44:22.977847 | 2021-07-31T04:14:23 | 2021-07-31T04:14:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,086 | py | #!/usr/bin/env python3
import sys
from typing import List
import subprocess
import time
from pathlib import Path
import json
from slack_sdk import WebClient
from slack_sdk.errors import SlackApiError
import traceback
"""
sudo python3 -m pip install slack_sdk
"""
class SlackSender:
    """Posts messages to one Slack channel, wrapping each message in a
    fixed prefix and suffix."""

    def __init__(self, prefix: str, suffix: str, slack_bot_token: str, slack_channel: str):
        self.prefix = prefix
        self.suffix = suffix
        self.slack_channel = slack_channel
        self.web_client = WebClient(token=slack_bot_token)

    def send(self, message: str):
        """Post *message* (with prefix/suffix applied); Slack API errors
        are printed and otherwise swallowed."""
        full_text = self.prefix + message + self.suffix
        try:
            self.web_client.chat_postMessage(channel=self.slack_channel, text=full_text)
        except SlackApiError:
            traceback.print_exc()
class ServiceMonitor:
    """Tracks one systemd service across polls and announces
    start/stop/restart transitions via Slack."""

    def __init__(self, service_name: str, slack: SlackSender):
        self.service_name = service_name
        self.slack = slack
        # None means "not observed yet"; the first update() then sends an
        # initial status message instead of a transition message.
        self.was_running = None
        # Raw ActiveEnterTimestamp output from the previous poll while
        # running; a change without an observed stop means a restart.
        self.last_start_message = None

    def update(self):
        """Poll the service once and send a Slack message on any state change."""
        running = is_running(self.service_name)
        start_message = get_start_message(self.service_name)
        if self.was_running is None:
            # First observation after monitoring begins.
            self.slack.send(f"Started monitoring {self.service_name}. It is {'' if running else 'not '}running.")
        elif running != self.was_running:
            if running:
                self.slack.send(f"Started {self.service_name}")
            else:
                self.slack.send(f"Stopped {self.service_name}")
        elif self.last_start_message is not None and running and self.last_start_message != start_message:
            # Still running, but with a newer start timestamp: restarted
            # between two polls.
            self.slack.send(f"Restarted {self.service_name}")
        self.was_running = running
        if running:
            self.last_start_message = start_message
        else:
            self.last_start_message = None
def is_running(service_name):
    """Return True when systemd reports *service_name* as active."""
    return subprocess.call(["systemctl", "is-active", "--quiet", service_name]) == 0
def get_start_message(service_name):
    """Return the unit's raw ``ActiveEnterTimestamp`` property as bytes."""
    command = ["systemctl", "show", "--property=ActiveEnterTimestamp", service_name]
    return subprocess.check_output(command)
def monitor(service_names: List[str], slack: SlackSender):
    """Poll every listed service forever, reporting changes to Slack."""
    watchers = [ServiceMonitor(name, slack) for name in service_names]
    while True:
        for watcher in watchers:
            watcher.update()
        time.sleep(0.3)
def main(args: List[str]):
    """Load config.json, build the Slack sender and monitor the configured
    services until interrupted; announce the shutdown on Ctrl-C.

    Required config keys: service_names, slack_bot_token, slack_channel.
    Optional: prefix, suffix (message decoration).
    """
    with Path("config.json").open() as file:
        config = json.load(file)
    service_names = config["service_names"]
    # Conditional expressions replace the original try/except KeyError
    # blocks; same behavior, including an empty-string value yielding the
    # bare separator.
    prefix = config["prefix"] + " " if "prefix" in config else ""
    suffix = " " + config["suffix"] if "suffix" in config else ""
    slack_bot_token = config["slack_bot_token"]  # xoxb-***
    slack_channel = config["slack_channel"]

    slack = SlackSender(prefix, suffix, slack_bot_token, slack_channel)
    try:
        monitor(service_names, slack)
    except KeyboardInterrupt:
        # Bug fix: the original referenced an undefined 'service_name'
        # here, so Ctrl-C raised NameError instead of sending the message.
        slack.send("Stopped monitoring " + ", ".join(service_names))
main(sys.argv[1:])
| [
"retrodaredevil@gmail.com"
] | retrodaredevil@gmail.com |
eebec9728ea5e06110a3783a4e01267d3f1990d4 | 380562de8f7d7a88c2a9ed286c4bbd40df291e32 | /get_first.py | ba7618592a8847f495a07c34631dcfa1a1fe1469 | [] | no_license | HarryTheHB/MachineLearning_MLP | 866da690a181e6e7357ee7b7fcf9140367d6513b | 90aaca58d38871585804b016ce5110626f154a9a | refs/heads/master | 2021-01-06T20:46:51.245888 | 2014-12-08T17:13:19 | 2014-12-08T17:13:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 423 | py | import argparse
parser = argparse.ArgumentParser(description='Process data')
parser.add_argument('-i', '--input', help='input file name', required=True)
parser.add_argument('-o', '--output', help='output file name', required=True)
args = parser.parse_args()
fr = open(args.input, 'r')
fw = open(args.output, 'w')
lines = fr.read().strip().splitlines()
for t in lines:
words = t.split(', ')
fw.write(words[0]+'\n')
| [
"daiyang58@hotmail.com"
] | daiyang58@hotmail.com |
36675db792eaa04c9b5c9732126b47ebda3a154f | 43cdd7cb26fe44b1ed7de6a46f8b5e680c9b1372 | /openpeerpower/generated/config_flows.py | 244c7e0f950d8f44b848d46e9680ed38ab8aaabb | [
"Apache-2.0"
] | permissive | OpenPeerPower/Open-Peer-Power | 02ec5c133564b47c6f72f669e844a666643cacd6 | 940a04a88e8f78e2d010dc912ad6905ae363503c | refs/heads/master | 2022-08-16T09:38:49.994009 | 2021-05-29T03:54:13 | 2021-05-29T03:54:13 | 183,174,237 | 1 | 0 | Apache-2.0 | 2022-07-15T18:43:02 | 2019-04-24T07:35:47 | Python | UTF-8 | Python | false | false | 246 | py | """Automatically generated by oppfest.
To update, run python3 -m script.oppfest
"""
# fmt: off
FLOWS = [
"almond",
"daikin",
"dialogflow",
"homekit_controller",
"met",
"mobile_app",
"mqtt",
"zha",
"zwave"
]
| [
"pcaston@arach.net.au"
] | pcaston@arach.net.au |
38ac0ebefab8c9d3b98a064ad4fb25c4b5357b6d | 2c889d2bde3a6a4c1fc8a0c3d0070779adcfa30b | /p3.py | a8bd0ffd4842f4df6ec610630daa70778987de8d | [] | no_license | rjorth/lab4-key | b5c6c3cca834ba191ea18940160933af594ba8d8 | 67d4a417b6b137a83ee96bc281ab324b5cd8677b | refs/heads/master | 2021-05-15T12:16:13.012732 | 2017-11-01T13:20:52 | 2017-11-01T13:20:52 | 108,420,020 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,465 | py | import sys
class Node(object):
    """One cell of a singly linked list: a value and the next cell."""
    def __init__(self, v, n):
        self.value = v
        self.next = n


class LinkedList(object):
    """Minimal singly linked list supporting prepend, membership test,
    removal, length, in-place reversal and printing.

    Bug fixes vs. the original: ``test``/``remove``/``Lprint`` accessed
    attributes that do not exist (``current.testValue``, ``self.next``,
    ``node.data``); ``reverse`` called an undefined global and used
    Python 2 print statements; and a stray ``reverse(object)`` call in
    the class body made the module unimportable.
    """

    def __init__(self):
        self.firstLink = None  # head of the list; None when empty

    def add(self, newElement):
        """Prepend *newElement* to the list in O(1)."""
        self.firstLink = Node(newElement, self.firstLink)

    def test(self, testValue):
        """Return True when *testValue* is present, else False."""
        current = self.firstLink
        while current:
            if current.value == testValue:
                return True
            current = current.next
        return False

    def remove(self, testValue):
        """Remove the first node holding *testValue*.

        Returns True when a node was removed, False when absent.
        """
        previous = None
        current = self.firstLink
        while current:
            if current.value == testValue:
                if previous is None:
                    self.firstLink = current.next
                else:
                    previous.next = current.next
                return True
            previous = current
            current = current.next
        return False

    def len(self):
        """Return the number of nodes in the list."""
        count = 0
        temp = self.firstLink
        while temp:
            count += 1
            temp = temp.next
        return count

    def reverse(self):
        """Reverse the list in place (iterative pointer reversal)."""
        previous = None
        current = self.firstLink
        while current:
            nxt = current.next
            current.next = previous
            previous = current
            current = nxt
        self.firstLink = previous

    def Lprint(self):
        """Print each stored value, head first, one per line."""
        node = self.firstLink
        while node:
            print(node.value)
            node = node.next
| [
"noreply@github.com"
] | noreply@github.com |
1778146de13d49f780aa4ee3134a372e96fa308a | f008fe1057b00d19d7812e0373669763c9992f42 | /python/hpatches_eval.py | ebac75261c116cac7ddb183ec31547f7388ef833 | [
"BSD-2-Clause"
] | permissive | baiyancheng20/hpatches-benchmark | 68a265f5e23f7ceabce06635818b8c6a73c3e578 | 23c3a6dcf92bff7bda492363908cbc52b2533472 | refs/heads/master | 2021-01-20T00:52:08.069991 | 2017-04-22T19:02:21 | 2017-04-22T19:02:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,678 | py | """Evaluation code for the HPatches homography patches dataset.
Usage:
hpatches_eval.py (-h | --help)
hpatches_eval.py --version
hpatches_eval.py --descr-name=<> --task=<>... [--descr-dir=<>] [--split=<>] [--dist=<>] [--delimiter=<>] [--pcapl=<>]
Options:
-h --help Show this screen.
--version Show version.
--descr-name=<> Descriptor name, e.g. sift
--descr-dir=<> Descriptor results root folder [default: ../data/descriptors]
--task=<> Task name. Valid tasks are {verification,matching,retrieval}.
--split=<> Split name. Valid are {a,b,c,full,illum,view} [default: a].
--dist=<> Distance name. Valid are {L1,L2} [default: L2]
--delimiter=<> Delimiter used in the csv files [default: ,]
--pcapl=<> Compute results for pca-power law descr [default: no]
For more visit: https://github.com/hpatches/
"""
from utils.hpatch import *
from utils.tasks import *
from utils.misc import *
from utils.docopt import docopt
import os
import time
import dill
if __name__ == '__main__':
    # Parse CLI options per the usage string in the module docstring.
    opts = docopt(__doc__, version='HPatches 1.0')

    # Descriptor CSVs are expected under <descr-dir>/<descr-name>/.
    path = os.path.join(opts['--descr-dir'],opts['--descr-name'])
    try:
        assert os.path.exists(path)
    except:
        print("%r does not exist." % (path))
        exit(0)

    # Task results are pickled into ./results and reused on later runs.
    if not os.path.exists('results'):
        os.makedirs('results')

    descr_name = opts['--descr-name']
    print('\n>> Running HPatch evaluation for %s' % blue(descr_name))
    descr = load_descrs(path,dist=opts['--dist'],sep=opts['--delimiter'])

    # Resolve the requested split (a/b/c/full/illum/view) from the task
    # definition file.
    with open('../tasks/splits/splits.json') as f:
        splits = json.load(f)
    splt = splits[opts['--split']]

    # Run each requested task unless its result pickle is already cached.
    for t in opts['--task']:
        if os.path.exists("results/"+descr_name+"_"+t+"_"+splt['name']+".p"):
            print("Results for the %s, %s task, split %s, already cached!" %\
                  (descr_name,t,splt['name']))
        else:
            res = methods[t](descr,splt)
            dill.dump( res, open( "results/"+descr_name+"_"+t+"_"+splt['name']+".p", "wb"))

    # Optionally repeat the evaluation with PCA + power-law normalised
    # descriptors, caching under the _pcapl_ file names.
    if opts['--pcapl']!='no':
        print('>> Running evaluation for %s normalisation' % blue("pca/power-law"))
        compute_pcapl(descr,splt)
        for t in opts['--task']:
            if os.path.exists("results/"+descr_name+"_pcapl_"+t+"_"+splt['name']+".p"):
                print("Results for the %s, %s task, split %s,PCA/PL already cached!" %\
                      (descr_name,t,splt['name']))
            else:
                res = methods[t](descr,splt)
                dill.dump( res, open( "results/"+descr_name+"_pcapl_"+t+"_"+splt['name']+".p", "wb"))
| [
"v.balntas@imperial.ac.uk"
] | v.balntas@imperial.ac.uk |
06cb210a650fc39142459a1b18995c9b0a34b4d9 | bbfd441168758ed5fd9801c8330698e2ca3bbaeb | /tf-2-workflow/train_model/train.py | 76bb16faba426c0f0276db7ab0fece03fbab9473 | [
"Apache-2.0"
] | permissive | aws-samples/amazon-sagemaker-script-mode | 66d5041bb35e55eea4efe511c83cd21d4add58db | 54be9ca995bf33d87ccfede258f1c639e07c19fc | refs/heads/master | 2023-08-03T08:49:36.256492 | 2022-03-09T00:18:07 | 2022-03-09T00:18:07 | 169,129,147 | 168 | 103 | Apache-2.0 | 2023-07-21T04:47:36 | 2019-02-04T18:45:15 | Jupyter Notebook | UTF-8 | Python | false | false | 2,459 | py | import argparse
import numpy as np
import os
import tensorflow as tf
from model_def import get_model
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
def parse_args():
    """Parse hyperparameters and SageMaker data-channel paths.

    Returns the ``(known_args, unrecognized)`` pair from
    ``parse_known_args`` so extra launcher arguments do not abort the
    script.
    """
    arg_parser = argparse.ArgumentParser()

    # Hyperparameters sent by the client arrive as command-line flags.
    arg_parser.add_argument('--epochs', type=int, default=1)
    arg_parser.add_argument('--batch_size', type=int, default=64)
    arg_parser.add_argument('--learning_rate', type=float, default=0.1)

    # Data channel directories, defaulted from SageMaker env variables.
    arg_parser.add_argument('--train', type=str, default=os.environ.get('SM_CHANNEL_TRAIN'))
    arg_parser.add_argument('--test', type=str, default=os.environ.get('SM_CHANNEL_TEST'))

    # Model output directory (SageMaker default is /opt/ml/model).
    arg_parser.add_argument('--model_dir', type=str, default=os.environ.get('SM_MODEL_DIR'))

    return arg_parser.parse_known_args()
def get_train_data(train_dir):
    """Load training features and labels from .npy files in *train_dir*."""
    features = np.load(os.path.join(train_dir, 'x_train.npy'))
    labels = np.load(os.path.join(train_dir, 'y_train.npy'))
    print('x train', features.shape,'y train', labels.shape)
    return features, labels
def get_test_data(test_dir):
    """Load test features and labels from .npy files in *test_dir*."""
    features = np.load(os.path.join(test_dir, 'x_test.npy'))
    labels = np.load(os.path.join(test_dir, 'y_test.npy'))
    print('x test', features.shape,'y test', labels.shape)
    return features, labels
if __name__ == "__main__":

    args, _ = parse_args()

    print('Training data location: {}'.format(args.train))
    print('Test data location: {}'.format(args.test))
    x_train, y_train = get_train_data(args.train)
    x_test, y_test = get_test_data(args.test)

    # Training is pinned to the CPU device.
    device = '/cpu:0'
    print(device)
    batch_size = args.batch_size
    epochs = args.epochs
    learning_rate = args.learning_rate
    print('batch_size = {}, epochs = {}, learning rate = {}'.format(batch_size, epochs, learning_rate))

    with tf.device(device):
        # Build, compile and fit the regression model (MSE loss, plain SGD).
        model = get_model()
        optimizer = tf.keras.optimizers.SGD(learning_rate)
        model.compile(optimizer=optimizer, loss='mse')
        model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs,
                  validation_data=(x_test, y_test))

        # evaluate on test set
        scores = model.evaluate(x_test, y_test, batch_size, verbose=2)
        print("\nTest MSE :", scores)

        # Save in SavedModel format under version subdirectory "1"
        # (the layout TensorFlow Serving / SageMaker expects).
        model.save(args.model_dir + '/1')
| [
"rabowsky@amazon.com"
] | rabowsky@amazon.com |
969d308cce5315153f4c62b2ab6b833571aed1c2 | dbb3dfcf57d4a57045f6936da82f7b4ace6221de | /collate.py | d9f5253185d3f4f3ea1850215b7eff393d7ac81a | [
"MIT"
] | permissive | geoquant/JSON-QAnon | d536b52999a8e553d2e89f3ec1cd820e06b70bd8 | 2cd3f0ef311cefeaf74fa1641fbeff6b163ed85a | refs/heads/main | 2023-03-18T17:35:54.539852 | 2021-03-05T06:17:44 | 2021-03-05T06:17:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,413 | py | #!/usr/bin/env python3
import copy
import json
import os
import re
from bs4 import BeautifulSoup
# location of 1.htm, 2.htm, etc.
PAGES_DIRECTORY = 'qposts.online/page'
# when False, trim stray whitepaces from links in posts+refs; see explanation in clean_up_raw_text()
KEEP_ORIGINAL_WHITESPACE = False
def extract_metadata_block(meta_container):
    """
    Extracts author + tripcode, source site + board, and link if applicable.
    Returns an object of what it finds.

    :param meta_container: bs4 Tag holding the post's metadata spans
        ('name', 'source', 'time', 'num').
    :return: dict with 'author', optional 'tripcode', 'source' (dict of
        'board', optional 'link', 'site'), integer 'time' and 'id'.

    Note: decompose() mutates the parsed tree, so the extraction order
    below matters -- each strong/anchor is read and then removed to
    isolate the remaining text.
    """
    collated_metadata = {}

    # extract the span with the name+tripcode in it
    author_container = meta_container.find('span', 'name')
    # extract the bold/strong text -- i.e. the main name
    author = author_container.find('strong').getText()
    assert len(author) > 0, 'Author name not found!!'
    collated_metadata['author'] = author

    # remove the main name, leaving only the tripcode if applicable (and strip l/r whitespace)
    author_container.find('strong').decompose()
    maybe_tripcode = author_container.getText().strip()
    if maybe_tripcode:
        collated_metadata['tripcode'] = maybe_tripcode

    # extract source board + site block
    source_container = meta_container.find('span', 'source')
    # extract the bold/strong text -- i.e. the board name
    board = source_container.find('strong').getText()
    assert len(board) > 0, 'Board name not found!!'
    collated_metadata['source'] = {}
    collated_metadata['source']['board'] = board

    # remove the board name, leaving only the site (and maybe link if applicable)
    source_container.find('strong').decompose()

    # get thread link if we have it
    maybe_thread_link = source_container.find('a')
    if maybe_thread_link:
        collated_metadata['source']['link'] = maybe_thread_link['href']
        maybe_thread_link.decompose()

    # we've extracted board name and link if we have it; all that's left is the site
    site = source_container.getText().strip()
    assert site, 'Site not found!!'
    collated_metadata['source']['site'] = site

    # attach timestamp
    collated_metadata['time'] = int(meta_container.find('span', 'time').getText())

    # attach id
    collated_metadata['id'] = int(meta_container.find('span', 'num').getText())

    return collated_metadata
def extract_images(post_block):
    """
    Extracts image filename + uploaded image name for all images in a
    post/reference.

    Returns a list of {'file', 'name'} dicts, or None when the block
    has no images container.
    """
    images_container = post_block.find('div', 'images', recursive=False)
    if not images_container:
        return None

    # Well laid out <figure>/<figcaption> pairs make this easy.
    collated_images = []
    for figure in images_container.findAll('figure', recursive=False):
        href = figure.find('a')['href']
        collated_images.append({
            'file': os.path.split(href)[1],                 # filename on disk
            'name': figure.find('figcaption').getText(),    # filename as posted
        })
    return collated_images
def extract_body(post_block, is_ref=False):
    """
    Extracts the main body text as plaintext less any referenced divs, images, html tags, etc.
    Returns a string; newlines indicated by literal \n.

    NOTE(review): the ``is_ref`` flag is accepted but never used in the
    function body.
    """

    """
    During body extraction, I decompose a number of elements (including divs, which contain post
    references) which basically vaporizes them. Since we need the (post) references later to extract
    and python is pass by reference*, we need to duplicate the object.

    * if you pull an https://xkcd.com/386/ and say something like "ackchyually in python, object
    references are passed by value..." I will find you and smack you
    """
    post_block_copy = copy.copy(post_block)

    # just attempt to find the main text content; some main posts have a div for this, some
    # don't, and no references have it so try/catch
    try:
        content_div = post_block_copy.find('div', 'text')
        if content_div:
            post_block_copy = content_div
    except AttributeError:
        pass

    # this is random div noise (unlikely) or a referenced post (almost always); regardless, we don't
    # want it/them
    divs = post_block_copy.findAll('div')
    for div in divs:
        div.decompose()

    # bs4 thinks these tags need a separator when rendering with get_text(); who knows why...
    # Unwrapping them seems to solve it. If any other tags that need to be unwrapped pop up, throw
    # them in tags_to_unwrap
    tags_to_unwrap = ['abbr', 'em']
    for tag_to_unwrap in tags_to_unwrap:
        instances_to_unwrap = post_block_copy.findAll(tag_to_unwrap)
        for instance_to_unwrap in instances_to_unwrap:
            instance_to_unwrap.unwrap()

    # Get your pitchforks ready. I don't know why bs4 behaves this way but for some reason it's
    # throwing separators where there shouldn't be after unwrapping the abbrs but extracting and
    # reparsing seems to fix it. I hate it; I don't understand it; it works; it stays.
    post_block_copy_duplicate = BeautifulSoup(str(post_block_copy), 'html.parser')

    # Render to plaintext, one line per block element, then normalise.
    raw_post_text = post_block_copy_duplicate.get_text(separator="\n")

    return clean_up_raw_text(raw_post_text)
def extract_references(post_block):
    """
    Extracts the referenced posts from the main post block and returns a list of posts, which always
    contains the text that referred to it in the original post (e.g. >>123456) and can contain image
    objects + text objects.
    Returns a list of post objects.

    Returns None (not []) when the post references nothing, so callers can
    truth-test the result.
    """
    refs = post_block.findAll('div', 'op')
    if not refs:
        return None
    collated_refs = []
    for ref in refs:
        collated_ref = {}
        # the referring text is always the immediately previous sibling of the reference
        collated_ref['reference'] = ref.previous_sibling.getText()
        # extract reference text if we have it
        # NOTE(review): extract_body() already runs clean_up_raw_text on its
        # result, so the call below re-applies it; harmless since the cleanup
        # is idempotent, but confirm before simplifying.
        maybe_text = extract_body(ref, is_ref=True)
        if maybe_text:
            collated_ref['text'] = clean_up_raw_text(maybe_text)
        # extract the reference's image if we have any
        maybe_images = extract_images(ref)
        if maybe_images:
            collated_ref['images'] = maybe_images
        collated_refs.append(collated_ref)
    return collated_refs
def clean_up_emails(post):
    """
    Patch the handful of posts whose content was mangled by the host's
    server-side email-protection script (an artifact of the mirror, not of
    the original posts).  Posts are identified by their immutable timestamp;
    if more mangled posts appear, extend the lookups below.

    Mutates *post* in place and also returns it.
    """
    timestamp = post['post_metadata']['time']
    # One post whose author handle tripped the protection script.
    if timestamp == 1526767434:
        post['post_metadata']['author'] = 'NowC@mesTHEP@in—-23!!!'
    # Q sure liked this link; three separate posts using it
    mangled_link_timestamps = (1588693786, 1585242439, 1553795409)
    if timestamp in mangled_link_timestamps:
        restored_link = ('https://uscode.house.gov/view.xhtml?path=/prelim@title'
                         '18/part1/chapter115&edition=prelim')
        post['text'] = post['text'].replace('email\xa0protected]', restored_link)
    return post
def clean_up_raw_text(text):
    """
    Normalise minor spacing oddities around URLs.  These oddities exist in
    the original posts, so this technically alters original content; setting
    the module flag KEEP_ORIGINAL_WHITESPACE to True short-circuits the
    cleanup entirely.
    """
    if KEEP_ORIGINAL_WHITESPACE:
        return text
    # Collapse any whitespace injected right after a URL scheme.
    for scheme in ('http://', 'https://'):
        scheme_gap = re.compile(re.escape(scheme) + r"\s+")
        text = scheme_gap.sub(scheme, text)
    # Known URLs posted with a stray space after the dot.
    known_spaced_urls = (
        ('twitter. com', 'twitter.com'),
        ('theguardian. com', 'theguardian.com'),
    )
    for broken, fixed in known_spaced_urls:
        text = text.replace(broken, fixed)
    return text
# Main scrape loop: walk every HTML page in PAGES_DIRECTORY, collate each
# post into a plain dict, then dump the whole sorted list as posts.json.
collected_posts = []
# loop through all html files in the directory to be scanned
for entry in os.scandir(PAGES_DIRECTORY):
    # # helpful for debugging -- skip all files but this one
    # if entry.name != '1.html':
    #     continue
    # parse the page html
    # NOTE(review): open(entry.path) is never explicitly closed; bs4 reads it
    # eagerly so this only leaks the handle until GC.
    soup = BeautifulSoup(open(entry.path), 'html.parser')
    # extract all posts
    posts = soup.findAll('div', {'class': 'post', 'data-timestamp': True})
    for post in posts:
        collated_post = {}
        # yank metadata
        meta_container = post.find('div', 'meta')
        collated_post['post_metadata'] = extract_metadata_block(meta_container)
        # # helpful for debugging -- append src file to metadata
        # collated_post['post_metadata']['filename'] = entry.name
        # # helpful for debugging -- skip all posts but this ID
        # # requires scrape_metadata to be appended above
        # if collated_post['post_metadata']['id'] != 4939:
        #     continue
        # break out main meat of the post for easier manipulation
        post_body = post.find('div', 'message')
        # yank images
        extracted_images = extract_images(post_body)
        if extracted_images:
            collated_post['images'] = extracted_images
        # yank main post text
        extracted_body = extract_body(post_body)
        if extracted_body:
            collated_post['text'] = extracted_body
        # yank referenced posts
        referenced_posts = extract_references(post_body)
        if referenced_posts:
            collated_post['referenced_posts'] = referenced_posts
        # clean up emails -- see func comment; this is maximum clowntown
        collated_post = clean_up_emails(collated_post)
        # attach to big list
        collected_posts.append(collated_post)
# sort by date asc
collected_posts.sort(key=lambda post: post['post_metadata']['time'])
# pretty print and dump it
# if you're desperate, removing indent=2 shaves a half meg off
# ensure_ascii=False keeps the unicode (tripcodes, em dashes) readable.
with open('posts.json', 'w') as outfile:
    json.dump(collected_posts, outfile, indent=2, ensure_ascii=False)
| [
"jack.kingsman@gmail.com"
] | jack.kingsman@gmail.com |
ad0d7fcc46084f9cf006198340d01c1534e41179 | d64893992dee08c1c84111fc700c267339230743 | /cctv.py | 0fd3b5e558879c84263913e5e07432c7acd989ed | [] | no_license | amredarpan11/SGP-2021 | fe76e677f1b1cf372efafa0662c5168180878a34 | d5a422e2011ae6ca5e3e05d310dad337bdd7bf2b | refs/heads/main | 2023-04-17T04:43:57.682365 | 2021-04-29T05:22:07 | 2021-04-29T05:22:07 | 349,281,663 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,625 | py | # coding=utf-8
import cv2
import time
def minimizeWindow():
    # Minimise the current foreground (console/preview) window.
    # Windows-only: relies on pywin32, imported lazily so the import cost and
    # dependency are only paid when the user actually presses 'm'.
    import win32gui,win32con
    window = win32gui.GetForegroundWindow()
    win32gui.ShowWindow(window,win32con.SW_MINIMIZE)
def cctv():
    """Record webcam footage until the user presses Esc.

    Opens camera 0 at 640x480, overlays a "camera 1" label on every frame,
    shows a live preview window, and writes each frame to
    footages/recording <timestamp>.mp4.  Press Esc to stop and save;
    press 'm' to minimise the preview window.
    """
    video = cv2.VideoCapture(0)
    video.set(3, 640)   # 3 == CAP_PROP_FRAME_WIDTH
    video.set(4, 480)   # 4 == CAP_PROP_FRAME_HEIGHT
    width = video.get(3)
    height = video.get(4)
    print("resolution is set to:", width, ' x ', height)
    print("\n1.press esc to exit.\n2 press m for minimize.")
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    date_time = time.strftime("recording %H-%M-%d -%m -%y")
    output = cv2.VideoWriter('footages/' + date_time + '.mp4', fourcc, 20.0, (640, 480))
    while video.isOpened():
        check, frame = video.read()
        if check:
            frame = cv2.flip(frame, 1)  # mirror for a natural preview
            cv2.rectangle(frame, (5, 5, 100, 20), (255, 255, 255), cv2.FILLED)
            cv2.putText(frame, "camera 1", (20, 20), cv2.FONT_HERSHEY_DUPLEX, 0.5, (5, 5, 5), 1)
            cv2.imshow("CCTV CAMERA", frame)
            output.write(frame)
            # BUG FIX: the original called cv2.waitKey(1) twice per frame, so
            # each branch consumed a *different* key event and an Esc press
            # could be swallowed by the 'm' check (and vice versa).  Poll once
            # and compare the single result.
            key = cv2.waitKey(1)
            if key == 27:  # Esc
                print("Footage saved in system")
                break
            elif key == ord('m'):
                minimizeWindow()
        else:
            print("cannot open the camera")
            break
    # Release both ends so the container is finalised and the device freed.
    video.release()
    output.release()
    cv2.destroyAllWindows()
# Console entry point: banner plus a yes/no prompt before starting capture.
print("*"*80+"\n"+" "*30+"Welcome to the Smart cctv\n"+"*"*80)
# NOTE(review): int(input(...)) raises ValueError on non-numeric input, and
# any number other than 1 or 2 falls through silently.
ask = int(input("Do you want to open the smart cctv?\n1. Yes\n2. No\n>>> "))
if ask == 1:
    cctv()
elif ask == 2:
    print("Thanks for visiting ")
    exit()
| [
"noreply@github.com"
] | noreply@github.com |
1685d2a9cf7e5dc726fffb430a61ba17869e53f8 | 4cce3b466591f7f8b9d58c1f8cae4dd0b6425b09 | /classes dealing.py | 09e2606008d31426022cdef988fb9cec1726491e | [] | no_license | adityamangal1/hackerRank-solutions | 4e5fc66785215688449f58176b0260e05fb0c404 | 102ee32f5984240939bf14e799a458d99388774b | refs/heads/master | 2023-04-18T15:35:36.998087 | 2021-04-22T07:16:38 | 2021-04-22T07:16:38 | 297,935,486 | 13 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,770 | py | import math
class Complex(object):
    """Small complex-number wrapper supporting +, -, *, /, modulus, and
    HackerRank-style two-decimal string rendering ("a.bc+d.efi")."""

    def __init__(self, real, imaginary):
        self.real = real
        self.imaginary = imaginary

    def __add__(self, no):
        total = complex(self.real, self.imaginary) + complex(no.real, no.imaginary)
        return Complex(total.real, total.imag)

    def __sub__(self, no):
        diff = complex(self.real, self.imaginary) - complex(no.real, no.imaginary)
        return Complex(diff.real, diff.imag)

    def __mul__(self, no):
        prod = complex(self.real, self.imaginary) * complex(no.real, no.imaginary)
        return Complex(prod.real, prod.imag)

    def __truediv__(self, no):
        # Multiply by the divisor's conjugate and divide by |no|^2.
        denom = no.real ** 2 + no.imaginary ** 2
        real_part = (self.real * no.real + self.imaginary * no.imaginary) / denom
        imag_part = (self.imaginary * no.real - self.real * no.imaginary) / denom
        return Complex(real_part, imag_part)

    def mod(self):
        """Return the magnitude as a Complex with zero imaginary part."""
        return Complex((self.real ** 2 + self.imaginary ** 2) ** (1 / 2), 0)

    def __str__(self):
        # Always render both components to two decimals with an explicit sign.
        if self.imaginary == 0:
            return "%.2f+0.00i" % (self.real)
        if self.real == 0:
            sign = '+' if self.imaginary >= 0 else '-'
            return "0.00%s%.2fi" % (sign, abs(self.imaginary))
        sign = '+' if self.imaginary > 0 else '-'
        return "%.2f%s%.2fi" % (self.real, sign, abs(self.imaginary))
if __name__ == '__main__':
    # Read two complex numbers ("real imaginary" per line) from stdin and
    # print the six results (sum, difference, product, quotient, both
    # moduli), one per line -- the HackerRank I/O contract.
    c = map(float, input().split())
    d = map(float, input().split())
    x = Complex(*c)
    y = Complex(*d)
    print(*map(str, [x+y, x-y, x*y, x/y, x.mod(), y.mod()]), sep='\n')
| [
"adityamangal0202@gmail.com"
] | adityamangal0202@gmail.com |
eb6e24298ea8f7fb918358f14f3dc37f683cef00 | c74ee64c2730c1a8c2a8b39e394fce2125722b1c | /run_tests.py | 691ec73ca87559fb1203ccf4cbffc68e73b10afd | [
"MIT"
] | permissive | hua2001/FFTHomPy | b754213c1d9296236709b13409eb9f006781c13a | e733194b12f0d732377615163495d2bde06a1e46 | refs/heads/master | 2020-12-25T15:30:24.752750 | 2015-08-28T15:36:17 | 2015-08-28T15:37:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,523 | py | #!/usr/bin/python
import unittest
import numpy as np
from homogenize.problem import Problem, import_file
import cPickle as Pickle
import os
class Test_main(unittest.TestCase):
    # Regression tests: run each example problem end-to-end and compare the
    # homogenized matrices against pickled reference results.
    # NOTE(review): this is Python-2-era code (cPickle import, text-mode 'r'
    # pickle read); under Python 3 the pickle load would need mode 'rb'.
    def setUp(self):
        # Example definition files exercised by test_main, resolved relative
        # to the repository root.
        self.input_files = ['examples/scalar/scalar_2d.py',
                            'examples/scalar/scalar_3d.py',
                            'examples/scalar/from_file.py',
                            'examples/elasticity/linelas_3d.py']
    def tearDown(self):
        pass
    def test_main(self):
        # the main routine for testing
        for input_file in self.input_files:
            self.main(input_file)
    def main(self, input_file):
        # test a particular file
        basen = os.path.basename(input_file)
        conf = import_file(input_file)
        for conf_problem in conf.problems:
            prob = Problem(conf_problem, conf)
            prob.calculate()
            # Reference results live under tests/results/<file>_<problem>.
            file_res = 'tests/results/%s_%s' % (basen.split('.')[0], prob.name)
            with open(file_res, 'r') as frs:
                res = Pickle.load(frs)
            # check the homogenized matrices
            for primdual in prob.solve['primaldual']:
                kwpd = 'mat_'+primdual
                for kw in prob.output[kwpd]:
                    # Frobenius-norm difference against the stored reference.
                    val = np.linalg.norm(prob.output[kwpd][kw] - res[kwpd][kw])
                    msg = 'Incorrect (%s) in problem (%s)' % (kw, prob.name)
                    self.assertAlmostEqual(0, val, msg=msg, delta=1e-14)
if __name__ == "__main__":
    # Delegate to unittest's CLI runner, which discovers Test_main above.
    unittest.main()
| [
"vondrejc@gmail.com"
] | vondrejc@gmail.com |
c630a294ad77a90e993584e4fabc30126993f236 | f1dc6a4c37dad17d32a379d94748bfe97d7b2784 | /Course 1/Week 4/kargerMinCut.py | 8625cf2054f7ca1ed8b68dee4b0ee76e0c7a496b | [] | no_license | chrism216/Coursera_Stanford_Algorithms | 4cf6248ffa57c8982dc2601819f71e89e8afa8ba | 3ec4eb5587818f0945dc9d1533415a527f90e8ce | refs/heads/master | 2020-03-26T04:32:35.128080 | 2018-10-23T21:51:03 | 2018-10-23T21:51:03 | 144,508,746 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,680 | py | from random import choice
from math import log
from copy import deepcopy
def merge_vertex(adj):
    """Contract one randomly chosen edge of the adjacency map *adj* in place.

    A random vertex is picked, then a random neighbour; the neighbour is
    absorbed into the first vertex.  The survivor inherits the absorbed
    vertex's edges, edges elsewhere are re-pointed at the survivor, and any
    self-loops produced by the contraction are removed.
    """
    keep = choice(list(adj))
    absorb = choice(adj[keep])
    inherited = adj.pop(absorb)
    # The surviving vertex takes over every edge of the absorbed one.
    adj[keep].extend(inherited)
    # Re-point edges that still referenced the absorbed vertex.
    for neighbour in inherited:
        adj[neighbour] = [keep if endpoint == absorb else endpoint
                          for endpoint in adj[neighbour]]
    # Contracting an edge creates self-loops on the survivor; drop them.
    adj[keep] = [endpoint for endpoint in adj[keep] if endpoint != keep]
def karger_merge(adj):
    """Contract random edges until two super-vertices remain; return the cut size.

    Destroys *adj* in the process.  After contraction, each surviving vertex
    keeps one parallel edge per crossing edge, so either adjacency list's
    length is the number of edges in the cut.
    """
    while len(adj) > 2:
        merge_vertex(adj)
    survivor = next(iter(adj))
    return len(adj[survivor])
def batch_karger_merge(adj):
    """Run a batch of randomised contractions and return the smallest cut found."""
    n = len(adj)
    # The lecture-recommended batch is n**2 * log(n) runs, which explodes for
    # large n; this uses a small fixed batch instead.
    num_runs = 10  # int(n**2 * log(n))
    print("Size of batch is: %s" % num_runs)
    print("Running...")
    best_cut = -1  # -1 == "no run finished yet"
    for _ in range(num_runs):
        # Contraction destroys the graph, so each run works on a deep copy.
        trial_graph = deepcopy(adj)
        trial_cut = karger_merge(trial_graph)
        if best_cut == -1 or trial_cut < best_cut:
            best_cut = trial_cut
    return best_cut
if __name__ == "__main__":
    import os
    # Resolve the input file relative to this script, not the CWD.
    this_folder = os.path.dirname(os.path.abspath(__file__))
    my_file = os.path.join(this_folder, 'kargerMinCut.txt')
    adj = {}
    # Each input line: a vertex label followed by its adjacent vertices,
    # tab-separated.
    with open(my_file) as f:
        for line in f:
            data = list(map(int, line.strip().split("\t")))
            adj[data[0]] = data[1:]
    print(batch_karger_merge(adj))
"chris_m216@hotmail.com"
] | chris_m216@hotmail.com |
41127de8fe849b6f4a3d581b297a921093e0711c | 8246307d867c7c60157c8e967d15ac1c920f7c5b | /Python/count_letter.py | 56479b1b7350bc7886ebc50e86b577b8e1460484 | [] | no_license | pratyushmp/code_opensource_2020 | abe4aabaa8984cf9eb020604b401ab7ae80e8370 | 2302a6dfa651aaaca8b71786ca526864a60b800d | refs/heads/master | 2023-06-24T07:48:13.339825 | 2020-11-17T04:31:13 | 2020-11-17T04:31:13 | 215,303,780 | 8 | 156 | null | 2023-06-16T06:05:47 | 2019-10-15T13:18:42 | Java | UTF-8 | Python | false | false | 662 | py | def count_letters(text):
result = {}
# Go through each letter in the text
count = 0
for letter in text.lower():
# Check if the letter needs to be counted or not
if letter.isalpha():
if letter not in result:
result[letter]=0
result[letter]+=1
# Add or increment the value in the dictionary
return result
# Demo invocations; the expected dict follows each call.
print(count_letters("AaBbCc"))
# output {'a': 2, 'b': 2, 'c': 2}
print(count_letters("Math is fun! 2+2=4"))
# output {'m': 1, 'a': 1, 't': 1, 'h': 1, 'i': 1, 's': 1, 'f': 1, 'u': 1, 'n': 1}
print(count_letters("This is a sentence."))
# output {'t': 2, 'h': 1, 'i': 2, 's': 3, 'a': 1, 'e': 3, 'n': 2, 'c': 1}
"gouravrawat255@gmail.com"
] | gouravrawat255@gmail.com |
3aaa08aa641d60b7ce03f8acbd64dbadb472d81d | b4b694e71464532dd19ab72b9f7a1a599c3a4e89 | /multimodal/average_predictions.py | 118b50d064f64188c77210a4019f6cda5d9e9152 | [] | no_license | omg-challenge-alpha/omg_challenge2018_submission_code | e12e1ae224c610af35747f5e5a1c305691795896 | c16f7480efee3fb14579f01d2be9a9313b1a01b2 | refs/heads/master | 2020-04-10T05:59:22.430998 | 2019-01-28T11:31:10 | 2019-01-28T11:31:10 | 160,842,897 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,487 | py |
import pandas as pd
import numpy as np
from scipy.signal import butter, lfilter, freqz
from scipy.stats import pearsonr
from matplotlib import pyplot as plt
def ccc(y_true, y_pred):
    """Concordance correlation coefficient between two sequences.

    Returns (ccc, rho) where rho is the Pearson correlation.  CCC penalises
    both low correlation and mean/scale mismatch between the sequences.
    """
    gt_mean = np.mean(y_true)
    pr_mean = np.mean(y_pred)
    rho, _ = pearsonr(y_pred, y_true)
    pr_std = np.std(y_pred)
    gt_std = np.std(y_true)
    denominator = pr_std ** 2 + gt_std ** 2 + (pr_mean - gt_mean) ** 2
    concordance = 2 * rho * gt_std * pr_std / denominator
    return concordance, rho
# Fermin's 2018 tricks
def f_trick(Y_train, preds):
    """Rescale *preds* onto the mean/std of the training labels.

    Z-scores the flattened predictions, then maps them to the training
    distribution's moments.  Returns a flat numpy array.
    """
    train_flat = Y_train.flatten()
    pred_flat = preds.flatten()
    train_mean = np.mean(train_flat)
    train_std = np.std(train_flat)
    pred_mean = np.mean(pred_flat)
    pred_std = np.std(pred_flat)
    # (preds z-scored) * train_std + train_mean
    return train_std * (pred_flat - pred_mean) / pred_std + train_mean
def get_Y(story, subject, smooth=0):
    """Load the raw label sequence for one subject/story pair.

    Reads train_val/original_labels/Subject_<subject>_Story_<story>.csv
    (relative to the CWD), skips the header row, and returns the values as
    a list of floats.

    smooth: kept for signature compatibility, but not applied here -- the
        original also ignored it; smoothing is handled by callers.
    """
    file_name = "/Subject_"+str(subject)+"_Story_"+str(story) + ".csv"
    labels_path = "train_val/original_labels" + file_name
    # Context manager fixes the original's leaked file handle.
    with open(labels_path) as labels_file:
        rows = labels_file.read().split("\n")[1:-1]
    return [float(x) for x in rows]
def get_all_Y(stories, subjects, normalize_labels=False, smooth=0):
    # Load and concatenate the label sequences for every (subject, story) pair.
    # NOTE(review): the raw Y is appended to Y_list BEFORE the smoothing /
    # normalisation below runs, so those two options never affect the returned
    # array -- the processed Y is discarded.  Also note callers in this file
    # pass `modalities` as the third positional argument (i.e. as
    # normalize_labels), so "fixing" the ordering here would silently change
    # every downstream result; confirm intent before touching it.
    Y_list = []
    for subject in subjects:
        for story in stories:
            Y = get_Y(story, subject)
            Y_list.append(Y)
        if smooth>0:
            Y = butter_lowpass_filter_bidirectional(np.array(Y), cutoff=smooth, fs=25, order=1)
        if normalize_labels:
            Y = (Y- np.min(Y))/(np.max(Y)-np.min(Y))
    return np.concatenate(Y_list, axis=0)
def butter_lowpass(cutoff, fs, order=5):
    """Design a digital low-pass Butterworth filter.

    cutoff: cutoff frequency in the same units as fs (Hz).
    fs: sampling frequency.
    Returns the (b, a) transfer-function coefficients.
    """
    nyquist = 0.5 * fs
    # scipy.signal.butter expects the cutoff normalised to Nyquist.
    return butter(order, cutoff / nyquist, btype='low', analog=False)
def butter_lowpass_filter(data, cutoff, fs, order=5):
    """Apply a one-directional Butterworth low-pass filter to *data*."""
    numer, denom = butter_lowpass(cutoff, fs, order=order)
    return lfilter(numer, denom, data)
def butter_lowpass_filter_bidirectional(data, cutoff=0.1, fs=25, order=1):
    # Zero-phase smoothing: filter the reversed signal, then filter the result
    # reversed back, so the two passes' phase shifts cancel out.
    # NOTE(review): scipy.signal.filtfilt implements the same idea with edge
    # padding; consider it if boundary artefacts matter.
    y_first_pass = butter_lowpass_filter(data[::-1].flatten(), cutoff, fs, order)
    y_second_pass = butter_lowpass_filter(y_first_pass[::-1].flatten(), cutoff, fs, order)
    return y_second_pass
# Sample counts of each test clip: rows are subjects 1-10, columns the test
# stories 3, 6, 7 (in that order).  Used below to size prediction buffers.
# Presumably frames at the 25 Hz rate used by the filters -- TODO confirm.
# NOTE(review): keeps the original "lenghts" spelling; the script below uses it.
test_lenghts = np.array([[9025, 5850,7050],
                [9175,5400,6325],
                [9450,7000,7000],
                [8775,4700,5700],
                [7025,6425,9475],
                [8850,6500,7850],
                [8800,5775,8125],
                [8975,5450,8825],
                [10325,6100,9550],
                [10425,5625, 8850]])
# Evaluate with "average prediction" for each subject (WITHOUT filter optimization)
# For every (subject, test story) pair: load each modality's latent
# predictions, smooth, rescale to training-label statistics, take a weighted
# average, smooth again, rescale again, plot, and optionally dump to CSV.
results = []
with_filter = True
subjects = [1,2,3,4,5,6,7,8,9,10]
modalities = ["rawface", "landmarks", "speech", "lexicons", "fullbody"] #["lexicons", "rawface", "landmarks", "speech", "fullbody"]
stories_trainVal = [1,2,4,5,8]
stories_test = [3,6,7]
results_modality = {m:0 for m in modalities}
save_csv = True
save_path = 'test_prediction_FINAL/'
finaldf = pd.DataFrame()
for i, subject in enumerate(subjects):
    for j, story in enumerate(stories_test):
        #model.load_weights(checkpoint_filename)
        #X_val_dic_s = get_all_X(stories_val, [subject], modalities)
        #X_val_list_s = [X_val_dic_s[k] for k in X_val_dic_s]
        # NOTE(review): `modalities` lands in get_all_Y's normalize_labels
        # positional slot; today that flag is a no-op inside get_all_Y, but
        # this call would change behaviour if get_all_Y were ever "fixed".
        Y_trainVal_s = get_all_Y(stories_trainVal, [subject], modalities)
        #Y_test_s = get_all_Y(stories_test, [subject], modalities)
        #Y_val_s
        # Per-modality weights in the final average.
        X_coeff = {
            "speech": 1. ,
            "rawface": .1,
            "lexicons": 1. ,
            "landmarks": .4,
            "fullbody": 1.
        }
        # Per-modality (low-pass cutoff, filter order).
        filters = {
            "speech": (0.004,1),
            "rawface": (0.006,1),
            "lexicons": (0.01,1),
            "landmarks":(0.004,1),
            "fullbody": (0.004,1)
        }
        X = {}
        len_preds_s = (test_lenghts[i][j])
        preds_s = np.zeros((len_preds_s))
        ourdf = pd.DataFrame({"Subject":np.repeat(subject,len_preds_s)})
        for modality in modalities:
            file_name = "/Subject_"+str(subject)+"_Story_"+str(story)+".npy"
            base_path = "test/"
            latent_vecs_path = base_path + modality + file_name
            X[modality] = np.load(latent_vecs_path)
            print(modality)
            print(X[modality].shape)
            X[modality] = X[modality].flatten()
            # Smooth, rescale to training statistics, then weight.
            X[modality] = butter_lowpass_filter_bidirectional(X[modality], cutoff=filters[modality][0], order=filters[modality][1])
            X[modality] = f_trick(Y_trainVal_s, X[modality])
            X[modality] = X[modality]*X_coeff[modality]
            preds_s += X[modality]
            ourdf[modality]=X[modality]
        finaldf = pd.concat([finaldf,ourdf])
        # Weighted mean across modalities.
        preds_s /= sum(X_coeff.values())
        if with_filter:
            preds_s = butter_lowpass_filter_bidirectional(preds_s, cutoff=0.01, order=1)
        preds_tricks_s = f_trick(Y_trainVal_s, preds_s)
        plt.figure(figsize=(13, 5))
        for modality in modalities:
            plt.plot(X[modality],label=modality)
        plt.plot(preds_tricks_s,label='average',lw=5)
        plt.legend()
        plt.show()
        if save_csv:
            pd.DataFrame({"valence":preds_tricks_s}).to_csv(save_path+'Subject_{0}_Story_{1}.csv'.format(subject,story))
            pdddd = pd.DataFrame({"valence":preds_tricks_s})
| [
"noreply@github.com"
] | noreply@github.com |
5d1ee53903bb1c103dcbfa5adcb02a724718dcbf | ea02a4c1466415a050684e7375dd255b212125e9 | /emailupdate/admin.py | f19ecbeed694f268ff2f085314d21bc20707dc19 | [] | no_license | scotplum/inthenout | bb9cb12fd800d31501f7c5274d42759f990eab3e | 772f935396a7ada4e5e35000034f089606e01382 | refs/heads/master | 2020-05-26T22:15:07.519560 | 2017-05-23T04:15:06 | 2017-05-23T04:15:06 | 82,509,502 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 115 | py | from django.contrib import admin
from models import Email
# Register your models here.
admin.site.register(Email) | [
"scotplum@gmail.com"
] | scotplum@gmail.com |
517e4b682e6b12974385b9c23201af4bebefd1d0 | 5679731cee36c537615d285ed72810f4c6b17380 | /513_FindBottomLeftTreeValue.py | 0de079fbf90fd9385df6647f65a7e451a7aa108a | [] | no_license | manofmountain/LeetCode | 6b76105190a9b62df65a7b56b6def4120498b9fa | 718f688b3d316e8c10ef680d9c21ecd518d062f8 | refs/heads/master | 2021-01-12T03:41:48.318116 | 2017-07-18T12:35:58 | 2017-07-18T12:35:58 | 78,252,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 924 | py | # 40.9%
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
#from collections import deque
class Solution(object):
    def findBottomLeftValue(self, root):
        """
        Return the value of the leftmost node on the deepest level of the
        tree (0 for an empty tree).

        :type root: TreeNode
        :rtype: int
        """
        if not root:
            return 0
        level = [root]
        leftmost = root.val
        while level:
            # First node in level order is the leftmost of that level.
            leftmost = level[0].val
            next_level = []
            for node in level:
                if node.left:
                    next_level.append(node.left)
                if node.right:
                    next_level.append(node.right)
            level = next_level
        return leftmost

    def findLeftMostNode(self, root):
        # BFS that enqueues right before left: the very last node visited is
        # the leftmost node of the deepest level.
        queue = [root]
        for node in queue:
            if node.right:
                queue.append(node.right)
            if node.left:
                queue.append(node.left)
        return node.val
| [
"noreply@github.com"
] | noreply@github.com |
91e49750928e22ddb674fc23c07aa8d6808eb4d3 | b21b7e9da588abfdb6e0ba4c42fc73270124b19e | /01-Qualification/02-Nesting_Depth/solution.py | 746e13d50f3615f823b2cb08a6dc8790b71ba3f2 | [] | no_license | luispmenezes/Google-Code-Jam-2020 | 442fc148afc887443807c949a2d76105e7f8a493 | 655ea66dbf36dbb93e2046d8daf60df5c3b9096b | refs/heads/master | 2022-04-10T06:22:46.210862 | 2020-04-05T11:28:16 | 2020-04-05T11:28:16 | 252,834,069 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 576 | py | def main():
t = int(input())
for t_idx in range(1, t + 1):
s = input()
new_s = ""
current_depth = 0
for n in s:
new_depth = int(n)
if new_depth > current_depth:
new_s += "(" * (new_depth - current_depth)
elif new_depth < current_depth:
new_s += ")" * (current_depth - new_depth)
new_s += n
current_depth = new_depth
new_s += ")" * current_depth
print('Case #%d: %s' % (t_idx, new_s))
if __name__ == '__main__':
    # Standard Code Jam stdin/stdout driver.
    main()
| [
"lspmenezes@gmail.com"
] | lspmenezes@gmail.com |
e0f658a2dfdc29c3743be77e71ff5e2cc4e36238 | 1818247390f45566d4a397e7aea1b2de7027582e | /lecturebook/models.py | 63da7f08a749cecf06bb5f280f1f0c87ad6985fd | [] | no_license | AnGyeIn/ere_app_server | 8e06da73a160c3e10ac41ed61424292fa9df8b21 | 577de77f7f84928a1deca59e9dcbb3d4add096a0 | refs/heads/master | 2022-11-27T16:50:07.366756 | 2020-08-05T07:47:40 | 2020-08-05T07:47:40 | 282,136,154 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,311 | py | import uuid
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, PermissionsMixin
from django.db import models
# Create your models here.
from django.utils import timezone
class StudentManager(BaseUserManager):
    # Manager so Student can serve as Django's AUTH_USER_MODEL with the
    # student number (sNum) as the login identifier.
    def create_user(self, sNum, name, pNum, password=None):
        # Password is hashed via set_password; pNum is stored as given.
        user = self.model(sNum=sNum, name=name, pNum=pNum)
        user.set_password(password)
        user.save()
        return user
    def create_superuser(self, sNum, name, pNum, password):
        # Same as create_user, plus all admin/staff/superuser flags.
        user = self.create_user(sNum, name, pNum, password)
        user.is_admin = True
        user.is_staff = True
        user.is_superuser = True
        user.save()
        return user
class Student(AbstractBaseUser, PermissionsMixin):
    # Custom user model; authentication is by student number (sNum).
    # UUID primary key instead of the default auto-increment id.
    uuid = models.UUIDField(
        primary_key=True,
        unique=True,
        editable=False,
        default=uuid.uuid4,
        verbose_name='PK'
    )
    name = models.TextField()
    # Student number; unique login field (see USERNAME_FIELD below).
    sNum = models.CharField(unique=True, max_length=10)
    # Presumably a phone number -- TODO confirm; stored as free text.
    pNum = models.TextField()
    is_active = models.BooleanField(default=True)
    is_admin = models.BooleanField(default=False)
    is_staff = models.BooleanField(default=False)
    is_superuser = models.BooleanField(default=False)
    USERNAME_FIELD = 'sNum'
    REQUIRED_FIELDS = ['name', 'pNum']
    objects = StudentManager()
    def __str__(self):
        return self.name
class LectureBook(models.Model):
    # A textbook listed by a student for exchange/sale.
    # NOTE(review): manually assigned integer PK (no autoincrement).
    id = models.IntegerField(primary_key=True)
    title = models.TextField()
    author = models.TextField()
    # The lecture/course the book is used for.
    lecture = models.TextField()
    # FK targets Student.sNum rather than the UUID primary key.
    owner = models.ForeignKey('Student', on_delete=models.CASCADE, to_field='sNum')
    option = models.TextField()
    isAvailable = models.BooleanField()
    def __str__(self):
        return self.title
class LectureBookRequest(models.Model):
    # A request from `receiver` for `owner`'s listed lecture book.
    # Title/name fields denormalise the related rows for display.
    lecturebook = models.ForeignKey('LectureBook', on_delete=models.CASCADE, to_field='id')
    lecturebookTitle = models.TextField()
    owner = models.ForeignKey('Student', on_delete=models.CASCADE, to_field='sNum', related_name='owning')
    ownerName = models.TextField()
    receiver = models.ForeignKey('Student', on_delete=models.CASCADE, to_field='sNum', related_name='receiving')
    receiverName = models.TextField()
    option = models.TextField()
    requestTime = models.DateTimeField(default=timezone.now)
    isAccepted = models.BooleanField(default=False)
    def __str__(self):
        return '{0} : {1}({2}) -> {3}({4})'.format(self.lecturebook, self.owner, self.owner.sNum, self.receiver, self.receiver.sNum)
| [
"agistudio97@gmail.com"
] | agistudio97@gmail.com |
c44632df47cf5fe63c486976a302b203e61fa980 | fc73adeb9998c3f144bc19d0f32bfa2e08ffe53f | /GBP FX HTML Email- generic.py | a291eaa316e85b9f8a078154677fcd76822df0bd | [] | no_license | llattan/Portfolio-Projects | 58e5dacc1172ab086ab74c7df93ea5065e984660 | 271067cc25af205a55f31b138a1a66e13a823a61 | refs/heads/master | 2022-11-14T20:37:10.295087 | 2020-06-21T17:32:24 | 2020-06-21T17:32:24 | 267,049,876 | 0 | 0 | null | 2020-05-26T13:49:49 | 2020-05-26T13:26:50 | null | UTF-8 | Python | false | false | 4,989 | py | from urllib.request import urlopen
from bs4 import BeautifulSoup
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
#from email.mime.base import MIMEBase
from email.mime.image import MIMEImage
import time
import requests
import csv
#from email import encoders
url="https://finance.yahoo.com/quote/GBPUSD=X?p=GBPUSD=X&.tsrc=fin-srch"
def getFX(url):
    # Scrape the current GBP/USD rate off the Yahoo Finance quote page and
    # return it as a string (also echoes it to stdout).
    # NOTE(review): the class below is Yahoo's generated styling class and
    # breaks whenever the page is redeployed; .find() returning None would
    # raise AttributeError on .text.
    html = urlopen(url)
    bsObj = BeautifulSoup(html, features = "html.parser")
    rate = bsObj.find("span", attrs={"class":'Trsdu(0.3s) Trsdu(0.3s) Fw(b) Fz(36px) Mb(-4px) D(b)'}).text
    print (rate)
    return rate
def get_url(symbol, years=1):
    """Build the Yahoo Finance CSV-download URL for *symbol*.

    The URL covers *years* years of daily history ending now, expressed as
    Unix-epoch period1/period2 query parameters.
    """
    seconds_per_year = 31622400  # 366 days; the script's "epoch year" unit
    period_end = time.time()
    period_start = period_end - years * seconds_per_year
    # %d truncates the float timestamps to whole seconds.
    return ("https://query1.finance.yahoo.com/v7/finance/download/%s?period1=%d&period2=%d&interval=1d&events=history" %
            (symbol, period_start, period_end))
def get_file(symbol):
    # Download the symbol's daily-history CSV from Yahoo and pull the dates
    # and closing prices of the five most recent rows.
    url = get_url(symbol)
    with requests.Session() as s:
        download = s.get(url)
        decoded_content = download.content.decode('utf-8')
        cr = csv.reader(decoded_content.splitlines(), delimiter=',')
        fileLines = list(cr)
        prevDays = []
        # i+1 walks the last five rows; column 0 is Date, column 4 Close.
        for i in range(-6,-1):
            prevDays.append(fileLines[i+1][0])
            prevDays.append(fileLines[i+1][4])
        # Flat list: [date1, close1, date2, close2, ...]
        return prevDays
# Current rate, and the GBP value of 150k USD (rate is USD per GBP, so the
# conversion uses 1/rate); "{:,}" adds thousands separators.
cur_rate = getFX(url)
conv = "{:,}".format(round((1/float(cur_rate)*150000),2))
prevDays=get_file('GBPUSD=X')
print(prevDays)
# Convert every odd index (the close prices) to the same 150k-USD-in-GBP figure.
for i in range(1, len(prevDays),2):
    prevDays[i] = "{:,}".format(round((1/float(prevDays[i])*150000),2))
# Placeholder credentials -- fill in before running.
sender_email = "EMAIL SENDER"
receiver_email = "EMAIL RECEIVER"
password = "YOUR PASSWORD HERE"
message = MIMEMultipart("alternative")
message["Subject"] = "Daily GBP FX Conversion"
message["From"] = sender_email
message["To"] = receiver_email
# Load the HTML template and substitute the computed values for its literal
# placeholder tokens ("cur_rate", "conv", "prevDays[0]".."prevDays[9]").
# `with` fixes the original's never-closed file handle, and the loop replaces
# a single unreadable line of 12 chained .replace() calls.
with open('/storage/emulated/0/Python/Portfolio/email html code.html') as html_file:
    html_body = html_file.read()
html_body = html_body.replace("cur_rate", str(cur_rate)).replace("conv", str(conv))
for _day_index in range(10):
    html_body = html_body.replace("prevDays[%d]" % _day_index, prevDays[_day_index])
# Create the plain-text and HTML version of your message
# (the plain text is the fallback for clients that can't render HTML)
text = ("Good day!\n"+
       "\n"+
       " The GBP to USD FX rate is currently:" + str(cur_rate) +". At this price, \n\n"
       "150k USD = " + str(conv)+ " GBP \n\n\n"+
       "The last 5 previous days were: \n" + prevDays[0] + ": "+ prevDays[1] + " GBP\n"+ prevDays[2]+ ": " +prevDays[3] +" GBP\n"+prevDays[4]+ ": " +prevDays[5] +" GBP\n"+ prevDays[6]+ ": " +prevDays[7]+ " GBP\n"+ prevDays[8]+ ": " +prevDays[9] +" GBP\n"
       )
# Turn these into plain/html MIMEText objects
part1 = MIMEText(text, "plain")
part2 = MIMEText(html_body, "html")
# Add HTML/plain-text parts to MIMEMultipart message
# The email client will try to render the last part first
message.attach(part1)
message.attach(part2)
#EMBED IMAGE FILES
def _load_inline_image(path, cid):
    """Read an image file and return it as an inline MIMEImage whose
    Content-ID matches the cid:<name> references in the HTML template."""
    with open(path, 'rb') as image_file:
        image = MIMEImage(image_file.read())
    image.add_header('Content-ID', '<%s>' % cid)
    return image

# The original repeated the open/read/close + add_header + attach dance four
# times by hand; the helper removes the duplication and guarantees each file
# handle is closed.  Attach order (banner, github, twitter, LI) is preserved.
for _img_path, _cid in [
        ('/storage/emulated/0/Python/Portfolio/notes-1158188.jpg', 'banner'),
        ('/storage/emulated/0/Python/Portfolio/Octocat.jpg', 'github'),
        ('/storage/emulated/0/Python/Portfolio/Twitter_Logo_Blue.png', 'twitter'),
        ('/storage/emulated/0/Python/Portfolio/LI-In-Bug.png', 'LI'),
]:
    message.attach(_load_inline_image(_img_path, _cid))
#Attaching an image file to the email:
# (dead example kept for reference -- shows how to attach, rather than
# inline-embed, an image as a regular file attachment)
'''img_file = '/storage/emulated/0/Python/Portfolio/notes-1158188.jpg'
try:
    with open(img_file, 'rb') as attachment:
        part3 = MIMEBase("application","octet-stream")
        part3.set_payload(attachment.read())
        encoders.encode_base64(part3)
        part3.add_header("Content-Disposition",f"attachment; filename= {img_file}")
        message.attach(part3)
except Exception as e:
    print(f'Oh no! We didn\'t find the attachment! {e}')'''
# Create secure connection with server and send email
# (implicit-TLS SMTP on port 465; the `with` block closes the connection)
with smtplib.SMTP_SSL("smtp.gmail.com", 465) as server:
    server.login(sender_email, password)
    server.sendmail(
        sender_email, receiver_email, message.as_string()
    )
"noreply@github.com"
] | noreply@github.com |
3715ccf9a3e985bb9759e80cc93d8ee95462f847 | 325c217fea68ffd93e04d0664a76ec72cd262ac1 | /entryparser.py | 938ed4704518bf73bb45a82100957e6e87375e1e | [] | no_license | egesadic/sozlook | 4f4cc9197bae9294042197ef6e1fcd2401b7afd8 | 407be34b715266dd5c91ae97af085a50dc01fc4e | refs/heads/master | 2020-04-15T00:51:00.584295 | 2019-01-05T21:52:51 | 2019-01-05T21:52:51 | 164,253,947 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 465 | py | import sozlook
import sozlook_kadinlarkulubu
import time
def auto_fetch(baslik):
    # Fetch a topic ("baslik" = title in Turkish) from the remote source,
    # parse its entries, persist them, and export them to Excel.
    sozlook.get_topic(baslik)
    plist = sozlook.parse_all_entries(baslik)
    sozlook.save_entries(plist)
    sozlook.to_excel(plist, baslik)
    # "Tamamlandı" = "Completed".
    print("Tamamlandı: " + baslik)
    # NOTE(review): explicit del of a local about to go out of scope is a no-op.
    del plist
def local_fetch(baslik):
    # Load previously saved entries for the topic from the local
    # "<title>-entrybase" store.
    f = sozlook.load_entries(baslik+"-entrybase")
    return f
# Script body: scrape the "tampon" topic from kadinlarkulubu and export it.
plist = sozlook_kadinlarkulubu.kadinlarkulubu_search("tampon")
sozlook.to_excel(plist, "tampon")
"noreply@github.com"
] | noreply@github.com |
9548ef37a3de605b5bab00df1a6f4567c0a51d12 | ccdeebbbfc6476fdad2984dbcaaba7296e920038 | /stubs/pep8-naming/pep8ext_naming.pyi | 13e7e48fbf50ac2ee4957ce90ab75e334f89a065 | [
"MIT",
"Apache-2.0"
] | permissive | hoefling/typeshed | 75d85144aa8b69fac1872f7d0239f910b509b99c | c9e6bd2df9d2aa05927ce0576c24cbb5740d7361 | refs/heads/master | 2023-07-22T08:50:00.789424 | 2022-10-10T14:26:07 | 2022-10-10T14:26:07 | 208,133,768 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,115 | pyi | import ast
from argparse import Namespace
from collections.abc import Generator, Iterable
from typing import Any
# Type stubs for the pep8-naming flake8 plugin (pep8ext_naming module).
__version__: str
PYTHON_VERSION: tuple[int, int, int]
CLASS_METHODS: frozenset[str]
METACLASS_BASES: frozenset[str]
METHOD_CONTAINER_NODES: set[ast.AST]
class NamingChecker:
    # flake8 plugin entry point: instantiated per file, run() yields
    # (line, col, message, type) naming violations.
    name: str
    version: str
    visitors: Any
    decorator_to_type: Any
    ignore_names: frozenset[str]
    parents: Any
    def __init__(self, tree: ast.AST, filename: str) -> None: ...
    @classmethod
    def add_options(cls, parser: Any) -> None: ...
    @classmethod
    def parse_options(cls, option: Namespace) -> None: ...
    def run(self) -> Generator[tuple[int, int, str, type[Any]], None, None]: ...
    def tag_class_functions(self, cls_node: ast.ClassDef) -> None: ...
    def set_function_nodes_types(self, nodes: Iterable[ast.AST], ismetaclass: bool, late_decoration: dict[str, str]) -> None: ...
    def __getattr__(self, name: str) -> Any: ...  # incomplete (other attributes are normally not accessed)
def __getattr__(name: str) -> Any: ...  # incomplete (other attributes are normally not accessed)
| [
"noreply@github.com"
] | noreply@github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.