| Column | Type | Range / distinct values |
|---|---|---|
| blob_id | string | length 40-40 |
| directory_id | string | length 40-40 |
| path | string | length 2-616 |
| content_id | string | length 40-40 |
| detected_licenses | list | length 0-69 |
| license_type | string | 2 classes |
| repo_name | string | length 5-118 |
| snapshot_id | string | length 40-40 |
| revision_id | string | length 40-40 |
| branch_name | string | length 4-63 |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 2.91k-686M, nullable (⌀) |
| star_events_count | int64 | 0-209k |
| fork_events_count | int64 | 0-110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 213 classes |
| src_encoding | string | 30 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 2-10.3M |
| extension | string | 246 classes |
| content | string | length 2-10.3M |
| authors | list | length 1-1 |
| author_id | string | length 0-212 |

The records below list these fields in this order, separated by `|`, with each file's content shown inline.
2558802259f4e94409d37148c0262295c24ccc25
|
a4a18abbfae9584116a847bec4a1dfb9e9d3d306
|
/exporter/context.py
|
3d31a81cc82a0fcd48fc84534070802a36690bbf
|
[
"MIT"
] |
permissive
|
KTOmega/Slack-Exporter
|
c6e4d15a5d0c775dd98ef71a3f1528eea337924f
|
b9bec58464cebd64be61b0cc30cffa1a63737007
|
refs/heads/master
| 2023-03-09T13:38:47.474733
| 2021-02-25T02:51:43
| 2021-02-25T02:51:43
| 341,296,567
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,408
|
py
|
from dataclasses import dataclass
import json
import os
from typing import Dict, Any
from slack_sdk.web.async_client import AsyncWebClient
from . import constants
from .downloader import FileDownloader
from .fragment import FragmentFactory
class JsonSerializable:
    def to_json(self) -> str:
        return json.dumps(self.__dict__, separators=(',', ':'))

    def to_dict(self) -> Dict[str, Any]:
        return self.__dict__


@dataclass
class ExporterMetadata(JsonSerializable):
    export_time: int


@dataclass
class ExporterContext:
    export_time: int
    output_directory: str
    slack_client: AsyncWebClient
    downloader: FileDownloader
    fragments: FragmentFactory
    last_export_time: int = 0

    async def close(self):
        await self.downloader.close()
        self.fragments.close()

    def to_metadata(self) -> ExporterMetadata:
        return ExporterMetadata(self.export_time)

    def save(self):
        self.downloader.write_json(constants.CONTEXT_JSON_FILE, self.to_metadata().to_dict())

    @staticmethod
    def get_last_export_time(base_dir) -> int:
        context_file = os.path.join(base_dir, constants.CONTEXT_JSON_FILE)
        if os.path.exists(context_file):
            with open(context_file, "r") as fd:
                context = json.load(fd)
            if "export_time" in context:
                return context["export_time"]
        return 0
|
[
"me@kevintran.codes"
] |
me@kevintran.codes
|
fef7c0c91068ec1bbab26c4cd51b3f1a5a9f0367
|
21f96bc814627ffb98c965f7615b893c7490b16f
|
/Ejercicio 2/clase_helado.py
|
d2447921a4f3e868cead9b2a108bf7fdff444962
|
[] |
no_license
|
msoto-27/ejercicios-u3
|
2b09cec1ac18018a9eb98d0e8b25e7c6e3043133
|
9ad6b72bdf84efe94c2cec3740728ea4d3f8886d
|
refs/heads/master
| 2022-10-08T11:49:49.898024
| 2020-06-09T00:30:13
| 2020-06-09T00:30:13
| 266,607,755
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 475
|
py
|
class Helado(object):
    __gramos = 0
    __sabores = []

    def __init__(self, gramos, sabores):
        self.__gramos = gramos
        self.__sabores = sabores

    def getGramos(self):
        return self.__gramos

    def getSabores(self):
        return self.__sabores

    def getGramosPorSabor(self, sabor):
        c = 0
        for i in self.__sabores:
            if sabor == i:
                c += self.__gramos / len(self.__sabores)
        return c
|
[
"noreply@github.com"
] |
msoto-27.noreply@github.com
|
48593e14dda5f79c55a7ca3e20e0609b9f360191
|
656b45ea0ec5bb9645527fe124b7a3670aa06efd
|
/sentry_variable_update.py
|
3410232a7521ed96ce486deea5bde6ab8cab71fe
|
[] |
no_license
|
SabiqulHassan13/python3-coding-try
|
5e982cc4b9a1832021c82d35cc73ed7e83510f21
|
d90c9eda628a5874ebff4aef4b5bb842c71ae160
|
refs/heads/main
| 2023-02-10T11:19:14.830872
| 2021-01-03T15:25:09
| 2021-01-03T15:25:09
| 326,437,762
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 581
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 13 10:59:46 2018
@author: Robin
"""
# Sentry variable update.
# `counter` is the sentry variable here.
# If you don't update the sentry variable's value, the loop will run forever;
# use Ctrl+C to terminate an infinite loop.
counter = 0
while counter <= 10:
    print("value of :", counter, "is", counter)
    # Only strings can be concatenated with +, not numbers;
    # to concatenate a number, first convert it with str(number_value).
    print("value of : " + str(counter) + " is " + str(counter))
    counter += 1
|
[
"sabiqulhassan98@gmail.com"
] |
sabiqulhassan98@gmail.com
|
6bde59f8802326df778c3f3ffebdbb6839952624
|
7b20e2f86c2bb2145ae9ca5bcd4b9ad1566e79b0
|
/ABC/ABC142/C.py
|
c206041e53f07ca481b643a9184535cff9b3a464
|
[] |
no_license
|
pto8913/KyoPro
|
5f5e769960dfec73af5b0f338f32659ff067094b
|
29ebc30a3d45fea273cb9034fba8311673a406dd
|
refs/heads/master
| 2021-06-13T16:43:40.275854
| 2021-03-23T00:02:25
| 2021-03-23T00:02:25
| 174,684,331
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 332
|
py
|
import sys
stdin = sys.stdin
ns = lambda : stdin.readline().rstrip()
ni = lambda : int(ns())
na = lambda : map(int, stdin.readline().split())
def main():
    n = ni()
    a = list(na())
    d = {}
    for i, a in enumerate(a):
        d[a] = i+1
    res = []
    for i in range(n):
        res.append(d[i+1])
    print(*res)


main()
|
[
"nn3kskyou@gmail.com"
] |
nn3kskyou@gmail.com
|
6c0098dcc19c3cbece8de20ee5fcd0b905bef052
|
009ac7a8a9a219affa944eff3747924a9639ef43
|
/lib/rram_NN/train.py
|
e613c03410ad8ae91217373e070768d3e891f650
|
[
"MIT"
] |
permissive
|
amohant4/RSA_TF
|
586b797f51d882255a6ff5da9063f1c16e09082c
|
5b6705e167ca4f7039e900f9921b90087c9542fd
|
refs/heads/master
| 2020-04-20T09:04:29.242554
| 2019-02-01T20:31:48
| 2019-02-01T20:31:48
| 168,756,637
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 25,669
|
py
|
# file: train.py
# Author : Abinash Mohanty
# Date : 05/10/2017
# Project : RRAM training NN
import tensorflow as tf
import os
import sys
from rram_NN.config import cfg
from rram_NN.rram_modeling import addDefects, readVerifyTopN
import numpy as np
import cPickle
import math
import random
import matplotlib.pyplot as plt
# change dataset_name to dataset.name
class SolverWrapper(object):
def __init__(self, sess, saver, dataset, network, output_dir, dataset_name, stddevVar):
"""
SolverWrapper constructor. Inputs are:
tensorflow session, tensorflow saver, dataset, network, output directory.
"""
self.net = network
self.stddevVar = stddevVar
self.dataset_name = dataset_name
self.dataset = dataset
self.output_dir = output_dir
self.saver = saver
self._masks = None
self.v_trainable = self._get_trainable()
self.v_non_trainable = self._get_non_trainable()
self.optimizer = self.net.optimizer
self.pretrained_model_tf = os.path.join(self.output_dir, self.net.name, 'baseline', \
self.net.name+'_'+self.dataset_name+'.ckpt')
self.pretrained_model_pkl = os.path.join(self.output_dir, self.net.name, 'baseline', \
self.net.name+'_'+self.dataset_name +'.pkl')
self.pretrained_variation_pkl = os.path.join(self.output_dir, self.net.name, 'variation', \
self.net.name +'_'+ self.dataset_name + '_' + str(stddevVar) +'.pkl')
self.summaryDir = os.path.join(output_dir, self.net.name,'summary')
self._create_dirs()
if cfg.DEBUG_ALL or cfg.WRITE_TO_SUMMARY:
self.writer = tf.summary.FileWriter(self.summaryDir)
def _create_dirs(self):
if not os.path.exists(self.output_dir):
os.makedirs(self.output_dir)
if not os.path.exists(os.path.join(self.output_dir, self.net.name)):
os.makedirs(os.path.join(self.output_dir, self.net.name))
if not os.path.exists(os.path.join(self.output_dir, self.net.name, 'baseline')):
os.makedirs(os.path.join(self.output_dir, self.net.name, 'baseline'))
if not os.path.exists(os.path.join(self.output_dir, self.net.name, 'variation')):
os.makedirs(os.path.join(self.output_dir, self.net.name, 'variation'))
if not os.path.exists(self.summaryDir):
os.makedirs(self.summaryDir)
def _snapshot(self, sess, iter, mode=1):
"""
Writes snapshot of the network to file in output directory.
inputs: tensorflow session, current iteration number, mode.
mode :
0 = baseline model
1 = with variation model
2 = retrained model to rectify variation
"""
prefix = ''
if mode == 2:
prefix='retrained_'
elif mode == 1:
prefix='withVariation_'
filename = prefix + self.net.name + '_iter_{:d}_'.format(iter+1) +self.dataset_name+ '.ckpt'
if mode == 0:
filename = prefix + self.net.name +'_'+self.dataset_name+ '.ckpt'
if mode == 0:
filename = os.path.join(self.output_dir, self.net.name, 'baseline', filename)
elif mode == 1:
filename = os.path.join(self.output_dir, self.net.name,'variation', filename)
elif mode == 2:
filename = os.path.join(self.output_dir, self.net.name, filename)
self.saver.save(sess, filename)
if cfg.DEBUG_TRAINING or cfg.DEBUG_ALL:
print 'Wrote snapshot to: {:s}'.format(filename)
def _train_model_software_baseline(self, sess, max_iters):
"""
Trains a model and implements the network training loop.
Inputs: tensorflow session, maximum number of iterations.
This is the software base line code that trains using float32
"""
print("Creating baseline model ... ")
accuracy = self.net.accuracy
grads = self.net.gradients
grads_and_vars = list(zip(grads, self.v_trainable))
train_step = self.optimizer.apply_gradients(grads_and_vars=grads_and_vars,global_step=self.net.global_step)
sess.run(tf.global_variables_initializer())
if cfg.DEBUG_ALL or cfg.DEBUG_TRAINING:
merged = tf.summary.merge_all()
for iter in range(max_iters):
batch = self.dataset.train.next_batch(cfg.TRAIN.TRAIN_BATCH_SIZE)
if cfg.DEBUG_TRAINING or cfg.DEBUG_ALL:
if (iter+1) % 1000 == 0:
train_accuracy = accuracy.eval(feed_dict={self.net.x : batch[0],
self.net.y_ : batch[1],
self.net.phase : 0.0,
self.net.keep_prob : 1.0})
print("Step : %d, training accuracy : %g"%(iter, train_accuracy))
feed_dict = {self.net.x : batch[0], self.net.y_ : batch[1], self.net.phase : 1.0 ,self.net.keep_prob : 0.5}
if cfg.DEBUG_ALL or cfg.WRITE_TO_SUMMARY:
summary, _ = sess.run([merged, train_step], feed_dict=feed_dict)
self.writer.add_summary(summary, iter)
else:
_ = sess.run([train_step], feed_dict=feed_dict)
self._snapshot(sess, iter, 0)
self._saveTrainedModel(sess, self.pretrained_model_pkl)
def _saveTrainedModel(self, sess, location):
"""
Helper function to save models as python variables.
It is stored using cPickle as binary files. Prior to this
the variables in the models must be initialized.
Args:
sess: tensorflow session
location: file address where the models will be savedd
"""
variables_names =[v.name for v in tf.global_variables()]
values = sess.run(variables_names)
netParams = dict(zip(variables_names, values))
with open(location, 'wb') as fid:
cPickle.dump(netParams, fid, cPickle.HIGHEST_PROTOCOL)
print 'saved models at "{}"'.format(location)
def _find_or_train_baseline(self, sess, iters):
"""
Function looks for the baseline software models.
Incase it doesn't find that, it call the _train_model_software_baseline() to create baseline models.
"""
filename = self.net.name + '_' + self.dataset_name + '.ckpt'
if not os.path.isfile(os.path.join(self.output_dir, self.net.name,'baseline', filename + '.index')):
print 'Baseline software models not found. Training software baseline'
self._train_model_software_baseline(sess, iters)
else:
print 'Baseline models for {} found at {}'.format(self.net.name, os.path.join(self.output_dir, self.net.name,'baseline'))
self.saver.restore(sess, os.path.join(self.output_dir, self.net.name,'baseline', filename))
return self._eval_net('Baseline')
def _create_mask_v0(self, percentRetrainable):
"""
Function to create a random mask to stop gradient flow through specific
prameters of the network while retraining the network.
This creates a list of ndarrays with values equal to 0/1 and dimention
same as the variables in the net.
"""
if cfg.DEBUG_LEVEL_1 or cfg.DEBUG_ALL:
print 'Creating masks for stoping random gradient with \
retention ratio = {}'.format(percentRetrainable)
allShapes = [v.get_shape() for v in tf.trainable_variables()]
keys = [v.name for v in tf.trainable_variables()]
masks = {}
for i in range(len(allShapes)):
mask = np.random.rand(*allShapes[i])
mask = np.where(mask < percentRetrainable/100.0, 1., 0.)
masks[keys[i]] = mask
return masks
def _create_mask_debug(self, percentRetrain):
shapes = [v.get_shape().as_list() for v in tf.trainable_variables()]
keys = [v.name for v in tf.trainable_variables()]
masks = {}
for i in range(len(shapes)):
mask = np.zeros(shapes[i])
masks[keys[i]] = mask
return masks
def _create_mask_topk(self, percentRetrain):
if cfg.DEBUG_LEVEL_1 or cfg.DEBUG_ALL:
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Creating masks for tarining top {}% of parameters '.format(percentRetrain)
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
shapes = [v.get_shape().as_list() for v in tf.trainable_variables()]
keys = [v.name for v in tf.trainable_variables()]
masks = {}
if cfg.DEBUG_ALL or cfg.DEBUG_TRAINING:
totalRetrained = 0
totalParams = 0
for i in range(len(shapes)):
mask = np.zeros(shapes[i])
dims = len(shapes[i])
if dims == 4:
pecentPerDiagonal = 100.0/float(min(shapes[i][0], shapes[i][1]))
numDiagonals = int(math.ceil(percentRetrain / pecentPerDiagonal))
x = np.arange(shapes[i][0])
y = np.arange(shapes[i][1])
for ii in range(numDiagonals):
for j in range(shapes[i][3]):
for k in range(shapes[i][2]):
random.shuffle(x)
random.shuffle(y)
for m in range(min(shapes[i][0], shapes[i][1])):
mask[x[m],y[m],k,j] = 1.0
if cfg.DEBUG_ALL or cfg.DEBUG_TRAINING:
totalParams += shapes[i][0]*shapes[i][1]*shapes[i][2]*shapes[i][3]
totalRetrained += sum(sum(sum(sum(mask))))
print 'For layer ',str(keys[i]),' percentage per diagonal = ',pecentPerDiagonal
print 'For layer ',str(keys[i]),' number of diagonals selected = ',numDiagonals
print 'For layer ',keys[i],' % of retrained parameters = ', float(sum(sum(sum(sum(mask)))))*100.0/float(shapes[i][0]*shapes[i][1]*shapes[i][2]*shapes[i][3])
print '~~~ ~~~ ~~~ ~~~~ '
elif dims == 2:
maxDim = max(shapes[i][0], shapes[i][1])
pecentPerDiagonal = float(min(shapes[i][0],shapes[i][1]))*100/float(shapes[i][0]*shapes[i][1])
numDiagonals = int(math.ceil(percentRetrain/pecentPerDiagonal))
x = np.arange(maxDim)
y = np.arange(maxDim)
dummy = np.zeros((maxDim, maxDim))
for k in range(numDiagonals):
random.shuffle(x)
random.shuffle(y)
for m in range(len(x)):
dummy[x[m],y[m]] = 1.0
mask = dummy[:shapes[i][0], :shapes[i][1]]
if cfg.DEBUG_ALL or cfg.DEBUG_TRAINING:
totalRetrained += sum(sum(mask))
totalParams += shapes[i][0]*shapes[i][1]
print 'For layer ',keys[i],' percentage per diagonal = ',pecentPerDiagonal
print 'For layer ',keys[i],' number of diagonals selected = ',numDiagonals
print 'For layer ',keys[i],' % of retrained parameters = ', float(sum(sum(mask)))*100.0/float(shapes[i][0]*shapes[i][1])
print '~~~ ~~~ ~~~ ~~~~ '
elif dims == 1:
x = np.arange(shapes[i][0])
random.shuffle(x)
num = int(math.ceil(shapes[i][0]*percentRetrain/100.0))
for p in range(num):
mask[x[p]] = 1.0
if cfg.DEBUG_ALL:
totalRetrained += num
totalParams += shapes[i][0]
if cfg.DEBUG_ALL:
print 'For layer ',keys[i],' % of num retrained = ', num
print 'For layer ',keys[i],' % of num totalParams = ', shapes[i][0]
print 'For layer ',keys[i],' % of retrained parameters = ', float(sum(mask))*100.0/float(shapes[i][0])
print '~~~ ~~~ ~~~ ~~~~ '
masks[keys[i]] = mask
if cfg.DEBUG_ALL or cfg.DEBUG_ALL:
totalPercentRetrained = float(totalRetrained)*100.0/float(totalParams)
print 'Final Retrained parameter % = ', totalPercentRetrained
return masks
def _create_mask(self, percentRetrain):
"""
Function to create a random mask to stop gradient flow through specific
prameters of the network while retraining the network.
This creates a list of ndarrays with values equal to 0/1 and dimention
same as the variables in the net.
"""
if cfg.DEBUG_LEVEL_1 or cfg.DEBUG_ALL:
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Creating masks for stoping random gradient with retention ratio = {}'.format(percentRetrain)
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
shapes = [v.get_shape().as_list() for v in tf.trainable_variables()]
keys = [v.name for v in tf.trainable_variables()]
masks = {}
if cfg.DEBUG_ALL or cfg.DEBUG_TRAINING:
totalRetrained = 0
totalParams = 0
for i in range(len(shapes)):
mask = np.zeros(shapes[i])
dims = len(shapes[i])
if dims == 4:
pecentPerDiagonal = 100.0/float(min(shapes[i][0], shapes[i][1]))
numDiagonals = int(math.ceil(percentRetrain / pecentPerDiagonal))
x = np.arange(shapes[i][0])
y = np.arange(shapes[i][1])
for ii in range(numDiagonals):
for j in range(shapes[i][3]):
for k in range(shapes[i][2]):
random.shuffle(x)
random.shuffle(y)
for m in range(min(shapes[i][0], shapes[i][1])):
mask[x[m],y[m],k,j] = 1.0
if cfg.DEBUG_ALL or cfg.DEBUG_TRAINING:
totalParams += shapes[i][0]*shapes[i][1]*shapes[i][2]*shapes[i][3]
totalRetrained += sum(sum(sum(sum(mask))))
print 'For layer ',str(keys[i]),' percentage per diagonal = ',pecentPerDiagonal
print 'For layer ',str(keys[i]),' number of diagonals selected = ',numDiagonals
print 'For layer ',keys[i],' % of retrained parameters = ', float(sum(sum(sum(sum(mask)))))*100.0/float(shapes[i][0]*shapes[i][1]*shapes[i][2]*shapes[i][3])
print '~~~ ~~~ ~~~ ~~~~ '
elif dims == 2:
maxDim = max(shapes[i][0], shapes[i][1])
pecentPerDiagonal = float(min(shapes[i][0],shapes[i][1]))*100/float(shapes[i][0]*shapes[i][1])
numDiagonals = int(math.ceil(percentRetrain/pecentPerDiagonal))
x = np.arange(maxDim)
y = np.arange(maxDim)
dummy = np.zeros((maxDim, maxDim))
for k in range(numDiagonals):
random.shuffle(x)
random.shuffle(y)
for m in range(len(x)):
dummy[x[m],y[m]] = 1.0
mask = dummy[:shapes[i][0], :shapes[i][1]]
if cfg.DEBUG_ALL or cfg.DEBUG_TRAINING:
totalRetrained += sum(sum(mask))
totalParams += shapes[i][0]*shapes[i][1]
print 'For layer ',keys[i],' percentage per diagonal = ',pecentPerDiagonal
print 'For layer ',keys[i],' number of diagonals selected = ',numDiagonals
print 'For layer ',keys[i],' % of retrained parameters = ', float(sum(sum(mask)))*100.0/float(shapes[i][0]*shapes[i][1])
print '~~~ ~~~ ~~~ ~~~~ '
elif dims == 1:
x = np.arange(shapes[i][0])
random.shuffle(x)
num = int(math.ceil(shapes[i][0]*percentRetrain/100.0))
for p in range(num):
mask[x[p]] = 1.0
if cfg.DEBUG_ALL:
totalRetrained += num
totalParams += shapes[i][0]
if cfg.DEBUG_ALL:
print 'For layer ',keys[i],' % of num retrained = ', num
print 'For layer ',keys[i],' % of num totalParams = ', shapes[i][0]
print 'For layer ',keys[i],' % of retrained parameters = ', float(sum(mask))*100.0/float(shapes[i][0])
print '~~~ ~~~ ~~~ ~~~~ '
masks[keys[i]] = mask
if cfg.DEBUG_ALL or cfg.DEBUG_ALL:
totalPercentRetrained = float(totalRetrained)*100.0/float(totalParams)
print 'Final Retrained parameter % = ', totalPercentRetrained
return masks
def _get_trainable(self):
"""
Function to return the trainable variables in the current graph.
"""
v_trainable = [v for v in tf.trainable_variables()]
return v_trainable
def _get_non_trainable(self):
"""
Function to return the non_trainable variables in the current graph.
It returns only the trainable parameters in the baseline neural network which are
non trainable in the SRAM branched neural network.
The returned values do not include the variables like global step, learning rate etc.
"""
if cfg.DEBUG_TRAINING or cfg.DEBUG_ALL:
print '[',os.path.basename(sys.argv[0]),'] Separating trainable and non-trainable variables in the graph.'
v_all = [v for v in tf.global_variables()]
v_trainable = [v for v in tf.trainable_variables()]
v_no_train = list(set(v_all)-set(v_trainable))
v_non_trainable = [v for v in v_no_train if v.name != 'global_step:0']
return v_non_trainable
def _load_model(self, path, variableList):
"""
Function to load model parameters from cPickle file.
the pickle file should be a dictoary with keys as the variable names
and values as the variable values.
Args:
path: location of the pickle file
variableList: list of tensors which are to be loaded.
"""
with open(path) as fid:
if cfg.DEBUG_TRAINING or cfg.DEBUG_ALL:
print '[',os.path.basename(sys.argv[0]),'] Models weight from : ', path
params = cPickle.load(fid)
for i in range(len(variableList)):
if cfg.DEBUG_TRAINING or cfg.DEBUG_ALL:
print '[',os.path.basename(sys.argv[0]),'] Loading variables for : ', variableList[i].name, ' - ', variableList[i].shape
variableList[i].load(params[variableList[i].name])
def _init_baseline_network(self, sess):
"""
Initializes only the baseline network.
creates gradient mask / connectivity matrix for SRAM crossbar.
Args:
sess: tensorflow session in which graph is present.
"""
sess.run(tf.global_variables_initializer())
self._load_model(self.pretrained_model_pkl, self.v_trainable)
def _init_network(self, sess, percentRetrainable):
"""
Initializes the network.
creates gradient mask / connectivity matrix for SRAM crossbar.
Args:
sess: tensorflow session in which graph is present.
percentRetrainable: percentage of trainable parameters in SRAM crossbar.
"""
sess.run(tf.global_variables_initializer())
self._load_model(self.pretrained_model_pkl, self.v_non_trainable)
self._masks = self._create_mask(percentRetrainable)
#self._masks = self._create_mask_debug(percentRetrainable)
if cfg.DEBUG_TRAINING or cfg.DEBUG_ALL:
for key in self._masks.keys():
print key, ' -- ',self._masks[key].shape
for i in range(len(self.v_trainable)):
parameters = self.v_trainable[i].eval()
parameters = parameters * self._masks[self.v_trainable[i].name]
self.v_trainable[i].load(parameters)
def _eval_net(self, description=''):
"""
This function evaluates the performance of the current network in the given session.
Args:
sess: Tensorflow session
description: string describing the network for loggin.
"""
accuracy = self.net.accuracy.eval(feed_dict={self.net.x : self.dataset.test.images,
self.net.y_ : self.dataset.test.labels,
self.net.phase: 0.0,
self.net.keep_prob : 1.0})
print description, ' Network accuracy : {}'.format(accuracy)
return accuracy
def _train_model(self, sess, max_iters):
"""
Function to train the model.
Args:
sess: tensorflow session.
max_iters: maximum number of batches.
"""
accuracy = self.net.accuracy
grads = self.net.gradients
keys = [v.name for v in self.v_trainable]
for i in range(len(grads)):
grads[i] = grads[i]*self._masks[keys[i]]
grads_and_vars = list(zip(grads, self.v_trainable))
train_step = self.optimizer.apply_gradients(grads_and_vars=grads_and_vars,global_step=self.net.global_step)
if cfg.DEBUG_ALL or cfg.DEBUG_TRAINING:
merged = tf.summary.merge_all()
last_snapshot_iter = -1
for iter in range(max_iters):
batch = self.dataset.train.next_batch(cfg.TRAIN.TRAIN_BATCH_SIZE)
if cfg.DEBUG_TRAINING or cfg.DEBUG_ALL:
if (iter+1) % 1000 == 0:
train_accuracy = accuracy.eval(feed_dict={self.net.x : batch[0],
self.net.y_ : batch[1],
self.net.phase: 0.0,
self.net.keep_prob : 1.0})
print("Step : %d, training accuracy : %g"%(iter, train_accuracy))
feed_dict = {self.net.x : batch[0], self.net.y_ : batch[1], self.net.phase : 1.0,self.net.keep_prob : 0.5}
if cfg.DEBUG_ALL or cfg.WRITE_TO_SUMMARY:
summary, _ = sess.run([merged, train_step], feed_dict=feed_dict)
self.writer.add_summary(summary, iter)
else:
_ = sess.run([train_step], feed_dict=feed_dict)
if (iter+1) % cfg.TRAIN.SNAPSHOT_ITERS == 0:
last_snapshot_iter = iter
self._snapshot(sess, iter, 2)
if last_snapshot_iter != iter:
self._snapshot(sess, iter, 2)
def _train_model_v1(self, sess, max_iters):
"""
Function to train the model.
Args:
sess: tensorflow session.
max_iters: maximum number of batches.
"""
accuracy = self.net.accuracy
grads = self.net.gradients
keys = [v.name for v in self.v_trainable]
for i in range(len(grads)):
grads[i] = grads[i]*self._masks[keys[i]]
grads_and_vars = list(zip(grads, self.v_trainable))
train_step = self.optimizer.apply_gradients(grads_and_vars=grads_and_vars,global_step=self.net.global_step)
if cfg.DEBUG_ALL or cfg.DEBUG_TRAINING:
merged = tf.summary.merge_all()
last_snapshot_iter = -1
acc = []
iterations = []
for iter in range(max_iters):
batch = self.dataset.train.next_batch(cfg.TRAIN.TRAIN_BATCH_SIZE)
if cfg.DEBUG_TRAINING or cfg.DEBUG_ALL:
if (iter+1) % 50 == 0:
train_accuracy = accuracy.eval(feed_dict={self.net.x : self.dataset.test.images,
self.net.y_ : self.dataset.test.labels,
self.net.phase: 0.0,
self.net.keep_prob : 1.0})
print("Step : %d, training accuracy : %g"%(iter, train_accuracy))
acc.append(train_accuracy)
iterations.append(iter)
feed_dict = {self.net.x : batch[0], self.net.y_ : batch[1], self.net.phase : 1.0,self.net.keep_prob : 0.5}
if cfg.DEBUG_ALL or cfg.WRITE_TO_SUMMARY:
summary, _ = sess.run([merged, train_step], feed_dict=feed_dict)
self.writer.add_summary(summary, iter)
else:
_ = sess.run([train_step], feed_dict=feed_dict)
if (iter+1) % cfg.TRAIN.SNAPSHOT_ITERS == 0:
last_snapshot_iter = iter
self._snapshot(sess, iter, 2)
if last_snapshot_iter != iter:
self._snapshot(sess, iter, 2)
return acc, iterations
def _add_variation_to_baseline(self, sess, stddevVar, num_levels=32, writeToPickle=True):
self._init_baseline_network(sess)
_ = self._eval_net('Baseline')
"""
addDefects(self.v_trainable, stddevVar, num_levels, cfg.RRAM.SA0, cfg.RRAM.SA1)
acc = self._eval_net('Write Variation')
"""
#"""
if not os.path.exists(self.pretrained_variation_pkl):
addDefects(self.v_trainable, stddevVar , num_levels, cfg.RRAM.SA0, cfg.RRAM.SA1)
if writeToPickle:
self._saveTrainedModel(sess, self.pretrained_variation_pkl)
else:
self._load_model(self.pretrained_variation_pkl, self.v_trainable)
acc = self._eval_net('Write Variation')
#"""
return acc
def _checkNVerifyTopN(self, sess, topN, stddev, netName, datasetName):
#self._init_baseline_network(sess)
sess.run(tf.global_variables_initializer())
path = os.path.join(self.output_dir, self.net.name, 'baseline', \
self.net.name+'_'+self.dataset_name +'_quatized.pkl')
self._load_model(path, self.v_trainable)
_ = self._eval_net('Quantized')
print '_checkNVerifyTopN | stddev', str(stddev)
readVerifyTopN(self.v_trainable, topN, stddev, netName, datasetName)
acc = self._eval_net('Top N Read-Verified')
return acc
def _retrain_baseline(self, sess, max_iters, stddevVar, percentRetrainable):
_ = self._add_variation_to_baseline(sess, stddevVar, 32, True)
self._masks = self._create_mask(percentRetrainable)
self._train_model(sess, max_iters)
return self._eval_net('Retrained')
def _iters_vs_accuracy(self, sess, max_iters, stddevVar, percentRetrainable):
_ = self._add_variation_to_baseline(sess, stddevVar, 32, True)
self._masks = self._create_mask(percentRetrainable)
acc, iters = self._train_model_v1(sess, max_iters)
_ = self._eval_net('Retrained')
return acc, iters
def plot_images(images, cls_true, cls_pred=None, smooth=True):
assert len(images) == len(cls_true) == 9
fig, axes = plt.subplots(3, 3)
if cls_pred is None:
hspace = 0.3
else:
hspace = 0.6
fig.subplots_adjust(hspace=hspace, wspace=0.3)
for i, ax in enumerate(axes.flat):
if smooth:
interpolation = 'spline16'
else:
interpolation = 'nearest'
ax.imshow(images[i, :, :, :], interpolation=interpolation)
cls_true_name = 'XXX'
if cls_pred is None:
xlabel = "True: {0}".format(cls_true_name)
else:
cls_pred_name = class_names[cls_pred[i]]
xlabel = "True: {0}\nPred: {1}".format(cls_true_name, cls_pred_name)
ax.set_xlabel(xlabel)
ax.set_xticks([])
ax.set_yticks([])
plt.show()
def verifyTopN(network, dataset, output_dir, stddevVar, dataset_name, num_levels, topN):
saver = tf.train.Saver(max_to_keep=100)
sess = tf.InteractiveSession()
sw = SolverWrapper(sess, saver, dataset, network, output_dir, dataset_name, stddevVar)
print 'Solving ... '
print 'VerifyTopN | stddev', str(stddevVar)
acc = sw._checkNVerifyTopN(sess, topN, stddevVar, network.name, dataset_name)
if cfg.WRITE_TO_SUMMARY or cfg.DEBUG_ALL:
graphWriter = tf.summary.FileWriter(sw.summaryDir, sess.graph)
print 'Done Training'
return acc
def train_net_v1(network, dataset, output_dir, iters, stddevVar, percentRetrainable, dataset_name, num_levels):
saver = tf.train.Saver(max_to_keep=100)
sess = tf.InteractiveSession()
sw = SolverWrapper(sess, saver, dataset, network, output_dir, dataset_name, stddevVar)
print 'Solving ... '
acc, iters = sw._iters_vs_accuracy(sess, iters, stddevVar, percentRetrainable)
if cfg.WRITE_TO_SUMMARY or cfg.DEBUG_ALL:
graphWriter = tf.summary.FileWriter(sw.summaryDir, sess.graph)
print 'Done Training'
return acc, iters
def train_net(network, dataset, output_dir, iters, stddevVar, percentRetrainable, dataset_name, num_levels):
"""
Trains a network for a given dataset.
Args:
network: tensorflow network to train
dataset: dataset for training and testing
output_dir: directory to store checkpoints
iters: maximum iterations to run the training process
baselineWeights: path to the trained weights of the original network
stddevVar: standard deviation of variation to be introduced in the weights as device parameters.
percentRetrainable: percentage of parameters to retrain (Number of parameters in the SRAM array).
"""
saver = tf.train.Saver(max_to_keep=100)
sess = tf.InteractiveSession()
sw = SolverWrapper(sess, saver, dataset, network, output_dir, dataset_name, stddevVar)
print 'Solving ... '
#acc = sw._find_or_train_baseline(sess, iters)
#acc = sw._add_variation_to_baseline(sess, stddevVar, num_levels, True)
#acc = sw._test_new(sess, iters, stddevVar, percentRetrainable)
acc = sw._retrain_baseline(sess, iters, stddevVar, percentRetrainable)
#acc, iters = sw._iters_vs_accuracy(sess, iters, stddevVar, percentRetrainable)
if cfg.WRITE_TO_SUMMARY or cfg.DEBUG_ALL:
graphWriter = tf.summary.FileWriter(sw.summaryDir, sess.graph)
print 'Done Training'
return acc
|
[
"amohant4@asu.edu"
] |
amohant4@asu.edu
|
3a5e1a7ef80e8a89992fa1345dcb1ea176fe38d4
|
a34f36f2f08791d353b63e786fa99fe7e7c65d9f
|
/contest2.py
|
827d8394e48b289789f5150a96d809ca1e99d7fb
|
[] |
no_license
|
vijay9908/code_forces
|
5f758c4417d448fb2637dd4b896dfc59409f8b97
|
7d58e52aabea612dfed52dd3534e38563bf78633
|
refs/heads/master
| 2021-06-25T11:54:55.179108
| 2020-11-19T15:24:08
| 2020-11-19T15:24:08
| 173,603,181
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 328
|
py
|
n = int(input())
a = list(map(int,input().split()))
uniques = sorted(set(a))
if len(uniques) == 1:
    print("0")
elif len(uniques) == 2:
    print((uniques[1] - uniques[0]))
elif(len(uniques) == 3):
    if((uniques[1]-uniques[0]) == (uniques[2]-uniques[1])):
        print(uniques[1]-uniques[0])
    else:
        print("-1")
|
[
"vijaytanmay055@gmail.com"
] |
vijaytanmay055@gmail.com
|
63c2196afa1515e12620f4d54e719a0bdb60cc3c
|
e6c65e2e354336a4bea5b6a4ccbccd3682915fe2
|
/out-bin/py/google/fhir/models/model_test.runfiles/pypi__tensorflow_1_12_0/tensorflow-1.12.0.data/purelib/tensorflow/contrib/boosted_trees/proto/split_info_pb2.py
|
8db7c5a5c0b0998a4b09a264af429e20ea8570c3
|
[
"Apache-2.0"
] |
permissive
|
rasalt/fhir-datalab
|
c30ab773d84983dd04a37e9d0ddec8bf2824b8a4
|
3e329fc8b4226d3e3a4a7c23c306a86e7a9ea0de
|
refs/heads/master
| 2021-10-09T05:51:04.593416
| 2018-12-21T18:11:03
| 2018-12-22T05:38:32
| 162,744,237
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 197
|
py
|
/home/rkharwar/.cache/bazel/_bazel_rkharwar/0ddaa3627472ad9d1367a008236ce2f5/external/pypi__tensorflow_1_12_0/tensorflow-1.12.0.data/purelib/tensorflow/contrib/boosted_trees/proto/split_info_pb2.py
|
[
"ruchika.kharwar@gmail.com"
] |
ruchika.kharwar@gmail.com
|
1387c0e3357b7e14538121bd9b9b1a0f6218b211
|
b52d75d6880cee281a024a3d6d83dc94ccd4985c
|
/pyconfort/pyconfort/cheshire_lookup.py
|
36cb10164145d42b7dfc92d59d467a2cfca9512d
|
[
"MIT"
] |
permissive
|
SabariKumar/KimLabCodingCamp2021
|
536fa1b5d1863986386c5aa584f4279782ebf945
|
7c8a7086997c8f99d0509b38a120961050f47dbb
|
refs/heads/main
| 2023-06-21T22:36:57.116822
| 2021-07-08T04:34:43
| 2021-07-08T04:34:43
| 367,467,756
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,125
|
py
|
#!/usr/bin/env python
from __future__ import print_function
import datetime
import sys
import unicodedata
import pandas as pd
## convert HTML table to Pandas dataframe
def parse_html_table(table):
    n_rows=0; n_columns = 0; column_names = []
    ## the tables contain different numbers of columns; get their names
    rows = table.find_all('tr')
    tds = rows[1].find_all('td')
    # avoid having two column names the same -
    for i in range(0,len(tds)):
        if i == 2: column_names.append('scale_'+tds[i].get_text().split('\n')[0])
        elif i == 3 and tds[i].get_text().split('\n')[0].upper() == '13C': column_names.append('scale_'+tds[i].get_text().split('\n')[0])
        else: column_names.append(tds[i].get_text().split('\n')[0])
    n_columns = len(column_names)
    # Determine the number of rows in the table
    i = 1
    for row in rows[2:]:
        td_tags = row.find_all('td')
        if len(td_tags) == n_columns:
            n_rows+=1
    columns = column_names
    df = pd.DataFrame(columns = columns,
                      index= list(range(0,n_rows)))
    row_marker = 0
    for row in rows[2:]:
        column_marker = 0
        columns = row.find_all('td')
        for column in columns:
            #print row_marker, column_marker, ' '.join(column.get_text().split())
            df.iat[row_marker,column_marker] = ' '.join(column.get_text().split())
            column_marker += 1
        if len(columns) > 0:
            row_marker += 1
    return df

## Parse from local offline version of the CHESHIRE scaling factors html or directly from the web
def cheshire(online, nucleus, opt_method, opt_basis, opt_solv, nmr_method, nmr_basis, nmr_solv, nmr_aos, log):
    try:
        from bs4 import BeautifulSoup
    except (ModuleNotFoundError, AttributeError):
        log.write('\nThe bs4 module is not installed correctly - CHESHIRE search is not available')
        sys.exit()
    ## current time for printing
    now = datetime.datetime.now()
    if online == False:
        log.write(" READING FROM LOCAL VERSION OF CHESHIRE {0}".format(now.strftime("%Y-%m-%d %H:%M")))
        html = BeautifulSoup(open('./scaling_factors.html'), "lxml")
    else:
        import requests
        log.write(" READING FROM http://cheshirenmr.info/ScalingFactors.htm {0}".format(now.strftime("%Y-%m-%d %H:%M")))
        url = 'http://cheshirenmr.info/ScalingFactors.htm'
        response = requests.get(url)
        html = BeautifulSoup(response.text, "lxml")
    calc_opt = opt_method.upper()+'/'+opt_basis
    calc_nmr = nmr_method.upper()+'/'+nmr_basis
    if nmr_solv == None:
        log.write(" ", nmr_aos.upper()+'-'+calc_nmr+'//'+calc_opt)
    else:
        if opt_solv == None: log.write(" ", nmr_solv[0].upper()+'('+nmr_solv[1]+')-'+nmr_aos.upper()+'-'+calc_nmr+'//'+calc_opt)
        else: log.write(" ", nmr_solv[0].upper()+'('+nmr_solv[1]+')-'+nmr_aos.upper()+'-'+calc_nmr+'//'+opt_solv[0].upper()+'('+opt_solv[1]+')-'+calc_opt)
    for table in html.find_all('table'):
        id = table['id']
        scaling_table = parse_html_table(table)
        # solvent details for the CHESHIRE database
        # manually entered would be better to parse from HTML - will add in due course
        if id == 'table1a': scrf = ['pcm', 'acetone']
        elif id == 'table1b': scrf = ['smd', 'chloroform']
        elif id == 'table1c': scrf = ['cpcm', 'chloroform'] #UAKS radii, nosymmcav
        elif id == 'table1d': scrf = ['smd', 'chloroform']
        elif id == 'table2': scrf = ['pcm', 'chloroform']
        elif id == 'table3a': scrf = ['pcm', 'toluene']
        elif id == 'table5-acetone': scrf = ['pcm', 'acetone']
        elif id == 'table5-acetonitrile': scrf = ['pcm', 'acetonitrile']
        elif id == 'table5-benzene': scrf = ['pcm', 'benzene']
        elif id == 'table5-chloroform': scrf = ['pcm', 'chloroform']
        elif id == 'table5-dichloromethane': scrf = ['pcm', 'dichloromethane']
        elif id == 'table5-dimethylsulfoxide': scrf = ['pcm', 'dimethylsulfoxide']
        elif id == 'table5-methanol': scrf = ['pcm', 'methanol']
        elif id == 'table5-tetrahydrofuran': scrf = ['pcm', 'tetrahydrofuran']
        elif id == 'table5-toluene': scrf = ['pcm', 'toluene']
        elif id == 'table5-water': scrf = ['pcm', 'water']
        elif id == 'table7': scrf = ['smd', 'chloroform']
        else: scrf = None
        # Look for a match between calculation and database (case insensitive)
        # Returns the first match and then breaks
        for index, row in scaling_table.iterrows():
            db_nmr_solv = None; db_opt_solv = None; db_nmr_aos = 'GIAO'
            try:
                db_nmr = row['NMR'].lower().split()[0].split("/")
                if row['NMR'].lower().find('scrf') > -1: db_nmr_solv = scrf
                if row['NMR'].lower().find('cgst') > -1: db_nmr_aos = 'CGST'
                try: [db_nmr_method, db_nmr_basis] = db_nmr
                except ValueError: pass
                if db_nmr_method[0] == '#': db_nmr_method = db_nmr_method[1:]
                db_opt = row['Geometry'].lower().split()[0].split("/")
                if row['Geometry'].lower().find('scrf') > -1: db_opt_solv = scrf
                try: [db_opt_method, db_opt_basis] = db_opt
                except ValueError: pass
                if db_opt_method[0] == '#': db_opt_method = db_opt_method[1:]
                if db_nmr_method.lower() == nmr_method.lower() and db_nmr_basis.lower() == nmr_basis.lower() and db_nmr_aos.lower() == nmr_aos.lower():
                    if db_opt_method.lower() == opt_method.lower() and db_opt_basis.lower() == opt_basis.lower():
                        #print "matched levels of theory"
                        #print db_nmr_solv, nmr_solv, db_opt_solv, opt_solv
                        if db_nmr_solv == nmr_solv and db_opt_solv == opt_solv:
                            log.write(" --- MATCH ---", id.upper()); return row['scale_'+nucleus]
            except: pass
|
[
"sabarinkumar@gmail.com"
] |
sabarinkumar@gmail.com
|
a11f6cff83681db94d4d84752d0362fdd9555b99
|
aafff2ac6e153ac2b35a351fc9292d5ec73e9267
|
/network/multi_net.py
|
24e029360c482bd7b653385a8432930541a19514
|
[] |
no_license
|
ssungjun/Comparative-Analysis-of-Relative-Camera-Pose-Estimation-based-on-Deep-Learning
|
fafe4b6101c6f44a10babce011d0eee6768095dc
|
5f34f13632801c0c83cd5a08ae22617f17899137
|
refs/heads/master
| 2020-11-25T04:49:25.745692
| 2019-12-17T02:22:51
| 2019-12-17T02:22:51
| 228,509,036
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,668
|
py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from network.backbone.resnet_base import Bottleneck_elu
from network.backbone.resnet_base import *
from util import *
class ResNetHead(nn.Module):
    def __init__(self):
        super(ResNetHead, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7,
                               stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.elu = nn.ELU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.elu(x)
        x = self.maxpool(x)
        return x


class ResModule(nn.Module):
    def __init__(self, inplanes, planes, blocks_n, stride, layer_idx, block=Bottleneck_elu):
        super(ResModule, self).__init__()
        self.module_name = 'layer' + str(layer_idx)
        self.inplanes = inplanes
        self.planes = planes
        self.resModule = nn.ModuleDict({
            self.module_name: self._make_layer(
                block, self.planes, blocks_n, stride)
        })
        # self.__dict__.update(
        #     {self.module_name: self._make_layer(
        #         block, self.planes, blocks_n, stride)
        #     }
        # )
        # self.layer = self._make_layer(
        #     block, self.planes, blocks_n, stride)

    def _make_layer(self, block, planes, blocks, stride=1):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    def forward(self, x):
        # x = self.__dict__[self.module_name](x)
        # x = vars(self)[self.module_name](x)
        # x = self.layer(x)
        x = self.resModule[self.module_name](x)
        return x


class MultiNet(nn.Module):
    _inplanes = 64

    def __init__(self, pose_type='quaternion'):  # fc1_shape
        super(MultiNet, self).__init__()
        self.feature_resnet = resnet50(pretrained=True)
        self.feature_down = nn.Sequential(
            nn.Linear(2048, 512),
            nn.LeakyReLU(),
            nn.Dropout(0.5))
        # phase1
        self.regressor1 = nn.Sequential(
            nn.Linear(1024, 1024),
            nn.LeakyReLU(),
            nn.Dropout(0.5),
            nn.Linear(1024, 256),
            nn.LeakyReLU(),
            nn.Dropout(0.5),
            nn.Linear(256, 7))
        # phase2
        self.lstm = nn.LSTM(input_size=1024, hidden_size=512, num_layers=3, batch_first=True, dropout=0.5, bidirectional=True)
        self.regressor2 = nn.Sequential(
            nn.Linear(1024, 1024),
            nn.LeakyReLU(),
            nn.Dropout(0.5),
            nn.Linear(1024, 256),
            nn.LeakyReLU(),
            nn.Dropout(0.5),
            nn.Linear(256, 7))

    def forward(self, image1, image2, image3, image4, image5, phase=1):
        self.lstm.flatten_parameters()
        out1 = self.feature_down(self.feature_resnet(image1))
        out2 = self.feature_down(self.feature_resnet(image2))
        out3 = self.feature_down(self.feature_resnet(image3))
        out4 = self.feature_down(self.feature_resnet(image4))
        out5 = self.feature_down(self.feature_resnet(image5))
        lstm_in1 = torch.cat([out1, out2], dim=1)
        lstm_in2 = torch.cat([out2, out3], dim=1)
        lstm_in3 = torch.cat([out3, out4], dim=1)
        lstm_in4 = torch.cat([out4, out5], dim=1)
        outputs = []
        lstm_input_list = [lstm_in1, lstm_in2, lstm_in3, lstm_in4]
        # phase1
        if phase == 1:
            for i in range(len(lstm_input_list)):
                out = self.regressor1(lstm_input_list[i])
                outputs.append(out)
        else:
            # phase2
            lstm_in = torch.stack(lstm_input_list, 1)
            lstm_out = self.lstm(lstm_in)
            for i in range(lstm_out[0].size()[1]):
                out = self.regressor2(lstm_out[0][:, i, :])
                outputs.append(out)
        return outputs
|
[
"noreply@github.com"
] |
ssungjun.noreply@github.com
|
20f492a58694e5bed3221a4b86be5554bd64b1df
|
c7a397bd377fa8f310a715ea35fb2e0b54001e88
|
/scripts/build_perfect_run.py
|
7f393cb7fcae1e631e7c5dff9c8e4b759e7b5329
|
[] |
no_license
|
felipemoraes/dynamic
|
bf1f928b7c396eaff972efc92b2d47407bb9b3a8
|
a7c797bdc158e006575ee82d3d269099c5dea611
|
refs/heads/master
| 2021-03-27T14:23:33.237885
| 2018-11-14T15:17:42
| 2018-11-14T15:17:42
| 60,723,517
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,108
|
py
|
# -*- encoding: utf-8 -*-
from collections import OrderedDict
import sys
import math
MAX_JUDGMENT = 4
MAX_HEIGHT = 5
beta = 1
gamma = 0.5
# $topic $docno $subtopic $judgement
qrels = {}
#$topic $subtopic $area
subtopic_weight = {}
# $topic $subtopic $gainHeights
current_gain_height = {}
# $topic $subtopic $occurrences
subtopic_cover = {}
#########################################
#### Read qrels file(groundtruth), check format, and sort
def prepare_qrels(qrelsfile):
global qrels, subtopic_weight, current_gain_height, subtopic_cover
# $topic $docno $subtopic $judgement
qrels = {}
#$topic $subtopic $area
subtopic_weight = {}
# $topic $subtopic $gainHeights
current_gain_height = {}
# $topic $subtopic $occurrences
subtopic_cover = {}
tmp_qrels = {}
count = 0
for line in open(qrelsfile):
topic, subtopic, docno, passage, judgment = line.strip().split('\t')
if int(judgment) > 0 :
judgment = int(judgment)
else :
judgment = 1
if not tmp_qrels.has_key(topic):
tmp_qrels[topic] = {}
tmp_qrels[topic][docno] = {}
tmp_qrels[topic][docno][subtopic] = {}
if not tmp_qrels[topic].has_key(docno):
tmp_qrels[topic][docno] = {}
tmp_qrels[topic][docno][subtopic] = {}
if not tmp_qrels[topic][docno].has_key(subtopic):
tmp_qrels[topic][docno][subtopic] = {}
tmp_qrels[topic][docno][subtopic][passage] = judgment
for topic, docs in tmp_qrels.iteritems():
for docno, subtopics in docs.iteritems():
for subtopic, rels in subtopics.iteritems():
rels = sorted(rels.values(),reverse=True)
log2 = math.log(2)
rel = sum([ rel/(math.log(rank+2)/log2) for rank, rel in enumerate(rels)])
if not qrels.has_key(topic):
qrels[topic] = {}
subtopic_weight[topic] = {}
current_gain_height[topic] = {}
subtopic_cover[topic] = {}
qrels[topic][docno] = {}
qrels[topic][docno][subtopic] = {}
if not qrels[topic].has_key(docno):
qrels[topic][docno] = {}
qrels[topic][docno][subtopic] = {}
if not qrels[topic][docno].has_key(subtopic):
qrels[topic][docno][subtopic] = {}
qrels[topic][docno][subtopic] = rel
subtopic_weight[topic][subtopic] = 1
current_gain_height[topic][subtopic] = 0
subtopic_cover[topic][subtopic] = 0
#### Normalize subtopic weight
for topic, subtopics in subtopic_weight.iteritems():
max_weight = get_max_weight(topic)
for subtopic in subtopics:
subtopic_weight[topic][subtopic] /= float(max_weight)
def get_doc_gain(topic,docno):
gain = 0
for subtopic, area in subtopic_weight[topic].iteritems():
nrel = subtopic_cover[topic][subtopic]
if qrels[topic].has_key(docno):
if not qrels[topic][docno].has_key(subtopic):
continue
else:
continue
hight_keepfilling = get_hight_keepfilling(topic, docno, subtopic, nrel+1)
area = get_area(topic,subtopic)
gain += area*hight_keepfilling
return gain
def update_doc_gain(topic,docno):
gain = 0
for subtopic, area in subtopic_weight[topic].iteritems():
nrel = subtopic_cover[topic][subtopic]
if qrels[topic].has_key(docno):
if not qrels[topic][docno].has_key(subtopic):
continue
else:
continue
hight_keepfilling = update_hight_keepfilling(topic, docno, subtopic, nrel+1)
area = get_area(topic,subtopic)
gain += area*hight_keepfilling
subtopic_cover[topic][subtopic] +=1
return gain
def get_hight_keepfilling(topic, docno, subtopic, nrel):
rel = 0
if qrels[topic].has_key(docno):
if qrels[topic][docno].has_key(subtopic):
rel = qrels[topic][docno][subtopic]
if rel == 0:
return 0
current_gain = current_gain_height[topic][subtopic]
gain = get_hight_discount(nrel)*rel
return gain
def update_hight_keepfilling(topic, docno, subtopic, nrel):
rel = 0
if qrels[topic].has_key(docno):
if qrels[topic][docno].has_key(subtopic):
rel = qrels[topic][docno][subtopic]
if rel == 0:
return 0
current_gain = current_gain_height[topic][subtopic]
gain = get_hight_discount(nrel)*rel
if current_gain + gain > MAX_HEIGHT:
gain = MAX_HEIGHT - current_gain
current_gain_height[topic][subtopic] += gain
return gain
def get_area(topic, subtopic):
if subtopic_weight[topic].has_key(subtopic):
return subtopic_weight[topic][subtopic]
return 0
def get_hight_discount(nrels):
return gamma ** nrels
def get_max_weight(topic):
max_weight = sum([v for v in subtopic_weight[topic].values()])
return max_weight
def perfect_run(qrelsfile):
prepare_qrels(qrelsfile)
for topic, docs in qrels.iteritems():
candidate_docs = docs.keys()
best_docs = set()
i = 0
while len(best_docs) < len(candidate_docs):
best_doc = "-"
best_gain = -1
for docno in candidate_docs:
if docno in best_docs:
continue
gain = get_doc_gain(topic,docno)
if gain > best_gain:
best_doc = docno
best_gain = gain
update_doc_gain(topic,docno)
best_docs.add(best_doc)
print "%s\t%s\t%s\t%f\t1\t%s" % (topic, (i/5), best_doc, ((len(candidate_docs)-i)/float(len(candidate_docs))), "|".join(["%s:%f" % (subtopic, qrels[topic][best_doc][subtopic]) for subtopic in qrels[topic][best_doc]]))
i+=1
perfect_run(sys.argv[1])
|
[
"fe.moraesg@gmail.com"
] |
fe.moraesg@gmail.com
|
24d99fae3b1ddd8240944745bf0aacfc89d1c413
|
c88e0a47b1065c8f00dc30a85580cf4b1e3ee0e7
|
/dosyayaYazmak.py
|
51fe6f6e2ca4bda046cc83c4216f2b2b7f53434d
|
[] |
no_license
|
adalix/py2-book
|
09b0a09b1d762562534b999eca81a40cfbdc6cd8
|
256d920ba7ba1d5e8128c0f71479e51755b281cd
|
refs/heads/master
| 2021-01-10T14:50:09.638783
| 2016-03-10T13:49:15
| 2016-03-10T13:49:15
| 51,669,594
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 112
|
py
|
tg = open ("tg.txt","w")
tg.write("tolgahan faln faln")
tg.close()
tg = open("tg.txt","r")
print tg.read()
|
[
"gumustolgahan@gmail.com"
] |
gumustolgahan@gmail.com
|
c3948213eed1ef36d98427854f530df32182dc2e
|
9051a18b68f7c74fcb1356e573419cd0ab2e2b8d
|
/web/update_4.py
|
4d70b8314ccb5d307a8f0edcafef915e0f2d27f1
|
[] |
no_license
|
paulie367/paulie367.github.io
|
4dbc25faea6ccce16e1aec38fd943aae9f30bdc3
|
71ee318c978839cdfac603109d56559a60d95310
|
refs/heads/master
| 2023-08-03T03:02:19.752849
| 2021-09-27T08:41:07
| 2021-09-27T08:41:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,623
|
py
|
#!C:\Users\2054069\AppData\Local\Programs\Python\Python39\python.exe
print("Content-Type: text/html; charset=utf-8")
print("")
import cgi, os
files = os.listdir('data')
#print(files)
listStr = ''
for item in files:
    listStr = listStr + '<li><a href="index_4_update.py?id={name}">{name}</a></li>'.format(name=item)
#print(listStr)
form = cgi.FieldStorage()
if 'id' in form:
    pageID = form["id"].value
    description = open('data/'+pageID, 'r').read()
else:
    pageID = 'welcome'
    description = 'hello, web'
print("hello world")
print("of ")
print(pageID)
import sys
import io  # set encoding to utf-8
sys.stdout = io.TextIOWrapper(sys.stdout.detach(), encoding = 'utf-8')  # set encoding to utf-8
sys.stderr = io.TextIOWrapper(sys.stderr.detach(), encoding = 'utf-8')  # set encoding to utf-8
print('''<!doctype html>
<html>
<head>
<title> WEB1 - welcome </title>
<meta charset="utf-8">
</head>
<body>
<h1><a href="index_4_update.py">WEB</a></h1>
<ol>
{listStr}
</ol>
<h2>{title}</h2>
<p>{desc}
</p>
<a href="create_4.py">create</a>
<form action="process_update_4.py" method="post">
<p><input type="hidden" name="pageID" value="{form_default_title}" ></p>
<p><input type="text" name="title" placeholder="title" value="{form_default_title}"></p>
<p><textarea rows="4" name="description" placeholder="description">{form_default_description}</textarea></p>
<p><input type="submit" value="submit" ></p>
</form>
<img src="engine.jpg" alt="My Image" width = 40%">
</body>
</html>
'''.format(title=pageID, desc=description, listStr=listStr, form_default_title=pageID, form_default_description=description))
|
[
"noreply@github.com"
] |
paulie367.noreply@github.com
|
f7df70c87749177fdb0473207659ba0ee49741c0
|
01c14348768543446220e4892ecb289aeb04af50
|
/palindrome.py
|
33d27dd5418e4f060fc2302abb691308143f08de
|
[] |
no_license
|
beyzakilickol/week1Wednesaday
|
c92c715516b173361c1e02efeaba4b7168d43de4
|
c4b66a2a83a857a8e60f6394225a29eef173ecb2
|
refs/heads/master
| 2020-03-29T03:56:30.764880
| 2018-09-19T20:30:35
| 2018-09-19T20:30:35
| 149,508,175
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 591
|
py
|
word = input('Enter the word: ')
arr = list(word)
second_word = []
for index in range(len(arr)-1, -1, -1):
    second_word.append(arr[index])
print(second_word)
reversed = ''.join(second_word)
print(reversed)


def is_palindrome():
    if word == reversed:
        return True
    else:
        return False


print(is_palindrome())
#-----------------------------------second way-----------------------
#word = input('Enter the word: ')
#reversed = word[::-1]
#def is_palindrome():
#    if(word == reversed):
#        return True
#    else:
#        return False
#print(is_palindrome())
|
[
"43250495+beyzakilickol@users.noreply.github.com"
] |
43250495+beyzakilickol@users.noreply.github.com
|
92cedf3e8cefe043ac52a07aa1226af00f782812
|
390eae08be96c6f5ddc78434cfb0398cb0095f3b
|
/tf_runner.py
|
299af6dd0b028c145a824c376b96aa37949cfd85
|
[] |
no_license
|
BurakCinar07/RealTimePedestrianDetection
|
500f41f43780a5359047059bd37135817e1763db
|
34ed89d1b0b5e53efe5e9bc5e9cc08ef6876c4d2
|
refs/heads/master
| 2020-03-21T07:26:11.047926
| 2018-06-22T09:00:46
| 2018-06-22T09:00:46
| 126,314,520
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,327
|
py
|
import sys
import time
import tensorflow as tf
import nn_datasource as ds
from checkpoint_manager import CheckpointManager
IMAGE_WIDTH = 72
IMAGE_HEIGHT = 170
IMAGE_CHANNEL = 1
EPOCH_LENGTH = 300
BATCH_SIZE = 100
LEARNING_RATE = 0.001
RANDOM_SEED = 2
WEIGHT_COUNTER = 0
BIAS_COUNTER = 0
CONVOLUTION_COUNTER = 0
POOLING_COUNTER = 0
sess = None
def new_weights(shape):
global WEIGHT_COUNTER
weight = tf.Variable(tf.random_normal(
shape=shape, seed=RANDOM_SEED), name='w_' + str(WEIGHT_COUNTER))
WEIGHT_COUNTER += 1
return weight
def new_biases(length):
global BIAS_COUNTER
bias = tf.Variable(
tf.zeros(shape=[length]), name='b_' + str(BIAS_COUNTER))
BIAS_COUNTER += 1
return bias
def new_conv_layer(input, num_input_channels, filter_size, num_filters, pooling=2):
global CONVOLUTION_COUNTER
global POOLING_COUNTER
shape = [filter_size, filter_size, num_input_channels, num_filters]
weights = new_weights(shape=shape)
biases = new_biases(length=num_filters)
layer = tf.nn.conv2d(input=input, filter=weights,
strides=[1, 1, 1, 1], padding='SAME',
name='conv_' + str(CONVOLUTION_COUNTER))
CONVOLUTION_COUNTER += 1
layer = tf.add(layer, biases)
layer = tf.nn.relu(layer)
if pooling is not None and pooling > 1:
layer = tf.nn.max_pool(value=layer, ksize=[1, pooling, pooling, 1],
strides=[1, pooling, pooling, 1], padding='SAME',
name='pool_' + str(POOLING_COUNTER))
POOLING_COUNTER += 1
return layer, weights
def flatten_layer(layer):
layer_shape = layer.get_shape()
num_features = layer_shape[1:4].num_elements()
layer_flat = tf.reshape(layer, [-1, num_features])
return layer_flat, num_features
def new_fc_layer(input, num_inputs, num_outputs):
weights = new_weights(shape=[num_inputs, num_outputs])
biases = new_biases(length=num_outputs)
layer = tf.add(tf.matmul(input, weights), biases)
# layer = tf.nn.relu(layer)
return layer
tf.reset_default_graph()
TEST = True
NETWORK_NUMBER = 4
print(NETWORK_NUMBER)
input_placeholder = tf.placeholder(
tf.float32, shape=[None, IMAGE_WIDTH, IMAGE_HEIGHT, IMAGE_CHANNEL], name='input_placeholder')
output_placeholder = tf.placeholder(tf.float32, shape=[None, 2], name='output_placeholder')
layer_conv_1, weights_conv_1 = new_conv_layer(
input=input_placeholder,
num_input_channels=IMAGE_CHANNEL,
filter_size=5,
num_filters=64,
pooling=2
)
layer_conv_2, weights_conv_2 = new_conv_layer(
input=layer_conv_1,
num_input_channels=64,
filter_size=3,
num_filters=128,
pooling=2
)
layer_conv_3, weights_conv_3 = new_conv_layer(
input=layer_conv_2,
num_input_channels=128,
filter_size=3,
num_filters=128,
pooling=None
)
layer_conv_4, weights_conv_4 = new_conv_layer(
input=layer_conv_3,
num_input_channels=128,
filter_size=3,
num_filters=128,
pooling=None
)
layer_conv_5, weights_conv_5 = new_conv_layer(
input=layer_conv_4,
num_input_channels=128,
filter_size=3,
num_filters=256,
pooling=3
)
layer_flat, num_features = flatten_layer(layer_conv_5)
layer_fc_1 = new_fc_layer(
input=layer_flat, num_inputs=num_features, num_outputs=4096)
layer_fc_1 = tf.nn.softmax(layer_fc_1)
if TEST is not True:
layer_fc_1 = tf.nn.dropout(layer_fc_1, 0.5)
layer_fc_2 = new_fc_layer(
input=layer_fc_1, num_inputs=4096, num_outputs=4096)
layer_fc_2 = tf.nn.softmax(layer_fc_2)
if TEST is not True:
layer_fc_2 = tf.nn.dropout(layer_fc_2, 0.5)
layer_output = new_fc_layer(
input=layer_fc_2, num_inputs=4096, num_outputs=2)
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
labels=output_placeholder,
logits=layer_output)
cost = tf.reduce_mean(cross_entropy)
# cost = tf.losses.mean_squared_error(output_placeholder, layer_output)
optimizer = tf.train.AdamOptimizer(LEARNING_RATE).minimize(cost)
predictions = tf.argmax(tf.nn.softmax(layer_output), dimension=1)
prediction_equalities = tf.equal(predictions, tf.argmax(output_placeholder, dimension=1))
accuracy = tf.reduce_mean(tf.cast(prediction_equalities, tf.float32))
def train_nn(number, input_placeholder, output_placeholder, accuracy, cost, optimizer):
global TEST
checkpoint_manager = CheckpointManager(number)
init_g = tf.global_variables_initializer()
init_l = tf.local_variables_initializer()
with tf.Session() as sess:
sess.run(init_g)
sess.run(init_l)
checkpoint_manager.on_training_start(
ds.DATASET_FOLDER, EPOCH_LENGTH, BATCH_SIZE,
LEARNING_RATE, "AdamOptimizer", True)
for batch_index, batch_images, batch_labels in ds.training_batch_generator(BATCH_SIZE, grayscale=True):
print("Starting batch {:3}".format(batch_index + 1))
for current_epoch in range(EPOCH_LENGTH):
feed = {
input_placeholder: batch_images,
output_placeholder: batch_labels
}
epoch_accuracy, epoch_cost, _ = sess.run(
[accuracy, cost, optimizer], feed_dict=feed)
print("Batch {:3}, Epoch {:3} -> Accuracy: {:3.1%}, Cost: {}".format(
batch_index + 1, current_epoch + 1, epoch_accuracy, epoch_cost))
checkpoint_manager.on_epoch_completed()
TEST = True
batch_accuracy_training, batch_cost_training = sess.run(
[accuracy, cost], feed_dict=feed)
TEST = False
print("Batch {} has been finished. Accuracy: {:3.1%}, Cost: {}".format(
batch_index + 1, batch_accuracy_training, batch_cost_training))
checkpoint_manager.on_batch_completed(
batch_cost_training, batch_accuracy_training)
checkpoint_manager.save_model(sess)
print("\nTraining finished at {}!".format(time.asctime()))
# overall_accuracy, overall_cost = \
# test_nn(number, input_placeholder, output_placeholder, accuracy, cost)
checkpoint_manager.on_training_completed(None)
def test_frame(frame):
prediction = tf.argmax(tf.nn.softmax(layer_output), 1)
print(prediction.eval(feed_dict={input_placeholder:[frame]}, session=sess))
def test_nn(number, input_placeholder, output_placeholder, accuracy, cost):
checkpoint_manager = CheckpointManager(number)
init_g = tf.global_variables_initializer()
init_l = tf.local_variables_initializer()
with tf.Session() as sess:
sess.run(init_g)
sess.run(init_l)
checkpoint_manager.restore_model(sess)
total_accuracy = 0
total_cost = 0
batches = None
for batch_index, test_images, test_labels in ds.test_batch_generator(100, grayscale=True):
feed = {
input_placeholder: test_images,
output_placeholder: test_labels
}
test_accuracy, test_cost = sess.run(
[accuracy, cost], feed_dict=feed)
print("Batch {:3}, Accuracy: {:3.1%}, Cost: {}" \
.format(batch_index, test_accuracy, test_cost))
total_accuracy += test_accuracy
total_cost += test_cost
batches = batch_index
overall_accuracy = total_accuracy / (batches + 1)
overall_cost = total_cost / (batches + 1)
print("Total test accuracy: {:5.1%}".format(overall_accuracy))
return overall_accuracy, overall_cost
def main():
pass
def init():
    # Restore the trained model and return an open session for later inference calls.
    checkpoint_manager = CheckpointManager(NETWORK_NUMBER)
    init_g = tf.global_variables_initializer()
    init_l = tf.local_variables_initializer()
    sess = tf.Session()
    sess.run(init_g)
    sess.run(init_l)
    checkpoint_manager.restore_model(sess)
    return sess
if __name__ == '__main__':
main()
|
[
"noreply@github.com"
] |
BurakCinar07.noreply@github.com
|
331a858abcf24bb56d1b2857284bce312868b9d3
|
f6e83bc298b24bfec278683341b2629388b22e6c
|
/scripts/generate_shutdown_order.py
|
a9a1168a051ec2bc8adb0ea3bf0f478399207a45
|
[
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] |
permissive
|
noaOrMlnx/sonic-utilities
|
8d8ee86a9c258b4a5f37af69359ce100c29ad99c
|
9881f3edaa136233456408190367a09e53386376
|
refs/heads/master
| 2022-08-17T23:15:57.577454
| 2022-05-18T21:49:32
| 2022-05-18T21:49:32
| 225,886,772
| 1
| 0
|
NOASSERTION
| 2022-07-19T08:49:40
| 2019-12-04T14:31:32
|
Python
|
UTF-8
|
Python
| false
| false
| 463
|
py
|
#!/usr/bin/python3
''' This script is used to generate initial warm/fast shutdown order file '''
from sonic_package_manager import PackageManager
def main():
manager = PackageManager.get_manager()
installed_packages = manager.get_installed_packages()
print('installed packages {}'.format(installed_packages))
manager.service_creator.generate_shutdown_sequence_files(installed_packages)
print('Done.')
if __name__ == '__main__':
main()
|
[
"noreply@github.com"
] |
noaOrMlnx.noreply@github.com
|
3f04a266de102089706de59d6fedfe00242dd410
|
0dd1f090cd4dbcda484dca07ae279f3673e31a14
|
/flask/restful_flask_apis/requ.py
|
80452d396cd3cbd4ab25cbd6124fa6a81cbcdcea
|
[] |
no_license
|
HaidiChen/WebAPIs
|
53fd668a3f5ce91a15166e519f8990f7c41799af
|
5cc108d774c3764c3f5146143af091409bbdf4c0
|
refs/heads/master
| 2020-05-02T17:49:59.267234
| 2019-07-15T00:28:02
| 2019-07-15T00:28:02
| 178,110,617
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 999
|
py
|
from flask import Flask
from flask_restful import Resource, Api, reqparse
app = Flask(__name__)
api = Api(app)
parser = reqparse.RequestParser(bundle_errors=True)
parser.add_argument('rate', type=int, help='Rate to change for this Resource')
# argument is required
parser.add_argument('name', required=True,
action='append', help="Name cannot be blank!")
parser.add_argument('foo', type=int, required=True, choices=(1, 2),
help='Bad choice: {error_msg}')
# change the name of argument when it is parsed
parser.add_argument('color', dest='public_color')
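# A rough sketch of how these parser settings behave (hypothetical request, not
# part of this app): POSTing form data such as
#   rate=3&name=alice&name=bob&foo=1&color=red
# should make parser.parse_args() return roughly
#   {'rate': 3, 'name': ['alice', 'bob'], 'foo': 1, 'public_color': 'red'}
# because action='append' collects repeated 'name' values into a list and
# dest='public_color' renames the 'color' argument when it is parsed.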
class Price(Resource):
def post(self):
args = parser.parse_args()
rate = args['rate']
names = args['name']
pc = args['public_color']
if rate:
rate = str(rate * 100)
return {'price': rate, 'names': names}
return {'price': 'nothing', 'names': names, 'color': pc}
api.add_resource(Price, '/price')
if __name__ == "__main__":
app.run(debug=True)
|
[
"chenhaidi43@163.com"
] |
chenhaidi43@163.com
|
86fed817850caaf05d4b6eafcbc82e9e4f727b43
|
0ca5780b8a121b90e2191d7e394e35f49ab68828
|
/controllers/player.py
|
430bafb76594668e60744516af0a07a1a2086e27
|
[
"MIT"
] |
permissive
|
Ghloin/tweeria
|
805091a40a2625f4983b960ccd477af6ffb1c1ba
|
5f7cf917a6e08f15cd914c11823dbd81c11b95a1
|
refs/heads/master
| 2021-01-21T05:59:24.279175
| 2015-04-11T23:49:09
| 2015-04-11T23:49:09
| 33,859,414
| 1
| 0
| null | 2015-04-13T09:22:40
| 2015-04-13T09:22:39
| null |
UTF-8
|
Python
| false
| false
| 47,767
|
py
|
# -*- coding: UTF-8 -*-
import basic
import tweepy
import re
import json
import math
from sets import Set
from time import time, localtime
from guild import guildsController
from random import randint, sample
import memcache_controller
from functions import getMessages, prettyItemBonus, getRelativeDate, getDisplayPages
from misc import miscController
import cherrypy
class playerController(basic.defaultController):
DIR = './players/'
RE_ITEMS_UIDS = re.compile('^(\d)\#(.*)')
query_result = False
def __init__(self):
basic.defaultController.__init__(self)
self._getStaticAchvs()
self.cache = memcache_controller.cacheController()
@basic.printpage
def printPage(self, page, params):
return {
'top': self.printTopList,
'registration': self.redirectToTwitter,
'new': self.printCreatingPlayerPage,
'spellbook': self.printSpellbook,
'inv': self.printInviteCenter,
'authors': self.printTopAuthors,
'settings': self.printSettings,
'__default__': {
'method': self.printPlayerPage,
'params': {'username': page}
}
}
@basic.methods
def methods(self, params={}):
return {
'type_of_form': {
'add_user': self.finishCreatingNewPlayer,
'equip_item': self.equipItem,
'sell_item': self.sellItem,
'change_title': self.changeSettings,
'change_player_settings': self.changeSettings,
'change_pvp': self.changeSettings,
'change_artwork': self.changeSettings,
'move_spell_to_book': self.setSpellActive,
'move_spell_from_book': self.setSpellInactive,
'change_post_setting': self.changePostToTwitter,
'send_mention_invite': self.sendMentionInvite,
'get_friends': self.getFriends,
'reset_hero': self.resetHero
}
}
# --------------------------------------------------------------------------------------------------
# Misc
def _getStaticAchvs(self):
self.static = self.model.players.getAchvStaticForPrint()
def isPlayerAlreadyRegistered(self, user_id):
player = self.model.players.getPlayer(user_id)
return player
def authorizePlayer(self, player, to_invite_page=False):
self.sbuilder.createSession(int(player['user_id']))
if to_invite_page:
return self.sbuilder.redirect(self.core.HOST + 'inv', 'Redirecting ... ')
else:
backlink = self.sbuilder.getOneCookie('login_back_url')
if backlink:
return self.sbuilder.redirect(self.core.HOST + backlink, 'Redirecting ... ')
else:
return self.sbuilder.redirect(self.core.HOST + player['name'], 'Redirecting to your profile')
def redirectToTwitter(self, fields=None, params=None):
if 'backlink' in params:
self.sbuilder.setCookie({'login_back_url': params['backlink']}, 30)
self.sbuilder.setCookie({'just_login': True}, 30)
# referral cookie
username = False
if len(params) > 1:
for param in params:
if not param in ['__page__', '__query__', 'backlink', 'guild']:
self.sbuilder.setCookie({'referal_name': param}, 300)
username = param
if 'guild' in params and username:
info = self.model.players.getPlayerRawByName(username, {'_guild_name': 1})
if info and info['_guild_name']:
self.sbuilder.setCookie({'guild_invite': info['_guild_name']}, 300)
break
auth = tweepy.OAuthHandler(self.core.p_key, self.core.p_secret)
url = auth.get_authorization_url(True)
return self.sbuilder.redirect(url)
def getPlayersGuild(self, user_id):
return self.model.guilds.getPlayersGuild(user_id)
# --------------------------------------------------------------------------------------------------
# Page methods
def sendMentionInvite(self, params):
if self.cur_player and 'name' in params and params['name']:
if params['name'][0] == '@':
params['name'] = params['name'][1:]
text = '@' + params['name'] + ' join my journey in Tweeria http://tweeria.com/invite?' + self.cur_player[
'login_name'] + ' #rpg'
result = self.model.players.postMentionInvite(self.cur_player['login_id'], text)
else:
result = False
cherrypy.response.headers['Content-Type'] = "application/json"
return json.dumps({"invited": result})
# -------------
def equipItem(self, params):
if 'uid' in params:
uid = params['uid']
if 'old_id' in params:
old_id = params['old_id']
else:
old_id = '0'
returnHash = {
"equipted": self.model.items.equipItem(
uid, self.cur_player['login_id'],
self.cur_player['login_class'],
old_id,
self.cur_player['login_lvl']
),
"stats": self.model.players.recalculateStats(self.cur_player['login_id'])
}
else:
returnHash = {'equipted': False, 'stats': {}}
cherrypy.response.headers['Content-Type'] = "application/json"
return json.dumps(returnHash)
def sellItem(self, params):
rules = {
'uid': {'not_null': 1},
}
returnHash = {'sold': False}
status = self.checkParams(params, rules)
if status['status']:
created_by_player = int(params['created_by_player']) == 1
cost = self.model.items.sellItem(self.cur_player['login_id'], params['uid'], to_pool=created_by_player)
if created_by_player:
cost = int(float(cost) / 2)
returnHash = {
"sold": True,
"goldgained": cost,
"stats": self.model.players.recalculateStats(self.cur_player['login_id'])
}
cherrypy.response.headers['Content-Type'] = "application/json"
return json.dumps(returnHash)
def changePostToTwitter(self, param):
checked = 'post_to_twitter' in param and param['post_to_twitter'] == '1'
self.model.players.updatePlayerData(self.cur_player['login_id'], {'post_to_twitter': checked})
self.httpRedirect(param)
def changeSettings(self, param):
# helper for switching the player's artwork/title
def changeThings(type_name, param_name, param, field_name):
availiable_things = self.mongo.getu('players',
{'_id': self.cur_player['login_id'], type_name: {'$exists': 1}},
{'_id': 1, type_name: 1})
if availiable_things:
things = []
for thing in availiable_things[0][type_name]:
if thing[field_name] == int(param[param_name]):
thing.update({'current': True})
else:
thing.update({'current': False})
things.append(thing)
self.mongo.update('players', {'_id': self.cur_player['login_id']}, {type_name: things})
if 'pvp_mode' in param:
pvp = int(param['pvp_mode'])
if not pvp in [0, 1]:
pvp = 0
param.update({'success': True})
self.mongo.update('players', {'user_id': self.cur_player['login_user_id']}, {'pvp': pvp}, True)
if 'change_title' in param:
changeThings('titles', 'change_title', param, 'item_UID')
if 'change_artwork' in param:
self.model.misc.changePlayerArtworks(self.cur_player['login_id'], param['change_artwork'])
self.sbuilder.httpRedirect(param['__page__'])
def printCreatingPlayerPage(self, fields, param):
fields.update({self.title: 'Choose your path'})
def checkInfoActuality(old_data, new_data, auth):
record = {}
if new_data.screen_name != old_data['name']:
record.update({'name': new_data.screen_name})
if new_data.profile_image_url != old_data['avatar']:
record.update({'img': new_data.profile_image_url})
if auth.access_token.key != old_data['token1'] or auth.access_token.secret != old_data['token2']:
record.update({
'token1': auth.access_token.key,
'token2': auth.access_token.secret
})
if not 'utc_offset' in old_data or new_data.utc_offset != old_data['utc_offset']:
record.update({'utc_offset': new_data.utc_offset})
return record
def getLeadership(following, followers):
if following == 0:
ratio = 0
else:
ratio = int(float(followers) / following)
lead = 0
for record in self.balance.LEAD:
if record['min'] <= ratio and record['min_fol'] <= followers:
lead = record['lead']
return lead
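# For example (hypothetical numbers): with 200 following and 1000 followers the
# ratio is int(1000.0 / 200) = 5; every self.balance.LEAD entry with
# 'min' <= 5 and 'min_fol' <= 1000 matches, and the last matching entry's
# 'lead' value becomes the starting leadership stat.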
if not 'oauth_token' in param and not 'oauth_verifier' in param:
return self.sbuilder.redirect('../')
consumer_key = self.core.p_key
consumer_secret = self.core.p_secret
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_request_token(param['oauth_token'], param['oauth_verifier'])
try:
auth.get_access_token(param['oauth_verifier'])
except tweepy.TweepError:
return self.sbuilder.throwWebError(2001)
api = tweepy.API(auth)
user = api.me()
if 'invites' in self.core.debug and self.core.debug['invites']:
allowed = self.model.misc.getPlayerInvite(user.screen_name)
if not allowed:
return self.sbuilder.throwWebError(2001)
#login = self.sbuilder.getLoginCookie()
player = self.isPlayerAlreadyRegistered(user.id)
if player:
diff_data = checkInfoActuality(player, user, auth)
if diff_data:
self.model.players.updatePlayerData(player['_id'], diff_data)
return self.authorizePlayer({'name': user.screen_name, 'user_id': int(user.id)})
if user.utc_offset:
utc_offset = user.utc_offset
else:
utc_offset = 0
fields.update({
'id': user.id,
'login_name': user.screen_name,
'avatar': user.profile_image_url,
'access_key': auth.access_token.key,
'access_secret': auth.access_token.secret,
'step': 1,
'utc': utc_offset,
'rto': getLeadership(user.friends_count, user.followers_count)
})
buff = self.sbuilder.balance.classes
classes = []
for key in buff:
record = {
'id': key,
'name': buff[key]
}
classes.append(record)
fields.update({'classes': classes})
# check referal
referal_name = self.sbuilder.getOneCookie('referal_name')
if referal_name:
fields.update({'referal': referal_name})
guild_invite = self.sbuilder.getOneCookie('guild_invite')
if guild_invite:
fields.update({'guild_invite': guild_invite})
return self.sbuilder.loadTemplate(self.DIR + 'registration.jinja2', fields)
def finishCreatingNewPlayer(self, param):
def getReferalBonus(referal_name, user_id, user_name):
self.model.players.giveBonusToReferal(referal_name, user_id, user_name)
if self.isPlayerAlreadyRegistered(int(param['id'])):
return self.authorizePlayer({'name': param['login_name'], 'user_id': int(param['id'])})
rules = {
'token1': {},
'token2': {},
'login_name': {'not_null': True},
'avatar': {},
'id': {'gt': 1},
'sex': {'gte': 0, 'lte': 1},
'class': {'gte': 1, 'lte': len(self.balance.classes)},
'utc': {'int': 1}
}
status = self.checkParams(param, rules)
if 'race' in param:
buff = param['race'].split(':')
try:
faction = int(buff[0])
race = int(buff[1])
except Exception:
status = False
else:
status = False
if status:
# guard against junk being passed in the parameters so we don't return a 500
try:
_class = int(param['class'])
_user_id = int(param['id'])
_sex = int(param['sex'])
_utc = int(param['utc'])
_rto = int(param['rto'])
except Exception:
return self.redirectToTwitter()
new_player = self.model.playerInstance({
'class': _class,
'user_id': _user_id,
'avatar': param['avatar'],
'name': param['login_name'],
'token1': param['token1'],
'token2': param['token2'],
'sex': _sex,
'post_to_twitter': 'post_to_twitter' in param,
'race': race,
'utc_offset': _utc,
'faction': faction,
'last_login': time(),
'position': self.balance.started_locations[faction]
})
stats = {}
for stat in self.balance.classes_stats[param['class']]:
value = self.balance.classes_stats[param['class']][stat]
stats.update({stat: {'current': value, 'max': value, 'max_dirty': value}})
# referral program bonus
if 'referal' in param:
for type_name in stats['HP']:
stats['HP'][type_name] += 5
stats['luck'][type_name] += 1
# is the player joining a guild?
join_guild = ''
if 'guild_invite' in param and param['guild_invite'].strip():
join_guild = param['guild_invite'].strip()
if 'rto' in param:
rto = _rto
if rto > 10:
rto = 10
stats.update({
"lead": {
"current": rto,
"max": rto,
"max_dirty": rto
}
})
new_player.data.update({'stat': stats})
player_id = self.model.players.addNewPlayer(
new_player.data.copy(),
self.balance.starter_equip[param['class']], # starter equipment (UIDs)
join_guild
)
if self.model.players.isBetaPlayer(_user_id):
player_info = self.model.players.getPlayerRaw(_user_id, {'_id': 1, 'user_id': 1, 'name': 1})
self.model.items.unpackBetaItems(player_info)
self.model.spells.unpackBetaSpells(player_info)
self.model.misc.unpackBetaArtworks(player_info)
if 'referal' in param:
getReferalBonus(param['referal'], int(param['id']), param['login_name'])
try:
tweet_text = self.balance.REGISTRATION_TWEET + ' http://tweeria.com/invite?' + param['login_name']
self.model.players.postMentionInvite(player_id, tweet_text)
except Exception:
pass
del new_player
return self.authorizePlayer({'name': param['login_name'], 'user_id': param['id']}, to_invite_page=True)
else:
return self.redirectToTwitter()
def setSpellActive(self, params):
if 'id' in params:
id = params['id']
builtin = False
else:
id = params['uid']
builtin = True
active_spells = self.model.spells.getCountActiveSpells(self.cur_player['login_id'])
if active_spells < self.sbuilder.balance.MAX_ACTIVE_SPELLS:
result = self.model.spells.moveToBook(self.cur_player['login_id'], id, self.cur_player['login_lvl'],
builtin)
changed = result
else:
changed = False
returnHash = {"changed": changed}
return json.dumps(returnHash)
def setSpellInactive(self, params):
if 'id' in params:
id = params['id']
builtin = False
else:
id = params['uid']
builtin = True
self.model.spells.moveFromBook(self.cur_player['login_id'], id, builtin)
return json.dumps({"changed": True})
def getFriends(self, params):
max_players_on_page = 25
skip = 0
if 'skip' in params:
try:
skip = int(params['skip'])
except Exception:
pass
raw_friends = self.model.players.getFriends(self.cur_player['login_id'])
friends = []
count = 0
if raw_friends:
for friend in raw_friends:
if count >= skip and count < (skip + max_players_on_page):
friends.append({
'name': friend.screen_name,
'avatar': friend.profile_image_url,
'counte': count
})
count += 1
cherrypy.response.headers['Content-Type'] = "application/json"
return json.dumps(friends)
def resetHero(self, params):
if not self.cur_player:
return self.sbuilder.redirect('http://tweeria.com')
try:
class_id = int(params['class'])
faction_id, race_id = map(int, params['race'].split(':'))
except Exception:
return False
player = self.model.players.getPlayerBy_ID(self.cur_player['login_id'], {
'lvl': 1,
'resources': 1,
'exp': 1,
'stat.lead.current': 1
})
record = {
# lvl restrictions
'lvl': int(float(player['lvl'] / 2)),
'exp': 0,
'race': race_id,
'class': class_id,
'faction': faction_id,
'artworks': {},
'position': self.balance.started_locations[faction_id]
}
if record['lvl'] <= 0:
record['lvl'] = 1
stats = {}
for stat in self.balance.classes_stats[str(record['class'])]:
value = self.balance.classes_stats[str(record['class'])][stat] * record['lvl']
stats.update({stat: {'current': value, 'max': value, 'max_dirty': value}})
lead = player['stat']['lead']['current']
stats['lead'].update({'current': lead, 'max': lead, 'max_dirty': lead})
# resource restrictions
for res_name in player['resources']:
player['resources'][res_name] = int(float(player['resources'][res_name]) / 2)
record.update({
'resources': player['resources'],
'stat': stats
})
self.model.players.resetPlayerData(self.cur_player['login_id'], record)
self.httpRedirect(params, '?success=reset')
# --------------------------------------------------------------------------------------------------
# Print pages
def printPlayerPage(self, fields, params):
def getPlayerItems(player, fields):
all_items = self.model.players.getPlayerHaveItems(player['_id'])
items = {}
wealth_items = []
inventory = []
ring1_exists = False
if self.cur_player:
str_class = str(self.cur_player['login_class'])
else:
str_class = False
authors_ids = Set()
for item in all_items:
if "author" in item:
authors_ids.add(item['author'])
players_names = self.model.players.getPlayersList(authors_ids, ['_id', 'name'])
for item in all_items:
item['type'] = int(item['type'])
# item types of 100 and above are non-game items
# and should not be counted here
if item['type'] < 100:
if 'author' in item:
item.update({
'img': item['img'] + '_thumb.png',
'big_img': item['img'] + '_fit.png'
})
else:
item['img'] = '/data/items/' + item['img'] + '.jpg'
if item['equipped'] and item['type'] == 6:
if ring1_exists:
item['type'] = 66
else:
ring1_exists = True
if 'UID' in item and 'pooled_date' in item and item['img'][:2] != './':
item['img'] = '/data/items/' + item['img'][:-10] + '.jpg'
if 'UID' in item and not 'author' in item:
item_uid_str = str(int(item['UID']))
created_by_player = False
else:
item_uid_str = str(item['_id'])
item['color'] = 1
created_by_player = True
can_use_item = '0'
if self.cur_player and 'lvl_min' in item and int(item['lvl_min']) <= int(
self.cur_player['login_lvl']):
can_use_item = '1'
item.update(prettyItemBonus(item, self.balance.stats_name))
record = item
record.update({
'link': '/obj/1/' + item_uid_str + '/' + can_use_item,
'id': str(item['_id']),
'created_by_player': created_by_player,
})
if item['type'] == 1 and str_class:
if not item['view'] in self.sbuilder.balance.available_weapons[str_class]:
item['cant_use'] = True
if item['type'] == 1:
item['str_type'] = item['view']
else:
item['str_type'] = self.sbuilder.balance.item_types[item['type'] % 60]
for player in players_names:
if "author" in item and player['_id'] == item['author']:
item['author_name'] = player['name']
if item['equipped']:
items.update({'slot' + str(item['type']): record})
else:
inventory.append(record)
# shop items
else:
wealth_items.append(item)
fields.update({
'items': items,
'inventory': inventory,
'wealth': wealth_items
})
def getCurrentTitle(player, fields):
for title in player['titles']:
if 'current' in title and title['current']:
fields['player'].update({'current_title': title['name']})
fields['player'].update({'name_with_title': re.sub('\{player\}', player['name'], title['desc'])})
return True
fields['player'].update({'name_with_title': fields['player']['name']})
return False
def getEventsByPlayer(player, fields):
if self.cur_player and self.cur_player['login_utc_offset']:
utc_offset = self.cur_player['login_utc_offset']
else:
utc_offset = self.core.server_utc_offset
events = self.model.events.getEvents(
player_id=player['_id'],
query={'upcoming': 1},
fields={
'start_date': 1,
'guild_side_name': 1,
'sides_names': 1,
'target_name': 1,
'_id': 1,
'finish_date': 1,
'type': 1
}
)
current_time = time()
for event in events:
event.update({'start_date_f': getRelativeDate(int(event['start_date']) + utc_offset)})
if event['start_date'] <= current_time <= event['finish_date']:
fields.update({
'in_progress_event': event
})
fields.update({'events': events})
def getPlayerStats(player, fields):
static = self.model.players.getStatisticStaticForPrint()
buff_players_stats = self.model.players.getPlayerStatistics(player['user_id'])['stats']
player_stats = []
group = []
group_name = ''
for stat_static in static:
if stat_static['type'] == 'none':
if group_name:
player_stats.append({'name': group_name, 'stats': group})
group_name = stat_static['text']
group = []
else:
if stat_static['visibility']:
group.append({
'name': stat_static['text'],
'value': buff_players_stats[stat_static['name']]
})
fields.update({'statistics': player_stats})
def getPlayerAchvs(player, fields):
buff_player_achvs = self.model.players.getPlayerAchvs(player['user_id'])['achvs']
player_achvs = []
group = []
group_name = ''
for achv_static in self.static:
if achv_static['type'] == 0:
if group_name:
player_achvs.append({'name': group_name, 'achvs': group})
group_name = achv_static['name']
group = []
else:
if achv_static['visibility']:
group.append({
'name': achv_static['name'],
'complete': buff_player_achvs[str(achv_static['UID'])],
'UID': achv_static['UID'],
'text': achv_static['text'],
'img': achv_static['img']
})
player_achvs.append({'name': group_name, 'achvs': group})
fields.update({'achvs': player_achvs})
def getPlayerSpells(player, fields):
spellbook = self.model.spells.getSpellBook(player['_id'])
spells_ids = []
for item in spellbook['spells']:
if 'spell_UID' in item:
spells_ids.append(item['spell_UID'])
else:
spells_ids.append(item['spell_id'])
spells = self.model.spells.getSpellsByIds(spells_ids)
for spell in spells:
if 'author' in spell:
spell['img'] += '_thumb.png'
else:
spell['img'] = '/' + self.core.IMAGE_SPELL_FOLDER + spell['img'] + '.jpg'
fields.update({'spells': spells})
def getArtwork(player, fields):
# Get the artwork
is_artwork = False
artwork_path = ''
artwork_id = 0
if 'artworks' in player:
for artwork in player['artworks']:
if 'current' in artwork and artwork['current']:
if 'UID' in artwork:
artwork_path = self.core.ARTWORK_PATH + artwork['img'] + '.jpg'
else:
if artwork['img'] == './data/artwork_delete.jpg':
artwork_path = artwork['img']
else:
artwork_path = artwork['img'] + '_fit.png'
is_artwork = True
if '_id' in artwork:
artwork_id = artwork['_id']
break
else:
artwork_id = 'none'
if not is_artwork:
key = str(player['faction']) + str(player['race']) + str(player['class'])
if key in self.balance.default_artworks:
artwork_path = self.core.ARTWORK_PATH + self.balance.default_artworks[key]['src'] + '.jpg'
artwork_id = self.balance.default_artworks[key]['_id']
else:
fields.update({'default_artwork': True})
fields['player'].update({
'artwork': artwork_path,
'artwork_id': artwork_id
})
def getPlayerBuffs(player, fields):
fields.update({'stat_names': self.balance.stats_name})
inactive_count = 0
for buff in player['buffs']:
buff['type'] = 'buff'
buff['minutes'] = int(float(buff['start_time'] + buff['length'] - time()) / 60)
if buff['minutes'] > 0:
if 'buff_uid' in buff:
buff['buff_img'] = '/data/spells/' + buff['buff_img'] + '.jpg'
for action_name in buff['actions']:
if action_name in player['stat']:
player['stat'][action_name]['current'] += buff['actions'][action_name]
is_buff = buff['actions'][action_name] > 0
buff['actions'][action_name] = str(buff['actions'][action_name])
if is_buff:
buff['actions'][action_name] = '+' + buff['actions'][action_name]
else:
buff['type'] = 'debuff'
player['stat'][action_name]['change'] = is_buff
else:
inactive_count += 1
if inactive_count != 0 and inactive_count == len(fields['player']['buffs']):
fields['player']['buffs'] = []
def getNearPlayers(player, fields):
def miniFormatPlayers(players):
for player in players:
player.update({
'class_name': self.balance.classes[str(player['class'])],
'race_name': self.balance.races[player['faction']][player['race']],
})
return players
rad = 6
players_count = self.model.players.getNearPlayersCount(
player['position']['x'],
player['position']['y'],
rad,
player['name']
)
raw_records = self.model.players.getNearEnemies(rad, player)
enemies = miniFormatPlayers(sample(raw_records, min(5, len(raw_records))))
raw_records = self.model.players.getNearFriends(rad, player)
friends = miniFormatPlayers(sample(raw_records, min(5, len(raw_records))))
fields.update({
'nearby_players': {
'count': players_count,
'enemies': enemies,
'friends': friends
}
})
def getAuthorInfo(player, fields):
info = self.model.misc.getAuthorLikes(player['_id'], {'likes': 1})
if not info and 'ugc_enabled' in player and player['ugc_enabled']:
info = {'likes': 0}
if info:
fields.update({'author_info': info})
player = self.model.players.getPlayer(params['username'], fields='game')
if not player:
return self.sbuilder.throwWebError(7001)
getAuthorInfo(player, fields)
if 'works' in params:
fields.update({'player': player})
return self.printWorksPage(fields, params)
fields.update({self.title: player['name'] + '\'s profile'})
lvl_caps = self.model.getLvls()
cache_need_save = False
from_cache = False
if self.cur_player and player['name'] == self.cur_player['login_name']:
fields.update({'player_self': True})
if from_cache:
# fields = dict(loaded['content'].items() + fields.items())
pass
else:
fields.update({'player': player})
fields['player']['is_sleep'] = not (fields['player']['last_login'] >= time() - self.core.MAX_TIME_TO_SLEEP)
# format player's last events messages
tags = self.model.misc.getTags()
fields['player']['messages'] = getMessages(fields['player']['messages'], host=self.core.HOST, tags=tags)
if self.cur_player and 'login_id' in self.cur_player and player and self.cur_player['login_id'] == player['_id']:
getEventsByPlayer(player, fields)
getPlayerSpells(player, fields)
getPlayerBuffs(player, fields)
getPlayerItems(player, fields)
getCurrentTitle(player, fields)
getPlayerStats(player, fields)
getPlayerAchvs(player, fields)
if self.cur_player and self.cur_player['login_id'] == player['_id']:
getNearPlayers(player, fields)
getPlayerSpells(player, fields)
if 'pvp' in player and player['pvp'] == 1:
fields.update({'pvp_mode': 1})
fields['player'].update({
'exp_level_cap': '',
'exp_percent': 0
})
if int(player['lvl']) != self.balance.max_lvl:
lvl_cap = lvl_caps[str(fields['player']['lvl'] + 1)]
fields['player'].update({
'exp_level_cap': str(player['exp']) + ' / ' + str(lvl_cap),
'exp_percent': int((float(player['exp']) / float(lvl_cap)) * 100)
})
fields['player'].update({
'HP_percent': int(float(fields['player']['stat']['HP']['current']) / fields['player']['stat']['HP']['max_dirty'] * 100),
'MP_percent': int(float(fields['player']['stat']['MP']['current']) / fields['player']['stat']['MP']['max_dirty'] * 100)
}
)
# Get the race name
fields['player']['race_name'] = self.balance.races[fields['player']['faction']][fields['player']['race']]
# Get the class name
fields['player']['class_name'] = self.balance.classes[str(fields['player']['class'])]
# Get the sex name
fields['player']['sex_name'] = ['Female', 'Male'][fields['player']['sex']]
getArtwork(player, fields)
# get the guild
guild = self.getPlayersGuild(player['_id'])
if guild:
fields.update({'guild': {'name': guild['name'], 'link': guild['link_name']}})
# Get the damage type
fields['player']['damage_type'] = self.balance.damage_type[str(fields['player']['class'])]
if self.cur_player:
inventory_count = self.model.items.getInventoryCount(self.cur_player['login_id'])
else:
inventory_count = 0
fields.update({
'help': 'help' in params,
'inventory_count': inventory_count,
'player_coords': self.core.relativePosition(player['position'])
})
if cache_need_save:
self.cache.cacheSave('!' + player['name'], content=fields)
return basic.defaultController._printTemplate(self, 'player', fields)
def printWorksPage(self, fields, params):
def getLikesDict(items_ids):
buff_item_likes = self.model.items.getItemsLikes(items_ids)
item_likes = {}
for item_like in buff_item_likes:
item_likes.update({
str(item_like['item_id']):
{
'count': len(item_like['people']),
'people': item_like['people']
}
})
return item_likes
def getLike(item_likes, _id):
str_id = str(_id)
record = {
'likes': 0,
'is_like': False
}
if str_id in item_likes:
record['likes'] = item_likes[str_id]['count']
if self.cur_player:
record['is_like'] = self.cur_player['login_id'] in item_likes[str_id]['people']
return record
def formatArtworks(likes, artworks):
for artwork in artworks:
artwork.update(getLike(likes, artwork['_id']))
return miscController.formatArtworks(self, artworks)
def formatItems(likes, items):
for item in items:
item['img'] = '/' + item['img'] + '_fit.png'
item['author_name'] = player['name']
item.update(prettyItemBonus(item, self.balance.stats_name))
if "stat_parsed" in item:
item.update({"bonus_parsed": json.dumps(item['stat_parsed'])})
if "img" in item:
item.update({"share_img": item["img"][3:]})
item.update(getLike(likes, item['_id']))
return items
def formatSpells(likes, spells):
for spell in spells:
spell['author_name'] = player['name']
spell['img'] += '_fit.png'
if "spell_actions" in spell:
for action in spell["spell_actions"]:
if action["effect"].upper() in self.balance.stats_name:
stat = action["effect"].upper()
else:
stat = action["effect"].lower()
action.update({
"stat_name": self.balance.stats_name[stat]
})
spell.update(getLike(likes, spell['_id']))
return spells
player = fields['player']
if not ('ugc_enabled' in player and player['ugc_enabled']):
return self.sbuilder.throwWebError(404)
fields.update({self.title: player['name'] + '\'s portfolio'})
artworks = self.model.misc.getActiveArtworksByPlayer(player['_id'])
items = self.model.items.getActiveItemsByPlayer(player['_id'])
spells = self.model.spells.getActiveSpellsPattern(player['_id'])
_ids = Set()
for thing in artworks + items + spells:
_ids.add(thing['_id'])
_likes = getLikesDict(_ids)
fields.update({
'items': formatItems(_likes, items),
'spells': formatSpells(_likes, spells),
'artworks': formatArtworks(_likes, artworks),
'stat_names': self.balance.stats_name
})
return basic.defaultController._printTemplate(self, 'works', fields)
def printTopList(self, fields, params):
fields.update({self.title: 'Top'})
needed_fields = {'name': 1, 'class': 1, 'race': 1, 'lvl': 1, 'faction': 1, 'pvp_score': 1, 'achv_points': 1,
'avatar': 1, '_guild_name': 1}
no_60lvl = {'lvl': {'$lte': 60}}
players_by_lvl = self.mongo.getu('players', search=no_60lvl, limit=10, sort={'lvl': -1}, fields=needed_fields)
for players in [players_by_lvl]:
for player in players:
player.update({
'class_name': self.balance.classes[str(player['class'])],
'race_name': self.balance.races[player['faction']][player['race']],
})
player['pvp_score'] = int(player['pvp_score'])
player['achv_points'] = int(player['achv_points'])
player['lvl'] = int(player['lvl'])
top_players_guilds = self.model.guilds.getTopGuildsByPeopleCount(10)
if self.cur_player:
guild = self.getPlayersGuild(self.cur_player['login_id'])
if guild:
guild = guildsController.formatGuilds(self, [guild])[0]
fields.update({
'your_guild': guild
})
fields.update({
'top_by_lvl': players_by_lvl,
'top_popular_guilds': guildsController.formatGuilds(self, top_players_guilds)
})
return basic.defaultController._printTemplate(self, 'top', fields)
def printTopAuthors(self, fields, params):
fields.update({self.title: 'Top authors'})
def getPaginatorData(players_on_page):
players_count = self.model.misc.getAuthorsCount()
pages = int(math.ceil(float(players_count) / players_on_page))
fields.update({
'total_pages': pages
})
def getSortParams():
if not 'pi' in params:
fields.update({'param_pi': 1})
try:
page_number = int(params['pi'])
except Exception:
page_number = 1
return {
'page_number': page_number,
'sort_field': '',
'sort_order': ''
}
authors_on_page = 20
getPaginatorData(authors_on_page)
sort_params = getSortParams()
authors = self.model.misc.getAuthorsLikes(
authors_on_page,
skip=(sort_params['page_number'] - 1) * authors_on_page
)
author_ids = Set()
for author in authors:
author_ids.add(author['author_id'])
authors_info = self.model.players.getPlayersList2(author_ids, {'name': 1, '_guild_name': 1, 'lvl': 1})
authors_guilds = {}
for author in authors_info:
authors_guilds.update({author['name']: author})
for author in authors:
author.update({
'_guild_name': authors_guilds[author['author_name']]['_guild_name'],
'lvl': authors_guilds[author['author_name']]['lvl']
})
fields.update({
'authors': authors,
'display_pages': getDisplayPages(int(fields['param_pi']), fields['total_pages'], 10)
})
return basic.defaultController._printTemplate(self, 'all_authors', fields)
def printSpellbook(self, fields, params):
fields.update({self.title: 'Spellbook'})
if not self.cur_player:
return self.sbuilder.redirect('../')
fields.update({'stat_names': self.balance.stats_name})
if 'type_of_form' in params and params['type_of_form'] in ["equip_item", "sell_item", "move_spell_to_book",
"move_spell_from_book"]:
fields.update({"result": self.query_result})
self.query_result = False
return self.sbuilder.loadTemplate(self.DIR + 'player-ajax.jinja2', fields)
spellbook = self.model.spells.getSpellBook(self.cur_player['login_id'])
available_spells = self.model.spells.getAvailableStandartSpells(self.cur_player['login_lvl'])
buyed_spells = self.model.spells.getBuyedSpells(self.cur_player['login_id'])
for spell in available_spells:
spell['img'] = '/data/spells/' + spell['img'] + '.jpg'
if buyed_spells:
available_spells += buyed_spells
for tmp_spell in available_spells:
for spell_info in spellbook['spells']:
if 'UID' in tmp_spell and spell_info['spell_id'] == tmp_spell['UID'] or spell_info['spell_id'] == tmp_spell['_id']:
tmp_spell.update({'active': True})
tmp_spell['can_use'] = tmp_spell['lvl_min'] <= self.cur_player['login_lvl']
fields.update({'spells': available_spells})
return basic.defaultController._printTemplate(self, 'spellbook', fields)
def printInviteCenter(self, fields, params):
if not self.cur_player:
return self.sbuilder.redirect('http://tweeria.com')
#fields.update({'friends': self.getFriends(params)})
return basic.defaultController._printTemplate(self, 'invite_center', fields)
def printSettings(self, fields, params):
if not self.cur_player:
return self.sbuilder.redirect('http://tweeria.com')
player_info = self.model.players.getPlayerBy_ID(self.cur_player['login_id'], {
'pvp': 1,
'titles': 1,
'artworks': 1,
'_id': 1,
'race': 1,
'faction': 1,
'class': 1,
'sex': 1,
'post_to_twitter': 1
})
# Get the race, class and sex names
player_info.update({
'race_name': self.balance.races[player_info['faction']][player_info['race']],
'class_name': self.balance.classes[str(player_info['class'])],
'sex_name': ['Female', 'Male'][player_info['sex']]
})
is_artwork = False
if 'artworks' in player_info:
for artwork in player_info['artworks']:
if 'current' in artwork and artwork['current']:
is_artwork = True
if not is_artwork:
fields.update({'default_artwork': True})
fields.update(player_info)
fields.update(self.balance.classes_and_races)
return basic.defaultController._printTemplate(self, 'settings', fields)
data = {
'class': playerController,
'type': ['default'],
'urls': ['top', 'registration', 'new', 'spellbook', 'inv', 'settings', 'authors']
}
|
[
"alex.shteinikov@gmail.com"
] |
alex.shteinikov@gmail.com
|
83372bea64fdb4871d2df6d228c8bcf2e665d059
|
4c9580b2e09e2b000e27a1c9021b12cf2747f56a
|
/chapter03/books/migrations/0001_initial.py
|
b438ba725fa795db838cd03b02cd0b3cf499c4dc
|
[] |
no_license
|
jzplyy/xiaoyue_mall
|
69072c0657a6878a4cf799b8c8218cc7d88c8d12
|
4f9353d6857d1bd7dc54151ca8b34dcb4671b8dc
|
refs/heads/master
| 2023-06-26T02:48:03.103635
| 2021-07-22T15:51:07
| 2021-07-22T15:51:07
| 388,514,311
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 887
|
py
|
# Generated by Django 2.2 on 2020-11-04 02:12
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='BookInfo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=20, verbose_name='名称')),
('pub_date', models.DateField(verbose_name='发布日期')),
('readcount', models.IntegerField(default=0, verbose_name='阅读量')),
('commentcount', models.IntegerField(default=0, verbose_name='评论量')),
('is_delete', models.BooleanField(default=False, verbose_name='逻辑删除')),
],
),
]
|
[
"jzplyy@126.com"
] |
jzplyy@126.com
|
e7d3f66bb4533ea1ce904edcdd4759047c80cb24
|
ae3d81f1e78b628a9917f35e691acef485f7287e
|
/Python/reverse.py
|
ee11c6515e31f8cd1a81260731b9d48a897ba438
|
[] |
no_license
|
Stanwar/Code_Academy
|
3499d127b179b82496347bb0fbcb1fd1dcdcfb72
|
3d218c84f87bec2fc9ee35d7692f4546dbdd4096
|
refs/heads/master
| 2020-04-29T03:40:59.072515
| 2015-08-11T03:25:30
| 2015-08-11T03:25:30
| 40,500,665
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 176
|
py
|
def reverse(text):
result = ""
for i in range(1,len(text)+1):
result = result + (text[len(text)-i])
print result
return str(result)
reverse('abcd')
|
[
"sharad_Tanwar@outlook.com"
] |
sharad_Tanwar@outlook.com
|
8f6f823d5574cd3ae68bdfdd3c962695662a5115
|
61eac26b73015c5af29768cbdb103334b73d2e81
|
/actas/migrations/0014_auto_20160908_1409.py
|
968b004ad74bf2de00398e6a37f8342da4bb9a28
|
[] |
no_license
|
nwvaras/Discusion-Abierta
|
08af886449078f97fd8c3dbb910e077acae612f7
|
81a47dce9b4be4d18a060d01bc0666c1cfd3a073
|
refs/heads/master
| 2021-01-22T13:13:11.505839
| 2017-04-11T11:30:09
| 2017-04-11T11:30:09
| 67,081,722
| 0
| 2
| null | 2016-08-31T23:46:10
| 2016-08-31T23:46:10
| null |
UTF-8
|
Python
| false
| false
| 485
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-09-08 14:09
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('actas', '0013_auto_20160907_2137'),
]
operations = [
migrations.AlterField(
model_name='encuentro',
name='hash_search',
field=models.UUIDField(default=b'e39b001475cd11e69768645a04c2662c'),
),
]
|
[
"nsdelgadov@gmail.com"
] |
nsdelgadov@gmail.com
|
a0ed9b01ff6ad8ebf0668267d6c250b2ab2dbe00
|
da4c10ba7f7a499b5192eb82e49fe0a678cc6e82
|
/yelp/items.py
|
0ffa58216ac6ed18013a6bef7a563d66668e5c8c
|
[] |
no_license
|
symoon94/yelp-scrapy
|
7fa90bf22f6b490209d68b8258a44692dea77eb2
|
a42c0fd0411c952951959a1a664ae0778628f268
|
refs/heads/master
| 2020-12-14T21:45:46.094017
| 2020-01-26T00:11:52
| 2020-01-26T00:11:52
| 234,879,414
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 779
|
py
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy
class Place(scrapy.Item):
url = scrapy.Field()
lat = scrapy.Field()
lon = scrapy.Field()
searchActions = scrapy.Field()
allPhotosHref = scrapy.Field()
reviewCount = scrapy.Field()
name = scrapy.Field()
rating = scrapy.Field()
phone = scrapy.Field()
    photoHref = scrapy.Field()
formattedAddress = scrapy.Field()
categories = scrapy.Field()
reviews = scrapy.Field()
class Review(scrapy.Item):
ratingValue = scrapy.Field()
datePublished = scrapy.Field()
url = scrapy.Field()
|
[
"msy0128@gmail.com"
] |
msy0128@gmail.com
|
9c08a5c942654c7f869124ca9cccd78e9d54a5de
|
ebcd8c5360cbfe8ed50d5332fbc665321b87de88
|
/module_meshes.py
|
1a37899a2a1f8131e48c53bfac6d6d4ae4e0ce96
|
[
"Unlicense"
] |
permissive
|
Ikaguia/LWBR-WarForge
|
dabe7a3d4dc9251edddc0df5a0d06d23756e4382
|
0099fe20188b2dbfff237e8690ae54c33671656f
|
refs/heads/master
| 2021-05-07T17:56:43.982163
| 2018-03-06T02:24:49
| 2018-03-06T02:24:49
| 108,722,885
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 41,206
|
py
|
from compiler import *
####################################################################################################################
# Each mesh record contains the following fields:
# 1) Mesh id: used for referencing meshes in other files. The prefix mesh_ is automatically added before each mesh id.
# 2) Mesh flags. See header_meshes.py for a list of available flags
# 3) Mesh resource name: Resource name of the mesh
# 4) Mesh translation on x axis: Will be done automatically when the mesh is loaded
# 5) Mesh translation on y axis: Will be done automatically when the mesh is loaded
# 6) Mesh translation on z axis: Will be done automatically when the mesh is loaded
# 7) Mesh rotation angle over x axis: Will be done automatically when the mesh is loaded
# 8) Mesh rotation angle over y axis: Will be done automatically when the mesh is loaded
# 9) Mesh rotation angle over z axis: Will be done automatically when the mesh is loaded
# 10) Mesh x scale: Will be done automatically when the mesh is loaded
# 11) Mesh y scale: Will be done automatically when the mesh is loaded
# 12) Mesh z scale: Will be done automatically when the mesh is loaded
####################################################################################################################
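# As a quick illustration of the fields above (hypothetical record, not used by the game):
#   ("my_flag", 0, "my_flag_res", 0, 0, 0, -90, 0, 0, 1, 1, 1)
# declares mesh_my_flag with no flags, loads the resource "my_flag_res",
# applies no translation, rotates it -90 degrees around the x axis,
# and keeps unit scale on all three axes.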
meshes = [
("pic_bandits", 0, "pic_bandits", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_mb_warrior_1", 0, "pic_mb_warrior_1", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_messenger", 0, "pic_messenger", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_prisoner_man", 0, "pic_prisoner_man", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_prisoner_fem", 0, "pic_prisoner_fem", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_prisoner_wilderness", 0, "pic_prisoner_wilderness", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_siege_sighted", 0, "pic_siege_sighted", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_siege_sighted_fem", 0, "pic_siege_sighted_fem", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_camp", 0, "pic_camp", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_payment", 0, "pic_payment", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_escape_1", 0, "pic_escape_1", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_escape_1_fem", 0, "pic_escape_1_fem", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_victory", 0, "pic_victory", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_defeat", 0, "pic_defeat", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_wounded", 0, "pic_wounded", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_wounded_fem", 0, "pic_wounded_fem", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_steppe_bandits", 0, "pic_steppe_bandits", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_mountain_bandits", 0, "pic_mountain_bandits", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_sea_raiders", 0, "pic_sea_raiders", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_deserters", 0, "pic_deserters", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_forest_bandits", 0, "pic_forest_bandits", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_cattle", 0, "pic_cattle", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_looted_village", 0, "pic_looted_village", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_village_p", 0, "pic_village_p", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_village_s", 0, "pic_village_s", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_village_w", 0, "pic_village_w", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_recruits", 0, "pic_recruits", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_arms_swadian", 0, "pic_arms_swadian", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_arms_vaegir", 0, "pic_arms_vaegir", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_arms_khergit", 0, "pic_arms_khergit", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_arms_nord", 0, "pic_arms_nord", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_arms_rhodok", 0, "pic_arms_rhodok", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_sarranid_arms", 0, "pic_sarranid_arms", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_castle1", 0, "pic_castle1", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_castledes", 0, "pic_castledes", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_castlesnow", 0, "pic_castlesnow", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_charge", 0, "pic_charge", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_khergit", 0, "pic_khergit", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_nord", 0, "pic_nord", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_rhodock", 0, "pic_rhodock", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_sally_out", 0, "pic_sally_out", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_siege_attack", 0, "pic_siege_attack", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_swad", 0, "pic_swad", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_town1", 0, "pic_town1", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_towndes", 0, "pic_towndes", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_townriot", 0, "pic_townriot", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_townsnow", 0, "pic_townsnow", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_vaegir", 0, "pic_vaegir", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_villageriot", 0, "pic_villageriot", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("pic_sarranid_encounter", 0, "pic_sarranid_encounter", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_score_a", 0, "mp_score_a", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_score_b", 0, "mp_score_b", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("portrait_blend_out", 0, "portrait_blend_out", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("load_window", 0, "load_window", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("checkbox_off", render_order_plus_1, "checkbox_off", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("checkbox_on", render_order_plus_1, "checkbox_on", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("white_plane", 0, "white_plane", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("white_dot", 0, "white_dot", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("player_dot", 0, "player_dot", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("flag_infantry", 0, "flag_infantry", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("flag_archers", 0, "flag_archers", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("flag_cavalry", 0, "flag_cavalry", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("inv_slot", 0, "inv_slot", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_ingame_menu", 0, "mp_ingame_menu", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_inventory_left", 0, "mp_inventory_left", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_inventory_right", 0, "mp_inventory_right", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_inventory_choose", 0, "mp_inventory_choose", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_inventory_slot_glove", 0, "mp_inventory_slot_glove", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_inventory_slot_horse", 0, "mp_inventory_slot_horse", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_inventory_slot_armor", 0, "mp_inventory_slot_armor", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_inventory_slot_helmet", 0, "mp_inventory_slot_helmet", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_inventory_slot_boot", 0, "mp_inventory_slot_boot", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_inventory_slot_empty", 0, "mp_inventory_slot_empty", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_inventory_slot_equip", 0, "mp_inventory_slot_equip", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_inventory_left_arrow", 0, "mp_inventory_left_arrow", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_inventory_right_arrow", 0, "mp_inventory_right_arrow", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_ui_host_main", 0, "mp_ui_host_main", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_ui_host_maps_1", 0, "mp_ui_host_maps_a1", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_ui_host_maps_2", 0, "mp_ui_host_maps_a2", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_ui_host_maps_3", 0, "mp_ui_host_maps_c", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_ui_host_maps_4", 0, "mp_ui_host_maps_ruinedf", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_ui_host_maps_5", 0, "mp_ui_host_maps_a1", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_ui_host_maps_6", 0, "mp_ui_host_maps_a1", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_ui_host_maps_7", 0, "mp_ui_host_maps_fieldby", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_ui_host_maps_8", 0, "mp_ui_host_maps_castle2", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_ui_host_maps_9", 0, "mp_ui_host_maps_snovyv", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_ui_host_maps_10", 0, "mp_ui_host_maps_castle3", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_ui_host_maps_11", 0, "mp_ui_host_maps_c1", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_ui_host_maps_12", 0, "mp_ui_host_maps_c2", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_ui_host_maps_13", 0, "mp_ui_host_maps_c3", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_ui_host_maps_randomp", 0, "mp_ui_host_maps_randomp", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_ui_host_maps_randoms", 0, "mp_ui_host_maps_randoms", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_ui_command_panel", 0, "mp_ui_command_panel", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_ui_command_border_l", 0, "mp_ui_command_border_l", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_ui_command_border_r", 0, "mp_ui_command_border_r", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_ui_welcome_panel", 0, "mp_ui_welcome_panel", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("flag_project_sw", 0, "flag_project_sw", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("flag_project_vg", 0, "flag_project_vg", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("flag_project_kh", 0, "flag_project_kh", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("flag_project_nd", 0, "flag_project_nd", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("flag_project_rh", 0, "flag_project_rh", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("flag_project_sr", 0, "flag_project_sr", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("flag_projects_end", 0, "0", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("flag_project_sw_miss", 0, "flag_project_sw_miss", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("flag_project_vg_miss", 0, "flag_project_vg_miss", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("flag_project_kh_miss", 0, "flag_project_kh_miss", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("flag_project_nd_miss", 0, "flag_project_nd_miss", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("flag_project_rh_miss", 0, "flag_project_rh_miss", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("flag_project_sr_miss", 0, "flag_project_sr_miss", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("flag_project_misses_end", 0, "0", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("color_picker", 0, "color_picker", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("custom_map_banner_01", 0, "custom_map_banner_01", 0, 0, 0, -90, 0, 90, 1, 1, 1),
("custom_map_banner_02", 0, "custom_map_banner_02", 0, 0, 0, -90, 0, 90, 1, 1, 1),
("custom_map_banner_03", 0, "custom_map_banner_03", 0, 0, 0, -90, 0, 90, 1, 1, 1),
("custom_banner_01", 0, "custom_banner_01", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("custom_banner_02", 0, "custom_banner_02", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("custom_banner_bg", 0, "custom_banner_bg", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_fg01", 0, "custom_banner_fg01", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_fg02", 0, "custom_banner_fg02", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_fg03", 0, "custom_banner_fg03", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_fg04", 0, "custom_banner_fg04", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_fg05", 0, "custom_banner_fg05", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_fg06", 0, "custom_banner_fg06", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_fg07", 0, "custom_banner_fg07", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_fg08", 0, "custom_banner_fg08", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_fg09", 0, "custom_banner_fg09", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_fg10", 0, "custom_banner_fg10", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_fg11", 0, "custom_banner_fg11", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_fg12", 0, "custom_banner_fg12", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_fg13", 0, "custom_banner_fg13", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_fg14", 0, "custom_banner_fg14", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_fg15", 0, "custom_banner_fg15", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_fg16", 0, "custom_banner_fg16", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_fg17", 0, "custom_banner_fg17", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_fg18", 0, "custom_banner_fg18", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_fg19", 0, "custom_banner_fg19", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_fg20", 0, "custom_banner_fg20", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_fg21", 0, "custom_banner_fg21", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_fg22", 0, "custom_banner_fg22", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_fg23", 0, "custom_banner_fg23", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_01", 0, "custom_banner_charge_01", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_02", 0, "custom_banner_charge_02", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_03", 0, "custom_banner_charge_03", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_04", 0, "custom_banner_charge_04", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_05", 0, "custom_banner_charge_05", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_06", 0, "custom_banner_charge_06", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_07", 0, "custom_banner_charge_07", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_08", 0, "custom_banner_charge_08", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_09", 0, "custom_banner_charge_09", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_10", 0, "custom_banner_charge_10", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_11", 0, "custom_banner_charge_11", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_12", 0, "custom_banner_charge_12", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_13", 0, "custom_banner_charge_13", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_14", 0, "custom_banner_charge_14", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_15", 0, "custom_banner_charge_15", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_16", 0, "custom_banner_charge_16", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_17", 0, "custom_banner_charge_17", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_18", 0, "custom_banner_charge_18", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_19", 0, "custom_banner_charge_19", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_20", 0, "custom_banner_charge_20", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_21", 0, "custom_banner_charge_21", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_22", 0, "custom_banner_charge_22", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_23", 0, "custom_banner_charge_23", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_24", 0, "custom_banner_charge_24", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_25", 0, "custom_banner_charge_25", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_26", 0, "custom_banner_charge_26", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_27", 0, "custom_banner_charge_27", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_28", 0, "custom_banner_charge_28", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_29", 0, "custom_banner_charge_29", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_30", 0, "custom_banner_charge_30", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_31", 0, "custom_banner_charge_31", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_32", 0, "custom_banner_charge_32", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_33", 0, "custom_banner_charge_33", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_34", 0, "custom_banner_charge_34", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_35", 0, "custom_banner_charge_35", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_36", 0, "custom_banner_charge_36", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_37", 0, "custom_banner_charge_37", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_38", 0, "custom_banner_charge_38", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_39", 0, "custom_banner_charge_39", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_40", 0, "custom_banner_charge_40", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_41", 0, "custom_banner_charge_41", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_42", 0, "custom_banner_charge_42", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_43", 0, "custom_banner_charge_43", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_44", 0, "custom_banner_charge_44", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_45", 0, "custom_banner_charge_45", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("custom_banner_charge_46", 0, "custom_banner_charge_46", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("tableau_mesh_custom_banner", 0, "tableau_mesh_custom_banner", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("tableau_mesh_custom_banner_square", 0, "tableau_mesh_custom_banner_square", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("tableau_mesh_custom_banner_tall", 0, "tableau_mesh_custom_banner_tall", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("tableau_mesh_custom_banner_short", 0, "tableau_mesh_custom_banner_short", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("tableau_mesh_shield_round_1", 0, "tableau_mesh_shield_round_1", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("tableau_mesh_shield_round_2", 0, "tableau_mesh_shield_round_2", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("tableau_mesh_shield_round_3", 0, "tableau_mesh_shield_round_3", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("tableau_mesh_shield_round_4", 0, "tableau_mesh_shield_round_4", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("tableau_mesh_shield_round_5", 0, "tableau_mesh_shield_round_5", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("tableau_mesh_shield_small_round_1", 0, "tableau_mesh_shield_small_round_1", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("tableau_mesh_shield_small_round_2", 0, "tableau_mesh_shield_small_round_2", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("tableau_mesh_shield_small_round_3", 0, "tableau_mesh_shield_small_round_3", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("tableau_mesh_shield_kite_1", 0, "tableau_mesh_shield_kite_1", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("tableau_mesh_shield_kite_2", 0, "tableau_mesh_shield_kite_2", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("tableau_mesh_shield_kite_3", 0, "tableau_mesh_shield_kite_3", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("tableau_mesh_shield_kite_4", 0, "tableau_mesh_shield_kite_4", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("tableau_mesh_shield_heater_1", 0, "tableau_mesh_shield_heater_1", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("tableau_mesh_shield_heater_2", 0, "tableau_mesh_shield_heater_2", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("tableau_mesh_shield_pavise_1", 0, "tableau_mesh_shield_pavise_1", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("tableau_mesh_shield_pavise_2", 0, "tableau_mesh_shield_pavise_2", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("heraldic_armor_bg", 0, "heraldic_armor_bg", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("tableau_mesh_heraldic_armor_a", 0, "tableau_mesh_heraldic_armor_a", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("tableau_mesh_heraldic_armor_b", 0, "tableau_mesh_heraldic_armor_b", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("tableau_mesh_heraldic_armor_c", 0, "tableau_mesh_heraldic_armor_c", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("tableau_mesh_heraldic_armor_d", 0, "tableau_mesh_heraldic_armor_d", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("outer_terrain_plain_1", 0, "ter_border_a", -90, 0, 0, 0, 0, 0, 1, 1, 1),
("banner_a01", 0, "banner_a01", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_a02", 0, "banner_a02", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_a03", 0, "banner_a03", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_a04", 0, "banner_a04", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_a05", 0, "banner_a05", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_a06", 0, "banner_a06", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_a07", 0, "banner_a07", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_a08", 0, "banner_a08", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_a09", 0, "banner_a09", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_a10", 0, "banner_a10", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_a11", 0, "banner_a11", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_a12", 0, "banner_a12", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_a13", 0, "banner_a13", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_a14", 0, "banner_a14", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_a15", 0, "banner_f21", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_a16", 0, "banner_a16", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_a17", 0, "banner_a17", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_a18", 0, "banner_a18", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_a19", 0, "banner_a19", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_a20", 0, "banner_a20", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_a21", 0, "banner_a21", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_b01", 0, "banner_b01", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_b02", 0, "banner_b02", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_b03", 0, "banner_b03", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_b04", 0, "banner_b04", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_b05", 0, "banner_b05", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_b06", 0, "banner_b06", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_b07", 0, "banner_b07", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_b08", 0, "banner_b08", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_b09", 0, "banner_b09", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_b10", 0, "banner_b10", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_b11", 0, "banner_b11", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_b12", 0, "banner_b12", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_b13", 0, "banner_b13", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_b14", 0, "banner_b14", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_b15", 0, "banner_b15", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_b16", 0, "banner_b16", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_b17", 0, "banner_b17", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_b18", 0, "banner_b18", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_b19", 0, "banner_b19", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_b20", 0, "banner_b20", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_b21", 0, "banner_b21", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_c01", 0, "banner_c01", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_c02", 0, "banner_c02", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_c03", 0, "banner_c03", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_c04", 0, "banner_c04", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_c05", 0, "banner_c05", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_c06", 0, "banner_c06", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_c07", 0, "banner_c07", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_c08", 0, "banner_c08", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_c09", 0, "banner_c09", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_c10", 0, "banner_c10", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_c11", 0, "banner_c11", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_c12", 0, "banner_c12", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_c13", 0, "banner_c13", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_c14", 0, "banner_c14", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_c15", 0, "banner_c15", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_c16", 0, "banner_c16", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_c17", 0, "banner_c17", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_c18", 0, "banner_c18", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_c19", 0, "banner_c19", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_c20", 0, "banner_c20", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_c21", 0, "banner_c21", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_d01", 0, "banner_d01", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_d02", 0, "banner_d02", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_d03", 0, "banner_d03", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_d04", 0, "banner_d04", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_d05", 0, "banner_d05", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_d06", 0, "banner_d06", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_d07", 0, "banner_d07", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_d08", 0, "banner_d08", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_d09", 0, "banner_d09", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_d10", 0, "banner_d10", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_d11", 0, "banner_d11", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_d12", 0, "banner_d12", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_d13", 0, "banner_d13", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_d14", 0, "banner_d14", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_d15", 0, "banner_d15", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_d16", 0, "banner_d16", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_d17", 0, "banner_d17", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_d18", 0, "banner_d18", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_d19", 0, "banner_d19", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_d20", 0, "banner_d20", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_d21", 0, "banner_d21", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_e01", 0, "banner_e01", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_e02", 0, "banner_e02", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_e03", 0, "banner_e03", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_e04", 0, "banner_e04", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_e05", 0, "banner_e05", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_e06", 0, "banner_e06", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_e07", 0, "banner_e07", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_e08", 0, "banner_e08", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_e09", 0, "banner_e09", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_e10", 0, "banner_e10", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_e11", 0, "banner_e11", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_e12", 0, "banner_e12", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_e13", 0, "banner_e13", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_e14", 0, "banner_e14", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_e15", 0, "banner_e15", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_e16", 0, "banner_e16", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_e17", 0, "banner_e17", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_e18", 0, "banner_e18", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_e19", 0, "banner_e19", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_e20", 0, "banner_e20", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_e21", 0, "banner_e21", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_f01", 0, "banner_f01", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_f02", 0, "banner_f02", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_f03", 0, "banner_f03", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_f04", 0, "banner_f04", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_f05", 0, "banner_f05", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_f06", 0, "banner_f06", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_f07", 0, "banner_f07", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_f08", 0, "banner_f08", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_f09", 0, "banner_f09", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_f10", 0, "banner_f10", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_f11", 0, "banner_f11", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_f12", 0, "banner_f12", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_f13", 0, "banner_f13", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_f14", 0, "banner_f14", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_f15", 0, "banner_f15", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_f16", 0, "banner_f16", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_f17", 0, "banner_f17", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_f18", 0, "banner_f18", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_f19", 0, "banner_f19", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_f20", 0, "banner_f20", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_g01", 0, "banner_f01", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_g02", 0, "banner_f02", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_g03", 0, "banner_f03", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_g04", 0, "banner_f04", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_g05", 0, "banner_f05", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_g06", 0, "banner_f06", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_g07", 0, "banner_f07", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_g08", 0, "banner_f08", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_g09", 0, "banner_f09", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_g10", 0, "banner_f10", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_kingdom_a", 0, "banner_kingdom_a", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_kingdom_b", 0, "banner_kingdom_b", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_kingdom_c", 0, "banner_kingdom_c", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_kingdom_d", 0, "banner_kingdom_d", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_kingdom_e", 0, "banner_kingdom_e", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_kingdom_f", 0, "banner_kingdom_f", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banner_f21", 0, "banner_a15", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_a01", 0, "arms_a01", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_a02", 0, "arms_a02", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_a03", 0, "arms_a03", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_a04", 0, "arms_a04", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_a05", 0, "banner_a05", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_a06", 0, "arms_a06", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_a07", 0, "banner_a07", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_a08", 0, "arms_a08", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_a09", 0, "banner_a09", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_a10", 0, "banner_a10", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_a11", 0, "banner_a11", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_a12", 0, "arms_a12", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_a13", 0, "arms_a13", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_a14", 0, "banner_a14", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_a15", 0, "banner_f21", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_a16", 0, "arms_a16", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_a17", 0, "arms_a17", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_a18", 0, "arms_a18", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_a19", 0, "arms_a19", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_a20", 0, "arms_a20", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_a21", 0, "arms_a21", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_b01", 0, "arms_b01", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_b02", 0, "arms_b02", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_b03", 0, "banner_b03", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_b04", 0, "banner_b04", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_b05", 0, "arms_b05", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_b06", 0, "arms_b06", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_b07", 0, "arms_b07", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_b08", 0, "arms_b08", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_b09", 0, "arms_b09", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_b10", 0, "arms_b10", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_b11", 0, "banner_b11", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_b12", 0, "banner_b12", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_b13", 0, "banner_b13", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_b14", 0, "arms_b14", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_b15", 0, "arms_b15", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_b16", 0, "arms_b16", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_b17", 0, "banner_b17", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_b18", 0, "arms_b18", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_b19", 0, "banner_b19", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_b20", 0, "arms_b20", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_b21", 0, "banner_b21", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_c01", 0, "arms_c01", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_c02", 0, "banner_c02", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_c03", 0, "banner_c03", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_c04", 0, "arms_c04", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_c05", 0, "banner_c05", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_c06", 0, "arms_c06", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_c07", 0, "arms_c07", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_c08", 0, "banner_c08", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_c09", 0, "banner_c09", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_c10", 0, "arms_c10", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_c11", 0, "banner_c11", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_c12", 0, "arms_c12", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_c13", 0, "arms_c13", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_c14", 0, "arms_c14", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_c15", 0, "banner_c15", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_c16", 0, "arms_c16", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_c17", 0, "banner_c17", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_c18", 0, "banner_c18", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_c19", 0, "arms_c19", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_c20", 0, "banner_c20", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_c21", 0, "banner_c21", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_d01", 0, "banner_d01", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_d02", 0, "arms_d02", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_d03", 0, "arms_d03", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_d04", 0, "arms_d04", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_d05", 0, "banner_d05", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_d06", 0, "arms_d06", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_d07", 0, "arms_d07", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_d08", 0, "arms_d08", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_d09", 0, "arms_d09", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_d10", 0, "banner_d10", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_d11", 0, "arms_d11", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_d12", 0, "arms_d12", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_d13", 0, "arms_d13", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_d14", 0, "arms_d14", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_d15", 0, "arms_d15", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_d16", 0, "arms_d16", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_d17", 0, "arms_d17", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_d18", 0, "arms_d18", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_d19", 0, "arms_d19", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_d20", 0, "arms_d20", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_d21", 0, "arms_d21", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_e01", 0, "banner_e01", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_e02", 0, "arms_e02", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_e03", 0, "banner_e03", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_e04", 0, "banner_e04", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_e05", 0, "banner_e05", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_e06", 0, "banner_e06", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_e07", 0, "banner_e07", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_e08", 0, "banner_e08", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_e09", 0, "banner_e09", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_e10", 0, "banner_e10", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_e11", 0, "banner_e11", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_e12", 0, "banner_e12", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_e13", 0, "banner_e13", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_e14", 0, "banner_e14", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_e15", 0, "banner_e15", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_e16", 0, "banner_e16", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_e17", 0, "banner_e17", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_e18", 0, "banner_e18", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_e19", 0, "banner_e19", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_e20", 0, "banner_e20", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_e21", 0, "banner_e21", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_f01", 0, "banner_f01", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_f02", 0, "banner_f02", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_f03", 0, "banner_f03", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_f04", 0, "banner_f04", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_f05", 0, "banner_f05", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_f06", 0, "banner_f06", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_f07", 0, "banner_f07", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_f08", 0, "banner_f08", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_f09", 0, "banner_f09", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_f10", 0, "banner_f10", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_f11", 0, "banner_f11", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_f12", 0, "banner_f12", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_f13", 0, "banner_f13", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_f14", 0, "banner_f14", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_f15", 0, "banner_f15", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_f16", 0, "banner_f16", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_f17", 0, "banner_f17", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_f18", 0, "banner_f18", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_f19", 0, "banner_f19", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_f20", 0, "banner_f20", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_g01", 0, "banner_f01", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_g02", 0, "banner_f02", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_g03", 0, "banner_f03", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_g04", 0, "banner_f04", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_g05", 0, "banner_f05", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_g06", 0, "banner_f06", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_g07", 0, "banner_f07", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_g08", 0, "banner_f08", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_g09", 0, "banner_f09", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_g10", 0, "banner_f10", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_kingdom_a", 0, "banner_kingdom_a", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_kingdom_b", 0, "banner_kingdom_b", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_kingdom_c", 0, "banner_kingdom_c", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_kingdom_d", 0, "banner_kingdom_d", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_kingdom_e", 0, "banner_kingdom_e", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_kingdom_f", 0, "banner_kingdom_f", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("arms_f21", 0, "banner_a15", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banners_default_a", 0, "banners_default_a", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banners_default_b", 0, "banners_default_b", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banners_default_c", 0, "banners_default_c", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banners_default_d", 0, "banners_default_d", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("banners_default_e", 0, "banners_default_e", 0, 0, 0, -90, 0, 0, 1, 1, 1),
("troop_label_banner", 0, "troop_label_banner", 0, 0, 0, 0, 0, 0, 10, 10, 10),
("ui_kingdom_shield_1", 0, "ui_kingdom_shield_1", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("ui_kingdom_shield_2", 0, "ui_kingdom_shield_2", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("ui_kingdom_shield_3", 0, "ui_kingdom_shield_3", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("ui_kingdom_shield_4", 0, "ui_kingdom_shield_4", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("ui_kingdom_shield_5", 0, "ui_kingdom_shield_5", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("ui_kingdom_shield_6", 0, "ui_kingdom_shield_6", 0, 0, 0, 0, 0, 0, 1, 1, 1),
#("flag_swadian", 0, "banner_a01", 0, 0, 0, 0, 0, 0, 1, 1, 1),
#("flag_vaegir", 0, "banner_a02", 0, 0, 0, 0, 0, 0, 1, 1, 1),
#("flag_khergit", 0, "banner_d01", 0, 0, 0, 0, 0, 0, 1, 1, 1),
#("flag_nord", 0, "banner_a03", 0, 0, 0, 0, 0, 0, 1, 1, 1),
#("flag_rhodok", 0, "banner_a04", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mouse_arrow_down", 0, "mouse_arrow_down", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mouse_arrow_right", 0, "mouse_arrow_right", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mouse_arrow_left", 0, "mouse_arrow_left", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mouse_arrow_up", 0, "mouse_arrow_up", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mouse_arrow_plus", 0, "mouse_arrow_plus", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mouse_left_click", 0, "mouse_left_click", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mouse_right_click", 0, "mouse_right_click", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("status_ammo_ready", 0, "status_ammo_ready", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("main_menu_background", 0, "main_menu_nord", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("loading_background", 0, "load_screen_2", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("ui_quick_battle_a", 0, "ui_quick_battle_a", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("white_bg_plane_a", 0, "white_bg_plane_a", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("cb_ui_icon_infantry", 0, "cb_ui_icon_infantry", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("cb_ui_icon_archer", 0, "cb_ui_icon_archer", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("cb_ui_icon_horseman", 0, "cb_ui_icon_horseman", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("cb_ui_main", 0, "cb_ui_main", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("cb_ui_maps_scene_01", 0, "cb_ui_maps_scene_01", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("cb_ui_maps_scene_02", 0, "cb_ui_maps_scene_02", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("cb_ui_maps_scene_03", 0, "cb_ui_maps_scene_03", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("cb_ui_maps_scene_04", 0, "cb_ui_maps_scene_04", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("cb_ui_maps_scene_05", 0, "cb_ui_maps_scene_05", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("cb_ui_maps_scene_06", 0, "cb_ui_maps_scene_06", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("cb_ui_maps_scene_07", 0, "cb_ui_maps_scene_07", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("cb_ui_maps_scene_08", 0, "cb_ui_maps_scene_08", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("cb_ui_maps_scene_09", 0, "cb_ui_maps_scene_09", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_ui_host_maps_14", 0, "mp_ui_host_maps_c4", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_ui_host_maps_15", 0, "mp_ui_host_maps_c5", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("quit_adv", 0, "quit_adv", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("quit_adv_b", 0, "quit_adv_b", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("ui_kingdom_shield_7", 0, "ui_kingdom_shield_7", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("flag_project_rb", 0, "flag_project_rb", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("flag_project_rb_miss", 0, "flag_project_rb_miss", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_ui_host_maps_16", 0, "mp_ui_host_maps_d1", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_ui_host_maps_17", 0, "mp_ui_host_maps_d2", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_ui_host_maps_18", 0, "mp_ui_host_maps_d3", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_ui_host_maps_19", 0, "mp_ui_host_maps_e2", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_ui_host_maps_20", 0, "mp_ui_host_maps_e1", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("mp_ui_host_maps_21", 0, "mp_ui_host_maps_cold_cost", 0, 0, 0, 0, 0, 0, 1, 1, 1),
#INVASION MODE START
("incoming_enemy", 0, "cb_ui_icon_infantry", 0, 0, 0, 0, 0, 0, 2, 2, 2),
("prison_cart_pos", 0, "ccoop_prison_cart", 0, 0, 0, 0, 0, 0, 2, 2, 2),
("ccoop_drop_chest_top", 0, "ccoop_drop_chest_top", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("ccoop_drop_chest_bottom", 0, "ccoop_drop_chest_bottom", 0, 0, 200, 0, 0, 0, 1, 1, 1),
("ccoop_random_class", 0, "ccoop_random_class", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("ccoop_default_class", 0, "ccoop_default_class", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("ccoop_melee_class", 0, "ccoop_melee_class", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("ccoop_ranged_class", 0, "ccoop_ranged_class", 0, 0, 0, 0, 0, 0, 1, 1, 1),
("ccoop_mounted_class", 0, "ccoop_mounted_class", 0, 0, 0, 0, 0, 0, 1, 1, 1),
#INVASION MODE END
]
#LWBR WarForge 2.0 --- BEGIN
# When the module is not being built for the client (IS_CLIENT is false),
# every entry is rewritten in place: only the mesh identifier is kept and the
# visual resource is swapped for a single placeholder ("pic_bandits").
if not IS_CLIENT:
    for g in xrange(len(meshes)):
        meshes[g] = (meshes[g][0],0,"pic_bandits",0,0,0,0,0,0,0,0,0)
#LWBR WarForge 2.0 --- END
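# Added note: a minimal sketch of how one of the rows above can be unpacked,
# assuming the usual Warband module_meshes tuple layout of
# (identifier, flags, resource, position xyz, rotation xyz, scale xyz).
# The helper and its field names are illustrative and not part of the mod.
def describe_mesh(entry):
    name, flags, resource = entry[0], entry[1], entry[2]
    pos, rot, scale = entry[3:6], entry[6:9], entry[9:12]
    return "%s flags=%d resource=%s pos=%s rot=%s scale=%s" % (
        name, flags, resource, pos, rot, scale)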
|
[
"cristianobrust123@gmail.com"
] |
cristianobrust123@gmail.com
|
b769eca771f05f093f3193a9bfb26ffd56ba7b94
|
14c67704c62b1676fcf7ef34fef5d863004e41c8
|
/My-Subpixel-Corr-TF/My-Subpixel-Corr-TF-master/src/training_schedules.py
|
526cc7789cbd4acfc378f2eb4b4a9a3f3a1ad9a2
|
[
"MIT"
] |
permissive
|
jinwooklim/DML-subpixel-corr-tf
|
3e149ab7a5c9908918dec0a302065ef9fb1a2a7e
|
8ce20a9381c19adc762cbab256b6851f07064ea4
|
refs/heads/master
| 2022-04-04T11:39:28.043927
| 2020-02-13T07:53:08
| 2020-02-13T07:53:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 645
|
py
|
LONG_SCHEDULE = {
'step_values': [400000, 600000, 800000, 1000000],
'learning_rates': [0.0001, 0.00005, 0.000025, 0.0000125, 0.00000625],
'momentum': 0.9,
'momentum2': 0.999,
'weight_decay': 0.0004,
'max_iter': 1200000,
}
SUBPIXEL_SCHEDULE = {
'step_values': [8000, 11000, 14000, 17000],
#'learning_rates': [0.0001, 0.00005, 0.000025, 0.0000125, 0.00000625], # (1) jy
'learning_rates': [0.00005, 0.000025, 0.0000125, 0.00000625, 0.000003125], # (2) jwlim
'momentum': 0.9,
'momentum2': 0.999,
'weight_decay': 0.0004,
'max_iter': 20000,
}
FINETUNE_SCHEDULE = {
# TODO: Finetune schedule
}
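# Added sketch: one way a piecewise-constant schedule like the ones above is
# typically consumed -- the active learning rate at a given global step is the
# entry whose boundaries have not yet been passed. The helper is illustrative
# and not part of the original training code.
def lr_for_step(schedule, step):
    """Return the learning rate active at `step` for a piecewise-constant schedule."""
    index = sum(1 for boundary in schedule['step_values'] if step >= boundary)
    return schedule['learning_rates'][index]

# e.g. lr_for_step(SUBPIXEL_SCHEDULE, 0) -> 5e-05 and
#      lr_for_step(SUBPIXEL_SCHEDULE, 12000) -> 1.25e-05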
|
[
"aiden.limo@outlook.com"
] |
aiden.limo@outlook.com
|
f35e5e02de4e8499e7ef24adcf93184ea3aabb86
|
6a2ee082bdc2cda591c1d2f96114d89843687bc5
|
/portfolio/blog/urls.py
|
78de6ee3e63da2274bdb8c76e54081ad5794468e
|
[] |
no_license
|
iamrames/portfolio
|
f1e89b0f71af8fe2a0f6d3608b21b077ff036b91
|
fd2a839178ea4055a3c012bde3cdbf48080c42ff
|
refs/heads/master
| 2021-04-09T14:49:33.420541
| 2018-03-18T05:06:29
| 2018-03-18T05:06:29
| 125,694,180
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 410
|
py
|
from . import views
from django.urls import path
urlpatterns = [
path("",views.index.as_view(),name='index'),
path("about/",views.about.as_view(),name="about"),
path("contact/",views.form_name_view,name="contact"),
path("portfolio/",views.portfolio.as_view(),name="portfolio"),
path("photos/",views.photos.as_view(),name="photos"),
path("rames/",views.rames.as_view(),name="rames"),
]
|
[
"addictedtomig@gmail.com"
] |
addictedtomig@gmail.com
|
f6eff06c09abb93aca4a5c682634404aa8f93f25
|
99b36e8d9d5996d134c82b42b2f89a456b713fb5
|
/day3/day3Code.py
|
93df1be9692769de6c184c66c083cfd714ecd67c
|
[] |
no_license
|
brendanbikes/adventOfCode2020
|
2087d3251a3b38d6aa7e686239784f8df2594579
|
47d56075a7ea3a50d168f3ccd67270e441ed85aa
|
refs/heads/main
| 2023-02-10T13:22:06.021238
| 2020-12-27T21:55:50
| 2020-12-27T21:55:50
| 319,201,167
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 831
|
py
|
import numpy as np
import sys
def readInput():
with open('day3input.txt', 'r') as f:
grid = f.read().splitlines()
#extend the grid to the right a bunch
newGrid=[]
for row in grid:
row = row*1000
newGrid.append(row)
return newGrid
def process():
grid = readInput()
slopes = [(1, 1), (3, 1), (5, 1), (7, 1), (1, 2)]
rows = len(grid)
columns = len(grid[0])
treeCountProduct=1
for pair in slopes:
slope_x, slope_y = pair
i_x = 0
i_y = 0
treeCount = 0
while i_x <= columns-1 and i_y <= rows-1:
#detect tree
if grid[i_y][i_x] == '#':
treeCount+=1
#increment position
i_x+=slope_x
i_y+=slope_y
print('Found {} trees.'.format(treeCount))
treeCountProduct*=treeCount
print('This is the product of all tree counts: {}'.format(treeCountProduct))
if __name__ == "__main__":
process()
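# Added note: an alternative sketch that avoids building the 1000x-wide grid
# above. Because the map repeats horizontally, the column index can simply be
# wrapped with modulo. Illustrative only; it is not called by process().
def count_trees(grid, slope_x, slope_y):
    width = len(grid[0])
    trees = 0
    x = 0
    for y in range(0, len(grid), slope_y):
        if grid[y][x % width] == '#':
            trees += 1
        x += slope_x
    return trees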
|
[
"brendanmurphy@orolo-3.local"
] |
brendanmurphy@orolo-3.local
|
76d1e00dcaaafe6d64f477cafa4e096279d9856c
|
5c724d6e03e4194680c793718a4f72a58ca66bb1
|
/app/migrations/0094_auto_20180928_1207.py
|
ac9b198eca9891502fecb050653a4d47fb3fcb0a
|
[] |
no_license
|
tigrezhito1/bat
|
26002de4540bb4eac2751a31171adc45687f4293
|
0ea6b9b85e130a201c21eb6cbf09bc21988d6443
|
refs/heads/master
| 2020-05-02T07:13:06.936015
| 2019-03-26T15:04:17
| 2019-03-26T15:04:17
| 177,812,144
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 868
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-09-28 17:07
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0093_auto_20180925_1800'),
]
operations = [
migrations.AlterField(
model_name='produccion',
name='fecha',
field=models.DateTimeField(default=datetime.datetime(2018, 9, 28, 12, 7, 16, 43141), editable=False, help_text='Fecha de recepci\xf3n de la llamada (No se puede modificar)'),
),
migrations.AlterField(
model_name='produccion',
name='hora_instalacion',
field=models.TimeField(blank=True, default=datetime.datetime(2018, 9, 28, 12, 7, 16, 45305), editable=False, max_length=1000, null=True),
),
]
|
[
"you@example.com"
] |
you@example.com
|
cb5666570d2b3412233d972cc57a2e6f65ace92d
|
ce72637209547fe47301f03cc5cb11c6f614c095
|
/Custom/events/Zabbix/API/httpretty/core.py
|
3317543778464170d02e5f9bbd082d5864d13272
|
[
"Apache-2.0"
] |
permissive
|
ThinkboxSoftware/Deadline
|
29d1168efeb3c9a2f26a05d725eb63746d46499e
|
bfc97123e259f5e8392d3d45101c52ac79a07609
|
refs/heads/master
| 2023-08-28T01:05:24.365336
| 2023-08-05T20:31:06
| 2023-08-05T20:31:06
| 4,706,359
| 136
| 50
|
Apache-2.0
| 2023-08-05T20:31:07
| 2012-06-18T21:17:32
|
Python
|
UTF-8
|
Python
| false
| false
| 34,264
|
py
|
# #!/usr/bin/env python
# -*- coding: utf-8 -*-
# <HTTPretty - HTTP client mock for Python>
# Copyright (C) <2011-2013> Gabriel Falcão <gabriel@nacaolivre.org>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
from __future__ import unicode_literals
import re
import codecs
import inspect
import socket
import functools
import itertools
import warnings
import logging
import traceback
import json
import contextlib
from .compat import (
PY3,
StringIO,
text_type,
BaseClass,
BaseHTTPRequestHandler,
quote,
quote_plus,
urlunsplit,
urlsplit,
parse_qs,
unquote,
unquote_utf8,
ClassTypes,
basestring
)
from .http import (
STATUSES,
HttpBaseClass,
parse_requestline,
last_requestline,
)
from .utils import (
utf8,
decode_utf8,
)
from .errors import HTTPrettyError
from datetime import datetime
from datetime import timedelta
from errno import EAGAIN
old_socket = socket.socket
old_create_connection = socket.create_connection
old_gethostbyname = socket.gethostbyname
old_gethostname = socket.gethostname
old_getaddrinfo = socket.getaddrinfo
old_socksocket = None
old_ssl_wrap_socket = None
old_sslwrap_simple = None
old_sslsocket = None
if PY3: # pragma: no cover
basestring = (bytes, str)
try: # pragma: no cover
import socks
old_socksocket = socks.socksocket
except ImportError:
socks = None
try: # pragma: no cover
import ssl
old_ssl_wrap_socket = ssl.wrap_socket
if not PY3:
old_sslwrap_simple = ssl.sslwrap_simple
old_sslsocket = ssl.SSLSocket
except ImportError: # pragma: no cover
ssl = None
DEFAULT_HTTP_PORTS = frozenset([80])
POTENTIAL_HTTP_PORTS = set(DEFAULT_HTTP_PORTS)
DEFAULT_HTTPS_PORTS = frozenset([443])
POTENTIAL_HTTPS_PORTS = set(DEFAULT_HTTPS_PORTS)
class HTTPrettyRequest(BaseHTTPRequestHandler, BaseClass):
"""Represents a HTTP request. It takes a valid multi-line, `\r\n`
separated string with HTTP headers and parse them out using the
internal `parse_request` method.
It also replaces the `rfile` and `wfile` attributes with StringIO
instances so that we guarantee that it won't make any I/O, neither
for writing nor reading.
It has some convenience attributes:
`headers` -> a mimetype object that can be cast into a dictionary,
contains all the request headers
`method` -> the HTTP method used in this request
`querystring` -> a dictionary containing lists with the
attributes. Please notice that if you need a single value from a
query string you will need to get it manually like:
```python
>>> request.querystring
{'name': ['Gabriel Falcao']}
>>> print request.querystring['name'][0]
```
`parsed_body` -> a dictionary containing parsed request body or
None if HTTPrettyRequest doesn't know how to parse it. It
currently supports parsing body data that was sent under the
`content-type` headers values: 'application/json' or
'application/x-www-form-urlencoded'
"""
def __init__(self, headers, body=''):
# first of all, lets make sure that if headers or body are
# unicode strings, it must be converted into a utf-8 encoded
# byte string
self.raw_headers = utf8(headers.strip())
self.body = utf8(body)
# Now let's concatenate the headers with the body, and create
# `rfile` based on it
self.rfile = StringIO(b'\r\n\r\n'.join([self.raw_headers, self.body]))
self.wfile = StringIO() # Creating `wfile` as an empty
# StringIO, just to avoid any real
# I/O calls
# parsing the request line preemptively
self.raw_requestline = self.rfile.readline()
# initiating the error attributes with None
self.error_code = None
self.error_message = None
# Parse the request based on the attributes above
self.parse_request()
# making the HTTP method string available as the command
self.method = self.command
# Now 2 convenient attributes for the HTTPretty API:
# `querystring` holds a dictionary with the parsed query string
try:
self.path = self.path.encode('iso-8859-1')
except UnicodeDecodeError:
pass
self.path = decode_utf8(self.path)
qstring = self.path.split("?", 1)[-1]
self.querystring = self.parse_querystring(qstring)
# And the body will be attempted to be parsed as
# `application/json` or `application/x-www-form-urlencoded`
self.parsed_body = self.parse_request_body(self.body)
def __str__(self):
return '<HTTPrettyRequest("{0}", total_headers={1}, body_length={2})>'.format(
self.headers.get('content-type', ''),
len(self.headers),
len(self.body),
)
def parse_querystring(self, qs):
expanded = unquote_utf8(qs)
parsed = parse_qs(expanded)
result = {}
for k in parsed:
result[k] = list(map(decode_utf8, parsed[k]))
return result
def parse_request_body(self, body):
""" Attempt to parse the post based on the content-type passed. Return the regular body if not """
PARSING_FUNCTIONS = {
'application/json': json.loads,
'text/json': json.loads,
'application/x-www-form-urlencoded': self.parse_querystring,
}
FALLBACK_FUNCTION = lambda x: x
content_type = self.headers.get('content-type', '')
do_parse = PARSING_FUNCTIONS.get(content_type, FALLBACK_FUNCTION)
try:
body = decode_utf8(body)
return do_parse(body)
except:
return body
class EmptyRequestHeaders(dict):
pass
class HTTPrettyRequestEmpty(object):
body = ''
headers = EmptyRequestHeaders()
class FakeSockFile(StringIO):
pass
class FakeSSLSocket(object):
def __init__(self, sock, *args, **kw):
self._httpretty_sock = sock
def __getattr__(self, attr):
return getattr(self._httpretty_sock, attr)
class fakesock(object):
class socket(object):
_entry = None
debuglevel = 0
_sent_data = []
def __init__(self, family=socket.AF_INET, type=socket.SOCK_STREAM,
protocol=0):
self.setsockopt(family, type, protocol)
self.truesock = old_socket(family, type, protocol)
self._closed = True
self.fd = FakeSockFile()
self.timeout = socket._GLOBAL_DEFAULT_TIMEOUT
self._sock = self
self.is_http = False
self._bufsize = 16
def getpeercert(self, *a, **kw):
now = datetime.now()
shift = now + timedelta(days=30 * 12)
return {
'notAfter': shift.strftime('%b %d %H:%M:%S GMT'),
'subjectAltName': (
('DNS', '*%s' % self._host),
('DNS', self._host),
('DNS', '*'),
),
'subject': (
(
('organizationName', '*.%s' % self._host),
),
(
('organizationalUnitName',
'Domain Control Validated'),
),
(
('commonName', '*.%s' % self._host),
),
),
}
def ssl(self, sock, *args, **kw):
return sock
def setsockopt(self, family, type, protocol):
self.family = family
self.protocol = protocol
self.type = type
def connect(self, address):
self._address = (self._host, self._port) = address
self._closed = False
self.is_http = self._port in POTENTIAL_HTTP_PORTS | POTENTIAL_HTTPS_PORTS
if not self.is_http:
self.truesock.connect(self._address)
def close(self):
if not (self.is_http and self._closed):
self.truesock.close()
self._closed = True
def makefile(self, mode='r', bufsize=-1):
"""Returns this fake socket's own StringIO buffer.
If there is an entry associated with the socket, the file
descriptor gets filled in with the entry data before being
returned.
"""
self._mode = mode
self._bufsize = bufsize
if self._entry:
self._entry.fill_filekind(self.fd)
return self.fd
def real_sendall(self, data, *args, **kw):
"""Sends data to the remote server. This method is called
when HTTPretty identifies that someone is trying to send
non-http data.
The received bytes are written in this socket's StringIO
buffer so that HTTPretty can return it accordingly when
necessary.
"""
if self.is_http: # no need to connect if `self.is_http` is
# False because self.connect already did
# that
self.truesock.connect(self._address)
self.truesock.settimeout(0)
self.truesock.sendall(data, *args, **kw)
should_continue = True
while should_continue:
try:
received = self.truesock.recv(self._bufsize)
self.fd.write(received)
should_continue = len(received) > 0
except socket.error as e:
if e.errno == EAGAIN:
continue
break
self.fd.seek(0)
def sendall(self, data, *args, **kw):
self._sent_data.append(data)
try:
requestline, _ = data.split(b'\r\n', 1)
method, path, version = parse_requestline(decode_utf8(requestline))
is_parsing_headers = True
except ValueError:
is_parsing_headers = False
if not self._entry:
# If the previous request wasn't mocked, don't mock the subsequent sending of data
return self.real_sendall(data, *args, **kw)
self.fd.seek(0)
if not is_parsing_headers:
if len(self._sent_data) > 1:
headers = utf8(last_requestline(self._sent_data))
meta = self._entry.request.headers
body = utf8(self._sent_data[-1])
if meta.get('transfer-encoding', '') == 'chunked':
if not body.isdigit() and body != b'\r\n' and body != b'0\r\n\r\n':
self._entry.request.body += body
else:
self._entry.request.body += body
httpretty.historify_request(headers, body, False)
return
# path might come with
s = urlsplit(path)
POTENTIAL_HTTP_PORTS.add(int(s.port or 80))
headers, body = list(map(utf8, data.split(b'\r\n\r\n', 1)))
request = httpretty.historify_request(headers, body)
info = URIInfo(hostname=self._host, port=self._port,
path=s.path,
query=s.query,
last_request=request)
matcher, entries = httpretty.match_uriinfo(info)
if not entries:
self._entry = None
self.real_sendall(data)
return
self._entry = matcher.get_next_entry(method, info, request)
def debug(self, func, *a, **kw):
if self.is_http:
frame = inspect.stack()[0][0]
lines = list(map(utf8, traceback.format_stack(frame)))
message = [
"HTTPretty intercepted and unexpected socket method call.",
("Please open an issue at "
"'https://github.com/gabrielfalcao/HTTPretty/issues'"),
"And paste the following traceback:\n",
"".join(decode_utf8(lines)),
]
raise RuntimeError("\n".join(message))
return func(*a, **kw)
def settimeout(self, new_timeout):
self.timeout = new_timeout
def send(self, *args, **kwargs):
return self.debug(self.truesock.send, *args, **kwargs)
def sendto(self, *args, **kwargs):
return self.debug(self.truesock.sendto, *args, **kwargs)
def recvfrom_into(self, *args, **kwargs):
return self.debug(self.truesock.recvfrom_into, *args, **kwargs)
def recv_into(self, *args, **kwargs):
return self.debug(self.truesock.recv_into, *args, **kwargs)
def recvfrom(self, *args, **kwargs):
return self.debug(self.truesock.recvfrom, *args, **kwargs)
def recv(self, *args, **kwargs):
return self.debug(self.truesock.recv, *args, **kwargs)
def __getattr__(self, name):
return getattr(self.truesock, name)
def fake_wrap_socket(s, *args, **kw):
return s
def create_fake_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, source_address=None):
s = fakesock.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP)
if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
s.settimeout(timeout)
if source_address:
s.bind(source_address)
s.connect(address)
return s
def fake_gethostbyname(host):
return '127.0.0.1'
def fake_gethostname():
return 'localhost'
def fake_getaddrinfo(
host, port, family=None, socktype=None, proto=None, flags=None):
return [(2, 1, 6, '', (host, port))]
class Entry(BaseClass):
def __init__(self, method, uri, body,
adding_headers=None,
forcing_headers=None,
status=200,
streaming=False,
**headers):
self.method = method
self.uri = uri
self.info = None
self.request = None
self.body_is_callable = False
if hasattr(body, "__call__"):
self.callable_body = body
self.body = None
self.body_is_callable = True
elif isinstance(body, text_type):
self.body = utf8(body)
else:
self.body = body
self.streaming = streaming
if not streaming and not self.body_is_callable:
self.body_length = len(self.body or '')
else:
self.body_length = 0
self.adding_headers = adding_headers or {}
self.forcing_headers = forcing_headers or {}
self.status = int(status)
for k, v in headers.items():
name = "-".join(k.split("_")).title()
self.adding_headers[name] = v
self.validate()
def validate(self):
content_length_keys = 'Content-Length', 'content-length'
for key in content_length_keys:
got = self.adding_headers.get(
key, self.forcing_headers.get(key, None))
if got is None:
continue
try:
    igot = int(got)
except ValueError:
    warnings.warn(
        'HTTPretty got to register the Content-Length header '
        'with "%r" which is not a number' % got,
    )
    continue  # nothing numeric to compare against the body length
if igot > self.body_length:
raise HTTPrettyError(
'HTTPretty got inconsistent parameters. The header ' \
'Content-Length you registered expects size "%d" but ' \
'the body you registered for that has actually length ' \
'"%d".' % (
igot, self.body_length,
)
)
def __str__(self):
return r'<Entry %s %s getting %d>' % (
self.method, self.uri, self.status)
def normalize_headers(self, headers):
new = {}
for k in headers:
new_k = '-'.join([s.lower() for s in k.split('-')])
new[new_k] = headers[k]
return new
def fill_filekind(self, fk):
now = datetime.utcnow()
headers = {
'status': self.status,
'date': now.strftime('%a, %d %b %Y %H:%M:%S GMT'),
'server': 'Python/HTTPretty',
'connection': 'close',
}
if self.forcing_headers:
headers = self.forcing_headers
if self.adding_headers:
headers.update(self.normalize_headers(self.adding_headers))
headers = self.normalize_headers(headers)
status = headers.get('status', self.status)
if self.body_is_callable:
status, headers, self.body = self.callable_body(self.request, self.info.full_url(), headers)
headers.update({
'content-length': len(self.body)
})
string_list = [
'HTTP/1.1 %d %s' % (status, STATUSES[status]),
]
if 'date' in headers:
string_list.append('date: %s' % headers.pop('date'))
if not self.forcing_headers:
content_type = headers.pop('content-type',
'text/plain; charset=utf-8')
content_length = headers.pop('content-length', self.body_length)
string_list.append('content-type: %s' % content_type)
if not self.streaming:
string_list.append('content-length: %s' % content_length)
string_list.append('server: %s' % headers.pop('server'))
for k, v in headers.items():
string_list.append(
'{0}: {1}'.format(k, v),
)
for item in string_list:
fk.write(utf8(item) + b'\n')
fk.write(b'\r\n')
if self.streaming:
self.body, body = itertools.tee(self.body)
for chunk in body:
fk.write(utf8(chunk))
else:
fk.write(utf8(self.body))
fk.seek(0)
def url_fix(s, charset='utf-8'):
scheme, netloc, path, querystring, fragment = urlsplit(s)
path = quote(path, b'/%')
querystring = quote_plus(querystring, b':&=')
return urlunsplit((scheme, netloc, path, querystring, fragment))
class URIInfo(BaseClass):
def __init__(self,
username='',
password='',
hostname='',
port=80,
path='/',
query='',
fragment='',
scheme='',
last_request=None):
self.username = username or ''
self.password = password or ''
self.hostname = hostname or ''
if port:
port = int(port)
elif scheme == 'https':
port = 443
self.port = port or 80
self.path = path or ''
self.query = query or ''
if scheme:
self.scheme = scheme
elif self.port in POTENTIAL_HTTPS_PORTS:
self.scheme = 'https'
else:
self.scheme = 'http'
self.fragment = fragment or ''
self.last_request = last_request
def __str__(self):
attrs = (
'username',
'password',
'hostname',
'port',
'path',
)
fmt = ", ".join(['%s="%s"' % (k, getattr(self, k, '')) for k in attrs])
return r'<httpretty.URIInfo(%s)>' % fmt
def __hash__(self):
return hash(text_type(self))
def __eq__(self, other):
self_tuple = (
self.port,
decode_utf8(self.hostname.lower()),
url_fix(decode_utf8(self.path)),
)
other_tuple = (
other.port,
decode_utf8(other.hostname.lower()),
url_fix(decode_utf8(other.path)),
)
return self_tuple == other_tuple
def full_url(self, use_querystring=True):
credentials = ""
if self.password:
credentials = "{0}:{1}@".format(
self.username, self.password)
query = ""
if use_querystring and self.query:
query = "?{0}".format(decode_utf8(self.query))
result = "{scheme}://{credentials}{domain}{path}{query}".format(
scheme=self.scheme,
credentials=credentials,
domain=self.get_full_domain(),
path=decode_utf8(self.path),
query=query
)
return result
def get_full_domain(self):
hostname = decode_utf8(self.hostname)
# Port 80/443 should not be appended to the url
if self.port not in DEFAULT_HTTP_PORTS | DEFAULT_HTTPS_PORTS:
return ":".join([hostname, str(self.port)])
return hostname
@classmethod
def from_uri(cls, uri, entry):
result = urlsplit(uri)
if result.scheme == 'https':
POTENTIAL_HTTPS_PORTS.add(int(result.port or 443))
else:
POTENTIAL_HTTP_PORTS.add(int(result.port or 80))
return cls(result.username,
result.password,
result.hostname,
result.port,
result.path,
result.query,
result.fragment,
result.scheme,
entry)
class URIMatcher(object):
regex = None
info = None
def __init__(self, uri, entries, match_querystring=False):
self._match_querystring = match_querystring
if type(uri).__name__ == 'SRE_Pattern':
self.regex = uri
result = urlsplit(uri.pattern)
if result.scheme == 'https':
POTENTIAL_HTTPS_PORTS.add(int(result.port or 443))
else:
POTENTIAL_HTTP_PORTS.add(int(result.port or 80))
else:
self.info = URIInfo.from_uri(uri, entries)
self.entries = entries
#hash of current_entry pointers, per method.
self.current_entries = {}
def matches(self, info):
if self.info:
return self.info == info
else:
return self.regex.search(info.full_url(
use_querystring=self._match_querystring))
def __str__(self):
wrap = 'URLMatcher({0})'
if self.info:
return wrap.format(text_type(self.info))
else:
return wrap.format(self.regex.pattern)
def get_next_entry(self, method, info, request):
"""Cycle through available responses, but only once.
Any subsequent requests will receive the last response"""
if method not in self.current_entries:
self.current_entries[method] = 0
#restrict selection to entries that match the requested method
entries_for_method = [e for e in self.entries if e.method == method]
if self.current_entries[method] >= len(entries_for_method):
self.current_entries[method] = -1
if not self.entries or not entries_for_method:
raise ValueError('I have no entries for method %s: %s'
% (method, self))
entry = entries_for_method[self.current_entries[method]]
if self.current_entries[method] != -1:
self.current_entries[method] += 1
# Attach more info to the entry
# So the callback can be more clever about what to do
# This does also fix the case where the callback
# would be handed a compiled regex as uri instead of the
# real uri
entry.info = info
entry.request = request
return entry
def __hash__(self):
return hash(text_type(self))
def __eq__(self, other):
return text_type(self) == text_type(other)
class httpretty(HttpBaseClass):
"""The URI registration class"""
_entries = {}
latest_requests = []
last_request = HTTPrettyRequestEmpty()
_is_enabled = False
@classmethod
def match_uriinfo(cls, info):
for matcher, value in cls._entries.items():
if matcher.matches(info):
return (matcher, info)
return (None, [])
@classmethod
@contextlib.contextmanager
def record(cls, filename, indentation=4, encoding='utf-8'):
try:
import urllib3
except ImportError:
raise RuntimeError('HTTPretty requires urllib3 installed for recording actual requests.')
http = urllib3.PoolManager()
cls.enable()
calls = []
def record_request(request, uri, headers):
cls.disable()
response = http.request(request.method, uri)
calls.append({
'request': {
'uri': uri,
'method': request.method,
'headers': dict(request.headers),
'body': decode_utf8(request.body),
'querystring': request.querystring
},
'response': {
'status': response.status,
'body': decode_utf8(response.data),
'headers': dict(response.headers)
}
})
cls.enable()
return response.status, response.headers, response.data
for method in cls.METHODS:
cls.register_uri(method, re.compile(r'.*', re.M), body=record_request)
yield
cls.disable()
with codecs.open(filename, 'w', encoding) as f:
f.write(json.dumps(calls, indent=indentation))
@classmethod
@contextlib.contextmanager
def playback(cls, origin):
cls.enable()
data = json.loads(open(origin).read())
for item in data:
uri = item['request']['uri']
method = item['request']['method']
cls.register_uri(method, uri, body=item['response']['body'], forcing_headers=item['response']['headers'])
yield
cls.disable()
@classmethod
def reset(cls):
POTENTIAL_HTTP_PORTS.intersection_update(DEFAULT_HTTP_PORTS)
POTENTIAL_HTTPS_PORTS.intersection_update(DEFAULT_HTTPS_PORTS)
cls._entries.clear()
cls.latest_requests = []
cls.last_request = HTTPrettyRequestEmpty()
@classmethod
def historify_request(cls, headers, body='', append=True):
request = HTTPrettyRequest(headers, body)
cls.last_request = request
if append or not cls.latest_requests:
cls.latest_requests.append(request)
else:
cls.latest_requests[-1] = request
return request
@classmethod
def register_uri(cls, method, uri, body='HTTPretty :)',
adding_headers=None,
forcing_headers=None,
status=200,
responses=None, match_querystring=False,
**headers):
uri_is_string = isinstance(uri, basestring)
if uri_is_string and re.search(r'^\w+://[^/]+[.]\w{2,}$', uri):
uri += '/'
if isinstance(responses, list) and len(responses) > 0:
for response in responses:
response.uri = uri
response.method = method
entries_for_this_uri = responses
else:
headers[str('body')] = body
headers[str('adding_headers')] = adding_headers
headers[str('forcing_headers')] = forcing_headers
headers[str('status')] = status
entries_for_this_uri = [
cls.Response(method=method, uri=uri, **headers),
]
matcher = URIMatcher(uri, entries_for_this_uri,
match_querystring)
if matcher in cls._entries:
matcher.entries.extend(cls._entries[matcher])
del cls._entries[matcher]
cls._entries[matcher] = entries_for_this_uri
def __str__(self):
return '<HTTPretty with %d URI entries>' % len(self._entries)
@classmethod
def Response(cls, body, method=None, uri=None, adding_headers=None, forcing_headers=None,
status=200, streaming=False, **headers):
headers[str('body')] = body
headers[str('adding_headers')] = adding_headers
headers[str('forcing_headers')] = forcing_headers
headers[str('status')] = int(status)
headers[str('streaming')] = streaming
return Entry(method, uri, **headers)
@classmethod
def disable(cls):
cls._is_enabled = False
socket.socket = old_socket
socket.SocketType = old_socket
socket._socketobject = old_socket
socket.create_connection = old_create_connection
socket.gethostname = old_gethostname
socket.gethostbyname = old_gethostbyname
socket.getaddrinfo = old_getaddrinfo
socket.__dict__['socket'] = old_socket
socket.__dict__['_socketobject'] = old_socket
socket.__dict__['SocketType'] = old_socket
socket.__dict__['create_connection'] = old_create_connection
socket.__dict__['gethostname'] = old_gethostname
socket.__dict__['gethostbyname'] = old_gethostbyname
socket.__dict__['getaddrinfo'] = old_getaddrinfo
if socks:
socks.socksocket = old_socksocket
socks.__dict__['socksocket'] = old_socksocket
if ssl:
ssl.wrap_socket = old_ssl_wrap_socket
ssl.SSLSocket = old_sslsocket
ssl.__dict__['wrap_socket'] = old_ssl_wrap_socket
ssl.__dict__['SSLSocket'] = old_sslsocket
if not PY3:
ssl.sslwrap_simple = old_sslwrap_simple
ssl.__dict__['sslwrap_simple'] = old_sslwrap_simple
@classmethod
def is_enabled(cls):
return cls._is_enabled
@classmethod
def enable(cls):
cls._is_enabled = True
socket.socket = fakesock.socket
socket._socketobject = fakesock.socket
socket.SocketType = fakesock.socket
socket.create_connection = create_fake_connection
socket.gethostname = fake_gethostname
socket.gethostbyname = fake_gethostbyname
socket.getaddrinfo = fake_getaddrinfo
socket.__dict__['socket'] = fakesock.socket
socket.__dict__['_socketobject'] = fakesock.socket
socket.__dict__['SocketType'] = fakesock.socket
socket.__dict__['create_connection'] = create_fake_connection
socket.__dict__['gethostname'] = fake_gethostname
socket.__dict__['gethostbyname'] = fake_gethostbyname
socket.__dict__['getaddrinfo'] = fake_getaddrinfo
if socks:
socks.socksocket = fakesock.socket
socks.__dict__['socksocket'] = fakesock.socket
if ssl:
ssl.wrap_socket = fake_wrap_socket
ssl.SSLSocket = FakeSSLSocket
ssl.__dict__['wrap_socket'] = fake_wrap_socket
ssl.__dict__['SSLSocket'] = FakeSSLSocket
if not PY3:
ssl.sslwrap_simple = fake_wrap_socket
ssl.__dict__['sslwrap_simple'] = fake_wrap_socket
def httprettified(test):
"A decorator tests that use HTTPretty"
def decorate_class(klass):
for attr in dir(klass):
if not attr.startswith('test_'):
continue
attr_value = getattr(klass, attr)
if not hasattr(attr_value, "__call__"):
continue
setattr(klass, attr, decorate_callable(attr_value))
return klass
def decorate_callable(test):
@functools.wraps(test)
def wrapper(*args, **kw):
httpretty.reset()
httpretty.enable()
try:
return test(*args, **kw)
finally:
httpretty.disable()
return wrapper
if isinstance(test, ClassTypes):
return decorate_class(test)
return decorate_callable(test)
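# Added usage sketch (not part of the original module): how this API is
# normally exercised -- enable the monkey-patching via the decorator, register
# a canned response for a URI, then make a real-looking HTTP call. The
# `requests` dependency and the example URL are assumptions made only for the
# illustration; the helper is never called at import time.
def _example_usage():  # pragma: no cover
    import requests  # assumed to be available just for this sketch

    @httprettified
    def check():
        httpretty.register_uri(httpretty.GET, "http://api.example.com/",
                               body='{"ok": true}',
                               content_type="application/json")
        response = requests.get("http://api.example.com/")
        assert response.json() == {"ok": True}
        assert httpretty.last_request.method == "GET"

    check()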
|
[
"jamescoulter@thinkboxsoftware.com"
] |
jamescoulter@thinkboxsoftware.com
|
dd99d186c0a5d2a69b445acf76a84a4e543f5ee3
|
8a0d2d3d387ff97e24d6d10dd4d8575f834ddba6
|
/practice050718.py
|
0563a41878bd1f64c775fb4000575cba09427644
|
[] |
no_license
|
simonnunn/python
|
9265f3b6be509cdb2356e7bb884dfbef80c6eabe
|
b6294bbf1fa25a4d23f639ef5a16425860c60aee
|
refs/heads/master
| 2020-03-22T11:27:53.170317
| 2018-07-06T10:53:14
| 2018-07-06T10:53:14
| 139,972,264
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 290
|
py
|
file = open("teams.txt","r")
(file.readline())
(file.readline())
print("Second and Third Character from the third team:")
(file.read(0))
(file.read(1))
print(file.read(1))
print(file.read(1))
file.seek(0)
print("Rest of file:")
print(file.read())
file.close()
|
[
"noreply@github.com"
] |
simonnunn.noreply@github.com
|
7648b67635a5b5bf0cef50f3b8042857a0caa9d2
|
6df9a960c0a4e2049b5932938a83ee82d4516412
|
/creating-project/application/table/migrations/0005_filepath_folder_name.py
|
2106a336466bdd5fec83c6c7831cffbc37e71568
|
[] |
no_license
|
alekseykonotop/dj_hw
|
9585f0d42ec95d31f5eeae09b953e5f195bc9ee7
|
6752361d007d777127eb77445d45da58332e0223
|
refs/heads/master
| 2021-07-19T06:30:04.333018
| 2019-09-21T18:12:38
| 2019-09-21T18:12:38
| 177,439,677
| 0
| 0
| null | 2020-06-05T22:56:52
| 2019-03-24T16:24:46
|
Python
|
UTF-8
|
Python
| false
| false
| 458
|
py
|
# Generated by Django 2.2 on 2019-09-11 17:22
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('table', '0004_auto_20190911_1412'),
]
operations = [
migrations.AddField(
model_name='filepath',
name='folder_name',
field=models.CharField(default='', max_length=100, verbose_name='Имя папки для хранения'),
),
]
|
[
"alekseykonotop@gmail.com"
] |
alekseykonotop@gmail.com
|
7697a6995f82fc580f90a49854e81fad1c3504cb
|
00f9d3664d012cd7964bd0993bafe624cfe75d80
|
/Using LSTM/model.py
|
5c067ab49182c40c33cc83cd674c2dd3a57cd620
|
[] |
no_license
|
Aakash12980/Disaster-Prediction-
|
6fef85afb7b7cd0d553b90cf396386c9c8763204
|
b397a72faff1bfb9048ef38989e87e66e1c9a019
|
refs/heads/master
| 2023-01-08T16:28:49.294465
| 2020-10-30T17:17:15
| 2020-10-30T17:17:15
| 296,061,088
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,631
|
py
|
import torch
import torch.nn as nn
import sys
from torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence
from model_embeddings import ModelEmbeddings
from typing import List, Tuple, Dict, Set, Union
class ClassifyModel(nn.Module):
def __init__(self, embed_size, hidden_size, output_size, n_layers, vocab, device, drop_out=0.2):
super(ClassifyModel, self).__init__()
self.model_embed = ModelEmbeddings(embed_size, vocab)
self.hidden_size = hidden_size
self.output_size = output_size
self.drop_out = drop_out
self.n_layers = n_layers
self.vocab = vocab
self.device = device
self.encoder = nn.LSTM(input_size=embed_size, hidden_size=self.hidden_size, num_layers=self.n_layers, bias=True, bidirectional=False, dropout=self.drop_out)
self.fully_connected_layer = nn.Linear(in_features=self.hidden_size, out_features=self.output_size, bias=True)
self.sigmoid_layer = nn.Sigmoid()
def forward(self, x_data, hidden):
x_len = [len(i) for i in x_data]
x_padded = self.vocab.x.to_input_tensor(x_data, device=self.device)
embed_mat = self.model_embed.embed_x(x_padded)
seq_padded = pack_padded_sequence(embed_mat, x_len)
enc_hiddens, (last_hidden, last_cell) = self.encoder(seq_padded)
enc_hiddens = pad_packed_sequence(sequence=enc_hiddens)[0].permute(1,0,2)
# concat_layers = torch.cat((last_hidden[-2, :, :], last_hidden[-1, :, :]), 1)
fully_connect_output = self.fully_connected_layer(last_hidden[0])
return self.sigmoid_layer(fully_connect_output)
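
# --- Minimal shape sketch (an assumption, not part of the original model).  It shows the
# same LSTM -> Linear -> Sigmoid head as ClassifyModel above, but with a plain nn.Embedding
# standing in for ModelEmbeddings/vocab (which live elsewhere in this repo).  All sizes and
# the dummy token ids are made up; it reuses the torch/nn imports at the top of this file.
if __name__ == "__main__":
    embed = nn.Embedding(num_embeddings=100, embedding_dim=16, padding_idx=0)
    encoder = nn.LSTM(input_size=16, hidden_size=32, num_layers=1)
    head = nn.Sequential(nn.Linear(32, 1), nn.Sigmoid())

    tokens = torch.randint(1, 100, (12, 4))       # (seq_len, batch) of fake token ids
    _, (last_hidden, _) = encoder(embed(tokens))  # last_hidden: (num_layers, batch, hidden)
    probs = head(last_hidden[-1])                 # (batch, 1) probabilities in [0, 1]
    print(probs.shape)                            # torch.Size([4, 1])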
|
[
"Bhandariaakash9@gmail.com"
] |
Bhandariaakash9@gmail.com
|
d9ce7335aef2bd731017faf59e41c3b883a0d6f9
|
6ae93145e035a063cb8cd87d663f35f53d3bfe9e
|
/src/bot.py
|
27e1b964d6ddd19483932506938a102412f84d30
|
[
"MIT"
] |
permissive
|
Eduardo3445/flappyAI
|
533672366031e7ad4644707c58367ad5a16bb94d
|
0d1e0ec8195f095d7f7e8f5010a13599328654ff
|
refs/heads/master
| 2020-12-22T17:53:14.534227
| 2019-04-06T02:34:52
| 2019-04-06T02:34:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,023
|
py
|
import json
class Bot(object):
"""
The Bot class that applies the Qlearning logic to Flappy bird game
After every iteration (iteration = 1 game that ends with the bird dying) updates Q values
After every DUMPING_N iterations, dumps the Q values to the local JSON file
"""
def __init__(self):
self.gameCNT = 0 # Game count of current run, incremented after every death
self.DUMPING_N = 25 # Number of iterations to dump Q values to JSON after
self.discount = 1.0
self.r = {0: 1, 1: -1000} # Reward function
self.lr = 0.7
self.load_qvalues()
self.last_state = "420_240_0"
self.last_action = 0
self.moves = []
def load_qvalues(self):
"""
Load q values from a JSON file
"""
self.qvalues = {}
try:
fil = open("qvalues.json", "r")
except IOError:
return
self.qvalues = json.load(fil)
fil.close()
def act(self, xdif, ydif, vel):
"""
Chooses the best action with respect to the current state - Chooses 0 (don't flap) to tie-break
"""
state = self.map_state(xdif, ydif, vel)
self.moves.append(
(self.last_state, self.last_action, state)
) # Add the experience to the history
self.last_state = state # Update the last_state with the current state
if self.qvalues[state][0] >= self.qvalues[state][1]:
self.last_action = 0
return 0
else:
self.last_action = 1
return 1
def update_scores(self, dump_qvalues = True):
"""
Update qvalues via iterating over experiences
"""
history = list(reversed(self.moves))
# Flag if the bird died in the top pipe
high_death_flag = True if int(history[0][2].split("_")[1]) > 120 else False
# Q-learning score updates
t = 1
for exp in history:
state = exp[0]
act = exp[1]
res_state = exp[2]
# Select reward
if t == 1 or t == 2:
cur_reward = self.r[1]
elif high_death_flag and act:
cur_reward = self.r[1]
high_death_flag = False
else:
cur_reward = self.r[0]
# Update
# self.qvalues[state][act] = (1-self.lr) * (self.qvalues[state][act]) + \
# self.lr * ( cur_reward + self.discount*max(self.qvalues[res_state]) )
# SARSA
self.qvalues[state][act] = self.qvalues[state][act] + \
self.lr * (cur_reward + self.discount * max(self.qvalues[res_state]) - self.qvalues[state][act])
t += 1
self.gameCNT += 1 # increase game count
if dump_qvalues:
self.dump_qvalues() # Dump q values (if game count % DUMPING_N == 0)
self.moves = [] # clear history after updating strategies
def map_state(self, xdif, ydif, vel):
"""
Map the (xdif, ydif, vel) to the respective state, with regards to the grids
The state is a string, "xdif_ydif_vel"
X -> [-40,-30...120] U [140, 210 ... 420]
Y -> [-300, -290 ... 160] U [180, 240 ... 420]
"""
if xdif < 140:
xdif = int(xdif) - (int(xdif) % 10)
else:
xdif = int(xdif) - (int(xdif) % 70)
if ydif < 180:
ydif = int(ydif) - (int(ydif) % 10)
else:
ydif = int(ydif) - (int(ydif) % 60)
return str(int(xdif)) + "_" + str(int(ydif)) + "_" + str(vel)
def dump_qvalues(self, force = False):
"""
Dump the qvalues to the JSON file
"""
if self.gameCNT % self.DUMPING_N == 0 or force:
fil = open("qvalues.json", "w")
json.dump(self.qvalues, fil)
fil.close()
print("Q-values updated on local file.")
|
[
"guilhermenazb@gmail.com"
] |
guilhermenazb@gmail.com
|
e802e9dfae9b27c85a6898fd055e57234915dfb5
|
e01fb71c991e57504fa745d0a29b4a84033db438
|
/collectiveintelligence_book_master/feedfilter.py
|
6167fcf2df278aaaf041ecc8d8e836fcc4bc69dd
|
[] |
no_license
|
NandaCj/Machine_Learning1
|
6707f36d71e26dcdca03fc11da27f724e21f265e
|
fc2255f6932d8fd7a0ec002e6885e5a45fd04fe5
|
refs/heads/master
| 2021-06-20T01:32:17.249662
| 2019-05-30T06:42:13
| 2019-05-30T06:42:13
| 134,279,997
| 2
| 0
| null | 2019-05-30T06:42:14
| 2018-05-21T14:25:13
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,757
|
py
|
import feedparser
import re
def interestingwords(s):
splitter = re.compile(r'\W*')
return [s.lower() for s in splitter.split(s) if len(s) > 2 and len(s) < 20]
def entryfeatures(entry):
f = {}
# extract title
titlewords = interestingwords(entry['title'])
for w in titlewords: f['Title:' + w] = 1
# extract summary
summarywords = interestingwords(entry['summary'])
# count uppercase words
uc = 0
for i in range(len(summarywords)):
w = summarywords[i]
f[w] = 1
if w.isupper(): uc += 1
    # get word pairs in summary as features
    if i < len(summarywords) - 1:
      twowords = ' '.join(summarywords[i:i+2])
f[twowords] = 1
# keep creator and publisher as a whole
f['Publisher:' + entry['publisher']] = 1
# Insert virtual keyword for uppercase words
if float(uc) / len(summarywords) > 0.3: f['UPPERCASE'] = 1
print f.keys()
return f.keys()
def read(feed, classifier):
f = feedparser.parse(feed)
for entry in f['entries']:
print
print '----'
print 'Title: ' + entry['title'].encode('utf-8')
print 'Publisher: ' + entry['publisher'].encode('utf-8')
print
print entry['summary'].encode('utf-8')
fulltext = '%s\n%s\n%s' % (
entry['title'], entry['publisher'], entry['summary'])
#print 'Guess: ' + str(classifier.classify(fulltext))
#cl = raw_input('Enter category: ')
#classifier.train(fulltext, cl)
print 'Guess: ' + str(classifier.classify(entry))
cl = raw_input('Enter category: ')
classifier.train(entry, cl)
if __name__ == '__main__':
import docclass
#cl = docclass.fisherclassifier(docclass.getwords)
cl = docclass.fisherclassifier(entryfeatures)
cl.setdb('python_feed.db')
read('python_search.xml', cl)
|
[
"nandpara@cisco.com"
] |
nandpara@cisco.com
|
d297cee91f53501ac87135d881bc7513083be016
|
2bc6a464116ca246ec7688938a5160549d0b2638
|
/src/entities/towers/tower.py
|
61083c7ddc728c8ae81f2bc7d0d6b3c27e936894
|
[
"MIT"
] |
permissive
|
evrardco/GameJam-lln-2021
|
4d7657d507132d055c42e650ad4adf4fd6aebf91
|
ee2cce0feb423a0b3319c9933c8c8b5748225e39
|
refs/heads/main
| 2023-03-20T21:17:07.779340
| 2021-03-21T13:02:00
| 2021-03-21T13:02:00
| 349,721,579
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,820
|
py
|
from arcade import Sprite
from arcade import color
from arcade.draw_commands import draw_circle_outline
class Tower(Sprite):
def __init__(self, game_level, *args, **kwargs):
super().__init__(*args, **kwargs)
self.range = 100
self.max_lvl = 1
self.lvl = 0
self.fire_rate = 1 # fire per ms
        self.game_level = game_level
self.enemies = self.game_level.enemy_list
self.id = Tower.id_counter
self.selected = False
self._elapsed_fire = self.fire_rate
self.cost = 1
self.dmg = 1
self.name = "Tower"
Tower.id_counter += 1
def draw(self):
super().draw()
if self.selected:
draw_circle_outline(self.center_x, self.center_y, self.range, color.BLUE)
def on_update(self, delta_time: float):
targets = self.targets_in_range()
if self.lvl > 0 and targets and self._elapsed_fire >= self.fire_rate:
self.fire(targets)
self._elapsed_fire = 0
self._elapsed_fire = min(self._elapsed_fire + delta_time, self.fire_rate)
super().on_update(delta_time=delta_time)
def targets_in_range(self):
enemies_in_range = []
for e in self.enemies:
if abs(e.center_x - self.center_x) + abs(e.center_y - self.center_y) < self.range:
enemies_in_range.append(e)
return enemies_in_range
def fire(self, targets):
# print(f"[Tower {self.id}] {len(targets)} enemies in range")
pass
def lvl_up(self):
if self.game_level.followers < self.cost or self.lvl + 1 > self.max_lvl:
return False
self.lvl += 1
self.game_level.set_followers(self.game_level.followers - self.cost)
return True
Tower.id_counter = 0
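
# --- Hedged note and pure-Python sketch (not part of the original file): targets_in_range()
# above uses the Manhattan distance |dx| + |dy| < range, while draw() outlines a Euclidean
# circle of the same radius, so the drawn circle slightly overstates the true firing area
# along the diagonals.  The numbers below are made up.
def _in_manhattan_range(cx, cy, ex, ey, rng):
    return abs(ex - cx) + abs(ey - cy) < rng

if __name__ == "__main__":
    print(_in_manhattan_range(0, 0, 40, 50, 100))   # True:  |40| + |50| = 90  < 100
    print(_in_manhattan_range(0, 0, 80, 30, 100))   # False: |80| + |30| = 110 >= 100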
|
[
"maxime_postaire@hotmail.fr"
] |
maxime_postaire@hotmail.fr
|
db0f581d7ce78000bfb096134567d12e6a21a513
|
476936830895db3551e622c8289f6af22f1a81d2
|
/image_processing/pixelate.py
|
96942bfe799d7a60a3f92b97e56fab15b15aca0a
|
[] |
no_license
|
harshitgarg22/learn-python
|
b5705fb5e77f86444ed3da72d6d0edc7b48d0b48
|
ccd0e614bb381b800e9cc224d87bbdacaff2daaf
|
refs/heads/master
| 2022-12-07T21:59:25.213842
| 2020-04-28T15:12:33
| 2020-04-28T15:12:33
| 142,181,275
| 0
| 0
| null | 2022-11-22T05:59:41
| 2018-07-24T15:54:10
|
Python
|
UTF-8
|
Python
| false
| false
| 563
|
py
|
from PIL import Image
from utils_pixelate import create_image, get_pixel
def pixelate(image, STEP=50):
width, height = image.size
outImage = create_image(width, height)
pixels = outImage.load()
for i in range(0, width, STEP):
for j in range(0, height, STEP):
for a in range(0, STEP):
for b in range(0, STEP):
try:
pixels[i + a, j + b] = get_pixel(image, i, j)
                    except IndexError:
                        # the block runs past the image edge; skip out-of-range pixels
                        pass
# Return new image
return outImage
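
# --- Usage sketch (hedged): exercises pixelate() on a small synthetic gradient.  It assumes
# the helpers create_image/get_pixel from utils_pixelate behave as their names suggest
# (create a blank image, return the pixel at (i, j)); the sizes and colours are made up.
if __name__ == "__main__":
    demo = Image.new("RGB", (100, 100))
    for px in range(100):
        for py in range(100):
            demo.putpixel((px, py), (px * 2, py * 2, 0))
    result = pixelate(demo, STEP=25)
    # every pixel inside a 25x25 block should now share that block's top-left colour
    print(result.getpixel((0, 0)), result.getpixel((24, 24)))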
|
[
"hg1229@gmail.com"
] |
hg1229@gmail.com
|
ada2aa2d1c79110f48f2d4b2b1baff97eab6529c
|
5eaed6eaf1ad846e3603c070658fc1b5b8b09072
|
/Day11-class2/practice04.py
|
fd54c999c5130460a42d2c6f7c53a3f2f3e1773f
|
[] |
no_license
|
Heez27/AI_Edu
|
c3de56d532a616247f4d79606043f85f652c93e7
|
79025327e6eed2c368fec1d73ac94aaf271754a7
|
refs/heads/main
| 2023-05-09T12:35:22.000559
| 2021-05-31T07:45:42
| 2021-05-31T07:45:42
| 349,044,578
| 1
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 299
|
py
|
#Write the multiplication table (gugudan) so that the output looks like the following. (nested for~in)
#1 x 1 = 1 2 x 1 = 2 ... 9 x 1 = 9
#...
#1 x 9 = 9 2 x 9 = 18 ... 9 x 9 = 81
for i in range(1,10):
for j in range(1,10):
print('%d x %d = %d '%(j,i,i*j),end='')
print()
|
[
"noreply@github.com"
] |
Heez27.noreply@github.com
|
23bb63cf22fff9fd08050e0f1b3f3c7a82d38052
|
aeef73ecc10cf276b82246a0fab4b824a69e3e71
|
/TEpython4.py
|
e59e7fe9fe902f790e3c9dfb80408177988f9ed5
|
[] |
no_license
|
LaurelOak/Peatland-Flux-Analysis
|
dad07c4770ba5ffc67c3c4e487a63ccf86f428b8
|
31a86f0ff403f495835cf2bc5b470f12c11aa610
|
refs/heads/master
| 2021-03-04T13:34:03.184874
| 2020-03-11T15:09:00
| 2020-03-11T15:09:00
| 246,037,736
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 41,148
|
py
|
#!/usr/bin/env python
# coding: utf-8
# ## This module contains the TE code translation from Matlab
#
# (MatLab version written by Laurel L. and modified by Dino B. Translation to Python by Edom M.)
# (Updated 2/25/20 by Laurel to accept inputs with NaNs.)
# Subsequent major update on 3/9/20 by Laurel L. to calculate the TE significance threshold on M rather than Mshort. Previously, the truncated dataset (Mshort) was reshuffled and then relagged, which shortened the dataset further. Now, the data are shuffled and then resampled, lagged, and truncated as in the calculation of TE on the full dataset. Thus, different datapoints may go into the computation of TE for the shuffled probability distribution, but the number of datapoints is the same as in the original TE calculation.
#Also new in this version: Resampling the data matrices based on the source data's autocorrelation function. This makes the computation more appropriate for information transfer due to low-frequency signals (e.g., day-of-year anomalies, as opposed to high-frequency anomalies calculated from moving average filters) because each feature of the signal is theoretically only sampled once. This avoids pseudoreplication.To turn this feature off, specify a period of 1 instead of per in the functions that are called internally in the RunNewTEVarsSer codes, but this is not recommended.
# The following functions are included in this module:
#
# 1. Mutual information
#
# 1. mutinfo_new(M, nbins) - Calculates mutual information I(x,y).
#
#
# 2. Transfer entropy
#
# 1. transen_new(M, lag, nbins) - Calculates transfer information - TE(x,y) x to y. x source M[:,0] and y the sink M[:,1].
#
#
# 3. Intermediate functions
#
# 1. LagData_new - shifts a matrix so that it is rearranged to be ready for TE calculation as in Knutt et al., 2005
# 2. jointentropy_new(M, nbins) - Calculates the joint entropy H(x,y)
# 3. jointentropy3_new(M, nbins) - Calculates the joint entropy for three variables H(x,y,z)
# 4. shuffle( M ) - shuffles the entries of the matrix M in time while keeping NaNs (blank data values) NaNs. So that, Monte Carlo is possible
# 5. transenshuffle_new(M, lag, nbins) - Calculates the transfer entropy for a shuffled time series that has already been lined up with LagData
#
#
# 4. Monte Carlo analysis of mutual information and transfer entropy
#
# 1. mutinfo_crit_new( M, nbins, alpha, numiter) - Finds critical values of mutual information statistics that needs to be exceeded for statistical significance
# 2. transen_crit_new( M, lag, alpha, numiter, nbins) - Finds the critical value of the transfer entropy statistic that needs to be exceeded for statistical signficance
#
#
# 5. All in one code
# 1. RunNewTE2VarsSer(DataMatrix, LabelCell, SinkNodes, SourceNodes, resultsDir, maxLag, minSamples, numShuffles, sigLevel, numBins) - runs all together in serial mode.
# 2. RunNewTE2VarsSer2(DataMatrix, LabelCell, SinkNodes, SourceNodes, resultsDir, maxLag, minSamples, numShuffles, sigLevel, numBins) - runs all together in serial mode. Sink lag fixed at lag 1 for self optimality.
get_ipython().run_line_magic('matplotlib', 'inline')
import pandas as pd
import numpy as np
import datetime as dt
import matplotlib.pyplot as plt
import copy
import os
np.random.seed(50)
from scipy.stats import norm
# In[2]:
def checkMakeDir2(dirName): #
result = dirName
result2 = dirName*2
return result, result2
# In[4]:
# def checkMakeDir(dirName):
# ### Mutual information
# In[9]:
def mutinfo_new(M, nbins):
# Calculates mutual information
# M is an array with two columns [ source, sink]
# nbins list of number of bins in 1D, 2D and 3D, with three elements
ths = 1e-5
this_col1 = M[:,0]
counts1, binEdges1=np.histogram(this_col1[~np.isnan(this_col1)],bins=nbins[1]) # Source Variable. Figure out bin edges without NaNs.
binEdges1[0] = binEdges1[0]-ths
binEdges1[len(binEdges1)-1]=binEdges1[len(binEdges1)-1]+ths
col1cat = np.digitize(M[:,0], binEdges1, right=False) #Bin index for each entry. NaN values are assigned to index = nbins + 1
this_col2 = M[:,1]
counts2, binEdges2=np.histogram(this_col2[~np.isnan(this_col2)],bins=nbins[1]) # Sink Variable
binEdges2[0] = binEdges2[0]-ths
binEdges2[len(binEdges2)-1]=binEdges2[len(binEdges2)-1]+ths
col2cat = np.digitize(M[:,1], binEdges2, right=False) # which bin (ID) is the data located. NaN values are assigned to index = nbins + 1
#Now assign the NaN values to bin 0
col1cat[col1cat==nbins[1]+1] = 0
col2cat[col2cat==nbins[1]+1] = 0
col1cat[col2cat==0] = 0 #If there is an NaN for any row, assign the other column in that row to the NaN bin too
col2cat[col1cat==0] = 0 #If there is an NaN for any row, assign the other column in that row to the NaN bin too
#print(col1cat)
# convert 1D histogram to a 2D histogram
    jointentcat = (col1cat-1)*nbins[1]+col2cat #This classifies the joint entropy bin into a number between 1 and nbins[1]^2. 0 is assigned to rows with missing data.
nbins_2 = nbins[1]**2
N = np.bincount(jointentcat[jointentcat>0]) # Number of datapoints within each joint entropy bin, not including NaN bins.
p = N/sum(N); # Vector of probabilities
# 1D probability/histogram
N1, binEdges1d1=np.histogram(this_col1[~np.isnan(this_col1)],bins=nbins[0]) # Which bin the first data column is in
N2, binEdges1d2=np.histogram(this_col2[~np.isnan(this_col2)],bins=nbins[0]) #Which bin the second data column is in
p1 = N1/sum(N1)
p2 = N2/sum(N2)
    # Shannon entropy
pgt0 = p[p>0] # px,y
p1gt0 = p1[p1>0] # px
p2gt0 = p2[p2>0] # py
log2p2gt0 = np.log2(p2gt0)
#Shannon entropy of the sink variable. Used to normalize mutual informaiton in the next line.
Hy = (-sum(p2gt0*log2p2gt0))
# Mutual information, in bits. Joint entropy is scaled to the number of bins in a single dimension.
I = ( (-sum(p1gt0*np.log2(p1gt0)) - sum(p2gt0*log2p2gt0) ) + (sum(pgt0*np.log2(pgt0)))*np.log2(nbins[0])/np.log2(nbins[1]))/Hy
# double integral in the last component is done as a 1D.
#return nbins_2, jointentcat,p , sum(N), I, Hy
return I
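
# --- Usage sketch (an assumption; purely synthetic data to exercise mutinfo_new above).
# y is a noisy copy of x, so its normalized mutual information with x should come out well
# above that of an independent series z; the sample size and bin counts are made up.
if __name__ == "__main__":
    _rng = np.random.RandomState(0)
    _x = _rng.randn(5000)
    _y = _x + 0.3 * _rng.randn(5000)          # strongly dependent on x
    _z = _rng.randn(5000)                     # independent of x
    print("I(x;y) =", mutinfo_new(np.column_stack((_x, _y)), [11, 11, 11]))
    print("I(x;z) =", mutinfo_new(np.column_stack((_x, _z)), [11, 11, 11]))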
# ## Intermediate functions
# In[13]:
def PickSampleInterval(X, maxlag=365, alpha=0.05): #Dynamically selects the appropriate interval for sampling the data, based on the autocorrelation function. Nans are OK. Alpha is the significance level for assessing the significance of the autocorrelation function.
nX = len(X)
r = np.zeros(maxlag)
sig_thr = np.zeros(maxlag)
for ii in range(maxlag):
if ii == 0:
Y = X
Z = X
else:
Y = X[:-ii].copy()
Z = X[ii:].copy()
Y[np.isnan(Z)]=np.nan
Z[np.isnan(Y)]=np.nan
r[ii] = np.corrcoef(Y[~np.isnan(Y)], Z[~np.isnan(Z)])[0,1]
sig_thr[ii] = norm.ppf(1-alpha/2)/np.sqrt(np.sum(~np.isnan(Y)))
# plt.plot(r)
# plt.xlabel('Lag')
# plt.ylabel('Autocorrelation')
above_thr = r-sig_thr
not_corr = np.where(above_thr<0)
if np.min(np.size(not_corr))>0:
per = not_corr[0][0] # "per" is the period. The data should be sampled once per period.
else:
per = maxlag
return per
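
# --- Sketch (hedged, synthetic data): for white noise the autocorrelation typically drops
# below the significance threshold at lag 1, so PickSampleInterval returns a period of about 1;
# for a slow random walk it stays significant over the whole window and the period falls back
# to maxlag.  The series lengths and maxlag below are made up.
if __name__ == "__main__":
    _rng = np.random.RandomState(2)
    print(PickSampleInterval(_rng.randn(2000), maxlag=100))              # typically 1
    print(PickSampleInterval(np.cumsum(_rng.randn(2000)), maxlag=100))   # typically 100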
def LagData_new( M_unlagged, shift ):
# LagData Shifts two time-series so that a matrix is generated that allows easy computation of Knutt et al 2005 based TE computation
# M_unlagged is a matrix [X Y..n], where X and Y are column vectors of the
# variables to be compared. shift is a row vector that says how much each
# variable in M_unlagged is to be shifted by.
nR,nC = np.shape(M_unlagged)
maxShift = max(shift)
minShift = min(shift)
newlength = nR - maxShift + minShift
M_lagged = np.nan*np.ones([newlength, nC]) #[source_lagged(1:n-lag), sink_unlagged(lag:n), sink_lagged(1:n-lag)]
#@@@@@@@@@@@@@@######## Dino's verson uses shift of [0, 0, -lag ] for the shuffle case of transfer entropy (transenshuffle_new)
for ii in range(np.shape(M_lagged)[1]):
M_lagged[:,ii] = M_unlagged[(shift[ii]-minShift):(np.shape(M_unlagged)[0]-maxShift+shift[ii]), ii]
return M_lagged
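
# --- Worked example (sketch, synthetic numbers) of the alignment produced by LagData_new:
# with shift = [-2, 0, -2] the source column and the lagged sink column are shifted two steps
# back relative to the unlagged sink, so each output row pairs x(t-2) and y(t-2) with y(t).
if __name__ == "__main__":
    _x = np.arange(10.0)
    _demo = LagData_new(np.column_stack((_x, _x + 100, _x + 100)), [-2, 0, -2])
    print(_demo[:2])   # [[  0. 102. 100.]
                       #  [  1. 103. 101.]]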
def ResampleLagged(M_lagged, per): #Resample the data in M to avoid pseudoreplication. Per is the output of PickSampleInterval.
M_lagged = M_lagged[::per][:]
return M_lagged
# Alternatively
# lag = np.abs(shift[0])
# M_lagged[:,0] = M_unlagged[0:(nR-lag), 0]
# M_lagged[:,1] = M_unlagged[lag:(nR),1]
# M_lagged[:,2] = M_unlagged[0:(nR-lag),2]
# return M_lagged
# In[27]:
def jointentropy_new(M, nbins):
# Calculates the joint entropy H(x,y)
# M is two dimensional column matrix for which joint entropy is to be computed
# H is the normalized joint entropy
# nvalidpoints is the number of rows (samples) used to calculate the joint entropy
ths = 1e-5 #tolerance
this_col = M[:,0]
counts1, binEdges1=np.histogram(this_col[~np.isnan(this_col)],bins=nbins) # Source Variable [ ]
binEdges1[0] = binEdges1[0]-ths
binEdges1[len(binEdges1)-1]=binEdges1[len(binEdges1)-1]+ths
col1cat = np.digitize(M[:,0], binEdges1, right=False) #NaNs will be in bin nbins+1
this_col = M[:,1]
counts2, binEdges2=np.histogram(this_col[~np.isnan(this_col)],bins=nbins) # Sink Variable
binEdges2[0] = binEdges2[0]-ths
binEdges2[len(binEdges2)-1]=binEdges2[len(binEdges2)-1]+ths
col2cat = np.digitize(M[:,1], binEdges2, right=False) # which bin (ID) is the data located
#Now assign the NaN values to bin 0
col1cat[col1cat==nbins+1] = 0
col2cat[col2cat==nbins+1] = 0
col1cat[col2cat==0] = 0 #If there is an NaN for any row, assign the other column in that row to the NaN bin too
col2cat[col1cat==0] = 0 #If there is an NaN for any row, assign the other column in that row to the NaN bin too
#print(col1cat)
# convert 1D histogram to a 2D histogram
    jointentcat = (col1cat-1)*nbins+col2cat #This classifies the joint entropy bin into a number between 1 and nbins^2. 0 is assigned to rows with missing data.
nbins_2 = nbins**2
N = np.bincount(jointentcat[jointentcat>0]) # Number of datapoints within each joint entropy bin, not including NaN bins.
p = N/sum(N); # Vector of probabilities
pgt0 = p[p>0] # p(x,y)
H = -sum(pgt0*np.log2(pgt0))
nvalidpoints = sum(N)
return H, nvalidpoints
# In[29]:
def jointentropy3_new(M, nbins):
# Calculates the joint entropy for three variables H(x,y,z)
# M is a three-column matrix that contains the input vectors of data.
# nvalidpoints is the number of rows (samples) used to calculate the joint entropy
ths = 1e-5 #tolerance
this_col = M[:,0] #Source variable
counts1, binEdges1=np.histogram(this_col[~np.isnan(this_col)],bins=nbins) # Determine bin edges from non-NaN dataset
binEdges1[0] = binEdges1[0]-ths
binEdges1[len(binEdges1)-1]=binEdges1[len(binEdges1)-1]+ths
col1cat = np.digitize(M[:,0], binEdges1, right=False)
this_col = M[:,1] #Sink variable
counts2, binEdges2=np.histogram(this_col[~np.isnan(this_col)],bins=nbins) # Determine bin edges from non-NaN dataset
binEdges2[0] = binEdges2[0]-ths
binEdges2[len(binEdges2)-1]=binEdges2[len(binEdges2)-1]+ths
col2cat = np.digitize(M[:,1], binEdges2, right=False) # which bin (ID) is the data located
this_col = M[:,2] # Source variable
counts3, binEdges3=np.histogram(this_col[~np.isnan(this_col)],bins=nbins) # Determine bin edges from non-NaN dataset
binEdges3[0] = binEdges3[0]-ths
binEdges3[len(binEdges3)-1]=binEdges3[len(binEdges3)-1]+ths
col3cat = np.digitize(M[:,2], binEdges3, right=False)
#Now assign the NaN values to bin 0
col1cat[col1cat==nbins+1] = 0
col2cat[col2cat==nbins+1] = 0
col3cat[col3cat==nbins+1] = 0
#If there is an NaN for any row, assign the other column in that row to the NaN bin too
col1cat[col2cat==0] = 0
col1cat[col3cat==0] = 0
col2cat[col1cat==0] = 0
col3cat[col1cat==0] = 0
    # This classifies the joint entropy bin into a number between 1 and nbins^3. 0 is assigned to rows with missing data.
jointentcat = (col1cat-1)*nbins**2 + (col2cat-1)*nbins + col3cat
#print(np.asarray((jointentcat,col1cat,col2cat, col3cat)).T)
nbins_3 = nbins**3
N = np.bincount(jointentcat[jointentcat>0]) # Number of datapoints within each joint entropy bin.
sumN = sum(N)
p = N/sumN # Vector of probabilities
pgt0 = p[p>0]
H = -sum(pgt0*np.log2(pgt0))
nvalidpoints = sumN
return H, nvalidpoints
# In[32]:
def shuffle( M ):
# shuffles the entries of the matrix M in time while keeping NaNs (blank data values) NaNs.
# M is the matrix where the columns are individual variables and the rows are entries in time
Mss = np.ones(np.shape(M))*np.nan # Initialize
for n in range(np.shape(M)[1]): # Columns are shuffled separately
notnans = np.argwhere(~np.isnan(M[:,n]))
R = np.random.rand(np.shape(notnans)[0],1) #np.random.rand(5,1)
I = np.argsort(R,axis=0)
#print(notnans[:,0])
#print(notnans[I,0])
#print('a',M[notnans[:,0],n])
Mss[notnans[:,0],n] = M[notnans[I[:],0],n].reshape(np.shape(notnans)[0],) #In the last version, the argument of np.shape() was M. This is not correct. It should be notnans. (Updated 2/25/20)
return Mss
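
# --- Sketch (synthetic numbers): shuffle() permutes the non-NaN entries of each column
# independently while leaving every NaN in place, which is what makes the Monte Carlo
# significance tests below compare like with like.
if __name__ == "__main__":
    _M = np.array([[1.0, 10.0], [2.0, np.nan], [3.0, 30.0], [np.nan, 40.0]])
    _Mss = shuffle(_M)
    print(np.isnan(_Mss) == np.isnan(_M))   # True everywhere: the NaN pattern is preserved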
# ## Transfer entropy
# In[34]:
def transen_new(M, lag, nbins, per=1, resample_on=0):
# Calculates transfer information
# M is an array with two columns [ source, sink]
# nbins list of number of bins in 1D, 2D and 3D, with three elements
# lag is the time lag of interest.
# per is the period for resampling (from PickSampleInterval)
    # resample_on is binary, indicating whether resampling in accordance with per should be done. It should be off for determining critical values, as that subroutine is passed an already-resampled matrix.
# M4 is the lagged subset of data transfer entropy was run on.
M4 = LagData_new(np.column_stack((M, M[:,1])), [-lag, 0, -lag]) # source, sink, sink is input then
# M4 becomes [source_lagged(1:n-lag), sink_unlagged(lag:n), sink_lagged(1:n-lag)] => H(Xt-T, Yt, Yt-T)
M4[np.argwhere(np.isnan(np.sum(M4,axis=1))), :] = np.nan # Reset rows with any NaN entry to NaN.
M4 = ResampleLagged(M4, per) #Resample the lagged M by the autocorrelation-determined "period" to avoid pseudoreplication
M1 = M4[:,(0,2)] # [source_lagged(1:n-lag), sink_lagged(1:n-lag)] =>H(Xt-T,Yt-T)
M2 = M4[:,(1,2)] # [sink_unlagged(lag:n), sink_lagged(1:n-lag)] =>H(Yt,Yt-T)
#@@@@@@@@@@@@@@######## Dino uses M4[:,1] to be predicted
M3 = M4[:,2] # [sink_unlagged(lag:n)] to be predicted is used with DINO. BUT, need CORRECTION =>H(Yt) should be corrected to H(Yt-T) M[:,2]. Laurel's note: These two will have approximately the same entropy. The lagged version will just be the entropy over a partially truncated time series.
# Knutt et al indicates lagged being used H(Yt-T). Thus, M4[:,2]
# Now calculate the joint and marginal entropy components:
T1, n_valid_pairs1 = jointentropy_new(M1,nbins[1])
T2, n_valid_pairs2 = jointentropy_new(M2,nbins[1])
# Entropy for the single predictor
n3, valueatn = np.histogram(M3[~np.isnan(M3)], nbins[0]) # results in count [n3] and the corresponding value. Updated 2/25/20 to do this just over non-NaNs.
n3gt0 = n3[n3>0]
sumn3gt0 = sum(n3gt0)
T3 = -sum((n3gt0/sumn3gt0)*(np.log2(n3gt0/sumn3gt0))) # Nonnormalized Shannon entropy of variable Y
# Three variable entropy
T4, n_valid_pairs4 = jointentropy3_new(M4,nbins[2])
Tn = T3 # This is the Shannon entropy of Y, used to normalize the value of transfer entropy obtained below.
log2nbins1 = np.log2(nbins[0])
log2nbins2 = np.log2(nbins[1])
log2nbins3 = np.log2(nbins[2])
log2nbins1_2 = log2nbins1/log2nbins2
log2nbins1_3 = log2nbins1/log2nbins3
T1 = T1*log2nbins1_2
T2 = T2*log2nbins1_2
T4 = T4*log2nbins1_3
T = (T1+T2-T3-T4)/Tn # Knuth formulation of transfer entropy
N = min([n_valid_pairs1, n_valid_pairs2, n_valid_pairs4]) # Number of valid matched pairs used in the calculation
return T, N
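
# --- Usage sketch (an assumption; purely synthetic data).  y is driven by x with a 3-step
# delay plus noise, so the x -> y transfer entropy computed by transen_new should peak near
# lag = 3 and stay small at the other lags; N is the number of valid pairs and shrinks with
# the lag.  Series length, noise level and bin counts are made up.
if __name__ == "__main__":
    _rng = np.random.RandomState(1)
    _x = _rng.randn(3000)
    _y = np.r_[np.zeros(3), _x[:-3]] + 0.2 * _rng.randn(3000)
    _M = np.column_stack((_x, _y))
    for _lag in (1, 3, 6):
        _T, _N = transen_new(_M, _lag, [11, 11, 11])
        print(_lag, round(_T, 3), _N)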
# In[42]:
def transen_new2(M, shift, nbins, per): # with shift as an input, different lags between source and sink are possible
# shift [-lag of source, 0, - lag of sink] # lag of sink usually being 1
# Calculates transfer information
# M is an array with two columns [ source, sink]
# nbins list of number of bins in 1D, 2D and 3D, with three elements
# lag is the time lag of interest.
# per is the period for resampling (from PickSampleInterval)
# M4 is the lagged subset of data transfer entropy was run on.
M4 = LagData_new(np.column_stack((M, M[:,1])), shift) # source, sink, sink is input then
# M4 becomes [source_lagged(1:n-lag), sink_unlagged(lag:n), sink_lagged(1:n-lag)] => H(Xt-T, Yt, Yt-T)
M4[np.argwhere(np.isnan(np.sum(M4,axis=1))), :] = np.nan # Reset rows with any NaN entry to NaN.
M4 = ResampleLagged(M4, per) #Resample the lagged M by the autocorrelation-determined "period" to avoid pseudoreplication
M1 = M4[:,(0,2)] # [source_lagged(1:n-lag), sink_lagged(1:n-lag)] =>H(Xt-T,Yt-T)
M2 = M4[:,(1,2)] # [sink_unlagged(lag:n), sink_lagged(1:n-lag)] =>H(Yt,Yt-T)
#@@@@@@@@@@@@@@######## Dino uses M4[:,1] to be predicted
M3 = M4[:,2] # [sink_unlagged(lag:n)] to be predicted is used with DINO. BUT, need CORRECTION =>H(Yt) should be corrected to H(Yt-T) M[:,2]
# Knutt et al indicates lagged being used H(Yt-T). Thus, M4[:,2]
# Now calculate the joint and marginal entropy components:
T1, n_valid_pairs1 = jointentropy_new(M1,nbins[1])
T2, n_valid_pairs2 = jointentropy_new(M2,nbins[1])
# Entropy for the single predictor
n3, valueatn = np.histogram(M3[~np.isnan(M3)], nbins[0]) # results in count [n3] and the corresponding value
n3gt0 = n3[n3>0]
sumn3gt0 = sum(n3gt0)
T3 = -sum((n3gt0/sumn3gt0)*(np.log2(n3gt0/sumn3gt0))) # Nonnormalized Shannon entropy of variable Y
# Three variable entropy
T4, n_valid_pairs4 = jointentropy3_new(M4,nbins[2])
Tn = T3 # This is the Shannon entropy of Y, used to normalize the value of transfer entropy obtained below.
log2nbins1 = np.log2(nbins[0])
log2nbins2 = np.log2(nbins[1])
log2nbins3 = np.log2(nbins[2])
log2nbins1_2 = log2nbins1/log2nbins2
log2nbins1_3 = log2nbins1/log2nbins3
T1 = T1*log2nbins1_2
T2 = T2*log2nbins1_2
T4 = T4*log2nbins1_3
T = (T1+T2-T3-T4)/Tn # Knuth formulation of transfer entropy
N = min([n_valid_pairs1, n_valid_pairs2, n_valid_pairs4]) # Number of valid matched pairs used in the calculation
return T, N
# In[44]:
def transenshuffle_new(M, lag, nbins, per):
# Calculates the transfer entropy for a shuffled time series that has already been lined up with LagData
# Calculates the transfer entropy of X>Y, the amount by which knowledge
# of variable X at a time lag reduces the uncertainty in variable Y. M =
# [X Y], and lag is the time lag of interest. nbins is the number of bins
# used to discretize the probability distributions.
# per is the period for resampling (from PickSampleInterval)
Minput = shuffle(M[:,(0,1)])
T, N = transen_new(Minput, lag, nbins, per)
return T, N
# In[59]:
def transenshuffle_new2(M, shift, nbins, per):
# Calculates the transfer entropy for a shuffled time series that has already been lined up with LagData
# Calculates the transfer entropy of X>Y, the amount by which knowledge
# of variable X at a time lag reduces the uncertainty in variable Y. M =
# [X Y], and lag is the time lag of interest. nbins is the number of bins
# used to discretize the probability distributions.
# per is the period for resampling (from PickSampleInterval)
Minput = shuffle(M[:,(0,1)])
T, N = transen_new2(Minput, shift, nbins, per)
return T, N
# ## Critical values of Mutual information and Transfer entropy
# In[65]:
def mutinfo_crit_new( M, nbins, alpha, numiter):
# Finds critical values of mutual information statistics that needs to be exceeded for statistical significance
# M is the matrix where columns are the individual variables and rows ae the values in time.
# nbins - number of bins
# alpha - is the significance level
# numiter - is the number of Monte Carlo simulations for shuffling
MIss = np.ones([numiter])*np.nan
for ii in range(numiter):
Mss = shuffle(M)
MIss[ii] = mutinfo_new(Mss,nbins)
#print(MIss.shape)
MIss = np.sort(MIss)
    MIcrit = MIss[round((1-alpha)*numiter)] # sort the shuffled values and pick the (1-alpha) quantile as the significance level (e.g., the 95% quantile for alpha = 0.05)
return MIcrit
# In[67]:
def transen_crit_new( M, lag, alpha, numiter, nbins, per):
# Finds the critical value of the transfer entropy statistic
# that needs to be exceeded for statistical signficance.
# M = matrix of unshifted variables, e.g., [X Y] for calculating the X>Y transfer entropy.
# lag = time lag.
# alpha = significance level.
# numiter = number of Monte Carlo shufflings to perform.
# nbins = number of bins to use to discretize the probability distributions.
# per is the period for resampling (from PickSampleInterval)
Tss = np.ones([numiter])*np.nan # Initializing shuffled transfer entropy table
#print(Tss)
for ii in range(numiter):
Tss[ii], a = transenshuffle_new(M, lag, nbins, per) # Calculates TE for each Monte Carlo Shuffling
#print(Tss)
Tss = np.sort(Tss)
    Tcrit = Tss[round((1-alpha)*numiter)] # sort the shuffled values and pick the (1-alpha) quantile (e.g., 95% for alpha = 0.05) as the significance level
return Tcrit
# In[68]:
def transen_crit_new2( M, shift, alpha, numiter, nbins, per):
# Finds the critical value of the transfer entropy statistic
# that needs to be exceeded for statistical signficance.
# M = matrix of unshifted variables, e.g., [X Y] for calculating the X>Y transfer entropy.
# lag = time lag.
# alpha = significance level.
# numiter = number of Monte Carlo shufflings to perform.
# nbins = number of bins to use to discretize the probability distributions.
# per is the period for resampling (from PickSampleInterval)
Tss = np.ones([numiter])*np.nan # Initializing shuffled transfer entropy table
#print(Tss)
for ii in range(numiter):
Tss[ii], a = transenshuffle_new2(M, shift, nbins, per) # Calculates TE for each Monte Carlo Shuffling
#print(Tss)
Tss = np.sort(Tss)
    Tcrit = Tss[round((1-alpha)*numiter)] # sort the shuffled values and pick the (1-alpha) quantile (e.g., 95% for alpha = 0.05) as the significance level
return Tcrit
# ## Serial TE & I calculater
# In[52]:
# number of monteCarlo shuffle - kills the time - going from 100 to 1000 very time consuming. Parallel!!
# maxLag also takes a lot of time. Number of lag considered. 3*365
# number of source variables -- 20
def RunNewTE2VarsSer(DataMatrix, LabelCell, SinkNodes=None, SourceNodes=None, resultsDir = './Results/',
maxLag=3*365, minSamples=200, numShuffles = 100, sigLevel=0.05, numBins=[11,11,11], do_not_resample=['none']):
# computes TE assumes a data matrix with time in first columns and vars on others
# do_not_resample is a list of variable names not to resample
# Inputs
# DataMatrix - data matrix with time in the first column
# LabelCell - variable name of each data matrix entry
# Source_nodes - array of column indices for source variables [2]
# Sink_nodes - array of column of indices for sink variales [3:end]
# resultsDir - directory for results ./Results/
# maxLag - maximum lag (3*365) 3 years
# minSamples - minimum number of valid samples for TE (suggestion 200)
# numShuffles - number of MonteCarlo shuffle iterations (suggestion 500)
# sigLevel - significance level (suggested 0.05)
# numBins - number of bins to use in 1, 2, and 3 dimensions default [11,11,11]
# Outputs
# Imat - mutual information
# Icritmat - significance threshold
# Tfirstmat - first T > Tcrit
# Tbiggestmat - Tmax for T > Tcrit
# Tcube_store - all T for all sink, source, lag combinations
# Tcritcube_store - all Tcrits for all sink, source, lag combinations
if DataMatrix.size == 0:
return 'no dataMatrix'
if LabelCell.size == 0:
return 'no variable names'
if SourceNodes is None:
SourceNodes = np.arange(2,np.shape(DataMatrix)[1])
if SinkNodes is None:
SinkNodes = np.array([1])
nSources = len(SourceNodes)
nSinks = len(SinkNodes)
# Start clock
print('Beginning 2-variable analysis (serial) ...')
# Tot = tic
# print(SourceNodes,SinkNodes)
# =========================================
## Shrink input matrices to include only variables that are used
# now the order is time, sinks, sources
#@@@@@@@@@@@@@@@@@@@@@
# from Pd to np.array
dataMat = np.column_stack((DataMatrix[:,0], DataMatrix[:,SinkNodes], DataMatrix[:,SourceNodes])) # date, sink, sources
labCell = np.r_[np.array([LabelCell[0]]), np.array(LabelCell[SinkNodes]), np.array(LabelCell[SourceNodes])]
#np.r_[np.array([LabelCell[0]]), np.array(LabelCell[SinkNodes]), np.array(LabelCell[SourceNodes])]
#np.r_[np.array(LabelCell[0]), np.array(LabelCell[1]), np.array(LabelCell[[2,3,4]])]
#Or labCell = np.column_stack((LabelCell[:,0], LabelCell[:,SinkNodes], LabelCell[:,SourceNodes]))
del DataMatrix # or set it to empty DataMatrix = []
del LabelCell
# =============================================
# Initialize output matrices
# mutual information between sources and sinks
# the sink is daily mean Q, and all pairwise interactions are evaluated
Imat = np.ones([nSinks,nSources])*np.nan # row value = # sink vars, col values = # source vars;
# significance threshold
Icritmat = copy.deepcopy(Imat)
# first T > Tcrit
Tfirstmat = copy.deepcopy(Imat)
# Tmax for T > Tcrit
Tbiggestmat = copy.deepcopy(Imat)
# All T for all sink, source, lag combinations
Tcube_store = np.ones([nSinks,nSources,maxLag])*np.nan
# All Tcrits for all sink, source, lag combinations
Tcritcube_store = copy.deepcopy(Tcube_store)
# =============================================
# LOOP OVER ALL PAIRS OF SOURCE AND SINK VARIABLES TO CALCULATE MI and TE
for mySinkIter in range(nSinks): # loop over Sink nodes (information receivers) [ 0]
mySinkNum = SinkNodes[mySinkIter]
mySinkInd = 1 + mySinkIter # exclude time
# extract sub-matrices for the ease of computation
Ivec = Imat[mySinkIter,:]
Icritvec = Icritmat[mySinkIter,:]
Tfirstvec = Tfirstmat[mySinkIter,:]
Tbiggestvec = Tbiggestmat[mySinkIter,:]
Tmat_store = np.reshape(Tcube_store[mySinkIter,:,:],[nSources,maxLag])
Tcritmat_store = np.reshape(Tcritcube_store[mySinkIter,:,:], [nSources,maxLag])
sinkName = labCell[mySinkInd] # Text name of the Sink variable
MmySink = dataMat[:,mySinkInd] # Select the sink variable to run
#print(mySinkIter)
for mySourceIter in range(nSources): # Loop over the source nodes
#print(mySourceIter)
mySourceNum = SourceNodes[mySourceIter]
mySourceInd = 1 + nSinks + mySourceIter
Mmysource = dataMat[:,mySourceInd] # Select source variables
sourceName = labCell[mySourceInd] # Name of the source variable
print('Source node ', mySourceNum-1, sourceName, ':=>', 'Sink node ', mySinkNum, sinkName)
print('Lag ', 'Sink', 'Source')
if sourceName in do_not_resample:
per = 1
else:
per = PickSampleInterval(np.float64(Mmysource), maxLag, 0.01) #Pick the sample interval based on autocorrelation. New 3/8/20
print(per)
M = np.column_stack((Mmysource, MmySink)) # Source followed by Sink
M = M.astype('float')
#print(M.shape)
# MUTUAL INFORMATION
Mmut = ResampleLagged(M, per) #Resample to avoid pseudoreplication
I = mutinfo_new(Mmut,numBins) # computes mutual information
Ivec[mySourceIter] = I # save it in a matrix
Icrit = mutinfo_crit_new(M=Mmut, alpha=sigLevel, nbins=numBins,numiter = numShuffles)
Icritvec[mySourceIter] = Icrit
# TRANSFER ENTROPY
            T = np.ones([maxLag])*np.nan # initialize the TE vector over the range of lags examined
Tcrit = copy.deepcopy(T) # Initialize the vector of the critical TE
for lag in range(maxLag): #[0 to 364] in a year i.e., no lag day
t, N = transen_new(M=M, lag=lag, nbins=numBins, per=per) # Computes TE for at a given lag of 'lag'.
if N >= minSamples: # enough length to compute TE
T[lag] = t # save TE computed
Tcrit[lag] = transen_crit_new(M=M, alpha= sigLevel, lag=lag, nbins=numBins,numiter=numShuffles, per=per) # TE critical Updated 3/9/20. Previously, Mshort was used, but because of the lag, this can cut down on the number of valid pairs.
print(lag, mySinkIter, mySourceIter, N)
# Save the first and biggest value of T over the significance threshold
TgTcrit = np.argwhere(T >= Tcrit) # np.argwhere(np.array([5,6,9,18]) > np.array([3,9,2,9]))
if any(TgTcrit):
Tfirstvec[mySourceIter] = T[TgTcrit[0,0]]
Tbiggestvec[mySourceIter] = max(T[TgTcrit[:,0]]) # @@@@@ Should be T-Tcrit biggest!!!!!!
#print(Tcrit.shape, T.shape, Tcritcube_store.shape)
Tmat_store[mySourceIter,:] = T
Tcritmat_store[mySourceIter,:] = Tcrit
#print(np.arange(maxLag), T)
fH = plt.figure(figsize= [5,5],dpi=150)
plt.plot(np.arange(maxLag), T, color='green', marker='o', linewidth=2, markersize=0.5)
plt.xlabel('Lag, days')
plt.ylabel('Tz')
plt.plot(np.arange(maxLag), Tcrit, color = 'black', linewidth=2, linestyle='dashed')
plt.title([sourceName, 'vs', sinkName])
# Save the graphics
#save_results_to = '/Users/S/Desktop/Results/'
f_name = resultsDir + 'TE_analysis' + str(sourceName) + '_Vs_' + str(sinkName) +'.png'
plt.savefig(f_name, dpi=150)
plt.close(fH) # close it with out displaying
# replace column vectors from source iterations into matrices
Imat[mySinkIter, :] = Ivec
Icritmat[mySinkIter, :] = Icritvec
Tfirstmat[mySinkIter,:] = Tfirstvec
Tbiggestmat[mySinkIter,:] = Tbiggestvec
Tcube_store[mySinkIter,:,:] = Tmat_store
Tcritcube_store[mySinkIter,:,:] = Tcritmat_store
# save results (modify to save just relevant variables)
# save([resultsDir 'TE_analysis_workspace.mat'], '-v7.3');
# Stop clock
print('Finished 2-variable analysis (serial)!');
return Imat, Icritmat, Tfirstmat, Tbiggestmat, Tcube_store, Tcritcube_store # | sink | source | lag |
# In[69]:
# number of monteCarlo shuffle - kills the time - going from 100 to 1000 very time consuming. Parallel!!
# maxLag also takes a lot of time. Number of lag considered. 3*365
# number of source variables -- 20
def RunNewTE2VarsSer2(DataMatrix, LabelCell, shift, SinkNodes=None, SourceNodes=None, resultsDir = './Results/',
maxLag=3*365, minSamples=200, numShuffles = 100, sigLevel=0.05, numBins=[11,11,11], do_not_resample=['none']):
# computes TE assumes a data matrix with time in first columns and vars on others
# do_not_resample is a list of variable names not to resample
# Inputs
# DataMatrix - data matrix with time in the first column
# LabelCell - variable name of each data matrix entry
# Source_nodes - array of column indices for source variables [2]
# Sink_nodes - array of column of indices for sink variales [3:end]
# resultsDir - directory for results ./Results/
# maxLag - maximum lag (3*365) 3 years
# minSamples - minimum number of valid samples for TE (suggestion 200)
# numShuffles - number of MonteCarlo shuffle iterations (suggestion 500)
# sigLevel - significance level (suggested 0.05)
# numBins - number of bins to use in 1, 2, and 3 dimensions default [11,11,11]
# Outputs
# Imat - mutual information
# Icritmat - significance threshold
# Tfirstmat - first T > Tcrit
# Tbiggestmat - Tmax for T > Tcrit
# Tcube_store - all T for all sink, source, lag combinations
# Tcritcube_store - all Tcrits for all sink, source, lag combinations
if DataMatrix.size == 0:
return 'no dataMatrix'
if LabelCell.size == 0:
return 'no variable names'
if SourceNodes is None:
SourceNodes = np.arange(2,np.shape(DataMatrix)[1])
if SinkNodes is None:
SinkNodes = np.array([1])
nSources = len(SourceNodes)
nSinks = len(SinkNodes)
# Start clock
print('Beginning 2-variable analysis (serial) ...')
# Tot = tic
# print(SourceNodes,SinkNodes)
# =========================================
## Shrink input matrices to include only variables that are used
# now the order is time, sinks, sources
#@@@@@@@@@@@@@@@@@@@@@
# from Pd to np.array
dataMat = np.column_stack((DataMatrix[:,0], DataMatrix[:,SinkNodes], DataMatrix[:,SourceNodes])) # date, sink, sources
labCell = np.r_[[np.array(LabelCell[0])], np.array(LabelCell[SinkNodes]), np.array(LabelCell[SourceNodes])]
#np.r_[np.array(LabelCell[0]), np.array(LabelCell[1]), np.array(LabelCell[[2,3,4]])]
#Or labCell = np.column_stack((LabelCell[:,0], LabelCell[:,SinkNodes], LabelCell[:,SourceNodes]))
del DataMatrix # or set it to empty DataMatrix = []
del LabelCell
# =============================================
# Initialize output matrices
# mutual information between sources and sinks
# the sink is daily mean Q, and all pairwise interactions are evaluated
Imat = np.ones([nSinks,nSources])*np.nan # row value = # sink vars, col values = # source vars;
# significance threshold
Icritmat = copy.deepcopy(Imat)
# first T > Tcrit
Tfirstmat = copy.deepcopy(Imat)
# Tmax for T > Tcrit
Tbiggestmat = copy.deepcopy(Imat)
# All T for all sink, source, lag combinations
Tcube_store = np.ones([nSinks,nSources,maxLag])*np.nan
# All Tcrits for all sink, source, lag combinations
Tcritcube_store = copy.deepcopy(Tcube_store)
# =============================================
# LOOP OVER ALL PAIRS OF SOURCE AND SINK VARIABLES TO CALCULATE MI and TE
for mySinkIter in range(nSinks): # loop over Sink nodes (information receivers) [ 0]
mySinkNum = SinkNodes[mySinkIter]
mySinkInd = 1 + mySinkIter # exclude time
# extract sub-matrices for the ease of computation
Ivec = Imat[mySinkIter,:]
Icritvec = Icritmat[mySinkIter,:]
Tfirstvec = Tfirstmat[mySinkIter,:]
Tbiggestvec = Tbiggestmat[mySinkIter,:]
Tmat_store = np.reshape(Tcube_store[mySinkIter,:,:],[nSources,maxLag])
Tcritmat_store = np.reshape(Tcritcube_store[mySinkIter,:,:], [nSources,maxLag])
sinkName = labCell[mySinkInd] # Text name of the Sink variable
MmySink = dataMat[:,mySinkInd] # Select the sink variable to run
#print(mySinkIter)
for mySourceIter in range(nSources): # Loop over the source nodes
#print(mySourceIter)
mySourceNum = SourceNodes[mySourceIter]
mySourceInd = 1 + nSinks + mySourceIter
Mmysource = dataMat[:,mySourceInd] # Select source variables
sourceName = labCell[mySourceInd] # Name of the source variable
print('Source node ', mySourceNum-1, sourceName, ':=>', 'Sink node ', mySinkNum, sinkName)
print('Lag ', 'Sink', 'Source')
if sourceName in do_not_resample:
per = 1
else:
per = PickSampleInterval(np.float64(Mmysource), maxLag, 0.01) #Pick the sample interval based on autocorrelation. New 3/8/20
print(per)
M = np.column_stack((Mmysource, MmySink)) # Source followed by Sink
M = M.astype('float')
#print(M.shape)
# MUTUAL INFORMATION
Mmut = ResampleLagged(M, per) #Resample to avoid pseudoreplication
I = mutinfo_new(Mmut,numBins) # computes mutual information
Ivec[mySourceIter] = I # save it in a matrix
Icrit = mutinfo_crit_new(M=Mmut, alpha=sigLevel, nbins=numBins,numiter = numShuffles)
Icritvec[mySourceIter] = Icrit
# TRANSFER ENTROPY
            T = np.ones([maxLag])*np.nan # initialize the TE vector over the range of lags examined
Tcrit = copy.deepcopy(T) # Initialize the vector of the critical TE
for lag in range(maxLag): #[0 to 364] in a year i.e., no lag day
t, N = transen_new2(M=M, shift=[-lag,shift[1],-per], nbins=numBins, per=per) # Computes TE for at a given lag of 'lag'
if N >= minSamples: # enough length to compute TE
T[lag] = t # save TE computed
Tcrit[lag] = transen_crit_new2(M=M, shift=[-lag,shift[1],shift[2]], alpha= sigLevel,nbins=numBins,numiter=numShuffles, per=per) # TE critical. Updated 3/9/20. Previously, Mshort was used, but because of the lag, this can cut down on the number of valid pairs.
print(lag, mySinkIter, mySourceIter, N)
# Save the first and biggest value of T over the significance threshold
TgTcrit = np.argwhere(T >= Tcrit) # np.argwhere(np.array([5,6,9,18]) > np.array([3,9,2,9]))
if any(TgTcrit):
Tfirstvec[mySourceIter] = T[TgTcrit[0,0]]
Tbiggestvec[mySourceIter] = max(T[TgTcrit[:,0]]) # @@@@@ Should be T-Tcrit biggest!!!!!!
#print(Tcrit.shape, T.shape, Tcritcube_store.shape)
Tmat_store[mySourceIter,:] = T
Tcritmat_store[mySourceIter,:] = Tcrit
#print(np.arange(maxLag), T)
fH = plt.figure(figsize= [5,5],dpi=150)
plt.plot(np.arange(maxLag), T, color='green', marker='o', linewidth=2, markersize=0.5)
plt.xlabel('Lag, days')
plt.ylabel('Tz')
plt.plot(np.arange(maxLag), Tcrit, color = 'black', linewidth=2, linestyle='dashed')
plt.title([sourceName, 'vs', sinkName])
# Save the graphics
#save_results_to = '/Users/S/Desktop/Results/'
f_name = resultsDir + 'TE_analysis' + str(sourceName) + '_Vs_' + str(sinkName) +'.png'
plt.savefig(f_name, dpi=150)
plt.close(fH) # close it with out displaying
# replace column vectors from source iterations into matrices
Imat[mySinkIter, :] = Ivec
Icritmat[mySinkIter, :] = Icritvec
Tfirstmat[mySinkIter,:] = Tfirstvec
Tbiggestmat[mySinkIter,:] = Tbiggestvec
Tcube_store[mySinkIter,:,:] = Tmat_store
Tcritcube_store[mySinkIter,:,:] = Tcritmat_store
# save results (modify to save just relevant variables)
# save([resultsDir 'TE_analysis_workspace.mat'], '-v7.3');
# Stop clock
print('Finished 2-variable analysis (serial)!');
return Imat, Icritmat, Tfirstmat, Tbiggestmat, Tcube_store, Tcritcube_store # | sink | source | lag |
|
[
"laurel@berkeley.edu"
] |
laurel@berkeley.edu
|
4f7edf5d9993d14ed93c595d1579ad18089cac63
|
5c034122de6639bf3f6d7192ab0ea7da036d22db
|
/словари2.py
|
f15aeabb12ce787e92b5b439695f9e863e10d46e
|
[] |
no_license
|
Nurlis98/Exersices-from-Eric-Metiz-book
|
85111a2d94448853b785bf0c230ac903d6d44809
|
79074c10c90426dbb8b0aab275d437652d06ae90
|
refs/heads/master
| 2020-05-09T12:40:45.130513
| 2019-04-13T05:21:24
| 2019-04-13T05:21:24
| 181,119,726
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 94
|
py
|
favorite_numbers= {'Nurbek': 5, 'Aslan': 8, 'Adilet':7, 'Neymar':10}
print(favorite_numbers)
|
[
"noreply@github.com"
] |
Nurlis98.noreply@github.com
|
864fb65f15185ee5517a83c165df9ec067743ca7
|
8f1043dabe7275b33d7fe8e9095e474023f9de6c
|
/ghost.py
|
0b80e62be1f829115e2488998b16a65a62eb9393
|
[] |
no_license
|
pietjan12/pacman_deeplearning
|
acd84dd063f8f76754ee9a9ee558d9d321b7c618
|
1456d6c1daef2fd2b8805a5a2d734dc1127956ef
|
refs/heads/main
| 2023-01-05T05:31:46.217709
| 2020-11-04T12:57:26
| 2020-11-04T12:57:26
| 296,563,462
| 0
| 0
| null | 2020-11-04T09:54:41
| 2020-09-18T08:37:46
|
Python
|
UTF-8
|
Python
| false
| false
| 7,238
|
py
|
import random
#color of ghosts.
ghostcolor = {}
ghostcolor[0] = (255, 0, 0, 255)
ghostcolor[1] = (255, 128, 255, 255)
ghostcolor[2] = (128, 255, 255, 255)
ghostcolor[3] = (255, 128, 0, 255)
ghostcolor[4] = (50, 50, 255, 255) # blue, vulnerable ghost
ghostcolor[5] = (255, 255, 255, 255) # white, flashing ghost
class ghost():
def __init__(self, ghostID):
self.x = 0
self.y = 0
self.velX = 0
self.velY = 0
self.speed = 1
self.nearestRow = 0
self.nearestCol = 0
self.id = ghostID
# ghost "state" variable
# 1 = normal
# 2 = vulnerable
# 3 = spectacles
self.state = 1
self.homeX = 0
self.homeY = 0
self.currentPath = ""
self.anim = {}
from pacman import pygame
for i in range(1, 7, 1):
self.anim[i] = pygame.image.load('sprites/ghost ' + str(i) + '.gif')
# change the ghost color in this frame
for y in range(0, 16, 1):
for x in range(0, 16, 1):
if self.anim[i].get_at((x, y)) == (255, 0, 0, 255):
# default, red ghost body color
self.anim[i].set_at((x, y), ghostcolor[self.id])
self.animFrame = 1
self.animDelay = 0
def Draw(self):
from pacman import thisGame, player, screen
if thisGame.mode == 3:
return False
# ghost eyes --
for y in range(4, 8, 1):
for x in range(3, 7, 1):
self.anim[self.animFrame].set_at((x, y), (255, 255, 255, 255))
self.anim[self.animFrame].set_at((x + 6, y), (255, 255, 255, 255))
if player.x > self.x and player.y > self.y:
# player is to lower-right
pupilSet = (5, 6)
elif player.x < self.x and player.y > self.y:
# player is to lower-left
pupilSet = (3, 6)
elif player.x > self.x and player.y < self.y:
# player is to upper-right
pupilSet = (5, 4)
elif player.x < self.x and player.y < self.y:
# player is to upper-left
pupilSet = (3, 4)
else:
pupilSet = (4, 6)
for y in range(pupilSet[1], pupilSet[1] + 2, 1):
for x in range(pupilSet[0], pupilSet[0] + 2, 1):
self.anim[self.animFrame].set_at((x, y), (0, 0, 255, 255))
self.anim[self.animFrame].set_at((x + 6, y), (0, 0, 255, 255))
# -- end ghost eyes
if self.state == 1:
# draw regular ghost (this one)
screen.blit(self.anim[self.animFrame],
(self.x - thisGame.screenPixelPos[0], self.y - thisGame.screenPixelPos[1]))
elif self.state == 2:
# draw vulnerable ghost
from pacman import ghosts
if thisGame.ghostTimer > 100:
# blue
screen.blit(ghosts[4].anim[self.animFrame],
(self.x - thisGame.screenPixelPos[0], self.y - thisGame.screenPixelPos[1]))
else:
# blue/white flashing
tempTimerI = int(thisGame.ghostTimer / 10)
if tempTimerI == 1 or tempTimerI == 3 or tempTimerI == 5 or tempTimerI == 7 or tempTimerI == 9:
screen.blit(ghosts[5].anim[self.animFrame],
(self.x - thisGame.screenPixelPos[0], self.y - thisGame.screenPixelPos[1]))
else:
screen.blit(ghosts[4].anim[self.animFrame],
(self.x - thisGame.screenPixelPos[0], self.y - thisGame.screenPixelPos[1]))
elif self.state == 3:
import tile_ids
# draw glasses
screen.blit(tile_ids.tileIDImage[tile_ids.tileID['glasses']],
(self.x - thisGame.screenPixelPos[0], self.y - thisGame.screenPixelPos[1]))
if thisGame.mode == 6 or thisGame.mode == 7:
# don't animate ghost if the level is complete
return False
self.animDelay += 1
if self.animDelay == 2:
self.animFrame += 1
if self.animFrame == 7:
# wrap to beginning
self.animFrame = 1
self.animDelay = 0
def Move(self):
from pacman import path, player
self.x += self.velX
self.y += self.velY
self.nearestRow = int(((self.y + 8) / 16))
self.nearestCol = int(((self.x + 8) / 16))
if (self.x % 16) == 0 and (self.y % 16) == 0:
# if the ghost is lined up with the grid again
# meaning, it's time to go to the next path item
if (self.currentPath):
self.currentPath = self.currentPath[1:]
self.FollowNextPathWay()
else:
self.x = self.nearestCol * 16
self.y = self.nearestRow * 16
# chase pac-man
self.currentPath = path.FindPath((self.nearestRow, self.nearestCol),
(player.nearestRow, player.nearestCol))
self.FollowNextPathWay()
def FollowNextPathWay(self):
from pacman import path, player, thisLevel
import tile_ids
# print "Ghost " + str(self.id) + " rem: " + self.currentPath
# only follow this pathway if there is a possible path found!
if not self.currentPath == False:
if len(self.currentPath) > 0:
if self.currentPath[0] == "L":
(self.velX, self.velY) = (-self.speed, 0)
elif self.currentPath[0] == "R":
(self.velX, self.velY) = (self.speed, 0)
elif self.currentPath[0] == "U":
(self.velX, self.velY) = (0, -self.speed)
elif self.currentPath[0] == "D":
(self.velX, self.velY) = (0, self.speed)
else:
# this ghost has reached his destination!!
if not self.state == 3:
# chase pac-man
self.currentPath = path.FindPath((self.nearestRow, self.nearestCol),
(player.nearestRow, player.nearestCol))
self.FollowNextPathWay()
else:
# glasses found way back to ghost box
self.state = 1
self.speed = self.speed / 4
# give ghost a path to a random spot (containing a pellet)
(randRow, randCol) = (0, 0)
while not thisLevel.GetMapTile(randRow, randCol) == tile_ids.tileID['pellet'] or (randRow, randCol) == (
0, 0):
randRow = random.randint(1, thisLevel.lvlHeight - 2)
randCol = random.randint(1, thisLevel.lvlWidth - 2)
self.currentPath = path.FindPath((self.nearestRow, self.nearestCol), (randRow, randCol))
self.FollowNextPathWay()
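
# --- Pure-Python sketch (not part of the original file) of the pupil-placement logic used in
# Draw() above: the ghost's eyes look toward the player, and the returned offsets are positions
# inside the 16x16 ghost sprite.  The sample coordinates are made up.
def _pupil_offset(player_x, player_y, ghost_x, ghost_y):
    if player_x > ghost_x and player_y > ghost_y:
        return (5, 6)   # player is to the lower-right
    if player_x < ghost_x and player_y > ghost_y:
        return (3, 6)   # player is to the lower-left
    if player_x > ghost_x and player_y < ghost_y:
        return (5, 4)   # player is to the upper-right
    if player_x < ghost_x and player_y < ghost_y:
        return (3, 4)   # player is to the upper-left
    return (4, 6)       # aligned with the player: look straight ahead

if __name__ == "__main__":
    print(_pupil_offset(100, 200, 50, 50))   # (5, 6): player is to the lower-right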
|
[
"n.vanderburgt@student.fontys.nl"
] |
n.vanderburgt@student.fontys.nl
|
635a83d12ba05a184e2ed5f76db283eb66e94e73
|
444c07a8d8f55866403b2f2ab4138c20a0e328c7
|
/projeto_agendas/urls.py
|
ae3f72c74d1859de948467d971f23cae887e99a4
|
[] |
no_license
|
TheQuito/projeto_agendas
|
b1b33526b50e2cbde93220875a507bd358e6d406
|
d36e17079afb5009d642ca38aa940a923efdb167
|
refs/heads/master
| 2020-04-14T15:02:27.176067
| 2019-02-26T20:34:47
| 2019-02-26T20:34:47
| 160,865,774
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 324
|
py
|
"""projeto_agendas URL Configuration
"""
from django.contrib import admin
from django.urls import path, include
from rest_framework import routers
urlpatterns = [
path('admin/', admin.site.urls),
path('api-auth/', include('rest_framework.urls')),
path('agendas_rest_api/', include('agendas_rest_api.urls')),
]
|
[
"jonesdhy@hotmail.com"
] |
jonesdhy@hotmail.com
|
50ab680a0d68f273e6acbc7e9f5081f0336f9178
|
b64e879968bc2a977d0800164eeaae887608465b
|
/Source/vvc_project/wizards/review_detail_project_wizard.py
|
dc7e7061de468272801cebc897f313899e46c607
|
[] |
no_license
|
PhuocThinh/OdooDocument
|
371df0b450d5ed662717c308a7c9ffbca2f65656
|
1a0ca11c729e919b34f3bb8a2e12d481fbd27f2f
|
refs/heads/master
| 2020-08-16T04:44:24.700066
| 2019-10-30T09:43:00
| 2019-10-30T09:43:00
| 215,456,739
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 535
|
py
|
from odoo import api, models, fields
class ReviewDetailProjectWizard(models.TransientModel):
_name = 'review.detail.wizard.project'
_description = 'Project Review Detail Wizard'
content = fields.Html(string='Content')
status = fields.Selection([('new', "New"),
('done', "Done"),
('cancel', "Cancel")
], 'Status', default='new', required=True)
review_id = fields.Many2one('review.history.project.wizard', string="Review")
|
[
"thinh.pvp@vn.vinx.asia"
] |
thinh.pvp@vn.vinx.asia
|
3687486371728e2b0c5aea7f520cefa19b5385d1
|
4aaef00df82d733dcef81bbb77b04ad92c5f512f
|
/server.py
|
38188c4b40b03cdcc79e9cdbe460dc747e46ad8c
|
[] |
no_license
|
niallo/mongo-perf
|
0f48cbba7e6a56a6379ce69c708164279fa12f24
|
4b1b2a5de72dd0f1deb8d46b7acbb555086050eb
|
refs/heads/master
| 2021-01-24T02:39:29.411930
| 2013-02-09T19:52:46
| 2013-02-09T19:52:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,139
|
py
|
#!/usr/bin/python
from bottle import *
import pymongo
from datetime import datetime
import sys
import json
db = pymongo.Connection('localhost', 27017)['bench_results']
@route('/static/:filename#.*#')
def static_file(filename):
send_file(filename, root='./static')
@route("/raw")
def raw_data():
out = []
versions = request.GET.get('versions', '')
if versions:
if versions.startswith('/') and versions.endswith('/'):
q = {'mongodb_version': {'$regex': versions[1:-1]}}
else:
q = {'mongodb_version': {'$in': versions.split()}}
else:
q = {}
cursor = db.raw.find(q).sort([('name',1), ('mongodb_version',1), ('mongodb_git',1)])
name = None
results = []
for result in cursor:
if result['name'] != name:
if name is not None:
out.append({'name':name, 'results':results})
name = result['name']
results = []
row = dict(version=result['mongodb_version'], date=result['mongodb_date'])
for (n, res) in result['results'].iteritems():
row[n] = res
results.append(row)
out.append({'name':name, 'results':results})
return out
@route("/")
def main_page():
metric = request.GET.get('metric', 'ops_per_sec')
results = raw_data()
threads = set()
flot_results = []
for outer_result in results:
out = []
for i, result in enumerate(outer_result['results']):
out.append({'label': result['version']
,'data': sorted([int(k), v[metric]] for (k,v) in result.iteritems() if k.isdigit())
})
threads.update(int(k) for k in result if k.isdigit())
flot_results.append(json.dumps(out))
return template('main_page.tpl'
,results=results
,flot_results=flot_results
,request=request
,threads=sorted(threads)
)
if __name__ == '__main__':
do_reload = '--reload' in sys.argv
debug(do_reload)
run(reloader=do_reload, host='0.0.0.0', server=AutoServer)
|
[
"redbeard0531@gmail.com"
] |
redbeard0531@gmail.com
|
590a3359c5f5ac28c8d7ac33a1d38153112d62a8
|
e24bf8b1ae9071ef29393e25e55d57e57ecaa4d4
|
/HLTBendingAngle/python/ConfFile_cfg.py
|
6c9663541350f7f1067424a131c023c1281f15af
|
[] |
no_license
|
tahuang1991/MuJetAnalysis
|
35444464054fb1f3a69980d80a1a87c0530ad1c4
|
2b7bb07ad1e428d13411c6594db01f075fcffdf6
|
refs/heads/master
| 2020-12-03T08:03:53.099393
| 2016-01-26T22:52:19
| 2016-01-26T22:52:19
| 46,516,216
| 0
| 0
| null | 2015-11-19T19:46:24
| 2015-11-19T19:46:24
| null |
UTF-8
|
Python
| false
| false
| 474
|
py
|
import FWCore.ParameterSet.Config as cms
process = cms.Process("Demo")
process.load("FWCore.MessageService.MessageLogger_cfi")
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
process.source = cms.Source("PoolSource",
# replace 'myfile.root' with the source file you want to use
fileNames = cms.untracked.vstring(
'file:myfile.root'
)
)
process.demo = cms.EDAnalyzer('HLTBendingAngle'
)
process.p = cms.Path(process.demo)
|
[
"jrdv009@tamu.edu"
] |
jrdv009@tamu.edu
|
61d64f4a573e2b561240731d80288fbdd110a42a
|
5b976ba89e3de22bd00ccd6a6e2eafde807bf8cf
|
/computer/training_image_collection.py
|
1e56d8ccac481018c686f3fefd6188b43c126bb8
|
[
"MIT"
] |
permissive
|
cfizette/Neural-Net-RC-Car
|
fec8348fa14240c336c95775f1bda58afba9e7ed
|
a45c13d2b1a7107d5f41645c258d7d000cf63b0d
|
refs/heads/master
| 2021-01-02T23:05:02.957073
| 2017-11-04T04:35:26
| 2017-11-04T04:35:26
| 99,464,029
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,616
|
py
|
import threading
import io
import socket
import struct
import serial
import pygame
import _thread as thread
import cv2
import numpy as np
import os
import time
'''
Notes:
The pygame window will take a few moments to warm up and may be unresponsive during this time, just wait.
Usage:
First start this program, then start the client side program on the Pi.
Drive the car through your track.
When you reach the end of the track, either end the program by pressing Q or
rearrange the track and drive through it again. The program will only save
data when the car is moving forwards.
'''
# Folder for saving training data
folder = 'training_images'
# Com port for Arduino
com_port = 3
# IP Address
ip_address = '192.168.1.14'
color = 1 # 1 for color, 0 for false
def rc_controller():
ser = serial.Serial(com_port, 115200, timeout=1)
pygame.init()
disp = pygame.display.set_mode((800, 600))
print('getting input')
# Get keyboard input and send data
while True:
for event in pygame.event.get():
if event.type == pygame.KEYDOWN or event.type == pygame.KEYUP:
key_input = pygame.key.get_pressed()
if key_input[pygame.K_UP] and key_input[pygame.K_RIGHT]:
print('Forward Right')
ser.write(b'5')
elif key_input[pygame.K_UP] and key_input[pygame.K_LEFT]:
print('Forward Left')
ser.write(b'6')
elif key_input[pygame.K_DOWN] and key_input[pygame.K_LEFT]:
print('Reverse Left')
ser.write(b'8')
elif key_input[pygame.K_DOWN] and key_input[pygame.K_RIGHT]:
print('Reverse Right')
ser.write(b'7')
elif key_input[pygame.K_UP]:
print("Forward")
ser.write(b'1')
elif key_input[pygame.K_DOWN]:
print("Reverse")
ser.write(b'2')
elif key_input[pygame.K_RIGHT]:
print("Right")
ser.write(b'4')
elif key_input[pygame.K_LEFT]:
print("Left")
ser.write(b'3')
else:
ser.write(b'0')
def get_input():
key_in = pygame.key.get_pressed()
# Only collect when moving forward
if key_in[pygame.K_UP]:
if key_in[pygame.K_LEFT]:
return 'left'
elif key_in[pygame.K_RIGHT]:
return 'right'
else:
return 'straight'
else:
return 'stationary'
def get_train_images():
class GetTrainImages(object):
def __init__(self):
self.server_socket = socket.socket()
self.server_socket.bind((ip_address, 8000))
self.server_socket.listen(0)
self.connection = self.server_socket.accept()[0].makefile('rb')
self.send_inst = True
self.collect_images()
def collect_images(self):
print('collecting images')
# left = 0
# right = 1
# straight = 2
label_array = np.zeros(1)
left_array = np.zeros(1)
right_array = 1*np.ones(1)
straight_array = 2*np.ones(1)
frame_num = 1
try:
stream_bytes = ' '
while self.send_inst:
# Read the length of image, if 0 break
image_len = struct.unpack('<L', self.connection.read(struct.calcsize('<L')))[0]
if not image_len:
break
# Check for exit command
key_in = pygame.key.get_pressed()
if key_in[pygame.K_q]:
break
# Construct stream and read image from connection
image_stream = io.BytesIO()
image_stream.write(self.connection.read(image_len))
# Show video feed
data = np.fromstring(image_stream.getvalue(), dtype=np.uint8)
image = cv2.imdecode(data, color)
try:
cv2.imshow('image', image)
cv2.waitKey(1)
except:
print("error displaying image")
# Get input from pygame
user_input = get_input()
# Add to label_array, only when moving
if user_input is not 'stationary':
if user_input is 'left':
label_array = np.append(label_array, left_array, axis=0)
elif user_input is 'right':
label_array = np.append(label_array, right_array, axis=0)
elif user_input is 'straight':
label_array = np.append(label_array, straight_array, axis=0)
# Save frame
cv2.imwrite(folder + '/' + str(frame_num) + '.jpg', image)
frame_num += 1
except IOError as e:
print(e)
# Save label_array
label_array = np.delete(label_array, 0)
np.save(folder + '/labels.npy', label_array)
GetTrainImages()
try:
threading.Thread(target=get_train_images).start()
threading.Thread(target=rc_controller).start()
except:
print("Unable to start new thread")
while True:
pass
|
[
"cfizett1@binghamton.edu"
] |
cfizett1@binghamton.edu
|
9cc50ddd7c4aa4f288db9b25f241d953d7ffc2c6
|
c2e6dc71f1533a5201b527b13ca50641f51bbb06
|
/empireofcode/done/even_last.py
|
12d2dc1126971a8e9387ccead4d7a39f5f653928
|
[] |
no_license
|
paolo12/first_gift
|
3ecb2c96aa110e557765331e991031b5c4141b31
|
89bc214bff6fe54d8580cb5eb087ae42c8ff2d14
|
refs/heads/master
| 2021-01-10T05:24:39.865929
| 2017-05-30T13:36:42
| 2017-05-30T13:36:42
| 53,891,429
| 0
| 0
| null | 2016-03-16T21:42:55
| 2016-03-14T20:58:52
|
Python
|
UTF-8
|
Python
| false
| false
| 834
|
py
|
def even_last(array):
summ_array = []
i = 0
if len(array) == 0:
return 0
else:
while (i <= len(array)-1) and (len(summ_array) < len(array)):
summ_array.append(array[i])
print("summ_array = ", summ_array)
if i + 1 <= len(array):
i += 2
else:
continue
result = sum(summ_array) * array[-1]
return result
"""
if __name__ == '__main__':
# These "asserts" using only for self-checking and not necessary for auto-testing
assert even_last([0, 1, 2, 3, 4, 5]) == 30, "(0+2+4)*5=30"
assert even_last([1, 3, 5]) == 30, "(1+5)*5=30"
assert even_last([6]) == 36, "(6)*6=36"
assert even_last([]) == 0, "An empty array = 0"
print("Use 'Check' to earn sweet rewards!")
"""
print(even_last([]))
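# A shorter sketch of the same rule (an assumed reading of the exercise, matching
# the self-checks quoted above): sum the even-indexed items, then multiply by the
# last element; an empty list yields 0.
def even_last_concise(array):
    return sum(array[::2]) * array[-1] if array else 0

print(even_last_concise([0, 1, 2, 3, 4, 5]))  # 30, same as even_last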
|
[
"ppyakov@gmail.com"
] |
ppyakov@gmail.com
|
12dce45fd1b5b8283d9673ea5a485939792dec47
|
ccfe0a1b394e105eb547555ba5eb32010cfbea1e
|
/src/__init__.py
|
5657196915b800db2eb43fe3a913e2c31ae69e5e
|
[] |
no_license
|
aponamarev/CarND-Vehicle-Detection
|
4fe33b7c1e34d39fa9da8bf52ce5dadd51094263
|
13b077bb3bccd2b6d46aff9e23050f90d5f3e92d
|
refs/heads/master
| 2020-12-25T18:33:26.820217
| 2017-06-13T05:35:59
| 2017-06-13T05:35:59
| 93,965,965
| 0
| 0
| null | 2017-06-10T21:20:11
| 2017-06-10T21:20:11
| null |
UTF-8
|
Python
| false
| false
| 122
|
py
|
#!/usr/bin/env python
"""
Created 6/12/17.
"""
__author__ = "Alexander Ponamarev"
__email__ = "alex.ponamaryov@gmail.com"
|
[
"alex.ponamaryov@gmail.com"
] |
alex.ponamaryov@gmail.com
|
aed660996022b455724c7d0ca62cb8c4e9ba82c9
|
16bf0672cdec5f6a680a8af02c0a0a3fb0b36189
|
/modules/4h 32m zip function.py
|
2887f9f9c0f366401e7a8f71bf95314ccd2654f5
|
[] |
no_license
|
tanbir6666/test-01
|
91bfb82dc928a7d2a07b8b5a31501d7c4284435f
|
2d5a5f67d56a6288beea5a6ed6f3102e43c083c3
|
refs/heads/master
| 2023-06-30T11:41:31.542298
| 2021-08-04T19:33:18
| 2021-08-04T19:33:18
| 392,805,328
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,715
|
py
|
# zip merges two or more lists item by item
Names=["Tanbir","Taohid","Shmu","Hiru","Tayeb"]
Ages=[21,11,42,58]
height=["5f9inc","4f5inc","5ft3inc","5ft5inc","5ft10inc"]
# here Ages has the fewest items (4), so the zipped result contains at most 4 rows
# that means the shortest of the zipped lists determines how many rows zip produces
zip_function=list(zip(Names,Ages,height))
print(zip_function)
for zi in zip_function:
print("name : ",zi[0]," & ","Age : ",zi[1]," & Height :",zi[2])
unzipped_function=list(zip(*zip_function))
print(unzipped_function)
for unzipped_tuple in unzipped_function:
print(list(unzipped_tuple))
list_one=[1,2,3,4,5,6,7,8,9,10,11]
list_two=["one","Two","Three","Four","Five","six","Seven","Eight"]
for l1, l2 in zip(list_one, list_two):
print(l1)
print(l2)
# zip is used to merge 2 or more sequences and loop through them together
on_line_loop = [[l1,l2] for l1, l2 in zip(list_one, list_two)]
print(on_line_loop)
items=["Motherboard","RAM","Hard Drive","Power Supply"]
prices=[4000,2000,4000,2600]
amounts=[10,12,13,25]
Sentences=[]
for (item,price,amount) in zip(items,prices,amounts):
    Sentences.append((item+"\'s total price is "+str(price*amount)+" taka & "+str(price)+" taka per piece"))
for sen in Sentences:
print(sen)
import pandas
datas=pandas.DataFrame({
"Product Name": items,
"Product Price": prices,
"Product Amount": amounts,
})
print(datas.loc[0:])
datas.to_csv("Product info.csv")
my_Serise=pandas.Series(data=items,index=["one","Two","three","four"])
print(my_Serise)
print(pandas.read_csv("Product info.csv"))
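# A sketch for when you want to keep every row even though the lists differ in
# length (an assumption about intent): itertools.zip_longest pads the shorter list
# instead of truncating to it.
from itertools import zip_longest
for name, age, h in zip_longest(Names, Ages, height, fillvalue="unknown"):
    print(name, age, h)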
|
[
"tanbirhawlader12690@gmail.com"
] |
tanbirhawlader12690@gmail.com
|
e1292eb1c55d1880ef69645fdde2e50902e6482e
|
2b0f9a38782d55ff24b369f2f699359fe89e7654
|
/PythonWork/Palindromes.py
|
3b416af369ae8e0eaee3fe42da3930c9a704854d
|
[
"BSD-3-Clause"
] |
permissive
|
truggles/ProjectEuler
|
a8534ee4d2a0469708bfc203c7cc3dd781554229
|
714026567686779eafbf3cc3e1d7c7ebc6cca9c3
|
refs/heads/master
| 2020-12-02T21:37:02.795551
| 2016-02-07T12:09:01
| 2016-02-07T12:09:01
| 30,518,146
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 667
|
py
|
numA = 999
numB = 999
isGreatest = False
isGreat = False
greatestTempNum = 0
greatestTempSt = ''
greatestNum = 0
greatestSt = ''
#while numA > numB:
for i in range(0, numA-900):
nAtemp = numA - i
isPalin = False
for j in range(0, numB):
nBtemp = numB - j
product = nAtemp * nBtemp
#print product
stP = str(product)
if len(stP) == 6 and stP[0] == stP[-1] and stP[1] == stP[-2] and stP[2] == stP[-3]:
print "%s is a Palindrome!" % stP
print "%s = %i x %i" % (stP, nAtemp, nBtemp)
if greatestTempNum < product: greatestTempNum = product
isPalin = True
break
print "Greatest is: %i" % greatestTempNum
|
[
"truggles@wisc.edu"
] |
truggles@wisc.edu
|
6387acf0252dc6b79f49e2b205834defd521184a
|
4ee8b8bdd3d271a07fd9771133a5562e872fc9ff
|
/celcius_to_f.py
|
c671133dfc2722c6670932c088011b4c366afb52
|
[] |
no_license
|
Ogeoluwa/third
|
5bab45fe24a9fb5640bb76a3a2527762eaa5eae4
|
490aaf8d8df2079ac63a09470aa1ca02b42d6dda
|
refs/heads/main
| 2023-01-01T15:34:55.960997
| 2020-11-02T16:37:22
| 2020-11-02T16:37:22
| 309,427,027
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 115
|
py
|
temp = input('What temperature do you want to convert to? (c or f)')
if temp == 'f':
    celsius = float(input('Enter the temperature in Celsius: '))
    fahrenheit = (celsius * 9/5) + 32
    print(fahrenheit)
|
[
"ogeoluwa.otitoloju@gmail.com"
] |
ogeoluwa.otitoloju@gmail.com
|
5d8568be87f3c82feb8f0c5d90fb567f5345adac
|
07f6b61aacfad5f30d9eedc384f2198fb303a094
|
/Practice/CodingBat/sleepIn.py
|
f1ca6cbb029a12e9770552553c78660cb78fcc4a
|
[] |
no_license
|
tiendong96/Python
|
5f0306da422d08cebcde948e346c17a3f9556d50
|
c4b7bc10a1400d346d32357c502e418ad9219f78
|
refs/heads/master
| 2023-03-21T17:31:46.863163
| 2021-03-10T21:52:55
| 2021-03-10T21:52:55
| 335,058,019
| 0
| 0
| null | 2021-02-01T20:14:22
| 2021-02-01T19:18:04
|
Python
|
UTF-8
|
Python
| false
| false
| 355
|
py
|
# The parameter weekday is True if it is a weekday,
# and the parameter vacation is True if we are on vacation.
# We sleep in if it is not a weekday or we're on vacation. Return True if we sleep in.
def sleep_in(weekday, vacation):
return (not weekday or vacation)
if __name__ == '__main__':
print(sleep_in(False, True)) #not weekday, vacation
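    # Two more cases of the same rule (hypothetical checks, not part of the exercise):
    print(sleep_in(True, False))  # weekday, no vacation -> False
    print(sleep_in(True, True))   # weekday, on vacation -> True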
|
[
"tienbusinessinquiry@gmail.com"
] |
tienbusinessinquiry@gmail.com
|
9f56ec17df57c3123fc94c0484a3164bb7f25abe
|
1130e63c96649389760e6fb98aeee3a9b2bffcb3
|
/script/test_all_branches
|
023aa79eb966c58e67e54587454ed9fe7e46073e
|
[
"BSD-3-Clause"
] |
permissive
|
jrogstad/mopub-android-sdk
|
e81647c677fe684af49cae87a32a69505649909a
|
4eed7087a55ec766dfa6b03f4f8ff4ec85c02bfd
|
refs/heads/master
| 2021-01-15T18:45:25.968660
| 2013-11-20T04:51:50
| 2013-11-20T04:51:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 417
|
#!/usr/bin/python
import os
import shared_values
import git_helper
import os_helper
original_branch = git_helper.git_current_branch()
for branch in shared_values.branches_synced_with_master:
os_helper.try_system_quiet('git co ' + branch)
if os.system('mvn clean install'):
print "FALURE!!!!!!!!!!!!!!!"
exit(1)
os_helper.system_quiet('git co ' + original_branch)
print "SUCCESS!!!!!!!!!!!"
|
[
"pair+nat+phil@pivotallabs.com"
] |
pair+nat+phil@pivotallabs.com
|
|
ca452e16876c0dc2101adadbac281236e1a6d309
|
b777204c31f83d563b511b2f84c955bd49da9f17
|
/doctors/models.py
|
87fe654546e55546dee65ba45321e6b2aca102e5
|
[] |
no_license
|
DavronR/timesheet
|
aa5b8f8bdd1e14429e4b5d6ba1edec6c496a53af
|
5eaa75c0c0186e3eba5cfc47a251ca50baf6f6d5
|
refs/heads/main
| 2023-04-01T08:41:31.404284
| 2021-04-06T21:52:38
| 2021-04-06T21:52:38
| 355,235,857
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,758
|
py
|
from django.contrib.auth.models import AbstractUser
from django.db import models
from django.utils import timezone
import datetime
from datetime import timedelta
# Create your models here.
class User(AbstractUser):
is_doctor = models.BooleanField(default=False)
class Location(models.Model):
name = models.CharField(max_length=200)
sector = models.CharField(max_length=20)
def __str__(self):
return self.name
class HourCode(models.Model):
name = models.CharField(max_length=200)
def __str__(self):
return self.name
class Activity(models.Model):
work_date = models.DateField()
location = models.ForeignKey(Location, related_name="location", on_delete=models.CASCADE)
user = models.ForeignKey(User, related_name="activities", on_delete=models.CASCADE)
time_in = models.TimeField()
time_out = models.TimeField()
hour_code = models.ForeignKey(HourCode, related_name="hour_code", on_delete=models.CASCADE)
fbp_payrol = models.DecimalField(max_digits=10, decimal_places=2)
amco_payrol = models.DecimalField(max_digits=10, decimal_places=2)
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
def is_locked(self):
today = timezone.now()
if (today-self.created).days >= 45:
return True
return False
def hours_worked(self):
start = timedelta(hours=self.time_in.hour, minutes=self.time_in.minute)
end = timedelta(hours=self.time_out.hour, minutes=self.time_out.minute)
td = end - start
days, hours, minutes = td.days, td.seconds // 3600, td.seconds // 60 % 60
return f"{hours} hours and {minutes} minutes"
|
[
"turaboy.holmirzaev@toptal.com"
] |
turaboy.holmirzaev@toptal.com
|
7a5951f31b24674123c2b2f97433fe00f3f35f76
|
7dd5b9012401afa2e48b3e8d988592cc318a844b
|
/page_loader/cli.py
|
cefd4876d9a4d32f3a0df5fc90ba658be2202da4
|
[] |
no_license
|
twistby/python-project-lvl3
|
0d5f57bdbb9360960f8f928026a4119a3ab71786
|
1388a699b51fec948ba2d7f471d63c0eb2ece056
|
refs/heads/main
| 2023-08-20T06:50:53.295528
| 2021-10-31T15:10:39
| 2021-10-31T15:10:39
| 392,060,083
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 667
|
py
|
"""Cli modul."""
import argparse
def make_parser(default_folder: str):
"""Make argparse parser."""
parser = argparse.ArgumentParser(
        description='Use this utility to download a web page locally.',
usage='page-loader -o tmp_dir http://template.com',
)
parser.add_argument(
'page_address',
help='web-page address',
)
parser.add_argument(
'-o',
'--output',
help='directory where to save the page',
default=default_folder,
)
return parser
def get_args(default_folder: str):
"""Return arguments."""
parser = make_parser(default_folder)
return parser.parse_args()
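# Usage sketch (assumes the caller supplies a default download folder):
#   args = get_args('/tmp')   # parses sys.argv
#   args.page_address         # e.g. 'http://template.com'
#   args.output               # '/tmp' unless -o/--output was passed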
|
[
"pref@outlook.com"
] |
pref@outlook.com
|
a803778b4200d3de5eb086211d30a7d03cd014bf
|
9e05945d1ed9fcece072a789c3f606e75b27cfcf
|
/python_study_3/page4/script.py
|
bf4729462cbd60b0e78d6b86c37b81d1d8f79a5c
|
[] |
no_license
|
taiga-ishii/Python_Progate
|
9ad0b8f6aa73e6e1b5eac90cd628c594d18d3009
|
e121532b2b558aa80e863caee683146e4a87ad67
|
refs/heads/master
| 2020-03-27T08:47:54.590587
| 2018-08-27T11:34:26
| 2018-08-27T11:34:26
| 146,288,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 488
|
py
|
# Allow the function to receive the name as its second argument
def print_hand(hand,name):
    # Rewrite this so it prints "<name> played <hand>"
    print(name+'は'+hand + 'を出しました')
# Pass the string 'にんじゃわんこ' as the second argument
print_hand('グー','にんじゃわんこ')
# Pass the string 'コンピューター' as the second argument
print_hand('パー','コンピューター')
|
[
"tiger1410111@gmail.com"
] |
tiger1410111@gmail.com
|
177f83fef3510a591e1377a6f8e002e183abe263
|
968e923ef73711944895e00ec0571387b608b6ce
|
/exec/runExperiment.py
|
f2e75c0e432b2094a737a98df6762ea7df4b6011
|
[] |
no_license
|
chengshaozhe/commitmentSnake
|
b3805b573e5a8205d146418873ef9ccb91eb5929
|
76523aee98f5a28ede59d9e5e96f5634e4cfabb3
|
refs/heads/master
| 2021-07-06T19:39:30.994709
| 2020-12-18T10:20:01
| 2020-12-18T10:20:01
| 214,105,726
| 0
| 0
| null | 2019-10-10T06:36:39
| 2019-10-10T06:36:39
| null |
UTF-8
|
Python
| false
| false
| 3,129
|
py
|
import pygame as pg
import os
import collections as co
import numpy as np
import pickle
import sys
import math
sys.path.append(os.path.join(os.path.join(os.path.dirname(__file__), '..')))
from src.Visualization import DrawBackground, DrawNewState, DrawImage
from src.Controller import HumanController, CheckBoundary
from src.UpdateWorld import *
from src.Trial import Trial
from src.Experiment import Experiment
from src.Writer import WriteDataFrameToCSV
def main():
dimension = 15
bounds = [0, 0, dimension - 1, dimension - 1]
condition = [-5, -3, -1, 0, 1, 3, 5]
minDistanceBetweenGrids = max(condition) + 1
maxDistanceBetweenGrids = calculateMaxDistanceOfGrid(bounds) - minDistanceBetweenGrids
initialWorld = InitialWorld(bounds)
updateWorld = UpdateWorld(bounds, condition, minDistanceBetweenGrids, maxDistanceBetweenGrids)
pg.init()
screenWidth = 680
screenHeight = 680
screen = pg.display.set_mode((screenWidth, screenHeight))
leaveEdgeSpace = 2
lineWidth = 1
backgroundColor = [205, 255, 204]
lineColor = [0, 0, 0]
targetColor = [255, 50, 50]
playerColor = [50, 50, 255]
targetRadius = 10
playerRadius = 10
textColorTuple = (255, 50, 50)
pg.event.set_allowed([pg.KEYDOWN, pg.QUIT])
picturePath = os.path.abspath(os.path.join(os.getcwd(), os.pardir)) + '/pictures/'
resultsPath = os.path.abspath(os.path.join(os.getcwd(), os.pardir)) + '/results/'
humanController = HumanController(dimension)
controller = humanController
experimentValues = co.OrderedDict()
# experimentValues["name"] = input("Please enter your name:").capitalize()
experimentValues["name"] = 'test'
experimentValues["condition"] = 'None'
writerPath = resultsPath + experimentValues["name"] + '.csv'
writer = WriteDataFrameToCSV(writerPath)
introductionImage = pg.image.load(picturePath + 'introduction.png')
restImage = pg.image.load(picturePath + 'rest.png')
finishImage = pg.image.load(picturePath + 'finish.png')
introductionImage = pg.transform.scale(introductionImage, (screenWidth, screenHeight))
finishImage = pg.transform.scale(finishImage, (int(screenWidth * 2 / 3), int(screenHeight / 4)))
drawBackground = DrawBackground(screen, dimension, leaveEdgeSpace, backgroundColor, lineColor, lineWidth, textColorTuple)
drawNewState = DrawNewState(screen, drawBackground, targetColor, playerColor, targetRadius, playerRadius)
drawImage = DrawImage(screen)
block = 15
designValues = createDesignValues(condition * 3, block)
checkBoundary = CheckBoundary([0, dimension - 1], [0, dimension - 1])
trial = Trial(controller, drawNewState, checkBoundary)
restTrialInterval = math.ceil(len(designValues) / 6)
restTrial = list(range(0, len(designValues), restTrialInterval))
experiment = Experiment(trial, writer, experimentValues, initialWorld, updateWorld, drawImage, resultsPath, minDistanceBetweenGrids, maxDistanceBetweenGrids, restImage, finishImage, restTrial)
drawImage(introductionImage)
experiment(designValues)
if __name__ == "__main__":
main()
|
[
"shaozhecheng@outlook.com"
] |
shaozhecheng@outlook.com
|
37a50da5e3dc6a53a4249b19d9105573ad01a331
|
a10efbf594de50b829e259b4e5f1ef73fa92bb7c
|
/Sem 4/lab5.py
|
e86e9783f779b706928164258ca0f6a4f364213e
|
[] |
no_license
|
andrew-kulikov/digital-analysis
|
a3e213ce9dd3d634b5fe1f3d20f4125035e75e08
|
f8a9d25ca4d21adf6a8c0a29f24de7ddae807f3d
|
refs/heads/master
| 2020-04-05T09:58:46.473376
| 2018-11-27T20:51:06
| 2018-11-27T20:51:06
| 156,782,906
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,412
|
py
|
from matplotlib import pyplot as plt
import numpy as np
from sympy import *
from sympy.plotting import plot_parametric
from sympy.utilities.lambdify import lambdastr
from pprint import pprint
def f(x):
return x * x + np.log(x) - 2
def graph():
x = Symbol('x')
y = Symbol('y')
p1 = plot_implicit(Eq(sin(x+y)-1.5*x, 0.1), (x, -2, 2), (y, -2, 2))
p2 = plot_implicit(Eq(x**2+y**2, 1), (x, -2, 2), (y, -2, 2))
p1.extend(p2)
p1.show()
def newton_eq(f, fstr, x0, e=0.00001, max_iterations=10000):
print('Newton method is running')
x = Symbol('x')
df = lambdify(x, fstr.diff(x), 'numpy')
xk = x0 - f(x0) / df(x0)
for k in range(max_iterations):
xp = xk
xk -= f(xk) / df(xk)
print('Iteration #' + str(k + 1) + ' Current solution: ' + str(xk))
if abs(xp - xk) < e:
break
return xk, k
def chords(f, a, b, e=0.001, max_iterations=10000):
print('Chords method is running')
xn = b
xp = a
i = 0
while abs(f(xn)) > e:
tmp = xn
xn = xp - f(xp) / (f(xn) - f(xp)) * (xn - xp)
xp = tmp
i += 1
print('Iteration #' + str(i) + ' Current solution: ' + str(xn))
if i > max_iterations:
break
return xn, i
def iterations(x0, F, e=0.001, max_iterations=10000):
print('Iterations method is running')
xp = np.copy(x0)
for i in range(max_iterations):
xk = np.copy(xp)
for j in range(len(xk)):
xk[j] = F[j](*xp)
print('Iteration #' + str(i + 1) + ' Current solution: ' + str(xk))
if np.max(np.abs(xk - xp)) < e:
break
xp = xk
return xp, i + 1
def build_F(exprs):
x, y = symbols('x y')
F = []
for i in range(len(exprs)):
F.append(lambdify((x, y), exprs[i], 'numpy'))
return F
def build_jacobian(syms, funcs):
J = []
for i in range(len(funcs)):
J.append([])
for sym in syms:
J[i].append(lambdify(syms, funcs[i].diff(sym), 'numpy'))
return J
def eval_jacobian(J, vals):
rows, cols = J.shape
M = np.zeros(J.shape)
for i in range(rows):
for j in range(cols):
M[i, j] = J[i, j](*vals)
return M
def eval_F(F, vals):
F1 = np.zeros(F.shape)
for i in range(len(F)):
F1[i] = F[i](*vals)
return F1
def newton_syst(J, F, x0, e=0.001, max_iterations=10000):
print('Newton method is running')
xp = np.copy(x0)
xk = np.copy(x0)
for i in range(max_iterations):
xp = np.copy(xk)
xk = xk - np.dot(
np.linalg.inv(eval_jacobian(J, xk)),
eval_F(F, xk))
print('Iteration #' + str(i + 1) + ' Current solution: ' + str(xk))
if np.max(np.abs(xp - xk)) < e:
break
return xk, i + 1
def newton_syst_mod(J, F, x0, e=0.001, max_iterations=10000):
print('Modified Newton method is running')
xp = np.copy(x0)
xk = np.copy(x0)
J0 = np.linalg.inv(eval_jacobian(J, xk))
for i in range(max_iterations):
xp = np.copy(xk)
xk = xk - np.dot(J0, eval_F(F, xk))
print('Iteration #' + str(i + 1) + ' Current solution: ' + str(xk))
if np.max(np.abs(xp - xk)) < e:
break
return xk, i + 1
def main():
a = 1
b = 1.5
ans, iters_chords = chords(f, a, b)
x, y = symbols('x y')
fstr = x**2 + log(x) - 2
plot(x**2 + log(x) - 2, (x, 0.01, 4))
ans1, iters_newton_eq = newton_eq(f, fstr, 1.25)
print(ans, ans1)
print(iters_chords, iters_newton_eq)
graph()
F = build_F([2/3*(sin(x+y) - 0.1), x**2+y**2-1+y])
x0 = np.zeros(2)
x0, iters_iter = iterations([-0.5, -0.4], F)
print('Iterations method for system: ')
print('Amount of iterations: ' + str(iters_iter))
print('Answer: ' + str(x0))
J = np.array(build_jacobian([x, y], [sin(x+y)-0.1-1.5*x, x**2+y**2-1]))
F = np.array(build_F([sin(x+y)-0.1-1.5*x, x**2+y**2-1]))
x0, iters_newton_sys = newton_syst(J, F, [0.5, 0.75])
print('Newton method for system: ')
print('Amount of iterations: ' + str(iters_newton_sys))
print('Answer: ' + str(x0))
x0, iters_newton_sys_mod = newton_syst_mod(J, F, [0.5, 0.75])
print('Modified newton method for system: ')
print('Amount of iterations: ' + str(iters_newton_sys_mod))
print(x0)
if __name__ == '__main__':
main()
|
[
"andrew.1.kulikov@gmail.com"
] |
andrew.1.kulikov@gmail.com
|
3deab869a7655e415fb6f31d42792fab08077a20
|
fa04326369fe55edbc72958b7a77affe8c38e7e4
|
/pyart/correct/unwrap.py
|
7c6a903447e52e921844c9fc5775989681393ab7
|
[
"BSD-3-Clause"
] |
permissive
|
tsupinie/pyart
|
9a8259c243deaf237f5395e27957678a11c86bb9
|
a65dfa86726bf9b69c41eaefc598d3b48007d128
|
refs/heads/master
| 2021-01-12T21:03:08.429546
| 2015-05-26T15:18:13
| 2015-05-26T15:18:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,318
|
py
|
"""
pyart.correct.unwrap
====================
Dealias using multidimensional phase unwrapping algorithms.
.. autosummary::
:toctree: generated/
dealias_unwrap_phase
_dealias_unwrap_3d
_dealias_unwrap_2d
_dealias_unwrap_1d
_verify_unwrap_unit
_is_radar_cubic
_is_radar_sweep_aligned
_is_radar_sequential
_is_sweep_sequential
"""
from __future__ import print_function
import numpy as np
from ..config import get_metadata
from ._common_dealias import _parse_fields, _parse_gatefilter
from ._common_dealias import _parse_rays_wrap_around, _parse_nyquist_vel
from ._unwrap_1d import unwrap_1d
from ._unwrap_2d import unwrap_2d
from ._unwrap_3d import unwrap_3d
def dealias_unwrap_phase(
radar, unwrap_unit='sweep', nyquist_vel=None,
check_nyquist_uniform=True, gatefilter=False,
rays_wrap_around=None, keep_original=False, vel_field=None,
corr_vel_field=None, skip_checks=False, **kwargs):
"""
Dealias Doppler velocities using multi-dimensional phase unwrapping.
Parameters
----------
radar : Radar
Radar object containing Doppler velocities to dealias.
unwrap_unit : {'ray', 'sweep', 'volume'}, optional
Unit to unwrap independently. 'ray' will unwrap each ray
individually, 'sweep' each sweep, and 'volume' will unwrap the entire
volume in a single pass. 'sweep', the default, often gives superior
results when the lower sweeps of the radar volume are contaminated by
clutter. 'ray' does not use the gatefilter parameter and rays where
        gates are masked will result in poor dealiasing for that ray.
    nyquist_vel : array_like or float, optional
Nyquist velocity in unit identical to those stored in the radar's
velocity field, either for each sweep or a single value which will be
used for all sweeps. None will attempt to determine this value from
the Radar object. The Nyquist velocity of the first sweep is used
for all dealiasing unless the unwrap_unit is 'sweep' when the
velocities of each sweep are used.
check_nyquist_uniform : bool, optional
True to check if the Nyquist velocities are uniform for all rays
within a sweep, False will skip this check. This parameter is ignored
when the nyquist_velocity parameter is not None.
gatefilter : GateFilter, None or False, optional.
        A GateFilter instance which specifies which gates should be
        ignored when performing de-aliasing. A value of None creates this
filter from the radar moments using any additional arguments by
passing them to :py:func:`moment_based_gate_filter`. False, the
default, disables filtering including all gates in the dealiasing.
rays_wrap_around : bool or None, optional
True when the rays at the beginning of the sweep and end of the sweep
should be interpreted as connected when de-aliasing (PPI scans).
        False if the edges should not be interpreted as connected (other scan
types). None will determine the correct value from the radar
scan type.
keep_original : bool, optional
True to retain the original Doppler velocity values at gates
where the dealiasing procedure fails or was not applied. False
        does no replacement and these gates will be masked in the corrected
velocity field.
vel_field : str, optional
Field in radar to use as the Doppler velocities during dealiasing.
None will use the default field name from the Py-ART configuration
file.
corr_vel_field : str, optional
Name to use for the dealiased Doppler velocity field metadata. None
will use the default field name from the Py-ART configuration file.
skip_checks : bool
        True to skip checks verifying that an appropriate unwrap_unit is
        selected, False retains these checks. Setting this parameter to True
is not recommended and is only offered as an option for extreme cases.
Returns
-------
corr_vel : dict
Field dictionary containing dealiased Doppler velocities. Dealiased
array is stored under the 'data' key.
References
----------
.. [1] Miguel Arevallilo Herraez, David R. Burton, Michael J. Lalor,
and Munther A. Gdeisat, "Fast two-dimensional phase-unwrapping
algorithm based on sorting by reliability following a noncontinuous
path", Journal Applied Optics, Vol. 41, No. 35 (2002) 7437,
.. [2] Abdul-Rahman, H., Gdeisat, M., Burton, D., & Lalor, M., "Fast
three-dimensional phase-unwrapping algorithm based on sorting by
reliability following a non-continuous path. In W. Osten,
C. Gorecki, & E. L. Novak (Eds.), Optical Metrology (2005) 32--40,
International Society for Optics and Photonics.
"""
vel_field, corr_vel_field = _parse_fields(vel_field, corr_vel_field)
gatefilter = _parse_gatefilter(gatefilter, radar, **kwargs)
rays_wrap_around = _parse_rays_wrap_around(rays_wrap_around, radar)
nyquist_vel = _parse_nyquist_vel(nyquist_vel, radar, check_nyquist_uniform)
if not skip_checks:
_verify_unwrap_unit(radar, unwrap_unit)
# exclude masked and invalid velocity gates
gatefilter.exclude_masked(vel_field)
gatefilter.exclude_invalid(vel_field)
gfilter = gatefilter.gate_excluded
# raw vel. data possibly with masking
raw_vdata = radar.fields[vel_field]['data']
vdata = raw_vdata.view(np.ndarray) # mask removed
# perform dealiasing
if unwrap_unit == 'ray':
# 1D unwrapping does not use the gate filter nor respect
# masked gates in the rays. No information from the radar object is
# needed for the unfolding
data = _dealias_unwrap_1d(vdata, nyquist_vel)
elif unwrap_unit == 'sweep':
data = _dealias_unwrap_2d(
radar, vdata, nyquist_vel, gfilter, rays_wrap_around)
elif unwrap_unit == 'volume':
data = _dealias_unwrap_3d(
radar, vdata, nyquist_vel, gfilter, rays_wrap_around)
else:
message = ("Unknown `unwrap_unit` parameter, must be one of"
"'ray', 'sweep', or 'volume'")
raise ValueError(message)
# mask filtered gates
if np.any(gfilter):
data = np.ma.array(data, mask=gfilter)
# restore original values where dealiasing not applied
if keep_original:
data[gfilter] = raw_vdata[gfilter]
# return field dictionary containing dealiased Doppler velocities
corr_vel = get_metadata(corr_vel_field)
corr_vel['data'] = data
return corr_vel
def _dealias_unwrap_3d(radar, vdata, nyquist_vel, gfilter, rays_wrap_around):
""" Dealias using 3D phase unwrapping (full volume at once). """
# form cube and scale to phase units
nyquist_vel = nyquist_vel[0] # must be uniform, not checked
shape = (radar.nsweeps, -1, radar.ngates)
scaled_cube = (np.pi * vdata / nyquist_vel).reshape(shape)
filter_cube = gfilter.reshape(shape)
# perform unwrapping
wrapped = np.require(scaled_cube, np.float64, ['C'])
mask = np.require(filter_cube, np.uint8, ['C'])
unwrapped = np.empty_like(wrapped, dtype=np.float64, order='C')
unwrap_3d(wrapped, mask, unwrapped, [False, rays_wrap_around, False])
# scale back to velocity units
unwrapped_cube = unwrapped * nyquist_vel / np.pi
unwrapped_volume = unwrapped_cube.reshape(-1, radar.ngates)
unwrapped_volume = unwrapped_volume.astype(vdata.dtype)
return unwrapped_volume
def _dealias_unwrap_1d(vdata, nyquist_vel):
""" Dealias using 1D phase unwrapping (ray-by-ray) """
# nyquist_vel is only available sweep by sweep which has been lost at
    # this point. Mentioned in the documentation.
nyquist_vel = nyquist_vel[0]
data = np.empty_like(vdata)
for i, ray in enumerate(vdata):
# extract ray and scale to phase units
scaled_ray = ray * np.pi / nyquist_vel
# perform unwrapping
wrapped = np.require(scaled_ray, np.float64, ['C'])
unwrapped = np.empty_like(wrapped, dtype=np.float64, order='C')
unwrap_1d(wrapped, unwrapped)
# scale back into velocity units and store
data[i] = unwrapped * nyquist_vel / np.pi
return data
def _dealias_unwrap_2d(radar, vdata, nyquist_vel, gfilter, rays_wrap_around):
""" Dealias using 2D phase unwrapping (sweep-by-sweep). """
data = np.zeros_like(vdata)
for nsweep, sweep_slice in enumerate(radar.iter_slice()):
# extract sweep and scale to phase units
sweep_nyquist_vel = nyquist_vel[nsweep]
scaled_sweep = vdata[sweep_slice] * np.pi / sweep_nyquist_vel
sweep_mask = gfilter[sweep_slice]
# perform unwrapping
wrapped = np.require(scaled_sweep, np.float64, ['C'])
mask = np.require(sweep_mask, np.uint8, ['C'])
unwrapped = np.empty_like(wrapped, dtype=np.float64, order='C')
unwrap_2d(wrapped, mask, unwrapped, [rays_wrap_around, False])
# scale back into velocity units and store
data[sweep_slice, :] = unwrapped * sweep_nyquist_vel / np.pi
return data
def _verify_unwrap_unit(radar, unwrap_unit):
"""
Verify that the radar supports the requested unwrap unit
raises a ValueError if the unwrap_unit is not supported.
"""
if unwrap_unit == 'sweep' or unwrap_unit == 'volume':
if _is_radar_sequential(radar) is False:
mess = ("rays are not sequentially ordered, must use 'ray' "
"unwrap_unit.")
raise ValueError(mess)
if unwrap_unit == 'volume':
if _is_radar_cubic(radar) is False:
mess = "Non-cubic radar volume, 'volume' unwrap_unit invalid. "
raise ValueError(mess)
if _is_radar_sweep_aligned(radar) is False:
mess = ("Angle in sequential sweeps in radar volumes are not "
"aligned, 'volume unwrap_unit invalid")
raise ValueError(mess)
def _is_radar_cubic(radar):
""" Test if a radar is cubic (sweeps have the same number of rays). """
rays_per_sweep = radar.rays_per_sweep['data']
return bool(np.all(rays_per_sweep == rays_per_sweep[0]))
def _is_radar_sweep_aligned(radar, diff=0.1):
"""
Test that all sweeps in the radar sample nearly the same angles.
Test that the maximum difference in sweep sampled angles is below
    `diff` degrees. The radar should first be tested to verify that it is cubic
before calling this function using the _is_radar_cubic function.
"""
if radar.nsweeps == 1:
return True # all single sweep volume are sweep aligned
if radar.scan_type == 'ppi':
angles = radar.azimuth['data']
elif radar.scan_type == 'rhi':
angles = radar.elevation['data']
else:
raise ValueError('invalid scan_type: %s' % (radar.scan_type))
starts = radar.sweep_start_ray_index['data']
ends = radar.sweep_end_ray_index['data']
ref_angles = angles[starts[0]:ends[0] + 1]
for start, end in zip(starts, ends):
test_angles = angles[start:end+1]
if np.any(np.abs(test_angles - ref_angles) > diff):
return False
return True
def _is_radar_sequential(radar):
""" Test if all sweeps in radar are sequentially ordered. """
    for i in range(radar.nsweeps):
if not _is_sweep_sequential(radar, i):
return False
return True
def _is_sweep_sequential(radar, sweep_number):
""" Test if a specific sweep is sequentially ordered. """
start = radar.sweep_start_ray_index['data'][sweep_number]
end = radar.sweep_end_ray_index['data'][sweep_number]
if radar.scan_type == 'ppi':
angles = radar.azimuth['data'][start:end+1]
elif radar.scan_type == 'rhi':
angles = radar.elevation['data'][start:end+1]
elif radar.scan_type == 'vpt':
# for VPT scan time should not run backwards, so time is the
# equivalent variable to an angle.
angles = radar.time['data']
else:
raise ValueError('invalid scan_type: %s' % (radar.scan_type))
rolled_angles = np.roll(angles, -np.argmin(angles))
return np.all(np.diff(rolled_angles) >= 0)
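# Usage sketch (assumes a pyart Radar object whose Doppler field is named
# 'velocity'; kept as comments so importing this module has no side effects):
#   corr = dealias_unwrap_phase(radar, unwrap_unit='sweep', vel_field='velocity')
#   radar.add_field('corrected_velocity', corr, replace_existing=True)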
|
[
"jjhelmus@gmail.com"
] |
jjhelmus@gmail.com
|
9024dbe589356bc46c896a0da84b6eb270ac2e4f
|
6a63a3b241e161d1e69f1521077617ad86f31eab
|
/python/ray/util/client/__init__.py
|
17094dae04f1470fa1ddd2b12dcae5e97f7f3a43
|
[
"MIT",
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
jovany-wang/ray
|
47a9df67e8ea26337517d625df50eb0b8b892135
|
227aef381a605cb1ebccbba4e84b840634196a35
|
refs/heads/master
| 2023-09-03T23:53:00.050619
| 2022-08-20T21:50:52
| 2022-08-20T21:50:52
| 240,190,407
| 1
| 1
|
Apache-2.0
| 2023-03-04T08:57:04
| 2020-02-13T06:13:19
|
Python
|
UTF-8
|
Python
| false
| false
| 11,454
|
py
|
import logging
import os
import sys
import threading
from typing import Any, Dict, List, Optional, Tuple
import grpc
import ray._private.ray_constants as ray_constants
from ray._private.client_mode_hook import (
_explicitly_disable_client_mode,
_explicitly_enable_client_mode,
)
from ray._private.ray_logging import setup_logger
from ray.job_config import JobConfig
from ray.util.annotations import DeveloperAPI
logger = logging.getLogger(__name__)
# This version string is incremented to indicate breaking changes in the
# protocol that require upgrading the client version.
CURRENT_PROTOCOL_VERSION = "2022-07-24"
class _ClientContext:
def __init__(self):
from ray.util.client.api import _ClientAPI
self.api = _ClientAPI()
self.client_worker = None
self._server = None
self._connected_with_init = False
self._inside_client_test = False
def connect(
self,
conn_str: str,
job_config: JobConfig = None,
secure: bool = False,
metadata: List[Tuple[str, str]] = None,
connection_retries: int = 3,
namespace: str = None,
*,
ignore_version: bool = False,
_credentials: Optional[grpc.ChannelCredentials] = None,
ray_init_kwargs: Optional[Dict[str, Any]] = None,
) -> Dict[str, Any]:
"""Connect the Ray Client to a server.
Args:
conn_str: Connection string, in the form "[host]:port"
job_config: The job config of the server.
secure: Whether to use a TLS secured gRPC channel
metadata: gRPC metadata to send on connect
connection_retries: number of connection attempts to make
ignore_version: whether to ignore Python or Ray version mismatches.
This should only be used for debugging purposes.
Returns:
Dictionary of connection info, e.g., {"num_clients": 1}.
"""
# Delay imports until connect to avoid circular imports.
from ray.util.client.worker import Worker
if self.client_worker is not None:
if self._connected_with_init:
return
raise Exception("ray.init() called, but ray client is already connected")
if not self._inside_client_test:
# If we're calling a client connect specifically and we're not
# currently in client mode, ensure we are.
_explicitly_enable_client_mode()
if namespace is not None:
job_config = job_config or JobConfig()
job_config.set_ray_namespace(namespace)
logging_level = ray_constants.LOGGER_LEVEL
logging_format = ray_constants.LOGGER_FORMAT
if ray_init_kwargs is None:
ray_init_kwargs = {}
# NOTE(architkulkarni): env_hook is not supported with Ray Client.
ray_init_kwargs["_skip_env_hook"] = True
if ray_init_kwargs.get("logging_level") is not None:
logging_level = ray_init_kwargs["logging_level"]
if ray_init_kwargs.get("logging_format") is not None:
logging_format = ray_init_kwargs["logging_format"]
setup_logger(logging_level, logging_format)
try:
self.client_worker = Worker(
conn_str,
secure=secure,
_credentials=_credentials,
metadata=metadata,
connection_retries=connection_retries,
)
self.api.worker = self.client_worker
self.client_worker._server_init(job_config, ray_init_kwargs)
conn_info = self.client_worker.connection_info()
self._check_versions(conn_info, ignore_version)
self._register_serializers()
return conn_info
except Exception:
self.disconnect()
raise
def _register_serializers(self):
"""Register the custom serializer addons at the client side.
The server side should have already registered the serializers via
regular worker's serialization_context mechanism.
"""
import ray.util.serialization_addons
from ray.util.serialization import StandaloneSerializationContext
ctx = StandaloneSerializationContext()
ray.util.serialization_addons.apply(ctx)
def _check_versions(self, conn_info: Dict[str, Any], ignore_version: bool) -> None:
local_major_minor = f"{sys.version_info[0]}.{sys.version_info[1]}"
if not conn_info["python_version"].startswith(local_major_minor):
version_str = f"{local_major_minor}.{sys.version_info[2]}"
msg = (
"Python minor versions differ between client and server:"
+ f" client is {version_str},"
+ f" server is {conn_info['python_version']}"
)
if ignore_version or "RAY_IGNORE_VERSION_MISMATCH" in os.environ:
logger.warning(msg)
else:
raise RuntimeError(msg)
if CURRENT_PROTOCOL_VERSION != conn_info["protocol_version"]:
msg = (
"Client Ray installation incompatible with server:"
+ f" client is {CURRENT_PROTOCOL_VERSION},"
+ f" server is {conn_info['protocol_version']}"
)
if ignore_version or "RAY_IGNORE_VERSION_MISMATCH" in os.environ:
logger.warning(msg)
else:
raise RuntimeError(msg)
def disconnect(self):
"""Disconnect the Ray Client."""
from ray.util.client.api import _ClientAPI
if self.client_worker is not None:
self.client_worker.close()
self.api = _ClientAPI()
self.client_worker = None
# remote can be called outside of a connection, which is why it
# exists on the same API layer as connect() itself.
def remote(self, *args, **kwargs):
"""remote is the hook stub passed on to replace `ray.remote`.
This sets up remote functions or actors, as the decorator,
but does not execute them.
Args:
args: opaque arguments
kwargs: opaque keyword arguments
"""
return self.api.remote(*args, **kwargs)
def __getattr__(self, key: str):
if self.is_connected():
return getattr(self.api, key)
elif key in ["is_initialized", "_internal_kv_initialized"]:
# Client is not connected, thus Ray is not considered initialized.
return lambda: False
else:
raise Exception(
"Ray Client is not connected. Please connect by calling `ray.init`."
)
def is_connected(self) -> bool:
if self.client_worker is None:
return False
return self.client_worker.is_connected()
def init(self, *args, **kwargs):
if self._server is not None:
raise Exception("Trying to start two instances of ray via client")
import ray.util.client.server.server as ray_client_server
server_handle, address_info = ray_client_server.init_and_serve(
"127.0.0.1:50051", *args, **kwargs
)
self._server = server_handle.grpc_server
self.connect("127.0.0.1:50051")
self._connected_with_init = True
return address_info
def shutdown(self, _exiting_interpreter=False):
self.disconnect()
import ray.util.client.server.server as ray_client_server
if self._server is None:
return
ray_client_server.shutdown_with_server(self._server, _exiting_interpreter)
self._server = None
# All connected context will be put here
# This struct will be guarded by a lock for thread safety
_all_contexts = set()
_lock = threading.Lock()
# This is the default context which is used when allow_multiple is not True
_default_context = _ClientContext()
@DeveloperAPI
class RayAPIStub:
"""This class stands in as the replacement API for the `import ray` module.
Much like the ray module, this mostly delegates the work to the
_client_worker. As parts of the ray API are covered, they are piped through
here or on the client worker API.
"""
def __init__(self):
self._cxt = threading.local()
self._cxt.handler = _default_context
self._inside_client_test = False
def get_context(self):
try:
return self._cxt.__getattribute__("handler")
except AttributeError:
self._cxt.handler = _default_context
return self._cxt.handler
def set_context(self, cxt):
old_cxt = self.get_context()
if cxt is None:
self._cxt.handler = _ClientContext()
else:
self._cxt.handler = cxt
return old_cxt
def is_default(self):
return self.get_context() == _default_context
def connect(self, *args, **kw_args):
self.get_context()._inside_client_test = self._inside_client_test
conn = self.get_context().connect(*args, **kw_args)
global _lock, _all_contexts
with _lock:
_all_contexts.add(self._cxt.handler)
return conn
def disconnect(self, *args, **kw_args):
global _lock, _all_contexts, _default_context
with _lock:
if _default_context == self.get_context():
for cxt in _all_contexts:
cxt.disconnect(*args, **kw_args)
_all_contexts = set()
else:
self.get_context().disconnect(*args, **kw_args)
if self.get_context() in _all_contexts:
_all_contexts.remove(self.get_context())
if len(_all_contexts) == 0:
_explicitly_disable_client_mode()
def remote(self, *args, **kwargs):
return self.get_context().remote(*args, **kwargs)
def __getattr__(self, name):
return self.get_context().__getattr__(name)
def is_connected(self, *args, **kwargs):
return self.get_context().is_connected(*args, **kwargs)
def init(self, *args, **kwargs):
ret = self.get_context().init(*args, **kwargs)
global _lock, _all_contexts
with _lock:
_all_contexts.add(self._cxt.handler)
return ret
def shutdown(self, *args, **kwargs):
global _lock, _all_contexts
with _lock:
if _default_context == self.get_context():
for cxt in _all_contexts:
cxt.shutdown(*args, **kwargs)
_all_contexts = set()
else:
self.get_context().shutdown(*args, **kwargs)
if self.get_context() in _all_contexts:
_all_contexts.remove(self.get_context())
if len(_all_contexts) == 0:
_explicitly_disable_client_mode()
ray = RayAPIStub()
@DeveloperAPI
def num_connected_contexts():
"""Return the number of client connections active."""
global _lock, _all_contexts
with _lock:
return len(_all_contexts)
# Someday we might add methods in this module so that someone who
# tries to `import ray_client as ray` -- as a module, instead of
# `from ray_client import ray` -- as the API stub
# still gets expected functionality. This is the way the ray package
# worked in the past.
#
# This really calls for PEP 562: https://www.python.org/dev/peps/pep-0562/
# But until Python 3.6 is EOL, here we are.
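# Usage sketch (assumes a Ray Client server is listening on the given address;
# kept as comments to avoid connecting at import time):
#   from ray.util.client import ray
#   ray.connect("localhost:10001")   # returns a dict of connection info
#   ray.disconnect()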
|
[
"noreply@github.com"
] |
jovany-wang.noreply@github.com
|
c2705f3045b807f67db1b7f4cad8f941630fc88b
|
61fb59938d2bf5d658a90a7ccc21665fd1da0af8
|
/apps/gateway/forms.py
|
f2e818293c751ff4a7754d522a8f586c2ad28540
|
[] |
no_license
|
ian0411/oscar
|
712f88c27050877071eba998637211aba0703547
|
29529e6cea4dc97c75fc22534413c2fb041e9a52
|
refs/heads/master
| 2020-12-03T03:51:20.256079
| 2017-06-29T13:51:15
| 2017-06-29T13:51:15
| 95,782,541
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 490
|
py
|
from django import forms
from django.contrib.auth.models import User
from oscar.apps.customer.utils import normalise_email
class GatewayForm(forms.Form):
email = forms.EmailField()
def clean_email(self):
email = normalise_email(self.cleaned_data['email'])
if User.objects.filter(email__iexact=email).exists():
raise forms.ValidationError(
"A user already exists with email %s" % email
)
return email
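# Usage sketch with a hypothetical address (kept as comments):
#   form = GatewayForm(data={'email': 'New.User@Example.com'})
#   form.is_valid()              # False if a user with that email already exists
#   form.cleaned_data['email']   # the normalised address when validation passes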
|
[
"ian0411@hotmail.com"
] |
ian0411@hotmail.com
|
b1845fcdadfeb8237cb59e861fa3452738df75e7
|
886aed76cc1a26de9eb5b42f446022cafe63b03c
|
/haste_processing_node/haste_storage_client_cache.py
|
a51c743413a0f3a113d50dc2103dd2e0fb2d033c
|
[] |
no_license
|
HASTE-project/haste-image-analysis-container
|
83411dab2be41e2a85194e4302e5920db95998b1
|
5f84b2cd8947797b774bb95e1edc8e5e5e5f7c13
|
refs/heads/master
| 2021-10-11T22:54:27.170172
| 2019-01-30T09:57:05
| 2019-01-30T09:57:05
| 119,521,462
| 0
| 0
| null | 2019-01-30T09:57:06
| 2018-01-30T10:36:22
|
Python
|
UTF-8
|
Python
| false
| false
| 3,260
|
py
|
from haste_storage_client.core import HasteStorageClient, OS_SWIFT_STORAGE, TRASH
from haste.windowed_conformal_model.conformal_interestingness_model import ConformalInterestingnessModel
import json
import os.path
import urllib.request
haste_storage_clients_az_lnp = {}
haste_storage_clients_vironova = {}
def __get_magic_haste_client_config_from_server(host):
print('attempting to read config info from ' + host + '...', flush=True)
t = 'w0rj540vhw8dx0ng0t6nw8cghp'
url = 'http://' + host + ':27000/' + t + '/haste_storage_client_config.json'
stream = urllib.request.urlopen(url, timeout=2)
config = stream.read()
config = config.decode('utf-8')
config = json.loads(config)
return config
def __get_haste_storage_client_config():
# If a local config file exists, use it:
json_config = os.path.expanduser('~/.haste/haste_storage_client_config.json')
if os.path.isfile(json_config):
return None # Client will attempt to read config from this file if passed 'None'.
# Otherwise, use the auto-configuration server:
# There is no DNS for SNIC, so hostnames won't work here. (unless /etc/hosts is updated inside the container).
for host in ['192.168.1.28', # metadata-db-prod (private)
'130.239.81.96', # metadata-db-prod (public)
'127.0.0.1']:
try:
return __get_magic_haste_client_config_from_server(host)
except Exception as e:
print(e)
print('...failed')
print('failed reading config from all locations', flush=True)
def get_storage_client_az_lnp(stream_id):
# For the Vironova dataset, streamed from microscope.
if stream_id not in haste_storage_clients_az_lnp:
haste_storage_client_config = __get_haste_storage_client_config()
model = ConformalInterestingnessModel()
client = HasteStorageClient(stream_id,
config=haste_storage_client_config,
interestingness_model=model,
storage_policy=[(0.5, 1.0, OS_SWIFT_STORAGE)]) # discard blobs which don't match the policy.
print('creating client for stream ID: ' + stream_id, flush=True)
haste_storage_clients_az_lnp[stream_id] = client
# TODO: only cache N clients.
return haste_storage_clients_az_lnp[stream_id]
def get_storage_client_vironova(stream_id):
if stream_id not in haste_storage_clients_vironova:
haste_storage_client_config = __get_haste_storage_client_config()
# Default to 1.0
model = None
client = HasteStorageClient(stream_id,
config=haste_storage_client_config,
interestingness_model=model,
storage_policy=[(0.0, 1.0, OS_SWIFT_STORAGE)]) # discard blobs which don't match the policy.
print('creating client for stream ID: ' + stream_id, flush=True)
haste_storage_clients_vironova[stream_id] = client
# TODO: only cache N clients.
return haste_storage_clients_vironova[stream_id]
if __name__ == '__main__':
# Test
config = __get_haste_storage_client_config()
print(config)
|
[
"blamey.ben@gmail.com"
] |
blamey.ben@gmail.com
|
06fe70e2702191d04c51c0c22a73d8d266a8bc53
|
b98aa0f5c354a1840a8be7ecd92445edc9a7f280
|
/manage.py
|
64c973d6e7dc217a367dfb4bff92d3a4a6374d9c
|
[
"Unlicense"
] |
permissive
|
SpaceHotDog/Flask_API
|
337297ceb36367505bb7e19cde3dbdb462d87058
|
8ce5136f44ee21a0e0ab77a1d8fefadb0dbeb74d
|
refs/heads/master
| 2021-01-16T18:00:16.591922
| 2017-08-12T18:24:01
| 2017-08-12T18:24:01
| 100,034,648
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 719
|
py
|
# manage.py
import os
from flask_script import Manager # Class for handling a set of commands.
from flask_migrate import Migrate, MigrateCommand
from app import db, create_app
# Import the models so that the script can find the models to be migrated.
from app import models
app = create_app(config_name=os.getenv('APP_SETTINGS'))
# The MigrateCommand contains a set of migration commands.
migrate = Migrate(app, db)
# The Manager class keeps track of all the commands and handles how they are called from the command line.
manager = Manager(app)
# Manager also adds the migration commands and enforces that they start with db.
manager.add_command('db', MigrateCommand)
if __name__ == '__main__':
manager.run()
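# Typical invocations (assumes APP_SETTINGS points at a valid config name):
#   python manage.py db init      # create the migrations directory
#   python manage.py db migrate   # autogenerate a migration from the models
#   python manage.py db upgrade   # apply pending migrations to the database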
|
[
"noreply@github.com"
] |
SpaceHotDog.noreply@github.com
|
70525813a57f89b68432c0c09c5e6cc1b4285b92
|
627867eca0d82f8fb399a4b7320da7979af3e8dd
|
/venv/bin/pip3
|
d631029b89174cafdc79fb08b15d169c09560ff3
|
[] |
no_license
|
itoutsourcing86/photo-api
|
bd3bf6f79a163d26f6bb3629861e3bdc190a17d5
|
8886b8fb86dbcc7163cdb4f3ad519ffb9d8ddc33
|
refs/heads/master
| 2021-04-06T19:51:33.166133
| 2018-03-15T10:45:20
| 2018-03-15T10:45:20
| 125,268,301
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 394
|
#!/home/alex/PycharmProjects/api/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==9.0.1','console_scripts','pip3'
__requires__ = 'pip==9.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==9.0.1', 'console_scripts', 'pip3')()
)
|
[
"itoutsourcing86@gmail.com"
] |
itoutsourcing86@gmail.com
|
|
2b3d1686bc46f44039b6cf79b155c3b1269ec801
|
ee1de78906a835f60a2f4e7eec5daac6acec4d41
|
/day07/buying3_4.py
|
dc18e8d21b78787a258e7165e258e948f962ba48
|
[] |
no_license
|
ktb5891/ML_lecture
|
7d3663f550ffbaa9aa18462c53d48e94bd6a22bf
|
ee691d6fdfe9a2835be7f65fbce96cf1d160e15a
|
refs/heads/main
| 2023-06-03T03:03:10.667083
| 2021-06-24T00:43:15
| 2021-06-24T00:43:15
| 377,467,891
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,034
|
py
|
import requests
from bs4 import BeautifulSoup
import pymysql
def save_data(item_info):
sql = "select count(*) from items where item_code= " + item_info['item_code'] + ";"
cursor.execute(sql)
result = cursor.fetchone()
if result[0] == 0:
sql2 = """insert into items values('""" + item_info['item_code'] + """',
'""" + item_info['title'] + """',
""" + str(item_info['origin_price']) + """,
""" + str(item_info['discount_price']) + """,
""" + str(item_info['discount_percent']) + """,
'""" + item_info['provider'] + """');"""
# print(sql2)
cursor.execute(sql2)
sql1 = """insert into ranking(main_category, sub_category, item_ranking, item_code) values('""" + item_info[
'category_name'] + """',
'""" + item_info['sub_category_name'] + """',
""" + str(item_info['ranking']) + """,
'""" + item_info['item_code'] + """');"""
# print(sql1)
cursor.execute(sql1)
def get_items(html, category_name, sub_category_name):
items_result_list = list()
best_item = html.select('div.best-list')
for idx, item in enumerate(best_item[1].select('li')):
dict_data = dict()
ranking = idx + 1
title = item.select_one('a.itemname')
origin_price = item.select_one('div.o-price')
discount_price = item.select_one('div.s-price strong span')
discount_percent = item.select_one('div.s-price em')
if origin_price == None or origin_price.get_text() == '':
origin_price = discount_price
if discount_price == None:
origin_price, discount_price = 0, 0
else:
origin_price = origin_price.get_text().replace(',', '').replace('원', '')
discount_price = discount_price.get_text().replace(',', '').replace('원', '')
if discount_percent == None or discount_percent == '':
discount_percent = 0
else:
discount_percent = discount_percent.get_text().replace('%', '')
product_link = item.select_one('div.thumb > a')
item_code = product_link.attrs['href'].split('=')[1].split('&')[0]
res = requests.get(product_link.attrs['href'])
soup = BeautifulSoup(res.content, 'html.parser')
provider = soup.select_one('div.item-topinfo_headline > p > span')
if provider == None:
provider = ''
else:
provider = provider.get_text()
dict_data['category_name'] = category_name
dict_data['sub_category_name'] = sub_category_name
dict_data['ranking'] = ranking
dict_data['title'] = title.get_text()
dict_data['origin_price'] = origin_price
dict_data['discount_price'] = discount_price
dict_data['discount_percent'] = discount_percent
dict_data['item_code'] = item_code
dict_data['provider'] = provider.replace('\n', '')
save_data(dict_data)
# print(dict_data)
# print(category_name, sub_category_name, ranking, item_code, provider, title.get_text(), origin_price, discount_price, discount_percent)
def get_category(category_link, category_name):
res = requests.get(category_link)
soup = BeautifulSoup(res.content, "html.parser")
# print(category_link, category_name)
# get_items(soup, category_name, "ALL")
sub_categories = soup.select('div.gbest-cate div.cate-l div.navi.group ul li a')
for sub_category in categories:
res = requests.get('http://corners.gmarket.co.kr' + sub_category['href'])
soup = BeautifulSoup(res.content, 'html.parser')
get_items(soup, category_name, sub_category.get_text())
print(category_link, category_name, sub_category.get_text(), 'http://corners.gmarket.co.kr' + sub_category['href'])
conn = pymysql.connect(host='localhost', port=3306, user='root', passwd='1234', db='mydb', charset='utf8')
cursor = conn.cursor()
res = requests.get("http://corners.gmarket.co.kr")
soup = BeautifulSoup(res.content, "html.parser")
categories = soup.select('div.gbest-cate ul.by-group li a')
for category in categories:
print('http://corners.gmarket.co.kr' + category['href'], category.get_text())
get_category('http://corners.gmarket.co.kr' + category['href'], category.get_text())
|
[
"ktb5891@gmail.com"
] |
ktb5891@gmail.com
|
19b532f43cbeab3da7e5490cd2edcb009dd28067
|
a9665b57c3ecf22fbd372b7433ea72b8959695c6
|
/catkin_ws/build/gazebo_simulation_scene/catkin_generated/pkg.develspace.context.pc.py
|
2ed6bf622a24187d4d45f0d536ba33fb81f3d89d
|
[] |
no_license
|
ShamaineChung/ROS_ABB_workspace
|
5196aa792927498da7d03de800f5c3695525de1b
|
64373a8287af1e53f50110e8def1463c63f0dcd7
|
refs/heads/main
| 2023-07-12T18:10:18.757134
| 2021-08-25T10:07:13
| 2021-08-25T10:07:13
| 385,182,730
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 387
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "gazebo_simulation_scene"
PROJECT_SPACE_DIR = "/home/shamaine/catkin_ws/devel"
PROJECT_VERSION = "0.0.0"
|
[
"A00215756@student.ait.ie"
] |
A00215756@student.ait.ie
|
11ff1d1611b9c40fc911df54b4e95a6d14dabf45
|
0ef53ff59c5584588ed2e85390b0383bd34de73e
|
/Python/SICP/3.5.py
|
a3a06237648c1eaa71c37fdd425ab5878e0cde2b
|
[] |
no_license
|
bgmnbear/learn
|
f309fa459e0328a80e450fc4d37fba68015a2ece
|
0a29be76c715d37e6d7124103283b849831c3b58
|
refs/heads/master
| 2023-01-22T15:12:25.526422
| 2019-08-18T09:02:10
| 2019-08-18T09:02:10
| 115,715,622
| 0
| 2
| null | 2023-01-19T10:34:36
| 2017-12-29T11:07:19
|
Python
|
UTF-8
|
Python
| false
| false
| 3,699
|
py
|
from functools import reduce
from operator import mul
class Exp(object):
"""A call expression in Calculator."""
def __init__(self, operator, operands):
self.operator = operator
self.operands = operands
def __repr__(self):
return 'Exp({0}, {1})'.format(repr(self.operator), repr(self.operands))
def __str__(self):
operand_strs = ', '.join(map(str, self.operands))
return '{0}({1})'.format(self.operator, operand_strs)
def calc_apply(operator, args):
"""Apply the named operator to a list of args."""
if operator in ('add', '+'):
return sum(args)
if operator in ('sub', '-'):
if len(args) == 0:
raise TypeError(operator + ' requires at least 1 argument')
if len(args) == 1:
return -args[0]
return sum(args[:1] + [-arg for arg in args[1:]])
if operator in ('mul', '*'):
return reduce(mul, args, 1)
if operator in ('div', '/'):
if len(args) != 2:
raise TypeError(operator + ' requires exactly 2 arguments')
numer, denom = args
return numer / denom
def tokenize(line):
"""Convert a string into a list of tokens."""
spaced = line.replace('(', ' ( ').replace(')', ' ) ').replace(',', ' , ')
return spaced.split()
known_operators = ['add', 'sub', 'mul', 'div', '+', '-', '*', '/']
def analyze(tokens):
"""Create a tree of nested lists from a sequence of tokens."""
assert_non_empty(tokens)
token = analyze_token(tokens.pop(0))
if type(token) in (int, float):
return token
if token in known_operators:
if len(tokens) == 0 or tokens.pop(0) != '(':
raise SyntaxError('expected ( after ' + token)
return Exp(token, analyze_operands(tokens))
else:
raise SyntaxError('unexpected ' + token)
def analyze_operands(tokens):
"""Analyze a sequence of comma-separated operands."""
assert_non_empty(tokens)
operands = []
while tokens[0] != ')':
if operands and tokens.pop(0) != ',':
raise SyntaxError('expected ,')
operands.append(analyze(tokens))
assert_non_empty(tokens)
tokens.pop(0) # Remove )
return operands
def assert_non_empty(tokens):
"""Raise an exception if tokens is empty."""
if len(tokens) == 0:
raise SyntaxError('unexpected end of line')
def analyze_token(token):
"""Return the value of token if it can be analyzed as a number, or token."""
try:
return int(token)
except (TypeError, ValueError):
try:
return float(token)
except (TypeError, ValueError):
return token
def calc_parse(line):
"""Parse a line of calculator input and return an expression tree."""
tokens = tokenize(line)
expression_tree = analyze(tokens)
if len(tokens) > 0:
raise SyntaxError('Extra token(s): ' + ' '.join(tokens))
return expression_tree
def calc_eval(expression_tree):
pass
def read_eval_print_loop():
"""Run a read-eval-print loop for calculator."""
while True:
try:
expression_tree = calc_parse(input('calc> '))
print(calc_eval(expression_tree))
except (SyntaxError, TypeError, ZeroDivisionError) as err:
print(type(err).__name__ + ':', err)
except (KeyboardInterrupt, EOFError): # <Control>-D, etc.
print('Calculation completed.')
return
if __name__ == '__main__':
print(calc_apply('+', [1, 2, 3]))
print(calc_apply('-', [10, 1, 2, 3]))
print(calc_apply('*', []))
print(calc_apply('/', [40, 5]))
e = Exp('add', [2, Exp('mul', [4, 6])])
print(e, str(e))
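    # Added illustration: calc_parse builds the same tree from concrete syntax.
    print(calc_parse('add(2, mul(4, 6))'))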
|
[
"jasonwhister@gmail.com"
] |
jasonwhister@gmail.com
|
19f4062693725a5f8233224741ba911bc40832cb
|
1dea0c959bdc0d8a3c4be5b89e09cda994d96ced
|
/preprocess/utils.py
|
c591317e31c29cfb31c99aef1583bdf5dead8d83
|
[
"Apache-2.0"
] |
permissive
|
fangzheng354/relogic
|
d06796b95969b68a211664605f446f91eccba743
|
2e0ebae773ff39260e35e29eaae625182031f991
|
refs/heads/main
| 2023-07-01T12:31:56.384754
| 2021-08-03T04:40:41
| 2021-08-03T04:40:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,874
|
py
|
import os
from config import *
import random
import json
from tqdm import tqdm
from sql_formatter.formatting import translate_sql
import sqlite3
import multiprocessing
from multiprocessing import Manager
import time
random.seed(33)
def mkdir(path):
if os.path.exists(path):
print("{} already exists".format(path))
else:
os.mkdir(path)
print("{} creates".format(path))
def read_json(path):
f = open(path, "r", encoding="utf-8")
content = json.load(f)
f.close()
return content
def write_json(path, data):
f = open(path, "w", encoding="utf-8")
f.write(json.dumps(data, indent=4))
f.close()
def preprocess_spider(rawdata, t):
preprocess = {}
print("preprocess {}".format(t))
for data in tqdm(rawdata):
query = data[Spider_query]
translated_sql, translated_struct_sql = translate_sql(query)
preprocess[query] = translated_struct_sql
print("{} done".format(t))
return preprocess
def execute_sql(c, mutated_sql, return_dict, executable_SQL):
try:
cursor = c.execute(mutated_sql)
if executable_SQL:
if list(cursor):
return_dict[mutated_sql] = mutated_sql
else:
return_dict[mutated_sql] = mutated_sql
except:
pass
def get_dbschema(path):
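    # Build {db_id: {table_index: [column names]}}; each table's column list is
    # seeded with the '~' and '*' placeholder tokens used by the SQL mutator
    # ('~' is stripped out of the mutated query string later).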
db_schema = {}
with open(path) as f:
db_file = json.load(f)
for data in db_file:
db_schema[data['db_id']] = {}
for tab_id, col in data['column_names_original']:
if col == '*':
continue
if tab_id not in db_schema[data['db_id']]:
db_schema[data['db_id']][tab_id] = [col, '~', '*']
else:
db_schema[data['db_id']][tab_id] += [col]
return db_schema
def mutate_sql(index, data, time_out, sql_dict, db_schema, db_dir):
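    # Randomly perturb the gold SQL (swap keywords, swap/duplicate columns from
    # the schema, jitter numeric literals) and keep only mutations that still
    # execute against the SQLite database, each guarded by a per-query timeout.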
manager = Manager()
return_dict = manager.dict()
jobs = []
db_id = data['db_id']
raw_sql = data['query']
sql = data['query_toks']
tables = db_schema[db_id]
db_path = os.path.join(db_dir, db_id, db_id + '.sqlite')
mutated_sqls = []
if raw_sql not in sql_dict:
sql_dict[raw_sql] = []
else:
return
executable_SQL = True
conn = sqlite3.connect(db_path, timeout=10.0)
c = conn.cursor()
try:
cursor = c.execute(raw_sql)
if not list(cursor):
executable_SQL = False
except:
executable_SQL = False
for i in range(mutate_iter_num):
mutated_sql = []
for tok_i, tok in enumerate(sql):
upper_tok = tok.upper()
new_tok = tok
if random.random() > alpha:
for k, v in swap_dict.items():
if upper_tok in v:
swap_tok = random.choice(v)
new_tok = swap_tok if swap_tok != tok.upper() else tok
if random.random() > beta:
for k, v in tables.items():
if '.' in tok:
alias = tok.split('.')[0]
col = tok.split('.')[1]
if col in v or col.capitalize() in v:
col = random.choice(v)
new_tok = alias + '.' + col
else:
if tok in v or tok.capitalize() in v:
new_tok = random.choice(v)
if random.random() > gamma and new_tok != tok:
new_tok = tok + ' , ' + new_tok
if tok.isnumeric() and random.random() < theta:
tok = max(int(tok) + random.randint(-10, 10), 0)
new_tok = str(tok)
mutated_sql.append(new_tok)
mutated_sql = ' '.join(mutated_sql)
            mutated_sql = (mutated_sql.replace(", ~ ", ",").replace(" ~ ,", ",")
                           .replace(", ~ ,", ",").replace("~", "")
                           .replace('``', '\"').replace("''", '\"'))
if mutated_sql == ' '.join(sql):
continue
p = multiprocessing.Process(target=execute_sql, args=(c, mutated_sql, return_dict, executable_SQL))
jobs.append(p)
p.start()
start = time.time()
while time.time() - start <= time_out:
if not any(p.is_alive() for p in jobs):
break
time.sleep(.1)
else:
print("Timeout with processing: {} \n".format(raw_sql))
for p in jobs:
p.terminate()
p.join()
mutated_sqls = return_dict.values()
mutated_sqls = list(set(mutated_sqls))
sql_dict[raw_sql] = mutated_sqls
if len(mutated_sqls) < 5:
print("SQL {}: {}".format(index, raw_sql))
print(mutated_sqls)
print('Valid Muatation: {}'.format(len(mutated_sqls)), "\n--------------------------------------")
def create_output(t, idir, odir):
rawdir = os.path.join(odir, Raw)
preprocessdir = os.path.join(odir, Preprocess)
mkdir(rawdir)
mkdir(preprocessdir)
if t == 'spider':
traindata = read_json(os.path.join(idir, Spider_train))
otherdata = read_json(os.path.join(idir, Spider_others))
devdata = read_json(os.path.join(idir, Spider_dev))
rawtrain = []
rawdev = []
rawtest = devdata
rawoutofdomain = otherdata
random.shuffle(traindata)
train_len = round(len(traindata) * 0.8)
print("spider raw starts")
for i, data in enumerate(tqdm(traindata)):
if i < train_len:
rawtrain.append(data)
else:
rawdev.append(data)
print("spider raw done")
write_json(os.path.join(rawdir, Trainjson), rawtrain)
write_json(os.path.join(rawdir, Devjson), rawdev)
write_json(os.path.join(rawdir, Testjson), rawtest)
write_json(os.path.join(rawdir, Outofdomainjson), rawoutofdomain)
print("spider preprocess starts")
preprocesstrain = preprocess_spider(rawtrain, 'train')
write_json(os.path.join(preprocessdir, Trainjson), preprocesstrain)
preprocessdev = preprocess_spider(rawdev, 'dev')
write_json(os.path.join(preprocessdir, Devjson), preprocessdev)
preprocesstest = preprocess_spider(rawtest, 'test')
write_json(os.path.join(preprocessdir, Testjson), preprocesstest)
preprocessoutofdomain = preprocess_spider(rawoutofdomain, 'outofdomain')
write_json(os.path.join(preprocessdir, Outofdomainjson), preprocessoutofdomain)
print("spider preprocess done")
print("mutate starts")
db_schema = get_dbschema(os.path.join(idir, Spider_table))
total_data = []
total_data += traindata + devdata + otherdata
sql_dict = {}
for index, data in enumerate(tqdm(total_data)):
time_out = 3
mutate_sql(index, data, time_out, sql_dict, db_schema, os.path.join(idir, Spider_database))
write_json(os.path.join(preprocessdir, Mutationjson), sql_dict)
print("mutate done")
else:
print("spider preprocess starts")
preprocesstrain = preprocess_spider(rawtrain, 'train')
write_json(os.path.join(preprocessdir, Trainjson), preprocesstrain)
print("spider preprocess done")
"""print("mutate starts")
db_schema = get_dbschema(os.path.join(idir, Spider_table))
total_data = []
total_data += traindata + devdata + otherdata
sql_dict = {}
for index, data in enumerate(tqdm(total_data)):
time_out = 3
mutate_sql(index, data, time_out, sql_dict, db_schema, os.path.join(idir, Spider_database))
write_json(os.path.join(preprocessdir, Mutationjson), sql_dict)
print("mutate done")"""
|
[
"cjs7605@AD.PSU.EDU@e5-cse-rz01.ad.psu.edu"
] |
cjs7605@AD.PSU.EDU@e5-cse-rz01.ad.psu.edu
|
ce70ac7410a74010846a19763d8efd727ced99c9
|
5fdc2f8550925e79f4ae7f526327456a10ad1148
|
/object/method.py
|
450e18d81444b8324cee8e577002353e63683cde
|
[] |
no_license
|
bingozhou/python
|
d58ae62bf371eaef54887cdc187b62eddd695627
|
7a794acddac84e76f22c8fe7ec42147df61b9058
|
refs/heads/master
| 2021-01-01T18:32:56.121045
| 2017-07-31T01:26:10
| 2017-07-31T01:26:10
| 98,365,059
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 123
|
py
|
#!/usr/bin/python
# Filename:method.py
class Person:
def sayHi(self):
print 'Hello,how are you?'
p=Person()
p.sayHi()
|
[
"bingozhouy@qq.com"
] |
bingozhouy@qq.com
|
2a37f2bf163bae21be3b19485e8128f0b4923702
|
fa25d937309fea55ff5a33d30262012adedfaf19
|
/02_Arrays/anti_diagonals.py
|
4906cf5524a596ee5f150a96e1300cb1eed2dff8
|
[
"MIT"
] |
permissive
|
alqamahjsr/InterviewBit-1
|
20d033a2feecb85a37b28f2ff178b8d85424a6ea
|
fe2ce1bd64814c3a5687bf9b827b46bdbcf9144f
|
refs/heads/master
| 2020-09-16T18:31:10.326427
| 2018-12-09T11:13:14
| 2018-12-09T11:13:14
| 223,853,679
| 1
| 3
|
MIT
| 2019-11-25T03:31:24
| 2019-11-25T03:31:23
| null |
UTF-8
|
Python
| false
| false
| 975
|
py
|
# Anti Diagonals
# https://www.interviewbit.com/problems/anti-diagonals/
#
# Give a N*N square matrix, return an array of its anti-diagonals. Look at the example for more details.
#
# Example:
#
# Input:
#
# 1 2 3
# 4 5 6
# 7 8 9
#
# Return the following :
#
# [
# [1],
# [2, 4],
# [3, 5, 7],
# [6, 8],
# [9]
# ]
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
class Solution:
# @param A : list of list of integers
# @return a list of list of integers
def diagonal(self, A):
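        # Each element A[i][j] lies on anti-diagonal i + j, and an N x N matrix
        # has 2N - 1 anti-diagonals, so bucket every element by its i + j index.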
res = [list() for i in range(2 * len(A) - 1)]
for i in range(len(A)):
for j in range(len(A)):
res[i + j].append(A[i][j])
return res
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
if __name__ == "__main__":
s = Solution()
print(s.diagonal([
[1, 2, 3],
[4, 5, 6],
[7, 8, 9],
]))
|
[
"sladjan.kantar@modoolar.com"
] |
sladjan.kantar@modoolar.com
|
6d816f2493b0abbe65f6c1d2490503b6956f567a
|
f39806c814b8672b26c8b3d328c7003daa6ff3b5
|
/admin.py
|
6ddc3ea4b362b256b0d744898ccdf5835b17addf
|
[] |
no_license
|
veenakrishna123/POLLPROJECT
|
1f42fc1634f45ceb0a1a9133b15b5fe100c74873
|
457586a5931ff755356829a2095101df60a21c54
|
refs/heads/master
| 2020-06-07T14:50:27.380759
| 2019-06-21T06:34:18
| 2019-06-21T06:34:18
| 193,044,097
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 189
|
py
|
from django.contrib import admin
from .models import Question
from .models import Choice
admin.site.register(Question)
admin.site.register(Choice)
# Register your models here.
|
[
"noreply@github.com"
] |
veenakrishna123.noreply@github.com
|
1063f283444a8d4655b18e4f02e7125c1cd8136f
|
9a585288deadd7020eb61bd7dd61312fe64880a8
|
/BirdCNN/birdConstants.py
|
2f6a02bed6e5b0c72e8e4e888cd591e056458365
|
[] |
no_license
|
arevello/BirdProject
|
d55ce32db69393b7eebc3d9c3598b1ff63a49eb9
|
702b0e7f1a39c903a9c4f74bc46b56ccd70dc3d7
|
refs/heads/master
| 2023-07-29T05:29:29.143529
| 2021-09-08T18:35:18
| 2021-09-08T18:35:18
| 246,415,035
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,100
|
py
|
'''
Created on Jun 8, 2020
@author: Alex
'''
class BirdConstants(object):
#species classifications
HERG = 0
GBBG = 1
COEI = 2
#COEI_M = 2
#COEI_F = 3
TERN = 4
DCCO = 5
CANG = 6
GBHE = 7
LAGU = 8
#unsure of
SNEG = 9
BAEA = 10
GLIB = 11
BCNE = 12
BLGU = 13
ATPU = 14
TernSPP = 15
OTHER = 16
specieStrAll = ["HERG", "GBBG", "COEI", "TERN", "DCCO", "CANG", "GBHE", "LAGU", "SNEG", "BAEA", "GLIB", "BCNE", "BLGU", "ATPU", "Tern spp", "Other"]
specieStrUseful = ["HERG", "GBBG", "DCCO", "COEI", "Tern spp"]
#numSpeciesClass = 9
#behavior classifications
roosting = 0
nesting = 1
flying = 2
numBehaviorClass = 3
# def strToSpecies(self, spcStr):
# idx = 0
# while idx < len(self.specieStrAll):
# if spcStr == self.specieStrAll[idx]:
# return idx
# idx += 1
# print("cant find match for ", spcStr)
# return 16
def __init__(self):
'''
Constructor
'''
|
[
"alexander.revello@maine.edu"
] |
alexander.revello@maine.edu
|
7893ea96f01537030520c994aaa38ba14a74866f
|
259e48ed815719914cce22478361eb34f7d61e88
|
/api/file/urls.py
|
4993a4760cbe8e761f9279c089ab1c5e4661c3d5
|
[] |
no_license
|
YAG19/django-react
|
719ebf73688c51cf817193486364f680f1ca533d
|
0a58d94476b5ab820fcea502c182f743b2c6650b
|
refs/heads/main
| 2023-06-10T17:37:40.666854
| 2021-07-09T13:21:23
| 2021-07-09T13:21:23
| 384,442,207
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 225
|
py
|
from rest_framework import routers
from django.urls import path , include
from . import views
router = routers.DefaultRouter()
router.register(r'',views.ProductViewSet)
urlpatterns = [
path('',include(router.urls)),
]
|
[
"[yagnesh.patel9898@gmail.com]"
] |
[yagnesh.patel9898@gmail.com]
|
a1776bfbc30847148388fcd483940e42351ef4ec
|
a4681043cb56a9ab45be32a62fa9700b391f087f
|
/14-Statistics_with_Python/Histograms/Plotting_a_Histogram.py
|
45ed0cd1e64ea34834f4002801b2e547c54d40c1
|
[] |
no_license
|
MarceloDL-A/Python
|
b16b221ae4355b6323092d069bf83d1d142b9975
|
c091446ae0089f03ffbdc47b3a6901f4fa2a25fb
|
refs/heads/main
| 2023-01-01T02:29:31.591861
| 2020-10-27T19:04:11
| 2020-10-27T19:04:11
| 301,565,957
| 0
| 0
| null | 2020-10-27T19:04:12
| 2020-10-05T23:41:30
|
Python
|
WINDOWS-1252
|
Python
| false
| false
| 2,737
|
py
|
"""
HISTOGRAMS
Plotting a Histogram
At this point, you’ve learned how to find the numerical inputs to a histogram. Thus far the size of our datasets and bins have produced results that we can interpret. This becomes increasingly difficult as the number of bins in a histogram increases.
Because of this, histograms are typically viewed graphically, with bin ranges on the x-axis and counts on the y-axis. The figure below shows the graphical representation of the histogram for our exercise class example from last exercise. Notice, there are five equally-spaced bars, with each displaying a count for an age range. Compare the graph to the table, just below it.
[Histogram figure: bar chart of the age-range counts below]
Age range: 20-29  30-39  40-49  50-59  60-69
Count:         7      4      4      3      2
Histograms are an easy way to visualize trends in your data. When I look at the above graph, I think, “More people in the exercise class are in their twenties than any other decade. Additionally, the histogram is skewed, indicating the class is made of more younger people than older people.”
We created the plot above using the matplotlib.pyplot package. We imported the package using the following code:
from matplotlib import pyplot as plt
We plotted the histogram with the following code. Notice, the range and bins arguments are the same as we used in the last exercise:
plt.hist(exercise_ages, range = (20, 70), bins = 5, edgecolor='black')
plt.title("Decade Frequency")
plt.xlabel("Ages")
plt.ylabel("Count")
plt.show()
In the code above, we used the plt.hist() function to create the plot, then added a title, x-label, and y-label before showing the graph with plt.show().
"""
# Import packages
import codecademylib
import numpy as np
import pandas as pd
"""
At the top of script.py, we’ve imported codecademylib, which is a package that Codecademy uses to plot your histogram in the panel to the right. Don’t worry about this library. Any Python development environment that you may use will take care of this for you.
From matplotlib, import pyplot as plt.
"""
# import pyplot as plt
from matplotlib import pyplot as plt
# Read in transactions data
transactions = pd.read_csv("transactions.csv")
# Save transaction times to a separate numpy array
times = transactions["Transaction Time"].values
"""
Use the plt.hist() function to create a plot for each six-hour period in a day. Use the following range and number of bins.
Range: 0 to 24
Bins: 4
"""
# Use plt.hist() below
plt.hist(times, range = (0, 24), bins = 4, edgecolor='black')
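# With range = (0, 24) and bins = 4, each bar counts transactions in one
# six-hour window: 0-6, 6-12, 12-18, and 18-24.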
"""
Use plt.show() to show the figure.
Feel free to add a title, x-label, and y-label if you want. You can copy the code from the hint as an example.
"""
plt.title("Weekday Frequency of Customers")
plt.xlabel("Hours (1 hour increments)")
plt.ylabel("Count")
plt.show()
|
[
"marcelo.delmondes.lima@usp.br"
] |
marcelo.delmondes.lima@usp.br
|
97aeecb03460947eec33e9b36bbba8f69d437700
|
f2e9eabc8ea32c4381525f8dfb7865aaa98af460
|
/LostLeptonBkg/python/makeLLFromNTuple_cff.py
|
2502d0e77c912eec7ff70d2966c438822893da35
|
[] |
no_license
|
kheine/RA2Classic
|
a75977dc3ae1ce5a51bc5471111c69c00137bfdb
|
0f48e482da6859dad96002ad68fb78b9a56fac57
|
refs/heads/master
| 2020-04-13T18:49:31.530643
| 2013-08-02T13:28:46
| 2013-08-02T13:28:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,205
|
py
|
# $Id: makeEffFromMC_cff.py,v 1.7 2012/12/05 13:10:48 adraeger Exp $
#
import FWCore.ParameterSet.Config as cms
def makeLLFromNTuple(process,
outFileName,
useCHSJets=True,
invertLeptonVeto=False,
NJetsMin=2,
HTMin=500.,
MHTMin=200.,
reportEveryEvt=10,
Global_Tag="",
testFileName=["/store/user/kheine/HT/RA2PreSelectionOnData_Run2012A_HT_PromptReco-v1_v5/71cce229addb17644d40a607fa20b5d7/RA2SkimsOnData_99_3_TPC.root"],
numProcessedEvt=1000):
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
process.GlobalTag.globaltag = Global_Tag
## --- Log output ------------------------------------------------------
process.load("FWCore.MessageService.MessageLogger_cfi")
process.MessageLogger.cerr = cms.untracked.PSet(
placeholder = cms.untracked.bool(True)
)
process.MessageLogger.statistics.append('cout')
process.MessageLogger.cout = cms.untracked.PSet(
INFO = cms.untracked.PSet(reportEvery = cms.untracked.int32(reportEveryEvt))
)
## --- Files to process ------------------------------------------------
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(12)
)
process.source = cms.Source("EmptySource" )
## --- Output file -----------------------------------------------------
process.TFileService = cms.Service(
"TFileService",
fileName = cms.string(outFileName+".root")
)
## --- Selection sequences ---------------------------------------------
# Filter-related selection
process.load('RA2Classic.TreeMaker.filterSelection_cff')
process.load('SandBox.Skims.RA2Leptons_cff')
process.CleaningSelection = cms.Sequence(
process.filterSelection
)
# Filter-related selection
# process.load('RA2Classic.TreeMaker.filterSelection_cff')
# from RecoMET.METFilters.jetIDFailureFilter_cfi import jetIDFailure
# process.PBNRFilter = jetIDFailure.clone(
# JetSource = cms.InputTag('MHTJets'),
# MinJetPt = cms.double(30.0),
# taggingMode = cms.bool(False)
# )
# process.filterSelection += process.PBNRFilter
# from RecoMET.METFilters.multiEventFilter_cfi import multiEventFilter
# process.HCALLaserEvtFilterList2012 = multiEventFilter.clone(
# file = cms.FileInPath('EventFilter/HcalRawToDigi/data/AllBadHCALLaser.txt'),
# taggingMode = cms.bool(False)
# )
# process.filterSelection += process.HCALLaserEvtFilterList2012
# Produce RA2 jets
if useCHSJets:
process.load('RA2Classic.Utils.produceRA2JetsPFCHS_cff')
process.ProduceRA2Jets = cms.Sequence(
process.produceRA2JetsPFCHS
)
else:
process.load('RA2Classic.Utils.produceRA2JetsAK5PF_cff')
process.ProduceRA2Jets = cms.Sequence(
process.produceRA2JetsAK5PF
)
# Select events with at least 'NJetsMin' of the above jets
from PhysicsTools.PatAlgos.selectionLayer1.jetCountFilter_cfi import countPatJets
process.NumJetSelection = countPatJets.clone(
src = cms.InputTag('HTJets'),
minNumber = cms.uint32(NJetsMin)
)
# HT selection
htInputCol = 'htPF'
if useCHSJets:
htInputCol = 'htPFchs'
from SandBox.Skims.RA2HT_cff import htPFFilter
process.HTSelection = htPFFilter.clone(
HTSource = cms.InputTag(htInputCol),
MinHT = cms.double(HTMin)
)
# MHT selection
mhtMin = 0.
mhtInputCol = 'mhtPF'
if useCHSJets:
mhtInputCol = 'mhtPFchs'
from SandBox.Skims.RA2MHT_cff import mhtPFFilter
process.MHTSelection = mhtPFFilter.clone(
MHTSource = cms.InputTag(mhtInputCol),
MinMHT = cms.double(MHTMin)
)
## --- Additional Filters (not tagging mode) ------------------------------
from RecoMET.METFilters.jetIDFailureFilter_cfi import jetIDFailure
process.PBNRFilter = jetIDFailure.clone(
JetSource = cms.InputTag('MHTJets'),
MinJetPt = cms.double(30.0),
taggingMode = cms.bool(False)
)
from RecoMET.METFilters.multiEventFilter_cfi import multiEventFilter
process.HCALLaserEvtFilterList2012 = multiEventFilter.clone(
file =
cms.FileInPath('RA2Classic/LostLeptonBkg/data/HCALLaserEventList_20Nov2012-v2_HT-HTMHT.txt'),
taggingMode = cms.bool(False)
)
process.AdditionalFiltersInTagMode = cms.Sequence(
process.PBNRFilter
)
# process.lostLeptonPrediction = llPrediction()
from RA2Classic.LostLeptonBkg.limit_ll_cfi import Limit_ll
process.limit_ll = Limit_ll.clone()
# process.lostLeptonPrediction = llPrediction()
## --- Final paths ----------------------------------------------------
process.dump = cms.EDAnalyzer("EventContentAnalyzer")
process.WriteTree = cms.Path(
process.limit_ll
# process.RA2TreeMaker
)
|
[
""
] | |
6e6268ff5a363f492e7aee2d497862051ed431f6
|
c13b953c274ea0801ccb37e036a9da98592f9745
|
/source/main/urls.py
|
dc856eec3e1aaaf3ada62a824b5cec955915c52d
|
[] |
no_license
|
Ruslan-dev-1996/python_4_home_work_50_keneshbaev_ruslan
|
911dfaa1c6dc0c0c83f596e0d419051591c27cb5
|
7247d2c81c4c83ea651fedd0b3275a76be5a7057
|
refs/heads/master
| 2023-05-02T18:41:25.848804
| 2019-10-02T23:28:24
| 2019-10-02T23:28:24
| 212,454,652
| 0
| 0
| null | 2023-04-21T20:37:41
| 2019-10-02T22:41:35
|
Python
|
UTF-8
|
Python
| false
| false
| 1,633
|
py
|
"""main URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from webapp.views import IndexView, ArticleView, ArticleCreateView, \
ArticleUpdateView, ArticleDeleteView, CommentCreateView, \
CommentView, CommentUpdateView, CommentDeleteView
urlpatterns = [
path('admin/', admin.site.urls),
path('', IndexView.as_view(), name='index'),
path('article/<int:pk>/', ArticleView.as_view(), name='article_view'),
path('article/add/', ArticleCreateView.as_view(), name='article_add'),
path('article/<int:pk>/edit/', ArticleUpdateView.as_view(), name='article_update'),
path('article/<int:pk>/delete/', ArticleDeleteView.as_view(), name='article_delete'),
path('comment/add/', CommentCreateView.as_view(), name='comment_add'),
path('comment/view', CommentView.as_view(), name='comment_view'),
path('comment/<int:pk>/edit/', CommentUpdateView.as_view(), name='comment_update'),
path('comment/<int:pk>/delete/', CommentDeleteView.as_view(), name='comment_delete'),
]
|
[
"Kasabolotov96.emil.ru"
] |
Kasabolotov96.emil.ru
|
fd2b90837b9a0d5297b449f4e936a93d12867567
|
9144b98606eafd5d5b4cb78f5adbdc012c092d79
|
/LoginApp/myapp/urls.py
|
324d33def053d6a795b63a1871cb6f0fa4130532
|
[] |
no_license
|
chaitrak05/chaitra_django_projects
|
5a48333d162051ef32b13cef179903783cdf8b74
|
0227a55b11eaf75d99c14be5fe867e0b5ea6664e
|
refs/heads/master
| 2022-05-30T21:39:16.514252
| 2020-04-30T05:13:37
| 2020-04-30T05:13:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 189
|
py
|
from django.contrib import admin
from django.urls import path
from .views import HomePageView
from . import views
urlpatterns = [
path('', views.HomePageView.as_view(),name='home'),
]
|
[
"chaitrak05@gmail.com"
] |
chaitrak05@gmail.com
|
8e965ea67180f65424e32935037adc72b6aa873f
|
104388651ccd05c4b6006b64c7ea4a16a858ba24
|
/queues/ended_linked_list.py
|
ff3ff8f5c038498336855802b468bfb592722156
|
[] |
no_license
|
MikeYu123/algorithms-lafore
|
d5ec02d13e267f96e7d084415db13aa1502ca2f8
|
8a59ab7149c4cb6fdc091512eb668f0bfc9f6a7c
|
refs/heads/master
| 2020-03-20T09:11:06.282882
| 2018-07-16T13:28:52
| 2018-07-16T13:28:52
| 137,330,509
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 29
|
py
|
../lists/ended_linked_list.py
|
[
"m.yurchenkov@maximatelecom.ru"
] |
m.yurchenkov@maximatelecom.ru
|
f253133c353abfb770023e56b2d1ec354db1532b
|
de0a605ab85cbc34bebb9638ab3aa681479d90aa
|
/app/modules/__init__.py
|
192744d1a077bad78461cbc9dad860332d954d5b
|
[] |
no_license
|
sbravell/Geocoding-Proxy-service
|
2989d21a2397ca25d35a19b1edf501549349ae8c
|
e2b2714c0863515d0441f9cb5136138eb75baaf7
|
refs/heads/master
| 2020-03-07T19:37:06.614404
| 2017-10-08T06:58:02
| 2017-10-09T20:35:50
| 127,675,867
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 228
|
py
|
from os.path import dirname, basename, isfile
import glob
modules = glob.glob(dirname(__file__)+"/*.py")
__all__ = [ basename(f)[:-3] for f in modules if isfile(f) and not f.endswith('__init__.py') and not f.endswith('base.py')]
|
[
"leo@sh1n.com"
] |
leo@sh1n.com
|
c33b0629a869598b6d3053841bfbc68cbba9368a
|
2fc415dfa31d77d6b396be5ae80752edc93947ee
|
/SimMuL1/test/New_Up/PU140_v2/tmbReadoutEarliest2.py
|
bab9c742a6a3bcbe320fbe70fa69d45f19bc1f4d
|
[] |
no_license
|
jrdimasvalle/GEMCode
|
ca86237cb878fed42d2fef51ba9b7e6cc32bd949
|
983aec67dd114d8fe5a10dd76c3d14cb03f44d9b
|
refs/heads/master
| 2021-01-16T20:46:11.356096
| 2014-06-30T04:37:36
| 2014-06-30T04:37:36
| 20,639,444
| 0
| 0
| null | 2015-07-08T22:25:15
| 2014-06-09T08:27:07
|
C++
|
UTF-8
|
Python
| false
| false
| 6,965
|
py
|
## pick your scenario:
## 1: 2019
## 2: 2019WithGem
## 3: 2023Muon
scenario = 1
## This configuration runs the DIGI+L1Emulator step
import os
import FWCore.ParameterSet.Config as cms
process = cms.Process("MUTRG")
## Standard sequence
process.load('Configuration.StandardSequences.Services_cff')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('SimGeneral.MixingModule.mixNoPU_cfi')
if scenario is 1 or scenario is 2:
process.load('Configuration.Geometry.GeometryExtended2019Reco_cff')
process.load('Configuration.Geometry.GeometryExtended2019_cff')
elif scenario is 3:
process.load('Configuration.Geometry.GeometryExtended2023MuonReco_cff')
process.load('Configuration.Geometry.GeometryExtended2023Muon_cff')
else:
print 'Something wrong with geometry'
process.load('Configuration.StandardSequences.MagneticField_38T_PostLS1_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load("Configuration.StandardSequences.SimL1Emulator_cff")
process.load("Configuration.StandardSequences.L1Extra_cff")
process.load('Configuration.StandardSequences.EndOfProcess_cff')
from Configuration.AlCa.GlobalTag import GlobalTag
if scenario is 1 or scenario is 2:
process.GlobalTag = GlobalTag(process.GlobalTag, 'auto:upgrade2019', '')
elif scenario is 3:
process.GlobalTag = GlobalTag(process.GlobalTag, 'auto:upgradePLS3', '')
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
process.options = cms.untracked.PSet( wantSummary = cms.untracked.bool(True) )
## calibration
from CalibMuon.CSCCalibration.CSCIndexer_cfi import CSCIndexerESProducer
process.CSCIndexerESProducer= CSCIndexerESProducer
from CalibMuon.CSCCalibration.CSCChannelMapper_cfi import CSCChannelMapperESProducer
process.CSCChannelMapperESProducer= CSCChannelMapperESProducer
## input commands
process.source = cms.Source("PoolSource",
duplicateCheckMode = cms.untracked.string('noDuplicateCheck'),
inputCommands = cms.untracked.vstring('keep *_*_*_*'),
fileNames = cms.untracked.vstring('file:out_digi.root')
)
## input
from GEMCode.SimMuL1.GEMCSCTriggerSamplesLib import eosfiles
from GEMCode.GEMValidation.InputFileHelpers import useInputDir
#dataset = '_Nu_SLHC12_2023Muon_PU140'
#dataset = "_pt2-50_SLHC11_2023Muon_PU140"
dataset = 'Digi_PU140'
process = useInputDir(process, eosfiles[dataset], True)
process.source.duplicateCheckMode = cms.untracked.string('noDuplicateCheck')
physics = False
if not physics:
## drop all unnecessary collections
process.source.inputCommands = cms.untracked.vstring(
'keep *_*_*_*',
'drop *_simCscTriggerPrimitiveDigis_*_*',
'drop *_simDtTriggerPrimitiveDigis_*_*',
'drop *_simRpcTriggerDigis_*_*',
'drop *_simCsctfTrackDigis_*_*',
'drop *_simDttfDigis_*_*',
'drop *_simCsctfDigis_*_*',
'drop *_simGmtDigis_*_*',
'drop *_l1extraParticles_*_*'
)
## output commands
theOutDir = ''
theFileName = 'out_L1' + '.root'
process.output = cms.OutputModule("PoolOutputModule",
fileName = cms.untracked.string(theOutDir + theFileName),
outputCommands = cms.untracked.vstring('keep *_*_*_*')
)
physics = False
if not physics:
## drop all unnecessary collections
process.output.outputCommands = cms.untracked.vstring(
'keep *_*_*_*',
# drop all CF stuff
'drop *_mix_*_*',
# drop tracker simhits
'drop PSimHits_*_Tracker*_*',
# drop calorimetry stuff
'drop PCaloHits_*_*_*',
'drop L1Calo*_*_*_*',
'drop L1Gct*_*_*_*',
# drop calorimetry l1extra
'drop l1extraL1Em*_*_*_*',
'drop l1extraL1Jet*_*_*_*',
'drop l1extraL1EtMiss*_*_*_*',
# clean up simhits from other detectors
'drop PSimHits_*_Totem*_*',
'drop PSimHits_*_FP420*_*',
'drop PSimHits_*_BSC*_*',
# drop some not useful muon digis and links
'drop *_*_MuonCSCStripDigi_*',
'drop *_*_MuonCSCStripDigiSimLinks_*',
'drop *SimLink*_*_*_*',
'drop *RandomEngineStates_*_*_*',
'drop *_randomEngineStateProducer_*_*'
)
## custom sequences
process.mul1 = cms.Sequence(
process.SimL1MuTriggerPrimitives *
process.SimL1MuTrackFinders *
process.simRpcTriggerDigis *
process.simGmtDigis *
process.L1Extra
)
process.muL1Short = cms.Sequence(
process.simCscTriggerPrimitiveDigis *
process.SimL1MuTrackFinders *
process.simGmtDigis *
process.L1Extra
)
## define path-steps
shortRun = False
if shortRun:
process.L1simulation_step = cms.Path(process.muL1Short)
else:
process.L1simulation_step = cms.Path(process.mul1)
process.endjob_step = cms.Path(process.endOfProcess)
process.out_step = cms.EndPath(process.output)
## Schedule definition
process.schedule = cms.Schedule(
process.L1simulation_step,
process.endjob_step,
process.out_step
)
## customization
if scenario is 1:
from SLHCUpgradeSimulations.Configuration.combinedCustoms import cust_2019
process = cust_2019(process)
elif scenario is 2:
from SLHCUpgradeSimulations.Configuration.combinedCustoms import cust_2019WithGem
process = cust_2019WithGem(process)
elif scenario is 3:
from SLHCUpgradeSimulations.Configuration.combinedCustoms import cust_2023Muon
process = cust_2023Muon(process)
## some extra L1 customs
process.l1extraParticles.centralBxOnly = cms.bool(True)
process.l1extraParticles.produceMuonParticles = cms.bool(True)
process.l1extraParticles.produceCaloParticles = cms.bool(False)
process.l1extraParticles.ignoreHtMiss = cms.bool(False)
tmbp=process.simCscTriggerPrimitiveDigis.tmbSLHC
tmbp.tmbReadoutEarliest2 = cms.bool(False)
tmbp.tmbCrossBxAlgorithm = cms.uint32(1)
tmbp.matchEarliestClctME11Only = cms.bool(False)
tmbp.tmbDropUsedClcts=cms.bool(False)
tmbp.clctToAlct = cms.bool(False)
tmbp.tmbDropUsedAlcts = cms.bool(True)
tmbp.matchTrigWindowSize = cms.uint32(3)
clctp=process.simCscTriggerPrimitiveDigis.clctSLHC
clctp.clctUseCorrectedBx = cms.bool(True)
alctp=process.simCscTriggerPrimitiveDigis.alctSLHC
alctp.alctUseCorrectedBx = cms.bool(True)
clctp.clctMinSeparation = cms.uint32(5)
clctp.clctPidThreshPretrig = cms.uint32(4)
clctp.useDynamicStateMachineZone = cms.bool(True)
clctp.useDeadTimeZoning = cms.bool(True)
alctp.alctPretrigDeadtime = cms.uint32(0)
alctp.alctNarrowMaskForR1 = cms.bool(True)
alctp.alctGhostCancellationSideQuality = cms.bool(True)
alctp.alctGhostCancellationBxDepth = cms.int32(1)
process.simCscTriggerPrimitiveDigis.commonParam.disableME42 = cms.bool(True)
## messages
print
print 'Input files:'
print '----------------------------------------'
print process.source.fileNames
print
print 'Output file:'
print '----------------------------------------'
print process.output.fileName
print
|
[
"jrdv009@neo.tamu.edu"
] |
jrdv009@neo.tamu.edu
|
cc3a1dfdeb99711758e7df153e707f6f8f8f766d
|
31255e05b44feec469330b5a02c8ff2ba16cbe9e
|
/setup.py
|
0131b45573cdb263046de07429e4b6ff70d8dc1d
|
[] |
no_license
|
marco79423/paji-sdk.py
|
94dfb8fa1a576c3402adc21bcb76c1e05d4f2d30
|
de48b1a9bd2b3b0e6519695077577385818ab54a
|
refs/heads/main
| 2023-06-09T16:16:09.993931
| 2021-07-02T07:05:58
| 2021-07-02T07:05:58
| 382,250,729
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,124
|
py
|
# mypy: ignore_errors
import os
import setuptools
base_dir = os.path.abspath(os.path.dirname(__file__))
REQUIREMENTS = []
with open(os.path.join(base_dir, 'requirements.txt'), encoding='utf-8') as fp:
for line in fp.readlines():
line = line.strip()
if line and not line.startswith('#'):
REQUIREMENTS.append(line)
with open(os.path.join(base_dir, 'README.md'), encoding='utf-8') as fp:
long_description = fp.read()
setuptools.setup(
name="paji-sdk",
version='0.2.2',
author='兩大類',
author_email='marco79423@gmail.com',
url='https://github.com/marco79423/paji-sdk.py',
python_requires='>=3.6',
    description='Python development toolkit',
long_description=long_description,
long_description_content_type='text/markdown',
packages=setuptools.find_packages(),
install_requires=REQUIREMENTS,
include_package_data=True,
classifiers=[
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
)
|
[
"marco79423@gmail.com"
] |
marco79423@gmail.com
|
c50d7f36277dcc42963584dba56133330ce1fabf
|
ac4c02606b84f5f09edc7e48fa44a10621e9ef81
|
/python/detectionformats/stationinfo.py
|
43624ebc54f7595953e0df86eaa319c5a3cf2d5d
|
[
"JSON",
"LicenseRef-scancode-public-domain",
"CC0-1.0",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
jpatton-USGS/earthquake-detection-formats
|
5f8df7e6d543abd1b64624522715663d49d8eddf
|
8d52104c2f093ede1d67a94f43fce51eb172c8cc
|
refs/heads/master
| 2022-07-01T19:28:46.224141
| 2021-06-08T20:40:20
| 2021-06-08T20:40:20
| 69,907,599
| 0
| 0
| null | 2016-10-03T20:18:57
| 2016-10-03T20:18:57
| null |
UTF-8
|
Python
| false
| false
| 7,274
|
py
|
#!/usr/bin/env python
#package imports
import detectionformats.site
import detectionformats.source
#stdlib imports
import json
class StationInfo:
""" StationInfo - a conversion class used to create, parse, and validate
station info data as part of detection data.
"""
# json keys
TYPE_KEY = "Type"
SITE_KEY = "Site"
QUALITY_KEY = "Quality"
ENABLE_KEY = "Enable"
USEFORTELESEISMIC_KEY = "UseForTeleseismic"
INFORMATIONREQUESTOR_KEY = "InformationRequestor"
def __init__(self, newSite=None, newQuality=None, newEnable=None,
newUseForTeleseismic=None, newInformationRequestor=None):
"""Initialize the station info object. Constructs an empty object
if all arguments are None
Args:
newSite: a required detectionformats.site.Site containing the desired
site
newLatitude: a required Number containing the latitude as a float in
degrees
newLongitude: a required Number containing the longitude as a float
in degrees
newElevation: a required Number containing the elevation as a float
newQuality: an optional Number containing the station quality
newEnable: an optional Boolean indicating whether the station should
be used or not
newUseForTeleseismic: an optional Boolean indicating whether the
station should for teleseismic calculations or not
newInformationRequestor: an optional detectionformats.source.Source
containing the source of the information
Returns:
Nothing
Raises:
Nothing
"""
# first required keys
self.type = 'StationInfo'
if newSite is not None:
self.site = newSite
else:
self.site = detectionformats.site.Site()
# second optional keys
if newQuality is not None:
self.quality = newQuality
if newEnable is not None:
self.enable = newEnable
if newUseForTeleseismic is not None:
self.useForTeleseismic = newUseForTeleseismic
if newInformationRequestor is not None:
self.informationRequestor = newInformationRequestor
else:
self.informationRequestor = detectionformats.source.Source()
def fromJSONString(self, jsonString):
"""Populates the object from a json formatted string
Args:
jsonString: a required String containing the json formatted text
Returns:
Nothing
Raises:
Nothing
"""
jsonObject = json.loads(jsonString)
self.fromDict(jsonObject)
def fromDict(self, aDict):
"""Populates the object from a dictionary
Args:
aDict: a required dictionary
Returns:
Nothing
Raises:
Nothing
"""
# first required keys
try:
self.type = aDict[self.TYPE_KEY]
self.site.fromDict(aDict[self.SITE_KEY])
except(ValueError, KeyError, TypeError) as e:
print("Dict format error, missing required keys: %s" % e)
# second optional keys
if self.QUALITY_KEY in aDict:
self.quality = aDict[self.QUALITY_KEY]
if self.ENABLE_KEY in aDict:
self.enable = aDict[self.ENABLE_KEY]
if self.USEFORTELESEISMIC_KEY in aDict:
self.useForTeleseismic = aDict[self.USEFORTELESEISMIC_KEY]
if self.INFORMATIONREQUESTOR_KEY in aDict:
self.informationRequestor.fromDict(aDict[self.INFORMATIONREQUESTOR_KEY])
def toJSONString(self):
"""Converts the object to a json formatted string
Args:
None
Returns:
The JSON formatted message as a String
Raises:
Nothing
"""
jsonObject = self.toDict()
return json.dumps(jsonObject, ensure_ascii=False)
def toDict(self):
"""Converts the object to a dictionary
Args:
None
Returns:
The dictionary
Raises:
Nothing
"""
aDict = {}
# first required keys
try:
aDict[self.TYPE_KEY] = self.type
aDict[self.SITE_KEY] = self.site.toDict()
except(NameError, AttributeError) as e:
print("Missing required data error: %s" % e)
# second optional keys
if hasattr(self, 'quality'):
aDict[self.QUALITY_KEY] = self.quality
if hasattr(self, 'enable'):
aDict[self.ENABLE_KEY] = self.enable
if hasattr(self, 'useForTeleseismic'):
aDict[self.USEFORTELESEISMIC_KEY] = self.useForTeleseismic
if hasattr(self, 'informationRequestor'):
aDict[self.INFORMATIONREQUESTOR_KEY] = self.informationRequestor.toDict()
return aDict
def isValid(self):
"""Checks to see if the object is valid
Args:
None
Returns:
True if the object is valid, False otherwise
Raises:
Nothing
"""
errorList = self.getErrors()
return not errorList
def getErrors(self):
"""Gets a list of object validation errors
Args:
None
Returns:
A List of Strings containing the validation error messages
Raises:
Nothing
"""
errorList = []
# first required keys
try:
if self.type == '':
errorList.append('Empty Type in StationInfo Class.')
elif self.type != 'StationInfo':
errorList.append('Non-StationInfo Type in StationInfo Class.')
except(NameError, AttributeError):
errorList.append('No Type in StationInfo Class.')
try:
if not self.site.isValid():
errorList.append('Invalid Site in StationInfo Class.')
except(NameError, AttributeError):
errorList.append('No Site in StationInfo Class.')
try:
if self.site.latitude < -90 or self.site.latitude > 90:
errorList.append('Latitude in StationInfo Class not in the range of -90 to 90.')
except(NameError, AttributeError):
errorList.append('No Latitude in StationInfo Class.')
try:
if self.site.longitude < -180 or self.site.longitude > 180:
errorList.append('Longitude in StationInfo Class not in the range of -180 to 180.')
except(NameError, AttributeError):
errorList.append('No Longitude in StationInfo Class.')
try:
if self.site.elevation < -550 or self.site.elevation > 8900:
errorList.append('Elevation in StationInfo Class not in the range of -550 to 8900.')
except(NameError, AttributeError):
errorList.append('No Elevation in StationInfo Class.')
# second optional keys
if hasattr(self, 'informationRequestor'):
if not self.informationRequestor.isValid():
errorList.append('Invalid InformationRequestor in StationInfo Class.')
return errorList
|
[
"jpatton@usgs.gov"
] |
jpatton@usgs.gov
|
9d09fc080a0cb3fd2bea907f9b35c1986e5f2c71
|
6d31a9de85ca2f32911a7ae5b69b31611dd2a44b
|
/Prac 09/sort_files_1.py
|
f59c5415b089f0269637b43de24c3bae1dcb55e1
|
[] |
no_license
|
SuriyaaMurali/Practicals
|
77a3dbb20c908e2270a592ae3091a1b10a781d05
|
9399855be2d014c4ed3c77cd7db29f2487cdb851
|
refs/heads/master
| 2022-12-26T11:21:12.027936
| 2020-10-01T06:58:46
| 2020-10-01T06:58:46
| 283,677,094
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 409
|
py
|
import os
def main():
os.chdir("FilesToSort")
for filename in os.listdir('.'):
if os.path.isdir(filename):
continue
extension = filename.split('.')[-1]
try:
os.mkdir(extension)
except FileExistsError:
pass
print("{}/{}".format(extension, filename))
os.rename(filename, "{}/{}".format(extension, filename))
main()
|
[
"suriyaa007car@gmail.com"
] |
suriyaa007car@gmail.com
|
e0e3c59ad146e502a0274b86c741095d6e3ecdd9
|
25adbd31e1e652d47a096e231859262619b0e15a
|
/icekey/utils.py
|
67872e82c459ec974fe3aba270433eae1a127c6f
|
[
"Unlicense"
] |
permissive
|
pixelindigo/icekey
|
eac140bed99be728193803ce29c1fd4dc295d021
|
f88294bc34af8b55bb0e2a768dc5cd86d16a8f89
|
refs/heads/master
| 2020-06-25T09:22:12.951903
| 2019-07-29T16:17:20
| 2019-07-29T16:17:20
| 199,270,430
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 599
|
py
|
def gf_mult(a, b, m):
"""Galois Field multiplication of a by b, modulo m.
Just like arithmetic multiplication, except that additions and
subtractions are replaced by XOR.
"""
res = 0
while b != 0:
if b & 1:
res ^= a
a <<= 1
b >>= 1
if a >= 256:
a ^= m
return res
def gf_exp7(b, m):
"""Galois Field exponentiation.
Raise the base to the power of 7, modulo m.
"""
if b == 0:
return 0
x = gf_mult(b, b, m)
x = gf_mult(b, x, m)
x = gf_mult(x, x, m)
return gf_mult(b, x, m)
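# Added illustration (the modulus is an assumption, not taken from the original
# module): with the AES polynomial 0x11B, gf_mult reproduces the textbook
# product {57} * {83} = {C1}, and gf_exp7 matches six chained multiplications.
if __name__ == '__main__':
    assert gf_mult(0x57, 0x83, 0x11B) == 0xC1
    b7 = 0x02
    for _ in range(6):
        b7 = gf_mult(b7, 0x02, 0x11B)  # b^2, b^3, ..., b^7
    assert gf_exp7(0x02, 0x11B) == b7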
|
[
"1370291+pixelindigo@users.noreply.github.com"
] |
1370291+pixelindigo@users.noreply.github.com
|
0542afffd1b6d7982c976d9431212f0b84581937
|
d8cf93900e6d86240ceb7643fd78bd2841b38152
|
/test/unit_test_g/unittest_simple/setup_teardown_usage.py
|
641da38a953ddd23cef166636e271683b8f2452d
|
[] |
no_license
|
Onebigbera/Daily_Practice
|
165cee0ee7883b90bcf126b23ff993fed0ceffef
|
8f1018a9c1e17c958bce91cbecae88b0bb3c946b
|
refs/heads/master
| 2020-04-09T01:20:48.857114
| 2019-01-03T03:24:59
| 2019-01-03T03:24:59
| 159,900,636
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,840
|
py
|
# -*-coding:utf-8 -*-
# File :setup_teardown_usage.py
# Author:George
# Date : 2018/12/2
"""
setUp 和 tearDown的运用
"""
import unittest
import HTMLTestRunner
import xmlrunner
class TestUsage(unittest.TestCase):
@classmethod
    # Runs once before any test in this class, e.g. back up the database first
def setUpClass(cls):
print('Before class operation perform')
@classmethod
    # Runs once after all tests in this class, e.g. restore the database
def tearDownClass(cls):
print('After class operation perform ')
    # Runs before each test case
def setUp(self):
string = 'before'
        print('---- before each test case ----')
    # Runs after each test case
def tearDown(self):
string = 'after'
        print('---- after each test case ----')
    def testProcess(self):
        print('---- test case 1 ----')
self.assertEqual(1, 1)
def testProcess2(self):
        print('---- test case 2 ----')
self.assertEqual(1, 2)
if __name__ == "__main__":
    # Step 1: create a test suite
suite = unittest.TestSuite()
    # Step 2: add test classes or methods to the suite
suite.addTest(unittest.makeSuite(TestUsage))
    # Add a single test method like this:
# suite.addTest(TestUsage('testprocess'))
    # Step 3: create an HTML (or XML) report runner
fw = open(r'F:\Python_guide\Daily_Practice\test\unit_test_g\unittest_simple\test_report\setUp_tearDown_usage.html', 'wb')
runner = HTMLTestRunner.HTMLTestRunner(stream=fw, title='usage_setUp_tearDown', description='how it work')
    # XML runner alternative:
# runner = xmlrunner.XMLTestRunner(output=r'F:\Python_guide\Daily_Practice\test\unit_test_g\unittest_simple\test_report')
    # Step 4: run the suite with the runner
runner.run(suite)
    # Step 5: run this script from the command line
|
[
"2578288992@qq.com"
] |
2578288992@qq.com
|
cefef0af30e3c643b07b5203e304558f983b808f
|
00ea72326a1f559e72a6512dad21812cab3a1714
|
/1-12-2020/puzzle1/resolve.py
|
8af0ce419eff2b7daccab5a46e52193386f1bf4d
|
[] |
no_license
|
Jnsll/AdventOfCode2020
|
5dcfdd2225d7fc06c16a5e22c30b60198dc3bd02
|
41b6558e157913bc6b3e23b5cfb2cfd893540249
|
refs/heads/main
| 2023-01-21T18:45:28.065686
| 2020-12-06T11:39:23
| 2020-12-06T11:39:23
| 317,467,429
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 489
|
py
|
import pandas as pd
expense_report = pd.read_csv("input", header=None)
expense_report.columns = ["expense"]
print(expense_report)
for i in range(len(expense_report)):
for j in range(i+1, len(expense_report)):
if expense_report.iloc[i, 0] + expense_report.iloc[j, 0] == 2020:
answer = expense_report.iloc[i, 0] * expense_report.iloc[j, 0]
print(expense_report.iloc[i, 0], expense_report.iloc[j, 0])
break
print(answer)
|
[
"june.benvegnu-sallou@irisa.fr"
] |
june.benvegnu-sallou@irisa.fr
|
e1057c349330190300117c83a27784ae8eb0df29
|
7d4124c4d98a9ea1f2fc06a5ab9519f7586d7a42
|
/MESSAGE/RECC_Paths.py
|
0a8a2a29edb74a58dac935d26dae707bf559366b
|
[] |
no_license
|
SteffiKlose/OMli
|
3d5c5f916f23019cee45e29ddcb2a17377211204
|
36dfb48225763e6588a83cc1655570add005f34c
|
refs/heads/master
| 2020-06-23T15:05:45.865791
| 2019-08-18T14:44:14
| 2019-08-18T14:44:14
| 198,657,836
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 931
|
py
|
'''
This is the RECC model path file. RECC will use the paths specified here to search for the necessary data and modules.
'''
### Start of path file ###
### These paths are for Steffi's windows machine ###
current = 'C:\\Users\\sklose\\Documents\\ODYM-RECC-Repos\\RECC-Cu-Repo\\ODYM-RECC Cu'
odym_path = 'C:\\Users\\sklose\\Documents\\ODYM-RECC-Repos\\ODYM-Repo\\ODYM\\odym\\modules\\'
data_path = 'C:\\Users\\sklose\\Documents\\ODYM-RECC-Repos\\RECC-Cu-Repo\\ODYM-RECC Cu\\data\\CURRENT'
results_path = 'C:\\Users\\sklose\\Documents\\ODYM-RECC-Repos\\RECC-Cu-Repo\\ODYM-RECC Cu\\RECC_Results'
rawdata_pathMESSAGE = 'C:\\Users\\sklose\\Documents\\ODYM-RECC-Repos\\RECC-Cu-Repo\\ODYM-RECC Cu\\MESSAGE'
rawdata_pathIMAGE = 'C:\\Users\\sklose\\Documents\\ODYM-RECC-Repos\\RECC-Cu-Repo\\ODYM-RECC Cu\\IMAGE'
rawdata_path = 'C:\\Users\\sklose\\Documents\\ODYM-RECC-Repos\\RECC-Cu-Repo\\ODYM-RECC Cu'
### End of path file ###
|
[
"stefanie.klose@indecol.uni-freiburg.de"
] |
stefanie.klose@indecol.uni-freiburg.de
|
5ae349c7e0f77dd771ef3a1f0957256772a92f70
|
91b29aa5a0f852cb89083d8b87e3cea2f1a7e08a
|
/noel.py
|
ba867b69fcb8c590823d1f109085b0ef52ae9797
|
[] |
no_license
|
Blakeh37/cse210-tc03
|
9c9c2a7a12d2915aa8c0ff202ada0102d3605b8d
|
94eb02961c25fdf406cf3a7e3e08632b583a50c1
|
refs/heads/main
| 2023-07-18T21:29:16.840644
| 2021-10-02T15:40:26
| 2021-10-02T15:40:26
| 412,800,794
| 0
| 0
| null | 2021-10-02T14:40:14
| 2021-10-02T13:14:08
| null |
UTF-8
|
Python
| false
| false
| 62
|
py
|
def greet():
print("Welcome to our collaborative project")
|
[
"noe21002@byui.edu"
] |
noe21002@byui.edu
|
3b39d6fbe492f80432018b1df6de5e0694515e92
|
8f268d06ab6be7a5910a4f68753c20dfc41ec5c4
|
/json_parser/screen_data_parser.py
|
c6385eb3eda3ffd363587865a8325b9b50a1e651
|
[] |
no_license
|
Klooskie/GraphicsGenerator
|
5b5ad62cb95d9ef39067b14008613986fad48b1a
|
51be945c244b5a99bfca26eb1aa6422d8a992936
|
refs/heads/master
| 2020-03-18T10:37:01.779634
| 2019-03-26T23:10:31
| 2019-03-26T23:10:31
| 134,623,029
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,402
|
py
|
from pygame import display
from .colors_data_parser import format_color
class ScreenData:
def __init__(self, file_content, color_palette):
self._parse_screen_data(file_content, color_palette)
def generate_screen(self):
screen = display.set_mode(self._size)
screen.fill(self._bg_color)
return screen
def _parse_screen_data(self, file_content, color_palette):
screen_parameters = file_content['Screen']
if 'width' in screen_parameters.keys():
width = screen_parameters['width']
else:
print('Using default screen width of 500 pixels')
width = 500
if 'height' in screen_parameters.keys():
height = screen_parameters['height']
else:
print('Using default screen height of 500 pixels')
height = 500
self._size = (width, height)
if 'bg_color' in screen_parameters.keys():
self._bg_color = format_color(screen_parameters['bg_color'], color_palette)
else:
print('Using default background color - white')
self._bg_color = (255, 255, 255)
if 'fg_color' in screen_parameters.keys():
self.fg_color = format_color(screen_parameters['fg_color'], color_palette)
else:
print('Using default foreground color - black')
self.fg_color = (0, 0, 0)
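# For reference (an assumed illustration, not taken from the original project),
# the parser above expects JSON shaped roughly like:
#   {"Screen": {"width": 800, "height": 600,
#               "bg_color": <color>, "fg_color": <color>}}
# where the color values are whatever colors_data_parser.format_color accepts;
# any missing key falls back to the defaults printed above.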
|
[
"kuba.koniecznyxx@gmail.com"
] |
kuba.koniecznyxx@gmail.com
|
af076219a10cff760841a83970ec79b7faa08010
|
473f138db6d5f27007453c5550544663abefe992
|
/1_Cipher.py
|
fb68bb3a9a012ae2ca1f65cfb3c93141b093b727
|
[
"MIT"
] |
permissive
|
Jaber-Valinejad/Simple-Ciphers-Cryptanalysis
|
fbae71d0ac24e8ab7607a347224e28ef3bd10fb8
|
9456196e9f3699682fd8257b31e9136ff813e8b0
|
refs/heads/main
| 2023-09-05T11:35:53.171472
| 2021-09-29T02:31:38
| 2021-09-29T02:31:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,210
|
py
|
import numpy as np
Letter=['a','b', 'c', 'd', 'e', 'f','g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
numb_letter=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25]
def Convert_to_letter(mat):
mat=Round_fun(mat)
MM=np.chararray((mat.shape[0], mat.shape[1]), unicode=True)
for i in range(mat.shape[0]):
for j in range(mat.shape[1]):
MM[i][j]=Letter[int(mat[i][j])]
return MM
def num_char(X):
X=X.upper()
return ord(X)-65
def text_to_num(Y):
Y=Y.replace(' ','')
return [num_char(i) for i in Y]
def Mat_text(text,n_key):
if len(text) % n_key != 0:
for i in range(0, len(text)):
text.append(text[i])
if len(text) % n_key == 0:
break
K=0
Out_text=np.zeros(( (len(text)// n_key) ,n_key ))
for i in range(len(text)// n_key):
for j in range(n_key):
Out_text[i][j] =text[K]
K=K+1
return Out_text
def Round_fun(mat):
for i in range(mat.shape[0]):
for j in range(mat.shape[1]):
mat[i][j]=round(mat[i][j],1)
return mat
def Multiplicative_inverse(determinant):
multiplicative_inverse = -1
for i in range(26):
inverse = determinant * i
if inverse % 26 == 1:
multiplicative_inverse = i
break
return multiplicative_inverse
def inverse_mat(key):
if np.linalg.det(key) == 0: print('Key is not invertible')
else:
det=round(np.linalg.det(key)%26)
det=Multiplicative_inverse(det)
if det == -1:
print("Determinant is not relatively prime to 26, uninvertible matrix")
Adj=(np.linalg.inv(key) *np.linalg.det(key) ) %26
return Round_fun(Adj*det)%26
def Text_encryption(text,key):
n_key=key.shape[0]
Matrix_text=Mat_text(text_to_num(text),n_key)
Matrix_tex=np.matmul(Matrix_text,key)
return Matrix_tex%26,Convert_to_letter(Matrix_tex%26)
def Text_Decryption(cipher_text,key):
n_key=key.shape[0]
    key_inverse = inverse_mat(key)
Matrix_tex=np.matmul(cipher_text,key_inverse)
return Matrix_tex%26,Convert_to_letter(Matrix_tex%26)
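# Added illustration (not part of the original file): a round-trip sanity check
# with the classic invertible 2x2 Hill-cipher key [[3, 3], [2, 5]] (det = 9,
# coprime to 26), so decryption should recover the plaintext letters.
if __name__ == '__main__':
    key = np.array([[3, 3], [2, 5]])
    cipher_nums, cipher_letters = Text_encryption('HELP', key)
    plain_nums, plain_letters = Text_Decryption(cipher_nums, key)
    print(cipher_letters)  # encrypted 2x2 block of letters
    print(plain_letters)   # should read h e l p again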
|
[
"noreply@github.com"
] |
Jaber-Valinejad.noreply@github.com
|
3925ced5ea1d9889bfb4e137465e8102dae3f9ab
|
35d2bbd3813a3d5caf0c3bfb077a8fc3f3295dfb
|
/URI/uri_1011.py
|
819970b60be549a4ac5dffdfcfc376ae81e95005
|
[] |
no_license
|
carlosMachado1/carlosMachado1
|
e32283c841f7067e370f56ce535a01f55b21b906
|
19973a1e12d8511082f5b3a9be715e4164ce301d
|
refs/heads/main
| 2023-03-17T04:06:01.604381
| 2021-03-11T00:54:42
| 2021-03-11T00:54:42
| 341,776,039
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 121
|
py
|
raio = float(input())
pi = 3.14159
vol_esfera = (4 / 3) * pi * (raio ** 3)
print("VOLUME = {:.3f}".format(vol_esfera))
|
[
"cgdsm.eng18@uea.edu.br"
] |
cgdsm.eng18@uea.edu.br
|
fc7ab6c5b42fba3b85367fd1d7fa8adb2fcdd4f1
|
b6250655508f4b4f5b37edaf6963092588586b8b
|
/translateBatchToENG3.py
|
1fe988292819c46a0ee2bc932ccce54cdcfb45a3
|
[] |
no_license
|
E4RTTH/BigBirdNDSC2019
|
9953f37513b13bcd32fbec10d90f9b78eff4d733
|
bb3673f6d9ac395b3a2813769d53cd38454afaa8
|
refs/heads/master
| 2021-10-24T01:34:02.075613
| 2019-03-21T08:56:41
| 2019-03-21T08:56:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,828
|
py
|
import pandas as pd
import textblob
import re
# Create a function called "chunks" with two arguments, l and n:
def chunks(l, n):
# For item i in a range that is a length of l,
for i in range(0, len(l), n):
# Create an index range for l of n items:
yield l[i:i+n]
dataset = pd.read_csv('mobile_data_info_train_competition.csv', quoting = 3)
titles = dataset['title'].values.tolist()
titles_eng = []
chunks = list(chunks(titles, 10000))
print("length chunk=", len(chunks))
for i in range(0, len(chunks) // 3):
eng_chunk = []
print("processing chunk num ", i)
for i, title in enumerate(chunks):
try:
test = str(textblob.TextBlob(title).translate(from_lang='id', to="en"))
except Exception:
test = title
title = re.sub('[^a-zA-Z0-9\.]', ' ', title)
eng_chunk.append(test)
titles_eng.extend(eng_chunk)
df = pd.DataFrame(data={'eng': titles_eng})
df.to_csv('mobile_data_info_train_competition_eng2a.csv', index=False)
dataset['title_eng'] = titles_eng
dataset.to_csv('mobile_data_info_train_competition_eng2.csv', index=False)
dataset = pd.read_csv('mobile_data_info_train_competition.csv', quoting = 3)
titles = dataset['title'].values
titles_eng = []
title_chunks = list(chunks(titles, 100))
for i in range(39, len(title_chunks)):
    # translate each title in the chunk with TextBlob, as in the first pass
    title_eng = [str(textblob.TextBlob(title).translate(from_lang='id', to="en")) for title in title_chunks[i]]
    title_eng = [re.sub('[^a-zA-Z0-9]', ' ', title) for title in title_eng]
    titles_eng.append(title_eng)
engdf = pd.read_csv('mobile_data_info_train_competition_eng.csv', quoting = 3)
eng = engdf['eng'].values.tolist()
flat_list = [item for sublist in titles_eng for item in sublist]
eng.extend(flat_list)
df = pd.DataFrame(data={'eng': eng})
df.to_csv('mobile_data_info_train_competition_eng.csv', index=False)
|
[
"jiayuan.chia@besi.com"
] |
jiayuan.chia@besi.com
|
5d36c31abe62d3bc24967d257bd7acde33fa81c8
|
c38301f203d4af89c1d10c9cfe6626ef7666ac19
|
/extensions/info_commands.py
|
27a04c9258c04c687f6c4110d31138719730cce7
|
[] |
no_license
|
matthew-lowe/RoboJosh
|
a9551152507dac5fefbbf480aff8410eb928809a
|
ec8f09a221d3c2e8cecc31ba65623aa3920e6464
|
refs/heads/master
| 2022-07-03T16:40:43.249517
| 2020-05-09T18:32:20
| 2020-05-09T18:32:20
| 255,172,347
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,325
|
py
|
import datetime
import discord
from discord.ext import commands
class InfoCommands(commands.Cog):
def __init__(self, bot):
self.bot = bot
# Displays the avatar
@commands.command(help="Show the avatar of a user", usage=";avatar [user]")
async def avatar(self, ctx, target=None):
utils = self.bot.get_cog("Utils")
# Set the target user if given, else message author
user = utils.get_target(ctx, target)
if user is None:
await ctx.send("`Invalid user! Please tag a member of the server`")
return
# URL Discord stores avatars in
url = "https://cdn.discordapp.com/avatars/{0.id}/{0.avatar}.png?size=1024".format(user)
# Send avatar as a swanky embed
embed = discord.Embed(title="Avatar URL", url=url, description=user.name + '#' + user.discriminator)
embed.set_image(url=url)
utils.style_embed(embed, ctx)
await ctx.send(embed=embed)
# Gives some info about a user
@commands.command(help="Get information about a user", usage=";info [user]")
async def info(self, ctx, target=None):
utils = self.bot.get_cog("Utils")
# Set the target user if given, else message author
user = utils.get_target(ctx, target)
if user is None:
await ctx.send("`Invalid user! Please tag a member of the server`")
return
member = ctx.guild.get_member(user.id)
author_avatar = "https://cdn.discordapp.com/avatars/{0.id}/{0.avatar}.png?size=1024".format(ctx.author)
user_avatar = "https://cdn.discordapp.com/avatars/{0.id}/{0.avatar}.png?size=1024".format(user)
        # Omits the first value (@everyone)
member_roles = member.roles[1:]
role_string = ""
for role in member_roles:
role_string += f"<@&{role.id}> "
embed = discord.Embed(title=f"{user.name}#{user.discriminator}")
embed.add_field(name="User ID:", value=user.id, inline=True)
embed.add_field(name="Display name:", value=member.display_name, inline=True)
embed.add_field(name="Account Created:", value=user.created_at.strftime('%A %d %b %Y, %I:%M %p'), inline=False)
embed.add_field(name="Guild Join Date:", value=member.joined_at.strftime('%A %d %b %Y, %I:%M %p'), inline=False)
embed.add_field(name="Server Roles:", value=role_string, inline=False)
embed.set_thumbnail(url=user_avatar)
utils.style_embed(embed, ctx)
await ctx.send(embed=embed)
def setup(bot):
bot.add_cog(InfoCommands(bot))
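# Hedged usage note (illustrative): with discord.py's extension system this cog is
# typically registered from the bot's entry point, e.g.
#     bot.load_extension("extensions.info_commands")
# where the module path is an assumption based on this file's location in the repo.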
|
[
"jedijasper2004@gmail.com"
] |
jedijasper2004@gmail.com
|
59ee46640c67428e82b8c1ce5e8430455253579e
|
d1c55aa9a65bad122aeb2a0fcdf11c8fa4d76997
|
/divvy/app/models.py
|
61098cd5d7aff474311cce333213b3aee36f8e28
|
[] |
no_license
|
CristinaGradinaru/DivvyChallange
|
b62dc47afa06363ac9b85d4140bf87aac5578050
|
217b33f86e51fc045a738fc8d775611e32fc3450
|
refs/heads/master
| 2023-03-31T16:43:46.868716
| 2021-04-10T20:10:45
| 2021-04-10T20:10:45
| 356,683,538
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,084
|
py
|
from app import db
from datetime import datetime
class Divvy(db.Model):
trip_id= db.Column(db.Integer, primary_key = True)
starttime= db.Column(db.DateTime, nullable= False)
stoptime= db.Column(db.DateTime, nullable= False)
bikeid= db.Column(db.Integer, nullable=False)
from_station_id = db.Column(db.Integer)
from_station_name = db.Column(db.String)
to_station_id = db.Column(db.Integer)
to_station_name = db.Column(db.String)
usertype = db.Column(db.String)
gender = db.Column(db.String)
birthday = db.Column(db.String)
trip_duration = db.Column(db.Integer)
    def __init__(self, trip_id, starttime, stoptime, bikeid, from_station_id,
                 from_station_name, to_station_id, to_station_name, usertype,
                 gender, birthday, trip_duration):
        self.trip_id = trip_id
        self.starttime = starttime
        self.stoptime = stoptime
        self.bikeid = bikeid
        self.from_station_id = from_station_id
        self.from_station_name = from_station_name
        self.to_station_id = to_station_id
        self.to_station_name = to_station_name
        self.usertype = usertype
        self.gender = gender
        self.birthday = birthday
        self.trip_duration = trip_duration
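# Hedged usage sketch (illustrative values only, not part of the original model):
# trip = Divvy(trip_id=1, starttime=datetime(2019, 7, 1, 8, 0),
#              stoptime=datetime(2019, 7, 1, 8, 30), bikeid=42,
#              from_station_id=10, from_station_name="State St",
#              to_station_id=12, to_station_name="Clark St",
#              usertype="Subscriber", gender="Female", birthday="1990",
#              trip_duration=1800)
# db.session.add(trip)
# db.session.commit()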
|
[
"cristinagradinaru90@gmail.com"
] |
cristinagradinaru90@gmail.com
|
f0f015d200157c4bc2421548d0d2b2ed31545e38
|
29d08e80ba14e903e95c92b91fc6a9019570d7c5
|
/Datasets/SmallNorbLoader.py
|
0254ecaf19eab4b878f0eb96e408dacd0a40d87b
|
[
"Apache-2.0"
] |
permissive
|
puzzlelib/PuzzleLib
|
0d576eaad761a90490450efc41d3253019006bfd
|
73a14457e4d8afc60fea331556581b641a34d125
|
refs/heads/master
| 2022-12-06T19:42:02.041605
| 2022-11-24T10:09:45
| 2022-11-24T10:09:45
| 243,770,185
| 59
| 9
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,988
|
py
|
import os, struct
import numpy as np
import h5py
from PIL import Image
from PuzzleLib.Datasets.DataLoader import DataLoader
class SmallNorbLoader(DataLoader):
def __init__(self, onSample=None, sampleInfo=None, cachename=None):
super().__init__(("data", "labels", "info"), "smallnorb.hdf" if cachename is None else cachename)
self.sampleInfo = lambda: (np.float32, (28, 28)) if sampleInfo is None else sampleInfo
self.onSample = lambda sample: np.array(Image.fromarray(sample).resize((28, 28))) \
if onSample is None else onSample
self.testdata = "smallnorb-5x01235x9x18x6x2x96x96-testing-dat.mat"
self.testlabels = "smallnorb-5x01235x9x18x6x2x96x96-testing-cat.mat"
self.testinfo = "smallnorb-5x01235x9x18x6x2x96x96-testing-info.mat"
self.traindata = "smallnorb-5x46789x9x18x6x2x96x96-training-dat.mat"
self.trainlabels = "smallnorb-5x46789x9x18x6x2x96x96-training-cat.mat"
self.traininfo = "smallnorb-5x46789x9x18x6x2x96x96-training-info.mat"
self.nlabels = 5
self.ninstances = 10
self.nelevs = 9
self.nazimuths = 18
self.nlights = 6
def load(self, path, sort=False, compress="gzip", log=True, onlyTest=False):
self.cachename = os.path.join(path, self.cachename)
if not os.path.exists(self.cachename):
if log:
print("[%s] Started unpacking ..." % self.__class__.__name__)
data, labels, info = None, None, None
files = [self.testdata] if onlyTest else [self.traindata, self.testdata]
for filename in files:
with open(os.path.join(path, filename), "rb") as file:
magic, ndim = struct.unpack("<ii", file.read(8))
dims = struct.unpack("<" + "i" * max(ndim, 3), file.read(max(ndim, 3) * 4))
trueMagic = 0x1E3D4C55
if magic != trueMagic:
raise ValueError("Bad magic number (got 0x%x, expected 0x%x)" % (magic, trueMagic))
indata = np.fromfile(file, dtype=np.uint8).reshape(*dims)
dtype, reqdims = self.sampleInfo()
outdata = np.empty(dims[:2] + reqdims, dtype=dtype)
for i in range(dims[0]):
for j in range(dims[1]):
outdata[i, j] = self.onSample(indata[i, j])
if (i + 1) % 100 == 0 and log:
print("[%s] Unpacked %s pairs out of %s" % (self.__class__.__name__, i + 1, dims[0]))
data = outdata if data is None else np.vstack((data, outdata))
            for filename in ([self.testlabels] if onlyTest else [self.trainlabels, self.testlabels]):
with open(os.path.join(path, filename), "rb") as file:
magic, ndim = struct.unpack("<ii", file.read(8))
struct.unpack("<" + "i" * max(ndim, 3), file.read(max(ndim, 3) * 4))
trueMagic = 0x1E3D4C54
if magic != trueMagic:
raise ValueError("Bad magic number (got 0x%x, expected 0x%x)" % (magic, trueMagic))
inlabels = np.fromfile(file, dtype=np.uint32)
labels = inlabels if labels is None else np.concatenate((labels, inlabels))
            for filename in ([self.testinfo] if onlyTest else [self.traininfo, self.testinfo]):
with open(os.path.join(path, filename), "rb") as file:
magic, ndim = struct.unpack("<ii", file.read(8))
dims = struct.unpack("<" + "i" * max(ndim, 3), file.read(max(ndim, 3) * 4))
trueMagic = 0x1E3D4C54
if magic != trueMagic:
raise ValueError("Bad magic number (got 0x%x, expected 0x%x)" % (magic, trueMagic))
ininfo = np.fromfile(file, dtype=np.uint32).reshape(dims[:2])
info = ininfo if info is None else np.vstack((info, ininfo))
if sort:
data, labels, info = self.sortDataset(data, labels, info, log=log)
print("[%s] Building cache ..." % self.__class__.__name__)
with h5py.File(self.cachename, "w") as hdf:
dsetname, lblsetname, infosetname = self.datanames
hdf.create_dataset(dsetname, data=data, compression=compress)
hdf.create_dataset(lblsetname, data=labels, compression=compress)
hdf.create_dataset(infosetname, data=info, compression=compress)
hdf = h5py.File(self.cachename, "r")
dsetname, lblsetname, infosetname = self.datanames
return hdf[dsetname], hdf[lblsetname], hdf[infosetname]
def sortDataset(self, data, labels, info, log=True):
shape = (self.nlabels, self.ninstances, self.nlights, self.nelevs, self.nazimuths)
sortdata = np.empty(shape + data.shape[1:], dtype=np.float32)
sortlabels = np.empty(shape, dtype=np.uint32)
sortinfo = np.empty(shape + info.shape[1:], dtype=np.uint32)
if log:
print("[%s] Started sorting dataset ..." % self.__class__.__name__)
for i in range(data.shape[0]):
instance, elev, azimuth, light = info[i]
label = labels[i]
sortdata[label, instance, light, elev, azimuth // 2] = data[i]
            sortlabels[label, instance, light, elev, azimuth // 2] = label
            sortinfo[label, instance, light, elev, azimuth // 2] = info[i]
if log and (i + 1) % 100 == 0:
print("[%s] Sorted %s pairs out of %s" % (self.__class__.__name__, i + 1, data.shape[0]))
return sortdata, sortlabels, sortinfo
def unittest():
smallnorb = SmallNorbLoader()
smallnorb.load(path="../TestData/", sort=True, onlyTest=True)
smallnorb.clear()
if __name__ == "__main__":
unittest()
|
[
"psukhachev@yahoo.com"
] |
psukhachev@yahoo.com
|
c98d95ce2a2eee2b73af1f2353638fe7a1da1ca2
|
7b36edbd77315c7d53a3c98dba9b5c8a4b4449a4
|
/Chapter4/4-3.py
|
89cfd5d8b33ae5b35aae4ba71a21d34aa30770c9
|
[] |
no_license
|
chenfancy/PAT_Python
|
974ffde2b5c67e8b706b29696e9605279e30dd77
|
f9bb1c2f39246d7c70433d4880f4d57d3dfbd0a7
|
refs/heads/master
| 2022-03-30T06:29:23.665693
| 2020-01-28T14:10:55
| 2020-01-28T14:10:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 66
|
py
|
# Read n and print the n-th term of the sequence x(1)=1, x(k+1)=2*(x(k)+1): 1, 4, 10, 22, ...
n = int(input())
x = 1
for _ in range(1, n):
    x = (x + 1) * 2
print(x)
|
[
"han@163.com"
] |
han@163.com
|
2860d75f902b1c7deae9502759161e1be8413369
|
dff19113a90e93db18c09b05bd94cfb41d917fb1
|
/build_train_dataset.py
|
4b444e465739bbd6b14bdf0fbabe5cfc650a79e3
|
[] |
no_license
|
johncuicui/grapeMRCNN
|
ec97cb070f3ba2ab7f3fd952d023a5401c38d579
|
2eea60ad5c212275f97b6e535ae4ce39efcfccb5
|
refs/heads/master
| 2020-08-22T19:02:13.891261
| 2019-10-22T08:17:56
| 2019-10-22T08:17:56
| 216,461,939
| 4
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,749
|
py
|
import os
import numpy as np
import random
import shutil
def create_dataset(dataset_folder,data_folder,image_list):
for image in image_list:
image_src_path=data_folder+image+'.jpg'
image_dst_path=dataset_folder+os.sep+image+'.jpg'
shutil.copy2(image_src_path,image_dst_path)
bbox_src_path = data_folder + image + '.txt'
bbox_dst_path = dataset_folder +os.sep+image+'.txt'
shutil.copy2(bbox_src_path, bbox_dst_path)
mask_src_path = data_folder + image + '.npz'
mask_dst_path = dataset_folder + os.sep + image + '.npz'
shutil.copy2(mask_src_path, mask_dst_path)
data_folder='D:/DLCode/wgisd/data/'
train_masked_path ='D:/DLCode/wgisd/train_masked.txt'
ROOT_DIR = os.path.abspath(".")
print(ROOT_DIR)
# load the names of the images
with open(train_masked_path, 'r') as fp:
data_list = fp.readlines()
data_list = set([i[:-1] for i in data_list])
# split
data_list=sorted(data_list)
random.shuffle(data_list)
i = int(len(data_list) * 0.8)
data_list_train = data_list[:i]
data_list_val = data_list[i:]
#create dataset folder
dataset_folder= os.path.sep.join([ROOT_DIR,"dataset"])
if not os.path.exists(dataset_folder):
os.makedirs(dataset_folder)
#build train dataset
dataset_folder_train= os.path.sep.join([dataset_folder,"train"])
if not os.path.exists(dataset_folder_train):
os.makedirs(dataset_folder_train)
create_dataset(dataset_folder_train,data_folder,data_list_train)
# build Validation dataset
dataset_folder_val= os.path.sep.join([dataset_folder,"val"])
if not os.path.exists(dataset_folder_val):
os.makedirs(dataset_folder_val)
create_dataset(dataset_folder_val,data_folder,data_list_val)
#for i in data_list:
# print(i)
print("\ntrain:{},val:{}".format(len(data_list_train),len(data_list_val)))
|
[
"johncuicui@163.com"
] |
johncuicui@163.com
|
914e091332b302fa954ad8c9bb449d1ce9798f9e
|
280847f527e7064c6e767ec60c5017ab6ddd94eb
|
/catkin_ws/build/my_first_topic/cmake/my_first_topic-genmsg-context.py
|
c8600df1b9a5b8c9d5fc9baf38013503ed615209
|
[] |
no_license
|
WiloSensei07/ROS
|
b268e5c2ab09fc16688530ac8ec94b7bb3a0428d
|
b0c5bb0430c39bb4a519a0cfc9b79c06f4ace5ab
|
refs/heads/main
| 2023-05-23T23:11:26.555642
| 2021-06-09T10:12:29
| 2021-06-09T10:12:29
| 375,308,827
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 577
|
py
|
# generated from genmsg/cmake/pkg-genmsg.context.in
messages_str = "/home/wilo/catkin_ws/src/my_first_topic/msg/position.msg"
services_str = ""
pkg_name = "my_first_topic"
dependencies_str = "std_msgs"
langs = "gencpp;geneus;genlisp;gennodejs;genpy"
dep_include_paths_str = "my_first_topic;/home/wilo/catkin_ws/src/my_first_topic/msg;std_msgs;/opt/ros/melodic/share/std_msgs/cmake/../msg"
PYTHON_EXECUTABLE = "/usr/bin/python2"
package_has_static_sources = '' == 'TRUE'
genmsg_check_deps_script = "/opt/ros/melodic/share/genmsg/cmake/../../../lib/genmsg/genmsg_check_deps.py"
|
[
"sinkamwilfried@gmail.com"
] |
sinkamwilfried@gmail.com
|
af827d4a20d673b71ec3580e040f84acab99e937
|
d2af7c3c2ccd8fb87e5f593658d466d07e310c21
|
/lesson08/exception/ExceptDemo3.py
|
844e12514f0218c8139e33b2b8f2835b5ccc72d0
|
[] |
no_license
|
yuan018/yzu_python
|
cd64131bdf3f923b9b68c34350b181d6f4f0da11
|
f72088834814f4843fe0b2407b926ccc23302a96
|
refs/heads/master
| 2021-05-26T10:22:59.021685
| 2020-06-03T13:31:27
| 2020-06-03T13:31:27
| 254,094,610
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 484
|
py
|
def input_number():
x = 10
try:
        y = int(input('Please enter a number: '))
z = x / y
except ZeroDivisionError as e:
        print('The denominator cannot be 0, please enter again~', e)
input_number()
except ValueError as e:
        print('Invalid input, please enter again~', e)
input_number()
except Exception as e:
print("發生了一個我料想不到的錯誤,", e)
else:
print(z)
if __name__ == '__main__':
input_number()
|
[
"kakab45@gmail.com"
] |
kakab45@gmail.com
|
9623c677ffe0f82fb1e9d27cfa80474221f3c11d
|
5df24c960d03f6b569247dc625c49116bb517fb7
|
/logica/CampanaPrevencion.py
|
9684da2bd17996654b9265580f35fb1a0a0f804a
|
[] |
no_license
|
BryanTabarez/ProyectoBD
|
7a60e4ab0cc2a5200bceebfefdcdf24258d73107
|
ed9f42aaa390b695004e3acfd7afc669968df22c
|
refs/heads/master
| 2021-01-18T01:43:35.990092
| 2015-10-31T03:31:16
| 2015-10-31T03:31:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,211
|
py
|
class CampanaPrevencion():
    """Class CampanaPrevencion( [{codigo,} id_medico, nombre, fecha_realizacion,
    objetivo] )"""
    def __init__(self, *args):
        if len(args) == 5:
            self.__codigo = args[0]
            self.__id_medico = args[1]
            self.__nombre = args[2]
            self.__fecha_realizacion = args[3]
            self.__objetivo = args[4]
        elif len(args) == 4:
            self.__id_medico = args[0]
            self.__nombre = args[1]
            self.__fecha_realizacion = args[2]
            self.__objetivo = args[3]
def get_codigo(self):
return self.__codigo
def get_id_medico(self):
return self.__id_medico
def get_nombre(self):
return self.__nombre
def get_fecha_realizacion(self):
return self.__fecha_realizacion
def get_objetivo(self):
return self.__objetivo
def set_nombre(self, nombre):
self.__nombre = nombre
def set_id_medico(self, id_medico):
self.__id_medico = id_medico
def set_fecha_realizacion(self, fecha_realizacion):
self.__fecha_realizacion = fecha_realizacion
def set_objetivo(self, objetivo):
self.__objetivo = objetivo
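# Hedged usage sketch (illustrative values only):
# without codigo -> 4 args: id_medico, nombre, fecha_realizacion, objetivo
#     campana = CampanaPrevencion(3, 'Semana de prevencion', '2015-10-30', 'Reducir contagios')
# with codigo    -> 5 args:
#     campana_bd = CampanaPrevencion(12, 3, 'Semana de prevencion', '2015-10-30', 'Reducir contagios')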
|
[
"userbryan@gmail.com"
] |
userbryan@gmail.com
|
a1a9fd6ebc0d15c11122032f395e325a20dc3ec7
|
9348adc3dbf30292ff63542d778fb8dc617ea482
|
/autoregressor/test/test_data_pipeline.py
|
84ab7900d90dcea26a9728d8b1fc5587f9f8ead0
|
[] |
no_license
|
myrywy/autoregressor
|
2a4883d805567379088fcf40ecc696d143b3dea2
|
7dabab4f9da69cd94d485fafd6a3150df92db4ed
|
refs/heads/master
| 2020-04-03T23:41:45.436751
| 2019-01-17T23:20:55
| 2019-01-17T23:20:55
| 155,628,559
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,713
|
py
|
from data_pipeline import DataPipeline
import tensorflow as tf
import numpy as np
import pytest
from pytest import approx
def test_add_structural_transformation():
def input_data_generator():
yield np.array([1,2,3,4])
yield np.array([1,2,3])
yield np.array([1,2])
yield np.array([1])
yield np.array([1,2])
yield np.array([1,2,3])
yield np.array([1,2,3,4])
def expected_output_data_generator():
yield {"input_sequnce": np.array([1,2,3,4]), "length": 4}
yield {"input_sequnce": np.array([1,2,3]), "length": 3}
yield {"input_sequnce": np.array([1,2]), "length": 2}
yield {"input_sequnce": np.array([1]), "length": 1}
yield {"input_sequnce": np.array([1,2]), "length": 2}
yield {"input_sequnce": np.array([1,2,3]), "length": 3}
yield {"input_sequnce": np.array([1,2,3,4]), "length": 4}
input_dataset = tf.data.Dataset.from_generator(input_data_generator, output_types=tf.int32)
expected_output_dataset = tf.data.Dataset.from_generator(expected_output_data_generator, output_types={"input_sequnce": tf.int32, "length": tf.int32})
def add_length(input_sequnce):
return {
"input_sequnce": input_sequnce,
"length": tf.shape(input_sequnce)[0]
}
pipeline = DataPipeline()
pipeline.add_structural_transformation(add_length)
output_dataset = pipeline.transform_dataset(input_dataset)
output_next = output_dataset.make_one_shot_iterator().get_next()
expected_next = expected_output_dataset.make_one_shot_iterator().get_next()
with tf.Session() as sess:
for _ in range(7):
r_output, r_expected = sess.run((output_next, expected_next))
assert r_output["input_sequnce"] == approx(r_expected["input_sequnce"])
assert r_output["length"] == approx(r_expected["length"])
def test_add_unit_transformation():
def input_data_generator():
yield {"input_sequence": np.array([1,2,3,4]), "length": 4}
yield {"input_sequence": np.array([1,2,3]), "length": 3}
yield {"input_sequence": np.array([1,2]), "length": 2}
yield {"input_sequence": np.array([1]), "length": 1}
yield {"input_sequence": np.array([1,2]), "length": 2}
yield {"input_sequence": np.array([1,2,3]), "length": 3}
yield {"input_sequence": np.array([1,2,3,4]), "length": 4}
def expected_output_data_generator():
yield {"input_sequence": np.array([4,5,6,7]), "length": 4}
yield {"input_sequence": np.array([4,5,6]), "length": 3}
yield {"input_sequence": np.array([4,5]), "length": 2}
yield {"input_sequence": np.array([4]), "length": 1}
yield {"input_sequence": np.array([4,5]), "length": 2}
yield {"input_sequence": np.array([4,5,6]), "length": 3}
yield {"input_sequence": np.array([4,5,6,7]), "length": 4}
input_dataset = tf.data.Dataset.from_generator(input_data_generator, output_types={"input_sequence": tf.int32, "length": tf.int32})
expected_output_dataset = tf.data.Dataset.from_generator(expected_output_data_generator, output_types={"input_sequence": tf.int32, "length": tf.int32})
def add3(x):
return x+3
pipeline = DataPipeline()
pipeline.add_unit_transformation(add3, "input_sequence")
output_dataset = pipeline.transform_dataset(input_dataset)
output_next = output_dataset.make_one_shot_iterator().get_next()
expected_next = expected_output_dataset.make_one_shot_iterator().get_next()
with tf.Session() as sess:
for _ in range(7):
r_output, r_expected = sess.run((output_next, expected_next))
assert r_output["input_sequence"] == approx(r_expected["input_sequence"])
assert r_output["length"] == approx(r_expected["length"])
def test_add_unit_transformation_nested():
def input_data_generator():
yield {"input_sequence": np.array([1,2,3,4]), "length": 4}, 9
yield {"input_sequence": np.array([1,2,3]), "length": 3}, 9
yield {"input_sequence": np.array([1,2]), "length": 2}, 9
yield {"input_sequence": np.array([1]), "length": 1}, 9
yield {"input_sequence": np.array([1,2]), "length": 2}, 9
yield {"input_sequence": np.array([1,2,3]), "length": 3}, 9
yield {"input_sequence": np.array([1,2,3,4]), "length": 4}, 9
def expected_output_data_generator():
yield {"input_sequence": np.array([4,5,6,7]), "length": 4}, 9
yield {"input_sequence": np.array([4,5,6]), "length": 3}, 9
yield {"input_sequence": np.array([4,5]), "length": 2}, 9
yield {"input_sequence": np.array([4]), "length": 1}, 9
yield {"input_sequence": np.array([4,5]), "length": 2}, 9
yield {"input_sequence": np.array([4,5,6]), "length": 3}, 9
yield {"input_sequence": np.array([4,5,6,7]), "length": 4}, 9
input_dataset = tf.data.Dataset.from_generator(input_data_generator, output_types=({"input_sequence": tf.int32, "length": tf.int32}, tf.int32))
expected_output_dataset = tf.data.Dataset.from_generator(expected_output_data_generator, output_types=({"input_sequence": tf.int32, "length": tf.int32},tf.int32))
def add3(x):
return x+3
pipeline = DataPipeline()
pipeline.add_unit_transformation(add3, 0, "input_sequence")
output_dataset = pipeline.transform_dataset(input_dataset)
output_next = output_dataset.make_one_shot_iterator().get_next()
expected_next = expected_output_dataset.make_one_shot_iterator().get_next()
with tf.Session() as sess:
for _ in range(7):
r_output, r_expected = sess.run((output_next, expected_next))
assert r_output[0]["input_sequence"] == approx(r_expected[0]["input_sequence"])
assert r_output[0]["length"] == approx(r_expected[0]["length"])
assert r_output[1] == approx(r_expected[1])
def test_add_unit_transformation_simple():
def input_data_generator():
yield np.array([1,2,3,4])
yield np.array([1,2,3])
yield np.array([1,2])
yield np.array([1])
yield np.array([1,2])
yield np.array([1,2,3])
yield np.array([1,2,3,4])
def expected_output_data_generator():
yield np.array([4,5,6,7])
yield np.array([4,5,6])
yield np.array([4,5])
yield np.array([4])
yield np.array([4,5])
yield np.array([4,5,6])
yield np.array([4,5,6,7])
input_dataset = tf.data.Dataset.from_generator(input_data_generator, output_types=tf.int32)
expected_output_dataset = tf.data.Dataset.from_generator(expected_output_data_generator, output_types=tf.int32)
def add3(x):
return x+3
pipeline = DataPipeline()
pipeline.add_unit_transformation(add3)
output_dataset = pipeline.transform_dataset(input_dataset)
output_next = output_dataset.make_one_shot_iterator().get_next()
expected_next = expected_output_dataset.make_one_shot_iterator().get_next()
with tf.Session() as sess:
for _ in range(7):
r_output, r_expected = sess.run((output_next, expected_next))
assert r_output == approx(r_expected)
def test_add_unit_transformation_simple_tensor_slices():
input_data = np.array(
[
[1,2,3,4],
[1,2,3,0],
[1,2,0,0],
[1,0,0,0],
]
)
expected_output_data = np.array(
[
[4, 5, 6, 7],
[4, 5, 6, 3],
[4, 5, 3, 3],
[4, 3, 3, 3],
]
)
input_dataset = tf.data.Dataset.from_tensor_slices(input_data)
expected_output_dataset = tf.data.Dataset.from_tensor_slices(expected_output_data)
def add3(x):
return x+3
pipeline = DataPipeline()
pipeline.add_unit_transformation(add3)
output_dataset = pipeline.transform_dataset(input_dataset)
output_next = output_dataset.make_one_shot_iterator().get_next()
expected_next = expected_output_dataset.make_one_shot_iterator().get_next()
with tf.Session() as sess:
for _ in range(4):
r_output, r_expected = sess.run((output_next, expected_next))
assert r_output == approx(r_expected)
def test_add_unit_transformation_tuple_tensor_slices():
input_data = np.array(
[
[1,2,3,4],
[1,2,3,0],
[1,2,0,0],
[1,0,0,0],
]
)
expected_output_data = np.array(
[
[4, 5, 6, 7],
[4, 5, 6, 3],
[4, 5, 3, 3],
[4, 3, 3, 3],
]
)
input_dataset = tf.data.Dataset.from_tensor_slices((input_data, input_data[:,0]))
expected_output_dataset = tf.data.Dataset.from_tensor_slices((expected_output_data, input_data[:,0]))
def add3(x):
return x+3
pipeline = DataPipeline()
pipeline.add_unit_transformation(add3, 0)
output_dataset = pipeline.transform_dataset(input_dataset)
output_next = output_dataset.make_one_shot_iterator().get_next()
expected_next = expected_output_dataset.make_one_shot_iterator().get_next()
with tf.Session() as sess:
for _ in range(4):
r_output, r_expected = sess.run((output_next, expected_next))
assert r_output[0] == approx(r_expected[0])
assert r_output[1] == approx(r_expected[1])
def test_add_unit_transformation_one_element_tuple_tensor_slices():
input_data = np.array(
[
[1,2,3,4],
[1,2,3,0],
[1,2,0,0],
[1,0,0,0],
]
)
expected_output_data = np.array(
[
[4, 5, 6, 7],
[4, 5, 6, 3],
[4, 5, 3, 3],
[4, 3, 3, 3],
]
)
input_dataset = tf.data.Dataset.from_tensor_slices((input_data,))
expected_output_dataset = tf.data.Dataset.from_tensor_slices(expected_output_data)
def add3(x):
return x+3
pipeline = DataPipeline()
pipeline.add_unit_transformation(add3)
output_dataset = pipeline.transform_dataset(input_dataset)
output_next = output_dataset.make_one_shot_iterator().get_next()
expected_next = expected_output_dataset.make_one_shot_iterator().get_next()
with tf.Session() as sess:
for _ in range(4):
r_output, r_expected = sess.run((output_next, expected_next))
assert r_output == approx(r_expected)
def test_add_unit_transformation_one_element_tuple():
def input_data_generator():
yield np.array([1,2,3,4]),
yield np.array([1,2,3]),
yield np.array([1,2]),
yield np.array([1]),
yield np.array([1,2]),
yield np.array([1,2,3]),
yield np.array([1,2,3,4]),
def expected_output_data_generator():
yield np.array([4,5,6,7])
yield np.array([4,5,6])
yield np.array([4,5])
yield np.array([4])
yield np.array([4,5])
yield np.array([4,5,6])
yield np.array([4,5,6,7])
input_dataset = tf.data.Dataset.from_generator(input_data_generator, output_types=(tf.int32,))
expected_output_dataset = tf.data.Dataset.from_generator(expected_output_data_generator, output_types=tf.int32)
def add3(x):
return x+3
pipeline = DataPipeline()
pipeline.add_unit_transformation(add3)
output_dataset = pipeline.transform_dataset(input_dataset)
output_next = output_dataset.make_one_shot_iterator().get_next()
expected_next = expected_output_dataset.make_one_shot_iterator().get_next()
with tf.Session() as sess:
for _ in range(7):
r_output, r_expected = sess.run((output_next, expected_next))
assert r_output == approx(r_expected)
|
[
"marcinlewy22@gmail.com"
] |
marcinlewy22@gmail.com
|
b562693940f57aee2704a7f4d8653268ddf53124
|
63e2bed7329c79bf67279f9071194c9cba88a82c
|
/SevOneApi/python-client/test/test_net_flow_direction_dto.py
|
7b1fdcddfbf76aa18f834699f063812d07cefed0
|
[] |
no_license
|
jsthomason/LearningPython
|
12422b969dbef89578ed326852dd65f65ab77496
|
2f71223250b6a198f2736bcb1b8681c51aa12c03
|
refs/heads/master
| 2021-01-21T01:05:46.208994
| 2019-06-27T13:40:37
| 2019-06-27T13:40:37
| 63,447,703
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 947
|
py
|
# coding: utf-8
"""
SevOne API Documentation
Supported endpoints by the new RESTful API # noqa: E501
OpenAPI spec version: 2.1.18, Hash: db562e6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.net_flow_direction_dto import NetFlowDirectionDto # noqa: E501
from swagger_client.rest import ApiException
class TestNetFlowDirectionDto(unittest.TestCase):
"""NetFlowDirectionDto unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testNetFlowDirectionDto(self):
"""Test NetFlowDirectionDto"""
# FIXME: construct object with mandatory attributes with example values
# model = swagger_client.models.net_flow_direction_dto.NetFlowDirectionDto() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
[
"johnsthomason@gmail.com"
] |
johnsthomason@gmail.com
|
dbefbc5f019d54bb59addf4ec8cda717551bc439
|
e8f4df6ccf3ee4673e1d8be3320d04650200d3c4
|
/CRF/2B/q2q3.py
|
d6cee5d599c9f332d36ec8df051b9dc4590dcce8
|
[] |
no_license
|
ashishiiith/Graphical-Models
|
16559bd17958daad9266a05ec59c007f2430bd30
|
1e312b62b6254057e74889e23a095e85e1eca83f
|
refs/heads/master
| 2021-01-01T16:55:17.667267
| 2015-05-20T18:43:09
| 2015-05-20T18:43:09
| 30,876,622
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,270
|
py
|
#Author: Ashish Jain
#How to Run Code? python sumProduct.py <arg1> <arg2>
#arg 1 - path of test image file, arg2 - test word corresponding to that test file
import os
import sys
import time
import itertools
from numpy import *
from math import *
import scipy
from scipy.optimize import fmin_bfgs
from scipy.optimize import fmin_l_bfgs_b
char_ordering = {'e':0, 't':1, 'a':2, 'i':3, 'n':4, 'o':5, 's':6, 'h':7, 'r':8, 'd':9}
y_label = {}
FP = zeros( (10, 321) )
TP = zeros( (10, 10) )
node_potential = None
clique_potential = None
beliefs = None
size = 0
n=10
feat_size = 321
num_words = None
forward_msg = {}
backward_msg = {}
pairwise_marginal = None
marginal_distribution = None
xij = None
correct_char = 0
total_char = 0
likelihood = 1.0
def load_FP(fname):
#loading feature parameters into a dictionary
global FP
lines = open(fname, "r").readlines()
l = []
for i in xrange(0, len(lines)):
FP[i] = map(lambda x: float(x), lines[i].strip().split())
def load_TP(fname):
#loading transition parameter
global TP
y = 0
for line in open(fname, "r"):
x = 0
tokens = line.strip().split()
for token in tokens:
TP[x][y] = float(token)
x+=1
y+=1
def compute_node_potential(fname):
global node_potential
global xij
node_potential = []
xij = []
for vector in open(fname, "r"):
feature_vec = map(lambda x:float(x), vector.split())
vec = []
for i in xrange(0, 10):
vec.append(dot(FP[i], feature_vec)) #dot product of feature vector and learned feature parameter from model
node_potential.append(vec)
xij.append(feature_vec)
#print "Node Potential: " + str(node_potential)
return node_potential
def compute_clique_potential(fname, word):
# computing clique potential corresponding to each of the clique in markov network
global clique_potential
global size
size = len(word)
clique_potential = zeros(shape=(len(word)-1, 10 ,10))
for i in xrange(0, len(word)-1):
#storing clique potential for each of the clique node
if i == len(word)-2:
clique_potential[i] = matrix(node_potential[i]).T + matrix(node_potential[i+1]) + TP
else:
clique_potential[i] = matrix(node_potential[i]).T + TP
#for i in xrange(0, len(word)-1):
# for char1 in ['t', 'a', 'h']:
# for char2 in ['t', 'a', 'h']:
# print str(clique_potential[i][char_ordering[char1]][char_ordering[char2]]) + " ",
# print
# print
def logsumexp(vector):
c = max(vector)
    vector = map(lambda x: exp(x - c), vector)
    return c + log(sum(vector))
def sumproduct_message():
global forward_msg
global backward_msg
potential = zeros(shape=(10,10))
''' Implementing forward message passing.
'''
forward_msg[1] = [0.0 for i in xrange(10)]
for i in xrange(0, len(clique_potential)-1):
key = str(i+1) + "->" + str(i+2)
potential = clique_potential[i]
forward_msg[i+2] = []
for j in xrange(0, 10):
forward_msg[i+2].append(logsumexp(array(potential[:,j]+matrix(forward_msg[i+1])).flatten()))
#print key + ":" + str(forward_msg[i+2])
'''Implementing backward message passing
'''
backward_msg[size-1] = [0.0 for i in xrange(10)]
for i in xrange(size-2, 0, -1):
key = str(i+1) + "->"+str(i)
potential = clique_potential[i]
backward_msg[i] = []
for j in xrange(0, 10):
backward_msg[i].append(logsumexp(array(potential[j, :] + matrix(backward_msg[i+1])).flatten()))
#print key + ":" + str(backward_msg[i])
def logbeliefs():
global beliefs
beliefs = zeros(shape=(size-1, 10, 10))
for i in xrange(size-1):
if i == 0:
beliefs[i] = clique_potential[i] + matrix(backward_msg[i+1])
elif i == size-2:
beliefs[i] = clique_potential[i] + matrix(forward_msg[i+1]).T
else:
beliefs[i] = clique_potential[i] + matrix(backward_msg[i+1]) + matrix(forward_msg[i+1]).T
#for i in xrange(0, len(beliefs)):
# for ch1 in ['t', 'a']:
# for ch2 in ['t', 'a']:
# print ch1 + " : " + ch2 + " " + str(beliefs[i][char_ordering[ch1]][char_ordering[ch2]])
def marginal_probability():
global marginal_distribution
global pairwise_marginal
l = len(beliefs)
pairwise_marginal = zeros(shape=(l, 10, 10))
marginal_distribution = zeros(shape=(l+1, 10))
for i in xrange(l):
normalizer = 0.0
for ch1 in xrange(0, 10):
for ch2 in xrange(0, 10):
normalizer+=exp(beliefs[i][ch1][ch2])
for ch1 in xrange(0,10):
for ch2 in xrange(0,10):
#normalizing each value in belief table
pairwise_marginal[i][ch1][ch2] = exp(beliefs[i][ch1][ch2])/normalizer
#for ch1 in ['t', 'a', 'h']:
# for ch2 in ['t', 'a', 'h']:
# print ch1 + " : " + ch2 + " " + str(pairwise_marginal[i][char_ordering[ch1]][char_ordering[ch2]])
        #adding up pairwise marginal probability along a row to compute marginal probability
for j in xrange(10):
marginal_distribution[i][j] = sum(pairwise_marginal[i][j])
if i==l-1:
marginal_distribution[i+1][j] = sum(pairwise_marginal[i,:,j])
#for i in xrange(l+1):
# for j in char_ordering.keys():
# print str(j) + " " + str(marginal_distribution[i][char_ordering[j]]) + " ",
# print
def predict_character(correct_word):
global correct_char
global total_char
#using marginal probability to predict character for a given state
predicted_word = ""
for i in xrange(0, len(marginal_distribution)):
index = argmax(array(marginal_distribution[i]).flatten())
for char, order in char_ordering.items():
if order==index:
predicted_word+=char
for j in xrange(0, len(predicted_word)):
if predicted_word[j] == correct_word[j]:
correct_char +=1
total_char += len(correct_word)
#print predicted_word
def average_loglikelihood(word):
global likelihood
for i in xrange(0, len(word)):
likelihood *= marginal_distribution[i][char_ordering[word[i]]]
def partition_function():
l = []
for i in xrange(0,10):
l.append(logsumexp(list(beliefs[0][i])))
return logsumexp(l)
def loglikelihood(word):
#compute energy
energy = 0.0
for i in xrange(0, len(word)-1):
energy+=clique_potential[i][char_ordering[word[i]]][char_ordering[word[i+1]]]
logZ = partition_function()
return energy - logZ
def sumProduct(fname, word):
compute_node_potential(fname)
compute_clique_potential(fname, word)
sumproduct_message()
logbeliefs()
marginal_probability()
predict_character(word)
def load_weights(wgts):
global TP
global FP
i = 0
for c in range(10):
for cprime in range(10):
TP[c][cprime] = wgts[i]
i += 1
for c in range(10):
for f in range(321):
FP[c][f] = wgts[i]
i += 1
def objective_function(weights):
global TP
global FP
TP = weights[0:n*n].reshape([n, n])
FP = weights[n*n:].reshape([n, feat_size])
#load_weights(init_weights)
count =1
likelihood = 0.0
for word in open("../2A/data/train_words.txt", "r"):
sumProduct("../2A/data/train_img"+str(count)+".txt" , str(word.strip('\n')))
likelihood += loglikelihood(str(word.strip('\n')))
count+=1
if count == num_words+1:
break
avg_likelihood = -likelihood/float(num_words)
return avg_likelihood
def gradient_function(weights):
global TP
global FP
TP = weights[0:n*n].reshape([n, n])
FP = weights[n*n:].reshape([n, feat_size])
gradient_feat = zeros([10, feat_size])
gradient_trans = zeros([10, 10])
count = 1
for words in open("../2A/data/train_words.txt", "r"):
word = str(words.strip('\n'))
sumProduct("../2A/data/train_img"+str(count)+".txt" , word)
#for transition distribution
for i in xrange(0, size-1):
label1 = char_ordering[word[i]]
label2 = char_ordering[word[i+1]]
gradient_trans[label1][label2] += 1
for label1 in xrange(0, 10):
for label2 in xrange(0, 10):
gradient_trans[label1][label2] -= pairwise_marginal[i][label1][label2]
#print "tansition gradient\n"
#print gradient_trans
#for marginal distribution
#print "len xij : " + str(word) + " " + str( len(xij))
for i in xrange(0, size):
label = char_ordering[word[i]]
for f in xrange(0, feat_size):
gradient_feat[label][f]+=xij[i][f]
for c in xrange(0, 10):
gradient_feat[c][f] -= marginal_distribution[i][c]*xij[i][f]
count+=1
if count == num_words+1:
break
gradient_feat = concatenate(gradient_feat, axis=1)
gradient_trans = concatenate(gradient_trans, axis=1)
print -concatenate([gradient_trans, gradient_feat], axis=1)/float(num_words)
return -concatenate([gradient_trans, gradient_feat], axis=1)/float(num_words)
def output_result(result):
fd = open("result", "a")
for val in result:
fd.write(str(val) + " ")
fd.flush()
fd.close()
def main():
t0 = time.clock()
global num_words
wgts = open("result", "r").readline().split()
load_weights(wgts)
count = 1
for word in open("../2A/data/test_words.txt", "r"):
sumProduct("../2A/data/test_img"+str(count)+".txt" , str(word.strip('\n')))
count+=1
print correct_char
print total_char
if __name__ == "__main__":
main() #uncomment this function if you want to see results for Question 3.5
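# Hedged training sketch (not in the original; initial weights and num_words are assumptions):
# the objective_function / gradient_function pair above is shaped for the imported SciPy
# optimizers, so training would look roughly like
#     num_words = 50
#     init_weights = zeros(n * n + n * feat_size)
#     result = fmin_bfgs(objective_function, init_weights, fprime=gradient_function)
#     output_result(result)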
|
[
"ashish.iiith@gmail.com"
] |
ashish.iiith@gmail.com
|