blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 213 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 246 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1f125f92f62b4c5888b38d7319cd74ce3606f18d | 63c5275249e536a327bc2914cc96775eb746ffbe | /day13_part2.py | 5c2f005311e0c25244b671a746ddd94f4bb0b051 | [
"MIT"
] | permissive | alu-/advent-of-code-2019 | 0e3df3015bd528d20407bb85535de1cb1679940e | 117fc49cb6e89e6302cfbd64b1c3d58f21b47378 | refs/heads/master | 2020-09-22T13:46:39.322828 | 2019-12-22T02:19:22 | 2019-12-22T02:19:22 | 225,225,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 355 | py | #!/usr/bin/env python3
from intcode.machine import Machine
from intcode.screen import Screen
def main():
with open("./inputs/day13.txt") as file:
raw_input = file.read()
raw_input = "2{}".format(raw_input[1:])
vm = Machine(raw_input)
screen = Screen(vm)
screen.run(interactive=True)
if __name__ == '__main__':
main()
| [
"alu@pad"
] | alu@pad |
4a086ff2b0cdebd555074cf5e647bb77e25ed7d5 | 0f19ec42bbee96db42a6973d99d679fa33d7aba1 | /Chapter03/Exercise3.05/Test3_05.py | 0812c195cb806f245289f2f397e31779311b9840 | [
"MIT"
] | permissive | a1ip/The-Data-Science-Workshop | ab3aa67c24daac18cbccbf9dc96b5436e46e1754 | 5992a4d14f7ea06c8815e395d57a9941bac47254 | refs/heads/master | 2022-12-18T08:19:04.550755 | 2020-09-24T04:10:38 | 2020-09-24T04:10:38 | 286,596,069 | 1 | 0 | MIT | 2020-09-03T00:40:51 | 2020-08-10T23:05:29 | Jupyter Notebook | UTF-8 | Python | false | false | 1,040 | py | import unittest
import import_ipynb
import pandas as pd
import pandas.testing as pd_testing
from sklearn import preprocessing
from matplotlib import pyplot
class Test(unittest.TestCase):
def setUp(self):
import Exercise3_05
self.exercises = Exercise3_05
self.file_url = 'https://raw.githubusercontent.com/PacktWorkshops/The-Data-Science-Workshop/master/Chapter03/bank-full.csv'
self.bankData = pd.read_csv(self.file_url, sep=";")
self.bankNumeric = self.bankData[['age','balance','day','duration','campaign','pdays','previous']]
self.bankCorr = self.bankNumeric.corr(method = 'pearson')
def test_file_url(self):
self.assertEqual(self.exercises.file_url, self.file_url)
def test_df(self):
pd_testing.assert_frame_equal(self.exercises.bankData, self.bankData)
def test_corr(self):
pd_testing.assert_frame_equal(self.exercises.bankCorr,self.bankCorr)
if __name__ == '__main__':
unittest.main()
| [
"noreply@github.com"
] | a1ip.noreply@github.com |
cb0b6b7036d3690b73505aa3107529ed3a66ec74 | c1eefd525582b5ec6eb02fd0b11ccd4a6fde29bb | /sample_programs/python/1_HelloWorld/hello.py | 3e1503e7e4231d1403aa97bd190f12a202624e23 | [] | no_license | igoramidzic/robotics-projects | 7e7f63a998f86ab6625bf47e37bd7492b32fd7c4 | 0ae1a2b29999ac728d586518b2571010248b681a | refs/heads/master | 2020-12-19T05:43:52.776490 | 2020-04-02T18:52:39 | 2020-04-02T18:52:39 | 235,636,098 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 445 | py | # This program prints hello world to the console every 500 milliseconds.
# Open the folder that contains this file.
# Then click Tools > Open current folder in terminal.
# Enter the command "python3 hello.py" to execute it.
# You can use the app named Thonny to modify Python files,
# but we recommend using the above command to execute it.
import time
print("Press Ctrl+C to exit.")
while True:
print("Hello world!")
time.sleep(0.5)
| [
"amidzicigor46@gmail.co"
] | amidzicigor46@gmail.co |
4f6cd196808b377db603151bb34d561d3be9887c | 4efd76755354b0b6e7684e0773df87d0f4fa7c5a | /asite/settings/base.py | a518894aaca4f87460fdcc8568db57259eeabf73 | [] | no_license | peeterss/asite | 301e59af070bf0fbbdb9914f35ca54693cc55db6 | d50a3a14f10e61866344980d321eb104677585f8 | refs/heads/master | 2020-07-19T05:15:57.171245 | 2019-09-08T15:15:14 | 2019-09-08T15:15:14 | 206,380,870 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,312 | py | """
Django settings for asite project.
Generated by 'django-admin startproject' using Django 2.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
PROJECT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
BASE_DIR = os.path.dirname(PROJECT_DIR)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# Application definition
INSTALLED_APPS = [
'home',
'search',
'blog',
'wagtail.contrib.forms',
'wagtail.contrib.redirects',
'wagtail.embeds',
'wagtail.sites',
'wagtail.users',
'wagtail.snippets',
'wagtail.documents',
'wagtail.images',
'wagtail.search',
'wagtail.admin',
'wagtail.core',
'modelcluster',
'taggit',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
'wagtail.core.middleware.SiteMiddleware',
'wagtail.contrib.redirects.middleware.RedirectMiddleware',
]
ROOT_URLCONF = 'asite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(PROJECT_DIR, 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'asite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
]
STATICFILES_DIRS = [
os.path.join(PROJECT_DIR, 'static'),
]
# ManifestStaticFilesStorage is recommended in production, to prevent outdated
# Javascript / CSS assets being served from cache (e.g. after a Wagtail upgrade).
# See https://docs.djangoproject.com/en/2.2/ref/contrib/staticfiles/#manifeststaticfilesstorage
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.ManifestStaticFilesStorage'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
# Wagtail settings
WAGTAIL_SITE_NAME = "asite"
# Base URL to use when referring to full URLs within the Wagtail admin backend -
# e.g. in notification emails. Don't include '/admin' or a trailing slash
BASE_URL = 'http://example.com'
| [
"peeterss83@gmail.com"
] | peeterss83@gmail.com |
f263a6183283d1f869704eda248b45826d33de58 | 3fd3da4f11a251cc43d44d1d61ff2ffe5c82a4ce | /零起点Tensorflow快速入门tf_demo/ty04_mnist_dae.py | b071fb373d15c86bd361c5e55a70e194e2361ef9 | [] | no_license | dumpinfo/TsBook | d95faded917bce3e024e77ff06afd30717ed9ef4 | 8fadfcd2ebf935cd49784fd27d66b2fd9f307fbd | refs/heads/master | 2023-05-27T07:56:24.149421 | 2019-07-31T20:51:52 | 2019-07-31T20:51:52 | 198,481,031 | 1 | 3 | null | 2023-05-22T21:13:31 | 2019-07-23T17:47:19 | Jupyter Notebook | UTF-8 | Python | false | false | 5,205 | py | #coding=utf-8
'''
Created on 2017.03.03
极宽版·深度学习·案例
摘自·极宽深度学习·系列培训课件
@ www.TopQuant.vip www.ziwang.com
Top极宽量化开源团队
简单的MNIST手写字体识别案例
使用TensorLayer简化接口
@from:
http://tensorlayer.org
'''
import numpy as np
import tensorflow as tf
import tensorlayer as tl
from tensorlayer.layers import set_keep
import time,os
#------------------------------------------
#1
print('\n#1,Session')
#rlog='/ailib/log_tmp'
rlog='logs/'
if os.path.exists(rlog):tf.gfile.DeleteRecursively(rlog)
sess = tf.InteractiveSession()
#2
print('\n#2,set.dat')
X_train, y_train, X_val, y_val, X_test, y_test = \
tl.files.load_mnist_dataset(shape=(-1,784), path="data/mnist/")
X_train = np.asarray(X_train, dtype=np.float32)
y_train = np.asarray(y_train, dtype=np.int32)
X_val = np.asarray(X_val, dtype=np.float32)
y_val = np.asarray(y_val, dtype=np.int32)
X_test = np.asarray(X_test, dtype=np.float32)
y_test = np.asarray(y_test, dtype=np.int32)
print('X_train.shape', X_train.shape)
print('y_train.shape', y_train.shape)
print('X_val.shape', X_val.shape)
print('y_val.shape', y_val.shape)
print('X_test.shape', X_test.shape)
print('y_test.shape', y_test.shape)
print('X %s y %s' % (X_test.dtype, y_test.dtype))
#3
print('\n#3,定义placeholder占位符参数')
# placeholder
x = tf.placeholder(tf.float32, shape=[None, 784], name='x')
y_ = tf.placeholder(tf.int64, shape=[None, ], name='y_')
#4
print('\n#4,构建神经网络算法模型')
network = tl.layers.InputLayer(x, name='input')
network = tl.layers.DropoutLayer(network, keep=0.5, name='denoising1') # if drop some inputs, it is denoise AE
network = tl.layers.DenseLayer(network, n_units=196,act = tf.nn.relu, name='relu1')
recon_layer1 = tl.layers.ReconLayer(network, x_recon=x, n_units=784,act = tf.nn.softplus, name='recon_layer1')
#5
print('\n#5,# 打印神经网络各层的属性参数')
attrs = vars(network)
print(', '.join("%s: %s\n" % item for item in attrs.items()))
print('\nnetwork.all_drop')
print(network.all_drop) # {'drop1': 0.8, 'drop2': 0.5, 'drop3': 0.5}
print('\nnetwork.all_layers')
print(network.all_layers)
#6
print('\n#6,# 定义cost损失函数和衡量指标,SOFTMAX多项式回归模型,使用的是tl模块内置函数,以提高速度')
y = network.outputs
cost = tl.cost.cross_entropy(y, y_, name='xentropy')
correct_prediction = tf.equal(tf.argmax(y, 1), y_)
acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
y_op = tf.argmax(tf.nn.softmax(y), 1)
#7
print('\n#7,优化cost代价函数')
# You can add more penalty to the cost function as follow.
# cost = cost + tl.cost.maxnorm_regularizer(1.0)(network.all_params[0]) + tl.cost.maxnorm_regularizer(1.0)(network.all_params[2])
# cost = cost + tl.cost.lo_regularizer(0.0001)(network.all_params[0]) + tl.cost.lo_regularizer(0.0001)(network.all_params[2])
# cost = cost + tl.cost.maxnorm_o_regularizer(0.001)(network.all_params[0]) + tl.cost.maxnorm_o_regularizer(0.001)(network.all_params[2])
#8
print('\n#8,初始化全部变量参数,定义Optimizer优化函数')
params = network.all_params
# train
n_epoch = 5 #100
batch_size = 128
learning_rate = 0.0001
print_freq = 5
train_op = tf.train.AdamOptimizer(learning_rate, beta1=0.9, beta2=0.999,
epsilon=1e-08, use_locking=False).minimize(cost)
tl.layers.initialize_global_variables(sess)
#9
print('\n#9.1,输出网络模型变量参数')
network.print_params()
#
print('\n#9.2,输出网络模型参数')
network.print_layers()
#
print('\n#9.3,其他参数')
print(' learning_rate: %f' % learning_rate)
print(' batch_size: %d' % batch_size)
#10
print('\n#10,迭代模式,训练模型')
tl.utils.fit(sess, network, train_op, cost, X_train, y_train, x, y_,
acc=acc, batch_size=batch_size, n_epoch=n_epoch, print_freq=1,
X_val=X_val, y_val=y_val, eval_train=False,
tensorboard =True,tensorboard_weight_histograms=True,tensorboard_graph_vis=True)
#11
print('\n#11,评估模型训练效果')
test_loss, test_acc, n_batch = 0, 0, 0
for X_test_a, y_test_a in tl.iterate.minibatches(
X_test, y_test, batch_size, shuffle=True):
dp_dict = tl.utils.dict_to_one( network.all_drop ) # disable noise layers
feed_dict = {x: X_test_a, y_: y_test_a}
feed_dict.update(dp_dict)
err, ac = sess.run([cost, acc], feed_dict=feed_dict)
test_loss += err; test_acc += ac; n_batch += 1
print(" test loss: %f" % (test_loss/n_batch))
print(" test acc: %f" % (test_acc/n_batch))
#12
print('\n#12,保存训练好的模型数据')
saver = tf.train.Saver()
save_path = saver.save(sess, "tmp/model.ckpt")
print("Model saved in file: %s" % save_path)
# You can also save the parameters into .npz file.
tl.files.save_npz(network.all_params , name='tmp/model.npz')
# You can only save one parameter as follow.
# tl.files.save_npz([network.all_params[0]] , name='model.npz')
# Then, restore the parameters as follow.
# load_params = tl.files.load_npz(path='', name='model.npz')
# tl.files.assign_params(sess, load_params, network)
#13
print('\n#13,Session.close')
sess.close()
| [
"twtravel@126.com"
] | twtravel@126.com |
0bec3f3d2f42ec3c4c5aaeb2e995a95f0fa7f9d7 | 3bbce1e8c88fe001ce17aa5c7a1d59f190f2fa97 | /weibo/server/analysis_weibo.py | 4da07c668929c3c2159f21683eddc5e70402b6d8 | [] | no_license | flysky1991/weibo | 2e975bc95d7c0b2f5b0f9e34f412c2b561a4132d | 4c2b1a83c014d927939b1119086163cab6004e3c | refs/heads/master | 2021-01-11T14:09:05.766341 | 2016-04-11T13:48:08 | 2016-04-11T13:48:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 566 | py | # -*- coding: UTF-8 -*-
import zerorpc
import jieba.analyse
import jieba
import sys
class AnalysisWeiboRPC(object):
def __init__(self):
jieba.set_dictionary('../dict/dict.txt.big')
jieba.analyse.set_stop_words('../dict/stop_words.txt')
jieba.initialize()
def ExtractWeiboKeywords(self, content):
return jieba.analyse.extract_tags(content, topK=50, withWeight=True, allowPOS=('ns', 'n', 'vn', 'v'))
def ceshi(self):
return "test"
if __name__ == '__main__':
s = zerorpc.Server(AnalysisWeiboRPC())
s.bind("tcp://0.0.0.0:4243")
s.run()
| [
"763687347@qq.com"
] | 763687347@qq.com |
c2d60a6202cd75d38d67da54d59570178aad0398 | a9659f882c96507786f0a898c2ca8599a684ac8a | /k101cmap2.py | 168476cc1c1b36e1b77d79446034fb6aed9f1fd0 | [] | no_license | equalll/zk10 | f2bbad2353cc0225a2761bf4170e4831372d35a2 | dc52edc3b0aa34d68ca51141a6a8033c5c8acd91 | refs/heads/master | 2021-06-28T13:57:17.568075 | 2017-09-19T14:21:40 | 2017-09-19T14:21:40 | 104,084,868 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 686 | py | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import matplotlib as mpl
# =======================
mpl.style.use('seaborn-whitegrid');
def sta001(k,nyear,xd):
d2=np.fv(k,nyear,-xd,-xd);
d2=round(d2)
return d2
# =======================
dx05=[sta001(0.05,x,1.4) for x in range(0,40)]
dx10=[sta001(0.10,x,1.4) for x in range(0,40)]
dx15=[sta001(0.15,x,1.4) for x in range(0,40)]
dx20=[sta001(0.20,x,1.4) for x in range(0,40)]
df=pd.DataFrame(columns=['dx05','dx10','dx15','dx20']);
df['dx05']=dx05;df['dx10']=dx10;
df['dx15']=dx15;df['dx20']=dx20;
print(df.tail())
df.plot(colormap='xss')
#df.plot(colormap='hot')
| [
"zxcv@zxcvdeAir.lan"
] | zxcv@zxcvdeAir.lan |
ecf0b538759f7f47899d95133baf31ffefb74016 | 1e87dc43f6b5d8a36dca8a34593616e8d44d3995 | /nighthawk_web/nighthawk/settings.py | c2d9cfe36c3268d09a806f7a013b8038f24bd4cb | [] | no_license | nguyenducnhaty/nightHawkResponse | 05de49847af24711d6ddd6f5da31252891cd837f | ff5d9310896944b39c9382ed7fb32d08445fc7d2 | refs/heads/master | 2021-01-24T21:41:10.557707 | 2016-07-18T11:21:32 | 2016-07-18T11:21:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,602 | py | """
Django settings for brstriage project.
Generated by 'django-admin startproject' using Django 1.9.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'sga@u9^8uc%7cs)nb6sr=y#u=cpph3@p=*=l5))8grywge-j5('
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'nighthawk.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_DIR + '/templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'nighthawk.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': '/opt/nighthawk/var/db/db.sqlite3'
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATICFILES_DIRS = (os.path.join(BASE_DIR, 'static'),)
STATIC_URL = '/static/'
MEDIA_DIR = '/opt/nighthawk/var/media'
NIGHTHAWK_GO = '/opt/nighthawk/bin'
LOGGING_DIR = '/opt/nighthawk/var/log'
## Logging
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'standard': {
'format' : "[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s",
'datefmt' : "%d/%b/%Y %H:%M:%S"
},
},
'handlers': {
'null': {
'level':'DEBUG',
'class':'django.utils.log.NullHandler',
},
'logfile': {
'level':'DEBUG',
'class':'logging.handlers.RotatingFileHandler',
'filename': LOGGING_DIR + "/nighthawk_django.log",
'maxBytes': 50000,
'backupCount': 2,
'formatter': 'standard',
},
'console':{
'level':'INFO',
'class':'logging.StreamHandler',
'formatter': 'standard'
},
},
'loggers': {
'django': {
'handlers':['console'],
'propagate': True,
'level':'WARN',
},
'django.db.backends': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
'nighthawk': {
'handlers': ['console', 'logfile'],
'level': 'DEBUG',
},
}
}
| [
"biggiesmalls@biggiesmalls.local"
] | biggiesmalls@biggiesmalls.local |
634a2453a9b4b2a8fc54994b877044ddb9b0b990 | b5dca50903ef654f14ddc0b61a1a3d47caa2b3b9 | /idlhelp2txt.py | 0dc82e5ae507cad39abe36681bbd38410b9e77b6 | [] | no_license | mgalloy/ridl | e58a3389ec338f0dd9851a05eeb02cdff60621bd | d846bbf3541095433d9d64514d5a9e62bacc1aea | refs/heads/master | 2021-01-21T21:55:35.754220 | 2017-11-17T17:48:27 | 2017-11-17T17:48:27 | 6,301,833 | 6 | 3 | null | null | null | null | UTF-8 | Python | false | false | 4,547 | py | #!/usr/bin/env python
from HTMLParser import HTMLParser
from optparse import OptionParser
import os
class IDLHelpParser(HTMLParser):
def handle_charref(self, name):
if (name == "160"):
self.current_text = self.current_text + " "
def handle_data(self, data):
# compress whitespace
stripped_data = " ".join(data.split())
# exit if nothing left
if (len(stripped_data) == 0):
return
# keep a single space at beginning or end if they were there to start with
if (data[0] != stripped_data[0]):
stripped_data = " " + stripped_data
if (data[-1] != stripped_data[-1]):
stripped_data = stripped_data + " "
# append to current text
self.current_text = self.current_text + stripped_data
def handle_starttag(self, tag, attrs):
if (tag == "p"):
self.current_text = ""
# assume the paragraph is a normal paragraph
self.type = "plain_para"
# hide if it has specific attributes
for a in attrs:
if (a[0] == "class" and a[1] == "Code"):
self.type = "code_para"
self.last_was_code_para = True
if (a[0] == "class" and a[1] == "MCWebHelpFramesetLink"):
self.type = "hide_para"
if (a[0] == "MadCap:conditions" and a[1] == "Reference Material.Online_Help_Only"):
self.type = "hide_para"
if (a[0] == "class" and a[1] == "HideSearchTerms"):
self.type = "hide_para"
if (tag == "h1" or tag == "h2" or tag == "h3"):
self.current_text = ""
self.type = "header"
if (tag == "img"):
self.current_text = "[image]"
self.type = "image"
if (tag == "br"):
self.current_text = self.current_text + "\n"
def handle_endtag(self, tag):
if self.last_was_code_para:
extra_line = "\n"
else:
extra_line = ""
if (tag in self.para_tags):
self.last_was_code_para = False
if (tag == "p" and self.type == "plain_para"):
if (len(self.current_text) > 0):
self.output = self.output + extra_line + self.current_text + "\n\n"
if (tag == "p" and self.type == "code_para"):
if (len(self.current_text) > 0):
self.output = self.output + " " + self.current_text + "\n"
self.last_was_code_para = True
if (tag == "h1" or tag == "h2" or tag == "h3"):
if (tag == "h1"): char = "="
if (tag == "h2"): char = "-"
if (tag == "h3"): char = "~"
if (tag == "h1"):
self.output = self.output + extra_line + "".join([char for x in range(len(self.current_text))]) + "\n"
extra_line = ""
self.output = self.output + extra_line + self.current_text + "\n"
self.output = self.output + "".join([char for x in range(len(self.current_text))]) + "\n"
if (tag == "img"):
self.output = self.output + extra_line + self.current_text + "\n\n"
def reset(self):
HTMLParser.reset(self)
self.current_text = ""
self.type = ""
self.output = ""
self.last_was_code_para = False
def __init__(self):
HTMLParser.__init__(self)
self.para_tags = ["p", "img", "h1", "h2", "h3"]
self.reset()
def idlhelp2txt_file(filename):
f = open(filename, "r")
lines = f.read()
f.close()
lines = lines.replace('<![CDATA[ ]]>', '')
h = IDLHelpParser()
h.feed(lines)
return(h.output)
# idlhelp2txt.py helpdir outputdir
def main():
parser = OptionParser()
(options, args) = parser.parse_args()
helpdir = os.path.normpath(args[0])
outputdir = os.path.normpath(args[1])
# make output directory if not already present
if not os.path.isdir(outputdir):
os.mkdir(outputdir)
for dirpath, subdirs, files in os.walk(helpdir):
print 'Processing directory: %s' % dirpath
# find and create output path if it doesn't already exist
outputpath = outputdir + dirpath[len(helpdir):]
if not os.path.isdir(outputpath):
os.mkdir(outputpath)
# filter out anything not an .html file
files = [f for f in files if f[-4:] == "html"]
for basename in files:
filename = os.path.join(dirpath, basename)
outputname = os.path.join(outputpath, basename)
(outputname, ext) = os.path.splitext(outputname)
outputname = outputname + '.txt'
output = idlhelp2txt_file(filename)
f = open(outputname, "w")
f.write(output)
f.close()
if __name__ == "__main__":
main()
| [
"mgalloy@6cd51d45-9568-0410-80bb-ac2a7bf28483"
] | mgalloy@6cd51d45-9568-0410-80bb-ac2a7bf28483 |
3d2cd1586ff2852fb05d9036828797e26eb6b87d | 79d2bb228a15efaa9d793ecd80cdfa7a9a09f44c | /Assignment8/node_modules/mongoose/node_modules/mongodb/node_modules/mongodb-core/node_modules/kerberos/build/config.gypi | 442873c02fb3e7e796be9b43f3e64c27469c91bf | [
"Apache-2.0",
"MIT"
] | permissive | eduardorojas/LearningWebAppDev | 39b8c23aa20983e2fd0a011a488c0d26662b1465 | 2647b707214a5e5f940d6a0b58d7658955aa6bb9 | refs/heads/master | 2021-01-18T09:23:07.919472 | 2015-05-07T06:37:03 | 2015-05-07T06:37:03 | 30,628,663 | 0 | 0 | null | 2015-02-11T03:29:00 | 2015-02-11T03:29:00 | null | UTF-8 | Python | false | false | 3,377 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"clang": 0,
"gcc_version": 48,
"host_arch": "x64",
"icu_small": "false",
"node_install_npm": "true",
"node_prefix": "",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_v8": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_use_dtrace": "false",
"node_use_etw": "false",
"node_use_mdb": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"openssl_no_asm": 0,
"python": "/usr/bin/python",
"target_arch": "x64",
"uv_library": "static_library",
"uv_parent_path": "/deps/uv/",
"uv_use_dtrace": "false",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 0,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 0,
"v8_random_seed": 0,
"v8_use_snapshot": "true",
"want_separate_host_toolset": 0,
"nodedir": "/home/eduardo/.node-gyp/0.12.0",
"copy_dev_lib": "true",
"standalone_static_library": 1,
"cache_lock_stale": "60000",
"sign_git_tag": "",
"user_agent": "npm/2.5.1 node/v0.12.0 linux x64",
"always_auth": "",
"bin_links": "true",
"key": "",
"description": "true",
"fetch_retries": "2",
"heading": "npm",
"init_version": "1.0.0",
"user": "",
"force": "",
"cache_min": "10",
"init_license": "ISC",
"editor": "vi",
"rollback": "true",
"cache_max": "Infinity",
"userconfig": "/home/eduardo/.npmrc",
"engine_strict": "",
"init_author_name": "",
"init_author_url": "",
"tmp": "/tmp",
"depth": "Infinity",
"save_dev": "",
"usage": "",
"cafile": "",
"https_proxy": "",
"onload_script": "",
"rebuild_bundle": "true",
"save_bundle": "",
"shell": "/bin/bash",
"prefix": "/usr/local",
"browser": "",
"cache_lock_wait": "10000",
"registry": "https://registry.npmjs.org/",
"save_optional": "",
"scope": "",
"searchopts": "",
"versions": "",
"cache": "/home/eduardo/.npm",
"ignore_scripts": "",
"searchsort": "name",
"version": "",
"local_address": "",
"viewer": "man",
"color": "true",
"fetch_retry_mintimeout": "10000",
"umask": "0022",
"fetch_retry_maxtimeout": "60000",
"message": "%s",
"ca": "",
"cert": "",
"global": "",
"link": "",
"access": "",
"save": "",
"unicode": "true",
"long": "",
"production": "",
"unsafe_perm": "true",
"node_version": "0.12.0",
"tag": "latest",
"git_tag_version": "true",
"shrinkwrap": "true",
"fetch_retry_factor": "10",
"npat": "",
"proprietary_attribs": "true",
"save_exact": "",
"strict_ssl": "true",
"dev": "",
"globalconfig": "/usr/local/etc/npmrc",
"init_module": "/home/eduardo/.npm-init.js",
"parseable": "",
"globalignorefile": "/usr/local/etc/npmignore",
"cache_lock_retries": "10",
"save_prefix": "^",
"group": "1000",
"init_author_email": "",
"searchexclude": "",
"git": "git",
"optional": "true",
"json": "",
"spin": "true"
}
}
| [
"eduardorojas@csu.fullerton.edu"
] | eduardorojas@csu.fullerton.edu |
ad83fdab6ab11cbf459e1859825e5ed321ea77dd | c6f432fe7dc88969c2a7bb4f9cf11794902a3ce7 | /WEEK 6/newsmedia/newsmedia/pipelines.py | 190f67dc57a2e1cb9801bc22f241aae51520f8c9 | [] | no_license | muhammadsyafr/progress-internship | 6b6073ef50c6a3bd59d2c606d2e8022c87718dea | 7e3f6d8837b6372c301b7cb765bab97acf757a91 | refs/heads/master | 2023-02-11T11:33:22.549456 | 2021-01-07T09:46:05 | 2021-01-07T09:46:05 | 297,983,125 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,532 | py | from itemadapter import ItemAdapter
import mysql.connector
class NewsMediaPipeline:
def __init__(self):
self.create_connection()
self.create_table()
def create_connection(self):
self.conn = mysql.connector.connect(
host = 'localhost',
user = 'root',
passwd = '',
database = 'scrapy_kompas1'
)
self.curr = self.conn.cursor()
def create_table(self):
self.curr.execute("""DROP TABLE IF EXISTS tbl_news""")
self.curr.execute("""create table tbl_news(
id int,
title text,
url text,
tanggal text
)""")
self.curr.execute("""ALTER TABLE `tbl_news` CHANGE `id` `id` INT(11) NOT NULL AUTO_INCREMENT, add PRIMARY KEY (`id`)""")
self.curr.execute("""DROP TABLE IF EXISTS tbl_news_detail""")
self.curr.execute("""create table tbl_news_detail(
id int,
id_detail_news int,
title text,
img_url text,
time text,
categories text,
tags text,
content text
)""")
self.curr.execute("""ALTER TABLE `tbl_news_detail` CHANGE `id` `id` INT(11) NOT NULL AUTO_INCREMENT, add PRIMARY KEY (`id`)""")
# self.curr.execute("""ALTER TABLE `tbl_news` ADD CONSTRAINT `relation_news` FOREIGN KEY (`id`) REFERENCES `tbl_news_detail`(`id`) ON DELETE RESTRICT ON UPDATE RESTRICT""")
def process_item(self, item, spider):
self.store_db(item)
return item
def store_db(self, item):
# self.curr.execute("""insert into tbl_news values (%s, %s, %s, %s)""", (
self.curr.execute("""INSERT into tbl_news(title, url, tanggal) VALUES(%s, %s, %s)""", (
item['title'][0],
item['link_url'],
item['time'],
))
# self.curr.execute("""insert into tbl_news_detail values (%s, %s, %s, %s, %s, %s, %s)""", (
self.curr.execute("""INSERT into tbl_news_detail(title, img_url, time, categories, tags, content) VALUES(%s, %s, %s, %s, %s, %s)""", (
item['title'][0],
item['img'],
item['time'],
item['categories'],
item['tags'],
item['content'],
))
self.conn.commit()
# SQL TO CREATE FOREIGN KEY TBL NEWS DETAIL
# ALTER TABLE `tbl_news` ADD CONSTRAINT `relation_news` FOREIGN KEY (`id`) REFERENCES `tbl_news_detail`(`id`) ON DELETE RESTRICT ON UPDATE RESTRICT
| [
"muhammadsyafr@gmail.com"
] | muhammadsyafr@gmail.com |
e3eca69358ba36f71ba1c9d09d6d5fe37180ae93 | 363283660da27c4f8858baf005df2a6953eee414 | /plugin/relationships/depends_on_setting.py | 7c881a1d618dadb6652fb055d97993cc7ec33e0a | [
"Apache-2.0"
] | permissive | MSO4SC/cloudify-im-extension | 2b543e8f86fe1e4be8f33a291edd18ffc87f1c27 | b8e6dfeb9a7902a38f602735780390a256fb72b7 | refs/heads/master | 2020-03-29T16:00:37.953408 | 2018-11-14T15:13:07 | 2018-11-14T15:13:07 | 150,092,366 | 1 | 1 | Apache-2.0 | 2018-11-22T16:11:07 | 2018-09-24T11:27:20 | Python | UTF-8 | Python | false | false | 1,381 | py | from cloudify import ctx
from cloudify.state import ctx_parameters as inputs
from cloudify.decorators import operation
from cloudify.exceptions import *
from plugin.relationships.utils import *
@operation
def preconfigure(config, simulate, **kwargs):
    """Relationship 'preconfigure' hook.

    Copies the target node instance's partial RADL (its 'settings' runtime
    property) into the source node instance's 'settings' child, so the
    source can build on what the target already defined.

    Args:
        config: operation configuration (unused here; kept for the
            interface Cloudify passes to the hook).
        simulate: when truthy the copy and its logging are skipped; only
            the closing debug line runs.
    """
    if not simulate:
        reset_log_indentation()
        ctx.logger.debug('{0} Preconfigure operation: Begin'.format(get_log_indentation()))
        increase_log_indentation()
        target_radl = get_child(dictionary=ctx.target.instance.runtime_properties, key='settings', required=True)
        ctx.logger.debug('{0} RADL: {1}'.format(get_log_indentation(), str(target_radl)))
        if target_radl:
            source_radl = get_child(ctx.source.instance.runtime_properties, key='settings')
            # (Re)create the source 'settings' child when it is missing or
            # not a dict, so keys can be copied into it.
            if not source_radl or not isinstance(source_radl, dict):
                source_radl = create_child(ctx.source.instance.runtime_properties, key='settings', value={})
            ctx.logger.debug('{0} Copy partial RADL from target to source:'.format(get_log_indentation()))
            increase_log_indentation()
            for key in target_radl:
                # create_child stores the value; its return value (previously
                # bound to an unused local) is not needed.
                create_child(source_radl, key=key, value=target_radl[key])
            decrease_log_indentation()
        decrease_log_indentation()
    ctx.logger.debug('{0} Preconfigure operation: End'.format(get_log_indentation()))
| [
"victorsv@gmail.com"
] | victorsv@gmail.com |
ba0dc0c1dd56f8ee1b451ab59c07c29c3a801095 | 2d54ab7a1e829f89b554d6abc27527fdb38539ff | /inform/migrations/0004_informs.py | 20c7996dc57f36f4fd18bc30a56bf45231e75bbf | [] | no_license | zhtjtcz/Software-Backend | 1c3c73d8863d0d0df9cdfa08e4900f878127ed6c | ca865f1fe75493098050b236634f776f7b97d04d | refs/heads/main | 2023-06-07T06:28:05.345830 | 2021-06-17T16:30:47 | 2021-06-17T16:30:47 | 367,622,524 | 8 | 0 | null | null | null | null | UTF-8 | Python | false | false | 914 | py | # Generated by Django 3.2 on 2021-06-13 11:17
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration: creates the ``Informs`` model.

    Depends on migration '0003_rename_infromid_score_informid' in the
    same app.  Edit with care: migration files must stay consistent with
    the migration history already applied to databases.
    """
    dependencies = [
        ('inform', '0003_rename_infromid_score_informid'),
    ]
    operations = [
        migrations.CreateModel(
            name='Informs',
            fields=[
                # Manually-assigned integer primary key (not an AutoField).
                ('ID', models.IntegerField(primary_key=True, serialize=False)),
                ('type', models.IntegerField()),
                ('name', models.CharField(max_length=100)),
                ('userid', models.IntegerField()),
                ('text', models.CharField(max_length=100)),
                ('isread', models.BooleanField()),
                ('score', models.BooleanField()),
                # Date stored as free text, not a DateField.
                ('date', models.CharField(max_length=100)),
                ('goodid', models.IntegerField()),
                ('demandid', models.IntegerField()),
            ],
        ),
    ]
| [
"18377221@buaa.edu.cn"
] | 18377221@buaa.edu.cn |
c1b8cb4afb317bb4a3ff1178eb82bc5bd069cdda | 8020d31217cf69ebf567b6d3f33185eb708ef64d | /Author disambiguation/author_disambiguation6.py | 4d5d09efa915635f3850c0040c6700553bdc8add | [] | no_license | myusernameforg/Codes-for-Web-of-Science-data | ed2e3d779988133b553b5b00bf0c37e695639ab6 | fc15148d6f4d353b038a57132a1375dc63b06ba7 | refs/heads/master | 2022-01-24T04:00:22.868927 | 2018-04-17T01:51:58 | 2018-04-17T01:51:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,611 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Dec 12 16:24:32 2017
@description:
Author disambiguation use the name of authors.
Searching if the two cited each other at least one.
@author: Lanry Fan
"""
import pymysql
import cited_eachother
db=pymysql.connect('localhost','user','psw','dbname')
#Step-1: seed this round's cluster ids (aid_raw9) from the previous round's (aid_raw8)
cursor_init=db.cursor()
sql_init='update author_init set aid_raw9=aid_raw8'
try:
    print('正在初始化[aid_raw9]...')
    cursor_init.execute(sql_init)
    db.commit()
except Exception as e0:
    # Report the failure and roll the bulk UPDATE back.
    print('e0:',e0)
    db.rollback()
print('初始化完成...')
#step-2:Get lname list from [author_init]
# One pass per distinct last name: pull every candidate author record that
# shares the lname, then (Step-4) merge any pair whose given-name fields
# match AND who have cited each other at least once.  Merged pairs take the
# smaller of the two cluster ids; changed rows are collected in `data` for
# the Step-5 write-back.
cursor=db.cursor()
sql='select count(xuhao),lname from author_init group by lname'
# Candidates are compared on [fname,mname,fname_initial,mname_initial].
data=[]
counter=0
try:
    cursor.execute(sql)
    rs1=cursor.fetchall()
    for row in rs1:
        print('正在处理第',counter+1,'条姓名数据...')
        number=row[0]
        lname=row[1]
        #Step-3:Get the name information list use the lname
        # NOTE(review): lname is interpolated into the SQL via % formatting;
        # a double quote inside a name would break this query -- consider a
        # parameterised query.
        cursor_disab=db.cursor()
        sql_disab='select xuhao,aid_raw9,fname,mname,fname_initial,mname_initial\
        from author_init where lname="%s"' %(lname)
        try:
            cursor_disab.execute(sql_disab)
            rs2=cursor_disab.fetchall()
            lname=lname.replace('||','').strip().lower()
            data_disab=[]
            xuhao=0;aid=0;fname='';mname=''
            fname_ini='';mname_ini=''
            for row2 in rs2:
                xuhao=row2[0]
                aid=row2[1]
                fname=row2[2]
                mname=row2[3]
                fname_ini=row2[4]
                mname_ini=row2[5]
                # Normalise SQL NULLs to empty strings before cleaning.
                if fname is None:
                    fname=''
                if mname is None:
                    mname=''
                if fname_ini is None:
                    fname_ini=''
                if mname_ini is None:
                    mname_ini=''
                # Strip markup/punctuation noise from every name part.
                removestr=['||','/','\\','[',']','"',"'"]
                for rmstr in removestr:
                    fname=fname.replace(rmstr,'')
                    mname=mname.replace(rmstr,'')
                    fname_ini=fname_ini.replace(rmstr,'')
                    mname_ini=mname_ini.replace(rmstr,'')
                fname=fname.strip()
                mname=mname.strip()
                fname_ini=fname_ini.strip()
                mname_ini=mname_ini.strip()
                #The last para of data_disab is the tag whether this record has changed{0:no,1:yes}
                #The [-2]para of data_disab is the raw aid of author
                data_disab.append([xuhao,aid,fname,mname,fname_ini,mname_ini,aid,0])
            length=len(data_disab)
            #Step-4:Author disambiguation use the cited references
            #More than one record
            if length>1:
                for i in range(length-1):
                    for j in range(i+1,length):
                        #Only if the two have different aid,then decide disambiguation
                        if data_disab[i][1]!=data_disab[j][1]:
                            #If the two both have the [fname]
                            if data_disab[i][2]!='' and data_disab[j][2]!='':
                                #If the two both have the [mname],use[fname,mname]
                                if data_disab[i][3]!='' and data_disab[j][3]!='':
                                    #If two [fname,mname] are the same
                                    if data_disab[i][2]==data_disab[j][2] and data_disab[i][3]==data_disab[j][3]:
                                        #IF two author have cited each other at least one,decide them as one author
                                        mycocitation=cited_eachother.co_citation()
                                        tag=mycocitation.is_co_citation(data_disab[i][6],data_disab[j][6])
                                        if tag:
                                            aid1=data_disab[i][1]
                                            aid2=data_disab[j][1]
                                            minaid=min(aid1,aid2)
                                            for item in data_disab:
                                                if item[1]==aid1 or item[1]==aid2:
                                                    item[1]=minaid
                                                    item[7]=1#Tag changed
                                        else:
                                            pass
                                    else:
                                        pass
                                #Either A or B haven't the [mname],use[fname]
                                else:
                                    #If two [fname] are the same
                                    if data_disab[i][2]==data_disab[j][2]:
                                        #IF two author have cited each other at least one,decide them as one author
                                        mycocitation=cited_eachother.co_citation()
                                        tag=mycocitation.is_co_citation(data_disab[i][6],data_disab[j][6])
                                        if tag:
                                            aid1=data_disab[i][1]
                                            aid2=data_disab[j][1]
                                            minaid=min(aid1,aid2)
                                            for item in data_disab:
                                                if item[1]==aid1 or item[1]==aid2:
                                                    item[1]=minaid
                                                    item[7]=1#Tag changed
                                        else:
                                            pass
                                    else:
                                        pass
                            #Either A or B haven't the [fname]
                            else:
                                #If the two both have the [fname_ini]
                                if data_disab[i][4]!='' and data_disab[j][4]!='':
                                    #If the two both have the [mname_ini],use[fname_ini,mname_ini]
                                    if data_disab[i][5]!='' and data_disab[j][5]!='':
                                        #If two [fname_ini,mname_ini] are the same
                                        if data_disab[i][4]==data_disab[j][4] and data_disab[i][5]==data_disab[j][5]:
                                            #IF two author have cited each other at least one,decide them as one author
                                            mycocitation=cited_eachother.co_citation()
                                            tag=mycocitation.is_co_citation(data_disab[i][6],data_disab[j][6])
                                            if tag:
                                                aid1=data_disab[i][1]
                                                aid2=data_disab[j][1]
                                                minaid=min(aid1,aid2)
                                                for item in data_disab:
                                                    if item[1]==aid1 or item[1]==aid2:
                                                        item[1]=minaid
                                                        item[7]=1#Tag changed
                                            else:
                                                pass
                                        else:
                                            pass
                                    #Either A or B haven't the [mname_ini],use[fname_ini]
                                    else:
                                        #If two [fname_ini] are the same
                                        if data_disab[i][4]==data_disab[j][4]:
                                            #IF two author have cited each other at least one,decide them as one author
                                            mycocitation=cited_eachother.co_citation()
                                            tag=mycocitation.is_co_citation(data_disab[i][6],data_disab[j][6])
                                            if tag:
                                                aid1=data_disab[i][1]
                                                aid2=data_disab[j][1]
                                                minaid=min(aid1,aid2)
                                                for item in data_disab:
                                                    if item[1]==aid1 or item[1]==aid2:
                                                        item[1]=minaid
                                                        item[7]=1#Tag changed
                                            else:
                                                pass
                                        else:
                                            pass
                                #Either A or B haven't the [fname_ini],decide them as two individual authors
                                else:
                                    pass
            # Collect only the rows whose cluster id actually changed.
            for row3 in data_disab:
                if row3[7]==1:
                    data.append([row3[0],row3[1],row3[2],row3[3],row3[4],row3[5]])
            #Only one record
            # NOTE(review): this 'else' binds to the for-loop above
            # (for/else), not to 'if length>1'.  With no 'break' in the
            # loop it always runs, so it is a harmless no-op, but it was
            # probably intended as the else of the length check.
            else:
                pass
        except Exception as e1:
            print('e1:',e1)
        counter+=1
except Exception as e:
    print('e:',e)
print('数据处理完成,等待写入数据库...')
#Step-5: write every merged cluster id back to [author_init]
length_data=len(data)
counter1=0
try:
    for row in data:
        # row = [xuhao, merged_aid, fname, mname, fname_ini, mname_ini];
        # both %d placeholders are integers.
        cursor_update=db.cursor()
        sql_update='update author_init set aid_raw9=%d\
        where xuhao=%d' % (row[1],row[0])
        try:
            print('共',length_data,'条记录,正在插入第',counter1+1,'条记录...')
            cursor_update.execute(sql_update)
            db.commit()
        except Exception as e2:
            db.rollback()
            print('e2:',e2)
        counter1+=1
except Exception as e0:
    print('e0:',e0)
print('第六轮匹配完成!')
db.close()
| [
"noreply@github.com"
] | myusernameforg.noreply@github.com |
32fa298487509f6a5acbaae51165cc28c565dd84 | b7e1c8199a850cfc0545553b29efe84967fdb3a0 | /BD Commission Reports Final.py | 67e2ce11fc0cfdba9d217ab781defd9452476f7f | [] | no_license | BBBhot/future-commission-reports | 7fd9c86125236351428ae8d33de054ab911a1d4f | 210aae292899d9d079a28ed6a316d2a588c0e64f | refs/heads/main | 2022-12-28T16:15:24.135925 | 2020-10-07T14:13:36 | 2020-10-07T14:13:36 | 302,057,634 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25,019 | py | import pandas
import locale
import datetime
minDate = datetime.date(2020, 5, 24)
maxDate = datetime.date(2020, 5, 30)
locale.setlocale( locale.LC_ALL, 'en_US.UTF-8' )
def Date(temp):
    """Parse a fixed-position date string such as '05/24/2020'.

    Positions 0-1 hold the month, 3-4 the day, 6-9 the year; the separator
    characters at positions 2 and 5 are ignored, so '/' and '-' both work.
    Returns a datetime.date.
    """
    month, day, year = int(temp[0:2]), int(temp[3:5]), int(temp[6:10])
    return datetime.date(year, month, day)
def commAmount(df):
    """Total commissionable dollars in a rep's summary frame.

    Sums the 'Amount' column directly.  Rejected payments were stored
    upstream as negative amounts, so they net out automatically.
    Iterating the column's values (instead of positional ``df.loc[i, ...]``
    lookups) removes the hidden requirement that the frame carry a clean
    0..n-1 RangeIndex.
    """
    total = 0
    for sale_amount in df['Amount']:
        total += sale_amount
    return total
def commRate(df):
    """Commission-rate tier for a standard sales rep.

    The tier depends on the number of DISTINCT businesses (BID) the rep
    sold; rows whose Type is 'Rejected/Invalid Payment' do not count.
    Tiers: <=2 -> .35, 3-4 -> .4, 5-6 -> .45, 7-8 -> .5, 9 -> .53, 10+ -> .55.

    Replaces the original O(n^2) manual list de-duplication with a set,
    drops the unused counter `c`, and removes an unreachable final `else`
    that would have left `rate` unbound.
    """
    sold_bids = set()
    for sale_type, bid in zip(df['Type'], df['BID']):
        if sale_type != 'Rejected/Invalid Payment':
            sold_bids.add(bid)
    count = len(sold_bids)
    if count <= 2:
        return .35
    if count <= 4:
        return .4
    if count <= 6:
        return .45
    if count <= 8:
        return .5
    if count == 9:
        return .53
    return .55
def manCommRate(df):
    """Flat 50% commission rate applied to sales managers regardless of
    the contents of *df* (accepted for interface symmetry with commRate)."""
    return 0.5
def seniorCommRate(df):
    """Commission-rate tier for a senior rep.

    Net sale count = (# non-rejected rows) - (# 'Rejected/Invalid Payment'
    rows).  Unlike commRate this counts ROWS, not distinct BIDs, and uses a
    richer tier table:
    <=2 -> .35, 3-4 -> .4, 5-6 -> .5, 7-8 -> .55, 9 -> .6, 10+ -> .65.

    Iterates the Type column directly (no positional .loc dependence) and
    removes the original unreachable `else` that left `rate` unbound.
    """
    count = 0
    for sale_type in df['Type']:
        count += -1 if sale_type == 'Rejected/Invalid Payment' else 1
    if count <= 2:
        return .35
    if count <= 4:
        return .4
    if count <= 6:
        return .5
    if count <= 8:
        return .55
    if count == 9:
        return .6
    return .65
def totalComm(amount, rate):
    """Return the commission earned on *amount* at *rate*.

    Note: some call sites pass the arguments as (rate, amount);
    multiplication is commutative, so the result is unaffected.
    """
    return amount * rate
# --- Load the raw payment report and copy it row-by-row into testReport ---
data = pandas.read_csv('C:/Users/jkreuger/Downloads/PaymentReport (18).csv', error_bad_lines=False)
columnNames = ['BID', 'Invoice', 'Business', 'Joined', 'Billed', 'Item', 'Payment', 'Item Amt', 'Payment Date', 'Type', 'Notes', 'Sales', 'Retention Rep' ]
genPayReport = pandas.DataFrame(columns = columnNames)  # created but never filled
testColumns = ['BID', 'Invoice']  # unused leftover
testReport = pandas.DataFrame(columns = columnNames)
i = 0
j = 1
k = -1
# NOTE(review): the loop iterates data.iterrows() but reads cells via the
# manual counter i with .loc -- this assumes `data` has a clean RangeIndex.
for row in data.iterrows():
    sales = data.loc[i, 'Sales']
    # A blank sales rep means a house account.
    if sales == ' ':
        sales = sales.replace(' ', 'House')
    bid = data.loc[i, 'BID']
    invoice = data.loc[i, 'Invoice']
    business = data.loc[i, 'Business']
    joined = data.loc[i, 'Joined']
    billed = data.loc[i, 'Billed']
    item = data.loc[i, 'Item']
    payment = data.loc[i, 'Payment']
    itemAmt = data.loc[i, 'Item Amt']
    payDate = data.loc[i, 'Payment Date']
    type0 = data.loc[i, 'Type']
    notes = data.loc[i, 'Notes']
    #sales = data.loc[i, 'Sales']
    # Source column name carries a trailing space: 'Retention Rep '.
    retRep = data.loc[i, 'Retention Rep ']
    #retRep = 0
    testReport = testReport.append({'BID': bid, 'Invoice': invoice, 'Business': business, 'Joined': joined, 'Billed': billed, 'Item': item, 'Payment': payment, 'Item Amt': itemAmt, 'Payment Date': payDate, 'Type': type0, 'Notes': notes, 'Sales': sales, 'Retention Rep': retRep}, ignore_index = True)
    #genPayReport = genPayReport.append({'BID': bid, 'Invoice': invoice, 'Business': business, 'Joined': joined, 'Billed': billed, 'Item': item, 'Payment': payment, 'Item Amt': itemAmt, 'Payment Date': payDate, 'Type': type0, 'Notes': notes, 'Sales': sales}, ingore_index = True)
    i += 1
    j += 1
    k += 1
testReport.to_excel("C:/Users/jkreuger/Downloads/GenPayReport.xlsx")
#testReport['Sales'].fillna('House')
#testReport = testReport.sort_values(by=['Sales'])
# --- One empty summary frame per sales rep; the loop below fills them ---
sumColumns = ['BID', 'Business', 'Joined', 'Billed', 'Item', 'Payment Date', 'Type', 'Notes', 'Sales', 'Amount']
summaryGonzalez = pandas.DataFrame(columns = sumColumns)
summaryMckethan = pandas.DataFrame(columns = sumColumns)
summaryWest = pandas.DataFrame(columns = sumColumns)
summaryDeLaCruz = pandas.DataFrame(columns = sumColumns)
summaryBrown = pandas.DataFrame(columns = sumColumns)
summaryPellak = pandas.DataFrame(columns = sumColumns)
summaryMcCance = pandas.DataFrame(columns = sumColumns)
summaryMcAdams = pandas.DataFrame(columns = sumColumns)
summaryGSmith = pandas.DataFrame(columns = sumColumns)
summaryDudo = pandas.DataFrame(columns = sumColumns)
summaryVonVogt = pandas.DataFrame(columns = sumColumns)
summaryDawson = pandas.DataFrame(columns = sumColumns)
summaryAtkins = pandas.DataFrame(columns = sumColumns)
summaryJoffrion = pandas.DataFrame(columns = sumColumns)
summaryRifenburg = pandas.DataFrame(columns = sumColumns)
summaryAcosta = pandas.DataFrame(columns = sumColumns)
summaryFox = pandas.DataFrame(columns = sumColumns)
summaryYokom = pandas.DataFrame(columns = sumColumns)
summaryBononcini = pandas.DataFrame(columns = sumColumns)
summaryRobberson = pandas.DataFrame(columns = sumColumns)
summaryBacon = pandas.DataFrame(columns = sumColumns)
summaryChevere = pandas.DataFrame(columns = sumColumns)
summarySSmith = pandas.DataFrame(columns = sumColumns)
summaryLewis = pandas.DataFrame(columns = sumColumns)
summaryColeman = pandas.DataFrame(columns = sumColumns)
summaryOverton = pandas.DataFrame(columns = sumColumns)
summaryWagoner = pandas.DataFrame(columns = sumColumns)
summaryFerrigno = pandas.DataFrame(columns = sumColumns)
summaryBelford = pandas.DataFrame(columns = sumColumns)
summaryBarlow = pandas.DataFrame(columns = sumColumns)
summaryYsasi = pandas.DataFrame(columns = sumColumns)
# --- Route each qualifying payment row into its rep's summary frame ---
# A lookup table replaces the original 31-branch if/elif chain; spelling
# variants (e.g. 'Dale Mckethan'/'Dale McKethan') map to the same frame.
repKeyByName = {
    'Brandi Gonzalez': 'Gonzalez',
    'Dale Mckethan': 'Mckethan', 'Dale McKethan': 'Mckethan',
    'Damon West': 'West',
    'Danielle De La Cruz': 'DeLaCruz',
    'Donna Brown': 'Brown',
    'Donna Pellak': 'Pellak',
    'Elaine McCance': 'McCance',
    'Galadriel McAdams': 'McAdams',
    'Gary Smith': 'GSmith',
    'Jack Dudo': 'Dudo',
    'Janice Von Vogt': 'VonVogt',
    'Jessica Dawson': 'Dawson',
    'Jonathon Atkins': 'Atkins', 'Jonathan Atkins': 'Atkins',
    'Kelly Overton': 'Overton',
    'Katie Joffrion': 'Joffrion',
    'Kim Rifenburg': 'Rifenburg',
    'Mario Acosta': 'Acosta',
    'Meghan Fox': 'Fox',
    'Michael Yokom': 'Yokom', 'Mike Yokom': 'Yokom',
    'Michelle Bononcini': 'Bononcini',
    'Monica Robberson': 'Robberson',
    'Paul Bacon': 'Bacon',
    'Richard Chevere': 'Chevere',
    'Rose Wagoner': 'Wagoner',
    'Shawn Ferrigno': 'Ferrigno',
    'Sheila Belford': 'Belford',
    'Stephen Smith': 'SSmith',
    'Tony Barlow': 'Barlow',
    'Von Ysasi': 'Ysasi',
    'Tori Lewis': 'Lewis',
    'Wanda Coleman': 'Coleman',
}
repFrames = {
    'Gonzalez': summaryGonzalez, 'Mckethan': summaryMckethan, 'West': summaryWest,
    'DeLaCruz': summaryDeLaCruz, 'Brown': summaryBrown, 'Pellak': summaryPellak,
    'McCance': summaryMcCance, 'McAdams': summaryMcAdams, 'GSmith': summaryGSmith,
    'Dudo': summaryDudo, 'VonVogt': summaryVonVogt, 'Dawson': summaryDawson,
    'Atkins': summaryAtkins, 'Overton': summaryOverton, 'Joffrion': summaryJoffrion,
    'Rifenburg': summaryRifenburg, 'Acosta': summaryAcosta, 'Fox': summaryFox,
    'Yokom': summaryYokom, 'Bononcini': summaryBononcini, 'Robberson': summaryRobberson,
    'Bacon': summaryBacon, 'Chevere': summaryChevere, 'Wagoner': summaryWagoner,
    'Ferrigno': summaryFerrigno, 'Belford': summaryBelford, 'SSmith': summarySSmith,
    'Barlow': summaryBarlow, 'Ysasi': summaryYsasi, 'Lewis': summaryLewis,
    'Coleman': summaryColeman,
}
# Only these line items earn commission.
COMMISSIONABLE_ITEMS = ('Dues (New)', 'Additional Business(es)', 'Logo Package', 'Additional Location(s)')
for i in range(len(testReport)):
    joined = testReport.loc[i, 'Joined']
    if joined == ' ':
        continue  # no join date recorded
    joinDate = Date(joined)
    # NOTE(review): boundary dates are EXCLUDED (<= / >=); confirm the pay
    # period really is strictly between minDate and maxDate.
    if joinDate <= minDate or joinDate >= maxDate:
        continue
    type0 = testReport.loc[i, 'Type']
    if type0 == 'Write Off':
        continue
    # 'Item Amt' is a currency string like '$1,234.56': drop the '$' and
    # parse the thousands separator with the locale set at module top.
    amt = locale.atof(testReport.loc[i, 'Item Amt'][1:])
    item = testReport.loc[i, 'Item']
    if item not in COMMISSIONABLE_ITEMS:
        continue
    if type0 == 'Rejected/Invalid Payment':
        amt = 0 - amt  # rejected payments claw the amount back
    salesRep = testReport.loc[i, 'Sales']
    repKey = repKeyByName.get(salesRep)
    if repKey is None:
        continue  # 'House' and unrecognized reps earn no commission row
    repFrames[repKey] = repFrames[repKey].append({'BID': testReport.loc[i, 'BID'], 'Business': testReport.loc[i, 'Business'], 'Joined': joined, 'Billed': testReport.loc[i, 'Billed'], 'Item': item, 'Payment Date': testReport.loc[i, 'Payment Date'], 'Type': type0, 'Notes': testReport.loc[i, 'Notes'], 'Sales': salesRep, 'Amount': amt}, ignore_index=True)
# Re-expose the updated frames under their original module-level names so
# all downstream code keeps working unchanged.
summaryGonzalez = repFrames['Gonzalez']
summaryMckethan = repFrames['Mckethan']
summaryWest = repFrames['West']
summaryDeLaCruz = repFrames['DeLaCruz']
summaryBrown = repFrames['Brown']
summaryPellak = repFrames['Pellak']
summaryMcCance = repFrames['McCance']
summaryMcAdams = repFrames['McAdams']
summaryGSmith = repFrames['GSmith']
summaryDudo = repFrames['Dudo']
summaryVonVogt = repFrames['VonVogt']
summaryDawson = repFrames['Dawson']
summaryAtkins = repFrames['Atkins']
summaryOverton = repFrames['Overton']
summaryJoffrion = repFrames['Joffrion']
summaryRifenburg = repFrames['Rifenburg']
summaryAcosta = repFrames['Acosta']
summaryFox = repFrames['Fox']
summaryYokom = repFrames['Yokom']
summaryBononcini = repFrames['Bononcini']
summaryRobberson = repFrames['Robberson']
summaryBacon = repFrames['Bacon']
summaryChevere = repFrames['Chevere']
summaryWagoner = repFrames['Wagoner']
summaryFerrigno = repFrames['Ferrigno']
summaryBelford = repFrames['Belford']
summarySSmith = repFrames['SSmith']
summaryBarlow = repFrames['Barlow']
summaryYsasi = repFrames['Ysasi']
summaryLewis = repFrames['Lewis']
summaryColeman = repFrames['Coleman']
#print(totalComm(summaryGSmith))
#print(summaryGSmith.head(10))
# --- Commission summary sheet: one row per rep ---
commissionColumns = ['Rep', 'Office', 'Commissionable Amount', 'Commission Rate', 'Earned Commission', 'COVID-19 Minimum']
commissionBD = pandas.DataFrame(columns = commissionColumns)
# (rep name as printed on the report, office, source frame, rate function).
# Dawson and Barlow are managers (flat manCommRate); West uses the senior
# tier table.  Dale Mckethan's row was commented out in the original.
# NOTE(review): 'Stephern Smith' reproduces the original spelling here,
# while the dispatch loop uses 'Stephen Smith' -- confirm which is intended.
# The 'COVID-19 Minimum' column is declared but never populated (NaN).
commissionSpecs = [
    ('Damon West', 'Fort Worth', summaryWest, seniorCommRate),
    ('Donna Brown', 'Austin', summaryBrown, commRate),
    ('Donna Pellak', 'Austin', summaryPellak, commRate),
    ('Elaine McCance', 'San Antonio', summaryMcCance, commRate),
    ('Gary Smith', 'Austin', summaryGSmith, commRate),
    ('Jack Dudo', 'Austin', summaryDudo, commRate),
    ('Janice Von Vogt', 'Fort Worth', summaryVonVogt, commRate),
    ('Jessica Dawson', 'Austin', summaryDawson, manCommRate),
    ('Jonathon Atkins', 'Fort Worth', summaryAtkins, commRate),
    ('Kelly Overton', 'Fort Worth', summaryOverton, commRate),
    ('Kim Rifenburg', 'Austin', summaryRifenburg, commRate),
    ('Mario Acosta', 'Austin', summaryAcosta, commRate),
    ('Michelle Bononcini', 'San Antonio', summaryBononcini, commRate),
    ('Michael Yokom', 'San Antonio', summaryYokom, commRate),
    ('Richard Chevere', 'Fort Worth', summaryChevere, commRate),
    ('Rose Wagoner', 'Fort Worth', summaryWagoner, commRate),
    ('Shawn Ferrigno', 'San Antonio', summaryFerrigno, commRate),
    ('Sheila Belford', 'Fort Worth', summaryBelford, commRate),
    ('Stephern Smith', 'Fort Worth', summarySSmith, commRate),
    ('Tony Barlow', 'Fort Worth', summaryBarlow, manCommRate),
    ('Von Ysasi', 'Austin', summaryYsasi, commRate),
    ('Wanda Coleman', 'Austin', summaryColeman, commRate),
]
for repName, office, repFrame, rateFn in commissionSpecs:
    amount = commAmount(repFrame)
    rate = rateFn(repFrame)  # computed once (the original called it twice per row)
    # totalComm just multiplies its two arguments, so the original
    # (rate, amount) ordering is preserved without changing the result.
    commissionBD = commissionBD.append({'Rep': repName, 'Office': office, 'Commissionable Amount': amount, 'Commission Rate': rate, 'Earned Commission': totalComm(rate, amount)}, ignore_index=True)
# --- Office rosters (used only for the console diagnostics below) ---
# NOTE(review): summaryChevere has a commission row above but appears in no
# roster list here -- confirm that is intentional.
newSalesList = [summaryMckethan, summaryWest, summaryBrown, summaryPellak, summaryMcCance, summaryGSmith, summaryDudo, summaryVonVogt, summaryDawson, summaryAtkins, summaryOverton, summaryRifenburg, summaryAcosta, summaryBononcini, summaryYokom, summaryChevere, summaryWagoner, summaryFerrigno, summaryBelford, summarySSmith, summaryBarlow, summaryYsasi, summaryColeman]
austinList = [summaryBrown, summaryPellak, summaryGSmith, summaryDudo, summaryDawson, summaryRifenburg, summaryAcosta, summaryYsasi, summaryColeman]
fortWorthList = [summaryMckethan, summaryWest, summaryVonVogt, summaryAtkins, summaryOverton, summaryBelford, summarySSmith, summaryBarlow]
sanAntonioList = [summaryMcCance, summaryBononcini, summaryYokom, summaryFerrigno]
masterList = [austinList, fortWorthList, sanAntonioList]
writer = pandas.ExcelWriter('C:/Users/jkreuger/Downloads/report test.xlsx', engine='xlsxwriter')
reportList = []
# Console diagnostics: every roster frame with its amount/rate/commission.
# Note this always prints commRate, even for frames whose summary row used
# manCommRate/seniorCommRate.
for i in masterList:
    for j in i:
        print(j)
        print(commAmount(j), commRate(j), totalComm(commRate(j), commAmount(j)))
        reportList.append(j)
managerColumns = ['Item', 'Type', 'Notes', 'Processed', 'Sales', 'Item Amt']
# Manager frames are created but never filled or written out.
austinManager = pandas.DataFrame(columns = managerColumns)
fortWorthManager = pandas.DataFrame(columns = managerColumns)
sanAntonioManager = pandas.DataFrame(columns = managerColumns)
# Concatenate all rep summary frames into one 'New' sheet.
df = summaryMckethan.append(summaryWest)
df = df.append(summaryBrown)
df = df.append(summaryPellak)
df = df.append(summaryMcCance)
df = df.append(summaryGSmith)
df = df.append(summaryDudo)
df = df.append(summaryVonVogt)
df = df.append(summaryDawson)
df = df.append(summaryAtkins)
df = df.append(summaryOverton)
df = df.append(summaryRifenburg)
df = df.append(summaryAcosta)
df = df.append(summaryBononcini)
df = df.append(summaryYokom)
df = df.append(summaryChevere)
df = df.append(summaryWagoner)
df = df.append(summaryFerrigno)
df = df.append(summaryBelford)
df = df.append(summarySSmith)
df = df.append(summaryBarlow)
df = df.append(summaryYsasi)
df = df.append(summaryColeman)
commissionBD.to_excel(writer, sheet_name = 'Summary', index = False)
df.to_excel(writer, sheet_name='New', index = False)
writer.save()
#summaryBelford.to_excel(writer, sheet_name = 'Belford')
#print(commissionBD)
#print(summaryDudo)
#print(commAmount(summaryDudo), commRate(summaryDudo), totalComm(commRate(summaryDudo), commAmount(summaryDudo)))
# The triple-quoted block below is disabled code kept as a string literal.
"""
writer = pandas.ExcelWriter('C:/Users/jkreuger/Downloads/wb test.xlsx', engine='xlsxwriter')
testReport.to_excel(writer, sheet_name='Sheet1')
commissionBD.to_excel(writer, sheet_name='Sheet2')
summarySSmith.to_excel(writer, sheet_name='Sheet3')
"""
| [
"noreply@github.com"
] | BBBhot.noreply@github.com |
7ba765617c732426446d0c13e8c0adc258575241 | 849faf896a1e3f496821fc60886e2e5b3f9bac91 | /BatteryLog_3.py | 8dda8b44036745aaad87f0743fedbd51055ce5c8 | [] | no_license | MeltryllisMew/mzi | c56c27c4c65c8cf842165211ef29433b8ed46c48 | 202df49dee46bb392cc099526eeaa7cc07a935e5 | refs/heads/master | 2023-04-28T19:09:23.556153 | 2021-05-09T09:27:02 | 2021-05-09T09:27:02 | 260,984,734 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,800 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import re
import datetime
import tkinter
import tkinter.filedialog
def findFile(path):
    """Walk *path* and collect battery-stats log files.

    Returns a tuple of two lists of full paths:
      - every file whose name ends with "_dumpsys_batterystats.log"
      - the subset of those whose name starts with today's local date
        ("YYYY-MM-DD")
    """
    all_logs = []
    todays_logs = []
    today_prefix = datetime.datetime.now().strftime("%Y-%m-%d")
    for dirpath, _dirnames, filenames in os.walk(path):
        for name in filenames:
            if not name.endswith("_dumpsys_batterystats.log"):
                continue
            full_path = os.path.join(dirpath, name)
            all_logs.append(full_path)
            if name.startswith(today_prefix):
                todays_logs.append(full_path)
    return all_logs, todays_logs
def strToTime(end):
    """Convert a duration string such as "3h25m10s" to total seconds."""
    hours, _, remainder = end.partition('h')
    minutes, _, secs = remainder.partition('m')
    # secs still carries the trailing 's'; drop it before converting.
    return int(hours) * 3600 + int(minutes) * 60 + int(secs[:-1])
def readLog(file):
    """Parse one *_dumpsys_batterystats.log file, print a battery-drain
    summary to stdout, and append a formatted block to out.txt.

    The start time/charge come from the RESET:TIME line inside the log;
    the end time is recovered from the file name itself (first 19 chars,
    "YYYY-MM-DD_HH_MM_SS" with '_' normalized to '-').
    """
    with open(file, encoding='utf-8')as f:
        raw = f.readlines()
    for i in range(len(raw)):
        if re.search(r"RESET:TIME:", raw[i]):
            # Last 19 chars of the line (minus newline) hold the reset timestamp.
            startTime = (datetime.datetime.strptime(raw[i][-20:-1], "%Y-%m-%d-%H-%M-%S")).strftime("%Y-%m-%d %H:%M:%S")
            print("开始时间:" + startTime)
            # The battery level is the first 3-digit token on the following line.
            startBattery = re.findall(r' (\d{3}) ', raw[i+1])[0]
            print("初始电量:" + startBattery)
        elif re.search(r'Per-PID Stats:', raw[i]):
            #end = re.findall(r'\d+h\d+m\d+s', raw[i-2])[0]
            # Final battery level sits two lines above the Per-PID Stats marker.
            endBattery = re.findall(r' (\d{3}) ', raw[i-2])[0]
            # Treat a reading of 001 as fully drained.
            if endBattery == '001':
                endBattery = '000'
            #usedSeconds = strToTime(end)
            #stats = strToTime(end) / 3600
            #print("耗电时长:" + str(stats))
            print("剩余电量:" + endBattery)
            break
    # The dead block below is an abandoned approach (original note says the
    # file name already encodes the end time, so no duration math is needed).
    '''
    写完才发现文件名就是结束时间,不用算…
    begin_hour = datetime.datetime.strptime(startTime, "%Y-%m-%d-%H-%M-%S")
    end_hour = (begin_hour + datetime.timedelta(seconds = usedSeconds)).strftime("%Y-%m-%d %H:%M:%S")
    print("结束时间:" + end_hour)
    '''
    # End time parsed from the file name; '_' separators normalized to '-'.
    endTime = (datetime.datetime.strptime(os.path.split(file)[-1][:19].replace('_', '-'), "%Y-%m-%d-%H-%M-%S")).strftime("%Y-%m-%d %H:%M:%S")
    begin_hour = datetime.datetime.strptime(startTime, "%Y-%m-%d %H:%M:%S")
    end_hour = datetime.datetime.strptime(os.path.split(file)[-1][:19].replace('_', '-'), "%Y-%m-%d-%H-%M-%S")
    usedSeconds_2 = (end_hour - begin_hour).total_seconds()
    # Elapsed time in hours.
    stats_2 = usedSeconds_2 / 3600
    print("耗电时长:" + str(stats_2))
    print("结束时间:" + endTime)
    # 20 mAh/hour appears to be the expected ("standard") drain rate -- TODO confirm.
    standardBattery = stats_2 * 20
    print("标准耗电量:" + str(standardBattery))
    used = int(startBattery) - int(endBattery)
    # Percent consumed converted to mAh assuming a 3300 mAh battery.
    usedBattery = used * 3300 / 100
    print("实际耗电量:" + str(usedBattery))
    version = ''
    # Best effort: read the build version from a sibling versionNumber file.
    try:
        v = '//'.join(os.path.split(file)[0:-1]) + '//versionNumber'
        with open(v)as h:
            text = h.read()
            version = text.split('_')[0]
    except:
        pass
    # Append this log's summary block to the shared report file.
    with open('out.txt', 'a', encoding='utf-8')as g:
        g.write(os.path.split(file)[-1][:16].replace('_', '-') + ' ' + version + '\n')
        g.write("开始时间:" + startTime + '\n')
        g.write("结束时间:" + endTime + '\n')
        g.write("耗电时长:" + str(stats_2) + '\n')
        g.write("初始电量:" + startBattery + '\n')
        g.write("剩余电量:" + endBattery + '\n')
        g.write("耗费电量:" + str(used) + '\n')
        g.write("实际耗电:" + str(usedBattery) + '\n')
        g.write("标准耗电:" + str(standardBattery) + '\n\n')
        g.write("=====================\n\n")
def selectPath():
    """Ask the user for a folder, process every battery log inside it,
    and open the resulting out.txt report.
    """
    path = tkinter.filedialog.askdirectory()
    upcoming, yesterdayTest = findFile(path)
    # Truncate any previous report before readLog() appends to it.
    with open('out.txt', 'w')as f:
        pass
    for i in upcoming:
        readLog(i)
    # Dead interactive variant: let the user choose between "today only"
    # and "all logs" on the console.
    '''
    print("BatteryLog总计%d个\n其中日期今天的%d个\n\n直接回车计算日期为今天的log\n输入任意字符回车计算全部log" % (len(upcoming), len(yesterdayTest)))
    temp = input()
    if temp:
        for i in upcoming:
            readLog(i)
    else:
        for j in yesterdayTest:
            readLog(j)
    '''
    # Only open the report if at least one log was found (Windows-only API).
    if upcoming:
        os.startfile('out.txt')
    #input('退出')
def main():
    """Build and run the minimal Tk window for picking a log folder."""
    root = tkinter.Tk()
    root.title('BatteryLog')
    root.geometry('300x200')
    prompt = tkinter.Label(root, text='计算该文件夹内的BatteryLog', font=('宋体', 14))
    prompt.pack(padx=5, pady=25)
    picker = tkinter.Button(root, command=selectPath, text='选择文件夹', font=('宋体', 16), width=15, height=10)
    picker.pack(padx=20, pady=30)
    root.mainloop()

if __name__ == '__main__':
    main()
| [
"noreply@github.com"
] | MeltryllisMew.noreply@github.com |
2b271423c0c7d2c1832e51e64884c5716ec4fae9 | 1af1cfb3969b1dfdf45a4f4fba53f7606e5ac2e2 | /2) DataStructures/Week3-Files/upper.py | a71eeee2747f24c2e6e1c5e59abdc97f1a25e6e4 | [] | no_license | lom360/python-programming | cf1c53055a1097167e0eeccc7a9075a3e301d285 | c4dc24ea043d3f71507150637e201f2aa1393ffd | refs/heads/master | 2021-10-10T19:44:14.720009 | 2019-01-16T04:29:20 | 2019-01-16T04:29:20 | 104,544,374 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 203 | py | fname = input("Enter file name: ")
# Open the user-supplied file (fname is read via input() above), bailing
# out with a friendly message if it cannot be read.  Catch OSError
# specifically (covers FileNotFoundError, PermissionError, ...) instead of
# a bare except, which would also swallow KeyboardInterrupt/SystemExit and
# hide real programming errors such as NameError.
try:
    fh = open(fname)
except OSError:
    print("File does not exist")
    quit()
# Echo each line of the file in upper case, without trailing whitespace.
for line in fh:
    uppercase_line = line.rstrip().upper()
    print(uppercase_line)
| [
"lom360@mail.fresnostate.edu"
] | lom360@mail.fresnostate.edu |
135a39176baf66276303b33c1c9f55d9c7dded39 | 366c06f0a5395ccd12d3c7b777a1615d567bd163 | /merge_sort.py | fe31b35b4daf772091489a381b905a53dc6adfaf | [
"MIT"
] | permissive | medisean/python-algorithm | 86867ae8460157ed03945f4f24d548900317dd49 | 9eb5544917bc174f3fe79256240ed642282c339b | refs/heads/master | 2021-01-23T21:55:19.323816 | 2017-02-27T06:43:39 | 2017-02-27T06:43:39 | 83,114,256 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 606 | py | '''
Merge sort algorithm in python. It's a Stable sort.
Time complexity: O(nlogn)
Space complexity: O(n) auxiliary (the merge step builds new lists)
'''
def merge_sort(lists):
    """Sort *lists* with top-down merge sort and return a new sorted list.

    The input list is not modified.  Runs in O(n log n) time and needs
    O(n) auxiliary space for the merged sublists.
    """
    if len(lists) <= 1:
        return lists
    # Floor division keeps the midpoint an int without the float round-trip
    # that int(len(lists)/2) performed.
    num = len(lists) // 2
    left = merge_sort(lists[:num])
    right = merge_sort(lists[num:])
    return merge(left, right)
def merge(left, right):
    """Merge two sorted lists into one sorted list.

    Compares with <= so that, on ties, elements from *left* are taken
    first.  This is what makes the overall merge sort stable, as the
    module docstring advertises; the previous strict < comparison took
    the right-hand element first on equal keys, breaking stability.
    """
    l, r = 0, 0
    result = []
    while l < len(left) and r < len(right):
        if left[l] <= right[r]:
            result.append(left[l])
            l += 1
        else:
            result.append(right[r])
            r += 1
    # At most one of these slices is non-empty at this point.
    result += right[r:]
    result += left[l:]
    return result
if __name__ == '__main__':
print(merge_sort([2, 3, 1, 5, 4])) | [
"liangmingzou@163.com"
] | liangmingzou@163.com |
157140fde9ce354ada3a80ee3b931e9b01497b01 | 7ac2ca7c952d40844c6576c18c439b426cff0152 | /2_planet_model.py | aeb54d34157da6c229c7d1056da8ac86062deb49 | [] | no_license | anyakors/MEPHI | 8c6067183e66bfc254a7eb38eb1f827d13c63c6d | 7a1dbdb7d714db11ef8fedf08a17142f333c092c | refs/heads/master | 2021-05-27T21:04:47.995510 | 2014-05-11T09:23:15 | 2014-05-11T09:23:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,117 | py | import numpy as np
import matplotlib
import matplotlib.pyplot as plt
class planet:
    """A point mass: mass plus position/velocity histories kept in numpy arrays."""

    def __init__(self, nm, nx, ny, nvx, nvy):
        # Each history array starts with only the initial state; spaceTime
        # appends one entry per recorded step.  dtype=float matches the
        # float64 arrays the zeros-then-assign initialisation produced.
        self.x = np.array([nx], dtype=float)
        self.y = np.array([ny], dtype=float)
        self.vx = np.array([nvx], dtype=float)
        self.vy = np.array([nvy], dtype=float)
        self.m = nm
class spaceTime:
    """Container for a set of planets plus a naive Euler-style integrator.

    Units are arbitrary: the gravitational constant is implicitly 1, so the
    magnitude below is m_i*m_j / r^3; multiplying it by the coordinate
    difference yields properly scaled inverse-square force components.
    """
    def __init__(self):
        self.list=[]
    def add(self,p):
        # Register another planet in the simulation.
        self.list.append(p)
    def __getitem__(self,i):
        # Allow indexing the simulation directly: A[i] -> i-th planet.
        return self.list[i]
    def timeStep(self,tau):
        """Advance every planet by one Euler step of size tau, in place.

        Only the latest history entry ([-1]) of each array is modified.
        Note: planets are updated sequentially, so later planets see the
        already-moved positions of earlier ones, and each planet's position
        is advanced (using its old velocity) before its velocity is updated.
        """
        for i in range(0,len(self.list)):
            Fx=0
            Fy=0
            F=0
            for j in range(0,len(self.list)):
                if (i!=j):
                    # F = m_i*m_j / r^3; the extra power of r cancels when
                    # multiplied by (dx, dy) below.
                    F=self[i].m*self[j].m/((0.0+(self[i].x[-1]-self[j].x[-1])**2+(self[i].y[-1]-self[j].y[-1])**2)**(1.5))
                    Fx=Fx-F*(self[i].x[-1]-self[j].x[-1])
                    Fy=Fy-F*(self[i].y[-1]-self[j].y[-1])
            self[i].x[-1]=self[i].x[-1]+tau*self[i].vx[-1]
            self[i].y[-1]=self[i].y[-1]+tau*self[i].vy[-1]
            self[i].vx[-1]=self[i].vx[-1]+tau*Fx/(self[i].m+0.0)
            self[i].vy[-1]=self[i].vy[-1]+tau*Fy/(self[i].m+0.0)
    def shift(self,deltaT):
        """Record one trajectory sample, then integrate forward by deltaT.

        Each planet's history arrays grow by one entry (a copy of the last
        state); that new entry is then refined by n = deltaT/0.01 sub-steps.
        """
        for i in range(0,len(self.list)):
            self[i].x=np.append(self[i].x,self[i].x[-1])
            self[i].y=np.append(self[i].y,self[i].y[-1])
            self[i].vx=np.append(self[i].vx,self[i].vx[-1])
            self[i].vy=np.append(self[i].vy,self[i].vy[-1])
        tau=0.01
        n=int(deltaT/tau)
        for j in range(0,n):
            #for i in range(0,len(self.list)):
            self.timeStep(tau)
# Two-body demo: a light planet (m=10) orbiting/interacting with a heavy
# one (m=1000), sampled once per time unit for n steps.
A=spaceTime()
A.add(planet(10,100,0,200,10))
A.add(planet(1000,0,10,0,30))
#A.add(planet(500,500,500,0,0))
n=1000
# Each shift(1) records one sample and performs 100 internal sub-steps.
for i in range(0,n):
    A.shift(1)
# Plot both trajectories (blue = light body, red = heavy body).
fig, ax = plt.subplots()
ax.plot(A[0].x, A[0].y, 'b-')
ax.plot(A[1].x, A[1].y, 'r-')
#ax.plot(A[2].x, A[2].y, 'g-')
plt.show()
| [
"johndoe@example.com"
] | johndoe@example.com |
f47d699a3740bb2adf9a4b0d4de640ce00b693d2 | f7110aaab742fc92179302c5874691078ed05158 | /django_intro/First_Django_Project/your_app_name_here/views.py | 5445d8f0cdae6d3ecf905d47d4e1203d4fe3da2d | [] | no_license | Moha327/python_extra | 0f9252a46a652ffe83d97cd0d6906a1835c2abbf | a3e1b31831578484c651d76bfd01173fe9d6eb10 | refs/heads/master | 2023-05-23T14:03:21.962212 | 2021-06-16T13:50:06 | 2021-06-16T13:50:06 | 377,511,352 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,230 | py | from django.shortcuts import render, HttpResponse
# return HttpResponse("this is the equivalent of @app.route('/')!")
def index(request):
    """Stub index view; template rendering is not wired up yet."""
    placeholder = "placeholder to display a new form to create a new blog"
    return HttpResponse(placeholder)
def root_method(request):
    """Stub root view returning a plain-text marker string."""
    body = "String response from root_method"
    return HttpResponse(body)
def another_method(request):
    """Redirect to the blog index.

    Fix: ``redirect`` is never imported at module level in this file, so
    calling this view raised NameError; import it locally to keep the fix
    self-contained.  NOTE: this name is shadowed by a later definition of
    ``another_method`` further down the module.
    """
    from django.shortcuts import redirect
    return redirect("/blog")
def new(request):
    """Stub 'new blog' form view (shadowed by a later def of the same name)."""
    text = "placeholder to display a new form to create a new blog"
    return HttpResponse(text)
def create(request):
    """Stub blog-creation view."""
    text = "placeholder to display a new form to create a new blog"
    return HttpResponse(text)
def another_method(request):
    """Redirect to the blog-creation page.

    Fix: ``redirect`` is never imported at module level in this file, so
    calling this view raised NameError; import it locally.  This definition
    shadows the earlier ``another_method`` above.
    """
    from django.shortcuts import redirect
    return redirect("/blog/create")
def new(request):
    """Stub 'new blog' form view (this definition shadows the earlier one)."""
    text = "placeholder to display a new form to create a new blog"
    return HttpResponse(text)
def show(request, number):
    """Stub detail view for blog *number*."""
    body = f"placeholder to display blog number {number}"
    return HttpResponse(body)
def edit(request, number):
    """Stub edit view for blog *number*."""
    body = f"placeholder to edit blog number {number}"
    return HttpResponse(body)
def root(request):
    """Stub root view; returns the same marker string as root_method."""
    body = "String response from root_method"
    return HttpResponse(body)
def destroy(request, number):
    """Redirect to the blog list after a (stub) delete of blog *number*.

    Fix: ``redirect`` is never imported at module level in this file, so
    calling this view raised NameError; import it locally.  *number* is
    currently unused by the stub but kept for URLconf compatibility.
    """
    from django.shortcuts import redirect
    return redirect("/blogs")
def redirected_method(request):
    """Return a small fixed JSON payload.

    Fix: ``JsonResponse`` is never imported in this file, so this view
    raised NameError at call time; import it locally.
    """
    from django.http import JsonResponse
    data = {
        "title": "blogs",
        "content": "content-hhhhhh"
    }
    return JsonResponse(data)
"m7amad9595@outlook.com"
] | m7amad9595@outlook.com |
009b42576420f47a183322ed92f4a0f4305f11cc | 578ccd29d176543df9c83bf6ec91a1c5646332e8 | /backend/app/__init__.py | cd64be71ad057bb7d38cf45bf7261e09f46b1f30 | [] | no_license | visheshdvn/Blockchain-and-Cryptocurrency | d651faa628a13ebedb5d0117308944ad98b5ef90 | 9f542e486a4ac99d9b95aa7f5bac25d2c4a850c6 | refs/heads/master | 2023-05-31T11:26:18.426633 | 2020-07-11T17:56:41 | 2020-07-11T17:56:41 | 273,066,703 | 0 | 0 | null | 2023-05-22T23:58:20 | 2020-06-17T19:58:23 | Python | UTF-8 | Python | false | false | 1,129 | py | from flask import Flask, jsonify
import os, random, requests
from backend.blockchain.blockchain import Blockchain
from backend.pubsub import PubSub
# Flask application plus the process-wide blockchain instance and the
# pub/sub channel used to broadcast newly mined blocks to peers.
app = Flask(__name__)
blockchain = Blockchain()
pubsub = PubSub(blockchain)
@app.route('/')
def default():
    """Landing route: plain-text greeting."""
    greeting = 'Welcome to the blockchain'
    return greeting
@app.route('/blockchain')
def route_blockchain():
    """Serve the full local chain as JSON."""
    chain_json = blockchain.to_json()
    return jsonify(chain_json)
@app.route('/blockchain/mine')
def route_blockchain_mine():
    """Mine a block with stub transaction data, broadcast it to peers,
    and return the new block as JSON."""
    blockchain.add_block('stubbed_transaction_data')
    mined_block = blockchain.chain[-1]
    pubsub.broadcast_block(mined_block)
    return jsonify(mined_block.to_json())
# The first node listens on ROOT_PORT; peer processes (PEER=True in the
# environment) pick a random port and bootstrap their chain from the root.
ROOT_PORT = 5000
PORT = ROOT_PORT
if os.environ.get('PEER') == 'True':
    PORT = random.randint(5001, 6000)
    # Fetch the root node's chain and try to adopt it locally.
    result = requests.get(f'http://localhost:{ROOT_PORT}/blockchain')
    result_blockchain = Blockchain.from_json(result.json())
    try:
        blockchain.replace_chain(result_blockchain.chain)
        print('\n -- Successfully synchronized the local chain.')
    except Exception as e:
        print(f'\n -- Error synchronizing: {e}')
app.run(port=PORT)
| [
"vishesh.dh1@gmail.com"
] | vishesh.dh1@gmail.com |
4eb52a424e73dd3a52be04cb5261f666c98948c0 | 7786d4c2f43a78f34ebc95bd0607842eae71c30c | /wordcount/views.py | 76348506d8cf2638f4711ca4d78288eb1db65f05 | [] | no_license | kstevens67/wordcount-project | c5a235f2f4279ffb6fd56f69a6a869e1f9c63b88 | dcf6f8a80032e487a2c0ae9b5f059c95b70f1c0d | refs/heads/master | 2020-05-15T19:47:00.383128 | 2019-04-20T23:40:12 | 2019-04-20T23:40:12 | 182,465,637 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 669 | py | from django.http import HttpResponse
from django.shortcuts import render
import operator
def home(request):
    """Render the landing page with the word-count input form."""
    template = 'home.html'
    return render(request, template)
def about(request):
    """Render the static about page."""
    template = 'about.html'
    return render(request, template)
def count(request):
    """Count word frequencies in the submitted text and render the results.

    Reads the ``fulltext`` GET parameter, tallies how often each
    whitespace-separated token occurs, and renders count.html with the
    original text, the total word count, and (word, frequency) pairs
    sorted by frequency, highest first.
    """
    from collections import Counter  # stdlib tally instead of a hand-rolled dict

    fulltext = request.GET['fulltext']
    wordlist = fulltext.split()
    # Counter.most_common() returns every (word, count) pair sorted by count
    # descending, with equal counts in first-encountered order -- the same
    # ordering the previous manual dict + sorted(..., reverse=True) produced.
    sortedwords = Counter(wordlist).most_common()
    return render(request, 'count.html', {'fulltext': fulltext, 'count': len(wordlist), 'sortedwords': sortedwords})
| [
"kstevens67@verizon.net"
] | kstevens67@verizon.net |
4a2e9950369065b640636f78eb810414bda63ba3 | b947e245d15c8b61e23c98b8010b0b668b547c69 | /app/migrations/0001_initial.py | fba9cb852a64ddc08ef980a8275daf9972ea2dd1 | [] | no_license | NoorAlqoj/mptt-wagtail-admin | accacfeef10525c86247e3e201b4223b3fca1dc3 | 22286ec57b6dc3fcfb17a223e541492e05919f08 | refs/heads/master | 2023-03-30T05:33:03.962886 | 2021-04-01T12:18:41 | 2021-04-01T12:18:41 | 353,672,143 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,154 | py | # Generated by Django 3.1.7 on 2021-03-24 08:17
from django.db import migrations, models
import django.db.models.deletion
import mptt.fields
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the Category table backed by
    # django-mptt.  The lft/rght/tree_id/level columns are maintained by MPTT
    # to encode the tree structure; do not edit them by hand.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
                ('slug', models.SlugField(max_length=100, unique=True)),
                # MPTT bookkeeping columns (nested-set bounds, tree id, depth).
                ('lft', models.PositiveIntegerField(editable=False)),
                ('rght', models.PositiveIntegerField(editable=False)),
                ('tree_id', models.PositiveIntegerField(db_index=True, editable=False)),
                ('level', models.PositiveIntegerField(editable=False)),
                # Self-referential parent link; deleting a parent cascades.
                ('parent', mptt.fields.TreeForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='children', to='app.category')),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
| [
"noor.alqoj@kuwaitnet.com"
] | noor.alqoj@kuwaitnet.com |
d971b4f7d0d4b32e87dfff46df709275c33adce2 | 461c78e97658e4a0c89655ca36af3774f7c40457 | /src/apps/posts/serializers.py | efd30e5c1dcb63db6251314a3d4696821b9d1398 | [] | no_license | AlekseiChirkov/social-network | 62bfb76467c1b3e7a156b6265f1f23cac0d0d140 | 3f33b13ffdf03f41cfdfbd2291d23ddcb8d7f2ed | refs/heads/master | 2023-06-20T03:06:47.886321 | 2021-07-23T10:36:32 | 2021-07-23T10:36:32 | 386,913,748 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,056 | py | from django.db.models import Count
from rest_framework import serializers
from apps.posts.models import Post, Like
class PostSerializer(serializers.ModelSerializer):
    """
    Post's serializer: exposes every Post model field as-is.
    """
    # NOTE(review): "__all__" leaks any future model column into the API;
    # consider an explicit field list if that matters.
    class Meta:
        model = Post
        fields = "__all__"
class LikeSerializer(serializers.ModelSerializer):
    """
    Like's serializer: exposes every Like model field as-is.
    """
    class Meta:
        model = Like
        fields = "__all__"
class LikeAnalyticsSerializer(serializers.ModelSerializer):
    """
    Serializer for likes analytics
    """
    # Computed field, resolved by is_named_likes_count() below.
    likes_count = serializers.SerializerMethodField('is_named_likes_count')
    class Meta:
        model = Like
        fields = ('created_date', 'likes_count')
    @staticmethod
    def is_named_likes_count(obj: Like) -> int:
        """
        Count all likes that share this like's created_date.
        :param obj: Like model object
        :return: int - likes count for the date
        """
        # NOTE(review): this runs one COUNT query per serialized row (N+1);
        # annotating the count on the queryset would avoid that.
        likes = Like.objects.filter(created_date=obj.created_date)
        return likes.count()
| [
"tektonikboy98@gmail.com"
] | tektonikboy98@gmail.com |
94d62c8b0f085a1c9ff8557bc9a8e36e12dcc526 | 11dd6436678fcfd608707145298dfb11aa22e59b | /Schedule Web Scraper.py | 0dc159af57f641f4b2f04411e2fb22afc8b1d3e9 | [
"MIT"
] | permissive | sezenack/Red-Army-App | 2640f5530b6953ec3b85677d9a866ace27192857 | d5a37dd5c59e27eccc805e566f5a4f908303855b | refs/heads/master | 2020-04-17T20:17:28.268814 | 2019-08-06T18:11:28 | 2019-08-06T18:11:28 | 166,899,088 | 3 | 4 | MIT | 2019-04-16T19:34:49 | 2019-01-22T00:19:51 | Swift | UTF-8 | Python | false | false | 424 | py | import requests
import urllib.request
import time
from bs4 import BeautifulSoup
# Schedule
url1 = 'https://rpiathletics.com/schedule.aspx?path=hockey&'
response1 = requests.get(url1, headers={'User-Agent': 'Custom'})
soup1 = BeautifulSoup(response1.text, 'html.parser')
table1 = soup1.findAll('span')
infostrings1 = []
for t in table1:
infostrings1.append(t.get_text())
infostrings1 = infostrings1[0:578]
| [
"noreply@github.com"
] | sezenack.noreply@github.com |
e4b11c617b8c19e66071c18899d2212e6fbaf258 | 2463092c9bef6cb019ee369575a71d1c9e36c2e9 | /scrape_mars.py | 50e4497428a93294e97498fe2decd99cecd63255 | [] | no_license | sarahgrant11/NASA-Data-Webscraping-Flask | f5b51e1fac5c78e22f8dde9ca2e5b952926739fd | 6f3ccfbb0f86c5b4b4ff63d1b9cb734e300e65bb | refs/heads/master | 2023-02-10T16:01:17.787367 | 2021-01-07T01:26:50 | 2021-01-07T01:26:50 | 294,553,116 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,929 | py | import pandas as pd
from splinter import Browser
from time import sleep
from pprint import pprint
from bs4 import BeautifulSoup
import requests
import pymongo
import time
# Pages scraped by scrape(): NASA Mars news, the JPL featured-image
# carousel (plus its base URL for relative links), the USGS hemisphere
# gallery, and the Mars facts table.
news_url = 'https://mars.nasa.gov/news/?page=0&per_page=40&order=publish_date+desc%2Ccreated_at+desc&search=&category=19%2C165%2C184%2C204&blank_scope=Latest'
jpl_url = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars'
jpl_base = 'https://www.jpl.nasa.gov'
hemi_url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'
facts_url = 'https://space-facts.com/mars/'
def init_browser():
    """Return a visible (non-headless) Chrome splinter Browser using the
    locally installed chromedriver."""
    driver_opts = {"executable_path": "/usr/local/bin/chromedriver"}
    return Browser("chrome", headless=False, **driver_opts)
def scrape():
    """Scrape Mars-related data from four sites and return it as a dict.

    Keys produced: news_headline, news_description, featured_img,
    html_table, and hemishere_urls (sic -- the typo is a runtime key that
    downstream consumers depend on, so it is left as-is).
    """
    mars_data = {}
    browser = init_browser()
    # --- Latest news headline + teaser ---
    browser.visit(news_url)
    html = browser.html
    news_data = BeautifulSoup(html, "html.parser")
    #strip headline
    news_headline = news_data.find_all('div', class_='content_title')[1].text
    #strip body
    news_desc = news_data.find_all('div', class_='article_teaser_body')[0].text
    mars_data.update( {
        'news_headline': news_headline,
        'news_description': news_desc
    })
    # JPL Featureed Space Image
    sleep(1)
    browser.visit(jpl_url)
    html = browser.html
    JPL_image = BeautifulSoup(html, "html.parser")
    # The carousel exposes the image URL inside its inline style attribute;
    # the second single-quoted token is the relative path.
    featured_image = JPL_image.find(class_='carousel_item')['style']
    image_urlend = featured_image.split("'")[1]
    image_url = jpl_base + image_urlend
    mars_data.update( {
        "featured_img": image_url
    })
    # Mars Facts tables
    sleep(1)
    # read_html returns every <table> on the page; the first is the facts table.
    Facts_Tables = pd.read_html(facts_url)
    tables_df = Facts_Tables[0]
    tables_df.columns = ['Item', 'Values']
    tables_df.set_index('Item', inplace=True)
    html_table = tables_df.to_html(
        classes='table table-striped table-hover')
    mars_data.update({
        "html_table": html_table
    })
    # NOTE(review): hemi_url is visited twice (here and just below) and
    # hemi_urls duplicates links -- both look redundant but are harmless.
    browser.visit(hemi_url)
    # Hemisphere images
    sleep(1)
    browser.visit(hemi_url)
    html = browser.html
    hemi_image = BeautifulSoup(html, "html.parser")
    hemi_urls = hemi_image.find_all('div', class_='item')
    links = hemi_image.find_all('div', class_='item')
    hemi_photos_urls = []
    # Visit each hemisphere's detail page to extract the full-size image URL.
    for x in links:
        link_base = "https://astrogeology.usgs.gov"
        img_link = x.find("div", class_="description").a["href"]
        title = x.find('h3').text
        hemilink = link_base + img_link
        browser.visit(hemilink)
        hemi_html = browser.html
        hemi_soup = BeautifulSoup(hemi_html, 'html.parser')
        img_url = hemi_soup.find("img", class_="wide-image")["src"]
        hemi_photos_urls.append(
            {'title': title, 'url': 'https://astrogeology.usgs.gov' + img_url})
    mars_data.update({
        "hemishere_urls": hemi_photos_urls
    })
    browser.quit()
    print(mars_data)
    return mars_data
| [
"sarahgrant11@gmail.com"
] | sarahgrant11@gmail.com |
0922d95d605a8ff9ba191632f45196ebe588128e | c8c05bd2f7bd1cf5a9cbfc6b6250199977ba221f | /db/models.py | 8f873f61b1836b9e1a68db63bb45334800b664b9 | [] | no_license | unraveldata-org/saas | 4d507d216fa13855ab348af663705018dbe1740d | 8a82dd5dbce473af04a39d7a7fe58cf2e8b63971 | refs/heads/master | 2020-12-26T14:03:50.573762 | 2020-03-08T13:36:25 | 2020-03-09T05:06:33 | 237,532,153 | 0 | 0 | null | 2020-10-27T22:18:28 | 2020-01-31T23:13:48 | Python | UTF-8 | Python | false | false | 39,671 | py | # Python standard library imports
from datetime import datetime, timedelta
import random, string
import logging
# Third-party imports
from sqlalchemy import create_engine
from sqlalchemy import Column, Boolean, Integer, Float, String, DateTime, ForeignKey, Text
from sqlalchemy import func, and_, or_, not_
from sqlalchemy.ext.declarative import as_declarative
from sqlalchemy.orm import relationship, sessionmaker
from sqlalchemy.pool import QueuePool
from sqlalchemy.exc import DatabaseError
# Local imports
from db.config import Config
# SQLAlchemy session
# Module-level session handle shared by every model; it stays None until
# DBRunner.setup_session() binds it to an engine.
session = None

logger = logging.getLogger("SQLAlchemyModels")
class DBRunner(object):
    """
    Represents a long-running session to the MySQL database using MySQL dialect and PyMySQL connector.
    """

    # Set to True to print the JDBC url (which includes the password), and also echo all statements.
    DEBUG = False

    @classmethod
    def get_unravel_jdbc_url(cls):
        """
        Get the Unravel JDBC URL, which includes the username and password
        :return: Return a string representing the JDBC URL
        """
        from urllib.parse import quote_plus

        jdbc_url = Config.db_jdbc_url
        db_type = jdbc_url.split("://")[0]
        host_port_db = jdbc_url[len(db_type) + 3:]

        # URL-encode the credentials so that reserved characters such as
        # '@', ':' or '/' in the username/password cannot corrupt the URL
        # (per the SQLAlchemy engine-URL documentation).
        user = quote_plus(Config.db_username)
        if Config.db_password == "":
            url = "{}+pymysql://{}@{}".format(db_type, user, host_port_db)
        else:
            url = "{}+pymysql://{}:{}@{}".format(db_type, user, quote_plus(Config.db_password), host_port_db)

        # Do not print the URL since it contains a password
        if cls.DEBUG:
            print("JDBC URL: {}".format(url))
        return url

    @classmethod
    def setup_session(cls, jdbc_url):
        """
        Given the JDBC url, set the global SQLAlchemy session and connect it to the appropriate engine
        :param jdbc_url: JDBC URL string
        """
        global session
        global engine

        # https://docs.sqlalchemy.org/en/13/core/engines.html
        # Also, take a look at pessimistic/optimistic connection handling
        # https://docs.sqlalchemy.org/en/13/core/pooling.html#sqlalchemy.pool.QueuePool
        session = None
        engine = None
        try:
            # Can enable echo=True for debugging
            echo = cls.DEBUG is True
            engine = create_engine(jdbc_url, pool_size=128, max_overflow=10, poolclass=QueuePool, echo=echo)
            Session = sessionmaker()
            # Actually bind it to an engine once we know the DB configs
            Session.configure(bind=engine)
            session = Session()
        except Exception as exc:
            # Chain the original exception so the root cause stays visible.
            raise Exception("Setup session failed with exception: {}".format(exc)) from exc
        return session, engine
@as_declarative()
class Base(object):
    """
    Declarative base shared by all models.  Provides an auto-increment
    integer primary key plus CRUD helpers that stage changes on the
    module-level session and flush them WITHOUT committing; callers decide
    when to call session.commit().

    E.g.,
    class SomeModel(Base):
        def __init__():
            pass

    element = SomeModel()
    element.set_x(val=y)
    element.save()
    session.commit()

    element.update(**{"x": "y"})
    session.commit()

    element.delete()
    session.commit()
    """
    id = Column(Integer, primary_key=True)

    def save(self):
        """Stage this instance in the session and flush (no commit)."""
        session.add(self)
        self._flush()
        return self

    def update(self, **kwargs):
        """Set the given attributes on this instance, then save it."""
        for field_name, field_value in kwargs.items():
            setattr(self, field_name, field_value)
        return self.save()

    def delete(self):
        """Mark this instance deleted in the session and flush (no commit)."""
        session.delete(self)
        self._flush()

    def _flush(self):
        """Flush pending changes; roll back and re-raise on database errors."""
        try:
            session.flush()
        except DatabaseError:
            session.rollback()
            raise
raise
class TrialRequest(Base):
    """
    Represents a Free Trial request.
    """
    # Allowed lifecycle states; transitions are enforced by set_state().
    class State:
        PENDING = "pending"
        APPROVED = "approved"
        DENIED = "denied"
    # Trial tiers.
    class Type:
        FREE = "free"
        PAID = "paid"
    # The length of Strings is only used during a CREATE TABLE statement
    first_name = Column(String(128), nullable=False)
    last_name = Column(String(128), nullable=False)
    email = Column(String(256), nullable=False)
    title = Column(String(256), nullable=True)
    company = Column(String(256), nullable=True)
    ip = Column(String(32), nullable=True)
    state = Column(String(32), nullable=False)
    trial_type = Column(String(128), nullable=False)
    start_date = Column(DateTime, default=datetime.utcnow, nullable=False)
    cloud_provider = Column(String(128), nullable=False)
    create_cluster = Column(Boolean, nullable=False)
    notify_customer = Column(String(32), nullable=True)
    __tablename__ = "trial_request"
    def __init__(self, first_name, last_name, email, title, company, ip, state, trial_type, start_date, cloud_provider, create_cluster, notify_customer):
        """
        Construct a TrialRequest object
        :param first_name: Customer first name (str)
        :param last_name: Customer last name (str)
        :param email: Customer email (str, should already be validated)
        :param title: Customer job title (str)
        :param company: Customer company name (str)
        :param ip: Customer IP address (str) used to issue the request, useful in detecting DDOS
        :param state: Entity state (str)
        :param trial_type: Trial type, e.g., "free" (str)
        :param start_date: Python DateTime UTC object for when the request was created
        :param cloud_provider: Desired Cloud Provider name (str)
        :param create_cluster: Boolean indicating whether to create a cluster.
        :param notify_customer: State of whether the customer has been notified.
        """
        self.first_name = first_name
        self.last_name = last_name
        self.email = email
        self.title = title
        self.company = company
        self.ip = ip
        self.state = state
        self.trial_type = trial_type
        # Fall back to "now" (UTC) when no explicit start date is given.
        self.start_date = start_date if start_date is not None else datetime.utcnow()
        self.cloud_provider = cloud_provider
        self.create_cluster = create_cluster
        # None means no actions, "pending" means need to send, "finished" means sent.
        self.notify_customer = notify_customer
    def __repr__(self):
        """
        Machine-readable representation of this object.
        :return: Return a machine-readable string that exactly describes this object.
        """
        return u"<TrialRequest(id: {}, first name: {}, last name: {}, email: {}, company: {}, ip: {}, state: {}, " \
               u"trial_type: {}, start_date: {}, cloud_provider: {}, create cluster: {}, notify customer: {})>". \
            format(self.id, self.first_name, self.last_name, self.email, self.company, self.ip, self.state,
                   self.trial_type, self.start_date, self.cloud_provider, self.create_cluster, self.notify_customer)
    @classmethod
    def get_all(cls):
        """
        Get all of the TrialRequest objects that exist.
        :return: Return a list of TrialRequest objects, which could be an empty list.
        """
        return session.query(TrialRequest).all()
    @classmethod
    def get_all_pending(cls):
        """
        Get all TrialRequest objects whose state is PENDING.
        :return: Return a list of TrialRequest objects, which could be an empty list.
        """
        trials = session.query(TrialRequest).filter_by(state=cls.State.PENDING).all()
        return trials
    @classmethod
    def get_by_states_or_after_datetime(cls, states, date):
        """
        Get all TrialRequest objects whose state matches or whose start_date >= given date.
        :param states: List of states (str)
        :param date: Python DateTime object
        :return: Return a list of TrialRequest objects, which could be an empty list.
        """
        return session.query(TrialRequest).filter(or_(TrialRequest.state.in_(states), TrialRequest.start_date >= date)).all()
    @classmethod
    def get_by_id(cls, id):
        """
        Get a TrialRequest object given its id.
        :param id: ID (int PK)
        :return: Return the TrialRequest object if it exists, otherwise, None.
        """
        return session.query(TrialRequest).filter_by(id=id).first()
    @classmethod
    def get_num_created_after_datetime(cls, date):
        """
        Get the number of TrialRequest objects whose start_date is >= the given date.
        This is useful to throttle and prevent an attack.
        :param date: Python DateTime object
        :return: Return the number of TrialRequest objects (int)
        """
        count = session.query(func.count(TrialRequest.id)).filter(TrialRequest.start_date >= date).scalar()
        return count
    @classmethod
    def create_if_not_exists(cls, first_name, last_name, email, title, company, ip, cloud_provider, create_cluster, notify_customer=None):
        """
        Create a TrialRequest with an initial state.
        :return: Return the TrialRequest object that was created
        """
        # NOTE(review): despite the name, no duplicate/existence check is
        # performed here -- the caller must dedupe before saving.
        start_date = datetime.utcnow()
        trial = TrialRequest(first_name=first_name, last_name=last_name, email=email, title=title, company=company,
                             ip=ip, state=cls.State.PENDING, trial_type=cls.Type.FREE, start_date=start_date, cloud_provider=cloud_provider,
                             create_cluster=create_cluster, notify_customer=notify_customer)
        # Still need to call trial.save() and session.commit()
        return trial
    def set_state(self, state):
        """
        Change the state as long as it is allowed.
        :param state: Desired state, which must be one of TrialRequest.State
        """
        # NOTE: the {} literals below are empty dicts, not sets; membership
        # tests still work, and terminal states simply allow no transitions.
        allowed_transitions = {
            TrialRequest.State.PENDING: {TrialRequest.State.APPROVED, TrialRequest.State.DENIED},
            TrialRequest.State.APPROVED: {},
            TrialRequest.State.DENIED: {}
        }
        if state == self.state:
            return
        allowed_states = allowed_transitions[self.state]
        if state in allowed_states:
            # Still need to call self.update() and session.commit()
            self.state = state
        else:
            raise Exception("Cannot transition from state {} to {}".format(self.state, state))
class NodeSpec(Base):
"""
Represents a Node Spec request.
"""
class State:
PENDING = "pending"
FINISHED = "finished"
DEFAULT_TTL_HOURS = 72
cloud_provider = Column(String(128), nullable=False)
region = Column(String(256), nullable=False)
state = Column(String(32), nullable=False)
# User that requested it
user = Column(String(64), nullable=False)
node_type = Column(String(256), nullable=False)
storage_config = Column(Text, nullable=True)
unravel_version = Column(String(64), nullable=False)
unravel_tar = Column(String(256), nullable=True)
mysql_version = Column(String(256), nullable=True)
install_ondemand = Column(Boolean, nullable=False)
extra = Column(Text, nullable=True)
date_requested = Column(DateTime, default=datetime.utcnow, nullable=False)
ttl_hours = Column(Integer, default=DEFAULT_TTL_HOURS, nullable=False)
# Nullable FK
trial_request_id = Column(Integer, ForeignKey("trial_request.id"))
__tablename__ = "node_spec"
def __repr__(self):
"""
Machine-readable representation of this object.
:return: Return a machine-readable string that exactly describes this object.
"""
return u"<NodeSpec(id: {}, cloud_provider: {}, region: {}, state: {}, user: {}, node_type: {}, unravel_version: {}, unravel_tar: {}, " \
u"date_requested: {}, ttl_hours: {}, trial_request_id: {})>". \
format(self.id, self.cloud_provider, self.region, self.state, self.user, self.node_type, self.unravel_version, self.unravel_tar,
self.date_requested, self.ttl_hours, self.trial_request_id)
def __init__(self, cloud_provider, region, user, node_type, storage_config, unravel_version, unravel_tar, mysql_version, install_ondemand, extra, ttl_hours, trial_request_id=None):
"""
Construct a NodeSpec object
:param cloud_provider: Cloud Provider name (str)
:param region: Region name (str)
:param user: User that requested it (str)
:param node_type: Node/VM type (str)
:param storage_config: Some information about its storage, such as number of disks, etc.
:param unravel_version: Unravel version to install (str), e.g., 4.6.0.1
:param unravel_tar: Unravel tarball path to wget. Once we move to a tarball approach instead of RPM,
this may be utilized instead.
:param mysql_version: MySQL version to install (str)
:param install_ondemand: Boolean indicating if should also install Unravel Ondemand.
:param extra: Extra information in JSON in a text/blob column.
:param ttl_hours: Time to live in hours (int)
:param trial_request_id: FK (int) to the corresponding TrialRequest object if one exists.
"""
self.cloud_provider = cloud_provider
self.region = region
self.state = self.State.PENDING
self.user = user
self.node_type = node_type
self.storage_config = storage_config
self.unravel_version = unravel_version
self.unravel_tar = unravel_tar
self.mysql_version = mysql_version
self.install_ondemand = install_ondemand
self.extra = extra
self.date_requested = datetime.utcnow()
# Current number of hours to expire after date_launched.
# When expiring a resource, simply set it to 0
self.ttl_hours = ttl_hours if ttl_hours >= 0 else 0
# FK may be None
self.trial_request_id = trial_request_id
@classmethod
def get_by_trial_request_id(cls, trial_request_id):
    """
    Fetch the NodeSpec(s) created from the given trial request.

    :param trial_request_id: Trial request id (int)
    :return: List of NodeSpec objects (expected to be a singleton list).
    """
    return session.query(NodeSpec).filter_by(trial_request_id=trial_request_id).all()
@classmethod
def get_all(cls):
    """Return every NodeSpec row in the database (possibly an empty list)."""
    all_specs = session.query(NodeSpec)
    return all_specs.all()
@classmethod
def get_all_pending(cls):
    """Return every NodeSpec whose state is still PENDING (possibly empty)."""
    pending_query = session.query(NodeSpec).filter_by(state=cls.State.PENDING)
    return pending_query.all()
@classmethod
def get_by_id(cls, id):
    """
    Look up a NodeSpec by primary key.

    :param id: ID (int PK)
    :return: The NodeSpec if it exists, otherwise None.
    """
    return session.query(NodeSpec).filter_by(id=id).first()
@classmethod
def create_if_not_exists(cls, cloud_provider, region, user, node_type, storage_config, unravel_version, unravel_tar,
                         mysql_version, install_ondemand, extra, ttl_hours, trial_request_id=None):
    """
    Build a new (unsaved) NodeSpec in its initial state.

    Callers must still invoke spec.save() and session.commit() to persist it.

    :return: The freshly constructed NodeSpec object.
    """
    return NodeSpec(cloud_provider=cloud_provider, region=region, user=user, node_type=node_type,
                    storage_config=storage_config, unravel_version=unravel_version, unravel_tar=unravel_tar,
                    mysql_version=mysql_version, install_ondemand=install_ondemand, extra=extra,
                    ttl_hours=ttl_hours, trial_request_id=trial_request_id)
def get_cloud_provider(self):
    """Return the cloud provider name (str) for this spec."""
    return self.cloud_provider
def get_state(self):
    """Return the current state (one of NodeSpec.State)."""
    return self.state
def get_date_requested(self):
    """Return the UTC datetime at which this spec was requested."""
    return self.date_requested
def set_state(self, state):
    """
    Transition to *state* if the state machine allows it.

    PENDING may only move to FINISHED; FINISHED is terminal. Setting the
    current state again is a no-op. Callers still need self.update() and
    session.commit() to persist the change.

    :param state: Desired state, which must be one of NodeSpec.State
    :raises Exception: if the transition is not allowed.
    """
    allowed_transitions = {
        NodeSpec.State.PENDING: {NodeSpec.State.FINISHED},
        # Fixed: the terminal entry was `{}` (an empty dict); use an empty
        # set so both values have the same type.
        NodeSpec.State.FINISHED: set(),
    }
    if state == self.state:
        return
    allowed_states = allowed_transitions[self.state]
    if state in allowed_states:
        # Still need to call self.update() and session.commit()
        self.state = state
    else:
        raise Exception("Cannot transition from state {} to {}".format(self.state, state))
class Node(Base):
    """
    A node/VM to instantiate, monitor, and eventually delete.

    Lifecycle: LAUNCHED -> READY -> EXPIRED -> DELETED. A LAUNCHED node that
    never becomes ready may expire directly.
    """
    class State:
        LAUNCHED = "launched"
        READY = "ready"
        EXPIRED = "expired"
        DELETED = "deleted"

    cloud_provider = Column(String(128), nullable=False)
    region = Column(String(256), nullable=False)
    state = Column(String(32), nullable=False)
    node_type = Column(String(256), nullable=False)
    node_ip = Column(String(256), nullable=True)
    ttl_hours = Column(Integer, nullable=False)
    date_launched = Column(DateTime, default=datetime.utcnow, nullable=False)
    date_ready = Column(DateTime, nullable=True)
    date_expired = Column(DateTime, nullable=True)
    date_deleted = Column(DateTime, nullable=True)
    # Nullable FK
    node_spec_id = Column(Integer, ForeignKey("node_spec.id"))
    __tablename__ = "node"

    def __repr__(self):
        """
        Machine-readable representation of this object.
        Only the lifecycle dates the node has actually reached are included.
        :return: Return a machine-readable string that exactly describes this object.
        """
        dates_msg = ""
        if self.state in [Node.State.LAUNCHED, Node.State.READY, Node.State.EXPIRED, Node.State.DELETED]:
            dates_msg += " date_launched: {},".format(self.date_launched)
        if self.state in [Node.State.READY, Node.State.EXPIRED, Node.State.DELETED]:
            dates_msg += " date_ready: {},".format(self.date_ready)
        if self.state in [Node.State.EXPIRED, Node.State.DELETED]:
            dates_msg += " date_expired: {},".format(self.date_expired)
        if self.state in [Node.State.DELETED]:
            dates_msg += " date_deleted: {},".format(self.date_deleted)
        msg = u"<Node(id: {}, cloud_provider: {}, region: {}, state: {}, node_type: {}, node_ip: {}, " \
              u"ttl_hours: {},{} node_spec_id: {})>".\
            format(self.id, self.cloud_provider, self.region, self.state, self.node_type, self.node_ip,
                   self.ttl_hours, dates_msg, self.node_spec_id)
        return msg

    def __init__(self, cloud_provider, region, node_type, node_ip, ttl_hours, node_spec_id):
        """
        Construct a Node object in the LAUNCHED state.
        :param cloud_provider: Cloud Provider name (str)
        :param region: Region name (str)
        :param node_type: Node/VM type (str)
        :param node_ip: Node IP address (str), may be None until assigned
        :param ttl_hours: Time to live in hours (int); negative values clamp to 0
        :param node_spec_id: FK (int) to the NodeSpec's id.
        """
        self.cloud_provider = cloud_provider
        self.region = region
        self.state = self.State.LAUNCHED
        self.node_type = node_type
        self.node_ip = node_ip
        self.ttl_hours = ttl_hours if ttl_hours >= 0 else 0
        self.date_launched = datetime.utcnow()
        # FK
        self.node_spec_id = node_spec_id

    @classmethod
    def get_by_node_spec_id(cls, node_spec_id):
        """
        Get a Node given the FK to its NodeSpec's id.
        :param node_spec_id: FK (int) of the NodeSpec's id
        :return: Return the singleton list of the Node object.
        """
        node = session.query(Node).filter_by(node_spec_id=node_spec_id).all()
        return node

    @classmethod
    def get_all(cls):
        """
        Get all of the Node objects that exist.
        :return: Return a list of Node objects, which could be an empty list.
        """
        return session.query(Node).all()

    @classmethod
    def get_by_state(cls, state):
        """
        Get all of the Node objects with the given state.
        :param state: State (str)
        :return: Return a list of Node objects.
        """
        nodes = session.query(Node).filter_by(state=state).all()
        return nodes

    @classmethod
    def get_by_states(cls, states):
        """
        Get all of the Node objects whose state is in the given list.
        :param states: List of states (str)
        :return: Return a list of Node objects.
        """
        nodes = session.query(Node).filter(Node.state.in_(states)).all()
        return nodes

    @classmethod
    def get_by_id(cls, id):
        """
        Get a Node object given its id.
        :param id: ID (int PK)
        :return: Return the Node object if it exists, otherwise, None.
        """
        return session.query(Node).filter_by(id=id).first()

    @classmethod
    def create_from_node_spec(cls, node_spec):
        """
        Construct a Node object given a NodeSpec.
        :param node_spec: Source information, which is the NodeSpec
        :return: Return a Node object
        """
        # The IP will be determined later
        node = Node.create_if_not_exists(node_spec.cloud_provider, node_spec.region, node_spec.node_type, None, node_spec.ttl_hours, node_spec.id)
        return node

    @classmethod
    def create_if_not_exists(cls, cloud_provider, region, node_type, node_ip, ttl_hours, node_spec_id):
        """
        Create a Node with initial state.
        :return: Return the Node object that was created
        """
        node = Node(cloud_provider=cloud_provider, region=region, node_type=node_type,
                    node_ip=node_ip, ttl_hours=ttl_hours, node_spec_id=node_spec_id)
        # Still need to call node.save() and session.commit()
        return node

    @classmethod
    def get_all_ready_to_expire(cls):
        """
        Get a list of the Node objects that are ready to be expired. Their current state could be either
        LAUNCHED or READY. A node is ready to expire once its relevant start
        date plus ttl_hours is in the past.
        :return: Return a list of Node objects to expire.
        """
        now = datetime.utcnow()
        expired = []
        launched = Node.get_by_state(Node.State.LAUNCHED)
        for node in launched:
            # Bug fix: the old code tested `Node.date_launched` (the Column
            # object, which is never None) instead of the instance attribute.
            if node.date_launched is not None and (node.date_launched + timedelta(hours=node.ttl_hours)) <= now:
                expired.append(node)
        ready = Node.get_by_state(Node.State.READY)
        for node in ready:
            if node.date_ready is not None and (node.date_ready + timedelta(hours=node.ttl_hours)) <= now:
                expired.append(node)
        return expired

    def set_state(self, state):
        """
        Change the state as long as it is allowed, stamping the matching
        lifecycle date on a successful transition.
        :param state: Desired state, which must be one of Node.State
        :raises Exception: if the transition is not allowed.
        """
        now = datetime.utcnow()
        allowed_transitions = {
            Node.State.LAUNCHED: {Node.State.READY, Node.State.EXPIRED},
            Node.State.READY: {Node.State.EXPIRED},
            Node.State.EXPIRED: {Node.State.DELETED},
            Node.State.DELETED: set()
        }
        if state == self.state:
            return
        allowed_states = allowed_transitions[self.state]
        if state in allowed_states:
            self.state = state
            if state == Node.State.READY:
                self.date_ready = now
            elif state == Node.State.EXPIRED:
                self.date_expired = now
            elif state == Node.State.DELETED:
                self.date_deleted = now
        else:
            raise Exception("Cannot transition from state {} to {}".format(self.state, state))

    def set_ttl_hours(self, ttl_hours):
        """
        Change the TTL hours. This is typically done to either
        * extend (increase from current value)
        * expire (set to 0)
        """
        self.ttl_hours = ttl_hours
class ClusterSpec(Base):
    """
    Represents a Cluster Spec request: PENDING until the corresponding
    Cluster has been created, then FINISHED.
    """
    class State:
        PENDING = "pending"
        FINISHED = "finished"

    DEFAULT_TTL_HOURS = 72

    # No guarantee we can actually request that name
    cluster_name = Column(String(128), nullable=True)
    cloud_provider = Column(String(128), nullable=False)
    region = Column(String(256), nullable=False)
    state = Column(String(32), nullable=False)
    # User that requested it
    user = Column(String(64), nullable=False)
    num_head_nodes = Column(Integer, nullable=False)
    head_node_type = Column(String(256), nullable=False)
    num_worker_nodes = Column(Integer, nullable=False)
    worker_node_type = Column(String(256), nullable=False)
    os_family = Column(String(128), nullable=True)
    stack_version = Column(String(128), nullable=False)
    cluster_type = Column(String(128), nullable=False)
    jdk = Column(String(32), nullable=True)
    storage = Column(String(1024), nullable=True)
    services = Column(Text, nullable=True)
    bootstrap_action = Column(Text, nullable=True)
    is_hdfs_ha = Column(Boolean, nullable=False)
    is_rm_ha = Column(Boolean, nullable=False)
    is_ssl = Column(Boolean, nullable=False)
    is_kerberized = Column(Boolean, nullable=False)
    extra = Column(Text, nullable=True)
    date_requested = Column(DateTime, default=datetime.utcnow, nullable=False)
    ttl_hours = Column(Integer, default=DEFAULT_TTL_HOURS, nullable=False)
    # Nullable FK
    trial_request_id = Column(Integer, ForeignKey("trial_request.id"))
    __tablename__ = "cluster_spec"

    def __repr__(self):
        """
        Machine-readable representation of this object.
        :return: Return a machine-readable string that exactly describes this object.
        """
        # Bug fix: this previously formatted `self.name`, an attribute that
        # does not exist (the column is `cluster_name`), raising AttributeError.
        return u"<ClusterSpec(id: {}, cluster_name: {}, cloud_provider: {}, region: {}, state: {}, user: {}, trial_request_id: {})>". \
            format(self.id, self.cluster_name, self.cloud_provider, self.region, self.state, self.user, self.trial_request_id)

    def __init__(self, cluster_name, cloud_provider, region, user, num_head_nodes, head_node_type, num_worker_nodes, worker_node_type,
                 os_family, stack_version, cluster_type, jdk, storage, services, bootstrap_action,
                 is_hdfs_ha, is_rm_ha, is_ssl, is_kerberized, extra, ttl_hours, trial_request_id=None):
        """
        Construct a ClusterSpec object in the PENDING state.
        :param cluster_name: Requested cluster name (str), may be None
        :param cloud_provider: Cloud Provider name (str)
        :param region: Region name (str)
        :param user: User that requested it (str)
        :param num_head_nodes: Number of head nodes (int)
        :param head_node_type: Head node/VM type (str)
        :param num_worker_nodes: Number of worker nodes (int)
        :param worker_node_type: Worker node/VM type (str)
        :param os_family: OS family (str), may be None
        :param stack_version: Stack version (str)
        :param cluster_type: Cluster type (str)
        :param jdk: JDK identifier (str), may be None
        :param storage: Storage metadata (str), may be None
        :param services: Services to install (str), may be None
        :param bootstrap_action: Bootstrap action payload (str), may be None
        :param is_hdfs_ha: Whether HDFS is highly available (bool)
        :param is_rm_ha: Whether the ResourceManager is highly available (bool)
        :param is_ssl: Whether SSL is enabled (bool)
        :param is_kerberized: Whether the cluster is kerberized (bool)
        :param extra: Extra information in JSON in a text/blob column.
        :param ttl_hours: Time to live in hours (int); negative values clamp to 0
        :param trial_request_id: FK (int) to the corresponding TrialRequest object if one exists.
        """
        self.cluster_name = cluster_name
        self.cloud_provider = cloud_provider
        self.region = region
        self.state = self.State.PENDING
        self.user = user
        self.num_head_nodes = num_head_nodes
        self.head_node_type = head_node_type
        self.num_worker_nodes = num_worker_nodes
        self.worker_node_type = worker_node_type
        self.os_family = os_family
        self.stack_version = stack_version
        self.cluster_type = cluster_type
        self.jdk = jdk
        self.storage = storage
        self.services = services
        self.bootstrap_action = bootstrap_action
        self.is_hdfs_ha = is_hdfs_ha
        self.is_rm_ha = is_rm_ha
        self.is_ssl = is_ssl
        self.is_kerberized = is_kerberized
        self.extra = extra
        self.date_requested = datetime.utcnow()
        self.ttl_hours = ttl_hours if ttl_hours >= 0 else 0
        # FK may be None
        self.trial_request_id = trial_request_id

    @classmethod
    def get_by_trial_request_id(cls, trial_request_id):
        """
        Get the single ClusterSpec that was created from the trial request id.
        :param trial_request_id: Trial request id (int)
        :return: Return a list of ClusterSpec objects (which should be a singleton list).
        """
        spec = session.query(ClusterSpec).filter_by(trial_request_id=trial_request_id).all()
        return spec

    @classmethod
    def get_all(cls):
        """
        Get all of the ClusterSpec objects that exist.
        :return: Return a list of ClusterSpec objects, which could be an empty list.
        """
        return session.query(ClusterSpec).all()

    @classmethod
    def get_all_pending(cls):
        """
        Get all ClusterSpec objects whose state is PENDING.
        :return: Return a list of ClusterSpec objects, which could be an empty list.
        """
        trials = session.query(ClusterSpec).filter_by(state=cls.State.PENDING).all()
        return trials

    @classmethod
    def get_by_id(cls, id):
        """
        Get a ClusterSpec object given its id.
        :param id: ID (int PK)
        :return: Return the ClusterSpec object if it exists, otherwise, None.
        """
        return session.query(ClusterSpec).filter_by(id=id).first()

    @classmethod
    def create_if_not_exists(cls, cluster_name, cloud_provider, region, user, num_head_nodes, head_node_type,
                             num_worker_nodes, worker_node_type, os_family, stack_version, cluster_type,
                             jdk, storage, services, bootstrap_action, is_hdfs_ha, is_rm_ha, is_ssl, is_kerberized,
                             extra, ttl_hours, trial_request_id=None):
        """
        Create a ClusterSpec with an initial state.
        :return: Return the ClusterSpec object that was created
        """
        spec = ClusterSpec(cluster_name=cluster_name, cloud_provider=cloud_provider, region=region, user=user, num_head_nodes=num_head_nodes,
                           head_node_type=head_node_type, num_worker_nodes=num_worker_nodes, worker_node_type=worker_node_type,
                           os_family=os_family, stack_version=stack_version, cluster_type=cluster_type, jdk=jdk,
                           storage=storage, services=services, bootstrap_action=bootstrap_action,
                           is_hdfs_ha=is_hdfs_ha, is_rm_ha=is_rm_ha, is_ssl=is_ssl, is_kerberized=is_kerberized,
                           extra=extra, ttl_hours=ttl_hours, trial_request_id=trial_request_id)
        # Still need to call spec.save() and session.commit()
        return spec

    def get_cloud_provider(self):
        """Return the cloud provider name (str) for this spec."""
        return self.cloud_provider

    def get_state(self):
        """Return the current state (one of ClusterSpec.State)."""
        return self.state

    def get_date_requested(self):
        """Return the UTC datetime at which this spec was requested."""
        return self.date_requested

    def set_state(self, state):
        """
        Change the state as long as it is allowed.
        PENDING may only move to FINISHED; FINISHED is terminal.
        :param state: Desired state, which must be one of ClusterSpec.State
        :raises Exception: if the transition is not allowed.
        """
        allowed_transitions = {
            ClusterSpec.State.PENDING: {ClusterSpec.State.FINISHED},
            ClusterSpec.State.FINISHED: set()
        }
        if state == self.state:
            return
        allowed_states = allowed_transitions[self.state]
        if state in allowed_states:
            # Still need to call self.update() and session.commit()
            self.state = state
        else:
            raise Exception("Cannot transition from state {} to {}".format(self.state, state))
class Cluster(Base):
    """
    Represents a Cluster object to instantiate, monitor, delete.

    Lifecycle: LAUNCHED -> READY -> EXPIRED -> DELETED; a LAUNCHED cluster
    may also expire directly.
    """
    class State:
        LAUNCHED = "launched"
        READY = "ready"
        EXPIRED = "expired"
        DELETED = "deleted"

    # The actual ID and Name assigned by the Cloud Provider, which over time may not be unique.
    cluster_id = Column(String(256), nullable=True)
    cluster_name = Column(String(256), nullable=True)
    cloud_provider = Column(String(128), nullable=False)
    region = Column(String(256), nullable=False)
    state = Column(String(32), nullable=False)
    config = Column(Text, nullable=True)
    ttl_hours = Column(Integer, nullable=False)
    date_launched = Column(DateTime, default=datetime.utcnow, nullable=False)
    date_ready = Column(DateTime, nullable=True)
    date_expired = Column(DateTime, nullable=True)
    date_deleted = Column(DateTime, nullable=True)
    # Nullable FK
    cluster_spec_id = Column(Integer, ForeignKey("cluster_spec.id"))
    __tablename__ = "cluster"

    def __repr__(self):
        """
        Machine-readable representation of this object.
        Only the lifecycle dates the cluster has actually reached are included.
        :return: Return a machine-readable string that exactly describes this object.
        """
        dates_msg = ""
        # Consistency fix: these checks previously compared against Node.State,
        # which only worked because the string values coincide; use Cluster.State.
        if self.state in [Cluster.State.LAUNCHED, Cluster.State.READY, Cluster.State.EXPIRED, Cluster.State.DELETED]:
            dates_msg += " date_launched: {},".format(self.date_launched)
        if self.state in [Cluster.State.READY, Cluster.State.EXPIRED, Cluster.State.DELETED]:
            dates_msg += " date_ready: {},".format(self.date_ready)
        if self.state in [Cluster.State.EXPIRED, Cluster.State.DELETED]:
            dates_msg += " date_expired: {},".format(self.date_expired)
        if self.state in [Cluster.State.DELETED]:
            dates_msg += " date_deleted: {},".format(self.date_deleted)
        msg = u"<Cluster(id: {}, cluster_id: {}, cluster_name: {}, cloud_provider: {}, region: {}, state: {}, " \
              u"ttl_hours: {},{} cluster_spec_id: {})>".\
            format(self.id, self.cluster_id, self.cluster_name, self.cloud_provider, self.region, self.state,
                   self.ttl_hours, dates_msg, self.cluster_spec_id)
        return msg

    def __init__(self, cluster_id, cluster_name, cloud_provider, region, config, ttl_hours, cluster_spec_id):
        """
        Construct a Cluster object in the LAUNCHED state.
        :param cluster_id: Optional Cluster ID (str)
        :param cluster_name: Optional Cluster name (str)
        :param cloud_provider: Cloud Provider name (str)
        :param region: Region name (str)
        :param config: Config metadata (str)
        :param ttl_hours: Time to live in hours (int); negative values clamp to 0
        :param cluster_spec_id: FK (int) to the ClusterSpec's id.
        """
        self.cluster_id = cluster_id
        self.cluster_name = cluster_name
        self.cloud_provider = cloud_provider
        self.region = region
        self.state = self.State.LAUNCHED
        self.config = config
        # Current number of hours to expire after date_launched.
        # When expiring a resource, simply set it to 0
        self.ttl_hours = ttl_hours if ttl_hours >= 0 else 0
        self.date_launched = datetime.utcnow()
        # FK
        self.cluster_spec_id = cluster_spec_id

    @classmethod
    def get_by_cluster_spec_id(cls, cluster_spec_id):
        """
        Get a Cluster given the FK to its ClusterSpec's id.
        :param cluster_spec_id: FK (int) of the ClusterSpec's id
        :return: Return the singleton list of the Cluster object.
        """
        node = session.query(Cluster).filter_by(cluster_spec_id=cluster_spec_id).all()
        return node

    @classmethod
    def get_all(cls):
        """
        Get all of the Cluster objects that exist.
        :return: Return a list of Cluster objects, which could be an empty list.
        """
        return session.query(Cluster).all()

    @classmethod
    def get_by_state(cls, state):
        """
        Get all of the Cluster objects with the given state.
        :param state: State (str)
        :return: Return a list of Cluster objects.
        """
        nodes = session.query(Cluster).filter_by(state=state).all()
        return nodes

    @classmethod
    def get_by_states(cls, states):
        """
        Get all of the Cluster objects whose state is in the given list.
        :param states: List of states (str)
        :return: Return a list of Cluster objects.
        """
        nodes = session.query(Cluster).filter(Cluster.state.in_(states)).all()
        return nodes

    @classmethod
    def get_by_id(cls, id):
        """
        Get a Cluster object given its id.
        :param id: ID (int PK)
        :return: Return the Cluster object if it exists, otherwise, None.
        """
        return session.query(Cluster).filter_by(id=id).first()

    @classmethod
    def create_from_cluster_spec(cls, cluster_spec):
        """
        Construct a Cluster object given a ClusterSpec.
        :param cluster_spec: Source information, which is the ClusterSpec
        :return: Return a Cluster object
        """
        # The Id/Name will be determined later
        cluster = Cluster.create_if_not_exists(cluster_spec.cluster_name, cluster_spec.cloud_provider, cluster_spec.region,
                                               cluster_spec.ttl_hours, cluster_spec.id)
        return cluster

    @classmethod
    def create_if_not_exists(cls, cluster_name, cloud_provider, region, ttl_hours, cluster_spec_id):
        """
        Create a Cluster with initial state.
        :return: Return the Cluster object that was created
        """
        # TODO, for now generate a random id
        cluster_id = "cluster_" + ''.join(random.choices(string.ascii_letters + string.digits, k=16))
        cluster = Cluster(cluster_id=cluster_id, cluster_name=cluster_name, cloud_provider=cloud_provider, region=region,
                          config=None, ttl_hours=ttl_hours, cluster_spec_id=cluster_spec_id)
        # Still need to call node.save() and session.commit()
        return cluster

    @classmethod
    def get_all_ready_to_expire(cls):
        """
        Get a list of the Cluster objects that are ready to be expired. Their current state could be either
        LAUNCHED or READY. A cluster is ready to expire once its relevant
        start date plus ttl_hours is in the past.
        :return: Return a list of Cluster objects to expire.
        """
        now = datetime.utcnow()
        expired = []
        launched = Cluster.get_by_state(Cluster.State.LAUNCHED)
        for cluster in launched:
            if cluster.date_launched is not None and (cluster.date_launched + timedelta(hours=cluster.ttl_hours)) <= now:
                expired.append(cluster)
        ready = Cluster.get_by_state(Cluster.State.READY)
        for cluster in ready:
            if cluster.date_ready is not None and (cluster.date_ready + timedelta(hours=cluster.ttl_hours)) <= now:
                expired.append(cluster)
        return expired

    def set_state(self, state):
        """
        Change the state as long as it is allowed, stamping the matching
        lifecycle date on a successful transition.
        :param state: Desired state, which must be one of Cluster.State
        :raises Exception: if the transition is not allowed.
        """
        now = datetime.utcnow()
        allowed_transitions = {
            Cluster.State.LAUNCHED: {Cluster.State.READY, Cluster.State.EXPIRED},
            Cluster.State.READY: {Cluster.State.EXPIRED},
            Cluster.State.EXPIRED: {Cluster.State.DELETED},
            Cluster.State.DELETED: set()
        }
        if state == self.state:
            return
        allowed_states = allowed_transitions[self.state]
        if state in allowed_states:
            self.state = state
            if state == Cluster.State.READY:
                self.date_ready = now
            elif state == Cluster.State.EXPIRED:
                self.date_expired = now
            elif state == Cluster.State.DELETED:
                self.date_deleted = now
        else:
            raise Exception("Cannot transition from state {} to {}".format(self.state, state))

    def set_ttl_hours(self, ttl_hours):
        """
        Change the TTL hours. This is typically done to either
        * extend (increase from current value)
        * expire (set to 0)
        """
        self.ttl_hours = ttl_hours
| [
"alejandro@apache.org"
] | alejandro@apache.org |
8f60b6e2507dd0bdca21ab846f4ce4d9ce6205e5 | 82c05807fd24e85336643ca3920fc008291835c6 | /apps/alert/models.py | 1f0b5ee714323064138d77b8d16c8a709630b98e | [] | no_license | iyunw/server_alert | 61ae5e4c101b551049773754caa49310b68dac20 | 65d33f7537175cc9546a8b6f793a25b73f056ced | refs/heads/master | 2023-07-12T06:08:54.856230 | 2021-08-08T12:51:07 | 2021-08-08T12:51:07 | 390,762,507 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,193 | py | import os
from server_alert.settings import BASE_DIR
from django.db import models
def get_alert_bankend():
    """
    Discover the alert backends supported by this deployment.

    Walks apps/utils/alert and treats every non-dunder ``*.py`` module as a
    backend, so the set of choices stays dynamic.

    :return: set of ``(module_name, module_name)`` tuples (Django choices format)
    """
    backends = set()
    alert_dir = os.path.join(BASE_DIR, "apps/utils/alert")
    for _root, _dirs, filenames in os.walk(alert_dir):
        for filename in filenames:
            # Only plain Python modules count; skip __init__.py entirely.
            if not filename.endswith(".py") or "__init__" in filename:
                continue
            module_name = filename.replace(".py", "")
            backends.add((module_name, module_name))
    return backends
def default_json():
    """Return a fresh empty dict (safe mutable default for JSONField)."""
    return dict()
class Status(models.IntegerChoices):
    # Record lifecycle states; the labels (shown in the admin UI) are Chinese.
    ENABLE = 0, "开启"  # "enabled"
    CLOSE = 1, "关闭"  # "closed"
    DELETE = 9, "删除"  # "deleted" (soft delete marker)
class GroupAlertServer(models.Model):
    """Configuration for an alert backend (one row per alert group)."""
    name = models.CharField(verbose_name="报警组名", max_length=128)
    # Backend type; the choice list is discovered dynamically from apps/utils/alert.
    type = models.CharField(verbose_name="报警后端类型", choices=get_alert_bankend(), max_length=128)
    status = models.IntegerField(verbose_name="状态", choices=Status.choices, default=Status.ENABLE)
    create_datetime = models.DateTimeField(verbose_name="创建时间", auto_now_add=True)
    # Label fix: this auto_now field tracks updates but was mislabelled
    # "创建时间" (creation time); it now reads "更新时间" (update time).
    update_datetime = models.DateTimeField(verbose_name="更新时间", auto_now=True)
    config = models.JSONField(default=default_json, verbose_name="报警服务端配置")
    create_user = models.ForeignKey("user.User", verbose_name="创建用户", on_delete=models.CASCADE)

    def __str__(self):
        return self.name

    class Meta:
        verbose_name = "报警服务器配置"
        verbose_name_plural = verbose_name
        ordering = ["-id",]
class SendHistory(models.Model):
    """One record per alert send: who sent it, plus request/response payloads."""
    create_datetime = models.DateTimeField(verbose_name="创建时间", auto_now_add=True)
    user = models.ForeignKey("user.User", verbose_name="发送用户", on_delete=models.CASCADE)
    request_data = models.JSONField(default=default_json, verbose_name="请求内容")
    # NOTE: the field name keeps the historical misspelling ("respones") to
    # avoid a schema migration; label fixed from the duplicated "请求内容"
    # (request content) to "响应内容" (response content).
    respones_data = models.JSONField(default=default_json, verbose_name="响应内容")

    def __str__(self):
        return self.user.cn_name

    class Meta:
        verbose_name = "发送历史内容"
        verbose_name_plural = verbose_name
        ordering = ["-id", ]
| [
"351937287@qq.com"
] | 351937287@qq.com |
1f2a223714d8ed37266c2d906d54ca3d841f623b | 28a8a9fd5f334c8c3363be50e7d963003fa2ca4f | /upload.py | e77367831e2bce57bebea9d5f0652b9a2e9b6564 | [] | no_license | shehasv/Medical-Image-Enhancement- | 4a0644ba0fd00902b217e912729162122772b9ca | 6841f390fdaff4c59d720759cf298d47aff4a208 | refs/heads/master | 2022-12-24T04:49:39.593173 | 2020-04-24T12:12:13 | 2020-04-24T12:12:13 | 258,500,592 | 0 | 2 | null | 2020-10-01T18:16:19 | 2020-04-24T12:02:31 | Python | UTF-8 | Python | false | false | 3,514 | py | from flask import *
# Flask application setup.
# Fix: Flask(__name__) was instantiated twice; the first instance was
# immediately discarded, so only one is kept.
app = Flask(__name__)
app.secret_key = "secret key"  # NOTE(review): hard-coded secret; move to config/env for production

# Deduplicated imports (os and PIL.Image were imported multiple times).
import os
#import magic
import urllib.request
from flask import Flask, flash, request, redirect, render_template
from werkzeug.utils import secure_filename
import pandas as pd
import numpy as np
from PIL import Image, ImageEnhance, ImageFilter
from sklearn.cluster import KMeans
from joblib import dump
import sys

# Only JPEG uploads are accepted.
ALLOWED_EXTENSIONS = {'jpeg', 'jpg'}
def allowed_file(filename):
    """Return True when *filename* has an extension listed in ALLOWED_EXTENSIONS."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in ALLOWED_EXTENSIONS
@app.route('/')
def upload_form():
    """Render the upload page (GET /)."""
    return render_template('upload.html')
def _quantize_colors(im, num_of_centroids=16):
    """
    Reduce *im* to ``num_of_centroids`` colours via k-means clustering.

    A 4-bit image is represented by 2^4 = 16 colours, hence the default.

    :param im: PIL RGB image
    :param num_of_centroids: number of colour clusters
    :return: (quantized PIL Image, fitted KMeans model)
    """
    # Flatten (H, W, 3) pixels into an (H*W, 3) sample matrix.
    pixels = np.reshape(np.asarray(im), (im.height * im.width, 3))
    compressor = KMeans(n_clusters=num_of_centroids, n_init=10, max_iter=300, verbose=0)
    compressor.fit(pixels)
    # Replace each pixel with its cluster centroid colour.
    pixel_centroids = np.array([list(compressor.cluster_centers_[label])
                                for label in compressor.labels_]).astype("uint8")
    quantized = np.reshape(pixel_centroids, (im.height, im.width, 3), "C")
    return Image.fromarray(quantized), compressor


def _enhance(im):
    """Apply the fixed brightness/colour/contrast/sharpness enhancement chain."""
    im = ImageEnhance.Brightness(im).enhance(1.0)
    im = ImageEnhance.Color(im).enhance(1.0)
    im = ImageEnhance.Contrast(im).enhance(1.5)
    im = ImageEnhance.Sharpness(im).enhance(1.5)
    return im


@app.route('/', methods=['POST'])
def upload_file():
    """Handle a JPEG upload: validate, colour-quantize, enhance, save to disk."""
    if request.method == 'POST':
        # check if the post request has the file part
        if 'file' not in request.files:
            flash('No file part')
            return redirect(request.url)
        file = request.files['file']
        if file.filename == '':
            flash('No file selected for uploading')
            return redirect(request.url)
        if file and allowed_file(file.filename):
            filename = secure_filename(file.filename)  # computed but unused downstream (kept for parity)
            im = Image.open(file)
            flash('Image found with width: {}, height: {}'.format(im.width, im.height))
            flash('Running K-means')  # fixed typo: was 'Runnign K-means'
            compressed_im, compressor = _quantize_colors(im)
            # save the fitted model
            dump(compressor, "compressor.joblib")
            image_sharped = _enhance(compressed_im)
            flash('Image Processing Completed Successfully !')
            # TODO(review): hard-coded absolute output path — make configurable.
            image_sharped.save("/home/shehas/testing/out.jpg")
            flash('File Downloaded Successfully')  # fixed typo: was 'Succesfully '
            return redirect('/')
        else:
            flash('Allowed file type is jpeg')
            return redirect(request.url)
if __name__ == "__main__":
    # Run the Flask development server when executed as a script.
    app.run()
| [
"noreply@github.com"
] | shehasv.noreply@github.com |
46b584d31b0b8bd3d8b9ed488a0ecc0fdc9a730d | e24da8bd3329b6764fc7fdba28b9c09f79926eff | /scATAC/rules/sc_atac_link_fragment.smk | dea06c9f208803c0ead5f1e5e0e9081cdac778b5 | [
"MIT"
] | permissive | crazyhottommy/pyflow-single-cell | f56a642e8798e23828aa69486a5cc2b47b290104 | 0ee9a0c1045ee3d1a46467e2fe630570298c41bd | refs/heads/master | 2023-03-15T10:40:03.262732 | 2020-11-24T00:41:40 | 2020-11-24T00:41:40 | 269,082,123 | 4 | 2 | MIT | 2021-03-05T19:21:42 | 2020-06-03T12:28:25 | Python | UTF-8 | Python | false | false | 453 | smk |
# Normalize a per-sample scATAC fragment file: coordinate-sort, then
# bgzip-compress and tabix-index the result for random access.
rule scatac_link_fragments:
    input:
        # Raw fragment file for this sample, resolved via the FILES mapping.
        frag = lambda wildcards: FILES[wildcards.sample]
    output:
        # Sorted, uncompressed fragments (plain TSV).
        frag_dedup = "Result/minimap2/{sample}/fragments_corrected_dedup_count.tsv",
        # bgzip-compressed copy; tabix writes the .tbi index next to it.
        fraggz = "Result/minimap2/{sample}/fragments_corrected_dedup_count.tsv.gz"
    shell:
        """
        # make sure it is sorted
        gunzip -c {input.frag} | sort -k1,1 -k2,2n > {output.frag_dedup}
        cat {output.frag_dedup} | bgzip > {output.fraggz}
        tabix -p bed {output.fraggz}
        """
| [
"tangming2005@gmail.com"
] | tangming2005@gmail.com |
19496b155460372aa9099fd1c87da3ac74f7006a | c86aa739a506072ca476456794942f741410068f | /lstm_example/lstm-datascience-task/src/data/make_dataset.py | 3a9838594a3a642ca347a3c34dd95c6691ca3ccd | [] | no_license | ENate/ml_models | 23ecf46398d4a530217acca4ccd0ad91de1b0e78 | 169bccfc95e1705fa219c5b26cbb1e54d093cd4a | refs/heads/master | 2023-01-29T18:18:42.772029 | 2020-12-14T16:46:12 | 2020-12-14T16:46:12 | 287,022,927 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,852 | py | # -*- coding: utf-8 -*-
import click
import logging
import pandas as pd
from pathlib import Path
from dotenv import find_dotenv, load_dotenv
@click.command()
@click.argument('input_filepath', type=click.Path(exists=True))
@click.argument('output_filepath', type=click.Path())
def main(input_filepath, output_filepath):
    """
    Runs data processing scripts to turn raw data from (../raw) into
    cleaned data ready to be analyzed (saved in ../processed).
    :param input_filepath: Data source folder
    :param output_filepath: Data destination folder
    :return: (frame with activity, normalized features, activity labels)
    """
    logger = logging.getLogger(__name__)
    # Fixed: log *before* processing (previously logged after the work was done).
    logger.info('making final data set from raw data')
    # Fixed misleading unpacking names: make_data_set returns
    # (frame with activity column, normalized features without activity, labels).
    with_activity, without_activity, labels = make_data_set(input_filepath)
    with_activity.to_csv(output_filepath)
    return with_activity, without_activity, labels
_DEFAULT_INTERIM_PATH = ('/home/nath/tasks/talpa-datascience-task/data/interim/'
                         'formatted_feats.csv')


def make_data_set(data_file_raw, interim_path=_DEFAULT_INTERIM_PATH):
    """
    Pre-process the raw machine-sensor data set.

    :param data_file_raw: path (or file-like object) of the raw CSV data
    :param interim_path: where to write the normalized feature CSV; defaults
        to the original hard-coded interim location for backward compatibility
    :return: tuple of
        - ds_with_activity: frame without 'timestamp'/'Unnamed: 0' columns
        - ds_without_activity: normalized training features (no 'activity')
        - output_label: the 'activity' column (machine states)
    """
    df_dataset = pd.read_csv(data_file_raw, sep=',')
    # Drop the 'Unnamed: 0' and 'timestamp' columns from the original data set.
    ds_with_activity = df_dataset[df_dataset.columns.difference(['timestamp', 'Unnamed: 0'])]
    # .copy() so the in-place edits below do not trigger SettingWithCopy issues.
    ds_without_activity = df_dataset[df_dataset.columns.difference(['timestamp', 'Unnamed: 0', 'activity'])].copy()
    # Output label column containing the different machine states.
    output_label = df_dataset['activity']

    # Hoisted: the first 80% of rows are treated as the training portion.
    train_len = int(len(ds_without_activity) * 0.8)
    # Iterate a snapshot of the columns because columns are dropped in the loop.
    for col in list(ds_without_activity.columns):
        if ds_without_activity[col][:train_len].isna().sum() / train_len > 0.5:
            # More than half of the training rows are NaN -> drop the column.
            ds_without_activity.drop(col, axis=1, inplace=True)
        else:
            # Otherwise fill NaNs by linear interpolation (both directions).
            ds_without_activity[col] = ds_without_activity[col].interpolate(method='linear', limit_direction='both')
    # Normalize data column-wise by each column's maximum.
    ds_without_activity /= ds_without_activity.max()
    # Save the normalized features (tab-separated) to the interim location.
    ds_without_activity.to_csv(interim_path, sep='\t', encoding='utf-8')
    return ds_with_activity, ds_without_activity, output_label
if __name__ == '__main__':
    # Configure root logging before invoking the CLI entry point.
    log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    logging.basicConfig(level=logging.INFO, format=log_fmt)
    # define the raw data folder name
    raw_data_file = 'data_case_study.csv'  # data_case_study.csv'
    interim_out_data_file = 'processed_data.csv'  # write to interim folder
    # x_train, y_train, x_train_no_y = make_data_set(raw_data_file)
    # not used in this stub but often useful for finding various files
    project_dir = Path(__file__).resolve().parents[2]
    # find .env automatically by walking up directories until it's found, then
    # load up the .env entries as environment variables
    load_dotenv(find_dotenv())
    # NOTE: click parses argv itself; the variables defined above are
    # currently unused by main().
    main()
    # x_train, y_train, x_train_no_y = main(raw_data_file, interim_out_data_file)
    # print(x_train.shape)
| [
"diogenolaudah@gmail.com"
] | diogenolaudah@gmail.com |
27d9bcabf55ce6e54de1de19687dd63e11857f24 | 7d67aa17a1cef639c05bf3bb159f53ebe89c277c | /reporter/models.py | ed5b5699dc065fb8ba8c6a62c0ddb332cb03348e | [] | no_license | AnushreeDhar/Geodjango- | 08df36c6895629dd88edc9b489e2a57c098f009e | f1891a1670dc8f97cb8485dcb24a45e8d1ed5539 | refs/heads/master | 2020-03-27T06:02:16.286973 | 2018-08-25T08:35:25 | 2018-08-25T08:35:25 | 146,073,262 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 672 | py |
from django.db import models
from django.contrib.gis.db import models
from django.contrib.gis.geos import GEOSGeometry
# Create your models here.
class Incidences(models.Model):
    """A reported incident identified by a name and a WGS84 point location."""
    name = models.CharField(max_length=200)
    # SRID 4326 = WGS84 lat/lon; spatial index explicitly disabled in the
    # original schema — presumably to match an existing table. TODO confirm.
    location = models.PointField(srid=4326, spatial_index=False)

    def __unicode__(self):
        # Bug fix: was `_unicode_` (single underscores), which Django never
        # calls; instances rendered as unhelpful default reprs in the admin.
        return self.name
class Counties(models.Model):
    """An administrative county with its code and multipolygon boundary."""
    counties = models.CharField(max_length=25)   # county name
    codes = models.IntegerField()                # numeric county code
    cty_code = models.CharField(max_length=24)   # string form of the code
    dis = models.IntegerField()                  # district number (meaning unverified)
    geom = models.MultiPolygonField(null=True)   # boundary geometry, optional

    def __unicode__(self):
        # Bug fix: was `_unicode_` (single underscores), which Django never calls.
        return self.counties
| [
"noreply@github.com"
] | AnushreeDhar.noreply@github.com |
61db1e21237d636d848987f0e5310e762d697737 | 21033e8ee7a76cce124438116001b11448fa2362 | /scripts/test_sms.py | f391712fd95b562815c0b2113bd41bfe47d6b417 | [] | no_license | koryd-bixly/nebri-gottovote | 68b610db3c49b36ea3320fe83562791f24016f6d | 974344cd97d6a06780a9a5c529a9fbcdb0fec6cb | refs/heads/master | 2021-01-10T07:56:15.593290 | 2016-01-18T17:52:59 | 2016-01-18T17:52:59 | 49,894,359 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 219 | py | class test_sms(NebriOS):
# listens_to = ['korytest']
listens_to = ['auth_check']
def check(self):
return True
def action(self):
send_email("koryd@bixly.com","""Test message test_sms""") | [
"koryd@bixly.com"
] | koryd@bixly.com |
60ed092200dd2e61d66aedce4e6f34247952ea53 | dc2a3100207046a41ff18b2a088c39eb3912a512 | /crawler/spiders/games_3dm_shouyou_spider.py | c42aac152d57a95d8a306084609bc74442a913dd | [
"MIT"
] | permissive | ZhanZongHan/crawler | 3ec8bd33f7e5ca52022f0a84d6e214d174792a45 | 9f9a14ff622eff75bc7762d4afb32285cc36c486 | refs/heads/master | 2020-06-30T07:45:09.315727 | 2019-09-06T03:18:18 | 2019-09-06T03:18:18 | 200,769,607 | 0 | 0 | null | 2019-08-06T03:29:07 | 2019-08-06T03:29:07 | null | UTF-8 | Python | false | false | 3,360 | py | # -*- coding: utf-8 -*-
import scrapy
from crawler.items import ThreeDMShouYouGame
import requests
from crawler.tool import random_filename
class ThreedmconsolegameSpider(scrapy.Spider):
    """Spider for the 3DM mobile-game ("shouyou") topic listing.

    Crawls https://shouyou.3dmgame.com/zt/ page by page, yielding one
    ThreeDMShouYouGame item per listed game and following the "next" link.
    """
    name = 'games_3dm_shouyou_spider'
    allowed_domains = ['shouyou.3dmgame.com']
    start_urls = ['https://shouyou.3dmgame.com/zt/']
    # Browser-like headers to avoid trivial bot blocking.
    headers = {
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36',
        'Upgrade-Insecure-Requests': '1',
    }
    base_url = 'https://shouyou.3dmgame.com'
    custom_settings = {
        'MONGODB_COLLECTION': 'games_3dm_shouyou',
        'ITEM_PIPELINES': {
            # 'crawler.pipelines.CrawlerPipeline': 300,
            'crawler.pipelines.ImgDownloadPipeline': 300,
            # 'scrapy_redis.pipelines.RedisPipeline': 400,
            'crawler.pipelines.MongoPipeline': 400,
        }
    }
    # Field used as the unique key by the Mongo pipeline (per project convention).
    item_index = 'name'

    def start_requests(self):
        # Issue the initial requests with the custom headers attached.
        for url in self.start_urls:
            yield scrapy.Request(url, headers=self.headers, callback=self.parse)

    def parse(self, response):
        """Extract every game entry on the page, then follow pagination."""
        lists = response.xpath('//div[@class="warp_post"]/ul/li')
        for i in lists:
            # Bug fix: build a fresh item for every entry. The original code
            # created a single ThreeDMShouYouGame instance before the loop and
            # mutated it on each iteration, so items already yielded could be
            # overwritten before the pipelines processed them.
            item = self.deal_with_data(item=ThreeDMShouYouGame(), response=i)
            if item:
                yield item

        next = response.xpath('//div[@class="pagewrap"]/ul/li[@class="next"]/a/@href').extract_first()
        if next:
            yield scrapy.Request(next, headers=self.headers, callback=self.parse)

    def deal_with_data(self, item, response):
        """Populate *item* from one <li> entry; return None if parsing fails."""
        try:
            item['name'] = response.xpath('div[2]/a/text()').extract_first()
            item['category'] = response.xpath('div[2]/p[1]/span[1]/text()').extract_first().split(':')[-1]
            item['language'] = response.xpath('div[2]/p[1]/span[2]/text()').extract_first().split(':')[-1]
            item['volume'] = response.xpath('div[2]/p[1]/span[3]/text()').extract_first().split(':')[-1]
            # CSS class "a1" marks Android, "a2" marks iOS.
            platform = response.xpath('div[2]/p[2]/span[1]/a/@class').extract()
            if 'a1' in platform and 'a2' in platform:
                item['platform'] = '安卓 苹果'
            elif 'a1' in platform:
                item['platform'] = '安卓'
            elif 'a2' in platform:
                item['platform'] = '苹果'
            else:
                item['platform'] = '未知'
            publisher = response.xpath('div[2]/p[2]/span[2]/text()').extract_first().split(':')[-1]
            if not publisher:
                publisher = '未知'
            item['publisher'] = '手游 ' + publisher
            item['publish_time'] = response.xpath('div[2]/p[2]/span[3]/text()').extract_first().split(':')[-1]
            item['description'] = response.xpath('div[2]/p[3]/text()').extract_first().replace('\n', '').strip()
            item['score'] = response.xpath('div[2]/div/div[2]/text()').extract_first()
            img_url = self.base_url + response.xpath('div[1]/a/img/@src').extract_first()
            filename = random_filename(img_url)
            item['img_url'] = img_url
            item['img_path'] = '/media/' + filename
            return item
        except Exception as e:
            # Best-effort parsing: a malformed entry is logged and skipped
            # (callers treat a None return as "drop this entry").
            print(e)
            return None
| [
"347649372@qq.com"
] | 347649372@qq.com |
1aef27e3303ff6b762d2d8d660504253da0b4768 | 25e92e706e98831a884f63b41ade3e7f7face2d2 | /sim.py | 6f00ac2aba021cee0afc2bb3f24a4e49c15a16f5 | [] | no_license | CrueLu/freeliquid-ct | 87bdcf21230f71230c9af01ad973b1dd6c6dcd8d | 4e7dd1de5ad508158cd599a82ce64fe6a1221dc8 | refs/heads/master | 2023-03-24T16:41:05.930601 | 2021-03-21T07:36:08 | 2021-03-21T07:36:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,126 | py |
import numpy as np
import pandas as pd
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
supply = 0
balance = {}
lastUpdateTime = 0
rewardRate = 2
rewardPerTokenStored = 0
userRewardPerTokenPaid = {}
rewards = {}
def rewardPerToken(curTime):
    """Return the cumulative reward-per-staked-token up to `curTime`.

    Mirrors the Synthetix-style staking formula: the stored accumulator plus
    the reward emitted since the last update divided by the current supply.
    """
    if supply == 0:
        # No stakers: the accumulator cannot grow (avoid division by zero).
        return rewardPerTokenStored
    dt = curTime - lastUpdateTime
    rptd = (dt * rewardRate) / supply
    return rewardPerTokenStored + rptd
def updateReward(curTime):
    """Advance the global accumulator and timestamp to `curTime`."""
    global rewardPerTokenStored
    global lastUpdateTime
    rewardPerTokenStored = rewardPerToken(curTime)
    lastUpdateTime = curTime
def stake(curTime, account, amnt):
    """Add `amnt` to `account`'s stake at `curTime`, settling rewards first."""
    updateRewardAccount(curTime, account)
    balance[account] = balance.get(account, 0) + amnt
    global supply
    supply += amnt
def unstake(curTime, account, amnt):
    """Remove `amnt` from `account`'s stake at `curTime`, settling rewards first.

    NOTE(review): no check that the account actually holds `amnt`; balances can
    go negative — acceptable for a simulation, not for production code.
    """
    updateRewardAccount(curTime, account)
    balance[account] = balance.get(account, 0) - amnt
    global supply
    supply -= amnt
def earned(curTime, account):
    """Total reward `account` has accrued by `curTime` (settled + pending)."""
    # Pending portion: stake times accumulator growth since the last settlement.
    rewardChange = rewardPerToken(curTime) - userRewardPerTokenPaid.get(account, 0)
    return balance.get(account, 0) * rewardChange + rewards.get(account, 0)
def updateRewardAccount(curTime, account):
    """Settle `account`'s pending rewards into `rewards` and sync its checkpoint."""
    updateReward(curTime)
    rewards[account] = earned(curTime, account)
    userRewardPerTokenPaid[account] = rewardPerTokenStored
def loop(stopTime, upd, stakes, newaccounts=None):
    """Run the staking simulation for `stopTime` ticks.

    Args:
        stopTime: number of time steps to simulate.
        upd: account -> update frequency; truthy 1 means "touch every tick",
             any other integer n means "touch every n ticks" (a touch stakes a
             tiny 0.0001 to force reward settlement).
        stakes: account -> (time, amount); positive amounts stake, negative
                amounts unstake at that tick.
        newaccounts: time -> list of (account, initial_stake) pairs to add.
                     Fix: default was the mutable `{}` (shared across calls);
                     now None, replaced by a fresh dict per call.

    Returns:
        List (one entry per tick) of {account-as-str: earned-so-far} dicts.
    """
    if newaccounts is None:
        newaccounts = {}
    curTime = 0
    accounts = set()
    hist = []
    while curTime < stopTime:
        naa = newaccounts.get(curTime)
        if naa is not None:
            for na in naa:
                accounts.add(na[0])
                stake(curTime, na[0], na[1])
        e = {}
        for a in accounts:
            if upd.get(a, False):
                if upd.get(a, False) == 1 or curTime % upd.get(a, False) == 0:
                    # updateRewardAccount(curTime, a)
                    stake(curTime, a, 0.0001)
            s = stakes.get(a)
            if s is not None:
                (time, amt) = s
                if time == curTime:
                    if amt > 0:
                        stake(curTime, a, amt)
                    else:
                        unstake(curTime, a, -amt)
            e[str(a)] = earned(curTime, a)
            # NOTE(review): dead branch for the default stopTime=30 run; only
            # fires if the simulation is run past tick 200.
            if curTime == 200:
                unstake(curTime, a, 100)
        hist.append(e)
        curTime += 1
    return hist
if __name__ == "__main__":
panes = 1
fig, axs = plt.subplots(panes, 1, tight_layout=True, sharex=True, squeeze=True, figsize=(30, 10))
stopTime = 30
newaccounts = {0:[(1, 100), (2, 100), (3, 150), (4, 100)]}
newaccounts[10] = [(5, 100)]
newaccounts[15] = [(5, 800)]
newaccounts[20] = [(6, 1000)]
newaccounts[23] = [(2, 1000)]
df1 = pd.DataFrame(loop(stopTime, {}, {}))
stakes = {1:(3, 150), 2:(5, 250), 3:(6, -150)}
df2 = pd.DataFrame(loop(stopTime, {1:True, 2:2, 3:3}, stakes, newaccounts))
print(df2)
print(df2.sum(axis=1).diff())
for c in df2.columns:
df1["upd_"+str(c)] = df2[c]
df2.plot()
# df1.plot(ax=axs[0])
# df2.plot(ax=axs[1])
plt.savefig("res.png", dpi=300)
plt.close()
| [
"Freeliquid@Freeliquid.com"
] | Freeliquid@Freeliquid.com |
8c1ae4c043ded390bd27c6dba01954f60b505fe0 | 4cecdad726371e467f62b8685a9e28ea825db843 | /ch-02-linked-lists/02-kth-to-last.py | eab19496fcc481715cc017311ba3345cf21c1634 | [] | no_license | kazuki-shin/CCI | cd0e6041d1d359243c9d3ac8b7922cec58a40c56 | 42e760b5df8f4910aac83f2f7c0d0d6860d89e37 | refs/heads/master | 2020-06-04T18:38:51.889606 | 2019-07-03T02:16:29 | 2019-07-03T02:16:29 | 192,147,954 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 815 | py | # Return the k^{th} to last node in a linked list.
import unittest
def kth_to_last(head, k):
    """Return the k-th to last node of a singly linked list (k=1 -> last node).

    Returns None when k <= 0 or k exceeds the list length. Single pass using
    two pointers kept k nodes apart.

    The original implementation was broken: it evaluated `node.next` without
    assigning it (so no pointer ever advanced) and unconditionally returned
    `head.data` instead of a node, failing its own unit tests below.
    """
    if head is None or k <= 0:
        return None
    lead = head
    for _ in range(k):          # advance the lead pointer k nodes ahead
        if lead is None:        # fewer than k nodes in the list
            return None
        lead = lead.next
    trail = head
    while lead is not None:     # move both until lead runs off the end
        lead = lead.next
        trail = trail.next
    return trail
class Node():
    """Singly linked list node: a value plus a reference to the next node."""
    def __init__(self, data, next=None):
        self.data = data
        self.next = next
class Test(unittest.TestCase):
    # Exercises kth_to_last on a 7-node list, including out-of-range k values.
    def test_kth_to_last(self):
        head = Node(1,Node(2,Node(3,Node(4,Node(5,Node(6,Node(7)))))))
        self.assertEqual(None, kth_to_last(head, 0));
        self.assertEqual(7, kth_to_last(head, 1).data);
        self.assertEqual(4, kth_to_last(head, 4).data);
        self.assertEqual(2, kth_to_last(head, 6).data);
        self.assertEqual(1, kth_to_last(head, 7).data);
        self.assertEqual(None, kth_to_last(head, 8));
if __name__ == "__main__":
unittest.main()
| [
"kazukis2@illinois.edu"
] | kazukis2@illinois.edu |
03bbac5bad7cdcbe038bb42411a7d63a714bc77c | ebf71cb2d7aff1d3908b92601680bd6f1f833d9d | /app/Course/view.py | 30d669828a59fe11b764aa42719d251c167d8418 | [
"MIT"
] | permissive | eq99/sophiadb | 92c14764682e99d1b7bdea7f73dbcbe64562c3a6 | 74ac82bf32141ef502a93f005666e09ac0303d1a | refs/heads/master | 2023-05-01T10:24:05.454897 | 2021-01-26T12:38:15 | 2021-01-26T12:38:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,484 | py | from datetime import datetime
from flask_restful import Resource, reqparse, abort, fields, marshal
from flask import Blueprint, jsonify
from plugins import db, api
from app.Course.model import Course
from app.User.view import user_fields
from app.User.model import User
from app.File.model import File
from app.File.view import file_fields
# Blueprint mounting all course endpoints under /api.
course_bp = Blueprint('course', __name__, url_prefix='/api')

# Marshalling schema for serializing Course objects in API responses.
course_fields = {
    'id': fields.Integer,
    'name': fields.String,
    'managers': fields.List(fields.Nested(user_fields)),
    'repo_url': fields.String,
    'description': fields.String,
    'cover_url': fields.String,
    'isbn': fields.String,
    'created_time': fields.DateTime,
    'files': fields.List(fields.Nested(file_fields)),
}
def create_repo(name):
    """Return the repository path for a course, e.g. ``/repos/<name>``."""
    return '/repos/{0}'.format(name)
class CoursesAPI(Resource):
    """Collection endpoint for courses: list all, or create a new one."""
    def __init__(self):
        self.parser = reqparse.RequestParser(bundle_errors=True)
        self.parser.add_argument('name', required=True, help='course name is required')
        self.parser.add_argument('user_id', required=True, help='user_id is required')
        # NOTE(review): 'decription' is a typo for 'description' and the parsed
        # value is never used below — the description is hard-coded in post().
        self.parser.add_argument('decription', required=False)

    def get(self):
        '''
        uri: /api/courses
        Return every course in the database, marshalled with course_fields.
        '''
        return marshal(Course.query.all(), course_fields), 200

    def post(self):
        '''
        uri: /api/courses
        Create a new course; returns 409 with the existing course if the
        name is already taken, otherwise 201 with the created course.
        '''
        args = self.parser.parse_args()
        name = args.name
        user_id = args.user_id

        user = User.query.filter(User.id == user_id).all()
        # NOTE(review): this filters File by a Course column — looks like it
        # should join on the course/file relationship; verify intended query.
        files = File.query.filter(Course.name == name).all()
        course = Course.query.filter(Course.name == name).all()
        if len( course ) == 1:
            # course already exists
            return marshal(course, course_fields), 409
        else:
            # create a new course here
            repo_url = create_repo(name)
            course = Course(
                name = name,
                managers = user,
                repo_url = repo_url,
                description = '计算机组成原理与技术',
                cover_url = '',
                isbn = '123456',
                created_time = datetime.now(),
                files = files
            )
            db.session.add(course)
            db.session.commit()
            return marshal(course, course_fields), 201
class CourseAPI(Resource):
    """Single-course endpoint (lookup by primary key)."""
    def get(self, course_id):
        '''
        uri: /api/course/<course_id>
        Return the course with the given id as a (possibly empty) list.
        '''
        return marshal(Course.query.filter(Course.id == course_id).all(), course_fields), 200
# Route registration. NOTE(review): the single-course route is the singular
# '/course/<id>' while its class docstring said '/courses/<id>' — confirm which
# path clients actually use before changing either.
api.add_resource(CoursesAPI, '/courses')
api.add_resource(CourseAPI, '/course/<course_id>')
"zuiaiqiansen@163.com"
] | zuiaiqiansen@163.com |
0a1006fe9c1e5871bd56c2e058f713f979000beb | d169d6f963b9f3aab3362ee964b94eee2d769b5c | /HubUpdateChoServer.py | 04306916289992d78f1689b568407ceaa8bb060a | [] | no_license | thaduri-karunakar/UIMCHOInfraMonitoring | 02da6797dc16e0f6973bdfb579dfdd8436a5f432 | 1322651c32fe1b2ab18df31f70703f88fd7c516d | refs/heads/master | 2023-07-21T06:04:21.839472 | 2021-08-24T04:56:12 | 2021-08-24T04:56:12 | 299,653,402 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,596 | py | from pypsexec.client import Client
import time
import sys # to raise specific exception using sys.exc_info()
# Deploys the Nimsoft "hub" probe (v9.30) to a series of CHO hubs by running
# the `pu` callback remotely over PsExec on the CHO server.
# SECURITY NOTE: credentials are hard-coded in plain text (here and inside the
# callback command line) — move them to a secrets store before reuse.
start = time.time()
ip ='10.17.162.2'
try:
    # NOTE(review): encrypt='False' is a non-empty *string*, which is truthy;
    # if the library expects a bool this does not disable encryption — verify.
    c = Client('10.17.162.2', 'administrator', 'interOP@123', encrypt='False')
    c.connect()
    c.create_service()
    print('service created for following "{}".......\n\n'.format(ip))
    hubCount = 15
    # Deploy to chosechub15..chosechub30. The loop variable `hub` is unused;
    # hubCount is incremented manually at the bottom of the loop instead.
    for hub in range(hubCount,31):
        callback = r"""C:\Progra~1\Nimsoft\bin\pu -u administrator -p interOP@123 /CHOSERVER1_domain/CHOSERVER1_hub/CHOSERVER1/automated_deployment_engine deploy_probe hub 9.30 /CHOSERVER1_domain/chosechub{}/chosechub{}""".format(hubCount,hubCount)
        print(callback)
        stdout, stderr, rc= c.run_executable("cmd.exe",arguments='''/c "{}"'''.format(callback))
        stdout = str(stdout, 'utf-8')
        stderr = str(stderr, 'utf-8')
        print (stdout)
        #if rc == 0:
            #print('Call back executed successfully :\nCallBackName :\n{}\nOutPut :\n{}\n\n'.format(callback, stdout))
        if rc != 0:
            print('Call back failed with error :\nCallBackName :\n{}\nOutPut :\n{}\n\n{}\n\n'.format(callback, stderr,stdout))
        print('=======================================================================')
        print('Sleeping for 5 Seconds..........')
        time.sleep(5)
        hubCount+=1
except Exception as e:
    print('Below exception occured .....\n')
    print(e)
    print()
finally:
    # Always tear down the remote service, even on failure.
    c.remove_service()
    c.disconnect()
    print('service removed for following "{}"'.format(ip))
    print ('Script has taken',(time.time()-start)/60, 'Minuts..')
"reddy.karnakar4@gmail.com"
] | reddy.karnakar4@gmail.com |
ea40c5db19399378ff47141803cfeb08151060b0 | 3bbe4abbb501b3dbfc24f9dbdb147c3376413b67 | /.venv/lib/python3.7/encodings/oem.py | 245f1ae53fad3f83ebb7583663d2fe17ae9c7cd3 | [] | no_license | SonGokussj4/kbl2nas | 38f02f2b0959f517294273604df9ce754ba8ec31 | fd06f118795987648f08db40a2b877907360245c | refs/heads/master | 2020-05-18T20:23:33.255556 | 2019-06-13T16:46:58 | 2019-06-13T16:46:58 | 184,627,997 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 56 | py | /expSW/SOFTWARE/python371/lib/python3.7/encodings/oem.py | [
"jverner@evektor.cz"
] | jverner@evektor.cz |
edc23ba6a96b09d762519c7a0dabfd4f0528c643 | d382cdd35b700ae2d19b4847d80e07cddbf39b1e | /forum/views/readers.py | 3b47757acb2f7f8773adc873f9f05e271c885831 | [] | no_license | dal-bhaat/stack5 | 9509cda9b6b71d3729b95e903b53dbe30cbafccd | 04989e48128398d1cd22759f0a7b3283a209e0f5 | refs/heads/master | 2020-05-30T11:30:24.510678 | 2014-07-24T10:53:46 | 2014-07-24T10:53:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,660 | py | # encoding:utf-8
import datetime
import logging
from urllib import unquote
from forum import settings as django_settings
from forum.settings.privilege import *
from forum.settings.minrep import *
from django.shortcuts import render_to_response, get_object_or_404
from django.http import HttpResponseRedirect, HttpResponse, Http404, HttpResponsePermanentRedirect
from django.core.paginator import Paginator, EmptyPage, InvalidPage
from django.template import RequestContext
from django import template
from django.utils.html import *
from django.utils import simplejson
from django.utils.encoding import smart_unicode
from django.db.models import Q, Count
from django.utils.translation import ugettext as _
from django.template.defaultfilters import slugify
from django.core.urlresolvers import reverse
from django.utils.datastructures import SortedDict
from django.views.decorators.cache import cache_page
from django.utils.http import urlquote as django_urlquote
from django.template.defaultfilters import slugify
from django.utils.safestring import mark_safe
from forum.utils.html import sanitize_html, hyperlink
from forum.utils.diff import textDiff as htmldiff2
from forum.utils.diff2 import textDiff as htmldiff
from forum.utils.diff2 import tagsDiff as tagsdiff
from forum.utils import pagination
from forum.templatetags.extra_tags import tags_html
from forum.forms import *
from forum.models import *
from forum.forms import get_next_url
from forum.actions import QuestionViewAction,TagViewAction
from forum.http_responses import HttpResponseUnauthorized
from forum.feed import RssQuestionFeed, RssAnswerFeed
from forum.utils.pagination import generate_uri
from forum.templatetags.extra_filters import static_content
import decorators
from random import randint
class HottestQuestionsSort(pagination.SortBase):
    """Sort: questions with activity (new answers/comments) in the last 24h,
    busiest first.

    NOTE(review): the Count annotation is applied before the recency filter;
    whether new_child_count reflects all children or only recent ones depends
    on Django's annotate/filter join ordering — verify against the ORM docs.
    """
    def apply(self, questions):
        return questions.annotate(new_child_count=Count('all_children')).filter(
                all_children__added_at__gt=datetime.datetime.now() - datetime.timedelta(days=1)).order_by('-new_child_count')
class QuestionListPaginatorContext(pagination.PaginatorContext):
    """Pagination/sorting for question lists: active, newest, hottest, most voted."""
    def __init__(self, id='QUESTIONS_LIST', prefix='', default_pagesize=15):
        super (QuestionListPaginatorContext, self).__init__(id, sort_methods=(
            ('active', pagination.SimpleSort(_('active'), '-last_activity_at', _("Most <strong>recently updated</strong> questions"))),
            ('newest', pagination.SimpleSort(_('newest'), '-added_at', _("most <strong>recently asked</strong> questions"))),
            ('hottest', HottestQuestionsSort(_('hottest'), _("most <strong>active</strong> questions in the last 24 hours</strong>"))),
            ('mostvoted', pagination.SimpleSort(_('most voted'), '-score', _("most <strong>voted</strong> questions"))),
        ), pagesizes=(15, 20, 30), default_pagesize=default_pagesize, prefix=prefix)
class AnswerSort(pagination.SimpleSort):
    """SimpleSort that pins accepted answers on top unless accepting is disabled."""
    def apply(self, answers):
        if not settings.DISABLE_ACCEPTING_FEATURE:
            # '-marked' floats accepted answer(s) above the chosen ordering.
            return answers.order_by(*(['-marked'] + list(self._get_order_by())))
        else:
            return super(AnswerSort, self).apply(answers)
class AnswerPaginatorContext(pagination.PaginatorContext):
    """Pagination/sorting for answer lists: oldest, newest, most voted (default)."""
    def __init__(self, id='ANSWER_LIST', prefix='', default_pagesize=10):
        super (AnswerPaginatorContext, self).__init__(id, sort_methods=(
            (_('oldest'), AnswerSort(_('oldest answers'), 'added_at', _("oldest answers will be shown first"))),
            (_('newest'), AnswerSort(_('newest answers'), '-added_at', _("newest answers will be shown first"))),
            (_('votes'), AnswerSort(_('popular answers'), ('-score', 'added_at'), _("most voted answers will be shown first"))),
        ), default_sort=_('votes'), pagesizes=(5, 10, 20), default_pagesize=default_pagesize, prefix=prefix)
class TagPaginatorContext(pagination.PaginatorContext):
    """Pagination/sorting for the tag cloud: by name or by usage count (default)."""
    def __init__(self):
        super (TagPaginatorContext, self).__init__('TAG_LIST', sort_methods=(
            ('name', pagination.SimpleSort(_('by name'), 'name', _("sorted alphabetically"))),
            ('used', pagination.SimpleSort(_('by popularity'), '-used_count', _("sorted by frequency of tag use"))),
        ), default_sort=_('used'), pagesizes=(36, 36, 120))
def feed(request):
    """RSS feed of all non-deleted questions, most recent activity first."""
    latest = Question.objects.filter_state(deleted=False).order_by('-last_activity_at')
    feed_title = settings.APP_TITLE + _(' - ') + _('latest questions')
    rss = RssQuestionFeed(request, latest, feed_title, settings.APP_DESCRIPTION)
    return rss(request)
@decorators.render('index.html')
def index(request):
    """Home page: newest questions filtered by the user's tag preferences.

    Shows a random "known tip" when KNOWN_TIPS_PAGE_TEXT is configured
    (tips are '|'-separated).
    """
    paginator_context = QuestionListPaginatorContext()
    paginator_context.base_path = '/'

    # Bug fix: the original called known_tip.strip() in the question_list()
    # call below, raising AttributeError whenever no tips were configured
    # (known_tip stayed None). Strip at assignment time instead.
    known_tip = None
    if len(settings.KNOWN_TIPS_PAGE_TEXT.strip()) > 0 :
        known_tips = settings.KNOWN_TIPS_PAGE_TEXT.split("|")
        known_tip = known_tips[randint(0, len(known_tips) - 1)].strip()

    questions = Question.objects.all()

    if request.user.is_authenticated():
        # Users may restrict the front page to their "interesting" tags, or
        # simply hide questions carrying tags they marked as "bad".
        good_tags = [t.tag.id for t in request.user.tag_selections.filter(reason="good")]
        if request.user.only_interesting_tag and len(good_tags) != 0 :
            questions = questions.filter(tags__id__in=good_tags)
        else:
            questions = questions.exclude(tags__id__in=[t.tag.id for t in request.user.tag_selections.filter(reason="bad")])

    return question_list(request,
                         questions,
                         page_title=_("New Questions"),
                         base_path=reverse('questions'),
                         feed_url=reverse('latest_questions_feed'),
                         paginator_context=paginator_context,
                         is_home_page=True,
                         known_tip=known_tip)
@decorators.render('questions.html', 'unanswered', _('unanswered'), weight=400)
def unanswered(request):
    """List questions that have no accepted (marked) answer."""
    return question_list(request,
                         Question.objects.exclude(id__in=Question.objects.filter(children__marked=True).distinct()),
                         _('open questions without an accepted answer'),
                         None,
                         _("Unanswered Questions"))
@decorators.render('questions.html', 'questions', _('questions'), weight=0)
def questions(request):
    """Full question listing under the "questions" tab, default ordering."""
    all_questions = Question.objects.all()
    return question_list(request, all_questions, _('questions'))
@decorators.render('questions.html')
def tag(request, tag):
try:
tag = Tag.active.get(name=unquote(tag))
except Tag.DoesNotExist:
raise Http404
# Getting the questions QuerySet
questions = Question.objects.filter(tags__id=tag.id)
if request.method == "GET":
user = request.GET.get('user', None)
if user is not None:
try:
questions = questions.filter(author=User.objects.get(username=user))
except User.DoesNotExist:
raise Http404
return question_list(request,
questions,
mark_safe(_('questions tagged <span class="tag">%(tag)s</span>') % {'tag': tag}),
None,
mark_safe(_('Questions Tagged With %(tag)s') % {'tag': tag}),
False,tag=tag)
@decorators.render('questions.html', 'questions', tabbed=False)
def user_questions(request, mode, user, slug):
    """Questions related to a user: asked-by, answered-by, or subscribed-by.

    The subscribed-by view is private: only the user themselves or a
    superuser may see it.
    """
    user = get_object_or_404(User, id=user)

    if mode == _('asked-by'):
        questions = Question.objects.filter(author=user)
        description = _("Questions asked by %s")
    elif mode == _('answered-by'):
        questions = Question.objects.filter(children__author=user, children__node_type='answer').distinct()
        description = _("Questions answered by %s")
    elif mode == _('subscribed-by'):
        if not (request.user.is_superuser or request.user == user):
            return HttpResponseUnauthorized(request)
        questions = user.subscriptions

        if request.user == user:
            description = _("Questions you subscribed %s")
        else:
            description = _("Questions subscribed by %s")
    else:
        raise Http404

    return question_list(request, questions,
                         mark_safe(description % hyperlink(user.get_profile_url(), user.username)),
                         page_title=description % user.username)
def allsite(request):
    """JSON list of sibling sites in the network (hard-coded catalogue)."""
    _allsite=[
        #{"name":_("qigu365"),"id":"www.qigu365.com","FaviconUrl":"http://www.qigu365.com/m/default/media/images/favicon.ico","Description":_("qigu365 is a site for ask question about stock")},
        {"name":_("Stack Enqueue"),"id":"www.stackenqueue.com","FaviconUrl":"http://www.stackenqueue.com/m/default/media/images/favicon.ico","Description":_("stackenqueue is a site for ask question about programe")},
        {"name":_("Seminar Math"),"id":"www.seminarmath.com","FaviconUrl":"http://www.seminarmath.com/m/default/media/images/favicon.ico","Description":_("seminarmath is a site for ask question about math")}
        ]
    return HttpResponse(simplejson.dumps(_allsite), mimetype="application/json")
def hotquestion(request):
    """JSON list of popular questions: score >= 1, >= 100 views, not deleted."""
    tmpqs = Question.objects.filter(score__gte=1).filter(extra_count__gte=100).exclude(state_string__contains="deleted").order_by("-score");
    ques = []
    for tq in tmpqs:
        q = {}
        # Site id is hard-coded; this endpoint feeds the cross-site hot list.
        q["SiteId"] ="http://www.stackenqueue.com"
        q["DisplayScore"] = tq.score
        q["Id"] = tq.id
        q["Title"] = tq.title
        ques.append(q)
    return HttpResponse(simplejson.dumps(ques), mimetype="application/json")
def inbox(request):
    """JSON inbox: the latest 50 answers/comments on the current user's posts.

    Returns the string "nlogin" when the requester is not authenticated.
    """
    if not request.user.is_authenticated():
        return HttpResponse(simplejson.dumps("nlogin"), mimetype="application/json")
    actions = Action.objects.filter(action_type__in=("answer","comment")).filter(node__parent__author=request.user).order_by('-action_date')
   # actions = Action.objects.filter(action_type__in=("answer","comment"))
    res = []
    for action in actions[:50]:
        item = {}
        item["Url"]=action.node.get_absolute_url()
        item["Type"]=_(action.action_type)
        item["FaviconUrl"]= settings.APP_URL + "/m/default/media/images/favicon.ico"
        item["SiteUrl"]= settings.APP_URL
        item["Count"]=0
        t = action.action_date
        # NOTE(review): hour/minute are not zero-padded ("9:5" for 09:05).
        time_z = "%s %s at %s:%s" % (t.month,t.day,t.hour,t.minute )
        item["CreationDate"]=time_z
        # Anything newer than the user's last login is flagged as unread.
        if action.node.added_at>request.user.last_login :
            item["IsNew"] = True
        else:
            item["IsNew"] = False
        if action.action_type == "answer":
            item["Title"]=action.node.parent.title
            item["Summary"]=action.node.summary[:20]+"..."
        elif action.action_type == "comment":
            node = action.node.parent
            # Comments on questions show the question title; comments on
            # answers show a snippet of the answer instead.
            if node.get_type() == "question":
                item["Title"]=action.node.parent.title
            else:
                item["Title"]=action.node.parent.summary[:20]
            item["Summary"]=action.node.summary[:20]+"..."
        res.append(item)
    return HttpResponse(simplejson.dumps(res), mimetype="application/json")
def question_list(request, initial,
                  list_description=_('questions'),
                  base_path=None,
                  page_title=_("All Questions"),
                  allowIgnoreTags=True,
                  feed_url=None,
                  paginator_context=None,
                  is_home_page=False,
                  tag = None,
                  known_tip = None):
    """Shared renderer for every question listing page.

    Filters out deleted questions and (when allowIgnoreTags) the requester's
    ignored tags; serves RSS when ?type=rss; otherwise paginates into the
    questions template context.
    """
    questions = initial.filter_state(deleted=False)

    if request.user.is_authenticated() and allowIgnoreTags:
        questions = questions.filter(~Q(tags__id__in = request.user.marked_tags.filter(user_selections__reason = 'bad')))

    if page_title is None:
        page_title = _("Questions")

    if request.GET.get('type', None) == 'rss':
        questions = questions.order_by('-added_at')
        return RssQuestionFeed(request, questions, page_title, list_description)(request)

    keywords =  ""
    if request.GET.get("q"):
        keywords = request.GET.get("q").strip()

    #answer_count = Answer.objects.filter_state(deleted=False).filter(parent__in=questions).count()
    #answer_description = _("answers")

    if not feed_url:
        # Rebuild the current query string (minus paging params) for the feed link.
        req_params = "&".join(generate_uri(request.GET, (_('page'), _('pagesize'), _('sort'))))

        if req_params:
            req_params = '&' + req_params

        feed_url = mark_safe(escape(request.path + "?type=rss" + req_params))

    # NOTE(review): len(questions) evaluates the full queryset to count it.
    return pagination.paginated(request, ('questions', paginator_context or QuestionListPaginatorContext()), {
        "questions" : questions.distinct(),
        "questions_count" : len(questions),
        "keywords" : keywords,
        "list_description": list_description,
        "base_path" : base_path,
        "page_title" : page_title,
        "tab" : "questions",
        'feed_url': feed_url,
        'is_home_page' : is_home_page,
        'tag' : tag,
        'known_tip' : known_tip,
    })
def search(request):
if request.method == "GET" and "q" in request.GET:
keywords = request.GET.get("q")
search_type = request.GET.get("t")
if not keywords:
return HttpResponseRedirect(reverse(index))
if search_type == 'tag':
return HttpResponseRedirect(reverse('tags') + '?q=%s' % urlquote(keywords.strip()))
elif search_type == "user":
return HttpResponseRedirect(reverse('users') + '?q=%s' % urlquote(keywords.strip()))
else:
return question_search(request, keywords)
else:
return render_to_response("search.html", context_instance=RequestContext(request))
@decorators.render('questions.html')
def question_search(request, keywords):
    """Full-text-ish question search: case-insensitive substring match on
    title or body (the ranked backend search is disabled here)."""
    #can_rank, initial = Question.objects.search(keywords)
    can_rank=False
    initial = Question.objects.filter(title__icontains=keywords) | Question.objects.filter(body__icontains=keywords)
    # Dead branch while can_rank is hard-coded False; kept for the ranked backend.
    if can_rank:
        paginator_context = QuestionListPaginatorContext()
        paginator_context.sort_methods[_('ranking')] = pagination.SimpleSort(_('relevance'), '-ranking', _("most relevant questions"))
        paginator_context.force_sort = _('ranking')
    else:
        paginator_context = None

    feed_url = mark_safe(escape(request.path + "?type=rss&q=" + keywords))

    return question_list(request, initial,
                         #_("questions matching '%(keywords)s'") % {'keywords': keywords},
                         _("questions"),
                         None,
                         _("questions matching '%(keywords)s'") % {'keywords': keywords},
                         paginator_context=paginator_context,
                         feed_url=feed_url)
@decorators.render('tags.html', 'tags', _('tags'), weight=100)
def tags(request):
    """Tag cloud page; ?q=<text> filters tags by substring."""
    stag = ""
    tags = Tag.active.all()

    if request.method == "GET":
        stag = request.GET.get("q", "").strip()
        if stag:
            tags = tags.filter(name__icontains=stag)

    return pagination.paginated(request, ('tags', TagPaginatorContext()), {
        "tags" : tags,
        "stag" : stag,
        "keywords" : stag
    })
def update_question_view_times(request, question):
    """Record a view of *question*, at most once per session until new activity."""
    seen = request.session.get('last_seen_in_question', {})
    previous = seen.get(question.id, None)

    if (not previous) or (previous < question.last_activity_at):
        QuestionViewAction(question, request.user, ip=request.META['REMOTE_ADDR']).save()
        seen[question.id] = datetime.datetime.now()
        request.session['last_seen_in_question'] = seen
def match_question_slug(id, slug):
    """Find the question whose slugified, url-quoted title equals *slug*.

    Candidates are pre-filtered by the slug's first word; returns None when
    nothing matches.
    """
    first_word = slug.split('-')[0]
    candidates = Question.objects.filter(title__istartswith=first_word)

    for candidate in candidates:
        if urlquote(slugify(candidate.title)) == slug:
            return candidate

    return None
def answer_redirect(request, answer):
    """Permanent-redirect to the question page + anchor where *answer* appears.

    Computes which page of the answer list (under the user's current sort and
    page size) contains the answer, by counting the answers ranked before it.
    """
    pc = AnswerPaginatorContext()

    sort = pc.sort(request)

    if sort == _('oldest'):
        filter = Q(added_at__lt=answer.added_at)
    elif sort == _('newest'):
        filter = Q(added_at__gt=answer.added_at)
    elif sort == _('votes'):
        # Ties on score are broken by older-first, mirroring AnswerSort.
        filter = Q(score__gt=answer.score) | Q(score=answer.score, added_at__lt=answer.added_at)
    else:
        raise Http404()

    # Accepted answers always sort first, so they count as "before" too.
    count = answer.question.answers.filter(Q(marked=True) | filter).exclude(state_string="(deleted)").count()
    pagesize = pc.pagesize(request)

    page = count / pagesize

    if count % pagesize:
        page += 1

    if page == 0:
        page = 1

    return HttpResponsePermanentRedirect("%s?%s=%s#%s" % (
        answer.question.get_absolute_url(), _('page'), page, answer.id))
def tag_subscriber_info(request, name):
    """Render the subscriber box for a tag: total subscribers plus whether
    the current user subscribes to it."""
    tag = get_object_or_404(Tag, name=name)
    pt = MarkedTag.objects.filter(tag__name=tag).filter(subscribed=True)#todo,add subscribed=True
    subscribed_count = len(pt)
    subscribed_tags = []
    if request.user.is_authenticated():
        subscribed_tags = pt.filter(user=request.user).values_list('tag__name', flat=True)
    return render_to_response('tag_subscriber_info.html', {
        'subscribed_tags':subscribed_tags,
        'subscribed_count' :subscribed_count,
        'tag': tag,
    }, context_instance=RequestContext(request))
def _diff_tag_post(revision,last_revision,use_default=False,render_mode=False):
    """Diff two tag-wiki revisions.

    Returns (about_diff, detail_diff, description) where the diffs are HTML
    fragments (or the raw fields when use_default) and description is a
    comma-joined summary of what changed. render_mode diffs the rendered
    markdown instead of the source text.
    """
    about_diff = None
    detail_diff = None
    descs = []
    if use_default:
        about_diff = revision.about
        detail_diff = revision.detail

    if last_revision is not None:
        if revision.about != last_revision.about:
            about_diff = mark_safe(htmldiff(revision.about,last_revision.about ))
            descs.append(_("edited about"))

        if revision.detail != last_revision.detail:
            if not render_mode:
                detail_diff = mark_safe(htmldiff(revision.detail,last_revision.detail ))
            else:
                # Render both sides to HTML first, then diff the rendered output.
                revision_detail = static_content(revision.detail,"markdown")
                last_revision_detail = static_content(last_revision.detail,"markdown")
                detail_diff = htmldiff(revision_detail,last_revision_detail,render_mode=True )
            descs.append(_("edited detail"))

    return (about_diff,detail_diff,",".join(descs))
def _diff_post(revision,last_revision,use_default=False,render_mode=False,is_answer=False):
    """Diff two question/answer revisions.

    Returns (title_diff, body_diff, tags_diff, description). Answers have no
    title, so title diffs are skipped when is_answer. render_mode diffs the
    rendered HTML rather than the markdown source.
    """
    title_diff = None
    body_diff = None
    tags_diff = None
    descs = []
    if use_default:
        title_diff = revision.title
        body_diff = revision.html
        tags_diff = mark_safe(tags_html(revision.tagname_list()))

    if last_revision is not None:
        if not is_answer and last_revision.title != revision.title:
            title_diff = mark_safe(htmldiff(revision.title,last_revision.title ))
            descs.append( _("edited title"))

        if revision.html != last_revision.html:
            if not render_mode:#markdown diff
                body_diff = mark_safe(htmldiff(revision.body,last_revision.body ))
            else:#render html diff
                revision_html = static_content(revision.html,"markdown")
                last_revision_html = static_content(last_revision.html,"markdown")
                body_diff = htmldiff(revision_html,last_revision_html,render_mode=True )
            descs.append(_("edited body"))

        current_tags = " ".join(revision.tagname_list())
        last_tags = " ".join(last_revision.tagname_list())
        if last_tags != current_tags:
            tags_diff = tagsdiff(current_tags,last_tags)
            tags_diff = mark_safe(tags_html(tags_diff))
            descs.append(_("edited tags"))

    return (title_diff,body_diff,tags_diff,",".join(descs))
def post_body(request,id):
    """Render the body of the node an action touched.

    For revise/suggest actions, shows a diff of the matching revision against
    its predecessor; otherwise renders the post itself.
    """
    action = get_object_or_404(Action, id=id)
    post = get_object_or_404(Node, id=action.node.id).leaf
    if action.action_type in [ "revise" , "suggest" ]:
        revisions = list(post.revisions.order_by('revised_at'))
        revision = None
        last_revision = None
        for i, revise in enumerate(revisions):
            # NOTE(review): revisions are matched to the action by comparing
            # ctime() strings (second precision) — fragile if two revisions
            # land in the same second.
            if revise.revised_at.ctime() == action.at.ctime():
                revision = revise
                break
            else:
                last_revision = revise
        (title_diff,body_diff,tags_diff,desc) = _diff_post(revision,last_revision,is_answer=(type(post)==Answer))
        return render_to_response('node/revision.html', {
            'title': title_diff,
            'html': body_diff,
            'tags': tags_diff,
        }, context_instance=RequestContext(request))

    return render_to_response('node/post.html', {
        'post': post,
        'action':action
    }, context_instance=RequestContext(request))
def user_flair_html(request,id):
user = get_object_or_404(User, id=id)
return render_to_response('user_flair.html',{
'user':user,
},context_instance=RequestContext(request))
def user_flair(request,name,id):
user = get_object_or_404(User, id=id)
return render_to_response('flair.html',{
'user':user,
"can_view_private": (user == request.user) or request.user.is_superuser,
'view_user':user,
},context_instance=RequestContext(request))
def user_info(request, id):
user = get_object_or_404(User, id=id)
return render_to_response('user_info.html', {
'user': user,
}, context_instance=RequestContext(request))
def user_day_rep(request,id,year,month,day):
user = get_object_or_404(User, id=id)
reps = user.reputes.filter(Q(date__year=year)&Q(date__month=month)&Q(date__day=day)).order_by('-date')
return render_to_response('users/user_day_rep.html', {
'reps': reps,
}, context_instance=RequestContext(request))
def tag_info(request, id):
tag = get_object_or_404(Tag, id=id)
editors = len(tag.revisions.values('author_id').distinct())
hot_questions = Question.objects.filter(tags__id=tag.id)[:10]
top_answerers_ids = Answer.objects.filter(parent__tags__id=tag.id).values('author').annotate(Count('author')).order_by('-author__count')[:5]
top_answerers = []
for a in top_answerers_ids:
top_answerers.append(User.objects.get(id=a["author"]))
#update tag view times
last_seen_in_tag = request.session.get('last_seen_in_tag', {})
last_seen = last_seen_in_tag.get(tag.id, None)
if (not last_seen) :
TagViewAction(tag, ip=request.META['REMOTE_ADDR']).save()
last_seen_in_tag[tag.id] = datetime.datetime.now()
request.session['last_seen_in_tag'] = last_seen_in_tag
return render_to_response('tag_info.html', {
'tag': tag,
'editors':editors,
'hot_questions':hot_questions,
'top_answerers':top_answerers,
}, context_instance=RequestContext(request))
@decorators.render("question.html", 'questions')
def question(request, id, slug='', answer=None):
try:
question = Question.objects.get(id=id)
question_headline = question.headline
question_body = question.html
tags = question.tags.all()
if question.pendding_suggestion and question.pendding_suggestion.author == request.user:
question_body = static_content(question.pendding_suggestion.body,"markdown")
question_headline = question.pendding_suggestion.title
tags = list(Tag.objects.filter(name__in=question.pendding_suggestion.tagname_list()))
except:
if slug:
question = match_question_slug(id, slug)
if question is not None:
return HttpResponseRedirect(question.get_absolute_url())
raise Http404()
if question.nis.deleted and not request.user.can_view_deleted_post(question):
raise Http404
if request.GET.get('type', None) == 'rss':
return RssAnswerFeed(request, question, include_comments=request.GET.get('comments', None) == 'yes')(request)
if answer:
answer = get_object_or_404(Answer, id=answer)
if (question.nis.deleted and not request.user.can_view_deleted_post(question)) or answer.question != question:
raise Http404
if answer.marked:
return HttpResponsePermanentRedirect(question.get_absolute_url())
return answer_redirect(request, answer)
if settings.FORCE_SINGLE_URL and (slug != slugify(question.title)):
return HttpResponsePermanentRedirect(question.get_absolute_url())
if request.POST:
answer_form = AnswerForm(request.POST, user=request.user)
else:
answer_form = AnswerForm(user=request.user)
answers = request.user.get_visible_answers(question)
update_question_view_times(request, question)
if request.user.is_authenticated():
try:
subscription = QuestionSubscription.objects.get(question=question, user=request.user)
except:
subscription = False
else:
subscription = False
return pagination.paginated(request, ('answers', AnswerPaginatorContext()), {
"question" : question,
"question_headline" : question_headline,
"question_body" : question_body,
"tags" : tags,
"answer" : answer_form,
"answers" : answers,
"similar_questions" : question.get_related_questions(),
"subscription": subscription,
})
@decorators.render("privileges.html", 'privileges')
def privileges(request,slug=''):
privilege_list = []
slug = slug.replace("-"," ")
select_priv = None
if request.user.is_authenticated():
current_value = request.user.reputation
else:
current_value = 0
for priv in PRIVILEGE_SET:
if priv is ALL_PRIVILEGE_PAGE:
continue
p = {}
p["name"] = priv.field_context["label"]
p["href"] = p["name"].replace(" ","-")
p["value"] = priv.field_context["reputation"].value
proc = 100
if current_value < p["value"]:
proc = ((current_value+0.0)*100//p["value"])
p["proc"] = proc
p["here"] = False
if slug == p["name"]:
p["here"] = True
select_priv = priv
privilege_list.append(p)
privilege_list = sorted(privilege_list,key = lambda priv:priv["value"],reverse=True)
title = ""
desc = ""
if select_priv:
title = select_priv.field_context["label"]
current_value = select_priv.field_context["reputation"].value
desc = select_priv.value
else:
title = ALL_PRIVILEGE_PAGE.field_context["label"]
desc = ALL_PRIVILEGE_PAGE.value
return render_to_response('privileges.html', {
"privileges":privilege_list,
"select_priv":select_priv,
"current_value":current_value,
"title":title,
"desc":desc,
}, context_instance=RequestContext(request));
REVISION_TEMPLATE = template.loader.get_template('node/revision.html')
def tag_revisions(request,id):
post = get_object_or_404(Tag, id=id)
revisions = list(post.revisions.exclude(suggest_status="pendding").order_by('revised_at'))
rev_ctx = []
last_revision = None
about_diff,body_diff = (None,None)
edit_desc = None
for i, revision in enumerate(revisions):
if i > 0 :
(about_diff,body_diff,edit_desc) = _diff_tag_post(revision,last_revision,use_default=True)
else:
(about_diff,body_diff) = (revision.about,revision.detail)
rev_ctx.append(dict(inst=revision))
rev_ctx[i]['title'] = about_diff
rev_ctx[i]['html'] = body_diff
if len(revision.summary) == 0:
rev_ctx[i]['summary'] = edit_desc
else:
rev_ctx[i]['summary'] = revision.summary
last_revision = revision
rev_ctx.reverse()
return render_to_response('revisions.html', {
'post': post,
'revisions': rev_ctx,
}, context_instance=RequestContext(request))
def revisions(request, id ):
post = get_object_or_404(Node, id=id).leaf
revisions = list(post.revisions.exclude(suggest_status="pendding").order_by('revised_at'))
rev_ctx = []
last_revision = None
title_diff,body_diff,tags_diff = (None,None,None)
edit_desc = None
for i, revision in enumerate(revisions):
if i > 0 :
(title_diff,body_diff,tags_diff,edit_desc) = _diff_post(revision,last_revision,use_default=True)
else:
(title_diff,body_diff,tags_diff) = (revision.title,revision.html,mark_safe(tags_html(revision.tagname_list())))
rev_ctx.append(dict(inst=revision))
rev_ctx[i]['title'] = title_diff
rev_ctx[i]['html'] = body_diff
rev_ctx[i]['tags'] = tags_diff
if len(revision.summary) == 0:
rev_ctx[i]['summary'] = edit_desc
else:
rev_ctx[i]['summary'] = revision.summary
last_revision = revision
rev_ctx.reverse()
return render_to_response('revisions.html', {
'post': post,
'revisions': rev_ctx,
'is_post':True,
}, context_instance=RequestContext(request))
def suggested_edits_tag(request,id):
tag = get_object_or_404(Tag,id=id)
suggestion = tag.pendding_suggestion
if suggestion is None:
return HttpResponse(_("suggestion has been handled by others!"), status=404,content_type="text/plain")
(about_diff,render_detail_diff,edit_desc) = _diff_tag_post(suggestion,tag.active_revision,use_default=True,render_mode=True)
(t,markdown_detail_diff,t) = _diff_tag_post(suggestion,tag.active_revision,use_default=True)
detail_text_body = tag.active_revision.detail.replace("\n", "<br>").replace(" "," ")
if len(suggestion.summary) == 0:
summary = edit_desc
else:
summary = suggestion.summary
post_type = type(tag).__name__.lower()
return render_to_response('suggest_check.html', {
'post': tag,
'title_diff':about_diff,
'render_body_diff':render_detail_diff,
'post_text_body':detail_text_body,
'markdown_body_diff':markdown_detail_diff,
'summary':summary,
'post_type':post_type,
}, context_instance=RequestContext(request))
def suggested_edits(request,id,isTag=False):
post = get_object_or_404(Node, id=id).leaf
suggestion = post.pendding_suggestion
if suggestion is None:
return HttpResponse(_("suggestion has been handled by others!"), status=404,content_type="text/plain")
(title_diff,render_body_diff,tags_diff,edit_desc) = _diff_post(suggestion,post.active_revision,use_default=True,render_mode=True)
(t,markdown_body_diff,t,t) = _diff_post(suggestion,post.active_revision,use_default=True)
post_text_body = post.active_revision.body.replace("\n", "<br>").replace(" "," ")
if len(suggestion.summary) == 0:
summary = edit_desc
else:
summary = suggestion.summary
post_type = type(post).__name__.lower()
return render_to_response('suggest_check.html', {
'post':post,
'title_diff':title_diff,
'render_body_diff':render_body_diff,
'post_text_body':post_text_body,
'markdown_body_diff':markdown_body_diff,
'tags_diff':tags_diff,
'summary':summary,
'post_type':post_type,
}, context_instance=RequestContext(request))
| [
"pappudada@mailinator.com"
] | pappudada@mailinator.com |
5124df50c0b04b5c2156d4b43959f9d323071d3b | 26b66f2d11b28bc5f859021e011d3b5d1e1ed8ee | /src/py-ds-local/nightjar_ds_local/__init__.py | 1c88e778293d97f2c84cb7656012763674fc1020 | [
"MIT"
] | permissive | groboclown/nightjar-mesh | 8f12c8b90a0b4dd5b6f871123e2d3d0c89001db2 | 3655307b4a0ad00a0f18db835b3a0d04cb8e9615 | refs/heads/master | 2022-12-13T13:04:02.096054 | 2020-08-12T23:30:30 | 2020-08-12T23:30:30 | 207,360,091 | 3 | 1 | MIT | 2022-12-13T11:13:30 | 2019-09-09T16:59:02 | Python | UTF-8 | Python | false | false | 529 | py |
"""
Data Store for simple, flat files, stored locally.
This stores one copy of the templates and configurations. These are
just JSON formatted files.
The template storage file is located in the environment variable `DM_LOCAL__TEMPLATE_FILE`,
and if that is not given, then the default `/etc/data-store/templates.json` is used.
The configuration storage file is located in the environment variable
`DM_LOCAL__CONFIGURATION_FILE`, and if that is not given, then the default
`/etc/data-store/configurations.json` is used.
"""
| [
"matt@groboclown.net"
] | matt@groboclown.net |
0c8d03fe7d951c70eb7461057696b30e15622c45 | 32045e1aea61551badbe3b54f7de206c51feb8c3 | /mysite2/settings.py | a049b09e42a01589da9368ab5c2de8cea8ab50eb | [] | no_license | jhwan04/my-first-blog | e79db0dd7fbd582b274bb200ef0cff7faa4fb229 | 95e78cdbbd0ec47501ab59eddb5369707b678029 | refs/heads/master | 2020-04-10T15:56:58.810805 | 2018-12-10T06:26:10 | 2018-12-10T06:26:10 | 161,127,939 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,413 | py | """
Django settings for mysite2 project.
Generated by 'django-admin startproject' using Django 2.1.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'exqdius8wq@xnx8vp=5%*s8ry_0fjb*l@mg=o_(2zb@6u--n4%'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'bookmark',
'blog',
'tagging',
'disqus',
'django.contrib.sites',
]
DISQUS_WEBSITE_SHORTNAME = 'kang-disqus-com-1'
SITE_ID = 1
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite2.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')], # 수정
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite2.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'ko-kr'
TIME_ZONE = 'Asia/Seoul'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static')]
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media') | [
"jhwan04@naver.com"
] | jhwan04@naver.com |
7bae55f5cd68ee57c4d09fa42a12ca47aa791d39 | c97ea13036c0dfdd97c7e4c0f286933935222754 | /playground/pyramid_restapi/hello_world.py | 3df201d59f1a716161a18f4c2cc273bfc3e13401 | [] | no_license | marionleborgne/angular_webapp | 2fe45204a865554d244450ecd7813b0ebd1ebed1 | 7d3c6f0f845d4389cda4cca91c4aef923994d18b | refs/heads/master | 2021-01-22T12:07:40.036375 | 2015-07-11T20:02:55 | 2015-07-11T20:02:55 | 19,565,273 | 0 | 1 | null | 2019-09-04T21:57:47 | 2014-05-08T07:56:03 | HTML | UTF-8 | Python | false | false | 1,097 | py | #!python
from wsgiref.simple_server import make_server
from pyramid.config import Configurator
from pyramid.response import Response
def hello_world(request):
return Response('Hello %(name)s!' % request.matchdict)
class TestServer(object):
"""Pyramid Test"""
global server
global config
global serverAdress
global serverPort
def __init__(self, address, port):
self.serverAdress = address
self.serverPort = port
self.config = Configurator()
def configure(self):
self.config.add_route('hello', '/hello/{name}')
self.config.add_view(hello_world, route_name='hello')
app = self.config.make_wsgi_app()
self.server = make_server(self.serverAdress, self.serverPort, app)
def run(self):
self.server.serve_forever()
if __name__ == '__main__':
port = 9090
address = 'localhost'
server = TestServer(address, port)
server.configure()
server.run()
| [
"marion@lyatiss.com"
] | marion@lyatiss.com |
fccc8567a3538d0dfa57c8bf90ef76e56e6eea2c | e846fe707fdd9d3ef645d8fd3b404eb8a16fc395 | /splay-tree.py | 94b4ac32313cc6a103f74f8a89e6b05b0b8664e3 | [] | no_license | roma646/home_tasks | f156fc7a92cd7237ce1dabcc0ca032382d1fe17b | 297cf8a16fa41abafb0c70e764af2112fa77aa4b | refs/heads/master | 2022-12-26T16:29:36.327331 | 2020-10-05T12:22:49 | 2020-10-05T12:22:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,231 | py | from Node import Node
from splay import *
import copy
def F(x, s):
return (x + s) % 1000000001
def plus(i, tree):
if find(tree, i).key == i:
return tree
else:
tree = add(tree, Node(i))
return tree
def minos(i, tree):
if find(tree, i).key != i:
return
else:
tree = remove(find(tree, i))
return tree
def search(i, tree):
if find(tree, i).key == i:
return 'Found'
else:
return 'Not found'
def sum_from_l_r(f_l, f_r, tree):
tree_copy = copy.deepcopy(tree)
tree_1, tree_2 = split(tree, f_l)
tree_1_1, tree_1_2 = split(tree_2, f_r + 0.000001)
return sum_all(tree_1_1), tree_copy
tree, ss = None, 0
res = []
n = int(input())
for i in range(n):
s = input()
if s[0] == '+':
if tree == None:
tree = Node(F(int(s[2:]), ss))
else:
tree = plus(F(ss, int(s[2:])), tree)
if s[0] == '-':
if tree == None:
continue
else:
tree = minos(F(ss, int(s[2:])), tree)
if s[0] == '?':
if tree == None:
res.append('Not found')
else:
res.append(search(F(ss, int(s[2:])), tree))
if s[0] == 's':
if tree == None:
res.append(0)
else:
s = s[2:]
l,r = map(int, s.split())
ss, tree = sum_from_l_r(F(l, ss), F(r, ss), tree)
res.append(ss)
for i in res:
print(i)
| [
"noreply@github.com"
] | roma646.noreply@github.com |
42c699193149a09a0da7464a1f0b0a15dd48c2be | 52d96272ed356314bb0bcd1e5a5758a674d9f4aa | /module/extra/listpid.py | 6f2f4b59d79fb2be08914eab9b33a26cb5b8019c | [] | no_license | mockillo/Brunobot | b1e52f458e73b15724e9ac6d3bb72df586c93c37 | 33ae796e20aaf224ac7506d1a2436aa5df044c45 | refs/heads/master | 2021-01-16T01:26:42.990525 | 2013-03-12T17:56:13 | 2013-03-12T17:56:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 841 | py | ''' Required for Brunobot module'''
author = 'Russ Anderson, Vegard Veiset'
version = '1.0'
name = 'listpid'
require = ['communication']
listen = ['cmd']
cmd = ['listpid','pid','pidlist']
usage = 'pid \.py'
description = 'Displays processes matching a regular expression'
from subprocess import Popen, PIPE
def list_pids(grep):
'''lists all PIDs which match a grep value'''
proc = Popen(['ps aux | grep %s | grep -v grep' % (grep)], shell=True, stdin=PIPE, stdout=PIPE)
value = proc.communicate()
pids = []
for output in value[0].split('\n'):
if output:
try:
pids.append(output.split()[1])
except:
pass
return pids
def main(data):
if data['argv']:
communication.say(data['channel'], ", ".join(list_pids(data['argv'][0])))
| [
"veiset@gmail.com"
] | veiset@gmail.com |
039223819b5506d0a34de149bdf7f9bee3fba418 | f618cb7a1b1f49c02396a2bb969cc7518fd163ab | /doc/_gallery/2_3_instantaneous_frequency.py | a17ab147991133d5033cfc0b18306c67145421d0 | [] | no_license | kingjr/pytftb | b968b8e2fc294a19cec8bf63e7d289f368ddf194 | 0bcacf5eef46bd173d90a23c00a7f4b8ee284b22 | refs/heads/master | 2021-01-16T22:27:05.587174 | 2015-06-25T05:16:02 | 2015-06-25T05:16:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 419 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2015 jaidev <jaidev@newton>
#
# Distributed under terms of the MIT license.
"""
Example in section 2.3 of the tutorial.
"""
from tftb.generators import fmlin
from tftb.processing import plotifl, inst_freq
import numpy as np
signal, _ = fmlin(256)
time_samples = np.arange(3, 257)
ifr = inst_freq(signal)[0]
plotifl(time_samples, ifr)
| [
"deshpande.jaidev@gmail.com"
] | deshpande.jaidev@gmail.com |
2b42bc0862e430557454e9b5a96192024a18c331 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02694/s576792057.py | 701f7d18f2a81a811cc0bb6ccfa3946af1036f75 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 107 | py | import math
n=int(input())
c=100
for i in range(4000):
c=(c*101)//100
if c>=n:
print(i+1)
break | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
75754cc97d069fa3a14220afd217e2f9a3f0eb7e | 21a7725407f6d2ff73b42ae57c9014484a3ca875 | /requestsCase/test_case1.py | 09a3abda1f71e6ec5273f2d8024554d3e606f7dd | [] | no_license | niudaxx/Page_Object_Model | e7a16d148afc770ec1bce3a5869e0186159b25f1 | 0a64553c9877f8fc9c837c9674d0a305349c69b1 | refs/heads/master | 2023-04-06T08:41:39.410602 | 2021-04-19T09:40:18 | 2021-04-19T09:40:18 | 346,654,177 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,975 | py | # @Time:2021/4/13 13:42
# @Author:testDa
# @File:test_case1.py
# @Reason:
from requestsCase.base_test import BaseTest
from public.models.getyaml import getYaml
from config import setting
from public.models.log import Log
import os
log = Log()
testData = getYaml(setting.REQUESTS_DIR + '/' + 'test_case1.yaml')
class TestCase1(BaseTest):
def test_case1(self):
url = testData.get_requestsParam_url(0)
type = testData.get_requestsParam_type(0)
data = testData.get_requestsParam_data(0)
result = self.case_method(url=url,req_type=type,data=data,headers=self.session.headers,cookie=self.cookiejar.get_dict())
self.assertIn(result,[{'hasFile':'Y'},{'hasFile':'N'}])
def test_case2(self):
url = testData.get_requestsParam_url(1)
type = testData.get_requestsParam_type(1)
data = testData.get_requestsParam_data(1)
result = self.case_method(url=url, req_type=type, data=data, headers=self.session.headers,
cookie=self.cookiejar.get_dict())
print(result)
def test_case3(self):
url = testData.get_requestsParam_url(2)
type = testData.get_requestsParam_type(2)
data = testData.get_requestsParam_data(2)
result = self.case_method(url=url, req_type=type, data=data, headers=self.session.headers,
cookie=self.cookiejar.get_dict())
print(result)
import unittest,datetime
from BeautifulReport import BeautifulReport
if __name__ == '__main__':
casePath = os.path.dirname(os.path.realpath(__file__))
discover = unittest.defaultTestLoader.discover(casePath,pattern='test*.py',top_level_dir=None)
time = datetime.datetime.now().strftime('%Y-%m-%d %H_%M_%S')
filename = '测试报告'+str(time)
BeautifulReport(discover).report(description='测试报告',filename=filename,log_path=setting.REPORT_DIR)
# runner = unittest.TextTestRunner()
# runner.run(discover) | [
"1134808131@qq.com"
] | 1134808131@qq.com |
80ea3e4d68dd65c0b8d23ae37c7d591e095f2e11 | 7efc2d4d0c8f985a6e2aae6b973767525b8af5a9 | /tests/test_message.py | 14e00582585e540f25b96beb0a94265543eae77f | [
"BSD-3-Clause"
] | permissive | wtolson/gnsq | d0db69b924cbeedf5713ab14fd36083e3117920e | 1303230cbd977c81a06ff3d739fd0c915b57de3e | refs/heads/master | 2020-12-24T16:24:12.732599 | 2020-10-21T19:12:03 | 2020-10-21T19:12:03 | 21,590,545 | 73 | 18 | BSD-3-Clause | 2020-10-12T02:36:20 | 2014-07-07T23:25:59 | Python | UTF-8 | Python | false | false | 3,258 | py | import pytest
import gnsq
class MockConnection(object):
def __init__(self, message, operations):
message.on_finish.connect(self.finish)
message.on_requeue.connect(self.requeue)
message.on_touch.connect(self.touch)
self.operations = iter(operations)
def finish(self, message):
exp_name, exp_args = next(self.operations)
assert exp_name == 'finish'
assert exp_args == (message,)
def requeue(self, message, timeout, backoff):
exp_name, exp_args = next(self.operations)
assert exp_name == 'requeue'
assert exp_args == (message, timeout, backoff)
def touch(self, message):
exp_name, exp_args = next(self.operations)
assert exp_name == 'touch'
assert exp_args == (message,)
def assert_finished(self):
with pytest.raises(StopIteration):
next(self.operations)
def test_basic():
message = gnsq.Message(0, 42, '1234', 'sup')
assert message.timestamp == 0
assert message.attempts == 42
assert message.id == '1234'
assert message.body == 'sup'
assert message.has_responded() is False
def test_finish():
message = gnsq.Message(0, 42, '1234', 'sup')
mock_conn = MockConnection(message, [
('finish', (message,)),
])
assert message.has_responded() is False
message.finish()
assert message.has_responded() is True
with pytest.raises(gnsq.errors.NSQException):
message.finish()
mock_conn.assert_finished()
def test_requeue():
message = gnsq.Message(0, 42, '1234', 'sup')
mock_conn = MockConnection(message, [
('requeue', (message, 0, True)),
])
assert message.has_responded() is False
message.requeue()
assert message.has_responded() is True
with pytest.raises(gnsq.errors.NSQException):
message.requeue()
mock_conn.assert_finished()
def test_requeue_timeout():
message = gnsq.Message(0, 42, '1234', 'sup')
mock_conn = MockConnection(message, [
('requeue', (message, 1000, True)),
])
assert message.has_responded() is False
message.requeue(1000)
assert message.has_responded() is True
with pytest.raises(gnsq.errors.NSQException):
message.requeue(1000)
mock_conn.assert_finished()
def test_backoff():
message = gnsq.Message(0, 42, '1234', 'sup')
mock_conn = MockConnection(message, [
('requeue', (message, 0, False)),
])
assert message.has_responded() is False
message.requeue(backoff=False)
assert message.has_responded() is True
with pytest.raises(gnsq.errors.NSQException):
message.requeue()
mock_conn.assert_finished()
def test_touch():
message = gnsq.Message(0, 42, '1234', 'sup')
mock_conn = MockConnection(message, [
('touch', (message,)),
('touch', (message,)),
('touch', (message,)),
('finish', (message,)),
])
assert message.has_responded() is False
message.touch()
message.touch()
message.touch()
assert message.has_responded() is False
message.finish()
assert message.has_responded() is True
with pytest.raises(gnsq.errors.NSQException):
message.touch()
mock_conn.assert_finished()
| [
"wtolson@gmail.com"
] | wtolson@gmail.com |
fed31394e27df3de8d69c3d2a7f05eb06bbb7993 | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/merula.py | c1f2d7293ba7348aea72669a9a5c6149d0381a0b | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 158 | py | ii = [('TennAP.py', 1), ('ClarGE2.py', 1), ('GellWPT2.py', 2), ('CarlTFR.py', 1), ('WadeJEB.py', 2), ('KirbWPW2.py', 1), ('SoutRD2.py', 1), ('DibdTRL.py', 1)] | [
"prabhjyotsingh95@gmail.com"
] | prabhjyotsingh95@gmail.com |
bc79d20ccda8feb69ef9730b1672b3f4913fa27e | 75dcb56e318688499bdab789262839e7f58bd4f6 | /_algorithms_challenges/leetcode/lc-all-solutions/101.symmetric-tree/symmetric-tree.py | fa6722de53dc2ece0bc489f146b03a176b240eab | [] | no_license | syurskyi/Algorithms_and_Data_Structure | 9a1f358577e51e89c862d0f93f373b7f20ddd261 | 929dde1723fb2f54870c8a9badc80fc23e8400d3 | refs/heads/master | 2023-02-22T17:55:55.453535 | 2022-12-23T03:15:00 | 2022-12-23T03:15:00 | 226,243,987 | 4 | 1 | null | 2023-02-07T21:01:45 | 2019-12-06T04:14:10 | Jupyter Notebook | UTF-8 | Python | false | false | 555 | py | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def isSymmetric(self, node):
"""
:type root: TreeNode
:rtype: bool
"""
def helper(root, mirror):
if not root and not mirror:
return True
if root and mirror and root.val == mirror.val:
return helper(root.left, mirror.right) and helper(root.right, mirror.left)
return False
return helper(node, node)
| [
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
6ddeac1bfa135c0e88efc5de26c020aa9ead87b7 | 86d00c6ce86e9cc600530a044912abce652e4e60 | /temp.py | ddb8f18cfa9b027251df9c36bdbe1102e0f120ce | [] | no_license | chihoxtra/AIND-Pacman | 4ee4bfc7120edde5a7f738690c72ed40613ccb84 | 273c045ed60701f2f39c5c3e0419f985b884a358 | refs/heads/master | 2020-04-01T23:07:20.210947 | 2018-10-19T07:22:57 | 2018-10-19T07:22:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,415 | py |
""" manh distance + wall denstiy + block wall cost"""
dCost = 0.0
wCost = 0.0
bCost = 0.0
dCostArray = []
wCostArray = []
bCostArray = []
if bool(foodGrid.asList()):
for f in foodGrid.asList():
#distance component
manhdist = util.manhattanDistance(currPos, f)
manhdist_mean = float(1.*manhdist/(w.height-2 + w.width-2))
dCostArray.append(manhdist_mean)
#wall component
wallDensity = gridDensity(w, currPos, f)
wCostArray.append(wallDensity)
#blocking wall
bWallCount = 0
bWallCost = blockingWall(w, currPos, f)
if bWallCost > 0:
bWallCount += 1
bCostArray.append(bWallCost)
#if f[0] == 1:
# print(f, currPos, wallDensity, manhdist_mean)
#print(f, currPos,manhdist_mean,wallDensity,bWallCost)
dCost = min(dCostArray)
wCost = min(wCostArray)
bCost = min(bCostArray)
if bWallCount != 0:
bCost = 1.*sum(bCostArray)/bWallCount
else:
bCost = 0
dCost + wCost: 16456
dCost + wCost + bCost(min): 16006
dCost + bCost(average): cannot pass
dCost + bCost(min): 16099
""" actual cost to shorest food + points """
currPos, foodGrid = state
w = p.walls
#WAKA
#step 1: find closest food
closestFood = findClosestFood(currPos, foodGrid, w)
#step 2: find the actual distance
dCost1 = 0.0
points = 0.0
if bool(closestFood):
dCost1 = mazeDistance(currPos, closestFood, p.startingGameState)
#step 3: add points to rest of foods
for f in foodGrid.asList():
if f != closestFood and f[0] != currPos[0] and f[1] != currPos[1]:
points += 1
hCost = dCost1 + points
""" CROSS REGION """
def foodHeuristic(state, p):
currPos, foodGrid = state
w = p.walls
TotalFcost = 0.0
# part 1 identify the cross region for foods
# done for first time only
if 'fCrossRegionList' not in p.heuristicInfo:
p.heuristicInfo['fCrossRegionList'] = {}
p.heuristicInfo['hCost'] = {}
for f in foodGrid.asList():
fCrossRegion = getCrossRegion(f, w)
p.heuristicInfo['fCrossRegionList'][f] = fCrossRegion
# part II: get pacman cross region
pacRegion = getCrossRegion(currPos, w)
for f in foodGrid.asList():
fcost = 0.0
fCrossRegion = p.heuristicInfo['fCrossRegionList'][f]
intersacts = [x for x in fCrossRegion if x in pacRegion]
if len(intersacts) > 0:
print(f, currPos, (intersacts))
for interact in intersacts:
if f == (1,5):
a1 = util.manhattanDistance(f, interact)
b1 = util.manhattanDistance(currPos, interact)
print(currPos, a1, b1)
fcost += util.manhattanDistance(f, interact)
if f in p.heuristicInfo['hCost']:
p.heuristicInfo['hCost'][f].append(fcost)
costHistory = p.heuristicInfo['hCost'][f]
Fcost = float(sum(costHistory))/float(len(costHistory))
else:
p.heuristicInfo['hCost'][f] = []
p.heuristicInfo['hCost'][f].append(fcost)
Fcost = fcost
TotalFcost += Fcost
return TotalFcost
| [
"samuelpun@gmail.com"
] | samuelpun@gmail.com |
6fbd4c3818fa153168ceecbdb8d21684998179b8 | b7c32f83f92f3e75ee1e677bdb58fed6d9faac88 | /modubot/bot.py | 3af5324f1eaff95f36cc9201759997ce5fc28d76 | [] | no_license | kryptx/sc2-bots | 9abe27582a876f5092589d20eb9fc8ddbbd781d0 | 263e4670f472d57f925e1dbfb429f8293e1b837b | refs/heads/master | 2022-12-15T10:07:39.038659 | 2021-05-15T05:04:46 | 2021-05-15T05:04:46 | 205,922,481 | 0 | 0 | null | 2022-12-08T08:20:25 | 2019-09-02T19:22:36 | Python | UTF-8 | Python | false | false | 8,139 | py | import logging
import math
import os
import random
import sc2
import sys
import time
from pythonjsonlogger import jsonlogger
from sc2 import Race
from sc2.constants import UnitTypeId, UpgradeId
from sc2.unit_command import UnitCommand
from sc2.units import Units
from sc2.position import Point2
from modubot.modules.game_state import SurrenderedException
from modubot.planners.protoss import ProtossBasePlanner
from modubot.planners.zerg import ZergBasePlanner
from modubot.common import Urgency, list_flatten, OptionsObject, is_worker, LoggerWithFields
def urgencyValue(req):
return req.urgency
log_level = os.getenv("LOG_LEVEL", "warn")
numeric_level = getattr(logging, log_level.upper(), None)
if not isinstance(numeric_level, int):
raise ValueError(f"Invalid log level: {log_level}")
handler = logging.FileHandler(filename='logs/sc2.log',encoding='utf-8')
handler.setFormatter(jsonlogger.JsonFormatter())
logging.basicConfig(level=numeric_level,handlers=[handler])
### EL BOT ###
class ModuBot(sc2.BotAI):
  """Modular SC2 bot: forwards game events to pluggable modules and
  arbitrates the resource requests they return each step."""
  def __init__(self, modules=[], limits=dict()):
    # NOTE(review): mutable default arguments (list/dict) are shared across
    # all instances created without explicit arguments — callers should
    # pass fresh collections.
    self.shared = OptionsObject() # just a generic object; modules attach shared state here
    self.shared.optimism = 1 # pre-seeded so attribute access passes static analysis
    self.unit_command_uses_self_do = True
    # Various info that's often needed
    self.shared.enemy_race = None
    # we'll deal with this once the game starts (race-specific planner)
    self.planner = None
    # things a consumer should provide
    self.limits = limits
    self.modules = modules
    # for cross-referencing with other bots that are created at the same time
    self.start_time = str(int(time.time()))
  def deallocate(self, tag_set):
    """Ask every module to release the given unit tags."""
    for module in self.modules:
      module.deallocate(tag_set)
  async def on_start(self):
    """Build the per-bot logger, pick a race-specific base planner, and
    let each module initialize."""
    bot_id = f"{self.start_time}-{self.player_id}-{self.race}"
    self.log = LoggerWithFields(logging.getLogger(), { "bot_id": bot_id, "start_time": self.start_time })
    if self.race == Race.Protoss:
      self.planner = ProtossBasePlanner(self)
    elif self.race == Race.Zerg:
      self.planner = ZergBasePlanner(self)
    for module in self.modules:
      await module.on_start()
  async def on_end(self, game_result):
    """Propagate game end to all modules."""
    for module in self.modules:
      await module.on_end(game_result)
  async def on_unit_created(self, unit):
    """Propagate unit creation to all modules."""
    for module in self.modules:
      await module.on_unit_created(unit)
  async def on_unit_destroyed(self, tag):
    """Propagate unit destruction (by tag) to all modules."""
    for module in self.modules:
      await module.on_unit_destroyed(tag)
  async def on_building_construction_started(self, unit):
    """Propagate construction start to all modules."""
    for module in self.modules:
      await module.on_building_construction_started(unit)
  async def on_building_construction_complete(self, unit):
    """Propagate construction completion to all modules."""
    for module in self.modules:
      await module.on_building_construction_complete(unit)
  async def on_upgrade_complete(self, upgrade_id):
    """Record warpgate completion in shared state, then notify modules."""
    if upgrade_id == UpgradeId.WARPGATERESEARCH:
      self.shared.warpgate_complete = True
    for module in self.modules:
      await module.on_upgrade_complete(upgrade_id)
  def log_request_header(self, iteration):
    """Log a per-iteration summary of resources and module allocations."""
    self.log.info({
      "message": "Beginning iteration",
      "iteration": iteration,
      "optimism": self.shared.optimism,
      "log_optimism": math.log(max(self.shared.optimism, 0.001)),
      "minerals": self.minerals,
      "vespene": self.vespene,
      "supply_used": self.supply_used,
      "supply_cap": self.supply_cap,
      "known_enemies": len(self.shared.known_enemy_units),
      "allocated": dict(zip(
        [ type(m).__name__ for m in self.modules ],
        [ len(m.allocated) for m in self.modules ],
      )),
      "unallocated": len(self.unallocated())
    })
  def log_request_result(self, request, original_request, result_msg):
    """Log the outcome of evaluating one (possibly replaced) request."""
    self.log.info({
      "message": "Request evaluated",
      "urgency": request.urgency,
      "expense": str(request.expense),
      "result": result_msg,
      "request": {
        "type": type(original_request).__name__,
        "expense": original_request.expense,
      }
    })
  async def on_step(self, iteration):
    """Collect requests from modules, sort by urgency, and fulfill as many
    as the current mineral/vespene/supply budget allows.

    Once a request of a given resource cannot be afforded, a per-resource
    urgency threshold blocks all lower-urgency requests needing that
    resource (so high-urgency requests effectively reserve income).
    """
    self.log = self.log.withFields({ "game_time": self.time })
    requests = []
    for module in self.modules:
      try:
        module_result = await module.on_step(iteration) or []
        requests.extend(module_result)
      except SurrenderedException:
        self.log.info("Exiting due to surrender")
        return
    requests.sort(key=urgencyValue, reverse=True)
    mineral_threshold = None
    vespene_threshold = None
    supply_threshold = None
    # Local running budget, decremented as requests are (tentatively) paid.
    minerals = self.minerals
    vespene = self.vespene
    supply = self.supply_left
    checked = set()
    self.log_request_header(iteration)
    while requests:
      # NOTE: pop(0) on a list is O(n) per pop; fine for small queues.
      request = requests.pop(0)
      original_request = request
      if not request.urgency:
        break
      result = await request.fulfill(self)
      # A request may return another request (a prerequisite); follow the
      # chain until we get an actual unit command or None.
      while hasattr(result, 'fulfill'):
        self.log.debug({
          "message": "Replacing request",
          "requested": {
            "request_type": type(request),
            "expense": request.expense
          },
          "replacement": {
            "request_type": type(result),
            "expense": result.expense
          }
        })
        request = result
        result = await request.fulfill(self)
      if request.expense in checked:
        self.log_request_result(request, original_request, "duplicate request")
        continue
      checked.add(request.expense)
      cost = self.calculate_cost(request.expense)
      supply_cost = self.calculate_supply_cost(request.expense) if isinstance(request.expense, UnitTypeId) else 0
      if cost.minerals > 0 and mineral_threshold and request.urgency < mineral_threshold:
        self.log_request_result(request, original_request, f"urgency is below mineral threshold (costs {cost.minerals})")
        continue
      if cost.vespene > 0 and vespene_threshold and request.urgency < vespene_threshold:
        self.log_request_result(request, original_request, f"urgency is below vespene threshold (costs {cost.vespene})")
        continue
      if supply_cost > 0 and supply_threshold and request.urgency < supply_threshold:
        self.log_request_result(request, original_request, f"urgency is below supply threshold (costs {supply_cost})")
        continue
      can_afford = True
      # An unaffordable request sets the threshold for its resource(s).
      if cost.minerals > 0 and cost.minerals > minerals:
        can_afford = False
        mineral_threshold = request.urgency
      if cost.vespene > 0 and cost.vespene > vespene:
        can_afford = False
        vespene_threshold = request.urgency
      if supply_cost > 0 and supply_cost > supply:
        can_afford = False
        supply_threshold = request.urgency
      cost_msg = 'cost not deducted'
      if result or isinstance(request.expense, UnitTypeId):
        cost_msg = 'real cost deducted'
        minerals -= max(cost.minerals, 0)
        vespene -= max(cost.vespene, 0)
        supply -= max(supply_cost, 0)
      if can_afford:
        if not result:
          self.log_request_result(request, original_request,
            f"dependency already in progress ({cost_msg})"
          )
          continue
        self.do(result)
        self.log_request_result(request, original_request, "️✔ Filled")
      else:
        self.log_request_result(request, original_request, f"️Can't afford ({cost_msg})")
    # NOTE(review): this closes the module-level log handler on EVERY step;
    # subsequent iterations will write to a closed handler — confirm this is
    # intended (flush alone seems sufficient).
    handler.flush()
    handler.close()
  def bases_centroid(self):
    """Geometric center of all owned townhall positions."""
    return Point2.center([base.position for base in self.townhalls])
  # Modules that want to claim units are required to:
  # - Implement an `urgency` property
  # - report a set of tags of allocated units
  # - respond to requests to deallocate units
  # In exchange for meeting these requirements, a module may add units freely to its allocated set,
  # provided that another module has not claimed them at a higher urgency.
  def unallocated(self, unit_types=None, urgency=Urgency.NONE):
    """Ready units (non-workers by default) not claimed by any module whose
    urgency meets or exceeds the given urgency."""
    units = self.units.ready(unit_types) if unit_types else self.units.ready.filter(lambda u: not is_worker(u))
    return units.tags_not_in(list_flatten([
      list(module.allocated) if module.urgency >= urgency
      else []
      for module in self.modules
    ]))
| [
"kryptx@gmail.com"
] | kryptx@gmail.com |
33799c324715078a20e4aa313614ba00ba22a7fd | 84455064ba5802840b1fea6d43cfea4034b1c7bb | /first_task.py | 55b3e67214cc8edcf3ccd695c29159d2e944ac28 | [] | no_license | hannan13231/most_frequent | 2b6bb368eddccb7491671f09e87445e9c30c47a9 | bf9a08b9d80a6390423bedaf4dc1c70daaef873d | refs/heads/main | 2023-02-04T00:56:53.239741 | 2020-12-22T10:45:02 | 2020-12-22T10:45:02 | 323,597,099 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 961 | py | # most_frequent function
def most_frequent(string) :
    """Count how often each character occurs in *string*.

    Results are stored in the module-level ``obj`` dictionary via
    ``obj.add(char, count)``; insertion order follows each character's
    first appearance.  Returns nothing — callers read counts from ``obj``.
    """
    # dict.fromkeys removes duplicates while preserving first-seen order.
    for ch in dict.fromkeys(string):
        # str.count performs the inner scan in C, replacing the original
        # hand-written nested counting loop.
        obj.add(ch, string.count(ch))
# class to create a dictonary
class dictonary(dict) :
    """Thin dict subclass exposing an explicit add(key, value) helper.

    (The misspelled class name is kept because module-level code refers
    to it by this name.)
    """
    def __init__ (self):
        # Bug fix: the original body was `self = dict()`, which only
        # rebound the local name and had no effect.  Delegate to the
        # base-class initializer instead.
        super().__init__()
    def add( self, key, value ) :
        """Store *value* under *key*."""
        self[key] = value
# Read the input string from the user.
str1 = input("Please enter a string : ")
print(str1)
# Module-level dictionary that most_frequent() fills with char counts.
obj = dictonary()
print("Output :")
# calling the most_frequent function (populates obj as a side effect)
most_frequent(str1)
# sorting and printing the frequency of each character in decreasing order
sort = dict(sorted(obj.items(), key = lambda item: item[1], reverse = True))
for key, value in sort.items() :
    print( key , "=", value)
| [
"noreply@github.com"
] | hannan13231.noreply@github.com |
0d1737f676248993459efa90b6363218f8da90ee | 978caa22c8ccf0c92fce62bf39225201ea7976f9 | /users/migrations/0006_prescreption.py | 69a6537682fc79e8dccf194c400850e050d31670 | [] | no_license | gaurav-patel-git/hospital-mng-sys | 912a9140edfadd8b068ddbb52e9ee813678be9f9 | 79d48ac0f636ffa17f17ac708b6114bff893fed0 | refs/heads/master | 2023-01-06T14:57:45.814527 | 2020-11-03T08:48:54 | 2020-11-03T08:48:54 | 301,130,079 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,039 | py | # Generated by Django 3.0.5 on 2020-05-25 04:25
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Create the Prescreption model linking a doctor and a patient
    (both AUTH_USER_MODEL foreign keys) to symptoms and prescription text."""
    dependencies = [
        # Depends on the swappable user model and the previous users migration.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('users', '0005_profile_phone'),
    ]
    operations = [
        migrations.CreateModel(
            # NOTE(review): 'Prescreption' is misspelled, but renaming an
            # applied historical migration would break migration state.
            name='Prescreption',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('symptoms', models.CharField(max_length=50)),
                ('prescreption', models.TextField(max_length=500)),
                ('doctor_name', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='doctor_prescribed', to=settings.AUTH_USER_MODEL)),
                ('patient_name', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='patient_prescribed', to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"gp8966891720@gmail.com"
] | gp8966891720@gmail.com |
a9f466a4e4748119563e298e78b62432c49560bf | 5dd1177d34759f78a08c96b4130ba9ffa25bda92 | /des.py | 6297bf3007c2f4cf1bbdb9fc8d05a53be8e34985 | [] | no_license | TennoClash/5E-excel | bcc1a3bd8d9511df55d065b1bcad1427ab6d01f2 | ac57c377984ffb8bb0ad107c9a162be6dba2b5d9 | refs/heads/master | 2022-11-07T18:30:31.327926 | 2020-06-25T06:13:37 | 2020-06-25T06:13:37 | 274,839,131 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,029 | py | # -*- coding: utf-8 -*-
import xlsxwriter
import datetime
import os
import time
# Script: write 99 numbered student rows to kami1.xlsx on the user's Desktop
# and print the elapsed wall-clock time.
#startTime=datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')# now
#print startTime
startTime1 = time.time()
#print startTime1
print(os.path.join(os.path.expanduser("~"), 'Desktop'))
print(os.path.join(os.path.expanduser("~"), 'Desktop').replace('\\','/'))
workbook = xlsxwriter.Workbook(os.path.join(os.path.expanduser("~"), 'Desktop')+"/kami1.xlsx")
worksheet = workbook.add_worksheet() # create a worksheet
title = [U'名称',U'副标题'] # table header labels
worksheet.write_row('A1',title) # write the header row to Excel
# NOTE(review): each data row has 3 values but the header only has 2 columns;
# confirm whether a third header label is missing.
for i in range(1,100):
    num0 = str(i+1)
    num = str(i)
    row = 'A' + num0
    data = [u'学生'+num,num,"hmp"+num]
    worksheet.write_row(row, data)
    # NOTE(review): this increment has no effect — `for` rebinds i each
    # iteration; it can be removed.
    i+=1
workbook.close()
#time.sleep(60)
#endTime=datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')# end
#print endTime
endTime1 = time.time()
#print endTime1
print (endTime1-startTime1)
| [
"371349024@qq.com"
] | 371349024@qq.com |
06a7d49f4f3f197a089d05561ff0e7ac9e3d2e24 | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/crsmithdev_arrow/arrow-master/tests/parser_tests.py | 7682df8c3b3785f8f43a33bec67dffd88e6abf6a | [] | no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 26,860 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from chai import Chai
from datetime import datetime
from dateutil import tz
import calendar
import time
from arrow import parser
from arrow.parser import DateTimeParser, ParserError
class DateTimeParserTests(Chai):
    """Unit tests for DateTimeParser internals: multiformat dispatch,
    token parsing, and the pattern-regex cache.  Uses Chai's injected
    mock/expect/assert helpers."""
    def setUp(self):
        super(DateTimeParserTests, self).setUp()
        self.parser = parser.DateTimeParser()
    def test_parse_multiformat(self):
        mock_datetime = mock()
        expect(self.parser.parse).args('str', 'fmt_a').raises(ParserError)
        expect(self.parser.parse).args('str', 'fmt_b').returns(mock_datetime)
        result = self.parser._parse_multiformat('str', ['fmt_a', 'fmt_b'])
        assertEqual(result, mock_datetime)
    def test_parse_multiformat_all_fail(self):
        expect(self.parser.parse).args('str', 'fmt_a').raises(ParserError)
        expect(self.parser.parse).args('str', 'fmt_b').raises(ParserError)
        with assertRaises(ParserError):
            self.parser._parse_multiformat('str', ['fmt_a', 'fmt_b'])
    def test_parse_multiformat_unexpected_fail(self):
        # Non-ParserError exceptions must propagate, not be swallowed.
        class UnexpectedError(Exception):
            pass
        expect(self.parser.parse).args('str', 'fmt_a').raises(UnexpectedError)
        with assertRaises(UnexpectedError):
            self.parser._parse_multiformat('str', ['fmt_a', 'fmt_b'])
    def test_parse_token_nonsense(self):
        parts = {}
        self.parser._parse_token('NONSENSE', '1900', parts)
        assertEqual(parts, {})
    def test_parse_token_invalid_meridians(self):
        parts = {}
        self.parser._parse_token('A', 'a..m', parts)
        assertEqual(parts, {})
        self.parser._parse_token('a', 'p..m', parts)
        assertEqual(parts, {})
    def test_parser_no_caching(self):
        # cache_size=0 disables caching: every call regenerates the pattern.
        expect(parser.DateTimeParser, '_generate_pattern_re').args('fmt_a').times(100)
        self.parser = parser.DateTimeParser(cache_size=0)
        for _ in range(100):
            self.parser._generate_pattern_re('fmt_a')
    def test_parser_1_line_caching(self):
        expect(parser.DateTimeParser, '_generate_pattern_re').args('fmt_a').times(1)
        self.parser = parser.DateTimeParser(cache_size=1)
        for _ in range(100):
            self.parser._generate_pattern_re('fmt_a')
        expect(parser.DateTimeParser, '_generate_pattern_re').args('fmt_b').times(1)
        for _ in range(100):
            self.parser._generate_pattern_re('fmt_a')
            self.parser._generate_pattern_re('fmt_b')
        expect(parser.DateTimeParser, '_generate_pattern_re').args('fmt_a').times(1)
        for _ in range(100):
            self.parser._generate_pattern_re('fmt_a')
    def test_parser_multiple_line_caching(self):
        expect(parser.DateTimeParser, '_generate_pattern_re').args('fmt_a').times(1)
        self.parser = parser.DateTimeParser(cache_size=2)
        for _ in range(100):
            self.parser._generate_pattern_re('fmt_a')
        expect(parser.DateTimeParser, '_generate_pattern_re').args('fmt_b').times(1)
        for _ in range(100):
            self.parser._generate_pattern_re('fmt_a')
            self.parser._generate_pattern_re('fmt_b')
        # With cache_size=2 both formats stay cached: zero regenerations.
        expect(parser.DateTimeParser, '_generate_pattern_re').args('fmt_a').times(0)
        for _ in range(100):
            self.parser._generate_pattern_re('fmt_a')
class DateTimeParserParseTests(Chai):
    """Tests for DateTimeParser.parse: format tokens, timestamps, month
    names, meridians, timezone offsets/names, and subsecond rounding."""
    def setUp(self):
        super(DateTimeParserParseTests, self).setUp()
        self.parser = parser.DateTimeParser()
    def test_parse_list(self):
        expect(self.parser._parse_multiformat).args('str', ['fmt_a', 'fmt_b']).returns('result')
        result = self.parser.parse('str', ['fmt_a', 'fmt_b'])
        assertEqual(result, 'result')
    def test_parse_unrecognized_token(self):
        mock_input_re_map = mock(self.parser, '_input_re_map')
        expect(mock_input_re_map.__getitem__).args('YYYY').raises(KeyError)
        with assertRaises(parser.ParserError):
            self.parser.parse('2013-01-01', 'YYYY-MM-DD')
    def test_parse_parse_no_match(self):
        with assertRaises(parser.ParserError):
            self.parser.parse('01-01', 'YYYY-MM-DD')
    def test_parse_separators(self):
        with assertRaises(parser.ParserError):
            self.parser.parse('1403549231', 'YYYY-MM-DD')
    def test_parse_numbers(self):
        expected = datetime(2012, 1, 1, 12, 5, 10)
        assertEqual(self.parser.parse('2012-01-01 12:05:10', 'YYYY-MM-DD HH:mm:ss'), expected)
    def test_parse_year_two_digit(self):
        expected = datetime(1979, 1, 1, 12, 5, 10)
        assertEqual(self.parser.parse('79-01-01 12:05:10', 'YY-MM-DD HH:mm:ss'), expected)
    def test_parse_timestamp(self):
        tz_utc = tz.tzutc()
        timestamp = int(time.time())
        expected = datetime.fromtimestamp(timestamp, tz=tz_utc)
        assertEqual(self.parser.parse(str(timestamp), 'X'), expected)
    def test_parse_names(self):
        expected = datetime(2012, 1, 1)
        assertEqual(self.parser.parse('January 1, 2012', 'MMMM D, YYYY'), expected)
        assertEqual(self.parser.parse('Jan 1, 2012', 'MMM D, YYYY'), expected)
    def test_parse_pm(self):
        expected = datetime(1, 1, 1, 13, 0, 0)
        assertEqual(self.parser.parse('1 pm', 'H a'), expected)
        assertEqual(self.parser.parse('1 pm', 'h a'), expected)
        expected = datetime(1, 1, 1, 1, 0, 0)
        assertEqual(self.parser.parse('1 am', 'H A'), expected)
        assertEqual(self.parser.parse('1 am', 'h A'), expected)
        # 12am is midnight, 12pm is noon.
        expected = datetime(1, 1, 1, 0, 0, 0)
        assertEqual(self.parser.parse('12 am', 'H A'), expected)
        assertEqual(self.parser.parse('12 am', 'h A'), expected)
        expected = datetime(1, 1, 1, 12, 0, 0)
        assertEqual(self.parser.parse('12 pm', 'H A'), expected)
        assertEqual(self.parser.parse('12 pm', 'h A'), expected)
    def test_parse_tz_hours_only(self):
        expected = datetime(2025, 10, 17, 5, 30, 10, tzinfo=tz.tzoffset(None, 0))
        parsed = self.parser.parse('2025-10-17 05:30:10+00', 'YYYY-MM-DD HH:mm:ssZ')
        assertEqual(parsed, expected)
    def test_parse_tz_zz(self):
        expected = datetime(2013, 1, 1, tzinfo=tz.tzoffset(None, -7 * 3600))
        assertEqual(self.parser.parse('2013-01-01 -07:00', 'YYYY-MM-DD ZZ'), expected)
    def test_parse_tz_name_zzz(self):
        for tz_name in (
            # best solution would be to test on every available tz name from
            # the tz database but it is actually tricky to retrieve them from
            # dateutil so here is a short list that should match all
            # naming patterns/conventions in the tz database
            'Africa/Tripoli',
            'America/Port_of_Spain',
            'Australia/LHI',
            'Etc/GMT-11',
            'Etc/GMT0',
            'Etc/UCT',
            'Etc/GMT+9',
            'GMT+0',
            'CST6CDT',
            'GMT-0',
            'W-SU',
        ):
            expected = datetime(2013, 1, 1, tzinfo=tz.gettz(tz_name))
            assertEqual(self.parser.parse('2013-01-01 %s' % tz_name, 'YYYY-MM-DD ZZZ'), expected)
        # note that offsets are not timezones
        with assertRaises(ParserError):
            self.parser.parse('2013-01-01 +1000', 'YYYY-MM-DD ZZZ')
    def test_parse_subsecond(self):
        # One S per decimal digit, 1..6 digits.
        expected = datetime(2013, 1, 1, 12, 30, 45, 900000)
        assertEqual(self.parser.parse('2013-01-01 12:30:45.9', 'YYYY-MM-DD HH:mm:ss.S'), expected)
        assertEqual(self.parser.parse_iso('2013-01-01 12:30:45.9'), expected)
        expected = datetime(2013, 1, 1, 12, 30, 45, 980000)
        assertEqual(self.parser.parse('2013-01-01 12:30:45.98', 'YYYY-MM-DD HH:mm:ss.SS'), expected)
        assertEqual(self.parser.parse_iso('2013-01-01 12:30:45.98'), expected)
        expected = datetime(2013, 1, 1, 12, 30, 45, 987000)
        assertEqual(self.parser.parse('2013-01-01 12:30:45.987', 'YYYY-MM-DD HH:mm:ss.SSS'), expected)
        assertEqual(self.parser.parse_iso('2013-01-01 12:30:45.987'), expected)
        expected = datetime(2013, 1, 1, 12, 30, 45, 987600)
        assertEqual(self.parser.parse('2013-01-01 12:30:45.9876', 'YYYY-MM-DD HH:mm:ss.SSSS'), expected)
        assertEqual(self.parser.parse_iso('2013-01-01 12:30:45.9876'), expected)
        expected = datetime(2013, 1, 1, 12, 30, 45, 987650)
        assertEqual(self.parser.parse('2013-01-01 12:30:45.98765', 'YYYY-MM-DD HH:mm:ss.SSSSS'), expected)
        assertEqual(self.parser.parse_iso('2013-01-01 12:30:45.98765'), expected)
        expected = datetime(2013, 1, 1, 12, 30, 45, 987654)
        assertEqual(self.parser.parse('2013-01-01 12:30:45.987654', 'YYYY-MM-DD HH:mm:ss.SSSSSS'), expected)
        assertEqual(self.parser.parse_iso('2013-01-01 12:30:45.987654'), expected)
    def test_parse_subsecond_rounding(self):
        # Sub-microsecond digits are rounded to the nearest microsecond.
        expected = datetime(2013, 1, 1, 12, 30, 45, 987654)
        format = 'YYYY-MM-DD HH:mm:ss.S'
        # round up
        string = '2013-01-01 12:30:45.9876539'
        assertEqual(self.parser.parse(string, format), expected)
        assertEqual(self.parser.parse_iso(string), expected)
        # round down
        string = '2013-01-01 12:30:45.98765432'
        assertEqual(self.parser.parse(string, format), expected)
        #import pudb; pudb.set_trace()
        assertEqual(self.parser.parse_iso(string), expected)
        # round half-up
        string = '2013-01-01 12:30:45.987653521'
        assertEqual(self.parser.parse(string, format), expected)
        assertEqual(self.parser.parse_iso(string), expected)
        # round half-down
        string = '2013-01-01 12:30:45.9876545210'
        assertEqual(self.parser.parse(string, format), expected)
        assertEqual(self.parser.parse_iso(string), expected)
    def test_map_lookup_keyerror(self):
        with assertRaises(parser.ParserError):
            parser.DateTimeParser._map_lookup({'a': '1'}, 'b')
    def test_try_timestamp(self):
        assertEqual(parser.DateTimeParser._try_timestamp('1.1'), 1.1)
        assertEqual(parser.DateTimeParser._try_timestamp('1'), 1)
        assertEqual(parser.DateTimeParser._try_timestamp('abc'), None)
class DateTimeParserRegexTests(Chai):
    """Tests for the regular expressions that tokenize format strings and
    match input fields (month names, digit groups)."""
    def setUp(self):
        super(DateTimeParserRegexTests, self).setUp()
        self.format_regex = parser.DateTimeParser._FORMAT_RE
    def test_format_year(self):
        assertEqual(self.format_regex.findall('YYYY-YY'), ['YYYY', 'YY'])
    def test_format_month(self):
        assertEqual(self.format_regex.findall('MMMM-MMM-MM-M'), ['MMMM', 'MMM', 'MM', 'M'])
    def test_format_day(self):
        assertEqual(self.format_regex.findall('DDDD-DDD-DD-D'), ['DDDD', 'DDD', 'DD', 'D'])
    def test_format_hour(self):
        assertEqual(self.format_regex.findall('HH-H-hh-h'), ['HH', 'H', 'hh', 'h'])
    def test_format_minute(self):
        assertEqual(self.format_regex.findall('mm-m'), ['mm', 'm'])
    def test_format_second(self):
        assertEqual(self.format_regex.findall('ss-s'), ['ss', 's'])
    def test_format_subsecond(self):
        assertEqual(self.format_regex.findall('SSSSSS-SSSSS-SSSS-SSS-SS-S'),
            ['SSSSSS', 'SSSSS', 'SSSS', 'SSS', 'SS', 'S'])
    def test_format_tz(self):
        assertEqual(self.format_regex.findall('ZZ-Z'), ['ZZ', 'Z'])
    def test_format_am_pm(self):
        assertEqual(self.format_regex.findall('A-a'), ['A', 'a'])
    def test_format_timestamp(self):
        assertEqual(self.format_regex.findall('X'), ['X'])
    def test_month_names(self):
        p = parser.DateTimeParser('en_us')
        text = '_'.join(calendar.month_name[1:])
        result = p._input_re_map['MMMM'].findall(text)
        assertEqual(result, calendar.month_name[1:])
    def test_month_abbreviations(self):
        p = parser.DateTimeParser('en_us')
        text = '_'.join(calendar.month_abbr[1:])
        result = p._input_re_map['MMM'].findall(text)
        assertEqual(result, calendar.month_abbr[1:])
    def test_digits(self):
        assertEqual(parser.DateTimeParser._TWO_DIGIT_RE.findall('12-3-45'), ['12', '45'])
        assertEqual(parser.DateTimeParser._FOUR_DIGIT_RE.findall('1234-56'), ['1234'])
        assertEqual(parser.DateTimeParser._ONE_OR_TWO_DIGIT_RE.findall('4-56'), ['4', '56'])
class DateTimeParserISOTests(Chai):
    """Tests for DateTimeParser.parse_iso across ISO-8601 variants:
    date-only, date+time, offsets, fractional seconds, GNU date output."""
    def setUp(self):
        super(DateTimeParserISOTests, self).setUp()
        self.parser = parser.DateTimeParser('en_us')
    def test_YYYY(self):
        assertEqual(
            self.parser.parse_iso('2013'),
            datetime(2013, 1, 1)
        )
    def test_YYYY_MM(self):
        for separator in DateTimeParser.SEPARATORS:
            assertEqual(
                self.parser.parse_iso(separator.join(('2013', '02'))),
                datetime(2013, 2, 1)
            )
    def test_YYYY_MM_DD(self):
        for separator in DateTimeParser.SEPARATORS:
            assertEqual(
                self.parser.parse_iso(separator.join(('2013', '02', '03'))),
                datetime(2013, 2, 3)
            )
    def test_YYYY_MM_DDTHH_mmZ(self):
        assertEqual(
            self.parser.parse_iso('2013-02-03T04:05+01:00'),
            datetime(2013, 2, 3, 4, 5, tzinfo=tz.tzoffset(None, 3600))
        )
    def test_YYYY_MM_DDTHH_mm(self):
        assertEqual(
            self.parser.parse_iso('2013-02-03T04:05'),
            datetime(2013, 2, 3, 4, 5)
        )
    def test_YYYY_MM_DDTHH_mm_ssZ(self):
        assertEqual(
            self.parser.parse_iso('2013-02-03T04:05:06+01:00'),
            datetime(2013, 2, 3, 4, 5, 6, tzinfo=tz.tzoffset(None, 3600))
        )
    def test_YYYY_MM_DDTHH_mm_ss(self):
        assertEqual(
            self.parser.parse_iso('2013-02-03T04:05:06'),
            datetime(2013, 2, 3, 4, 5, 6)
        )
    def test_YYYY_MM_DD_HH_mmZ(self):
        # Space separator instead of 'T' is also accepted.
        assertEqual(
            self.parser.parse_iso('2013-02-03 04:05+01:00'),
            datetime(2013, 2, 3, 4, 5, tzinfo=tz.tzoffset(None, 3600))
        )
    def test_YYYY_MM_DD_HH_mm(self):
        assertEqual(
            self.parser.parse_iso('2013-02-03 04:05'),
            datetime(2013, 2, 3, 4, 5)
        )
    def test_YYYY_MM_DD_HH_mm_ssZ(self):
        assertEqual(
            self.parser.parse_iso('2013-02-03 04:05:06+01:00'),
            datetime(2013, 2, 3, 4, 5, 6, tzinfo=tz.tzoffset(None, 3600))
        )
    def test_YYYY_MM_DD_HH_mm_ss(self):
        assertEqual(
            self.parser.parse_iso('2013-02-03 04:05:06'),
            datetime(2013, 2, 3, 4, 5, 6)
        )
    def test_YYYY_MM_DDTHH_mm_ss_S(self):
        assertEqual(
            self.parser.parse_iso('2013-02-03T04:05:06.7'),
            datetime(2013, 2, 3, 4, 5, 6, 700000)
        )
        assertEqual(
            self.parser.parse_iso('2013-02-03T04:05:06.78'),
            datetime(2013, 2, 3, 4, 5, 6, 780000)
        )
        assertEqual(
            self.parser.parse_iso('2013-02-03T04:05:06.789'),
            datetime(2013, 2, 3, 4, 5, 6, 789000)
        )
        assertEqual(
            self.parser.parse_iso('2013-02-03T04:05:06.7891'),
            datetime(2013, 2, 3, 4, 5, 6, 789100)
        )
        assertEqual(
            self.parser.parse_iso('2013-02-03T04:05:06.78912'),
            datetime(2013, 2, 3, 4, 5, 6, 789120)
        )
        # ISO 8601:2004(E), ISO, 2004-12-01, 4.2.2.4 ... the decimal fraction
        # shall be divided from the integer part by the decimal sign specified
        # in ISO 31-0, i.e. the comma [,] or full stop [.]. Of these, the comma
        # is the preferred sign.
        assertEqual(
            self.parser.parse_iso('2013-02-03T04:05:06,789123678'),
            datetime(2013, 2, 3, 4, 5, 6, 789124)
        )
        # there is no limit on the number of decimal places
        assertEqual(
            self.parser.parse_iso('2013-02-03T04:05:06.789123678'),
            datetime(2013, 2, 3, 4, 5, 6, 789124)
        )
    def test_YYYY_MM_DDTHH_mm_ss_SZ(self):
        assertEqual(
            self.parser.parse_iso('2013-02-03T04:05:06.7+01:00'),
            datetime(2013, 2, 3, 4, 5, 6, 700000, tzinfo=tz.tzoffset(None, 3600))
        )
        assertEqual(
            self.parser.parse_iso('2013-02-03T04:05:06.78+01:00'),
            datetime(2013, 2, 3, 4, 5, 6, 780000, tzinfo=tz.tzoffset(None, 3600))
        )
        assertEqual(
            self.parser.parse_iso('2013-02-03T04:05:06.789+01:00'),
            datetime(2013, 2, 3, 4, 5, 6, 789000, tzinfo=tz.tzoffset(None, 3600))
        )
        assertEqual(
            self.parser.parse_iso('2013-02-03T04:05:06.7891+01:00'),
            datetime(2013, 2, 3, 4, 5, 6, 789100, tzinfo=tz.tzoffset(None, 3600))
        )
        assertEqual(
            self.parser.parse_iso('2013-02-03T04:05:06.78912+01:00'),
            datetime(2013, 2, 3, 4, 5, 6, 789120, tzinfo=tz.tzoffset(None, 3600))
        )
        # Properly parse string with Z timezone
        assertEqual(
            self.parser.parse_iso('2013-02-03T04:05:06.78912Z'),
            datetime(2013, 2, 3, 4, 5, 6, 789120)
        )
    def test_gnu_date(self):
        """
        regression tests for parsing output from GNU date(1)
        """
        # date -Ins
        assertEqual(
            self.parser.parse_iso('2016-11-16T09:46:30,895636557-0800'),
            datetime(
                2016, 11, 16, 9, 46, 30, 895636,
                tzinfo=tz.tzoffset(None, -3600 * 8),
            )
        )
        # date --rfc-3339=ns
        assertEqual(
            self.parser.parse_iso('2016-11-16 09:51:14.682141526-08:00'),
            datetime(
                2016, 11, 16, 9, 51, 14, 682142,
                tzinfo=tz.tzoffset(None, -3600 * 8),
            )
        )
    def test_isoformat(self):
        # Round-trip: parse_iso must accept datetime.isoformat() output.
        dt = datetime.utcnow()
        assertEqual(self.parser.parse_iso(dt.isoformat()), dt)
class TzinfoParserTests(Chai):
    """Tests for TzinfoParser: 'local'/'utc' keywords, ISO offsets, and
    tz database names."""
    def setUp(self):
        super(TzinfoParserTests, self).setUp()
        self.parser = parser.TzinfoParser()
    def test_parse_local(self):
        assertEqual(self.parser.parse('local'), tz.tzlocal())
    def test_parse_utc(self):
        assertEqual(self.parser.parse('utc'), tz.tzutc())
        assertEqual(self.parser.parse('UTC'), tz.tzutc())
    def test_parse_iso(self):
        assertEqual(self.parser.parse('01:00'), tz.tzoffset(None, 3600))
        assertEqual(self.parser.parse('+01:00'), tz.tzoffset(None, 3600))
        assertEqual(self.parser.parse('-01:00'), tz.tzoffset(None, -3600))
    def test_parse_str(self):
        assertEqual(self.parser.parse('US/Pacific'), tz.gettz('US/Pacific'))
    def test_parse_fails(self):
        with assertRaises(parser.ParserError):
            self.parser.parse('fail')
class DateTimeParserMonthNameTests(Chai):
    """Tests for case-insensitive month name/abbreviation parsing,
    including non-English locales."""
    def setUp(self):
        super(DateTimeParserMonthNameTests, self).setUp()
        self.parser = parser.DateTimeParser('en_us')
    def test_shortmonth_capitalized(self):
        assertEqual(
            self.parser.parse('2013-Jan-01', 'YYYY-MMM-DD'),
            datetime(2013, 1, 1)
        )
    def test_shortmonth_allupper(self):
        assertEqual(
            self.parser.parse('2013-JAN-01', 'YYYY-MMM-DD'),
            datetime(2013, 1, 1)
        )
    def test_shortmonth_alllower(self):
        assertEqual(
            self.parser.parse('2013-jan-01', 'YYYY-MMM-DD'),
            datetime(2013, 1, 1)
        )
    def test_month_capitalized(self):
        assertEqual(
            self.parser.parse('2013-January-01', 'YYYY-MMMM-DD'),
            datetime(2013, 1, 1)
        )
    def test_month_allupper(self):
        assertEqual(
            self.parser.parse('2013-JANUARY-01', 'YYYY-MMMM-DD'),
            datetime(2013, 1, 1)
        )
    def test_month_alllower(self):
        assertEqual(
            self.parser.parse('2013-january-01', 'YYYY-MMMM-DD'),
            datetime(2013, 1, 1)
        )
    def test_localized_month_name(self):
        parser_ = parser.DateTimeParser('fr_fr')
        assertEqual(
            parser_.parse('2013-Janvier-01', 'YYYY-MMMM-DD'),
            datetime(2013, 1, 1)
        )
    def test_localized_month_abbreviation(self):
        parser_ = parser.DateTimeParser('it_it')
        assertEqual(
            parser_.parse('2013-Gen-01', 'YYYY-MMM-DD'),
            datetime(2013, 1, 1)
        )
class DateTimeParserMeridiansTests(Chai):
    """Tests for am/pm meridian parsing ('a' lowercase / 'A' uppercase
    tokens), including localized meridians."""
    def setUp(self):
        super(DateTimeParserMeridiansTests, self).setUp()
        self.parser = parser.DateTimeParser('en_us')
    def test_meridians_lowercase(self):
        assertEqual(
            self.parser.parse('2013-01-01 5am', 'YYYY-MM-DD ha'),
            datetime(2013, 1, 1, 5)
        )
        assertEqual(
            self.parser.parse('2013-01-01 5pm', 'YYYY-MM-DD ha'),
            datetime(2013, 1, 1, 17)
        )
    def test_meridians_capitalized(self):
        assertEqual(
            self.parser.parse('2013-01-01 5AM', 'YYYY-MM-DD hA'),
            datetime(2013, 1, 1, 5)
        )
        assertEqual(
            self.parser.parse('2013-01-01 5PM', 'YYYY-MM-DD hA'),
            datetime(2013, 1, 1, 17)
        )
    def test_localized_meridians_lowercase(self):
        # Hungarian uses 'de' (delelott) / 'du' (delutan) for am/pm.
        parser_ = parser.DateTimeParser('hu_hu')
        assertEqual(
            parser_.parse('2013-01-01 5 de', 'YYYY-MM-DD h a'),
            datetime(2013, 1, 1, 5)
        )
        assertEqual(
            parser_.parse('2013-01-01 5 du', 'YYYY-MM-DD h a'),
            datetime(2013, 1, 1, 17)
        )
    def test_localized_meridians_capitalized(self):
        parser_ = parser.DateTimeParser('hu_hu')
        assertEqual(
            parser_.parse('2013-01-01 5 DE', 'YYYY-MM-DD h A'),
            datetime(2013, 1, 1, 5)
        )
        assertEqual(
            parser_.parse('2013-01-01 5 DU', 'YYYY-MM-DD h A'),
            datetime(2013, 1, 1, 17)
        )
class DateTimeParserMonthOrdinalDayTests(Chai):
    """Tests for the 'Do' ordinal-day token (1st, 2nd, ...), including
    locale-specific ordinal suffixes."""
    def setUp(self):
        super(DateTimeParserMonthOrdinalDayTests, self).setUp()
        self.parser = parser.DateTimeParser('en_us')
    def test_english(self):
        parser_ = parser.DateTimeParser('en_us')
        assertEqual(
            parser_.parse('January 1st, 2013', 'MMMM Do, YYYY'),
            datetime(2013, 1, 1)
        )
        assertEqual(
            parser_.parse('January 2nd, 2013', 'MMMM Do, YYYY'),
            datetime(2013, 1, 2)
        )
        assertEqual(
            parser_.parse('January 3rd, 2013', 'MMMM Do, YYYY'),
            datetime(2013, 1, 3)
        )
        assertEqual(
            parser_.parse('January 4th, 2013', 'MMMM Do, YYYY'),
            datetime(2013, 1, 4)
        )
        assertEqual(
            parser_.parse('January 11th, 2013', 'MMMM Do, YYYY'),
            datetime(2013, 1, 11)
        )
        assertEqual(
            parser_.parse('January 12th, 2013', 'MMMM Do, YYYY'),
            datetime(2013, 1, 12)
        )
        assertEqual(
            parser_.parse('January 13th, 2013', 'MMMM Do, YYYY'),
            datetime(2013, 1, 13)
        )
        assertEqual(
            parser_.parse('January 21st, 2013', 'MMMM Do, YYYY'),
            datetime(2013, 1, 21)
        )
        assertEqual(
            parser_.parse('January 31st, 2013', 'MMMM Do, YYYY'),
            datetime(2013, 1, 31)
        )
        # Mismatched ordinal suffixes must be rejected.
        with assertRaises(ParserError):
            parser_.parse('January 1th, 2013', 'MMMM Do, YYYY')
        with assertRaises(ParserError):
            parser_.parse('January 11st, 2013', 'MMMM Do, YYYY')
    def test_italian(self):
        parser_ = parser.DateTimeParser('it_it')
        assertEqual(parser_.parse('Gennaio 1º, 2013', 'MMMM Do, YYYY'),
            datetime(2013, 1, 1))
    def test_spanish(self):
        parser_ = parser.DateTimeParser('es_es')
        assertEqual(parser_.parse('Enero 1º, 2013', 'MMMM Do, YYYY'),
            datetime(2013, 1, 1))
    def test_french(self):
        parser_ = parser.DateTimeParser('fr_fr')
        assertEqual(parser_.parse('Janvier 1er, 2013', 'MMMM Do, YYYY'),
            datetime(2013, 1, 1))
        assertEqual(parser_.parse('Janvier 2e, 2013', 'MMMM Do, YYYY'),
            datetime(2013, 1, 2))
        assertEqual(parser_.parse('Janvier 11e, 2013', 'MMMM Do, YYYY'),
            datetime(2013, 1, 11))
class DateTimeParserSearchDateTests(Chai):
    """Tests for finding a date embedded in surrounding free text, and for
    bracket-escaped literal sections in format strings."""
    def setUp(self):
        super(DateTimeParserSearchDateTests, self).setUp()
        self.parser = parser.DateTimeParser()
    def test_parse_search(self):
        assertEqual(
            self.parser.parse('Today is 25 of September of 2003', 'DD of MMMM of YYYY'),
            datetime(2003, 9, 25))
    def test_parse_search_with_numbers(self):
        assertEqual(
            self.parser.parse('2000 people met the 2012-01-01 12:05:10', 'YYYY-MM-DD HH:mm:ss'),
            datetime(2012, 1, 1, 12, 5, 10))
        assertEqual(
            self.parser.parse('Call 01-02-03 on 79-01-01 12:05:10', 'YY-MM-DD HH:mm:ss'),
            datetime(1979, 1, 1, 12, 5, 10))
    def test_parse_search_with_names(self):
        assertEqual(
            self.parser.parse('June was born in May 1980', 'MMMM YYYY'),
            datetime(1980, 5, 1))
    def test_parse_search_locale_with_names(self):
        p = parser.DateTimeParser('sv_se')
        assertEqual(
            p.parse('Jan föddes den 31 Dec 1980', 'DD MMM YYYY'),
            datetime(1980, 12, 31))
        assertEqual(
            p.parse('Jag föddes den 25 Augusti 1975', 'DD MMMM YYYY'),
            datetime(1975, 8, 25))
    def test_parse_search_fails(self):
        # Swedish month name cannot be found by the default (en_us) parser.
        with assertRaises(parser.ParserError):
            self.parser.parse('Jag föddes den 25 Augusti 1975', 'DD MMMM YYYY')
    def test_escape(self):
        # Text inside [brackets] is matched literally, not as format tokens.
        format = "MMMM D, YYYY [at] h:mma"
        assertEqual(
            self.parser.parse("Thursday, December 10, 2015 at 5:09pm", format),
            datetime(2015, 12, 10, 17, 9))
        format = "[MMMM] M D, YYYY [at] h:mma"
        assertEqual(
            self.parser.parse("MMMM 12 10, 2015 at 5:09pm", format),
            datetime(2015, 12, 10, 17, 9))
        format = "[It happened on] MMMM Do [in the year] YYYY [a long time ago]"
        assertEqual(
            self.parser.parse("It happened on November 25th in the year 1990 a long time ago", format),
            datetime(1990, 11, 25))
        format = "[It happened on] MMMM Do [in the][ year] YYYY [a long time ago]"
        assertEqual(
            self.parser.parse("It happened on November 25th in the year 1990 a long time ago", format),
            datetime(1990, 11, 25))
        format = "[I'm][ entirely][ escaped,][ weee!]"
        assertEqual(
            self.parser.parse("I'm entirely escaped, weee!", format),
            datetime(1, 1, 1))
| [
"659338505@qq.com"
] | 659338505@qq.com |
e6905f4e1c0610dd44d7c8e75055f7ca166f85b7 | 18346732982b35516e5dba1edd3b079607882b69 | /send_mail.py | 4e9ad8b129724517e309513a584e935f9a192149 | [] | no_license | srujana-patwari/python_codes | 2fa5f2eb1830896335611c653140ea7caa0f99da | 82b9a49f1ac52e96f8bf5480992d6c0a40020170 | refs/heads/master | 2022-12-07T07:55:35.788770 | 2020-08-29T17:41:23 | 2020-08-29T17:41:23 | 289,732,375 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,030 | py | import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.mime.base import MIMEBase
from email import encoders
def send(filename):
from_add = 'kulkarnisrujana66@gmail.com'
to_add = 'srujanapatwari@gmail.com'
subject = 'Finance Stock Report'
msg = MIMEMultipart()
msg['From'] = from_add
msg['To'] = to_add
msg['Subject'] = subject
body = '<bTodays Finance Report Attached.</b'
msg.attach(MIMEText(body,'html'))
my_file = open(filename,'rb')
part = MIMEBase('application', 'octet-stream')
part.set_payload((my_file).read())
encoders.encode_base64(part)
part.add_header('Content-Disposition','attachment; filename= ' + filename)
msg.attach(part)
message = msg.as_string()
server = smtplib.SMTP('smtp.gmail.com', 587)
server.starttls()
server.login('kulkarnisrujana66@gmail.com','sgciaicylljgwivc')
server.sendmail(from_add, to_add,message)
server.quit() | [
"noreply@github.com"
] | srujana-patwari.noreply@github.com |
edb9e528fe0d4c12899483456a6a3c923e0792b3 | 2433bf78bc97396329f711b18ec2b8a5fc35ae3d | /ddmms/fcns/ml_fcns.py | f791191ff2638e06c4bdd4a1275ced9d2e4ef44d | [] | no_license | simon596/dataDrivenHomogenization | 34b20856742542b3fad501ee9e0c74a7d858caa7 | bc9b0123e05170ec53d0706dc87fb5b1a7ecebc4 | refs/heads/master | 2023-03-17T02:56:11.648473 | 2020-07-19T22:40:59 | 2020-07-19T22:40:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,075 | py | import tensorflow as tf
import numpy as np
def get_mask_tensor_with_inner_zeros(shape=(5, 5), dtype=tf.float32):
    """Build a ``shape[0] x shape[1] x 1`` mask whose border is 1, interior 0."""
    border_mask = np.ones(shape)
    # Zero everything except the one-pixel border frame.
    border_mask[1:-1, 1:-1] = 0
    return tf.convert_to_tensor(border_mask[..., np.newaxis], dtype=dtype)
if __name__ == '__main__':
    # Manual check: load stored feature maps, keep the first three samples,
    # and zero out everything except the border pixels via the mask.
    data = np.load('numpy-3-61-61-1.npy')
    data = data[0:3,:,:,:]
    # np.save('numpy.vtk', data)
    data = tf.convert_to_tensor(data, dtype=tf.float32)
    mask_tensor = get_mask_tensor_with_inner_zeros(shape=(5,5))
    print("small mask(5*5): ", mask_tensor[:,:,0])
    mask_tensor = get_mask_tensor_with_inner_zeros(shape=(61,61))
    print("size of mask_tensor:", np.shape(mask_tensor))
    # Broadcast-multiply keeps only the border pixels of every channel.
    output = tf.math.multiply(data, mask_tensor)
    print("data size: ", np.shape(data))
    print("output size: ", np.shape(output))
    print(data[0,:,:,0])
    print(output[0,:,:,0])
"zhangxx2643@gmail.com"
] | zhangxx2643@gmail.com |
67d55d54687231d4e713766e49ec70a2377e53a9 | bca9c2fa3c4c3d06dd612280ce39090a9dfab9bd | /neekanee/job_scrapers/plugins/com/link/mzeal.py | c66dffc8ed7f69e9834f510183f61c44f6ba40c6 | [] | no_license | thayton/neekanee | 0890dd5e5cf5bf855d4867ae02de6554291dc349 | f2b2a13e584469d982f7cc20b49a9b19fed8942d | refs/heads/master | 2021-03-27T11:10:07.633264 | 2018-07-13T14:19:30 | 2018-07-13T14:19:30 | 11,584,212 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,627 | py | import re, urllib2, urlparse
from neekanee.jobscrapers.jobscraper import JobScraper
from neekanee.htmlparse.soupify import soupify, get_all_text, get_mailto
from neekanee_solr.models import *
COMPANY = {
'name': 'mZeal',
'hq': 'Fitchburg, MA',
'home_page_url': 'http://www.mzeal.com',
'jobs_page_url': 'http://www.mzeal.com/htdocs/mzeal-careers.php',
'empcnt': [11,50]
}
class mZealJobScraper(JobScraper):
def __init__(self):
super(mZealJobScraper, self).__init__(COMPANY)
def scrape_job_links(self, url):
jobs = []
self.br.open(url)
s = soupify(self.br.response().read())
d = s.find('div', id='content')
r = re.compile(r'^javascript:popUp\("([^"]+)')
for a in d.findAll('a', href=r):
m = re.search(r, a['href'])
u = urllib2.quote(m.group(1))
job = Job(company=self.company)
job.title = a.text
job.url = urlparse.urljoin(self.br.geturl(), u)
job.location = self.company.location
jobs.append(job)
return jobs
def scrape_jobs(self):
job_list = self.scrape_job_links(self.company.jobs_page_url)
self.prune_unlisted_jobs(job_list)
new_jobs = self.new_job_listings(job_list)
for job in new_jobs:
self.br.open(job.url)
s = soupify(self.br.response().read())
d = s.div
job.desc = get_all_text(d)
job.save()
def get_scraper():
    # Factory entry point used by the scraper framework.
    return mZealJobScraper()

if __name__ == '__main__':
    # Allow running this scraper standalone.
    job_scraper = get_scraper()
    job_scraper.scrape_jobs()
| [
"thayton@neekanee.com"
] | thayton@neekanee.com |
13d4998fa967ed28daada838660962f5811b3fbe | 1ba58b17f33122abf4236e9e430a51d375e0eb53 | /km73/Kostetskiy_Nazar/4/task12.py | 240d587ec18870449ebf92f0584f0174c5bec47d | [] | no_license | igortereshchenko/amis_python | c4f8d86b88ab036d08ff0ce35c9b42ebeabecc42 | c6f0f2a70c82d5f269b3078eb296f82271b5bb10 | refs/heads/master | 2021-10-22T16:21:19.990650 | 2017-11-01T07:26:54 | 2017-11-01T07:26:54 | 104,785,028 | 0 | 139 | null | 2020-04-21T21:27:09 | 2017-09-25T18:11:42 | Python | UTF-8 | Python | false | false | 122 | py | m=int(input())
n=int(input())
if m%2==0 or n%2==0:
answer = "Yes"
else:
answer = "NO"
print(answer)
input() | [
"noreply@github.com"
] | igortereshchenko.noreply@github.com |
0f9d8f2e3fa2dd8d60f1cbe56ae09c2bbc4b8a8e | de5f0b48475772a4c5a44af099fe27e43f26e649 | /code/23/part_2.py | 17e27e6caa5c185978cc423a04edc36cd6aa320f | [] | no_license | lkboyles/advent | 293010ad6f72ff8cf70b5b74bc7e61aebc56cd86 | a1086956c03e508163b71a054b7247922d31b10e | refs/heads/master | 2021-01-21T10:26:09.873302 | 2015-12-25T04:11:23 | 2015-12-25T04:11:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 649 | py | #!/usr/bin/env python3
"""Solve Day 23/Part 2 of the AdventOfCode
"""
import part_1
def main(filename):
    """Run the day-23 program with register ``a`` preset and print ``b``.

    An extra 'inc a' instruction is prepended before the puzzle input,
    which is part 2's required starting state (a = 1).

    :param filename: path to the puzzle-input instruction listing.
    """
    with open(filename, 'r') as f:
        instructions = []
        instructions.append(part_1.parse_instruction('inc a'))
        for line in f:
            instructions.append(part_1.parse_instruction(line))

    program = part_1.Program(instructions)
    # Step until the program counter leaves the instruction range.
    while program.step():
        pass

    register_b = program.state.registers[1]
    print(register_b)

if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('filename')
    args = parser.parse_args()
    main(**vars(args))
| [
"thobson125@gmail.com"
] | thobson125@gmail.com |
f54cd3540ecc235efafc291f877fffaa52fcfb32 | 66a7338dab4443929badf73a180f4ac994732511 | /Python/cubesat2017/soft/desktop/app/test/virtual/parser_test_virtual.py | e00daa6d92ee7bcdc85bf53465b87722a5b00906 | [
"MIT"
] | permissive | Misha91908/Portfolio | 0d1ecd2572de298abc68a446c9b399bc6a997b2a | c10b06462ec45f039778c77aa6c84e871cac34f6 | refs/heads/main | 2023-01-13T05:43:04.846916 | 2020-11-12T02:29:32 | 2020-11-12T02:29:32 | 302,099,311 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 890 | py | import sys
import pytest
import os
def test_is_valid_packet_length(packet):
    # Every raw packet must parse into exactly 16 fields.
    # NOTE(review): ``packet_parser`` is never imported in this module
    # (only sys/pytest/os are) - the test will NameError as written;
    # confirm the intended import.
    counter = 0
    valid_counter = 0
    for i in range(len(packet)):
        if len(packet_parser.parse(packet[i])) == 16:
            counter += 1
            valid_counter += 1
        else:
            counter += 1
    # Passes only when every packet parsed to 16 fields.
    assert counter == valid_counter
def test_is_valid_type_of_data(packet):
    """Each parsed packet must contain exactly 3 ints and 13 floats.

    Bug fix: the original parsed each packet into ``buf`` but then
    iterated over (and indexed into) the raw ``packet`` sequence instead
    of the parsed result, so the parsed field types were never checked.

    NOTE(review): ``packet_parser`` is never imported in this module -
    confirm the intended import.
    """
    counter = 0
    valid_counter = 0
    for i in range(len(packet)):
        buf = packet_parser.parse(packet[i])
        int_count = 0
        float_count = 0
        # Count the types of the parsed fields (16 total: 3 ints, 13 floats).
        for value in buf:
            if type(value) == int:
                int_count += 1
            elif type(value) == float:
                float_count += 1
        if int_count == 3 and float_count == 13:
            counter += 1
            valid_counter += 1
        else:
            counter += 1
    assert counter == valid_counter
| [
"misha91908@gmail.com"
] | misha91908@gmail.com |
835d35129366cb6b7c7fe25a3b58cb31c862fcee | 3d6f8dc406a18397c354df72ce7dbf5e87712567 | /Threading/Thread_Lock.py | 527cddf42033a3031b9bf111ad23034f0a96b633 | [] | no_license | HonryZhang/Python-Learning | b7410153eff7cd4e51e6e5d69cf7a9bc739a8243 | e29a75eb42588b2ac31d9b498827cba9c87fc157 | refs/heads/master | 2021-01-16T21:41:45.429416 | 2018-05-16T01:36:31 | 2018-05-16T01:36:31 | 100,246,266 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 485 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
__author__ = 'Hongrui'
import threading
import time
num = 0
lock = threading.Lock()
def run(n):
    """Worker body: atomically increment the shared global counter.

    :param n: thread name (unused in the body; kept for Thread args).
    """
    global num
    # ``with`` guarantees the lock is released even if the increment raises;
    # the original acquire()/release() pair would deadlock on an exception.
    with lock:
        num += 1
# Spawn 100 threads that each increment ``num`` once under the lock,
# wait for all of them, then report the final count (expected: 100).
# Python 2 print-statement syntax.
thread_list = []
for i in range(100):
    t = threading.Thread(target=run,args=('thread-%s'%i,))
    t.start()
    thread_list.append(t)
for t in thread_list:
    t.join()
print '------all threads done------'
print "num:",num
| [
"noreply@github.com"
] | HonryZhang.noreply@github.com |
37862978e100f45c9b40e44de5af8037129df588 | 073d8aef7099fb40eb8fc4d9b443fab6a4c71912 | /utils/tools.py | 99a5179a0430721243e61986d5738875d91a67f6 | [
"MIT"
] | permissive | xiaonanQua/experiment | 7f4de2fbf5ff1b41a2480b0a2acd53497822ae57 | 19925c9af5cffc73451dc7674bc3afce25abf772 | refs/heads/master | 2020-09-23T14:03:03.960836 | 2020-06-04T12:17:55 | 2020-06-04T12:17:55 | 225,517,130 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,731 | py | # -*- coding:utf-8 -*-
from __future__ import division, print_function, absolute_import
import os, sys, time, math, torch, zipfile, random
from torch.utils.data import random_split
import numpy as np
import pandas as pd
import seaborn as sn
import matplotlib.pyplot as plt
from config.cifar10_config import Cifar10Config
from torch.utils.data import DataLoader
def get_mean_std(dataset):
    """Estimate the per-channel mean and std of a 3-channel image dataset.

    Iterates the dataset one image at a time and averages the per-image,
    per-channel statistics over all samples.

    :param dataset: torch ``Dataset`` yielding ``(image, label)`` pairs.
    :return: ``(mean, std)`` - two tensors of shape ``(3,)``.
    """
    channel_mean = torch.zeros(3)
    channel_std = torch.zeros(3)
    loader = DataLoader(dataset, batch_size=1, shuffle=True, num_workers=4)
    print('Compute the mean and std...')
    for image, _ in loader:
        for channel in range(3):
            plane = image[:, channel, :, :]
            channel_mean[channel] += plane.mean()
            channel_std[channel] += plane.std()
    # Average the accumulated per-image statistics over the whole dataset.
    channel_mean.div_(len(dataset))
    channel_std.div_(len(dataset))
    return channel_mean, channel_std
def view_bar(message, num, total):
    """Render a one-line console progress bar.

    :param message: label printed before the bar.
    :param num: current progress value, starting at 1.
    :param total: total number of steps.
    """
    fraction = num / total
    filled = int(fraction * 40)
    percent = math.ceil(fraction * 100)
    # NOTE(review): the pad segment is ``"" * (40 - filled)`` which is always
    # empty, so the bar never pads to a fixed width - confirm intent.
    bar = '\r%s:[%s%s]%d%%\t%d/%d' % (message, ">" * filled, "" * (40 - filled), percent, num, total)
    sys.stdout.write(bar)
    sys.stdout.flush()
def mkdir(dir_path):
    """Create ``dir_path`` if it does not already exist (non-recursive).

    Uses try/except instead of the original exists()-then-mkdir check,
    which was racy: the directory could appear between the check and the
    mkdir call and raise.

    :param dir_path: directory path to create.
    """
    try:
        os.mkdir(dir_path)
    except FileExistsError:
        # Already present (directory or file) - same no-op as the original.
        pass
def image_show(image, title=None):
    """Display a normalised CHW tensor image with matplotlib.

    Bug fix: the original body referred to ``images`` while the parameter
    is named ``image``, so every call raised NameError.

    :param image: tensor of shape (C, H, W), normalised with ImageNet stats.
    :param title: optional plot title.
    """
    # (C, H, W) -> (H, W, C), the layout matplotlib expects.
    image = image.numpy().transpose([1, 2, 0])
    # Undo the ImageNet normalisation, then clamp into displayable range.
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    image = image * std + mean
    image = np.clip(image, 0, 1)
    plt.imshow(image)
    if title is not None:
        plt.title(title)
def one_hot_embedding(labels, num_classes):
    """Encode integer class labels as one-hot vectors.

    :param labels: LongTensor of class indices, shape ``(N,)``.
    :param num_classes: total number of classes.
    :return: float tensor of shape ``(N, num_classes)``.
    """
    identity = torch.eye(num_classes)
    # Row-indexing the identity matrix selects one one-hot row per label.
    return identity[labels]
def split_valid_set(dataset, save_coef):
    """Randomly split a dataset into a (train, valid) pair.

    :param dataset: source dataset (typically the full training set).
    :param save_coef: fraction of samples kept in the first (train) split.
    :return: ``(train_dataset, valid_dataset)`` random subsets.
    """
    n_train = int(save_coef * len(dataset))
    n_valid = len(dataset) - n_train
    return random_split(dataset, [n_train, n_valid])
def show_label_distribute(data_loader):
    """Plot a bar chart of the class-label distribution of a data loader.

    :param data_loader: pytorch ``DataLoader`` yielding ``(data, label)``
        batches with tensor labels.
    """
    print('label distribution ..')
    figure, axes = plt.subplots()
    # Collect the labels of every batch into one nested list.
    labels = [label.numpy().tolist() for _, label in data_loader]
    print(labels)
    # Unique classes and their occurrence counts.
    class_labels, counts = np.unique(labels, return_counts=True)
    axes.bar(class_labels, counts)
    axes.set_xticks(class_labels)
    plt.show()
def vis(test_accs, confusion_mtxes, labels, figsize=(20, 8)):
    """Plot the accuracy curve plus the best epoch's confusion-matrix heatmap.

    :param test_accs: per-epoch test accuracies.
    :param confusion_mtxes: per-epoch confusion matrices (same order).
    :param labels: class names for the heatmap axes.
    :param figsize: overall matplotlib figure size.
    """
    # Use the confusion matrix from the epoch with the highest accuracy.
    cm = confusion_mtxes[np.argmax(test_accs)]
    # Row-wise percentages for the cell annotations.
    cm_sum = np.sum(cm, axis=1, keepdims=True)
    cm_perc = cm / cm_sum * 100
    annot = np.empty_like(cm).astype(str)
    nrows, ncols = cm.shape
    for i in range(nrows):
        for j in range(ncols):
            c = cm[i, j]
            p = cm_perc[i, j]
            if c == 0:
                # Leave zero cells unannotated to reduce clutter.
                annot[i, j] = ''
            else:
                annot[i, j] = '%.1f%%' % p
    cm = pd.DataFrame(cm, index=labels, columns=labels)
    cm.index.name = 'Actual'
    cm.columns.name = 'Predicted'
    fig = plt.figure(figsize=figsize)
    # Left panel: accuracy over epochs; right panel: confusion heatmap.
    plt.subplot(1, 2, 1)
    plt.plot(test_accs, 'g')
    plt.grid(True)
    plt.subplot(1, 2, 2)
    sn.heatmap(cm, annot=annot, fmt='', cmap="Blues")
    plt.show()
def confusion_matrix(targets, preds):
    """Build a confusion matrix from paired label lists.

    :param targets: ground-truth labels (list of ints).
    :param preds: predicted labels, same length/order as ``targets``.
    :return: ``(num_class, num_class)`` float ndarray where entry
        ``[t, p]`` counts samples of true class ``t`` predicted as ``p``.
    :raises Exception: if the two lists differ in length.
    """
    # Validate before allocating anything.
    if len(targets) != len(preds):
        raise Exception('The number of real and predicted labels is inconsistent')

    # NOTE(review): the matrix is sized from classes present in ``targets``
    # only; a predicted class absent from targets raises IndexError -
    # confirm both lists always share the same label set.
    num_class = len(set(targets))
    conf_matrix = np.zeros(shape=[num_class, num_class])
    # (Removed a leftover debug print of the freshly-zeroed matrix.)

    for true_label, pred_label in zip(targets, preds):
        conf_matrix[true_label, pred_label] += 1.0

    return conf_matrix
def visiual_confusion_matrix(confusion_mat, classes_name, graph_name=None, out_path=None):
    """Render a confusion matrix as an annotated matplotlib heatmap.

    :param confusion_mat: pre-computed square confusion matrix (raw counts).
    :param classes_name: class names matching the matrix dimensions.
    :param graph_name: title suffix and output-file name component.
    :param out_path: if given, the figure is also saved there as PNG.
    :raises Exception: if matrix size and number of class names differ.
    """
    if confusion_mat.shape[0] != len(classes_name):
        raise Exception('Inconsistent number of categories')
    # Row-normalise so each cell shows the share within its true class.
    confusion_mat_normal = confusion_mat.copy()
    for i in range(len(classes_name)):
        confusion_mat_normal[i, :] = confusion_mat[i, :] /confusion_mat_normal[i, :].sum()
    print(confusion_mat_normal)
    # Greyscale heatmap of the normalised matrix.
    cmap = plt.cm.get_cmap('Greys')
    plt.imshow(confusion_mat_normal, cmap=cmap)
    plt.colorbar()
    # Tick labels and axis titles.
    xlocations = np.array(range(len(classes_name)))
    plt.xticks(xlocations, classes_name, rotation=60)
    plt.yticks(xlocations, classes_name)
    plt.xlabel('Predict label')
    plt.ylabel('True label')
    plt.title('' + graph_name)
    # Overlay the raw counts in each cell.
    for i in range(confusion_mat_normal.shape[0]):
        for j in range(confusion_mat_normal.shape[1]):
            plt.text(x=j, y=i, s=int(confusion_mat[i, j]), va='center', ha='center', color='red', fontsize=10)
    # Optionally save before showing.
    if out_path is not None:
        plt.savefig(os.path.join(out_path, 'Confusion_Matrix_' + graph_name + '.png'))
    plt.show()
    plt.close()
def read_and_write_videos(video_files=None, out_files=None):
    """Read frames from a camera or video file, overlay text and record them.

    NOTE(review): this function uses ``cv2`` but the module never imports
    OpenCV - calling it raises NameError as-is; confirm the missing import.

    :param video_files: input video path; ``None`` opens system camera 0.
    :param out_files: output video path, written as 10 fps MJPG.
    """
    if video_files is None:
        # Use the first system camera as the video source.
        cap = cv2.VideoCapture(0)
    else:
        cap = cv2.VideoCapture(video_files)

    if cap.isOpened() is False:
        print('Error opening video stream or file')

    # Property ids 3/4 are frame width/height.
    frame_width = int(cap.get(3))
    frame_height = int(cap.get(4))
    out = cv2.VideoWriter(out_files,
                          cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'),
                          10, (frame_width, frame_height))

    # Read until the stream is exhausted or the user quits.
    while cap.isOpened():
        ret, frame = cap.read()
        if ret == True:
            # Record the raw frame, then annotate the display copy.
            out.write(frame)
            font = cv2.FONT_HERSHEY_COMPLEX
            cv2.putText(frame, 'xiaonan', (30, 30), font, 1, (0, 0, 255), 1, cv2.LINE_AA)
            cv2.putText(frame, 'xiaoshuai', (30, 90), font, 1, (0, 0, 255), 2)
            cv2.imshow('frame', frame)
            # Wait 1 ms between frames; pressing 'q' stops the loop.
            if cv2.waitKey(1)&0xFF==ord('q'):
                print('结束..')
                break
        else:
            break

    # Release the capture and close all display windows.
    cap.release()
    cv2.destroyAllWindows()
def read_jay_lyrics(num_examples=None):
    """Load the Jay Chou lyrics corpus and build a character-level vocabulary.

    NOTE(review): the zip path is hard-coded to one specific machine.

    :param num_examples: optional number of leading characters to keep.
    :return: ``(indexed corpus, vocab size, idx->char list, char->idx dict)``.
    """
    file_path = '/home/team/xiaonan/Dataset/lyrics/jaychou_lyrics.txt.zip'
    with zipfile.ZipFile(file=file_path) as zin:
        with zin.open('jaychou_lyrics.txt') as file:
            lyrics = file.read().decode('utf-8')
    # Flatten line breaks into spaces.
    lyrics = lyrics.replace('\n', ' ').replace('\r', ' ')
    if num_examples is not None:
        train_set = lyrics[:num_examples]
    else:
        train_set = lyrics
    # Character-level vocabulary: index <-> character tables.
    idx_to_char = list(set(train_set))
    char_to_idx = dict([(char, i) for i, char in enumerate(idx_to_char)])
    vocab_size = len(char_to_idx)
    # Re-express the corpus as a list of vocabulary indices.
    train_set = [char_to_idx[char] for char in train_set]
    return train_set, vocab_size, idx_to_char, char_to_idx
def seq_random_sample(samples_indices, batch_size, num_steps, device=None):
    """Randomly sample (input, target) minibatches from a token sequence.

    Targets are the inputs shifted one position to the right.

    :param samples_indices: full token-index sequence.
    :param batch_size: examples per minibatch.
    :param num_steps: time steps per example.
    :param device: target device; defaults to CUDA when available.
    :yield: ``(X, Y)`` float tensors of shape ``(batch_size, num_steps)``.
    """
    # -1 leaves room for the one-step-shifted target of the last example.
    total_examples = (len(samples_indices) - 1) // num_steps
    example_order = list(range(total_examples))
    random.shuffle(example_order)

    if device is None:
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    def segment(start):
        # One example: ``num_steps`` consecutive tokens from ``start``.
        return samples_indices[start: start + num_steps]

    for batch_no in range(total_examples // batch_size):
        chosen = example_order[batch_no * batch_size: (batch_no + 1) * batch_size]
        X = [segment(j * num_steps) for j in chosen]
        Y = [segment(j * num_steps + 1) for j in chosen]
        yield (torch.tensor(X, dtype=torch.float32, device=device),
               torch.tensor(Y, dtype=torch.float32, device=device))
def seq_adjacent_sample(example_indices, batch_size, num_steps, device=None):
    """Yield adjacent (input, target) minibatches from a token sequence.

    Consecutive batches are contiguous in the sequence, so a recurrent
    hidden state can be carried across them.

    :param example_indices: full token-index sequence.
    :param batch_size: rows per minibatch.
    :param num_steps: time steps per minibatch.
    :param device: target device; defaults to CUDA when available.
    :yield: ``(x, y)`` tensors of shape ``(batch_size, num_steps)`` where
        ``y`` is ``x`` shifted one step to the right.
    """
    if device is None:
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    example_indices = torch.tensor(example_indices, dtype=torch.float32,
                                   device=device)
    data_len = example_indices.size(0)
    num_batch = data_len // batch_size
    # Reshape into (batch_size, num_batch): each row is a contiguous strip.
    indices = example_indices[0: batch_size*num_batch].view(batch_size, num_batch)
    epoch_size = (num_batch-1)//num_steps
    for i in range(epoch_size):
        i = i*num_steps
        x = indices[:, i:i+num_steps]
        y = indices[:, i+1:i+num_steps+1]
        yield x, y
def _one_hot(seq_data, vocab_size, dtype=torch.float32):
"""
将序列数据转化成one-hot向量,即转成词向量。
:param seq_data: 序列数据的索引,格式:[batch_size]-->[batch_size, vocab_size]
:param vocab_size: 序列数据中不同词的数量
:param dtype: 数据类型,默认FLOAT32
:return:
"""
x = seq_data.long()
res = torch.zeros(seq_data.shape[0], vocab_size, dtype=dtype, device=x.device)
res.scatter_(1, x.view(-1, 1), 1)
return res
def seq_one_hot(seq_data, vocab_size):
    """One-hot encode a batch of sequences, one tensor per time step.

    :param seq_data: tensor of shape ``(batch_size, seq_len)``.
    :param vocab_size: vocabulary size.
    :return: list of ``seq_len`` tensors, each ``(batch_size, vocab_size)``.
    """
    num_steps = seq_data.shape[1]
    return [_one_hot(seq_data[:, step], vocab_size) for step in range(num_steps)]
def rnn_predict(prefix, num_chars, rnn, params, init_rnn_state, num_hiddens,
                vocab_size, device, idx_to_char, char_to_idx):
    """Generate ``num_chars`` characters following ``prefix`` with an RNN.

    :param prefix: seed string fed to the model first.
    :param num_chars: number of characters to generate after the prefix.
    :param rnn: step function ``(inputs, state, params) -> (Y, state)``.
    :param params: model weight tensors.
    :param init_rnn_state: factory for the initial hidden state.
    :param num_hiddens: hidden units per layer.
    :param vocab_size: vocabulary size.
    :param device: torch device for the input tensors.
    :param idx_to_char: index -> character table.
    :param char_to_idx: character -> index table.
    :return: the prefix plus the generated characters as one string.
    """
    # Batch size 1: generate a single sequence.
    state = init_rnn_state(1, num_hiddens, device)
    output = [char_to_idx[prefix[0]]]
    for t in range(num_chars+len(prefix)-1):
        # Feed the previous output character back in as the next input.
        X = seq_one_hot(torch.tensor([[output[-1]]], device=device), vocab_size)
        # One step: compute the output and update the hidden state.
        (Y, state) = rnn(X, state, params)
        # While still inside the prefix, force-feed the known characters;
        # afterwards take the model's argmax prediction.
        if t < len(prefix)-1:
            output.append(char_to_idx[prefix[t+1]])
        else:
            output.append(int(Y[0].argmax(dim=1).item()))
    return ''.join([idx_to_char[i] for i in output])
def grad_clipping(params, theta, device):
    """Clip gradients in place so their global L2 norm is at most ``theta``.

    :param params: tensors whose ``.grad`` fields are rescaled.
    :param theta: maximum allowed global gradient norm.
    :param device: device for the norm accumulator.
    """
    total = torch.tensor([0.0], device=device)
    for p in params:
        total += (p.grad.data ** 2).sum()
    total = total.sqrt().item()
    if total > theta:
        scale = theta / total
        for p in params:
            p.grad.data *= scale
def rnn(inputs, state, params):
    """Run a vanilla tanh RNN over a sequence of time steps.

    ``inputs`` and the returned outputs are lists holding one
    ``(batch_size, vocab_size)`` tensor per time step.

    :param inputs: per-time-step input tensors.
    :param state: 1-tuple carrying the hidden state ``H``.
    :param params: ``(W_xh, W_hh, b_h, W_hq, b_q)`` weight tensors.
    :return: ``(outputs, (H,))`` - per-step outputs and the final state.
    """
    W_xh, W_hh, b_h, W_hq, b_q = params
    (H,) = state
    outputs = []
    for step_input in inputs:
        # New hidden state from current input and previous hidden state.
        H = torch.tanh(step_input @ W_xh + H @ W_hh + b_h)
        # Output projection for this time step.
        outputs.append(H @ W_hq + b_q)
    return outputs, (H,)
def init_rnn_state(batch_size, num_hiddens, device):
    """Return the initial (all-zero) RNN hidden state as a 1-tuple."""
    initial = torch.zeros((batch_size, num_hiddens), device=device)
    return (initial,)
def sgd(params, lr, batch_size):
    """Apply one minibatch stochastic-gradient-descent step in place.

    :param params: tensors whose ``.grad`` holds summed minibatch gradients.
    :param lr: learning rate.
    :param batch_size: minibatch size used to average the summed gradients.
    """
    for p in params:
        # Equivalent to: p.data -= lr * p.grad / batch_size
        p.data.sub_(lr * p.grad / batch_size)
def visual_multi_image(data=None):
    """Plot several subplots in one figure and save it as ``test.png``.

    NOTE(review): the ``data`` parameter (documented as a dict of series)
    is currently ignored - the body plots hard-coded demo data; confirm
    whether wiring it up is still intended.

    :param data: dict like ``{'name': {x: [...], y: [...], title: ''}, ...}``
    """
    x1 = list(range(10))
    x2 = list(range(10))
    y1 = list(range(10))
    y2 = list(range(10))
    plt.subplot(2, 1, 1)
    plt.plot(x1, y1, 'o-')
    plt.subplot(2, 1, 2)
    plt.plot(x2, y2, '.-')
    plt.savefig('test.png')
    plt.show()
if __name__ == "__main__":
    # Ad-hoc manual checks; only the plotting demo is currently active.
    # labels = torch.tensor([1, 2, 3, 1])
    # # print(labels.squeeze(1))
    # print(one_hot_embedding(labels, 4))
    # cfg = Cifar10Config()
    # test_loader = cfg.dataset_loader(cfg.cifar_10_dir, train=False, shuffle=False)
    # show_label_distribute(test_loader)
    # video_file = '/home/xiaonan/sf6_1.avi'
    # read_and_write_videos()
    # my_seq = list(range(30))
    # for X,Y in seq_adjacent_sample(my_seq, batch_size=3, num_steps=5):
    #     print(X,Y)
    # x = torch.arange(10).view(2, 5)
    # print(x)
    # inputs = seq_one_hot(x, 2045)
    # print(len(inputs), inputs[0].size())
    visual_multi_image()
"1425877026@qq.com"
] | 1425877026@qq.com |
22ad2d08f0f429589ce8831fcda174a83dea41b4 | 2f8beb0b5b87057fce587e2273a6946627a0f167 | /Samples/test_pk_v1.py | 1d668e885bd64783c9e80b2b7bc9ae725935ed76 | [
"Apache-2.0"
] | permissive | davidnugent2425/balder | 0dee10b81c185c9e7484b4ff8cd11154662e015c | 70b56da1df67025ba8c068a1c5a75eb47ec5155a | refs/heads/master | 2021-09-24T17:40:15.204403 | 2018-10-12T16:25:27 | 2018-10-12T16:25:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,969 | py | # coding: utf-8
# #
# In[ ]:
## v1
import datetime
import os
import sys
from timeit import default_timer as timer
import logging
import numpy as np
import tensorflow as tf
from PIL import Image
# This is needed since the notebook is stored in the object_detection folder.
sys.path.append("..")
from object_detection.utils import ops as utils_ops
SCRIPT_NAME = 'Test Object Detection Round PK V1'
print(SCRIPT_NAME)
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util
# # Model preparation
## Initial logging setup: everything at DEBUG and above goes to stdout.
root = logging.getLogger()
root.setLevel(logging.INFO)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
root.addHandler(ch)

# Path to frozen detection graph. This is the actual model that is used for the object detection.
# NOTE(review): absolute paths tie this script to one machine.
PATH_TO_GRAPH = '/home/denny/run_ai/test_ai/frozen_inference_graph.pb'

# List of the strings that is used to add correct label for each box.
PATH_TO_LABELS = '/home/denny/run_ai/test_ai/label_map.pbtxt'

NUM_CLASSES = 2

# Load the frozen TensorFlow model into the default graph.
detection_graph = tf.Graph()
with detection_graph.as_default():
    od_graph_def = tf.GraphDef()
    with tf.gfile.GFile(PATH_TO_GRAPH, 'rb') as fid:
        serialized_graph = fid.read()
        od_graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(od_graph_def, name='')

# Label map: maps prediction indices to human-readable category names.
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES,
                                                            use_display_name=True)
category_index = label_map_util.create_category_index(categories)
# # Detection
def run_inference_for_images(images, graph):
    """Run the object-detection graph over a list of images.

    :param images: list of HxWx3 uint8 numpy arrays.
    :param graph: frozen TensorFlow detection graph.
    :return: one output dict per image with ``num_detections``,
        ``detection_boxes``, ``detection_classes``, ``detection_scores``
        and, when the model provides them, ``detection_masks``.
    """
    with graph.as_default():
        with tf.Session() as sess:
            output_dict_array = []
            for image in images:
                # Get handles to input and output tensors
                ops = tf.get_default_graph().get_operations()
                all_tensor_names = {output.name for op in ops for output in op.outputs}
                tensor_dict = {}
                for key in [
                        'num_detections', 'detection_boxes', 'detection_scores',
                        'detection_classes', 'detection_masks'
                ]:
                    tensor_name = key + ':0'
                    if tensor_name in all_tensor_names:
                        tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(
                            tensor_name)
                if 'detection_masks' in tensor_dict:
                    # The following processing is only for single image
                    detection_boxes = tf.squeeze(tensor_dict['detection_boxes'], [0])
                    detection_masks = tf.squeeze(tensor_dict['detection_masks'], [0])
                    # Reframe is required to translate mask from box coordinates to
                    # image coordinates and fit the image size.
                    real_num_detection = tf.cast(tensor_dict['num_detections'][0], tf.int32)
                    detection_boxes = tf.slice(detection_boxes, [0, 0], [real_num_detection, -1])
                    detection_masks = tf.slice(detection_masks, [0, 0, 0], [real_num_detection, -1, -1])
                    detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(
                        detection_masks, detection_boxes, image.shape[0], image.shape[1])
                    detection_masks_reframed = tf.cast(
                        tf.greater(detection_masks_reframed, 0.5), tf.uint8)
                    # Follow the convention by adding back the batch dimension
                    tensor_dict['detection_masks'] = tf.expand_dims(
                        detection_masks_reframed, 0)
                image_tensor = tf.get_default_graph().get_tensor_by_name('image_tensor:0')
                # Run inference
                output_dict = sess.run(tensor_dict,
                                       feed_dict={image_tensor: np.expand_dims(image, 0)})
                # all outputs are float32 numpy arrays, so convert types as appropriate
                output_dict['num_detections'] = int(output_dict['num_detections'][0])
                output_dict['detection_classes'] = output_dict[
                    'detection_classes'][0].astype(np.uint8)
                output_dict['detection_boxes'] = output_dict['detection_boxes'][0]
                output_dict['detection_scores'] = output_dict['detection_scores'][0]
                if 'detection_masks' in output_dict:
                    output_dict['detection_masks'] = output_dict['detection_masks'][0]
                output_dict_array.append(output_dict)
            return output_dict_array
def load_image_into_numpy_array(image):
    """Convert a PIL image into a ``(height, width, 3)`` uint8 numpy array."""
    width, height = image.size
    flat = np.array(image.getdata())
    return flat.reshape((height, width, 3)).astype(np.uint8)
# In[ ]:
# Walk the image directory and load every supported image into memory.
input_images = []
input_image_filenames = []
output_dir = '/home/denny/run_ai/test_ai/output'
img_dir = '/home/denny/run_ai/test_ai/images'
valid_images = [".jpg", ".gif", ".png"]
for f in os.listdir(img_dir):
    fn = os.path.splitext(f)[0]
    ext = os.path.splitext(f)[1]
    if ext.lower() not in valid_images:
        continue
    input_image_filenames.append(fn)
    logging.debug("Loading {}...".format(fn))
    image_filename = os.path.join(img_dir, f)
    image = Image.open(image_filename)
    image_np = load_image_into_numpy_array(image)
    input_images.append(image_np)
    logging.debug(" Image:{} loaded...".format(fn))

# Time the whole inference batch.
now = datetime.datetime.now()
start = timer()
output_dict_array = run_inference_for_images(input_images, detection_graph)
end = timer()
avg = (end - start) / len(input_images)
print("===TF inference took: " + str(end - start) + " for [" + str(len(input_images)) + "] images, average[" + str(
    avg) + "]===")
print("output array has:" + str(len(output_dict_array)))

# Draw the detections on each image and save it to the output directory.
for idx in range(len(output_dict_array)):
    output_dict = output_dict_array[idx]
    image_np_org = input_images[idx]
    vis_util.visualize_boxes_and_labels_on_image_array(
        image_np_org,
        output_dict['detection_boxes'],
        output_dict['detection_classes'],
        output_dict['detection_scores'],
        category_index,
        instance_masks=output_dict.get('detection_masks'),
        use_normalized_coordinates=True,
        line_thickness=3)
    img_out = Image.fromarray(image_np_org, 'RGB')
    img_out.save(os.path.join(output_dir, 'output_image_{}.jpg'.format(input_image_filenames[idx])))
| [
"dennywangtenk@gmail.com"
] | dennywangtenk@gmail.com |
96a0064c203eeb2ac596e10ba2d39ee3bf3c7811 | 24fe1f54fee3a3df952ca26cce839cc18124357a | /servicegraph/lib/python2.7/site-packages/acimodel-4.0_3d-py2.7.egg/cobra/modelimpl/ospf/rsextdeftoospfarea.py | 02670cb210100358c19c0024717b98aed6513d1e | [] | no_license | aperiyed/servicegraph-cloudcenter | 4b8dc9e776f6814cf07fe966fbd4a3481d0f45ff | 9eb7975f2f6835e1c0528563a771526896306392 | refs/heads/master | 2023-05-10T17:27:18.022381 | 2020-01-20T09:18:28 | 2020-01-20T09:18:28 | 235,065,676 | 0 | 0 | null | 2023-05-01T21:19:14 | 2020-01-20T09:36:37 | Python | UTF-8 | Python | false | false | 8,355 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2019 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class RsExtDefToOspfArea(Mo):
"""
Mo doc not defined in techpub!!!
"""
meta = SourceRelationMeta("cobra.model.ospf.RsExtDefToOspfArea", "cobra.model.ospf.Area")
meta.cardinality = SourceRelationMeta.N_TO_ONE
meta.moClassName = "ospfRsExtDefToOspfArea"
meta.rnFormat = "rsExtDefToOspfArea"
meta.category = MoCategory.RELATIONSHIP_TO_LOCAL
meta.label = "Area"
meta.writeAccessMask = 0x1
meta.readAccessMask = 0x1
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = False
meta.parentClasses.add("cobra.model.ospf.ExtDef")
meta.superClasses.add("cobra.model.reln.Inst")
meta.superClasses.add("cobra.model.reln.To")
meta.rnPrefixes = [
('rsExtDefToOspfArea', False),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "forceResolve", "forceResolve", 107, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = True
prop.defaultValueStr = "yes"
prop._addConstant("no", None, False)
prop._addConstant("yes", None, True)
meta.props.add("forceResolve", prop)
prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "local"
prop._addConstant("implicit", "implicit", 4)
prop._addConstant("local", "local", 0)
prop._addConstant("policy", "policy", 1)
prop._addConstant("replica", "replica", 2)
prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
meta.props.add("lcOwn", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "rType", "rType", 106, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 1
prop.defaultValueStr = "mo"
prop._addConstant("local", "local", 3)
prop._addConstant("mo", "mo", 1)
prop._addConstant("service", "service", 2)
meta.props.add("rType", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "state", "state", 103, PropCategory.REGULAR)
prop.label = "State"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "unformed"
prop._addConstant("cardinality-violation", "cardinality-violation", 5)
prop._addConstant("formed", "formed", 1)
prop._addConstant("invalid-target", "invalid-target", 4)
prop._addConstant("missing-target", "missing-target", 2)
prop._addConstant("unformed", "unformed", 0)
meta.props.add("state", prop)
prop = PropMeta("str", "stateQual", "stateQual", 104, PropCategory.REGULAR)
prop.label = "State Qualifier"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "none"
prop._addConstant("default-target", "default-target", 2)
prop._addConstant("mismatch-target", "mismatch-target", 1)
prop._addConstant("none", "none", 0)
meta.props.add("stateQual", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
prop = PropMeta("str", "tCl", "tCl", 34221, PropCategory.REGULAR)
prop.label = "Target-class"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 2664
prop.defaultValueStr = "ospfArea"
prop._addConstant("ospfArea", None, 2664)
prop._addConstant("unspecified", "unspecified", 0)
meta.props.add("tCl", prop)
prop = PropMeta("str", "tDn", "tDn", 34220, PropCategory.REGULAR)
prop.label = "Target-dn"
prop.isConfig = True
prop.isAdmin = True
meta.props.add("tDn", prop)
prop = PropMeta("str", "tType", "tType", 105, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 1
prop.defaultValueStr = "mo"
prop._addConstant("all", "all", 2)
prop._addConstant("mo", "mo", 1)
prop._addConstant("name", "name", 0)
meta.props.add("tType", prop)
# Deployment Meta
meta.deploymentQuery = True
meta.deploymentType = "Ancestor"
meta.deploymentQueryPaths.append(DeploymentPathMeta("LIfCtxToNwIf", "Physical Interfaces", "cobra.model.nw.If"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("LIfCtxToCompVNic", "Virtual Nics", "cobra.model.comp.VNic"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("AcIpEpgPolToLocale3", "Fabric Nodes(EP)", "cobra.model.fabric.Node"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("AcIpEpgPolToLocale2", "Fabric Nodes(Service EPg)", "cobra.model.nw.If"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("AcIpEpgPolToLocale1", "Fabric Nodes(EPg)", "cobra.model.nw.If"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("AcEpgIpPolToLocale3", "Fabric Nodes(EP)", "cobra.model.fabric.Node"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("AcEpgIpPolToLocale2", "Fabric Nodes(Service EPg)", "cobra.model.nw.If"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("AcEpgIpPolToLocale1", "Fabric Nodes(EPg)", "cobra.model.nw.If"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("AcEpEpgPolToLocale3", "Fabric Nodes(EP)", "cobra.model.fabric.Node"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("AcEpEpgPolToLocale2", "Fabric Nodes(Service EPg)", "cobra.model.nw.If"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("AcEpEpgPolToLocale1", "Fabric Nodes(EPg)", "cobra.model.nw.If"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("AcEpgEpPolToLocale3", "Fabric Nodes(EP)", "cobra.model.fabric.Node"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("AcEpgEpPolToLocale2", "Fabric Nodes(Service EPg)", "cobra.model.nw.If"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("AcEpgEpPolToLocale1", "Fabric Nodes(EPg)", "cobra.model.nw.If"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("AcEpgEpgPolToLocale", "Fabric Nodes", "cobra.model.nw.If"))
def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
namingVals = []
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"rrishike@cisco.com"
] | rrishike@cisco.com |
bae6e624ed20eb34036d0e3c8a708a7841ef770b | 736015bd5edb7ccd96ea56e261b61f032e6a97a2 | /setup.py | e8e9f5dd91b749075f53af6dac3d4c93fea31297 | [
"MIT"
] | permissive | koalalee37/graphgraph.py | 304ecb32eb7410f36515456fbfd80be7a4d1bc38 | 4e9b97c8a4b56c355317e62e7d81f0ecacf82e7c | refs/heads/main | 2023-02-23T23:37:01.365122 | 2021-01-29T07:26:47 | 2021-01-29T07:30:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,223 | py | from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext
import sys
import setuptools
import os
__version__ = '0.0.1.dev1'
# Prepare and send a new release to PyPI
if "release" in sys.argv[-1]:
os.system("python setup.py sdist")
os.system("twine upload dist/*")
os.system("rm -rf dist/graphgraph*")
sys.exit()
class get_pybind_include(object):
"""Helper class to determine the pybind11 include path
The purpose of this class is to postpone importing pybind11
until it is actually installed, so that the ``get_include()``
method can be invoked. """
def __init__(self, user=False):
self.user = user
def __str__(self):
import pybind11
return pybind11.get_include(self.user)
ext_modules = [
Extension(
'graphgraph.operators',
['graphgraph/src/operators.cpp'],
include_dirs=[
# Path to pybind11 headers
get_pybind_include(),
get_pybind_include(user=True)
],
extra_link_args=["-stdlib=libc++"],
language='c++'
),
]
# As of Python 3.6, CCompiler has a `has_flag` method.
# cf http://bugs.python.org/issue26689
def has_flag(compiler, flagname):
"""Return a boolean indicating whether a flag name is supported on
the specified compiler.
"""
import tempfile
with tempfile.NamedTemporaryFile('w', suffix='.cpp') as f:
f.write('int main (int argc, char **argv) { return 0; }')
try:
compiler.compile([f.name], extra_postargs=[flagname])
except setuptools.distutils.errors.CompileError:
return False
return True
def cpp_flag(compiler):
"""Return the -std=c++[11/14] compiler flag.
The c++14 is prefered over c++11 (when it is available).
"""
if has_flag(compiler, '-std=c++14'):
return '-std=c++14'
elif has_flag(compiler, '-std=c++11'):
return '-std=c++11'
else:
raise RuntimeError('Unsupported compiler -- at least C++11 support '
'is needed!')
class BuildExt(build_ext):
"""A custom build extension for adding compiler-specific options."""
c_opts = {
'msvc': ['/EHsc'],
'unix': [],
}
if sys.platform == 'darwin':
c_opts['unix'] += ['-stdlib=libc++', '-mmacosx-version-min=10.7']
def build_extensions(self):
# compiler flags
ct = self.compiler.compiler_type
opts = self.c_opts.get(ct, [])
if ct == 'unix':
opts.append('-DVERSION_INFO="%s"' % self.distribution.get_version())
opts.append(cpp_flag(self.compiler))
if has_flag(self.compiler, '-fvisibility=hidden'):
opts.append('-fvisibility=hidden')
elif ct == 'msvc':
opts.append('/DVERSION_INFO=\\"%s\\"' % self.distribution.get_version())
for ext in self.extensions:
ext.extra_compile_args = opts
# third-party libraries flags
localincl = "third-party"
if not os.path.exists(os.path.join(localincl, "eigen_3.3.7", "Eigen",
"Core")):
raise RuntimeError("couldn't find Eigen headers")
include_dirs = [
os.path.join(localincl, "eigen_3.3.7"),
]
for ext in self.extensions:
ext.include_dirs = include_dirs + ext.include_dirs
# run standard build procedure
build_ext.build_extensions(self)
setup(
name='graphgraph',
version=__version__,
author='Ze Vinicius',
author_email='jvmirca@gmail.com',
url='https://github.com/mirca/graphgraph.py',
description='A Python library for graph learning from data',
long_description='',
ext_modules=ext_modules,
install_requires=['pybind11>=2.2', 'numpy'],
setup_requires=['pybind11>=2.2', 'numpy'],
cmdclass={'build_ext': BuildExt},
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Financial and Insurance Industry',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.0',
],
zip_safe=False,
include_package_data=True,
)
| [
"jvmirca@gmail.com"
] | jvmirca@gmail.com |
dfc11ea81a9e5e31ba1ed888074310f0d7523ceb | 74912c10f66e90195bf87fd71e9a78fa09f017ec | /execroot/syntaxnet/bazel-out/local-opt/bin/syntaxnet/beam_reader_ops_test.runfiles/org_tensorflow/tensorflow/contrib/learn/python/learn/models.py | 6d0daf4c6b55a6b96adcbeebf6e40287ffc3a1c3 | [] | no_license | koorukuroo/821bda42e7dedbfae9d936785dd2d125- | 1f0b8f496da8380c6e811ed294dc39a357a5a8b8 | 237fcc152ff436f32b2b5a3752a4181d279b3a57 | refs/heads/master | 2020-03-17T03:39:31.972750 | 2018-05-13T14:35:24 | 2018-05-13T14:35:24 | 133,244,956 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 135 | py | /root/.cache/bazel/_bazel_root/821bda42e7dedbfae9d936785dd2d125/external/org_tensorflow/tensorflow/contrib/learn/python/learn/models.py | [
"k"
] | k |
88b1c9f386eafb2d6a31fa233061bbcf655bec8c | c91775afdc25f8897c6839cf8294869f3e928083 | /PythonFiles/snowmass_cfg_tj_14TEV_500_1000.py | 6a4364571fa4ccee4878ee53e40059c5934e8c00 | [] | no_license | Saptaparna/Miscellaneous | 7e6df9cdfd10d4861e2e382b1837dbd4c26fb249 | b954189d85e56a02fe257b5f5cbd779365719c00 | refs/heads/master | 2021-01-23T13:29:30.283308 | 2017-12-20T08:26:37 | 2017-12-20T08:26:37 | 42,525,018 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,261 | py | import FWCore.ParameterSet.Config as cms
import FWCore.PythonUtilities.LumiList as LumiList
import FWCore.ParameterSet.Types as CfgTypes
#
# Parameters that can be set via command line
# when submitting Condor jobs
#
isMc_settable = True
isSignalMc_settable = False
def FindFile(name):
fname = 'file.txt'
return fname
process = cms.Process("LJMetCom")
##################################################################
#
# All input files needed for the job to run
# Specify them here, and they will automatically be correctly
# transferred to Condor when needed
# NOTE: you can define as many or as few entries as you wish,
# names are up to you
miscFiles = {}
miscFiles['jec_uncertainty'] = '../cond/Summer12_V2_DATA_AK5PF_UncertaintySources.txt'
miscFiles['btag_performance'] = '../cond/btag_performance_db062012.root'
miscFiles['json'] = '../data/json/Cert_190456-208686_8TeV_PromptReco_Collisions12_JSON.txt'
miscFiles['MCL1JetPar'] = '../data/START53_V7G_L1FastJet_AK5PFchs.txt'
miscFiles['MCL2JetPar'] = '../data/START53_V7G_L2Relative_AK5PFchs.txt'
miscFiles['MCL3JetPar'] = '../data/START53_V7G_L3Absolute_AK5PFchs.txt'
miscFiles['DataL1JetPar'] = '../data/FT_53_V10_AN3_L1FastJet_AK5PFchs.txt'
miscFiles['DataL2JetPar'] = '../data/FT_53_V10_AN3_L2Relative_AK5PFchs.txt'
miscFiles['DataL3JetPar'] = '../data/FT_53_V10_AN3_L3Absolute_AK5PFchs.txt'
miscFiles['DataResJetPar'] = '../data/FT_53_V10_AN3_L2L3Residual_AK5PFchs.txt'
#Arguments from condor submit script which are used more than once
condorIsMC = bool(True)
relBase = str('/uscms_data/d2/sapta/work/LJMetCode_fromGena/Dilepton_Feb25/CMSSW_5_3_7_patch4')
condorJSON = str('None')
# Dilepton calculator options
process.load('LJMet.Com.DileptonCalc_cfi')
process.DileptonCalc.isMc = condorIsMC
process.DileptonCalc.dataType = cms.string('None')
############################################################
#
# FWLite application options
#
process.ljmet = cms.PSet(
isMc = cms.bool(condorIsMC),
runs = cms.vint32([]),
verbosity = cms.int32(0)
)
#Exclude unnecessary calculators
process.ljmet.excluded_calculators = cms.vstring(
'WprimeCalc',
'LjetsTopoCalc',
'LjetsTopoCalcNew',
'StopCalc'
)
############################################################
#
# common calculator options
process.load('LJMet.Com.commonCalc_cfi')
process.CommonCalc.dummy_parameter = cms.string('Dummy parameter value')
############################################################
#
# pileup calculator options
process.load('LJMet.Com.pileupCalc_cfi')
process.PileUpCalc.verbosity = process.ljmet.verbosity
############################################################
#
# Event selector options
#
process.event_selector = cms.PSet(
selection = cms.string('DileptonSelector'),
isMc = cms.bool(condorIsMC),
# cuts
#HLT
trigger_cut = cms.bool(True),
dump_trigger = cms.bool(False),
#Can use same trigger paths for data and MC since MC is always one of the data versions
trigger_path_ee = cms.vstring('HLT_Ele17_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_Ele8_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_v15',
'HLT_Ele17_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_Ele8_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_v16',
'HLT_Ele17_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_Ele8_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_v17',
'HLT_Ele17_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_Ele8_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_v18',
'HLT_Ele17_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_Ele8_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_v19'),
trigger_path_em = cms.vstring('HLT_Mu8_Ele17_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_v4', 'HLT_Mu8_Ele17_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_v5',
'HLT_Mu8_Ele17_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_v6', 'HLT_Mu8_Ele17_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_v7',
'HLT_Mu8_Ele17_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_v8', 'HLT_Mu8_Ele17_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_v9',
'HLT_Mu17_Ele8_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_v4', 'HLT_Mu17_Ele8_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_v5',
'HLT_Mu17_Ele8_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_v6', 'HLT_Mu17_Ele8_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_v7',
'HLT_Mu17_Ele8_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_v8', 'HLT_Mu17_Ele8_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_v9'),
trigger_path_mm = cms.vstring('HLT_Mu17_Mu8_v16', 'HLT_Mu17_Mu8_v17', 'HLT_Mu17_Mu8_v18',
'HLT_Mu17_Mu8_v19', 'HLT_Mu17_Mu8_v21', 'HLT_Mu17_Mu8_v22',
'HLT_Mu17_TkMu8_v9', 'HLT_Mu17_TkMu8_v10', 'HLT_Mu17_TkMu8_v11',
'HLT_Mu17_TkMu8_v12', 'HLT_Mu17_TkMu8_v13', 'HLT_Mu17_TkMu8_v14'),
pv_cut = cms.bool(False),
hbhe_cut = cms.bool(False),
jet_cuts = cms.bool(False),
jet_minpt = cms.double(20.0),
jet_maxeta = cms.double(5),
min_jet = cms.int32(0),
max_jet = cms.int32(4000),
muon_cuts = cms.bool(True),
min_muon = cms.int32(0),
muon_minpt = cms.double(10.0),
muon_maxeta = cms.double(4.0),
max_muon = cms.int32(20),
electron_cuts = cms.bool(True),
min_electron = cms.int32(0),
electron_minpt = cms.double(10.0),
electron_maxeta = cms.double(4.0),
max_electron = cms.int32(20),
min_lepton = cms.int32(2),
met_cuts = cms.bool(False),
min_met = cms.double(0.0),
btag_cuts = cms.bool(False),
btagOP = cms.string("CSVM"),
btag_1 = cms.bool(True),
btag_2 = cms.bool(True),
btag_3 = cms.bool(False),
trigger_collection = cms.InputTag('TriggerResults::HLT'),
pv_collection = cms.InputTag('goodOfflinePrimaryVertices'),
jet_collection = cms.InputTag('goodPatJetsPFlow'),
muon_collection = cms.InputTag('selectedPatMuonsPFlowLoose'),
electron_collection = cms.InputTag('selectedPatElectronsPFlowLoose'),
met_collection = cms.InputTag('patMETsPFlow'),
JEC_txtfile = cms.string(miscFiles['jec_uncertainty']),
JECup = cms.bool(False),
JECdown = cms.bool(False),
JERup = cms.bool(False),
JERdown = cms.bool(False),
BTagUncertUp = cms.bool(False),
BTagUncertDown = cms.bool(True),
do53xJEC = cms.bool(True),
MCL1JetPar = cms.string(miscFiles['MCL1JetPar']),
MCL2JetPar = cms.string(miscFiles['MCL2JetPar']),
MCL3JetPar = cms.string(miscFiles['MCL3JetPar']),
DataL1JetPar = cms.string(miscFiles['DataL1JetPar']),
DataL2JetPar = cms.string(miscFiles['DataL2JetPar']),
DataL3JetPar = cms.string(miscFiles['DataL3JetPar']),
DataResJetPar = cms.string(miscFiles['DataResJetPar']),
keepFullMChistory = cms.bool(True)
)
##################################################################
#
# Input files
#
# NOTE: keep your test inputs in the python files as in
# this example, and they will be correctly substituted with
# specified input events when you submit to Condor
# (
#
# nEvents and skipEvents are for interactive use, their
# values will be correctly reset when you submit Condor
#
input_module = 'LJMet.Com.tj_14TEV_500_1000'
process.load(input_module)
process.inputs.nEvents = cms.int32(-1)
process.inputs.skipEvents = cms.int32(0)
############################################################
#
# JSON
JsonFile = miscFiles['json']
myList = LumiList.LumiList(filename=JsonFile).getCMSSWString().split(',')
if not condorIsMC:
process.inputs.lumisToProcess.extend(myList)
#######################################################
#
# Output
#
process.outputs = cms.PSet (
outputName = cms.string('tj_14TEV_500_1000'),
treeName = cms.string('ljmet'),
)
#######################################################
#
# Object selector options
#
# Primary vertex
process.load('PhysicsTools.SelectorUtils.pvSelector_cfi')
process.pvSelector.pvSrc = cms.InputTag('goodOfflinePrimaryVertices')
process.pvSelector.minNdof = cms.double(4.0)
process.pvSelector.maxZ = cms.double(24.0)
process.pvSelector.maxRho = cms.double(2.0)
# jets
process.load('PhysicsTools.SelectorUtils.pfJetIDSelector_cfi')
process.pfJetIDSelector.version = cms.string('FIRSTDATA')
process.pfJetIDSelector.quality = cms.string('LOOSE')
| [
"saptaparna@gmail.com"
] | saptaparna@gmail.com |
43009bda6f38abbff784e606cddd85f79ec39da5 | a5698f82064aade6af0f1da21f504a9ef8c9ac6e | /huaweicloud-sdk-bssintl/huaweicloudsdkbssintl/v2/model/list_order_coupons_by_order_id_response.py | efa3bfa42ebe822922501858843820138839f155 | [
"Apache-2.0"
] | permissive | qizhidong/huaweicloud-sdk-python-v3 | 82a2046fbb7d62810984399abb2ca72b3b47fac6 | 6cdcf1da8b098427e58fc3335a387c14df7776d0 | refs/heads/master | 2023-04-06T02:58:15.175373 | 2021-03-30T10:47:29 | 2021-03-30T10:47:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,261 | py | # coding: utf-8
import pprint
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
class ListOrderCouponsByOrderIdResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'count': 'int',
'user_coupons': 'list[CouponInfoV2]'
}
attribute_map = {
'count': 'count',
'user_coupons': 'user_coupons'
}
def __init__(self, count=None, user_coupons=None):
"""ListOrderCouponsByOrderIdResponse - a model defined in huaweicloud sdk"""
super().__init__()
self._count = None
self._user_coupons = None
self.discriminator = None
if count is not None:
self.count = count
if user_coupons is not None:
self.user_coupons = user_coupons
@property
def count(self):
"""Gets the count of this ListOrderCouponsByOrderIdResponse.
|参数名称:符合条件的记录总数。| |参数的约束及描述:符合条件的记录总数。|
:return: The count of this ListOrderCouponsByOrderIdResponse.
:rtype: int
"""
return self._count
@count.setter
def count(self, count):
"""Sets the count of this ListOrderCouponsByOrderIdResponse.
|参数名称:符合条件的记录总数。| |参数的约束及描述:符合条件的记录总数。|
:param count: The count of this ListOrderCouponsByOrderIdResponse.
:type: int
"""
self._count = count
@property
def user_coupons(self):
"""Gets the user_coupons of this ListOrderCouponsByOrderIdResponse.
|参数名称:客户订单详情信息。具体请参见表 CustomerOrderV2| |参数约束以及描述:客户订单详情信息。具体请参见表 CustomerOrderV2|
:return: The user_coupons of this ListOrderCouponsByOrderIdResponse.
:rtype: list[CouponInfoV2]
"""
return self._user_coupons
@user_coupons.setter
def user_coupons(self, user_coupons):
"""Sets the user_coupons of this ListOrderCouponsByOrderIdResponse.
|参数名称:客户订单详情信息。具体请参见表 CustomerOrderV2| |参数约束以及描述:客户订单详情信息。具体请参见表 CustomerOrderV2|
:param user_coupons: The user_coupons of this ListOrderCouponsByOrderIdResponse.
:type: list[CouponInfoV2]
"""
self._user_coupons = user_coupons
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ListOrderCouponsByOrderIdResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
194217704d53d5a40b10e5d93d1fe04ec2e5f44c | c0fdf3ea874e2c88dd5b04dcd03fdfecf20d45fd | /utils.py | f51eeb099afb343c540094ed2359512592076771 | [] | no_license | myplayareas/mypoo | 141fae1707f3a071b7cbeb377c68bf57a6f5867e | f6583de47526798130168184262e885103702263 | refs/heads/main | 2023-09-01T08:48:02.209257 | 2021-10-08T14:40:18 | 2021-10-08T14:40:18 | 415,012,321 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,851 | py | import datetime
import re
class Validador:
padrao_telefone_com_ddd = "[(][0-9]{2}[)][0-9]{4,5}[-]*[0-9]{4}"
padrao_email = "[a-zA-Z0-9_.]{1,30}@[a-zA-Z0-9_.]{1,30}"
padrao_data_nascimento = '[0-9]{2}[/][0-9]{2}[/][0-9]{4}'
padrao_cep = '[0-9]{5}[-][0-9]{3}'
@classmethod
def valida_telefone(cls,telefone):
checa_telefone = re.match(cls.padrao_telefone_com_ddd, telefone)
if checa_telefone is not None:
return telefone
else:
mensagem = f'Telefone {telefone} inválido! A formatação deve ser (XX)XXXXX-XXXX ou (XX)XXXX-XXXX'
raise Exception(mensagem)
@classmethod
def valida_email(cls, email):
checa_email = re.match(cls.padrao_email, email)
if checa_email is not None:
return email
else:
mensagem = f'E-mail {email} inválido! A formatação deve ser xxxx@yyyy.zzz...'
raise Exception(mensagem)
@classmethod
def valida_data(cls, data):
checa_data = re.match(cls.padrao_data_nascimento, data)
if checa_data is not None :
dia = data.split('/')[0]
mes = data.split('/')[1]
ano = data.split('/')[2]
try:
data_auxiliar = datetime.datetime(int(ano), int(mes), int(dia) )
except Exception as e:
print(f'Data {data} inválida! {e}')
return data
else:
mensagem = f'Data {data} inválida! A formatação deve ser dd/mm/yyyy'
raise Exception(mensagem)
@classmethod
def valida_cep(cls, cep):
checa_cep = re.match(cls.padrao_cep, cep)
if checa_cep is not None:
return cep
else:
mensagem = f'CEP {cep} inválido! A formatação deve ser XXXXX-XXX'
raise Exception(mensagem) | [
"armando.sousa@gmail.com"
] | armando.sousa@gmail.com |
80096409522fce91ef9effa519a7c42de2d12216 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03607/s019750765.py | 8cbf45ccab5124b64ddc5991fa8d4f7dfd2eb199 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 603 | py | import sys
## io ##
def IS(): return sys.stdin.readline().rstrip()
def II(): return int(IS())
def MII(): return list(map(int, IS().split()))
from functools import partial, reduce
from collections import Counter
#======================================================#
def f_chain(*args):
return reduce(lambda x, f: f(x), args)
def is_odd(n):
return n%2==1
def main():
n = II()
aa = [II() for _ in range(n)]
c = Counter(aa)
f_chain(c.values(),
partial(filter, is_odd),
list,
len,
print,
)
if __name__ == '__main__':
main() | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
37ac761a18ecbf51d15178145d20d3c4a7ab073d | 458e61dcae8bd16aaeceeece091399f195f7f252 | /D4/source/WebInterface/wttune/urls.py | 32f2c858c538f06ac317dcaaf81ccefb447fcac2 | [] | no_license | hgagneja/WhatsThatTune | 385c690b09dd11b272c66652987768df815c3008 | dd0d5cff3aa669d0a650a88573c3a40bcf2f25db | refs/heads/master | 2021-01-19T07:17:56.970692 | 2017-04-17T00:47:43 | 2017-04-17T00:48:38 | 80,393,908 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 277 | py | from django.conf.urls import url
from wttune import views
urlpatterns = [
url(r'^$', views.HomePage.as_view()),
url(r'^about/$', views.AboutPage.as_view()),
url(r'^services/$', views.ServicesPage.as_view()),
url(r'^contact/$', views.ContactPage.as_view()),
]
| [
"ganeshsanthar@gmail.com"
] | ganeshsanthar@gmail.com |
1dc31fdf34250e91d0fc6c8d6501cceef736e66e | 7e65f4e873f9a2d9c00f18460bccfe4a0ffa2ddc | /tests/__init__.py | e07d9c120061d1b97b052989a13b615309897574 | [] | no_license | deathbeds/nostalgiaforever | 6b00601363295fc3af0a8e0b8d8c68d1514c403d | 50ea15ae2e6a608e0d90b7dd41a11354e1ea2e1c | refs/heads/master | 2020-03-17T05:07:32.264543 | 2018-05-15T18:06:08 | 2018-05-15T18:06:08 | 133,303,188 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | from pidgin.markdown import load_ipython_extension
load_ipython_extension()
from pidgin.conventions import load_ipython_extension
load_ipython_extension()
with __import__('importnb').Notebook():
from . import test_nostalgia
from pidgin.markdown import unload_ipython_extension
unload_ipython_extension()
from pidgin.conventions import load_ipython_extension
unload_ipython_extension()
| [
"tony.fast@gmail.com"
] | tony.fast@gmail.com |
21708cdebfba84b81ee7cb975756e0d8c5ed0f28 | d302880316a4eb37952ac95fcfb6f4ae23979a41 | /dataset.py | 7686f793122940667fcddcb1a6391bc0a64d52db | [] | no_license | sivaprakashSP/FlaskAPI-Stuffs | c87f71788c03c53ee219ce330c533034b04ac648 | bcdfafb00734b77c9822219fbecebdb1aa6fc5ab | refs/heads/master | 2023-02-13T22:30:40.375613 | 2021-01-11T15:36:20 | 2021-01-11T15:36:20 | 268,286,583 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,825 | py | import flask
import random
#from flask import request,jsonify
#import json
app=flask.Flask(__name__)
app.config["DEBUG"]=True
@app.route('/api')
def page():
return "API for collecting datasets"
@app.route('/api/normal',methods=['GET'])
def normal():
data={
"temparture":random.randint(36,37),
"pressure":random.randint(90,120),
"respiration":random.randint(12,16),
"glucose":random.randint(72,140),
"heartrate":random.randint(60,100),
"oxygenSaturation":random.randint(95,100),
"cholestrol":random.randint(125,200)
}
return(data)
@app.route('/api/diabetes',methods=['GET'])
def diabetes():
dataset={
"temperature": random.randint(35, 37),
"pressure": random.randint(90, 120),
"respiration": random.randint(12, 16),
"glucose": random.randint(72, 140),
"heartRate": random.randint(60, 100),
"oxygenSaturation": random.randint(50, 96),
"cholesterol": random.randint(125, 200)
}
return(dataset)
@app.route('/api/prediabetes',methods=['GET'])
def prediabetes():
dataset={
"temperature": random.randint(36, 38),
"pressure": random.randint(90, 120),
"respiration": random.randint(12, 16),
"glucose": random.randint(140,199),
"heartRate": random.randint(60, 100),
"oxygenSaturation": random.randint(95,100),
"cholesterol": random.randint(125, 200)
}
return(dataset)
@app.route('/api/ModerateAcuteAsthma',methods=['GET'])
def ModerateAcuteAsthma():
dataset={
"temperature": random.randint(35, 37),
"pressure": random.randint(90, 120),
"respiration": random.randint(20, 30),
"glucose": random.randint(72, 140),
"heartRate": random.randint(100, 125),
"oxygenSaturation": random.randint(92, 95),
"cholesterol": random.randint(125, 200),
}
return(dataset)
@app.route('/api/Hypoxemia',methods=['GET'])
def hypoxemia():
dataset={
"temperature": random.randint(35, 37),
"pressure": random.randint(90, 120),
"respiration": random.randint(20, 30),
"glucose": random.randint(72, 140),
"heartRate": random.randint(100, 125),
"oxygenSaturation": random.randint(92, 95),
"cholesterol": random.randint(125, 200),
}
return(dataset)
@app.route('/api/Bronchiectasis',methods=['GET'])
def bronchiectasis():
dataset={
"temperature": random.randint(35, 37),
"pressure": random.randint(90, 120),
"respiration": random.randint(40, 60),
"glucose": random.randint(72, 140),
"heartRate": random.randint(60, 100),
"oxygenSaturation": random.randint(95, 100),
"cholesterol": random.randint(125, 200),
}
return(dataset)
app.run(debug=True) | [
"sivaprakashsp@gmail.com"
] | sivaprakashsp@gmail.com |
4e2da9b7e58dd7169f72fe62e4899f4b94ee4a4f | a8a5fb5ad28a2af8a12b4a7699ac7a24c9acdd98 | /tests/test_io.py | 8aa18e2e09f16b5a875e04119edec67cb075ca33 | [
"MIT"
] | permissive | robtucker/pyspark-tooling | 31a2cc1aa588642815c163f9d18c5821bc341feb | 946773975b4069c448dca1590eff3ae77a25be98 | refs/heads/master | 2021-01-03T19:02:48.467728 | 2020-05-29T17:06:08 | 2020-05-29T17:06:08 | 240,201,045 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,775 | py | import pytest
import psycopg2
import random
import string
from pyspark.sql import SQLContext
from pyspark.sql.types import StructType, StructField, StringType, IntegerType
from tests import base
from pyspark_tooling import io
# @pytest.mark.focus
class TestIOUtils(base.BaseTest):
@pytest.mark.usefixtures("spark")
def test_csv_happy_path(self, spark: SQLContext):
data = [("a", 1), ("b", 2), ("c", None)]
schema = StructType(
[
StructField("key", StringType(), nullable=False),
StructField("value", IntegerType(), nullable=True),
]
)
df = spark.createDataFrame(data, schema)
path = self._random_path()
io.write_csv(path, df, schema=schema)
res = io.read_csv(spark, path, schema=schema).orderBy("key")
self.validate_values(res, schema, data)
self.wipe_data_folder()
@pytest.mark.usefixtures("spark")
def test_csv_enforces_nullable(self, spark: SQLContext):
"""Confirm that non null columns are enforced"""
data = [("a", 1), ("b", 2), ("c", None)]
nullable_schema = StructType(
[
StructField("key", StringType(), nullable=True),
StructField("value", IntegerType(), nullable=True),
]
)
strict_schema = StructType(
[
StructField("key", StringType(), nullable=False),
StructField("value", IntegerType(), nullable=False),
]
)
# create the dataframe without validating nullables
df = spark.createDataFrame(data, nullable_schema)
path = self._random_path()
# write the dataframe without validating nullables
io.write_csv(path, df)
with pytest.raises(Exception):
# should throw an error - "This field is not nullable, but got None"
io.read_csv(spark, path, schema=strict_schema).collect()
@pytest.mark.usefixtures("spark")
def test_parquet_happy_path(self, spark: SQLContext):
"""Read and write parquet with fixed schema"""
data = [("a", 1), ("b", 2), ("c", None)]
schema = StructType(
[
StructField("key", StringType(), nullable=False),
StructField("value", IntegerType(), nullable=True),
]
)
df = spark.createDataFrame(data, schema)
path = self._random_path()
io.write_parquet(path, df, schema)
res = io.read_parquet(spark, path, schema=schema).orderBy("key")
self.validate_values(res, schema, data)
self.wipe_data_folder()
@pytest.mark.usefixtures("spark")
def test_parquet_enforces_nullable(self, spark: SQLContext):
"""Confirm that non null columns are enforced"""
data = [("a", 1), ("b", 2), ("c", None)]
nullable_schema = StructType(
[
StructField("key", StringType(), nullable=True),
StructField("value", IntegerType(), nullable=True),
]
)
strict_schema = StructType(
[
StructField("key", StringType(), nullable=False),
StructField("value", IntegerType(), nullable=False),
]
)
# create the dataframe without validating nullables
df = spark.createDataFrame(data, nullable_schema)
path = self._random_path()
# write the dataframe without validating nullables
io.write_parquet(path, df)
with pytest.raises(Exception):
# should throw an error - "This field is not nullable, but got None"
io.read_parquet(spark, path, schema=strict_schema).collect()
@pytest.mark.usefixtures("spark", "postgres_credentials")
def test_jdbc_happy_path(self, spark, postgres_credentials):
table = self.table_name()
user = postgres_credentials["user"]
password = postgres_credentials["password"]
url = self.pyspark_jdbc_url(postgres_credentials)
data = [("a", 1), ("b", 2), ("c", None)]
df = spark.createDataFrame(data, ["key", "value"])
io.write_jdbc(url, table, user, password, df)
res = self.select_and_drop_table(postgres_credentials, table)
actual = sorted([tuple(i) for i in res], key=lambda x: x[0])
assert actual == data
def pyspark_jdbc_url(self, postgres_credentials: dict):
host = postgres_credentials["host"]
port = postgres_credentials["port"]
database = postgres_credentials["database"]
return f"jdbc:postgresql://{host}:{port}/{database}"
def select_and_drop_table(self, postgres_credentials: dict, table: str):
data = self.select(postgres_credentials, f"SELECT * FROM {table}")
# print('selected data', table, data)
self.exec(postgres_credentials, f"DROP TABLE {table};")
# print('dropped table', table)
return data
def select(self, postgres_credentials: dict, query: str):
"""Select rows from the database"""
conn = self.get_conn(postgres_credentials)
cur = conn.cursor()
cur.execute(query)
data = cur.fetchall()
conn.close()
cur.close()
return data
def exec(self, postgres_credentials: dict, query: str):
conn = self.get_conn(postgres_credentials)
cur = conn.cursor()
cur.execute(query)
conn.commit()
conn.close()
cur.close()
def get_conn(self, postgres_credentials: dict):
return psycopg2.connect(**postgres_credentials)
def table_name(self, count=10):
return str(
"".join([random.choice(string.ascii_letters) for _ in range(count)])
).upper()
| [
"rob@coderlab.co.uk"
] | rob@coderlab.co.uk |
8d5966f9480e8730f62c18c0b0bd037c1b64eaf0 | fe75921a0f303f4954f1df91b7bbddc075ad4373 | /product_importer/migrations/0001_initial.py | f3e50cd9e0b20a93563c550dc18a16479fc7605f | [] | no_license | jagadeesh-r1/fulfil | 8f8e1a893b3ca506be4cbb54beb0ae8e7ad29c2a | da172e6f276e9ab32eb6bdd207dc244ad5445deb | refs/heads/main | 2023-06-30T16:43:54.060309 | 2021-08-01T13:19:19 | 2021-08-01T13:19:19 | 390,650,920 | 0 | 0 | null | 2021-08-01T13:19:20 | 2021-07-29T08:14:23 | Python | UTF-8 | Python | false | false | 617 | py | # Generated by Django 3.2.5 on 2021-07-30 15:17
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Products',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('sku_id', models.CharField(max_length=50, unique=True)),
('description', models.TextField()),
],
),
]
| [
"mail4jagadeeshreddy@gmail.com"
] | mail4jagadeeshreddy@gmail.com |
8b4db53c8bebf284a0620aba78a3fc463f9e0295 | eb61d62ca1f6f0123e3771105f5dfbbd6115138d | /.history/19-08-21_20210912000723.py | 4ea5b8ff6f4a3d8b68e5e42719cd1347908f6ed8 | [] | no_license | Alopezm5/CORRECTO-2 | e0f14bcc3a88c0e222d10e3261e68532008bc42e | 223613f1fb04dce3fac9f82f243cb2f22fe100f3 | refs/heads/main | 2023-07-29T06:52:48.147424 | 2021-09-12T20:33:27 | 2021-09-12T20:33:27 | 388,995,308 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,157 | py | class Empresa:
def __init__(self,nom="El mas barato",ruc="0999999999",tel="042971234",dir="Juan Montalvo"):
self.nombre=nom
self.ruc=ruc
self.telefono=tel
self.direccion=dir
def mostrarEmpresa(self):
print("Empresa: {:17}, RUC: {}".format(self.nombre,self.ruc))
class Cliente:
def __init__(self,nom,ced,tel):
self.nombre=nom
self.cedula=ced
self.telefono=tel
def mostrarCliente(self):
print(self.nombre,self.cedula,self.telefono)
class ClienteCorporativo(Cliente):
def __init__(self,nomb,cedu,telecontrato):
super().__init__(nomb,cedu,tele,contrato)
self.__contrato=contrato
@property
def contrato(self): #getter: obtener el valor del atributo privado
return self.__contrato
@contrato.setter
def contrato(self,value): #setter: asigna el valor del atributo privado
if value:
self.__contrato=value
else:
self.__contrato="Sin contrato"
def mostrarCliente(self):
print(self.nombre, self.__contrato)
class ClientePersonal(Cliente):
def __init__(self,nom,ced,tel,promocion=True):
super().__init__(nom,ced,tel,)
self.__promocion=promocion
@property
def promocion(self): #getter: obtener el valor del atributo privado
if self.__promocion==True:
return "10% descuento"
else:
return "No hay descuento"
def mostrarCliente(self):
print(self.nombre, self.__promocion)
class DetVenta:
lineaDetalle=0
def __init__(self,articulo,cantidad):
DetVenta.lineaDetalle+=1
self.line
class CabVenta:
def __init__(self,fac,empresa,fecha,cliente,tot=0):
self.empresa=empresa
self.factura=fac
self.fecha=fecha
self.cliente=cliente
self.total=tot
self.detalleVen=[]
# emp=Empresa("El mas barato","0953156049","0998132446","Coop. Juan Montalvo")
# emp.mostrarEmpresa()
# print(emp.nombre)
cli1=ClientePersonal("Jose","0912231499","042567890",True)
cli1.mostrarCliente
| [
"85761855+Alopezm5@users.noreply.github.com"
] | 85761855+Alopezm5@users.noreply.github.com |
cf406bcab3aa81a862ce748f8e3ba8feb952dfa3 | 592b671dd5badfd199f27422b442c65a7db4a697 | /knowledge/cron/API_user_portrait/person_organization.py | 0cafe2c8fb55f8db594111d55448c3de2d647436 | [] | no_license | gaofeifei/knowledge_revised | eea10058a73454519a038a315e4b23a0ad4b3afa | 0ee7fc70e54e99d5d70f78030b90aceaa43aa98c | refs/heads/master | 2020-05-22T20:53:16.230561 | 2017-03-22T02:29:12 | 2017-03-22T02:29:12 | 84,723,562 | 0 | 0 | null | 2017-03-12T12:25:28 | 2017-03-12T12:25:28 | null | UTF-8 | Python | false | false | 14,378 | py | # -*- coding: UTF-8 -*-
import os
import time
import scws
import csv
import sys
import json
import heapq
from elasticsearch import Elasticsearch
from config_relation import *
#sys.path.append('../manage_neo4j/')
from neo4j_relation import *
class TopkHeap(object):
def __init__(self, k):
self.k = k
self.data = []
def Push(self, elem):
if len(self.data) < self.k:
heapq.heappush(self.data, elem)
else:
topk_small = self.data[0][0]
if elem[0] > topk_small:
heapq.heapreplace(self.data, elem)
def TopK(self):
return [x for x in reversed([heapq.heappop(self.data) for x in xrange(len(self.data))])]
def get_profile_by_uid(uidlist):#根据uid查询用户的背景信息
user_dict = dict()
search_result = es_user_portrait.mget(index=portrait_index_name, doc_type=portrait_index_type, body={"ids": uidlist})["docs"]
for item in search_result:
uid = item['_id']
if not item['found']:
user_dict[str(uid)] = {'verified_type':'Null','description':set()}
continue
else:
data = item['_source']
des = data['description'].encode('utf-8')
v_type = data['verify_type']
if len(des) > 0:
des_set = cut_des(des)
else:
des_set = set()
user_dict[str(uid)] = {'verified_type':v_type,'description':des_set}
return user_dict
def get_interaction_by_uid(uidlist):#根据uid查询用户的交互情况
s_uid = uidlist[0]
ts = get_db_num(time.time())
friend_dict = dict()
search_result = es_retweet.mget(index=retweet_index_name_pre+str(ts), doc_type=retweet_index_type, body={"ids": uidlist})["docs"]
for item in search_result:
uid = item['_id']
if not item['found']:
continue
else:
data = item['_source']['uid_retweet']
data = eval(data)
for k,v in data.iteritems():
if uid == k:
continue
uid_str = uid + '&' + k
uid_str2 = k + '&' + uid
if friend_dict.has_key(uid_str):
friend_dict[uid_str] = friend_dict[uid_str] + int(v)
elif friend_dict.has_key(uid_str2):
friend_dict[uid_str2] = friend_dict[uid_str2] + int(v)
else:
friend_dict[uid_str] = int(v)
search_result = es_retweet.mget(index=be_retweet_index_name_pre+str(ts), doc_type=be_retweet_index_type, body={"ids": uidlist})["docs"]
for item in search_result:
uid = item['_id']
if not item['found']:
continue
else:
data = item['_source']['uid_be_retweet']
data = eval(data)
for k,v in data.iteritems():
if uid == k:
continue
uid_str = uid + '&' + k
uid_str2 = k + '&' + uid
if friend_dict.has_key(uid_str):
friend_dict[uid_str] = friend_dict[uid_str] + int(v)
elif friend_dict.has_key(uid_str2):
friend_dict[uid_str2] = friend_dict[uid_str2] + int(v)
else:
friend_dict[uid_str] = int(v)
search_result = es_comment.mget(index=comment_index_name_pre+str(ts), doc_type=comment_index_type, body={"ids": uidlist})["docs"]
for item in search_result:
uid = item['_id']
if not item['found']:
continue
else:
data = item['_source']['uid_comment']
data = eval(data)
for k,v in data.iteritems():
if uid == k:
continue
uid_str = uid + '&' + k
uid_str2 = k + '&' + uid
if friend_dict.has_key(uid_str):
friend_dict[uid_str] = friend_dict[uid_str] + int(v)
elif friend_dict.has_key(uid_str2):
friend_dict[uid_str2] = friend_dict[uid_str2] + int(v)
else:
friend_dict[uid_str] = int(v)
search_result = es_comment.mget(index=be_comment_index_name_pre+str(ts), doc_type=be_comment_index_type, body={"ids": uidlist})["docs"]
for item in search_result:
uid = item['_id']
if not item['found']:
continue
else:
data = item['_source']['uid_be_comment']
data = eval(data)
for k,v in data.iteritems():
if uid == k:
continue
uid_str = uid + '&' + k
uid_str2 = k + '&' + uid
if friend_dict.has_key(uid_str):
friend_dict[uid_str] = friend_dict[uid_str] + int(v)
elif friend_dict.has_key(uid_str2):
friend_dict[uid_str2] = friend_dict[uid_str2] + int(v)
else:
friend_dict[uid_str] = int(v)
if not len(friend_dict):
return [],[]
keyword = TopkHeap(interaction_count)
for k,v in friend_dict.iteritems():
if v >= inter_sta:
keyword.Push((v,k))
friend_list = []
keyword_data = keyword.TopK()
if not len(keyword_data):
return [],[]
for item in keyword_data:
k1,k2 = item[1].split('&')
if k1 == s_uid:
friend_list.append(k2)
elif k2 == s_uid:
friend_list.append(k1)
else:
continue
profile_result = get_profile_by_uid(friend_list)
people_list = []
organization_list = []
for k,v in profile_result.iteritems():
if v['verified_type'] == 'Null':
continue
if v['verified_type'] in org_list:
organization_list.append(k)
else:
people_list.append(k)
return people_list,organization_list
def get_colleague_r(des,s_uid):#提取业务关联关系(人物与人物、人物与机构)
people_list = []
organization_list = []
w_list = []
if len(des) == 0:
return []
elif len(des) > 0 and len(des) <= 3:
n = 1
else:
n = float(len(des))*event_sta
n = int(n)
for w in des:
w_list.append({"term":{"description":w}})
query_body = {
"query":{
"bool":{
"should":w_list,
"minimum_should_match": n
}
},
"size":2000
}
search_results = es_user_portrait.search(index=portrait_index_name, doc_type=portrait_index_type, body=query_body)['hits']['hits']
n = len(search_results)
if n > 0:
for item in search_results:
uid = item['_id'].encode('utf-8')
if uid == s_uid:
continue
data = item['_source']
if data['verify_type'] in org_list:
organization_list.append(uid)
else:
people_list.append(uid)
return people_list,organization_list
def get_ip_r(uidlist,s_uid):#IP关联关系
user_dict = dict()
people_list = []
search_result = es_user_portrait.mget(index=portrait_index_name, doc_type=portrait_index_type, body={"ids": uidlist})["docs"]
if len(search_result) == 0:
return []
for item in search_result:
uid = item['_id']
if not item['found']:
user_dict[str(uid)] = {}
continue
else:
data = item['_source']
a_ip = data['activity_ip']
h_ip = data['home_ip']
j_ip = data['job_ip']
user_dict[str(uid)] = {'activity_ip':a_ip,'home_ip':h_ip,'job_ip':j_ip}
for k,v in user_dict.iteritems():
if len(v):#有数据
w_list = []
for k1,v1 in v.iteritems():
if len(v1):#不为空
w_list.append({"term":{k1:v1}})
else:
continue
if not len(w_list):
return []
query_body = {
"query":{
"bool":{
"should":w_list,
"minimum_should_match": 1
}
},
"size":2000
}
search_results = es_user_portrait.search(index=portrait_index_name, doc_type=portrait_type, body=query_body)['hits']['hits']
if len(search_results) > 0:
for item in search_results:
uid = item['_id'].encode('utf-8')
if uid == s_uid:
continue
data = item['_source']
if data['verify_type'] not in org_list:
people_list.append(uid)
else:
continue
else:
return []
return people_list
def person_organization(uid_list,relation_dict):#计算人物-人物,人物-机构之间的关系
'''
输入数据:
uid 人物或机构
relation_dict 关系字典
输出数据:
字符串提示语:
'Empty Uid List' 数据为空
'Partly Success' 部分成功
'Totally Success' 节点和关系建立成功
'''
if len(uid_list) == 0:
return 'Empty Uid List'#数据为空
if len(relation_dict) == 0:
r_flag = 1
else:
r_flag = 0
count = 1
for uid in uid_list:
profile = get_profile_by_uid([uid])
if r_flag == 1:#关系字典没有数据
relation_list = [colleague,friend,ip_relation]
else:
try:
relation_list = relation_dict[uid]
except KeyError:
relation_list = [colleague,friend,ip_relation]
if len(profile[uid]['description']):
p1,o1 = get_colleague_r(profile[uid]['description'],uid)#自述关联关系
else:
p1 = []
o1 = []
p2,o2 = get_interaction_by_uid([uid])#交互关系
if profile[uid]['verified_type'] == 'Null':#没有数据
p3 = get_ip_r([uid],uid)#IP关联关系
relation_dict = {colleague:{'person':p1,'organization':o1},friend:{'person':p2,'organization':o2},ip_relation:{'people':p3}}
flag = '-1'
else:
if profile[uid]['verified_type'] not in org_list:#输入的为人物
p3 = get_ip_r([uid],uid)#IP关联关系
relation_dict = {colleague:{'person':p1,'organization':o1},friend:{'person':p2,'organization':o2},ip_relation:{'people':p3}}
flag = '1'
else:
relation_dict = {or_colleague:{'person':p1,'organization':o1},or_friend:{'person':p2,'organization':o2}}
flag = '0'
if flag == '0':#节点类型为机构
r = create_person(org_node, org_primary, uid, org_index_name)
if r == 'Node Wrong' or r == 'Node Type Wrong':
continue
input_list = []
for k,v in relation_dict.iteritems():
if k in relation_list:
try:
person_list = v['person']
except KeyError:
person_list = []
try:
organ_list = v['organization']
except KeyError:
organ_list = []
for p in person_list:
r = create_person(people_node, people_primary, p, node_index_name)
if r == 'Wrong' or r == 'Node Type Wrong':
continue
input_list.append([[0,uid],k,[1,p]])
for o in organ_list:
r = create_person(org_node, org_primary, o, org_index_name)
if r == 'Wrong' or r == 'Node Type Wrong':
continue
input_list.append([[0,uid],k,[0,o]])
else:
continue
else:
r = create_person(people_node, people_primary, uid, node_index_name)
if r == 'Node Wrong' or r == 'Node Type Wrong':
continue
input_list = []
for k,v in relation_dict.iteritems():
if k in relation_list:
try:
person_list = v['person']
except KeyError:
person_list = []
try:
organ_list = v['organization']
except KeyError:
organ_list = []
for p in person_list:
r = create_person(people_node, people_primary, p, node_index_name)
if r == 'Wrong' or r == 'Node Type Wrong':
continue
input_list.append([[0,uid],k,[1,p]])
for o in organ_list:
r = create_person(org_node, org_primary, o, org_index_name)
if r == 'Wrong' or r == 'Node Type Wrong':
continue
input_list.append([[0,uid],k,[0,o]])
else:
continue
if len(input_list) > 0:
nodes_rels(input_list)
count = count + 1
if count < len(uid_list):
return 'Partly Success'
else:
return 'Totally Success'
if __name__ == '__main__':
result = person_organization(['2117306420','5779325975'],{'2117306420':['friend','colleague'],'5779325975':['friend','colleague']})
print result
## relation_list = [[[0,'5779325975'],'friend',[0,'1703371307']]]
## result = nodes_rels(relation_list)
## print result
## p_list = get_colleague_r(["消失","命运"])
## print p_list
| [
"1257819385@qq.com"
] | 1257819385@qq.com |
fb814dd78c19550480f1867a3da068ab17c60a80 | ec1fa293ccdb3b11ab0172aad1893c95a6aacbb5 | /comm/pipeline.py | 3793c7e5847820aa43b60e9cf1cf179f5f8b70f6 | [] | no_license | benkehoe/python | c6ce388a488b87a63aaf8c07d28593beb147293c | e1ee1ef836b0bfe009c47e7991609db617c7826a | refs/heads/master | 2021-01-18T10:31:52.209835 | 2012-07-23T22:05:29 | 2012-07-23T22:05:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,655 | py | import networkx as nx
from os.path import isdir, join, exists, basename
from glob import glob
import matplotlib.pyplot as plt
from time import sleep, time
from . import comm
import subprocess, signal
from utils.colorize import colorize
import traceback, os, logging, json
class Pipeline(object):
env = {}
def __init__(self):
self.graph = nx.DiGraph()
def add_topic(self, topic, **kw):
self.graph.add_node(topic,type="topic",**kw)
def add_file(self, file):
self.graph.add_node(file,type="file")
def add_program(self, program, command, dependencies, products):
self.graph.add_node(program,type="program",command=command)
for item in dependencies:
if not self.graph.has_node(item): raise Exception("you forgot to add item %s"%item)
self.graph.add_edge(item, program)
for item in products:
if not self.graph.has_node(item): raise Exception("you forgot to add item %s"%item)
self.graph.add_edge(program, item)
def restrict_to_target(self, target):
revgraph = self.graph.reverse()
nodes = nx.dfs_postorder(revgraph, target)
new_graph = self.graph.subgraph(nodes)
new_pipeline = Pipeline()
new_pipeline.graph = new_graph
new_pipeline.env = self.env
return new_pipeline
def get_all_status(self):
return dict([(item,self.get_status(item)) for item in self.get_items()])
def get_status(self,item):
if self.graph.node[item]["type"] == "topic":
if exists(item): return data_nums(item)
else: return None
elif self.graph.node[item]["type"] == "file":
if exists(item): return 1
else: return 0
def get_items(self):
return [node for node in self.graph.nodes() if self.graph.node[node]["type"] in ["topic","file"]]
def get_programs(self):
return [node for node in self.graph.nodes() if self.graph.node[node]["type"] == "program"]
def get_topics(self):
return [node for node in self.graph.nodes() if self.graph.node[node]["type"] == "topic"]
def get_source_topic(self):
return "kinect"
# for topic in nx.topological_sort(self.graph):
# if self.graph.node[topic]["type"] == "topic" and not (: return topic
def get_target_topic(self):
return nx.topological_sort(self.graph)[-1]
def ready_to_run(self, program, item2status):
item2status = self.get_all_status()
return all(bool(item2status[pred]) for pred in self.graph.predecessors(program))
def products_already_made(self, program, item2done):
return all(item2done[item] for item in self.graph.successors(program))
def makes_topic(self, program):
return any(self.graph.node[succ]["type"] == "topic" for succ in self.graph.successors(program))
def get_all_doneness(self, item2status):
source_topic = self.get_source_topic()
source_count = max(item2status[source_topic]) if bool(item2status[source_topic]) else 0
item2done = {}
for (item, status) in item2status.items():
if self.graph.node[item]["type"] == "topic":
item2done[item] = bool(item2status[item]) and max(item2status[item]) == source_count
elif self.graph.node[item]["type"] == "file":
item2done[item] = bool(item2status[item])
return item2done
def cleanup_all(self, lifetime):
target_topic = self.get_target_topic()
item2status = self.get_all_status()
max_id_target = status2count(item2status[target_topic]) - 1
min_id = max(max_id_target-lifetime,0)
if max_id_target == -1: return
for topic in self.get_topics():
if item2status[topic] and not self.graph.node[topic].get("dont_clean"):
if not self.graph.node[topic].get("extension"):
self.graph.node[topic]["extension"] = glob(comm.filePath("data0*",topic))[0].split('.')[-1]
if self.graph.node[topic].get("async"):
dataFiles = glob(comm.filePath("data*",topic))
infoFiles = glob(comm.filePath("info*",topic))
lastTargetInfoFile = comm.makeNamePair(max_id_target, "ass", target_topic)[1]
with open(lastTargetInfoFile,"r") as fh: info = json.load(fh)
lastTargetTime = info["time"]
for (dataName, infoName) in zip(dataFiles,infoFiles):
if lastTargetTime - os.stat(infoName).st_mtime > 10: #10 seconds
logging.info("deleting %s",dataName)
logging.info("deleting %s",infoName)
os.unlink(dataName)
os.unlink(infoName)
else:
for id in item2status[topic]:
if id < min_id:
dataName, infoName = comm.makeNamePair(id, self.graph.node[topic]["extension"], topic)
logging.info("deleting %s",dataName)
logging.info("deleting %s",infoName)
os.unlink(dataName)
os.unlink(infoName)
def throttle(self, target_topic, item2status, max_lag):
source_topic = self.get_source_topic()
comm.setThrottled(source_topic,
status2count(item2status[source_topic]) - status2count(item2status[target_topic]) > max_lag)
def status2count(status):
"status = None | [Int]"
return 0 if bool(status)==False else max(status)
class PipelinePlot(object):
first_draw = True
def __init__(self, pipeline):
self.pipeline = pipeline
self.graph = pipeline.graph
type2color = dict(file = "orange", topic="red", program="blue")
self.colors = [type2color[self.graph.node[node]["type"]] for node in self.graph.nodes()]
self.positions = nx.graphviz_layout(self.graph, prog="dot")
self.labels = dict(zip(self.graph.nodes(), self.graph.nodes()))
def draw(self, item2status = None):
if item2status is None:
item2status = self.pipeline.get_all_status()
self.update_labels(item2status)
if self.first_draw:
nx.draw_networkx(self.graph, pos = self.positions, labels = self.labels, node_color = self.colors, nodelist = self.graph.nodes())
self.first_draw = False
else:
for text_obj in plt.gca().texts:
maybe_node = text_obj.get_text().split()[0]
if self.labels.has_key(maybe_node): text_obj.set_text(self.labels[maybe_node])
def update_labels(self, item2status):
for (item, status) in item2status.items():
if status is None: status_str = "-"
elif status == []: status_str = "0"
elif isinstance(status, list): status_str = "%i: %i"%(min(status),max(status))
else: status_str = str(status)
self.labels[item] = "%s\n%s"%(item,status_str)
def loop(self):
plt.ion()
while True:
self.draw()
plt.draw()
sleep(.1)
def execute_series(pipeline, dry_run=False):
nodedict = pipeline.graph.node
ordered_progs = [node for node in nx.topological_sort(pipeline.graph) if nodedict[node]["type"] == "program"]
for prog in ordered_progs:
command = nodedict[prog]["command"]
item2status = pipeline.get_all_status()
if pipeline.products_already_made(prog, item2status):
print "next:", colorize("skipping %s"%prog, "red")
logging.info("skipping %s",prog)
continue
print colorize(command,"red")
logging.info(command)
raw_input("press enter to continue")
if not dry_run:
child = subprocess.Popen(command.split(), env=pipeline.env)
try:
interrupted=False
while child.poll() is None: sleep(.1)
except KeyboardInterrupt:
interrupted=True
if not interrupted and child.returncode != 0:
raise subprocess.CalledProcessError(child.returncode, command)
#try:
#while child.poll():
#sleep(1)
#except KeyboardInterrupt:
#print "got signal. terminating subprocess"
#child.send_signal(signal.SIGINT)
def execute_parallel(pipeline, lifetime, max_lag, noclean = False):
nodedict = pipeline.graph.node
target_topic = pipeline.get_target_topic()
prog2child = {}
remaining_progs = pipeline.get_programs()
#plot = PipelinePlot(pipeline)
try:
while True:
item2status = pipeline.get_all_status()
item2done = pipeline.get_all_doneness(item2status)
remaining_progs = [prog for prog in remaining_progs if not pipeline.products_already_made(prog, item2done) or pipeline.makes_topic(prog)]
for prog in remaining_progs:
if pipeline.ready_to_run(prog, item2status):
remaining_progs.remove(prog)
command = nodedict[prog]["command"]
print colorize(command, "red")
logging.info(command)
child = subprocess.Popen(command.split(), env=pipeline.env)
prog2child[prog] = child
for (prog, child) in prog2child.items():
if child.poll() is not None and child.returncode != 0:
print colorize("%s failed"%prog, "red")
logging.error("%s failed",prog)
for child in prog2child.values():
if child.poll() is None: child.terminate()
return
if not noclean: pipeline.cleanup_all(lifetime)
pipeline.throttle(target_topic, item2status, max_lag)
#plot.draw()
sleep(.1)
except Exception:
traceback.print_exc()
for child in prog2child.values():
if child.poll() is None: child.terminate()
return
def data_nums(topic):
fnames = glob(join(topic,"info*.json"))
nums = sorted([int(basename(fname)[4:16]) for fname in fnames])
return nums
| [
"joschu@rhino.(none)"
] | joschu@rhino.(none) |
d0757be0929862c2d268c72e9f6ef4c07bdec91f | a662ccb2b5fcddec6b5d987096cce1384b7ae347 | /swinf/conf/code_templates/controller.py | 6774dc14e2a53ce11ff1925421cb88fcfe1f0779 | [] | no_license | swinf/Swinf | 5d7c9790cfd6461871dc3fb0a00b5eebadb0a6b1 | a961448636dc8de586d827b0aec2f9878ab3d4f7 | refs/heads/master | 2021-01-19T00:16:42.142213 | 2013-04-06T06:41:43 | 2013-04-06T06:41:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 284 | py | from swinf.selector import handler, bind_environ
__handlespace__ = {}
bind_environ(__handlespace__)
# ------------- your code here --------------------
@handler()
def hello():
"""
visit http://localhost:8080/hello in your brower
"""
return "<h1>Hello World!</h1>"
| [
"superjom@gmail.com"
] | superjom@gmail.com |
efedb27beb0b38e56bddf55c643519adf011af13 | 0958c7bd7cba2ba1784ce1594daebffd57a8b24c | /mentoruserapp/models.py | 8d8a81cb854a92711ef5bf8120210735e9068c43 | [] | no_license | boddachappu/mentoruser | e9bbea6c41fded954e92d5887c41a3fb5ed46c35 | 52ee9f64987a624cdcd7bf55df11c658d9f82d6a | refs/heads/master | 2023-03-02T08:48:45.912954 | 2021-02-13T15:41:59 | 2021-02-13T15:41:59 | 338,606,184 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,880 | py | from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, PermissionsMixin
# Create your models here.
class UserManager(BaseUserManager):
def create_user(self, email, password=None):
if not email:
raise ValueError("Users must have an email address")
user = self.model(
email=self.normalize_email(email),
)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, password):
user = self.create_user(
email=self.normalize_email(email),
password=password
)
user.is_staff = True
user.admin = True
user.is_superuser = True
user.save(using=self._db)
return user
def get_by_natural_key(self, username):
return self.get(email=username)
class User(AbstractBaseUser, PermissionsMixin):
email = models.EmailField(verbose_name="email", max_length=60, unique=True)
date_joined = models.DateTimeField(verbose_name="date joined", auto_now_add=True)
last_login = models.DateTimeField(verbose_name="last login", auto_now_add=True)
is_admin = models.BooleanField(default=False)
is_active = models.BooleanField(default=False)
is_staff = models.BooleanField(default=False)
is_superuser = models.BooleanField(default=False)
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = []
objects = UserManager()
def __str__(self):
return self.email
def has_perm(self, perm, obj=None):
return self.is_admin
def has_module_perms(self, app_label):
return True
class Mentor(models.Model):
username = models.CharField(max_length=30, null=False, blank=False)
password = models.CharField(max_length=8, null=False, blank=False)
is_mentor = models.BooleanField()
| [
"vinodcharan6@gmail.com"
] | vinodcharan6@gmail.com |
b223f986c44a1b5fc45f07b1347a2f08b488a185 | e5be73046c4c4f394fc953be50fb45c4d91c66e7 | /tensorflow-tutorial-exercises/9_1(test inception-v3).py | 821e582b4deef1c8ace708bc2ad68f11dbf899bc | [] | no_license | bei1994/Directed-Research | b6477f44b704490abc968a37f7317027c2696c23 | 7e39105acadad32301d0fce1a9f49b630ce417c6 | refs/heads/master | 2020-03-26T21:03:01.433964 | 2018-08-20T23:00:01 | 2018-08-20T23:00:01 | 145,363,029 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,843 | py | import tensorflow as tf
import os
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
classId_strId_filepath = "retrain/output_labels.txt"
lines = tf.gfile.GFile(classId_strId_filepath).readlines()
classId_to_strId = {}
for classId, line in enumerate(lines):
line = line.strip("\n")
classId_to_strId[classId] = line
def classId_to_depict(classId):
if classId not in classId_to_strId:
return ""
return classId_to_strId[classId]
with tf.Session() as sess:
# import inception_v3 model into sess.graph
with tf.gfile.FastGFile("retrain/output_graph.pb", "rb") as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
tf.import_graph_def(graph_def)
softmax_tensor = sess.graph.get_tensor_by_name("import/final_result:0")
# iterate all images to test
for root, dirs, files in os.walk("retrain/test_images/"):
for file in files:
if not file.endswith('.jpg') or file.startswith('.'):
continue # Skip!
# fetch image
image_data = tf.gfile.FastGFile(os.path.join(root, file), "rb").read()
predict = sess.run(softmax_tensor, feed_dict={"import/DecodeJpeg/contents:0": image_data})
predict = np.squeeze(predict)
image_path = os.path.join(root, file)
print(image_path)
# img = Image.open(image_path)
# plt.imshow(img)
# plt.axis("off")
# plt.show()
top_5 = predict.argsort()[-5:][::-1]
for classId in top_5:
depict = classId_to_depict(classId)
score = predict[classId]
print("%s (score = %.5f)" % (depict, score))
print()
writer = tf.summary.FileWriter("inception_log/", sess.graph)
writer.close()
| [
"noreply@github.com"
] | bei1994.noreply@github.com |
583d2c467aa249663bf516584f21df08ceb777db | 0fccee4c738449f5e0a8f52ea5acabf51db0e910 | /genfragments/ThirteenTeV/GMSB/GMSB_L200TeV_Ctau1200cm_Pythia8_13TeV_cff.py | 54ddbc89689a3f7e9c6eb29d5914856572088784 | [] | no_license | cms-sw/genproductions | f308ffaf3586c19b29853db40e6d662e937940ff | dd3d3a3826343d4f75ec36b4662b6e9ff1f270f4 | refs/heads/master | 2023-08-30T17:26:02.581596 | 2023-08-29T14:53:43 | 2023-08-29T14:53:43 | 11,424,867 | 69 | 987 | null | 2023-09-14T12:41:28 | 2013-07-15T14:18:33 | Python | UTF-8 | Python | false | false | 1,018 | py | import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.Pythia8CUEP8M1Settings_cfi import *
generator = cms.EDFilter("Pythia8GeneratorFilter",
comEnergy = cms.double(13000.0),
pythiaHepMCVerbosity = cms.untracked.bool(False),
pythiaPylistVerbosity = cms.untracked.int32(1),
filterEfficiency = cms.untracked.double(1.0),
SLHAFileForPythia8 = cms.string('Configuration/Generator/data/GMSB/GMSB_Lambda200TeV_CTau1200cm.slha'),
PythiaParameters = cms.PSet(
pythia8CommonSettingsBlock,
pythia8CUEP8M1SettingsBlock,
processParameters = cms.vstring(
'SUSY:all on',
),
parameterSets = cms.vstring('pythia8CommonSettings',
'pythia8CUEP8M1Settings',
'processParameters')
)
)
ProductionFilterSequence = cms.Sequence(generator)
| [
"shubhanshu.chauhan.cern.ch"
] | shubhanshu.chauhan.cern.ch |
b931bf521cb6431486fd01c48dac2d10651941ea | d38a0692cd1759367b25a65261335a3be9040fb6 | /Challenges/C_CPP/0007_buffer_under_read/feedback.py | 9739d56cd93e103402e18f3207c4d24562fa7ab9 | [
"MIT"
] | permissive | saucec0de/sifu | 8308f43e70e084002233b944ca0c9becbcb91571 | 7924844e1737c7634016c677237bccd7e7651818 | refs/heads/main | 2023-01-28T11:22:18.058418 | 2020-12-02T14:10:01 | 2020-12-02T14:10:01 | 317,881,746 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 642 | py | #!/usr/bin/env python3
#
# Copyright (c) Siemens AG, 2020
# tiago.gasiba@gmail.com
#
# SPDX-License-Identifier: MIT
#
# This will add tags to the findings in unit_test.json that were added by analyse.py
import json
FILE = './sifu_results/unit_test.json'
with open(FILE, 'r+') as f:
# read json
# =========
data = json.load(f)
# modify json
# ===========
for finding in data:
if 'underflow' in finding['msg'].lower():
finding['tag'] = 'INCREMENTAL_2_GET_INDEX_OF_RIGHTMOST_CHAR_'
# write json back
# ===============
f.seek(0)
json.dump(data, f, indent=2)
f.truncate()
| [
"tiago.gasiba@gmail.com"
] | tiago.gasiba@gmail.com |
c839c009010060d138364ec7eebd197de5c57ed7 | 34b0492d2bb4a43673497a2c66e7d45245e3ec76 | /scrapy.py | bf72eb4ca82cb12b53f7be52a24cfcdd485666b5 | [] | no_license | liangweiSam/AutoTtScrapy | 1230a83876a00f05321e2eb08ca562370bb21216 | bb12f6fb41ddf12f68046ecebb635f4bfad704bb | refs/heads/master | 2021-05-10T21:50:57.509081 | 2018-01-20T11:56:13 | 2018-01-20T11:56:13 | 118,241,025 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,383 | py | # -*- coding:utf-8 -*-
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.wait import WebDriverWait
import xlwt, xlrd
from PIL import Image
from urllib import request
import requests
import time
import io
import sys, os
import re
import goods_Scrapy
import shutil
class scrapy(object):
def __init__(self):
self.url = 'https://sso.toutiao.com/login/?service=https://mp.toutiao.com/sso_confirm/?redirect_url=JTJG'
self.js = "arguments[0].scrollIntoView()"
def iCode(self, driver):
imgCode = driver.find_element_by_xpath('//img[@class="y-right captcha"]')
imgUrl = imgCode.get_attribute('src')
if 'iImg' not in os.listdir():
os.makedirs('iImg')
response = request.urlopen(imgUrl)
name = str(int(time.time()))
with open('iImg/%s.jpg' %(name), mode = 'wb') as iImg:
iImg.write(response.read())
time.sleep(0.5)
return 'iImg/%s.jpg' %(name)
# s = io.BytesIO()
# s.write(response.read())
# image = Image.open(s)
# image.show()
# return input('请输入验证码:')
def phoneCode(self, driver, tel, codeN):
mobile = driver.find_element_by_id('mobile')
captcha1 = driver.find_element_by_id('captcha1')
code_btn = driver.find_element_by_xpath('/html/body/div/div/div[2]/div/div/div/form/div[3]/span')
mobile.send_keys(tel)
captcha1.send_keys(codeN)
time.sleep(1.5)
code_btn.click()
# return input('请输入手机验证码:')
def pressSubmit(self, driver, phoneCode):
submitBtn = driver.find_elements_by_name('submitBtn')
code = driver.find_element_by_id('code')
code.send_keys(phoneCode)
submitBtn[0].click()
time.sleep(1)
self.Work(driver)
def getInfoFromExcel(self, times):
data = xlrd.open_workbook('excel/Good%s.xls' %str(times+1))
table = data.sheets()[0]
nrows = table.nrows
urlLinks = []
descriptions = []
imgPaths = []
for i in range(nrows):
if i == 0:
continue
descriptions.append(table.row_values(i)[0])
urlLinks.append(table.row_values(i)[0])
imgPaths.append(table.row_values(i)[0])
return descriptions, urlLinks, imgPaths
def elementExist(self, driver):
while True:
time.sleep(1)
try:
element = driver.find_element_by_xpath('//*[@id="root"]/div/div[2]/div[2]/div/div[2]/div[6]/div[2]/div[2]/span/div/div[2]/div[3]/button[1]')
element.click()
time.sleep(1)
return True
break
except Exception as e:
return False
break
def is_element_exist(self, xpath, driver):
# s = driver.find_element_by_xpath(xpath)
# driver.implicitly_wait(1)
try:
s = driver.find_element_by_xpath(xpath)
driver.implicitly_wait(1)
return True
except Exception as e:
return False
# if s == None:
# # print("元素未找到")
# return False
# else:
# return True
    '''
    '''
    def Work(self, driver):
        """Main upload loop: run four publish rounds in the Toutiao figure editor.

        Each round waits until the browser has left the login page, opens the
        image-post editor, uploads up to 10 local images listed in
        excel/Good<round+1>.xls, fills in the round's title from TtExcel/1.xls,
        saves the post, and — if a confirmation dialog appears — finishes
        publishing in the newly opened window.  After the fourth round the
        working directories are deleted and the browser is closed.

        :param driver: a logged-in selenium WebDriver
        """
        times = 0  # round counter; one workbook/post per round
        '''
        上传图片
        '''
        # --- part 1: upload images ---
        while times < 4:
            current_handle = driver.current_window_handle
            print(times)
            # busy-wait until the browser has navigated away from the login URL
            while True:
                if driver.current_url != self.url:
                    break
            print('准备开始上传图片')
            driver.get('https://mp.toutiao.com/profile_v2/figure')
            driver.implicitly_wait(1)
            if times > 0:
                # NOTE(review): presumably dismisses leftover state from the
                # previous round when this element is present — confirm.
                # delete = driver.find_element_by_xpath()
                delete_exist = self.is_element_exist('//*[@id="root"]/div/div[2]/div[2]/div/div[1]/div/div/span[2]', driver)
                if delete_exist == True:
                    delete = driver.find_element_by_xpath('//*[@id="root"]/div/div[2]/div[2]/div/div[1]/div/div/span[2]')
                    driver.execute_script(self.js, delete)
                    delete.click()
            upLoadBtn = driver.find_element_by_xpath('//button[@class="tui-btn tui-btn-negative pgc-button"]')
            driver.implicitly_wait(5)
            # scroll the upload button into view before clicking it
            driver.execute_script(self.js, upLoadBtn)
            time.sleep(2)
            upLoadBtn.click()
            time.sleep(1)
            descriptions, urlLinks, imgPaths = self.getInfoFromExcel(times)
            # goodList = os.listdir('img')
            # need imgUrl
            # convert the workbook paths to absolute paths before handing them
            # to the file <input>
            newGoodList = []
            for i in imgPaths:
                a = os.path.abspath(i)
                newGoodList.append(a)
            # only the first 10 entries are uploaded
            for y in newGoodList[:10]:
                print(y)
                fileInput = driver.find_element_by_xpath('//*[@id="pgc-upload-img"]//input')
                fileInput.send_keys(y)
                time.sleep(0.5)
            # keep clicking the dialog's confirm button until it can no longer
            # be found (i.e. the upload dialog has closed)
            while True:
                try:
                    enter = driver.find_element_by_xpath('//*[@id="pgc-upload-img"]/div/div[2]/div[2]/button[1]')
                    enter.click()
                    continue
                except Exception as e:
                    break
            time.sleep(0.5)
            print('success to upload data')
            '''
            编辑数据
            '''
            # --- part 2 (disabled): per-image product links/descriptions ---
            '''
            for i in range(0, len(urlLinks[:10])):
                if urlLinks[i] != '':
                    good_info = driver.find_element_by_xpath('//*[@id="root"]/div/div[2]/div[2]/div/div[2]/div[1]/div/div[%s]/div[4]/span' %(i+1))
                    time.sleep(1)
                    driver.execute_script(self.js, good_info)
                    good_info.click()
                    # need good_link
                    # linkUrls = self.getInfoFromExcel(times)
                    good_link = driver.find_element_by_xpath('//*[@id="pgc-add-product"]/div[2]/div/span[1]/input')
                    good_link.send_keys(urlLinks[i])
                    get_info_btn = driver.find_element_by_xpath('//*[@id="pgc-add-product"]/div[2]/div/span[2]')
                    driver.execute_script(self.js, get_info_btn)
                    get_info_btn.click()
                    time.sleep(0.5)
                    # need descrition
                    refermence = driver.find_element_by_xpath('//*[@id="pgc-add-product"]/div[3]/div[2]/div[3]/span/textarea')
                    refermence.send_keys(descriptions[i])
                    time.sleep(1.5)
                    btn = driver.find_element_by_xpath('//*[@id="pgc-add-product"]/div[4]/button[1]')
                    driver.execute_script(self.js, btn)
                    btn.click()
                else:
                    refermence_txt = driver.find_element_by_xpath('//*[@id="root"]/div/div[2]/div[2]/div/div[2]/div[1]/div/div[1]/div[2]/textarea')
                    driver.execute_script(self.js, refermence_txt)
                    refermence_txt.send_keys(descriptions[i])
            '''
            # need title
            # this round's title lives in row times+1 of TtExcel/1.xls
            # (row 0 is never read)
            data = xlrd.open_workbook('TtExcel/1.xls')
            table = data.sheets()[0]
            nrows = table.nrows
            title_txt = table.row_values(times+1)[0]
            title = driver.find_element_by_xpath('//*[@id="root"]/div/div[2]/div[2]/div/div[2]/div[3]/div[2]/div/input')
            title.send_keys(title_txt)
            auto = driver.find_element_by_xpath('//*[@id="root"]/div/div[2]/div[2]/div/div[2]/div[4]/div[2]/div/div[1]/div/label[3]/div/input')
            driver.execute_script(self.js, auto)
            # NOTE(review): pressing SPACE presumably toggles a checkbox-style
            # input (e.g. an "auto" option) — confirm against the page.
            auto.send_keys(Keys.SPACE)
            time.sleep(0.5)
            # save = driver.find_element_by_link_text('#root > div > div.layout > div.stage > div > div.content-wrapper > div.edit-cell.figure-footer > div.edit-input > div:nth-child(2) > div')
            # //*[@id="root"]/div/div[2]/div[2]/div/div[2]/div[6]/div[2]/div[2]
            save = driver.find_element_by_xpath('//*[@id="root"]/div/div[2]/div[2]/div/div[2]/div[6]/div[2]/div[2]')
            driver.execute_script(self.js, save)
            save.click()
            print('success to write data')
            # if a confirmation dialog appeared (and was clicked), finish the
            # publish flow in the newly opened browser window
            b = self.elementExist(driver)
            if b == True:
                handles = driver.window_handles
                for handle in handles:
                    if handle != current_handle:
                        driver.switch_to_window(handle)
                        break
                driver.implicitly_wait(5)
                save = driver.find_element_by_xpath('//*[@id="root"]/div/div[2]/div[2]/div/div[2]/div[6]/div[2]/div[2]/div')
                driver.execute_script(self.js, save)
                time.sleep(1)
                save.click()
            times+= 1
            if times == 4:
                # all four rounds done: delete working directories, close browser
                self.rmdir()
                time.sleep(5)
                driver.quit()
            time.sleep(2)
def rmdir(self):
shutil.rmtree('excel')
shutil.rmtree('img')
shutil.rmtree('TtExcel')
# shutil.rmtree('iImg')
'''
登陆,获取driver
'''
def Login(self):
# dcap = dict(DesiredCapabilities.PHANTOMJS)
# dcap["phantomjs.page.settings.userAgent"] = ("Mozilla/5.0 (Windows NT 6.0) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/15.0.874.120 Safari/535.2 ")
# s = goods_Scrapy.goods_Scrapy()
# s.processUrl()
driver = webdriver.Chrome(executable_path = 'webdriver/chromedriver.exe')
driver.get(self.url)
mail_phone = driver.find_element_by_xpath('/html/body/div/div/div[2]/div/div/div/ul/li[1]')
driver.execute_script(self.js, mail_phone)
time.sleep(2)
mail_phone.click()
# mobile = driver.find_element_by_id('mobile')
# imgCode = driver.find_element_by_xpath('//img[@class="y-right captcha"]')
# submitBtn = driver.find_elements_by_name('submitBtn')
# captcha1 = driver.find_element_by_id('captcha1')
# code = driver.find_element_by_id('code')
# mobile.send_keys('17727759494')
# mobile.send_keys('17727759494')
# imgUrl = imgCode.get_attribute('src')
# icodeUrl = self.iCode(imgUrl)
# self.iCode(imgUrl)
# captcha1.send_keys(codeN)
# return icodeUrl
return driver
##
# phoneCode = self.phoneCode(driver)
# code.send_keys(phoneCode)
# time.sleep(1.5)
# submitBtn[0].click()
# self.Work(driver)
# time.sleep(10)
# driver.quit()
# def sss(self):
# url = 'https://sso.toutiao.com/login/?service=https://mp.toutiao.com/sso_confirm/?redirect_url=JTJG'
# driver = webdriver.Chrome(executable_path = 'C:/Users/Administrator/Desktop/python/webdriver/chromedriver.exe')
# driver.get(url)
# mail_phone = driver.find_element_by_xpath('/html/body/div/div/div[2]/div/div/div/ul/li[7]')
# print(typemail_phone)
if __name__ == '__main__':
s = scrapy()
s.Login()
# goodList = os.listdir('img')
# newGoodList = []
# for i in goodList:
# a = os.path.abspath('img/%s' %(i))
# newGoodList.append(a)
# print(newGoodList) | [
"343580702@qq.com"
] | 343580702@qq.com |
997d741b76dbdc49051556bae1fafd9970539a60 | 92f28bf7f715a4afe9911962deae6ee1997a26a2 | /Lexer.py | 2c85a6ec7acab460e57120946310ec3bcb936da0 | [] | no_license | JeedyTV/vsopcompiler | 938010af94100877ad5fe18b1dcf7a0c7ca209ff | 03f27641f18aa7a7a94730bd457dec49e920ac59 | refs/heads/main | 2023-03-26T11:07:02.929704 | 2021-03-22T12:31:13 | 2021-03-22T12:31:13 | 338,573,934 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 12,618 | py | from sly import Lexer
import sys
import re
class CalcLexer(Lexer):
def __init__(self,sourceCode,sourceCodeName):
self.sourceCode = sourceCode
self.sourceCodeName = sourceCodeName
self.single_line_com = False #state of the lexer
self.multiple_line_com = list() #state of the lexer
self.string_unfinish = False
#define the tokens accepted by the lexer
tokens = {STRING_LITERAL, INTEGER_LITERAL, TYPE_IDENTIFIER, OBJECT_IDENTIFIER}
#handle string correctly
def handleStringLit(self,value,line,column):
#in case or error
error_line = value.count('\\\n', 0, len(value))
tab_line_fine_handle = value.split("\\\n")
# Removing \\\n, \t, \b and \r from the original string
value = re.sub(r'(\\\n([\t\b\r ])*|\\\n)', '', value)
# Checking the validity of the string
if '\n' in value:
for bad in tab_line_fine_handle:
if '\n' in bad:
erro_column = bad.rfind('\n', 0, len(bad)) +1
self.print_error(self.sourceCodeName,line+error_line,erro_column,"String contains line feed without backslash")
if value.rfind('\\', 0, len(value))+2 == len(value):
self.print_error(self.sourceCodeName,line,column+value.rfind('\\', 0, len(value)),"String contains backslash without linefeed or escape character")
accept = ['b','t','n','r','"',r'\\','x']
if ('\\' in value) :
after = value[value.rfind('\\', 0, len(value))+1]
if after not in accept:
self.print_error(self.sourceCodeName,line,column+value.rfind('\\', 0, len(value)),"String contains backslash without linefeed or escape character")
if '\\x00' in value:
self.print_error(self.sourceCodeName,line,column+value.rfind('\\x00', 0, len(value)),"String contains null character")
# Replacing escape characters into hexadecimals
value = value.replace(r'\b', r'\x08')
value = value.replace(r'\t', r'\x09')
value = value.replace(r'\n', r'\x0a')
value = value.replace(r'\r', r'\x0d')
value = value.replace(r'\"', r'\x22')
value = value.replace(r'\\', r'\x5c')
for c in value:
# Replacing characters that aren't in the range(32, 127)...
if not(32 <= ord(c) and ord(c) <= 127):
hexa = hex(ord(c))[2::]
# ... into hexadecimals
if len(hexa) == 1:
value = value.replace(c, '\\x0'+str(hexa))
else:
value = value.replace(c, '\\x'+str(hexa))
# Getting "\" posistion(s) in the string
escaped_index = [m.start() for m in re.finditer(r'\\', value)]
for index in escaped_index:
# Checking the char after '\'
if value[index + 1] == 'x':
hex_value = value[index + 2:index + 4]
try:
int('0x'+str(hex_value), 0)
except ValueError:
self.print_error(self.sourceCodeName,line,column+index,"Not a valid hexadecimal expression in back of \\")
break
else:
self.print_error(self.sourceCodeName,line,column+index,"String contains backslash without any valid hexadecimal number or escape character")
return value
def in_line_comment(self):
#renvoie true if single comment
return self.single_line_com
def in_multiple_comment(self):
#renvoie true if multiple comments
return not (len(self.multiple_line_com) == 0)
def in_comment(self):
#renvoie true if in comment
return self.in_line_comment() or self.in_multiple_comment()
@_(r'\/\/')
def COM(self, t):
if not self.in_comment(): #if we are not in single comment state pass in single comment state
self.single_line_com = True
@_(r'\(\*')
def COMIN(self, t):
if not self.in_line_comment(): #if we are not in single comment state pass in multiple-comment state
self.multiple_line_com.append((self.lineno,self.find_column(self.sourceCode,t))) # to print error correctly
@_(r'\*\)')
def COMOUT(self, t):
t.type = "COMOUT " +str(len(self.multiple_line_com)) #remind the nested level
if not self.in_line_comment(): #if we are not in single comment state handle properly
if not self.in_multiple_comment(): #if we are not in multiple comment state it's an error
self.print_error(self.sourceCodeName,self.lineno,self.find_column(self.sourceCode,t),"*) doesn't match to a (*.")
else:
self.multiple_line_com.pop()
# String containing ignored characters
ignore_tab = '\t'
ignore = ' '
@_(r'!|`')
def EX(self,t):
if not (self.in_comment()):
self.print_error(self.sourceCodeName,self.lineno,self.find_column(self.sourceCode,t),"Bad character : "+str(t.value[0]))
@_(r'\"(?:[^\"\\]|\\.|\\\n)*\"')
def STRING_LITERAL(self,t):
self.lineno += t.value.count("\\\n")
t.value = self.handleStringLit(t.value,t.lineno,self.find_column(self.sourceCode,t))
if not self.in_comment(): #if in comment state droped the token
if not self.string_unfinish: #if an string unfinish error is raised do not create token
return t
@_(r'\"(?:[^\"\\\(\*\*\)\n]|\\.|\\\n)*\n',
r'\"(?:[^\"\\\(\*\*\)]|\\.|\\\n)*')
def STRING_LITERAL_UNFINISH(self,t):
in_comment = self.in_comment()
# update line if needed
self.lineno += t.value.count("\\\n")
if t.value.endswith("\n") > 0:
self.lineno += 1
if not self.in_multiple_comment():
self.single_line_com = False
if not in_comment: #if in comment state droped the token
self.string_unfinish = True #if it's an string unfinish change state of the lexer
self.handleStringLit(t.value,t.lineno,self.find_column(self.sourceCode,t))
self.print_error(self.sourceCodeName,t.lineno,self.find_column(self.sourceCode,t),"EOF in a string")
@_(r'[a-z][a-zA-Z0-9_]*')
def OBJECT_IDENTIFIER(self,t):
if(t.value == 'and'):
t.type = "AND"
if(t.value == 'bool'):
t.type = "BOOL"
if(t.value == 'class'):
t.type = "CLASS"
if(t.value == 'do'):
t.type = "DO"
if(t.value == 'else'):
t.type = "ELSE"
if(t.value == 'extends'):
t.type = "EXTENDS"
if(t.value == 'false'):
t.type = "FALSE"
if(t.value == 'if'):
t.type = "IF"
if(t.value == 'int32'):
t.type = "INT32"
if(t.value == 'in'):
t.type = "IN"
if(t.value == 'isnull'):
t.type = "ISNULL"
if(t.value == 'let'):
t.type = "LET"
if(t.value == 'new'):
t.type = "NEW"
if(t.value == 'not'):
t.type = "NOT"
if(t.value == 'self'):
t.type = "SELF"
if(t.value == 'string'):
t.type = "STRING"
if(t.value == 'then'):
t.type = "THEN"
if(t.value == 'true'):
t.type = "TRUE"
if(t.value == 'unit'):
t.type = "UNIT"
if(t.value == 'while'):
t.type = "WHILE"
"""
if in comment state droped the token or
if an string unfinish error is raised do not create token
"""
if not self.in_comment() and not self.string_unfinish:
return t
@_(r'[A-Z][a-zA-Z0-9_]*')
def TYPE_IDENTIFIER(self,t):
"""
if in comment state droped the token or
if an string unfinish error is raised do not create token
"""
if not self.in_comment() and not self.string_unfinish:
return t
@_(r'0x[0-9a-zA-Z]+',
r'[0-9][0-9a-zA-Z]*')
def INTEGER_LITERAL(self, t):
in_comment = self.in_comment()
if t.value.startswith('0x'):
try :
t.value = int(t.value[2:], 16)
except ValueError:
if not in_comment:
self.print_error(self.sourceCodeName,self.lineno,self.find_column(self.sourceCode,t),"Can't convert {"+str(t.value)+"} in hexadecimal.")
else:
try:
t.value = int(t.value)
except ValueError:
if not in_comment:
self.print_error(self.sourceCodeName,self.lineno,self.find_column(self.sourceCode,t),"Can't convert {"+str(t.value)+"} in integer.")
"""
if in comment state droped the token or
if an string unfinish error is raised do not create token
"""
if not in_comment and not self.string_unfinish:
return t
@_(r'<=|<-|\{|\}|\(|\)|:|;|,|\+|-|\*|/|\^|\.|=|<')
def OP(self,t):
if(t.value == '<=') :
t.type = "LOWER_EQUAL"
if(t.value == '<-') :
t.type = "ASSIGN"
if(t.value =='{'):
t.type = "LBRACE"
if(t.value =='}'):
t.type = "RBRACE"
if(t.value =='('):
t.type = "LPAR"
if(t.value ==')'):
t.type = "RPAR"
if(t.value ==':'):
t.type = "COLON"
if(t.value ==';'):
t.type = "SEMICOLON"
if(t.value ==','):
t.type = "COMMA"
if(t.value =='+'):
t.type = "PLUS"
if(t.value =='-'):
t.type = "MINUS"
if(t.value =='*'):
t.type = "TIMES"
if(t.value =='/'):
t.type = "DIV"
if(t.value =='^'):
t.type = "POW"
if(t.value =='.'):
t.type = "DOT"
if(t.value =='='):
t.type = "EQUAL"
if(t.value =='<'):
t.type = "LOWER"
"""
if in comment state droped the token or
if an string unfinish error is raised do not create token
"""
if not self.in_comment() and not self.string_unfinish:
return t
def error(self, t):
self.print_error(self.sourceCodeName,self.lineno,self.find_column(self.sourceCode,t),"Bad character : "+str(t.value[0]))
@_(r'\n+')
def ignore_newline(self, t):
#update line of tokens
self.lineno += t.value.count('\n')
#quit single line mode if needed i.e if we are not in multiple line state
if not self.in_multiple_comment():
self.single_line_com = False
def find_column(self,text, token):
last_cr = text.rfind('\n', 0, token.index) + 1
column = (token.index - last_cr) + 1
return column
def print_error(self,filename,line,column,description):
#Print the error on the stderr in the good format
sys.exit('{}:{}:{}: lexical error: {}'.format(filename, line, column, description))
def make_token(sourceCode,sourceCodeName):
"""
make tokens of the source code and print on stdout print error if any on stderr
"""
lexer = CalcLexer(sourceCode,sourceCodeName)
for token in lexer.tokenize(sourceCode):
token_line = token.lineno
token_column = lexer.find_column(sourceCode,token)
token_class = token.type.replace("_", "-").lower()
if (token.type == "TYPE_IDENTIFIER" or token.type == "OBJECT_IDENTIFIER" or
token.type == "INTEGER_LITERAL" or token.type == "STRING_LITERAL"):
token_value = token.value
else :
token_value = None
if token_value == None:
print(token_line,token_column,token_class,sep=",")
pass
else :
print(token_line,token_column,token_class,token_value,sep=",")
pass
#chech wether the comment trigger an error or not
if(len(lexer.multiple_line_com) != 0):
lexer.print_error(sourceCodeName,lexer.multiple_line_com[-1][0],lexer.multiple_line_com[-1][1],"EOF in multiple line comment")
| [
"jdmukolonga@student.uliege.be"
] | jdmukolonga@student.uliege.be |
c63d9df36c8d3bccc10ed3002ed80b48a0cead8a | 3ad3c58bc289fa1a556db153b55ae2bbdcec893f | /src/allocation/entrypoints/flask_app.py | dcf9a97ecc83fabb074c9f99b510c310080c1f7e | [] | no_license | vic3r/python-AP | 7f25b9dc2523d561f53e8f03e1d8ac5820edce86 | 5c56c7a3e93e9ee0a56564efbbff449cb4dafc95 | refs/heads/master | 2023-02-10T10:43:31.728232 | 2021-01-02T01:22:35 | 2021-01-02T01:22:35 | 288,585,808 | 0 | 0 | null | 2021-01-02T01:22:36 | 2020-08-18T23:29:05 | Python | UTF-8 | Python | false | false | 1,129 | py | from datetime import datetime
from flask import Flask, jsonify, request
from allocation.domain import commands
from allocation.service_layer.handlers import InvalidSku
from allocation import bootstrap, views
app = Flask(__name__)
bus = bootstrap.bootstrap()
@app.route("/add_batch", methods=['POST'])
def add_batch():
eta = request.json['eta']
if eta is not None:
eta = datetime.fromisoformat(eta).date()
cmd = commands.CreateBatch(
request.json['ref'], request.json['sku'], request.json['qty'], eta,
)
bus.handle(cmd)
return 'OK', 201
@app.route("/allocate", methods=['POST'])
def allocate_endpoint():
try:
cmd = commands.Allocate(
request.json['orderid'], request.json['sku'], request.json['qty'],
)
bus.handle(cmd)
except InvalidSku as e:
return jsonify({'message': str(e)}), 400
return 'OK', 202
@app.route("/allocations/<orderid>", methods=['GET'])
def allocations_view_endpoint(orderid):
result = views.allocations(orderid, bus.uow)
if not result:
return 'not found', 404
return jsonify(result), 200 | [
"victor.g64r@gmail.com"
] | victor.g64r@gmail.com |
f3ba80aea7d10de31d95d357f068ca9979a88e6f | 11923c207e27752127a39cb042af26c7809da910 | /generator3.py | 7c939e85cff301000006e8c36b97db39023c6303 | [] | no_license | afsana1210/python-learning | a8aea5a66960c5bd88c48b85225aab0da7dd3603 | df72b786fd133142a69f68edc6b2867f13fee6a6 | refs/heads/master | 2020-05-15T22:17:52.142594 | 2019-05-23T10:06:24 | 2019-05-23T10:06:24 | 182,523,690 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 248 | py | s='hello'
for letter in s: #print all the given vlaue
print letter
#iter() function print the one vlaue at a time
s_iter=iter(s)
print next(s_iter)
print next(s_iter)
print next(s_iter)
print next(s_iter)
print next(s_iter)
print next(s_iter) | [
"afsanaansari1210@gmail.com"
] | afsanaansari1210@gmail.com |
b428cfda4a16708b16e4e47233744ebc92cc5103 | a0b1954a9cf89c2d601b885657cb9f8460e7dace | /py/test_jla.py | c044c5fe9795bdf98583ce20925d3c5b9519045b | [] | no_license | fhazenbe/lsst-calibration-impact | 3e06b08dbbf2b028c14cee52362f713d74eb9b2b | 55a5a11395bea73284e039bdc45db08606abd77e | refs/heads/master | 2020-03-24T18:05:14.856371 | 2019-07-01T15:01:07 | 2019-07-01T15:01:07 | 142,882,291 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 27,594 | py | # -*- Encoding: utf-8 -*-
import numpy as np
from croaks import NTuple
import os
from saunerie import bspline
# import example
import matplotlib.pyplot as plt
from saunerie.instruments import InstrumentModel
from scipy.sparse import csr_matrix, coo_matrix
from saunerie.interpolation import Func1D
import saunerie.constants as constants
from saunerie import saltpath, salt2
from saunerie.fitparameters import FitParameters
from saunerie.stellarlibs import FilterSet, ALIGN_WITH_INTEG_FLUX
import scipy.sparse as sparse
from scipy.interpolate import interp1d
from scikits.sparse.cholmod import cholesky
from pycosmo import cosmo
import scipy
import time
from saunerie.linearmodels import SparseSystem
import pyfits
from saunerie.spectrum import Spectrum
from saunerie.instruments import FilterWheel
from glob import glob
###
# Choix du modèle pour les performances
###
# model_type = "wwa"
model_type = "lambdaCDM"
if model_type == "wwa":
mod = cosmo.Cosmow0wa()
else:
mod = cosmo.CosmoLambda()
###
# Extraction du spectre SALT2
###
class NotKeptError(Exception):
pass
print 'Starting --- '+time.ctime()
m = InstrumentModel("LSSTPG")
cfg = saltpath.read_card_file(saltpath.fitmodel_filename)
salt2_model_path = saltpath.SALTPATH + os.sep + cfg['SALT2']
M0_filename = salt2_model_path + os.sep + 'salt2_template_0.dat'
nt = NTuple.fromtxt(M0_filename)
idx = nt['f0'] == 0
gx = np.linspace(nt['f1'][idx].min()-1e-10, nt['f1'][idx].max()+1e-10, 100)
base = bspline.BSpline(gx, order=4)
p = base.linear_fit(nt['f1'][idx], nt['f2'][idx])
color_disp_fit = np.array([ -1.33846154e-18, 3.98286713e-14, -4.66894522e-10,
2.69988432e-06, -7.71355752e-03, 8.74507867e+00])
# def color_disp_func(l):
# """ SALT2 colour dispersion as a function of wavelength (from Guy et al. 2010, Fig 8)
# """
# disps = np.zeros(len(l))
# disps[l<3000] = 2e-1
# disps[(3000 <= l) & (l < 4000)] = 2e-1 - (1.8e-1 / 1000. * (l[(3000 <= l) & (l < 4000)]-3000))
# disps[(4000 <= l) & (l < 5500)] = 2e-2
# disps[(5500 <= l) & (l < 6250)] = 2e-2 + (1.5e-2 / 750. * (l[(5500 <= l) & (l < 6250)]-5500))
# disps[6250 <= l] = 3.5e-2
# return disps
color_dispersion_source = NTuple.fromtxt('/home/fhazenbe/software/snfit_data/snfit_data/salt2-4/salt2_color_dispersion.dat')
color_disp_func = scipy.interpolate.interp1d(color_dispersion_source['l'], color_dispersion_source['s'])
### Préparation de la loi de couleur
color_law_params = np.array([ 0.01076587, -0.0856708 , 0.38838263, -1.3387273 , -0.02079356])
beta = 3.
###
dt = [('z', float), ('c', float), ('X0', float),
('X1', float), ('dL', float),
('band', 'S20'), ('#SN', int),
('A', float),
('l_eff', float),
('zp', float),
('snr', float)]
def find_amplitude(snova, band, k='LSSTPG::'):
band = k + band
amplitude = snova.lcmodel(snova.sn, [snova.sn.DayMax], [band])[0]
return amplitude
def find_snr(snova, band, k='LSSTPG::'):
band = k + band
snr = snova.amplitude_snr(band)
return snr
def rescale_mag(fi, dL, zp, z):
A_hc = 50341170
M = -2.5*np.log10(fi) - 30 + 2.5 * np.log10(ALIGN_WITH_INTEG_FLUX) - zp + 2.5*np.log10(A_hc) - 2.5*np.log10(1+z)
return M
def get_lambda_f(band, inst):
filtre_trans = inst.get_transmission(band)
x_min = filtre_trans.x_min
x_max = filtre_trans.x_max
step = 1.
lambdas = np.arange(x_min, x_max+step, step)
l = Func1D(lambda x: x)
num = (l**2*filtre_trans).integrate(x_min, x_max)
den = (l*filtre_trans).integrate(x_min, x_max)
return num/den
### Pas un vrai Zp, : -2.5 log(\int T(\lambda) \lambda d\lambda))
def get_zp(band, inst):
it = 0.1
lambdas = np.arange(2000, 20000+it, it)
filtre = inst.get_transmission(band)
return -2.5*np.log10((lambdas*filtre(lambdas)).sum()*it)
##### Calcul du NTuple des flux interpolés à DayMax
survey_to_instrument = {'SDSS' : 'SDSS',
'SNLS' : 'MEGACAMPSF',
'CSP' : 'SWOPE2',
'CfAI' : '4SHOOTER2',
'CfAII' : '4SHOOTER2',
'CfAIII' : 'KEPLERCAM',
}
def from_mb(ligne):
lambdas = np.linspace(3000, 9000, 600)
spec = Spectrum(lambdas, base.eval(lambdas)*p)
try:
inst_name = survey_to_instrument[ligne['SURVEY']]
except NotKeptError:
inst_name = 'STANDARD'
if ligne['SURVEY'] == 'RiessHST':
inst_name = np.random.choice(['ACSWF', 'NICMOS2'])
inst = InstrumentModel(inst_name)
bessel = InstrumentModel('STANDARD')
B = bessel.get_transmission('B')
spec.y /= spec.IntegFlux(B)
bands = []
mags = []
snr = []
all_bands = FilterWheel(inst.path, 'FilterWheel').range.viewkeys()
for band in all_bands:
nom_complet = inst_name+'_'+band
if nom_complet in ['4SHOOTER2_U', 'MEGACAMPSF_u', 'MEGACAMPSF_y', 'SWOPE2_V1', 'SWOPE2_V2', 'SWOPE2_Y']:
continue
filtre = inst.get_transmission(band)
mags += [-2.5*np.log10(spec.IntegFlux(filtre, z=ligne['z'])*10**(-0.4*ligne['msb']))]
bands += [nom_complet]
snr += [1./np.sqrt(ligne['msbe']**2-ligne['sigma_coh']**2)]
return np.array(bands), np.array(mags), np.array(snr)/np.sqrt(len(bands)), inst
#return np.array(bands), np.array(mags), np.array(snr), inst
def from_salt_results(ligne, bands):
lambdas = np.linspace(3000, 9000, 600)
spec = Spectrum(lambdas, base.eval(lambdas)*p)
bessel = InstrumentModel('STANDARD')
B = bessel.get_transmission('B')
spec.y /= spec.IntegFlux(B)
mags = []
for band in bands:
inst_name, _band = band.split('_')
inst = InstrumentModel(inst_name)
filtre = inst.get_transmission(_band)
mags += [-2.5*np.log10(spec.IntegFlux(filtre, z=ligne['z'])*10**(-0.4*ligne['msb']))]
return np.array(mags)
#### Essai d'harmonisation
def open_lc(fichier, tableau):
idx_name = None
for i, t_name in enumerate(tableau['name']):
if t_name not in fichier:
continue
else:
idx_name = i
if idx_name is None:
raise NotKeptError('No entry for %s found in the salt2 outpu' % (fichier))
lc = NTuple.fromtxt(fichier)
bands = np.array([band.replace('::', '_') for band in np.unique(lc['Filter'])])
# if lc.keys['SURVEY'] == 'SDSS':
# try:
# sn_name = lc.keys['SURVEY'] + str(int(lc.keys['SN']))
# except ValueError:
# sn_name = str(lc.keys['SN'])
# else:
# sn_name = str(lc.keys['SN'])
_mags = from_salt_results(tableau[idx_name], bands)
snr, mags, zps = np.zeros(len(bands)), np.zeros(len(bands)), np.zeros(len(bands)) #, dtype=np.dtype([(a, float) for a in bands]))
for i, band in enumerate(bands):
idx = lc['Filter'] == band.replace('_', '::')
wi = 1./lc[idx]['Fluxerr']**2
fi2 = lc[idx]['Flux']**2
snr[i] = np.sqrt(np.sum(wi * fi2))
mags[i] = _mags[bands==band]
zps[i] = np.mean(lc[idx]['ZP'])
return bands, mags, snr, zps, tableau[idx_name]
entree = NTuple.fromtxt('selected_sn_complete.list')
# fichier = 'data_jla.nxt' ### Ne prend pas de courbes de lumière
fichier = 'data_test_lc.nxt' ### Prend des courbes de lumière et sommes les SNR
# lcs_files = glob('/home/betoule/cosmo_jla1/data/lowz-2013-06-20_schlafly/*.list') + glob('/home/betoule/cosmo_jla1/data/sdss_2013-06-20_schlafly/SDSS*.DAT') + glob('/home/betoule/cosmo_jla1/data/snls3_recalibrated_lc_v2_5/lc*.list')
lcs_files = glob('/data/betoule/jla1/preproc_GMG5BWI/data/lc*.list')
if os.path.exists(fichier):
print 'Already have a data file --- '+time.ctime()
data_ok = NTuple.fromfile(fichier).view(np.ndarray)
else:
data = np.zeros(720*10, dtype=dt)
k = 0
ii = 0
print 'Creating data ntuple (needs SALT2 redo) --- '+time.ctime()
for i, lc_file in enumerate(lcs_files):
try:
bands, mags, snr, zps, lc = open_lc(lc_file, entree)
except NotKeptError:
print "No line found for %s in %s" % (lc_file, 'selected_sn_complete.list')
continue
for j in range(len(bands)):
instru = InstrumentModel(bands[j].split('_')[0])
# data[k+j] = (lc['z'], lc['c'], lc['x0'], lc['x1'], np.nan, bands[j], i,
# 10**(-0.4*mags[j]), get_lambda_f(bands[j].split('::')[-1], instru), get_zp(bands[j].split('::')[-1], instru), snr[j])
data[k+j] = (lc['z'], lc['c'], lc['x0'], lc['x1'], mod.dL(lc['z']), bands[j], ii,
10**(-0.4*mags[j]), get_lambda_f(bands[j].split('_')[-1], instru), zps[j], snr[j])
k += len(bands)
ii += 1
if (i+1)%100 == 0:
print 'Data computed for %d supernovae' % (i+1)
data['l_eff'] = 1./(1.+data['z'])*data['l_eff']
data = data[data['band'] != '']
data_ok = data[data['snr'] != 0]
data_ok = data_ok[data_ok['A'] > 0]
data_ok.view(NTuple).tofile(fichier)
############################################################
################################################################
def plot_sn(**kwargs):
plt.plot(data_ok['l_eff'], rescale_mag(data_ok['A'], data_ok['dL'], data_ok['zp'], data_ok['z']), **kwargs)
A_hc = 50341170.
###########################
###############
#### FIT ######
###############
data_ok = np.sort(data_ok, order='z')
### Calcul des dérivées du spectre de HD165459 (pour les delta_lambda)
standard = pyfits.getdata('p330e_stisnic_008.fits')
spec = Spectrum(standard['WAVELENGTH'], standard['FLUX'])
spec_2 = Spectrum(standard['WAVELENGTH']-1, standard['FLUX'])
#####################################################################
leff = np.array(data_ok['l_eff'])
n_SN = len(np.unique(data_ok['#SN']))
X0 = 76516964.662612781
n = 20
lambda_grid = np.linspace(np.min(leff)-10, np.max(leff)+10, n)
base_spectre = bspline.BSpline(lambda_grid, order=3)
ext = pyfits.open('/home/betoule/cosmo_jla1/results/snlssdss_v9/smoothed_derivatives.fits')
params_names = ext[2].data['CalPar']
zp_list = list(params_names[:37])
dl_list = list(params_names[37:])
if model_type == "wwa":
par_list = [('color_law', len(color_law_params)), 'Omega_m', 'Omega_k', 'w', 'wa', 'H0', 'Omega_b_h2', 'beta'] + [('theta_salt', n+1), ('mB', n_SN), ('c', n_SN)] + zp_list + dl_list
else:
par_list = [('color_law', len(color_law_params)), 'Omega_m', 'Omega_lambda', 'w', 'H0', 'Omega_b_h2', 'beta'] + [('theta_salt', n+1), ('mB', n_SN), ('c', n_SN)] + zp_list + dl_list
params = FitParameters(par_list)
#fixed_pars = dl_list + zp_list
#fixed_pars = ['theta_salt']
fixed_pars = []
par_names = []
for ppp in par_list:
if type(ppp) == tuple:
a = ppp[0]
else:
a = ppp
if a in fixed_pars:
continue
else:
par_names += [a]
from saunerie.indextools import make_index
snindex = make_index(data_ok['#SN'])
snindex = [sn[0] for sn in snindex]
params['mB'] = -2.5*np.log10(X0)
params['c'] = data_ok[snindex]['c']
params['color_law'] = color_law_params
params['Omega_m'] = mod.Omega_m
if model_type == 'wwa':
params['w'], params['wa'] = mod.w, mod.wa
params['Omega_k'] = mod.Omega_k
else:
params['Omega_lambda'] = mod.Omega_lambda
params['w'] = mod.w
params['H0'], params['Omega_b_h2'] = mod.H0, mod.Omega_b_h2
params['beta'] = 3.
lb = 4343.78
lv = 5462.1
def transfo(lam):
ret = (lam-lb)/(lv-lb)
return ret
zpb = np.zeros(len(data_ok))
for band1 in np.unique(data_ok['band']):
band = band1.replace('::', '_')
zpb[data_ok['band']==band] = params['ZP_'+band].free
flx = -2.5*np.log10(data_ok['A']) - (params['mB'].free[data_ok['#SN']] + mod.mu(data_ok['z']) + zpb + 44 + 2.5*np.log10(1+data_ok['z']) + data_ok['zp'] - 2.5*np.log10(A_hc) + params['c'].free[data_ok['#SN']]*(np.polyval(params['color_law'].free, transfo(data_ok['l_eff']))+params['beta'].free))
spectrum_fit = base_spectre.linear_fit(np.array(data_ok['l_eff']), np.array(flx))
params['theta_salt'] = spectrum_fit
for par in fixed_pars:
params.fix(par)
#### MODEL ####
### m_{b} = P(\lambda_b / (1+z)) - 2.5log(X_0) + 30 + \mu
### - 2.5log( \int T_b(\lambda) \lambda d\lambda) + 2.5log(1+z) - 2.5log(A_hc) + \Delta ZP_b + c*Cl(\lambda_b) + \beta*c
###############
#### Fit cosmo
from pycosmo import priors
class Model(object):
def __init__(self, d, p, cosmo, spline_base, intrinsic_dispersion=0.10):
global model_type
self.data = d
self.n_sn = len(np.unique(d['#SN']))
self.spline_base = spline_base
self.params = p
self.model_cosmo = cosmo
self.free_cosmo_params = []
self.cosmo_free_idx = np.zeros(len(self.model_cosmo.param_names))
for i, par in enumerate(self.model_cosmo.param_names):
if self.params[par].indexof() != -1:
self.free_cosmo_params += [par]
self.cosmo_free_idx[i] = 1
self.intrinsic_dispersion = intrinsic_dispersion
self.color_law_degree = len(p['color_law'].free)-1
self.impacted = False
self.lines, self.cols, self.J_dat = np.zeros(0), np.zeros(0), np.zeros(0)
self.cal_C = pyfits.getdata('/home/betoule/cosmo_jla1/results/snlssdss_v9/CalCov.fits')
#self.cal_C = self.cal_C[:37, :37]
if self.cal_C.shape[0] != 37:
for ite in np.arange(62, 69):
self.cal_C[ite, ite] += 1e-4
self.cal_C[37:, :] *=10
self.cal_C[:, 37:] *=10
self.jambon = []
def new_cosmo_model(self, p):
if model_type == "wwa":
model = cosmo.Cosmow0wa()
model.Omega_k = p['Omega_k'].free
model.wa = p['wa'].free
else:
model = cosmo.CosmoLambda()
model.Omega_lambda = p['Omega_lambda'].free
model.Omega_m = p['Omega_m'].free
model.w = p['w'].free
model.H0 = p['H0'].free
model.Omega_b_h2 = p['Omega_b_h2'].free
return model
def __call__(self, p):
zps = np.zeros(len(self.data))
momo = self.new_cosmo_model(p)
for band in np.unique(self.data['band']):
zps[self.data['band']==band] = p['zp'+band].free
return p['mB'].free[self.data['#SN']] + momo.mu(self.data['z']) + np.dot(self.spline_base.eval(np.array(self.data['l_eff'])).toarray(), p['theta_salt'].free) + zps + 20 + 2.5*np.log10(1+data_ok['z']) + data_ok['zp'] - 2.5*np.log10(A_hc) + p['c'].free[self.data['#SN']]*(np.polyval(p['color_law'].free, transfo(self.data['l_eff']))+p['beta'].free)
def spec(self, p):
zps = np.zeros(len(self.data))
momo = self.new_cosmo_model(p)
return -2.5*np.log10(data_ok['A']) - self(p) + np.dot(self.spline_base.eval(np.array(self.data['l_eff'])).toarray(), p['theta_salt'].free)
def update_lines(self, _lines, _cols, dat, jac=True):
l, c = np.meshgrid(_lines, _cols)
lines, cols = l.flatten(), c.flatten()
idx0 = dat != 0
if np.iterable(dat):
lines, cols = lines[idx0], cols[idx0]
dat = dat[idx0]
if len(lines) != len(cols):
raise ValueError('Lines and columns have different lengths')
if not np.iterable(dat):
dat = dat * np.ones(len(lines))
if len(dat) != len(lines):
raise ValueError('Indexes and data have different lengths')
if jac:
self.lines = np.hstack((self.lines, lines))
self.cols = np.hstack((self.cols, cols))
self.J_dat = np.hstack((self.J_dat, dat))
else:
self.Clines = np.hstack((self.Clines, lines))
self.Ccols = np.hstack((self.Ccols, cols))
self.C_dat = np.hstack((self.C_dat, dat))
def compute_derivatives(self, param, index=None, epsi=1e-4, graph=False):
p1 = self.params
p2 = p1.copy()
h = np.zeros_like(p1.free)
if index is None:
p2[param] = p1[param].free + epsi
h[self.params[param].indexof()] = epsi
else:
p2[param][index] = p1[param].free[index] + epsi
h[self.params[param].indexof(index)] = epsi
Jp = np.dot(MODEL.J[:len(self.data), :], h)
if graph:
plt.plot((self(p2)-self(p1)-Jp), 'o')
return self(p2), self(p1), Jp
def compare_derivatives(self, param, index=None, epsi=1e-8, graph=True):
if index is None:
ret = np.hstack((self.J[:len(self.data), self.params[param].indexof()].reshape((len(self.data), 1)),
self.compute_derivatives(param, index, epsi).reshape((len(self.data), 1))))
else:
ret = np.hstack((self.J[:len(self.data), self.params[param].indexof(index)].reshape((len(self.data), 1)),
self.compute_derivatives(param, index, epsi).reshape((len(self.data), 1))))
if graph:
plt.plot(self.data['z'], ret[:, 0]-ret[:, 1], 'o', label='model-numerical derivatives')
plt.legend()
return ret
def construct_jacobian(self):
print 'Constructing the Jacobian ...'
d, p, cosmo, base = self.data, self.params, self.model_cosmo, self.spline_base
zs = d['z'].reshape((len(d), 1))
x0 = cosmo.pars()
eps = np.sqrt(np.sqrt(np.finfo(float).eps))
#eps = np.finfo(float).eps
f0 = cosmo.mu(zs)
c = d['c']
all_lines = np.arange(len(d))
for par in self.free_cosmo_params:
h1 = 1e-4
p2 = self.params.copy()
p2[par] = self.params[par].free + h1
self.update_lines(all_lines, p[par].indexof(), ((self.new_cosmo_model(p2).mu(zs) - f0)/h1).flatten())
if 'theta_salt' not in fixed_pars:
bev = base.eval(np.array(d['l_eff']))
self.lines = np.hstack((self.lines, bev.row)); self.cols = np.hstack((self.cols, bev.col+ p['theta_salt'].indexof(0)))
self.J_dat = np.hstack((self.J_dat, bev.data))
#self.update_lines(bev.row, bev.col + p['theta_salt'].indexof(0), bev.data)
for j in np.unique(d['#SN']):
idx = d['#SN'] == j
snd = d[idx]; taille = len(snd)
if 'c' not in fixed_pars:
self.update_lines(all_lines[idx], p['c'].indexof(j), beta + np.polyval(color_law_params, transfo(snd['l_eff'])))
if 'mB' not in fixed_pars:
self.update_lines(all_lines[idx], p['mB'].indexof(j), np.ones(len(snd)))
for band1 in np.unique(d['band']):
idxband = d['band']==band1
###
### ATTENTION CA CHANGE ICI EN FONCTION DE L UNITE DE ZP ET DL
###
band = band1.replace('::', '_')
if 'ZP_'+band not in fixed_pars:
self.update_lines(all_lines[idxband], p['ZP_'+band].indexof(), 1.)
if 'DL_'+band not in fixed_pars:
derder = base.deriv(np.array(d['l_eff'][idxband])) * params['theta_salt'].full
if np.isnan(derder).sum() != 0:
self.jambon += list(d['band'][idxband][np.isnan(derder)])
self.update_lines(all_lines[idxband], p['DL_'+band].indexof(),
(1./(1+d['z'][idxband])*(derder + d['c'][idxband]*1./(lv-lb)*np.polyval(np.polyder(color_law_params), transfo(d['l_eff'][idxband])))).flatten())
if 'beta' not in fixed_pars:
self.update_lines(all_lines, p['beta'].indexof(), c)
kl = self.color_law_degree
if 'color_law' not in fixed_pars:
self.update_lines(all_lines, p['color_law'].indexof(), np.array([c*transfo(d['l_eff'])**(kl-k) for k in range(kl+1)]).flatten())
self.Clines = np.arange(len(d))
self.Ccols = np.arange(len(d))
self.C_dat = 1./d['snr']**2+color_disp_func(d['l_eff'])**2
print '... DONE !'
self.W = sparse.coo_matrix(((1./(1./np.array(d['snr']**2)+color_disp_func(d['l_eff'])**2)), (all_lines, all_lines)))
def update_cov(self, cov, w=None):
if not np.iterable(cov):
cov = np.array([[cov]])
n, m = np.max(self.Clines)+1, np.max(self.Ccols)+1
_n, _m = cov.shape
self.update_lines(n+np.arange(_n), m+np.arange(_m), np.array(cov).flatten(), jac=False)
if w is None:
self.W = sparse.block_diag((self.W, np.linalg.inv(cov)))
else:
self.W = sparse.block_diag((self.W, sparse.coo_matrix(w)))
def update_J(self, params, entity):
n, m = np.max(self.lines)+1, np.max(self.cols)+1
_n, _m = entity.shape
if type(params) == str:
# ret[:, self.params[params].indexof()] = entity
self.update_lines(n+np.arange(_n), self.params[params].indexof(), np.array(entity).flatten())
elif type(params) == list:
#ret[:, np.hstack([self.params[param].indexof() for param in params])] = entity
self.update_lines(n+np.arange(_n), np.hstack([self.params[param].indexof() for param in params]), np.array(entity).flatten())
else:
raise TypeError('Params must be a str of list type')
print '%d lines were added to J ' % (np.max(self.lines)-n+1)
def update_model(self, params, entity, cov, w=None):
if type(params) is str and all(self.params[params].indexof() != -1):
self.update_J(params, entity)
self.update_cov(cov, w)
elif type(params) is list:
_params = []
good_idx = []
for ii, param in enumerate(params):
if all(self.params[param].indexof() != -1):
_params += [param]
good_idx += [ii]
else:
print '%s is fixed' % param
good_idx = np.array(good_idx)
if len(_params) != 0:
self.update_J(_params, entity)
if len(good_idx) != 0:
self.update_cov(cov, w)
else:
print 'All parameters are fixed in this group'
elif type(params) is str and all(self.params[params].indexof() == -1):
print '%s is fixed' % params
else:
raise TypeError('Parameters entry type not understood, use *str* or *list* of *str*')
def add_priors(self, Priors, el=1e-8):
kl = self.color_law_degree
d, p, cosmo, base = self.data, self.params, self.model_cosmo, self.spline_base
lB = 0.
lV = 1.
lbw = np.linspace(lb, lv, 300)[:150]
lvw = np.linspace(lb, lv, 300)[150:]
### Color priors
print 'Adding color priors'
self.update_model('color_law', np.array([lB**(kl-k) for k in range(kl+1)]).reshape((1, kl+1)), el)
self.update_model('color_law', np.array([lV**(kl-k) for k in range(kl+1)]).reshape((1, kl+1)), el)
#self.update_model('c', np.ones(self.n_sn).reshape((1, self.n_sn)), np.array([[1e-2/self.n_sn]]))
### Spectrum priors
print 'Adding spectrum priors'
self.update_model('theta_salt',
(base.eval(lbw).toarray().sum(axis=0) + base.eval(lvw).toarray().sum(axis=0)).reshape((1, base.n_knots+1)),
el)
self.update_model('theta_salt',
(base.eval(lbw).toarray().sum(axis=0) - base.eval(lvw).toarray().sum(axis=0)).reshape((1, base.n_knots+1)),
el)
### mb priors
print 'Adding intrinsic dispersion'
mds = np.array([(color_disp_func(d['l_eff'][d['#SN']==ii])**2).mean() for ii in np.unique(d['#SN'])])
#mds = 0
self.update_model('mB', np.identity(self.n_sn), (self.intrinsic_dispersion**2)*np.identity(self.n_sn), w=1./(self.intrinsic_dispersion**2)*np.identity(self.n_sn))
### Experiment priors
print 'Adding experiment priors'
for pp in Priors:
k = np.max(self.lines) + 1
wj = sparse.coo_matrix(pp.jac(self.model_cosmo, self.cosmo_free_idx))
self.lines = np.hstack((self.lines, wj.row+k))
self.cols = np.hstack((self.cols, wj.col+p[self.free_cosmo_params[0]].indexof(0)))
self.J_dat = np.hstack((self.J_dat, wj.data))
#self.update_model(self.free_cosmo_params, pp.jac(self.model_cosmo, self.cosmo_free_idx), pp.C)
self.update_cov(pp.C, w=pp.W)
def calib_impact(self):
if self.impacted:
self.reset()
else:
self.J_reco = self.lines, self.cols, self.J_dat
self.C_reco = self.Clines, self.Ccols, self.C_dat
self.W_reco = self.W
### delta-zp priors
print 'Adding calibration priors'
C = self.cal_C
J_calib = np.identity(self.cal_C.shape[0])
self.update_model(zp_list+dl_list, J_calib, C)
self.impacted = True
self.generate_J()
self.generate_C()
return C
def generate_J(self):
idx_J = self.J_dat != 0
self.J = sparse.coo_matrix((self.J_dat[idx_J], (self.lines[idx_J], self.cols[idx_J])))
return self.J
def generate_C(self):
idx_C = self.C_dat != 0
self.C = sparse.coo_matrix((self.C_dat[idx_C], (self.Clines[idx_C], self.Ccols[idx_C])))
return self.C
def reset(self):
if self.impacted:
self.lines, self.cols, self.J_dat = self.J_reco
self.Clines, self.Ccols, self.C_dat = self.C_reco
self.W = self.W_reco
else:
return 0
def inv_mat(M):
fac = cholesky(M)
uni = sparse.coo_matrix(((np.arange(M.shape[0]), np.arange(M.shape[0])), np.ones(M.shape[0])))
return fac(uni)
def extract_blocks(A, s):
N = A.shape[0]
assert A.shape[1] == N
n = len(s)
i = np.arange(N)
J = coo_matrix((np.ones(n),
(i[np.in1d(i,s)], np.arange(n))), shape=(N,N))
K = coo_matrix((np.ones(N-n),
(i[~np.in1d(i,s)], np.arange(N-n))), shape=(N,N))
l = [J.T*A*J, J.T*A*K, K.T*A*J, K.T*A*K]
m = N-n
shapes = [(n,n), (n,m), (m,n), (m,m)]
r = []
for p,U in enumerate(l):
U = U.tocoo()
r.append(coo_matrix((U.data, (U.row, U.col)), shape=shapes[p]))
return r
def block_cov_matrix(W, s):
"""
extract block inverse from W.
the indices of the block elements are specified in the array s
"""
A, B, C, D = extract_blocks(W, s)
f = cholesky(D)
w = A - B * f(C)
return np.linalg.inv(w.todense())
def deriv(f, x):
h = 1e-1
return (f(x+h) - f(x))/h
#data_ok['snr'] = data_ok['snr'] * 2
#ncosmo = len(mod.pars())
planck = priors.PLANCK
boss = priors.BOSS
sdssr = priors.SDSSR
MODEL = Model(data_ok, params, mod, base_spectre)
MODEL.construct_jacobian()
MODEL.add_priors([planck], el=1e-8)
FoM = None
if fixed_pars != dl_list + zp_list:
C_calib = MODEL.calib_impact()
#MODEL.calib_impact(zp_uncertainties[i], 1e-3)
J, C = MODEL.generate_J(), MODEL.generate_C()
W = MODEL.W
Fisher = J.T*W*J
print 'Starting block decomposition of Fisher matrix --- '+time.ctime()
if model_type == "wwa":
covw0wa = block_cov_matrix(Fisher, [params['w'].indexof(0), params['wa'].indexof(0)])
FoM = 1. / np.sqrt(np.linalg.det(covw0wa))
print 'Analysis done with a w0wa cosmology model, we find a FoM of %.1f' % FoM
sigma_w = np.sqrt(block_cov_matrix(Fisher, [params['w'].indexof(0)]))
print 'OK! --- '+time.ctime()
print 'We find an uncertainty on w of %.2f %%' % (sigma_w*100)
| [
"fhazenbe@lpnhe.in2p3.fr"
] | fhazenbe@lpnhe.in2p3.fr |
f83ec57974835325041731f39e3af1492a47fad4 | e1fb7d1ffe317bbfe0f7814b50c1087fe83382a9 | /sales/autocomplete_light_registry.py | 4ad410fdaf4236690c4dc08fb5a9a8ad058b740b | [] | no_license | drakejanier/management_system | 27c8e12bd9599687281a4c7c7acb4b7fd43963a8 | 7771217be54975d555a4df478a44d646c14fd81a | refs/heads/master | 2022-12-09T20:07:25.664557 | 2019-07-05T15:18:01 | 2019-07-05T15:18:01 | 193,214,121 | 0 | 0 | null | 2022-12-08T01:04:39 | 2019-06-22T09:08:15 | JavaScript | UTF-8 | Python | false | false | 554 | py | import autocomplete_light
from inventory.models import Products
autocomplete_light.register(Products,
# Just like in ModelAdmin.search_fields
search_fields=['^Name'],
attrs={
'placeholder': 'Search Item',
# This will set the yourlabs.Autocomplete.minimumCharacters
# options, the naming conversion is handled by jQuery
'data-autocomplete-minimum-characters': 1,
},
widget_attrs={
'data-widget-maximum-values': 4,
# Enable modern-style widget !
'class': 'modern-style',
},
) | [
"jeromejanier2@gmail.com"
] | jeromejanier2@gmail.com |
1fe72cd3503e9abbff83c580e23d9fe15bd11c8f | a91b80f0fa166a9630c41054b8f0c8a430f1905e | /time_evol.py | 3ddaf7b6820b1fae1e000ccd85d90c50a8d08f93 | [] | no_license | SES591/Manny_Economics | fe9630c7392aea444e005ca8fd00e3fd0641e8c5 | 6e6fd72a4e78bcf50333bd8a549373b60a173cd5 | refs/heads/master | 2021-01-10T15:32:38.294329 | 2016-05-04T18:37:00 | 2016-05-04T18:37:00 | 51,455,928 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,606 | py | #!/usr/bin/python
#bioinfo.py
__author__ = '''Hyunju Kim'''
import os
import sys
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
from collections import OrderedDict
import input_net as inet
import updating_rule as ur
################# BEGIN: decimal_to_binary(nodes_list, decState, Nbr_States=2) ########################
def decimal_to_binary(nodes_list, decState, Nbr_States=2): # more left in the nodes list means higher order of 2 in binary
biStates = {}
x = len(nodes_list) -1
for u in nodes_list:
biStates[u] = decState / np.power(Nbr_States, x)
decState = decState % np.power(Nbr_States, x)
x = x - 1
return biStates
################# END: decimal_to_binary(nodes_list, decState, Nbr_States=2) ########################
################# BEGIN: binary_to_decimal(nodes_list, biStates, Nbr_States=2) ########################
def binary_to_decimal(nodes_list, biStates, Nbr_States=2): # more left in the nodes list means higher order of 2 in binary
decState = 0
x = len(nodes_list) -1
for u in nodes_list:
decState = decState + biStates[u] * np.power(Nbr_States, x)
x = x - 1
return decState
################# END: binary_to_decimal(nodes_list, biStates, Nbr_States=2) ########################
'''
################# BEGIN: biological_sequence(net, nodes_list, Nbr_States=2) ########################
def biological_sequence(net, nodes_list, bio_initStates, fileName, Nbr_States=2):
bioSeq = []
currBiStates = bio_initStates
finished = False
while(not finished):
oneDiff = 0
prevBiStates = currBiStates.copy()
bioSeq.append(prevBiStates)
currBiStates = ur.sigmoid_updating(net, prevBiStates)
for u in nodes_list:
if abs(prevBiStates[u] - currBiStates[u]) > 0:
oneDiff = 1
break
finished = (oneDiff < 1)
OUTPUT_FILE = open(fileName, 'w')
OUTPUT_FILE.write('time step')
for u in nodes_list:
OUTPUT_FILE.write('\t%s'%(u))
OUTPUT_FILE.write('\n')
for i in range(len(bioSeq)):
OUTPUT_FILE.write('%d'%i)
for u in nodes_list:
OUTPUT_FILE.write('\t%d'%(bioSeq[i][u]))
OUTPUT_FILE.write('\n')
#return bioSeq
################# END: biological_sequence(net, nodes_list, Nbr_States=2) ########################
'''
################# BEGIN: time_series_en(net, nodes_list, Nbr_States=2, MAX_TimeStep=20, Transition_Step=0) ########################
def time_series_all(net, nodes_list, Nbr_Initial_States, Nbr_States, MAX_TimeStep=20):
'''
Description:
-- compute TE for every pair of nodes using distribution from all possible initial conditions or an arbitrary set of initial conditions
Arguments:
-- 1. net
-- 2. nodes_list
-- 3. Initial_States_List
-- 4. Nbr_States
-- 5. MAX_TimeStep
Return:
-- 1. timeSeriesData
'''
#Nbr_Nodes = len(net.nodes())
#Nbr_All_Initial_States = np.power(Nbr_States, Nbr_Nodes)
timeSeriesData = {}
for n in net.nodes():
timeSeriesData[n] = {}
for initState in range(Nbr_Initial_States):
timeSeriesData[n][initState] = []
for initDecState in range(Nbr_Initial_States):
currBiState = decimal_to_binary(nodes_list, initDecState, Nbr_States)
for step in range(MAX_TimeStep):
prevBiState = currBiState.copy()
for n in nodes_list:
timeSeriesData[n][initDecState].append(prevBiState[n])
currBiState = ur.sigmoid_updating(net, prevBiState)
return timeSeriesData
################# END: time_series_en(net, nodes_list, Nbr_States=2, MAX_TimeStep=20) ########################
################# BEGIN: net_state_transition_map(net, nodes_list, Nbr_States=2) ########################
def net_state_transition(net, nodes_list, Nbr_States=2):
'''
Arguments:
1. net
2. Nbr_States
Return:
1. decStateTransMap
'''
Nbr_Nodes = len(net.nodes())
Nbr_All_Initial_States = np.power(Nbr_States, Nbr_Nodes)
decStateTransMap = nx.DiGraph()
for prevDecState in range(Nbr_All_Initial_States):
prevBiState = decimal_to_binary(nodes_list, prevDecState, Nbr_States)
currBiState = ur.sigmoid_updating(net, prevBiState)
currDecState = binary_to_decimal(nodes_list, currBiState, Nbr_States)
decStateTransMap.add_edge(prevDecState, currDecState)
return decStateTransMap
################# END: net_state_transition_map(net, nodes_list, Nbr_States=2) ########################
################# BEGIN: find_attractor_old(decStateTransMap) ########################
def find_attractor_old(decStateTransMap):
'''
Arguments:
1. decStateTransMap
Return:
1. attractor
'''
attractor_list = nx.simple_cycles(decStateTransMap) #in case of deterministic system, any cycle without considering edge direction will be directed cycle.
attractors = {}
attractors['fixed'] = []
attractors['cycle'] = []
for u in attractor_list:
if len(u) == 1:
attractors['fixed'].append(u)
else:
attractors['cycle'].append(u)
return attractors
################# END: find_attractor_old(decStateTransMap) ########################
################# BEGIN: attractor_analysis(decStateTransMap) ########################
def find_attractor(decStateTransMap):
'''
Arguments:
-- 1. decStateTransMap
Return:
-- attractor
'''
attractor_list = nx.simple_cycles(decStateTransMap) #in case of deterministic system, any cycle without considering edge direction will be directed cycle.
attractors = {}
#attractors['fixed'] = []
#attractors['cycle'] = []
undirectedMap = nx.DiGraph.to_undirected(decStateTransMap)
for u in attractor_list:
attractors[u[0]] = {}
if len(u) == 1:
attractors[u[0]]['type'] = 'fixed'
else:
attractors[u[0]]['type'] = 'cycle'
for v in attractors.iterkeys():
basin = nx.node_connected_component(undirectedMap, v)
attractors[v]['basin'] = basin
attractors[v]['basin-size'] = len(basin)
sorted_attractors = OrderedDict(sorted(attractors.items(), key=lambda kv: kv[1]['basin-size'], reverse=True))
return sorted_attractors
################# END: attractor_analysis(decStateTransMap) ########################
################# BEGIN: time_series_pa(net, nodes_list, Initial_States_List, Nbr_States=2, MAX_TimeStep=20) ########################
def time_series_pa(net, nodes_list, Initial_States_List, Nbr_States, MAX_TimeStep=20):
'''
Description:
-- compute TE for every pair of nodes using distribution from all initial conditions that converge to the primary or biological attractor
Arguments:
-- 1. net
-- 2. nodes_list
-- 3. Initial_States_List
-- 4. Nbr_States
-- 5. MAX_TimeStep
Return:
-- 1. timeSeriesData (only for primary attractor)
'''
timeSeriesData = {}
for n in net.nodes():
timeSeriesData[n] = {}
for initState in range(len(Initial_States_List)):
timeSeriesData[n][initState] = []
for initState in range(len(Initial_States_List)):
initDecState = Initial_States_List[initState]
currBiState = decimal_to_binary(nodes_list, initDecState, Nbr_States)
for step in range(MAX_TimeStep):
prevBiState = currBiState.copy()
for n in nodes_list:
timeSeriesData[n][initState].append(prevBiState[n])
currBiState = ur.sigmoid_updating(net, prevBiState)
return timeSeriesData
################# END: time_series_pa(net, nodes_list, Nbr_States=2, MAX_TimeStep=20) ########################
################# BEGIN: time_series_one(net, nodes_list, Initial_State, Nbr_States=2, MAX_TimeStep=20) ########################
def time_series_one(net, nodes_list, Initial_State, Nbr_States, MAX_TimeStep=20):
'''
Description:
-- compute TE for every pair of nodes using distribution from all initial conditions that converge to the primary or biological attractor
Arguments:
-- 1. net
-- 2. nodes_list
-- 3. Initial_States_List
-- 4. Nbr_States
-- 5. MAX_TimeStep
Return:
-- 1. timeSeriesData (only for primary attractor)
'''
timeSeriesData = {}
for n in net.nodes():
timeSeriesData[n] = {}
timeSeriesData[n][0] = []
currBiState = Initial_State
for step in range(MAX_TimeStep):
prevBiState = currBiState.copy()
for n in nodes_list:
timeSeriesData[n][0].append(prevBiState[n])
currBiState = ur.sigmoid_updating(net, prevBiState)
return timeSeriesData
################# END: time_series_one(net, nodes_list, Initial_State, Nbr_States=2, MAX_TimeStep=20) ########################
def main():
print "time_evol module is the main code."
## to import a network of 3-node example
EDGE_FILE = 'C:\Boolean_Delay_in_Economics\Manny\EDGE_FILE.dat'
NODE_FILE = 'C:\Boolean_Delay_in_Economics\Manny\NODE_FILE.dat'
net = inet.read_network_from_file(EDGE_FILE, NODE_FILE)
nodes_list = inet.build_nodes_list(NODE_FILE)
'''
## to obtain time series data for all possible initial conditions for 3-node example network
timeSeriesData = ensemble_time_series(net, nodes_list, 2, 10)#, Nbr_States=2, MAX_TimeStep=20)
initState = 1
biStates = decimal_to_binary(nodes_list, initState)
print 'initial state', biStates
## to print time series data for each node: a, b, c starting particualr decimal inital condition 1
print 'a', timeSeriesData['a'][1]
print 'b', timeSeriesData['b'][1]
print 'c', timeSeriesData['c'][1]
'''
## to obtain and visulaize transition map in the network state space
decStateTransMap = net_state_transition(net, nodes_list)
nx.write_graphml(decStateTransMap,'C:\Boolean_Delay_in_Economics\Manny\Results\BDE.graphml')
'''
nx.draw(decStateTransMap)
plt.show()
## to find fixed point attractors and limited cycle attractors with given transition map.
attractors = find_attractor(decStateTransMap)
print attractors
'''
'''
## to obtain biological sequence for the Fission Yeast Cell-Cycle Net starting from biological inital state
EDGE_FILE = 'C:\Boolean_Delay_in_Economics\Manny\EDGE_FILE.dat'
NODE_FILE = 'C:\Boolean_Delay_in_Economics\Manny\NODE_FILE.dat'
#BIO_INIT_FILE = '../data/fission-net/fission-net-bioSeq-initial.txt'
net = inet.read_network_from_file(EDGE_FILE, NODE_FILE)
nodes_list = inet.build_nodes_list(NODE_FILE)
bio_initStates = inet.read_init_from_file(BIO_INIT_FILE)
outputFile = 'C:\Boolean_Delay_in_Economics\Manny\Results\BDE-bioSeq.txt'
bioSeq = biological_sequence(net, nodes_list, bio_initStates, outputFile)
'''
if __name__=='__main__':
main()
| [
"mbanda024@gmail.com"
] | mbanda024@gmail.com |
e8dcfafb5a0535d8eeb92c52ede33dd3afb37626 | a5fc0412036cc0b248736490c515037d42260211 | /analysis/analyze.py | d2a4cbb86037047257507452aa890ed78789d5a0 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | Kortemme-Lab/ddg | 7359fc1d81973263dff302b44019954bd70f4793 | 37d405af2dac41477c689e6e63d5f5c2b9f5a665 | refs/heads/master | 2016-09-15T16:29:02.249626 | 2016-01-31T01:34:03 | 2016-01-31T01:34:03 | 29,941,362 | 14 | 8 | null | 2016-01-31T01:34:03 | 2015-01-27T23:54:34 | Python | UTF-8 | Python | false | false | 4,002 | py | #!/usr/bin/env python2
# The MIT License (MIT)
#
# Copyright (c) 2015 Shane O'Connor
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""\
Outputs statistics and a scatterplot for the given XY data set.
Usage:
analyze.py [options] <inputfile>...
Options:
-o FILE --output FILE
File name for the generated scatterplot [default: scatterplot.png]
The input file should be a comma-separated values file where the first three columns are:
ID,Experimental,Predicted
Authors:
Shane O'Connor
"""
import sys
import os
from libraries import docopt
from stats import get_xy_dataset_statistics, plot, read_file, RInterface, format_stats_for_printing
correlation_coefficient_scatterplotplot = RInterface.correlation_coefficient_gplot
def read_json(filename):
try:
try:
import json
except:
import simplejson as json
return json.loads(read_file(filename))
except:
return None
def parse_csv(filename):
separator = ','
if filename.endswith('.tsv'):
separator = '\t'
try:
table = []
id = 1
contents = read_file(filename)
lines = [l.strip().split(separator) for l in contents.split('\n') if l.strip() and not(l.strip().startswith('#'))]
for linetokens in lines:
if len(linetokens) >= 3:
table.append(dict(Experimental = float(linetokens[0]), Predicted = float(linetokens[1]), ID = str(linetokens[2])))
elif len(linetokens) == 2:
table.append(dict(Experimental = float(linetokens[0]), Predicted = float(linetokens[1]), ID = id))
id += 1
else:
raise Exception('At least two columns (experimental DDG, predicted DDG) are expected.')
return table
except Exception, e:
raise Exception('An exception occurred parsing the CSV/TSV file: %s' % str(e))
if __name__ == '__main__':
try:
arguments = docopt.docopt(__doc__.format(**locals()))
except Exception, e:
print('Failed while parsing arguments: %s.' % str(e))
sys.exit(1)
# Read file input file
input_filename = arguments['<inputfile>'][0]
if not os.path.exists(input_filename):
print('Error: the input file %s does not exist.' % input_filename)
sys.exit(2)
analysis_table = read_json(input_filename)
if not analysis_table:
analysis_table = parse_csv(input_filename)
# Set up the output filename
output_filename = arguments['--output']
output_filename_ext = os.path.splitext(output_filename)[1].lower()
if output_filename_ext not in ['.png', '.pdf']: # todo: check eps output ('.eps')
output_filename += '.png'
print('\n' + '*'*10 + ' Statistics ' +'*'*10)
print(format_stats_for_printing(get_xy_dataset_statistics(analysis_table)))
print('\nSaving scatterplot to %s.\n' % output_filename)
plot(analysis_table, output_filename, correlation_coefficient_scatterplotplot)
| [
"spiderbaby@gmail.com"
] | spiderbaby@gmail.com |
e6f78f9667f29503faff37b2107387030d261c1a | f867599bfff077f3bf445edebc3ce603f2a0a304 | /users/forms.py | fc0a2c567a10773cc03c0df72e8c8a47477efb58 | [] | no_license | AntonMinski/Tennis_partner_Django_Vue | 1f4e2e68a235d1998ae116aedaeebbc2d8be3591 | 5b535ff3402c2e472118d996b2c9c445f52dd430 | refs/heads/main | 2023-07-20T03:01:26.485635 | 2021-09-02T14:24:10 | 2021-09-02T14:24:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 190 | py | from django_registration.forms import RegistrationForm
from .models import BaseUser
class CustomUserForm(RegistrationForm):
class Meta(RegistrationForm.Meta):
model = BaseUser | [
"anton_minski5@ukr.net"
] | anton_minski5@ukr.net |
9cfc6a3f4581652820fad3125572fca2ba04efd1 | 9b5e9d74bb53b57a953a0f3ecf31ea98f889307e | /0Hacking Tools/Upload_de_arquivos.py | 36ba4a57903b2f9702fa5d2d7b3d26418f7147bd | [] | no_license | luied/FIRST-ONE---PYTHON | e6718af72055486a66988f71486a4856b9c606cc | 677be23d120e7121f0522e16eb239fc5bc2624f0 | refs/heads/master | 2020-03-22T19:24:54.160959 | 2018-09-27T23:43:49 | 2018-09-27T23:43:49 | 140,526,012 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 847 | py | import requests
import re
import time
arquivo = "C://Users//Lui19//Desktop//shell_simples.php"
username = ["natas13","jmLTY0qiPZBbaKc9341cqPQZBJv7MQbY"]
url = "http://natas13.natas.labs.overthewire.org/"
session = requests.Session()
response = session.get(url, auth=(username[0],username[1]))
print(response.text)
time.sleep(10)
response = session.post(url, auth=(username[0],username[1]), files={"uploadedfile": open(arquivo,"rb")},data={"filename":arquivo})
content = response.text
print(content)
upload = input("Aonde foi colocado o arquivo?\nEXEMPLO: upload/file.php\n>>> ")
while True:
comando = input("www-data: ")
resposta =session.get(url+upload+"?cmd="+comando, auth=(username[0],username[1]))
try:
print(resposta.content)
except:
print("Nao foi possivel decodificar")
pass
| [
"noreply@github.com"
] | luied.noreply@github.com |
1ec0367f10400adcbbae093589418ba83cf39a50 | 40aef10e5ef29db11746a2e25911c1c72cffdea6 | /scripts/quantize_graph.py | 03c309b62b5cd5469902f47cb6a420a23477e80e | [] | no_license | DevelopIdeas/tflite_tools | 18c4fa476d7159c7064d9eed8cd99d315d3a8e4d | a7e09852b2fc428678588f3faeaf6701643b842e | refs/heads/master | 2021-10-11T10:42:32.755126 | 2019-01-24T19:52:04 | 2019-01-24T19:52:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 57,206 | py | #!/usr/bin/env python
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Transforms a float-trained graph into an equivalent quantized version.
An example of command-line usage is:
bazel build tensorflow/tools/quantization:quantize_graph \
&& bazel-bin/tensorflow/tools/quantization/quantize_graph \
--input=tensorflow_inception_graph.pb
--output_node_names="softmax2" --print_nodes --output=/tmp/quantized_graph.pb \
--mode=eightbit --logtostderr
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import collections
import re
import numpy as np
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import app
from tensorflow.python.platform import flags as flags_lib
from tensorflow.python.platform import gfile
flags = flags_lib
FLAGS = flags.FLAGS
flags.DEFINE_boolean("print_nodes", False, """Lists all nodes in the model.""")
flags.DEFINE_string("input", "", """TensorFlow 'GraphDef' file to load.""")
flags.DEFINE_string("output_node_names", "",
"""Output node names, comma separated.""")
flags.DEFINE_string("output", "", """File to save the output graph to.""")
flags.DEFINE_integer("bitdepth", 8,
"""How many bits to quantize the graph to.""")
flags.DEFINE_string("mode", "round",
"""What transformation to apply (round, quantize,"""
""" eightbit, weights, or weights_rounded).""")
flags.DEFINE_string("test_input_dims", "1,224,224,3",
"""The size of the input tensor to use when testing a"""
""" graph loaded from a file.""")
flags.DEFINE_boolean("strip_redundant_quantization", True,
"""Removes redundant dequantize/quantize pairs.""")
flags.DEFINE_boolean("quantized_input", False,
"If true, assume Placeholders are quantized with values "
"covering [--quantized_input_min,--quantized_input_max]. "
"Only supported when --mode=eightbit")
flags.DEFINE_float("quantized_input_min", 0,
"The minimum of the actual input range when "
"--quantized_input")
flags.DEFINE_float("quantized_input_max", 1,
"The maximum of the actual input range when "
"--quantized_input")
flags.DEFINE_float(
"quantized_fallback_min", None,
"The fallback 'min' value to use for layers which lack min-max "
"information. Note: this should be considered a coarse tool just good "
"enough for experimentation purposes, since graphs quantized in this way "
"would be very inaccurate.")
flags.DEFINE_float(
"quantized_fallback_max", None,
"The fallback 'max' value to use for layers which lack min-max "
"information. Note: this should be considered a coarse tool just good "
"enough for experimentation purposes, since graphs quantized in this way "
"would be very inaccurate.")
def print_input_nodes(current_node, nodes_map, indent, already_visited):
print(" " * indent + current_node.op + ":" + current_node.name)
already_visited[current_node.name] = True
for input_node_name in current_node.input:
if input_node_name in already_visited:
continue
input_node = nodes_map[input_node_name]
print_input_nodes(input_node, nodes_map, indent + 1, already_visited)
def create_node(op, name, inputs):
new_node = node_def_pb2.NodeDef()
new_node.op = op
new_node.name = name
for input_name in inputs:
new_node.input.extend([input_name])
return new_node
def create_constant_node(name, value, dtype, shape=None):
node = create_node("Const", name, [])
set_attr_dtype(node, "dtype", dtype)
set_attr_tensor(node, "value", value, dtype, shape)
return node
def copy_attr(node, key, attr_value):
try:
node.attr[key].CopyFrom(attr_value)
except KeyError:
pass
def set_attr_dtype(node, key, value):
try:
node.attr[key].CopyFrom(
attr_value_pb2.AttrValue(type=value.as_datatype_enum))
except KeyError:
pass
def set_attr_shape(node, key, value):
try:
node.attr[key].CopyFrom(
attr_value_pb2.AttrValue(shape=tensor_shape.as_shape(value).as_proto()))
except KeyError:
pass
def set_attr_tensor(node, key, value, dtype, shape=None):
try:
node.attr[key].CopyFrom(
attr_value_pb2.AttrValue(tensor=tensor_util.make_tensor_proto(
value, dtype=dtype, shape=shape)))
except KeyError:
pass
def set_attr_string(node, key, value):
try:
node.attr[key].CopyFrom(attr_value_pb2.AttrValue(s=value))
except KeyError:
pass
def set_attr_int_list(node, key, value):
list_value = attr_value_pb2.AttrValue.ListValue(i=value)
try:
node.attr[key].CopyFrom(attr_value_pb2.AttrValue(list=list_value))
except KeyError:
pass
def set_attr_bool(node, key, value):
try:
node.attr[key].CopyFrom(attr_value_pb2.AttrValue(b=value))
except KeyError:
pass
def set_attr_int(node, key, value):
try:
node.attr[key].CopyFrom(attr_value_pb2.AttrValue(i=value))
except KeyError:
pass
def set_attr_float(node, key, value):
try:
node.attr[key].CopyFrom(attr_value_pb2.AttrValue(f=value))
except KeyError:
pass
def node_name_from_input(node_name):
"""Strips off ports and other decorations to get the underlying node name."""
if node_name.startswith("^"):
node_name = node_name[1:]
m = re.search(r"(.*):\d+$", node_name)
if m:
node_name = m.group(1)
return node_name
def ensure_tensor_name_has_port(node_name):
  """Makes sure that a tensor name has :0 if no explicit port exists."""
  if re.search(r"(.*):\d+$", node_name):
    return node_name
  return node_name + ":0"
def unique_node_name_from_input(node_name):
  """Replaces invalid characters in input names to get a unique node name."""
  for bad, replacement in ((":", "__port__"), ("^", "__hat__")):
    node_name = node_name.replace(bad, replacement)
  return node_name
def quantize_array(arr, num_buckets):
  """Quantizes a numpy array.

  Each scalar in *arr* is snapped to the center of one of *num_buckets*
  evenly spaced buckets spanning [arr.min(), arr.max()].  For instance,
  quantize_array([0, 0.3, 0.6, 1], 2) => [0.25, 0.25, 0.75, 0.75].

  Args:
    arr: The numpy array to quantize.
    num_buckets: The number of buckets to map "arr" to.
  Returns:
    The quantized numpy array.
  Raises:
    ValueError: when num_buckets < 1.
  """
  if num_buckets < 1:
    raise ValueError("num_buckets must be >= 1")
  lo = arr.min()
  hi = arr.max()
  # A constant array has zero range; return it untouched.
  if hi == lo:
    return arr
  bucket_width = (hi - lo) / num_buckets
  # floor() maps hi itself to index num_buckets; clamp it into the last bucket.
  bucket_indices = np.minimum(
      np.floor((arr - lo) / bucket_width), num_buckets - 1)
  # Map each scalar to the center of its bucket.
  return lo + bucket_width * (bucket_indices + 0.5)
def quantize_weight_rounded(input_node):
  """Returns a replacement node for input_node containing bucketed floats.

  The Const node's float tensor is snapped to the centers of
  2**FLAGS.bitdepth evenly spaced buckets (see quantize_array), and a new
  Const with the same name, dtype, and shape is returned as a 1-element list.
  """
  input_tensor = input_node.attr["value"].tensor
  tensor_value = tensor_util.MakeNdarray(input_tensor)
  shape = input_tensor.tensor_shape
  # Currently, the parameter FLAGS.bitdepth is used to compute the
  # number of buckets as 1 << FLAGS.bitdepth, meaning the number of
  # buckets can only be a power of 2.
  # This could be fixed by introducing a new parameter, num_buckets,
  # which would allow for more flexibility in chosing the right model
  # size/accuracy tradeoff. But I didn't want to add more parameters
  # to this script than absolutely necessary.
  num_buckets = 1 << FLAGS.bitdepth
  tensor_value_rounded = quantize_array(tensor_value, num_buckets)
  tensor_shape_list = tensor_util.TensorShapeProtoToList(shape)
  return [
      create_constant_node(
          input_node.name,
          tensor_value_rounded,
          dtypes.float32,
          shape=tensor_shape_list)
  ]
def quantize_weight_eightbit(input_node, quantization_mode):
  """Returns replacement nodes for input_node using the Dequantize op.

  The float Const is replaced by four nodes: a quint8 Const holding the
  quantized values, float min/max Consts describing the range, and a
  Dequantize op that takes over the original node name so consumers keep
  seeing float values.
  """
  base_name = input_node.name + "_"
  quint8_const_name = base_name + "quint8_const"
  min_name = base_name + "min"
  max_name = base_name + "max"
  float_tensor = tensor_util.MakeNdarray(input_node.attr["value"].tensor)
  min_value = np.min(float_tensor.flatten())
  max_value = np.max(float_tensor.flatten())
  # Make sure that the range includes zero.
  if min_value > 0.0:
    min_value = 0.0
  # min_value == max_value is a tricky case. It can occur for general
  # tensors, and of course for scalars. The quantized ops cannot deal
  # with this case, so we set max_value to something else.
  # It's a tricky question what is the numerically best solution to
  # deal with this degeneracy.
  # TODO(petewarden): Better use a tolerance than a hard comparison?
  if min_value == max_value:
    if abs(min_value) < 0.000001:
      max_value = min_value + 1.0
    elif min_value > 0:
      max_value = 2 * min_value
    else:
      max_value = min_value / 2.0
  # Evaluate QuantizeV2 eagerly in a throwaway session to materialize the
  # quantized buffer for embedding in a Const node.
  sess = session.Session()
  with sess.as_default():
    quantize_op = array_ops.quantize_v2(
        float_tensor,
        min_value,
        max_value,
        dtypes.quint8,
        mode=quantization_mode)
    quint8_tensor = quantize_op[0].eval()
  shape = tensor_util.TensorShapeProtoToList(input_node.attr["value"]
                                             .tensor.tensor_shape)
  quint8_const_node = create_constant_node(
      quint8_const_name, quint8_tensor, dtypes.quint8, shape=shape)
  min_node = create_constant_node(min_name, min_value, dtypes.float32)
  max_node = create_constant_node(max_name, max_value, dtypes.float32)
  dequantize_node = create_node("Dequantize", input_node.name,
                                [quint8_const_name, min_name, max_name])
  set_attr_dtype(dequantize_node, "T", dtypes.quint8)
  set_attr_string(dequantize_node, "mode", quantization_mode)
  return [quint8_const_node, min_node, max_node, dequantize_node]
# Per-rewrite state threaded through eightbitize_nodes_recursively:
#   already_visited: node name -> True once the node has been processed.
#   output_node_stack: (consumer NodeDef, input index, quantize_input) tuples
#     for the chain of consumers above the node currently being visited.
#   merged_with_fake_quant: names of nodes already fused with a FakeQuant* op.
EightbitizeRecursionState = collections.namedtuple(
    "EightbitizeRecursionState",
    ["already_visited", "output_node_stack", "merged_with_fake_quant"])
class GraphRewriter(object):
"""Takes a float graph, and rewrites it in quantized form."""
def __init__(self,
             input_graph,
             mode,
             quantized_input_range,
             fallback_quantization_range=None):
  """Sets up the class to rewrite a float graph.

  Args:
    input_graph: A float graph to transform.
    mode: A string controlling how quantization is performed -
      round, quantize, eightbit, or weights.
    quantized_input_range: if set, assume the input is
      quantized and represents the range
      [quantized_input_range[0], quantized_input_range[1]]
    fallback_quantization_range: if set, then for nodes where the quantization
      range can't be inferred from the graph, use the range
      [fallback_quantization_range[0], fallback_quantization_range[1]) instead
      of using a RequantizationRange node in the graph.

  Raises:
    ValueError: Two nodes with the same name were found in the graph, or a
      range option was invalid / used outside eightbit mode.
  """
  self.input_graph = input_graph
  self.nodes_map = self.create_nodes_map(input_graph)
  self.output_graph = None
  self.mode = mode
  # Maps temporary node names to the final names they should carry in the
  # output graph (filled in by eightbitize_placeholder_node).
  self.final_node_renames = {}
  if quantized_input_range:
    self.input_range = (quantized_input_range[0], quantized_input_range[1])
    if self.input_range[0] >= self.input_range[1]:
      raise ValueError("Invalid quantized_input_range: [%s,%s]" %
                       self.input_range)
    if self.mode != "eightbit":
      raise ValueError(
          "quantized_input_range can only be specified in eightbit mode")
  else:
    self.input_range = None
  if fallback_quantization_range:
    self.fallback_quantization_range = [
        fallback_quantization_range[0], fallback_quantization_range[1]
    ]
    if (self.fallback_quantization_range[0] >=
        self.fallback_quantization_range[1]):
      raise ValueError("Invalid fallback_quantization_range: [%s,%s]" %
                       self.fallback_quantization_range)
    if self.mode != "eightbit":
      raise ValueError("fallback_quantization_range can only be "
                       "specified in eightbit mode")
  else:
    self.fallback_quantization_range = None
  # Data that is valid only during the recursive call to rewrite the graph.
  self.state = None
def create_nodes_map(self, graph):
"""Builds a mapping of node names to their defs from the graph."""
nodes_map = {}
for node in graph.node:
if node.name not in nodes_map.keys():
nodes_map[node.name] = node
else:
raise ValueError("Duplicate node names detected.")
return nodes_map
def rewrite(self, output_node_names):
  """Triggers rewriting of the float graph.

  Args:
    output_node_names: A list of names of the nodes that produce the final
      results.

  Returns:
    A quantized version of the float graph.
  """
  self.output_graph = graph_pb2.GraphDef()
  output_nodes = [
      self.nodes_map[output_node_name]
      for output_node_name in output_node_names
  ]
  if self.mode == "round":
    # Round weights in place via RoundToSteps ops.
    self.already_visited = {}
    for output_node in output_nodes:
      self.round_nodes_recursively(output_node)
  elif self.mode == "quantize":
    # Insert quantize/dequantize pairs around key ops.
    self.already_visited = {}
    self.already_quantized = {}
    for output_node in output_nodes:
      self.quantize_nodes_recursively(output_node)
  elif self.mode == "eightbit":
    # Full eight-bit conversion: strip training-only nodes first, since
    # that changes node identities, then rebuild the output node list.
    self.set_input_graph(graph_util.remove_training_nodes(self.input_graph))
    output_nodes = [
        self.nodes_map[output_node_name]
        for output_node_name in output_node_names
    ]
    self.state = EightbitizeRecursionState(
        already_visited={}, output_node_stack=[], merged_with_fake_quant={})
    for output_node in output_nodes:
      self.eightbitize_nodes_recursively(output_node)
    # State is only meaningful during the recursion above.
    self.state = None
    if self.input_range:
      # Constants giving the assumed range of already-quantized inputs.
      self.add_output_graph_node(
          create_constant_node("quantized_input_min_value", self.input_range[
              0], dtypes.float32, []))
      self.add_output_graph_node(
          create_constant_node("quantized_input_max_value", self.input_range[
              1], dtypes.float32, []))
    if self.fallback_quantization_range:
      # Constants used instead of RequantizationRange nodes when ranges
      # cannot be inferred from the graph.
      self.add_output_graph_node(
          create_constant_node("fallback_quantization_min_value",
                               self.fallback_quantization_range[0],
                               dtypes.float32, []))
      self.add_output_graph_node(
          create_constant_node("fallback_quantization_max_value",
                               self.fallback_quantization_range[1],
                               dtypes.float32, []))
    if FLAGS.strip_redundant_quantization:
      # Remove dequantize->quantize round trips introduced node-by-node.
      self.output_graph = self.remove_redundant_quantization(
          self.output_graph)
      self.remove_dead_nodes(output_node_names)
    self.apply_final_node_renames()
  elif self.mode == "weights":
    # Only weights become eight-bit; activations stay float.
    self.output_graph = self.quantize_weights(self.input_graph,
                                              b"MIN_COMBINED")
    self.remove_dead_nodes(output_node_names)
  elif self.mode == "weights_rounded":
    self.output_graph = self.quantize_weights(self.input_graph, self.mode)
    self.remove_dead_nodes(output_node_names)
  else:
    # NOTE(review): unknown modes only print and fall through, returning
    # an empty GraphDef rather than raising.
    print("Bad mode - " + self.mode + ".")
  return self.output_graph
def round_nodes_recursively(self, current_node):
  """The entry point for simple rounding quantization.

  Walks the graph depth-first from `current_node`, copying nodes into the
  output graph.  Conv2D/BiasAdd/MatMul nodes are copied under
  "<name>_original" and replaced by a RoundToSteps op (fed by a const
  holding 2**FLAGS.bitdepth levels) that keeps the original name, so
  downstream consumers are rewired transparently.
  """
  # Use .get(): plain indexing (self.already_visited[name]) raised KeyError
  # on the first visit because names are only inserted after being seen.
  if self.already_visited.get(current_node.name, False):
    return
  self.already_visited[current_node.name] = True
  for input_node_name in current_node.input:
    input_node_name = node_name_from_input(input_node_name)
    input_node = self.nodes_map[input_node_name]
    self.round_nodes_recursively(input_node)
  nodes_to_quantize = ["Conv2D", "BiasAdd", "MatMul"]
  # Exact op-name match: the previous substring test
  # (any(current_node.op in s for s in ...)) also matched unrelated ops
  # such as "Add", which is a substring of "BiasAdd".
  if current_node.op in nodes_to_quantize:
    # Keep the original computation under a new name...
    new_node = node_def_pb2.NodeDef()
    new_node.CopyFrom(current_node)
    new_node.name = current_node.name + "_original"
    self.add_output_graph_node(new_node)
    levels = 1 << FLAGS.bitdepth
    constant_name = current_node.name + "_round_depth"
    constant_tensor = constant_op.constant(
        levels, dtype=dtypes.int32, name=constant_name)
    constant_node = constant_tensor.op.node_def
    self.add_output_graph_node(constant_node)
    # ...and round its output under the original name.
    quantize_node = node_def_pb2.NodeDef()
    quantize_node.op = "RoundToSteps"
    quantize_node.name = current_node.name
    quantize_node.input.extend([current_node.name + "_original"])
    quantize_node.input.extend([constant_node.name])
    self.add_output_graph_node(quantize_node)
  else:
    # Everything else is copied through unchanged.
    new_node = node_def_pb2.NodeDef()
    new_node.CopyFrom(current_node)
    self.add_output_graph_node(new_node)
def quantize_nodes_recursively(self, current_node):
  """The entry point for quantizing nodes to eight bit and back.

  Walks the graph depth-first; Conv2D/BiasAdd/MatMul nodes and all of their
  inputs are passed through quantize_node, everything else is copied
  verbatim into the output graph.
  """
  # Use .get(): plain indexing (self.already_visited[name]) raised KeyError
  # on the first visit because names are only inserted after being seen.
  if self.already_visited.get(current_node.name, False):
    return
  self.already_visited[current_node.name] = True
  for input_node_name in current_node.input:
    input_node_name = node_name_from_input(input_node_name)
    input_node = self.nodes_map[input_node_name]
    self.quantize_nodes_recursively(input_node)
  nodes_to_quantize = ["Conv2D", "BiasAdd", "MatMul"]
  # Exact op-name match: the previous substring test
  # (any(current_node.op in s for s in ...)) also matched unrelated ops
  # such as "Add", which is a substring of "BiasAdd".
  if current_node.op in nodes_to_quantize:
    for input_name in current_node.input:
      input_name = node_name_from_input(input_name)
      input_node = self.nodes_map[input_name]
      self.quantize_node(input_node)
    self.quantize_node(current_node)
  else:
    new_node = node_def_pb2.NodeDef()
    new_node.CopyFrom(current_node)
    self.add_output_graph_node(new_node)
def quantize_node(self, input_node):
  """Handles quantizing a single node.

  Emits a sub-graph that reshapes the node's output to 1-D, computes its
  min/max, quantizes to quint8, and immediately dequantizes back to float
  under the node's original name; the float computation itself survives
  under "<name>_original".
  """
  input_name = input_node.name
  if input_name in self.already_quantized:
    return
  self.already_quantized[input_name] = True
  original_input_name = input_name + "_original"
  reshape_name = input_name + "_reshape"
  reshape_dims_name = input_name + "_reshape_dims"
  max_name = input_name + "_max"
  min_name = input_name + "_min"
  dims_name = input_name + "_dims"
  quantize_name = input_name + "_quantize"
  dequantize_name = input_name
  # Preserve the float node under a new name.
  original_input_node = node_def_pb2.NodeDef()
  original_input_node.CopyFrom(input_node)
  original_input_node.name = original_input_name
  self.add_output_graph_node(original_input_node)
  # Flatten to a vector (-1) so Min/Max reduce over the whole tensor.
  reshape_dims_node = create_constant_node(reshape_dims_name, -1,
                                           dtypes.int32, [1])
  self.add_output_graph_node(reshape_dims_node)
  reshape_node = create_node("Reshape", reshape_name,
                             [original_input_name, reshape_dims_name])
  set_attr_dtype(reshape_node, "T", dtypes.float32)
  self.add_output_graph_node(reshape_node)
  dims_node = create_constant_node(dims_name, 0, dtypes.int32, [1])
  self.add_output_graph_node(dims_node)
  max_node = create_node("Max", max_name, [reshape_name, dims_name])
  set_attr_dtype(max_node, "T", dtypes.float32)
  set_attr_bool(max_node, "keep_dims", False)
  self.add_output_graph_node(max_node)
  min_node = create_node("Min", min_name, [reshape_name, dims_name])
  set_attr_dtype(min_node, "T", dtypes.float32)
  set_attr_bool(min_node, "keep_dims", False)
  self.add_output_graph_node(min_node)
  # Quantize, then dequantize under the original name so float consumers
  # continue to work unchanged.
  quantize_node = create_node("Quantize", quantize_name,
                              [original_input_name, min_name, max_name])
  set_attr_dtype(quantize_node, "T", dtypes.quint8)
  set_attr_string(quantize_node, "mode", b"MIN_FIRST")
  self.add_output_graph_node(quantize_node)
  dequantize_node = create_node("Dequantize", dequantize_name,
                                [quantize_name, min_name, max_name])
  set_attr_dtype(dequantize_node, "T", dtypes.quint8)
  set_attr_string(dequantize_node, "mode", b"MIN_FIRST")
  self.add_output_graph_node(dequantize_node)
def should_merge_with_fake_quant_node(self):
"""Should the current node merge with self.state.output_node_stack[-1]?"""
if not self.state.output_node_stack:
return False
top = self.state.output_node_stack[-1]
return top[1] == 0 and top[0].op in ["FakeQuantWithMinMaxVars"]
def should_quantize_const(self, node):
  """Returns True if the Const feeding the current consumer must be quantized."""
  if not self.state.output_node_stack:
    return False
  top = self.state.output_node_stack[-1]
  if not top[2]:
    # The consumer did not request a quantized version of this input.
    return False
  dtype = dtypes.as_dtype(node.attr["dtype"].type)
  # NOTE(review): assert is stripped under `python -O`; only float32 consts
  # are expected to reach this point.
  assert dtype == dtypes.float32, (
      "Failed to quantized constant %s of type %s" % (node.name, dtype))
  return True
def eightbitize_nodes_recursively(self, current_node):
  """The entry point for transforming a graph into full eight bit.

  Depth-first walk: inputs are converted first (with a stack entry
  describing the consumer so children know whether to quantize), then the
  current node is replaced by its quantized equivalent or copied verbatim.
  """
  if current_node.name in self.state.already_visited:
    if (self.should_merge_with_fake_quant_node() or
        current_node.name in self.state.merged_with_fake_quant):
      # A node merged with a FakeQuant* op must have exactly one consumer;
      # seeing it again means it fans out elsewhere too.
      raise ValueError("Unsupported graph structure: output of node %s "
                       "is processed by a FakeQuant* node and should have "
                       "no other outputs.", current_node.name)
    return
  self.state.already_visited[current_node.name] = True
  for i, input_node_name in enumerate(current_node.input):
    # Decide whether this particular input of the consumer needs to be
    # available in quantized form (drives should_quantize_const below).
    quantize_input = False
    if current_node.op in ("MatMul", "Conv2D", "BiasAdd", "MaxPool",
                           "AvgPool", "Relu", "Relu6",
                           "BatchNormWithGlobalNormalization"):
      quantize_input = True
    elif current_node.op == "Concat" and i > 0:
      # Input 0 of Concat is the concat dimension, not a tensor.
      quantize_input = (
          dtypes.as_dtype(current_node.attr["T"].type) == dtypes.float32)
    elif current_node.op == "Reshape" and i == 0:
      # Input 1 of Reshape is the shape, not a tensor.
      quantize_input = (
          dtypes.as_dtype(current_node.attr["T"].type) == dtypes.float32)
    self.state.output_node_stack.append((current_node, i, quantize_input))
    input_node_name = node_name_from_input(input_node_name)
    input_node = self.nodes_map[input_node_name]
    self.eightbitize_nodes_recursively(input_node)
    self.state.output_node_stack.pop()
  # Dispatch on op type to the matching conversion routine.
  if current_node.op == "MatMul":
    self.eightbitize_mat_mul_node(current_node)
  elif current_node.op == "Conv2D":
    self.eightbitize_conv_node(current_node)
  elif current_node.op == "BiasAdd":
    self.eightbitize_bias_add_node(current_node)
  elif current_node.op == "MaxPool" or current_node.op == "AvgPool":
    self.eightbitize_single_input_tensor_node(current_node,
                                              self.add_pool_function)
  elif current_node.op == "Relu" or current_node.op == "Relu6":
    self.eightbitize_single_input_tensor_node(current_node,
                                              self.add_relu_function)
  elif (current_node.op == "Concat" and
        dtypes.as_dtype(current_node.attr["T"].type) == dtypes.float32):
    self.eightbitize_concat_node(current_node)
  elif current_node.op == "BatchNormWithGlobalNormalization":
    self.eightbitize_batch_norm_node(current_node)
  elif (current_node.op == "Reshape" and
        dtypes.as_dtype(current_node.attr["T"].type) == dtypes.float32):
    self.eightbitize_reshape_node(current_node)
  elif (self.input_range and
        current_node.op in ("Placeholder", "PlaceholderV2")):
    self.eightbitize_placeholder_node(current_node)
  elif current_node.op == "FakeQuantWithMinMaxVars":
    # It will have been merged into the underlying node.
    pass
  elif current_node.op == "Const":
    if self.should_quantize_const(current_node):
      for n in quantize_weight_eightbit(current_node, b"MIN_FIRST"):
        self.add_output_graph_node(n)
    else:
      new_node = node_def_pb2.NodeDef()
      new_node.CopyFrom(current_node)
      self.add_output_graph_node(new_node)
  ###################################################################
  # Note: if more cases are added here, you may need to update the op
  # name lists in the loop over children at the start of the function.
  ###################################################################
  else:
    # Unhandled ops pass through as float.
    new_node = node_def_pb2.NodeDef()
    new_node.CopyFrom(current_node)
    self.add_output_graph_node(new_node)
  if (self.should_merge_with_fake_quant_node() and
      current_node.name not in self.state.merged_with_fake_quant):
    raise ValueError(
        "FakeQuant* node %s failed to merge with node %s of type %s" %
        (self.state.output_node_stack[-1][0], current_node.name,
         current_node.op))
def add_eightbit_prologue_nodes(self, original_node):
  """Adds input conversion nodes to handle quantizing the underlying node.

  Every input of *original_node* is routed through a quantize sub-graph;
  returns the quantized input names followed by all of their interleaved
  min/max tensor names, in input order.
  """
  namespace_prefix = original_node.name + "_eightbit"
  reshape_dims_name, reduction_dims_name = self.add_common_quantization_nodes(
      namespace_prefix)
  quantized_input_names = []
  min_max_names = []
  for original_input_name in original_node.input:
    quantize_name, min_name, max_name = self.eightbitize_input_to_node(
        namespace_prefix, original_input_name, reshape_dims_name,
        reduction_dims_name)
    quantized_input_names.append(quantize_name)
    min_max_names.extend([min_name, max_name])
  # Quantized tensors first, then their min/max pairs.
  return quantized_input_names + min_max_names
def add_common_quantization_nodes(self, namespace_prefix):
  """Builds constant nodes needed for quantization of inputs.

  Emits the shared Reshape dims (-1, flatten to a vector) and reduction
  dims (axis 0) constants, returning their names.
  """
  reshape_dims_name = namespace_prefix + "_reshape_dims"
  reduction_dims_name = namespace_prefix + "_reduction_dims"
  self.add_output_graph_node(
      create_constant_node(reshape_dims_name, -1, dtypes.int32, [1]))
  self.add_output_graph_node(
      create_constant_node(reduction_dims_name, 0, dtypes.int32, [1]))
  return reshape_dims_name, reduction_dims_name
def eightbitize_input_to_node(self, namespace_prefix, original_input_name,
                              reshape_dims_name, reduction_dims_name):
  """Takes one float input to an op, and converts it to quantized form.

  Builds Reshape -> Min/Max -> QuantizeV2 over the input and returns a
  (quantized name, min output name, max output name) triple; the min/max
  names refer to output ports 1 and 2 of the QuantizeV2 node.
  """
  unique_input_name = unique_node_name_from_input(original_input_name)
  reshape_input_name = namespace_prefix + "_reshape_" + unique_input_name
  min_input_name = namespace_prefix + "_min_" + unique_input_name
  max_input_name = namespace_prefix + "_max_" + unique_input_name
  quantize_input_name = namespace_prefix + "_quantize_" + unique_input_name
  # Flatten so Min/Max reduce over the whole tensor.
  reshape_input_node = create_node("Reshape", reshape_input_name,
                                   [original_input_name, reshape_dims_name])
  set_attr_dtype(reshape_input_node, "T", dtypes.float32)
  self.add_output_graph_node(reshape_input_node)
  min_input_node = create_node("Min", min_input_name,
                               [reshape_input_name, reduction_dims_name])
  set_attr_dtype(min_input_node, "T", dtypes.float32)
  set_attr_bool(min_input_node, "keep_dims", False)
  self.add_output_graph_node(min_input_node)
  max_input_node = create_node("Max", max_input_name,
                               [reshape_input_name, reduction_dims_name])
  set_attr_dtype(max_input_node, "T", dtypes.float32)
  set_attr_bool(max_input_node, "keep_dims", False)
  self.add_output_graph_node(max_input_node)
  quantize_input_node = create_node(
      "QuantizeV2", quantize_input_name,
      [original_input_name, min_input_name, max_input_name])
  set_attr_dtype(quantize_input_node, "T", dtypes.quint8)
  set_attr_string(quantize_input_node, "mode", b"MIN_FIRST")
  self.add_output_graph_node(quantize_input_node)
  # QuantizeV2 re-emits the actually used min/max on ports 1 and 2.
  min_output_name = quantize_input_name + ":1"
  max_output_name = quantize_input_name + ":2"
  return quantize_input_name, min_output_name, max_output_name
def add_quantize_down_nodes(self, original_node, quantized_output_name):
  """Requantizes a qint32 op result down to quint8.

  The requantization range comes from, in priority order: the min/max
  inputs of a FakeQuant node being merged, the global fallback range, or a
  freshly added RequantizationRange node.  Returns the Requantize node's
  name.
  """
  quantized_outputs = [
      quantized_output_name, quantized_output_name + ":1",
      quantized_output_name + ":2"
  ]
  min_max_inputs = None
  if self.should_merge_with_fake_quant_node():
    # Use the inputs to the FakeQuantWithMinMaxVars node as the inputs to
    # Requantize.
    fake_quant_node = self.state.output_node_stack[-1][0]
    min_max_inputs = [fake_quant_node.input[1], fake_quant_node.input[2]]
    assert original_node.name not in self.state.merged_with_fake_quant
    self.state.merged_with_fake_quant[original_node.name] = True
  elif self.fallback_quantization_range:
    min_max_inputs = [
        "fallback_quantization_min_value:0",
        "fallback_quantization_max_value:0"
    ]
  else:
    # Add a RequantizationRange node for finding the min and max values.
    requant_range_node = create_node(
        "RequantizationRange", original_node.name + "_eightbit_requant_range",
        quantized_outputs)
    set_attr_dtype(requant_range_node, "Tinput", dtypes.qint32)
    self.add_output_graph_node(requant_range_node)
    min_max_inputs = [
        requant_range_node.name + ":0", requant_range_node.name + ":1"
    ]
  requantize_node = create_node("Requantize",
                                original_node.name + "_eightbit_requantize",
                                quantized_outputs + min_max_inputs)
  set_attr_dtype(requantize_node, "Tinput", dtypes.qint32)
  set_attr_dtype(requantize_node, "out_type", dtypes.quint8)
  self.add_output_graph_node(requantize_node)
  return requantize_node.name
def add_dequantize_result_node(self,
                               quantized_output_name,
                               original_node_name,
                               min_tensor_index=1):
  """Adds a Dequantize node converting a quantized result back to float.

  The Dequantize takes over `original_node_name` (or the merged FakeQuant
  node's name) so float consumers keep working.  `min_tensor_index` is the
  output port carrying the min value; max is assumed to be the next port.
  """
  min_max_inputs = [
      "%s:%s" % (quantized_output_name, min_tensor_index),
      "%s:%s" % (quantized_output_name, (min_tensor_index + 1))
  ]
  dequantize_name = original_node_name
  if self.should_merge_with_fake_quant_node():
    fake_quant_node = self.state.output_node_stack[-1][0]
    if original_node_name not in self.state.merged_with_fake_quant:
      # Not already requantized with the FakeQuant range; use it here.
      min_max_inputs = [fake_quant_node.input[1], fake_quant_node.input[2]]
      self.state.merged_with_fake_quant[original_node_name] = True
    dequantize_name = fake_quant_node.name
  dequantize_node = create_node(
      "Dequantize", dequantize_name,
      [quantized_output_name, min_max_inputs[0], min_max_inputs[1]])
  set_attr_dtype(dequantize_node, "T", dtypes.quint8)
  set_attr_string(dequantize_node, "mode", b"MIN_FIRST")
  self.add_output_graph_node(dequantize_node)
def eightbitize_mat_mul_node(self, original_node):
  """Replaces a MatMul node with the eight bit equivalent sub-graph."""
  quantized_name = original_node.name + "_eightbit_quantized_mat_mul"
  prologue_inputs = self.add_eightbit_prologue_nodes(original_node)
  quantized_node = create_node("QuantizedMatMul", quantized_name,
                               prologue_inputs)
  # quint8 x quint8 accumulates into qint32, requantized to quint8 below.
  set_attr_dtype(quantized_node, "T1", dtypes.quint8)
  set_attr_dtype(quantized_node, "T2", dtypes.quint8)
  set_attr_dtype(quantized_node, "Toutput", dtypes.qint32)
  copy_attr(quantized_node, "transpose_a",
            original_node.attr["transpose_a"])
  copy_attr(quantized_node, "transpose_b",
            original_node.attr["transpose_b"])
  self.add_output_graph_node(quantized_node)
  requantize_name = self.add_quantize_down_nodes(original_node,
                                                 quantized_name)
  self.add_dequantize_result_node(requantize_name, original_node.name)
def eightbitize_conv_node(self, original_node):
  """Replaces a Conv2D node with the eight bit equivalent sub-graph."""
  prologue_inputs = self.add_eightbit_prologue_nodes(original_node)
  quantized_name = original_node.name + "_eightbit_quantized_conv"
  quantized_node = create_node("QuantizedConv2D", quantized_name,
                               prologue_inputs)
  copy_attr(quantized_node, "strides", original_node.attr["strides"])
  copy_attr(quantized_node, "padding", original_node.attr["padding"])
  # quint8 input/filter accumulate into qint32, requantized to quint8 below.
  set_attr_dtype(quantized_node, "Tinput", dtypes.quint8)
  set_attr_dtype(quantized_node, "Tfilter", dtypes.quint8)
  set_attr_dtype(quantized_node, "out_type", dtypes.qint32)
  self.add_output_graph_node(quantized_node)
  requantize_name = self.add_quantize_down_nodes(original_node,
                                                 quantized_name)
  self.add_dequantize_result_node(requantize_name, original_node.name)
def eightbitize_bias_add_node(self, original_node):
  """Replaces a BiasAdd node with the eight bit equivalent sub-graph."""
  quantized_name = (
      original_node.name + "_eightbit_quantized_bias_add")
  prologue_inputs = self.add_eightbit_prologue_nodes(original_node)
  quantized_node = create_node("QuantizedBiasAdd", quantized_name,
                               prologue_inputs)
  # quint8 + quint8 accumulates into qint32, requantized to quint8 below.
  set_attr_dtype(quantized_node, "T1", dtypes.quint8)
  set_attr_dtype(quantized_node, "T2", dtypes.quint8)
  set_attr_dtype(quantized_node, "out_type", dtypes.qint32)
  self.add_output_graph_node(quantized_node)
  requantize_name = self.add_quantize_down_nodes(original_node,
                                                 quantized_name)
  self.add_dequantize_result_node(requantize_name, original_node.name)
def eightbitize_single_input_tensor_node(self, original_node,
                                         add_op_function):
  """Replaces a single-tensor node with the eight bit equivalent sub-graph.

  Converts a node like this:

       Shape(f)   Input(f)
        |          |
        +--------v v
          Operation
              |
              v
             (f)

  Into a quantized equivalent:

                  Input(f)              ReshapeDims
                    +------v v-------------+
                    |    Reshape
                    |      |
                    |      |          ReductionDims
                    |      +-----+         |
                    |      | +---c---------+
                    |      v v   v v-------+
                    |       Min   Max
                    |  +----+      |
                    v  v  v--------+
                    Quantize
                        |
                        v
                 QuantizedOperation
                    |   |   |
                    v   v   v
                    Dequantize
                        |
                        v
                       (f)

  Args:
    original_node: Float node to be converted.
    add_op_function: Function to create the actual node.

  Returns:
    Subgraph representing the quantized version of the original node.
  """
  quantized_op_name = original_node.name + "_eightbit_quantized"
  # e.g. MaxPool -> QuantizedMaxPool, Relu -> QuantizedRelu.
  quantized_op_type = "Quantized" + original_node.op
  all_input_names = self.add_eightbit_prologue_nodes(original_node)
  quantized_op_node = create_node(quantized_op_type, quantized_op_name,
                                  all_input_names)
  # The callback fills in op-specific attrs (pooling window, dtypes, ...).
  add_op_function(original_node, quantized_op_node)
  self.add_output_graph_node(quantized_op_node)
  self.add_dequantize_result_node(quantized_op_name, original_node.name)
def add_pool_function(self, original_node, quantized_op_node):
  """Copies pooling attrs (ksize/strides/padding) onto the quantized op."""
  set_attr_dtype(quantized_op_node, "T", dtypes.quint8)
  copy_attr(quantized_op_node, "ksize", original_node.attr["ksize"])
  copy_attr(quantized_op_node, "strides", original_node.attr["strides"])
  copy_attr(quantized_op_node, "padding", original_node.attr["padding"])
def add_relu_function(self, unused_arg_node, quantized_op_node):
  """Marks the quantized activation op's input type as quint8."""
  set_attr_dtype(quantized_op_node, "Tinput", dtypes.quint8)
def eightbitize_concat_node(self, original_node):
  """Replaces a Concat node with the eight bit equivalent sub-graph.

  Converts a node like this:

       Shape(f)   Input0(f)   Input1(f)
        |          |             |
        +--------v v v-----------+
             Concat
               |
               v
              (f)

  Into a quantized equivalent:

       Shape(f)  Input0(f)             ReshapeDims                  Input1(f)
       |         +------v v--------------+------------------v v------+
       |         |    Reshape                             Reshape    |
       |         |      |                                    |       |
       |         |      |           ReductionDims            |       |
       |         |      +------+         |    +--------+     |       |
       |         |      | +---c---------+-----------c-----+  |       |
       |         |      +v v   v v-------+---------v v   v v+        |
       |         |       Min   Max                  Min   Max        |
       |         |  +----+      |                    |  +-----+      |
       |         v  v  v--------+                    +----------v  v v
       |         Quantize                                       Quantize
       |            +------------------+       +----------------------+
       +-------------------------------+       |        |
                                       v       v        v
                                      QuantizedConcat
                                       |   |   |
                                       v   v   v
                                      Dequantize
                                           |
                                           v
                                          (f)

  Args:
    original_node: Float node to be converted.

  Returns:
    Subgraph representing the quantized version of the original node.
  """
  namespace_prefix = original_node.name + "_eightbit"
  quantized_concat_name = namespace_prefix + "_quantized_concat"
  reshape_dims_name, reduction_dims_name = self.add_common_quantization_nodes(
      namespace_prefix)
  # Input 0 of Concat is the concat dimension; only the remaining inputs
  # are tensors to quantize.
  shape_input_name = original_node.input[0]
  original_inputs = original_node.input[1:]
  input_names = []
  min_names = []
  max_names = []
  for original_input_name in original_inputs:
    quantize_input_name, min_input_name, max_input_name = (
        self.eightbitize_input_to_node(namespace_prefix, original_input_name,
                                       reshape_dims_name,
                                       reduction_dims_name))
    input_names.append(quantize_input_name)
    min_names.append(min_input_name)
    max_names.append(max_input_name)
  # QuantizedConcat wants: dim, all tensors, all mins, all maxes.
  all_input_names = [shape_input_name]
  all_input_names.extend(input_names)
  all_input_names.extend(min_names)
  all_input_names.extend(max_names)
  quantized_concat_node = create_node("QuantizedConcat",
                                      quantized_concat_name, all_input_names)
  set_attr_int(quantized_concat_node, "N", len(original_inputs))
  set_attr_dtype(quantized_concat_node, "T", dtypes.quint8)
  self.add_output_graph_node(quantized_concat_node)
  self.add_dequantize_result_node(quantized_concat_name, original_node.name)
def eightbitize_placeholder_node(self, current_node):
  """Replaces a placeholder node with a quint8 placeholder node+dequantize.

  The range constants "quantized_input_min_value"/"quantized_input_max_value"
  (added by rewrite() from self.input_range) feed the Dequantize.
  """
  name = current_node.name
  # Convert the placeholder into a quantized type.
  output_node = node_def_pb2.NodeDef()
  output_node.CopyFrom(current_node)
  set_attr_dtype(output_node, "dtype", dtypes.quint8)
  output_node.name += "_original_input"
  self.add_output_graph_node(output_node)
  # Add a dequantize to convert back to float.
  dequantize_node = create_node("Dequantize", name, [
      output_node.name, "quantized_input_min_value",
      "quantized_input_max_value"
  ])
  set_attr_dtype(dequantize_node, "T", dtypes.quint8)
  set_attr_string(dequantize_node, "mode", b"MIN_FIRST")
  self.add_output_graph_node(dequantize_node)
  # For the descent over the graph to work, the dequantize node must be named
  # current_node.name. However, for the feeding of the graph to work, the
  # placeholder must have the name current_node.name; so record a final set
  # of renames to apply after all processing has been done.
  self.final_node_renames[output_node.name] = name
  self.final_node_renames[dequantize_node.name] = name + "_dequantize"
def eightbitize_reshape_node(self, original_node):
  """Replaces a Reshape node with the eight bit equivalent sub-graph.

  Args:
    original_node: Float node to be converted.

  Returns:
    Subgraph representing the quantized version of the original node.
  """
  namespace_prefix = original_node.name + "_eightbit"
  quantized_reshape_name = namespace_prefix + "_quantized_reshape"
  reshape_dims_name, reduction_dims_name = self.add_common_quantization_nodes(
      namespace_prefix)
  # Input 1 of Reshape is the target shape; only input 0 is a tensor.
  shape_input_name = original_node.input[1]
  quantize_input_name, min_input_name, max_input_name = (
      self.eightbitize_input_to_node(namespace_prefix, original_node.input[0],
                                     reshape_dims_name, reduction_dims_name))
  quantized_reshape_node = create_node(
      "QuantizedReshape", quantized_reshape_name,
      [quantize_input_name, shape_input_name, min_input_name, max_input_name])
  set_attr_dtype(quantized_reshape_node, "T", dtypes.quint8)
  self.add_output_graph_node(quantized_reshape_node)
  # Reshape does not change values, so no requantize step is needed.
  self.add_dequantize_result_node(quantized_reshape_name, original_node.name)
def eightbitize_batch_norm_node(self, original_node):
  """Replaces a BatchNormWithGlobalNormalization node with its eight bit
  equivalent sub-graph.

  All five inputs (input, mean, variance, beta, gamma) are quantized
  separately, fed to QuantizedBatchNormWithGlobalNormalization, then
  requantized and dequantized back to float under the original name.
  """
  namespace_prefix = original_node.name + "_eightbit"
  original_input_name = original_node.input[0]
  original_mean_name = original_node.input[1]
  original_variance_name = original_node.input[2]
  original_beta_name = original_node.input[3]
  original_gamma_name = original_node.input[4]
  quantized_batch_norm_name = namespace_prefix + "_quantized_batch_norm"
  reshape_dims_name, reduction_dims_name = self.add_common_quantization_nodes(
      namespace_prefix)
  quantize_input_name, min_input_name, max_input_name = (
      self.eightbitize_input_to_node(namespace_prefix, original_input_name,
                                     reshape_dims_name, reduction_dims_name))
  quantize_mean_name, min_mean_name, max_mean_name = (
      self.eightbitize_input_to_node(namespace_prefix, original_mean_name,
                                     reshape_dims_name, reduction_dims_name))
  quantize_variance_name, min_variance_name, max_variance_name = (
      self.eightbitize_input_to_node(namespace_prefix, original_variance_name,
                                     reshape_dims_name, reduction_dims_name))
  quantize_beta_name, min_beta_name, max_beta_name = (
      self.eightbitize_input_to_node(namespace_prefix, original_beta_name,
                                     reshape_dims_name, reduction_dims_name))
  quantize_gamma_name, min_gamma_name, max_gamma_name = (
      self.eightbitize_input_to_node(namespace_prefix, original_gamma_name,
                                     reshape_dims_name, reduction_dims_name))
  # Each quantized input is followed by its min/max pair, in op order.
  quantized_batch_norm_node = create_node(
      "QuantizedBatchNormWithGlobalNormalization", quantized_batch_norm_name,
      [
          quantize_input_name, min_input_name, max_input_name,
          quantize_mean_name, min_mean_name, max_mean_name,
          quantize_variance_name, min_variance_name, max_variance_name,
          quantize_beta_name, min_beta_name, max_beta_name,
          quantize_gamma_name, min_gamma_name, max_gamma_name
      ])
  set_attr_dtype(quantized_batch_norm_node, "Tinput", dtypes.quint8)
  set_attr_dtype(quantized_batch_norm_node, "out_type", dtypes.qint32)
  copy_attr(quantized_batch_norm_node, "scale_after_normalization",
            original_node.attr["scale_after_normalization"])
  copy_attr(quantized_batch_norm_node, "variance_epsilon",
            original_node.attr["variance_epsilon"])
  self.add_output_graph_node(quantized_batch_norm_node)
  quantize_down_name = self.add_quantize_down_nodes(original_node,
                                                    quantized_batch_norm_name)
  self.add_dequantize_result_node(quantize_down_name, original_node.name)
def add_output_graph_node(self, output_node):
    """Appends a single node to the rewritten output graph."""
    self.output_graph.node.extend([output_node])
def remove_redundant_quantization(self, old_graph):
    """Removes unneeded pairs of quantize/dequantize ops from the graph.

    This is a bit of a tricky function, because it's attempting to spot the
    pattern of dequantizing from eight-bit up to float, and then immediately
    quantizing back down to eight bits again, that's introduced by previous
    passes that do 'key-hole' conversions of individual nodes but have to
    convert back to float to match the previous output interface, since they
    don't know that the next op can handle quantized tensors.
    It works by:
    - Looking for Quantize nodes.
    - Checking to see if their first input is a Dequantize node.
    - Seeing if their min/max inputs come from Min/Max nodes.
    - Making sure those Min/Max nodes are being fed from the same Dequantize.
    - Or that the Min is indirectly being fed from the same Dequantize as Max.
    - Making sure the Dequantize is going through a Reshape (which we add
      during the previous pass when we create the quantize sub-graph).
    - Looking for the dims Const op for the Min/Max dims.
    If all of these conditions are met, then it's a sub-graph pattern that
    we know how to optimize out (and is likely the common one we've introduced).
    We then rewire the graph to skip it entirely, and then rely on the dead node
    removal pass to get rid of any nodes that are no longer needed.

    Args:
      old_graph: The model we'll be stripping redundant nodes from.

    Returns:
      A graph with the unnecessary nodes removed.

    Raises:
      ValueError: Two nodes with the same name were found in the graph.
    """
    old_nodes_map = self.create_nodes_map(old_graph)
    self.output_graph = graph_pb2.GraphDef()
    # Maps a tensor name (always carrying an explicit output port) to the
    # replacement input string that should be spliced in where it was consumed.
    inputs_to_rename = {}
    # We go through all the nodes, looking for any that match the patterns we
    # know how to optimize away.
    for node in old_graph.node:
        # We always start with a Quantize node, and examine its inputs to see if
        # they are in a form that can be removed.
        if node.op not in ["Quantize", "QuantizeV2"]:
            continue
        dequantize_node_name = node_name_from_input(node.input[0])
        if dequantize_node_name not in old_nodes_map:
            raise ValueError("Input node name '" + dequantize_node_name +
                             "' not found in node '" + node.name + "'")
        dequantize_node = old_nodes_map[dequantize_node_name]
        # Do we have a Dequantize feeding in, with the same type as the Quantize?
        if dequantize_node.op != "Dequantize":
            continue
        if node.attr["T"] != dequantize_node.attr["T"]:
            continue
        # Now look at the other inputs, and ensure they're Min/Max nodes.
        min_node_name = node_name_from_input(node.input[1])
        max_node_name = node_name_from_input(node.input[2])
        min_node = old_nodes_map[min_node_name]
        max_node = old_nodes_map[max_node_name]
        is_min_right_type = (min_node.op in ["Min", "Dequantize"])
        is_max_right_type = (max_node.op in ["Max", "Dequantize"])
        if not is_min_right_type or not is_max_right_type:
            print("Didn't find expected types on inputs : %s, %s." % (min_node.op,
                                                                      max_node.op))
            continue
        min_node_input_name = node_name_from_input(min_node.input[0])
        max_node_input_name = node_name_from_input(max_node.input[0])
        # There are two different patterns for Min nodes we can recognize, one
        # where the input comes directly from the same one as the Max, and
        # another where we run it through another Min first, so check for both.
        is_same_input = False
        if min_node_input_name == max_node_input_name:
            is_same_input = True
        else:
            # Indirect pattern: Min fed through a Concat whose second input is
            # another Min over the same source the Max reads from.
            first_min_node_input = old_nodes_map[min_node_input_name]
            if first_min_node_input.op == "Concat":
                second_min_node_name = node_name_from_input(
                    first_min_node_input.input[1])
                second_min_node = old_nodes_map[second_min_node_name]
                if second_min_node.op == "Min":
                    second_min_node_input_name = node_name_from_input(
                        second_min_node.input[0])
                    is_same_input = (second_min_node_input_name == max_node_input_name)
        if not is_same_input:
            print("Different min/max inputs: " + min_node_input_name)
            continue
        # We recognize this pattern, so mark the graph edges to be rewired to
        # route around it entirely, since we know it's a no-op.
        dequantize_source_name = node_name_from_input(dequantize_node.input[0])
        node_tensor_name = ensure_tensor_name_has_port(node.name)
        # Quantize outputs: port 0 = values, port 1 = min, port 2 = max.
        min_tensor_name = node.name + ":1"
        max_tensor_name = node.name + ":2"
        inputs_to_rename[node_tensor_name] = dequantize_source_name
        inputs_to_rename[min_tensor_name] = dequantize_node.input[1]
        inputs_to_rename[max_tensor_name] = dequantize_node.input[2]
    # Finally we apply all the rewiring we've marked to the graph.
    for node in old_graph.node:
        for index, input_full_name in enumerate(node.input):
            input_name = ensure_tensor_name_has_port(input_full_name)
            if input_name in inputs_to_rename:
                node.input[index] = inputs_to_rename[input_name]
        self.add_output_graph_node(node)
    return self.output_graph
def apply_final_node_renames(self):
    """Applies node renames in self.final_node_renames to self.output_graph."""
    renames = self.final_node_renames
    previous_graph = self.output_graph
    self.output_graph = graph_pb2.GraphDef()
    for current_node in previous_graph.node:
        current_node.name = renames.get(current_node.name, current_node.name)
        for slot, raw_input in enumerate(current_node.input):
            producer = node_name_from_input(raw_input)
            tensor_ref = ensure_tensor_name_has_port(raw_input)
            if producer in renames:
                # keep the original port suffix while swapping the producer name
                current_node.input[slot] = "%s%s" % (renames[producer],
                                                     tensor_ref[len(producer):])
        self.add_output_graph_node(current_node)
    return self.output_graph
def remove_dead_nodes(self, output_names):
    """Prunes self.output_graph down to what output_names actually need."""
    self.output_graph = graph_util.extract_sub_graph(self.output_graph,
                                                     output_names)
def quantize_weights(self, input_graph, quantization_mode):
    """Quantize float Const ops.

    Two families of behaviour, both replacing float Const ops with quantized
    values, selected by quantization_mode:

    1. "weights_rounded": each float Const is replaced by another float Const
       whose values are mapped to the center of one of 1<<FLAGS.bitdepth
       buckets. The raw model size is unchanged, but compression algorithms
       such as zip (as used for compressing apks) or bzip2 achieve a very
       good compression ratio on the result.
    2. Other modes ("MIN_COMBINED" or "MIN_FIRST"): each float Const is
       quantized and replaced by four ops that dequantize at runtime:
       * an eight-bit Const (bucket indices, same shape as the original),
       * two float Consts (min and max of the original values),
       * a Dequantize op converting the eight-bit consts back to float.

    The quantization mode matters because we see accuracy problems when
    quantizing weights for different situations depending on the algorithm
    used; the underlying cause is not fully understood yet.

    Args:
      input_graph: A GraphDef of the model containing float Const ops.
      quantization_mode: How to quantize and dequantize the values.

    Returns:
      A GraphDef of the converted graph.

    Raises:
      ValueError: If quantization_mode is unsupported.
    """
    output_graph = graph_pb2.GraphDef()
    for candidate in input_graph.node:
        is_float_const = (
            candidate.op == "Const" and
            dtypes.as_dtype(candidate.attr["dtype"].type) == dtypes.float32)
        if not is_float_const:
            # anything that is not a float Const is copied over untouched
            passthrough = node_def_pb2.NodeDef()
            passthrough.CopyFrom(candidate)
            output_graph.node.extend([passthrough])
        elif quantization_mode == "weights_rounded":
            output_graph.node.extend(quantize_weight_rounded(candidate))
        elif quantization_mode in (b"MIN_COMBINED", b"MIN_FIRST"):
            output_graph.node.extend(
                quantize_weight_eightbit(candidate, quantization_mode))
        else:
            raise ValueError("Unsupported quantization mode %s." %
                             quantization_mode)
    return output_graph
def set_input_graph(self, new_input_graph):
    """Replaces the working input graph and refreshes the name lookup cache."""
    self.input_graph = new_input_graph
    # nodes_map must stay in sync with input_graph; rebuild it immediately
    self.nodes_map = self.create_nodes_map(self.input_graph)
def main(unused_args):
    """Loads a GraphDef, rewrites it according to FLAGS.mode, saves the result.

    Args:
      unused_args: leftover command line arguments (ignored).

    Returns:
      0 on success, -1 when the input file or requested mode is invalid.
    """
    if not gfile.Exists(FLAGS.input):
        print("Input graph file '" + FLAGS.input + "' does not exist!")
        return -1

    known_modes = [
        "round", "quantize", "eightbit", "weights", "test", "weights_rounded"
    ]
    # NOTE(review): this accepts any FLAGS.mode that is a substring of a known
    # mode (e.g. "eight"); kept as-is since callers may rely on it — confirm.
    if not any(FLAGS.mode in s for s in known_modes):
        print("mode is '" + FLAGS.mode + "', not in " + ", ".join(known_modes) +
              ".")
        return -1

    tf_graph = graph_pb2.GraphDef()
    with gfile.Open(FLAGS.input, "rb") as f:
        data = f.read()
        tf_graph.ParseFromString(data)

    # Import into a throwaway Graph so malformed inputs fail early.
    graph = ops.Graph()
    with graph.as_default():
        importer.import_graph_def(tf_graph, input_map={}, name="")

    quantized_input_range = None
    if FLAGS.quantized_input:
        quantized_input_range = [
            FLAGS.quantized_input_min, FLAGS.quantized_input_max
        ]

    fallback_quantization_range = None
    if (FLAGS.quantized_fallback_min is not None or
            FLAGS.quantized_fallback_max is not None):
        # fallback min/max only make sense as a pair
        assert FLAGS.quantized_fallback_min is not None
        assert FLAGS.quantized_fallback_max is not None
        fallback_quantization_range = [
            FLAGS.quantized_fallback_min, FLAGS.quantized_fallback_max
        ]

    rewriter = GraphRewriter(tf_graph, FLAGS.mode, quantized_input_range,
                             fallback_quantization_range)

    output_graph = rewriter.rewrite(FLAGS.output_node_names.split(","))

    # Use a context manager so the output file is flushed and closed even if
    # serialization raises (the previous code leaked the FastGFile handle).
    with gfile.FastGFile(FLAGS.output, "wb") as f:
        f.write(output_graph.SerializeToString())

    return 0
if __name__ == "__main__":
    # silence TensorFlow's C++ logging (level 2 filters INFO and WARNING)
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
    app.run()
| [
"jonrodd@gmail.com"
] | jonrodd@gmail.com |
5adedc67e96039244bbac014cd28b4c08b0e6fb5 | 11ee4dd7b3770e183c8df92fd47d630dd85dde4b | /parse-detail_topreality.py | 1d036777890fd6c25a1abe11706a4ca65c98800c | [] | no_license | marxsk/reality | 789619eb697ba2c50186a6472166144906503dc2 | 33a59b49d01cb75e14a14a5d39f322961a244fae | refs/heads/master | 2021-08-19T01:58:34.433189 | 2017-11-24T12:13:09 | 2017-11-24T12:13:09 | 111,900,941 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,237 | py | #!/usr/bin/python
# coding=utf-8
## time-spent: 1.5 + 1
#
# Scrape a single topreality.sk estate-detail page (HTML read from stdin) and
# print the extracted fields as CSV via the shared `reality` helper module.
# Usage: parse-detail_topreality.py <url> < detail-page.html
# Python 2 script (reload/setdefaultencoding, `unicode` builtin).
import re
import sys
import reality
from bs4 import BeautifulSoup

# NOTE(review): 're' appears unused in this script.
# Python 2 only: force utf-8 as the default codec for implicit conversions.
reload(sys)
sys.setdefaultencoding('utf-8')

soup = BeautifulSoup(sys.stdin, 'html.parser')

info = {}
# output column order for the final CSV row
columns = ["url", "title", "text", "agency", "agency_person", "price", "price_currency", "type", "offer", "floor", "floor_max", "area_usable", "street", "id_estate", "condition", "attr_elevator", "attr_balcony", "attr_cellar"]

info["title"] = soup.find('h1').string.strip()
# contact block is optional — presumably absent on private (non-agency)
# listings; verify against a sample page
if soup.find('div', class_='contact'):
    info["agency"] = soup.find('div', class_='contact').find('strong').string.strip()
    info["agency_person"] = soup.find('div', class_='contactBox').find('strong').string.strip()

# walk the key/value property list; the labels are Slovak
for item in soup.find('div', class_='properties').find('ul').find_all('li'):
    prop = item.find('span').string.strip()
    if prop == '':
        continue

    # "Cena (vrátane provízie)" = price (including commission)
    if prop in [u'Cena vrátane provízie', u'Cena']:
        info["price"] = item.find('meta', itemprop='price').attrs['content']
        info["price_currency"] = item.find('meta', itemprop='currency').attrs['content']
    # "Kategória" = category: estate type plus offer kind
    elif prop == u'Kategória':
        info["type"] = item.find('strong').find('span').string
        # [2:] drops the first two characters (presumably a "- " separator
        # before the offer text) — TODO confirm against live markup
        info["offer"] = item.find('strong').contents[1].strip()[2:]
    # "Poschodie" = floor, formatted "current / max"
    elif prop == u'Poschodie':
        value = item.find('strong').string
        (current, max) = value.split(' / ')
        info["floor"] = current
        if max:
            info["floor_max"] = max
    # any "...plocha" (area) field; [:-2] drops a trailing unit suffix
    elif u'plocha' in prop:
        info['area_usable'] = item.find('strong').contents[0][:-2]
    else:
        # unknown labels are kept verbatim and unified below
        info[prop] = item.find('strong').string

# full description HTML flattened to a single line
info["text"] = unicode(soup.find('p', itemprop='description')).replace("\n", " ").replace("\r", " ")
info["url"] = sys.argv[1]

## unification: map scraped Slovak labels onto the shared column names
reality.renameKey(info, u'Úžitková plocha', 'area_usable')
reality.renameKey(info, u'Ulica', 'street')
reality.renameKey(info, u'Identifikačné číslo:', 'id_estate')
reality.renameKey(info, u'Stav nehnuteľnosti:', 'condition')
reality.renameKey(info, u'Výťah', 'attr_elevator')
reality.renameKey(info, u'Balkón / loggia', 'attr_balcony')
reality.renameKey(info, u'Pivnica', 'attr_cellar')
info.pop(u'Aktualizácia')

reality.printAdv([info], 'csv', columns)
| [
"mgrac@redhat.com"
] | mgrac@redhat.com |
f6bd894147860a53b1f4453f0f72476d39d72a26 | 3380b7307cee312bf48fee11e285286dc4c89e91 | /ex41.py | 7c16a5637f2632e9e0abae0f69cc05d3c950d53f | [] | no_license | yeduxiling/pythonmaster | f6f8a69d7679449a8037e5720ecbae7d6bc3f838 | 480adc80d322b07f92c6b4449dfca9379c5fc441 | refs/heads/master | 2020-09-12T07:21:43.213437 | 2020-01-20T01:32:28 | 2020-01-20T01:32:28 | 222,353,948 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,318 | py | import random
from urllib.request import urlopen
import sys

# word list fetched over HTTP and used to fake plausible identifiers
WORD_URL = "http://learncodethehardway.org/words.txt"
WORDS = []

# drill templates: key = code snippet, value = matching English description.
# Placeholders filled by convert(): %%% = class name, *** = other identifier,
# @@@ = comma-separated parameter list.
PHRASES = {
    "class %%%(%%%)":
    "Make a class named %%% that is-a %%%.",
    "class %%%(object):\n\tdef __init__(self, ***)":
    "class %%% has-a __init__ that takes self and *** params.",
    "class %%%(object):\n\tdef ***(self, @@@)":
    "class %%% has-a function *** that takes self and @@@ params.",
    "*** = %%%()":
    "Set *** to an instance of class %%%.",
    "***.***(@@@)":
    "From *** get the *** function, call it with params self, @@@.",
    "***.*** = '***'":
    "From *** get the *** attribute and set it to '***'."
}

# do they want to drill phrases first (show English, expect code)?
if len(sys.argv) == 2 and sys.argv[1] == 'english':
    PHRASE_FIRST = True
else:
    PHRASE_FIRST = False

# load up the words from the website (network access happens at import time)
for word in urlopen(WORD_URL).readlines():
    WORDS.append(str(word.strip(), encoding = "utf-8"))
def convert(snippet, phrase):
    """Fill the %%%/***/@@@ placeholders in a snippet/phrase pair.

    Draws random words from the module-level WORDS list and substitutes
    them into both strings, returning [filled_snippet, filled_phrase].
    """
    # one capitalized fake class name per %%% occurrence
    class_names = [w.capitalize()
                   for w in random.sample(WORDS, snippet.count("%%%"))]
    # plain words for the *** identifiers
    other_names = random.sample(WORDS, snippet.count("***"))

    # each @@@ becomes a comma-joined list of 1-3 random words
    param_names = []
    for _ in range(snippet.count("@@@")):
        param_count = random.randint(1, 3)
        param_names.append(', '.join(random.sample(WORDS, param_count)))

    results = []
    for sentence in (snippet, phrase):
        filled = sentence[:]
        # substitute one placeholder occurrence per drawn word, in order
        for marker, replacements in (("%%%", class_names),
                                     ("***", other_names),
                                     ("@@@", param_names)):
            for replacement in replacements:
                filled = filled.replace(marker, replacement, 1)
        results.append(filled)

    return results
try:
while True:
snippets = list(PHRASES.keys())
random.shuffle(snippets)
for snippet in snippets:
phrase = PHRASES[snippet]
question, answer = convert(snippet, phrase)
if PHRASE_FIRST:
question, answer = answer, question
print(question)
input("> ")
print(f"ANSWER: {answer}\n\n")
except EOFError:
print("\nBye")
| [
"orcafang@163.com"
] | orcafang@163.com |
cf691c493c3362720715afca7733fbfc619f3e72 | cf01fdab4743790fb3bb330f1cbbe40687a8a5bf | /src/opnsense/scripts/dns/unbound_dhcpd.py | 9e1732f82c06c132cb74578cd376d031633e7ec9 | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] | permissive | silvrwolfboy/core-1 | 7c636f9adaf4c856e4e54ebc1e59acf0d76a7f46 | 2c25b1cffa2e9cc5f916d235e02e85596f1038d4 | refs/heads/master | 2021-02-24T22:57:05.326556 | 2020-03-06T11:35:29 | 2020-03-06T11:37:27 | 245,442,971 | 0 | 1 | BSD-2-Clause | 2020-03-06T14:39:37 | 2020-03-06T14:39:36 | null | UTF-8 | Python | false | false | 7,890 | py | #!/usr/local/bin/python3
"""
Copyright (c) 2016-2020 Ad Schellevis <ad@opnsense.org>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
--------------------------------------------------------------------------------------
watch dhcp lease file and build include file for unbound
"""
import os
import sys
import subprocess
import time
import tempfile
import argparse
import syslog
sys.path.insert(0, "/usr/local/opnsense/site-python")
from daemonize import Daemonize
import watchers.dhcpd
def unbound_control(commands, output_stream=None):
    """ execute (chrooted) unbound-control command
        :param commands: command list (parameters)
        :param output_stream: (optional) output stream; rewound to the start
                              after the command finishes so the caller can read
                              the captured output
        :return: None
    """
    base_command = ['/usr/sbin/chroot', '-u', 'unbound', '-g', 'unbound', '/',
                    '/usr/local/sbin/unbound-control', '-c', '/var/unbound/unbound.conf']
    if output_stream is None:
        # discard output; previously the devnull handle was opened here and
        # never closed, leaking a file descriptor on every call
        with open(os.devnull, 'w') as devnull:
            subprocess.check_call(base_command + commands,
                                  stdout=devnull, stderr=subprocess.STDOUT)
    else:
        subprocess.check_call(base_command + commands,
                              stdout=output_stream, stderr=subprocess.STDOUT)
        output_stream.seek(0)
class UnboundLocalData:
    """Bidirectional bookkeeping of the A records unbound currently serves.

    Mirrors `unbound-control list_local_data` output so the watcher can tell
    whether a dhcp lease is already registered before issuing control commands.
    """

    def __init__(self):
        # address -> list of fqdns, and fqdn -> address
        self._map_by_address = dict()
        self._map_by_fqdn = dict()
        self.load()

    def load(self):
        """(Re)populate both maps from unbound's current local data."""
        self._map_by_address = dict()
        self._map_by_fqdn = dict()
        with tempfile.NamedTemporaryFile() as output_stream:
            unbound_control(['list_local_data'], output_stream)
            for line in output_stream:
                parts = line.decode().split()
                # only A records are tracked; parts[0] is the fqdn with a
                # trailing dot (stripped), parts[4] is the address
                if len(parts) > 4 and parts[3] == 'A':
                    self.add_address(parts[4], parts[0][:-1])

    def add_address(self, address, fqdn):
        """Register fqdn under address in both directions."""
        if address not in self._map_by_address:
            self._map_by_address[address] = list()
        self._map_by_address[address].append(fqdn)
        self._map_by_fqdn[fqdn] = address

    def all_fqdns(self, address, fqdn):
        """Return the set of known fqdns for address, plus fqdn if registered."""
        result = set()
        if address in self._map_by_address:
            for unbfqdn in self._map_by_address[address]:
                result.add(unbfqdn)
        if fqdn in self._map_by_fqdn:
            result.add(fqdn)
        return result

    def cleanup(self, address, fqdn):
        """Drop every registration touching address or fqdn from both maps."""
        # remove all fqdns registered under this address first, then the
        # address entry itself
        if address in self._map_by_address:
            for rfqdn in self._map_by_address[address]:
                if rfqdn in self._map_by_fqdn:
                    del self._map_by_fqdn[rfqdn]
            del self._map_by_address[address]
        # the fqdn may still point at a different (stale) address; drop that too
        if fqdn in self._map_by_fqdn:
            if self._map_by_fqdn[fqdn] in self._map_by_address:
                del self._map_by_address[self._map_by_fqdn[fqdn]]
            del self._map_by_fqdn[fqdn]

    def is_equal(self, address, fqdn):
        """True when fqdn is the single record for address and points back at it."""
        tmp = self.all_fqdns(address, fqdn)
        return len(tmp) == 1 and fqdn in self._map_by_fqdn and self._map_by_fqdn[fqdn] == address
def run_watcher(target_filename, domain):
    """Watch dhcpd leases and mirror hostname<->address records into unbound.

    :param target_filename: include file rewritten on changes; read by unbound
                            on (re)start while unbound-control feeds it live
    :param domain: dns domain appended to dhcp client hostnames
    :return: never returns (infinite watch loop)
    """
    # cleanup interval (seconds)
    cleanup_interval = 60

    # initiate lease watcher and setup cache
    dhcpdleases = watchers.dhcpd.DHCPDLease()
    cached_leases = dict()
    unbound_local_data = UnboundLocalData()

    # start watching dhcp leases
    last_cleanup = time.time()
    while True:
        dhcpd_changed = False
        for lease in dhcpdleases.watch():
            # only track active leases that actually carry a hostname
            if 'ends' in lease and lease['ends'] > time.time() \
                    and 'client-hostname' in lease and 'address' in lease and lease['client-hostname']:
                cached_leases[lease['address']] = lease
                dhcpd_changed = True

        if time.time() - last_cleanup > cleanup_interval:
            # cleanup every x seconds
            last_cleanup = time.time()
            for address in list(cached_leases):
                if cached_leases[address]['ends'] < time.time():
                    hostname = cached_leases[address]['client-hostname']
                    fqdn = '%s.%s' % (hostname, domain)
                    syslog.syslog(
                        syslog.LOG_NOTICE,
                        "dhcpd expired %s @ %s" % (hostname, address)
                    )
                    # Fix: records were registered under the fully qualified
                    # name, so remove that (not the bare hostname) and drop the
                    # address from the local-data bookkeeping — otherwise stale
                    # A/PTR records linger in unbound after lease expiry.
                    unbound_control(['local_data_remove', fqdn])
                    unbound_local_data.cleanup(address, fqdn)
                    del cached_leases[address]
                    dhcpd_changed = True

        if dhcpd_changed:
            # dump dns output to target (used on initial startup, unbound_control is used as live feed)
            with open(target_filename, 'w') as unbound_conf:
                for address in cached_leases:
                    unbound_conf.write('local-data-ptr: "%s %s.%s"\n' % (
                        address, cached_leases[address]['client-hostname'], domain)
                    )
                    unbound_conf.write('local-data: "%s.%s IN A %s"\n' % (
                        cached_leases[address]['client-hostname'], domain, address)
                    )

            # signal unbound
            for address in cached_leases:
                fqdn = '%s.%s' % (cached_leases[address]['client-hostname'], domain)
                if not unbound_local_data.is_equal(address, fqdn):
                    # mapping changed: retract every old record before
                    # registering the new A and PTR pair
                    for tmp_fqdn in unbound_local_data.all_fqdns(address, fqdn):
                        syslog.syslog(syslog.LOG_NOTICE, 'dhcpd entry changed %s @ %s.' % (tmp_fqdn, address))
                        unbound_control(['local_data_remove', tmp_fqdn])
                    unbound_local_data.cleanup(address, fqdn)
                    unbound_control(['local_data', address, 'PTR', fqdn])
                    unbound_control(['local_data', fqdn, 'IN A', address])
                    unbound_local_data.add_address(address, fqdn)

        # wait for next cycle
        time.sleep(1)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--pid', help='pid file location', default='/var/run/unbound_dhcpd.pid')
    parser.add_argument('--target', help='target config file, used when unbound restarts',
                        default='/var/unbound/dhcpleases.conf')
    # NOTE(review): "forground" typo below is user-visible help text; left
    # unchanged here since this is a documentation-only pass.
    parser.add_argument('--foreground', help='run in forground', default=False, action='store_true')
    parser.add_argument('--domain', help='domain to use', default='local')
    inputargs = parser.parse_args()

    syslog.openlog('unbound', logoption=syslog.LOG_DAEMON, facility=syslog.LOG_LOCAL4)

    if inputargs.foreground:
        # foreground: run the watch loop directly (blocks forever)
        run_watcher(target_filename=inputargs.target, domain=inputargs.domain)
    else:
        # default: detach and run the watch loop as a daemon
        syslog.syslog(syslog.LOG_NOTICE, 'daemonize unbound dhcpd watcher.')
        cmd = lambda : run_watcher(target_filename=inputargs.target, domain=inputargs.domain)
        daemon = Daemonize(app="unbound_dhcpd", pid=inputargs.pid, action=cmd)
        daemon.start()
| [
"ad@opnsense.org"
] | ad@opnsense.org |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.