| blob_id (string, 40) | directory_id (string, 40) | path (string, 2-616) | content_id (string, 40) | detected_licenses (list, 0-69) | license_type (2 classes) | repo_name (string, 5-118) | snapshot_id (string, 40) | revision_id (string, 40) | branch_name (string, 4-63) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 2.91k-686M, nullable) | star_events_count (int64, 0-209k) | fork_events_count (int64, 0-110k) | gha_license_id (23 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (213 classes) | src_encoding (30 classes) | language (1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 2-10.3M) | extension (246 classes) | content (string, 2-10.3M) | authors (list, 1) | author_id (string, 0-212) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
23c887ec233ab7b130e0a23bd6c95cf6840e2a47
|
03cf9fd509477cc87910e1ba3f142b1b5fd57126
|
/word.py
|
2fe79223b60d475dd20feb6385fd388819c14190
|
[] |
no_license
|
shanmukr/interviews
|
5f037c7339165a588e3e990096d24ddf34332a4c
|
aa691cc9edd436f62852591d5adf1834d093a777
|
refs/heads/master
| 2020-03-08T01:15:28.028409
| 2019-08-19T07:43:02
| 2019-08-19T07:43:02
| 127,825,733
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 179
|
py
|
import re

# count how many times each whitespace-separated word appears in the file
word_counts = {}
for line in open("ss.pl", "r").readlines():
    for word in line.split(" "):
        word_counts[word] = word_counts.get(word, 0) + 1
print(word_counts)
#lines.close()
#print ("test\n")
|
[
"shanreddy@BLR.ALLEGISINDIA.COM"
] |
shanreddy@BLR.ALLEGISINDIA.COM
|
178333942be49bf2442bdb7bd9a1e54639df14fe
|
cc128e9804ce0cb659421d2b7c98ff4bfbb9d90b
|
/pgd_multiGPU.py
|
c5ef0e9a21aefeb139bcc90c52935d3d305458ee
|
[] |
no_license
|
hope-yao/robust_attention
|
6beb2de2c3b849c66e79ec71ae81ed127cee3079
|
905a32f02bb8d4709666036f6a6e1f82684f8716
|
refs/heads/master
| 2020-04-02T08:52:48.430423
| 2018-10-30T00:13:55
| 2018-10-30T00:13:55
| 154,265,498
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,562
|
py
|
"""
Implementation of attack methods. Running this file as a program will
apply the attack to the model specified by the config file and store
the examples in an .npy file.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
import copy
def get_PGD(sess, adv_grad, feed_dict_pgd, x_input_pl, epsilon=0.1, a=0.002, k=50, rand=True, dist='Linf'):
if dist == 'Linf':
x = get_PGD_Linf(sess, adv_grad, feed_dict_pgd, x_input_pl, epsilon, a, k, rand)
elif dist == 'L2':
x = get_PGD_L2(sess, adv_grad, feed_dict_pgd, x_input_pl, epsilon, a, k, rand)
else:
print('not implemented')
return x
def get_PGD_Linf(sess, adv_grad, feed_dict_pgd, x_input_pl, epsilon, a, k, rand):
"""Given a set of examples (x_nat, y), returns a set of adversarial
examples within epsilon of x_nat in l_infinity norm."""
x_nat = feed_dict_pgd[x_input_pl]
if rand:
x = x_nat + np.random.uniform(-epsilon, epsilon, x_nat.shape)
else:
x = np.copy(x_nat)
for i in range(k):
grad = sess.run(adv_grad, feed_dict=feed_dict_pgd)
x += a * np.sign(grad)
x = np.clip(x, x_nat - epsilon, x_nat + epsilon)
x = np.clip(x, 0, 1) # ensure valid pixel range
return x
def sphere_rand(input_size, epsilon):
'''
algrithm adapted from: https://math.stackexchange.com/questions/87230/picking-random-points-in-the-volume-of-sphere-with-uniform-probability
:param epsilon:
:return:
'''
bs = input_size[0]
img_size = input_size[1:]
x = []
for i in range(bs):
perturb = np.random.normal(0, 1, img_size)
norm = np.linalg.norm(np.reshape(perturb,[-1]),2)
U = np.random.uniform(0, 1, img_size)
U = np.power(U, 1/(img_size[0]*img_size[1]*img_size[2]))
perturb = perturb / norm * epsilon * U
x += [np.expand_dims(perturb,0)]
return np.concatenate(x,0)
def get_PGD_L2(sess, adv_grad, feed_dict_pgd, x_input_pl, epsilon, a, k, rand):
"""Given a set of examples (x_nat, y), returns a set of adversarial
examples within epsilon of x_nat in l_infinity norm."""
x_nat = feed_dict_pgd[x_input_pl]
input_size = x_input_pl.get_shape().as_list()
bs = input_size[0]
if rand:
sphere_perturb = sphere_rand(input_size, np.random.uniform(0,epsilon))
# start from a random point inside L2 sphere
x = x_nat + sphere_perturb
else:
x = np.copy(x_nat)
for i in range(k):
grad = sess.run(adv_grad, feed_dict=feed_dict_pgd)
if 1:
# attack normalize
att_norm2 = np.linalg.norm(np.reshape(grad, [bs, -1]), ord=2, axis=1)
x_i = x + a * grad/np.reshape(att_norm2, [bs,1,1,1]) #perturb along the spherical projection with step size a
# adv img normalize
x_diff = x_i - x_nat #accumulated perturbation
img_norm2 = np.linalg.norm(np.reshape(x_diff, [bs, -1]), ord=2, axis=1)
# bounded_norm = np.clip(img_norm2, 0 ,epsilon)
ratio = np.asarray([img_norm2[i] if img_norm2[i]<epsilon else epsilon for i in range(bs)])#clip accumulated perturbation inside sphere radius epsilon
x = x_nat + x_diff/np.reshape(img_norm2,[bs,1,1,1]) * np.reshape(ratio,[bs,1,1,1])
# ensure valid pixel range
x = np.clip(x, 0, 1)
else:
# attack normalize
att_norm2 = np.linalg.norm(np.reshape(grad, [bs, -1]), ord=2, axis=1)
x_i = x + epsilon * grad / np.reshape(att_norm2, [bs, 1, 1, 1]) # perturb along the spherical projection with step size a
# ensure valid pixel range
x = np.clip(x_i, 0, 1)
return x
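
# Illustrative call sketch (the session, model graph, and feed dict are assumed to
# already exist in the caller; `loss` below stands for whatever attack objective the
# config defines, so these names are placeholders rather than part of this module):
#
#     adv_grad = tf.gradients(loss, x_input_pl)[0]
#     x_adv = get_PGD(sess, adv_grad, feed_dict_pgd, x_input_pl,
#                     epsilon=0.3, a=0.01, k=40, rand=True, dist='Linf')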
|
[
"hope-yao@asu.edu"
] |
hope-yao@asu.edu
|
f081fdf614ade82a67062865d661692811a7435e
|
b73573ca1a718981144dae2b224ecb3efa388de2
|
/Documents/TJ 2015-2016/AI/wordCheck.py
|
c9f0dbf4007c90382bedeff1f2864933cb5b1839
|
[] |
no_license
|
kcgirish11/AIOthello
|
079cfb35b69689bd4cb6c3b145b65d89b4b87203
|
6e899bff93bf35f819f1b3769f5ccd9ae2d2546c
|
refs/heads/master
| 2021-01-13T15:17:32.671362
| 2018-02-20T02:48:49
| 2018-02-20T02:48:49
| 79,284,174
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 412
|
py
|
def wordDifference(str1, str2):
    diffCheck = False
    if len(str1) == len(str2):
        for i in range(len(str1)):
            if str1[i] != str2[i] and diffCheck == False:
                diffCheck = True
            elif str1[i] != str2[i]:
                return 0
        return 1
    else:
        return -1

print wordDifference("kiran", "biran")
print wordDifference("monkey", "cookie")
print wordDifference("anu", "girish")
print wordDifference("castle", "cattle")
|
[
"Kiran@airbears2-10-142-147-175.airbears2.1918.berkeley.edu"
] |
Kiran@airbears2-10-142-147-175.airbears2.1918.berkeley.edu
|
99b268cf408aaaac1a2c69af91e2d5cbe6672722
|
3915a4fcd9eb67d7ca11392a8b3c8c0363dc8fee
|
/flipped.py
|
e519d78f44b631ae59f4ee351cb3c1ef302421fa
|
[
"MIT"
] |
permissive
|
Chunhai-Yu/CarND-Behavioral-Cloning
|
157a952e90120c22a82f790f051f929ba81c5020
|
809cd01a68c90a5f278635f760a5c38e98a7ad01
|
refs/heads/master
| 2022-11-17T20:04:17.564791
| 2020-07-18T21:16:10
| 2020-07-18T21:16:10
| 280,646,161
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 473
|
py
|
# example to show: How does the augmented data look like
import matplotlib.pyplot as plt
import cv2
import numpy as np
originalImage = cv2.imread('/home/workspace/CarND-Behavioral-Cloning-P3/center_2016_12_01_13_31_14_194.jpg')
image_original = cv2.cvtColor(originalImage, cv2.COLOR_BGR2RGB)
'''plt.imshow(image_original)
plt.title("image_original")
plt.show()'''
image_flipped = np.fliplr(image_original)
plt.imshow(image_flipped)
plt.title("image_flipped")
plt.show()
|
[
"qingluchenliu123@gmail.com"
] |
qingluchenliu123@gmail.com
|
bc77b5206343972814d61896dbc370d8d24d3573
|
a811daf7e8ecc3b50e41ae87b6d166067d80df64
|
/src/ml/train_torch_mf_edges.py
|
75e285f1819a7a9e12c80f1732464e7bdee4b8c9
|
[] |
no_license
|
hercules261188/recsys-nlp-graph
|
48656acb7cfeffa9303e432c766d16c6d5c22275
|
647b88779739d39d0912916f3e88f81960ca2d8c
|
refs/heads/master
| 2023-06-24T08:29:21.350155
| 2021-07-18T17:25:27
| 2021-07-18T17:25:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,331
|
py
|
import argparse
import datetime
import numpy as np
import pandas as pd
import torch
from sklearn.metrics import roc_auc_score
from torch import optim
from torch.utils.data import DataLoader
from src.config import MODEL_PATH
from src.ml.data_loader_edges import Edges, EdgesDataset
from src.ml.mf import MF
from src.utils.logger import logger
shuffle = True
emb_dim = 128
epochs = 5
initial_lr = 0.01
# Torch parameters
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
logger.info('Device: {}, emb_dim: {}, epochs: {}, initial_lr: {}'.format(device, emb_dim, epochs, initial_lr))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Training embeddings on torch')
parser.add_argument('read_path', type=str, help='Path to sequences.npy')
parser.add_argument('val_path', type=str, help='Path to val.csv')
parser.add_argument('val_samp_path', type=str, help='Path to val_samp.csv')
parser.add_argument('batch_size', type=int, help='Batchsize for dataloader')
parser.add_argument('n_workers', type=int, help='Number of workers for dataloader')
args = parser.parse_args()
# Initialize dataset
edges = Edges(args.read_path, args.val_path)
dataset = EdgesDataset(edges)
dataloader = DataLoader(dataset, batch_size=args.batch_size, shuffle=shuffle, num_workers=args.n_workers,
collate_fn=dataset.collate)
# Initialize validation set
val_samp = pd.read_csv(args.val_samp_path)
# Get product ID
word2id_func = np.vectorize(edges.get_product_id)
val_samp['product1_id'] = word2id_func(val_samp['product1'].values)
val_samp['product2_id'] = word2id_func(val_samp['product2'].values)
val_samp = val_samp[(val_samp['product1_id'] > -1) & (val_samp['product2_id'] > -1)] # Keep those with valid ID
logger.info('No. of validation samples: {}'.format(val_samp.shape[0]))
product1_id = val_samp['product1_id'].values
product2_id = val_samp['product2_id'].values
# Initialize model
mf = MF(edges.n_unique_tokens, emb_dim).to(device)
# Train loop
optimizer = optim.Adam(mf.parameters(), lr=initial_lr)
results = []
start_time = datetime.datetime.now()
for epoch in range(epochs):
scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, len(dataloader))
running_loss = 0
# Training loop
for i, batches in enumerate(dataloader):
product1 = batches[0].to(device)
product2 = batches[1].to(device)
label = batches[2].to(device)
optimizer.zero_grad()
pred = mf.forward(product1, product2)
loss = mf.loss(pred, label)
loss.backward()
optimizer.step()
scheduler.step()
running_loss = running_loss * 0.9 + loss.item() * 0.1
if i > 0 and i % 1000 == 0:
# Validation Check
with torch.no_grad():
pred = mf.forward(torch.LongTensor(val_samp['product1_id']).to(device),
torch.LongTensor(val_samp['product2_id']).to(device))
score = roc_auc_score(val_samp['edge'], pred.detach().cpu().numpy())
logger.info("Epoch: {}, Seq: {:,}/{:,}, " \
"Loss: {:.4f}, AUC-ROC: {:.4f}, Lr: {:.6f}".format(epoch, i, len(dataloader), running_loss,
score, optimizer.param_groups[0]['lr']))
results.append([epoch, i, running_loss, score])
running_loss = 0
# save model
current_datetime = datetime.datetime.now().strftime('%Y-%m-%d-%H%M')
state_dict_path = '{}/mf_edges_epoch_{}_{}.pt'.format(MODEL_PATH, epoch, current_datetime)
torch.save(mf.state_dict(), state_dict_path)
logger.info('Model state dict saved to {}'.format(state_dict_path))
end_time = datetime.datetime.now()
time_diff = round((end_time - start_time).total_seconds() / 60, 2)
logger.info('Total time taken: {:,} minutes'.format(time_diff))
# Save results
results_df = pd.DataFrame(results, columns=['epoch', 'batches', 'loss', 'auc'])
results_df.to_csv('{}/model_metrics_mf_edges.csv'.format(MODEL_PATH), index=False)
|
[
"eugeneyanziyou@gmail.com"
] |
eugeneyanziyou@gmail.com
|
522c6e557077154eb95e4b57d2e40a9f642a56d3
|
37532586142349cec8d2b88c927585522f34b288
|
/110. Balanced Binary tree/isBalanced.py
|
4cd6cd6ed9daf91d530b5b2d42e166acbb0aa5a4
|
[] |
no_license
|
wangshanmin/leetcode
|
893da1113c97df7a49660dba04716c9e511fb81f
|
1eb962b57fe9e21d98611f3f8ab2629b1f6a0096
|
refs/heads/master
| 2021-04-15T14:57:30.643735
| 2018-07-05T12:44:33
| 2018-07-05T12:44:33
| 126,417,754
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,058
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun May 13 14:54:44 2018

@author: wangshanmin
"""

# Definition for a binary tree node.
class TreeNode:
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None

class Solution:
    def cal_depth(self, root):
        if not root:
            return 0
        else:
            return max(1 + self.cal_depth(root.left), 1 + self.cal_depth(root.right))

    def isBalanced(self, root):
        """
        :type root: TreeNode
        :rtype: bool
        """
        if not root:
            return True
        return abs(self.cal_depth(root.left) - self.cal_depth(root.right)) < 2 and self.isBalanced(root.left) and self.isBalanced(root.right)

if __name__ == '__main__':
    a = TreeNode(1)
    a.left = TreeNode(2)
    a.right = TreeNode(3)
    a.left.left = TreeNode(1)
    a.left.left.left = TreeNode(1)
    print(Solution().isBalanced(a))
|
[
"wangshanmin"
] |
wangshanmin
|
419991fe1dfba085648e6f42ecdb04586f9f14c2
|
1c52dae8197951434701b7bb53dce869417fc862
|
/wicked/config.py
|
65340ae9cf83a53938d0b26f68695852a4900db9
|
[] |
no_license
|
plone/wicked
|
5abe9b1c2efa67c83b6aafc148c11e6dac06c153
|
0c093f8304f29c38fb0edb232220455f0f7728fa
|
refs/heads/master
| 2023-04-19T06:09:37.354870
| 2023-04-09T15:48:08
| 2023-04-09T15:48:08
| 2,811,068
| 2
| 0
| null | 2014-06-12T09:09:19
| 2011-11-19T22:09:03
|
Python
|
UTF-8
|
Python
| false
| false
| 151
|
py
|
from txtfilter import WickedFilter
BACKLINK_RELATIONSHIP = 'Backlink->Source Doc'
FILTER_NAME = WickedFilter.name
GLOBALS = globals()
|
[
"whit@openplans.org"
] |
whit@openplans.org
|
15cb51e3e84e8a1de6d69f92dfcd65c7efe6c5d9
|
714992d03221df9aa4adc659095041d3c89a49b4
|
/pachong/test09_mongodb.py
|
95dce2a36b00a6202d42c95eb53eba71bc1584f8
|
[] |
no_license
|
zzy0119/test
|
f7961408ce5564adb8ae905b941e7927af9429a0
|
53c1a43e4b106878d6d99ecf0ec9dd857fabb5b5
|
refs/heads/master
| 2021-09-05T18:31:03.279884
| 2018-01-30T08:36:14
| 2018-01-30T08:36:14
| 115,862,446
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 254
|
py
|
# -*- coding:utf-8 -*-
from pymongo import MongoClient
client = MongoClient()
db = client.test  # connect to the "test" database; it is created automatically if it does not exist
my_set = db.set  # use the "set" collection; it is created automatically if it does not exist
my_set.insert({"name": "zhangzongyan", "age": 28})
|
[
"1754590086@qq.com"
] |
1754590086@qq.com
|
e247a62b5974daae0e78938e36a11e8ab88163bf
|
0374f04d8b141e92bb6d48d3487cb608dde58b39
|
/tests/providers/google/cloud/operators/test_datastore_system_helper.py
|
1f90a47b44989f22a4b88625a4d5ce12281d5659
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"BSD-2-Clause",
"Python-2.0"
] |
permissive
|
HubBucket-Team/airflow
|
941697776d2d73465a22889dcf93b23b2872d013
|
dcf87435219307d4e916a8abc2b819ad75e2b1cf
|
refs/heads/master
| 2021-01-14T05:31:16.531533
| 2020-02-24T00:05:07
| 2020-02-24T00:05:07
| 242,613,174
| 1
| 0
|
Apache-2.0
| 2020-02-24T00:17:29
| 2020-02-24T00:17:28
| null |
UTF-8
|
Python
| false
| false
| 1,405
|
py
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
from tests.contrib.utils.logging_command_executor import LoggingCommandExecutor
BUCKET = os.environ.get("GCP_DATASTORE_BUCKET", "datastore-system-test")
class GcpDatastoreSystemTestHelper(LoggingCommandExecutor):
    def create_bucket(self):
        self.execute_cmd(
            [
                "gsutil",
                "mb",
                "-l",
                "europe-north1",
                "gs://{bucket}".format(bucket=BUCKET),
            ]
        )

    def delete_bucket(self):
        self.execute_cmd(["gsutil", "rm", "-r", "gs://{bucket}".format(bucket=BUCKET)])
|
[
"jarek@potiuk.com"
] |
jarek@potiuk.com
|
044e73b6d010ce93303ef15f0b71818981402414
|
6a601530859c0ad634d38ec9790ca064ba6eae78
|
/app.py
|
6d9e1abb2d6a1436eda691d5e91b8597b5923788
|
[] |
no_license
|
gyn7561/minglufenci
|
32ab2eca0a23b9ef56d8e6ce56373f4bf9e253cf
|
8d3968ac78aa9848506f2f4969d4b52d9ad047a8
|
refs/heads/master
| 2020-12-05T06:42:23.310758
| 2020-01-06T06:19:05
| 2020-01-06T06:19:05
| 232,037,143
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 675
|
py
|
import pkuseg
import os

seg = pkuseg.pkuseg()  # load the segmentation model with the default configuration

def process(file):
    filepath, tmpfilename = os.path.split(file)
    shotname, extension = os.path.splitext(tmpfilename)
    if os.path.exists(shotname + "-words" + extension):
        print("跳过" + file)  # "跳过" = "skipping" this file, output already exists
        return
    print("开始处理" + file)  # "开始处理" = "start processing" this file
    fp = open(file)
    content = fp.read()
    fp.close()
    text = seg.cut(content)
    word_set = set(text)
    with open(shotname + "-words" + extension, 'w') as file_obj:
        file_obj.write("\n".join(list(word_set)))

mainPath = "名录"  # directory containing the input text files
pathDir = os.listdir(mainPath)
for file in pathDir:
    process(mainPath + "/" + file)
|
[
"200804632@qq.com"
] |
200804632@qq.com
|
e085c826a3b7a9943196aa9e0497816be78dfbd9
|
9c37059737001d768c817601fdc28a53c8f0290c
|
/test.py
|
d33d6ee612be098144493e86518a850d6e71f2b0
|
[
"MIT"
] |
permissive
|
ribner/tooshort
|
86cec421759ca5046e23a3e6638d16626ba9657f
|
ffa3eda83fa3756e24524a126378acc3f293fed5
|
refs/heads/master
| 2022-07-09T04:54:59.171933
| 2020-02-20T05:00:36
| 2020-02-20T05:00:36
| 240,808,043
| 0
| 0
|
MIT
| 2022-06-22T01:12:48
| 2020-02-16T00:28:30
|
Python
|
UTF-8
|
Python
| false
| false
| 12,249
|
py
|
import unittest
from sklearn.datasets import load_boston, load_iris, load_wine
import numpy as np
import pandas as pd
from pandas.util.testing import assert_frame_equal
from sklearn.linear_model import LinearRegression, SGDRegressor, SGDClassifier, LogisticRegression
from sklearn.svm import SVC
from too_short import TooShort
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.datasets import make_classification
from collections import Counter
from imblearn.over_sampling import SMOTE
from imblearn.pipeline import Pipeline
from sklearn.svm import SVR
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
def get_iris():
wine = load_wine()
X = pd.DataFrame(wine.data)
X.columns = wine.feature_names
y = pd.DataFrame(wine.target)
y.columns = ["target"]
return X, y["target"].ravel()
def get_boston():
boston = load_boston()
X = pd.DataFrame(boston.data)
X.columns = boston.feature_names
y = pd.DataFrame(boston.target)
y.columns = ["target"]
return X, y["target"].ravel()
def get_wine():
wine = load_wine()
X = pd.DataFrame(wine.data)
X.columns = wine.feature_names
y = pd.DataFrame(wine.target)
y.columns = ["target"]
return X, y["target"].ravel()
class TestFeatureSelection(unittest.TestCase):
def testBasicFeatureSelection(self):
X, y = get_iris()
too_short = TooShort(X, y, prediction_type="classification")
X_train, X_test = too_short.preproc(
standard_scale=['alcohol', 'malic_acid', 'ash', 'alcalinity_of_ash', 'magnesium',
'total_phenols', 'flavanoids', 'nonflavanoid_phenols',
'proanthocyanins', 'color_intensity', 'hue',
'od280/od315_of_diluted_wines', 'proline'])
X_train_filtered, X_test_filtered = too_short.select_features()
self.assertTrue(len(X_train.columns) > len(X_train_filtered.columns))
self.assertTrue(len(X_test.columns) > len(X_test_filtered.columns))
class TestEDA(unittest.TestCase):
def testBasicEDA(self):
return None
class TestOversampling(unittest.TestCase):
def testBasicOversamplingNoDfWithUndersample(self):
too_short = TooShort()
X, y = make_classification(n_samples=10000, n_features=2, n_redundant=0,
n_clusters_per_class=1, weights=[0.99], flip_y=0, random_state=1)
too_short.set_attributes(X_train=X, y_train=y)
os_X, os_y = too_short.oversample()
count = Counter(os_y)
self.assertTrue(count[0] == 200)
self.assertTrue(count[1] == 200)
def testBasicOversamplingNoDfNoUndersampling(self):
too_short = TooShort()
X, y = make_classification(n_samples=10000, n_features=2, n_redundant=0,
n_clusters_per_class=1, weights=[0.75], flip_y=0, random_state=1)
too_short.set_attributes(X_train=X, y_train=y)
os_X, os_y = too_short.oversample()
count = Counter(os_y)
self.assertTrue(count[0] == 7500)
self.assertTrue(count[1] == 7500)
# slow
# def testCreditDatasetEndToEnd(self):
# df = pd.read_excel(
# "https://archive.ics.uci.edu/ml/machine-learning-databases/00350/default%20of%20credit%20card%20clients.xls", encoding="utf-8", skiprows=1)
# df = df.rename(
# columns={'default payment next month': 'DEFAULT_PAYMENT_NEXT_MONTH', 'PAY_0': 'PAY_1'})
# y = df['DEFAULT_PAYMENT_NEXT_MONTH'].ravel()
# X = df.drop(['DEFAULT_PAYMENT_NEXT_MONTH'], axis=1)
# too_short = TooShort(X, y, prediction_type="classification")
# too_short.oversample()
# too_short.preproc(standard_scale=['LIMIT_BAL', 'SEX', 'EDUCATION', 'MARRIAGE', 'AGE', 'PAY_1', 'PAY_2',
# 'PAY_3', 'PAY_4', 'PAY_5', 'PAY_6', 'BILL_AMT1', 'BILL_AMT2',
# 'BILL_AMT3', 'BILL_AMT4', 'BILL_AMT5', 'BILL_AMT6', 'PAY_AMT1',
# 'PAY_AMT2', 'PAY_AMT3', 'PAY_AMT4', 'PAY_AMT5', 'PAY_AMT6'])
# too_short.choose_models()
# result = too_short.search()
# print(result)
# slow
# def testCreditDatasetAlternateScoringEndToEnd(self):
# df = pd.read_excel(
# "https://archive.ics.uci.edu/ml/machine-learning-databases/00350/default%20of%20credit%20card%20clients.xls", encoding="utf-8", skiprows=1)
# df = df.rename(
# columns={'default payment next month': 'DEFAULT_PAYMENT_NEXT_MONTH', 'PAY_0': 'PAY_1'})
# y = df['DEFAULT_PAYMENT_NEXT_MONTH'].ravel()
# X = df.drop(['DEFAULT_PAYMENT_NEXT_MONTH'], axis=1)
# too_short = TooShort(X, y, prediction_type="classification")
# too_short.oversample()
# too_short.preproc(standard_scale=['LIMIT_BAL', 'SEX', 'EDUCATION', 'MARRIAGE', 'AGE', 'PAY_1', 'PAY_2',
# 'PAY_3', 'PAY_4', 'PAY_5', 'PAY_6', 'BILL_AMT1', 'BILL_AMT2',
# 'BILL_AMT3', 'BILL_AMT4', 'BILL_AMT5', 'BILL_AMT6', 'PAY_AMT1',
# 'PAY_AMT2', 'PAY_AMT3', 'PAY_AMT4', 'PAY_AMT5', 'PAY_AMT6'])
# too_short.select_features()
# too_short.choose_models()
# result = too_short.search(scoring="recall")
# print(result)
class TestEndToEnd(unittest.TestCase):
def testCatSmallEndToEnd(self):
X, y = get_iris()
too_short = TooShort(X, y, prediction_type="classification")
result = too_short.preproc(
standard_scale=['alcohol', 'malic_acid', 'ash', 'alcalinity_of_ash', 'magnesium',
'total_phenols', 'flavanoids', 'nonflavanoid_phenols',
'proanthocyanins', 'color_intensity', 'hue',
'od280/od315_of_diluted_wines', 'proline'])
models = too_short.choose_models()
result = too_short.search()
model_keys = result.keys()
self.assertIn('SVC', model_keys)
def testRegressionSmallEndToEnd(self):
X, y = get_boston()
too_short = TooShort(X, y, prediction_type="regression")
result = too_short.preproc(
standard_scale=['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX',
'PTRATIO', 'B', 'LSTAT'])
models = too_short.choose_models()
result = too_short.search()
model_keys = result.keys()
self.assertIn('Ridge', model_keys)
class TestGridSearch(unittest.TestCase):
def test_basic_custom_grid_search(self):
X, y = get_iris()
too_short = TooShort(X, y)
result = too_short.preproc(
standard_scale=too_short.X_train.columns)
too_short.set_attributes(models=[KNeighborsClassifier])
result = too_short.search()
model_keys = result.keys()
self.assertEqual(len(model_keys), 1)
self.assertIn('KNeighborsClassifier', model_keys)
class TestChooseModels(unittest.TestCase):
def test_returns_regression_models_small_samples(self):
X, y = get_iris()
too_short = TooShort(X, y, prediction_type="regression")
result = too_short.choose_models()
self.assertIn(LinearRegression, result)
self.assertNotIn(SGDRegressor, result)
def test_returns_regression_models_many_samples(self):
too_short = TooShort(prediction_type="regression")
y = np.random.choice([0, 1, 2, 3, 4], 110000)
too_short.set_attributes(y_train=y)
result = too_short.choose_models()
self.assertIn(LinearRegression, result)
self.assertIn(SGDRegressor, result)
def test_returns_classification_models_small_samples(self):
X, y = get_iris()
too_short = TooShort(X, y, prediction_type="classification")
result = too_short.choose_models()
self.assertIn(SVC, result)
self.assertNotIn(SGDClassifier, result)
def test_returns_classification_models_many_samples(self):
too_short = TooShort(prediction_type="classification")
y = np.random.choice([0, 1, 2, 3, 4], 110000)
too_short.set_attributes(y_train=y)
result = too_short.choose_models()
self.assertIn(SVC, result)
self.assertIn(SGDClassifier, result)
class TestGetHyperParamGrids(unittest.TestCase):
def test_returns_linear_regression_params(self):
too_short = TooShort()
lr_params = {
'normalize': [True, False]
}
result = too_short.get_param_grid(LinearRegression)
self.assertEqual(result, lr_params)
class TestPreproc(unittest.TestCase):
def test_does_not_alter_original_df(self):
X, y = get_wine()
X['A_FAKE_CAT'] = np.random.randint(4, size=len(y))
X['B_FAKE_CAT'] = np.random.randint(4, size=len(y))
X['C_FAKE_CAT'] = np.random.choice(['SWEET', 'SOUR', 'TART'], len(y))
X['D_FAKE_LABEL_CAT'] = np.random.choice(
['BAD', 'OK', 'GOOD', 'GREAT'], len(y))
X_copy = X.copy()
too_short = TooShort(X, y)
too_short.preproc(OHE=np.array(
['A_FAKE_CAT', 'B_FAKE_CAT', 'C_FAKE_CAT']),
label_encode={
'D_FAKE_LABEL_CAT': ['BAD', 'OK', 'GOOD', 'GREAT']
},
standard_scale=['alcohol', 'malic_acid', 'ash', 'alcalinity_of_ash', 'magnesium',
'total_phenols', 'flavanoids', 'nonflavanoid_phenols',
'proanthocyanins', 'color_intensity', 'hue',
'od280/od315_of_diluted_wines', 'proline'])
assert_frame_equal(X, X_copy)
def test_create_ohe(self):
X, y = get_wine()
X['A_FAKE_CAT'] = np.random.randint(4, size=len(y))
X['B_FAKE_CAT'] = np.random.randint(4, size=len(y))
X['C_FAKE_CAT'] = np.random.choice(['SWEET', 'SOUR', 'TART'], len(y))
X['D_FAKE_LABEL_CAT'] = np.random.choice(
['BAD', 'OK', 'GOOD', 'GREAT'], len(y))
too_short = TooShort(X, y)
X_train, X_test = too_short.preproc(OHE=np.array(
['A_FAKE_CAT', 'B_FAKE_CAT', 'C_FAKE_CAT']),
label_encode={
'D_FAKE_LABEL_CAT': ['BAD', 'OK', 'GOOD', 'GREAT']
},
standard_scale=['alcohol', 'malic_acid', 'ash', 'alcalinity_of_ash', 'magnesium',
'total_phenols', 'flavanoids', 'nonflavanoid_phenols',
'proanthocyanins', 'color_intensity', 'hue',
'od280/od315_of_diluted_wines', 'proline'])
result_df = X_train
self.assertCountEqual(result_df.columns[0:11], ['A_FAKE_CAT_0', 'A_FAKE_CAT_1', 'A_FAKE_CAT_2', 'A_FAKE_CAT_3',
'B_FAKE_CAT_0', 'B_FAKE_CAT_1', 'B_FAKE_CAT_2', 'B_FAKE_CAT_3',
'C_FAKE_CAT_SOUR', 'C_FAKE_CAT_SWEET', 'C_FAKE_CAT_TART'])
self.assertFalse('A_FAKE_CAT' in result_df.columns)
self.assertIn(result_df['A_FAKE_CAT_0'][0], [0.0, 1.0])
def test_standard_scaled(self):
X, y = get_wine()
X['A_FAKE_CAT'] = np.random.randint(4, size=len(y))
X['B_FAKE_CAT'] = np.random.randint(4, size=len(y))
X['C_FAKE_CAT'] = np.random.choice(['SWEET', 'SOUR', 'TART'], len(y))
X['D_FAKE_LABEL_CAT'] = np.random.choice(
['BAD', 'OK', 'GOOD', 'GREAT'], len(y))
too_short = TooShort(X, y)
result = too_short.preproc(
standard_scale=['alcohol', 'malic_acid', 'ash', 'alcalinity_of_ash', 'magnesium',
'total_phenols', 'flavanoids', 'nonflavanoid_phenols',
'proanthocyanins', 'color_intensity', 'hue',
'od280/od315_of_diluted_wines', 'proline'])
result_df = result[0]
self.assertAlmostEqual(
result_df['alcohol'].mean(), result_df['malic_acid'].mean())
if __name__ == '__main__':
unittest.main()
|
[
"elliottribner@Elliotts-MacBook-Pro.local"
] |
elliottribner@Elliotts-MacBook-Pro.local
|
8f875ea94e41164e8fde720f8fa32662def4ae83
|
b184d2e5bd950da935d5585fd4b79a9f51b8130b
|
/src/cppyythonizations/tuple/__init__.py
|
974d005bd5104f2f9678e6fd17fb58d11196e085
|
[
"MIT"
] |
permissive
|
flatsurf/cppyythonizations
|
7bc2b5bce97923aae6f57c1e9d8d66c264585115
|
7169d7d0b11160edfaee782bf9c5c60ee32e4da2
|
refs/heads/master
| 2022-09-27T02:18:42.590304
| 2022-09-13T12:49:52
| 2022-09-13T12:49:52
| 214,221,798
| 3
| 0
|
MIT
| 2023-09-13T21:18:45
| 2019-10-10T15:39:21
|
Python
|
UTF-8
|
Python
| false
| false
| 5,088
|
py
|
r"""
Modifies some aspects std::tuple such as printing and indexing.
EXAMPLES::
>>> import cppyy
>>> from cppyythonizations.tuple import add_tuple_pythonizations
>>> add_tuple_pythonizations()
>>> t = cppyy.gbl.std.tuple[int, str, float](13, "x", 3.7)
>>> str(t)
"(13, b'x', 3.7...)"
Note that this only changes `__str__`, if you also want tuples to print as
Python tuples in a Python prompt, you need to `enable_pretty_printing` from
`cppyythonizations.printing`.
>>> t
<cppyy.gbl.std.tuple<int,std::string,float> object at ...>
>>> repr(t)
'<cppyy.gbl.std.tuple<int,std::string,float> object at ...>'
"""
# ********************************************************************
# This file is part of cppyythonizations.
#
# Copyright (C) 2020 Julian Rüth
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ********************************************************************
import re
import cppyy
from ..util import filtered
def enable_tuple_printing(proxy, name):
r"""
Print proxy objects as Python tuples.
EXAMPLES::
>>> import cppyy
>>> from cppyythonizations.tuple import enable_tuple_printing
>>> from cppyythonizations.printing import enable_pretty_printing
>>> from cppyythonizations.util import filtered
>>> cppyy.py.add_pythonization(filtered("tuple<int, float>")(enable_tuple_printing), "std")
>>> cppyy.py.add_pythonization(filtered("tuple<int, float>")(enable_tuple_printing), "std")
>>> cppyy.gbl.std.tuple[int, float](1, 2)
(1, 2.0)
"""
proxy.__str__ = lambda self: str(tuple(self))
def enable_tuple_indexing(proxy, name):
r"""
Allowing indexing into tuples with the [] operator.
Actually, tuples come with an implementation of ``__getitem__`` out of the
box in cppyy. However, this implementation vanishes once we add a
Pythonization, see
https://bitbucket.org/wlav/cppyy/issues/272/pythonization-on-tuple-erases-__getitem__.
EXAMPLES::
>>> import cppyy
>>> from cppyythonizations.tuple import enable_tuple_indexing
>>> from cppyythonizations.util import filtered
>>> cppyy.py.add_pythonization(filtered("tuple<string, string>")(enable_tuple_indexing), "std")
>>> t = cppyy.gbl.std.tuple[str, str]("a", "b")
>>> t[0]
b'a'
>>> t[1]
b'b'
>>> t[2]
Traceback (most recent call last):
...
IndexError: tuple index out of range
>>> list(t)
[b'a', b'b']
>>> len(t)
2
>>> t[::2]
(b'a',)
"""
def getitem(self, key):
size = len(self)
def get(index):
if index >= 0:
if index >= size:
raise IndexError("tuple index out of range")
return cppyy.gbl.std.get[index](self)
else:
if -index > size:
raise IndexError("tuple index out of range")
return get(size + index)
if isinstance(key, slice):
return tuple(get(i) for i in list(range(size))[key])
else:
return get(int(key))
proxy.__getitem__ = getitem
proxy.__len__ = lambda self: cppyy.gbl.std.tuple_size[proxy].value
def add_tuple_pythonizations():
r"""
Enable printing of `std::tuple<>` as a Python tuple, and Python tuple indexing.
EXAMPLES::
>>> import re
>>> import cppyy
>>> from cppyythonizations.tuple import add_tuple_pythonizations
>>> from cppyythonizations.printing import enable_pretty_printing
>>> add_tuple_pythonizations()
>>> cppyy.py.add_pythonization(filtered(re.compile("tuple<.*>"))(enable_pretty_printing), "std")
>>> cppyy.gbl.std.tuple[int, str](1, "x")
(1, b'x')
>>> _[1]
b'x'
"""
cppyy.py.add_pythonization(filtered(re.compile("tuple<.*>"))(enable_tuple_printing), "std")
cppyy.py.add_pythonization(filtered(re.compile("tuple<.*>"))(enable_tuple_indexing), "std")
|
[
"julian.rueth@fsfe.org"
] |
julian.rueth@fsfe.org
|
70c49836b4d9fcdea82858e5b8d8e5f943de97a7
|
06420a51bd0498aaa29039a71de714ee661891bb
|
/base64.py
|
c6a9c5a3d7d67484b87a200ed68251a54e1e52c7
|
[] |
no_license
|
Security-Development/base64
|
aeacea34a2ce4bf8ae90b168ac4444c8a680dfd3
|
2f4518ee1e66e2d03cba8e24ecfd6b63c80cf85b
|
refs/heads/main
| 2023-08-21T13:12:09.779568
| 2021-10-12T04:22:53
| 2021-10-12T04:22:53
| 416,151,560
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,378
|
py
|
base64_box = {}
index = 0
args = {
    "A": 26,
    "a": 26,
    "0": 10,
    "+": 1,
    "/": 1
}

def CreateBoxTool(string, length):
    global index
    ascii_string = None
    for i in range(length):
        ascii_string = ord(string) + i
        base64_box[index] = chr(ascii_string)
        index += 1
        # print(chr(ascii_string))  <= debug output to verify the table

for k, v in args.items():
    CreateBoxTool(k, v)
print(base64_box)

def setUp(binary_string):
    v, k = 0, 6
    len_binary = len(binary_string)
    for i in range(len_binary):
        if len_binary < v:
            break
        bin_str = str(binary_string[v:k])
        if len(bin_str) != 6:
            bin_str = bin_str.ljust(6, "0")
        if bin_str.count("0") == 6:
            bin_str = ""
        else:
            print(base64_box[int("0b" + bin_str, 2)], end="")
        v, k = v + 6, k + 6
    if len_binary % 3 == 1:
        print("=")
    elif len_binary % 3 == 2:
        print("==")

binary = ""

def CreateBase64(string):
    global binary
    ascii_string = None
    for i in range(len(string)):
        ascii_string = ord(string[i])
        binary += "{0:b}".format(ascii_string).zfill(8)
    print(binary)
    # print(string[i] + " => " + "{0:b}".format(binary1))
    setUp(binary)

CreateBase64("hello")
|
[
"noreply@github.com"
] |
Security-Development.noreply@github.com
|
00b736a0d0d95265fdf3d417dbe4f5e683bf9f59
|
74d840ba895342c04c2a3b4946918a81ff7d55ed
|
/noge/preprocessors/__init__.py
|
e334e2d61826747685613a8c7647bac4c785336c
|
[
"MIT"
] |
permissive
|
johny-c/noge
|
cbad8a8b08ec350021779e2e3319c3af69e8f5a9
|
88e68ba8c51ff0d63577991e233e9110cb76e228
|
refs/heads/main
| 2023-03-31T05:16:26.101441
| 2021-04-09T21:59:08
| 2021-04-09T21:59:08
| 333,765,433
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 137
|
py
|
from .preprocessor import Preprocessor
from .input_meas import InputMeasPreprocessor
from .target_preprocessor import TargetPreprocessor
|
[
"johnyc.code@gmail.com"
] |
johnyc.code@gmail.com
|
2e9abafbd5759bad9bf4631dfd11153c706d0708
|
ac43ba65a0d2206f776c73c798aa4e2f0fa1579c
|
/apps/cn_a_stocks/migrations/0029_auto_20191216_1001.py
|
ee2aa11a5dca7f472fb17d3907c6c4c59e06ab44
|
[] |
no_license
|
xyq946692052/globalinvestor
|
d2b1616006f5bf9a4c268389f2d0a6bcc5b87bed
|
180637300649d9b0a227f67794242a427296bc1f
|
refs/heads/master
| 2022-12-12T18:44:16.969005
| 2020-03-19T09:21:29
| 2020-03-19T09:21:29
| 225,184,905
| 0
| 0
| null | 2022-12-08T03:15:28
| 2019-12-01T15:37:18
|
Python
|
UTF-8
|
Python
| false
| false
| 1,023
|
py
|
# Generated by Django 2.0 on 2019-12-16 02:01
import datetime
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('cn_a_stocks', '0028_merge_20191212_2017'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='astocksprofit',
            options={'ordering': ('-stat_date',)},
        ),
        migrations.AlterField(
            model_name='astocksclseprice',
            name='exchange_date',
            field=models.DateField(db_index=True, null=True),
        ),
        migrations.AlterField(
            model_name='astocksheader',
            name='ipodate',
            field=models.DateField(blank=True, default=datetime.datetime(2019, 12, 16, 10, 1, 3, 809242), null=True),
        ),
        migrations.AlterField(
            model_name='astocksheader',
            name='outdate',
            field=models.DateField(blank=True, default=datetime.datetime(2019, 12, 16, 10, 1, 3, 809242), null=True),
        ),
    ]
|
[
"kevin.xie@r-pac.com.cn"
] |
kevin.xie@r-pac.com.cn
|
35b0a6b8f508a407f7a4bbc749b088b3e5fac588
|
120d1252ba5a38018a14b22a494cdc22859850e2
|
/Face Detection/Face Detection Loop.py
|
14184ccb24cc9bc8b2dfd82ea5094dca2a0338ce
|
[] |
no_license
|
KishoreR11/computer-vision
|
df8160b73f9a9bba94780ed89c510ae5102272bb
|
556c224245b8015498345eca46e3ec27217572ed
|
refs/heads/master
| 2023-08-31T13:49:10.286984
| 2021-10-21T13:05:41
| 2021-10-21T13:05:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,173
|
py
|
import numpy as np
import cv2 as cv
import os
import pathlib as Path

haar_cascade = cv.CascadeClassifier(r'D:\computer-vision\Face Detection\haar_face.xml')
people = ['Ben Afflek', 'Elton John', 'Jerry Seinfield', 'Madonna', 'Mindy Kaling']
# features = np.load('features.npy',allow_pickle=True)
# labels = np.load('labels.npy')
fac_recognizer = cv.face.LBPHFaceRecognizer_create()
fac_recognizer.read(r'D:\computer-vision\Face Detection\face_trained.yml')

# p = (r'D:\computer-vision\Photos\Faces\val\mindy_kaling')
# p = (r'D:\computer-vision\Photos\Faces\val\elton_john')
p = (r'D:\computer-vision\Photos\Faces\val\madonna')
# p = (r'D:\computer-vision\Photos\Faces\val\ben_afflek')

for filepath in os.listdir(p):
    img = cv.imread(os.path.join(p, filepath))
    gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
    cv.imshow('Person', gray)
    fac_recog = haar_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=4)
    for (x, y, w, h) in fac_recog:
        face_roi = gray[y:y+h, x:x+w]
        labels, confidence = fac_recognizer.predict(face_roi)
        print(f'Labels = {people[labels]} with a confidence = {int(confidence)} %')
    cv.waitKey(3)
|
[
"souvikdatta123@gmail.com"
] |
souvikdatta123@gmail.com
|
be6cb17a337f83c070f47e8192c983039f1a71c7
|
8b634dc196162dff328d61bf6f8d4121dfb59bd4
|
/Binary search/peakIndexInMountainArray.py
|
ec459692519bf962f4bb15b684b610fb7afb17dc
|
[] |
no_license
|
kqg13/LeetCode
|
84268b2146dc8323cb71f041b6664069baaa339c
|
1c584f4ca4cda7a3fb3148801a1ff4c73befed24
|
refs/heads/master
| 2023-08-05T09:46:28.103910
| 2023-07-29T21:02:26
| 2023-07-29T21:02:26
| 165,123,023
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,579
|
py
|
# Easy binary search problem 852: Peak Index in a Mountain Array
# Let's call an array A a mountain if the following properties hold:
# A.length >= 3
# There exists some 0 < i < A.length - 1 such that
# A[0] < A[1] < ... A[i-1] < A[i] > A[i+1] > ... > A[A.length - 1]
# Given an array that is definitely a mountain, return any i such that
# A[0] < A[1] < ... A[i-1] < A[i] > A[i+1] > ... > A[A.length - 1].
# Example:
# Input: [0,2,1,0]  Output: 1

class Solution:
    # Time: O(N)
    def peakIndexInMountainArray(self, A):
        """
        :type A: List[int]
        :rtype: int
        """
        for i in range(len(A)):
            if A[i] > A[i + 1]:
                return i

    # Time: O(logN)
    def peakIndexInMountainArrayBinary(self, A):
        """
        :type A: List[int]
        :rtype: int
        """
        low, high = 0, len(A) - 1
        while low < high:
            mid = (low + high) // 2
            if A[mid] < A[mid + 1]:
                low = mid + 1
            else:
                high = mid
        return low

    def peakIndexInMountainArrayBin(self, A):
        peak = self.peakIndexInMountainArrayBinaryHelper(A, 0, len(A) - 1)
        return peak

    def peakIndexInMountainArrayBinaryHelper(self, A, low, high):
        mid = (low + high) // 2
        if low < high:
            if A[mid] < A[mid - 1]:
                return self.peakIndexInMountainArrayBinaryHelper(A, low, mid - 1)
            elif A[mid] < A[mid + 1]:
                return self.peakIndexInMountainArrayBinaryHelper(A, mid + 1, high)
        return mid
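
# Illustrative sanity check: on the example input [0, 2, 1, 0] from the problem
# statement above, both the linear and the iterative binary-search variants
# should return index 1.
if __name__ == '__main__':
    s = Solution()
    print(s.peakIndexInMountainArray([0, 2, 1, 0]))        # expected: 1
    print(s.peakIndexInMountainArrayBinary([0, 2, 1, 0]))  # expected: 1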
|
[
"skg2016@nyu.edu"
] |
skg2016@nyu.edu
|
24685a49422fe576d8f35ff2f9f4aa17c81dfa6d
|
4a4d27b3223eddbca904da0eb393204a2c76545e
|
/ex_10/ex_10_03.py
|
de034a1cd043a8a039c4acac7d15b8096fe3d2ef
|
[] |
no_license
|
bassmannate/py4e-old
|
24850bc1ea28a221ee02f1870222b2d67b00010d
|
6d579efcda70880447b665216a4971efdad109af
|
refs/heads/master
| 2023-08-26T02:44:42.121530
| 2021-10-30T20:18:54
| 2021-10-30T20:18:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 661
|
py
|
while True:
    file = input("Please enter a file name or \"quit\" to quit: ")
    if len(file) < 1:
        fhand = open("../words.txt")
        break
    elif file.lower() == "quit":
        exit()
    else:
        try:
            fhand = open(file)
            break
        except:
            print("You must enter a valid file name.")
            continue

lettercount = dict()
lst = list()
for line in fhand:
    words = line.rstrip().lower().split()
    for word in words:
        for letter in word:
            if letter.isalpha():
                lettercount[letter] = lettercount.get(letter, 0) + 1
for k, v in list(lettercount.items()):
    lst.append((v, k))
lst.sort(reverse=True)
for v, k in lst:
    print(k, "-", v)
|
[
"bassmannate@users.noreply.github.com"
] |
bassmannate@users.noreply.github.com
|
58d83cc7a2d421637fe7d8467c0e2f072f6125bb
|
4e4885120cd7782ff14eee50e871e8c3c1d6978c
|
/app/migrations/0001_initial.py
|
52848604f070944e39c9df996d94343f6a60e01c
|
[] |
no_license
|
mkdirken/XMining
|
62dd3d0b8f137596f7a18844118a6d06776ccd2f
|
b620ae36a7e84ea785cd0e5bf9cc2a6b1ecb6a22
|
refs/heads/master
| 2021-04-15T16:18:11.199133
| 2018-03-22T12:12:33
| 2018-03-22T12:12:33
| 126,329,170
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,025
|
py
|
# Generated by Django 2.0.3 on 2018-03-21 13:54
import app.models
from django.conf import settings
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0009_alter_user_last_name_max_length'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('avatar', models.ImageField(default='users/user.png', upload_to='users', verbose_name='Profil Fotoğrafı')),
('hesap', models.FloatField(default=0, verbose_name='Hesap')),
('tel', models.CharField(default='(000) 000 00 00', max_length=20, verbose_name='Cep Telefonu')),
('tc_no', models.CharField(default='00000000000', max_length=11, verbose_name='T.C Kimlik Numarası')),
('bankName', models.CharField(blank=True, default='', max_length=50, verbose_name='Banka Adı')),
('iban', models.CharField(blank=True, default='', max_length=40, verbose_name='İban No')),
('code', models.CharField(blank=True, default=app.models.random_olustur, max_length=5, verbose_name='Code')),
('code_active_date', models.DateTimeField(blank=True, default=django.utils.timezone.now, verbose_name='Kodun Geçerlilik Süresi')),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'verbose_name': 'user',
'abstract': False,
'verbose_name_plural': 'users',
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
migrations.CreateModel(
name='Bank',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(default='Girilmemiş', max_length=100, verbose_name='İşlem')),
('date', models.DateField(default=django.utils.timezone.now, verbose_name='İşlem Tarihi')),
('pay', models.FloatField(default=0, verbose_name='Ödeme')),
('islem', models.CharField(choices=[('GİRDİ', 'GİRDİ'), ('ÇIKTI', 'ÇIKTI')], default='GİRDİ', max_length=20, verbose_name='İşlem Türü')),
('user', models.ForeignKey(on_delete=False, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'Kasa',
'verbose_name_plural': 'Kasa',
},
),
migrations.CreateModel(
name='Investment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('pay', models.IntegerField(default=0, verbose_name='Yatırım Tutarı')),
('date', models.DateTimeField(auto_now_add=True, verbose_name='Yatırım Zamanı')),
('status', models.BooleanField(default=False, verbose_name='Hesaba Aktarma')),
('user', models.ForeignKey(on_delete=False, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='machine',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(upload_to='machine', verbose_name='Görsel')),
('model', models.CharField(max_length=50, verbose_name='Model')),
('properties', models.TextField(verbose_name='Özellikler')),
('fiyat', models.FloatField(verbose_name='Fiyat')),
('miner_power', models.FloatField(verbose_name='Kazım Gücü')),
('miner_power_rate', models.CharField(choices=[('TH', 'TH/s'), ('GH', 'GH/s'), ('MH', 'MH/s')], max_length=10, verbose_name='Kazım Güç Türü')),
('warranty', models.CharField(choices=[('3 AY', '3 AY'), ('6 AY', '6 AY'), ('9 AY', '9 AY'), ('12 AY', '12 AY'), ('18 AY', '18 AY'), ('24 AY', '24 AY')], max_length=25, verbose_name='Garanti Süresi')),
('lifetime', models.IntegerField(choices=[(1, '1 YIL'), (2, '2 YIL')], verbose_name='Kullanım Ömrü')),
],
options={
'verbose_name': 'Yeni Makine Oluştur',
'verbose_name_plural': 'Yeni Makine Oluştur',
},
),
migrations.CreateModel(
name='news',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200, verbose_name='Başlık')),
('post', models.TextField(verbose_name='Kısa Yazı')),
('date', models.DateTimeField(auto_now_add=True, verbose_name='Zaman')),
],
options={
'verbose_name': 'Duyuru Oluştur',
'verbose_name_plural': 'Duyuru Oluştur',
},
),
migrations.CreateModel(
name='Payment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('fullname', models.CharField(blank=True, max_length=120, verbose_name='İsim')),
('bankname', models.CharField(blank=True, max_length=60, verbose_name='Banka Adı')),
('iban', models.CharField(blank=True, max_length=40, verbose_name='İban No')),
('amount', models.IntegerField(default=0, verbose_name='Tutar')),
('cellphone', models.CharField(blank=True, max_length=20, verbose_name='Telefon Numarası')),
('date', models.DateTimeField(default=django.utils.timezone.now, verbose_name='Bildirim Gönderim Zamanı')),
('status', models.BooleanField(default=False, verbose_name='Ödeme Durumu')),
('user', models.ForeignKey(on_delete=False, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'Gelen Ödeme Bildirimleri',
'verbose_name_plural': 'Gelen Ödeme Bildirimleri',
},
),
migrations.CreateModel(
name='RequestPayment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('fullname', models.CharField(blank=True, max_length=120, verbose_name='İsim')),
('bankName', models.CharField(blank=True, max_length=60, verbose_name='Banka Adı')),
('iban', models.CharField(blank=True, max_length=40, verbose_name='İban No')),
('amount', models.IntegerField(default=0, verbose_name='Tutar')),
('date', models.DateTimeField(default=django.utils.timezone.now, verbose_name='Talep Tarihi')),
('status', models.BooleanField(default=False, verbose_name='Ödeme Durumu')),
('user', models.ForeignKey(on_delete=False, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'İstenen Ödeme Talebi',
'verbose_name_plural': 'İstenen Ödeme Talebi',
},
),
migrations.CreateModel(
name='TheMachineGain',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('gain', models.FloatField(verbose_name='Kazanç')),
('date', models.DateField(verbose_name='Zaman')),
('machine', models.ForeignKey(on_delete=False, to='app.machine', verbose_name='Makina Modeli')),
],
options={
'verbose_name': 'Makinelerın Günlük Kazancı',
'verbose_name_plural': 'Makinelerın Günlük Kazancı',
},
),
migrations.CreateModel(
name='user_machine',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateTimeField(verbose_name='Makine Alım Zamanı')),
('machine_dead', models.DateTimeField(verbose_name='Makine Ölüm Zamanı')),
('miner_power', models.FloatField(verbose_name='Kazım Gücü')),
('miner_power_rate', models.CharField(choices=[('TH', 'TH/s'), ('GH', 'GH/s'), ('MH', 'MH/s')], max_length=10, verbose_name='Kazım Güç Türü')),
('fiyat', models.FloatField(verbose_name='Fiyat')),
('active', models.BooleanField(default=0, verbose_name='Cihaz Aktifliği')),
('machine', models.ForeignKey(on_delete=False, to='app.machine')),
('user', models.ForeignKey(on_delete=False, related_name='usermachine', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='user_machine_log',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('machine_id', models.IntegerField(verbose_name='Makine ID')),
('date', models.DateTimeField()),
('machine_dead', models.DateTimeField(default=django.utils.timezone.now, verbose_name='Makina Ölüm Zamanı')),
('pay', models.FloatField(default=0, verbose_name='Kazanç')),
('payment', models.BooleanField(default=False, verbose_name='Ödeme Yapıldımı')),
('user', models.ForeignKey(on_delete=False, to=settings.AUTH_USER_MODEL)),
('user_machine', models.ForeignKey(on_delete=False, to='app.user_machine')),
],
),
]
|
[
"mkdirken@gmail.com"
] |
mkdirken@gmail.com
|
180511e3cd7732736763b7ea99f296aa0cb45727
|
fea15349ea09985eccd3ed630691efa9456445e1
|
/presstatic/storage/s3.py
|
b3393015b2cdb755d053406a4ef2178c15707a76
|
[
"MIT"
] |
permissive
|
King-Maverick007/presstatic
|
f8c5dd383bf093a394e45001bd028c8bfde75725
|
e912c5b5d6d759c15c1b0a11cf33cfc3f7163f4a
|
refs/heads/master
| 2022-12-25T19:44:24.812894
| 2014-12-02T14:25:50
| 2014-12-02T14:25:50
| 300,202,505
| 0
| 0
|
MIT
| 2020-10-01T08:22:56
| 2020-10-01T08:21:14
| null |
UTF-8
|
Python
| false
| false
| 906
|
py
|
# -*- coding: utf-8 -*-
import os

from boto.s3.key import Key
from boto.s3.connection import S3Connection

from presstatic.storage import Storage, FileStorageIntent


class S3FileStorageIntent(FileStorageIntent):
    def __init__(self, from_path, to_path, bucket):
        super(S3FileStorageIntent, self).__init__(from_path, to_path)
        self.bucket = bucket

    def store(self):
        k = Key(self.bucket)
        k.key = self.to_path
        k.set_contents_from_filename(self.from_path)


class S3Storage(Storage):
    def __init__(self, bucket_name):
        self.connection = S3Connection(os.environ.get('AWS_ACCESS_KEY_ID'),
                                       os.environ.get('AWS_SECRET_ACCESS_KEY'))
        self.bucket = self.connection.create_bucket(bucket_name)

    def storage_intent(self, from_path, to_path):
        return S3FileStorageIntent(from_path, to_path, self.bucket)
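
# Illustrative usage sketch: 'my-bucket' and the file paths are made-up example
# values, and AWS credentials are expected in AWS_ACCESS_KEY_ID /
# AWS_SECRET_ACCESS_KEY as read by S3Storage above.
if __name__ == '__main__':
    storage = S3Storage('my-bucket')
    intent = storage.storage_intent('build/index.html', 'index.html')
    intent.store()  # uploads build/index.html into the bucket under key 'index.html'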
|
[
"filiperegadas@gmail.com"
] |
filiperegadas@gmail.com
|
4493917acfef8710f2a543f938e4d8828945823b
|
d0c521db0302002723b0fa03f55239e5b7d1a0b4
|
/single_overlap_test.py
|
db6a3266d6f5cc4dc73d55e2169f22c51e4fbda6
|
[
"MIT"
] |
permissive
|
caslab-vt/DeepPaSTL
|
4e028fb42ec1867de44512a788098966d526af3c
|
a928a0fc1f1bbe5a27f7bc1e7d1e320c023d13c6
|
refs/heads/main
| 2023-08-02T06:40:22.318280
| 2021-10-05T10:44:37
| 2021-10-05T10:44:37
| 413,774,872
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,922
|
py
|
import torch
import torch.nn as nn
import warnings
import numpy as np
import matplotlib
import pandas as pd
import scipy.io
warnings.filterwarnings('ignore')
matplotlib.rcParams['figure.figsize'] = (12.0, 12.0)
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
from config_args import parse_args
from data_utils.crop_utils import prep_overlap, predict_tiles, undo_overlap, predict_batch_tiles
from data_utils.data_postprocess import plot_surface, scatter_plot, plot_contour
from trainer_utils.trainer import TorchTrainer
from networks.encoderdecoder3d import EncoderDecoderWrapper3d
torch.manual_seed(420)
np.random.seed(420)
def main():
print("Starting")
# Parse arguments and load data
args = parse_args()
df = pd.read_csv(args.data_folder + 'processed_lidar_data.csv')
feature_list = ['h_in']
c = 1
t = 1
h = args.window_size
w = args.window_size
x_features = (c, t, h, w)
model = EncoderDecoderWrapper3d(args, None, None, feature_list, x_features)
print(f'GPUs used: {torch.cuda.device_count()}')
model = nn.DataParallel(model) # , device_ids=[0], output_device=[0])
model.to(args.device)
loss_fn = torch.nn.MSELoss()
model_optimizer = torch.optim.AdamW(model.parameters(), lr=args.lr, weight_decay=1e-2)
optimizers = [model_optimizer]
schedulers = []
trainer = TorchTrainer(
args.exp_name,
model,
optimizers,
loss_fn,
schedulers,
args.device,
scheduler_batch_step=True,
pass_y=False,
args=args
)
# print(repr(model))
trainer._load_checkpoint(only_model=True, epoch=args.epoch_load)
predict_single(df, args, trainer, split=True, plot=True)
def predict_single(df, args, trainer, split=True, plot=False):
# print(df.head())
height_list = ["h" + str(i + 1) for i in range(args.num_features)]
# In: batch, seq, dim, dim
scale_map_test = {}
scaled_data_test = pd.DataFrame()
scaled_data_test = pd.concat([scaled_data_test, df], ignore_index=True)
for h in height_list:
scaled_data_test[h] = (scaled_data_test[h] - df[h].min()) / (df[h].max() - df[h].min())
scale_map_test[h] = {'min_test': df[h].min(), 'max_test': df[h].max()}
h_aggr_list = np.array([np.array(scaled_data_test[h]) for h in height_list])
h_aggr_list = np.swapaxes(h_aggr_list, 1, 0)
h_aggr_list = np.reshape(h_aggr_list, (-1, args.xdim, args.ydim))
h_aggr_list = h_aggr_list[np.newaxis]
print(f"Shape of the given data: {h_aggr_list.shape}")
h_out = h_aggr_list
seq_len = h_aggr_list.shape[1]
h_aggr_list = prep_overlap((args, h_aggr_list))
print(f"Total Len of overlap: {len(h_aggr_list)} and shape: {h_aggr_list[0].shape}")
if split:
print("Os it coming?")
seq_len = int(seq_len/2)
print(f'Splitting across time: {seq_len}')
h_in = [h[:, :seq_len] for h in h_aggr_list]
h_out = h_out[:, seq_len:]
else:
h_in = h_aggr_list
print(f"Shape of the input: {h_in[0].shape}, Output: {h_out.shape}")
"""
Defining the Model
"""
# x = ([torch.randn(size=(10, 5, 32, 32))], [])
#
# vis_graph = make_dot(model(x), params=dict(model.named_parameters()))
# vis_graph.render("attached", format="png")
#
# return
"""
Running Predictions
"""
# h_in = ([torch.as_tensor(h_in, dtype=torch.float32)], [])
# h_out = [torch.as_tensor(h_out, dtype=torch.float32)]
print("Starting tile prediction")
h_pred = predict_batch_tiles(h_in, h_out, args, trainer)
h_pred_mean, h_pred_std = h_pred
print("Startin Overlap Undo")
print(f"Undo Overlap: {len(h_pred_mean)}, {h_pred_mean[0].shape}")
h_pred_mean = undo_overlap((args, h_pred_mean))
print(f"Undo Overlap: {len(h_pred_std)}, {h_pred_std[0].shape}")
h_pred_std = undo_overlap((args, h_pred_std))
h_target = h_out[0]
h_error = h_target - h_pred_mean
print(f'Mean: {h_pred_mean.shape}, Std: {h_pred_std.shape}, Target: {h_target.shape}')
# Scaling
min_test_scale = []
max_test_scale = []
for i in range(args.xdim * args.ydim):
min_test_scale.append(scale_map_test['h' + str(i + 1)]['min_test'])
max_test_scale.append(scale_map_test['h' + str(i + 1)]['max_test'])
min_test_scale = np.asarray(min_test_scale).reshape((args.xdim, args.ydim))
max_test_scale = np.asarray(max_test_scale).reshape((args.xdim, args.ydim))
h_pred_mean = np.multiply(h_pred_mean, max_test_scale - min_test_scale) + min_test_scale
h_pred_std = np.multiply(h_pred_std, max_test_scale - min_test_scale)
if plot:
h_error = np.multiply(h_error, max_test_scale - min_test_scale)
h_target = np.multiply(np.expand_dims(h_target, 0), max_test_scale - min_test_scale) + min_test_scale
for i in range(seq_len):
predict_mean = h_pred_mean[0][i]
predict_std = h_pred_std[0][i]
predict_err = h_error[0][i]
target_values = h_target[0][i]
plot_contour(args, predict_mean, title=f"3D Mean: Time: {i}")
plot_contour(args, predict_std, title=f"3D Std: Time: {i}")
plot_contour(args, predict_err, title=f"3D Error: Time: {i}")
plot_contour(args, target_values, title=f'3D Target: Time: {i}')
scatter_plot(args, h_error, h_pred_std, title="Error vs Std. Deviation")
y_mdic = {'y_predict_mean': h_pred_mean[0], 'y_predict_std': h_pred_std[0], 'y_predict_err': h_error[0],
'y_target': h_target[0]}
scipy.io.savemat(
args.data_folder + args.predict_folder + args.model + '_predict_data_' + args.predict_run + '_' + args.exp_name + '.mat', mdict=y_mdic, oned_as='row')
else:
return h_pred_mean, h_pred_std
if __name__ == '__main__':
main()
|
[
"murtazar@vt.edu"
] |
murtazar@vt.edu
|
28a7a95aef0ce520b596a4e1255e2750f99f75fe
|
8fc3f131bbc4e9fed9b3e3d0797505b9d09c7642
|
/login_app/views.py
|
4014ad835d116475048c170404c93f603bfd582e
|
[] |
no_license
|
reidyanabu/login_proj
|
09f10df3170d9343a6fa8888be7ab7364817278b
|
a6773aa33a372a70c74a26abb528e6966e27e94d
|
refs/heads/master
| 2023-04-08T10:17:02.415374
| 2021-04-19T15:35:17
| 2021-04-19T15:35:17
| 359,511,200
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,271
|
py
|
from django.shortcuts import render, redirect
from .models import User, Message, Comment, Book
from datetime import datetime
from django.contrib import messages
from django.http import JsonResponse
import bcrypt
def show_login(request):
# remove previous session .. this is my prerogative :)
if 'user_first_name' in request.session:
del request.session['user_first_name']
return render(request, "login.html")
def register_user(request):
# remove previous session .. this is my prerogative :)
if 'user_first_name' in request.session:
del request.session['user_first_name']
post_data = request.POST
# validate users input
errors = User.objects.create_user_data_validator(post_data)
if len(errors) > 0:
# if the errors dictionary contains anything, loop through each key-value pair and make a flash message
for key, value in errors.items():
messages.error(request, value)
return redirect('/')
else:
# register the new user
first_name_in = post_data['first_name']
last_name_in = post_data['last_name']
email_in = post_data['email']
birthday_in = datetime.strptime(post_data['birthday'], "%Y-%m-%d")
password_in = post_data['password']
pw_hash = bcrypt.hashpw(password_in.encode(), bcrypt.gensalt()).decode()
        # we use bcrypt to generate a salt and hash the password with it; only the hash is stored in the db
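        # Illustrative round trip with the same API (values are placeholders, not app data):
        #   hashed = bcrypt.hashpw(b"secret", bcrypt.gensalt()).decode()
        #   bcrypt.checkpw(b"secret", hashed.encode())  # -> True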
user = User.objects.create(first_name=first_name_in,last_name=last_name_in,email=email_in,birthday=birthday_in,password=pw_hash)
# create a user session and place user in it
request.session['user_first_name'] = user.first_name
request.session['user_id'] = user.id
return redirect("/success")
def process_login(request):
# remove previous session .. this is my prerogative :)
if 'user_first_name' in request.session:
del request.session['user_first_name']
post_data = request.POST
errors = User.objects.user_login_validator(post_data)
if len(errors) > 0:
# if the errors dictionary contains anything, loop through each key-value pair and make a flash message
for key, value in errors.items():
messages.error(request, value)
# we had errors .. set values BACK to context so we can re-populate the page
#context = {
# "login_email": post_data['login_email'],
# "login_password": post_data['login_password']
#}
return redirect('/')
else:
# no validation errors, proceed to get the user
email_in = post_data['login_email']
password_in = post_data['login_password']
try:
user = User.objects.get(email=email_in)
if bcrypt.checkpw(password_in.encode(), user.password.encode()):
# create a user session and place user in it
request.session['user_first_name'] = user.first_name
request.session['user_id'] = user.id
return redirect("/success")
else:
# passwords did not match!
errors['login_password'] = "Incorrect password entered"
# if the errors dictionary contains anything, loop through each key-value pair and make a flash message
for key, value in errors.items():
messages.error(request, value)
return redirect('/')
except Exception as e:
print(f"exception logging in user: {e}")
errors['General'] = f"Error logging in user: {e}"
# if the errors dictionary contains anything, loop through each key-value pair and make a flash message
for key, value in errors.items():
messages.error(request, value)
return redirect('/')
def show_success(request):
# get the user from the session and pass to view
if 'user_first_name' in request.session:
return render(request, "success.html")
else:
# not logged in
return redirect("/")
def logout(request):
if 'user_id' in request.session:
del request.session['user_first_name']
del request.session['user_id']
return redirect("/")
# AJAX call which takes an email string and compares it to values in the database and returns a JsonResponse object
def email_exists(request):
email = request.GET['email']
email_exists = User.objects.filter(email=email).exists()
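    # responds with e.g. {"email_exists": true} so the registration form can warn about duplicates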
data = {
'email_exists': email_exists
}
return JsonResponse(data)
def wall(request):
if 'user_id' not in request.session or 'user_first_name' not in request.session:
return redirect("/")
# get user
#user_id = request.session['user_id']
# get messages
messages = Message.objects.all()
context = {
"messages": messages
}
return render(request, "wall.html", context)
def post_message(request):
# get user
user_id = int(request.session['user_id'])
user = User.objects.get(id=user_id)
message_txt = request.POST['message']
# create message for given user with entered text .. VALIDATE TEXT ?????????
message = Message.objects.create(message=message_txt,user=user)
print(f"created message with id = {message.id}")
return redirect('/wall')
def post_comment(request):
# get user and message
user_id = int(request.session['user_id'])
message_id = int(request.POST['message_id'])
user = User.objects.get(id=user_id)
message = Message.objects.get(id=message_id)
comment_txt = request.POST['comment']
# create comment for given user with entered text .. VALIDATE TEXT ??????????
comment = Comment.objects.create(comment=comment_txt,user=user,message=message)
return redirect('/wall')
def delete_message(request):
# get message
message_id = int(request.GET['message_id'])
Message.objects.get(id=message_id).delete()
return redirect('/wall')
def delete_comment(request):
# get comment
comment_id = int(request.GET['comment_id'])
Comment.objects.get(id=comment_id).delete()
return redirect('/wall')
def books(request):
books = Book.objects.all()
context = {
"books": books
}
return render(request, "books.html", context)
def add_book(request):
post_data = request.POST
errors = User.objects.create_book_data_validator(post_data)
if (len(errors)>0):
# if the errors dictionary contains anything, loop through each key-value pair and make a flash message
for key, value in errors.items():
messages.error(request, value)
return redirect('/books')
else:
# no validation errors, create the book
title = post_data['title']
desc = post_data['desc']
user_id = int(request.session['user_id'])
user = User.objects.get(id=user_id)
book = Book.objects.create(title=title,desc=desc,uploaded_by=user)
book.users_who_likes.add(user)
book.save()
return redirect('/books')
def update_book(request):
post_data = request.POST
book_id = int(post_data['book_id'])
errors = User.objects.create_book_data_validator(post_data)
if (len(errors)>0):
# if the errors dictionary contains anything, loop through each key-value pair and make a flash message
for key, value in errors.items():
messages.error(request, value)
else:
# no validation errors, update the book
title = post_data['title']
desc = post_data['desc']
book = Book.objects.get(id=book_id)
book.desc = desc
book.title = title
#book.users_who_likes.add(user)
book.save()
return redirect(f'/books/{book_id}')
def delete_book(request,book_id):
Book.objects.get(id=book_id).delete()
return redirect('/books')
def show_book(request,book_id):
book = Book.objects.get(id=book_id)
# store flag whether user has favorited this book
user_id = int(request.session['user_id'])
user = User.objects.get(id=user_id)
is_favorite = False
users_who_like_book = book.users_who_likes
for current_user in book.users_who_likes.all():
if current_user == user:
is_favorite = True
context = {
"book": book,
"is_favorite": is_favorite
}
return render(request, "book.html", context)
def like_book(request,book_id):
user_id = int(request.session['user_id'])
user = User.objects.get(id=user_id)
# get the book and add user as a favorite
book = Book.objects.get(id=book_id)
user_who_likes_this_book = book.users_who_likes
user_who_likes_this_book.add(user)
return redirect(f"/books/{book_id}")
def remove_like_book(request,book_id):
user_id = int(request.session['user_id'])
user = User.objects.get(id=user_id)
# get the book and add remove user from the list of favorites
book = Book.objects.get(id=book_id)
user_who_likes_this_book = book.users_who_likes
user_who_likes_this_book.remove(user)
return redirect(f"/books/{book_id}")
|
[
"reid.yanabu@gmail.com"
] |
reid.yanabu@gmail.com
|
d673178723d40a89edc0e07b8902ea56bd5ac6a1
|
02193ece59037456d298d519b38661b5dfd0ab17
|
/3rd-year/semester-2/projet-dev/parser-pdf/sprint4/parser17.py
|
667775439d8973d0758c6eaa51a750548edd404b
|
[] |
no_license
|
pakpake/licence-informatique
|
561558d00f012a536ae97f74ee705e6c04dcecda
|
c9877ad75d3c4ee6e3904fe8b457f8b3242c7c3f
|
refs/heads/main
| 2023-05-09T06:33:19.927698
| 2021-05-26T19:49:03
| 2021-05-26T19:49:03
| 368,866,811
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 31,264
|
py
|
#!/usr/bin/python3.9
"""
Programme parser pdf vers txt
@author: Team
Utilise la fonction /usr/bin/pdftotext ou /usr/bin/pdf2txt
"""
import sys # pour les parametres pris en compte
import os,glob # pour la manipulation de nom de fichier
import shutil # pour la manipulation des dossiers
import re # pour les expressions regulieres
import subprocess # pour appeler des commandes linux
def usage():
""" fonction d'usage de la commande """
print("Usage : ./parser <options> repertoire")
print("Options : (type de sortie)")
print(" -a : génère un fichier XML et TXT pour chaque pdf")
print(" -t : génère un fichier TXT pour chaque pdf")
print(" -x : génère un fichier XML pour chaque pdf")
exit(1)
def verifPDF(fichier):
""" Prend un fichier en parametre l'ouvre et lit la premiere ligne
pour verifier que c'est bien "%PDF-" """
return b'%PDF-' in open(fichier,"rb").readline()
def isDir(param):
""" fonction qui test si le parametre est un repertoire"""
if (not os.path.isdir(param)):
print("Le parametre entree n'est pas un repertoire !")
usage()
exit(1)
def getBasename(f):
""" recupere le nom du fichier sans extension, mais en gardant le chemin complet """
filename, filext = os.path.splitext(f)
return filename
def getTitle1(fpdf):
""" recupere le titre du fichier .html entre les balises <title>...</title> avec une regex
fonction qui appelle la commande pdftotext -htmlmeta pour convertir le pdf en html
"""
# creation du fichier en .html (au meme endroit que le fichier d'origine)
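    # pdftotext flags used here: "-l 1" stops the conversion after the first page and
    # "-htmlmeta" wraps the output in simple HTML that includes the document metadata
    # (hence the <title> tag parsed below).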
subprocess.run(["/usr/bin/pdftotext","-l","1","-htmlmeta",fpdf])
# nom du fichier html
fhtml = getBasename(fpdf)+".html"
PATTERN = "<title>.*</title>" # regex du titre
try :
# ouverture en lecture du fichier
f1 = open(fhtml,"r")
# pour toutes les lignes du fichier
for ligne in f1:
# on cherche l'expression reguliere
res = re.match(PATTERN, ligne)
if res != None:
# on recupere le titre entre les balises
title = res[0][7:-8]
# cas particulier des fichiers transformes avec dvips
if "dvips" in title:
# on supprime le titre
title = ""
return title
except ValueError as err:
print("Erreur getTitle1 : ",err)
finally:
# fermeture du fichier
f1.close()
# suppression du fichier html temporaire
os.remove(getBasename(fpdf)+".html")
def getTitle2(fpdf):
""" fonction qui récupère le titre v2 """
# subprocess.run(["/usr/bin/pdftotext","-l","1","-raw",fpdf])
# ftxt = getBasename(fpdf)+".txt"
ftxt = getBasename(fpdf)+".txt"
try:
# ouverture du fichier en lecture seule
f = open(ftxt,"r")
# lecture
line = f.readline()
if line == "" or re.search("^c$",line,re.IGNORECASE) or "2002" in line:
while line == "" or re.search("arxiv",line,re.IGNORECASE) or re.search("[a-zA-Z]$",line) or re.search("^ [a-zA-Z]*",line) or "2002" in line or line == "c":
line = f.readline()
titre = line
# print(":".join("{:02x}".format(ord(c)) for c in titre))
line = f.readline()
if line != "1st" and line != "" and "∗" not in line and "é" not in line:
titre = titre[:-1] + '" ' + line[:-1]
titre.replace('\n',' ')
# titre = ''.join(c for c in titre if ord(c) < 128)
return titre
except ValueError as err:
print("Erreur getTitle2 : ",err)
finally:
# fermeture du fichier
        f.close()
# suppression du fichier txt temporaire
# os.remove(ftxt)
def getTitle(fpdf):
""" appelle les differentes methodes getTitle """
# on convertit tout le pdf en txt, il est appelé de cette façon 4 fois dans les fonctions
subprocess.run(["pdftotext","-raw",fpdf])
getTitle.t = getTitle1(fpdf)
if getTitle.t == "":
getTitle.t = getTitle2(fpdf)
return getTitle.t
def getAbstract1(fpdf):
""" recupere le resume du fichier passe en parametre
convertit le pdf en txt avec la commande pdftotext
ne convertit que la premiere page qui contient toujours le titre et l'abstract
pdftotext est plus rapide que pdf2txt
Cas general qui marche pour la plupart des fichiers, se base sur les sauts lignes"""
# creation du fichier tmp.txt qui contient le texte brut
subprocess.run(["/usr/bin/pdftotext","-l","1",fpdf,"tmp.txt"])
ftxt = "tmp.txt"
try:
# open file
f1 = open(ftxt,"r")
monAbstract = ""
line = f1.readline() # lit une ligne du fichier et va a la ligne suivante
while not re.search("abstract",line,re.IGNORECASE) and line != '':
line = f1.readline()
# la ligne suivant est une ligne blanche
#line = f1.readline() # on passe cette ligne et on va a la suivante
monAbstract += line
line = f1.readline() # on lit la prochaine ligne
if line != "\n":
monAbstract += line
# tant qu'on ne trouve pas une ligne blacnhe
# on stocke les lignes dans monAbstract
while line != "\n" and line != '':
monAbstract += line
line = f1.readline()
# on supprime la cesure
# creation d'une expression reguliere "-\n"
regex = re.compile(r'-\n')
# remplacement de la regex par '' (rien) dans monAbstract
monAbstract = regex.sub('',monAbstract)
# on supprime les sauts de lignes
regex = re.compile(r'\n')
monAbstract = regex.sub(' ',monAbstract)
# on supprimer le mot "Abstract" au debut s'il existe
regex = re.compile(r"Abstract.?")
monAbstract = regex.sub('',monAbstract)
return monAbstract
except ValueError as err:
print("Erreur getAbstract : ",err)
finally:
f1.close()
# suppression du fichier txt temporaire
os.remove(ftxt)
def getAbstract2(fpdf):
"""Cas particulier des fichiers ou l'abstract est entre une ligne contenant "abstract" et une ligne contenant "introduction" sans sauts de lignes avant et apres"""
# creation du fichier tmp.txt qui contient le texte brut
#subprocess.run(["/usr/bin/pdftotext","-l","1","-raw",fpdf,"tmp.txt"])
ftxt = getBasename(fpdf)+".txt"
try:
# open file
f1 = open(ftxt,"r")
monAbstract = ""
line = f1.readline() # lit une ligne du fichier et va a la ligne suivante
while not re.search("abstract",line,re.IGNORECASE) and line != '':
line = f1.readline()
# la ligne suivant est une ligne blanche
#line = f1.readline() # on passe cette ligne et on va a la suivante
monAbstract += line
line = f1.readline() # on lit la prochaine ligne
while not re.search("introduction",line,re.IGNORECASE) and line != '':
monAbstract += line
line = f1.readline()
# on supprime la cesure
# creation d'une expression reguliere "-\n"
regex = re.compile(r'-\n')
# remplacement de la regex par '' (rien) dans monAbstract
monAbstract = regex.sub('',monAbstract)
# on supprime les sauts de lignes
regex = re.compile(r'\n')
monAbstract = regex.sub(' ',monAbstract)
# on supprimer le mot "Abstract" au debut s'il existe
#re.sub('abstract.?','', monAbstract, flags=re.IGNORECASE)
# idem que la ligne du dessus
regex = re.compile(r"Abstract.?")
monAbstract = regex.sub('',monAbstract)
return monAbstract
except ValueError as err:
print("Erreur getAbstract : ",err)
finally:
f1.close()
# suppression du fichier txt temporaire
# os.remove(ftxt)
def getAbstract3(fpdf):
"""cas des fichiers ou il n'existe pas de mot abstract, ce sera le paragraphe avant l'introduction"""
# creation du fichier tmp.txt qui contient le texte brut
subprocess.run(["/usr/bin/pdftotext","-l","1",fpdf,"tmp.txt"])
ftxt = "tmp.txt"
try:
# open file
f = open(ftxt,"r")
monAbstract = ""
line = f.readline()
line2 = line
# on parcourt toutes les lignes avant "introduction" et on
# enregistre tout ce qui se passe avant
while not re.search("introduction",line2,re.IGNORECASE):
monAbstract += line
line2 = f.readline()
# si on a une ligne blanche suivit d'une ligne qui ne contient pas
# "introduction", alors on remet monAbstract a vide
if line == "\n" and not re.search("introduction",line2,re.IGNORECASE):
monAbstract = ""
monAbstract += line2
line = line2
# on supprime la cesure
# creation d'une expression reguliere "-\n"
regex = re.compile(r'-\n')
# remplacement de la regex par '' (rien) dans monAbstract
monAbstract = regex.sub('',monAbstract)
# on supprime les sauts de lignes
regex = re.compile(r'\n')
monAbstract = regex.sub(' ',monAbstract)
return monAbstract
except ValueError as err:
print("Erreur getAbstract : ",err)
finally:
f.close()
# suppression du fichier txt temporaire
#os.remove(ftxt)
def getAbstract(fpdf):
""" on applique les differentes methodes pour recuperer l'abstract """
a = getAbstract1(fpdf)
if a == "":
a = getAbstract2(fpdf)
if a == "":
a = getAbstract3(fpdf)
return a
def getAuthor1(fpdf):
""" Recupere les auteurs du fichier dans les metadonnees de ce dernier
fonction qui appelle la commande pdfinfo pour avoir les differentes informations du fichier """
# stdout est la voie de sortie, en faisant .PIPE on indique que la voie de sortie standard est ouverte
# permettant de recuieillir les informations du subprocess
# appel de la commande pdfinfo/grep/cut en subprocess.Popen, creant ainsi un object
# permettant la manipulation des entrees/sorties des commandes
pdfinfo=subprocess.Popen(["/usr/bin/pdfinfo","-f","1",fpdf],stdout=subprocess.PIPE)
grep=subprocess.Popen(["grep","Author"],stdin=pdfinfo.stdout,stdout=subprocess.PIPE)
cut=subprocess.Popen(["cut","-d:","-f2"], stdin=grep.stdout,stdout=subprocess.PIPE, universal_newlines=True)
# On lit la sortie standart de cut et on separe les differents elements avec "\n"
author=cut.stdout.read().split("\n")
# si l'auteur contient aussi une adresse email
match = re.search(r'([\w.-]+)@([\w.-]+)', author[0])
if match:
author[0] = re.sub(r'([\w.-]+)@([\w.-]+)',r'',author[0])
# on enleve tout les espaces devant et derriere
author[0]=author[0].strip()
return author[0]
def getAuthor2(fpdf):
""" Dans le cas ou les informations ne sont pas dans les metadonnees du fichier """
# creation du fichier tmp.txt qui contient le texte brut
# subprocess.run(["/usr/bin/pdftotext","-l","1","-raw",fpdf])
ftxt=getBasename(fpdf)+".txt"
try:
# ouverture du fichier
f1 = open(ftxt,"r")
author = ""
# On recupere le titre
#titre=getTitle(fpdf)
titre=getTitle.t
line=f1.readline()
# On fait attention aux caracteres speciaux
titre=titre.replace("fi","fi")
line=line.strip()
# On cree une liste de la phrase
mot=line.split(" ")
# Si le premier mot est dans le titre c'est que la ligne courante fait partie du titre
while re.search(mot[0],titre) :
line=f1.readline()
mot=line.split(" ")
# On recupere l'abstract
abs=getAbstract(fpdf)
# Tant que le premier mot ne fait pas partie de l'abstract on copie tout dans auteur
while not re.search(mot[0],abs):
author+=line
line=f1.readline()
mot=line.split(" ")
# On cree une liste ou la separation se fait par "\n"
author=author.split("\n")
# On recupere tout les elements de la liste qui ne repondent a ces criteres :
# si l'element ne contient pas de chiffres, s'il ne contient pas d'emails,
# s'il n'a pas "Université"/ "Google" / "Abstract" dans l'element,
# s'il n'est pas vide et si le premier caractere n'est pas une minuscule
author = [x for x in author if not any(c.isdigit() for c in x) and not re.search(r'([\w.-]+)@([\w.-]+)', x)and not "Universit" in x and not "Google" in x and not "Abstract" in x and x!='' and x[0].isupper()]
# On separe les differents auteurs par un ;
othor='; '.join(author)
return othor
finally:
f1.close()
# Suppression du fichier txt temporaire
#os.remove(ftxt)
def getAuthor3(fpdf):
""" Dans le cas ou on arrive pas a recuperer le titre """
# subprocess.run(["/usr/bin/pdftotext","-l","1","-raw",fpdf])
ftxt=getBasename(fpdf)+".txt"
try:
f1 = open(ftxt,"r")
author = ""
line=f1.readline()
# Tant que la ligne ne contient pas le mot abstract, on copie tout
while not re.search("Abstract",line):
line=f1.readline()
author+=line
author=author.split("\n")
# On recupere tout les elements de la liste qui ne repondent a ces criteres :
# si l'element ne contient pas de chiffres,
# s'il ne contient pas d'emails,
# s'il n'a pas "Universite"/ "Google" / "Abstract" dans l"element,
# s'il n'est pas vide et si le premier caractere n'est pas une minuscule
author = [x for x in author if not any(c.isdigit() for c in x) and not re.search(r'([\w.-]+)@([\w.-]+)', x)and not "Universit" in x and not "Google" in x and not "Abstract" in x and x!='' and x[0].isupper()]
# On separe les differents auteurs par un ;
othor='; '.join(author)
return othor
finally:
f1.close()
# suppression du fichier txt temporaire
#os.remove(ftxt)
def getAuthor(fpdf):
""" appelle les differentes methodes getAuthor """
b = getAuthor1(fpdf)
if b=="":
b = getAuthor2(fpdf)
if b=="":
b = getAuthor3(fpdf)
#email = getEmail(fpdf)
return b#+" ; " +email
def getEmail(fpdf):
""" permet de recuperer les emails """
email=subprocess.Popen(["/usr/bin/pdftotext","-raw","-l","1",fpdf,"-"],stdout=subprocess.PIPE)
grep=subprocess.Popen(["grep","@"],stdin=email.stdout,stdout=subprocess.PIPE)
rev=subprocess.Popen(["rev"],stdin=grep.stdout,stdout=subprocess.PIPE)
cut=subprocess.Popen(["cut","-d"," ","-f1"],stdin=rev.stdout,stdout=subprocess.PIPE)
rev2=subprocess.Popen(["rev"],stdin=cut.stdout,stdout=subprocess.PIPE,universal_newlines=True)
# On lit la sortie standart de cut et on separe les differents elements avec "\n"
email=rev2.stdout.read().split("\n")
emails = " ; ".join(email)
return emails
def regroupeAuthorMail(fpdf):
"""Permet d associer les auteurs avec leur adresse mail"""
#On recupere la liste des auteurs avec la fonction ecrite precedemment
author = getAuthor(fpdf)
#On recupere la liste des mails avec la fonction ecrite precedemment et on met les caracteres en minuscule
mail = getEmail(fpdf)
mail = mail.lower()
#Initialisation de la variable retournee
regroupement=""
# creation d'une expression reguliere ","
regex = re.compile(r',')
# remplacement de la regex par ';' dans la liste des auteurs
author = regex.sub(';',author)
# creation d'une expression reguliere " "
regex = re.compile(r' ')
# remplacement de la regex par '' (rien) dans la liste des mails
mail = regex.sub('',mail)
#Creation de tableaux pour les auteurs et pous les mails
author=author.split(";")
mail=mail.split(";")
#On enleve les elements du tableau des mails qui sont vides
mail = [ m for m in mail if m != '']
#Pour chaque auteur on cherche s'il a une adresse mail qui lui est associee
for a in author:
#Separation du nom et du prenom pour prendre en compte les adresses mail qui ne comporte pas les 2
nomPrenom = a.split(" ")
#On enleve les elements du tableau des noms et prenoms qui sont vides
nomPrenom = [ i for i in nomPrenom if i != '']
for m in mail:
for n in nomPrenom:
#On les met en minuscule pour les comparer avec les adresses mail aussi mises en minucules
n=n.lower()
#Si on trouve soit le nom soit le prenom d'un auteur dans une adresse mail on les associe
if n in m:
regroupement += (" <auteur>" + a + " : " + m + "</auteur>\n")
#On retire des listes l'auteur et son adresse mail parce qu'ils ont deja ete associes
#A la fin ces tableaux representent les auteurs et mails non associes mais a afficher quand meme
mail=[p for p in mail if p != m]
author=[o for o in author if o != a]
#On arrete de parcourir le tableau nomPrenom
break
#S'il reste des auteurs et/ou des mails qui n'ont pas d'association on les liste a la fin
for a in author:
regroupement += " <auteur>" + a + "</auteur>\n"
for m in mail:
regroupement += " <mail>" + m + "</mail>\n"
return regroupement
def getRef(fpdf):
ftxt=getBasename(fpdf)+".txt"
f = open(ftxt,"r")
references = ""
line = f.readline()
# regex
while not re.search("^references",line,re.IGNORECASE) and not re.search("(\n)*references",line,re.IGNORECASE):
line = f.readline()
line = f.readline()
while(line != ''):
references += line
line = f.readline()
regex = re.compile(r'^|\n\d+\n+')
references = regex.sub('',references)
regex = re.compile(r'-\n')
references = regex.sub('',references)
regex = re.compile(r'\n')
references = regex.sub('##',references)
regex = re.compile(r'\.##')
references = regex.sub('\n',references)
regex = re.compile(r'##')
references = regex.sub(' ',references)
return references
def getRef2(fpdf):
ftxt=getBasename(fpdf)+".txt"
f = open(ftxt,"r")
references = ""
line = f.readline()
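    # The shell pipeline below reads the file bottom-up (tac), finds the last line that
    # starts with "references" (grep -m1 -ni on the reversed stream) and keeps its line
    # number counted from the end of the file; readlines()[-int(c):] then yields everything
    # from that heading down to the end of the document.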
c = subprocess.getoutput('tac '+ftxt+'|grep -m1 -ni "^[[:space:]]*references" | cut -d: -f1')
for line in f.readlines()[-int(c):]:
references += line
regex = re.compile(r'\r')
references = regex.sub('',references)
regex = re.compile(r'^|\n[0-9]*')
references = regex.sub('\n',references)
regex = re.compile(r'-\n')
references = regex.sub('',references)
regex = re.compile(r'\n')
references = regex.sub('##',references)
regex = re.compile(r'\.##')
references = regex.sub('\n',references)
regex = re.compile(r'##')
references = regex.sub(' ',references)
return references
def getIntroEtCorps(fpdf):
""" Recupere l'introduction de l'article """
subprocess.run(["pdftotext","-raw",fpdf])
ftxt=getBasename(fpdf)+".txt"
f = open(ftxt,"r")
intro = ""
corps = ""
line = f.readline()
while not re.search("introduction",line,re.IGNORECASE) and line != '':
line = f.readline()
intro += line
line = f.readline() # on lit la prochaine ligne
while not re.search("^([0-9]|II)\.? +\w+",line,re.IGNORECASE) and line != '':
intro += line
line = f.readline()
corps += line
line = f.readline()
while not re.search("(conclusion|discussion)",line,re.IGNORECASE) and line != '':
corps += line
line = f.readline()
# on supprime les lignes qui ne contiennent qu'un nombre tout seul
regex = re.compile(r'^|\n[0-9]*')
intro = regex.sub('\n',intro)
corps = regex.sub('\n',corps)
# nettoyage de l'intro
regex = re.compile(r'-\n')
intro = regex.sub('',intro)
corps = regex.sub('',corps)
regex = re.compile(r'\n')
intro = regex.sub('##',intro)
corps = regex.sub('##',corps)
regex = re.compile(r'\.##')
intro = regex.sub('\n',intro)
corps = regex.sub('\n',corps)
regex = re.compile(r'##')
intro = regex.sub(' ',intro)
corps = regex.sub(' ',corps)
# on supprimer le mot "introduction" au debut s'il existe
regex = re.compile(r"([0-9]|I)\.? +introduction.?",re.IGNORECASE)
intro = regex.sub('',intro)
return intro, corps
def getAffli(fpdf):
subprocess.run(["/usr/bin/pdftotext","-raw",fpdf])
ftxt =getBasename(fpdf)+".txt"
try:
# ouverture du fichier
f1 = open(ftxt,"r")
author = ""
# On recupere le titre
titre=getTitle(fpdf)
line=f1.readline()
while not re.search("abstract",line,re.IGNORECASE) and line != '' and not re.search("introduction",line,re.IGNORECASE):
author+=line
line=f1.readline()
if re.search(r"\[]",line):
line=line.replace("[]","\[]")
if re.search(r"\{}",line):
line=line.replace("{}","\{}")
if re.search(r"\(\)",line):
line=line.replace("()","\(\)")
author=author.split("\n")
i=0
while i<len(author):
if "and" in author[i]:
author[i]=re.sub(r'and',r'',author[i])
if "1,2" in author[i]:
author[i]=re.sub(r"1,2.*",r'',author[i])
author[i]=author[i].strip()
i=i+1
print(author)
print("\n")
a=[]
for i in range(1, len(author)):
            if re.search(r'\A(d\’)', author[i]):
if author[i] not in a:
a.append(author[i])
if "UPF" in author[i]:
if author[i] not in a:
a.append(author[i])
if "Universit" in author[i]:
if author[i] not in a:
a.append(author[i])
if "Insti" in author[i]:
if author[i] not in a:
a.append(author[i])
if "parte" in author[i]:
if author[i] not in a:
a.append(author[i])
if "Labo" in author[i]:
if author[i] not in a:
a.append(author[i])
if "cole" in author[i]:
if author[i] not in a:
a.append(author[i])
#author = [x for x in author if not ]
othor=', '.join(a)
return othor
finally:
f1.close()
# suppression du fichier txt temporaire
os.remove(ftxt)
def getConclu(fpdf):
""" Recupere la conclusion de l'article """
subprocess.run(["pdftotext","-raw",fpdf])
ftxt=getBasename(fpdf)+".txt"
f = open(ftxt,"r")
conclu = ""
line = f.readline()
while not re.search("conclusion",line,re.IGNORECASE) and line != '':
line = f.readline()
conclu += line
line = f.readline() # on lit la prochaine ligne
while not re.search(".*(References|Acknowledgment|Appendix|Follow-Up) *\w*",line,re.IGNORECASE) and line != '':
conclu += line
line = f.readline()
# on supprime les lignes qui ne contiennent qu'un nombre tout seul
regex = re.compile(r'^|\n[0-9]*')
conclu = regex.sub('\n',conclu)
# on supprimer le mot "conclusion" au debut s'il existe
regex = re.compile(r".*conclusion.*\n",re.IGNORECASE)
conclu = regex.sub('',conclu)
# nettoyage de la conclu
regex = re.compile(r'-\n')
conclu = regex.sub('',conclu)
regex = re.compile(r'\n')
conclu = regex.sub('##',conclu)
regex = re.compile(r'\.##')
conclu = regex.sub('\n',conclu)
regex = re.compile(r'##')
conclu = regex.sub(' ',conclu)
return conclu
def getDiscus(fpdf):
""" Recupere la discussion de l'article """
subprocess.run(["pdftotext","-raw",fpdf])
ftxt=getBasename(fpdf)+".txt"
f = open(ftxt,"r")
discu = ""
line = f.readline()
while not re.search("discussion",line,re.IGNORECASE) and line != '':
line = f.readline()
discu += line
line = f.readline() # on lit la prochaine ligne
while not re.search(".*(Acknowledgment|Appendix|conclusion|references) *\w*",line,re.IGNORECASE) and line != '':
discu += line
line = f.readline()
# on supprime les lignes qui ne contiennent qu'un nombre tout seul
regex = re.compile(r'^|\n[0-9]*')
discu = regex.sub('\n',discu)
# on supprimer le mot "discussion" au debut s'il existe
regex = re.compile(r".*discussion.*\n",re.IGNORECASE)
discu = regex.sub('',discu)
# nettoyage de la discussion
regex = re.compile(r'-\n')
discu = regex.sub('',discu)
regex = re.compile(r'\n')
discu = regex.sub('##',discu)
regex = re.compile(r'\.##')
discu = regex.sub('\n',discu)
regex = re.compile(r'##')
discu = regex.sub(' ',discu)
return discu
def traite1fichier(fpdf):
""" affiche ce que l'on veut pour 1 seul pdf """
liste = []
# on met le nom de fichier dans la liste
# en enlevant le nom du dossier
liste.append(os.path.basename(fpdf))
# on recupere le titre
t = getTitle(fpdf)
# et on le met dans la liste
liste.append(t)
# on recupere les auteurs et les mails qui leur sont associes
#u=getAuthor(fpdf)
u=regroupeAuthorMail(fpdf)
# et on le met dans la liste
liste.append(u)
# on recupere l'abstact
a = getAbstract(fpdf)
# et on le met dans la liste
liste.append(a)
# on recupere l'introduction
i,cp = getIntroEtCorps(fpdf)
# et on la met dans la liste
liste.append(i)
liste.append(cp)
# on recupere la conclusion
c = getConclu(fpdf)
# et on la met dans la liste
liste.append(c)
# on recupere la conclusion
d = getDiscus(fpdf)
# et on la met dans la liste
liste.append(d)
# on recupere les references
r = getRef2(fpdf)
# et on le met dans la liste
liste.append(r)
# on recupere les references
t = getAffli(fpdf)
# et on le met dans la liste
liste.append(t)
# on supprime le fichier raw temporaire
os.remove(getBasename(fpdf)+".txt")
return liste
if __name__ == "__main__":
# on teste le nombre d'arguments qui doit etre 1 exactement
if len(sys.argv) != 3:
usage()
# si ce n'est pas la bonne option
if sys.argv[1] != "-a" and sys.argv[1] != "-t" and sys.argv[1] != "-x":
usage()
# on recupere le nom du repertoire de travail
nomDossier = sys.argv[2]
#on verifie que le premier argument est bien un repertoire
isDir(nomDossier)
# nom du repertoire de resultat
resultName = nomDossier+"/results"
# si le repertoire de resultats existe
if os.path.exists(resultName):
# on force la suppression du repertoire de resultat
shutil.rmtree(resultName)
# Menu de selection des fichiers du repertoire
i=0
lpdf=[]
for files in glob.glob(nomDossier+"/*.pdf"):
print("["+str(i)+"]","---",files)
lpdf.append(os.path.basename(files))
i+=1
# selection des fichiers par leur indice
print("Entrez les numéros de fichiers à traiter, séparés par un espace, (\"*\" pour tous) : ")
sidx = input()
if sidx == "*":
lf = lpdf
else:
fileidx = [int(s) for s in sidx.split()]
# on ne retient que les indices valides
res = [x for x in fileidx if x<len(lpdf) ]
lf = [lpdf[i] for i in res]
#Creation de la liste des fichiers qui sont en pdf et ceux qu'il faut parser
listeAParser = []
#Creation de la liste des fichiers qui ne sont pas en pdf et ceux qu'il faut indiquer à l'utilisateur
listeNePasParser = []
# pour chacun des fichiers dans le repertoire,
for i in lf:
# on traite le prochain fichier
#Si verifPDF retourne TRUE alors le fichier est un PDF
if verifPDF(nomDossier+"/"+i):
#On peut l'ajouter a la liste
listeAParser.append(i)
#Si verifPDF retourne FALSE, le fichier n'est pas un PDF et l'utilisateur est prevenu
else:
print("Attention, " + i + " n'est pas un PDF")
listeNePasParser.append(i)
# on cree une liste vide qui contiendra des listes avec les infos demandees
listeFinale = []
# pour chacun des fichiers dans le repertoire,
a=1
for i in listeAParser:
# on traite le fichier
print("pdf courant [",a,"] : "+i)
a+=1
l = traite1fichier(nomDossier+"/"+i)
listeFinale.append(l)
# on cree le repetoire "results"
os.makedirs(resultName)
# pour tous les fichiers de notre liste finale
if sys.argv[1] == "-a" or sys.argv[1] == "-t":
for k in listeFinale:
# on ouvre le fichier en ecriture
fichier = open(getBasename(resultName+"/"+os.path.basename(k[0]))+".txt","w+")
for i in range(9):
# on remplit le fichier avec les elements de la liste
fichier.write(k[i]+"\n")
# on ferme le fichier courant
fichier.close()
if sys.argv[1] == "-a" or sys.argv[1] == "-x":
for k in listeFinale:
# on ouvre le fichier en ecriture
fichier = open(getBasename(resultName+"/"+os.path.basename(k[0]))+".xml","w+")
fichier.write("<article>\n")
# preambule
fichier.write(" <preambule> "+k[0]+" </preambule>\n")
# titre
fichier.write(" <titre> "+k[1]+" </titre>\n")
# auteur
fichier.write(" <auteurs>\n"+k[2]+" </auteurs>\n")
# Affliliations
fichier.write(" <affliliations> "+k[9]+" </affliliations>\n")
# abstract
fichier.write(" <abstract> "+k[3]+" </abstract>\n")
# introduction
fichier.write(" <introduction> "+k[4]+" </introduction>\n")
# corps
fichier.write(" <corps> "+k[5]+" </corps>\n")
# conclusion
fichier.write(" <conclusion> "+k[6]+" </conclusion>\n")
# discussion
fichier.write(" <discussion> "+k[7]+" </discussion>\n")
# biblio
fichier.write(" <biblio> "+k[8]+" </biblio>\n")
fichier.write("</article>\n")
# on ferme le fichier courant
fichier.close()
#Si des fichiers du repertoire ne sont pas des PDF on les liste dans un fichierm sinon ce fichier n'est pas cree
if len(listeNePasParser) != 0:
# non du fichier liste erreurs
resultError = resultName+"/Liste_Fichiers_Non_PDF.txt"
#Ouvreture du fichier et ecriture de son but
fileError = open(resultError,"w+")
fileError.write("Voici la liste des fichiers de votre dossier " + nomDossier + " qui ne sont pas des PDF :\n\n")
for j in range(len(listeNePasParser)):
# On remplit le fichier avec les elements de la liste
fileError.write("-> "+listeNePasParser[j]+"\n")
fileError.close()
print("Vous trouverez un repertoire 'results' dans " + nomDossier + " contenant un fichier texte pour chaque PDF avec les informations principales,\nainsi qu'un fichier : 'Liste_Fichiers_Non_PDF.txt', listant les fichiers de " + nomDossier + " qui ne sont pas des PDF.")
else: print("Vous trouverez un repertoire 'results' dans " + nomDossier + " contenant un fichier texte pour chaque PDF avec les informations principales.")
|
[
"test13344@protonmail.com"
] |
test13344@protonmail.com
|
62a9d1cd2b8d4fc8e87befa78f7044a6d5a16698
|
d5316aef8810866057590e64b4f5c4d8540a7a0f
|
/posts/migrations/0003_auto_20210419_0716.py
|
104d49e39b10ebd08cc22f8ccb19152154c8643f
|
[] |
no_license
|
waynecornwall/flashcard
|
a5e9ece247f596858e7295c8866dadea155acde0
|
bcb78e68d320fa45b26f33632ad97edd90975488
|
refs/heads/main
| 2023-04-23T23:52:43.906722
| 2021-04-24T13:39:12
| 2021-04-24T13:39:12
| 360,993,779
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,108
|
py
|
# Generated by Django 3.2 on 2021-04-19 11:16
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('posts', '0002_auto_20210413_1849'),
]
operations = [
migrations.RemoveField(
model_name='term',
name='definition',
),
migrations.RemoveField(
model_name='term',
name='ref_point',
),
migrations.RemoveField(
model_name='term',
name='source',
),
migrations.AddField(
model_name='source',
name='definition',
field=models.TextField(blank=True, null=True),
),
migrations.AddField(
model_name='source',
name='ref_point',
field=models.IntegerField(blank=True, null=True),
),
migrations.AddField(
model_name='source',
name='term',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='posts.term'),
),
]
|
[
"waynecornwall85@gmail.com"
] |
waynecornwall85@gmail.com
|
5faf5bdf31629aca50ee551d71810b822590a7f7
|
0d2f0d0f7720c004223531d0e72bce0b0f1a6253
|
/MarkovModel/ParameterClasses.py
|
c849a4e181e99c544869e2069c1b27e6b0a59f8a
|
[] |
no_license
|
ms3456/HPM573_SHE_HW11
|
11e83aae5bf77515fa0855ffb8db6ef5519b067b
|
8218a02e5353ec042d1863ff009f8016d925d9c1
|
refs/heads/master
| 2020-03-12T12:53:56.397804
| 2018-04-23T02:28:50
| 2018-04-23T02:28:50
| 130,629,341
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,507
|
py
|
from enum import Enum
import numpy as np
import scipy.stats as stat
import math as math
import InputData as Data
import scr.MarkovClasses as MarkovCls
import scr.RandomVariantGenerators as Random
import scr.FittingProbDist_MM as Est
class HealthStats(Enum):
""" health states of patients with HIV """
WELL = 0
STROKE = 1
POST_STROKE = 2
DEATH = 3
BACKGROUND_DEATH = 4
class Therapies(Enum):
""" mono vs. combination therapy """
NONE = 0
ANTICOAG = 1
class ParametersFixed():
def __init__(self, therapy):
# selected therapy
self._therapy = therapy
# simulation time step
self._delta_t = Data.DELTA_T
self._adjDiscountRate = Data.DISCOUNT * Data.DELTA_T
# initial health state
self._initialHealthState = HealthStats.WELL
# annual treatment cost
if self._therapy == Therapies.NONE:
self._annualTreatmentCost = 0
if self._therapy == Therapies.ANTICOAG:
self._annualTreatmentCost = 0
# transition probability matrix of the selected therapy
self._prob_matrix = []
# treatment relative risk
self._treatmentRR = 0
if self._therapy == Therapies.NONE:
self._annualStateCosts = Data.HEALTH_COST
else:
self._annualStateCosts = Data.ANTICOAG_COST
#self._annualStateCosts = Data.HEALTH_COST
self._annualStateUtilities = Data.HEALTH_UTILITY
self._prob_matrix=[]
if therapy==Therapies.NONE:
self._prob_matrix[:], p=MarkovCls.continuous_to_discrete(Data.RATE_MATRIX_NONE,Data.DELTA_T)
else:
self._prob_matrix[:],p=MarkovCls.continuous_to_discrete(Data.RATE_MATRIX_ANTI, Data.DELTA_T)
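        # continuous_to_discrete (from scr.MarkovClasses) presumably converts the continuous
        # transition rate matrix into per-cycle transition probabilities for time step delta_t
        # (for a single rate r, roughly p = 1 - exp(-r * delta_t)); the second return value
        # is bound to p but not used here.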
def get_initial_health_state(self):
return self._initialHealthState
def get_delta_t(self):
return self._delta_t
def get_adj_discount_rate(self):
return self._adjDiscountRate
def get_transition_prob(self, state):
return self._prob_matrix[state.value]
def get_annual_state_cost(self, state):
if state == HealthStats.DEATH:
return 0
else:
return self._annualStateCosts[state.value]
def get_annual_state_utility(self, state):
if state == HealthStats.DEATH:
return 0
else:
return self._annualStateUtilities[state.value]
def get_annual_treatment_cost(self):
return self._annualTreatmentCost
|
[
"meng.she@yale.edu"
] |
meng.she@yale.edu
|
2c81d1b33ccd79f204e82c7086e123bea925b6bb
|
0e9a0a570921b0c5ffe967f2647556f1a3866237
|
/custom_components/spacex/sensor.py
|
ebf7f7ec88cd5cc5f97fc4ff006fa1b5d1603321
|
[] |
no_license
|
aukjan/Home-Assistant_Config
|
64e9b2eb35528f6be29566b49f2aaa1c0e83f339
|
d95c0490c36c4ea428f7ede2db17a730d9482afd
|
refs/heads/master
| 2022-12-22T00:18:17.641637
| 2020-09-19T09:37:17
| 2020-09-19T09:37:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,466
|
py
|
"""Definition and setup of the SpaceX Binary Sensors for Home Assistant."""
import datetime
import logging
from homeassistant.components.sensor import ENTITY_ID_FORMAT
from homeassistant.const import LENGTH_KILOMETERS, SPEED_KILOMETERS_PER_HOUR
from homeassistant.helpers.entity import Entity
from .const import COORDINATOR, DOMAIN
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass, entry, async_add_entities, discovery_info=None):
"""Set up the sensor platforms."""
coordinator = hass.data[DOMAIN][entry.entry_id][COORDINATOR]
sensors = []
    # (name, entity_id, icon, device_identifier) for every sensor exposed by this platform
    sensor_definitions = [
        ("Next Launch Mission", "spacex_next_launch_mission", "mdi:information-outline", "spacexlaunch"),
        ("Next Launch Day", "spacex_next_launch_day", "mdi:calendar", "spacexlaunch"),
        ("Next Launch Time", "spacex_next_launch_time", "mdi:clock-outline", "spacexlaunch"),
        ("Next Launch Site", "spacex_next_launch_site", "mdi:map-marker", "spacexlaunch"),
        ("Next Launch Rocket", "spacex_next_launch_rocket", "mdi:rocket", "spacexlaunch"),
        ("Next Launch Payload", "spacex_next_launch_payload", "mdi:package", "spacexlaunch"),
        ("Latest Launch Mission", "spacex_latest_launch_mission", "mdi:information-outline", "spacexlaunch"),
        ("Latest Launch Day", "spacex_latest_launch_day", "mdi:calendar", "spacexlaunch"),
        ("Latest Launch Time", "spacex_latest_launch_time", "mdi:clock-outline", "spacexlaunch"),
        ("Latest Launch Site", "spacex_latest_launch_site", "mdi:map-marker", "spacexlaunch"),
        ("Latest Launch Rocket", "spacex_latest_launch_rocket", "mdi:rocket", "spacexlaunch"),
        ("Latest Launch Payload", "spacex_latest_launch_payload", "mdi:package", "spacexlaunch"),
        ("Starman Speed", "spacex_starman_speed", "mdi:account-star", "spacexstarman"),
        ("Starman Distance", "spacex_starman_distance", "mdi:map-marker-distance", "spacexstarman"),
    ]
    for name, entity_id, icon, device_identifier in sensor_definitions:
        sensors.append(
            SpaceXSensor(coordinator, name, entity_id, icon, device_identifier)
        )
async_add_entities(sensors, update_before_add=True)
class SpaceXSensor(Entity):
"""Defines a SpaceX Binary sensor."""
def __init__(self, coordinator, name, entity_id, icon, device_identifier):
"""Initialize Entities."""
self._name = name
self.entity_id = ENTITY_ID_FORMAT.format(entity_id)
self._state = None
self._icon = icon
self._kind = entity_id
self._device_identifier = device_identifier
self.coordinator = coordinator
self._unit_of_measure = None
self.attrs = {}
@property
def should_poll(self):
"""Return the polling requirement of an entity."""
return True
@property
def unique_id(self):
"""Return the unique Home Assistant friendly identifier for this entity."""
return self.entity_id
@property
def name(self):
"""Return the friendly name of this entity."""
return self._name
@property
def icon(self):
"""Return the icon for this entity."""
return self._icon
@property
def unit_of_measurement(self):
"""Return the unit of measurement for this entity."""
return self._unit_of_measure
@property
def device_state_attributes(self):
"""Return the attributes."""
return self.attrs
@property
def state(self):
"""Return the state."""
return self._state
async def async_update(self):
"""Update SpaceX Binary Sensor Entity."""
await self.coordinator.async_request_refresh()
_LOGGER.debug("Updating state of the sensors.")
coordinator_data = self.coordinator.data
starman_data = coordinator_data[0]
launch_data = coordinator_data[1]
latest_launch_data = coordinator_data[2]
self.attrs["last_updated"] = launch_data.get("last_date_update")
if self._kind == "spacex_next_launch_mission":
self._state = launch_data.get("mission_name")
self.attrs["mission_patch"] = launch_data["links"].get("mission_patch")
if launch_data.get("details") is not None:
self.attrs["details"] = launch_data.get("details")[0:255]
self.attrs["video_link"] = launch_data["links"].get("video_link")
elif self._kind == "spacex_next_launch_day":
self._state = datetime.datetime.fromtimestamp(
launch_data.get("launch_date_unix")
).strftime("%d-%b-%Y")
self.attrs["launch_date_unix"] = launch_data.get("launch_date_unix")
self.attrs["launch_date_utc"] = launch_data.get("launch_date_utc")
elif self._kind == "spacex_next_launch_time":
self._state = datetime.datetime.fromtimestamp(
launch_data.get("launch_date_unix")
).strftime("%I:%M %p")
elif self._kind == "spacex_next_launch_site":
self._state = launch_data["launch_site"].get("site_name_long")
self.attrs["short_name"] = launch_data["launch_site"].get("site_name")
elif self._kind == "spacex_next_launch_rocket":
self._state = launch_data["rocket"].get("rocket_name")
core_counter = 1
for this_core in launch_data["rocket"]["first_stage"].get("cores"):
self.attrs["core_" + str(core_counter) + "_serial"] = this_core.get(
"core_serial"
)
self.attrs["core_" + str(core_counter) + "_flight"] = this_core.get(
"flight"
)
self.attrs["core_" + str(core_counter) + "_block"] = this_core.get(
"block"
)
self.attrs[
"core_" + str(core_counter) + "_landing_intent"
] = this_core.get("landing_intent")
self.attrs["core_" + str(core_counter) + "_lz"] = this_core.get(
"landing_vehicle"
)
core_counter = core_counter + 1
self.attrs["fairings_reused"] = launch_data["rocket"]["fairings"].get(
"reused"
)
elif self._kind == "spacex_next_launch_payload":
self._state = launch_data["rocket"]["second_stage"]["payloads"][0].get(
"payload_id"
)
self.attrs["nationality"] = launch_data["rocket"]["second_stage"][
"payloads"
][0].get("nationality")
self.attrs["manufacturer"] = launch_data["rocket"]["second_stage"][
"payloads"
][0].get("manufacturer")
self.attrs["payload_type"] = launch_data["rocket"]["second_stage"][
"payloads"
][0].get("payload_type")
self.attrs["payload_mass"] = (
str(
launch_data["rocket"]["second_stage"]["payloads"][0].get(
"payload_mass_kg"
)
)
+ " kg"
)
self.attrs["payload_mass_us"] = (
str(
launch_data["rocket"]["second_stage"]["payloads"][0].get(
"payload_mass_lbs"
)
)
+ " lbs"
)
self.attrs["orbit"] = launch_data["rocket"]["second_stage"]["payloads"][
0
].get("orbit")
elif self._kind == "spacex_latest_launch_mission":
self._state = latest_launch_data.get("mission_name")
self.attrs["mission_patch"] = latest_launch_data["links"].get("mission_patch")
if latest_launch_data.get("details") is not None:
self.attrs["details"] = latest_launch_data.get("details")[0:255]
self.attrs["video_link"] = latest_launch_data["links"].get("video_link")
elif self._kind == "spacex_latest_launch_day":
self._state = datetime.datetime.fromtimestamp(
latest_launch_data.get("launch_date_unix")
).strftime("%d-%b-%Y")
self.attrs["launch_date_unix"] = latest_launch_data.get("launch_date_unix")
self.attrs["launch_date_utc"] = latest_launch_data.get("launch_date_utc")
elif self._kind == "spacex_latest_launch_time":
self._state = datetime.datetime.fromtimestamp(
latest_launch_data.get("launch_date_unix")
).strftime("%I:%M %p")
elif self._kind == "spacex_latest_launch_site":
self._state = latest_launch_data["launch_site"].get("site_name_long")
self.attrs["short_name"] = latest_launch_data["launch_site"].get("site_name")
elif self._kind == "spacex_latest_launch_rocket":
self._state = latest_launch_data["rocket"].get("rocket_name")
core_counter = 1
for this_core in latest_launch_data["rocket"]["first_stage"].get("cores"):
self.attrs["core_" + str(core_counter) + "_serial"] = this_core.get(
"core_serial"
)
self.attrs["core_" + str(core_counter) + "_flight"] = this_core.get(
"flight"
)
self.attrs["core_" + str(core_counter) + "_block"] = this_core.get(
"block"
)
self.attrs[
"core_" + str(core_counter) + "_landing_intent"
] = this_core.get("landing_intent")
self.attrs["core_" + str(core_counter) + "_lz"] = this_core.get(
"landing_vehicle"
)
core_counter = core_counter + 1
self.attrs["fairings_reused"] = latest_launch_data["rocket"]["fairings"].get(
"reused"
)
elif self._kind == "spacex_latest_launch_payload":
self._state = latest_launch_data["rocket"]["second_stage"]["payloads"][0].get(
"payload_id"
)
self.attrs["nationality"] = latest_launch_data["rocket"]["second_stage"][
"payloads"
][0].get("nationality")
self.attrs["manufacturer"] = latest_launch_data["rocket"]["second_stage"][
"payloads"
][0].get("manufacturer")
self.attrs["payload_type"] = latest_launch_data["rocket"]["second_stage"][
"payloads"
][0].get("payload_type")
self.attrs["payload_mass"] = (
str(
latest_launch_data["rocket"]["second_stage"]["payloads"][0].get(
"payload_mass_kg"
)
)
+ " kg"
)
self.attrs["payload_mass_us"] = (
str(
latest_launch_data["rocket"]["second_stage"]["payloads"][0].get(
"payload_mass_lbs"
)
)
+ " lbs"
)
self.attrs["orbit"] = latest_launch_data["rocket"]["second_stage"]["payloads"][
0
].get("orbit")
elif self._kind == "spacex_starman_speed":
self._state = int(starman_data["speed_kph"])
self._unit_of_measure = SPEED_KILOMETERS_PER_HOUR
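            # 1235 km/h is roughly Mach 1 at sea level, so the attribute below is an
            # approximate Mach number.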
self.attrs["machspeed"] = float(starman_data["speed_kph"]) / 1235
elif self._kind == "spacex_starman_distance":
self._state = int(starman_data["earth_distance_km"])
self._unit_of_measure = LENGTH_KILOMETERS
self.attrs["au_distance"] = float(starman_data["earth_distance_km"]) / (1.496 * (10**8))
async def async_added_to_hass(self):
"""Subscribe to updates."""
self.async_on_remove(
self.coordinator.async_add_listener(self.async_write_ha_state)
)
|
[
"inquiry@beardedtinker.com"
] |
inquiry@beardedtinker.com
|
ce1a587ed985770cef40e201c534f3b60bd48b02
|
4eb5d040b797f2b3b78f9d34ee6876d1b391336a
|
/Parallel_GAN_structure/sndcgan_zgp/ops/blocks/__init__.py
|
2db9aa2b5bc7525845e6abc9d56f52b94299f6ea
|
[
"Apache-2.0"
] |
permissive
|
skang29/GANs
|
34587c9b5a356a90b94bfc939e6c67a9eb9cc509
|
83e3338bf3611aafbd75f0df1272276e3bc07d46
|
refs/heads/master
| 2020-04-03T19:12:00.530259
| 2019-12-12T06:29:18
| 2019-12-12T06:29:18
| 155,514,200
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 106
|
py
|
from .residual_block import pre_activation_residual_block as residual_block
__all__ = ['residual_block']
|
[
"noreply@github.com"
] |
skang29.noreply@github.com
|
5876f1dd982277fdd25ff90c3eeaee1cccf478ee
|
56cf69e39e5dbf07a29966ae225ba3948187e57e
|
/smc/administration/access_rights.py
|
00d93eeafb7905aa1da993b1b6e8ae98a201920d
|
[
"Apache-2.0"
] |
permissive
|
gabstopper/smc-python
|
df587a6e71e7a466b3355138544fb4249c36d9f0
|
54386c8a710727cc1acf69334a57b155d2f5408c
|
refs/heads/master
| 2021-06-06T09:40:24.456578
| 2021-04-06T14:16:41
| 2021-04-06T14:16:41
| 58,158,709
| 31
| 16
|
Apache-2.0
| 2019-07-30T09:19:52
| 2016-05-05T20:30:33
|
Python
|
UTF-8
|
Python
| false
| false
| 5,539
|
py
|
"""
Access Control Lists are assigned to SMC admin accounts to grant limited
access permissions to either Engines, Policies or Domains.
"""
from smc.base.model import Element, ElementCreator
from smc.base.structs import NestedDict
from smc.base.util import element_resolver
from smc.administration.system import AdminDomain
class AccessControlList(Element):
"""
An ACL is assigned to an AdminUser to grant limited access permissions
to either Engines, Policies or Domains. The access control list will have
'granted elements' that represent the elements that apply to this
permission. The SMC provides default ACL's that can be used or new ones
can be created.
Find all available ACL's::
>>> AccessControlList.objects.all()
"""
typeof = 'access_control_list'
@classmethod
def create(cls, name, granted_element=None):
"""
Create a new ACL
:param str name: Name of ACL
        :param list granted_element: Elements to grant access to. Can be
            engines, policies or other ACLs.
        :type granted_element: list(str,Element)
:raises CreateElementFailed: failed creating ACL
:return: instance with meta
:rtype: AccessControlList
"""
granted_element = element_resolver(granted_element)
json = {'name': name,
'granted_element': granted_element}
return ElementCreator(cls, json)
@property
def permissions(self):
"""
Elements associated to this permission. Granted elements can be
Engines, Policies or other Access Control Lists.
:return: Element class deriving from :py:class:`smc.base.model.Element`
"""
return [Element.from_href(e) for e in self.granted_element]
def add_permission(self, elements):
"""
Add permission/s to this ACL. By default this change is committed
after the method is called.
:param list elements: Elements to grant access to. Can be engines,
policies, or other ACLs
:type elements: list(str,Element)
:raises UpdateElementFailed: Failed updating permissions
:return: None
"""
elements = element_resolver(elements)
self.data['granted_element'].extend(elements)
self.update()
def remove_permission(self, elements):
"""
Remove permission/s to this ACL. Change is committed at end of
method call.
:param list elements: list of element/s to remove
:type elements: list(str,Element)
:raises UpdateElementFailed: Failed modifying permissions
:return: None
"""
elements = element_resolver(elements)
for element in elements:
if element in self.granted_element:
self.data['granted_element'].remove(element)
self.update()
class Permission(NestedDict):
"""
Permissions are added to admin users that do not have super user access
rights. An Admin User can also have multiple permissions. There are three
primary fields associated with a permission:
* Domain to grant access
* Elements to grant access to (Engines, Policies or AccessControlLists)
* Role
A permission might be used to grant read-only access to specific policies
or firewalls (read-only vs read write). It can also be specific to the
Admin Domain.
.. seealso:: :py:mod:`smc.elements.user`
"""
def __init__(self, granted_elements=None, role_ref=None, granted_domain_ref=None):
data = dict(
granted_domain_ref=element_resolver(granted_domain_ref),
role_ref=element_resolver(role_ref),
granted_elements=element_resolver(granted_elements))
super(Permission, self).__init__(data=data)
@classmethod
def create(cls, elements, role, domain=None):
"""
Create a permission.
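
        Example (illustrative only -- the granted element and role below are
        placeholders, and a Role element class is assumed to be available in
        smc.administration.role)::

            Permission.create(
                elements=[AccessControlList('ALL Firewalls')],
                role=Role('Viewer'),
                domain=AdminDomain('Shared Domain'))
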
        :param list elements: Elements for this permission. Can
            be engines, policies or ACLs
        :type elements: list(str,Element)
:param str,Role role: role for this permission
:param str,Element domain: domain to apply (default: Shared Domain)
:rtype: Permission
"""
if not domain:
domain = AdminDomain('Shared Domain')
return Permission(
granted_elements=elements, role_ref=role, granted_domain_ref=domain)
@property
def granted_elements(self):
"""
List of elements this permission has rights to. Elements will be of type
Engine, Policy or ACLs
:rtype: list(Element)
"""
return [Element.from_href(element) for element in self.get('granted_elements')]
@property
def role(self):
"""
Specific Role assigned to this permission. A role is what allows read/write
access to specific operations on the granted elements
:rtype: Role
"""
return Element.from_href(self.get('role_ref'))
@property
def domain(self):
"""
Domain this permission applies to. Shared Domain if unspecified.
:rtype: AdminDomain
"""
return Element.from_href(self.get('granted_domain_ref', 'Shared Domain'))
def __repr__(self):
return "Permission(elements={}, role={}, domain={})"\
.format(self.granted_elements, self.role, self.domain)
|
[
"dwlepage70@gmail.com"
] |
dwlepage70@gmail.com
|
100094a1d700bd6f69213fd67451a2c7c0791e94
|
c2fc06b067ff429d1ffff373b62a7d862a54309f
|
/periodapi/views.py
|
8710ae95c8bf3c53a4ab915d27eb38f653658b88
|
[] |
no_license
|
gerald-x/womans-period
|
f8b0bb44f37586a5c9e2d49fc3f9cccadb647df6
|
3b193c64bc43f7de83d3a09d482e407ffb9ed229
|
refs/heads/main
| 2023-08-26T04:22:57.798148
| 2021-10-15T13:36:01
| 2021-10-15T13:36:01
| 417,449,163
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,297
|
py
|
from django.shortcuts import render
from django.http import JsonResponse
from datetime import datetime, timedelta
from .models import FertilityWindow, PeriodData, ProcessedData
# Create your views here.
def create_cycles(request):
if request.method == "GET":
data = PeriodData.objects.all()
return render(request, "form.html", {"data": data})
else:
last_period = request.POST.get("last_period")
cycle_average = int(request.POST.get("cycle_average"))
period_average = int(request.POST.get("period_average"))
start_date = request.POST.get("start_date")
end_date = request.POST.get("end_date")
PeriodData.objects.all().delete()
ProcessedData.objects.all().delete()
FertilityWindow.objects.all().delete()
period_data = PeriodData(
last_period = last_period,
cycle_average = cycle_average,
period_average = period_average,
start_date= start_date,
end_date= end_date
)
period_data.save()
#convert strings to date objects
last_period1 = datetime.strptime(last_period, "%Y-%m-%d").date()
start_date1 = datetime.strptime(start_date, "%Y-%m-%d").date()
end_date1 = datetime.strptime(end_date, "%Y-%m-%d").date()
calculation_date = start_date1
while not calculation_date >= end_date1:
period_start_date = last_period1 + timedelta(days=cycle_average)
period_end_date = period_start_date + timedelta(days=period_average)
ovulation_date = period_start_date + timedelta(days=cycle_average//2)
fertility_window = []
fertility_window.append(ovulation_date - timedelta(days=4))
fertility_window.append(ovulation_date + timedelta(days=4))
pre_ovulation_window = "todo"
post_ovulation_window = "todo"
last_period1 = period_start_date
processed_data = ProcessedData(
period_start_date = period_start_date,
period_end_date = period_end_date,
ovulation_date = ovulation_date,
pre_ovulation_window = pre_ovulation_window,
post_ovulation_window = post_ovulation_window,
)
processed_data.save()
processed_data_object = ProcessedData.objects.get(period_start_date= period_start_date)
for value in fertility_window:
data_input = FertilityWindow(fertility_window=value, processed_data=processed_data_object)
data_input.save()
calculation_date = period_end_date
return JsonResponse({"total_created_cycles": ProcessedData.objects.all().count()})
def cycle_event(request):
if request.method == "GET":
        cycle_variable = request.GET.get("date", None)
        if cycle_variable is not None:
events = []
cycle_variable = datetime.strptime(cycle_variable, "%Y-%m-%d").date()
data = ProcessedData.objects.all().prefetch_related("fertility_window")
first_set = data.values(
"ovulation_date",
"period_start_date",
"period_end_date",
"pre_ovulation_window",
"post_ovulation_window")
second_set = data.values_list("fertility_window", flat=True)
for main_set in first_set:
for key, value in main_set.items():
if value == cycle_variable:
events_dict = {
"events": key,
"date": value
}
events.append(events_dict)
print(events)
windows = FertilityWindow.objects.filter(id__in=second_set).values("fertility_window")
for subset in windows:
for name, date in subset.items():
if date == cycle_variable:
events_dict = {
"events": name,
"date": date
}
events.append(events_dict)
print(events)
return JsonResponse({"events_to_happen": events})
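# Minimal sketch of the cycle arithmetic used in create_cycles above
# (illustrative only; the numbers are hypothetical, not app defaults):
#
#   from datetime import date, timedelta
#   last_period = date(2021, 10, 1)
#   cycle_average, period_average = 28, 5
#   period_start = last_period + timedelta(days=cycle_average)      # 2021-10-29
#   period_end = period_start + timedelta(days=period_average)      # 2021-11-03
#   ovulation = period_start + timedelta(days=cycle_average // 2)   # 2021-11-12
#   fertility_window = (ovulation - timedelta(days=4),
#                       ovulation + timedelta(days=4))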
|
[
"64314917+gerald-x@users.noreply.github.com"
] |
64314917+gerald-x@users.noreply.github.com
|
158f2e97bbfcd85ff6ecc67a53fd638df29262e6
|
aafeb17c82282f064958003a0bed19ac651a6100
|
/poi_id.py
|
34de10e746e07f3aba8bef6bd91710d1365bbd6f
|
[] |
no_license
|
Faylfire/identifying_enron_fraud_project_5_fang_lu
|
c3d78158c62910e955993cf7cce5418dc833861a
|
974032915e7ff014ee8a87e706055f8a67ebf0a8
|
refs/heads/master
| 2021-01-10T21:18:04.297494
| 2015-11-27T22:49:38
| 2015-11-27T22:49:38
| 42,611,467
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 25,316
|
py
|
#!/usr/bin/python
#Fang Lu
#Enron poi_id.py progression and testing
#
import sys
import pickle
sys.path.append("../tools/")
from scipy.stats import pearsonr as Pearson
from scipy.stats import pointbiserialr as Biserial
import numpy as np
import operator
from feature_format import featureFormat, targetFeatureSplit
from tester import test_classifier, dump_classifier_and_data
import pprint
from sklearn.naive_bayes import GaussianNB
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import MinMaxScaler
from sklearn.feature_selection import SelectKBest, f_regression, chi2
from sklearn.tree import DecisionTreeClassifier as DTC
from sklearn.ensemble import AdaBoostClassifier as ABC
from sklearn.ensemble import RandomForestClassifier as RFC
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.cross_validation import StratifiedShuffleSplit
### Task 1: Select what features you'll use.
### features_list is a list of strings, each of which is a feature name.
### The first feature must be "poi".
#Feature selections performed by removing non-correlated features and performing exhaustive search on remaining
#Features Selected Originally DTC(min_samples_split=15) Precision = 0.48, Recall = 0.49
#features_list = ['poi','exercised_stock_options','shared_receipt_with_poi','to_poi_ratio','expenses']
#Final Features List
features_list = ['poi', 'exercised_stock_options', 'deferred_income', 'expenses']
#Function to get dataset statistics/Exploratory Calculates NaN
def getDataSetStat(data_dict, *args):
poiCount = 0
poilist = []
for i in data_dict:
if data_dict[i]['poi'] == True:
poilist.append(i)
poiCount += 1
flist = []
featureCount = len(data_dict['HANNON KEVIN P'])
for k in data_dict['HANNON KEVIN P']:
flist.append(k)
fmissing = {}
for l in flist:
count = 0
for i in data_dict:
if data_dict[i][l] == 'NaN':
count += 1
fmissing[l] = count
setDict = {}
setDict['poiCount']= poiCount
setDict['poilist'] = poilist
setDict['flist'] = flist
setDict['fmissing'] = fmissing
setDict['featureCount'] = featureCount
for i in args:
if i == 0:
pprint.pprint(setDict)
else:
return setDict
### Load the dictionary containing the dataset
data_dict = pickle.load(open("final_project_dataset.pkl", "r") )
getDataSetStat(data_dict, 0)
### Task 2: Remove outliers
### Removed the obvious outlier of TOTAL from the dataset.
data_dict.pop('TOTAL', 0)
### Task 3: Create new feature(s)
#Created the ratio features from and to poi feature divided by the total the total recieved and sent messages
for i in data_dict:
#avoid divide by zero
if data_dict[i]['from_messages'] != 'NaN' and data_dict[i]['from_messages'] != 0:
data_dict[i]['to_poi_ratio'] = float(data_dict[i]['from_this_person_to_poi'])/data_dict[i]['from_messages']
else:
data_dict[i]['to_poi_ratio']='NaN'
if data_dict[i]['to_messages'] != 'NaN' and data_dict[i]['to_messages'] != 0:
data_dict[i]['from_poi_ratio'] = float(data_dict[i]['from_poi_to_this_person'])/data_dict[i]['to_messages']
else:
data_dict[i]['from_poi_ratio']='NaN'
if data_dict[i]['to_messages'] != 'NaN' and data_dict[i]['to_messages'] != 0:
data_dict[i]['shared_poi_ratio'] = float(data_dict[i]['shared_receipt_with_poi'])/data_dict[i]['to_messages']
else:
data_dict[i]['shared_poi_ratio']='NaN'
### Store to my_dataset for easy export below.
my_dataset = data_dict
### Extract features and labels from dataset for local testing
data = featureFormat(my_dataset, features_list, sort_keys = True)
labels, features = targetFeatureSplit(data)
### Task 4: Try a varity of classifiers
### Please name your classifier clf for easy export below.
### Note that if you want to do PCA or other multi-stage operations,
### you'll need to use Pipelines. For more info:
### http://scikit-learn.org/stable/modules/pipeline.html
#Feature Selection and Classifier Selection are Combined in Task 4
##--First Get Correlations--##
#Creates X and Y array for correlation given the raw data from featureFormat
def getXY(corrData):
x = []
y = []
for item in corrData:
y.append( item[0] )
x.append( item[1] )
return y, x
#Calculates the Point Biserial Correlation (Pearson) of Features to 'poi'
def corrPOI(myData):
flist = []
for k in myData['HANNON KEVIN P']:
flist.append(k)
flist.remove('email_address')
pbsDict = {}
for i in flist:
corrList = ['poi', i]
pbsCorr = getCorr(myData, corrList)
pbsDict[i] = pbsCorr
correlations = pbsDict
#Prints the Sorted Correlations Starting with the Highest Correlation
for w in sorted(correlations, key=correlations.get, reverse=True):
print w, correlations[w][0], correlations[w][1]
return pbsDict
#Performs Pearsons Correlation test (same as PointBiserial Mathematically)
def getCorr(myData, corrList):
corrData = featureFormat(myData, corrList, remove_all_zeroes = False, sort_keys = True)
y, x = getXY(corrData)
#Using pearsons makes getCorr more robust for feature correlation
return Pearson(y,x)
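#Illustrative aside (not in the original code): with a binary label such as
#'poi', Pearson's r is numerically the same as the point-biserial correlation:
#   from scipy.stats import pearsonr
#   r, p = pearsonr([0, 0, 1, 1], [1.2, 0.8, 3.4, 2.9])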
#Performs correlations on between all Features Results
def corrAll(myData):
flist = []
for k in myData['HANNON KEVIN P']:
flist.append(k)
flist.remove('email_address')
#Creates a dictionary to store all the correlations between features
corrDict = {}
for i in flist:
corrDict[i] = {}
for j in flist:
corrList = [i,j]
pbsCorr = getCorr(myData, corrList)
corrDict[i][j] = pbsCorr
#filters out highly correlated feature pairs
uncorr = {}
for i in corrDict:
uncorr[i]={}
for j in corrDict[i]:
if abs(corrDict[i][j][0]) <= 0.2:
uncorr[i][j]=corrDict[i][j]
return corrDict, uncorr
#Utility function for reading correlations
def readCorr(corrDict, f1, f2=None):
print '--------'
if f2:
        print 'r and p-values for:',f1,'and',f2, corrDict[f1][f2]
else:
print 'All Correlations with ',f1
pprint.pprint(corrDict[f1])
#Call Correlation Functions
poiCorr = corrPOI(my_dataset)
allCorr, unCorr = corrAll(my_dataset)
#Examples on How to Access the Feature Correlation Results
featureOne = 'to_poi_ratio'
featureTwo = 'exercised_stock_options'
readCorr(allCorr, featureOne, featureTwo)
readCorr(allCorr, featureOne)
readCorr(poiCorr, featureOne)
readCorr(unCorr, featureTwo)
#PCA Analysis for insight into the features
#Also creates feature set to be tested
def pcaGet(myData):
#Builds the feature_list for all of the features
flist = []
for k in myData['HANNON KEVIN P']:
flist.append(k)
flist.remove('email_address')
flist.remove('poi')
flist.insert(0, 'poi')
#pprint.pprint(flist)
#Obtain the features in array format from featureFormat and split out 'poi'
pcaData = featureFormat(myData, flist , remove_all_zeroes = False, sort_keys = True)
labels, features = targetFeatureSplit(pcaData)
#Run PCA showing the first 5 components, change n_components to see more
pca = PCA(n_components=5, whiten=False)
pca.fit(features)
print '-----No StandardScalling-----'
pprint.pprint(pca.explained_variance_ratio_)
#uncomment to see breakdown of PC contributions by features
#pprint.pprint(pca.components_)
var = pca.explained_variance_ratio_
print 'Total Variance Captured: ', sum(var[0:5])
#newFeatures = pca.transform(features)
#With StandardScaler
stdScaler = StandardScaler()
scaledFeatures = stdScaler.fit_transform(features)
pcaStd = PCA(n_components=22, whiten=True)
pcaStd.fit(scaledFeatures)
print '-----With StandardScalling-----'
pprint.pprint(pcaStd.explained_variance_ratio_)
varStd = pcaStd.explained_variance_ratio_
numPC = 14
print 'Total Variance Captured: ', sum(varStd[0:14])
#pprint.pprint(pcaStd.components_)
newFeatures = pcaStd.transform(features)
return var, labels, newFeatures, features
#Call PCA functions for Analysis
variance, lab, newFeat, oldFeat = pcaGet(my_dataset)
#Cross-Validation and Exhaustive Search
PERF_FORMAT_STRING = "\
\tAccuracy: {:>0.{display_precision}f}\tPrecision: {:>0.{display_precision}f}\t\
Recall: {:>0.{display_precision}f}\tF1: {:>0.{display_precision}f}\tF2: {:>0.{display_precision}f}"
RESULTS_FORMAT_STRING = "\tTotal predictions: {:4d}\tTrue positives: {:4d}\tFalse positives: {:4d}\tFalse negatives: {:4d}\tTrue negatives: {:4d}"
#Modified test_classifier from tester.py, to be able to reduce the folds and use different random_state
#This modified classifier also allows for preloading of labels and features, thus can perform preprocessing such as PCA
def test_classifier_mod(clf, dataset, feature_list, folds = 1000, preload = False, lab = [], feat = [], printYes = True):
#Used to run preloaded feature set as in for PCA Analysis
if preload:
#print 'in preload'
labels = lab
features = feat
else:
data = featureFormat(dataset, feature_list, sort_keys = True)
labels, features = targetFeatureSplit(data)
cv = StratifiedShuffleSplit(labels, folds, random_state = 42)
#cv = StratifiedShuffleSplit(labels, folds, random_state = None)
true_negatives = 0
false_negatives = 0
true_positives = 0
false_positives = 0
for train_idx, test_idx in cv:
features_train = []
features_test = []
labels_train = []
labels_test = []
for ii in train_idx:
features_train.append( features[ii] )
labels_train.append( labels[ii] )
for jj in test_idx:
features_test.append( features[jj] )
labels_test.append( labels[jj] )
### fit the classifier using training set, and test on test set
clf.fit(features_train, labels_train)
predictions = clf.predict(features_test)
for prediction, truth in zip(predictions, labels_test):
if prediction == 0 and truth == 0:
true_negatives += 1
elif prediction == 0 and truth == 1:
false_negatives += 1
elif prediction == 1 and truth == 0:
false_positives += 1
else:
true_positives += 1
try:
total_predictions = true_negatives + false_negatives + false_positives + true_positives
accuracy = 1.0*(true_positives + true_negatives)/(total_predictions)
precision = 1.0*true_positives/(true_positives+false_positives)
recall = 1.0*true_positives/(true_positives+false_negatives)
f1 = 2.0 * true_positives/(2*true_positives + false_positives+false_negatives)
f2 = (1+2.0*2.0) * precision*recall/(4*precision + recall)
#Can turn off Printing
if printYes:
print clf
print PERF_FORMAT_STRING.format(accuracy, precision, recall, f1, f2, display_precision = 5)
print RESULTS_FORMAT_STRING.format(total_predictions, true_positives, false_positives, false_negatives, true_negatives)
print ""
#returns Precision and Recall for easier access to results
return precision, recall
except:
print "Got a divide by zero when trying out:", clf
precision = 0
recall = 0
return precision, recall
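#Illustrative aside (not in the original code): the StratifiedShuffleSplit used
#above yields (train_idx, test_idx) index arrays whose class balance mirrors the
#full label set, so the rare poi == True class appears in every fold:
#   cv = StratifiedShuffleSplit(labels, 10, random_state=42)
#   for train_idx, test_idx in cv:
#       pass  # 10 random splits, each preserving the poi/non-poi ratio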
#Test Multiple Classifiers
def test_classifiers(testOption, feat_list, use_pca_features = False):
print 'Feature List: ',feat_list
if testOption == 0:
clfvalid = GaussianNB()
print "GuassianNB:-----"
elif testOption == 1:
clfvalid = DTC(min_samples_split=2)
print "DTC:-----"
elif testOption == 2:
clfvalid = RFC()
print "RFC:-----"
elif testOption == 3:
clfvalid = ABC(DTC())
print "AdaBoostC:-----"
elif testOption == 4:
estimators = [('reduce_dim', PCA()), ('dtc', DTC())]
clfvalid = Pipeline(estimators)
print "PCA-DTC:-----"
elif testOption == 5:
estimators = [('reduce_dim', PCA(n_components=2)), ('dtc', DTC(min_samples_split=17))]
clfvalid = Pipeline(estimators)
print "Tuned-PCA-DTC:-----"
#Option to Use PCA features
if use_pca_features:
pre, re = test_classifier_mod(clfvalid, my_dataset, feat_list, preload = True, lab=lab, feat = newFeat)
else:
pre, re = test_classifier_mod(clfvalid, my_dataset, feat_list, printYes = True)
return pre, re
#Sample Call to test_classifiers
f_minPlusSharedR = ['poi',
'exercised_stock_options',
'to_poi_ratio',
'shared_receipt_with_poi']
p, r = test_classifiers(4, f_minPlusSharedR)
#Feature Testing functions using K-Fold cross validation and different feature sets
def featureTest(use_ftest = False, ftest = []):
#All Features
f_all = ['poi',
'exercised_stock_options',
'total_stock_value',
'bonus',
'salary',
'to_poi_ratio',
'deferred_income',
'long_term_incentive',
'shared_poi_ratio',
'restricted_stock',
'total_payments',
'shared_receipt_with_poi',
'loan_advances',
'expenses',
'from_poi_to_this_person',
'other',
'from_poi_ratio',
'from_this_person_to_poi',
'to_messages',
'restricted_stock_deferred',
'from_messages',
'deferral_payments',
'director_fees']
#13 Most Correlated Features that are Significant ~98% Confidence
f_correlated = ['poi',
'exercised_stock_options',
'total_stock_value',
'bonus',
'salary',
'to_poi_ratio',
'deferred_income',
'long_term_incentive',
'shared_poi_ratio',
'restricted_stock',
'total_payments',
'shared_receipt_with_poi',
'loan_advances',
'expenses']
#Financial Only
f_financial = ['poi',
'exercised_stock_options',
'total_stock_value',
'bonus',
'salary',
'long_term_incentive',
'restricted_stock',
'total_payments',
'loan_advances',
'expenses']
#E-mail Only
f_email_only = ['poi',
'to_poi_ratio',
'shared_poi_ratio',
'shared_receipt_with_poi']
f_email_2 = ['poi',
'to_poi_ratio',
'shared_poi_ratio',
'shared_receipt_with_poi']
f_email_1 = ['poi',
'to_poi_ratio']
f_email_original = ['poi',
'shared_receipt_with_poi',
'from_poi_to_this_person',
'from_this_person_to_poi',
'to_messages',
'from_messages']
f_email_created = ['poi',
'to_poi_ratio',
'shared_poi_ratio',
'from_poi_ratio']
#Misc Tests, By Selecting Top Correlations for Financial and E-mail
f_min = ['poi',
'exercised_stock_options',
'to_poi_ratio']
f_minPlus = ['poi',
'exercised_stock_options',
'to_poi_ratio',
'bonus',
'expenses']
f_minPlusExp = ['poi',
'exercised_stock_options',
'to_poi_ratio',
'expenses']
f_minPlusSharedR = ['poi',
'exercised_stock_options',
'to_poi_ratio',
'shared_poi_ratio']
f_minPlusShared = ['poi',
'exercised_stock_options',
'to_poi_ratio',
'shared_receipt_with_poi']
#Random Tests By Hand
f_test = ['poi',
'exercised_stock_options',
'shared_receipt_with_poi']
f_c_selected = ['poi',
'exercised_stock_options',
'bonus',
'to_poi_ratio',
'deferred_income',
'shared_receipt_with_poi',
'expenses']
f_c_selected_2 = ['poi',
'exercised_stock_options',
'bonus',
'shared_receipt_with_poi']
prStr = []
pre = 0
re = 0
if use_ftest:
for i in range(6):
pre, re = test_classifiers(i, ftest)
prStr.append(pre)
prStr.append(re)
else:
for i in range(6):
pre, re = test_classifiers(i, f_c_selected_2)
prStr.append(pre)
prStr.append(re)
#classifer_stratified_test(i, features_list, use_pca_features = True)
print prStr
return prStr
#Exhaustive feature testing after selection down to 6 variables
#function tests feature sets created by removing features individually
#Produces an array of arrays of the precision and recall scores for the 6 classifiers
def featIter(num=0):
f_c = ['poi',
'exercised_stock_options',
'total_stock_value',
'bonus',
'salary',
'to_poi_ratio',
'deferred_income',
'long_term_incentive',
'shared_poi_ratio',
'restricted_stock',
'total_payments',
'shared_receipt_with_poi',
'loan_advances',
'expenses']
f_c_selected = ['poi',
'exercised_stock_options',
'bonus',
'to_poi_ratio',
'deferred_income',
'shared_receipt_with_poi',
'expenses']
f_c_selected_2 = ['poi',
'exercised_stock_options',
'bonus',
'shared_receipt_with_poi']
#Expense down to 3 variables
f_sans_expense = ['poi', 'exercised_stock_options', 'bonus', 'to_poi_ratio', 'deferred_income', 'shared_receipt_with_poi']
f_sans_expense_def_inc = ['poi', 'exercised_stock_options', 'bonus', 'to_poi_ratio', 'shared_receipt_with_poi']
#Deferred_income down to 3 variables
f_sans_def_inc = ['poi', 'exercised_stock_options', 'bonus', 'to_poi_ratio', 'shared_receipt_with_poi', 'expenses']
f_sans_def_inc_tpr = ['poi', 'exercised_stock_options', 'bonus', 'shared_receipt_with_poi', 'expenses']
#Bonus down to 3 variables
f_sans_bonus = ['poi', 'exercised_stock_options', 'to_poi_ratio', 'deferred_income', 'shared_receipt_with_poi', 'expenses']
f_sans_bonus_shared = ['poi', 'exercised_stock_options', 'to_poi_ratio', 'deferred_income', 'expenses']
#Final
f_final = ['poi', 'exercised_stock_options', 'deferred_income', 'expenses']
#For performing a reverse test, by adding to Final features and see if the model improves
f_remaining1 = ['total_stock_value',
'bonus',
'salary',
'to_poi_ratio',
'long_term_incentive',
'shared_poi_ratio',
'restricted_stock',
'total_payments',
'shared_receipt_with_poi',
'loan_advances',
'from_poi_to_this_person',
'other',
'from_poi_ratio',
'from_this_person_to_poi',
'to_messages',
'restricted_stock_deferred',
'from_messages',
'deferral_payments',
'director_fees']
f_remaining = []
pr_Arr = []
#Removes
topRemove = False
#Test Final Feature Set by Addition of remaining features individually
final = True
if topRemove:
for i in range(num):
f_c_selected.pop(1)
pr = featureTest(use_ftest = True, ftest = f_c_selected)
pr_Arr.append(pr)
elif final:
f_c_selected = f_final
pr = featureTest(use_ftest = True, ftest = f_c_selected)
pr_Arr.append(pr)
for i in range(len(f_remaining)):
f_c_selected = f_final+[f_remaining[i]]
pr = featureTest(use_ftest = True, ftest = f_c_selected)
pr_Arr.append(pr)
else:
#Change f_c_selected with desired feature list to perform removal of individual features
f_c_selected = f_sans_bonus_shared
pr = featureTest(use_ftest = True, ftest = f_c_selected)
pr_Arr.append(pr)
num = len(f_c_selected)-1
for i in range(num):
ftest = f_c_selected[0:(num-i)] + f_c_selected[(num-i+1):]
pr = featureTest(use_ftest = True, ftest = ftest)
pr_Arr.append(pr)
print ftest
print 'Tests Done...'
return pr_Arr
#Sample Call to iterFeat() uses the Final feature set
pr_Arr = featIter()
#Parameter Tuning GridSearchCV and Manual
#GridSearchCV for Classifier Parameter tuning
#PCA-Decision Tree GridSeachCV
def pcadtcGrid():
#features_list = ['poi', 'exercised_stock_options', 'deferred_income', 'expenses']
features_list = ['poi',
'exercised_stock_options',
'to_poi_ratio',
'shared_receipt_with_poi']
estimators = [('reduce_dim', PCA()), ('dtc', DTC())]
pipe = Pipeline(estimators)
param_grid = dict(reduce_dim__n_components=[1,2],
dtc__min_samples_split=np.arange(2,20))
#print param_grid
d = featureFormat(my_dataset, features_list, sort_keys = True)
y, X = targetFeatureSplit(d)
grid_search = GridSearchCV(pipe, param_grid=param_grid, verbose=False)
grid_search.fit(X, y)
print '----PCA-DTC-GridSeachCV----'
print(grid_search.best_estimator_)
#Decision Tree GridSearchCV
def dtcGrid():
features_list = ['poi', 'exercised_stock_options', 'deferred_income', 'expenses']
estimators = [('dtc', DTC())]
pipe = Pipeline(estimators)
param_grid = dict(dtc__min_samples_split=np.arange(2,36))
d = featureFormat(my_dataset, features_list, sort_keys = True)
y, X = targetFeatureSplit(d)
grid_search = GridSearchCV(pipe, param_grid=param_grid, verbose=False)
grid_search.fit(X, y)
print '----DTC-GridSeachCV----'
print(grid_search.best_estimator_)
#Manual Parameter Tuning using DTC
def paramTune(start,end):
scores= {}
for i in range(start,end+1):
#Uncomment to test
#Parameter Tune Pipelined classifiers
#estimators = [('scaling', StandardScaler()),('reduce_dim', PCA()), ('dtc', DTC(min_samples_split=i*2))]
#estimators = [('reduce_dim', PCA(n_components=2)), ('dtc', DTC(min_samples_split=i))]
#clfIter = Pipeline(estimators)
#clfIter.set_params(reduce_dim__n_components=3)
#Paramter Tune for simple classifiers
#clfIter = DTC(min_samples_leaf=i, min_samples_split=3)
#clfIter = DTC(min_samples_split=3, max_depth = i)
#test_classifier(clfIter, my_dataset, features_list)
clfIter = DTC(min_samples_split=i)
p,r = test_classifier_mod(clfIter, my_dataset, features_list, printYes = False)
scores[i]=p+r
print '----ParamTune----'
print 'Max Precision and Recall Combined Score: ', max(scores.values())
print 'Tuned Parameter: ', max(scores.iteritems(), key=operator.itemgetter(1))[0]
return scores
#Call GridSearchCV functions
pcadtcGrid()
dtcGrid()
#Change the features to tune different feature set
#features_list = ['poi', 'exercised_stock_options', 'deferred_income', 'expenses']
start = 2
end = 36
scoreDict = paramTune(start,end)
#Function to get feature importance for DTC
def getDTCimportance(features_list):
#features_list = ['poi', 'exercised_stock_options', 'deferred_income', 'expenses']
data = featureFormat(my_dataset, features_list, sort_keys = True)
labels, features = targetFeatureSplit(data)
clf2 = DTC(min_samples_split=16)
clf2.fit(features, labels)
imp = clf2.feature_importances_
f_importance = {}
c = 0
for i in features_list[1:]:
f_importance[i]=imp[c]
c +=1
#pprint.pprint(f_importance)
return f_importance
f_importance = getDTCimportance(features_list)
print '----------'
print 'Feature Importances: ', f_importance
#Final Classifier and Result
clf = DTC(min_samples_split=3)
test_classifier(clf, my_dataset, features_list)
### Dump your classifier, dataset, and features_list so
### anyone can run/check your results.
dump_classifier_and_data(clf, my_dataset, features_list)
print 'Pickle Files Generated...'
|
[
"fanglu@Fangs-MacBook-Pro.local"
] |
fanglu@Fangs-MacBook-Pro.local
|
4a967eccec0bc29c4d36530ac28da34ffe6a069c
|
42b82c2015a85e9e4e80f40988d588cb7fdc3098
|
/tools/char2html.py
|
7f47b5fa1ce2c2668eff0d0f3160d3d8fc46ac1d
|
[] |
no_license
|
eric-lemesre/openerp-survey
|
64fe9dc266f6d9216cec445c80826c97b8d9171c
|
c30668969fa883f290aa8daa88cacd8103f5ef60
|
refs/heads/master
| 2020-03-19T13:51:39.996523
| 2013-10-24T10:41:59
| 2013-10-24T10:41:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 984
|
py
|
#!/usr/bin/python
import psycopg2
import re
import sys
re_path = re.compile(r'(\w+):(\w+)\.(\w+)')
def main(source, target):
parameters = zip(['src_db', 'src_table', 'src_field'], re_path.match(source).groups())
parameters.extend(zip(['tar_db', 'tar_table', 'tar_field'], re_path.match(target).groups()))
parameters=dict(parameters)
src_conn = psycopg2.connect("dbname={src_db}".format(**parameters))
dst_conn = psycopg2.connect("dbname={tar_db}".format(**parameters))
src_cur = src_conn.cursor()
dst_cur = dst_conn.cursor()
src_cur.execute('select {src_field},id from {src_table}'.format(**parameters))
src_data = src_cur.fetchall()
dst_cur.executemany('update {tar_table} set {tar_field}=%s where id=%s'.format(**parameters), src_data)
dst_conn.commit()
src_cur.close()
dst_cur.close()
if __name__ == '__main__':
main(sys.argv[1], sys.argv[2])
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
[
"cristian.rocha@moldeo.coop"
] |
cristian.rocha@moldeo.coop
|
81a5e811177a17107d7a5f9f267df26b2edfd4bd
|
bde06d0445ff0676168b6563ff98b94c3111b8d6
|
/.ci/ci-generate-srcinfo.py
|
ac07da113273a07f777d41b35c01a228114122cb
|
[
"BSD-3-Clause"
] |
permissive
|
pchemguy/MSYS2-packages
|
abf74f527de07706f10f0ae2ec50df62fa8646c1
|
c3556295fbed8c823814d54b3980bd481916fc5e
|
refs/heads/master
| 2021-06-11T01:35:16.161681
| 2021-04-08T06:47:18
| 2021-04-08T06:47:18
| 135,283,422
| 0
| 1
|
BSD-3-Clause
| 2018-05-29T11:03:16
| 2018-05-29T11:03:16
| null |
UTF-8
|
Python
| false
| false
| 7,583
|
py
|
#!/usr/bin/env python3
# Copyright 2017 Christoph Reiter
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE
import sys
import argparse
import os
import json
import shutil
from collections import OrderedDict
import hashlib
import time
import subprocess
from concurrent.futures import ThreadPoolExecutor
from typing import List, Iterator, Tuple, Dict, Optional, Union, Collection
CacheEntry = Dict[str, Union[str, Collection[str]]]
CacheTuple = Tuple[str, CacheEntry]
Cache = Dict[str, CacheEntry]
def normalize_repo(repo: str) -> str:
if repo.endswith(".git"):
repo = repo.rsplit(".", 1)[0]
return repo
def normalize_path(path: str) -> str:
return path.replace("\\", "/")
def check_output_retry(*args, **kwargs):
# XXX: git sometimes crashes when called concurrently,
# so we retry a few times..
run = 0
max_ = 5
while True:
try:
return subprocess.check_output(*args, **kwargs)
except subprocess.CalledProcessError as e:
if run <= max_ and e.returncode == 127:
time.sleep(0.1 * run)
run += 1
continue
else:
raise
def get_cache_key(pkgbuild_path: str) -> str:
pkgbuild_path = os.path.abspath(pkgbuild_path)
git_cwd = os.path.dirname(pkgbuild_path)
git_path = os.path.relpath(pkgbuild_path, git_cwd)
h = hashlib.new("SHA1")
with open(pkgbuild_path, "rb") as f:
h.update(f.read())
fileinfo = check_output_retry(
["git", "ls-files", "-s", "--full-name", git_path],
cwd=git_cwd).decode("utf-8").strip()
h.update(normalize_path(fileinfo).encode("utf-8"))
repo = check_output_retry(
["git", "ls-remote", "--get-url", "origin"],
cwd=git_cwd).decode("utf-8").strip()
repo = normalize_repo(repo)
h.update(repo.encode("utf-8"))
return h.hexdigest()
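# Illustrative aside (not part of the original script): the cache key is a SHA1
# digest over the PKGBUILD bytes plus the normalized git metadata strings, so a
# change to the file, its git index entry or the origin URL yields a new key.
# Hypothetical sketch:
#
#   h = hashlib.new("SHA1")
#   h.update(b"<PKGBUILD bytes>")
#   h.update("100644 <blob-sha> 0\tPKGBUILD".encode("utf-8"))
#   h.update("<origin-url>".encode("utf-8"))
#   key = h.hexdigest()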
def get_srcinfo_for_pkgbuild(args: Tuple[str, str]) -> Optional[CacheTuple]:
pkgbuild_path, mode = args
pkgbuild_path = os.path.abspath(pkgbuild_path)
git_cwd = os.path.dirname(pkgbuild_path)
git_path = os.path.relpath(pkgbuild_path, git_cwd)
key = get_cache_key(pkgbuild_path)
bash = shutil.which("bash")
if bash is None:
print("ERROR: bash not found")
return None
print("Parsing %r" % pkgbuild_path)
try:
srcinfos = {}
if mode == "mingw":
for name in ["mingw32", "mingw64"]:
env = os.environ.copy()
env["MINGW_INSTALLS"] = name
srcinfos[name] = subprocess.check_output(
[bash, "/usr/bin/makepkg-mingw",
"--printsrcinfo", "-p", git_path],
cwd=git_cwd,
env=env).decode("utf-8")
else:
srcinfos["msys"] = subprocess.check_output(
[bash, "/usr/bin/makepkg",
"--printsrcinfo", "-p", git_path],
cwd=git_cwd).decode("utf-8")
repo = check_output_retry(
["git", "ls-remote", "--get-url", "origin"],
cwd=git_cwd).decode("utf-8").strip()
repo = normalize_repo(repo)
relpath = check_output_retry(
["git", "ls-files", "--full-name", git_path],
cwd=git_cwd).decode("utf-8").strip()
relpath = normalize_path(os.path.dirname(relpath))
date = check_output_retry(
["git", "log", "-1", "--format=%aI", git_path],
cwd=git_cwd).decode("utf-8").strip()
meta = {"repo": repo, "path": relpath, "date": date, "srcinfo": srcinfos}
except subprocess.CalledProcessError as e:
print("ERROR: %s %s" % (pkgbuild_path, e.output.splitlines()))
return None
return (key, meta)
def iter_pkgbuild_paths(repo_path: str) -> Iterator[str]:
repo_path = os.path.abspath(repo_path)
print("Searching for PKGBUILD files in %s" % repo_path)
for base, dirs, files in os.walk(repo_path):
for f in files:
if f == "PKGBUILD":
# in case we find a PKGBUILD, don't go deeper
del dirs[:]
path = os.path.join(base, f)
yield path
def get_srcinfo_from_cache(args: Tuple[str, Cache]) -> Tuple[str, Optional[CacheTuple]]:
pkgbuild_path, cache = args
key = get_cache_key(pkgbuild_path)
if key in cache:
return (pkgbuild_path, (key, cache[key]))
else:
return (pkgbuild_path, None)
def iter_srcinfo(repo_path: str, mode: str, cache: Cache) -> Iterator[Optional[CacheTuple]]:
with ThreadPoolExecutor() as executor:
to_parse: List[Tuple[str, str]] = []
pool_iter = executor.map(
get_srcinfo_from_cache, ((p, cache) for p in iter_pkgbuild_paths(repo_path)))
for pkgbuild_path, srcinfo in pool_iter:
if srcinfo is not None:
yield srcinfo
else:
to_parse.append((pkgbuild_path, mode))
print("Parsing PKGBUILD files...")
for srcinfo in executor.map(get_srcinfo_for_pkgbuild, to_parse):
yield srcinfo
def main(argv: List[str]) -> Optional[Union[int, str]]:
parser = argparse.ArgumentParser(description="Create SRCINFOs for all packages in a repo", allow_abbrev=False)
parser.add_argument('mode', choices=['msys', 'mingw'], help="The type of the repo")
parser.add_argument("repo_path", help="The path to GIT repo")
parser.add_argument("json_cache", help="The path to the json file used to fetch/store the results")
parser.add_argument("--time-limit", action="store",
type=int, dest="time_limit", default=0,
help='time after which it will stop and save, 0 means no limit')
args = parser.parse_args(argv[1:])
t = time.monotonic()
srcinfo_path = os.path.abspath(args.json_cache)
cache: Cache = {}
try:
with open(srcinfo_path, "rb") as h:
cache = json.loads(h.read())
except FileNotFoundError:
pass
srcinfos = []
for entry in iter_srcinfo(args.repo_path, args.mode, cache):
if entry is None:
continue
srcinfos.append(entry)
# So we stop before CI times out
if args.time_limit and time.monotonic() - t > args.time_limit:
print("time limit reached, stopping")
break
srcinfos_dict = OrderedDict(sorted(srcinfos))
with open(srcinfo_path, "wb") as h:
h.write(json.dumps(srcinfos_dict, indent=2).encode("utf-8"))
return None
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
[
"reiter.christoph@gmail.com"
] |
reiter.christoph@gmail.com
|
9577a6fdaa86cd4492c7057b0c940101e3d60cb8
|
0a5705dbd46dbe6fa876a0d6f9c66bfea2873658
|
/driver_stats_uploader.py
|
6c0ec1021d276f36acf91d3d60f456271b0738a2
|
[
"MIT"
] |
permissive
|
formulaj/formulaj.github.io
|
1522a64fd19704bec562dd90670e1fdb52b1291b
|
2e6a58c8d317eb3be64db51501c563a665827b64
|
refs/heads/master
| 2023-03-23T06:53:06.590428
| 2021-03-08T21:40:33
| 2021-03-08T21:40:33
| 337,518,026
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,761
|
py
|
from github import Github
import pandas as pd
from os import listdir
import sys
import datetime
g = Github("848013873a5d37a03ea9a1a133daf05d1cae3b86")
repo = g.get_user().get_repo("formulaj.github.io")
all_files = []
contents = repo.get_contents("")
while contents:
file_content = contents.pop(0)
if file_content.type == "dir":
contents.extend(repo.get_contents(file_content.path))
else:
file = file_content
all_files.append(str(file).replace('ContentFile(path="','').replace('")',''))
def find_csv_filenames( path_to_dir, suffix=".csv" ):
filenames = listdir(path_to_dir)
return [filename for filename in filenames if filename.endswith( suffix )]
file_list = []
filenames = find_csv_filenames("/Users/vedangjoshi/PycharmProjects/formulaj/driver_stats")
for name in filenames:
file_list.append(name)
name_list = []
for i in file_list:
name_list.append(i.split('_')[0])
print(name_list)
markdowntextinit = '''---
layout: post
title: %s Driver Statistics
---
'''
for i in range(len(name_list)):
df_driver_stat = pd.read_csv("/Users/vedangjoshi/PycharmProjects/formulaj/driver_stats/" + file_list[i])
df_driver_stat = df_driver_stat.set_index('Season')
markdown_df_drivers = df_driver_stat.to_markdown()
markdowndfwithtxt = markdowntextinit % (name_list[i]) + markdown_df_drivers
git_file = '%s_page.md'%(name_list[i])
if git_file in all_files:
contents = repo.get_contents(git_file)
repo.update_file(contents.path, "committing files", markdowndfwithtxt, contents.sha, branch="master")
print(git_file + ' UPDATED')
else:
repo.create_file(git_file, "committing files", markdowndfwithtxt, branch="master")
print(git_file + ' CREATED')
|
[
"noreply@github.com"
] |
formulaj.noreply@github.com
|
8cfe6075a39ae1cd2af77ea6985e9c367a1b5f8f
|
b6a2c6a345f7347d6d80d6ff633230af79270fa4
|
/socialApp/middleware.py
|
5394b43e259b52afc7b72f5f17b8d8864865c0d8
|
[] |
no_license
|
habib049/SocialApp
|
636f7cdeb45981010dac7be7c4876da393504cf8
|
ea9cd99d12d5314814c7643643f007738ee2efa0
|
refs/heads/master
| 2023-04-17T10:45:11.437079
| 2021-04-30T18:29:22
| 2021-04-30T18:29:22
| 338,523,912
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 835
|
py
|
import re
from django.conf import settings
from django.shortcuts import redirect
EXEMPT_URLS = [re.compile(settings.LOGIN_URL.lstrip('/'))]
if hasattr(settings, 'LOGIN_EXEMPT_URLS'):
EXEMPT_URLS += [re.compile(url) for url in settings.LOGIN_EXEMPT_URLS]
class LoginRequiredMiddleware:
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
response = self.get_response(request)
return response
def process_view(self, request, view_func, view_args, view_kwargs):
assert hasattr(request, 'user')
path = request.path_info.lstrip('/')
url_is_exempt = any(url.match(path) for url in EXEMPT_URLS)
if not request.user.is_authenticated:
if not url_is_exempt:
return redirect(settings.LOGIN_URL)
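# Illustrative settings sketch (assumed values, not part of the original file):
# the middleware reads LOGIN_URL and, optionally, LOGIN_EXEMPT_URLS from
# settings.py and must itself be listed in MIDDLEWARE, e.g.
#
#   LOGIN_URL = '/login/'
#   LOGIN_EXEMPT_URLS = (
#       r'^signup/$',
#       r'^password-reset/$',
#   )
#   MIDDLEWARE = [
#       # ... Django defaults ...
#       'socialApp.middleware.LoginRequiredMiddleware',
#   ]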
|
[
"59080575+habib049@users.noreply.github.com"
] |
59080575+habib049@users.noreply.github.com
|
10fd355ee3e00142b8185d5407e0de179ba193cb
|
000c243b4c30bd089867f73ca1bcfede1c3ef801
|
/catkin_ws/build/turtlebot3_simulations/turtlebot3_gazebo/catkin_generated/pkg.develspace.context.pc.py
|
13dfd573221550b94ca6ec8fe3d5ee61c3276c43
|
[] |
no_license
|
dangkhoa1210/SLAM-AND-NAVIGATION-FOR-MOBILE-ROBOT-OUTDOOR-INDOOR-
|
b4d9bf2757d839d9766d512c2272731300320925
|
7273ea9e966353440d3993dcba112bc0a2262b98
|
refs/heads/master
| 2023-07-15T14:07:17.123812
| 2021-09-02T10:12:30
| 2021-09-02T10:12:30
| 402,361,868
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 587
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/khoa/catkin_ws/src/turtlebot3_simulations/turtlebot3_gazebo/include".split(';') if "/home/khoa/catkin_ws/src/turtlebot3_simulations/turtlebot3_gazebo/include" != "" else []
PROJECT_CATKIN_DEPENDS = "roscpp;std_msgs;sensor_msgs;geometry_msgs;nav_msgs;tf;gazebo_ros".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "turtlebot3_gazebo"
PROJECT_SPACE_DIR = "/home/khoa/catkin_ws/devel"
PROJECT_VERSION = "1.2.0"
|
[
"dangkhoaphamdang1210@gmail.com"
] |
dangkhoaphamdang1210@gmail.com
|
b3184e75431152f062a10f0a7e5c2ac1ac6de8f8
|
7a6d8764f3b5ed06070a9b0c1480908300239930
|
/testchild.py
|
bbc945727f27eff4109286d554756d38541c66b0
|
[] |
no_license
|
palon15/FirstTest
|
00c62933d78db9016550b0ba2b90b43e811a6a5a
|
5bd753d570d706ff993a4b77fe2729a9c5a6b4b9
|
refs/heads/main
| 2022-12-28T22:27:08.743179
| 2020-10-10T10:17:48
| 2020-10-10T10:17:48
| 302,871,426
| 0
| 0
| null | 2020-10-10T10:17:49
| 2020-10-10T10:02:02
|
Python
|
UTF-8
|
Python
| false
| false
| 63
|
py
|
# adding a file to a child branch
print("inside child branch")
|
[
"noreply@github.com"
] |
palon15.noreply@github.com
|
bc4d123d8af032677feaf2a2704e98d5dfd202d3
|
aabfbd4f6c940aa7c75195bd60d19a551fce3822
|
/tutorials/path_planning/config_space_plot.py
|
a831575392f9d04415e251446cd914cb3e986735
|
[] |
no_license
|
daoran/eth_supermegabot
|
9c5753507be243fc15133c9dfb1d0a5d4ff1d496
|
52b82300718c91344f41b4e11bbcf892d961af4b
|
refs/heads/master
| 2020-07-28T13:42:08.906212
| 2019-12-04T16:51:42
| 2019-12-04T16:51:42
| 209,428,875
| 1
| 1
| null | 2019-09-19T00:36:33
| 2019-09-19T00:36:33
| null |
UTF-8
|
Python
| false
| false
| 3,924
|
py
|
import numpy as np
import matplotlib.pyplot as plt
import polygon_tools as poly
import robot_tools
from matplotlib.patches import Polygon as PlotPolygon
from matplotlib.collections import PatchCollection
from skimage import measure
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
import copy
plt.rc('font',**{'family':'serif','sans-serif':['Computer Modern Roman']})
plt.rc('text', usetex=True)
nx = 101
num_obstacles = 5
n_obs_samples = 5
obs_std = 0.1
np.random.seed(5)
# Generate obstacles (random points then convex hull)
obs_centres = [poly.Point(*np.random.uniform(size=2)) for i in range(num_obstacles)]
obstacles = []
for pc in obs_centres:
px, py = np.random.normal(pc, obs_std, size=(n_obs_samples, 2)).T
px, py = np.clip(px, 0.0, 1.0), np.clip(py, 0.0, 1.0)
p = poly.PointList([poly.Point(x, y) for x, y in zip(px, py)])
p = poly.convex_hull(p)
obstacles.append(p)
# Get some random points and see if they're in the obstacles:
in_obs, out_obs = poly.PointList([]), poly.PointList([])
for i in range(200):
p = poly.Point(*np.random.uniform(size=2))
collision = False
for o in obstacles:
if o.point_inside(p):
collision = True
break
if collision:
in_obs.append(p)
else:
out_obs.append(p)
f1, a1 = plt.subplots()
h_obs = []
for o in obstacles:
h_obs.append(PlotPolygon(o, color='lightgrey', zorder=1))
c_obs = PatchCollection(h_obs)
a1.add_collection(c_obs)
a1.scatter(*zip(*in_obs), color='r', marker='x')
a1.scatter(*zip(*out_obs), color='g', marker='.')
print "Intersect: {0}".format(obstacles[0].intersect(obstacles[1]))
# Now try robot poses:
# robo_footprint = poly.PointList([poly.Point(0.05, 0.0), poly.Point(-0.03, 0.03), poly.Point(-0.03, -0.03)])
robo_footprint = poly.PointList([poly.Point(0.1, 0.01), poly.Point(-0.1, 0.01), poly.Point(-0.1, -0.01), poly.Point(0.1, -0.01)])
robo = robot_tools.Robot2D(footprint=robo_footprint)
a1.add_artist(PlotPolygon(robo.get_current_polygon(), facecolor='r'))
robo.set_position((0.25, 0.38))
robo.get_current_polygon().intersect(obstacles[-1])
x, y, h = np.linspace(0, 1, 51), np.linspace(0, 1, 51), np.linspace(0, np.pi, 41)
v = np.zeros((len(x), len(y), len(h)))
for i,xi in enumerate(x):
for j, yj in enumerate(y):
robo.set_position((xi, yj))
for k, hk in enumerate(h):
in_obs = 0.0
robo.set_heading(hk)
fp = robo.get_current_polygon()
for o in obstacles:
if fp.intersect(o):
in_obs = 1.0
break
v[i, j, k] = in_obs
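# Illustrative aside (not in the original script): v[i, j, k] is a binary
# occupancy grid over (x, y, heading); marching_cubes below extracts the
# boundary surface of these configuration-space obstacles for the 3D plot.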
verts, faces, normals, values = measure.marching_cubes(v, spacing=(x[1]-x[0], y[1]-y[0], (h[1]-h[0])*180/np.pi))
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(111, projection='3d')
ax.plot_trisurf(verts[:, 0], verts[:,1], faces, verts[:, 2],
cmap='Spectral', lw=1)
ax.set_xlim(0, x[-1]) # a = 6 (times two for 2nd ellipsoid)
ax.set_ylim(0, y[-1]) # b = 10
ax.set_zlim(0, h[-1]*180/np.pi) # c = 16
ax.set_xlabel(r'$x_c$')
ax.set_ylabel(r'$y_c$')
ax.set_zlabel(r"$\theta (^{\circ})$")
robo.set_position([0.1, 0.1])
f2, a2 = plt.subplots(2, 2)
for i, ax in enumerate(a2.flat):
dex = int(i*0.25*(len(h)-1))
ax.matshow(v[:, :, dex].transpose(), origin='lower', extent=[0,1,0,1], cmap='Greys')
ax.add_collection(PatchCollection(copy.copy(h_obs)))
robo.set_heading(h[dex])
ax.add_artist(PlotPolygon(robo.get_current_polygon(), facecolor='r'))
ax.plot(*robo.position, color='g', marker='x')
ax.set_title(r"$\theta = {0}$".format(h[dex]*180/np.pi))
ax.tick_params(top=0, left=0)
# random.seed(1)
# true_g = fm_graphtools.CostmapGrid(gridsize[0], gridsize[1])
# true_g.obstacles = fm_plottools.generate_obstacles(gridsize[0], gridsize[1], nobs, obs_size)
#
# f1, a1 = fm_plottools.init_fig(true_g)
# fm_plottools.draw_grid(a1, true_g)
plt.show()
|
[
"nicholas.lawrance@mavt.ethz.ch"
] |
nicholas.lawrance@mavt.ethz.ch
|
d86bb661f72c4f169529ab2a853fd25893d52509
|
1e38b6cb91885bf7219bdbb3f734591ff7e3541f
|
/p011.py
|
f7a5010cdd26fae1e1bc499b6e3d254a15eb0eda
|
[] |
no_license
|
lajospajtek/thought-tracker.projecteuler
|
9190c80888788435b99508cac617f7a5583e13b2
|
13b8bb18f893e9b839ab268f3c7ae8aee873ed10
|
refs/heads/master
| 2020-06-07T06:33:39.490117
| 2015-06-12T21:22:16
| 2015-06-12T21:22:16
| 33,399,425
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,239
|
py
|
# project euler problem 011
# a quick hack
def read_square(filename):
square = []
file = open(filename)
for line in file:
numbers = line.strip().split(" ")
if numbers == ['']: break
tline = []
for i in numbers:
tline.append(int(i))
square.append(tline)
return square
def prod(a, b): return a * b
def max_horiz(square, max):
for l in square:
for i in range(0, len(l)-3):
p = reduce(prod, l[i:i+4])
if p > max: max = p
return max
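# Illustrative note (not in the original): reduce(prod, l[i:i+4]) multiplies a
# sliding window of four adjacent values, e.g. reduce(prod, [2, 3, 4, 5]) == 120.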
def max_vert(s, max):
for i in range(0,16):
for j in range(0,19):
p = s[i][j]*s[i+1][j]*s[i+2][j]*s[i+3][j]
if p>max: max=p
return max
def max_diag1(s, max):
for i in range(0,16):
for j in range(0,16):
p = s[i][j]*s[i+1][j+1]*s[i+2][j+2]*s[i+3][j+3]
if p>max: max=p
return max
def max_diag2(s, max):
for i in range(0,16):
for j in range(3,19):
p = s[i][j]*s[i+1][j-1]*s[i+2][j-2]*s[i+3][j-3]
if p>max: max=p
return max
square = read_square("p011.txt")
max = max_horiz(square, 1)
max = max_vert(square, max)
max = max_diag1(square, max)
max = max_diag2(square, max)
print max
|
[
"lajos@localhost"
] |
lajos@localhost
|
4d9bf5fa2c1ea46bbb9d90855107455ca0d0b035
|
85057c6984ac2843bae8d8f4b2ee15ad6572ae4b
|
/STOCK_CHOOSE/standard_wave.py
|
b5cf8665a54e7591b416ecaad303119269933ed7
|
[] |
no_license
|
gxgjnn/live
|
28eba90eb7fb9071b3ebf2632c349d9e73a2fc7e
|
f2c3a3a072669fe1c499db0a92d23010c143ff30
|
refs/heads/master
| 2020-03-24T02:37:41.484954
| 2018-07-27T09:41:55
| 2018-07-27T09:41:55
| 142,384,328
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,242
|
py
|
# -*- coding: utf-8 -*-
from line import Line
import pandas as pd
import numpy as np
class Wave(Line):
"""
    Find the price values of paired peaks and troughs that satisfy the balance condition, in order to
    predict future buy/sell reference prices (only one peak/trough pair is returned in the end); if no
    such pair exists, all values returned are 0.
    Coefficient parameters are on line 98.
"""
def __init__(self, stock_id):
super(Line, self).__init__(stock_id)
df_s, cv_s, price_s, stop_dot_s = self.support_line()
df_p, cv_p, price_p, stop_dot_p = self.pressure_line()
# 矫正x轴
if stop_dot_p > stop_dot_s:
dif = stop_dot_p - stop_dot_s
x_new = df_s.date + dif * self.ef
df_s.date = x_new
elif stop_dot_s > stop_dot_p:
dif = stop_dot_s - stop_dot_p
x_new = df_p.date + dif * self.ef
df_p.date = x_new
self.df_s = df_s
self.cv_s = cv_s
self.price_s = price_s
self.stop_dot_s = stop_dot_s
self.df_p = df_p
self.cv_p = cv_p
self.price_p = price_p
self.stop_dot_p = stop_dot_p
def data_provide(self):
"""
:return:
        wave_data: DataFrame of peaks and troughs ordered by date, oldest to newest
        data_ave_60: 60-day moving-average data set, ordered oldest to newest
        data_ave_60_r: regression-line data set fitted to the 60-day moving average, ordered oldest to newest
        model: regression model of the 60-day moving average
"""
        # wave_data combines the peak and trough data sets, ordered oldest to newest
wave_data = pd.concat([self.df_s, self.df_p], axis=0)
        # Sort from oldest to newest
wave_data.sort_values(by=['date'], ascending=[True], inplace=True)
stop_dot = max(self.stop_dot_p, self.stop_dot_s)
        # 60-day moving-average data set, ordered oldest to newest
data_ave_60 = self.angle_data(ave=60, stop_dot=stop_dot)
        # Use the regression model to compute the regression-line data set
angle, a, b, model = self.line_model(data_ave_60)
x = data_ave_60['date'].values.reshape(-1, 1)
y = model.predict(x)
        # Regression-line data set of the 60-day moving average, ordered oldest to newest
data_ave_60_r = pd.DataFrame({'date': np.array(list(data_ave_60['date'])), 'price': y})
return wave_data, data_ave_60, data_ave_60_r, model
def standard_wave(self):
"""
        (dis * 0.99) <= float(middle_y) <= (dis * 1.01): pairing condition, where 0.99 and 1.01 are tunable parameters
        :return:
        e_pressure_fox            standard pressure (resistance) price
        e_support_fox             standard support price
        e_pressure_fox_smaller    pressure price derived from the most recent known trough
        e_support_fox_larger      support price derived from the most recent known peak
        current_dis               number of days from the current date
        close_time_dis            number of days between the balanced wave points (used to limit their spacing)
        fox                       DataFrame of date/price for the point nearest the right edge of the candlestick chart in every wave pair that satisfies the standard-wave computation, used for testing and validation
        All of the above are returned after comparison with the corresponding 60-day moving-average value.
"""
wave_data, data_ave_60, data_ave_60_r, model = self.data_provide()
        # Compute all combinations that satisfy the standard rise/fall pairing
fox_date = []
fox_price = []
fox_time_dis = []
if self.cv_p == 1:
e_pressure_fox = self.price_p
elif len(self.df_p) >= 2:
e_pressure_fox = round((list(self.df_p.price)[-1] + list(self.df_p.price)[-2]) / 2, 2)
else:
e_pressure_fox = round(list(self.df_p.price)[-1] * 0.98, 2)
if self.cv_s == 1:
e_support_fox = self.price_s
else:
e_support_fox = round(min(list(self.df_s.price)[-1], list(self.df_s.price)[-2]), 2)
e_pressure_fox_smaller = 0
e_support_fox_larger = 0
close_time_dis = 0
current_dis = 0
print '*' * 30
print 'len(df_s):', len(self.df_s)
print 'len(df_p):', len(self.df_p)
print '*' * 30
for i in range(len(self.df_s)):
            # Trough price
trough_dot_s = self.df_s['price'].iloc[i]
            # x value corresponding to the trough
date_dot_s = self.df_s['date'].iloc[i]
            # Price on the 60-day moving average at the trough's date
price_trough_bridge = data_ave_60.loc[data_ave_60.date < date_dot_s + self.ef, ]
price_trough_60 = list(price_trough_bridge.price.loc[price_trough_bridge.date > date_dot_s - self.ef, ])[0]
try:
for j in range(len(self.df_p)):
peak_dot_p = self.df_p['price'].iloc[j]
date_dot_p = self.df_p['date'].iloc[j]
                    # Price on the 60-day moving average at the peak's date
price_peak_bridge = data_ave_60.loc[data_ave_60.date < date_dot_p + self.ef, ]
price_peak_60 = list(
price_peak_bridge.price.loc[price_peak_bridge.date > date_dot_p - self.ef, ])[0]
                    # Record the paired peak and trough
middle_x = (date_dot_p + date_dot_s) / 2
middle_y_r = model.predict(middle_x)
                    # Add self.ef to avoid a "list index out of range" error
middle_y_l_bridge = data_ave_60.loc[data_ave_60.date < middle_x + self.ef, ]
middle_y_l = list(middle_y_l_bridge.price.loc[middle_y_l_bridge.date > middle_x - self.ef, ])[0]
                    # Take the midpoint of the two prices
middle_y = (middle_y_r + middle_y_l) / 2
dis = (trough_dot_s + peak_dot_p) / 2
                    # Closing prices are used, so allow an estimated 2% fluctuation
print '*' * 30
                    print 'middle price dis:', dis
                    print 'middle price middle_y:', middle_y
print '*' * 30
if (dis * 0.98) <= middle_y <= (dis * 1.02):
                        # ij_data merges the peak point and the trough point that satisfy the standard pattern
ij_data = pd.concat(
[pd.DataFrame(self.df_s.iloc[i, :]).T, pd.DataFrame(self.df_p.iloc[j, :]).T], 0)
ij_data.sort_values(by=['date'], ascending=[True], inplace=True)
                        # For computing the distance in days
dis_ij = (ij_data.date.iloc[1] - ij_data.date.iloc[0]) / self.ef
                        # Limit dis_ij to suppress small-wave noise; 80 effectively means no limit
if (trough_dot_s /
price_trough_60 <= 0.95) and (peak_dot_p / price_peak_60 >= 1.05) and (dis_ij < 80):
print 'ij_data:', ij_data
fox_date.append(float(ij_data.date.iloc[-1]))
fox_price.append(float(ij_data.price.iloc[-1]))
fox_time_dis.append(dis_ij)
except Exception, e:
print e
fox = pd.DataFrame({'date': fox_date, 'price': fox_price, 'time_dis': fox_time_dis})
stop_dot = max(self.stop_dot_p, self.stop_dot_s)
if fox.empty is False:
            # Compute the standard rise/fall price
max_data = fox.loc[fox.date == max(fox.date), :]
current_dis = stop_dot - list(max_data.date)[0] / self.ef
            # One x may correspond to more than one y value
if len(max_data) > 1:
closer_price = float(max_data.loc[max_data.time_dis == min(max_data.time_dis), 'price'])
close_time_dis = min(max_data.time_dis)
else:
closer_price = float(fox.loc[fox.date == max(fox.date), 'price'])
close_time_dis = float(fox.loc[fox.date == max(fox.date), 'time_dis'])
            # Compute the middle price of the second half of the span
middle_dis = (stop_dot - max(fox.date) / self.ef) / 2 + max(fox.date) / self.ef
close_date = middle_dis * self.ef
# close_date = max(fox.date)
# price_ave_60_predict_r = model.predict(close_date)
            # The regression line (or the mid-value of the two lines) is not used here because current_dis is constrained; the closer to the latest data, the more valuable the moving average is compared with its regression line. The same reasoning applies below.
data_ave_60_bridge = data_ave_60.loc[data_ave_60.date < close_date + self.ef, ]
price_ave_60_predict_l = list(
data_ave_60_bridge.price.loc[data_ave_60_bridge.date > close_date - self.ef, ])[0]
# price_ave_60_predict = (price_ave_60_predict_r + price_ave_60_predict_l) / 2
            # closer_price is the price of the nearest wave point that satisfies the condition
if price_ave_60_predict_l > closer_price:
e_pressure_fox = 2 * price_ave_60_predict_l - closer_price
else:
e_support_fox = 2 * price_ave_60_predict_l - closer_price
        # If fox is empty, use the extreme peak/trough values to derive e_support_fox / e_pressure_fox
else:
max_peak_price = max(np.array(self.df_p.price))
max_peak_data = np.array(self.df_p.date.loc[self.df_p.price == max_peak_price, ])[-1]
max_peak_data_dis = (stop_dot - max_peak_data / self.ef) / 2 + max_peak_data / self.ef
x = max_peak_data_dis * self.ef
x_bridge = data_ave_60.loc[data_ave_60.date < x + self.ef, ]
middle_peak_price_l = list(x_bridge.price.loc[x_bridge.date > x - self.ef, ])[0]
e_support_fox_bridge = 2 * middle_peak_price_l - max_peak_price
if e_support_fox_bridge < self.ave_price_60[0]:
print 'peak_max'
e_support_fox = e_support_fox_bridge * 1.03
min_trough_price = min(np.array(self.df_s.price))
min_trough_data = np.array(self.df_s.date.loc[self.df_s.price == min_trough_price, ])[-1]
min_trough_data_dis = (stop_dot - min_trough_data / self.ef) / 2 + min_trough_data / self.ef
x = min_trough_data_dis * self.ef
x_bridge = data_ave_60.loc[data_ave_60.date < x + self.ef, ]
middle_trough_price_l = list(x_bridge.price.loc[x_bridge.date > x - self.ef, ])[0]
e_pressure_fox_bridge = 2 * middle_trough_price_l - min_trough_price
            # The else case is not considered
if e_pressure_fox_bridge > self.ave_price_60[0]:
print 'trough_min'
e_pressure_fox = e_pressure_fox_bridge
        # Compute the most recent standard rise/fall price
closest_price = wave_data.price.iloc[-1]
closest_date_0 = wave_data.date.iloc[-1]
middle_dis = (stop_dot - closest_date_0 / self.ef) / 2 + closest_date_0 / self.ef
closest_date = middle_dis * self.ef
# price_ave_60_predict_r = model.predict(closest_date)
data_ave_60_bridge = data_ave_60.loc[data_ave_60.date < closest_date + self.ef, ]
price_ave_60_predict_l = list(
data_ave_60_bridge.price.loc[data_ave_60_bridge.date > closest_date - self.ef, ])[0]
# price_ave_60_predict = (price_ave_60_predict_r + price_ave_60_predict_l) / 2
if price_ave_60_predict_l > closest_price:
e_pressure_fox_smaller = 2 * price_ave_60_predict_l - closest_price
else:
e_support_fox_larger = 2 * price_ave_60_predict_l - closest_price
return e_pressure_fox, e_support_fox, e_pressure_fox_smaller, e_support_fox_larger, current_dis, close_time_dis, fox
def paint_paint_line(self):
"""
        :return: plot of the peak/trough points together with the 60-day moving-average regression line
"""
wave_data, data_ave_60, data_ave_60_r, model = self.data_provide()
print 'stop_dot:\n', max(self.stop_dot_p, self.stop_dot_s)
        # Draw the plot
self.paint_line(wave_data, data_ave_60)
if __name__ == "__main__":
stock = '300346'
obj = Wave(stock)
# a,b,c,d= obj.data_provide()
# print 'wave_data:',a
# print 'data_ave_60:',b
# print 'data_ave_60_r:',c
# print 'model:',d
e_pressure = obj.price_p
e_support = obj.price_s
aa, bb, c, d, ee, f, ox = obj.standard_wave()
print '*' * 30
print 'e_pressure:', e_pressure
print 'e_support:', e_support
print 'e_pressure_fox', aa
print 'e_support_fox', bb
print 'e_pressure_fox_smaller', c
print 'e_support_fox_larger', d
print 'current_dis', ee
print 'close_time_dis', f
print 'fox', ox
print '*' * 30
obj.paint_paint_line()
|
[
"2320648142@qq.com"
] |
2320648142@qq.com
|
c57f5680aefea93c74464ba567d23502665c32f7
|
ae7a7e4e41b4834f1a66443579125a0b77000173
|
/mmaction/models/tenons/segmental_consensuses/simple_consensus.py
|
246b4f73dbdfeadf22532bde64bbb6389bcbe17f
|
[
"Apache-2.0"
] |
permissive
|
Solo777/mmaction
|
213192487fd5144baaecd63716fb189ea70e9628
|
40580ee6da148f639842d87edf899ac523060a49
|
refs/heads/master
| 2021-08-10T23:25:20.292879
| 2020-07-02T13:34:38
| 2020-07-02T13:34:38
| 198,738,105
| 1
| 0
|
Apache-2.0
| 2019-07-25T01:59:13
| 2019-07-25T01:59:13
| null |
UTF-8
|
Python
| false
| false
| 1,346
|
py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from ...registry import SEGMENTAL_CONSENSUSES
class _SimpleConsensus(torch.autograd.Function):
"""Simplest segmental consensus module"""
def __init__(self,
consensus_type='avg',
dim=1):
super(_SimpleConsensus, self).__init__()
assert consensus_type in ['avg']
self.consensus_type = consensus_type
self.dim = dim
self.shape = None
def forward(self, x):
self.shape = x.size()
if self.consensus_type == 'avg':
output = x.mean(dim=self.dim, keepdim=True)
else:
output = None
return output
def backward(self, grad_output):
if self.consensus_type == 'avg':
grad_in = grad_output.expand(self.shape) / float(self.shape[self.dim])
else:
grad_in = None
return grad_in
@SEGMENTAL_CONSENSUSES.register_module
class SimpleConsensus(nn.Module):
def __init__(self, consensus_type, dim=1):
super(SimpleConsensus, self).__init__()
assert consensus_type in ['avg']
self.consensus_type = consensus_type
self.dim = dim
def init_weights(self):
pass
def forward(self, input):
return _SimpleConsensus(self.consensus_type, self.dim)(input)
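# Minimal usage sketch (illustrative, not part of the original module):
# average segment-level predictions over the segment dimension.
#
#   consensus = SimpleConsensus(consensus_type='avg', dim=1)
#   x = torch.rand(8, 3, 400)   # (batch, num_segments, num_classes)
#   out = consensus(x)          # shape (8, 1, 400): mean over the 3 segments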
|
[
"thuzhaoyue@gmail.com"
] |
thuzhaoyue@gmail.com
|
dc1692b60cf7445a8f7c853dd76a1919a9afbaa5
|
1fabb8c605ee8187b2c637bc4a75ed35b1b53fc1
|
/ccm/ui/htmltrace.py
|
375d854da52cf4d12f69976f28ba692c20874097
|
[] |
no_license
|
ecphory/ccmsuite
|
dbc4c7e0495f47cfb46445a355c09eeaa0cf06a6
|
83081a786f749e6b298ade73253d178081dbfb96
|
refs/heads/master
| 2021-05-18T07:25:16.754476
| 2020-03-30T02:24:12
| 2020-03-30T02:24:12
| 251,178,559
| 1
| 0
| null | 2020-03-30T02:11:07
| 2020-03-30T02:11:06
| null |
UTF-8
|
Python
| false
| false
| 5,068
|
py
|
from ccm.ui.pytag import *
def splitKey(key):
r=[]
a=''
depth=0
for c in key:
if c=='.' and depth==0:
if a:
r.append(a)
a=''
elif c in '[(':
if a:
r.append(a)
a=''
depth+=1
elif c in '])':
if a:
r.append(a)
a=''
depth-=1
else:
a+=c
if a:
r.append(a)
a=''
return r
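# Illustrative behaviour sketch (not in the original): splitKey breaks a dotted
# or bracketed trace key into its parts, e.g.
#   splitKey('buffer.visual[0].x')  ->  ['buffer', 'visual', '0', 'x']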
def makeHeader(table,keys):
keys=[splitKey(k) for k in keys]
size=max(len(x) for x in keys)
for k in keys:
while len(k)<size: k.append('')
noMerge=[False]*len(keys)
for i in range(size):
row=[keys[j][i] for j in range(len(keys))]
merged=[]
values=[row[0]]
count=1
for j in range(1,len(keys)):
if noMerge[j] or row[j]!=values[-1]:
merged.append(count)
count=1
values.append(row[j])
noMerge[j]=True
else:
count+=1
merged.append(count)
row=tr()
for j in range(len(merged)):
row[th(colspan=repr(merged[j]))[values[j]]]
table[row]
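# Illustrative sketch (not in the original): makeHeader merges identical
# adjacent prefixes into one spanning header cell, e.g. for keys
#   ['buffer.visual', 'buffer.manual', 'time']
# the first header row is   | buffer (colspan=2) | time |
# and the second row is     | visual | manual | (blank) |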
colors="""AliceBlue
AntiqueWhite
Aqua
Aquamarine
Azure
Beige
Bisque
BlanchedAlmond
BurlyWood
Chartreuse
Cornsilk
Cyan
DarkGrey
DarkKhaki
Darkorange
DarkSalmon
DarkSeaGreen
DarkTurquoise
DeepSkyBlue
DodgerBlue
Gainsboro
GhostWhite
Gold
GoldenRod
GreenYellow
HoneyDew
Ivory
Khaki
Lavender
LavenderBlush
LawnGreen
LemonChiffon
LightBlue
LightCyan
LightGoldenRodYellow
LightGray
LightGrey
LightGreen
LightPink
LightSeaGreen
LightSkyBlue
LightSteelBlue
LightYellow
Lime
LimeGreen
Linen
MediumAquaMarine
MediumSeaGreen
MediumSpringGreen
MediumTurquoise
MintCream
MistyRose
Moccasin
NavajoWhite
OldLace
Orange
PaleGoldenRod
PaleGreen
PaleTurquoise
PapayaWhip
PeachPuff
Pink
Plum
PowderBlue
Salmon
SandyBrown
Silver
SkyBlue
SpringGreen
Tan
Thistle
Turquoise
Wheat
WhiteSmoke
Yellow
YellowGreen""".split()
class HTMLTrace:
def __init__(self,trace):
self.trace=trace
def getColor(self,value):
if value=='': return 'white','white'
if value=='True' or value is True: return 'lightgreen','green'
if value=='False' or value is False: return 'pink','red'
if isinstance(value,(int,float)): return 'black','white'
num=hash(value)
return 'black',colors[num%len(colors)]
def fixValue(self,val):
if val is None or val=='None': val=''
try: val=val.replace('<','&lt;').replace('>','&gt;')
except: pass
if type(val) not in [int,float,bool] and ':' in val:
slots=val.split()
for i,slot in enumerate(slots):
if ':' in slot:
a,b=slot.split(':',1)
slots[i]='<i>%s:</i>%s'%(a,b)
val=' '.join(slots)
return val
def makeFixedTable(self,fixed):
t=table()
for k in fixed:
t[tr[td[k],td[self.trace.get_final(k)]]]
return t
def makeBody(self,table,keys,pts):
grouped={}
for k in keys:
grouped[k]=list(self.trace.group_pts(pts,k))
for pt in pts:
row=tr()
for k in keys:
if pt not in grouped[k][0]:
del grouped[k][0]
if pt==grouped[k][0][0]:
val=self.trace.get_at(k,pt)
val=self.fixValue(val)
if k=='time':
val='%1.3f'%val
c,bg='white','#333333'
else:
c,bg=self.getColor(val)
style='background:%s; color:%s;'%(bg,c)
row[td(rowspan=repr(len(grouped[k][0])),style=style)[val]]
table[row]
def generate(self,filename):
keys=self.trace.keys()
fixed_keys=self.trace.fixed_keys()
fixed_keys.sort()
keys=[k for k in keys if k not in fixed_keys]
keys.sort()
has_time=False
if 'time' in keys:
keys.remove('time')
has_time=True
pts=self.trace.get_pts(keys)
if has_time:
keys.insert(0,'time')
timePts=self.trace.get_pts(['time'])
if 'time' in keys:
self.trace.merge_pts(pts,'time')
tbl=table()
makeHeader(tbl,keys)
self.makeBody(tbl,keys,pts)
fixed=self.makeFixedTable(fixed_keys)
if not filename.endswith('.html'): filename+='.html'
f=file(filename,'w')
page=html[
head[
title[filename],
style["""
table {border-collapse: collapse; empty-cells:show;}
td {border: solid black 1px; vertical-align:top;}
th {border: solid #cccccc 1px; background:black; color:white;}
"""],
],
body[
tbl,
fixed,
]
]
print>>f,page
|
[
"tcstewar@uwaterloo.ca"
] |
tcstewar@uwaterloo.ca
|
538fd8b5f75edcefe005bfb2d255cb481cf45097
|
d2c0c5d802fb408a869005d5c643a929555fffc9
|
/RunAnalysis.py
|
35ca39f76a006a5650c65dbef242bf791ee5cea0
|
[] |
no_license
|
erccarls/GammaLike_dev
|
543f0d9dd2b746d932a574f8e48b53e6277309a1
|
3c376c94baa39e4af0d1e94910456edd0ed6ce0e
|
refs/heads/master
| 2021-01-13T11:59:19.850403
| 2016-05-23T20:00:57
| 2016-05-23T20:00:57
| 27,199,955
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,813
|
py
|
import numpy as np
import h5py
import Analysis
def AddFitMetadata(path, h5_path, A, extra_dict=None):
h5 = h5py.File(path)
try:
h5.create_group(h5_path)
except:
pass
fa = h5[h5_path].attrs
fit = A.SaveFit()
for key, val in fit.items():
if key not in ['Data', 'energies', 'loglike', 'PSC']:
fa.create('flux_'+key,val['flux'])
fa.create('fluxunc_'+key,val['fluxunc'])
fa.create('loglike_total',np.sum(A.loglike))
fa.create('loglike',A.loglike)
fa.create('energies',A.central_energies)
fa.create('bins', A.bin_edges)
fa.create('irf', A.irf)
fa.create('evclass', A.evclass)
fa.create('convtype', A.convtype)
fa.create('phfile', A.phfile)
fa.create('tag', A.tag)
if extra_dict is not None:
for key, val in extra_dict.items():
fa.create(key, val)
h5.close()
def LoadModel(basedir, galprop_tag):
# Load various diffuse models and run fits.
print 'Running Analysis for model', galprop_tag
A = Analysis.Analysis(tag='P7REP_CLEAN_V15_calore', basepath='/pfs/carlson/GCE_sys/')
A.GenSquareMask(l_range=[-20.,20.], b_range=[-20.,20.], plane_mask=2.)
A.BinPhotons(infile='binned_photons_'+A.tag+'.npy')
# Load 2FGL
A.AddPointSourceTemplate(fixNorm=True,pscmap='PSC_3FGL_with_ext.npy')
A.CalculatePixelWeights(diffuse_model='fermi_diffuse_'+A.tag+'.npy',psc_model='PSC_3FGL_with_ext.npy',
alpha_psc=5., f_psc=0.1)
A.AddIsotropicTemplate(fixNorm=False, fixSpectrum=False) # External chi^2 used to fix normalization within uncertainties
A.AddFermiBubbleTemplate(template_file='./bubble_templates_diskcut30.0.fits',
spec_file='./reduced_bubble_spec_apj_793_64.dat', fixSpectrum=False, fixNorm=False)
A.AddHDF5Template(hdf5file=basedir +'/'+ galprop_tag+'.hdf5',verbosity=1, multiplier=2., bremsfrac=1.25,
E_subsample=2, fixSpectrum=False, separate_ics=False)
return A
def Analyze(basedir, galprop_tag, A, analysis=0):
if analysis == 0:
#--------------------------------------------
# GC fit without DM
A.RunLikelihood(print_level=0, tol=2e2, precision=None, minos=True)[0]
AddFitMetadata(basedir +'/'+ galprop_tag+'.hdf5', h5_path='/fit_results/GC_no_dm/', A=A, extra_dict=None)
#--------------------------------------------
# GCE Fit
A.ResetFit()
A.AddDMTemplate(profile='NFW', limits=[None,None], decay=False, gamma=1.25,
r_s=20.0, axesratio=1, offset=(0, 0), spec_file=None,)
A.RunLikelihood(print_level=1, tol=2e2, precision=None, minos=True)[0]
AddFitMetadata(basedir +'/'+ galprop_tag+'.hdf5', h5_path='/fit_results/GC/', A=A, extra_dict=None)
elif analysis == 1:
#--------------------------------------------
# Scan Slope
gammas = np.linspace(.75,1.5,31)
loglike_total, loglike, dm_spec, dm_spec_unc = [], [], [], []
for i_g, gamma in enumerate(gammas):
A.ResetFit()
print 'gamma scan fitting completed:', i_g/float(len(gammas))
A.AddDMTemplate(profile='NFW', limits=[None,None], decay=False, gamma=gamma,
r_s=20.0, axesratio=1, offset=(0, 0), spec_file=None,)
A.RunLikelihood(print_level=0, tol=2e2, precision=None, minos=False)[0]
loglike.append(A.loglike)
loglike_total.append(np.sum(A.loglike))
E, spec, specUnc = A.GetSpectrum('DM')
dm_spec.append(spec)
dm_spec_unc.append(specUnc)
AddFitMetadata(basedir +'/'+ galprop_tag+'.hdf5', h5_path='/fit_results/scan_gamma/', A=A,
extra_dict={'gamma': gammas,
'loglike':loglike,
'loglike_total':loglike_total,
'dm_spec':dm_spec,
'dm_spec_unc':dm_spec})
elif analysis == 2:
#--------------------------------------------
# Scan axes ratio
ars = np.linspace(.6,2,21)
loglike_total, loglike, dm_spec, dm_spec_unc = [], [], [], []
for i_ar, ar in enumerate(ars):
print 'axes ratio fitting completed:', i_ar/float(len(ars))
A.ResetFit()
A.AddDMTemplate(profile='NFW', limits=[None,None], decay=False, gamma=1.25,
r_s=20.0, axesratio=ar, offset=(0, 0), spec_file=None,)
A.RunLikelihood(print_level=0, tol=2e2, precision=None, minos=False)[0]
loglike.append(A.loglike)
loglike_total.append(np.sum(A.loglike))
E, spec, specUnc = A.GetSpectrum('DM')
dm_spec.append(spec)
dm_spec_unc.append(specUnc)
AddFitMetadata(basedir +'/'+ galprop_tag+'.hdf5', h5_path='/fit_results/scan_axesratio/', A=A,
extra_dict={'axesratio': ars,
'loglike':loglike,
'loglike_total':loglike_total,
'dm_spec':dm_spec,
'dm_spec_unc':dm_spec},)
elif analysis == 3:
# #--------------------------------------------
# # Scan longitude offset
lons = np.linspace(-90,90,61)
loglike_total, loglike, dm_spec, dm_spec_unc, TS = [], [], [], [], []
for i_l, lon in enumerate(lons):
print 'lon offset fitting completed:', i_l/float(len(lons))
A.ResetFit()
A.templateList['Bubbles'].fixSpectrum = True
A.templateList['Bubbles'].fixNorm = True
A.GenSquareMask(l_range=[-20.+lon,20.+lon], b_range=[-20.,20.], plane_mask=2.)
A.RunLikelihood(print_level=0, tol=2e2, precision=None, minos=False)[0]
ll_nodm = np.sum(A.loglike)
A.ResetFit()
A.AddDMTemplate(profile='NFW', limits=[None,None], decay=False, gamma=1.25,
r_s=20.0, axesratio=1, offset=(lon, 0), spec_file=None,)
A.RunLikelihood(print_level=0, tol=2e2, precision=None, minos=False)[0]
loglike.append(A.loglike)
TS.append(2*(ll_nodm-np.sum(A.loglike)))
loglike_total.append(np.sum(A.loglike))
E, spec, specUnc = A.GetSpectrum('DM')
dm_spec.append(spec)
dm_spec_unc.append(specUnc)
AddFitMetadata(basedir +'/'+ galprop_tag+'.hdf5', h5_path='/fit_results/scan_longitude/', A=A,
extra_dict={'longitudes': lons,
'loglike':loglike,
'loglike_total':loglike_total,
'dm_spec':dm_spec,
'dm_spec_unc':dm_spec,
'TS': TS},)
#--------------------------------------------
# localize
elif analysis == 4:
lons = np.linspace(-1,1,21)
fval = np.zeros((len(lons), len(lons)))
for i_l, lon in enumerate(lons):
for i_b, lat in enumerate(lons):
print 'lat/lon fitting completed:', (len(lons)*i_l + i_b)/float(len(lons)**2)
A.ResetFit()
A.AddDMTemplate(profile='NFW', limits=[None,None], decay=False, gamma=1.25,
r_s=20.0, axesratio=1, offset=(lon, lat), spec_file=None,)
A.RunLikelihood(print_level=0, tol=2e2, precision=None, minos=False)[0]
fval[i_b, i_l] = np.sum(A.loglike)
AddFitMetadata(basedir +'/'+ galprop_tag+'.hdf5', h5_path='/fit_results/localize/', A=A,
extra_dict={'longitudes': lons,
'latitudes': lons,
'fval':fval},)
elif analysis == 5:
#--------------------------------------------
# Scan Slope
radius = np.linspace(2,20,10)
loglike_total, loglike, dm_spec, dm_spec_unc = [], [], [], []
for i_r, r in enumerate(radius[:-1]):
A.ResetFit()
print 'radius percent complete:', i_r/float(len(radius))
r1, r2 = r, radius[i_r+1]
A.GenRadialMask(r1,r2, plane_mask=2, merge=False)
A.AddDMTemplate(profile='NFW', limits=[None,None], decay=False, gamma=1.25,
r_s=20.0, axesratio=1, offset=(0, 0), spec_file=None,)
A.RunLikelihood(print_level=0, tol=2e2, precision=None, minos=False)[0]
loglike.append(A.loglike)
loglike_total.append(np.sum(A.loglike))
E, spec, specUnc = A.GetSpectrum('DM')
dm_spec.append(spec)
dm_spec_unc.append(specUnc)
r_bins = [(radius[i], radius[i+1]) for i in range(len(radius))]
AddFitMetadata(basedir +'/'+ galprop_tag+'.hdf5', h5_path='/fit_results/scan_radius/', A=A,
extra_dict={'radius':r_bins,
'loglike':loglike,
'loglike_total':loglike_total,
'dm_spec':dm_spec,
'dm_spec_unc':dm_spec})
import sys
if __name__ == "__main__":
basedir, galprop_tag, analysis = sys.argv[1:4]
A = LoadModel(basedir,galprop_tag)
Analyze(basedir,galprop_tag, A, int(analysis))
#A.ResetFit()
# Run Analysis at GC
# Run Analysis without DM template.
# Scan NFW slope
# Scan axis ratio
# scan offset.
# Localize?
|
[
"erccarls@ucsc.edu"
] |
erccarls@ucsc.edu
|
d8aea5dfb60c756d1c5132576c380c3f6ff066e2
|
932bd971740cc46086af8ffa7be81d61a181a719
|
/exercise/even_numbers.py
|
68c1296e4b66589126005c7e1c3c898dec7b76c6
|
[] |
no_license
|
elohor/bc---python-v
|
295ab4fff962957ad1899e0225e5c853c754fe81
|
f647881c5bb8933816dfe6e0bbb876f7a2aa201b
|
refs/heads/master
| 2021-01-10T12:34:50.464921
| 2016-02-18T20:28:10
| 2016-02-18T20:28:10
| 51,831,200
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 206
|
py
|
# Detects even numbers in a range
def even_numbers(low, high):
even_nos = []
for item in range(low, high):
if item % 2 == 0:
print item
even_nos.append(item)
print even_nos
|
[
"Jasmine"
] |
Jasmine
|
bbc978e0a52c37af0dd4c5a13f9a5dd6218f572d
|
59479a796e2f5d02bb207b7fdedd31d67af0433d
|
/utils.py
|
f5dd09ae64d566987d430ebef382a004fbd84069
|
[] |
no_license
|
edupaz2/FCND-Motion-Planning
|
12dbbbf89c96dcf67f284c32ae3f75ccbd3298b2
|
df2680bb6865ed5557a393d43d9b0a06deb8cd3a
|
refs/heads/master
| 2020-04-28T20:29:10.204572
| 2019-07-21T20:02:38
| 2019-07-21T20:02:38
| 175,545,855
| 0
| 0
| null | 2019-03-14T04:04:06
| 2019-03-14T04:04:06
| null |
UTF-8
|
Python
| false
| false
| 8,631
|
py
|
import networkx as nx
nx.__version__
import pickle
import matplotlib.pyplot as plt
from planning_utils import FLYING_ALTITUDE, SAFETY_DISTANCE, a_star_graph, heuristic
from bresenham import bresenham
import numpy as np
import numpy.linalg as LA
# Getting the largest connected subgraph
def remove_unconnected_subgraphs(Gr):
Gr = max(nx.connected_component_subgraphs(Gr), key=len)
return Gr
def get_next_node_in_chain(Gr, node, previous, not_accepted_nodes=[]):
neighbors = list(Gr.neighbors(node))
# print('get_next_node_in_chain, node {0}, prev {1}, neighbors {2}, not_accepted {3}'.format(node, previous, neighbors, not_accepted_nodes))
if len(neighbors) != 2:
return node
# We are only interested in nodes with 2 neighbors
if node not in not_accepted_nodes:
return node
# Keep going further
for neighbor in neighbors:
if neighbor == previous:
continue
return get_next_node_in_chain(Gr, neighbor, node, not_accepted_nodes)
def remove_unnecessary_nodes(Gr, Cg, safety_height):
nodes_to_remove = []
edges_to_add = []
for n in Gr.nodes:
neighbors = list(Gr.neighbors(n))
if len(neighbors) == 2:
left = get_next_node_in_chain(Gr, neighbors[0], n, nodes_to_remove)
right = get_next_node_in_chain(Gr, neighbors[1], n, nodes_to_remove)
# Check visible path between left and right
hit = False
cells = list(bresenham(int(left[0]), int(left[1]), int(right[0]), int(right[1])))
for c in cells:
# First check if we're off the map
if np.amin(c) < 0 or c[0] >= Cg.shape[0] or c[1] >= Cg.shape[1]:
hit = True
break
# Next check if we're in collision
if Cg[c[0], c[1]] >= safety_height:
hit = True
break
# If the edge does not hit on obstacle
# add it to the list
if not hit:
dist = LA.norm(np.array(left) - np.array(right))
edges_to_add.append((left, right, dist))
nodes_to_remove.append(n)
for edge in edges_to_add:
left = edge[0]
right = edge[1]
dist = edge[2]
if left not in nodes_to_remove and right not in nodes_to_remove:
Gr.add_edge(left, right, weight=dist)
Gr.remove_nodes_from(nodes_to_remove)
return Gr
def print_info(Gr, Cg, north_offset, east_offset):
print('Graph nodes: %5d' % len(Gr.nodes))
print('Graph edges: %5d' % len(Gr.edges))
print('Grid dimensions {0}, north_offset: {1}, east_offset: {2} '.format(Cg.shape, north_offset, east_offset))
def load_graph_from_pickle(pkl_filename):
print('Loading {0} graph'.format(pkl_filename))
with open(pkl_filename, "rb") as pfile:
dist_pickle = pickle.load(pfile)
Gr = dist_pickle['graph']
Cg = dist_pickle['collision_grid']
north_offset = dist_pickle['north_offset']
east_offset = dist_pickle['east_offset']
return Gr, Cg, north_offset, east_offset
def save_graph_to_pickle(Gr, Cg, north_offset, east_offset, pkl_filename):
try:
with open(pkl_filename, 'wb+') as pfile:
print('Saving to pickle file', pkl_filename)
pickle.dump(
{
'graph': Gr,
'collision_grid': Cg,
'north_offset' : north_offset,
'east_offset' : east_offset,
},
pfile, pickle.HIGHEST_PROTOCOL)
except Exception as e:
print('Unable to save data to ', pkl_filename, ':', e)
def visualize_graph(Gr, Cg, nmin=0, emin=0):
# Plot it up!
fig = plt.figure(figsize=(10,10))
plt.imshow(Cg, origin='lower', cmap='Greys')
# Draw edges in green
for (n1, n2) in list(Gr.edges)[0:1]:
plt.plot([n1[1] - emin, n2[1] - emin], [n1[0] - nmin, n2[0] - nmin], 'green', alpha=1)
# Draw connected nodes in red
for n1 in list(Gr.nodes)[0:1]:
print(n1)
plt.scatter(n1[1] - emin, n1[0] - nmin, c='red')
plt.scatter(0 - emin, 0 - nmin, c='blue') # (0,0)
plt.scatter(emin - emin, nmin - nmin , c='green') # Lowest point
plt.xlabel('EAST')
plt.ylabel('NORTH')
plt.show()
import sys
def perform_astar(Gr, Cg, nmin=0, emin=0):
#drone_location = (-emin, -nmin, 5.0) # map coordinates
drone_location = (445.04762260615826, 315.94609723985195, 5.0)
print('Find Start node from {0}'.format(drone_location))
nearest_start = None
closest_distance = sys.float_info.max
for n in Gr.nodes:
# heuristic is the Euclidean distance:
distance = heuristic(drone_location, n)
if distance < closest_distance:
closest_distance = distance
nearest_start = n
if nearest_start == None:
print('Error while getting closest starting node')
return
print('Found starting node = {0}'.format(nearest_start))
##########
goal_location = (240.7685, 360.76114, 5.0) # map coordinates
print('Find Goal node from {0}'.format(goal_location))
nearest_goal = None
closest_distance = sys.float_info.max
for n in Gr.nodes:
# heuristic is the Euclidean distance:
distance = heuristic(goal_location, n)
if distance < closest_distance:
closest_distance = distance
nearest_goal = n
################
start = nearest_start
print('Start: ', start)
goal = nearest_goal
print('Goal: ', goal)
path, cost = a_star_graph(Gr, heuristic, start, goal)
print(len(path), path)
if len(path) == 0:
return
waypoints = [[p[0], p[1], p[2], 0] for p in path]
print("start")
fig = plt.figure(figsize=(10,10))
plt.imshow(Cg, cmap='Greys', origin='lower')
path_pairs = zip(waypoints[:-1], waypoints[1:])
for (n1, n2) in path_pairs:
plt.plot([n1[1], n2[1]], [n1[0], n2[0]], 'green')
plt.scatter(drone_location[0], drone_location[1], c='blue') # (0,0)
plt.scatter(emin - emin, nmin - nmin , c='green') # Lowest point
plt.scatter(100, 0, c='purple') # (0,0)
plt.xlabel('EAST')
plt.ylabel('NORTH')
plt.show()
def create_graph_from_voronoi(voronoi_graph, grid, k=10):
g = nx.Graph()
nodes = tuple(map(tuple, voronoi_graph.vertices))
tree = KDTree(nodes)
# Check each edge from graph.ridge_vertices for collision
for n1 in nodes:
# for each node connect try to connect to k nearest nodes
idxs = tree.query([n1], k, return_distance=False)[0]
for idx in idxs:
n2 = nodes[idx]
if n2 == n1:
continue
hit = False
cells = list(bresenham(int(n1[0]), int(n1[1]), int(n2[0]), int(n2[1])))
for c in cells:
# First check if we're off the map
if np.amin(c) < 0 or c[0] >= grid.shape[0] or c[1] >= grid.shape[1]:
hit = True
break
# Next check if we're in collision
if grid[c[0], c[1]] >= FLYING_ALTITUDE + SAFETY_DISTANCE:
hit = True
break
# If the edge does not hit on obstacle
# add it to the list
if not hit:
dist = LA.norm(np.array(n2) - np.array(n1))
g.add_edge((n1[0], n1[1], FLYING_ALTITUDE), (n2[0], n2[1], FLYING_ALTITUDE), weight=dist)
return g, tree
from planning_utils import create_grid
from scipy.spatial import Voronoi
import numpy.linalg as LA
from sklearn.neighbors import KDTree
from bresenham import bresenham
if __name__== "__main__":
test_case = 1
if test_case == 1:
print('Voronoi')
# Unit testing of functions in the file
Gr, Cg, no, eo = load_graph_from_pickle('graph.voronoi.raw.p')
print_info(Gr, Cg, no, eo)
visualize_graph(Gr, Cg)
Gr = remove_unconnected_subgraphs(Gr)
print_info(Gr, Cg, no, eo)
Gr = remove_unnecessary_nodes(Gr, Cg, FLYING_ALTITUDE+SAFETY_DISTANCE)
print_info(Gr, Cg, no, eo)
#visualize_graph(Gr, Cg)
save_graph_to_pickle(Gr, Cg, no, eo, 'graph.voronoi.p')
perform_astar(Gr, Cg, no, eo)
elif test_case == 2:
Gr, Cg, no, eo = load_graph_from_pickle('graph.voronoi.p')
print_info(Gr, Cg, no, eo)
# Plot it up!
fig = plt.figure(figsize=(10,10))
plt.imshow(Cg, origin='lower', cmap='Greys')
# Draw edges in green
#for (n1, n2) in Gr.edges:
# plt.plot([n1[1], n2[1]], [n1[0], n2[0]], 'green', alpha=1)
# Draw connected nodes in red
for n1 in Gr.nodes:
plt.scatter(n1[1], n1[0], c='red')
plt.scatter(0, 0, c='blue')
plt.xlabel('EAST')
plt.ylabel('NORTH')
plt.show()
elif test_case == 3:
filename = 'colliders.csv'
data = np.loadtxt(filename, delimiter=',', dtype='Float64', skiprows=2)
safety_distance = SAFETY_DISTANCE
print('Create grid')
Cg, centers, north_offset, east_offset = create_grid(data, FLYING_ALTITUDE, SAFETY_DISTANCE)
np_centers = np.array(centers)
print('Create Voronoi')
voronoi_graph = Voronoi(np_centers[:,:-1])
print('Create Graph')
Gr, tree = create_graph_from_voronoi(voronoi_graph, Cg)
print_info(Gr, Cg, north_offset, east_offset)
save_graph_to_pickle(Gr, Cg, north_offset, east_offset, 'graph.voronoi.raw.p')
visualize_graph(Gr, Cg)
|
[
"edupaz2@gmail.com"
] |
edupaz2@gmail.com
|
c18fa0521deea5d95377f3c2c179d339c8d9893f
|
4dee997683137d7813db0e5b60ca59184f43d156
|
/src/pipeline.py
|
ff4cf2ad7fcae254042f03ce2027daa792b5e20f
|
[] |
no_license
|
TOSUKUi/cnn-trading
|
f8699388ef536c6472f19b90c6dbe6d00f5087a5
|
3344bff5c093fe36d8a0f52027d54ad30cb8049d
|
refs/heads/develop
| 2020-05-04T09:55:24.682207
| 2020-02-22T09:29:59
| 2020-02-22T09:29:59
| 179,078,576
| 2
| 1
| null | 2020-02-08T07:28:33
| 2019-04-02T12:59:34
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 393
|
py
|
from functools import reduce
from abc import ABCMeta, abstractmethod
from typing import List
class Procedure(metaclass=ABCMeta):
@abstractmethod
def run(self, x):
pass
class PipeLine():
def execute(self):
return reduce(lambda x, y: y.run(x), self.pipeline)
def __init__(self, first, *pipeline: List[Procedure]):
self.pipeline = first + pipeline
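# --- Usage sketch (illustrative; the concrete steps are hypothetical) ---
# `first` is assumed to be a tuple holding the initial input: it is concatenated
# with the remaining procedures, and reduce() then uses it as the starting value
# that each Procedure.run() transforms in turn.
class AddOne(Procedure):
    def run(self, x):
        return x + 1

class Double(Procedure):
    def run(self, x):
        return x * 2

result = PipeLine((3,), AddOne(), Double()).execute()
print(result)  # ((3 + 1) * 2) == 8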
|
[
"am.businessmail.exp@gmail.com"
] |
am.businessmail.exp@gmail.com
|
f185325153f7d9caea871e1b341a08fa0dae1c46
|
bbbb191a51b04b8f07c000c355697bebd9da9718
|
/test.py
|
dd243fb8e9cd530eafcc8f534ad1e1ba4ed30003
|
[] |
no_license
|
1newstar/mysql-tools-python2
|
f255f62e10166677b27947530e0b3d0bfa9f1bc4
|
6ca164ece23f1a627894f880635f20390b8b937e
|
refs/heads/master
| 2020-12-03T11:17:32.307632
| 2019-12-30T11:13:48
| 2019-12-30T11:13:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 870
|
py
|
import json
myv = {}
for line in open('mysql_global_variables.sql').readlines():
line_list = map(lambda x: x.strip('"'), line.strip().split(',')[:2])
if len(line_list) == 1:
key = line_list[0]
value = None
else:
# print(line_list)
key, value = line_list
myv[key] = value
# print(json.dumps(myv, indent=2))
mys = {}
for line in open('mysql_global_status.sql').readlines():
line_list = map(lambda x: x.strip('"'), line.strip().split(',')[:2])
if len(line_list) == 1:
key = line_list[0]
value = None
else:
# print(line_list)
key, value = line_list
mys[key] = value
# print(json.dumps(mys, indent=2))
import time
def timestamp_toString(stamp):
return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(stamp))
print timestamp_toString(time.time())
print time.time()
|
[
"rgweiyaping@hotmail.com"
] |
rgweiyaping@hotmail.com
|
200e50632fe2526205a78acb9e0910e3593a30e0
|
8b4c91e45e735814b4397be3e2e928f2a8f5831c
|
/channels_presence/admin.py
|
c924d753883858a65ceb802ca761c6dec418d3e9
|
[
"MIT"
] |
permissive
|
Roang-zero1/django-channels-presence
|
008c2a121d4689db8312e06e8437d1942ba65335
|
3fbe34bdee3c41139808d880beb7f35441081be0
|
refs/heads/master
| 2021-03-02T09:22:59.444414
| 2020-03-08T17:23:30
| 2020-03-08T17:23:30
| 245,855,458
| 0
| 0
|
MIT
| 2020-03-08T17:14:08
| 2020-03-08T17:14:07
| null |
UTF-8
|
Python
| false
| false
| 104
|
py
|
from __future__ import unicode_literals
from django.contrib import admin
# Register your models here.
|
[
"cfd@media.mit.edu"
] |
cfd@media.mit.edu
|
8a73b1e4d029d79a1ab132ca6bfd4632c65a3459
|
4c4f94e5cf7cf086ea5ded67a1013835440c9abb
|
/igwn-data-checker-visualizer.py
|
93f20c64fc269ee069ee06543c056af875228299
|
[] |
no_license
|
gabrielefronze/igwn-data-checker
|
be96b6fc218e1b3578fcf224f4d24d87c4fba45b
|
401f9c05e798a3f16904adf25e7dfeae7210f286
|
refs/heads/master
| 2023-04-08T11:05:08.081099
| 2021-04-15T14:59:31
| 2021-04-15T14:59:31
| 329,659,727
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,493
|
py
|
#! /usr/bin/env python3
import os
import json
import argparse
import numpy as np
import pandas as pd
def main(json_path="output-PIC.json", normalize=False, title = None, save = None):
if save:
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import matplotlib.patches as patches
else:
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import matplotlib.patches as patches
if not title:
title = "Input file: "+os.path.basename(json_path)
paths = {}
counter = 0
valid_counter = 0
invalid_counter = 0
statuses = []
checksum_results = []
user_time_results = []
first_user_time_results = []
sys_time_results = []
first_sys_time_results = []
real_time_results = []
first_real_time_results = []
sys_MBps_results = []
file_sizes = []
data = json.load(open(os.path.abspath(json_path)))
for path, data in data.items():
avg_user_time = 0
avg_sys_time = 0
avg_real_time = 0
size = data["size"]
normalization = size/2**20 if normalize else 1
file_sizes.append(size/2**20)
results = data["results"]
print(path)
if path not in paths:
paths[path] = 1
else:
paths[path] = len(results)
counter = 0
valid_counter += 1
for i,r in enumerate(results):
statuses.append(1 if r["status"] else 0)
if r["status"]:
checksum_results.append(1 if r["checksum_status"] else 0)
real_time = r["timer"]["real"].split(":")
if i == 0:
first_user_time_results.append(r["timer"]["user"]/normalization)
first_sys_time_results.append(r["timer"]["sys"]/normalization)
first_real_time_results.append((float(real_time[0])*60+float(real_time[1]))/normalization)
else:
counter += 1
avg_user_time += r["timer"]["user"] if r["timer"]["user"]>0 else 0.01
avg_sys_time += r["timer"]["sys"] if r["timer"]["sys"]>0 else 0.01
avg_real_time += float(real_time[0])*60+float(real_time[1])
elif i == 0:
valid_counter -= 1
invalid_counter += 1
if not counter == 0:
user_time_results.append((avg_user_time/counter)/normalization)
sys_time_results.append((avg_sys_time/counter)/normalization)
sys_MBps_results.append((size/2**20)/(avg_sys_time/counter))
real_time_results.append((avg_real_time/counter)/normalization)
fig, axes = plt.subplots(nrows=5, ncols=2)
fig.suptitle(title, fontsize=30)
fig.set_size_inches(18.5, 12.5, forward=True)
ax = axes.flatten()
n_checksums = len(checksum_results)
n_entries = valid_counter
n_statuses = len(statuses)
n_tests = counter + 1
n_bins = int(max(n_entries / 2, 1))
ax[0].hist(checksum_results, 2, histtype='bar', weights=[1/n_checksums*100] * n_checksums, color='navy')
ax[0].set_facecolor("whitesmoke")
plt.sca(ax[0])
plt.xticks([.75, 1.25], ["wrong", "correct"])
ax[0].set_title("Checksum verification distribution", position=(0.5, 0.6))
ax[0].set_ylabel("percent [%]")
leg_n_entries = mpatches.Patch(color='navy', label="{} files tested\n {} times each".format(n_entries, n_tests))
plt.legend(handles=[leg_n_entries])
ax[1].hist(statuses, 2, histtype='bar', weights=[1/n_statuses*100] * n_statuses, color='blue')
ax[1].set_facecolor("whitesmoke")
plt.sca(ax[1])
plt.xticks([0.25, 0.75], ["failed", "valid"])
ax[1].set_title("Runtime failures distribution", position=(0.5, 0.6))
ax[1].set_ylabel("percent [%]")
leg_n_entries = mpatches.Patch(color='blue', label="{} files tested\n {} times each".format(n_entries+invalid_counter, n_tests))
plt.legend(handles=[leg_n_entries])
ax[2].hist(user_time_results, n_bins, histtype='bar', color='darkgreen')
ax[2].set_facecolor("whitesmoke")
plt.sca(ax[2])
ax[2].set_title("Average file access time (user)", position=(0.5, 0.6))
ax[2].set_xlabel("seconds per MB [s/MB]" if normalize else "seconds [s]")
ax[2].set_ylabel("counts")
leg_n_entries = mpatches.Patch(color='darkgreen', label="{} files tested\n {} times each".format(n_entries, n_tests - 1))
plt.legend(handles=[leg_n_entries])
ax[3].hist(first_user_time_results, n_bins, histtype='bar', color='lime')
ax[3].set_facecolor("whitesmoke")
plt.sca(ax[3])
ax[3].set_title("First file access time (user)", position=(0.5, 0.6))
ax[3].set_xlabel("seconds per MB [s/MB]" if normalize else "seconds [s]")
ax[3].set_ylabel("counts")
leg_n_entries = mpatches.Patch(color='lime', label="{} files tested".format(n_entries))
plt.legend(handles=[leg_n_entries])
user_time_xlim = [min(ax[2].get_xlim()[0], ax[3].get_xlim()[0]), max(ax[2].get_xlim()[1], ax[3].get_xlim()[1])]
ax[2].set_xlim(user_time_xlim)
ax[3].set_xlim(user_time_xlim)
ax[4].hist(sys_time_results, n_bins, histtype='bar', color='darkorange')
ax[4].set_facecolor("whitesmoke")
plt.sca(ax[4])
ax[4].set_title("Average file access time (sys)", position=(0.5, 0.6))
ax[4].set_xlabel("seconds per MB [s/MB]" if normalize else "seconds [s]")
ax[4].set_ylabel("counts")
leg_n_entries = mpatches.Patch(color='darkorange', label="{} files tested\n {} times each".format(n_entries, n_tests - 1))
plt.legend(handles=[leg_n_entries])
ax[5].hist(first_sys_time_results, n_bins, histtype='bar', color='gold')
ax[5].set_facecolor("whitesmoke")
plt.sca(ax[5])
ax[5].set_title("First file access time (sys)", position=(0.5, 0.6))
ax[5].set_xlabel("seconds per MB [s/MB]" if normalize else "seconds [s]")
ax[5].set_ylabel("counts")
leg_n_entries = mpatches.Patch(color='gold', label="{} files tested".format(n_entries))
plt.legend(handles=[leg_n_entries])
sys_time_xlim = [min(ax[4].get_xlim()[0], ax[5].get_xlim()[0]), max(ax[4].get_xlim()[1], ax[5].get_xlim()[1])]
ax[4].set_xlim(sys_time_xlim)
ax[5].set_xlim(sys_time_xlim)
ax[6].hist(real_time_results, n_bins, histtype='bar', color='maroon')
ax[6].set_facecolor("whitesmoke")
plt.sca(ax[6])
ax[6].set_title("Average file access time (real)", position=(0.5, 0.6))
ax[6].set_xlabel("seconds per MB [s/MB]" if normalize else "seconds [s]")
ax[6].set_ylabel("counts")
leg_n_entries = mpatches.Patch(color='maroon', label="{} files tested\n {} times each".format(n_entries, n_tests - 1))
plt.legend(handles=[leg_n_entries])
ax[7].hist(first_real_time_results, n_bins, histtype='bar', color='orangered')
ax[7].set_facecolor("whitesmoke")
plt.sca(ax[7])
ax[7].set_title("First file access time (real)", position=(0.5, 0.6))
ax[7].set_xlabel("seconds per MB [s/MB]" if normalize else "seconds [s]")
ax[7].set_ylabel("counts")
leg_n_entries = mpatches.Patch(color='orangered', label="{} files tested".format(n_entries))
plt.legend(handles=[leg_n_entries])
real_time_xlim = [0, max(ax[6].get_xlim()[1], ax[7].get_xlim()[1])]
rect = patches.Rectangle((0,0), ax[6].get_xlim()[1], ax[7].get_ylim()[1], linewidth=1, edgecolor='g', facecolor="#00FF0022")
rect2 = patches.Rectangle((ax[6].get_xlim()[1],0), ax[7].get_xlim()[1], ax[7].get_ylim()[1], linewidth=1, edgecolor='g', facecolor="#FF000022")
ax[7].add_patch(rect)
ax[7].add_patch(rect2)
ax[6].set_xlim(real_time_xlim)
ax[7].set_xlim(real_time_xlim)
ax[8].hist(file_sizes, n_bins, histtype='bar', color='deepskyblue')
ax[8].set_facecolor("whitesmoke")
plt.sca(ax[8])
ax[8].set_title("File sizes distribution", position=(0.5, 0.6))
ax[8].set_xlabel("MB")
ax[8].set_ylabel("counts")
leg_n_entries = mpatches.Patch(color='deepskyblue', label="{} files tested\n {} times each".format(n_entries, n_tests))
plt.legend(handles=[leg_n_entries])
ax[9].hist(sys_MBps_results, n_bins, histtype='bar', color='purple')
ax[9].set_facecolor("whitesmoke")
plt.sca(ax[9])
ax[9].set_title("File transfer speed", position=(0.5, 0.6))
ax[9].set_xlabel("Bandwidth [MB/s]")
ax[9].set_ylabel("counts")
leg_n_entries = mpatches.Patch(color='purple', label="{} files tested\n {} times each".format(n_entries, n_tests))
plt.legend(handles=[leg_n_entries])
plt.rcParams.update({'figure.autolayout': True})
fig.subplots_adjust(hspace=0.4)
plt.subplots_adjust(left=0.05, right=0.95, top=0.90, bottom=0.05)
if not save:
print("Showing")
plt.show()
else:
print("Saving")
plt.savefig('output.pdf')
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Visualize IGWN Data Checker output files')
parser.add_argument("json_path", type=str, help='Path of input JSON file.')
parser.add_argument('-t', "--title", help="Title of the output image.")
parser.add_argument('-n', "--normalized", action='store_true', help="Normalize times over file size.")
parser.add_argument('-s', "--save", action='store_true', help="Saves output as image.")
args = parser.parse_args()
main(json_path=args.json_path, normalize=args.normalized, title=args.title, save=args.save)
|
[
"sucre.91@hotmail.it"
] |
sucre.91@hotmail.it
|
9e4b54a7a538ea7deb35087bf807daf93e450126
|
523aa188102588b98cf11628ae48d77f3fad4936
|
/lcm/crazyflie_t/dxyz_compare_t.py
|
d87e115c632812faefbafcc6b146c85609d01d5e
|
[
"MIT"
] |
permissive
|
hanliumaozhi/crazyflie-tools
|
52a5d9bceb9c9d0cccf543faa27232288551937f
|
ed3ca66d4efd77c7abb321e6f8deeb564da45862
|
refs/heads/master
| 2021-01-17T03:40:13.679643
| 2015-10-02T21:40:46
| 2015-10-02T21:40:46
| 52,494,119
| 3
| 0
| null | 2016-02-25T03:31:09
| 2016-02-25T03:31:09
| null |
UTF-8
|
Python
| false
| false
| 1,983
|
py
|
"""LCM type definitions
This file automatically generated by lcm.
DO NOT MODIFY BY HAND!!!!
"""
try:
import cStringIO.StringIO as BytesIO
except ImportError:
from io import BytesIO
import struct
class dxyz_compare_t(object):
__slots__ = ["dxyzraw", "dxyzfiltered"]
def __init__(self):
self.dxyzraw = [ 0.0 for dim0 in range(3) ]
self.dxyzfiltered = [ 0.0 for dim0 in range(3) ]
def encode(self):
buf = BytesIO()
buf.write(dxyz_compare_t._get_packed_fingerprint())
self._encode_one(buf)
return buf.getvalue()
def _encode_one(self, buf):
buf.write(struct.pack('>3d', *self.dxyzraw[:3]))
buf.write(struct.pack('>3d', *self.dxyzfiltered[:3]))
def decode(data):
if hasattr(data, 'read'):
buf = data
else:
buf = BytesIO(data)
if buf.read(8) != dxyz_compare_t._get_packed_fingerprint():
raise ValueError("Decode error")
return dxyz_compare_t._decode_one(buf)
decode = staticmethod(decode)
def _decode_one(buf):
self = dxyz_compare_t()
self.dxyzraw = struct.unpack('>3d', buf.read(24))
self.dxyzfiltered = struct.unpack('>3d', buf.read(24))
return self
_decode_one = staticmethod(_decode_one)
_hash = None
def _get_hash_recursive(parents):
if dxyz_compare_t in parents: return 0
tmphash = (0xe697737567c345c1) & 0xffffffffffffffff
tmphash = (((tmphash<<1)&0xffffffffffffffff) + (tmphash>>63)) & 0xffffffffffffffff
return tmphash
_get_hash_recursive = staticmethod(_get_hash_recursive)
_packed_fingerprint = None
def _get_packed_fingerprint():
if dxyz_compare_t._packed_fingerprint is None:
dxyz_compare_t._packed_fingerprint = struct.pack(">Q", dxyz_compare_t._get_hash_recursive([]))
return dxyz_compare_t._packed_fingerprint
_get_packed_fingerprint = staticmethod(_get_packed_fingerprint)
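# --- Round-trip sketch (added for illustration; the values are arbitrary) ---
# Encode an instance and decode it back through the generated API above.
msg = dxyz_compare_t()
msg.dxyzraw = [1.0, 2.0, 3.0]
msg.dxyzfiltered = [0.9, 2.1, 2.95]
decoded = dxyz_compare_t.decode(msg.encode())
print(decoded.dxyzraw)        # (1.0, 2.0, 3.0)
print(decoded.dxyzfiltered)   # (0.9, 2.1, 2.95)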
|
[
"landry@mit.edu"
] |
landry@mit.edu
|
c070c9016f4aa57aec665e2498aafa4ba987f917
|
41dc3e18de7d1f31e1ccbbe676d6326210045588
|
/leetcode/find_smallest_letter_greater_than_target.py
|
c00bfd68cbf2161d0a51d176f4ffa1c23953b3f6
|
[] |
no_license
|
axiomiety/crashburn
|
372dcfad57a078e4caf7b22d7ae6038162cf4ffb
|
eff78ed020c1ce309b7cf6e53dd613e7d9f259ef
|
refs/heads/master
| 2023-09-01T00:53:08.969794
| 2023-08-30T11:23:32
| 2023-08-30T11:23:32
| 7,456,861
| 3
| 1
| null | 2023-02-11T10:44:01
| 2013-01-05T15:39:22
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 912
|
py
|
class Solution:
def nextGreatestLetter(self, letters: List[str], target: str) -> str:
if target < letters[0] or target > letters[-1]:
return letters[0]
def find_idx():
count = 0
start, stop = 0, len(letters)-1
while start < stop:
mid = start + (stop-start)//2
pivot = letters[mid]
if pivot == target:
return mid
elif target > pivot:
start = mid+1
else:
stop = mid-1
return start
idx = find_idx()
if letters[idx] == target:
while letters[idx%len(letters)] == target:
idx += 1
return letters[idx%len(letters)]
return letters[(idx+1)%len(letters)] if letters[idx] <= target else letters[idx%len(letters)]
|
[
"axiomiety@gmail.com"
] |
axiomiety@gmail.com
|
26ce304279c6822d477bfaf95ee90c06d0344824
|
47647705e42900dda6bdc88e17e18da9bd0cbf7e
|
/medical/views.py
|
1b636b85ee144833850429a7a349152c963ecd81
|
[] |
no_license
|
jyi468/mysite
|
1547b9ecf4cc1689ef2676d99869206fb2f30acd
|
562aaa7c26f659983b99ff562118e5ccd5f8ac45
|
refs/heads/master
| 2020-12-30T10:10:46.319828
| 2014-12-29T23:38:51
| 2014-12-29T23:38:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 517
|
py
|
#from django.shortcuts import render
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template.context import RequestContext
# Create your views here.
def index(request):
return HttpResponse("Hello, world. You're at the Medical Info index.")
def home(request):
context = RequestContext(request,
{'user': request.user})
return render_to_response('templates/admin/home.html',
context_instance=context)
|
[
"jyi468@gmail.com"
] |
jyi468@gmail.com
|
c8585bad9693dd8f16bc1d6a1549eaf476774968
|
ef034334bdb3b8cbf514d4a2ca72059bc868b942
|
/report_project/customers/migrations/0001_initial.py
|
d8f040eb64b955896dbc6716426c51b04a903dd5
|
[] |
no_license
|
sunnythakr/Sales-Report
|
5205c8049906a830d471b94dee02d0e3fd466bf7
|
b626b190ee56afaf8ca237d240499b0679935809
|
refs/heads/master
| 2023-04-22T04:29:28.398142
| 2021-05-15T16:42:00
| 2021-05-15T16:42:00
| 360,109,919
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 580
|
py
|
# Generated by Django 3.2 on 2021-04-27 18:49
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Customer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=120)),
('logo', models.ImageField(default='no_picture.png', upload_to='customers')),
],
),
]
|
[
"sunny@gmail.com"
] |
sunny@gmail.com
|
5c1d106d01784a5ebce903f0473c2a29c82ce2ea
|
065840c8456faeb37d85a7611ec7fedf84943395
|
/djangogirlsenv/bin/pygmentize
|
097c5a179b979ee4d5ada4c27f94270adad1132b
|
[] |
no_license
|
BMariscal/my-first-blog
|
63eba4223151642880c8c3e545e58839ae9cdae8
|
b9f215439437ad3068c8532d3ca8d0a338770e3e
|
refs/heads/master
| 2021-01-12T11:31:47.971422
| 2016-11-08T19:13:43
| 2016-11-08T19:13:43
| 72,943,711
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 257
|
#!/Users/briceida/djangogirls/djangogirlsenv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pygments.cmdline import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"briceidamariscal@gmail.com"
] |
briceidamariscal@gmail.com
|
|
424aeba25c97138156422124e398717277f0769c
|
a654c37e3fa3647d700b785d6ae95c2009848582
|
/算法/排序.py
|
554bd7aa7f26e2ec07755072e9e466fcc6cacd42
|
[] |
no_license
|
whnet/study_python
|
6cd47bf605b8a0d2b597520b0a55ddc6f94b4050
|
6e76637912c68ab89d05e308f1bb7bb496b02c93
|
refs/heads/master
| 2021-01-20T04:42:09.698364
| 2017-04-29T11:03:51
| 2017-04-29T11:03:51
| 89,715,072
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,509
|
py
|
# -*- coding: utf-8 -*-
'''
Orders of growth
O(1)         : constant
O(log_b n)   : logarithmic (for any base b)
O(n)         : linear
O(n log_b n) : linearithmic
O(n^2)       : quadratic
O(n^3)       : cubic
O(c^n)       : exponential (for any c)
O((log n)^c) : polylogarithmic
O(n!)        : factorial
For logarithmic orders O(log_b n), the base of the logarithm does not affect the order of
growth: changing the base is equivalent to multiplying by a constant, which leaves the
order of growth unchanged.
All exponential orders O(c^n) belong to the same family regardless of the base c
(exponential growth is so fast that exponential algorithms are only usable on small inputs).
Big-O notation is very useful when analysing the efficiency of algorithms.
For example: the time (or number of steps) needed to solve a problem of size n can be
written as T(n) = 4n^2 - 2n + 2. As n grows, the n^2 term starts to dominate and the other
terms can be ignored. For instance, at n = 500 the 4n^2 term is 1000 times larger than the
2n term, so in most situations omitting the latter barely affects the value of the
expression. Going further, compared with a term of any other order, the coefficient of the
n^2 term is also irrelevant.
'''
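# Quick numeric check of the claim above (added for illustration): at n = 500 the
# quadratic term of T(n) = 4n^2 - 2n + 2 dominates the linear term by a factor of 1000.
n = 500
print(4 * n ** 2, 2 * n, (4 * n ** 2) // (2 * n))  # 1000000 1000 1000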
l = [14,10,9,13,34,26,11,7]
# Insertion sort (note: the code below actually implements selection sort)
'''
First assume the value at list[min_index] is the smallest, then compare it with the
following values one by one. Whenever list[j] is smaller than list[min_index], min_index
is replaced by j, and comparison continues with the rest until the smallest list[j] is
found. After assigning j to min_index, l[min_index] is the smallest value seen during
that pass.
'''
def insert_sort(l):
for i in range(len(l)):
min_index = i
for j in range(i + 1, len(l)):
min = l[ min_index]
next = l[j]
if min > next:
min_index = j
tmp = l[i]
l[i] = l[min_index]
l[min_index] = tmp
print(str(l))
'''
It repeatedly walks through the sequence to be sorted, comparing two elements at a time
and swapping them if they are in the wrong order. The passes over the sequence are
repeated until no more swaps are needed, which means the sequence is fully sorted.
'''
def bub_sort(l):
# Bubble sort: compare adjacent pairs of numbers
count = len(l)
for i in range(0, count):
for j in range(i + 1, count):
if l[i] > l[j]:
l[i], l[j] = l[j], l[i]
return l
print(bub_sort(l))
|
[
"896792616@qq.com"
] |
896792616@qq.com
|
f873629a0c35d6e011a87f26f6a74458dd48718b
|
9cbb624d93f029b41401efbcfd3e36ef47a7b757
|
/PyDa_L4/functions.py
|
b66f0577575e884b2fd914359ea6e8da55f691d9
|
[] |
no_license
|
GezhinOleg/PyDa
|
48f2c8acaebf007ae29f82ec4f8b8040129d9a45
|
3b641a4196cb7f0faf74cafc53fdb7cafb7ef4f4
|
refs/heads/main
| 2023-06-28T14:50:55.715817
| 2021-07-28T08:25:11
| 2021-07-28T08:25:11
| 389,911,086
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,455
|
py
|
documents = [
{'type': 'passport', 'number': '2207 876234', 'name': 'Василий Гупкин'},
{'type': 'invoice', 'number': '11-2', 'name': 'Геннадий Покемонов'},
{'type': 'insurance', 'number': '10006', 'name': 'Аристарх Павлов'}
]
directories = {
'1': ['2207 876234', '11-2'],
'2': ['10006'],
'3': []
}
'''
p   - the user can look up a document's owner by its number
s   - the user can find out which shelf a document is stored on by its number
l   - the user can see the full information for all documents
ads - the user can add a new shelf
ds  - the user can delete an existing shelf (only if it is empty)
ad  - the user can add a new document to the data
'''
def person_document(doc_list):
result = 'Извините, введенный номер в базе отсутствует!'
doc_input = input('Введите номер документа:')
for i in doc_list:
if doc_input == i['number']:
result = i['name']
return result
def shelf_document(shelf):
number_doc = input('Введите номер документа: ')
result = 'Извините, введенный номер в базе отсутствует!'
for shelf_l, doc in shelf.items():
for j in doc:
if number_doc == j:
result = f'Лежит на полке № {shelf_l}'
return result
def list_all_document(doc_list, shelf):
summary_dictionary = {}
for doc in doc_list:
for i in shelf:
for j in shelf[i]:
if j == doc['number']:
print('№: ', doc['number'], ', тип: ', doc['type'], ', владелец: ', doc['name'], ', полка хранения: ', i, sep = '')
def add_shelf(shelf):
new_shelf = input('Введите номер полки ')
if new_shelf not in directories:
directories[new_shelf] = []
print('Полка добавлена. Текущий перечень полок:', ", ".join(map(str, directories)))
elif new_shelf in directories:
print('Такая полка уже существует. Текущий перечень полок:', ", ".join(map(str, directories)))
def delete_shelf(shelf):
del_shelf = input('Введите номер полки ')
if del_shelf in shelf:
if shelf[del_shelf] == []:
del shelf[del_shelf]
print('Полка удалена. Текущий перечень полок:', ", ".join(map(str, shelf.keys())))
elif shelf[del_shelf] != []:
print('На полке есть документа, удалите их перед удалением полки. Текущий перечень полок:', ", ".join(map(str, shelf.keys())))
else:
print('Такой полки не существует. Текущий перечень полок:', ", ".join(map(str, shelf.keys())))
def add_document(doc_list, shelf):
doc_new = {}
doc_new['number'] = input('Введите номер документа: ')
doc_new['type'] = input('Введите тип документа: ')
doc_new['name'] = input('Введите владельца документа: ')
num_shelf = input('Введите полку для хранения: ')
if num_shelf in shelf.keys():
shelf[num_shelf].append(doc_new['number'])
doc_list.append(doc_new)
print('Документ добавлен. Текущий список документов:')
list_all_document(documents, directories)
# list_all_document prints the listing itself and returns None, so it is called
# on its own line instead of being passed to print.
else:
print('Такой полки не существует. Добавьте полку командой ads.')
print(list_all_document(documents, directories))
# Only the add-document function is done. I understand how to do the remaining items,
# but I cannot bring myself to do them; the (optional) note is quite demotivating.
def main():
while True:
print('Для работы с программой введите команды: p, s, l, ads, ds, ad')
user_input = input('Введите команду: ')
if user_input == 'p':
print(person_document(documents))
elif user_input == 's':
print(shelf_document(directories))
elif user_input == 'l':
list_all_document(documents, directories)
elif user_input == 'ads':
add_shelf(directories)
elif user_input == 'ds':
delete_shelf(directories)
elif user_input == 'ad':
add_document(documents, directories)
elif user_input == 'q':
print('До свидания!')
break
main()
# Tried to dispatch the functions through a dictionary; the first two worked fine,
# but passing parameters to the functions got confusing, so I decided not to break what works.
# def main():
# while True:
# print('Для работы с программой введите команды: p, s, l, ads, ds, ad, d, m')
# user_input = input('Введите команду: ')
# command_user = {'p': 'Person_Document(documents)', 's': 'Shelf_Document(directories)', 'l': 'List_All_Document(documents, directories)',
# 'ads': add_shelf}
# if user_input == 'q':
# print('До свидания!')
# break
# elif user_input in command_user:
# command_user[user_input]()
# else:
# print('Введите правильную команду.')
#
# main()
|
[
"noreply@github.com"
] |
GezhinOleg.noreply@github.com
|
e78ca48d7fa0ca987b62b5179ff5a01ec09f3852
|
f21a71a7829f44c63bf73005fba5b460c8e61925
|
/daily/20201115-range-sum-bst..py
|
56788998584f00f9f55d148fc6e308a04ab7da56
|
[] |
no_license
|
kapppa-joe/leetcode-practice
|
2b8a14b5cf7a96a428cefdb0dd102e0a1ae82042
|
64fd7baf3543a7a32ebcbaadb39c11fcc152bf4c
|
refs/heads/master
| 2023-03-13T09:05:55.631303
| 2021-02-04T12:04:59
| 2021-02-04T12:04:59
| 286,212,703
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 730
|
py
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def rangeSumBST(self, root: TreeNode, low: int, high: int) -> int:
def dfs(node: TreeNode) -> int:
if not node:
return 0
res = 0
if low <= node.val <= high:
res += node.val
res += dfs(node.left) + dfs(node.right)
if node.val < low:
res += dfs(node.right)
elif node.val > high:
res += dfs(node.left)
return res
return dfs(root)
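# --- Usage sketch (illustrative). TreeNode follows the commented definition above;
# LeetCode predefines it, so in a standalone run it must be defined before the
# Solution class. ---
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right

# BST:        10
#           /    \
#          5      15
#         / \       \
#        3   7       18
root = TreeNode(10, TreeNode(5, TreeNode(3), TreeNode(7)), TreeNode(15, None, TreeNode(18)))
print(Solution().rangeSumBST(root, 7, 15))  # 32  (7 + 10 + 15)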
|
[
"kapppa.joe@gmail.com"
] |
kapppa.joe@gmail.com
|
523af85399326e903c1e90757b3aaaed9d19917e
|
d0b2de49ef0ccaafa378d8ca855c356232098ee6
|
/setup.py
|
d94322b6becbdbb44ea0cc845d816248286cf59a
|
[
"Apache-2.0"
] |
permissive
|
Avalanche-io/pyc4_old
|
24ffdb05dd48fd473539ec61faf06561ed47c3b2
|
b6b048a3f09dfaab89204ad857e1fef8352c9987
|
refs/heads/master
| 2022-03-22T16:31:21.047218
| 2019-09-27T20:46:55
| 2019-09-27T20:46:55
| 48,461,864
| 2
| 0
|
Apache-2.0
| 2019-09-27T20:46:56
| 2015-12-23T01:16:44
|
Python
|
UTF-8
|
Python
| false
| false
| 1,207
|
py
|
from setuptools import setup
from codecs import open
# get pip version
__version__ = __import__("pyc4").__version__
# Get the long description from the README file
with open('README.md', encoding='utf-8') as f:
long_description = f.read()
setup(
name='pyc4',
version=__version__,
description='Python module for the Cinema Content Creation Cloud frame work.',
long_description=long_description,
long_description_content_type="text/markdown",
url='https://github.com/Avalanche-io/pyc4',
download_url='https://github.com/Avalanche-io/pyc4',
license='Apache-2.0',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7',
'Operating System :: OS Independent',
'License :: OSI Approved :: Apache Software License',
],
keywords='c4',
py_modules=["pyc4"],
author='Blur Studio',
author_email='github@blur.com'
)
|
[
"mikeh@blur.com"
] |
mikeh@blur.com
|
c35738db1e3d6699fab8c72d2c29de250dc84d10
|
811b2249dfd6e863b5e58698ad1d0676059f04c3
|
/wikidata/lastname-alias.py
|
fec1a5676a8cc3b6737465c9e1fbbb8b506c827d
|
[] |
no_license
|
edoderoo/Python4Wikipedia
|
b19a37283e1ef3e713d100d74ff3bc9415e91115
|
666a8285fc2f1559c053923e4412252f5d81f30f
|
refs/heads/master
| 2022-04-29T02:21:01.097072
| 2022-04-02T13:25:07
| 2022-04-02T13:25:07
| 43,675,802
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,107
|
py
|
import pywikibot
from pywikibot import pagegenerators as pg
# for a family name (Q101352) with a prefix (de Jong, van der Velde, van de Wetering, van Willigenburg, etc.)
sqlQuery='select ?item where {?item wdt:P31 wd:Q101352}'
searchfor=['de ','van der ','van de ','van ','in het ']
someitems=['Q21494168','Q1180481','Q7913814']
lang='nl'
def add2list(list,item2add,changed):
if not(item2add in list):
list.append(item2add)
return list,True
return list,changed
def wd_sparql_query(spq):
wikidatasite=pywikibot.Site('wikidata','wikidata')
generator=pg.WikidataSPARQLPageGenerator(spq,site=wikidatasite)
for wd in generator:
try:
wd.get(get_redirect=True)
yield wd
except:
pass
def action_one_item(wd):
changed=False
alias=[]
if lang in wd.aliases:
for onealias in wd.aliases[lang]:
alias,changed =add2list(alias,onealias,changed)
changed=False
if lang in wd.labels:
label=wd.labels[lang]
for found in searchfor:
if (label[0:len(found)].lower()==found.lower()):
alias,changed=add2list(alias,label[len(found):]+' '+found,changed)
label=' ' #so it won't get another alias
if (changed):
newalias=[]
for onealias in alias:
newalias.append(onealias)
data={}
data.update({'aliases':{lang:newalias}})
wd.editEntity(data,summary=f'achternaam-alias <{newalias}>')
return(changed)
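# Worked example of the alias rule in action_one_item (added for illustration):
label_example = 'van der Velde'
prefix = 'van der '
print(label_example[len(prefix):] + ' ' + prefix)  # 'Velde van der ' (trailing space kept)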
print('Begonnen')
aantal=0
site = pywikibot.Site('wikidata','wikidata')
repo = site.data_repository()
if (False):
for item in someitems:
wd=pywikibot.ItemPage(repo,item)
wd.get()
if (action_one_item(wd)):
aantal+=1
print('x: %d: %s-%s' % (aantal,item.title(),ifany))
else:
for item in wd_sparql_query(sqlQuery):
if (action_one_item(item)):
ifany=''
if (lang in item.labels):
ifany=item.labels[lang]
print('x: %d: %s-%s' % (aantal,item.title(),ifany))
aantal+=1
#if aantal>250: break
print('Klaar')
|
[
"noreply@github.com"
] |
edoderoo.noreply@github.com
|
4c48bdaaebb994e7ab417ca88f8e06b2d83cbcf8
|
83084601900aaace157b9bbe8239c91d4d3fef5f
|
/smbportal/tracks/migrations/0004_data_migration_convert_timestamp_to_datetime.py
|
79fd7eb69a0f5b260289fce26fb4e69703ae92c8
|
[] |
no_license
|
geosolutions-it/smb-portal
|
0676d7e0009c5e30c91b6d9f934d72c9c34a1f30
|
b816f23d9ae30abdb27e11e28d42c59b065e5c66
|
refs/heads/dev
| 2023-03-31T13:22:47.738222
| 2020-10-07T12:30:28
| 2020-10-07T12:30:28
| 132,459,456
| 1
| 4
| null | 2020-10-09T15:04:09
| 2018-05-07T12:46:35
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 810
|
py
|
# Generated by Django 2.0 on 2018-07-18 21:41
import datetime as dt
from django.db import migrations
import pytz
def convert_timestamp_to_datetime(apps, schema_editor):
"""Convert a timestamp given in milliseconds to a datetime"""
collected_point_model = apps.get_model("tracks", "CollectedPoint")
for pt in collected_point_model.objects.all():
if pt.timestamp is not None:
pt.collection_date = dt.datetime.fromtimestamp(
pt.timestamp / 1000,
pytz.utc
)
pt.save()
class Migration(migrations.Migration):
dependencies = [
('tracks', '0003_collectedpoint_collection_date'),
]
operations = [
migrations.RunPython(
convert_timestamp_to_datetime, migrations.RunPython.noop)
]
|
[
"ricardo.garcia.silva@gmail.com"
] |
ricardo.garcia.silva@gmail.com
|
dde161338c0e9cd80a6a883fb86abb5cf20c666c
|
0370657ab53f3c74bdb4834c70b47068d590b4d4
|
/employee_info.py
|
7abce2406e58db2ba4a52d3b04f9f9610afc266d
|
[] |
no_license
|
Antares2k16/python_learning
|
bcac02e97cb8e943706628d1c96f852fc141848b
|
86c4c77787f99527b62f302988cb8cd5e90770a7
|
refs/heads/master
| 2020-03-30T21:15:14.776421
| 2018-11-24T00:37:36
| 2018-11-24T00:37:36
| 151,622,412
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 394
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 23 14:49:13 2018
@author: aliao
"""
class Employee():
def __init__(self, first_name, last_name, salary):
self.first_name = first_name
self.last_name = last_name
self.salary = salary
def give_raise(self, boost=5000):
self.boost = boost
self.salary += self.boost
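# --- Usage sketch (names and figures are illustrative) ---
emp = Employee('Ada', 'Lovelace', 50000)
emp.give_raise()         # default boost of 5000
print(emp.salary)        # 55000
emp.give_raise(10000)
print(emp.salary)        # 65000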
|
[
"aaron.liao17@gmail.com"
] |
aaron.liao17@gmail.com
|
140d94bdfffa9d83c2df215e3a46e7ec06dd7446
|
4dfec060e0f7476e00d1fea3b77adc888feb9dee
|
/scripts/was/tpvlogging.py
|
ebd635d31a815992ec61419dc75ab5217d2e744e
|
[
"Apache-2.0"
] |
permissive
|
xguitian/problemdetermination
|
bf8ca33541898cf6a4db129ff0e0bc6fac8c60e1
|
a723809884a1bdaa5a9ffcd7305d3846faee5e32
|
refs/heads/master
| 2021-01-11T14:23:11.282217
| 2016-12-28T21:22:25
| 2016-12-28T21:22:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,041
|
py
|
# Start, stop, query, or configure TPV logging on a set of servers
# Example: wsadmin -username wsadmin -password wsadmin -lang jython -f tpvlogging.py -userprefs wsadmin -action start -server server1
def usage():
print "usage: wsadmin -lang jython -f tpvlogging.py -action [start|stop|list|setlevel] -userprefs USER [-node NODE] [-server SERVER] [-pmilevel NEWLEVEL]"
print " -userprefs is required and you can just pass in the same user as -username for wsadmin, or any name otherwise"
print " -pmilevel is only used with -action setlevel. Valid values are none, basic, extended, all"
sys.exit()
import sys
import com.ibm.ws.tpv.engine.UserPreferences as UserPreferences
import com.ibm.ws.tpv.engine.utils.ServerBean as ServerBean
import jarray
import javax.management as mgmt
sType = "APPLICATION_SERVER"
action = "start"
targetNode = ""
targetApplicationServer = ""
user = ""
filename = "tpv"
duration = 300000000
fileSize = 10485760
numFiles = 20
outputType = "bin" # or "xml"
bufferSize = 40
pmilevel = "extended" # only if -action setlevel
help = 0
refreshRate = 30
affectedCount = 0
verbose = 0
l = len(sys.argv)
i = 0
while i < l:
arg = sys.argv[i]
if arg == "-help" or arg == "-h" or arg == "-usage" or arg == "-?":
help = 1
if arg == "-action":
action = sys.argv[i + 1]
if arg == "-node":
targetNode = sys.argv[i + 1]
if arg == "-server":
targetApplicationServer = sys.argv[i + 1]
if arg == "-userprefs":
user = sys.argv[i + 1]
if arg == "-filename":
filename = sys.argv[i + 1]
if arg == "-duration":
duration = int(sys.argv[i + 1])
if arg == "-filesize":
fileSize = int(sys.argv[i + 1])
if arg == "-numfiles":
numFiles = int(sys.argv[i + 1])
if arg == "-buffersize":
bufferSize = int(sys.argv[i + 1])
if arg == "-refreshrate":
refreshRate = int(sys.argv[i + 1])
if arg == "-outputtype":
outputType = sys.argv[i + 1]
if arg == "-pmilevel":
pmilevel = sys.argv[i + 1]
if arg == "-verbose":
verbose = 1
i = i + 1
if help == 1:
usage()
if len(user) == 0:
print ""
print "ERROR: -userprefs must be specified (see usage below)"
print ""
usage()
def getExceptionText(typ, value, tb):
value = `value`
sd = `tb.dumpStack()`
sd = sd.replace("\\\\","/")
i = sd.rfind(" File ")
j = sd.rfind(", line ")
k = sd.rfind(", in ")
locn = ""
if(i>0 and j>0 and k>0):
file = sd[i+7:j]
line = sd[j+7:k]
func = sd[k+4:-3]
locn = "Function="+func+" Line="+line+" File="+file
return value+" "+locn
def convertToList( inlist ):
outlist = []
clist = None
if (len(inlist) > 0):
if (inlist[0] == '[' and inlist[len(inlist) - 1] == ']'):
if (inlist[1] == "\"" and inlist[len(inlist)-2] == "\""):
clist = inlist[1:len(inlist) -1].split(")\" ")
else:
clist = inlist[1:len(inlist) - 1].split(" ")
else:
clist = inlist.split(java.lang.System.getProperty("line.separator"))
if clist != None:
for elem in clist:
elem = elem.rstrip();
if (len(elem) > 0):
if (elem[0] == "\"" and elem[len(elem) -1] != "\""):
elem = elem+")\""
outlist.append(elem)
return outlist
def listNodes():
nodes = AdminConfig.list("Node")
nodeList = convertToList(nodes)
return nodeList
def listServers(serverType="", nodeName=""):
optionalParamList = []
if (len(serverType) > 0):
optionalParamList = ['-serverType', serverType]
if (len(nodeName) > 0):
node = AdminConfig.getid("/Node:" +nodeName+"/")
optionalParamList = optionalParamList + ['-nodeName', nodeName]
servers = AdminTask.listServers(optionalParamList)
servers = convertToList(servers)
newservers = []
for aServer in servers:
sname = aServer[0:aServer.find("(")]
nname = aServer[aServer.find("nodes/")+6:aServer.find("servers/")-1]
sid = AdminConfig.getid("/Node:"+nname+"/Server:"+sname)
if (newservers.count(sid) <= 0):
newservers.append(sid)
return newservers
print "Action: " + action
print "User: " + user
print "Node: " + targetNode
print "Server: " + targetApplicationServer
print "File name: " + filename
print "Duration: " + str(duration)
print "File Size: " + str(fileSize)
print "Historical Files: " + str(numFiles)
print "Output type: " + outputType
print "Refresh Rate: " + str(refreshRate)
nodeList = listNodes()
for nodeObject in nodeList:
nodeName = nodeObject.split("(")[0]
if len(targetNode) > 0 and targetNode.lower() != nodeName.lower():
print "Skipping node " + nodeName + " because it did not match targetNode"
continue
print ""
print "Processing node: " + nodeName
try:
# build list of Application Servers in the Node
serverList = listServers(sType,nodeName)
except:
typ, val, tb = sys.exc_info()
value = `val`
sd = `tb.dumpStack()`
sd = sd.replace("\\\\","/")
print "Could not process node. Probably the DMGR (which is ok to skip)? Continuing with the other nodes... " + value + " " + sd
continue
if verbose:
print "Number of servers: " + str(len(serverList))
for serverObject in serverList:
serverName = serverObject.split("(")[0]
if len(targetApplicationServer) > 0 and targetApplicationServer.lower() != serverName.lower():
if verbose:
print "Skipping server " + serverName + " (node " + nodeName + ")"
continue
prefs = UserPreferences()
prefs.setServerName(serverName)
prefs.setNodeName(nodeName)
prefs.setLoggingDuration(duration)
prefs.setLogFileSize(fileSize)
prefs.setNumLogFiles(numFiles)
prefs.setTpvLogFormat(outputType)
prefs.setLogFileName(filename)
prefs.setBufferSize(bufferSize)
prefs.setUserId(user)
prefs.setRefreshRate(refreshRate)
params = [prefs]
sig = ["com.ibm.ws.tpv.engine.UserPreferences"]
target = "node=" + nodeName
name = AdminControl.completeObjectName("type=TivoliPerfEngine," + target + ",*")
mbeanObjectName = mgmt.ObjectName(name)
display = nodeName + "\\" + serverName
if action == "start":
print "Calling TivoliPerfEngine.monitorServer on " + display
AdminControl.invoke_jmx(mbeanObjectName, "monitorServer", params, sig)
print "Calling TivoliPerfEngine.startLogging on " + display
AdminControl.invoke_jmx(mbeanObjectName, "startLogging", params, sig)
affectedCount = affectedCount + 1
elif action == "stop":
print "Calling TivoliPerfEngine.stopLogging on " + display
AdminControl.invoke_jmx(mbeanObjectName, "stopLogging", params, sig)
print "Calling TivoliPerfEngine.disableServer on " + display
AdminControl.invoke_jmx(mbeanObjectName, "disableServer", params, sig)
affectedCount = affectedCount + 1
elif action == "list":
print "Monitored Servers (by " + user + ")"
print "======================"
servers = AdminControl.invoke(name, "getMonitoredServers", user)
if len(servers) > 0:
isLoggingSig = ["com.ibm.ws.tpv.engine.utils.ServerBean"]
for server in servers.split("\n"):
pieces = server.split(".")
bean = ServerBean(pieces[0], pieces[1])
isLoggingParams = [bean]
res = AdminControl.invoke_jmx(mbeanObjectName, "isServerLogging", isLoggingParams, isLoggingSig)
perftarget = "node=" + nodeName + ",process=" + pieces[1]
perfname = AdminControl.completeObjectName("type=Perf," + perftarget + ",*")
print server + " ; Logging=" + str(res) + " ; Level=" + AdminControl.invoke(perfname, "getStatisticSet")
break # otherwise we'll do the list for each server in the node -- TODO break outter loop too?
elif action == "setlevel":
target = target + ",process=" + serverName
perfname = AdminControl.completeObjectName("type=Perf," + target + ",*")
# none, basic, extended, all, custom
print "Setting PMI level to " + pmilevel + " on " + serverName
AdminControl.invoke(perfname, "setStatisticSet", pmilevel)
AdminControl.invoke(perfname, "savePMIConfiguration")
affectedCount = affectedCount + 1
elif action == "debug":
print "Debug"
else:
print "Unknown action " + action
print ""
print "Script finished. " + str(affectedCount) + " servers affected."
|
[
"kevin.grigorenko@us.ibm.com"
] |
kevin.grigorenko@us.ibm.com
|
1e581a2c8ebbc7ab2d1173fe0082025ee8f6bbf0
|
2a54e8d6ed124c64abb9e075cc5524bb859ba0fa
|
/.history/1-Python-Basics/20-list-method_20200413041837.py
|
f8e3193ec4cc1aa4338efa0725c097663d2321fa
|
[] |
no_license
|
CaptainStorm21/Python-Foundation
|
01b5fbaf7a913506518cf22e0339dd948e65cea1
|
a385adeda74f43dd7fb2d99d326b0be23db25024
|
refs/heads/master
| 2021-05-23T01:29:18.885239
| 2020-04-23T19:18:06
| 2020-04-23T19:18:06
| 253,171,611
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 679
|
py
|
basket = [21, 12, 33, 35, 99]
print(basket)
print(len(basket))
# 33 gets popped because it is at index 2 of the list
print(basket.pop(2))
print(basket)
#extend
basket1 = [1000, 2000, 3000]
print(basket.extend(basket1))
print(basket)
#append - last to the list
print(basket.append(700))
print(basket)
#index
print(basket.index(21))
print(basket)
basket.sort()
print(basket)
#insert
print(basket.insert(3, 'new'))
print(basket)
#look up
forest = ['trees', 'bush', 'mushrooms', 'berries' ]
#reverse
forest.reverse()
print(forest)
#sorted
print(sorted(forest))
#false
print ('x' in forest)
#true
print ('trees' in forest)
#true
print ('i' in 'I love forest rain')
#1
print (forest.count('trees'))
|
[
"tikana4@yahoo.com"
] |
tikana4@yahoo.com
|
29a18b4136cc087bb6bd975aaf093978e9230c4e
|
0a6d2f67e5bef6608ab9d2b9a31de776020c501f
|
/oldstyle.py
|
d948ca989f293bb90a88a3031416f5ea466e8f41
|
[] |
no_license
|
lzwscu2/Sentiment-Analysis
|
2cc519dfc618a726ccff49a2c8639f0f6a1b72a8
|
05f73f4b1721d9bc365431e99e90bfcfe350a227
|
refs/heads/master
| 2020-04-05T08:40:39.305343
| 2018-11-06T11:07:22
| 2018-11-06T11:07:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,628
|
py
|
# Version python3.6
# -*- coding: utf-8 -*-
# @Time : 2018/10/16 8:43 PM
# @Author : zenRRan
# @Email : zenrran@qq.com
# @File : oldstyle.py
# @Software: PyCharm Community Edition
import torch
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
import utils.Reader as Reader
from models.LSTM import LSTM as biLSTM
import random
from utils.Evaluate import Eval
from utils.Common import unk_key
from utils.Common import padding_key
import collections
class Labeler:
def __init__(self):
self.HyperParams = HyperParams()
self.word_stat_dic = collections.OrderedDict()
self.label_stat_dic = collections.OrderedDict()
self.topic_stat_dic = collections.OrderedDict()
if self.HyperParams.using_English_data:
self.topics = English_topics
else:
self.topics = Chinese_topics
self.padID = 0
self.unkID = 0
def createAlphabet(self, text):
print("Creating Alphabet......")
for line in text:
for word in line[:-2]:
if word not in self.word_stat_dic:
self.word_stat_dic[word] = 1
else:
self.word_stat_dic[word] += 1
if line[-1] not in self.label_stat_dic:
self.label_stat_dic[line[-1]] = 1
else:
self.label_stat_dic[line[-1]] += 1
for line in self.topics:
line = line.strip().split()
self.HyperParams.wordAlpha.from_string(unk_key)
self.HyperParams.wordAlpha.from_string(padding_key)
self.word_stat_dic[unk_key] = self.HyperParams.wordCutOff + 1
self.word_stat_dic[padding_key] = self.HyperParams.wordCutOff + 1
self.HyperParams.wordAlpha.initial(self.word_stat_dic, self.HyperParams.wordCutOff)
self.HyperParams.labelAlpha.initial(self.label_stat_dic)
self.padID = self.HyperParams.wordAlpha.from_string(padding_key)
self.unkID = self.HyperParams.wordAlpha.from_string(unk_key)
self.HyperParams.wordNum = self.HyperParams.wordAlpha.m_size + 1
self.HyperParams.labelSize = self.HyperParams.labelAlpha.m_size
print("Created over")
# print("wordNum: ", self.HyperParams.wordNum)
# print("label: ", self.HyperParams.labelSize)
def seq2id(self, seqs):
idList = []
maxLen = 0
for seq in seqs:
maxLen = max(maxLen, len(seq))
for seq in seqs:
id = []
for word in seq:
degit = self.HyperParams.wordAlpha.from_string(word)
if degit >= 0:
id.append(degit)
else:
id.append(self.unkID)
for _ in range(maxLen-len(seq)):
id.append(self.padID)
idList.append(id)
return idList
def label2id(self, labels):
idList = []
for label in labels:
id = self.HyperParams.labelAlpha.from_string(label)
if id != -1:
idList.append(id)
else:
print("Wrong: label2id id = -1!")
return []
return idList
def processingRawStanceData(self, textList):
topics = []
texts = []
labels = []
for line in textList:
if line[0] == self.topics[0]:
topics.append([0])
texts.append(line[1:-1])
labels.append(line[-1])
elif " ".join(line[:2]) == self.topics[1]:
topics.append([1])
texts.append(line[2:-1])
labels.append(line[-1])
elif " ".join(line[:2]) == self.topics[2]:
topics.append([2])
texts.append(line[2:-1])
labels.append(line[-1])
elif " ".join(line[:3]) == self.topics[3]:
topics.append([3])
texts.append(line[3:-1])
labels.append(line[-1])
elif " ".join(line[:6]) == self.topics[4]:
topics.append([4])
texts.append(line[6:-1])
labels.append(line[-1])
else:
return -1
return topics, texts, labels
def cutSentFromText(self, text):
newText = []
for line in text:
newText.append(line[:self.HyperParams.setSentlen])
return newText
def train(self, trainFile, devFile, testFile):
readerTrain = Reader.reader(trainFile)
readerDev = Reader.reader(devFile)
readerTest = Reader.reader(testFile)
sentsTrain = readerTrain.getWholeText()
sentsDev = readerDev.getWholeText()
sentsTest = readerTest.getWholeText()
sentsTrain = self.cutSentFromText(sentsTrain)
sentsDev = self.cutSentFromText(sentsDev)
sentsTest = self.cutSentFromText(sentsTest)
self.HyperParams.trainLen = len(sentsTrain)
self.HyperParams.devLen = len(sentsDev)
self.HyperParams.testLen = len(sentsTest)
self.createAlphabet(sentsTrain+sentsDev)
self.HyperParams.topicSize = len(self.topics)
args = self.HyperParams.args()
LearningRate = self.HyperParams.learningRate
Steps = self.HyperParams.Steps
model = biLSTM.Model(self.HyperParams)
        Optimizer = optim.Adam(model.parameters(), lr=LearningRate)
def accuracy(model, sents):
pred_right_num_idx = 0
pred_num_idx = 1
gold_num_idx = 2
evalList = [[0, 0, 0] for _ in range(self.HyperParams.labelSize)]
# for sent in sents:
topic, text, label = self.processingRawStanceData(sents)
text = self.seq2id(text)
label = self.label2id(label)
topic = Variable(torch.LongTensor(topic))
text = Variable(torch.LongTensor(text))
label = Variable(torch.LongTensor(label))
Y = model(topic, text)
C = (torch.max(Y, 1)[1].view(label.size()).data == label.data).sum()
pred_list = torch.max(Y, 1)[1].view(label.size()).data.tolist()
label_list = label.data.tolist()
for i in range(len(evalList)):
for j in range(len(label_list)):
if label_list[j] == i:
evalList[i][gold_num_idx] += 1
if label_list[j] == pred_list[j]:
evalList[i][pred_right_num_idx] += 1
if pred_list[j] == i:
evalList[i][pred_num_idx] += 1
P_R_F1_list = [Eval(pred_right_num=evalList[i][pred_right_num_idx],
pred_num=evalList[i][pred_num_idx],
gold_num=evalList[i][gold_num_idx]).P_R_F1
for i in range(len(evalList))]
return float(C)/len(sents)*100, C, len(sents), P_R_F1_list
def getTextBatchList(text, batch):
textBatchlist = []
textBatchNum = len(text) // batch
if len(text) % batch != 0:
textBatchNum += 1
if textBatchNum - 1 < 0:
print("wrong: func getTextBatchList's text's length is 0!!!")
return []
end = 0
for i in range(textBatchNum-1):
begin = end
end += batch
textBatchlist.append(text[begin:end])
textBatchlist.append(text[end:len(text)])
return textBatchlist
file = open(self.HyperParams.writeFileName, 'a+')
file.write(args)
file.close()
sentsTrain = sentsTrain
sentsDev = sentsDev
sentsTest = sentsTest
batchSize = self.HyperParams.batchSize
for step in range(Steps):
file = open(self.HyperParams.writeFileName, 'a+')
totalLoss = torch.Tensor([0])
cnt = 0
trainCorrect = 0
random.shuffle(sentsTrain)
textBatchList = getTextBatchList(sentsTrain, batchSize)
for batch in textBatchList:
# print(batch.size())
model.train()
Optimizer.zero_grad()
topic, text, label = self.processingRawStanceData(batch)
text = self.seq2id(text)
label = self.label2id(label)
topic = Variable(torch.LongTensor(topic))
text = Variable(torch.LongTensor(text))
label = Variable(torch.LongTensor(label))
Y = model(topic, text)
Loss = F.cross_entropy(Y, label)
Loss.backward()
#torch.nn.utils.clip_grad_norm(model.parameters(), 10)
Optimizer.step()
cnt += 1
if cnt % 500 == 0:
print(cnt)
totalLoss += Loss.data
trainCorrect += (torch.max(Y, 1)[1].view(label.size()).data == label.data).sum()
totalLoss /= len(sentsTrain)
TrainAcc = float(trainCorrect)/len(sentsTrain) * 100
FAVOR_index = self.HyperParams.labelAlpha.string2id["favor"]
AGAINST_index = self.HyperParams.labelAlpha.string2id["against"]
DevAcc, DevCorrect, DevNum, P_R_F1_dev_list = accuracy(model, sentsDev)
TestAcc, TestCorrect, TestNum, P_R_F1_test_list = accuracy(model, sentsTest)
dev_mean_F1 = (P_R_F1_dev_list[FAVOR_index][2] + P_R_F1_dev_list[AGAINST_index][2]) / 2
test_mean_F1 = (P_R_F1_test_list[FAVOR_index][2] + P_R_F1_test_list[AGAINST_index][2]) / 2
output = "Step: {} - loss: {:.6f} Train acc: {:.4f}%{}/{} Dev acc: {:.4f}%{}/{} Test acc: {:.4f}%{}/{} F1={:.4f}".format(step,
totalLoss.numpy()[0],
TrainAcc,
trainCorrect,
len(sentsTrain),
DevAcc,
DevCorrect,
int(DevNum),
TestAcc,
TestCorrect,
int(TestNum),
test_mean_F1)
print(output)
file.write(output+"\n")
file.close()
l = Labeler()
l.train(l.HyperParams.trainFile, l.HyperParams.devFile, l.HyperParams.testFile)
|
[
"824203828@qq.com"
] |
824203828@qq.com
|
5e2cc0f993c0e65ef76d296a291ea28374c7c3c1
|
d7629239de1924d5d335f324952ecca9da6bb82b
|
/gomoku/pg-keras/environment.py
|
367e5904d6d7d75a2eb7b26b9d2bbc9171ffc542
|
[] |
no_license
|
lacoonn/reinforcement-learning-study
|
f70b4c757196a7e69d84051c1b8cbd222c77e732
|
c3faf322aaaa8fbd810c37b3281e502fc0b81800
|
refs/heads/master
| 2020-03-10T00:48:43.610465
| 2018-05-20T07:50:49
| 2018-05-20T07:50:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,359
|
py
|
'''
Gomoku game environment
'''
from board import Board
import numpy as np
import random
import math
import os
import sys
import time
#------------------------------------------------------------
# Environment
#------------------------------------------------------------
class Env(object):
def __init__(self):
self.grid_size = 10
self.state_size = self.grid_size * self.grid_size
        # board is used only internally by this object to process gomoku moves
self.board = Board(self.grid_size)
        # the object's external API is exposed only through state
self.state = np.zeros(self.state_size, dtype=np.uint8)
#--------------------------------
    # Reset
#--------------------------------
def reset(self):
self.board = Board(self.grid_size)
self.state = np.zeros(self.state_size, dtype=np.uint8)
return self.state
#--------------------------------
    # Get the current state
#--------------------------------
def get_state(self):
return np.reshape(self.state, (1, self.state_size))
#--------------------------------
    # Invert the board colours
#--------------------------------
def inverse(self):
BLACK = 1
WHITE = 2
self.board.inverse()
for i in range(self.state_size):
if(self.state[i] == BLACK):
self.state[i] = WHITE
elif(self.state[i] == WHITE):
self.state[i] = BLACK
#--------------------------------
    # Get the current board
#--------------------------------
def get_board(self):
return self.board
#--------------------------------
    # Get the current turn
#--------------------------------
def get_turn(self):
return self.board.turn
#--------------------------------
    # Apply an action to the state
#--------------------------------
def update_state(self, player, action):
if self.state[action] == 0:
            # apply to the state
self.state[action] = player
            # apply to the board
x = int(action / self.grid_size)
y = int(action % self.grid_size)
self.board.put_value(player, x, y)
        # return
return self.state
#--------------------------------
    # Apply an action to the board
#--------------------------------
def update_board(self, player, x, y):
if self.board.get_value(x, y) == 0:
            # apply to the board
self.board.put_value(player, x, y)
            # apply to the state
action = x * self.grid_size + y
self.state[action] = player
        # return
return self.board
#------------------------------------------------------------
    # Get a random value
#------------------------------------------------------------
def randf(self, s, e):
return (float(random.randrange(0, (e - s) * 9999)) / 10000) + s
#------------------------------------------------------------
#------------------------------------------------------------
    # Print the board
#------------------------------------------------------------
def draw_board(self):
self.board.draw()
#------------------------------------------------------------
#--------------------------------
    # Check whether the game is over
#--------------------------------
def is_gameover(self, player):
if self.board.turn >= self.state_size:
#return True
return self.board.finished
else:
return self.board.finished
#--------------------------------
    # Execute the action and return the result
#--------------------------------
def step(self, player, action):
'''
args:
player
action
return:
            next_state : the state after the action has been applied
            reward : the reward for the action
            done : whether the game has ended
'''
x = int(action / self.grid_size)
y = int(action % self.grid_size)
        # placing a stone on an empty cell proceeds normally
if self.board.get_value(x, y) == 0:
next_state = self.update_state(player, action)
done = self.is_gameover(player)
            # the game has been decided
if done == True:
reward = 100 / self.get_turn()
            # the game has not been decided yet
else:
                # count the empty cells
empty_space = 0
for i in range(100):
if self.state[i] == 0:
empty_space += 1
                # no empty cells left, so the game ends
if empty_space == 0:
done = True
                # there are still empty cells
else:
if self.board.is_near(1, x, y):
reward = 0.1
else:
reward = 0.0
        # placing a stone on an occupied cell gets a -1 reward
else:
self.board.turn += 1
next_state = self.state
done = False
reward = -1
return next_state, reward, done
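# Illustrative usage sketch (assumes the local board.Board module imported above is available);
# player 1 places a stone at cell 55 of the flattened 10x10 grid.
if __name__ == "__main__":
    env = Env()
    state = env.reset()
    next_state, reward, done = env.step(1, 55)
    env.draw_board()
    print(reward, done)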
|
[
"teakan7179@gmail.com"
] |
teakan7179@gmail.com
|
3b1aeea7216c1fb7459ceb7745fb791f3be2099c
|
8a97a2a39547ce9a8df4bf0addebb22c3148d64a
|
/runoff.py
|
7285cb83ed01d8e4d5013d9a74f698441101b234
|
[] |
no_license
|
yashwant-nagarjuna/Reflections
|
a9e51d64b67245c2bad6b44096c97c66d5e639c5
|
25894b121fc3684e8530505f03fc8c97264bd951
|
refs/heads/master
| 2021-09-01T04:11:50.544660
| 2017-12-24T17:42:57
| 2017-12-24T17:42:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,232
|
py
|
# def runoff(voters):
# l = {}
# key = []
# for i in range(len(voters)):
# key.append(voters[i][0])
# keys = set(key)
# l = dict.fromkeys(keys, 0)
# for i in range(len(voters)):
# l[voters[i][0]] += 1
# print(l)
# while True:
# max_votes = max(l.values(), default=0)
# min_votes = min(l.values(), default=0)
# if l == {}:
# return None
# break
# elif max_votes >= (sum(l.values()) * 0.5):
# for k in l:
# if l[k] == max_votes:
# return k
# break
# else:
# for k in list(l):
# if l[k] == min_votes:
# del l[k]
# result = runoff([["a", "c", "d", "e", "b"],
# ["e", "b", "d", "c", "a"],
# ["d", "e", "c", "a", "b"],
# ["c", "e", "d", "b", "a"],
# ["b", "e", "a", "c", "d"]])
# print(result)
def runoff(voters):
l = {}
    key = []
for i in range(len(voters)):
key.append(voters[i][0])
keys = set(key)
l = dict.fromkeys(keys, 0)
for i in range(len(voters)):
l[voters[i][0]] += 1
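    # Sketch of the remaining elimination loop, ported from the commented-out version above.
    # Like that version, it does not redistribute eliminated votes to later preferences.
    while True:
        if l == {}:
            return None
        max_votes = max(l.values())
        min_votes = min(l.values())
        if max_votes >= sum(l.values()) * 0.5:
            for k in l:
                if l[k] == max_votes:
                    return k
        else:
            for k in list(l):
                if l[k] == min_votes:
                    del l[k]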
|
[
"nagarjuna.yashwanth1@gmail.com"
] |
nagarjuna.yashwanth1@gmail.com
|
86826058b9fa6860194735fd27cce74162b0ef15
|
f15860a73bd450ae8e3582a8a0db8c1d0192fb69
|
/analysis/0_retiles/6_mask_albers_tiles.py
|
d592982c6b7701bdf6d158ac2981138a2c755ac3
|
[] |
no_license
|
fykx/forest
|
2472eceba0515cba272d0fc747bef30126f143d3
|
ac97083bfc4d550c978b671fdb03b00e67534568
|
refs/heads/main
| 2023-08-01T00:55:17.163246
| 2021-09-13T01:56:17
| 2021-09-13T01:56:17
| 404,992,514
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,863
|
py
|
def read_tiles(tasks):
from osgeo import gdal,ogr,osr
tasks_ds = ogr.Open(tasks, 0)
tasks_lyr = tasks_ds.GetLayer(0)
tiles = []
for tile_feat in tasks_lyr:
tiles.append(tile_feat.GetField('tag'))
del tasks_ds
return tiles
def listdatas(pathin):
import os
a = []
datas = os.listdir(pathin)
for i in datas:
if i[-4:] == '.tif':
fn_i = pathin + '/' + i
a.append(fn_i)
return a
def generate_mask(tile, pathin, pathout):
from osgeo import gdal,gdalconst,ogr,osr
import numpy as np
pathin_tile = pathin + '/' + tile[0:4] + '/' + tile[-4:] + '/' + tile
datas = listdatas(pathin_tile)
in_ds_para = gdal.Open(datas[0])
    in_band_para = in_ds_para.GetRasterBand(1)  # band indexing starts at 1
in_array_para = in_band_para.ReadAsArray()
    xsize_para = in_band_para.XSize  # number of columns
    ysize_para = in_band_para.YSize  # number of rows
nodata_para = in_band_para.GetNoDataValue()
    # create the output dataset
gtiff_driver = gdal.GetDriverByName('GTiff')
out_ds = gtiff_driver.Create(pathout + '/' + tile + '_mask.tif', xsize_para, ysize_para, 1, in_band_para.DataType)
out_ds.SetProjection(in_ds_para.GetProjection())
out_ds.SetGeoTransform(in_ds_para.GetGeoTransform())
del in_ds_para
datas_list = []
for data in datas:
in_ds = gdal.Open(data)
        in_band = in_ds.GetRasterBand(1)  # band indexing starts at 1
in_array = in_band.ReadAsArray()
datas_list.append(in_array)
del in_ds
datas_narray = np.array(datas_list)
    # build the output array
mask = np.zeros(shape=(ysize_para, xsize_para))
    for x in range(xsize_para):  # iterate over columns
        for y in range(ysize_para):  # iterate over rows
value = 0
threshold_nodata = datas_narray[:,y,x].tolist().count(nodata_para)
if threshold_nodata > 5:
value = 1
mask[y, x] = value
out_band = out_ds.GetRasterBand(1)
out_band.FlushCache()
out_band.WriteArray(mask)
out_band.SetNoDataValue(nodata_para)
out_band.ComputeStatistics(False)
return
def divide(datas, n):
    '''Split the work items across MPI processes'''
mpi_datas = {}
step = len(datas)//n
for i in range(n):
if i < n-1:
mpi_data = datas[i*step:(i+1)*step]
mpi_datas[i] = mpi_data
else:
mpi_data = datas[i*step:]
mpi_datas[i] = mpi_data
j = 0
while len(mpi_datas[n-1]) > step and j < n-1:
mpi_datas[j].append(mpi_datas[n-1][-1])
mpi_datas[n-1].remove(mpi_datas[n-1][-1])
j = j + 1
mpi_datas_out = []
for mpi_data_out in mpi_datas.values():
mpi_datas_out.append(mpi_data_out)
return mpi_datas_out
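# Worked example of divide(), traced by hand: splitting 10 work items across 3 MPI ranks
#   >>> divide(list(range(10)), 3)
#   [[0, 1, 2, 9], [3, 4, 5], [6, 7, 8]]
# i.e. the overflow of the last chunk is handed back to the earlier ranks.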
def main():
import mpi4py.MPI as MPI
comm = MPI.COMM_WORLD
comm_rank = comm.Get_rank()
comm_size = comm.Get_size()
import random
import os
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-ta', '--tasks_albers', type=str, help='tasks_albers', required=True)# tasks_albers
    parser.add_argument('-i', '--input', type=str, help='input', required=True)  # input path
    parser.add_argument('-o', '--output', type=str, help='output', required=True)  # output path
args = parser.parse_args()
if comm_rank == 0:
tiles = read_tiles(args.tasks_albers)
random.shuffle(tiles)
mpi_datas = divide(tiles, comm_size)
else:
tiles = None
mpi_datas = None
mpi_datas_divide = comm.scatter(mpi_datas, root=0)
if os.path.isdir(args.output):
pass
else:
try:
os.makedirs(args.output)
except:
pass
for data in mpi_datas_divide:
generate_mask(data, args.input, args.output)
return
if __name__ == "__main__":
main()
|
[
"noreply@github.com"
] |
fykx.noreply@github.com
|
1045f9c0042dc15d576f50a62617f40d418ada9a
|
959971fa11eff2fc8dad6df15729ecfc90f2e289
|
/tests/test_range.py
|
5ab90b53ba1e5947e399717caa1b338d89ae3b4a
|
[
"MIT"
] |
permissive
|
drykovanov/poker
|
cf15d9e9efa9f9ba4564e17d84a54a7c50f5ed64
|
9ab1db7de92a1138d14f538a83894bfab27a6f1d
|
refs/heads/master
| 2021-04-12T05:18:25.001758
| 2014-10-16T08:19:55
| 2014-10-16T08:19:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,303
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import, division, print_function
import pickle
from pytest import raises, mark
from poker.card import Suit, Card
from poker.hand import Hand, Combo, Range, PAIR_HANDS
# from worse to best (suit matter)
DEUCE_COMBOS = (
Combo('2d2c'), Combo('2h2c'), Combo('2h2d'),
Combo('2s2c'), Combo('2s2d'), Combo('2s2h')
)
THREE_COMBOS = (
Combo('3d3c'), Combo('3h3c'), Combo('3h3d'),
Combo('3s3c'), Combo('3s3d'), Combo('3s3h')
)
# from worse to best (suit matter)
TEN_COMBOs = (
Combo('TdTc'), Combo('ThTc'), Combo('ThTd'),
Combo('TsTc'), Combo('TsTd'), Combo('TsTh')
)
class TestHandsResultsAfterParse:
def test_pairs_simple(self):
assert Range('22').hands == (Hand('22'),)
assert Range('22').combos == DEUCE_COMBOS
def test_combo_simple(self):
assert Range('2s2c').hands == (Hand('22'),)
assert Range('2s2c').combos == (Combo('2c2s'),)
def test_pairs_multiple(self):
assert Range('22 33').hands == (Hand('22'), Hand('33'))
assert Range('33 22').hands == (Hand('22'), Hand('33'))
def test_pairs_with_plus(self):
assert Range('88+').hands == (Hand('88'), Hand('99'), Hand('TT'), Hand('JJ'), Hand('QQ'),
Hand('KK'), Hand('AA'))
assert Range('22+').hands == PAIR_HANDS
def test_pairs_with_dash(self):
assert Range('22-55').hands == (Hand('22'), Hand('33'), Hand('44'), Hand('55'))
assert Range('22-33').hands == (Hand('22'), Hand('33'))
def test_pairs_with_dash_reverse(self):
assert Range('55-22').hands == (Hand('22'), Hand('33'), Hand('44'), Hand('55'))
assert Range('33-22').hands == (Hand('22'), Hand('33'))
def test_multiple_offsuit_hands(self):
assert Range('AKo 84o').hands == (Hand('84o'), Hand('AKo'))
def test_hands_without_suit(self):
assert Range('AK 48').hands == (Hand('84o'), Hand('84s'), Hand('AKo'), Hand('AKs'))
def test_dash_offsuit(self):
assert Range('J8o-J4o').hands == (Hand('J4o'), Hand('J5o'), Hand('J6o'),
Hand('J7o'), Hand('J8o'))
def test_dash_suited(self):
assert Range('J8s-J4s').hands == (Hand('J4s'), Hand('J5s'), Hand('J6s'),
Hand('J7s'), Hand('J8s'))
def test_pairs_backward(self):
assert Range('44-').hands == (Hand('22'), Hand('33'), Hand('44'))
def test_both_suits_with_minus(self):
assert Range('A5-').hands == (Hand('A2o'), Hand('A2s'), Hand('A3o'), Hand('A3s'),
Hand('A4o'), Hand('A4s'), Hand('A5o'), Hand('A5s'))
def test_both_suits_with_plus(self):
assert Range('A5+').hands == (
Hand('A5o'), Hand('A5s'), Hand('A6o'), Hand('A6s'), Hand('A7o'), Hand('A7s'),
Hand('A8o'), Hand('A8s'), Hand('A9o'), Hand('A9s'), Hand('ATo'), Hand('ATs'),
Hand('AJo'), Hand('AJs'), Hand('AQo'), Hand('AQs'), Hand('AKo'), Hand('AKs')
)
def test_X_plus_in_range(self):
assert Range('KX+').hands == (
Hand('K2o'), Hand('K2s'), Hand('K3o'), Hand('K3s'), Hand('K4o'), Hand('K4s'),
Hand('K5o'), Hand('K5s'), Hand('K6o'), Hand('K6s'), Hand('K7o'), Hand('K7s'),
Hand('K8o'), Hand('K8s'), Hand('K9o'), Hand('K9s'), Hand('KTo'), Hand('KTs'),
Hand('KJo'), Hand('KJs'), Hand('KQo'), Hand('KQs'), Hand('A2o'), Hand('A2s'),
Hand('A3o'), Hand('A3s'), Hand('A4o'), Hand('A4s'), Hand('A5o'), Hand('A5s'),
Hand('A6o'), Hand('A6s'), Hand('A7o'), Hand('A7s'), Hand('A8o'), Hand('A8s'),
Hand('A9o'), Hand('A9s'), Hand('ATo'), Hand('ATs'), Hand('AJo'), Hand('AJs'),
Hand('AQo'), Hand('AQs'), Hand('AKo'), Hand('AKs')
)
def test_X_suited_plus(self):
assert Range('KXs+').hands == (
Hand('K2s'), Hand('K3s'), Hand('K4s'), Hand('K5s'), Hand('K6s'), Hand('K7s'),
Hand('K8s'), Hand('K9s'), Hand('KTs'), Hand('KJs'), Hand('KQs'), Hand('A2s'),
Hand('A3s'), Hand('A4s'), Hand('A5s'), Hand('A6s'), Hand('A7s'), Hand('A8s'),
Hand('A9s'), Hand('ATs'), Hand('AJs'), Hand('AQs'), Hand('AKs')
)
def test_X_offsuit_plus(self):
assert Range('KXo+').hands == (
Hand('K2o'), Hand('K3o'), Hand('K4o'), Hand('K5o'), Hand('K6o'), Hand('K7o'),
Hand('K8o'), Hand('K9o'), Hand('KTo'), Hand('KJo'), Hand('KQo'), Hand('A2o'),
Hand('A3o'), Hand('A4o'), Hand('A5o'), Hand('A6o'), Hand('A7o'), Hand('A8o'),
Hand('A9o'), Hand('ATo'), Hand('AJo'), Hand('AQo'), Hand('AKo')
)
def test_X_suited_minus(self):
assert Range('5Xs-').hands == (
Hand('32s'), Hand('42s'), Hand('43s'), Hand('52s'), Hand('53s'), Hand('54s'),
)
def test_X_offsuit_minus(self):
assert Range('5Xo-').hands == (
Hand('32o'), Hand('42o'), Hand('43o'), Hand('52o'), Hand('53o'), Hand('54o'),
)
def test_offsuit_plus(self):
assert Range('KJo+').hands == (Hand('KJo'), Hand('KQo'))
def test_offsuit_minus(self):
assert Range('76o-').hands == (Hand('72o'), Hand('73o'), Hand('74o'),
Hand('75o'), Hand('76o'))
def test_suited_plus(self):
assert Range('KJs+').hands == (Hand('KJs'), Hand('KQs'))
def test_suited_minus(self):
assert Range('76s-').hands == (Hand('72s'), Hand('73s'), Hand('74s'),
Hand('75s'), Hand('76s'))
def test_offsuit_and_suited_dashed(self):
assert Range('J8-J4').hands == (
Hand('J4o'), Hand('J4s'), Hand('J5o'), Hand('J5s'), Hand('J6o'), Hand('J6s'),
Hand('J7o'), Hand('J7s'), Hand('J8o'), Hand('J8s')
)
def test_offsuit_and_suited_with_dash_reversed_is_the_same(self):
assert Range('J8-J4').hands == Range('J4-J8').hands
def test_empty_range(self):
assert Range().hands == tuple()
assert Range().combos == tuple()
assert Range('').hands == tuple()
assert Range('').combos == tuple()
class TestCombosResultsAfterParse:
def test_pairs_simple(self):
"""Test if pairs get all the combos."""
assert Range('22').combos == DEUCE_COMBOS
def test_pairs_multiple(self):
assert Range('22 33').combos == DEUCE_COMBOS + THREE_COMBOS
def test_pairs_with_dash(self):
assert Range('22-33').combos == DEUCE_COMBOS + THREE_COMBOS
def test_pairs_with_dash_are_equal_with_spaces(self):
assert Range('22-33').combos == Range('22 33').combos
assert Range('55-33').combos == Range('33 44 55').combos
class TestCaseInsensitive:
def test_pairs(self):
assert Range('aA') == Range('AA')
assert Range('TT') == Range('tt')
def test_offsuit(self):
assert Range('AkO') == Range('AKo')
def test_suited(self):
assert Range('AKs') == Range('kaS')
class TestPercentages:
def test_one_pair(self):
assert Range('22').percent == 0.45
def test_one_suited_card(self):
assert Range('AKs').percent == 0.3
def test_one_offsuit_card(self):
assert Range('Ako').percent == 0.9
def test_pair_range(self):
assert Range('88+').percent == 3.17
def test_pair_and_offsuit(self):
assert Range('22 AKo').percent == 1.36
def test_full_range(self):
assert Range('XX').percent == 100
class TestNumberOfCombos:
"""Test number of hand combos by suits."""
def test_one_pair(self):
assert len(Range('22')) == 6
assert len(Range('QQ')) == 6
def test_pair_range(self):
assert len(Range('22-55')) == 24
assert len(Range('55-22')) == 24
def test_one_suited_hand(self):
assert len(Range('AKs')) == 4
assert len(Range('76s')) == 4
def test_one_offsuit_card(self):
assert len(Range('AKo')) == 12
def test_full_range(self):
assert len(Range('XX')) == 1326
class TestComposeHands:
"""Test different constructors and composition of hands."""
def test_pairs_from_hands(self):
assert Range.from_objects({Hand('AA'), Hand('KK'), Hand('QQ')}) == Range('QQ+')
def test_from_combos(self):
range = Range.from_objects(DEUCE_COMBOS)
assert range == Range('22')
assert range.combos == DEUCE_COMBOS
assert range.hands == (Hand('22'),)
@mark.xfail
def test_from_percent(self):
assert Range.from_percent(0.9) == Range('KK+')
@mark.xfail
def test_from_percent_comparison(self):
# both represents 0.9%, but they should not be equal
assert Range('AKo') != Range.from_percent(0.9)
class TestRangeEquality:
"""Tests if two range objects are equal."""
def test_pairs_with_dash_equals_pairs_with_dash_reverse(self):
assert Range('33-22').hands == Range('22-33').hands
def test_offsuit_multiple_with_AK(self):
assert Range('AKo 22+ 45 33') == Range('22+ AKo 54')
def test_empty_range(self):
assert Range() == Range('')
class TestValueChecks:
def test_invalid_pair(self):
with raises(ValueError):
Range('HH')
def test_invalid_offsuit(self):
with raises(ValueError):
Range('KKo')
def test_multiple_ranges_one_invalid(self):
with raises(ValueError):
Range('22+ AKo JK2')
def test_invalid_combos(self):
with raises(ValueError):
Range('AsKq')
def test_invalid_text_in_range(self):
with raises(ValueError):
Range('this is not a range')
def test_invalid_Combo(self):
with raises(ValueError):
Range('AsKq')
class TestComparisons:
def test_ranges_with_lesser_hands_are_smaller(self):
assert Range('33+') < Range('22+')
assert Range('22+') > Range('33+')
assert Range('AKo, JKs') > Range('AKo')
assert not(Range('AKo') < Range('33-44')) # 12 vs 12
def test_ranges_only_equal_if_they_are_the_same(self):
assert Range('Ak') == Range('Aks, AKo')
assert Range('33+') == Range('44+, 33')
def test_ranges_with_different_hands_are_not_equal(self):
assert Range('AKs') != Range('KJs')
assert Range('AKo') != Range('KJo')
assert Range('22') != Range('44')
def test_pairs_with_dash_equals_pairs_with_dash_reverse(self):
assert Range('33-22').hands == Range('22-33').hands
def test_offsuit_multiple_with_AK(self):
assert Range('AKo 22+ 45 33') == Range('22+ AKo 54')
class TestNormalization:
"""Test for repr, unicode representation and range normalization."""
def test_empty_range_is_empty(self):
assert unicode(Range('')) == ''
assert repr(Range('')) == b"Range('')"
assert unicode(Range()) == ''
assert repr(Range()) == b"Range('')"
def test_one_pair(self):
assert str(Range('22')) == b'22'
assert unicode(Range('22')) == '22'
def test_two_pairs(self):
assert unicode(Range('22 44')) == '44, 22'
def test_one_offsuit_hand(self):
assert unicode(Range('AKo')) == 'AKo'
def test_one_combination(self):
assert unicode(Range('AsKc')) == 'A♠K♣'
def test_offsuit_and_suited(self):
assert unicode(Range('AK')) == 'AKs, AKo'
def test_suited_hand(self):
assert unicode(Range('AKs')) == 'AKs'
def test_one_pair_and_one_hand(self):
assert unicode(Range('22 AKo')) == '22, AKo'
assert unicode(Range('22 AKs')) == '22, AKs'
def test_one_pair_and_suited_and_offsuit(self):
assert unicode(Range('22 AKo AKs')) == '22, AKs, AKo'
assert unicode(Range('22 AK')) == '22, AKs, AKo'
def test_one_pair_and_one_combo(self):
assert unicode(Range('22 AsKh')) == '22, A♠K♥'
def test_pair_range(self):
assert unicode(Range('33-66')) == '66-33'
def test_mixed_pairs_ranges_and_combos(self):
assert unicode(Range('44+, KJs KJo JsQc AcKc')) == '44+, A♣K♣, KJs, KJo, Q♣J♠'
def test_very_complicated_range(self):
assert unicode(Range('44-88, AA-KK, KJs KcJh JsQc AcKc 74s-76s')) == \
'KK+, 88-44, A♣K♣, KJs, 74s+, K♣J♥, Q♣J♠'
def test_negative(self):
range = Range('55-22')
assert unicode(range) == '55-'
assert repr(range) == "Range('55-')"
def test_full_range(self):
assert unicode(Range('XX')) == 'XX'
def test_X_in_range(self):
assert unicode(Range('KX')) == 'K2s+, K2o+'
def test_rep_pieces(self):
assert Range('KX').rep_pieces == ['K2s+', 'K2o+']
def test_both_suits_with_plus_or_minus(self):
assert unicode(Range('A5-')) == 'A5s-, A5o-'
assert unicode(Range('A5+')) == 'A5s+, A5o+'
assert unicode(Range('A5+ A5-')) == 'A2s+, A2o+'
def test_X_plus(self):
assert unicode(Range('QX+')) == 'A2s+, K2s+, Q2s+, A2o+, K2o+, Q2o+'
def test_X_minus(self):
assert unicode(Range('5X-')) == '52s+, 42s+, 32s, 52o+, 42o+, 32o'
def test_hand_plus(self):
assert unicode(Range('KJo+')) == 'KJo+'
def test_hand_minus(self):
assert unicode(Range('76o-')) == '72o+'
def test_both_dashed(self):
assert unicode(Range('J8-J4')) == 'J8s-J4s, J8o-J4o'
def test_str_and_range(self):
range = Range('77+ AKo')
assert repr(range) == "Range('77+ AKo')"
assert unicode(range) == '77+, AKo'
def test_order_with_suit_and_without_suit(self):
range = Range('Kas 48')
assert repr(range) == "Range('AKs 84s 84o')"
assert unicode(range) == 'AKs, 84s, 84o'
def test_pairs_order(self):
range = Range('22-55')
assert unicode(range) == '55-'
def test_redundant_offsuit_hands(self):
range = Range('A2o+ 2Ao 8ao')
assert unicode(range) == 'A2o+'
assert repr(range) == "Range('A2o+')"
def test_reduntant_pairs(self):
range = Range('22-44 33')
assert unicode(range) == '44-'
assert repr(range) == "Range('44-')"
def test_redundant_suited_hands(self):
range = Range('2as+ A5s A7s')
assert unicode(range) == 'A2s+'
assert repr(range) == "Range('A2s+')"
class TestBooleanBehavior:
def test_empty_range_is_false(self):
assert bool(Range()) is False
def test_any_non_empty_valid_range_is_true(self):
assert bool(Range('22')) is True
def test_general_hand(self):
assert bool(Range('AK')) is True
def test_hand_combination(self):
assert bool(Range('AhKs')) is True
def test_offsuit_hand(self):
assert bool(Range('AKo')) is True
class TestContains:
def test_combo_in_range(self):
assert Combo('2s2c') in Range('22')
def test_hand_in_range(self):
assert Hand('Ako') in Range('AQo+')
def test_str_in_range(self):
assert 'AKo' in Range('AQo+')
def test_wrong_str_in_range_raises_ValueError(self):
with raises(ValueError):
assert 'AKl' in Range('AQo+')
def test_pickable():
assert pickle.loads(pickle.dumps(Range('Ako 22+'))) == Range('AKo 22+')
|
[
"kissgyorgy@me.com"
] |
kissgyorgy@me.com
|
ee2aed7ddc6385dce19e6b6f647ae016582cdadd
|
77baa4d9305db86b92de26054df7a4d6d9e05dac
|
/opolo-baselines/stable_baselines/trpo_mpi/trpogail.py
|
ceefab558cfef9e8fd838f0917c42e814d4745f0
|
[
"MIT"
] |
permissive
|
illidanlab/opolo-code
|
3c67a9b29f8cb720be118fc420c0906dfd36c70c
|
89a7a0e8a06e7c8320acbdfef2069d6aadcb5863
|
refs/heads/main
| 2023-03-11T23:10:18.680147
| 2021-03-04T16:48:45
| 2021-03-04T16:48:45
| 302,173,477
| 28
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 28,538
|
py
|
import time
from contextlib import contextmanager
from collections import deque
import gym
from mpi4py import MPI
import tensorflow as tf
import numpy as np
import stable_baselines.common.tf_util as tf_util
from stable_baselines.common import explained_variance, zipsame, dataset, fmt_row, colorize, ActorCriticRLModel, \
SetVerbosity, TensorboardWriter
from stable_baselines import logger
from stable_baselines.common.mpi_adam import MpiAdam
from stable_baselines.common.cg import conjugate_gradient
from stable_baselines.common.policies import ActorCriticPolicy
from stable_baselines.a2c.utils import total_episode_reward_logger
from stable_baselines.trpo_mpi.utils import SegmentGenerator, add_vtarg_and_adv, flatten_lists
from stable_baselines.gail.triple import TripleDiscriminator
from stable_baselines.gail.dataset.dataset import ExpertDataset
from stable_baselines.deepq.replay_buffer import ReplayBuffer
class TRPOGAIL(ActorCriticRLModel):
"""
GAIL based on an TRPO implementation (https://arxiv.org/abs/1502.05477)
:param policy: (ActorCriticPolicy or str) The policy model to use (MlpPolicy, CnnPolicy, CnnLstmPolicy, ...)
:param env: (Gym environment or str) The environment to learn from (if registered in Gym, can be str)
:param gamma: (float) the discount value
:param timesteps_per_batch: (int) the number of timesteps to run per batch (horizon)
:param max_kl: (float) the Kullback-Leibler loss threshold
:param cg_iters: (int) the number of iterations for the conjugate gradient calculation
:param lam: (float) GAE factor
:param entcoeff: (float) the weight for the entropy loss
:param cg_damping: (float) the compute gradient dampening factor
:param vf_stepsize: (float) the value function stepsize
:param vf_iters: (int) the value function's number iterations for learning
:param verbose: (int) the verbosity level: 0 none, 1 training information, 2 tensorflow debug
:param tensorboard_log: (str) the log location for tensorboard (if None, no logging)
:param _init_setup_model: (bool) Whether or not to build the network at the creation of the instance
:param policy_kwargs: (dict) additional arguments to be passed to the policy on creation
:param full_tensorboard_log: (bool) enable additional logging when using tensorboard
WARNING: this logging can take a lot of space quickly
:param seed: (int) Seed for the pseudo-random generators (python, numpy, tensorflow).
If None (default), use random seed. Note that if you want completely deterministic
results, you must set `n_cpu_tf_sess` to 1.
:param n_cpu_tf_sess: (int) The number of threads for TensorFlow operations
If None, the number of cpu of the current machine will be used.
"""
def __init__(self, policy, env, gamma=0.99, timesteps_per_batch=1024, max_kl=0.01, cg_iters=10, lam=0.98,
entcoeff=0.0, cg_damping=1e-2, vf_stepsize=3e-4, vf_iters=3, verbose=0, tensorboard_log=None,
_init_setup_model=True, policy_kwargs=None, full_tensorboard_log=False,
seed=None, n_cpu_tf_sess=1,
buffer_size=int(1e6), demo_buffer_size=int(1e4), d_gradient_steps=None, config={}):
super(TRPOGAIL, self).__init__(policy=policy, env=env, verbose=verbose, requires_vec_env=False,
_init_setup_model=_init_setup_model, policy_kwargs=policy_kwargs,
seed=seed, n_cpu_tf_sess=n_cpu_tf_sess)
self.timesteps_per_batch = timesteps_per_batch
self.cg_iters = cg_iters
self.cg_damping = cg_damping
self.gamma = gamma
self.lam = lam
self.max_kl = max_kl
self.vf_iters = vf_iters
self.vf_stepsize = vf_stepsize
self.entcoeff = entcoeff
self.tensorboard_log = tensorboard_log
self.full_tensorboard_log = full_tensorboard_log
# GAIL Params
self.expert_dataset = None
self.g_step = 1
self.d_step = 1
self.d_stepsize = 3e-4
self.graph = None
self.sess = None
self.policy_pi = None
self.loss_names = None
self.assign_old_eq_new = None
self.compute_losses = None
self.compute_lossandgrad = None
self.compute_fvp = None
self.compute_vflossandgrad = None
self.d_adam = None
self.vfadam = None
self.get_flat = None
self.set_from_flat = None
self.timed = None
self.allmean = None
self.nworkers = None
self.rank = None
self.reward_giver = None
self.step = None
self.proba_step = None
self.initial_state = None
self.params = None
self.summary = None
## Customized parameters
self.buffer_size = int(buffer_size)
self.hidden_size_adversary = 256
self.adversary_entcoeff = 1e-3
self.adversary_gradcoeff = 10
self.demo_buffer_size = demo_buffer_size
self.d_batch_size = 256
        self.d_gradient_steps = timesteps_per_batch // self.d_batch_size
self.d_learning_rate = 3e-4
if _init_setup_model:
self.setup_model(config)
def _get_pretrain_placeholders(self):
policy = self.policy_pi
action_ph = policy.pdtype.sample_placeholder([None])
if isinstance(self.action_space, gym.spaces.Discrete):
return policy.obs_ph, action_ph, policy.policy
return policy.obs_ph, action_ph, policy.deterministic_action
def setup_model(self, config):
# prevent import loops
with SetVerbosity(self.verbose):
assert issubclass(self.policy, ActorCriticPolicy), "Error: the input policy for the TRPO model must be " \
"an instance of common.policies.ActorCriticPolicy."
self.config = config
self.expert_data_path = config.get('expert_data_path', None)
self.expert_dataset = ExpertDataset(expert_path=self.expert_data_path, ob_flatten=False)
print('-'*20 + "expert_data_path: {}".format(self.expert_data_path))
self.nworkers = MPI.COMM_WORLD.Get_size()
self.rank = MPI.COMM_WORLD.Get_rank()
np.set_printoptions(precision=3)
self.graph = tf.Graph()
with self.graph.as_default():
self.set_random_seed(self.seed)
self.sess = tf_util.make_session(num_cpu=self.n_cpu_tf_sess, graph=self.graph)
self.discriminator = None
self.explore_discriminator = None
            # we find that training the discriminator on (s, a, s') performs better than on (s, a).
self.discriminator = TripleDiscriminator(
self.env,
self.observation_space,
self.action_space,
hidden_size=self.hidden_size_adversary,
entcoeff=self.adversary_entcoeff,
gradcoeff=self.adversary_gradcoeff,
normalize=True
)
# Construct network for new policy
self.policy_pi = self.policy(self.sess, self.observation_space, self.action_space, self.n_envs, 1,
None, reuse=False, **self.policy_kwargs)
# Network for old policy
with tf.variable_scope("oldpi", reuse=False):
old_policy = self.policy(self.sess, self.observation_space, self.action_space, self.n_envs, 1,
None, reuse=False, **self.policy_kwargs)
with tf.variable_scope("loss", reuse=False):
atarg = tf.placeholder(dtype=tf.float32, shape=[None]) # Target advantage function (if applicable)
ret = tf.placeholder(dtype=tf.float32, shape=[None]) # Empirical return
observation = self.policy_pi.obs_ph
action = self.policy_pi.pdtype.sample_placeholder([None])
kloldnew = old_policy.proba_distribution.kl(self.policy_pi.proba_distribution)
ent = self.policy_pi.proba_distribution.entropy()
meankl = tf.reduce_mean(kloldnew)
meanent = tf.reduce_mean(ent)
entbonus = self.entcoeff * meanent
vferr = tf.reduce_mean(tf.square(self.policy_pi.value_flat - ret))
# advantage * pnew / pold
ratio = tf.exp(self.policy_pi.proba_distribution.logp(action) -
old_policy.proba_distribution.logp(action))
surrgain = tf.reduce_mean(ratio * atarg)
optimgain = surrgain + entbonus
losses = [optimgain, meankl, entbonus, surrgain, meanent]
self.loss_names = ["optimgain", "meankl", "entloss", "surrgain", "entropy"]
dist = meankl
all_var_list = tf_util.get_trainable_vars("model")
var_list = [v for v in all_var_list if "/vf" not in v.name and "/q/" not in v.name]
vf_var_list = [v for v in all_var_list if "/pi" not in v.name and "/logstd" not in v.name]
self.get_flat = tf_util.GetFlat(var_list, sess=self.sess)
self.set_from_flat = tf_util.SetFromFlat(var_list, sess=self.sess)
klgrads = tf.gradients(dist, var_list)
flat_tangent = tf.placeholder(dtype=tf.float32, shape=[None], name="flat_tan")
shapes = [var.get_shape().as_list() for var in var_list]
start = 0
tangents = []
for shape in shapes:
var_size = tf_util.intprod(shape)
tangents.append(tf.reshape(flat_tangent[start: start + var_size], shape))
start += var_size
gvp = tf.add_n([tf.reduce_sum(grad * tangent)
for (grad, tangent) in zipsame(klgrads, tangents)]) # pylint: disable=E1111
# Fisher vector products
fvp = tf_util.flatgrad(gvp, var_list)
policy_summary = []
policy_summary.append(tf.summary.scalar('entropy_loss', meanent))
policy_summary.append(tf.summary.scalar('policy_gradient_loss', optimgain))
policy_summary.append(tf.summary.scalar('value_function_loss', surrgain))
policy_summary.append(tf.summary.scalar('approximate_kullback-leibler', meankl))
policy_summary.append(tf.summary.scalar('loss', optimgain + meankl + entbonus + surrgain + meanent))
self.assign_old_eq_new = \
tf_util.function([], [], updates=[tf.assign(oldv, newv) for (oldv, newv) in
zipsame(tf_util.get_globals_vars("oldpi"),
tf_util.get_globals_vars("model"))])
self.compute_losses = tf_util.function([observation, old_policy.obs_ph, action, atarg], losses)
self.compute_fvp = tf_util.function([flat_tangent, observation, old_policy.obs_ph, action, atarg],
fvp)
self.compute_vflossandgrad = tf_util.function([observation, old_policy.obs_ph, ret],
tf_util.flatgrad(vferr, vf_var_list))
@contextmanager
def timed(msg):
if self.rank == 0 and self.verbose >= 1:
print(colorize(msg, color='magenta'))
start_time = time.time()
yield
print(colorize("done in {:.3f} seconds".format((time.time() - start_time)),
color='magenta'))
else:
yield
def allmean(arr, nworkers):
assert isinstance(arr, np.ndarray)
out = np.empty_like(arr)
MPI.COMM_WORLD.Allreduce(arr, out, op=MPI.SUM)
out /= nworkers
return out
tf_util.initialize(sess=self.sess)
th_init = self.get_flat()
MPI.COMM_WORLD.Bcast(th_init, root=0)
self.set_from_flat(th_init)
with tf.variable_scope("Adam_mpi", reuse=False):
self.vfadam = MpiAdam(vf_var_list, sess=self.sess)
self.vfadam.sync()
self.d_adam = MpiAdam(self.discriminator.get_trainable_variables(), sess=self.sess)
self.d_adam.sync()
with tf.variable_scope("input_info", reuse=False):
policy_summary.append(tf.summary.scalar('discounted_rewards', tf.reduce_mean(ret)))
policy_summary.append(tf.summary.scalar('learning_rate', tf.reduce_mean(self.vf_stepsize)))
policy_summary.append(tf.summary.scalar('advantage', tf.reduce_mean(atarg)))
policy_summary.append(tf.summary.scalar('kl_clip_range', tf.reduce_mean(self.max_kl)))
if self.full_tensorboard_log:
policy_summary.append(tf.summary.histogram('discounted_rewards', ret))
policy_summary.append(tf.summary.histogram('learning_rate', self.vf_stepsize))
policy_summary.append(tf.summary.histogram('advantage', atarg))
policy_summary.append(tf.summary.histogram('kl_clip_range', self.max_kl))
if tf_util.is_image(self.observation_space):
policy_summary.append(tf.summary.image('observation', observation))
else:
policy_summary.append(tf.summary.histogram('observation', observation))
self.timed = timed
self.allmean = allmean
self.step = self.policy_pi.step
self.proba_step = self.policy_pi.proba_step
self.initial_state = self.policy_pi.initial_state
self.params = tf_util.get_trainable_vars("model") + tf_util.get_trainable_vars("oldpi")
self.params.extend(self.discriminator.get_trainable_variables())
self.summary = tf.summary.merge(policy_summary)
self.compute_lossandgrad = \
tf_util.function([observation, old_policy.obs_ph, action, atarg, ret],
[self.summary, tf_util.flatgrad(optimgain, var_list)] + losses)
def learn(self, total_timesteps, callback=None, log_interval=100, tb_log_name="TRPOGAIL",
reset_num_timesteps=True):
new_tb_log = self._init_num_timesteps(reset_num_timesteps)
with SetVerbosity(self.verbose), TensorboardWriter(self.graph, self.tensorboard_log, tb_log_name, new_tb_log) \
as writer:
self._setup_learn()
true_reward_buffer = deque(maxlen=40)
# Initialize demonstration buffer
self.teacher_buffer = ReplayBuffer(self.demo_buffer_size)
self.teacher_buffer.initialize_teacher_buffer(self.expert_dataset)
with self.sess.as_default():
seg_generator = SegmentGenerator(
self.policy_pi,
self.env,
self.timesteps_per_batch,
self.discriminator,
explore_discriminator=self.explore_discriminator,
replay_buffer=None,
sess=self.sess,
config=self.config
)
seg_gen = seg_generator.traj_segment_generator(gail=True)
episodes_so_far = 0
timesteps_so_far = 0
iters_so_far = 0
t_start = time.time()
len_buffer = deque(maxlen=40) # rolling buffer for episode lengths
reward_buffer = deque(maxlen=40) # rolling buffer for episode rewards
# Stats not used for now
# TODO: replace with normal tb logging
# g_loss_stats = Stats(loss_names)
# d_loss_stats = Stats(reward_giver.loss_name)
# ep_stats = Stats(["True_rewards", "Rewards", "Episode_length"])
while True:
if callback is not None:
# Only stop training if return value is False, not when it is None. This is for backwards
# compatibility with callbacks that have no return statement.
if callback(locals(), globals()) is False:
break
if total_timesteps and timesteps_so_far >= total_timesteps:
break
logger.log("********** Iteration %i ************" % iters_so_far)
def fisher_vector_product(vec):
return self.allmean(self.compute_fvp(vec, *fvpargs, sess=self.sess), self.nworkers) + self.cg_damping * vec
# ------------------ Update G ------------------
logger.log("Optimizing Policy...")
# g_step = 1 when not using GAIL
mean_losses = None
vpredbefore = None
tdlamret = None
observation = None
action = None
seg = None
for k in range(self.g_step):
with self.timed("sampling"):
seg = seg_gen.__next__()
add_vtarg_and_adv(seg, self.gamma, self.lam)
# ob, ac, atarg, ret, td1ret = map(np.concatenate, (obs, acs, atargs, rets, td1rets))
observation, action = seg["observations"], seg["actions"]
atarg, tdlamret = seg["adv"], seg["tdlamret"]
vpredbefore = seg["vpred"] # predicted value function before update
atarg = (atarg - atarg.mean()) / atarg.std() # standardized advantage function estimate
# true_rew is the reward without discount
if writer is not None:
total_episode_reward_logger(self.episode_reward,
seg["true_rewards"].reshape(
(self.n_envs, -1)),
seg["dones"].reshape((self.n_envs, -1)),
writer, self.num_timesteps)
args = seg["observations"], seg["observations"], seg["actions"], atarg
# Subsampling: see p40-42 of John Schulman thesis
# http://joschu.net/docs/thesis.pdf
fvpargs = [arr[::5] for arr in args]
self.assign_old_eq_new(sess=self.sess)
with self.timed("computegrad"):
steps = self.num_timesteps + (k + 1) * (seg["total_timestep"] / self.g_step)
run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata() if self.full_tensorboard_log else None
# run loss backprop with summary, and save the metadata (memory, compute time, ...)
if writer is not None:
summary, grad, *lossbefore = self.compute_lossandgrad(*args, tdlamret, sess=self.sess,
options=run_options,
run_metadata=run_metadata)
if self.full_tensorboard_log:
writer.add_run_metadata(run_metadata, 'step%d' % steps)
writer.add_summary(summary, steps)
else:
_, grad, *lossbefore = self.compute_lossandgrad(*args, tdlamret, sess=self.sess,
options=run_options,
run_metadata=run_metadata)
lossbefore = self.allmean(np.array(lossbefore), self.nworkers)
grad = self.allmean(grad, self.nworkers)
if np.allclose(grad, 0):
logger.log("Got zero gradient. not updating")
else:
with self.timed("conjugate_gradient"):
stepdir = conjugate_gradient(fisher_vector_product, grad, cg_iters=self.cg_iters,
verbose=self.rank == 0 and self.verbose >= 1)
assert np.isfinite(stepdir).all()
shs = .5 * stepdir.dot(fisher_vector_product(stepdir))
# abs(shs) to avoid taking square root of negative values
lagrange_multiplier = np.sqrt(abs(shs) / self.max_kl)
# logger.log("lagrange multiplier:", lm, "gnorm:", np.linalg.norm(g))
fullstep = stepdir / lagrange_multiplier
expectedimprove = grad.dot(fullstep)
surrbefore = lossbefore[0]
stepsize = 1.0
thbefore = self.get_flat()
for _ in range(10):
thnew = thbefore + fullstep * stepsize
self.set_from_flat(thnew)
mean_losses = surr, kl_loss, *_ = self.allmean(
np.array(self.compute_losses(*args, sess=self.sess)), self.nworkers)
improve = surr - surrbefore
logger.log("Expected: %.3f Actual: %.3f" % (expectedimprove, improve))
if not np.isfinite(mean_losses).all():
logger.log("Got non-finite value of losses -- bad!")
elif kl_loss > self.max_kl * 1.5:
logger.log("violated KL constraint. shrinking step.")
elif improve < 0:
logger.log("surrogate didn't improve. shrinking step.")
else:
logger.log("Stepsize OK!")
break
stepsize *= .5
else:
logger.log("couldn't compute a good step")
self.set_from_flat(thbefore)
if self.nworkers > 1 and iters_so_far % 20 == 0:
# list of tuples
paramsums = MPI.COMM_WORLD.allgather((thnew.sum(), self.vfadam.getflat().sum()))
assert all(np.allclose(ps, paramsums[0]) for ps in paramsums[1:])
for (loss_name, loss_val) in zip(self.loss_names, mean_losses):
logger.record_tabular(loss_name, loss_val)
with self.timed("vf"):
for _ in range(self.vf_iters):
# NOTE: for recurrent policies, use shuffle=False?
for (mbob, mbret) in dataset.iterbatches((seg["observations"], seg["tdlamret"]),
include_final_partial_batch=False,
batch_size=128,
shuffle=True):
grad = self.allmean(self.compute_vflossandgrad(mbob, mbob, mbret, sess=self.sess), self.nworkers)
self.vfadam.update(grad, self.vf_stepsize)
logger.record_tabular("explained_variance_tdlam_before",
explained_variance(vpredbefore, tdlamret))
## # ------------------ Update D ------------------
# onpolicy discriminator
self.discriminator.train_onpolicy_discriminator(
writer, logger, self.d_gradient_steps, self.d_learning_rate,
self.d_batch_size, self.teacher_buffer, seg,
self.num_timesteps, self.sess, self.d_adam, self.nworkers)
# lr: lengths and rewards
lr_local = (seg["ep_lens"], seg["ep_rets"], seg["ep_true_rets"]) # local values
list_lr_pairs = MPI.COMM_WORLD.allgather(lr_local) # list of tuples
lens, rews, true_rets = map(flatten_lists, zip(*list_lr_pairs))
true_reward_buffer.extend(true_rets)
len_buffer.extend(lens)
reward_buffer.extend(rews)
if len(len_buffer) > 0:
logger.record_tabular("EpLenMean", np.mean(len_buffer))
logger.record_tabular("EpRewMean", np.mean(reward_buffer))
logger.record_tabular("EpTrueRewMean", np.mean(true_reward_buffer))
logger.record_tabular("EpThisIter", len(lens))
episodes_so_far += len(lens)
current_it_timesteps = MPI.COMM_WORLD.allreduce(seg["total_timestep"])
timesteps_so_far += current_it_timesteps
self.num_timesteps += current_it_timesteps
iters_so_far += 1
logger.record_tabular("EpisodesSoFar", episodes_so_far)
logger.record_tabular("TimestepsSoFar", self.num_timesteps)
logger.record_tabular("TimeElapsed", time.time() - t_start)
if self.verbose >= 1 and self.rank == 0:
logger.dump_tabular()
return self
def save(self, save_path, cloudpickle=False):
if self.expert_dataset is not None:
# Exit processes to pickle the dataset
self.expert_dataset.prepare_pickling()
data = {
"gamma": self.gamma,
"timesteps_per_batch": self.timesteps_per_batch,
"max_kl": self.max_kl,
"cg_iters": self.cg_iters,
"lam": self.lam,
"entcoeff": self.entcoeff,
"cg_damping": self.cg_damping,
"vf_stepsize": self.vf_stepsize,
"vf_iters": self.vf_iters,
"hidden_size_adversary": self.hidden_size_adversary,
"adversary_entcoeff": self.adversary_entcoeff,
"expert_dataset": self.expert_dataset,
"g_step": self.g_step,
"d_step": self.d_step,
"d_stepsize": self.d_stepsize,
"verbose": self.verbose,
"policy": self.policy,
"observation_space": self.observation_space,
"action_space": self.action_space,
"n_envs": self.n_envs,
"n_cpu_tf_sess": self.n_cpu_tf_sess,
"seed": self.seed,
"_vectorize_action": self._vectorize_action,
"policy_kwargs": self.policy_kwargs
}
params_to_save = self.get_parameters()
self._save_to_file(save_path, data=data, params=params_to_save, cloudpickle=cloudpickle)
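# Minimal usage sketch (illustrative; the gym env id and expert data path are assumptions,
# and running it needs an MPI-capable environment plus an expert .npz dataset):
#
#   from stable_baselines.common.policies import MlpPolicy
#   model = TRPOGAIL(MlpPolicy, "Hopper-v2",
#                    config={"expert_data_path": "expert_hopper.npz"})
#   model.learn(total_timesteps=1000000)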
|
[
"zhuangdizhu@yahoo.com"
] |
zhuangdizhu@yahoo.com
|
4d386fd1300322b507e6f1f1f34f942c879974a3
|
48f73b5b78da81c388d76d685ec47bb6387eefdd
|
/scrapeHackerrankCode/codes/s10-geometric-distribution-226.py
|
3a10c689910f87f39f3dca15106ccca2acf25104
|
[] |
no_license
|
abidkhan484/hacerrankScraping
|
ad0ceda6c86d321d98768b169d63ea1ee7ccd861
|
487bbf115117bd5c293298e77f15ae810a50b82d
|
refs/heads/master
| 2021-09-18T19:27:52.173164
| 2018-07-18T12:12:51
| 2018-07-18T12:12:51
| 111,005,462
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 110
|
py
|
# Wrong Answer
# Python 3
m, n = map(int, input().split())
p = int(input().strip())
print(1-((1-(m/n))**p))
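# Hedged guess at why the verdict above is "Wrong Answer": the 10 Days of Statistics
# checker typically expects the probability scaled to three decimal places, e.g.
#   print(round(1 - (1 - m / n) ** p, 3))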
|
[
"abidkhan484@gmail.com"
] |
abidkhan484@gmail.com
|
ac77fe5c302738244592e05e17947cf21ad5e7a9
|
34830496fc3ca119be3204d593d202bca2dc1334
|
/kaprekar_cycles.py
|
c9d2002a55c8ea6597b4289b45681b0f09343420
|
[] |
no_license
|
kerem-kirici/Kaprekar-s-Cycles
|
a8f9ad85130bbed8f97bd4a27e7358e283632e24
|
d8075b6d5cdfe775d01d2aee687664bb53ae1926
|
refs/heads/main
| 2023-04-12T07:59:09.195685
| 2021-05-22T17:23:17
| 2021-05-22T17:23:17
| 369,854,629
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,575
|
py
|
from collections import defaultdict
def cycle(number, number_of_digits):
difference = None
array = []
step = 0
while difference not in array:
        if difference is not None:
step += 1
array.append(difference)
string_number = str(difference)
else:
string_number = str(number)
large_integer = int("".join(sorted(string_number, reverse=True)))
small_integer = int("".join(sorted(string_number)))
if len(string_number) != number_of_digits:
large_integer *= 10
difference = large_integer-small_integer
return array[array.index(difference):], step
def check_num_of_digit(number_of_digits):
array = []
list_of_sets = []
max_steps = 0
for i in range(10**(number_of_digits-1), 10**number_of_digits):
if i % int("1"*number_of_digits):
answer, step = cycle(i, number_of_digits)
max_steps = max(step, max_steps)
set_answer = set(answer)
if set_answer not in list_of_sets:
array.append(str(answer + [answer[0]]).replace(",", "").replace("]", " ...]"))
list_of_sets.append(set_answer)
return array, max_steps
def main():
number = int(input("How many digit numbers you want to look for?\n"))
answer, max_steps = check_num_of_digit(number)
print("%d digit kaprekar cycle(s):" % number, *answer, "\nMaximum required number of steps:", max_steps, "\n")
if __name__ == '__main__':
main()
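# Expected behaviour for the two classic cases: 3-digit numbers settle on the single
# fixed point 495 and 4-digit numbers on 6174 (Kaprekar's constant), so
# check_num_of_digit(3) and check_num_of_digit(4) should each report one such cycle.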
|
[
"noreply@github.com"
] |
kerem-kirici.noreply@github.com
|
829bbe778c610d8510a631364039563d386fabe2
|
d36bedd98c99c60bbb6c71391ff6105ef57822d8
|
/Algorithms/bubble_sort.py
|
dc2aa64fd9f8b48554dae57af33388d7dfb5881c
|
[] |
no_license
|
akgunberk/sortingAlgorithms
|
51e3799f010a9526b9980d2c89afd041bb769ba0
|
65ab9d12cd58368f1d223e68073c25d0052ca2a9
|
refs/heads/master
| 2021-06-26T21:10:51.516936
| 2020-12-30T13:15:03
| 2020-12-30T13:15:03
| 200,853,121
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 296
|
py
|
tobesorted = [9, 3, 89, 0, 5, 7, -5]
b = 0
while b < len(tobesorted):
    for index, i in enumerate(tobesorted):
        if index != len(tobesorted) - 1 and i > tobesorted[index + 1]:
            # swap adjacent elements in place; avoids remove()/insert(),
            # which can move the wrong element when the list holds duplicates
            tobesorted[index], tobesorted[index + 1] = tobesorted[index + 1], i
    b += 1
print(tobesorted)
|
[
"noreply@github.com"
] |
akgunberk.noreply@github.com
|
6aa535eb62729f8c3c04ca243373e63862eb56e3
|
e17f0a87393716bd74d41a30bda75b27a99089c0
|
/median.py
|
1a55169aa69b392485746e96d6d4fba34098d85d
|
[] |
no_license
|
alexandrera/udacity_practice
|
515faed72cef15e38baae600f61d356d276b3c7a
|
ea61631bdbad66aea89223087bf20569acba3fb0
|
refs/heads/master
| 2020-03-29T15:09:16.402949
| 2018-09-26T13:55:51
| 2018-09-26T13:55:51
| 150,047,329
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 713
|
py
|
def medianHard(a,b,c):
    if a > b:
        if a < c:
            return a
        else:
            # a is the largest here, so the median is the larger of b and c
            if b > c:
                return b
            else:
                return c
else:
if b > c:
if a > c:
return a
else:
return c
else:
return b
def bigger(a,b):
if a > b:
return a
else:
return b
def biggest(a,b,c):
return bigger(a,bigger(b,c))
def medianEasy(a,b,c):
big = biggest(a,b,c)
if big == a:
return bigger (b,c)
if big == b:
return bigger (a,c)
else:
return bigger (a,b)
print(medianHard(1,2,3))
print(medianEasy(2,3,1))
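
# Cross-check sketch (added for illustration; medianSorted is not part of the original):
# the median of three values is simply the middle element after sorting, which makes a
# handy reference for the branchy versions above.
def medianSorted(a, b, c):
    return sorted([a, b, c])[1]

assert medianSorted(1, 2, 3) == medianHard(1, 2, 3) == medianEasy(2, 3, 1) == 2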
|
[
"alexandre.ra@gmail.com"
] |
alexandre.ra@gmail.com
|
06a079db10afe082602df7a32f55c4b27ab174ed
|
052b7c164ac17cf38d2c39b52a63f583c8d2605b
|
/Assignment.py
|
d4c983c248f8076f12cc9c8a17d036e32356daf7
|
[] |
no_license
|
Adejare634/python
|
74f399cee77e8714f71488b1f56533133d7db71c
|
a8d71529ee3f9aafce7b161d244d8c24e6801823
|
refs/heads/master
| 2022-12-15T16:33:45.304175
| 2020-09-18T23:28:27
| 2020-09-18T23:28:27
| 286,596,911
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 248
|
py
|
# create a file and write some alphanumeric content
with open("Alphanumeric_nums.txt", "w+") as new_file:
    new_file.write('64ja')

num = int(input('Enter Number: '))
if num % 4 == 0:
    print('Number is divisible by 4')
else:
    print('Number is not divisible by 4')
|
[
"noreply@github.com"
] |
Adejare634.noreply@github.com
|
eb185cce7bfad29c7e0de4a472c3b0bb1e381663
|
a71d3c3bce999ab22ff418cd89bc55242e9ae5d4
|
/models/Item.py
|
45aed3170a16b97a7aa22247da2d8e6e08eaf290
|
[] |
no_license
|
rowneee/ShoppingCart
|
8dee69bbe28254ed7553cdc772647499267adc21
|
66d75c8ffb669b4c993ab0931f8a9d1d70f6e4f0
|
refs/heads/main
| 2023-04-22T09:16:14.899109
| 2021-05-05T12:44:03
| 2021-05-05T12:44:03
| 364,367,608
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 258
|
py
|
class Item:
def __init__(self, name, price, quantity):
self.name = name
self.price = price
self.quantity = quantity
def json(self):
return {'name': self.name, 'price': self.price, 'quantity': self.quantity}
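
# Minimal usage sketch (illustrative values only), relying on nothing beyond the class above:
if __name__ == "__main__":
    cart_item = Item("coffee", 3.5, 2)
    print(cart_item.json())  # {'name': 'coffee', 'price': 3.5, 'quantity': 2}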
|
[
"noreply@github.com"
] |
rowneee.noreply@github.com
|
7c3510e856387f314b224eaf5e9fb808086e4952
|
df4a821c939337d7686b7f14bb09df27324dec4d
|
/Back_End/youtube_clone_api/comments/urls.py
|
b9391b5b7b227381fc1cbd4ff3c2c6e2a8807656
|
[] |
no_license
|
cjones0429/Youtube_Clone
|
b212805278f028e88c9568f525afd98ebe807a6f
|
48708fa8ded0564d4d7f0e1f7b859c8e532a65eb
|
refs/heads/main
| 2023-06-07T21:15:41.869014
| 2021-06-24T15:46:16
| 2021-06-24T15:46:16
| 379,657,022
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 179
|
py
|
from django.urls import path
from . import views
urlpatterns = [
path('comments/', views.CommentList.as_view()),
path('comments/<int:pk>/', views.CommentList.as_view())
]
|
[
"mrchrisfarrell@yahoo.com"
] |
mrchrisfarrell@yahoo.com
|
d946ad02b33b635e1a7c59b385ae4bded3702a84
|
62f0b6a3a2c929e97d87be6b81c6c7862c726030
|
/familio/wsgi.py
|
a3164c1c57a913b67d8c39d6f7ea8070f38b5d6c
|
[] |
no_license
|
zmunetsi/familio
|
051aed6c388e57e629ac7a4575cedcb6d1b69e3c
|
043afe483e2e7d6191b02b8582c9b60636cbe12e
|
refs/heads/main
| 2023-06-13T22:15:11.479030
| 2021-07-01T12:39:30
| 2021-07-01T12:39:30
| 382,025,404
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 391
|
py
|
"""
WSGI config for familio project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'familio.settings')
application = get_wsgi_application()
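
# One common way to serve this callable (assuming Gunicorn is installed):
#   gunicorn familio.wsgi:application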
|
[
"dev@jetweb.onmicrosoft.com"
] |
dev@jetweb.onmicrosoft.com
|
2ac144d369a075a6cf027846879e08f162f8de2f
|
add5493b191adb5b6e52728854ae74378d8704c0
|
/micro_video.py
|
94e08b56e62991582d6b1a2eba6af75aec03c63a
|
[] |
no_license
|
Grashes/Bilibili
|
7896ebdeb08d72d32a8742c124069e280df39ede
|
0ec4380453b7013c46fbc505843470b973b67aa8
|
refs/heads/master
| 2023-03-21T14:33:25.789488
| 2020-02-06T14:29:59
| 2020-02-06T14:29:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,692
|
py
|
# -*- coding: utf-8 -*-
# @Time : 2019/4/8 20:46
# @Author : Nismison
# @FileName: micro_video.py
# @Description: Bilibili short-video crawler
# @Blog :https://blog.tryfang.cn
from functions.requests_func import url_get
from functions.deal_json import dict_get
from functions.database import Database
def micro_video_crawler(order='', page_num=1):
"""
    :param order: sort order; 'new' sorts by video upload time, the default (empty string) uses the site's recommendation
    :param page_num: page to start crawling from (adjustable by the caller)
"""
database = Database("localhost", "root", "", "bilibili")
table_name = "micro_video"
classification = []
    # fetch all categories
classification_url = "https://api.vc.bilibili.com/clip/v1/video/zonelist?page=total"
classification_json = url_get(classification_url, "json")
classification_data = dict_get(classification_json, "data")
for i in classification_data:
if classification_data[i] == '':
continue
for j in classification_data[i]['tags']:
classification.append(j)
for tag in classification:
        ps = 50  # page_size; the API allows at most 50
        pn = page_num  # starting page, adjustable by the caller
while True:
next_offset = (pn - 1) * ps
micro_video_url = "https://api.vc.bilibili.com/clip/v1/video/search?" \
"page_size={}&need_playurl=0&next_offset={}&order={}" \
"&tag={}".format(ps, next_offset, order, tag)
micro_video_json = url_get(micro_video_url, "json")
items = dict_get(micro_video_json, "items")
if len(items) == 0:
break
for item in items:
video_info = {"tag": tag}
                video_info['title'] = dict_get(item, "description").replace("\n", "")  # video title
                video_info['video_id'] = dict_get(item, "id")  # video id
                video_info['reply'] = dict_get(item, "reply")  # comment count
                video_info['upload_time'] = dict_get(item, "upload_time")  # upload time
                video_info['video_size'] = round(float(dict_get(item, "video_size")) / 1024**2, 2)  # file size in MB (float)
                video_info['video_time'] = dict_get(item, "video_time")  # duration in seconds
                video_info['video_playurl'] = dict_get(item, "video_playurl")  # playback URL
                video_info['watched_num'] = dict_get(item, "watched_num")  # view count
                video_info['name'] = dict_get(item, "name")  # uploader's username
                video_info['uid'] = dict_get(item, "uid")  # uploader's uid
                # To also download the videos, uncomment the block below
                # video_content = url_get(video_info['video_playurl'], "content")  # fetch the video bytes
                # video_file_name = video_info['title'][:30].replace("/", '').replace("<", '').replace(">", '').replace(
                #     "|", '').replace(":", '').replace("*", '').replace("?", '').replace("\\", '') + ".mp4"  # build the file name
                # # save the video
                # with open(video_file_name, "wb") as video_file:
                #     video_file.write(video_content)
                #     video_file.close()
                # If you do not need the database insert, comment out the block below
if database.execute_sql(table_name=table_name, key="video_id", value=video_info['video_id']) != 0:
print("视频id:{} 重复,跳过".format(video_info['video_id']))
print("-" * 60)
continue
if database.execute_sql(table_name=table_name, mode="insert",
keys=list(video_info.keys()), values=list(video_info.values())):
print("视频标题: {}".format(video_info['title']))
print("视频id: {}".format(video_info['video_id']))
print("视频评论数: {}".format(video_info['reply']))
print("视频上传时间: {}".format(video_info['upload_time']))
print("视频大小(mb): {}".format(video_info['video_size']))
print("视频时长: {}".format(video_info['video_time']))
print("视频播放地址: {}".format(video_info['video_playurl']))
print("视频观看数: {}".format(video_info['watched_num']))
print("上传者用户名: {}".format(video_info['name']))
print("上传者id: {}".format(video_info['uid']))
print("-" * 60)
pn += 1
if __name__ == '__main__':
micro_video_crawler()
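    # Example invocation (illustrative): sort by upload time and start from page 2.
    # micro_video_crawler(order='new', page_num=2)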
|
[
"2692789921@qq.com"
] |
2692789921@qq.com
|
0a8b9ed5854b38b6e4149f77498bb4371228bb32
|
20f836620dc467dc0a9c79716fa278e0dbcda7b2
|
/Coffee_Inspection_Service/Program/Save_Local_to_DB.py
|
8bb5f225f24b183efd2e09c2b9c0becb398181e0
|
[] |
no_license
|
twohlee/coffeeproject
|
1a953d85a382b918dfaaa5784c2e1f42df43f8ad
|
d967eb7203a36fd0370c048ae8a5901b3612e0fc
|
refs/heads/master
| 2022-10-21T08:53:31.487421
| 2020-06-16T00:50:28
| 2020-06-16T00:50:28
| 272,574,509
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 598
|
py
|
import io
import pymongo
import os
import gridfs
from PIL import Image
import numpy as np
import glob
import matplotlib.pyplot as plt
conn = pymongo.MongoClient('127.0.0.1', 27017)
cnt = 0
categories = ['Normal', 'Broken', 'Black']
for idx, category in enumerate(categories):
path = './Data/' + category
db = conn.get_database(category)
files = os.listdir(path)
fs = gridfs.GridFS(db, collection = category)
    for f in files:
        # read each image and store it in GridFS; close the file handle promptly
        with open(path + '/' + f, 'rb') as fp:
            data = fp.read()
        fs.put(data, filename=f)
        cnt += 1
print(cnt)
|
[
"two_h_lee@naver.com"
] |
two_h_lee@naver.com
|
86e39f1afbfd5d93a6cfbe75798e4b3519a0e1b8
|
1f73dacc143cd7417f6b1e6a117ff13ac5c9632b
|
/utilities/pretty-ls/ex.py
|
726c487000694afc38a60af91db9fed8c857c1d1
|
[
"MIT"
] |
permissive
|
BPHays/rc
|
84fb5ee9abcbe96ba9d46f0b550f959f80f8c472
|
fcf9770c6dd69caaaf3d55f446e37fa0183e866b
|
refs/heads/master
| 2021-09-10T23:04:07.583690
| 2018-04-03T21:35:54
| 2018-04-03T21:35:54
| 68,263,895
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,612
|
py
|
#!/usr/bin/python3
#-*- coding: utf-8 -*-
from os import path
import os
import glob
import stat
import sys
import shutil
from grp import getgrgid
from pwd import getpwuid
from optparse import OptionParser
BLUE ="159"
GREEN ="85"
YELLOW ="229"
LTGREY ="252"
DKGREY ="244"
INDIGO ="105"
ORANGE ="216"
RED ="160"
FILE ="FILE"
DIR ="DIR"
SRC ="SRC"
COMPRESS="COMPRESS"
IMG ="IMG"
AUDIO ="AUDIO"
VIDEO ="VIDEO"
TXT ="TXT"
DOTFILE ="DOTFILE"
EXE ="EXE"
COMPILED="COMPILED"
TMP ="TMP"
# Filetype modifiers
LINK = u" \uf0c1 "
# Specific file name desctriptions.
# Format: "NAME": [u"ICON","COLOR CODE"]
FILENAMES = {
"Makefile": [u"", TXT],
"README": [u"\uf128", TXT],
"readme": [u"\uf128", TXT],
"LICENSE": [u"", TXT],
"license": [u"", TXT],
".gitignore": [u"", TXT],
".git": [u"", TXT],
"tags": [u"\uf02c", TXT],
}
# File extension descriptions.
# Format: "EXTENSION": [u"ICON","COLOR CODE"]
EXTENSIONS = [
{
# Generic types
":FILE": [u"", FILE],
":DIRECTORY": [u"", DIR],
":DOTFILE": [u"", DOTFILE],
},
{
# Executables
"out": [u"", EXE],
"": [u"", EXE],
"exe": [u"", EXE],
},
{
# Archives
"7z": [u"", COMPRESS],
"bz": [u"", COMPRESS],
"bz2": [u"", COMPRESS],
"gz": [u"", COMPRESS],
"tar": [u"", COMPRESS],
"xz": [u"", COMPRESS],
"zip": [u"", COMPRESS],
},
{
# Images
"ai": [u"", IMG],
"bmp": [u"", IMG],
"gif": [u"", IMG],
"ico": [u"", IMG],
"jpeg": [u"", IMG],
"jpg": [u"", IMG],
"png": [u"", IMG],
"psb": [u"", IMG],
"psd": [u"", IMG],
"ts": [u"", IMG],
},
{
# Audio
"mp3": [u"", AUDIO],
"wav": [u"", AUDIO],
},
{
# Video
"mkv": [u"", VIDEO],
},
{
# General file formats
# Office
"doc": [u"\uf1c2", TXT],
"docx": [u"\uf1c2", TXT],
"odt": [u"\uf1c2", TXT],
"xls": [u"\uf1c3", TXT],
"xlsx": [u"\uf1c3", TXT],
"ods": [u"\uf1c3", TXT],
"ppt": [u"\uf1c4", TXT],
"pptx": [u"\uf1c4", TXT],
"odp": [u"\uf1c4", TXT],
# Misc
"pdf": [u"\uf1c1", TXT],
"ttf": [u"\uf031", TXT],
},
{
# Temporary Files
"tmp": [u"", TMP],
"swp": [u"", TMP],
},
{
# Simple Text
"csv": [u"", TXT],
"dump": [u"", TXT],
"log": [u"", TXT],
"markdown": [u"", TXT],
"md": [u"", TXT],
"rss": [u"", TXT],
"t": [u"", TXT],
"txt": [u"", TXT],
"conf": [u"", TXT],
},
{
# Compiled Files (but not executable)
"class": [u"", COMPILED],
"o": [u"", COMPILED],
},
{
# Source Code Files
"asm": [u"\uf2db", SRC],
"s": [u"\uf2db", SRC],
"S": [u"\uf2db", SRC],
"bat": [u"", SRC],
"c": [u"", SRC],
"h": [u"\uf1dc", SRC],
"cc": [u"", SRC],
"c++": [u"", SRC],
"cpp": [u"", SRC],
"cxx": [u"", SRC],
"hh": [u"\uf1dc", SRC],
"hpp": [u"\uf1dc", SRC],
"clj": [u"", SRC],
"cljc": [u"", SRC],
"cljs": [u"", SRC],
"coffee": [u"", SRC],
"cp": [u"", SRC],
"css": [u"", SRC],
"d": [u"", SRC],
"dart": [u"", SRC],
"db": [u"", SRC],
"diff": [u"", SRC],
"edn": [u"", SRC],
"ejs": [u"", SRC],
"erl": [u"", SRC],
"f#": [u"", SRC],
"fs": [u"", SRC],
"fsi": [u"", SRC],
"fsscript": [u"", SRC],
"fsx": [u"", SRC],
"go": [u"", SRC],
"hbs": [u"", SRC],
"hrl": [u"", SRC],
"hs": [u"", SRC],
"htm": [u"", SRC],
"html": [u"", SRC],
"ini": [u"", SRC],
"java": [u"", SRC],
"jl": [u"", SRC],
"js": [u"", SRC],
"json": [u"", SRC],
"jsx": [u"", SRC],
"less": [u"", SRC],
"lhs": [u"", SRC],
"lua": [u"", SRC],
"ml": [u"λ", SRC],
"mli": [u"λ", SRC],
"mustache": [u"", SRC],
"php": [u"", SRC],
"pl": [u"", SRC],
"pm": [u"", SRC],
"py": [u"", SRC],
"pyc": [u"", SRC],
"pyd": [u"", SRC],
"pyo": [u"", SRC],
"rb": [u"", SRC],
"rlib": [u"", SRC],
"rs": [u"", SRC],
"scala": [u"", SRC],
"scm": [u"λ", SRC],
"scss": [u"", SRC],
"sh": [u"", SRC],
"csh": [u"", SRC],
"zsh": [u"", SRC],
"fish": [u"", SRC],
"bash": [u"", SRC],
"zsh": [u"", SRC],
"tex": [u"\uf0db", SRC],
"slim": [u"", SRC],
"sln": [u"", SRC],
"sql": [u"", SRC],
"styl": [u"", SRC],
"suo": [u"", SRC],
"twig": [u"", SRC],
"vim": [u"", SRC],
"xul": [u"", SRC],
"yml": [u"", SRC],
}
]
for ext in EXTENSIONS:
for key, value in sorted(ext.items()):
s = "{"
s += "\"{}\"".format(key);
s += ","
for i in range(10 - len(key)):
s += ' '
s += "\"{}\"".format(value[0]);
s += ","
for i in range(7 - len(value[0])):
s += ' '
s += "{}".format(value[1]);
for i in range(10 - len(value[1])):
s += ' '
s += "},"
print(s)
for key, value in sorted(FILENAMES.items()):
s = "{"
s += "\"{}\"".format(key);
s += ","
for i in range(10 - len(key)):
s += ' '
s += "\"{}\"".format(value[0]);
s += ","
for i in range(7 - len(value[0])):
s += ' '
s += "{}".format(value[1]);
for i in range(10 - len(value[1])):
s += ' '
s += "},"
print(s)
|
[
"hays1@purdue.edu"
] |
hays1@purdue.edu
|
82395c5d7d5cf0a545e3a2c1884f6c4488bac288
|
80d50ea48e10674b1b7d3f583a1c4b7d0b01200f
|
/src/datadog_api_client/v1/model/usage_specified_custom_reports_data.py
|
0580d3942865320614173f6c73c13d95102c0df5
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"MPL-2.0"
] |
permissive
|
DataDog/datadog-api-client-python
|
3e01fa630278ad0b5c7005f08b7f61d07aa87345
|
392de360e7de659ee25e4a6753706820ca7c6a92
|
refs/heads/master
| 2023-09-01T20:32:37.718187
| 2023-09-01T14:42:04
| 2023-09-01T14:42:04
| 193,793,657
| 82
| 36
|
Apache-2.0
| 2023-09-14T18:22:39
| 2019-06-25T22:52:04
|
Python
|
UTF-8
|
Python
| false
| false
| 2,197
|
py
|
# Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-Present Datadog, Inc.
from __future__ import annotations
from typing import Union, TYPE_CHECKING
from datadog_api_client.model_utils import (
ModelNormal,
cached_property,
unset,
UnsetType,
)
if TYPE_CHECKING:
from datadog_api_client.v1.model.usage_specified_custom_reports_attributes import (
UsageSpecifiedCustomReportsAttributes,
)
from datadog_api_client.v1.model.usage_reports_type import UsageReportsType
class UsageSpecifiedCustomReportsData(ModelNormal):
@cached_property
def openapi_types(_):
from datadog_api_client.v1.model.usage_specified_custom_reports_attributes import (
UsageSpecifiedCustomReportsAttributes,
)
from datadog_api_client.v1.model.usage_reports_type import UsageReportsType
return {
"attributes": (UsageSpecifiedCustomReportsAttributes,),
"id": (str,),
"type": (UsageReportsType,),
}
attribute_map = {
"attributes": "attributes",
"id": "id",
"type": "type",
}
def __init__(
self_,
attributes: Union[UsageSpecifiedCustomReportsAttributes, UnsetType] = unset,
id: Union[str, UnsetType] = unset,
type: Union[UsageReportsType, UnsetType] = unset,
**kwargs,
):
"""
Response containing date and type for specified custom reports.
:param attributes: The response containing attributes for specified custom reports.
:type attributes: UsageSpecifiedCustomReportsAttributes, optional
:param id: The date for specified custom reports.
:type id: str, optional
:param type: The type of reports.
:type type: UsageReportsType, optional
"""
if attributes is not unset:
kwargs["attributes"] = attributes
if id is not unset:
kwargs["id"] = id
if type is not unset:
kwargs["type"] = type
super().__init__(kwargs)
|
[
"noreply@github.com"
] |
DataDog.noreply@github.com
|
97a1861384689b10d546eb6520eb506aea91e34b
|
2b6116b967f6b02a6c62392058623ba8824f5ee2
|
/deal/migrations/0063_historicalclient_sip_id.py
|
dd3ea4ee08b4ad08514c6270982ab3819aca3a4f
|
[] |
no_license
|
tayursky/med-crm
|
68a16d771a91a9a5ff3e61acd00c08ad6297c405
|
8e39904968a8217b9cd4593acc3afa27ff4584ba
|
refs/heads/master
| 2023-01-11T08:28:23.762631
| 2020-03-15T20:53:59
| 2020-03-15T20:53:59
| 247,546,343
| 0
| 0
| null | 2023-01-06T02:27:23
| 2020-03-15T20:30:05
|
Python
|
UTF-8
|
Python
| false
| false
| 462
|
py
|
# Generated by Django 2.2.1 on 2019-10-11 15:27
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('deal', '0062_auto_20190928_1432'),
]
operations = [
migrations.AddField(
model_name='historicalclient',
name='sip_id',
field=models.CharField(blank=True, max_length=128, null=True, verbose_name='SIP идентификатор'),
),
]
|
[
"tayursky@gmail.com"
] |
tayursky@gmail.com
|
63e7252b39dd8d37f305c7b9c0f27529768db58c
|
9f1039075cc611198a988034429afed6ec6d7408
|
/tensorflow-stubs/feature_column/__init__.pyi
|
08e5b749b55c600ad152fbe19fe1ec44476eaff6
|
[] |
no_license
|
matangover/tensorflow-stubs
|
9422fbb1cb3a3638958d621461291c315f9c6ec2
|
664bd995ef24f05ba2b3867d979d23ee845cb652
|
refs/heads/master
| 2020-05-23T12:03:40.996675
| 2019-05-15T06:21:43
| 2019-05-15T06:21:43
| 186,748,093
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 909
|
pyi
|
# Stubs for tensorflow.feature_column (Python 3)
#
# NOTE: This dynamically typed stub was automatically generated by stubgen.
from tensorflow.python.feature_column.feature_column import bucketized_column as bucketized_column, categorical_column_with_hash_bucket as categorical_column_with_hash_bucket, categorical_column_with_identity as categorical_column_with_identity, categorical_column_with_vocabulary_file as categorical_column_with_vocabulary_file, categorical_column_with_vocabulary_list as categorical_column_with_vocabulary_list, crossed_column as crossed_column, embedding_column as embedding_column, indicator_column as indicator_column, input_layer as input_layer, linear_model as linear_model, make_parse_example_spec as make_parse_example_spec, numeric_column as numeric_column, shared_embedding_columns as shared_embedding_columns, weighted_categorical_column as weighted_categorical_column
|
[
"matangover@gmail.com"
] |
matangover@gmail.com
|
af660086772dd3ed9174234c1c8fba29d88bfd79
|
0b86600e0288c0fefc081a0f428277a68b14882e
|
/code/tortue/tortue_code_1_1.py
|
0ee7c29c08f94de7e601d2d1a259e7798b140009
|
[] |
no_license
|
Byliguel/python1-exo7
|
9ede37a8d2b8f384d1ebe3d612e8c25bbe47a350
|
fbf6b08f4c1e94dd9f170875eee871a84849399e
|
refs/heads/master
| 2020-09-22T10:16:34.044141
| 2019-12-01T11:52:51
| 2019-12-01T11:52:51
| 225,152,986
| 1
| 0
| null | 2019-12-01T11:51:37
| 2019-12-01T11:51:36
| null |
UTF-8
|
Python
| false
| false
| 246
|
py
|
# Start of the code
from turtle import *

width(5)  # line thickness

# Letter "P"
color('red')
left(90)  # turn 90 degrees to the left
forward(200)  # move forward
right(90)
forward(100)
right(90)
forward(100)
right(90)
forward(100)
up()
|
[
"arnaud.bodin@math.univ-lille1.fr"
] |
arnaud.bodin@math.univ-lille1.fr
|
8076aec57183287e6239dd2a6e1102b8731398df
|
a3cb1eacfb827e50098995362e84efa6b0f85988
|
/23-Classes-Attributes-and-Methods/protected-attributes-and-methods.py
|
ed1f61abacd8ab8da76fe3f291708d4b06b69c7c
|
[] |
no_license
|
zamudio/learning-python
|
91e940660e58c8f663cdacfdd029a351b151940c
|
402886b858f23d6000c59d4fd3ce7cad109d286d
|
refs/heads/master
| 2022-04-16T04:01:42.403051
| 2020-04-06T07:09:08
| 2020-04-06T07:09:08
| 250,422,168
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 299
|
py
|
class SmartPhone():
def __init__(self):
self._company = 'Apple'
self._firmware = 10.0
def get_os_version(self):
return self._firmware
def update_firmware(self):
self._firmware += 1
iphone = SmartPhone()
# underscore means protected, no touchy touchy
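
# Quick demonstration (illustrative): the underscore is only a convention,
# Python does not actually block access to "protected" members.
print(iphone.get_os_version())   # 10.0
iphone.update_firmware()
print(iphone.get_os_version())   # 11.0
print(iphone._firmware)          # still reachable: 11.0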
|
[
"github email address"
] |
github email address
|
a39fbaf80501c5b6a6fa69a9bde5bc2ad5dcb2b6
|
7cd9220fdaa5fb7ae9892b477b8600567c92b8e7
|
/cortical_layers/__init__.py
|
62ff347f97b95319a30278d72dbaa34d6dbf054a
|
[] |
no_license
|
AlexTMallen/cortical-layers
|
2264b3124d3bc97ebd63bfa2f43632f641547cd9
|
e50631694660ddfb97e62e688ff3d87646218b48
|
refs/heads/main
| 2023-07-14T13:22:57.292063
| 2021-08-27T21:33:30
| 2021-08-27T21:33:30
| 386,097,463
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 51
|
py
|
__version__ = "0.1.0"
from . import LayerPredictor
|
[
"atmallen8@gmail.com"
] |
atmallen8@gmail.com
|
942c3fc4673eaafa403ee4cd88c000fc695d1ee0
|
ae46919dc6d42ac7c198a25fc54cec3363fd3d02
|
/lib/galaxy/jobs/runners/pulsar.py
|
dc4b6e5f1bb04ca6a83f86b24beae8895c0d6816
|
[
"CC-BY-2.5",
"AFL-2.1",
"AFL-3.0",
"CC-BY-3.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
msGenDev/galaxy-central
|
4720bce7d2108bdbb31d39908d3ba362bf36a5bb
|
8db0963005f7f6e8ca3e3aca8ecc0be172a26957
|
refs/heads/master
| 2021-01-24T17:14:45.285919
| 2014-08-20T21:51:49
| 2014-08-20T21:51:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 31,898
|
py
|
from __future__ import absolute_import # Need to import pulsar_client absolutely.
import logging
from galaxy import model
from galaxy.jobs.runners import AsynchronousJobState, AsynchronousJobRunner
from galaxy.jobs import ComputeEnvironment
from galaxy.jobs import JobDestination
from galaxy.jobs.command_factory import build_command
from galaxy.tools.deps import dependencies
from galaxy.util import string_as_bool_or_none
from galaxy.util.bunch import Bunch
from galaxy.util import specs
import errno
from time import sleep
import os
from pulsar.client import build_client_manager
from pulsar.client import url_to_destination_params
from pulsar.client import finish_job as pulsar_finish_job
from pulsar.client import submit_job as pulsar_submit_job
from pulsar.client import ClientJobDescription
from pulsar.client import PulsarOutputs
from pulsar.client import ClientOutputs
from pulsar.client import PathMapper
log = logging.getLogger( __name__ )
__all__ = [ 'PulsarLegacyJobRunner', 'PulsarRESTJobRunner', 'PulsarMQJobRunner' ]
NO_REMOTE_GALAXY_FOR_METADATA_MESSAGE = "Pulsar misconfiguration - Pulsar client configured to set metadata remotely, but remote Pulsar isn't properly configured with a galaxy_home directory."
NO_REMOTE_DATATYPES_CONFIG = "Pulsar client is configured to use remote datatypes configuration when setting metadata externally, but Pulsar is not configured with this information. Defaulting to datatypes_conf.xml."
GENERIC_REMOTE_ERROR = "Failed to communicate with remote job server."
# Is there a good way to infer some default for this? Can only use
# url_for from web threads. https://gist.github.com/jmchilton/9098762
DEFAULT_GALAXY_URL = "http://localhost:8080"
PULSAR_PARAM_SPECS = dict(
transport=dict(
map=specs.to_str_or_none,
valid=specs.is_in("urllib", "curl", None),
default=None
),
cache=dict(
map=specs.to_bool_or_none,
default=None,
),
amqp_url=dict(
map=specs.to_str_or_none,
default=None,
),
galaxy_url=dict(
map=specs.to_str_or_none,
default=DEFAULT_GALAXY_URL,
),
manager=dict(
map=specs.to_str_or_none,
default=None,
),
amqp_consumer_timeout=dict(
map=lambda val: None if val == "None" else float(val),
default=None,
),
amqp_connect_ssl_ca_certs=dict(
map=specs.to_str_or_none,
default=None,
),
amqp_connect_ssl_keyfile=dict(
map=specs.to_str_or_none,
default=None,
),
amqp_connect_ssl_certfile=dict(
map=specs.to_str_or_none,
default=None,
),
amqp_connect_ssl_cert_reqs=dict(
map=specs.to_str_or_none,
default=None,
),
# http://kombu.readthedocs.org/en/latest/reference/kombu.html#kombu.Producer.publish
amqp_publish_retry=dict(
map=specs.to_bool,
default=False,
),
amqp_publish_priority=dict(
map=int,
valid=lambda x: 0 <= x and x <= 9,
default=0,
),
# http://kombu.readthedocs.org/en/latest/reference/kombu.html#kombu.Exchange.delivery_mode
amqp_publish_delivery_mode=dict(
map=str,
valid=specs.is_in("transient", "persistent"),
default="persistent",
),
amqp_publish_retry_max_retries=dict(
map=int,
default=None,
),
amqp_publish_retry_interval_start=dict(
map=int,
default=None,
),
amqp_publish_retry_interval_step=dict(
map=int,
default=None,
),
amqp_publish_retry_interval_max=dict(
map=int,
default=None,
),
)
PARAMETER_SPECIFICATION_REQUIRED = object()
PARAMETER_SPECIFICATION_IGNORED = object()
class PulsarJobRunner( AsynchronousJobRunner ):
"""
Pulsar Job Runner
"""
runner_name = "PulsarJobRunner"
def __init__( self, app, nworkers, **kwds ):
"""Start the job runner """
super( PulsarJobRunner, self ).__init__( app, nworkers, runner_param_specs=PULSAR_PARAM_SPECS, **kwds )
self._init_worker_threads()
galaxy_url = self.runner_params.galaxy_url
if galaxy_url:
galaxy_url = galaxy_url.rstrip("/")
self.galaxy_url = galaxy_url
self.__init_client_manager()
self._monitor()
def _monitor( self ):
# Extension point allow MQ variant to setup callback instead
self._init_monitor_thread()
def __init_client_manager( self ):
client_manager_kwargs = {}
for kwd in 'manager', 'cache', 'transport':
client_manager_kwargs[ kwd ] = self.runner_params[ kwd ]
for kwd in self.runner_params.keys():
if kwd.startswith( 'amqp_' ):
client_manager_kwargs[ kwd ] = self.runner_params[ kwd ]
self.client_manager = build_client_manager(**client_manager_kwargs)
def url_to_destination( self, url ):
"""Convert a legacy URL to a job destination"""
return JobDestination( runner="pulsar", params=url_to_destination_params( url ) )
def check_watched_item(self, job_state):
try:
client = self.get_client_from_state(job_state)
status = client.get_status()
except Exception:
# An orphaned job was put into the queue at app startup, so remote server went down
# either way we are done I guess.
self.mark_as_finished(job_state)
return None
job_state = self._update_job_state_for_status(job_state, status)
return job_state
def _update_job_state_for_status(self, job_state, pulsar_status):
if pulsar_status == "complete":
self.mark_as_finished(job_state)
return None
if pulsar_status == "failed":
self.fail_job(job_state)
return None
if pulsar_status == "running" and not job_state.running:
job_state.running = True
job_state.job_wrapper.change_state( model.Job.states.RUNNING )
return job_state
def queue_job(self, job_wrapper):
job_destination = job_wrapper.job_destination
self._populate_parameter_defaults( job_destination )
command_line, client, remote_job_config, compute_environment = self.__prepare_job( job_wrapper, job_destination )
if not command_line:
return
try:
dependencies_description = PulsarJobRunner.__dependencies_description( client, job_wrapper )
rewrite_paths = not PulsarJobRunner.__rewrite_parameters( client )
unstructured_path_rewrites = {}
if compute_environment:
unstructured_path_rewrites = compute_environment.unstructured_path_rewrites
client_job_description = ClientJobDescription(
command_line=command_line,
input_files=self.get_input_files(job_wrapper),
client_outputs=self.__client_outputs(client, job_wrapper),
working_directory=job_wrapper.working_directory,
tool=job_wrapper.tool,
config_files=job_wrapper.extra_filenames,
dependencies_description=dependencies_description,
env=client.env,
rewrite_paths=rewrite_paths,
arbitrary_files=unstructured_path_rewrites,
)
job_id = pulsar_submit_job(client, client_job_description, remote_job_config)
log.info("Pulsar job submitted with job_id %s" % job_id)
job_wrapper.set_job_destination( job_destination, job_id )
job_wrapper.change_state( model.Job.states.QUEUED )
except Exception:
job_wrapper.fail( "failure running job", exception=True )
log.exception("failure running job %d" % job_wrapper.job_id)
return
pulsar_job_state = AsynchronousJobState()
pulsar_job_state.job_wrapper = job_wrapper
pulsar_job_state.job_id = job_id
pulsar_job_state.old_state = True
pulsar_job_state.running = False
pulsar_job_state.job_destination = job_destination
self.monitor_job(pulsar_job_state)
def __prepare_job(self, job_wrapper, job_destination):
""" Build command-line and Pulsar client for this job. """
command_line = None
client = None
remote_job_config = None
compute_environment = None
try:
client = self.get_client_from_wrapper(job_wrapper)
tool = job_wrapper.tool
remote_job_config = client.setup(tool.id, tool.version)
rewrite_parameters = PulsarJobRunner.__rewrite_parameters( client )
prepare_kwds = {}
if rewrite_parameters:
compute_environment = PulsarComputeEnvironment( client, job_wrapper, remote_job_config )
prepare_kwds[ 'compute_environment' ] = compute_environment
job_wrapper.prepare( **prepare_kwds )
self.__prepare_input_files_locally(job_wrapper)
remote_metadata = PulsarJobRunner.__remote_metadata( client )
dependency_resolution = PulsarJobRunner.__dependency_resolution( client )
metadata_kwds = self.__build_metadata_configuration(client, job_wrapper, remote_metadata, remote_job_config)
remote_command_params = dict(
working_directory=remote_job_config['working_directory'],
metadata_kwds=metadata_kwds,
dependency_resolution=dependency_resolution,
)
remote_working_directory = remote_job_config['working_directory']
# TODO: Following defs work for Pulsar, always worked for Pulsar but should be
# calculated at some other level.
remote_job_directory = os.path.abspath(os.path.join(remote_working_directory, os.path.pardir))
remote_tool_directory = os.path.abspath(os.path.join(remote_job_directory, "tool_files"))
container = self._find_container(
job_wrapper,
compute_working_directory=remote_working_directory,
compute_tool_directory=remote_tool_directory,
compute_job_directory=remote_job_directory,
)
command_line = build_command(
self,
job_wrapper=job_wrapper,
container=container,
include_metadata=remote_metadata,
include_work_dir_outputs=False,
remote_command_params=remote_command_params,
)
except Exception:
job_wrapper.fail( "failure preparing job", exception=True )
log.exception("failure running job %d" % job_wrapper.job_id)
# If we were able to get a command line, run the job
if not command_line:
job_wrapper.finish( '', '' )
return command_line, client, remote_job_config, compute_environment
def __prepare_input_files_locally(self, job_wrapper):
"""Run task splitting commands locally."""
prepare_input_files_cmds = getattr(job_wrapper, 'prepare_input_files_cmds', None)
if prepare_input_files_cmds is not None:
for cmd in prepare_input_files_cmds: # run the commands to stage the input files
if 0 != os.system(cmd):
raise Exception('Error running file staging command: %s' % cmd)
job_wrapper.prepare_input_files_cmds = None # prevent them from being used in-line
def _populate_parameter_defaults( self, job_destination ):
updated = False
params = job_destination.params
for key, value in self.destination_defaults.iteritems():
if key in params:
if value is PARAMETER_SPECIFICATION_IGNORED:
log.warn( "Pulsar runner in selected configuration ignores parameter %s" % key )
continue
#if self.runner_params.get( key, None ):
# # Let plugin define defaults for some parameters -
# # for instance that way jobs_directory can be
# # configured next to AMQP url (where it belongs).
# params[ key ] = self.runner_params[ key ]
# continue
if not value:
continue
if value is PARAMETER_SPECIFICATION_REQUIRED:
raise Exception( "Pulsar destination does not define required parameter %s" % key )
elif value is not PARAMETER_SPECIFICATION_IGNORED:
params[ key ] = value
updated = True
return updated
def get_output_files(self, job_wrapper):
output_paths = job_wrapper.get_output_fnames()
return [ str( o ) for o in output_paths ] # Force job_path from DatasetPath objects.
def get_input_files(self, job_wrapper):
input_paths = job_wrapper.get_input_paths()
return [ str( i ) for i in input_paths ] # Force job_path from DatasetPath objects.
def get_client_from_wrapper(self, job_wrapper):
job_id = job_wrapper.job_id
if hasattr(job_wrapper, 'task_id'):
job_id = "%s_%s" % (job_id, job_wrapper.task_id)
params = job_wrapper.job_destination.params.copy()
for key, value in params.iteritems():
if value:
params[key] = model.User.expand_user_properties( job_wrapper.get_job().user, value )
env = getattr( job_wrapper.job_destination, "env", [] )
return self.get_client( params, job_id, env )
def get_client_from_state(self, job_state):
job_destination_params = job_state.job_destination.params
job_id = job_state.job_id
return self.get_client( job_destination_params, job_id )
def get_client( self, job_destination_params, job_id, env=[] ):
# Cannot use url_for outside of web thread.
#files_endpoint = url_for( controller="job_files", job_id=encoded_job_id )
encoded_job_id = self.app.security.encode_id(job_id)
job_key = self.app.security.encode_id( job_id, kind="jobs_files" )
files_endpoint = "%s/api/jobs/%s/files?job_key=%s" % (
self.galaxy_url,
encoded_job_id,
job_key
)
get_client_kwds = dict(
job_id=str( job_id ),
files_endpoint=files_endpoint,
env=env
)
return self.client_manager.get_client( job_destination_params, **get_client_kwds )
def finish_job( self, job_state ):
stderr = stdout = ''
job_wrapper = job_state.job_wrapper
try:
client = self.get_client_from_state(job_state)
run_results = client.full_status()
remote_working_directory = run_results.get("working_directory", None)
stdout = run_results.get('stdout', '')
stderr = run_results.get('stderr', '')
exit_code = run_results.get('returncode', None)
pulsar_outputs = PulsarOutputs.from_status_response(run_results)
# Use Pulsar client code to transfer/copy files back
# and cleanup job if needed.
completed_normally = \
job_wrapper.get_state() not in [ model.Job.states.ERROR, model.Job.states.DELETED ]
cleanup_job = self.app.config.cleanup_job
client_outputs = self.__client_outputs(client, job_wrapper)
finish_args = dict( client=client,
job_completed_normally=completed_normally,
cleanup_job=cleanup_job,
client_outputs=client_outputs,
pulsar_outputs=pulsar_outputs )
failed = pulsar_finish_job( **finish_args )
if failed:
job_wrapper.fail("Failed to find or download one or more job outputs from remote server.", exception=True)
except Exception:
message = GENERIC_REMOTE_ERROR
job_wrapper.fail( message, exception=True )
log.exception("failure finishing job %d" % job_wrapper.job_id)
return
if not PulsarJobRunner.__remote_metadata( client ):
self._handle_metadata_externally( job_wrapper, resolve_requirements=True )
# Finish the job
try:
job_wrapper.finish(
stdout,
stderr,
exit_code,
remote_working_directory=remote_working_directory
)
except Exception:
log.exception("Job wrapper finish method failed")
job_wrapper.fail("Unable to finish job", exception=True)
def fail_job( self, job_state ):
"""
        Separated out so we can use the worker threads for it.
"""
self.stop_job( self.sa_session.query( self.app.model.Job ).get( job_state.job_wrapper.job_id ) )
job_state.job_wrapper.fail( getattr( job_state, "fail_message", GENERIC_REMOTE_ERROR ) )
def check_pid( self, pid ):
try:
os.kill( pid, 0 )
return True
except OSError, e:
if e.errno == errno.ESRCH:
log.debug( "check_pid(): PID %d is dead" % pid )
else:
log.warning( "check_pid(): Got errno %s when attempting to check PID %d: %s" % ( errno.errorcode[e.errno], pid, e.strerror ) )
return False
def stop_job( self, job ):
#if our local job has JobExternalOutputMetadata associated, then our primary job has to have already finished
job_ext_output_metadata = job.get_external_output_metadata()
if job_ext_output_metadata:
pid = job_ext_output_metadata[0].job_runner_external_pid # every JobExternalOutputMetadata has a pid set, we just need to take from one of them
if pid in [ None, '' ]:
log.warning( "stop_job(): %s: no PID in database for job, unable to stop" % job.id )
return
pid = int( pid )
if not self.check_pid( pid ):
log.warning( "stop_job(): %s: PID %d was already dead or can't be signaled" % ( job.id, pid ) )
return
for sig in [ 15, 9 ]:
try:
os.killpg( pid, sig )
except OSError, e:
log.warning( "stop_job(): %s: Got errno %s when attempting to signal %d to PID %d: %s" % ( job.id, errno.errorcode[e.errno], sig, pid, e.strerror ) )
return # give up
sleep( 2 )
if not self.check_pid( pid ):
log.debug( "stop_job(): %s: PID %d successfully killed with signal %d" % ( job.id, pid, sig ) )
return
else:
log.warning( "stop_job(): %s: PID %d refuses to die after signaling TERM/KILL" % ( job.id, pid ) )
else:
# Remote kill
pulsar_url = job.job_runner_name
job_id = job.job_runner_external_id
log.debug("Attempt remote Pulsar kill of job with url %s and id %s" % (pulsar_url, job_id))
client = self.get_client(job.destination_params, job_id)
client.kill()
def recover( self, job, job_wrapper ):
"""Recovers jobs stuck in the queued/running state when Galaxy started"""
job_state = self._job_state( job, job_wrapper )
job_wrapper.command_line = job.get_command_line()
state = job.get_state()
if state in [model.Job.states.RUNNING, model.Job.states.QUEUED]:
log.debug( "(Pulsar/%s) is still in running state, adding to the Pulsar queue" % ( job.get_id()) )
job_state.old_state = True
job_state.running = state == model.Job.states.RUNNING
self.monitor_queue.put( job_state )
def shutdown( self ):
super( PulsarJobRunner, self ).shutdown()
self.client_manager.shutdown()
def _job_state( self, job, job_wrapper ):
job_state = AsynchronousJobState()
# TODO: Determine why this is set when using normal message queue updates
# but not CLI submitted MQ updates...
raw_job_id = job.get_job_runner_external_id() or job_wrapper.job_id
job_state.job_id = str( raw_job_id )
job_state.runner_url = job_wrapper.get_job_runner_url()
job_state.job_destination = job_wrapper.job_destination
job_state.job_wrapper = job_wrapper
return job_state
def __client_outputs( self, client, job_wrapper ):
work_dir_outputs = self.get_work_dir_outputs( job_wrapper )
output_files = self.get_output_files( job_wrapper )
client_outputs = ClientOutputs(
working_directory=job_wrapper.working_directory,
work_dir_outputs=work_dir_outputs,
output_files=output_files,
version_file=job_wrapper.get_version_string_path(),
)
return client_outputs
@staticmethod
def __dependencies_description( pulsar_client, job_wrapper ):
dependency_resolution = PulsarJobRunner.__dependency_resolution( pulsar_client )
remote_dependency_resolution = dependency_resolution == "remote"
if not remote_dependency_resolution:
return None
requirements = job_wrapper.tool.requirements or []
installed_tool_dependencies = job_wrapper.tool.installed_tool_dependencies or []
return dependencies.DependenciesDescription(
requirements=requirements,
installed_tool_dependencies=installed_tool_dependencies,
)
@staticmethod
def __dependency_resolution( pulsar_client ):
dependency_resolution = pulsar_client.destination_params.get( "dependency_resolution", "local" )
if dependency_resolution not in ["none", "local", "remote"]:
raise Exception("Unknown dependency_resolution value encountered %s" % dependency_resolution)
return dependency_resolution
@staticmethod
def __remote_metadata( pulsar_client ):
remote_metadata = string_as_bool_or_none( pulsar_client.destination_params.get( "remote_metadata", False ) )
return remote_metadata
@staticmethod
def __use_remote_datatypes_conf( pulsar_client ):
""" When setting remote metadata, use integrated datatypes from this
Galaxy instance or use the datatypes config configured via the remote
Pulsar.
        Both options are broken in different ways for the same reason - datatypes
        may not match. One can push the local datatypes config to the remote
        server - but there is no guarantee these datatypes will be defined
        there. Alternatively, one can use the remote datatype config - but
        there is no guarantee that it will contain all the datatypes available
to this Galaxy.
"""
use_remote_datatypes = string_as_bool_or_none( pulsar_client.destination_params.get( "use_remote_datatypes", False ) )
return use_remote_datatypes
@staticmethod
def __rewrite_parameters( pulsar_client ):
return string_as_bool_or_none( pulsar_client.destination_params.get( "rewrite_parameters", False ) ) or False
def __build_metadata_configuration(self, client, job_wrapper, remote_metadata, remote_job_config):
metadata_kwds = {}
if remote_metadata:
remote_system_properties = remote_job_config.get("system_properties", {})
remote_galaxy_home = remote_system_properties.get("galaxy_home", None)
if not remote_galaxy_home:
raise Exception(NO_REMOTE_GALAXY_FOR_METADATA_MESSAGE)
metadata_kwds['exec_dir'] = remote_galaxy_home
outputs_directory = remote_job_config['outputs_directory']
configs_directory = remote_job_config['configs_directory']
working_directory = remote_job_config['working_directory']
            # For metadata calculation, we need to build a list of output
# file objects with real path indicating location on Galaxy server
# and false path indicating location on compute server. Since the
# Pulsar disables from_work_dir copying as part of the job command
# line we need to take the list of output locations on the Pulsar
# server (produced by self.get_output_files(job_wrapper)) and for
# each work_dir output substitute the effective path on the Pulsar
# server relative to the remote working directory as the
# false_path to send the metadata command generation module.
work_dir_outputs = self.get_work_dir_outputs(job_wrapper, job_working_directory=working_directory)
outputs = [Bunch(false_path=os.path.join(outputs_directory, os.path.basename(path)), real_path=path) for path in self.get_output_files(job_wrapper)]
for output in outputs:
for pulsar_workdir_path, real_path in work_dir_outputs:
if real_path == output.real_path:
output.false_path = pulsar_workdir_path
metadata_kwds['output_fnames'] = outputs
metadata_kwds['compute_tmp_dir'] = working_directory
metadata_kwds['config_root'] = remote_galaxy_home
default_config_file = os.path.join(remote_galaxy_home, 'universe_wsgi.ini')
metadata_kwds['config_file'] = remote_system_properties.get('galaxy_config_file', default_config_file)
metadata_kwds['dataset_files_path'] = remote_system_properties.get('galaxy_dataset_files_path', None)
if PulsarJobRunner.__use_remote_datatypes_conf( client ):
remote_datatypes_config = remote_system_properties.get('galaxy_datatypes_config_file', None)
if not remote_datatypes_config:
log.warn(NO_REMOTE_DATATYPES_CONFIG)
remote_datatypes_config = os.path.join(remote_galaxy_home, 'datatypes_conf.xml')
metadata_kwds['datatypes_config'] = remote_datatypes_config
else:
integrates_datatypes_config = self.app.datatypes_registry.integrated_datatypes_configs
# Ensure this file gets pushed out to the remote config dir.
job_wrapper.extra_filenames.append(integrates_datatypes_config)
metadata_kwds['datatypes_config'] = os.path.join(configs_directory, os.path.basename(integrates_datatypes_config))
return metadata_kwds
class PulsarLegacyJobRunner( PulsarJobRunner ):
destination_defaults = dict(
rewrite_parameters="false",
dependency_resolution="local",
)
class PulsarMQJobRunner( PulsarJobRunner ):
destination_defaults = dict(
default_file_action="remote_transfer",
rewrite_parameters="true",
dependency_resolution="remote",
jobs_directory=PARAMETER_SPECIFICATION_REQUIRED,
url=PARAMETER_SPECIFICATION_IGNORED,
private_token=PARAMETER_SPECIFICATION_IGNORED
)
def _monitor( self ):
# This is a message queue driven runner, don't monitor
# just setup required callback.
self.client_manager.ensure_has_status_update_callback(self.__async_update)
def __async_update( self, full_status ):
job_id = None
try:
job_id = full_status[ "job_id" ]
job, job_wrapper = self.app.job_manager.job_handler.job_queue.job_pair_for_id( job_id )
job_state = self._job_state( job, job_wrapper )
self._update_job_state_for_status(job_state, full_status[ "status" ] )
except Exception:
log.exception( "Failed to update Pulsar job status for job_id %s" % job_id )
raise
# Nothing else to do? - Attempt to fail the job?
class PulsarRESTJobRunner( PulsarJobRunner ):
destination_defaults = dict(
default_file_action="transfer",
rewrite_parameters="true",
dependency_resolution="remote",
url=PARAMETER_SPECIFICATION_REQUIRED,
)
class PulsarComputeEnvironment( ComputeEnvironment ):
def __init__( self, pulsar_client, job_wrapper, remote_job_config ):
self.pulsar_client = pulsar_client
self.job_wrapper = job_wrapper
self.local_path_config = job_wrapper.default_compute_environment()
self.unstructured_path_rewrites = {}
# job_wrapper.prepare is going to expunge the job backing the following
# computations, so precalculate these paths.
self._wrapper_input_paths = self.local_path_config.input_paths()
self._wrapper_output_paths = self.local_path_config.output_paths()
self.path_mapper = PathMapper(pulsar_client, remote_job_config, self.local_path_config.working_directory())
self._config_directory = remote_job_config[ "configs_directory" ]
self._working_directory = remote_job_config[ "working_directory" ]
self._sep = remote_job_config[ "system_properties" ][ "separator" ]
self._tool_dir = remote_job_config[ "tools_directory" ]
version_path = self.local_path_config.version_path()
new_version_path = self.path_mapper.remote_version_path_rewrite(version_path)
if new_version_path:
version_path = new_version_path
self._version_path = version_path
def output_paths( self ):
local_output_paths = self._wrapper_output_paths
results = []
for local_output_path in local_output_paths:
wrapper_path = str( local_output_path )
remote_path = self.path_mapper.remote_output_path_rewrite( wrapper_path )
results.append( self._dataset_path( local_output_path, remote_path ) )
return results
def input_paths( self ):
local_input_paths = self._wrapper_input_paths
results = []
for local_input_path in local_input_paths:
wrapper_path = str( local_input_path )
# This will over-copy in some cases. For instance in the case of task
# splitting, this input will be copied even though only the work dir
# input will actually be used.
remote_path = self.path_mapper.remote_input_path_rewrite( wrapper_path )
results.append( self._dataset_path( local_input_path, remote_path ) )
return results
def _dataset_path( self, local_dataset_path, remote_path ):
remote_extra_files_path = None
if remote_path:
remote_extra_files_path = "%s_files" % remote_path[ 0:-len( ".dat" ) ]
return local_dataset_path.with_path_for_job( remote_path, remote_extra_files_path )
def working_directory( self ):
return self._working_directory
def config_directory( self ):
return self._config_directory
def new_file_path( self ):
return self.working_directory() # Problems with doing this?
def sep( self ):
return self._sep
def version_path( self ):
return self._version_path
def rewriter( self, parameter_value ):
unstructured_path_rewrites = self.unstructured_path_rewrites
if parameter_value in unstructured_path_rewrites:
# Path previously mapped, use previous mapping.
return unstructured_path_rewrites[ parameter_value ]
if parameter_value in unstructured_path_rewrites.itervalues():
# Path is a rewritten remote path (this might never occur,
# consider dropping check...)
return parameter_value
rewrite, new_unstructured_path_rewrites = self.path_mapper.check_for_arbitrary_rewrite( parameter_value )
if rewrite:
unstructured_path_rewrites.update(new_unstructured_path_rewrites)
return rewrite
else:
            # No rewrite needed, use the original path or value.
return parameter_value
def unstructured_path_rewriter( self ):
return self.rewriter
|
[
"jmchilton@gmail.com"
] |
jmchilton@gmail.com
|
d55c901156848ed43ca51c04d3a5862ab4803e53
|
9eacea0d83735b18f3698c371045ce778dfac518
|
/Code/max_entropy.py
|
8c81b56aaf823f8fd4adf790be04061d5f814c23
|
[] |
no_license
|
zgood9527/Basic4AI
|
ef235d98cae39951da0dc89b64d72fab711b5ee2
|
cc9520554682172ba690cbcf517ac8fc5ec180b0
|
refs/heads/master
| 2023-02-21T05:28:12.035593
| 2021-01-12T13:39:49
| 2021-01-12T13:39:49
| 329,815,142
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,136
|
py
|
'''
Maximum entropy model.
Mainly follows Li Hang's "Statistical Learning Methods" and https://www.pkudodo.com/2018/12/05/1-7/
The code is largely copied from https://www.pkudodo.com/2018/12/05/1-7/; many names have not yet been renamed to a uniform style.
'''
import time
import numpy as np
from collections import defaultdict
from sklearn.datasets import load_digits
from tqdm import tqdm
def load_data():
'''
    Load the handwritten-digit dataset bundled with sklearn.
    Returns inputs and targets.
'''
digits = load_digits()
xs = digits.data.tolist()
ys = (digits.target > 4).astype(int).tolist()
return xs, ys
class MaxEntropy:
'''
    Maximum entropy classifier
    '''

    def __init__(self, train_xs, train_ys, test_xs, test_ys):
        '''
        Initialise all parameters
        '''
        self.train_xs = train_xs    # training inputs
        self.train_ys = train_ys    # training labels
        self.test_xs = test_xs      # test inputs
        self.test_ys = test_ys      # test labels
        self.class_count = len(set(self.test_ys))   # number of distinct label values
        self.m = len(train_xs[0])   # number of raw input features (distinct from the number of feature functions)
        self.N = len(train_xs)      # number of training samples
        self.features, self.feature_count = self.get_features()    # all features and the number of feature functions
        self.M = self.m             # assume the sum of all feature functions is constant for any sample, which simplifies IIS
        self.w = [0] * self.feature_count   # weights of all feature functions
        self.xy2id, self.id2xy = self.createSearchDict()   # dictionaries mapping feature -> id and id -> feature
        self.Ep_xy = self.get_Ep_xy()   # expectation of the feature functions f(x, y) w.r.t. the empirical distribution P_(x, y)
def get_Epxy(self):
'''
        Compute the expectation of the feature functions f(x, y) w.r.t. the model P(Y|X)
        and the empirical distribution P_(X, Y), i.e. the second expectation in
        section 6.2.2 of the book (top of page 83).
        :return: the list of expectations
        '''
        # One expectation per (x, y) pair. Here x is a single feature value, not a whole
        # sample: for a sample x = {x1, x2, ..., xk} the pairs are (x1, y), (x2, y), ...
        # Pairs belonging to different features are stored separately so they do not mix;
        # the book keeps the formulas generic and does not spell this out
        # (see www.pkudodo.com for a detailed walk-through).
        Epxy = [0] * self.feature_count
        # Iterate over every training sample
        for i in range(self.N):
            # P(y|x) for this sample, as used in the formula
Pwxy = self.calcPwy_x(self.train_xs[i])
for feature in range(self.m):
for y in range(self.class_count):
if (self.train_xs[i][feature], y) in self.features[feature]:
id = self.xy2id[feature][(
self.train_xs[i][feature], y)]
Epxy[id] += (1 / self.N) * Pwxy[y]
return Epxy
def get_Ep_xy(self):
'''
        Compute the expectation of the feature functions f(x, y) w.r.t. the empirical
        distribution P_(x, y) (the underscore stands for the bar over P), i.e. the first
        expectation in section 6.2.2 of the book (bottom of page 82).
        :return: the computed Ep_xy
        '''
        # Initialise the Ep_xy list, one slot per feature function
        Ep_xy = [0] * self.feature_count
        # Iterate over every raw feature
        for feature in range(self.m):
            # Iterate over every (x, y) pair seen for this feature
            for (x, y) in self.features[feature]:
                # Look up the pair's id
                id = self.xy2id[feature][(x, y)]
                # Store the result at the corresponding position:
                # self.features holds the count of each pair in the training set,
                # so dividing by the total length N gives the empirical probability.
                Ep_xy[id] = self.features[feature][(x, y)] / self.N
        # Return the expectations
return Ep_xy
def createSearchDict(self):
'''
        Build the lookup dictionaries:
        xy2idDict: find the id of an (x, y) pair; every pair that occurs gets an id
        id2xyDict: find the (x, y) pair for a given id
        '''
        # Dictionaries for looking up ids from (x, y) pairs.
        # x is a single feature value, not a whole sample, so we also have to record
        # which feature a pair belongs to, otherwise pairs from different features mix.
        # For example, a sample X = (0, 1, 1) with label 1 produces the pairs
        # (0, 1), (1, 1), (1, 1); from (1, 1) alone we could not tell which feature it
        # came from, so a single dictionary would hold ambiguous mappings.
        # Therefore we build one dictionary per feature (e.g. 784 of them for MNIST),
        # and pairs from different features are stored in different dictionaries.
        xy2idDict = [{} for i in range(self.m)]
        # id -> (x, y) dictionary; ids are unique, so a single dictionary is enough
        id2xyDict = {}
        # Running index, which becomes the id
        index = 0
        # Iterate over the features
        for feature in range(self.m):
            # Iterate over every (x, y) pair seen for this feature;
            # self.features holds one dictionary per feature, read the matching one
            for (x, y) in self.features[feature]:
                # Store the pair, using [feature] to pick the right per-feature dictionary,
                # and use index as the pair's id
                xy2idDict[feature][(x, y)] = index
                # Also record the reverse mapping id -> (x, y)
                id2xyDict[index] = (x, y)
                # Advance the id
                index += 1
        # Return both dictionaries
return xy2idDict, id2xyDict
def get_features(self):
'''
        Collect all (feature value, label) pairs from the training set and count
        the total number of feature functions.
        :return: the per-feature pair counts and the total count
        '''
        n = 0
        # One dictionary per feature, so pairs from different features stay separate
        fixyDict = [defaultdict(int) for i in range(self.m)]
        # Iterate over every training sample
        for i in range(len(self.train_xs)):
            # Iterate over every feature of the sample
            for j in range(self.m):
                # Count each (x, y) pair that occurs
                fixyDict[j][(self.train_xs[i][j],
                             self.train_ys[i])] += 1
        # Count how many distinct (x, y) pairs there are in total and store it in n
        for i in fixyDict:
            n += len(i)
        # Return the dictionaries and the count
return fixyDict, n
def calcPwy_x(self, x):
'''
        Compute Eq. 6.22 from section 6.2.3 "Learning of maximum entropy models".
        :param x: the sample to evaluate (a full feature vector)
        :return: the computed Pw(Y|X), one probability per label
        '''
        # Numerators of Eq. 6.22, one per label
        numerators = [0] * self.class_count
        # Iterate over every feature
        for i in range(self.m):
            for j in range(self.class_count):
                if (x[i], j) in self.xy2id[i]:
                    index = self.xy2id[i][(x[i], j)]
                    numerators[j] += self.w[index]
        # Exponentiate the numerators
        numerators = np.exp(numerators)
        # Denominator: the normalising constant Z
        Z = np.sum(numerators)
        # Return Pw(y|x)
res = numerators / Z
return res
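    # For reference: calcPwy_x above evaluates the conditional maximum-entropy model
    # of Eq. 6.22,
    #     P_w(y|x) = exp(sum_i w_i * f_i(x, y)) / Z_w(x),
    #     Z_w(x)   = sum_y exp(sum_i w_i * f_i(x, y)),
    # where the f_i are the binary (feature value, label) indicators collected by
    # get_features() and the w_i are the weights that iis_train() updates below.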
def iis_train(self, iter=200):
        # Train with improved iterative scaling (IIS)
        for i in tqdm(range(iter)):
            # The second expectation from section 6.2.3 (top of page 83)
            Epxy = self.get_Epxy()
            # IIS step sizes, one sigma per feature function
            sigmaList = [0] * self.feature_count
            # Iterate over all feature functions
            for j in range(self.feature_count):
                # Eq. 6.34 from section 6.3.1 "Improved iterative scaling"
                sigmaList[j] = (1 / self.M) * np.log(self.Ep_xy[j] / Epxy[j])
            # Update w as in step 2(b) of Algorithm 6.1
self.w = [self.w[i] + sigmaList[i] for i in range(self.feature_count)]
if (i+1) % 5 == 0:
accuracy = self.test()
print('the accuracy is:%.4f' % accuracy)
def predict(self, X):
'''
        Predict the label of a sample
        :param X: the sample to predict
        :return: the predicted label
'''
return np.argmax(self.calcPwy_x(X))
def test(self):
'''
        Evaluate the model on the test set
        :return: accuracy on the test set
        '''
        # Error counter
        errorCnt = 0
        # Iterate over every test sample
        for i in range(len(self.test_xs)):
            # Predict the label of this sample
            result = self.predict(self.test_xs[i])
            # Count an error if the prediction is wrong
            if result != self.test_ys[i]:
                errorCnt += 1
        # Return the accuracy
return 1 - errorCnt / len(self.test_xs)
if __name__ == '__main__':
features, targets = load_data()
train_count = int(len(features)*0.8)
train_xs, train_ys = features[:train_count], targets[:train_count]
test_xs, test_ys = features[train_count:], targets[train_count:]
    # Initialise the maximum entropy classifier
    maxEnt = MaxEntropy(train_xs, train_ys, test_xs, test_ys)
    # Train
    print('start to train')
    maxEnt.iis_train()
    # Test
    print('start to test')
    accuracy = maxEnt.test()  # about 86.39% accuracy after 200 iterations
print('the accuracy is:%.4f'%accuracy)
|
[
"1033020837@qq.com"
] |
1033020837@qq.com
|
7cb427e3ff072b5451c32d0a9f6950112bfd49d1
|
0b193f4da7547d95b7c50fbc1b81276da8163372
|
/images/views.py
|
894ee4571baede4f1456d759e54a15017b903926
|
[] |
no_license
|
jzxyouok/bookmarks
|
4b071023af57a2b87fb4fcb034affd5a16719e85
|
c1bf5ce731f20c8771f6ff5038839c938a2562d8
|
refs/heads/master
| 2020-06-06T15:22:37.096495
| 2019-04-08T03:51:17
| 2019-04-08T03:51:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,289
|
py
|
from django.shortcuts import render, redirect, get_object_or_404
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.http import JsonResponse, HttpResponse
from django.views.decorators.http import require_POST
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.conf import settings
from common.decorators import ajax_required
from actions.utils import create_action
from .forms import ImageCreateForm
from .models import Image
import redis
r = redis.StrictRedis(host=settings.REDIS_HOST, port=settings.REDIS_PORT, db=settings.REDIS_DB)
# Create your views here.
@login_required
def image_create(request):
if request.method == 'POST':
form = ImageCreateForm(request.POST)
if form.is_valid():
data = form.cleaned_data
new_item = form.save(commit=False)
new_item.uploader = request.user
new_item.save()
messages.success(request, 'Image added successfully.')
create_action(request.user, 'bookmarked image', new_item)
return redirect(new_item.get_absolute_url())
else:
form = ImageCreateForm(request.GET)
return render(request, 'images/image/create.html', {'section': 'images', 'form': form})
def image_detail(request, id, slug):
image = get_object_or_404(Image, id=id, slug=slug)
total_views = r.incr(f'image:{image.id}:views')
r.zincrby('image:ranking', 1, image.id)
return render(request, 'images/image/detail.html', {'section': 'images', 'image': image, 'total_views': total_views})
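
# Note on the two Redis calls above: incr() keeps a per-image view counter under the key
# "image:<id>:views", and zincrby() bumps the image's score in the sorted set
# "image:ranking"; image_ranking() below reads that sorted set back with zrange().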
@ajax_required
@login_required
@require_POST
def image_favor(request):
image_id = request.POST.get('id')
action = request.POST.get('action')
if image_id and action:
try:
image = Image.objects.get(id=image_id)
if action == 'favor':
image.favorited_by.add(request.user)
create_action(request.user, 'likes', image)
else:
image.favorited_by.remove(request.user)
except Exception:
pass
return JsonResponse({'status': 'ok'})
@login_required
def image_list(request):
object_list = Image.objects.all()
paginator = Paginator(object_list, settings.IMAGES_PER_PAGE)
page = request.GET.get('page')
try:
images = paginator.page(page)
except PageNotAnInteger:
images = paginator.page(1)
except EmptyPage:
if request.is_ajax():
# stop the ajax
return HttpResponse('')
images = paginator.page(paginator.num_pages)
if request.is_ajax():
return render(request, 'images/image/list_ajax.html', {'section': 'images', 'images': images})
return render(request, 'images/image/list.html', {'section': 'images', 'images': images})
@login_required
def image_ranking(request):
image_ranking = r.zrange('image:ranking', 0, -1, desc=True)[:10]
image_ranking_ids = [int(id) for id in image_ranking]
most_viewed_images = list(Image.objects.filter(id__in=image_ranking_ids))
most_viewed_images.sort(key=lambda x:image_ranking_ids.index(x.id))
return render(request, 'images/image/ranking.html', {'section': 'images', 'most_viewed_images': most_viewed_images})
|
[
"2582347430@qq.com"
] |
2582347430@qq.com
|
ff6a536a0aab0c49ffe44f3890ca4e1f8f4e1e47
|
9851ec19fc07f92f0444c1e433607f28801f1e17
|
/my_blog/article/models.py
|
aa9639da8db3a26699b1039bade4a73da1e2cbd9
|
[] |
no_license
|
yezigege/blog_yezi
|
d030dd2f286493697071fc231577d7fbb8304274
|
6b3e565444df33535fba2aab5621d07580687ad7
|
refs/heads/master
| 2020-04-11T08:33:26.471940
| 2020-01-10T15:24:12
| 2020-01-10T15:24:12
| 161,647,665
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,904
|
py
|
from django.db import models
from django.contrib.auth.models import User  # import Django's built-in User model
from django.utils import timezone  # timezone is used for handling time-related logic
# Data model for blog articles
class ArticlePost(models.Model):
    """
    Use ForeignKey to define a relationship. This tells Django that every
    ArticlePost object (one or more) is associated with a single User object.
    """
    author = models.ForeignKey(User, on_delete=models.CASCADE)  # author; on_delete specifies how related data is deleted
    title = models.CharField(max_length=100)  # title; models.CharField is a string field for fairly short strings
    body = models.TextField()  # body; use TextField to store large amounts of text
    created = models.DateTimeField(default=timezone.now)  # creation time; timezone.now writes the current time at creation
    updated = models.DateTimeField(auto_now=True)  # update time; auto_now=True writes the current time on every save
    # The inner class Meta defines metadata for the model
    class Meta:
        db_table = "articles"  # database table name
        verbose_name = '文章'  # name displayed in the admin site
        verbose_name_plural = verbose_name  # plural form of the displayed name
        ordering = ('-created',)  # ordering controls how query results are sorted; '-created' means newest first
    # __str__ defines what is returned when str() is called on the object
    def __str__(self):
        """
        __str__ defines the name used whenever the object needs a text
        representation. Its most common use is as the object's display value
        in the Django admin, so it should always return a friendly, readable
        string.
        """
        return self.title  # return the article's title
|
[
"18839136833@163.com"
] |
18839136833@163.com
|
3bdfccb810864e94980fbc3a0c8947f023906a78
|
1da91735d1a4d19e62b2d19826d9a1e85d88d690
|
/dxpy/dxpy/projects/mie/__init__.py
|
1347b98e951e7bd5afbe3ce8aa30b68104dbc58b
|
[] |
no_license
|
Hong-Xiang/dxl
|
94229e4c20f0c97dfe21f8563889c991330df9c3
|
29aed778d1c699cc57d09666a20b4ca60196392f
|
refs/heads/master
| 2021-01-02T22:49:20.298893
| 2018-05-22T13:42:20
| 2018-05-22T13:42:20
| 99,401,725
| 1
| 1
| null | 2018-05-22T13:42:21
| 2017-08-05T05:34:35
|
Python
|
UTF-8
|
Python
| false
| false
| 74
|
py
|
"""Incident position estimation for monolithic crystal of PET scanners"""
|
[
"hx.hongxiang@gmail.com"
] |
hx.hongxiang@gmail.com
|
70059dc0c084c43dad919428365e6b617c65bcd8
|
edc6693ada84d2392bf6c1ac24097ab8b5a9d040
|
/r2.apps/common.py
|
999ef80e708971a4592314b3f840c6c69675d55b
|
[] |
no_license
|
he-actlab/r2.code
|
e544a60ba6eb90a94023d09843574b23725a5c14
|
b212d1e8fe90b87b5529bf01eb142d7b54c7325b
|
refs/heads/master
| 2023-03-30T10:48:43.476138
| 2016-02-15T15:58:54
| 2016-02-15T15:58:54
| 354,711,273
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,670
|
py
|
#!/usr/local/bin/python2.7
import json
import sys
from collections import OrderedDict
VARNAMES = OrderedDict([
('INVPROB_DRAM_FLIP_PER_SECOND', 'DRAM'),
('INVPROB_SRAM_READ_UPSET', 'SRAM read'),
('INVPROB_SRAM_WRITE_FAILURE', 'SRAM write'),
('MB_FLOAT_APPROX', '\\texttt{float} bits'),
('MB_DOUBLE_APPROX', '\\texttt{double} bits'),
('TIMING_ERROR_PROB_PERCENT-1', 'timing errors: single bit'),
('TIMING_ERROR_PROB_PERCENT-2', 'timing errors: random value'),
('TIMING_ERROR_PROB_PERCENT-3', 'timing errors: last value'),
])
RSRCNAMES = OrderedDict([
('heap', 'DRAM storage'),
('stack', 'SRAM storage'),
('alu', 'Integer operations'),
('fpu', 'FP operations'),
])
LEVELNAMES = ['Mild', 'Medium', 'Aggressive']
BMLONGNAMES = OrderedDict([
('fft', 'fft'),
('sor', 'sor'),
('mc', 'mc'),
('smm', 'smm'),
('lu', 'lu'),
('zxing', 'zxing'),
('jmeint', 'jmeint'),
('simpleRaytracer', 'simpleRaytracer'),
('sobel', 'sobel'),
])
def table_row(cells):
return ' & '.join(cells) + ' \\\\'
def numstr(f):
out = '%.2f' % f
if len(out) > 5:
return 'lots'
return out
def percent(f, places=2):
return ('%.' + str(places) + 'f\\%%') % (f*100)
def benchname(s):
if ':' in s:
return s.rsplit(':', 1)[1].strip()
elif s == 'Plane':
return 'Raytracer'
else:
return s
def json_in():
return json.load(sys.stdin)
def rtable(table):
return '\n'.join('\t'.join(str(cell) for cell in row) for row in table)
def frac(a, b):
total = float(a) + float(b)
if total == 0.0:
return 0.0
else:
return float(b) / total
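# Hedged addition (not part of the original module): a tiny self-check of the
# pure helpers above, runnable directly; all values are illustrative only.
if __name__ == '__main__':
    print table_row([benchname('bench: sobel'), numstr(1.234), percent(0.0712)])
    print rtable([[1, 2], [3, 4]])   # tab-separated rows, one line per row
    print frac(1, 3)                 # 3 / (1 + 3) = 0.75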
|
[
"jspark@gatech.edu"
] |
jspark@gatech.edu
|
267ad789d05c45a3e0a753f039d54e290006e720
|
b7252b8a1ba3b863fff638458f5beb186d2586b0
|
/user_repo3/user_repo/urls.py
|
4c8474375de53ee4095b0d0ecdd86ee4b593b50d
|
[] |
no_license
|
jyothinaidu/user_management
|
dcee5a67106f0c1a5410124323bd31beaa29da00
|
5714b02deb0acc8fa185eb02bd6b561e2f5f185e
|
refs/heads/master
| 2022-12-13T15:12:29.246904
| 2018-07-18T20:50:34
| 2018-07-18T20:50:34
| 141,488,381
| 1
| 0
| null | 2022-09-23T21:53:58
| 2018-07-18T20:46:25
|
Python
|
UTF-8
|
Python
| false
| false
| 6,519
|
py
|
from django.conf.urls import url, include
# from rest_framework import routers
from user_management import views
from user_management.user_management_api import *
# from user_management.user_management_api import FirebaseAuthentication
from user_management import user_management_api
# router = routers.DefaultRouter()
# router.register(r'users', views.UsersViewSet)
# router.register(r'applications', views.ApplicationViewSet)
# router.register(r'profileattributes', views.ProfileAttributesViewSet)
# router.register(r'profile', views.ProfileViewSet)
# Wire up our API using automatic URL routing.
# Additionally, we include login URLs for the browsable API.
from django.contrib.auth.views import LogoutView, LoginView
# from django.urls import path
from rest_framework_jwt.views import refresh_jwt_token
from django.conf.urls import url
from rest_framework_swagger.views import get_swagger_view
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
from user_management.views import LoginView,LogoutView,TestAuthView,AdminLoginView,AdminLogoutView
from rest_framework_simplejwt.views import (
TokenObtainPairView,
TokenRefreshView,TokenVerifyView,TokenObtainSlidingView,
TokenRefreshSlidingView
)
from user_management import views
from recipes_sample import views as recipesviews
from datetime import timedelta
from rest_framework_jwt.views import obtain_jwt_token
schema_view = get_swagger_view(title='Kraft API')
urlpatterns = [
url(r'^$', user_management_api.login, name='admin_firebase_auth'),
url(r'^admin/v0/', admin.site.urls),
url(r'^swagger/v0/$', schema_view,name="swagger-details"),
url(r'^api/dashboard/$', user_management_api.api_root),
url(r'^api-auth/v0/', include('rest_framework.urls')),
url(r'^user/login/v0/', obtain_jwt_token, name='api-token-auth'),
url(r'^user/v0/$', views.UsersListView.as_view(), name="users_list"),
url(r'^user/auth/login/v0/$', views.UserLoginAPIView.as_view(), name='admin_firebase_auth'),
url(r'^user/auth/logout/v0/$', AdminLogoutView.as_view(), name='admin_firebase_logout'),
url(r'^user/auth/create-user/v0/$', views.UserRegistrationAPIView.as_view(), name="user_create"),
url(r'^user/auth/delete-user/v0/$', views.UserDeleteView.as_view(), name="user_delete"),
url(r'^verify/(?P<verification_key>.+)/$',views.UserEmailVerificationAPIView.as_view(),name='email_verify'),
# url(r'^user/preferences/v0/$', views.PreferencesListView.as_view(), name="preferences_list"),
url(r'^user/preferences/create/v0/$', views.UserPreferenceAPIView.as_view(), name="preferences_create"),
url(r'^user/answers/create/v0/$', views.UserAnswersCreateView.as_view(), name="answers_create"),
url(r'^user/preferences/favourite/create/v0/$', views.PreferencesFavouriteCreateView.as_view(), name="favourites_create"),
# url(r'^user/preferences/favourite/v0/$', views.PreferencesFavouriteListApiView.as_view(), name="favourites_list"),
# url(r'^$', views.UsersListView.as_view(), name="users_list"),
# url(r'^user/list/$', views.UsersListView.as_view(), name="users_list"),
# url(r'^v0/user/(?P<pk>\d+)/$', views.UserDetailView.as_view(), name="user_detail"),
# url(r'^v0/user/(?P<pk>\d+)/detail/$', views.UserDetailView.as_view(), name="user_detail"),
# url(r'^v0/user/(?P<pk>\d+)/update/$', views.UserUpdateView.as_view(), name="user_update"),
# url(r'^user/(?P<pk>\d+)/delete/$', views.UserDeleteView.as_view(), name="user_delete"),
url(r'^assets/$',recipesviews.AssetsView.as_view(),name=recipesviews.AssetsView.name),
url(r'^assets/(?P<pk>[0-9]+)$',recipesviews.AssetsDetailView.as_view(),name=recipesviews.AssetsDetailView.name),
url(r'^categories/$',recipesviews.CategoryView.as_view(),name=recipesviews.CategoryView.name),
url(r'^categories/(?P<pk>[0-9]+)$',recipesviews.CategoryDetailView.as_view(),name=recipesviews.CategoryDetailView.name),
url(r'^dishes/$',recipesviews.DishView.as_view(),name=recipesviews.DishView.name),
url(r'^dishes/(?P<pk>[0-9]+)$',recipesviews.DishDetailView.as_view(),name=recipesviews.DishDetailView.name),
url(r'^ingredients/$',recipesviews.IngredientView.as_view(),name=recipesviews.IngredientView.name),
url(r'^ingredients/(?P<pk>[0-9]+)$',recipesviews.IngredientDetailView.as_view(),name=recipesviews.IngredientDetailView.name),
url(r'^riseingredients/$',recipesviews.RiseIngredientView.as_view(),name=recipesviews.RiseIngredientView.name),
url(r'^riseingredients/(?P<pk>[0-9]+)$',recipesviews.RiseIngredientDetailView.as_view(),name=recipesviews.RiseIngredientDetailView.name),
url(r'^taxonomy/$',recipesviews.TaxonomyView.as_view(),name=recipesviews.TaxonomyView.name),
url(r'^taxonomy/(?P<pk>[0-9]+)$',recipesviews.TaxonomyDetailView.as_view(),name=recipesviews.TaxonomyDetailView.name),
url(r'^mealplanrecipes/$',recipesviews.RecipeMealPlanView.as_view(),name=recipesviews.RecipeMealPlanView.name),
url(r'^mealplanrecipes/(?P<pk>[0-9]+)$',recipesviews.RecipeMealPlanDetailView.as_view(),name=recipesviews.RecipeMealPlanDetailView.name),
url(r'^mealplans/$',recipesviews.MealplanView.as_view(),name=recipesviews.MealplanView.name),
url(r'^mealplan/(?P<pk>[0-9]+)$',recipesviews.MealplanDetailView.as_view(),name=recipesviews.MealplanDetailView.name),
url(r'^recipes/$',recipesviews.RecipesView.as_view(),name=recipesviews.RecipesView.name),
url(r'^recipes/(?P<pk>[0-9]+)$',recipesviews.RecipesDetailView.as_view(),name=recipesviews.RecipesDetailView.name),
# url(r'^swagger/$', schema_view,name="swagger-details"),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
# + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
SIMPLE_JWT = {
'ACCESS_TOKEN_LIFETIME': timedelta(minutes=5),
'REFRESH_TOKEN_LIFETIME': timedelta(days=1),
'ROTATE_REFRESH_TOKENS': False,
'BLACKLIST_AFTER_ROTATION': True,
'ALGORITHM': 'HS256',
'SIGNING_KEY': settings.SECRET_KEY,
'VERIFYING_KEY': None,
'AUTH_HEADER_TYPES': ('Bearer',),
'USER_ID_FIELD': 'id',
'USER_ID_CLAIM': 'user_id',
'AUTH_TOKEN_CLASSES': ('rest_framework_simplejwt.tokens.AccessToken',),
'TOKEN_TYPE_CLAIM': 'token_type',
'SLIDING_TOKEN_REFRESH_EXP_CLAIM': 'refresh_exp',
'SLIDING_TOKEN_LIFETIME': timedelta(minutes=5),
'SLIDING_TOKEN_REFRESH_LIFETIME': timedelta(days=1),
}
|
[
"jyothi.nadu@gmail.com"
] |
jyothi.nadu@gmail.com
|
431bad1c8a17afaaa33860eac018fc8fee998c4b
|
9db852981a2d4ff33d6f8e57f0894471be2f74d5
|
/Black_Jack.py
|
87f4951c64349fa0e1fc58457decff5b3e1158cf
|
[] |
no_license
|
asi1234/Mini_Python_Projects
|
4938f52a66967cbb39202468626e79acd8d0b4d4
|
c529457bbe15eac50c062618f825d0ff26a06a76
|
refs/heads/master
| 2023-08-21T14:08:24.446423
| 2021-10-30T13:16:11
| 2021-10-30T13:16:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,388
|
py
|
from time import *
# BLACK JACK - CASINO
# PYTHON CODE BASE
# master
import random
deck = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10, 11] * 4
random.shuffle(deck)
print(f'{"*"*58} \n Welcome to the game Casino - BLACK JACK ! \n{"*"*58}')
sleep(2)
print('So Finally You Are Here To Accept Your Fate')
sleep(2)
print('I Mean Your Fortune')
sleep(2)
print('Lets Check How Lucky You Are Wish You All The Best')
sleep(2)
print('Loading---')
sleep(2)
print('Still Loading---')
sleep(2)
print('So You Are Still Here Not Gone I Gave You Chance But No Problem May Be You Trust Your Fortune A Lot \n Lets Begin Then')
sleep(2)
d_cards = [] # Initialising dealer's cards
p_cards = [] # Initialising player's cards
sleep(2)
while len(d_cards) != 2:
random.shuffle(deck)
d_cards.append(deck.pop())
if len(d_cards) == 2:
print('The cards dealer has are X ', d_cards[1])
# Displaying the Player's cards
while len(p_cards) != 2:
random.shuffle(deck)
p_cards.append(deck.pop())
if len(p_cards) == 2:
print("The total of player is ", sum(p_cards))
print("The cards Player has are ", p_cards)
if sum(p_cards) > 21:
print(f"You are BUSTED !\n {'*'*14}Dealer Wins !!{'*'*14}\n")
exit()
if sum(d_cards) > 21:
print(f"Dealer is BUSTED !\n {'*'*14} You are the Winner !!{'*'*18}\n")
exit()
if sum(d_cards) == 21:
print(f"{'*'*24}Dealer is the Winner !!{'*'*14}")
exit()
if sum(d_cards) == 21 and sum(p_cards) == 21:
print(f"{'*'*17}The match is tie !!{'*'*25}")
exit()
# function to show the dealer's choice
def dealer_choice():
if sum(d_cards) < 17:
while sum(d_cards) < 17:
random.shuffle(deck)
d_cards.append(deck.pop())
print("Dealer has total " + str(sum(d_cards)) + "with the cards ", d_cards)
if sum(p_cards) == sum(d_cards):
print(f"{'*'*15}The match is tie !!{'*'*15}")
exit()
if sum(d_cards) == 21:
if sum(p_cards) < 21:
print(f"{'*'*23}Dealer is the Winner !!{'*'*18}")
elif sum(p_cards) == 21:
print(f"{'*'*20}There is tie !!{'*'*26}")
else:
print(f"{'*'*23}Dealer is the Winner !!{'*'*18}")
elif sum(d_cards) < 21:
if sum(p_cards) < 21 and sum(p_cards) < sum(d_cards):
print(f"{'*'*23}Dealer is the Winner !!{'*'*18}")
if sum(p_cards) == 21:
print(f"{'*'*22}Player is winner !!{'*'*22}")
if 21 > sum(p_cards) > sum(d_cards):
print(f"{'*'*22}Player is winner !!{'*'*22}")
else:
if sum(p_cards) < 21:
print(f"{'*'*22}Player is winner !!{'*'*22}")
elif sum(p_cards) == 21:
print(f"{'*'*22}Player is winner !!{'*'*22}")
else:
print(f"{'*'*23}Dealer is the Winner !!{'*'*18}")
while sum(p_cards) < 21:
# to continue the game again and again !!
    k = input('Want to hit or stay?\n Press 1 for hit and 0 for stay ')
    if k == '1':  # input() returns a string, so compare with '1'
random.shuffle(deck)
p_cards.append(deck.pop())
print('You have a total of ' + str(sum(p_cards))
+ ' with the cards ', p_cards)
if sum(p_cards) > 21:
print(f'{"*"*13}You are BUSTED !{"*"*13}\n Dealer Wins !!')
if sum(p_cards) == 21:
print(f'{"*"*19}You are the Winner !!{"*"*29}')
else:
dealer_choice()
break
|
[
"noreply@github.com"
] |
asi1234.noreply@github.com
|
4798d1b2de639b7e3a40b0058ff316b6d0ed97b3
|
308b8c6e8b33d56f23029f3039d2a8d8e8f9ba1f
|
/lab3/run.py
|
1eb14b857ffbc0f29c0cd48e5b097944aa58149d
|
[] |
no_license
|
myrlund/tdt4275-nlp
|
029dd79b5366e30771f9cfd960324d538741071b
|
fce13d7cbb3dbba0494cab94f64c1b9a4f7e06a6
|
refs/heads/master
| 2021-01-19T07:09:25.870108
| 2013-04-23T13:14:36
| 2013-04-23T13:14:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,402
|
py
|
import nltk
def get_parser(f, debug):
trace = 2 if debug else 0
return nltk.load_parser(f, trace=trace)
def part1a(sentence, debug):
# grammar = nltk.parse_cfg(unifying_grammar)
fcfg = 'file:feat1.fcfg'
parser = get_parser(fcfg, debug)
# Give the parser pairs of correct/erronous sentences
if sentence:
sentences = [sentence]
else:
sentences = [
("I want to spend lots of money", "me want to spend lots of money"),
("tell me about Chez Parnisse", "tell I about Chez Parnisse"),
("I would like to take her out to dinner", "I would like to take she out to dinner"),
("she does not like Italian", "her does not like Italian"),
("this dog runs", "I runs", "these dogs runs"),
]
# Run them through the parser and display either OK or FAIL
for pair in sentences:
for sentence in pair:
trees = parser.nbest_parse(sentence.split())
print ("%-40s" % sentence),
if len(trees) > 0:
print "OK"
else:
print "FAIL"
if debug:
for tree in trees:
print tree
print ""
def part1b(debug):
lp = nltk.LogicParser()
logic = [
"all x y.(Shark(x) & Bird(y) & -Eats(x, y))",
"-(all x.(Bird(x) & LaysEggs(x)))",
]
for l in logic:
print "Parsing: '%s'" % l
parsed_logic = lp.parse(l)
print " -> free variables: %s" % parsed_logic.free()
print ""
def part1c(debug):
lp = nltk.LogicParser()
a3 = lp.parse('exists x.(samfundet(x) and school(x))')
c1 = lp.parse('smart(jonas)')
c2 = lp.parse('-smart(jonas)')
mace = nltk.Mace()
print mace.build_model(None, [a3, c1])
print mace.build_model(None, [a3, c2])
print mace.build_model(None, [c1, c2])
def part2a(sentence, debug):
fcfg = "file:fragment.fcfg"
# parser = get_parser(fcfg, debug)
if not sentence:
sentence = "a man chases a dog"
print "Parsing: '%s'" % sentence
trace = 2 if debug else 0
results = nltk.batch_interpret([sentence], fcfg, trace=trace)
for result in results:
for (synrep, semrep) in result:
print synrep
print semrep
if __name__ == '__main__':
parts = {
'1a': part1a,
'1b': part1b,
'1c': part1c,
'2a': part2a,
}
import argparse
parser = argparse.ArgumentParser(description="Parses sentences.")
parser.add_argument('-s', '--sentence', nargs=1, help="single run on a given sentence (default: predefined test set)")
parser.add_argument('--parts', nargs='+', help="run only the specified parts (choose from %s)" % ", ".join(sorted(parts.keys())))
parser.add_argument('--skip', nargs='*', help="do not run the specified parts (choose from %s)" % ", ".join(sorted(parts.keys())))
parser.add_argument('--debug', action='store_true', help="print traces and parse trees")
args = parser.parse_args()
run_parts = set(args.parts or parts.keys()) - set(args.skip or [])
# Part 1
if '1a' in run_parts: parts['1a'](args.sentence, debug=args.debug)
if '1b' in run_parts: parts['1b'](debug=args.debug)
if '1c' in run_parts: parts['1c'](debug=args.debug)
if '2a' in run_parts: parts['2a'](args.sentence[0] if args.sentence else None, debug=args.debug)
|
[
"myrlund@gmail.com"
] |
myrlund@gmail.com
|
60fa1f9da726d6a1c98cfc38a87f607929db5f4d
|
e5d980804586d27a65bd2fe36b1039bf4599a635
|
/pidgy/tests/__init__.py
|
41de3cfc08e4d0deb8747c4761dbc929f29bdba3
|
[
"BSD-3-Clause"
] |
permissive
|
chrisjsewell/pidgy
|
e9dbc1a15ae0de4994e6e1d6a363036bc30a8eba
|
17b4ffcd871528998fe4eb0434010ea55c53f9b2
|
refs/heads/master
| 2021-01-06T22:06:53.746592
| 2020-02-14T13:47:21
| 2020-02-14T13:47:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 85
|
py
|
from .. import reuse
with reuse.pidgyLoader(lazy=True):
from . import interactive
|
[
"noreply@github.com"
] |
chrisjsewell.noreply@github.com
|
4f0e6b4a279037595cb60bbb51ed7a0c32640ce7
|
ac5cae7fb66ea0924756c7aeaf190578ff5a6db5
|
/vis_utils.py
|
1f0556a5be7dd6e61fb868bf202c55b37d535dba
|
[] |
no_license
|
pranjaldatta/creditcard-fraud-streamlit
|
db78f896d71f445eb755887ef8fbc57b1696475f
|
0c4c200be1e6cfd889fab454e984815cc405750f
|
refs/heads/master
| 2023-06-02T15:45:48.621294
| 2021-06-13T16:01:49
| 2021-06-13T16:01:49
| 365,283,207
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 24
|
py
|
import streamlit as st
|
[
"pranjaldatta99@gmail.com"
] |
pranjaldatta99@gmail.com
|
88168cb031b0a457fb111140e9344ceca124f8fa
|
de9b8b7192a0a81e9249823bb2b86f0b7e452863
|
/.history/main_20171106171513.py
|
5378773f80c55c72622e6038de1967acec7208a8
|
[
"MIT"
] |
permissive
|
reecebenson/uwe-dadsa-tennis-a
|
f5eaeb1b96d4e61f29279514e68eeea8ad6533db
|
d0763f819b300fcd0ce27041f5bc4ef0519c00bf
|
refs/heads/master
| 2023-07-08T16:13:23.963348
| 2017-11-30T12:07:01
| 2017-11-30T12:07:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 510
|
py
|
# DADSA - Assignment 1
# Reece Benson
from classes import Handler as Handler
from classes import Player as Player
from classes import Season as Season
from classes import Tournament as Tournament
from classes import Round as Round
from classes import Match as Match
class App():
def __hold__(self):
input(">>> Press <Return> to terminate the program")
exit()
def __main__(self):
handler = Handler.Handler()
# Hold the program
self.__hold__()
App().__main__()
|
[
"business@reecebenson.me"
] |
business@reecebenson.me
|
cc3dca0113f1768a8d59a244a3c0b0b7514b7eec
|
052b56514e5ea6837e088478fe1db882c2797c8d
|
/python_scripts/aws_security_group.py
|
1ddc145bffa8777f36096de5507d2bf36815c34d
|
[] |
no_license
|
pkumarbe/AWS-by-Ansible-and-Python
|
33b846c95a243ff58d844e5c28498a668a1562df
|
700a723eda8a445474c0818c02cf278a7bc62b76
|
refs/heads/master
| 2021-06-22T04:02:18.957989
| 2019-01-22T18:16:37
| 2019-01-22T18:16:37
| 163,870,848
| 2
| 1
| null | 2020-12-29T09:34:03
| 2019-01-02T17:41:11
|
Python
|
UTF-8
|
Python
| false
| false
| 751
|
py
|
import boto3
def create_add_rule_secgroup():
    ec2 = boto3.resource('ec2')
print "Creating Security Group..."
sec_group = ec2.create_security_group(
GroupName = "custom sec",
Description = "Allow http/s and SSH"
)
# Create the list rules to be allowed by this SG.
ip_ranges = [{
'CidrIp': '0.0.0.0/0'
}]
permission_lists = [{
'IpProtocol':'TCP',
'FromPort':80,
'ToPort':80,
'IpRanges':ip_ranges
},{
'IpProtocol':'TCP',
'FromPort':22,
'ToPort':22,
'IpRanges':ip_ranges
}]
#Add the lists to SG
sec_group.authorize_ingress(IpPermissions=permission_lists)
return sec_group.id
print create_add_rule_secgroup()
|
[
"mymail8500@gmail.com"
] |
mymail8500@gmail.com
|
2c27bc02010bbcddfa0ac25fd5c8c0feb9dce22b
|
9edffceeefab2f5a9beebeeace22a5e3783931e0
|
/checkio_solutions/Elementary/popular_words.py
|
7deab66d21c3a8d8b7895ca7cf0cc8d3ee382ba5
|
[] |
no_license
|
sunnirvana/py-checkio
|
13b22a851cf285f34485aa6ac417c852785d96d7
|
c7ebdc517ee26f1791391d584b9be67fc8c39660
|
refs/heads/master
| 2020-04-24T15:28:29.136611
| 2019-03-04T22:13:43
| 2019-03-04T22:13:43
| 172,069,163
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,754
|
py
|
#!/usr/bin/env checkio --domain=py run popular-words
# https://py.checkio.org/mission/popular-words/
# In this mission your task is to determine the popularity of certain words in the text.
#
# At the input of your function are given 2 arguments: the text and the array of words the popularity of which you need to determine.
#
# When solving this task pay attention to the following points:
#
# The words should be sought in all registers. This means that if you need to find a word "one" then words like "one", "One", "oNe", "ONE" etc. will do.
# The search words are always indicated in the lowercase.
# If the word wasn’t found even once, it has to be returned in the dictionary with 0 (zero) value.
#
# Input: The text and the search words array.
#
# Output: The dictionary where the search words are the keys and the values are the number of times those words occur in the given text.
#
# Precondition:
# The input text will consists of English letters in uppercase and lowercase and whitespaces.
#
#
# END_DESC
#
#
#
#
#
#
#
def popular_words(text: str, words: list) -> dict:
# your code here
text_lst = [w.lower() for w in text.split()]
return {w: text_lst.count(w) for w in words}
if __name__ == '__main__':
print("Example:")
print(popular_words('''
When I was One
I had just begun
When I was Two
I was nearly new
''', ['i', 'was', 'three', 'near']))
# These "asserts" are used for self-checking and not for an auto-testing
assert popular_words('''
When I was One
I had just begun
When I was Two
I was nearly new
''', ['i', 'was', 'three', 'near']) == {
'i': 4,
'was': 3,
'three': 0,
'near': 0
}
print("Coding complete? Click 'Check' to earn cool rewards!")
|
[
"yubogo@gmail.com"
] |
yubogo@gmail.com
|
110117acc0a89ab48991460b4d01aa08d976f653
|
64c890511437a9aa5c3911871177d8eab793107d
|
/main.py
|
e01647395bfe48e75c7048d84e5b7872ebe2798f
|
[] |
no_license
|
victusfate/ChitChatRooms
|
05e807212801b8a8b943f7ab658b50e8413e04a7
|
a487a76e631ea0f3a60ef619e53b4b9feaa18b6e
|
refs/heads/master
| 2020-12-25T09:38:16.512768
| 2011-04-05T09:19:53
| 2011-04-05T09:19:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,801
|
py
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import hashlib
import time
import logging
import urllib
from datetime import datetime, date, time
from google.appengine.ext import blobstore
from google.appengine.api import memcache
from google.appengine.api import xmpp
from google.appengine.ext import webapp
from google.appengine.ext.webapp import util
from google.appengine.ext.webapp import xmpp_handlers
from google.appengine.ext.webapp import template
from google.appengine.ext.webapp import blobstore_handlers
from google.appengine.api import channel
from django.utils.html import strip_tags
from django.utils import simplejson
##
# Adds a user to a room.
def add_to_room(room, user, channel):
#!!! LOCKING ISSUE. LET'S IGNORE THIS FOR THE SAKE OF THIS SIMPLE APP!!!#
#!!! At worst, a user may actually not be in the list of listeners... let's hope he reloads that page in time !!!#
listeners = []
try:
listeners = simplejson.loads(memcache.get(key=room))
except :
# Well huh
listeners = []
listeners.append([channel, user])
memcache.set(key=room, value=simplejson.dumps(listeners), time=1800)
##
# Sends messages to all members of a room
def send_to_room(room, msg):
listeners = []
try:
listeners = simplejson.loads(memcache.get(key=room))
except :
# Well huh
listeners = []
for listener in listeners:
logging.info(listener[0]);
if listener[0] == "http":
channel.send_message(listener[1], simplejson.dumps(msg))
elif listener[0] == "xmpp":
xmpp.send_message(listener[1], msg["name"] + " : " + msg["message"])
##
# In charge of rendering the home page, and redirect to the right room
class MainHandler(webapp.RequestHandler):
def render(self, template_file, template_values = {}):
path = os.path.join(os.path.dirname(__file__), 'templates', template_file)
self.response.out.write(template.render(path, template_values))
def get(self):
self.render("index.html")
def post(self):
self.redirect("/r/"+self.request.get("room"))
##
# Handles rooms : shows and post messages
class RoomHandler(webapp.RequestHandler):
def render(self, template_file, template_values = {}):
path = os.path.join(os.path.dirname(__file__), 'templates', template_file)
self.response.out.write(template.render(path, template_values))
def get(self, room):
user = hashlib.md5(datetime.now().isoformat()).hexdigest()
add_to_room(room, user, "http")
token = channel.create_channel(user)
self.render("room.html", {"room": room, 'token': token})
def post(self, room):
# Adds messages to the rooms.
msg = {"message": strip_tags(self.request.get("message")), "name": strip_tags(self.request.get("name"))};
send_to_room(room, msg)
##
# File uploader
class UploadHandler(blobstore_handlers.BlobstoreUploadHandler):
def render(self, template_file, template_values = {}):
path = os.path.join(os.path.dirname(__file__), 'templates', template_file)
self.response.out.write(template.render(path, template_values))
def post(self, room):
upload_files = self.get_uploads('file') # 'file' is file upload field in the form
blob_info = upload_files[0]
send_to_room(self.request.get("room"), {"name": "ChitChat", "message": "<a target='_blank' href='/serve/%s'>File uploaded!</a>"% blob_info.key()})
self.redirect('/upload/%s?done=success' % self.request.get("room"))
def get(self, room):
if self.request.get("done") == "success":
self.render("done.html")
else:
upload_url = blobstore.create_upload_url('/upload/')
self.render("upload.html", {"room": room, 'upload_url': upload_url})
##
# Uploaded file handler
class ServeHandler(blobstore_handlers.BlobstoreDownloadHandler):
def get(self, resource):
resource = str(urllib.unquote(resource))
blob_info = blobstore.BlobInfo.get(resource)
self.send_blob(blob_info)
##
# XMPP Handler
class XMPPHandler(xmpp_handlers.CommandHandler):
def join_command(self, message=None):
message = xmpp.Message(self.request.POST)
user = message.sender.rpartition("/")[0]
room = message.arg
add_to_room(room, user, "xmpp")
memcache.set(key=user, value=room, time=1800)
message.reply("Congrats, you joined the room '" + room + "'");
def help_command(self, message=None):
message = xmpp.Message(self.request.POST)
help_msg = "This is a simple chatroom client which can be used both from the web, or from an XMPP client:\n\n" \
"/join XYZ -> joins the XYZ room\n\n" \
"/help -> get help message\n"
message.reply(help_msg)
message.reply(message.body)
def text_message(self, message=None):
message = xmpp.Message(self.request.POST)
user = message.sender.rpartition("/")[0]
msg = {"message": strip_tags(message.body), "name": user};
room = memcache.get(key=user)
send_to_room(room, msg)
def main():
application = webapp.WSGIApplication([
('/_ah/xmpp/message/chat/', XMPPHandler),
('/', MainHandler),
('/r/([^/]+)?', RoomHandler),
('/upload/([^/]+)?', UploadHandler),
('/serve/([^/]+)?', ServeHandler)
],debug=True)
util.run_wsgi_app(application)
if __name__ == '__main__':
main()
|
[
"julien.genestoux@gmail.com"
] |
julien.genestoux@gmail.com
|