blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
909b30bb2930b9cb2a1dc1880cd4b41358bb8757 | 65bdb78953d12ef94951f9006b9aa94779827f17 | /challenge10d.py | b4e5e590f1abab93f7a457fce9971ca5839d218c | [] | no_license | GM3D/POH1 | bf66385b7993d6255736a1cef83da466bf1fa8c0 | 0d1ed9b76f01e328c434ccb48cea971c90ea7acb | refs/heads/master | 2021-01-13T12:55:54.934919 | 2013-12-22T18:14:18 | 2013-12-22T18:14:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,996 | py | #challenge10c.py
# count_and_offset is list
# with prescan.
from datetime import datetime, timedelta
from sys import stderr
num_marks = 10
def report_time():
    """Write timing deltas between consecutive checkpoints in ``t`` to stderr.

    Python 2 code: relies on the module-level ``t`` list of datetime marks,
    ``num_marks`` and the imported ``stderr``.
    """
    for i in xrange(num_marks - 1):
        if t[i+1] > t[i]:
            # NOTE(review): .microseconds is only the sub-second component of
            # the timedelta; spans of >= 1 second are under-reported. Confirm.
            stderr.write("t[%d] - t[%d] = %d us.\n" %
                         (i + 1, i, (t[i + 1] - t[i]).microseconds))
# Pre-fill every checkpoint slot, then reset mark 0 to the actual start time.
t = [datetime.now() for i in range(num_marks)]
t[0] = datetime.now()
from sys import stdin
from collections import Counter
def get_next_valid_lower(x):
    """Return the largest value <= x that actually occurs in the input.

    Uses the prescanned ``count_and_offset`` table: a negative entry stores
    the (negative) distance down to the nearest present value, so the jump
    is a single O(1) addition.
    """
    l = count_and_offset[x]
    if l < 0:
        x += l
    return x
# Value ceiling for item prices and the minimum valid price.
million = 1000 * 1000
max_days = 75
lowest_price = 10
t[1] = datetime.now()
# Read the whole input at once (faster than per-line reads in Python 2).
content = stdin.read()
lines=content.splitlines()
N, D = map(int, lines[0].split())
t[2] = datetime.now()
# count_and_offset doubles as a histogram (positive counts) and, after the
# prescan below, as a skip table (negative offsets to the next present value).
count_and_offset = Counter()
count_and_offset[0] = 0
for i in xrange(N):
    value = int(lines[i + 1])
    count_and_offset[value] += 1
cprices = map(int, lines[N + 1:])
t[3] = datetime.now()
offset = 0;
# Prescan: for each absent value store the negative distance down to the
# nearest present value, enabling O(1) jumps in get_next_valid_lower().
for i in xrange(million + 1):
    if count_and_offset[i] > 0:
        offset = 0;
    else:
        count_and_offset[i] = offset
        offset -= 1
t[4] = datetime.now()
best_price = []
# For each day find the best pair (smaller, larger) of existing prices whose
# sum does not exceed the day's cap cp, scanning 'larger' downward.
for day in xrange(D):
    candidate = 0
    cp = cprices[day]
    if cp > 2 * lowest_price:
        lowlimit = cp / 2
    else:
        lowlimit = lowest_price
    larger = cp - lowest_price
    larger = get_next_valid_lower(larger)
    while larger >= lowlimit and candidate != cp:
        smaller = cp - larger
        # If the only copy of this value is 'larger' itself, it cannot be
        # reused as 'smaller'; step below it instead.
        if (count_and_offset[smaller] == 1 and smaller == larger):
            smaller -= 1
        smaller = get_next_valid_lower(smaller)
        if smaller < lowest_price:
            larger = get_next_valid_lower(larger - 1)
            continue
        if smaller + larger > candidate:
            candidate = smaller + larger
        larger = get_next_valid_lower(larger - 1)
    best_price.append(candidate)
t[5] = datetime.now()
for day in xrange(D):
    print best_price[day]
t[6] = datetime.now()
report_time()
| [
"gmomma940@gmail.com"
] | gmomma940@gmail.com |
dbee5a27c760a92bb50d8294c6b8352a22ea276f | 346090f528bc8a8c415b4f0253b29dac9c1a97a1 | /takenotex/manage.py | 9480f8a9fc125a3877549005240aac46f01394e4 | [] | no_license | raveenb/TakeNote | 10e01769b342d99a4efe0f0369edbe436a52354d | 0d043debb540d5118264f8f64252db1b499e6c3b | refs/heads/master | 2021-01-23T16:28:11.629683 | 2013-03-25T18:13:56 | 2013-03-25T18:13:56 | 7,698,881 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 252 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Standard Django entry point: select the settings module, then hand the
    # command-line arguments to Django's management command dispatcher.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "takenotex.settings")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
| [
"raveen.b@gmail.com"
] | raveen.b@gmail.com |
f8446039964cea805bcbfbf0a9c49bc5f3b15ee8 | 8ab6ee616e14619338d74751947d6446ac4a38bd | /ur5_test/scripts/main.py | 0ebeaf5a27bd031172d021e58089c4260207dfda | [
"BSD-2-Clause"
] | permissive | wkqun555/OCRTOC_GINS_Simulation-Part | 1fcecfefba0c2afbf1721d147726cdab602af97e | 642bb479c97a5e3a4c1be44ac10fa2b09f6b8a24 | refs/heads/master | 2023-01-09T03:09:18.680931 | 2020-10-05T07:26:59 | 2020-10-05T07:26:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,041 | py | #!/usr/bin/env python
import rospy
from std_msgs.msg import Header
from trajectory_msgs.msg import JointTrajectory
from trajectory_msgs.msg import JointTrajectoryPoint
# Two joint-space targets the arm alternates between: a tucked pose and the
# all-zeros home pose (radians, one value per UR5 joint).
waypoints = [[1.5621, -2.12, 1.72, -1.1455, -1.57, 3.14],[0,0,0,0,0,0]]
def main():
    """Publish alternating JointTrajectory goals to the UR5 arm controller."""
    rospy.init_node('send_joints')
    pub = rospy.Publisher('/arm_controller/command',JointTrajectory,queue_size=10)
    traj = JointTrajectory()
    traj.header = Header()
    # Joint ordering must match what the arm_controller expects.
    traj.joint_names = ['shoulder_pan_joint','shoulder_lift_joint','elbow_joint','wrist_1_joint',
                        'wrist_2_joint','wrist_3_joint']
    rate = rospy.Rate(1)
    cnt=0
    pts = JointTrajectoryPoint()
    traj.header.stamp = rospy.Time.now()
    while not rospy.is_shutdown():
        cnt += 1
        # Alternate between the two waypoints on successive iterations.
        if cnt%2 == 1:
            pts.positions = waypoints[0]
        else:
            pts.positions = waypoints[1]
        pts.time_from_start = rospy.Duration(1.0)
        traj.points = []
        traj.points.append(pts)
        pub.publish(traj)
        rospy.sleep(3)
        rate.sleep()
if __name__ == '__main__' :
    try:
        main()
    except rospy.ROSInterruptException:
        print("you are too lame")
| [
"1206413225@qq.com"
] | 1206413225@qq.com |
9344bb527e6bd01be344424a5716b7c6d61d62a3 | 4a23bef7eae8206345a02824b377a61ac2b588ce | /DirectRanker.py | e17c7e62241f319a4927fe8b3e459434595234a7 | [] | no_license | cjholl211/Learning-to-rank-for-patient-prioritisation | 575cac61a873b9204485425085165ed14c4aa9c9 | 477941b90453eed54d2fd8c9e2fd33662ad7a759 | refs/heads/master | 2022-11-16T01:43:05.958814 | 2020-07-18T10:51:45 | 2020-07-18T10:51:45 | 263,937,405 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 32,950 | py | import numpy as np
import tensorflow as tf
import pickle
from sklearn.base import BaseEstimator
from sklearn.model_selection import train_test_split
from dr_helpers import nDCG_cls
import time
def default_weight_function(w1, w2):
    """Default pairwise weight: the product of the two document weights."""
    combined = w1 * w2
    return combined
class directRanker(BaseEstimator):
"""
Constructor
:param hidden_layers: List containing the numbers of neurons in the layers for feature
:param feature_activation: tf function for the feature part of the net
:param ranking_activation: tf function for the ranking part of the net
:param feature_bias: boolean value if the feature part should contain a bias
:param kernel_initializer: tf kernel_initializer
:param dtype: dtype used in each layer
:param cost: cost function for the directRanker
:param weight_function: weight_function for the documents weights
:param start_batch_size: start value for increasing the sample size
:param end_batch_size: end value for increasing the sample size
:param learning_rate: learning rate for the optimizer
:param max_steps: total training steps
:param learning_rate_step_size: factor for increasing the learning rate
:param learning_rate_decay_factor: factor for increasing the learning rate
:param optimizer: tf optimizer object
:param print_step: for which step the script should print out the cost for the current batch
:param feature_func: additional feature_function for the feature part of the net
:param weights: boolean if weights are passed in the fit
:param end_qids: end value for increasing the query size
:param start_qids: start value for increasing the query size
"""
    def __init__(self,
                 hidden_layers=[10],  # NOTE(review): mutable default, shared across instances if mutated
                 feature_activation=tf.nn.tanh,
                 ranking_activation=tf.nn.tanh,
                 feature_bias=True,
                 kernel_initializer=tf.random_normal_initializer(),
                 dtype=tf.float32,
                 cost=None,
                 weight_function=None,
                 start_batch_size=100,
                 end_batch_size=10000,
                 learning_rate=0.01,
                 max_steps=10000,
                 learning_rate_step_size=500,
                 learning_rate_decay_factor=0.944,
                 optimizer=tf.train.AdamOptimizer,
                 print_step=0,
                 feature_func=None,
                 feature_func_nn0_1=None,
                 weights=False,
                 end_qids=300,
                 start_qids=10,
                 weight_regularization=0.,
                 dropout=0.,
                 input_dropout=0.,
                 early_stopping=False,
                 validation_size=0.2,
                 stop_scorer=nDCG_cls,
                 lookback=10,
                 stop_delta=0.001,
                 random_seed=None,
                 stop_start=None,
                 name="DirectRanker",
                 ):
        """Store hyper-parameters only; the TF graph is built lazily in fit().

        Parameter meanings are documented in the class docstring above.
        """
        self.hidden_layers = hidden_layers
        self.feature_activation = feature_activation
        self.ranking_activation = ranking_activation
        self.feature_bias = feature_bias
        self.kernel_initializer = kernel_initializer
        self.dtype = dtype
        self.cost = cost
        self.weight_function = weight_function
        self.start_batch_size = start_batch_size
        self.end_batch_size = end_batch_size
        self.learning_rate = learning_rate
        self.max_steps = max_steps
        self.learning_rate_step_size = learning_rate_step_size
        self.learning_rate_decay_factor = learning_rate_decay_factor
        self.optimizer = optimizer
        self.print_step = print_step
        self.feature_func = feature_func
        self.weights = weights
        self.end_qids = end_qids
        self.start_qids = start_qids
        # Second alias for the same callable; _build_pairs uses weight_func.
        self.weight_func = weight_function
        self.feature_func_nn0_1 = feature_func_nn0_1
        self.weight_regularization = weight_regularization
        self.dropout = dropout
        self.input_dropout = input_dropout
        self.early_stopping = early_stopping
        self.validation_size = validation_size
        self.random_seed = random_seed
        self.stop_scorer = stop_scorer
        self.lookback = lookback
        self.stop_delta = stop_delta
        self.name = name
        # Early stopping begins half-way through training unless overridden.
        if stop_start is None:
            self.stop_start = int(self.max_steps / 2)
        else:
            self.stop_start = stop_start
        # Graph handles; populated by _build_model() on the first fit.
        self.should_drop = None
        self.x0 = None
        self.x1 = None
        self.y0 = None
        self.w0 = None
        self.num_features = None
        self.sess = None
        self.num_hidden_layers = len(hidden_layers)
def _to_dict(self, features, real_classes, weights):
"""
A little function for preprocessing the data
:param features: documents of a query
:param real_classes: class leables of the documents
:param weights: weights of the documents
:return: dict with class as key and a list of queries
"""
d = {}
for i in range(len(features)):
if real_classes[i, 0] in d.keys():
if weights is None:
d[real_classes[i, 0]].append(features[i])
else:
d[real_classes[i, 0]].append(np.concatenate((features[i], [weights[i]])))
else:
if weights is None:
d.update({real_classes[i, 0]: [features[i]]})
else:
d.update({real_classes[i, 0]: [np.concatenate((features[i], [weights[i]]))]})
for k in d.keys():
d[k] = np.array(d[k])
return d
def _comparator(self, x1, x2):
"""
:param x1: list of documents
:param x2: list of documents
:return: cmp value for sorting the query
"""
res = self.evaluate(x1[:-1], x2[:-1])
if res < 0:
return -1
elif res > 0:
return 1
return 0
    def _build_model(self):
        """Build the two-branch (Siamese) TF1 graph for the directRanker.

        Both input branches share weights (``reuse=True``); the ranking part
        operates on the antisymmetric difference of the branch outputs, so the
        net is antisymmetric by construction: r(a, b) == -r(b, a).
        """
        if self.weight_function is None:
            self.weight_function = default_weight_function
        tf.reset_default_graph()
        # Placeholders for the inputs
        self.x0 = tf.placeholder(
            shape=[None, self.num_features],
            dtype=self.dtype,
            name="x0"
        )
        self.x1 = tf.placeholder(
            shape=[None, self.num_features],
            dtype=self.dtype,
            name="x1"
        )
        # Placeholder for the real classes
        self.y0 = tf.placeholder(
            shape=[None, 1],
            dtype=self.dtype,
            name="y0"
        )
        # Placeholder for the weights
        self.w0 = tf.placeholder(
            shape=[None, ],
            dtype=self.dtype,
            name="w0"
        )
        # Boolean flag: dropout is active during training only.
        self.should_drop = tf.placeholder(tf.bool, name="drop")
        # L2 regularization applied to every dense layer below.
        regularizer = tf.contrib.layers.l2_regularizer(self.weight_regularization)
        # Input dropout (applied before the first dense layer).
        in0 = tf.layers.dropout(inputs=self.x0,
                                rate=self.input_dropout,
                                training=self.should_drop
                                )
        in1 = tf.layers.dropout(inputs=self.x1,
                                rate=self.input_dropout,
                                training=self.should_drop
                                )
        # Constructing the feature creation part of the net
        nn0 = tf.layers.dense(
            inputs=in0,
            units=self.hidden_layers[0],
            activation=self.feature_activation,
            use_bias=self.feature_bias,
            kernel_initializer=self.kernel_initializer,
            kernel_regularizer=regularizer,
            name="nn_hidden_0"
        )
        # By giving nn1 the same name as nn0 and using the flag reuse=True,
        # the weights and biases of all neurons in each branch are identical
        nn1 = tf.layers.dense(
            inputs=in1,
            units=self.hidden_layers[0],
            activation=self.feature_activation,
            use_bias=self.feature_bias,
            kernel_initializer=self.kernel_initializer,
            kernel_regularizer=regularizer,
            name="nn_hidden_0",
            reuse=True
        )
        # Layer dropout after the first hidden layer.
        nn0 = tf.layers.dropout(inputs=nn0,
                                rate=self.dropout,
                                training=self.should_drop
                                )
        nn1 = tf.layers.dropout(inputs=nn1,
                                rate=self.dropout,
                                training=self.should_drop
                                )
        # Remaining hidden layers, again with shared weights between branches.
        for i in range(1, len(self.hidden_layers)):
            nn0 = tf.layers.dense(
                inputs=nn0,
                units=self.hidden_layers[i],
                activation=self.feature_activation,
                use_bias=self.feature_bias,
                kernel_initializer=self.kernel_initializer,
                kernel_regularizer=regularizer,
                name="nn_hidden_" + str(i)
            )
            nn1 = tf.layers.dense(
                inputs=nn1,
                units=self.hidden_layers[i],
                activation=self.feature_activation,
                use_bias=self.feature_bias,
                kernel_initializer=self.kernel_initializer,
                kernel_regularizer=regularizer,
                name="nn_hidden_" + str(i),
                reuse=True
            )
            # Layer Dropout
            nn0 = tf.layers.dropout(inputs=nn0,
                                    rate=self.dropout,
                                    training=self.should_drop
                                    )
            nn1 = tf.layers.dropout(inputs=nn1,
                                    rate=self.dropout,
                                    training=self.should_drop
                                    )
        # Creating antisymmetric features for the ranking
        self.nn = (nn0 - nn1) / 2.
        self.nn = tf.layers.dense(
            inputs=self.nn,
            units=1,
            activation=self.ranking_activation,
            use_bias=False,
            kernel_initializer=self.kernel_initializer,
            kernel_regularizer=regularizer,
            name="nn_rank"
        )
        # Classification head: feeds only branch 0 through the same (reused)
        # ranking layer, as if compared against a zero document.
        self.nn_cls = tf.layers.dense(
            inputs=nn0 / 2.,
            units=1,
            activation=self.ranking_activation,
            use_bias=False,
            kernel_initializer=self.kernel_initializer,
            kernel_regularizer=regularizer,
            name="nn_rank",
            reuse=True
        )
        # Named alias so load_ranker() can recover the output tensor ("nn:0").
        nn_out = tf.identity(
            input=self.nn,
            name="nn"
        )
    def _build_pairs(self, query, samples, use_weights):
        """Sample training pairs from one query (label -> rows dict).

        For each pair of adjacent sorted labels, draws ``samples`` random rows
        from each side; the target is the label difference.

        :param query: dict mapping label -> np.array of document rows
        :param samples: number of pairs to draw per adjacent label pair
        :param use_weights: whether a weight column is appended to each row
        :return: [x0, x1, y] or [x0, x1, y, w] with y of shape (n, 1)
        """
        x0 = []
        x1 = []
        y = []
        if use_weights:
            w = []
        keys = sorted(list(query.keys()))
        for i in range(len(keys) - 1):
            indices0 = np.random.randint(0, len(query[keys[i + 1]]), samples)
            indices1 = np.random.randint(0, len(query[keys[i]]), samples)
            # Strip the trailing weight column (if any) from the features.
            x0.extend(query[keys[i + 1]][indices0][:, :self.num_features])
            x1.extend(query[keys[i]][indices1][:, :self.num_features])
            y.extend((keys[i + 1] - keys[i]) * np.ones(samples))
            if use_weights:
                # NOTE(review): [-1] selects the last sampled row (all of its
                # columns), not the weight column of each row — confirm that
                # this is the intended weight lookup.
                w.extend(self.weight_func(query[keys[i + 1]][indices0][-1],
                                          query[keys[i]][indices1][-1]))
        x0 = np.array(x0)
        x1 = np.array(x1)
        y = np.array([y]).transpose()
        if use_weights:
            w = np.array(w)
            return [x0, x1, y, w]
        else:
            return [x0, x1, y]
    def _build_pairs_f(self, query, targets, samples, use_weights):
        """Sample training pairs from a flat (rows, targets) representation.

        Same sampling scheme as _build_pairs, but the query is given as a row
        matrix plus a parallel target array instead of a label -> rows dict.

        :param query: np.array of document rows
        :param targets: per-row target values (relevance labels)
        :param samples: number of pairs to draw per adjacent label pair
        :param use_weights: whether a weight column is appended to each row
        :return: [x0, x1, y] or [x0, x1, y, w] with y of shape (n, 1)
        """
        x0 = []
        x1 = []
        y = []
        if use_weights:
            w = []
        keys, counts = np.unique(targets, return_counts=True)
        sort_ids = np.argsort(keys)
        keys = keys[sort_ids]
        counts = counts[sort_ids]
        for i in range(len(keys) - 1):
            indices0 = np.random.randint(0, counts[i + 1], samples)
            indices1 = np.random.randint(0, counts[i], samples)
            # Row indices belonging to the higher / lower label, respectively.
            querys0 = np.where(targets == keys[i + 1])[0]
            querys1 = np.where(targets == keys[i])[0]
            x0.extend(query[querys0][indices0][:, :self.num_features])
            x1.extend(query[querys1][indices1][:, :self.num_features])
            y.extend((keys[i + 1] - keys[i]) * np.ones(samples))
            if use_weights:
                # NOTE(review): [-1] selects the last sampled row, not the
                # weight column of each row — confirm intended (see
                # _build_pairs).
                w.extend(self.weight_func(query[querys0][indices0][-1],
                                          query[querys1][indices1][-1]))
        x0 = np.array(x0)
        x1 = np.array(x1)
        y = np.array([y]).transpose()
        if use_weights:
            w = np.array(w)
            return [x0, x1, y, w]
        else:
            return [x0, x1, y]
    def _build_no_query_pairs(self, features, samples, weights):
        """Sample training pairs for the query-less (classification) case.

        :param features: dict mapping label -> np.array of rows (from _to_dict)
        :param samples: number of pairs per adjacent label pair
        :param weights: truthy when weights are in use
        :return: [x0, x1, y] or [x0, x1, y, w]

        NOTE(review): unlike _build_pairs, ``w`` is initialized but never
        filled, so with weights enabled an empty weight array is returned;
        also ``y`` is the upper label itself, not the label difference —
        confirm whether weight support here is intentionally incomplete.
        """
        x0 = []
        x1 = []
        y = []
        if weights is not None:
            w = []
        keys = sorted(list(features.keys()))
        for i in range(len(keys) - 1):
            indices0 = np.random.randint(0, len(features[keys[i + 1]]), samples)
            indices1 = np.random.randint(0, len(features[keys[i]]), samples)
            x0.extend(features[keys[i + 1]][indices0])
            x1.extend(features[keys[i]][indices1])
            y.extend(keys[i + 1] * np.ones(samples))
        x0 = np.array(x0)
        x1 = np.array(x1)
        y = np.array([y]).transpose()
        if weights is None:
            return [x0, x1, y]
        else:
            w = np.array(w)
            return [x0, x1, y, w]
def _fit_querys(self, dictOfQueries, validation, use_weights):
"""
:param dictOfQueries: dict of queries for training the net. The key is the class
and the value is a list of queries
:param use_weights: list of weights per document inside a query
:return:
"""
#print('_fit_querys')
if self.x0 is None:
if self.feature_func is None:
self.num_features = len(dictOfQueries[list(dictOfQueries[0].keys())[0]][0][0]) - (
1 if use_weights else 0)
else:
if use_weights:
self.num_features = len(
self.feature_func(dictOfQueries[0][list(dictOfQueries.keys())[0]][0][0][:-1]))
else:
self.num_features = len(self.feature_func(dictOfQueries[0][list(
dictOfQueries.keys())[0]][0][0]))
self._build_model()
if self.cost is None:
if not self.weights:
cost = tf.reduce_mean((self.y0 - self.nn) ** 2)
else:
cost = tf.reduce_mean(self.w0 * (self.y0 - self.nn) ** 2)
else:
cost = self.cost(self.nn, self.y0)
# Regularization Loss
l2_loss = tf.losses.get_regularization_loss()
train_loss = cost + l2_loss
global_step = tf.Variable(0, trainable=False)
increment_global_step = tf.assign(global_step, global_step + 1)
learning_rate = tf.train.exponential_decay(self.learning_rate,
global_step,
self.learning_rate_step_size,
self.learning_rate_decay_factor,
staircase=True)
optimizer = self.optimizer(learning_rate).minimize(train_loss, global_step=global_step)
init = tf.global_variables_initializer()
sample_factor = np.log(1.0 * self.end_batch_size / self.start_batch_size)
q_factor = np.log(1.0 * self.end_qids / self.start_qids)
self.sess = tf.Session()
self.sess.run(init)
# Early Stopping
scores = []
best_sf = 0
saver = tf.train.Saver()
for step in range(self.max_steps):
samples = int(self.start_batch_size * np.exp(1.0 * sample_factor * step / self.max_steps))
q_samples = int(self.start_qids * np.exp(1.0 * q_factor * step / self.max_steps))
x0 = []
x1 = []
y = []
if use_weights:
w = []
queries = np.random.choice(dictOfQueries, q_samples)
for q in queries:
pairs = self._build_pairs(q, samples, use_weights)
x0.extend(pairs[0])
x1.extend(pairs[1])
y.extend(pairs[2])
if use_weights:
w.extend(pairs[3])
if use_weights:
val, _, _ = self.sess.run(
[cost, optimizer, increment_global_step],
feed_dict={self.x0: x0, self.x1: x1, self.w0: w, self.y0: y, self.should_drop: True})
else:
val, _, _ = self.sess.run(
[cost, optimizer, increment_global_step],
feed_dict={self.x0: x0, self.x1: x1, self.y0: y, self.should_drop: True})
if self.print_step != 0 and step % self.print_step == 0:
print("step: {}, value: {}, samples: {}, queries: {}".format(
step, val, samples, q_samples))
# Early Stopping
if self.early_stopping and step >= self.stop_start:
cur_score = 0.
for X, y, z in validation:
cur_score += self.stop_scorer(self, X, y)
cur_score /= len(validation)
scores.append(cur_score)
if cur_score >= scores[best_sf] + self.stop_delta or step == self.stop_start:
best_sf = step - self.stop_start
saver.save(self.sess, "./tmp/{}_{}.ckpt".format(self.name, tmp_name))
if step - best_sf > self.lookback:
saver.restore(self.sess, "./tmp/{}_{}.ckpt".format(self.name, tmp_name))
break
    def _fit_querys_f(self, dictOfQueries, validation, use_weights):
        """Train on queries given as flat (rows, targets, weights) tuples.

        Same training loop as _fit_querys, but each query is an (X, y, w)
        tuple and pairs are drawn via _build_pairs_f.

        :param dictOfQueries: list of (X, targets, weights) query tuples
        :param validation: list of (X, y, w) tuples used for early stopping
        :param use_weights: whether document weights are in use
        """
        if self.x0 is None:
            if self.feature_func is None:
                # NOTE(review): the next line is a no-op leftover expression.
                len(dictOfQueries[0][0][0])
                self.num_features = len(dictOfQueries[0][0][0]) - (
                    1 if use_weights else 0)
            else:
                if use_weights:
                    self.num_features = len(
                        self.feature_func(dictOfQueries[0][0][0][:-1]))
                else:
                    self.num_features = len(self.feature_func(dictOfQueries[0][0][0]))
            self._build_model()
        # Default cost: (weighted) squared error between target and net output.
        if self.cost is None:
            if not self.weights:
                cost = tf.reduce_mean((self.y0 - self.nn) ** 2)
            else:
                cost = tf.reduce_mean(self.w0 * (self.y0 - self.nn) ** 2)
        else:
            cost = self.cost(self.nn, self.y0)
        # Regularization Loss
        l2_loss = tf.losses.get_regularization_loss()
        train_loss = cost + l2_loss
        global_step = tf.Variable(0, trainable=False)
        increment_global_step = tf.assign(global_step, global_step + 1)
        learning_rate = tf.train.exponential_decay(self.learning_rate,
                                                   global_step,
                                                   self.learning_rate_step_size,
                                                   self.learning_rate_decay_factor,
                                                   staircase=True)
        optimizer = self.optimizer(learning_rate).minimize(train_loss, global_step=global_step)
        init = tf.global_variables_initializer()
        # Exponential growth factors for batch and query sample sizes.
        sample_factor = np.log(1.0 * self.end_batch_size / self.start_batch_size)
        q_factor = np.log(1.0 * self.end_qids / self.start_qids)
        self.sess = tf.Session()
        self.sess.run(init)
        # Early Stopping
        tmp_name = str(time.time())
        scores = []
        best_sf = 0
        saver = tf.train.Saver()
        for step in range(self.max_steps):
            samples = int(self.start_batch_size * np.exp(1.0 * sample_factor * step / self.max_steps))
            q_samples = int(self.start_qids * np.exp(1.0 * q_factor * step / self.max_steps))
            x0 = []
            x1 = []
            y = []
            if use_weights:
                w = []
            # Draw q_samples queries (with replacement) for this step.
            queries = np.random.choice(len(dictOfQueries), q_samples)
            queries = [dictOfQueries[loc] for loc in queries]
            for X, y_2, z in queries:
                pairs = self._build_pairs_f(X, y_2, samples, use_weights)
                x0.extend(pairs[0])
                x1.extend(pairs[1])
                y.extend(pairs[2])
                if use_weights:
                    w.extend(pairs[3])
            if use_weights:
                val, _, _ = self.sess.run(
                    [cost, optimizer, increment_global_step],
                    feed_dict={self.x0: x0, self.x1: x1, self.w0: w, self.y0: y, self.should_drop: True})
            else:
                val, _, _ = self.sess.run(
                    [cost, optimizer, increment_global_step],
                    feed_dict={self.x0: x0, self.x1: x1, self.y0: y, self.should_drop: True})
            if self.print_step != 0 and step % self.print_step == 0:
                print("step: {}, value: {}, samples: {}, queries: {}".format(
                    step, val, samples, q_samples))
            # Early Stopping: checkpoint on improvement, restore best and stop
            # after `lookback` steps without improvement.
            if self.early_stopping and step >= self.stop_start:
                cur_score = 0.
                for X, y, z in validation:
                    cur_score += self.stop_scorer(self, X, y)
                cur_score /= len(validation)
                scores.append(cur_score)
                if cur_score >= scores[best_sf] + self.stop_delta or step == self.stop_start:
                    best_sf = step - self.stop_start
                    saver.save(self.sess, "./tmp/{}_{}.ckpt".format(self.name, tmp_name))
                if step - best_sf > self.lookback:
                    saver.restore(self.sess, "./tmp/{}_{}.ckpt".format(self.name, tmp_name))
                    break
    def _fit_no_querys(self, features, validation, weights=None):
        """Train without query structure (classification-style data).

        # ToDo for now refit a loaded ranker is not working
        :param features: dict mapping label -> np.array of rows (from _to_dict)
        :param validation: list of (X, y, w) tuples used for early stopping
        :param weights: optional per-document weights
        :return:
        """
        if self.x0 is None:
            if self.feature_func is None:
                self.num_features = len(features[list(features.keys())[0]][0])
            else:
                self.num_features = len(self.feature_func(features[0]))
            self._build_model()
        # Default cost: (weighted) squared error between target and net output.
        if self.cost is None:
            if not self.weights:
                cost = tf.reduce_mean((self.y0 - self.nn) ** 2)
            else:
                cost = tf.reduce_mean(self.w0 * (self.y0 - self.nn) ** 2)
        else:
            cost = self.cost(self.nn, self.y0)
        # Regularization Loss
        l2_loss = tf.losses.get_regularization_loss()
        train_loss = cost + l2_loss
        global_step = tf.Variable(0, trainable=False)
        increment_global_step = tf.assign(global_step, global_step + 1)
        learning_rate = tf.train.exponential_decay(self.learning_rate,
                                                   global_step,
                                                   self.learning_rate_step_size,
                                                   self.learning_rate_decay_factor,
                                                   staircase=True)
        optimizer = self.optimizer(learning_rate).minimize(train_loss, global_step=global_step)
        init = tf.global_variables_initializer()
        # Exponential growth factor for the batch size.
        sample_factor = np.log(1.0 * self.end_batch_size / self.start_batch_size)
        self.sess = tf.Session()
        self.sess.run(init)
        # Early Stopping
        tmp_name = str(time.time())
        scores = []
        best_sf = 0
        saver = tf.train.Saver()
        for step in range(self.max_steps):
            samples = int(self.start_batch_size * np.exp(1.0 * sample_factor * step / self.max_steps))
            pairs = self._build_no_query_pairs(features, samples, weights is not None)
            x0 = pairs[0]
            x1 = pairs[1]
            y = pairs[2]
            if weights is not None:
                w = pairs[3]
            if weights is not None:
                val, _, _ = self.sess.run(
                    [cost, optimizer, increment_global_step],
                    feed_dict={self.x0: x0, self.x1: x1, self.w0: w, self.y0: y, self.should_drop: True})
            else:
                val, _, _ = self.sess.run(
                    [cost, optimizer, increment_global_step],
                    feed_dict={self.x0: x0, self.x1: x1, self.y0: y, self.should_drop: True})
            if self.print_step != 0 and step % self.print_step == 0:
                print("step: {}, value: {}, samples: {}".format(step, val, samples))
            # Early Stopping: checkpoint on improvement, restore best and stop
            # after `lookback` steps without improvement.
            if self.early_stopping and step >= self.stop_start:
                cur_score = 0.
                for X, y, z in validation:
                    cur_score += self.stop_scorer(self, X, y)
                cur_score /= len(validation)
                scores.append(cur_score)
                if cur_score >= scores[best_sf] + self.stop_delta or step == self.stop_start:
                    best_sf = step - self.stop_start
                    saver.save(self.sess, "./tmp/{}_{}.ckpt".format(self.name, tmp_name))
                if step - best_sf > self.lookback:
                    saver.restore(self.sess, "./tmp/{}_{}.ckpt".format(self.name, tmp_name))
                    break
    def fit(self, features, real_classes, **fit_params):
        """Train the ranker.

        :param features: list of queries (ranking mode) or a feature matrix
        :param real_classes: labels per query / per row
        :param fit_params: must contain "ranking" (bool); may contain
            "sample_weights". NOTE(review): a missing "ranking" key raises
            KeyError — confirm whether a default is intended.
        """
        if "sample_weights" in fit_params.keys():
            sample_weights = fit_params["sample_weights"]
        else:
            sample_weights = None
        if fit_params["ranking"]:
            # Ranking mode: split whole queries into train / validation sets.
            vals = []
            val_queries = []
            if self.early_stopping:
                val_queries = np.random.choice(len(features),
                                               int(len(features) * self.validation_size),
                                               replace=False)
            for i in val_queries:
                vals.append((features[i], real_classes[i], sample_weights[i]
                             if sample_weights is not None else None))
            feats = []
            for i in range(len(features)):
                if i in val_queries:
                    continue
                feats.append((features[i], real_classes[i], sample_weights[i]
                              if sample_weights is not None else None))
            self._fit_querys_f(feats, vals, sample_weights is not None)
        else:
            # Classification mode: stratified row-level train/validation split.
            vals = None
            if self.early_stopping:
                id_train, id_test = train_test_split(
                    np.arange(len(features)), test_size=self.validation_size,
                    random_state=self.random_seed, shuffle=True, stratify=real_classes)
                vals = [(features[id_test], real_classes[id_test], sample_weights[id_test]
                         if sample_weights is not None else None)]
                features = features[id_train]
                real_classes = real_classes[id_train]
                if sample_weights is not None:
                    sample_weights = sample_weights[id_train]
            feats = self._to_dict(features, real_classes, sample_weights if sample_weights is not None else None)
            self._fit_no_querys(feats, vals, sample_weights)
    @staticmethod
    def save(estimator, path):
        """Persist a trained directRanker to disk.

        Writes two files: ``path + ".ckpt"`` (TF variables) and
        ``path + ".pkl"`` (a pickled, parameter-only copy of the estimator).

        :param estimator: the trained directRanker instance to save
        :param path: destination path prefix (no extension)
        """
        saver = tf.train.Saver()
        if "/" not in path:
            path = "./" + path
        saver.save(estimator.sess, path + ".ckpt")
        # Pickle a fresh instance carrying only the constructor parameters.
        save_dr = directRanker()
        for key in estimator.get_params():
            # ToDo: Need to be fixed to also restore the cost function
            if key == "cost":
                save_dr.__setattr__(key, None)
            else:
                save_dr.__setattr__(key, estimator.get_params()[key])
        with open(path + ".pkl", 'wb') as output:
            pickle.dump(save_dr, output, 0)
    @staticmethod
    def load_ranker(path):
        """Load a directRanker previously stored with save().

        Restores the pickled parameters, re-imports the TF graph from the
        checkpoint meta file and rebinds the named tensors.

        :param path: path prefix used in save() (no extension)
        :return: a ready-to-evaluate directRanker instance
        """
        tf.reset_default_graph()
        graph = tf.Graph()
        sess = tf.Session(graph=graph)
        with open(path + ".pkl", 'rb') as input:
            dr = pickle.load(input)
        with graph.as_default():
            saver = tf.train.import_meta_graph(path + ".ckpt.meta")
            saver.restore(sess, path + ".ckpt")
        # Rebind the graph tensors by the names given in _build_model().
        dr.x0 = graph.get_tensor_by_name("x0:0")
        dr.x1 = graph.get_tensor_by_name("x1:0")
        dr.y0 = graph.get_tensor_by_name("y0:0")
        dr.w0 = graph.get_tensor_by_name("w0:0")
        dr.nn = graph.get_tensor_by_name("nn:0")
        dr.should_drop = graph.get_tensor_by_name("drop:0")
        dr.sess = sess
        dr.num_features = dr.x0.shape[1].value
        return dr
    def evaluate(self, features0, features1):
        """Evaluate the pairwise ranking output r(features0, features1).

        :param features0: features of the first instance (or batch)
        :param features1: features of the second instance (or batch)
        :return: the net output; positive means features0 ranks above features1
        """
        if self.feature_func is None:
            features0 = np.array(features0)
            features1 = np.array(features1)
        else:
            features0, features1 = self.feature_func(features0, features1)
        # Promote a single instance to a batch of one.
        if len(features0.shape) == 1:
            features0 = [features0]
            features1 = [features1]
        return self.sess.run(self.nn, feed_dict={self.x0: features0, self.x1: features1, self.should_drop: False})
    def evaluatePartNet(self, features):
        """Evaluate the single-branch (classification) head for *features*.

        :param features: features of the instance (or batch) to feed in
        :return: raw output of the nn_cls branch (one branch through the
                 shared ranking layer)
        """
        if self.feature_func_nn0_1 is None:
            features = np.array(features)
        else:
            features = self.feature_func_nn0_1(features)
        # Promote a single instance to a batch of one.
        if len(features.shape) == 1:
            features = [features]
        return self.sess.run(self.nn_cls, feed_dict={self.x0: features, self.should_drop: False})
def predict_proba(self, features):
"""
:param features: list of features of the instance feed to the net
:return: predicted class
"""
if self.feature_func_nn0_1 is None:
features = np.array(features)
else:
features = self.feature_func_nn0_1(features)
if len(features.shape) == 1:
features = [features]
res = self.sess.run(self.nn_cls, feed_dict={self.x0: features, self.should_drop: False})
return [0.5 * (value + 1) for value in res]
    def close(self):
        """
        This function closes the tensorflow session used for the directRanker.
        Call when the ranker is no longer needed to release TF resources.
        """
        self.sess.close()
| [
"noreply@github.com"
] | noreply@github.com |
3370e689410d396a827a715f14aedb1803000b7e | f048f66977ebcfd3973f5cb41911e5de8b1bf7f5 | /pullenti/ner/NumberSpellingType.py | 72db57f98484782ba6c07c2f861ed30185173d7c | [] | no_license | AAA1911/PullentiPython | e01223d2d8656a8fbcc0873446a12d7e5c913f4a | f25b228c8eef9b70acb1285f405c976542342319 | refs/heads/master | 2020-12-22T12:56:21.701229 | 2019-12-11T08:34:43 | 2019-12-11T08:34:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 708 | py | # Copyright (c) 2013, Pullenti. All rights reserved. Non-Commercial Freeware.
# This class is generated using the converter UniSharping (www.unisharping.ru) from Pullenti C#.NET project (www.pullenti.ru).
# See www.pullenti.ru/downloadpage.aspx.
from enum import IntEnum
class NumberSpellingType(IntEnum):
""" Возможные типы написаний """
DIGIT = 0
""" Цифрами """
ROMAN = 1
""" Римскими цифрами """
WORDS = 2
""" Прописью (словами) """
AGE = 3
""" Возраст (летие) """
@classmethod
def has_value(cls, value):
return any(value == item.value for item in cls) | [
"alex@alexkuk.ru"
] | alex@alexkuk.ru |
d793f87b0bef4eaaef51634ad0c4592d4a02d5ee | dd573ed68682fd07da08143dd09f6d2324f51345 | /daily_study/ProblemSolving/5430_AC.py | ee3166173aa8bebbfdd26b513e4d008af4aec83f | [] | no_license | chelseashin/My-Algorithm | 0f9fb37ea5c6475e8ff6943a5fdaa46f0cd8be61 | db692e158ebed2d607855c8e554fd291c18acb42 | refs/heads/master | 2021-08-06T12:05:23.155679 | 2021-07-04T05:07:43 | 2021-07-04T05:07:43 | 204,362,911 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,117 | py | from sys import stdin
input = stdin.readline
def solve(numbers):
    """Apply the AC-language program ``p`` (module-level global) to *numbers*.

    ``R`` reverses the array, ``D`` deletes the first element. Instead of
    physically reversing, track orientation with a flag and trim a [lo, hi)
    window from either end — O(len(p) + len(numbers)) overall.

    Fixes over the previous version: ``numbers[dcnt:].reverse()`` reversed a
    throw-away slice (no effect); deferred front-deletions could pop the
    wrong physical end after an odd number of R's; an empty result emitted
    "[" without the closing bracket; debug prints polluted stdout.

    :param numbers: list of number strings; [''] represents an empty array
    :return: the resulting array as "[a,b,...]" (or "[]"), or "error" if a
             deletion is attempted on an empty array
    """
    # The caller parses "[]" into [''] — normalize to a truly empty list.
    numbers = [tok for tok in numbers if tok]
    reversed_view = False
    lo, hi = 0, len(numbers)
    for cmd in p:
        if cmd == "R":
            reversed_view = not reversed_view
        elif cmd == "D":
            if lo >= hi:
                return "error"
            # Delete from whichever end is currently the logical front.
            if reversed_view:
                hi -= 1
            else:
                lo += 1
    window = numbers[lo:hi]
    if reversed_view:
        window.reverse()
    return "[" + ",".join(window) + "]"
# Driver: read T test cases; each case is a command string p, a count n, and
# a bracketed comma-separated number list (kept as strings for fast output).
T = int(input())
for _ in range(T):
    p = input().strip()
    n = int(input())
    numbers = input().strip().split(',')
    # Strip the leading '[' and trailing ']' from the first/last tokens;
    # for the input "[]" this leaves [''] — solve() treats that as empty.
    numbers[0] = numbers[0][1:]
    numbers[-1] = numbers[-1][:-1]
    print(solve(numbers))
"chaewonshin95@gmail.com"
] | chaewonshin95@gmail.com |
6ba1c5ad8455cb166c23400b1eaadef2c3fbd71a | 207b1a92f81074d646ea40201f20aadb4f04a7b9 | /virtual/bin/sqlformat | 26f5fa7b6770340ebcd6b5d485c38b431f8ea587 | [
"MIT"
] | permissive | Benardakaka/Water-Travelles | 7bb00ad6a0d3f174a28b90c1c839fb3bd8bcc7a9 | f074cd38bc7aefb12f024e8d9ba34b8bd5ab2776 | refs/heads/master | 2023-02-24T13:52:06.612692 | 2021-02-04T09:35:05 | 2021-02-04T09:35:05 | 335,035,601 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 260 | #!/home/moringa/Documents/Marine-Travellers/virtual/bin/python
# -*- coding: utf-8 -*-
# Console entry-point shim generated by pip/setuptools for the ``sqlformat``
# command: normalize argv[0] (strip the wrapper-script suffix), then delegate
# to sqlparse's CLI main().
import re
import sys
from sqlparse.__main__ import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"benardakaka484@gmail.com"
] | benardakaka484@gmail.com | |
e2ff23792a6b36940e6dfd522da58a99e0a959da | d3f44206d35c8c8976f10492de3c77b57082ad11 | /client.py | d4482dc9f9d577b753a36b01ed9e758c9a6893a9 | [] | no_license | s19-nets/udp-file-transfer-xquipster | e80cc7cba17507a942baff674e06aebb15d06b70 | 5882c49b4f6b29383ab28578d15b61f0537cceac | refs/heads/master | 2020-04-23T23:14:40.843065 | 2019-03-06T17:31:27 | 2019-03-06T17:31:27 | 171,529,372 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,388 | py | from socket import *
import sys
class Client:
def __init__(self, af, socktype, file_name, choice, numClients):
self.numClients = numClients
self.sAddr = sAddr = ("127.0.0.1", 50000) #set address and reserve port
self.file_name, self.choice = file_name, choice
self.clientSock = clientSock = socket(af, socktype) #create client socket
try:
self.clientSock.connect(sAddr) #connect to Server
print "Connected to server."
except error:
print "Error. Server not found."
sys.exit()
self.sendNumClients() #send number of clients to server
self.sendBasicInfoToServer() #send file name and choice to server
self.determineChoice() #determine if put or get
self.closeConnection() #close connection
#get file from server
def getFileFromServer(self):
with open(self.file_name, "w") as clientFile:
while True:
print "Receiving data..."
data = self.clientSock.recv(1024)
if data == "File not found. Try again.":
print data
return
if not data:
break
print "data =" + data
# write data to a file
clientFile.write(data)
clientFile.close()
print "Successfully get file from server."
return
#put file to server
def sendFileToServer(self):
try:
with open(self.file_name, "r") as clientFile:
print "Sending file..."
data = clientFile.read()
self.clientSock.send(data)
clientFile.close()
except IOError as e:
print "No such file or directory. Try again."
self.clientSock.send("File not found. Try again.")
return
print "Successfuly sent file to server."
return
def determineChoice(self):
if(self.choice == "PUT" or self.choice == "put"):
self.sendFileToServer()
elif(self.choice == "GET" or self.choice == "get"):
self.getFileFromServer()
else:
print "Invalid choice. Choices: PUT or GET."
return
def sendBasicInfoToServer(self):
while True:
#send file name and choice and if the ack is not received, send again
self.clientSock.send(self.file_name + ":" + self.choice) #send file name and choice to server
ack = self.clientSock.recv(4)
if ack == "Done":
return
def sendNumClients(self):
self.clientSock.send(str(self.numClients)) #send number of Clients to server
return
def closeConnection(self):
self.clientSock.close()
print "--- Connection closed. --- \n"
return
def startClients():
numClients = input('Enter the number of clients: ')
clients = []
#iterate with the number of clients
for i in range(int(numClients)):
print "Client " + str(i+1) + "."
file_name = raw_input('Enter file name: ')
choice = raw_input('Enter choice (PUT or GET): ')
clients.append(Client(AF_INET, SOCK_STREAM, file_name, choice, numClients))
startClients()
| [
"ejhardin@miners.utep.edu"
] | ejhardin@miners.utep.edu |
0db7f68eab74751e0b8f455e123cefcc363b17d2 | 470eb6b6af669ae037d1aaaf28c7169d906ca25e | /src/split_read_matrices_by_plate.py | 8292d94d002a13ad6308b38113fa0d8197f0494f | [] | no_license | wxgao33/CSI-Microbes-analysis | 5bddd6cc4ffb7ec2dca833231a4e966b92f348a1 | 273b41a20c4c13af0efe2a888821b0cfc5e0c189 | refs/heads/master | 2023-04-12T00:59:02.423797 | 2021-05-18T15:03:11 | 2021-05-18T15:03:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 388 | py | import pandas as pd
read_df = pd.read_csv(snakemake.input[0], sep="\t", index_col=0)
metadata_df = pd.read_csv(snakemake.input[1], sep="\t")
metadata_df = metadata_df.loc[metadata_df["plate"].astype("str") == snakemake.wildcards["plate"]]
read_df = read_df[metadata_df["cell"]]
read_df.to_csv(snakemake.output[0], sep="\t")
metadata_df.to_csv(snakemake.output[1], sep="\t", index=False)
| [
"wir963@gmail.com"
] | wir963@gmail.com |
bbb44545c91f38f60b5e2dd68e784a2dd5585d07 | 5f0aa278f1bce85b852d4cb57b5d1314b577635f | /anagram.py | d9ed06d79ee8e8fb1ad202f9006e8070ad81565e | [] | no_license | osvaldohg/algorithms | 56eb12975468871a11c7598b291780ce662b80cd | 99bcc57440e1fc7ac077112a9f65377554cb3a4c | refs/heads/master | 2021-10-08T04:28:24.106489 | 2018-12-07T17:08:03 | 2018-12-07T17:08:03 | 71,183,016 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,159 | py | print "decoding"
#poultry outwits ants
PHRASE="poultryoutwitsants"
#pastils turnout towy
#poultryoutwitsants
#ailnooprssttttuuwy
source_list=[]
source_dict={}
filter_words=[]
def load_dict_source(inputFile):
source=open(inputFile,"r")
for line in source:
source_list.append(line.strip())
def fill_dict(word):
dict={}
for letter in word:
if letter not in dict:
dict[letter]=1
else:
dict[letter]=dict[letter]+1
return dict
#main
load_dict_source("wordlist2.txt")
#print source_list
print len(source_list)
source_dict=fill_dict(PHRASE)
print source_dict.keys()
for word in source_list:
tmpDict=fill_dict(word)
for letter in tmpDict.keys():
if letter in source_dict.keys():
if tmpDict[letter]<=source_dict[letter]:
filter_words.append(word)
else:
break
print len(filter_words)
#we are doing good enough
len_dict={}
for word in filter_words:
if len(word) not in len_dict:
len_dict[len(word)]=[word]
else:
len_dict[len(word)].append(word)
print len_dict.keys()
| [
"noreply@github.com"
] | noreply@github.com |
f0e3aff03c48d717cce727030dcf7d1bc41cccc4 | 4384308f49712034322404f65bedd47878c3c288 | /server/dataAccess.py | 7a81622218d0aae8a62e77e31ec7be6004b7874e | [] | no_license | weiwang-goed/warehouse | f4fea4b8205149bb18213e58f6f191fa36df4610 | 650330d7875c7094c4c53495855ae71663b733f4 | refs/heads/master | 2023-07-13T16:37:01.059098 | 2021-09-12T14:11:09 | 2021-09-12T14:11:09 | 405,655,258 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,493 | py | import pandas as pd
import os
import json
import pathlib
import time
from serverConfig import *
class dataAccessJs():
''' Funtions to access database (in this case, js files) '''
def __init__(self, inventoryJs, productJs):
self.inventory, self.products = {}, {}
self.inventoryJs, self.productJs = inventoryJs, productJs
self.loadInventory(inventoryJs)
self.loadProducts(productJs)
return
def _dbgData(self):
''' debug and show all data '''
print('-- Inventory dataframe table --')
inv = self.inventory
print(inv.head(5))
# print('-- Inventory article names --')
# print(list(inv['name']) )
# print('-- Inventory in-stock statistics --')
# print(inv['stock'].astype('int32').describe())
# print('-- Products --')
# print(self.products.keys())
return
def loadInventory(self, path:str):
''' load inventory from js '''
with open(path) as fd:
d = json.load(fd)
inv = pd.DataFrame(d['inventory'])
self.inventory = inv.set_index('art_id')
return
def _commitInventory(self):
''' write loaded inventory back to js '''
inv = self.inventory.copy()
inv['art_id'] = inv.index
invJs = {"inventory": json.loads(inv.astype(str).to_json(orient = 'records'))}
with open(self.inventoryJs, 'w') as fd:
json.dump(invJs, fd, indent=2, separators=(',', ': '))
return
def loadProducts(self, path:str):
''' load products menu from js '''
with open(path) as fd:
productList = json.load(fd)['products']
for p in productList:
self.products[p['name']] = pd.DataFrame(p['contain_articles'])
return
def getProductQuantity(self, name:str) -> int:
''' return maximum quantity of one product from the current inventory
return 0 if any errors
'''
if name not in self.products.keys():
print('[Warning] Product not found: ', name)
return 0
dfPrd = self.products[name]
try:
dfInvMatch = self.inventory.loc[dfPrd['art_id']]
except:
print('[Warning] Some article category missing in the inventory', list(dfPrd['art_id']) )
return 0
invArticleStocks = dfInvMatch['stock'].astype(int).to_numpy()
prdArticleNeeded = dfPrd['amount_of'].astype(int).to_numpy()
numPerArticle = invArticleStocks/prdArticleNeeded
print(numPerArticle, int(min(numPerArticle)))
return int(min(numPerArticle))
def getProductsQuantity(self) -> dict:
''' return a dictionary, for each product -> {product : getProductQuantity(product)} '''
productsQuantity = {}
for p in self.products.keys():
productsQuantity[p] = self.getProductQuantity(p)
return productsQuantity
def rmProduct(self, name:str, num:int):
''' remove/sell {num} pieces of product {name}, num < 0 means cancel subscription
1. return number of products successfully sold
2. update local inventory
'''
## check paramters
if name not in self.products.keys():
print('[Warning] Product not found: ', name)
return 0
dfPrd = self.products[name]
try:
dfInvMatch = self.inventory.loc[dfPrd['art_id']]
except:
print('[Warning] Some article category missing in the inventory', list(dfPrd['art_id']) )
return 0
## available products to remove
invArticleStocks = dfInvMatch['stock'].astype(int).to_numpy()
prdArticleNeeded = dfPrd['amount_of'].astype(int).to_numpy()
prdNum = int(min(invArticleStocks/prdArticleNeeded))
rmNum = min(prdNum, num)
## update inventory dataframe.
invArticleStocksNew = invArticleStocks - prdArticleNeeded*rmNum
self.inventory.loc[dfPrd['art_id'], 'stock'] = invArticleStocksNew
self._commitInventory()
return rmNum
if __name__ == '__main__':
os.chdir(pathlib.Path(__file__).parent.resolve())
db = dataAccessJs(DATA_INV_JS, DATA_PRD_JS)
db._dbgData()
db.getProductQuantity('Dinning Table')
print(db.getProductsQuantity())
db.rmProduct('Dinning Table', 1)
db._dbgData()
db.saveInventory()
| [
"ec2-user@ip-172-31-38-196.us-east-2.compute.internal"
] | ec2-user@ip-172-31-38-196.us-east-2.compute.internal |
0e82ee79e918a29ba71b84fda1e05d64b7d61662 | 88509a8ce62a22acc0639c683900d5d0cb8d69e7 | /Day22/orm/app/views.py | 034ffce2d9c3976faf3424c9b86052e00b42b8fe | [] | no_license | pytutorial/py2104 | 8b0238ab6f6d2f5395aee5fbe1f4aff03b819cd3 | 48b36d6b1f40730ef2747c310e70fb6997eda388 | refs/heads/main | 2023-09-03T16:55:02.285158 | 2021-10-20T05:24:31 | 2021-10-20T05:24:31 | 391,613,464 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,166 | py | from django.shortcuts import HttpResponse
from .models import *
import json
# Create your views here.
def get_product_by_code(request, code):
product = Product.objects.get(code=code)
data = {
'id': product.id,
'name': product.name,
'code': product.code,
'description': product.description,
'price': product.price
}
return HttpResponse(json.dumps(data))
def search_product(request):
input_data = request.GET
keyword = input_data.get('keyword', '')
product_list = Product.objects.filter(
name__icontains=keyword)
result = [product.name for product in product_list]
return HttpResponse(','.join(result))
def get_customer_by_phone(request, phone):
customer = Customer.objects.get(phone=phone)
return HttpResponse(customer.name)
def search_customer(request):
input_data = request.GET
keyword = input_data.get('keyword', '')
print('keyword=', keyword)
customer_list = Customer.objects.filter(name__icontains=keyword)
print('customer_list=', customer_list)
result = ','.join([customer.name for customer in customer_list])
return HttpResponse(result) | [
"duongthanhtungvn01@gmail.com"
] | duongthanhtungvn01@gmail.com |
0e61787e7e39045dbbff613b13f6d1a6a51f3570 | 29e4cfd21734307a438ce4620c11e0c99204b05d | /crawler/items/new.py | f7638a6a8cdd2c3a22ad45ef9e743cae108afd14 | [
"MIT"
] | permissive | LProDreamAll/douban_movie_database | b8be820c5c71c0263475ec6c3260da34c0bcbae7 | c4891fa078dbaffe251d135e5b49fafdb92e2b2a | refs/heads/master | 2020-09-23T05:48:52.387022 | 2019-12-01T12:16:40 | 2019-12-01T12:16:40 | 225,420,261 | 2 | 0 | MIT | 2019-12-02T16:30:07 | 2019-12-02T16:30:06 | null | UTF-8 | Python | false | false | 192 | py | # !/usr/bin/env python
# -*- coding: utf-8 -*-
# author: humingk
# ----------------------
import scrapy
class MovieDouban(scrapy.Item):
id = scrapy.Field()
name_zh = scrapy.Field()
| [
"humingk@qq.com"
] | humingk@qq.com |
fc1266738f799c65b9d4f71e6846f6b72d00fc74 | 82b946da326148a3c1c1f687f96c0da165bb2c15 | /sdk/python/pulumi_azure_native/recoveryservices/get_replication_migration_item.py | d1e403b2a6e74c291c8bcdb23200e48a30cd7dcb | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | morrell/pulumi-azure-native | 3916e978382366607f3df0a669f24cb16293ff5e | cd3ba4b9cb08c5e1df7674c1c71695b80e443f08 | refs/heads/master | 2023-06-20T19:37:05.414924 | 2021-07-19T20:57:53 | 2021-07-19T20:57:53 | 387,815,163 | 0 | 0 | Apache-2.0 | 2021-07-20T14:18:29 | 2021-07-20T14:18:28 | null | UTF-8 | Python | false | false | 4,452 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetReplicationMigrationItemResult',
'AwaitableGetReplicationMigrationItemResult',
'get_replication_migration_item',
]
@pulumi.output_type
class GetReplicationMigrationItemResult:
"""
Migration item.
"""
def __init__(__self__, id=None, location=None, name=None, properties=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if properties and not isinstance(properties, dict):
raise TypeError("Expected argument 'properties' to be a dict")
pulumi.set(__self__, "properties", properties)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
"""
Resource Id
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Resource Location
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource Name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> 'outputs.MigrationItemPropertiesResponse':
"""
The migration item properties.
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource Type
"""
return pulumi.get(self, "type")
class AwaitableGetReplicationMigrationItemResult(GetReplicationMigrationItemResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetReplicationMigrationItemResult(
id=self.id,
location=self.location,
name=self.name,
properties=self.properties,
type=self.type)
def get_replication_migration_item(fabric_name: Optional[str] = None,
migration_item_name: Optional[str] = None,
protection_container_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
resource_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetReplicationMigrationItemResult:
"""
Migration item.
API Version: 2018-07-10.
:param str fabric_name: Fabric unique name.
:param str migration_item_name: Migration item name.
:param str protection_container_name: Protection container name.
:param str resource_group_name: The name of the resource group where the recovery services vault is present.
:param str resource_name: The name of the recovery services vault.
"""
__args__ = dict()
__args__['fabricName'] = fabric_name
__args__['migrationItemName'] = migration_item_name
__args__['protectionContainerName'] = protection_container_name
__args__['resourceGroupName'] = resource_group_name
__args__['resourceName'] = resource_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:recoveryservices:getReplicationMigrationItem', __args__, opts=opts, typ=GetReplicationMigrationItemResult).value
return AwaitableGetReplicationMigrationItemResult(
id=__ret__.id,
location=__ret__.location,
name=__ret__.name,
properties=__ret__.properties,
type=__ret__.type)
| [
"noreply@github.com"
] | noreply@github.com |
2a469acf940d000a63a2e38071d9e6e0c3be30a0 | 48b8afa97581b618fc5a378b566db95d486bb6b5 | /6kyu/AreTheyTheSame_6kyu.py | 04523bdd29f97c4c415faac92383a12cbfc391bf | [] | no_license | AthaG/Kata-Tasks | 1a3449a641b8bb10739195fd9291e91b990f0076 | ec01f86bd7e508552e00a285116d442c999070b2 | refs/heads/master | 2023-06-10T02:51:57.445083 | 2021-06-20T08:13:57 | 2021-06-20T08:13:57 | 323,949,494 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,715 | py | '''Given two arrays a and b write a function comp(a, b) (orcompSame(a, b)) that checks
whether the two arrays have the "same" elements, with the same multiplicities. "Same"
means, here, that the elements in b are the elements in a squared, regardless of the
order.
Examples:
Valid arrays
a = [121, 144, 19, 161, 19, 144, 19, 11]
b = [121, 14641, 20736, 361, 25921, 361, 20736, 361]
comp(a, b) returns true because in b 121 is the square of 11, 14641 is the square of
121, 20736 the square of 144, 361 the square of 19, 25921 the square of 161, and so on.
It gets obvious if we write b's elements in terms of squares:
a = [121, 144, 19, 161, 19, 144, 19, 11]
b = [11*11, 121*121, 144*144, 19*19, 161*161, 19*19, 144*144, 19*19]
Invalid arrays
If, for example, we change the first number to something else, comp may not return true
anymore:
a = [121, 144, 19, 161, 19, 144, 19, 11]
b = [132, 14641, 20736, 361, 25921, 361, 20736, 361]
comp(a,b) returns false because in b 132 is not the square of any number of a.
a = [121, 144, 19, 161, 19, 144, 19, 11]
b = [121, 14641, 20736, 36100, 25921, 361, 20736, 361]
comp(a,b) returns false because in b 36100 is not the square of any number of a.
Remarks:
a or b might be [] or {} (all languages except R, Shell).
a or b might be nil or null or None or nothing (except in C++, Elixir, Haskell,
PureScript, Pascal, R, Rust, Shell).
If a or b are nil (or null or None, depending on the language), the problem doesn't
make sense so return false.'''
def comp(array1, array2):
if array1 == None:
return False
for x in array1:
if x*x in array2:
array2.remove(x*x)
return array2 == []
| [
"52504599+AthaG@users.noreply.github.com"
] | 52504599+AthaG@users.noreply.github.com |
17345d8b511ebbe958917dea63d04e077d72fb03 | fe3adecf5d7f65b14b5b6faf4a9f34b00b481a75 | /corpus_formats/formater.py | 4cd9a15ba3937fd87c0d33da28add8690e6c1893 | [] | no_license | kendalvictor/harmonizing-tibidabo | d8dfcd2d3533439362278703796edff100aa7725 | e808d682431db525ae4eb7945bf9baeff9a67ca6 | refs/heads/master | 2021-01-19T20:43:25.391859 | 2015-11-02T11:35:35 | 2015-11-02T11:35:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,441 | py | # -*- coding: utf-8 -*-
# /usr/bin/python
# Transforms the conll format of the tibidabo corpus to the conll format of freeling.
import sys
if __name__ == "__main__":
if len(sys.argv) is not 1 + 3:
print "Usage: ./formater.py <tibidabo input> <freeling format> <conll format>"
exit(1)
flout = open(sys.argv[2],'wb')
cnout = open(sys.argv[3],'wb')
with open(sys.argv[1],'rb') as input:
for line in input:
token = line.split()
if len(token) == 11 :
flformat = '%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n' % (token[0],token[1],token[2],
token[4],token[3],token[5],'-','-',
token[10],token[6],token[7],'-','-')
cnformat = '%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n' % (token[0],token[1],token[2],
token[4],token[3],token[5],token[6],
token[7],token[10],'_','_')
flout.write(flformat)
cnout.write(cnformat)
else:
if len(token) > 0 :
print 'Error', len(token), line
flout.write(line)
cnout.write(line) | [
"mlloberes@bitbucket.org"
] | mlloberes@bitbucket.org |
9b2e42ad3619a8aa8d9e99c6a2b3c8045609e66e | 475d1b83b77e2730b53722f0d8d11b070f97018a | /travelapp/migrations/backup/0013_auto_20210221_1309.py | 6de634dbf85cd70b3b448828cfa895fc3a0f6706 | [
"MIT"
] | permissive | Gwellir/my-region | b651284ee4d4ec7ec892bb78a7ce3444c833d035 | baacb7f54a19c55854fd068d6e38b3048a03d13d | refs/heads/main | 2023-04-20T17:31:33.040419 | 2021-05-17T13:35:38 | 2021-05-17T13:35:38 | 336,533,029 | 0 | 1 | MIT | 2021-05-17T13:35:39 | 2021-02-06T12:31:08 | Python | UTF-8 | Python | false | false | 1,558 | py | # Generated by Django 3.1.6 on 2021-02-21 10:09
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('travelapp', '0012_trip_subbed'),
]
operations = [
migrations.RemoveField(
model_name='route',
name='base_price_currency',
),
migrations.RemoveField(
model_name='trip',
name='price_currency',
),
migrations.AlterField(
model_name='route',
name='base_price',
field=models.DecimalField(decimal_places=2, default=0, max_digits=7, verbose_name='Ориентировочная стоимость прохождения маршрута'),
),
migrations.AlterField(
model_name='trip',
name='price',
field=models.DecimalField(decimal_places=2, max_digits=7, verbose_name='Стоимость прохождения маршрута'),
),
migrations.CreateModel(
name='TripOption',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50, verbose_name='Наименование опции')),
('price', models.DecimalField(decimal_places=2, default=0, max_digits=5, verbose_name='Стоимость опции')),
('trip', models.ManyToManyField(related_name='options', to='travelapp.Trip')),
],
),
]
| [
"gwellir@gmail.com"
] | gwellir@gmail.com |
0e54f592add357a09ba8655d612cbf44e75aacd4 | e694891ff8c9d06df7b7b5def7ba71c1dba03aa8 | /redis_queue/db.py | 730396f0069a2660ad5e33e14ba3afafc373801f | [] | no_license | wangyu190810/python-skill | 78f9abb39ebfa01b92ffb2ec96c7ef57c490d68d | 719d082d47a5a82ce4a15c57dd481932a9d8f1ba | refs/heads/master | 2020-04-05T17:43:48.005145 | 2019-02-01T01:45:49 | 2019-02-01T01:45:49 | 41,524,479 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 630 | py | # -*-coding:utf-8-*-
# email:190810401@qq.com
__author__ = 'wangyu'
from sqlalchemy.orm import sessionmaker
from sqlalchemy import create_engine,text
from config import Config
def connection(database):
engine = create_engine(database)
Session = sessionmaker(engine)
session = Session()
return session
conn = connection(Config.db)
def insert_data(data):
sql =text("insert into queue_message (url,status_code) "
"VALUES (:url,:status_code)")
sql = sql.bindparams(url=data.get("url"),
status_code=data.get("status_code"))
conn.execute(sql)
conn.commit()
| [
"190810401@qq.com"
] | 190810401@qq.com |
46928e043be6e75bc252aa673845202699199897 | 34d52e534dbd866e43091fb61249c171e7f6506b | /main/tokenizeDB.py | 0c116d7e2a6c11e918a3cff26934b7665f166b3b | [] | no_license | wickz80/askreddit-informatics | e739fccebd910b99d743468f7515cfb4fd0433be | dd1510fd7b875fca23e48aec7518ecd387004664 | refs/heads/master | 2021-10-27T15:22:08.828069 | 2019-04-17T22:54:35 | 2019-04-17T22:54:35 | 115,559,434 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,132 | py | import praw
import sqlite3
import time
from itertools import islice
import re
import csv
from multiprocessing import Pool as ThreadPool
def fetchAllPostText(amount):
if amount == 0:
cur.execute("SELECT title FROM posts WHERE score >= 1")
else:
cur.execute("SELECT title FROM posts WHERE score >= 1 LIMIT {}".format(amount))
return cur.fetchall()
def tokenizeDbIntoSet(dbTextList):
wordList = []
for dbText in dbTextList:
formattedText = re.sub('[^a-zA-Z -/]+', '', str(dbText[0]).lower())
textWords = re.split("[ -/]+", formattedText, flags=re.IGNORECASE)
for textWord in textWords:
try:
wordList += [textWord]
except Exception as e:
print("Error...",e)
return set(wordList)
def getAverageScore(key, cur):
query = """SELECT round(avg(score),1)
FROM posts
WHERE title
LIKE {wildcard}
AND
(SELECT count(*)
FROM posts
WHERE title
LIKE {wildcard}) > 1
""".format(wildcard='\'%{term}%\'').format(term=key)
cur.execute(query)
return cur.fetchone()[0]
def getOccurences(key, cur):
query = """SELECT count(*)
FROM posts
WHERE title
LIKE {wildcard}
""".format(wildcard='\'%{term}%\'').format(term=key)
cur.execute(query)
return cur.fetchone()[0]
def buildTuple(wordSet, cur):
setLength = len(wordSet)
wordTuple = [('word', 'avgScore', 'numOccurences')]
startTime = time.time()
for i, textWord in enumerate(wordSet):
wordTuple += [(textWord, getAverageScore(textWord, cur), getOccurences(textWord, cur))]
if i % 50 == 0:
estimateProgess(i, setLength, startTime)
return wordTuple
def estimateProgess(currentIteration, totalIterations, lastTimestamp):
try:
fractionDone = currentIteration/totalIterations
percentageDone = round((fractionDone*100),2)
elapsedTime = round(time.time()-lastTimestamp,2)
secondsUntilDone = ((1/fractionDone) * elapsedTime)-elapsedTime
completionEstimate = time.strftime("%H:%M:%S", time.gmtime(secondsUntilDone))
except:
fractionDone = currentIteration/totalIterations
percentageDone = round((fractionDone*100),2)
elapsedTime = round(time.time()-lastTimestamp,2)
secondsUntilDone = 1
completionEstimate = time.strftime("%H:%M:%S", time.gmtime(secondsUntilDone))
statusText = '{} out of {} iterations performed -- {}%\n'.format(str(currentIteration),str(totalIterations),str(percentageDone))
timeChange = ' Time elapsed: {} seconds\n'.format(time.strftime("%H:%M:%S", time.gmtime(elapsedTime)))
timeEstimate = ' Estimated time until completion: {}\n'.format(completionEstimate)
print(statusText, timeChange, timeEstimate)
def writeTupleToCSV(tuple, fileName):
with open('{}.csv'.format(fileName), 'w', newline='') as csv_file:
writer = csv.writer(csv_file)
for line in tuple:
try:
writer.writerow(line)
except Exception as e:
print("Error...",e)
db = sqlite3.connect(r'questions.DB')
cur = db.cursor()
#pool = ThreadPool(4)
askRedditPosts = fetchAllPostText(1000)
wordSet = tokenizeDbIntoSet(askRedditPosts)
wordTuple = buildTuple(wordSet, cur)
for line in wordTuple:
print(line)
writeTupleToCSV(wordTuple, 'tupleList')
| [
"32722332+wickz80@users.noreply.github.com"
] | 32722332+wickz80@users.noreply.github.com |
34bf0ddf4c836f00f7809ad719bf5652f662b7e8 | 373035950bdc8956cc0b74675aea2d1857263129 | /spar_python/report_generation/ta1/ta1_section_overview_p2.py | 8c710d1d99d7cf13febced25219e657a0bc71447 | [
"BSD-2-Clause",
"BSD-3-Clause"
] | permissive | limkokholefork/SPARTA | 5d122cd2e920775d61a5404688aabbafa164f22e | 6eeb28b2dd147088b6e851876b36eeba3e700f16 | refs/heads/master | 2021-11-11T21:09:38.366985 | 2017-06-02T16:21:48 | 2017-06-02T16:21:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,053 | py | # *****************************************************************
# Copyright 2013 MIT Lincoln Laboratory
# Project: SPAR
# Authors: SY
# Description: Section class
#
#
# Modifications:
# Date Name Modification
# ---- ---- ------------
# 19 Sep 2013 SY Original version
# *****************************************************************
# general imports:
import logging
# SPAR imports:
import spar_python.report_generation.common.section as section
import spar_python.report_generation.ta1.ta1_section_overview_common as t1soc
import spar_python.report_generation.ta1.ta1_schema as t1s
import spar_python.report_generation.common.regression as regression
import spar_python.report_generation.common.graphing as graphing
import spar_python.report_generation.common.latex_classes as latex_classes
# LOGGER:
LOGGER = logging.getLogger(__name__)
class Ta1OverviewP2Section(t1soc.Ta1OverviewCommonSection):
"""The equality overview section of the TA1 report."""
def __init__(self, jinja_template, report_generator):
"""Initializes the section with a jinja template and a report generator.
"""
cat = t1s.CATEGORIES.P2
super(Ta1OverviewP2Section, self).__init__(
jinja_template, report_generator, cat)
def _get_parameters(self, selection_cols):
"""Returns parameters for the 3d graph."""
parameters = {}
parameters["z_label"] = (
self._config.var_rangesize + " = range size")
# find the data:
this_constraint_list = (
self._config.get_constraint_list() +
self._inp.get_constraint_list() + [
(t1s.DBP_TABLENAME, t1s.DBP_SELECTIONCOLS, selection_cols)])
these_atomic_fields_and_functions = [
(t1s.DBA_RANGE,
t1s.Ta1ResultsSchema().get_complex_function(t1s.DBA_TABLENAME,
t1s.DBA_RANGE))]
parameters["values"] = self._config.results_db.get_query_values(
[(t1s.DBP_TABLENAME, t1s.DBP_NUMNEWRETURNEDRECORDS),
(t1s.DBP_TABLENAME, t1s.DBP_QUERYLATENCY)],
constraint_list=this_constraint_list,
atomic_fields_and_functions=these_atomic_fields_and_functions)
parameters["ftr"] = self._config.ql_p2_ftr
return parameters
def _populate_output(self):
"""Populates the output object which is passed to the Jinja tempalte
in get_string."""
super(Ta1OverviewP2Section, self)._populate_output()
this_constraint_list = (
self._config.get_constraint_list() +
self._inp.get_constraint_list())
categories = self._config.results_db.get_unique_query_values(
simple_fields=[(t1s.DBP_TABLENAME, t1s.DBP_SELECTIONCOLS)],
constraint_list=this_constraint_list)
for selection_cols in categories:
self._store_3d_latency_graph(selection_cols)
| [
"mitchelljd@ll.mit.edu"
] | mitchelljd@ll.mit.edu |
f051187cc8b8ebd6a86176547ea86dfea08a8b02 | 235f47ff67783b80f5b57692d01905ea8a7e171f | /RQ1_RQ2/Thermostat_case_study/EVALUATION/Pymoo_GA/ImageBuilder.py | 263ef27cf31f913a1a914e634a9c4248ba52602c | [
"MIT"
] | permissive | dgumenyuk/Environment_generation | 5293789b89818d961d93facad1f9604470efe55d | 092fbecdc208f84aa58f2ccd3522262984e79cda | refs/heads/main | 2023-07-01T06:26:30.972835 | 2021-08-02T19:30:28 | 2021-08-02T19:30:28 | 383,611,138 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,672 | py | '''
Module to illustrate the created scenarios
'''
import numpy as np
import matplotlib.pyplot as plt
import time
class ImageBuilder:
def __init__(self, folder, duration, smpl_rate):
self.folder = folder
self.duration = duration
self.smpl_rate = smpl_rate
def build_image(
self,
option,
tc_id,
fitness,
ss_tmp=[],
id_tmp=[],
):
fig, ax1 = plt.subplots(figsize=(24, 8))
if option == 1:
ax1.set_title(
"Temperature values expected" + ", smpl_rate = " + str(self.smpl_rate),
fontsize=17,
)
ax1.plot(
[i * 3 / 60 * self.smpl_rate for i in range(0, len(id_tmp))],
id_tmp,
"o--b",
label="Scheduled temperature",
)
plt.xticks(np.arange(0, len(id_tmp) * 3 / 60 * self.smpl_rate + 1, step=2))
elif option == 2:
ax1.set_title(
"Temperature values simulated" + ", smpl_rate = " + str(self.smpl_rate),
fontsize=17,
)
ax1.plot(
[i * 3 / 60 * self.smpl_rate for i in range(0, len(ss_tmp))],
ss_tmp,
"or",
label="Actual temperature",
)
plt.xticks(np.arange(0, len(ss_tmp) * 3 / 60 * self.smpl_rate + 1, step=2))
elif option == 3:
ax1.set_title(
"Temperature values expected vs simulated, fitness = "
+ str(fitness)
+ ", smpl_rate = "
+ str(self.smpl_rate),
fontsize=17,
)
ax1.plot(
[i * 3 / 60 * self.smpl_rate for i in range(0, len(id_tmp))],
id_tmp,
"o--b",
label="Scheduled temperature",
)
ax1.plot(
[i * 3 / 60 * self.smpl_rate for i in range(0, len(ss_tmp))],
ss_tmp,
"or",
label="Actual temperature",
)
plt.xticks(np.arange(0, len(ss_tmp) * 3 / 60 * self.smpl_rate + 1, step=2))
ax1.set_xlabel("Time, hours", fontsize=14)
ax1.set_ylabel("Temperature value in degrees Celsius", fontsize=14)
top = 28
bottom = 15
ax1.set_ylim(bottom, top)
plt.yticks(np.arange(bottom, top + 1, 1.0), fontsize=12)
plt.grid(b=True, which="major", axis="both")
ax1.legend(fontsize=14)
ctime = int(time.time())
fig.savefig(self.folder + "\\" + str(tc_id) + ".png")
plt.close(fig)
| [
"noreply@github.com"
] | noreply@github.com |
e04500146c2905788b8bb45ffa56d3c234a09b81 | 3120d8b22cc0b6755da6341434165baf0a855e9d | /Day 5 - Assignments/qn3.py | 1fe82263a2e7c8a2213031cae4cee04e2a0a3ef9 | [] | no_license | karthika-onebill/python_basics_assignments | 6033c8f442d452b463e81ba8bc70a6d1ed87b14f | 793bd0205d2f3eab47bf939aa0c0e002728805dd | refs/heads/master | 2023-05-24T14:18:46.603229 | 2021-06-20T02:17:02 | 2021-06-20T02:17:02 | 376,065,135 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,171 | py | #for loop
def forLoop(list_var, type_var=0):
    """Print list contents, or their sum, using a for loop.

    Parameters
    ----------
    list_var : list
        Items to print, or numeric strings/numbers to sum.
    type_var : int, optional
        0 (default): print every element followed by a blank line.
        1: print the sum of the elements (each converted with int()).
    """
    if type_var == 1:
        # Bug fix: sum the list that was passed in, not the global `Num`.
        # (`total` also avoids shadowing the builtin `sum`.)
        total = 0
        for item in list_var:
            total += int(item)
        print("The sum from for loop:", total)
    else:
        for item in list_var:
            print(item)
        print()
#while loop
def whileLoop(list_var, type_var=0):
    """Print list contents, or their sum, using a while loop.

    Parameters
    ----------
    list_var : list
        Items to print, or numeric strings/numbers to sum.
    type_var : int, optional
        0 (default): print every element followed by a blank line.
        1: print the sum of the elements (each converted with int()).
    """
    if type_var == 1:
        # Bug fix: sum the list that was passed in, not the global `Num`.
        total = 0
        i = 0
        while i < len(list_var):
            total += int(list_var[i])
            i += 1
        print("The sum from while loop:", total)
    else:
        i = 0
        while i < len(list_var):
            print(list_var[i])
            i += 1
        print()
# Demo data sets exercised by both loop styles below.
numbers = [1, 2, 3, 4, 5]
Weekdays = ['Monday', 'Tuesday', 'Wednesday', 'Thursday',
            'Friday', 'Saturday', 'Sunday']
Num = ['222', '100', '85', '500', '300']

# Print the contents of each list with a for loop, then a while loop.
for data in (numbers, Weekdays):
    forLoop(data)
    whileLoop(data)

# Print the sum of the numeric strings with each loop style.
forLoop(Num, 1)
whileLoop(Num, 1)
| [
"karthikavel2000@gmail.com"
] | karthikavel2000@gmail.com |
ca92dcd2a70ec4049ddc36eda9fdde994a6d6d20 | 5592b2cb7120a59a246a184e72177f9312bbc277 | /trunk/src/py/clTest.py | 5090d9d7bca9c97fdc73712d9fb66bf283933307 | [] | no_license | bjohan/Corbomite | 2d5bd75706af37450ad2f99cdce14b6c2e969d4f | 15908269c9bc433784a8d15c2feb3287ffa216d7 | refs/heads/master | 2022-05-27T11:57:31.155808 | 2022-05-14T18:29:10 | 2022-05-14T18:29:10 | 28,397,950 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 148 | py | import common.tcpCommunication
cl = common.tcpCommunication.TcpClient()
cl.write("#info\r\n")
print cl.read()
cl.write("#tja\r\n")
print cl.read()
| [
"sa6bxq@gmail.com"
] | sa6bxq@gmail.com |
4fb689d7ee6a7dff3dde5f365c246c20d0d8dbde | b317bcf748825f0bcea361bbee0b02907a21aa39 | /Code/CNN/load_data.py | c0cb9d028bb1d264c0e39c6a6b2ff098eac702a9 | [] | no_license | singhalarchit/Abstract-Summarization | f0ef3b8591ab6f0273dc57ed7add4a3d4d1cdf60 | fa654984426a1ba1f128a22941307c5864904463 | refs/heads/master | 2021-05-07T06:23:05.432213 | 2017-12-14T11:25:59 | 2017-12-14T11:25:59 | 111,747,123 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,321 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Nov 18 19:56:24 2017
@author: Archit
"""
import pickle
import nltk
import numpy as np
import re
import unicodedata
from io import open
# Pickle cache locations used throughout this module.
# NOTE(review): these mix "../" and "../../" prefixes — confirm the intended
# working directory; as written they resolve to two different parent folders.
full_data_filename = "../Processed Data/fulldata.pkl"
full_data_idx_filename = "../../Processed Data/fulldataidx.pkl"
full_data2_idx_filename = "../Processed Data/fulldataidx2.pkl"
vocab_filename = "../../Processed Data/vocab2.pkl"
"""
def read_dataset2(filename):
dataset = []
with open(filename) as f:
for line in f:
line = line.decode('utf-8').encode('ascii','ignore')
nodigit = ''.join([i for i in line if not i.isdigit()])
dataset.append(nodigit)
return dataset
"""
# Turn a Unicode string to plain ASCII, thanks to
# http://stackoverflow.com/a/518232/2809427
def unicodeToAscii(s):
    """Strip accents from *s*: decompose to NFD, then drop combining marks (Mn)."""
    decomposed = unicodedata.normalize('NFD', s)
    return ''.join(ch for ch in decomposed
                   if unicodedata.category(ch) != 'Mn')
# Lowercase, trim, and remove non-letter characters
def normalizeString(s, noPeriod):
    """Lowercase and trim *s*, strip accents, and keep only letters.

    When noPeriod is truthy everything except a-z/A-Z collapses to spaces.
    Otherwise each period is padded with surrounding spaces and preserved,
    and every other non-letter run becomes a single space.
    """
    cleaned = unicodeToAscii(s.lower().strip())
    if noPeriod:
        return re.sub(r"[^a-zA-Z]+", r" ", cleaned)
    cleaned = re.sub(r"([.])", r" \1 ", cleaned)
    return re.sub(r"[^a-zA-Z.]+", r" ", cleaned)
def read_dataset(filename, noPeriod = False):
    """Read *filename* line by line and return the normalized lines as strings."""
    with open(filename, encoding='utf-8') as f:
        return [str(normalizeString(line, noPeriod)) for line in f]
def save_full_data():
    """Normalize the raw abstract/title text files and pickle them as a pair.

    Writes ``[abstracts, titles]`` to ``full_data_filename``. Titles are
    normalized with periods stripped (noPeriod=True); abstracts keep periods.
    """
    abstract_file = '../Processed Data/abstracts.txt'
    title_file = '../Processed Data/titles.txt'
    abstracts = read_dataset(abstract_file)
    titles = read_dataset(title_file, True)
    # Fix: use a context manager so the output file handle is actually closed
    # (the original passed a bare open() into pickle.dump and leaked it).
    with open(full_data_filename, "wb") as f:
        pickle.dump([abstracts, titles], f, pickle.HIGHEST_PROTOCOL)
def load_full_data():
    """Load and return the pickled (abstracts, titles) pair from full_data_filename."""
    # Fix: close the file handle deterministically (original leaked a bare open()).
    with open(full_data_filename, "rb") as f:
        abstracts, titles = pickle.load(f)
    return abstracts, titles
def abstracts2idx(abstracts):
    """Convert each abstract into lists of vocabulary indices.

    Each abstract is sentence-tokenized (only the first 10 sentences kept),
    each sentence whitespace-split, and each token mapped through the
    word2idx table pickled in ``vocab_filename``.

    Raises KeyError for tokens missing from the vocabulary.
    """
    # Fix: close the vocab file deterministically (original leaked a bare open()).
    with open(vocab_filename, "rb") as f:
        word2idx, _, _, _ = pickle.load(f)
    mod_abstracts = []
    for abstract in abstracts:
        sentences = nltk.sent_tokenize(abstract)[:10]
        mod_abstract = []
        for sentence in sentences:
            tokens = sentence.split()
            mod_abstract.append([word2idx[token] for token in tokens])
        mod_abstracts.append(mod_abstract)
    return mod_abstracts
def titles2idx(titles):
    """Convert each title into a list of vocabulary indices.

    Titles are whitespace-split and each token mapped through the word2idx
    table pickled in ``vocab_filename``.

    Raises KeyError for tokens missing from the vocabulary.
    """
    # Fix: close the vocab file deterministically (original leaked a bare open()).
    with open(vocab_filename, "rb") as f:
        word2idx, _, _, _ = pickle.load(f)
    return [[word2idx[token] for token in title.split()] for title in titles]
def save_full_data_idx():
    """Convert the full text data to vocab indices and pickle the result.

    Writes ``[abstracts, titles]`` (index form) to ``full_data_idx_filename``.
    """
    abstracts, titles = load_full_data()
    abstracts, titles = abstracts2idx(abstracts), titles2idx(titles)
    # Fix: use a context manager so the output file handle is actually closed.
    with open(full_data_idx_filename, "wb") as f:
        pickle.dump([abstracts, titles], f, pickle.HIGHEST_PROTOCOL)
def load_full_data_idx():
    """Load and return the index-form (abstracts, titles) pair."""
    # Fix: close the file handle deterministically (original leaked a bare open()).
    with open(full_data_idx_filename, "rb") as f:
        abstracts, titles = pickle.load(f)
    return abstracts, titles
def load_full_data2_idx():
    """Load and return the alternate index-form (abstracts, titles) pair."""
    # Fix: close the file handle deterministically (original leaked a bare open()).
    with open(full_data2_idx_filename, "rb") as f:
        abstracts, titles = pickle.load(f)
    return abstracts, titles
def split_data(abstracts, titles):
    """Shuffle abstracts/titles in lockstep (fixed seed) and split 80/10/10.

    Returns two lists of numpy arrays (the last split point equals the
    array length, so np.split yields a trailing empty segment that callers
    discard with [:3]).
    """
    np.random.seed(1)  # fixed seed for a reproducible shuffle
    size = len(abstracts)
    # permutation(size) is arange(size) shuffled in place — identical draw
    # to the original arange + shuffle under the same seed.
    order = np.random.permutation(size)
    shuffled_abstracts = np.asarray(abstracts)[order]
    shuffled_titles = np.asarray(titles)[order]
    cut_points = [int(0.8 * size), int(0.9 * size), size]
    return (np.split(shuffled_abstracts, cut_points),
            np.split(shuffled_titles, cut_points))
def get_splitted_data(idx = True):
    """Return (abstracts, titles), each split into train/val/test (80/10/10).

    When *idx* is truthy the vocab-index form of the data is loaded;
    otherwise the raw text form is used.
    """
    loader = load_full_data_idx if idx else load_full_data
    abstracts, titles = loader()
    abstracts_split, titles_split = split_data(abstracts, titles)
    # Drop the empty trailing segment that np.split produces.
    return abstracts_split[:3], titles_split[:3]
def get_splitted_data2(idx = True):
    """Return the alternate dataset's (abstracts, titles) train/val/test splits.

    When *idx* is truthy the alternate vocab-index pickle is loaded;
    otherwise this falls back to the raw text form (same source as
    get_splitted_data — presumably intentional; verify against callers).
    """
    loader = load_full_data2_idx if idx else load_full_data
    abstracts, titles = loader()
    abstracts_split, titles_split = split_data(abstracts, titles)
    # Drop the empty trailing segment that np.split produces.
    return abstracts_split[:3], titles_split[:3]
#save_full_data()
#abstracts, titles = load_full_data()
#save_full_data_idx()
#abstracts, titles = load_full_data_idx()
#abstracts, titles = get_splitted_data() | [
"singhalarchit1@gmail.com"
] | singhalarchit1@gmail.com |
1994c2b87b2984da6665c4ad9634a631dc9266a0 | 6f0d5ec56efc0c2933013867cd6b9529ca5ac1ac | /SolarSystemdMag/keithlyCompExampleDmagvsScurve.py | 1064970ae35faafefca4cf71c67d0a252f68e6c8 | [] | no_license | deanthedream/PrototypePlottingUtilsEXOSIMS | 1e3b1a004e21a3d06cc47f557dec7ce0e1a576fe | 3f17eb42f62e3d9a7dbffa7990317f928d8ec9da | refs/heads/master | 2023-08-27T23:47:34.465078 | 2023-08-24T11:58:54 | 2023-08-24T11:58:54 | 138,237,474 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 37,053 | py | #### Keithly Completeness Example Dmags vs S curve plot
import os
from projectedEllipse import *
import EXOSIMS.MissionSim
import matplotlib.pyplot as plt
from matplotlib import colors
from mpl_toolkits.mplot3d import Axes3D
import numpy.random as random
import time
from astropy import constants as const
import astropy.units as u
from EXOSIMS.util.deltaMag import deltaMag
from EXOSIMS.util.planet_star_separation import planet_star_separation
from EXOSIMS.util.phaseFunctions import quasiLambertPhaseFunction
from EXOSIMS.util.phaseFunctions import betaFunc
from PrototypePlottingUtilsEXOSIMS.trueAnomalyFromEccentricAnomaly import trueAnomalyFromEccentricAnomaly
import sys, os.path, EXOSIMS
import numpy as np
import copy
from copy import deepcopy
import datetime
import re
folder = os.path.normpath(os.path.expandvars('$HOME/Documents/exosims/Scripts'))#'$HOME/Documents/exosims/Scripts'))#HabExTimeSweep_HabEx_CSAG13_PPSAG13'))#WFIRSTCompSpecPriors_WFIRSTcycle6core_3mo_40519'))#EXOSIMS/EXOSIMS/Scripts'))#EXOSIMS/EXOSIMS/Scripts'))
filename = 'HabEx_CSAG13_PPSAG13_compSubtype.json'
scriptfile = os.path.join(folder,filename)
sim = EXOSIMS.MissionSim.MissionSim(scriptfile=scriptfile,nopar=True)
#Note no completness specs in SAG13 SAG13
comp = sim.SurveySimulation.Completeness
TL= sim.SurveySimulation.TargetList
ZL = sim.ZodiacalLight
OS = sim.OpticalSystem
folder = './'
PPoutpath = './'
#Getting observing mode
#allModes = OS.observingModes
#det_mode = list(filter(lambda mode: mode['detectionMode'] == True, allModes))[0]
mode = OS.observingModes[0]
nu_smax = 0.
pvenus = np.asarray([0.689])
Rpvenus = (np.asarray([6051.8*1000.])*u.m).to('AU')
smavenus = (np.asarray([108.21*10.**9.])*u.m).to('AU') #in AU
e=np.asarray([1e-5])
inc=np.asarray([np.pi/2.-1e-5])
W=np.asarray([0.])
w=np.asarray([0.])
nus = np.linspace(start=-np.pi/2,stop=np.pi/2.,num=20000)
pneptune = np.asarray([0.442])
Rpneptune = (np.asarray([24622.*1000.])*u.m).to('AU')
smaneptune = (np.asarray([4495.*10.**9.])*u.m).to('AU')
#planProp['mars'] = {'R':3389.92*1000.,'a':227.92*10.**9.,'p':0.150}
pmars = np.asarray([0.150])
Rpmars = (np.asarray([3389.92*1000.])*u.m).to('AU')
smamars = (np.asarray([227.92*10.**9.])*u.m).to('AU')
#planProp['jupiter'] = {'R':69911.*1000.,'a':778.57*10.**9.,'p':0.538}
pjupiter = np.asarray([0.538])
Rpjupiter = (np.asarray([69911.*1000.])*u.m).to('AU')
smajupiter = (np.asarray([778.57*10.**9.])*u.m).to('AU')
#planProp['uranus'] = {'R':25362.*1000.,'a':2872.46*10.**9.,'p':0.488}
puranus = np.asarray([0.488])
Rpuranus = (np.asarray([25362.*1000.])*u.m).to('AU')
smauranus = (np.asarray([2872.46*10.**9.])*u.m).to('AU')
#Setting these values. Need to get the ones for Our Sun at 10 pc
TL.BV[0] = 1.
TL.Vmag[0] = 1.
#starMass
starMass = const.M_sun
periods_mars = (2.*np.pi*np.sqrt((smamars.to('AU'))**3./(const.G.to('AU3 / (kg s2)')*starMass))).to('year').value
periods_venus = (2.*np.pi*np.sqrt((smavenus.to('AU'))**3./(const.G.to('AU3 / (kg s2)')*starMass))).to('year').value
periods_neptune = (2.*np.pi*np.sqrt((smaneptune.to('AU'))**3./(const.G.to('AU3 / (kg s2)')*starMass))).to('year').value
periods_uranus = (2.*np.pi*np.sqrt((smauranus.to('AU'))**3./(const.G.to('AU3 / (kg s2)')*starMass))).to('year').value
#Separations
#s_circle = np.ones(len(sma))
dmag = 25. #29.0
dmag_upper = 25. #29.0
IWA_HabEx = 0.045*u.arcsec #taken from a Habex Script in units of mas
IWA2=0.150*u.arcsec #Suggested by dmitry as analahous to WFIRST
OWA_HabEx = 6.*u.arcsec #from the HabEx Standards Team Final Report
s_inner = 15.*u.pc*IWA_HabEx.to('rad').value
s_outer = 15.*u.pc*OWA_HabEx.to('rad').value
s_circle = np.asarray([s_inner.to('AU').value])
#NEED TO MAKE GOOD HANDLING FOR E=0 ORBITS. SPECIFICALLY FOR MIN AND MAX SOLVING
# dmajorp,dminorp,_,_,Op,x,y,Phi,xreal,only2RealInds,yrealAllRealInds,fourIntInds,twoIntOppositeXInds,twoIntSameYInds,nu_minSepPoints,nu_maxSepPoints,\
# nu_lminSepPoints,nu_lmaxSepPoints,nu_fourInt,nu_twoIntSameY,nu_twoIntOppositeX,nu_IntersectionsOnly2, yrealImagInds,\
# t_minSep,t_maxSep,t_lminSep,t_lmaxSep,t_fourInt0,t_fourInt1,t_fourInt2,t_fourInt3,t_twoIntSameY0,\
# t_twoIntSameY1,t_twoIntOppositeX0,t_twoIntOppositeX1,t_IntersectionOnly20,t_IntersectionOnly21,\
# _,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_, periods = calcMasterIntersections(smavenus,e,W,w,inc,s_circle,starMass,False)\
#need beta from true anomaly function
betas = betaFunc(inc,nus,w)
beta_smax = betaFunc(inc,0.,w)
Phis = quasiLambertPhaseFunction(betas)
Phi_smax = quasiLambertPhaseFunction(beta_smax)
rsvenus = smavenus*(1.-e**2.)/(1.+e*np.cos(nus))
dmags_venus = deltaMag(pvenus,Rpvenus,rsvenus[0],Phis)
dmag_venus_smax = deltaMag(pvenus,Rpvenus,rsvenus[0],Phi_smax)
seps_venus = planet_star_separation(smavenus,e,nus,w,inc)
WA_venus_smax = (smavenus.to('AU').value/(15.*u.pc.to('AU')))*u.rad.to('arcsec')*u.arcsec
#Calculate integration time at WA_venus_smax
venus_intTime = OS.calc_intTime(TL,[0],ZL.fZ0*100000.,ZL.fEZ0*100000.,dmag_venus_smax,WA_venus_smax,mode)
mean_anomalyvenus = venus_intTime.to('year').value*2.*np.pi/periods_venus #total angle moved by planet
eccentric_anomalyvenus = mean_anomalyvenus#solve eccentric anomaly from mean anomaly
nus_venus = trueAnomalyFromEccentricAnomaly(e,eccentric_anomalyvenus) #This is nominally the total true anomaly venus subtends as it is observed
sep_venus_edge0 = planet_star_separation(smavenus,e,0.,w,inc).to('AU').value #the separation where the true anomaly would not be observable
sep_venus_edge1 = planet_star_separation(smavenus,e,nus_venus/2.,w,inc).to('AU').value #the separation where the true anomaly would not be observable
sep_venus_edge2 = planet_star_separation(smavenus,e,-nus_venus/2.,w,inc).to('AU').value #the separation where the true anomaly would not be observable
#Calculate the points where
beta_tmp1 = betaFunc(inc,nus_venus/2.,w)
Phi_tmp1 = quasiLambertPhaseFunction(beta_tmp1)
rsvenus = smavenus*(1.-e**2.)/(1.+e*np.cos(nus))
dmag_venus_tmp1 = deltaMag(pvenus,Rpvenus,rsvenus[0],Phi_tmp1)
beta_tmp2 = betaFunc(inc,-nus_venus/2.,w)
Phi_tmp2 = quasiLambertPhaseFunction(beta_tmp2)
rsvenus = smavenus*(1.-e**2.)/(1.+e*np.cos(nus))
dmag_venus_tmp2 = deltaMag(pvenus,Rpvenus,rsvenus[0],Phi_tmp2)
#From optical ssytem nemati calc_intTime(TL, sInds, fZ, fEZ, dMag, WA, mode)
#### Mars
betas = betaFunc(inc,nus,w)
beta_smax = betaFunc(inc,0.,w)
Phis = quasiLambertPhaseFunction(betas)
Phi_smax = quasiLambertPhaseFunction(beta_smax)
rsmars = smamars*(1.-e**2.)/(1.+e*np.cos(nus))
dmags_mars = deltaMag(pmars,Rpmars,rsmars[0],Phis)
dmag_mars_smax = deltaMag(pmars,Rpmars,rsmars[0],Phi_smax)
seps_mars = planet_star_separation(smamars,e,nus,w,inc)
WA_mars_smax = (smamars.to('AU').value/(22.87*u.pc.to('AU')))*u.rad.to('arcsec')*u.arcsec
mars_intTime = OS.calc_intTime(TL,[0],ZL.fZ0,ZL.fEZ0*1.,dmag_mars_smax,WA_mars_smax,mode)
mean_anomalymars = mars_intTime.to('year').value*2.*np.pi/periods_mars #total angle moved by planet
eccentric_anomalymars = mean_anomalymars#solve eccentric anomaly from mean anomaly
nus_mars = trueAnomalyFromEccentricAnomaly(e,eccentric_anomalymars) #This is nominally the total true anomaly venus subtends as it is observed
sep_mars_edge0 = planet_star_separation(smamars,e,0.,w,inc).to('AU') #the separation where the true anomaly would not be observable
sep_mars_edge1 = planet_star_separation(smamars,e,nus_mars/2.,w,inc).to('AU') #the separation where the true anomaly would not be observable
sep_mars_edge2 = planet_star_separation(smamars,e,-nus_mars/2.,w,inc).to('AU') #the separation where the true anomaly would not be observable
beta_tmp1 = betaFunc(inc,nus_mars/2.,w)
Phi_tmp1 = quasiLambertPhaseFunction(beta_tmp1)
rsmars = smamars*(1.-e**2.)/(1.+e*np.cos(nus))
dmag_mars_tmp1 = deltaMag(pmars,Rpmars,rsmars[0],Phi_tmp1)
beta_tmp2 = betaFunc(inc,-nus_mars/2.,w)
Phi_tmp2 = quasiLambertPhaseFunction(beta_tmp2)
rsmars = smamars*(1.-e**2.)/(1.+e*np.cos(nus))
dmag_mars_tmp2 = deltaMag(pmars,Rpmars,rsmars[0],Phi_tmp2)
#Telescope IWA From Mars
IWA_mars_pretty = (sep_mars_edge2.value/(22.87*u.pc.to('AU')))*u.rad.to('arcsec') #The IWA that causes the separation for mars in arcsec
#### Neptune
#true anomaly of intersection
plotBool = False
# dmajorp,dminorp,theta_OpQ_X,theta_OpQp_X,Op,x,y,Phi,xreal,only2RealInds,yrealAllRealInds,\
# fourIntInds,twoIntOppositeXInds,twoIntSameYInds,nu_minSepPoints,nu_maxSepPoints,nu_lminSepPoints,nu_lmaxSepPoints,nu_fourInt,\
# nu_twoIntSameY,nu_twoIntOppositeX,nu_IntersectionsOnly2, yrealImagInds,\
# t_minSep,t_maxSep,t_lminSep,t_lmaxSep,t_fourInt0,t_fourInt1,t_fourInt2,t_fourInt3,t_twoIntSameY0,\
# t_twoIntSameY1,t_twoIntOppositeX0,t_twoIntOppositeX1,t_IntersectionOnly20,t_IntersectionOnly21,\
# minSepPoints_x, minSepPoints_y, maxSepPoints_x, maxSepPoints_y, lminSepPoints_x, lminSepPoints_y, lmaxSepPoints_x, lmaxSepPoints_y, minSep, maxSep, lminSep, lmaxSep,\
# errors_fourInt0,errors_fourInt1,errors_fourInt2,errors_fourInt3,errors_twoIntSameY0,\
# errors_twoIntSameY1,errors_twoIntOppositeX0,errors_twoIntOppositeX1,errors_IntersectionsOnly2X0,errors_IntersectionsOnly2X1,type0_0Inds,\
# type0_1Inds,type0_2Inds,type0_3Inds,type0_4Inds,type1_0Inds,type1_1Inds,type1_2Inds,type1_3Inds,type1_4Inds,type2_0Inds,type2_1Inds,type2_2Inds,\
# type2_3Inds,type2_4Inds,type3_0Inds,type3_1Inds,type3_2Inds,type3_3Inds,type3_4Inds,fourInt_x,fourInt_y,twoIntSameY_x,twoIntSameY_y,twoIntOppositeX_x,\
# twoIntOppositeX_y,xIntersectionsOnly2,yIntersectionsOnly2,typeInds0,typeInds1,typeInds2,typeInds3, periods = calcMasterIntersections(np.asarray(smamars.to('AU').value),e,W,w,inc,np.asarray(sep_mars_edge2.value),starMass,plotBool)
# print(nu_fourInt)
dmajorp,dminorp,theta_OpQ_X,theta_OpQp_X,Op,x,y,Phi,xreal,only2RealInds,yrealAllRealInds,\
fourIntInds,twoIntOppositeXInds,twoIntSameYInds,nu_minSepPoints,nu_maxSepPoints,nu_lminSepPoints,nu_lmaxSepPoints,nu_fourInt,\
nu_twoIntSameY,nu_twoIntOppositeX,nu_IntersectionsOnly2, yrealImagInds,\
t_minSep,t_maxSep,t_lminSep,t_lmaxSep,t_fourInt0,t_fourInt1,t_fourInt2,t_fourInt3,t_twoIntSameY0,\
t_twoIntSameY1,t_twoIntOppositeX0,t_twoIntOppositeX1,t_IntersectionOnly20,t_IntersectionOnly21,\
minSepPoints_x, minSepPoints_y, maxSepPoints_x, maxSepPoints_y, lminSepPoints_x, lminSepPoints_y, lmaxSepPoints_x, lmaxSepPoints_y, minSep, maxSep, lminSep, lmaxSep,\
errors_fourInt0,errors_fourInt1,errors_fourInt2,errors_fourInt3,errors_twoIntSameY0,\
errors_twoIntSameY1,errors_twoIntOppositeX0,errors_twoIntOppositeX1,errors_IntersectionsOnly2X0,errors_IntersectionsOnly2X1,type0_0Inds,\
type0_1Inds,type0_2Inds,type0_3Inds,type0_4Inds,type1_0Inds,type1_1Inds,type1_2Inds,type1_3Inds,type1_4Inds,type2_0Inds,type2_1Inds,type2_2Inds,\
type2_3Inds,type2_4Inds,type3_0Inds,type3_1Inds,type3_2Inds,type3_3Inds,type3_4Inds,fourInt_x,fourInt_y,twoIntSameY_x,twoIntSameY_y,twoIntOppositeX_x,\
twoIntOppositeX_y,xIntersectionsOnly2,yIntersectionsOnly2,typeInds0,typeInds1,typeInds2,typeInds3, periods = calcMasterIntersections(smaneptune.to('AU').value,e,W,w,inc,sep_mars_edge2.value,starMass,plotBool)
print(nu_fourInt)
# nus[only2RealInds,4:6] = nu_IntersectionsOnly2
# nus[yrealAllRealInds[fourIntInds],4:8] = nu_fourInt
# nus[yrealAllRealInds[twoIntOppositeXInds],4:6] = nu_twoIntOppositeX
# nus[yrealAllRealInds[twoIntSameYInds],4:6] = nu_twoIntSameY
#nu_fourInt
betas_fourInt = betaFunc(inc,nu_fourInt,w)
Phis_fourInt = quasiLambertPhaseFunction(betas_fourInt)
rsneptune_fourInt = smaneptune.to('AU')*(1.-e**2.)/(1.+e*np.cos(nu_fourInt))
seps_neptune_fourInt = planet_star_separation(smaneptune.to('AU'),e,nu_fourInt,w,inc).to('AU')
dmags_neptune_fourInt = deltaMag(pneptune,Rpneptune,rsneptune_fourInt,Phis_fourInt)
#WA_neptune_IWA = (smaneptune.to('AU').value/(22.87*u.pc.to('AU')))*u.rad.to('arcsec')*u.arcsec
neptune_intTimes_pretty = OS.calc_intTime(TL,[0],ZL.fZ0,ZL.fEZ0,dmags_neptune_fourInt,IWA_mars_pretty*u.arcsec,mode)
mean_anomalyneptune_pretty = neptune_intTimes_pretty.to('year').value*2.*np.pi/periods_neptune #total angle moved by planet
trueanomaly_neptune_pretty = nu_fourInt + mean_anomalyneptune_pretty # for e=0, cos(nu)=cos(E)=cos(M)
seps_neptune_pretty = planet_star_separation(smaneptune,e,trueanomaly_neptune_pretty,w,inc).to('AU')
betas_fourInt_pretty = betaFunc(inc,trueanomaly_neptune_pretty,w)
Phis_fourInt_pretty = quasiLambertPhaseFunction(betas_fourInt_pretty)
rsneptune_fourInt_pretty = smaneptune*(1.-e**2.)/(1.+e*np.cos(trueanomaly_neptune_pretty))
dmags_neptune_fourInt_pretty = deltaMag(pneptune,Rpneptune,rsneptune_fourInt_pretty,Phis_fourInt_pretty)
# #Can Delete
# a = smaneptune.to('AU')
# v = nu_fourInt[0,1]
# v=1.544
# r = a*(1-e**2.)/(1.+e*np.cos(v))
# X = r*(np.cos(W)*np.cos(w+v) - np.sin(W)*np.sin(w+v)*np.cos(inc))
# Y = r*(np.sin(W)*np.cos(w+v) + np.cos(W)*np.sin(w+v)*np.cos(inc))
# Z = r*(np.sin(inc)*np.sin(w+v))
# print(r)
# print(X)
# print(Y)
# print(Z)
# Calculate Seps vs True Anomaly
betas = betaFunc(inc,nus,w)
beta_smax = betaFunc(inc,0.,w)
Phis = quasiLambertPhaseFunction(betas)
Phi_smax = quasiLambertPhaseFunction(beta_smax)
rsneptune = smaneptune*(1.-e**2.)/(1.+e*np.cos(nus))
dmags_neptune = deltaMag(pneptune,Rpneptune,rsneptune[0],Phis)
dmag_neptune_smax = deltaMag(pneptune,Rpneptune,rsneptune[0],Phi_smax)
seps_neptune = planet_star_separation(smaneptune,e,nus,w,inc)
WA_neptune_smax = (smaneptune.to('AU').value/(22.87*u.pc.to('AU')))*u.rad.to('arcsec')*u.arcsec
neptune_intTime = OS.calc_intTime(TL,[0],ZL.fZ0,ZL.fEZ0,dmag_neptune_smax,WA_neptune_smax,mode)
mean_anomalyneptune = neptune_intTime.to('year').value*2.*np.pi/periods_neptune #total angle moved by planet
#### Uranus intersections
uranusOWASep = np.asarray([11.83])*u.AU
dmajorp,dminorp,theta_OpQ_X,theta_OpQp_X,Op,x,y,Phi,xreal,only2RealInds,yrealAllRealInds,\
fourIntInds,twoIntOppositeXInds,twoIntSameYInds,nu_minSepPoints,nu_maxSepPoints,nu_lminSepPoints,nu_lmaxSepPoints,nu_fourInt2,\
nu_twoIntSameY,nu_twoIntOppositeX,nu_IntersectionsOnly2, yrealImagInds,\
t_minSep,t_maxSep,t_lminSep,t_lmaxSep,t_fourInt0,t_fourInt1,t_fourInt2,t_fourInt3,t_twoIntSameY0,\
t_twoIntSameY1,t_twoIntOppositeX0,t_twoIntOppositeX1,t_IntersectionOnly20,t_IntersectionOnly21,\
minSepPoints_x, minSepPoints_y, maxSepPoints_x, maxSepPoints_y, lminSepPoints_x, lminSepPoints_y, lmaxSepPoints_x, lmaxSepPoints_y, minSep, maxSep, lminSep, lmaxSep,\
errors_fourInt0,errors_fourInt1,errors_fourInt2,errors_fourInt3,errors_twoIntSameY0,\
errors_twoIntSameY1,errors_twoIntOppositeX0,errors_twoIntOppositeX1,errors_IntersectionsOnly2X0,errors_IntersectionsOnly2X1,type0_0Inds,\
type0_1Inds,type0_2Inds,type0_3Inds,type0_4Inds,type1_0Inds,type1_1Inds,type1_2Inds,type1_3Inds,type1_4Inds,type2_0Inds,type2_1Inds,type2_2Inds,\
type2_3Inds,type2_4Inds,type3_0Inds,type3_1Inds,type3_2Inds,type3_3Inds,type3_4Inds,fourInt_x,fourInt_y,twoIntSameY_x,twoIntSameY_y,twoIntOppositeX_x,\
twoIntOppositeX_y,xIntersectionsOnly2,yIntersectionsOnly2,typeInds0,typeInds1,typeInds2,typeInds3, periods = calcMasterIntersections(smauranus.to('AU').value,e,W,w,inc,uranusOWASep.value,starMass,plotBool)
print(nu_fourInt)
betas_fourInt2 = betaFunc(inc,nu_fourInt2,w)
Phis_fourInt2 = quasiLambertPhaseFunction(betas_fourInt2)
rsuranus_fourInt2 = smauranus.to('AU')*(1.-e**2.)/(1.+e*np.cos(nu_fourInt2))
seps_uranus_fourInt2 = planet_star_separation(smauranus.to('AU'),e,nu_fourInt2,w,inc).to('AU')
dmags_uranus_fourInt2 = deltaMag(puranus,Rpuranus,rsuranus_fourInt2,Phis_fourInt2)
#WA_neptune_IWA = (smaneptune.to('AU').value/(22.87*u.pc.to('AU')))*u.rad.to('arcsec')*u.arcsec
WA_uranus = (uranusOWASep.value/(22.87*u.pc.to('AU')))*u.rad.to('arcsec') #The IWA that causes the separation for mars in arcsec
uranus_intTimes_pretty = OS.calc_intTime(TL,[0],ZL.fZ0,ZL.fEZ0,dmags_uranus_fourInt2,WA_uranus*u.arcsec,mode)
mean_anomalyuranus_pretty = uranus_intTimes_pretty.to('year').value*2.*np.pi/periods_uranus #total angle moved by planet
trueanomaly_uranus_pretty = nu_fourInt2 + mean_anomalyuranus_pretty # for e=0, cos(nu)=cos(E)=cos(M)
seps_uranus_pretty = planet_star_separation(smauranus,e,trueanomaly_uranus_pretty,w,inc).to('AU')
betas_fourInt2_pretty = betaFunc(inc,trueanomaly_uranus_pretty,w)
Phis_fourInt2_pretty = quasiLambertPhaseFunction(betas_fourInt2_pretty)
rsuranus_fourInt2_pretty = smauranus*(1.-e**2.)/(1.+e*np.cos(trueanomaly_uranus_pretty))
dmags_uranus_fourInt2_pretty = deltaMag(puranus,Rpuranus,rsuranus_fourInt2_pretty,Phis_fourInt2_pretty)
#all the separations
betas = betaFunc(inc,nus,w)
Phis = quasiLambertPhaseFunction(betas)
rsuranus = smauranus*(1.-e**2.)/(1.+e*np.cos(nus))
dmags_uranus = deltaMag(puranus,Rpuranus,rsuranus[0],Phis)
seps_uranus = planet_star_separation(smauranus,e,nus,w,inc)
maxdmaguranus = 31.
dmag_upper = maxdmaguranus
#### Solving for dmag_min and dmag_max for each planet ################
mindmag, maxdmag, dmaglminAll, dmaglmaxAll, indsWith2, indsWith4, nuMinDmag, nuMaxDmag, nulminAll, nulmaxAll = calc_planet_dmagmin_dmagmax(e,inc,w,smauranus,puranus,Rpuranus)
#### nu From dmag_upper
print('Num Planets with At Least 2 Int given dmag: ' + str(np.sum((mindmag < dmag_upper)*(maxdmag > dmag_upper))))
print('Num Planets with dmag local extrema: ' + str(len(indsWith4)))
print('Num Planets with given 4 Int given dmag: ' + str(np.sum((dmaglminAll < dmag_upper)*(dmaglmaxAll > dmag_upper))))
indsWith4Int = indsWith4[np.where((dmaglminAll < dmag_upper)*(dmaglmaxAll > dmag_upper))[0]]
indsWith2Int = list(set(np.where((mindmag < dmag_upper)*(maxdmag > dmag_upper))[0]) - set(indsWith4Int))
nus2Int, nus4Int, dmag2Int, dmag4Int = calc_planetnu_from_dmag(dmag_upper,e,inc,w,smauranus,puranus,Rpuranus,mindmag, maxdmag, indsWith2Int, indsWith4Int)
#nus[indsWith2Int,8:10] = nus2Int
#nus[indsWith4Int,8:12] = nus4Int
#nus2Int
betas_fourInt3 = betaFunc(inc,nus2Int,w)
Phis_fourInt3 = quasiLambertPhaseFunction(betas_fourInt3)
rsuranus_fourInt3 = smauranus.to('AU')*(1.-e**2.)/(1.+e*np.cos(nus2Int))
seps_uranus_fourInt3 = planet_star_separation(smauranus.to('AU'),e,nus2Int,w,inc).to('AU')
dmags_uranus_fourInt3 = deltaMag(puranus,Rpuranus,rsuranus_fourInt3,Phis_fourInt3)
#Ensure intTime of uranus is shorter than the visibility window
uranusVisibilityWindowCorner = (nu_fourInt2[0,1] - nus2Int[0,0])/(2.*np.pi)*periods_uranus
#Uranus
dmajorp,dminorp,theta_OpQ_X,theta_OpQp_X,Op,x,y,Phi,xreal,only2RealInds,yrealAllRealInds,\
fourIntInds,twoIntOppositeXInds,twoIntSameYInds,nu_minSepPoints,nu_maxSepPoints,nu_lminSepPoints,nu_lmaxSepPoints,nu_fourInt4,\
nu_twoIntSameY,nu_twoIntOppositeX,nu_IntersectionsOnly2, yrealImagInds,\
t_minSep,t_maxSep,t_lminSep,t_lmaxSep,t_fourInt0,t_fourInt1,t_fourInt2,t_fourInt3,t_twoIntSameY0,\
t_twoIntSameY1,t_twoIntOppositeX0,t_twoIntOppositeX1,t_IntersectionOnly20,t_IntersectionOnly21,\
minSepPoints_x, minSepPoints_y, maxSepPoints_x, maxSepPoints_y, lminSepPoints_x, lminSepPoints_y, lmaxSepPoints_x, lmaxSepPoints_y, minSep, maxSep, lminSep, lmaxSep,\
errors_fourInt0,errors_fourInt1,errors_fourInt2,errors_fourInt3,errors_twoIntSameY0,\
errors_twoIntSameY1,errors_twoIntOppositeX0,errors_twoIntOppositeX1,errors_IntersectionsOnly2X0,errors_IntersectionsOnly2X1,type0_0Inds,\
type0_1Inds,type0_2Inds,type0_3Inds,type0_4Inds,type1_0Inds,type1_1Inds,type1_2Inds,type1_3Inds,type1_4Inds,type2_0Inds,type2_1Inds,type2_2Inds,\
type2_3Inds,type2_4Inds,type3_0Inds,type3_1Inds,type3_2Inds,type3_3Inds,type3_4Inds,fourInt_x,fourInt_y,twoIntSameY_x,twoIntSameY_y,twoIntOppositeX_x,\
twoIntOppositeX_y,xIntersectionsOnly2,yIntersectionsOnly2,typeInds0,typeInds1,typeInds2,typeInds3, periods = calcMasterIntersections(smauranus.to('AU').value,e,W,w,inc,sep_mars_edge2.value,starMass,plotBool)
print(nu_fourInt4)
# nus[only2RealInds,4:6] = nu_IntersectionsOnly2
# nus[yrealAllRealInds[fourIntInds],4:8] = nu_fourInt
# nus[yrealAllRealInds[twoIntOppositeXInds],4:6] = nu_twoIntOppositeX
# nus[yrealAllRealInds[twoIntSameYInds],4:6] = nu_twoIntSameY
#nu_fourInt
betas_fourInt4 = betaFunc(inc,nu_fourInt4,w)
Phis_fourInt4 = quasiLambertPhaseFunction(betas_fourInt4)
rsuranus_fourInt4 = smauranus.to('AU')*(1.-e**2.)/(1.+e*np.cos(nu_fourInt4))
seps_uranus_fourInt4 = planet_star_separation(smauranus.to('AU'),e,nu_fourInt4,w,inc).to('AU')
dmags_uranus_fourInt4 = deltaMag(puranus,Rpuranus,rsuranus_fourInt4,Phis_fourInt4)
#Uranus not visible range upper
upper = 0.9070013
nus_uranus_notVisible1 = np.linspace(start=3.*np.pi/2.,stop=2.*np.pi+upper,num=1000)
betas = betaFunc(inc,nus_uranus_notVisible1,w)
Phis = quasiLambertPhaseFunction(betas)
rsuranus = smauranus*(1.-e**2.)/(1.+e*np.cos(nus_uranus_notVisible1))
dmags_uranusNotVisible = deltaMag(puranus,Rpuranus,rsuranus[0],Phis)
seps_uranusNotVisible = planet_star_separation(smauranus,e,nus_uranus_notVisible1,w,inc)
#Uranus not visible range lower
lower = 0.08
nus_uranus_notVisible2 = np.linspace(start=np.pi/2.,stop=np.pi/2.+lower,num=1000)
betas = betaFunc(inc,nus_uranus_notVisible2,w)
Phis = quasiLambertPhaseFunction(betas)
rsuranus2 = smauranus*(1.-e**2.)/(1.+e*np.cos(nus_uranus_notVisible2))
dmags_uranusNotVisible2 = deltaMag(puranus,Rpuranus,rsuranus[0],Phis)
seps_uranusNotVisible2 = planet_star_separation(smauranus,e,nus_uranus_notVisible2,w,inc)
OWA_uranus_edge2 = (uranusOWASep.value/(22.87*u.pc.to('AU')))*(u.rad.to('arcsec')) #The IWA that causes the separation for mars in arcsec
miny = 25.
maxy = 34.
# fig = plt.figure(num=1)
# plt.rc('axes',linewidth=2)
# plt.rc('lines',linewidth=2)
# plt.rcParams['axes.linewidth']=2
# plt.rc('font',weight='bold')
# plt.plot([22.87*u.pc.to('AU')*IWA_mars_pretty*u.arcsec.to('rad'),uranusOWASep.value],[maxdmaguranus,maxdmaguranus],color='black')
# plt.plot(np.asarray([1,1])*22.87*u.pc.to('AU')*IWA_mars_pretty*u.arcsec.to('rad'),[miny,maxdmaguranus],color='black')
# plt.plot(np.asarray([1,1])*uranusOWASep.value,[miny,maxdmaguranus],color='black')
# plt.plot(seps_mars.to('AU'),dmags_mars,color='red')
# #plt.plot(seps_mars.to('AU'),dmags_mars,color='black',linestyle='--')
# plt.scatter(sep_mars_edge1,dmag_mars_tmp1,color='red')
# plt.scatter(sep_mars_edge2,dmag_mars_tmp2,color='red')
# plt.plot(seps_venus.to('AU'),dmags_venus,color='gold')
# plt.scatter(sep_venus_edge1,dmag_venus_tmp1,color='gold')
# plt.scatter(sep_venus_edge2,dmag_venus_tmp2,color='gold')
# #Neptune
# #plt.plot(seps_neptune.to('AU'),dmags_neptune,color=colors.to_rgba('deepskyblue'))
# #plt.scatter(seps_neptune_fourInt[0,2].to('AU'),dmags_neptune_fourInt[0,2],color=colors.to_rgba('deepskyblue'))
# #plt.scatter(seps_neptune_pretty[0,2].to('AU'),dmags_neptune_fourInt_pretty[0,2],color=colors.to_rgba('deepskyblue'))
# #Uranus
# plt.plot(seps_uranus.to('AU'),dmags_uranus,color=colors.to_rgba('darkblue'))
# #uranus upper points
# plt.scatter(seps_uranus_fourInt2[0,1].to('AU'),dmags_uranus_fourInt2[0,1],color=colors.to_rgba('darkblue'))
# plt.scatter(seps_uranus_pretty[0,0].to('AU'),dmags_uranus_fourInt2_pretty[0,0],color=colors.to_rgba('darkblue'))
# plt.scatter(seps_uranus_fourInt3[0,0].to('AU'),dmags_uranus_fourInt3[0,0],color=colors.to_rgba('darkblue'))
# #uranus lower points
# plt.scatter(seps_uranus_fourInt2[0,3].to('AU'),dmags_uranus_fourInt2[0,3],color=colors.to_rgba('darkblue'))
# plt.scatter(seps_uranus_pretty[0,3].to('AU'),dmags_uranus_fourInt2_pretty[0,3],color=colors.to_rgba('darkblue'))
# plt.ylabel(r'$\Delta \mathrm{mag}$',weight='bold')
# plt.ylim([miny,maxy])
# plt.xscale('log')
# plt.xlim([10.**0.,40])#10.**2.])
# plt.xlabel('Planet-Star Separation, s, in AU',weight='bold')
# plt.show(block=False)
# from mpl_toolkits.axes_grid1.inset_locator import inset_axes
# inset_axes0 = inset_axes(plt.gca(), width="30%", height=1., loc=0) # width = 30% of parent_bbox # height : 1 inch
# inset_axes0.plot(np.asarray([1,1])*22.87*u.pc.to('AU')*IWA_mars_pretty*u.arcsec.to('rad'),[miny,maxdmaguranus],color='black')
# inset_axes0.scatter(sep_mars_edge1,dmag_mars_tmp1,color='red')
# inset_axes0.scatter(sep_mars_edge2,dmag_mars_tmp2,color='red')
# inset_axes0.set_xlim([1.50,1.525])
# inset_axes0.set_ylim([27.3,28.1])
# inset_axes1 = inset_axes(plt.gca(), width="30%", height=1., loc=1) # width = 30% of parent_bbox # height : 1 inch
# inset_axes1.plot(np.asarray([1,1])*uranusOWASep.value,[miny,maxdmaguranus],color='black')
# inset_axes1.plot([22.87*u.pc.to('AU')*IWA_mars_pretty*u.arcsec.to('rad'),uranusOWASep.value],[maxdmaguranus,maxdmaguranus],color='black')
# inset_axes1.plot(seps_uranus.to('AU'),dmags_uranus,color=colors.to_rgba('darkblue'))
# inset_axes1.scatter(seps_uranus_fourInt2[0,1].to('AU'),dmags_uranus_fourInt2[0,1],color=colors.to_rgba('darkblue'))
# inset_axes1.scatter(seps_uranus_fourInt3[0,0].to('AU'),dmags_uranus_fourInt3[0,0],color=colors.to_rgba('darkblue'))
# inset_axes1.set_xlim([11.55,11.9])
# inset_axes1.set_ylim([30.80,31.20])
# inset_axes2 = inset_axes(plt.gca(), width="30%", height=1., loc=2) # width = 30% of parent_bbox # height : 1 inch
# inset_axes2.plot(np.asarray([1,1])*uranusOWASep.value,[miny,maxdmaguranus],color='black')
# inset_axes2.plot(seps_uranus.to('AU'),dmags_uranus,color=colors.to_rgba('darkblue'))
# inset_axes2.scatter(seps_uranus_fourInt2[0,3].to('AU'),dmags_uranus_fourInt2[0,3],color=colors.to_rgba('darkblue'))
# inset_axes2.scatter(seps_uranus_pretty[0,3].to('AU'),dmags_uranus_fourInt2_pretty[0,3],color=colors.to_rgba('darkblue'))
# inset_axes2.set_xlim([11.8299,11.8301])
# inset_axes2.set_ylim([25.5,27.0])
# plt.show(block=False)
plt.close(2)
from matplotlib import gridspec
import matplotlib
from matplotlib import lines
rect1 = matplotlib.patches.Rectangle((uranusOWASep.value,20.), 50, 50, color='grey',alpha=0.2,linewidth=0) #outside OWA
rect2 = matplotlib.patches.Rectangle((22.87*u.pc.to('AU')*IWA_mars_pretty*u.arcsec.to('rad'),20.), -50, 50, color='grey',alpha=0.2, linewidth=0) #Inside IWA
rect3 = matplotlib.patches.Rectangle((22.87*u.pc.to('AU')*IWA_mars_pretty*u.arcsec.to('rad'),maxdmaguranus), (uranusOWASep.value-22.87*u.pc.to('AU')*IWA_mars_pretty*u.arcsec.to('rad')), 50, color='grey',alpha=0.2, linewidth=0)
fig3 = plt.figure(constrained_layout=True,num=2,figsize=(7.5,5))
plt.rc('axes',linewidth=2)
plt.rc('lines',linewidth=2)
plt.rcParams['axes.linewidth']=2
plt.rc('font',weight='bold')
gs = fig3.add_gridspec(ncols=2, nrows=3,width_ratios=[3,1],height_ratios=[1,1,1])
f3_ax0 = fig3.add_subplot(gs[:, 0])
f3_ax0.plot([11.,11.],[25.5,27.0],color='grey')
f3_ax0.plot([11.,12.5],[27.0,27.0],color='grey')
f3_ax0.plot([12.5,12.5],[27.0,25.5],color='grey')
f3_ax0.plot([12.5,11.],[25.5,25.5],color='grey')
f3_ax0.add_patch(rect1)
f3_ax0.plot([11.,11.],[30.5,31.5],color='grey')
f3_ax0.plot([11.,12.5],[31.5,31.5],color='grey')
f3_ax0.plot([12.5,12.5],[31.5,30.5],color='grey')
f3_ax0.plot([12.5,11.],[30.5,30.5],color='grey')
f3_ax0.add_patch(rect2)
f3_ax0.plot([1.4,1.4],[27.25,28.25],color='grey')
f3_ax0.plot([1.4,1.6],[28.25,28.25],color='grey')
f3_ax0.plot([1.6,1.6],[28.25,27.25],color='grey')
f3_ax0.plot([1.6,1.4],[27.25,27.25],color='grey')
f3_ax0.add_patch(rect3)
f3_ax0.plot([22.87*u.pc.to('AU')*IWA_mars_pretty*u.arcsec.to('rad'),uranusOWASep.value],[maxdmaguranus,maxdmaguranus],color='black')
f3_ax0.plot(np.asarray([1,1])*22.87*u.pc.to('AU')*IWA_mars_pretty*u.arcsec.to('rad'),[miny,maxdmaguranus],color='black')
f3_ax0.plot(np.asarray([1,1])*uranusOWASep.value,[miny,maxdmaguranus],color='black')
f3_ax0.plot(seps_mars.to('AU'),dmags_mars,color='red')
f3_ax0.plot(seps_mars.to('AU'),dmags_mars,color='black',linestyle='--')
f3_ax0.scatter(sep_mars_edge1,dmag_mars_tmp1,color='red')
f3_ax0.scatter(sep_mars_edge2,dmag_mars_tmp2,color='red')
# f3_ax0.plot(seps_venus.to('AU'),dmags_venus,color='gold')
# f3_ax0.scatter(sep_venus_edge1,dmag_venus_tmp1,color='gold')
# f3_ax0.scatter(sep_venus_edge2,dmag_venus_tmp2,color='gold')
#Neptune
#plt.plot(seps_neptune.to('AU'),dmags_neptune,color=colors.to_rgba('deepskyblue'))
#plt.scatter(seps_neptune_fourInt[0,2].to('AU'),dmags_neptune_fourInt[0,2],color=colors.to_rgba('deepskyblue'))
#plt.scatter(seps_neptune_pretty[0,2].to('AU'),dmags_neptune_fourInt_pretty[0,2],color=colors.to_rgba('deepskyblue'))
#Uranus
f3_ax0.plot(seps_uranus.to('AU'),dmags_uranus,color=colors.to_rgba('skyblue'))
f3_ax0.plot(seps_uranusNotVisible,dmags_uranusNotVisible,color='black',linestyle='--')
f3_ax0.plot(seps_uranusNotVisible2,dmags_uranusNotVisible2,color='black',linestyle='--')
#uranus upper points
f3_ax0.scatter(seps_uranus_fourInt2[0,1].to('AU'),dmags_uranus_fourInt2[0,1],color=colors.to_rgba('skyblue'))
f3_ax0.scatter(seps_uranus_pretty[0,0].to('AU'),dmags_uranus_fourInt2_pretty[0,0],color=colors.to_rgba('skyblue'))
f3_ax0.scatter(seps_uranus_fourInt3[0,0].to('AU'),dmags_uranus_fourInt3[0,0],color=colors.to_rgba('skyblue'))
f3_ax0.scatter(seps_uranus_fourInt4[0,3].to('AU'),dmags_uranus_fourInt4[0,3],color=colors.to_rgba('skyblue'))
#uranus lower points
f3_ax0.scatter(seps_uranus_fourInt2[0,3].to('AU'),dmags_uranus_fourInt2[0,3],color=colors.to_rgba('skyblue'))
f3_ax0.scatter(seps_uranus_pretty[0,3].to('AU'),dmags_uranus_fourInt2_pretty[0,3],color=colors.to_rgba('skyblue'))
f3_ax0.set_ylabel(r'$\Delta \mathrm{mag}$',weight='bold')
f3_ax0.set_ylim([miny,maxy])
f3_ax0.set_xscale('log')
f3_ax0.set_xlim([10.**0.,40])#10.**2.])
f3_ax0.set_xlabel('Planet-Star Separation, s, in AU',weight='bold')
dotted_line1 = lines.Line2D([], [], linestyle="--", color='black')
dotted_line2 = lines.Line2D([], [], linestyle="-", color=colors.to_rgba('skyblue'))
dotted_line3 = lines.Line2D([], [], linestyle="--", color='black')
dotted_line4 = lines.Line2D([], [], linestyle="-", color='red')
f3_ax0.legend([(dotted_line4,dotted_line3),(dotted_line2, dotted_line1),(dotted_line2)], ["Mars Not Visible","Neptune Not Visible","Neptune Visible"])
#f3_ax0.legend([])
f3_ax1 = fig3.add_subplot(gs[0, 1])
f3_ax1.plot(np.asarray([1,1])*22.87*u.pc.to('AU')*IWA_mars_pretty*u.arcsec.to('rad'),[miny,maxdmaguranus],color='black')
f3_ax1.plot(seps_mars.to('AU'),dmags_mars,color='red')
f3_ax1.plot(seps_mars.to('AU'),dmags_mars,color='black',linestyle='--')
f3_ax1.scatter(sep_mars_edge1,dmag_mars_tmp1,color='red')
f3_ax1.scatter(sep_mars_edge2,dmag_mars_tmp2,color='red')
f3_ax1.set_xlim([1.50,1.525])
f3_ax1.set_ylim([27.3,28.1])
f3_ax1.set_ylabel(r'$\Delta \mathrm{mag}$', weight='bold')
f3_ax1.set_xlabel('s, in AU', weight='bold')
rect1_ax1 = matplotlib.patches.Rectangle((uranusOWASep.value,20.), 50, 50, color='grey',alpha=0.2,linewidth=0) #outside OWA
rect2_ax1 = matplotlib.patches.Rectangle((22.87*u.pc.to('AU')*IWA_mars_pretty*u.arcsec.to('rad'),20.), -50, 50, color='grey',alpha=0.2, linewidth=0) #Inside IWA
rect3_ax1 = matplotlib.patches.Rectangle((22.87*u.pc.to('AU')*IWA_mars_pretty*u.arcsec.to('rad'),maxdmaguranus), (uranusOWASep.value-22.87*u.pc.to('AU')*IWA_mars_pretty*u.arcsec.to('rad')), 50, color='grey',alpha=0.2, linewidth=0)
f3_ax1.plot([11.,11.],[25.5,27.0],color='grey')
f3_ax1.plot([11.,12.5],[27.0,27.0],color='grey')
f3_ax1.plot([12.5,12.5],[27.0,25.5],color='grey')
f3_ax1.plot([12.5,11.],[25.5,25.5],color='grey')
f3_ax1.add_patch(rect1_ax1)
f3_ax1.plot([11.,11.],[30.5,31.5],color='grey')
f3_ax1.plot([11.,12.5],[31.5,31.5],color='grey')
f3_ax1.plot([12.5,12.5],[31.5,30.5],color='grey')
f3_ax1.plot([12.5,11.],[30.5,30.5],color='grey')
f3_ax1.add_patch(rect2_ax1)
f3_ax1.plot([1.4,1.4],[27.25,28.25],color='grey')
f3_ax1.plot([1.4,1.6],[28.25,28.25],color='grey')
f3_ax1.plot([1.6,1.6],[28.25,27.25],color='grey')
f3_ax1.plot([1.6,1.4],[27.25,27.25],color='grey')
f3_ax1.add_patch(rect3_ax1)
f3_ax2 = fig3.add_subplot(gs[1, 1])
f3_ax2.plot(np.asarray([1,1])*uranusOWASep.value,[miny,maxdmaguranus],color='black')
f3_ax2.plot([22.87*u.pc.to('AU')*IWA_mars_pretty*u.arcsec.to('rad'),uranusOWASep.value],[maxdmaguranus,maxdmaguranus],color='black')
f3_ax2.plot(seps_uranus.to('AU'),dmags_uranus,color=colors.to_rgba('skyblue'))
f3_ax2.scatter(seps_uranus_fourInt2[0,1].to('AU'),dmags_uranus_fourInt2[0,1],color=colors.to_rgba('skyblue'))
f3_ax2.scatter(seps_uranus_fourInt3[0,0].to('AU'),dmags_uranus_fourInt3[0,0],color=colors.to_rgba('skyblue'))
f3_ax2.plot(seps_uranusNotVisible,dmags_uranusNotVisible,color='black',linestyle='--')
f3_ax2.set_xlim([11.55,11.9])
f3_ax2.set_ylim([30.80,31.20])
f3_ax2.set_ylabel(r'$\Delta \mathrm{mag}$', weight='bold')
f3_ax2.set_xlabel('s, in AU', weight='bold')
rect1_ax2 = matplotlib.patches.Rectangle((uranusOWASep.value,20.), 50, 50, color='grey',alpha=0.2,linewidth=0) #outside OWA
rect2_ax2 = matplotlib.patches.Rectangle((22.87*u.pc.to('AU')*IWA_mars_pretty*u.arcsec.to('rad'),20.), -50, 50, color='grey',alpha=0.2, linewidth=0) #Inside IWA
rect3_ax2 = matplotlib.patches.Rectangle((22.87*u.pc.to('AU')*IWA_mars_pretty*u.arcsec.to('rad'),maxdmaguranus), (uranusOWASep.value-22.87*u.pc.to('AU')*IWA_mars_pretty*u.arcsec.to('rad')), 50, color='grey',alpha=0.2, linewidth=0)
f3_ax2.plot([11.,11.],[25.5,27.0],color='grey')
f3_ax2.plot([11.,12.5],[27.0,27.0],color='grey')
f3_ax2.plot([12.5,12.5],[27.0,25.5],color='grey')
f3_ax2.plot([12.5,11.],[25.5,25.5],color='grey')
f3_ax2.add_patch(rect1_ax2)
f3_ax2.plot([11.,11.],[30.5,31.5],color='grey')
f3_ax2.plot([11.,12.5],[31.5,31.5],color='grey')
f3_ax2.plot([12.5,12.5],[31.5,30.5],color='grey')
f3_ax2.plot([12.5,11.],[30.5,30.5],color='grey')
f3_ax2.add_patch(rect2_ax2)
f3_ax2.plot([1.4,1.4],[27.25,28.25],color='grey')
f3_ax2.plot([1.4,1.6],[28.25,28.25],color='grey')
f3_ax2.plot([1.6,1.6],[28.25,27.25],color='grey')
f3_ax2.plot([1.6,1.4],[27.25,27.25],color='grey')
f3_ax2.add_patch(rect3_ax2)
f3_ax3 = fig3.add_subplot(gs[2, 1])
f3_ax3.plot(np.asarray([1,1])*uranusOWASep.value,[miny,maxdmaguranus],color='black')
f3_ax3.plot(seps_uranus.to('AU'),dmags_uranus,color=colors.to_rgba('skyblue'))
f3_ax3.plot(seps_uranusNotVisible,dmags_uranusNotVisible,color='black',linestyle='--')
f3_ax3.scatter(seps_uranus_fourInt2[0,3].to('AU'),dmags_uranus_fourInt2[0,3],color=colors.to_rgba('skyblue'))
f3_ax3.scatter(seps_uranus_pretty[0,3].to('AU'),dmags_uranus_fourInt2_pretty[0,3],color=colors.to_rgba('skyblue'),marker='x')
f3_ax3.set_xlim([11.829985,11.830015])
f3_ax3.set_ylim([25.5,27.0])
#f3_ax3.ticklabel_format(axis='x',style='sci',useOffset=11.83,useMathText=True)
f3_ax3.set_ylabel(r'$\Delta \mathrm{mag}$', weight='bold')
f3_ax3.set_xlabel('s ' + r'$\times 10^{-5}+11.83$' + ', in AU', weight='bold')#,labelpad=11)
rect1_ax3 = matplotlib.patches.Rectangle((uranusOWASep.value,20.), 50, 50, color='grey',alpha=0.2,linewidth=0) #outside OWA
rect2_ax3 = matplotlib.patches.Rectangle((22.87*u.pc.to('AU')*IWA_mars_pretty*u.arcsec.to('rad'),20.), -50, 50, color='grey',alpha=0.2, linewidth=0) #Inside IWA
rect3_ax3 = matplotlib.patches.Rectangle((22.87*u.pc.to('AU')*IWA_mars_pretty*u.arcsec.to('rad'),maxdmaguranus), (uranusOWASep.value-22.87*u.pc.to('AU')*IWA_mars_pretty*u.arcsec.to('rad')), 50, color='grey',alpha=0.2, linewidth=0)
f3_ax3.plot([11.,11.],[25.5,27.0],color='grey')
f3_ax3.plot([11.,12.5],[27.0,27.0],color='grey')
f3_ax3.plot([12.5,12.5],[27.0,25.5],color='grey')
f3_ax3.plot([12.5,11.],[25.5,25.5],color='grey')
f3_ax3.add_patch(rect1_ax3)
f3_ax3.plot([11.,11.],[30.5,31.5],color='grey')
f3_ax3.plot([11.,12.5],[31.5,31.5],color='grey')
f3_ax3.plot([12.5,12.5],[31.5,30.5],color='grey')
f3_ax3.plot([12.5,11.],[30.5,30.5],color='grey')
f3_ax3.add_patch(rect2_ax3)
f3_ax3.plot([1.4,1.4],[27.25,28.25],color='grey')
f3_ax3.plot([1.4,1.6],[28.25,28.25],color='grey')
f3_ax3.plot([1.6,1.6],[28.25,27.25],color='grey')
f3_ax3.plot([1.6,1.4],[27.25,27.25],color='grey')
f3_ax3.add_patch(rect3_ax3)
f3_ax3.xaxis.offsetText.set_visible(False)
#f3_ax3.xaxis.offsetText.set_text(r'$\times 10^{-5}+11.83$')
#tx = f3_ax3.xaxis.get_offset_text()
#tx.set_text(r'$\times10^{-5}+11.83$')
#tx.draw()
#tx.set_fontsize(7)
#f3_ax3.text(x=11.83,y=25.,s=r"$\times 10^{-5}+11.83$",backgroundcolor="white",bbox={'xy':(11.83,25.),'width':0.01,'height':1.})
plt.gcf().text(0.005, 0.96, 'a)', fontsize=14)
plt.gcf().text(0.09, 0.35, 'b)', fontsize=14)
plt.gcf().text(0.49, 0.705, 'c)', fontsize=14)
plt.gcf().text(0.43, 0.25, 'd)', fontsize=14)
plt.gcf().text(0.68, 0.95, 'b)', fontsize=14)
plt.gcf().text(0.68, 0.65, 'c)', fontsize=14)
plt.gcf().text(0.68, 0.33, 'd)', fontsize=14)
plt.show(block=False)
plt.gcf().canvas.draw()
# Save to a File
date = str(datetime.datetime.now())
date = ''.join(c + '_' for c in re.split('-|:| ',date)[0:-1])#Removes seconds from date
fname = 'dmagVsSIntersectionPlot' + folder.split('/')[-1] + '_' + date
plt.savefig(os.path.join(PPoutpath, fname + '.png'), format='png', dpi=500)
plt.savefig(os.path.join(PPoutpath, fname + '.svg'))
plt.savefig(os.path.join(PPoutpath, fname + '.eps'), format='eps', dpi=500)
plt.savefig(os.path.join(PPoutpath, fname + '.pdf'), format='pdf', dpi=500)
print('Done plotting dmagVsSIntersectionPlot')
| [
"drk94@cornell.edu"
] | drk94@cornell.edu |
59850fd2e1725ec52509b49798ea40b17538a168 | 3c85959670855891a4db4c2cb901cbab1cd4dbc4 | /crop.py | ba246987d996ee60ada74797125ca71f36af3dc3 | [] | no_license | pkmurphy85/Self-Driving-Car-NanoDegree-Term1-Behavioral-Cloning | 6d13abf3b562b33c99028c6011ffe872b3676fff | 973f3fd59a7e6a6583fa44a4527fecea8d76a902 | refs/heads/master | 2020-03-11T00:21:59.766873 | 2018-04-29T00:43:27 | 2018-04-29T00:43:27 | 129,662,033 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 175 | py | import cv2
# Load one simulator frame captured by the center camera.
img = cv2.imread("./Images/center_2018_04_15_17_34_49_863.jpg")
# Keep rows 75..139 (65 px tall) across the full 320-px width: trims sky and car hood.
crop_img = img[75:75+65,0:320]
# Preview and persist the cropped frame.
cv2.imshow("cropped", crop_img)
cv2.imwrite("cropped.png", crop_img)
| [
"pkmurphy85@gmail.com"
] | pkmurphy85@gmail.com |
7d1565ba786aff297e23e5f3ce7456cf1f7fcdc7 | f3ee9772e2a1599dd29705022e820ee48683562b | /script.py | d0319806c11bdb4528ee63f72f59824a6a397367 | [] | no_license | isalevine/file-renamer | 5434c739eb26ca53963a5cd707cd43063e7dcbb3 | 9e4bd0eaab3cb1b6733f6d1e9736c0622f5722cd | refs/heads/master | 2020-06-09T12:47:35.983012 | 2020-05-13T07:49:53 | 2020-05-13T07:49:53 | 193,440,265 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,678 | py | # per: https://stackabuse.com/python-list-files-in-a-directory/
import pathlib
# per: https://stackoverflow.com/questions/54152653/renaming-file-extension-using-pathlib-python-3
import os
path = os.path.realpath('./drum-samples') # set default path here
current_directory = pathlib.Path(path)
def print_menu():
    """Run the interactive command loop of the file-renamer CLI.

    Reads commands from stdin until the user types ``q`` or ``quit``.
    Supported commands: ``pwd``, ``ls``, ``h``/``help``, ``rn`` (rename)
    and ``cd``.  Mutates the module-global ``current_directory``.
    """
    global current_directory
    menu_text = """
    Hello, and welcome to Filename Reader!
    This handy CLI is designed for mass-renaming
    of drum sample filenames, specifically from
    Samples From Mars downloads.
    ~~~"""
    # print_current_directory()
    print(menu_text)
    user_input = ""
    # more efficient way to iterate through checks for q/quit, y/yes, etc?
    while (user_input != "quit") and (user_input != "q"):
        user_input = get_user_input()
        # pwd = print working directory
        if user_input == "pwd":
            print_current_directory()
        # ls = list
        if user_input == "ls":
            for filename in current_directory.iterdir():
                print(filename)
        # h = help
        if user_input == "h" or user_input == "help": # currently empty - use Readme text!
            print_help_menu()
        # rn = rename; syntax: "rn all <old> <new>" or "rn <file> <newname>"
        if user_input[0:2] == "rn":
            array = user_input.split(" ")
            if array[1] == "\\all":
                array[1] = "all"
            if array[1] == "all":
                if (not array[2]) or (not array[3]):
                    print("Rename error!")
                    continue
                # may need to detect empty strings to do beginning inserts?
                # ALSO: may want to ask to confirm empty string inserts?
                # "\" on the command line stands for the empty string.
                if array[2] == '\\':
                    array[2] = ""
                if array[3] == '\\':
                    # use this to count a number of possible blank spaces?
                    array[3] = ""
                # MUST be a way to only iterate through current_directory once!
                # First pass: preview which files would be affected.
                print("You are going to replace: " + array[2] + " => " + array[3])
                print("This will affect the following files: ")
                for filename in current_directory.iterdir():
                    if array[2] in str(filename):
                        print(filename)
                print("Do you want to proceed? (Y/n)")
                final_check = get_user_input()
                if (final_check == "Y") or (final_check == "y") or (final_check == "Yes") or (final_check == "yes"):
                    # Second pass: actually rename every matching file.
                    print("Files changed:")
                    for filename in current_directory.iterdir():
                        if array[2] in str(filename):
                            dst = rename_partial_filename(filename, array[2], array[3])
                            print(dst)
                else:
                    print("Rename aborted!")
                    continue
            else:
                # Single-file rename: "rn <existing-path> <new-path>".
                if array[1] and array[2] and os.path.isfile(array[1]):
                    rename_whole_filename(array[1], array[2])
                else:
                    print("Rename aborted! (Probably because the original filename wasn't found.)")
        # change the following to cd commands => call a separate
        # cd() function that parses and changes current_directory?
        # REFACTOR using os.path module!!!
        if user_input[0:2] == "cd":
            # Remember the old directory so we can roll back on a bad path.
            temp_directory = current_directory
            if user_input[3:5] == "..":
                new_path = current_directory.parent
                current_directory = pathlib.Path(new_path)
            else:
                new_path = os.path.join(current_directory, user_input[3:])
                current_directory = pathlib.Path(new_path)
            print_current_directory()
            if not os.path.isdir(current_directory):
                print("Not a valid directory!")
                current_directory = temp_directory
def print_current_directory():
    """Echo the module-global working directory to stdout."""
    print(f"Current directory is: {current_directory}")
def get_user_input():
    """Print a blank separator line, then prompt for and return one command."""
    print("")
    prompt = "Enter command: "
    return input(prompt)
def rename_whole_filename(filename1, filename2):
    """Rename the file at *filename1* to *filename2* on disk."""
    # Direct OS-level rename; no intermediate variables needed.
    os.rename(filename1, filename2)
def rename_partial_filename(filename, input, output):
    """Replace the last occurrence of *input* in the basename of *filename*.

    Renames the file on disk and returns the new path.

    Fixes two defects of the original implementation, which partitioned the
    *full* path string:
      * a match that occurred only in a parent-directory name rewrote the
        directory part of the path;
      * when *input* did not occur at all, ``rpartition`` returned
        ``('', '', src)`` and *output* was silently prepended to the whole
        path, producing a bogus destination.
    Now only the final path component is modified; if *input* is not found
    there, the file is left untouched and its original path is returned.
    """
    src = str(filename)
    directory, base = os.path.split(src)
    if input not in base:
        # Nothing to replace in the actual file name: leave the file alone.
        return src
    # rpartition: replace the *last* occurrence, same as the old behavior
    # whenever the match was inside the basename.
    head, sep, tail = base.rpartition(input)
    dst = os.path.join(directory, head + output + tail)
    os.rename(src, dst)
    return dst
def print_help_menu():
    """Display the (currently placeholder) help text."""
    print("""
    Hi, and welcome to the help menu!
    Commands:
    """)
# execute main program loop
# NOTE(review): runs on import too; consider an `if __name__ == "__main__":` guard.
print_menu()
# REFACTOR NOTES and IDEAS
# =========================================================
# IDEA for CLI TO NAVIGATE DIRECTORY:
# -> recursively call an update_directory() function
# that will append/mutate based on user input, and
# automatically call the print_menu() function again
# -> once a given directory is selected/"locked in",
# have user input a string to match (i.e. "Snare")
# and confirm a list of matching files
# -> then, specify the mutation with a CLI command:
# $ replace "Snare " with ""
# and take the first "" and second "" as parameters
# for calling change_filenames(list, string1, string2)
# CONSIDER ADDING A MAIN MENU WITH SEVERAL OPTIONS:
# 1. set default path
# 2. toggle arrow function syntax for commands (rn all Snare => Snr)...anything different it can do?
# (...or just make it an optional way of entering commands...)
# alt: consider implementing "" to denote whitespace... (rn all "Snare " "Snr")
# or: arrow functions could encapsulate multiple changes? : ("Example 1", "Example 2") => ("ex1", "ex2")
# 3. toggle logging of filename changes => could this possibly lead to an undo function??
# 4. help/instructions
# 5. exit
# OTHER ADD-ONS:
# - be able to select individual files and do more with them? (i.e. see/edit metadata?)
# - have a slick CLI interface that can constantly show/update the directory's files? (toggle??)
| [
"isa.loves.coding@gmail.com"
] | isa.loves.coding@gmail.com |
d1637ba38880e1918ef3ef2ff63a4a45df0985d1 | 73e277935ef28fd05935c93a3f155c9cc6dc6de7 | /ctf/crypto/rsa/pq_common_pollard_rho.py | b3132f1b7d7879f7cddc3228571c37da556ae317 | [] | no_license | ohmygodlin/snippet | 5ffe6b8fec99abd67dd5d7f819520e28112eae4b | 21d02015492fb441b2ad93b4a455dc4a145f9913 | refs/heads/master | 2023-01-08T14:59:38.618791 | 2022-12-28T11:23:23 | 2022-12-28T11:23:23 | 190,989,347 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,409 | py | #Easy_Rsa, yangcheng-2021, https://lazzzaro.github.io/2021/09/12/match-2021%E7%BE%8A%E5%9F%8E%E6%9D%AF%E7%BD%91%E7%BB%9C%E5%AE%89%E5%85%A8%E5%A4%A7%E8%B5%9B/, https://xz.aliyun.com/t/6703
from Crypto.Util.number import *
import gmpy2
n = 84236796025318186855187782611491334781897277899439717384242559751095347166978304126358295609924321812851255222430530001043539925782811895605398187299748256080526691975084042025794113521587064616352833904856626744098904922117855866813505228134381046907659080078950018430266048447119221001098505107823645953039
e = 58337
c = 13646200911032594651110040891135783560995665642049282201695300382255436792102048169200570930229947213493204600006876822744757042959653203573780257603577712302687497959686258542388622714078571068849217323703865310256200818493894194213812410547780002879351619924848073893321472704218227047519748394961963394668
def f(x):
    """One step of the Pollard-rho iteration map: x -> x^(n-1) + 3 (mod n)."""
    step = pow(x, n - 1, n) + 3
    return step % n
def rho():
    """Factor the module-global modulus n with Pollard's rho.

    Uses Floyd cycle detection (tortoise ``a``, hare ``b``) starting from a
    fresh random seed each outer iteration; returns (p, n // p) once
    gcd(a - b, n) yields a nontrivial factor.
    """
    i = 1  # attempt counter (new random seed per attempt)
    while True:
        a = getRandomRange(2, n)
        b = f(a)
        j = 1  # step counter within this attempt
        while a != b:
            p = GCD(a - b, n)
            print('{} in {} circle'.format(j, i))
            if p > 1:
                # Nontrivial divisor found.
                return (p, n // p)
            a = f(a)      # tortoise advances one step
            b = f(f(b))   # hare advances two steps
            j += 1
        i += 1
# Factor n, then standard RSA decryption: d = e^{-1} mod phi(n), m = c^d mod n.
p, q = rho()
d = gmpy2.invert(e, (p-1)*(q-1))
m = pow(c, d, n)
print(long_to_bytes(m))
#b'SangFor{0a8c2220-4c1b-32c8-e8c1-adf92ec7678b}' | [
"laitaizong@gmail.com"
] | laitaizong@gmail.com |
a9ad1e1edb10e8fc1e8a7133be180469d8058dfe | d0ff7de56025a341c8740cc8eb95171f9f789b4f | /project1.py | 0591582690edc39044fbf7ad796f44975ed69670 | [] | no_license | glitchwizard/OpenCVTutorialFromMurtazasWorkshop | 018fed1333fa6dbf8f2aba42d030bf062df83285 | dfa42842fbe00ac2ef95092559000551d9b146e8 | refs/heads/master | 2023-02-10T09:42:41.280427 | 2021-01-05T00:33:25 | 2021-01-05T00:33:25 | 324,441,959 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,433 | py | import cv2
import numpy as np
#Project
#Screen painting by color
#it will take individual colors and allow you to paint with them on a screen
# Open the default webcam; property ids 3/4 are frame width/height.
cap = cv2.VideoCapture(0)
cap.set(3, 640)
cap.set(4, 480)
# HSV detection ranges per color: [h_min, s_min, v_min, h_max, s_max, v_max, label]
myColors = [
    [5, 107, 0, 19, 255, 255, "Orange"], #orange
    [133, 56, 0, 159, 156, 255, "Purple"], #purple
    [57, 76, 0, 100, 255, 255, "Green"] #green
]
# Drawing colors, index-aligned with myColors.
myColorValues = [ ##this is BGR, not RGB
    [51, 153, 255], # Orange
    [255, 0, 255], # Purple
    [0, 255, 0] # Green
]
#this is a list of points for drawing on canvas
myPoints = [] #[ x, y, colorId]
def findColor(img, myColors, myColorValues):
    """Detect each configured color in *img* and mark the detected point.

    Draws a filled circle on the module-global ``imgResult`` for every
    color range and returns the newly found points as ``[x, y, colorId]``
    triples; a (0, 0) result from getContours means "not found" and is
    not recorded.
    """
    imgHSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    count = 0        # index into myColorValues; doubles as the colorId
    newPoints = []
    for color in myColors:
        # lower = np.array([h_min, s_min, v_min])
        # upper = np.array([h_max, s_max, v_max])
        lower = np.array(color[0:3])
        upper = np.array(color[3:6])
        mask = cv2.inRange(imgHSV, lower, upper)
        x, y = getContours(mask)
        cv2.circle(imgResult,(x,y), 10, myColorValues[count], cv2.FILLED)
        if x != 0 and y != 0:
            newPoints.append([x,y,count])
        count += 1
        #cv2.imshow(str(color[6]), mask)
    return newPoints
def getContours(img):
    """Return (center-x, top-y) of the last large contour in a binary mask.

    Contours with area <= 500 are ignored; (0, 0) is the "nothing found"
    sentinel.
    """
    found, _ = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    x = y = w = h = 0
    for cnt in found:
        if cv2.contourArea(cnt) > 500:
            #cv2.drawContours(imgResult, contour, -1, [255,0,255], 3)
            arc = cv2.arcLength(cnt, True)
            poly = cv2.approxPolyDP(cnt, 0.02 * arc, True)
            x, y, w, h = cv2.boundingRect(poly)
    return x + w // 2, y
def drawOnCanvas(myPoints,myColorValues):
    """Stamp every recorded [x, y, colorId] point onto the global imgResult."""
    for x, y, color_id in myPoints:
        cv2.circle(imgResult, (x, y), 10, myColorValues[color_id], cv2.FILLED)
# Main capture loop: detect colored markers each frame and paint their trail.
while True:
    success, img = cap.read()
    imgResult = img.copy()
    newPoints = findColor(img, myColors, myColorValues)
    if len(newPoints) != 0:
        # Accumulate this frame's detections into the persistent trail.
        for newPoint in newPoints:
            myPoints.append(newPoint)
    if len(myPoints) != 0:
        # Re-draw the whole accumulated trail on this frame's copy.
        drawOnCanvas(myPoints,myColorValues)
    cv2.imshow("Video", imgResult)
    # 'q' quits the loop.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
| [
"charley.mcgowan@outlook.com"
] | charley.mcgowan@outlook.com |
bacf2279a6f0a45ea3f2fcfbac61d84042b305c0 | f5706f4e9ab0e82c3b4f32f2a51cf1add12f4b8d | /graph-project/client.py | a6a220b9b55ef3c1507234ee8c982d2f52d441fd | [] | no_license | brunodantas/sd | 35a01e00a6e993f2c5a2f5082ab86a42dab051b7 | c0c416205ba5c47011fd321058c4a94c68ebe58a | refs/heads/master | 2021-01-19T09:18:52.572582 | 2017-08-03T00:10:52 | 2017-08-03T00:10:52 | 53,506,497 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,418 | py | #!/usr/bin/python3
# -*- coding: UTF-8 -*-
import sys
import glob
from inspect import signature
sys.path.append('gen-py')
sys.path.insert(0, glob.glob('./lib*')[0])
from graph import Graph
from graph.ttypes import NotFound
from thrift import Thrift
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
# Python 2 compatibility: make input() behave like raw_input().
try: input = raw_input
except NameError: pass
# Menu text: numeric codes map to the RPC dispatch table built in main().
opt = "\n0 client.ping()\n1 client.add_upd_vertex( nome, cor, desc, peso)\n2 client.add_upd_edge( v1, v2, peso, bi_flag)\n3 client.get_vertex( v)\n4 client.get_edge( v1, v2)\n5 client.del_vertex( v)\n6 client.del_edge( v1, v2)\n7 client.list_edges( v)\n8 client.list_vertices( v1, v2)\n9 client.list_neighbors( v)\n10 client.shortest_path\n\n"
def main():
    """Connect to the graph Thrift server on localhost and run a command REPL.

    The server port comes from ``sys.argv[1]``.  Each iteration reads a
    numeric menu choice (indexing the RPC table ``f``), then the call's
    arguments as whitespace-separated values, and prints the result.
    ``NotFound`` errors from the server are reported via their ``dsc`` field.
    """
    p = sys.argv[1]
    # Make socket
    transport = TSocket.TSocket('localhost', p)
    # Buffering is critical. Raw sockets are very slow
    transport = TTransport.TBufferedTransport(transport)
    # Wrap in a protocol
    protocol = TBinaryProtocol.TBinaryProtocol(transport)
    # Create a client to use the protocol encoder
    client = Graph.Client(protocol)
    # Connect!
    transport.open()
    client.ping()
    # RPC dispatch table; the menu number typed by the user indexes into it.
    f = [client.ping,client.add_upd_vertex,client.add_upd_edge,client.get_vertex,client.get_edge,client.del_vertex,client.del_edge,client.list_edges,client.list_vertices,client.list_neighbors,client.shortest_path]
    # print(client.add_upd_vertex(1,1,"1",1))
    # print(client.add_upd_vertex(2,2,"2",2))
    # print(client.add_upd_vertex(3,3,"3",3))
    # print(client.add_upd_vertex(4,4,"4",4))
    # print(client.add_upd_vertex(5,5,"5",5))
    # print(client.add_upd_edge(2,1,100.0,True))
    # print(client.add_upd_edge(1,3,200.0,False))
    # print(client.add_upd_edge(2,4,100.0,True))
    # print(client.add_upd_edge(3,4,100.0,False))
    # print(client.add_upd_edge(3,5,500.0,False))
    while 1:
        choice = input(opt)
        choice = int(choice)
        if choice == 0:
            client.ping()
            continue
        # Show the chosen RPC's signature as the argument prompt.
        args = input(str(signature(f[choice]))+'\n')
        args = args.split()
        args[0] = int(args[0])
        if len(args) > 1:
            args[1] = int(args[1])
            # Coerce the remaining positional args per RPC-specific types.
            if choice == 1:
                args[3] = float(args[3])
            elif choice == 2:
                args[2] = float(args[2])
                args[3] = bool(int(args[3]))
        try:
            print(f[choice](*args))
            print('\n')
        except NotFound as e:
            print(e.dsc)
    # Close!
    transport.close()
if __name__ == '__main__':
try:
main()
except Thrift.TException as tx:
print('%s' % tx.message)
| [
"dantas@esolvere.com.br"
] | dantas@esolvere.com.br |
9135fcf5f8c811e586c97a191119fd9d46c83c74 | 6352a8f4726d4d9c1970576e7b10a1394ce987cc | /config/taiga-back/local.py | 8364302d82641dd2918b3294090976761061d191 | [] | no_license | rimms/taiga-docker | d3eec16d36f83a4c74a64b0a08992083a8984779 | 2e23ff15bcab219b8fa25372b6d2111ce3899f9d | refs/heads/master | 2021-04-30T08:24:05.782331 | 2018-02-13T11:42:30 | 2018-02-13T11:42:30 | 121,373,534 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 564 | py | from .common import *
# Database: PostgreSQL; host "taiga-db" is the linked database container name.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': 'taiga',
        'USER': 'taiga',
        'PASSWORD': 'password',
        'HOST': 'taiga-db',
        'PORT': '5432',
    }
}
# Public URLs under which uploaded media and static assets are served.
MEDIA_URL = "http://localhost/media/"
STATIC_URL = "http://localhost/static/"
# Frontend site location, used to build absolute links.
SITES["front"]["scheme"] = "http"
SITES["front"]["domain"] = "localhost"
# NOTE(review): placeholder secret key — override for any non-local deployment.
SECRET_KEY = "theveryultratopsecretkey"
DEBUG = False
# Self-registration disabled; accounts must be created by an admin.
PUBLIC_REGISTER_ENABLED = False
DEFAULT_FROM_EMAIL = "no-reply@localhost"
SERVER_EMAIL = DEFAULT_FROM_EMAIL
"imms.ryohei@gmail.com"
] | imms.ryohei@gmail.com |
3eea5540d632b5f817bbd40bce3131d0ad1f8263 | 101f124e47b71134a25ed664998ec244a6e4ab55 | /cluster_CESM-PIC.py | dd1ec609a7c12a5c19979391be26a9d92b4aa8bf | [] | no_license | cxzhangqi/cluster_ssh | 31e3777c141b09a08fd6ddfb221397b856b4d3ac | 4ca640d5ed0188e7b9ea25e688d077a19fbd6ab3 | refs/heads/main | 2023-06-05T18:00:59.583829 | 2021-06-28T23:51:19 | 2021-06-28T23:51:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 39,116 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Perform Clustering Analysis on CESM Data (Pre-Industrial Control)
Created on Wed Mar 10 10:10:37 2021
@author: gliu
"""
from sklearn.metrics.pairwise import haversine_distances
from sklearn.metrics import silhouette_score,silhouette_samples
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import xarray as xr
import numpy as np
import pygmt
from tqdm import tqdm
import os
import glob
import time
import cmocean
from scipy.signal import butter, lfilter, freqz, filtfilt, detrend
import cartopy.crs as ccrs
import cartopy.feature as cfeature
from pylab import cm
from scipy.cluster.hierarchy import dendrogram, linkage, fcluster
from scipy.spatial.distance import squareform
# Custom Toolboxes
import sys
sys.path.append("/Users/gliu/Downloads/02_Research/01_Projects/01_AMV/00_Commons/03_Scripts/")
sys.path.append("/Users/gliu/Downloads/02_Research/01_Projects/03_SeaLevel/03_Scripts/cluster_ssh/")
from amv import proc,viz
import slutil
import yo_box as ybx
import tbx
#%% Used edits
# Set Paths
# Input data and figure output locations (local machine paths).
datpath = "/Users/gliu/Downloads/02_Research/01_Projects/03_SeaLevel/01_Data/01_Proc/"
outfigpath = "/Users/gliu/Downloads/02_Research/01_Projects/03_SeaLevel/02_Figures/20210610/"
proc.makedir(outfigpath)
# Experiment Names
#start = '1993-01'
#end = '2013-01'
#start = '1850-01'
#end = '2100-12'
nclusters = 6
rem_gmsl = True
e = 0 # Ensemble index (ensnum-1), remove after loop is developed
maxiter = 5 # Number of iterations for elimiting points
minpts = 30 # Minimum points per cluster
# Other Toggles
debug = True
savesteps = True # Save Intermediate Variables
filteragain = False # Smooth variable again after coarsening
add_gmsl = False # Add AVISO GMSL
if add_gmsl:
    # GMSL cannot both be added back and removed; force removal off.
    rem_gmsl=0
ensnum = e+1
# Encode the experiment configuration into dataset/experiment name strings.
datname = "CESM_PIC_remGMSL%i" % (rem_gmsl)
expname = "%s_%iclusters_minpts%i_maxiters%i" % (datname,nclusters,minpts,maxiter)
print(datname)
print(expname)
# Make Directory for Experiment
expdir = outfigpath+expname +"/"
checkdir = os.path.isdir(expdir)
if not checkdir:
    print(expdir + " Not Found!")
    os.makedirs(expdir)
else:
    print(expdir+" was found!")
#%% Functions
def cluster_ssh(sla,lat,lon,nclusters,distthres=3000,
                returnall=False,absmode=0,distmode=0,uncertmode=0,printmsg=True,
                calcsil=False):
    """Hierarchically cluster SSH anomalies by correlation and distance.

    Parameters
    ----------
    sla : ndarray [time x lat x lon]
        Sea-level anomaly field.
    lat, lon : 1-D ndarrays
        Coordinate vectors matching sla's spatial dimensions.
    nclusters : int
        Number of clusters to cut the dendrogram into.
    distthres : float
        Distance (km) at which the exponential distance weight equals 0.5.
    returnall : bool
        Also return the rho/cov/distance matrices.
    absmode : int
        0 = raw corr/cov, 1 = absolute values, 2 = negated values.
    distmode : int
        0 = distance * correlation, 1 = distance only, 2 = correlation only.
    uncertmode : int
        0 = mean-based in/out covariance ratio, 1 = median-based.
    printmsg : bool
        Print per-cluster point counts.
    calcsil : bool
        Also compute silhouette values (changes the return signature).

    Returns
    -------
    clustered, uncert, cluster_count, Wk (+ extras per calcsil/returnall).

    Bugfix: the original body passed the module-global ``lon5``/``lat5`` to
    slutil.calc_matrices, silently ignoring the ``lat``/``lon`` arguments;
    the parameters are now used as intended.
    """
    # ---------------------------------------------
    # Calculate Correlation, Covariance, and Distance Matrices
    # ---------------------------------------------
    ntime,nlat,nlon = sla.shape
    srho,scov,sdist,okdata,okpts,coords2 = slutil.calc_matrices(sla,lon,lat,return_all=True)
    #npts = okdata.shape[1]

    # -------------------------------
    # Apply corrections based on mode
    # -------------------------------
    if absmode == 1: # Take Absolute Value of Correlation/Covariance
        scov = np.abs(scov)
        srho = np.abs(srho)
    elif absmode == 2: # Use Anticorrelation, etc
        scov *= -1
        srho *= -1

    # --------------------------
    # Combine the Matrices
    # --------------------------
    a_fac = np.sqrt(-distthres/(2*np.log(0.5))) # Calculate so exp=0.5 when distance is distthres km
    expterm = np.exp(-sdist/(2*a_fac**2))
    if distmode == 0: # Include distance and correlation
        distance_matrix = 1-expterm*srho
    elif distmode == 1: # Just Include distance
        distance_matrix = 1-expterm
    elif distmode == 2: # Just Include correlation
        distance_matrix = 1-srho

    # --------------------------
    # Do Clustering (scipy)
    # --------------------------
    cdist = squareform(distance_matrix,checks=False)
    linked = linkage(cdist,'weighted')
    clusterout = fcluster(linked, nclusters,criterion='maxclust')

    # --------------------
    # Calculate Silhouette
    # --------------------
    if calcsil:
        s_score,s,s_bycluster = slutil.calc_silhouette(distance_matrix,clusterout,nclusters)

    # -------------------------
    # Calculate the uncertainty
    # -------------------------
    # Ratio of within-cluster to out-of-cluster covariance at each point.
    uncertout = np.zeros(clusterout.shape)
    for i in range(len(clusterout)):
        covpt = scov[i,:]
        cid = clusterout[i]
        covin = covpt[np.where(clusterout==cid)]
        covout = covpt[np.where(clusterout!=cid)]
        if uncertmode == 0:
            uncertout[i] = np.mean(covin)/np.mean(covout)
        elif uncertmode == 1:
            uncertout[i] = np.median(covin)/np.median(covout)

    # Apply rules from Thompson and Merrifield (Do this later)
    # if uncert > 2, set to 2
    # if uncert <0.5, set to 0
    #uncertout[uncertout>2] = 2
    #uncertout[uncertout<0.5] = 0

    # ------------------------------
    # Calculate Wk for gap statistic
    # ------------------------------
    Wk = np.zeros(nclusters)
    for i in range(nclusters):
        cid = i+1
        ids = np.where(clusterout==cid)[0]
        dist_in = distance_matrix[ids[:,None],ids[None,:]] # Get Pairwise Distances within cluster
        dist_in = dist_in.sum()/2 # Sum and divide by 2 (since pairs are replicated)
        Wk[i] = dist_in

    # -----------------------
    # Replace into full array
    # -----------------------
    clustered = np.zeros(nlat*nlon)*np.nan
    clustered[okpts] = clusterout
    clustered = clustered.reshape(nlat,nlon)
    cluster_count = []
    for i in range(nclusters):
        cid = i+1
        cnt = (clustered==cid).sum()
        cluster_count.append(cnt)
        if printmsg:
            print("Found %i points in cluster %i" % (cnt,cid))
    uncert = np.zeros(nlat*nlon)*np.nan
    uncert[okpts] = uncertout
    uncert = uncert.reshape(nlat,nlon)

    if calcsil: # Return silhouette values
        return clustered,uncert,cluster_count,Wk,s,s_bycluster
    if returnall:
        return clustered,uncert,cluster_count,Wk,srho,scov,sdist,distance_matrix
    return clustered,uncert,cluster_count,Wk
def plot_results(clustered,uncert,expname,lat5,lon5,outfigpath):
    """
    Plot and save three figures for one clustering iteration:
      1. The cluster map itself.
      2. The raw uncertainty ratio map.
      3. Per-cluster uncertainty shaded with a distinct colormap per cluster.

    Parameters
    ----------
    clustered : 2-D array [lat x lon] of integer cluster ids (NaN = unclustered)
    uncert    : 2-D array [lat x lon] of uncertainty ratios
                NOTE: mutated in place below (thresholded), so the caller's
                array is modified.
    expname   : string used in titles and output file names
    lat5,lon5 : 1-D coordinate arrays for the 5-degree grid
    outfigpath: directory prefix for saved figures

    Returns
    -------
    fig,ax,fig1,ax1 : handles of the uncertainty and shaded-cluster figures

    NOTE(review): relies on the module-level global `nclusters`; confirm it
    matches the number of classes in `clustered`.
    """
    # Set some defaults
    # One sequential colormap per cluster for the shaded-uncertainty plot
    # ('Blues' appears twice — presumably intentional cycling; confirm).
    ucolors = ('Blues','Purples','Greys','Blues','Reds','Oranges','Greens')
    proj = ccrs.PlateCarree(central_longitude=180)
    cmap = cm.get_cmap("jet",nclusters)

    # Figure 1: cluster map
    fig,ax = plt.subplots(1,1,subplot_kw={'projection':proj})
    ax = slutil.add_coast_grid(ax)
    gl = ax.gridlines(ccrs.PlateCarree(central_longitude=0),draw_labels=True,
                      linewidth=2, color='gray', alpha=0.5, linestyle="dotted",lw=0.75)
    gl.xlabels_top = False
    gl.ylabels_right = False
    pcm = ax.pcolormesh(lon5,lat5,clustered,cmap=cmap,transform=ccrs.PlateCarree())#,cmap='Accent')#@,cmap='Accent')
    plt.colorbar(pcm,ax=ax,orientation='horizontal')
    ax.set_title("Clustering Results \n nclusters=%i %s" % (nclusters,expname))
    plt.savefig("%sCluster_results_n%i_%s.png"%(outfigpath,nclusters,expname),dpi=200,transparent=True)

    # Plot raw uncertainty (before thresholding)
    fig,ax = plt.subplots(1,1,subplot_kw={'projection':proj})
    ax = slutil.add_coast_grid(ax)
    pcm = plt.pcolormesh(lon5,lat5,uncert,cmap='copper',transform=ccrs.PlateCarree())
    ax.set_title(r"Uncertainty $(<\sigma^{2}_{out,x}>/<\sigma^{2}_{in,x}>)$")
    fig.colorbar(pcm,ax=ax,fraction=0.02)
    plt.savefig(outfigpath+"Uncertainty.png",dpi=200)

    # Apply Thompson and Merrifield thresholds (clamp high, zero low).
    # NOTE: in-place — modifies the caller's `uncert` array.
    uncert[uncert>2] = 2
    uncert[uncert<0.5]=0

    # Plot Cluster Uncertainty: each cluster gets its own sequential colormap
    fig1,ax1 = plt.subplots(1,1,subplot_kw={'projection':proj})
    ax1 = slutil.add_coast_grid(ax1)
    for i in range(nclusters):
        cid = i+1
        # Wrap around the colormap tuple if there are more clusters than colors
        if (i+1) > len(ucolors):
            ci=i%len(ucolors)
        else:
            ci=i
        cuncert = uncert.copy()
        # Multiplying by NaN blanks every point outside this cluster
        cuncert[clustered!=cid] *= np.nan
        ax1.pcolormesh(lon5,lat5,cuncert,vmin=0,vmax=2,cmap=ucolors[ci],transform=ccrs.PlateCarree())
        #fig.colorbar(pcm,ax=ax)
    ax1.set_title("Clustering Output (nclusters=%i) %s "% (nclusters,expname))
    plt.savefig(outfigpath+"Cluster_with_Shaded_uncertainties_%s.png" % expname,dpi=200)
    return fig,ax,fig1,ax1
def elim_points(sla,lat,lon,nclusters,minpts,maxiter,outfigpath,distthres=3000,
                absmode=0,distmode=0,uncertmode=0,viz=True,printmsg=True,
                calcsil=False):
    """
    Iteratively cluster SSH anomalies, removing every cluster with fewer
    than [minpts] members and re-clustering the remaining points, until no
    cluster is removed or [maxiter] passes have run.

    Parameters
    ----------
    sla        : 3-D array [time x lat x lon] of SSH anomalies
    lat,lon    : 1-D coordinate arrays
    nclusters  : number of clusters per pass
    minpts     : minimum member count for a cluster to survive
    maxiter    : maximum number of clustering passes
    outfigpath : directory for per-iteration figures (when viz=True)
    distthres,absmode,distmode,uncertmode : forwarded to cluster_ssh
    viz        : plot each iteration's result via plot_results
    printmsg   : print progress/removal messages
    calcsil    : also collect silhouette scores from cluster_ssh

    Returns
    -------
    allclusters,alluncert,allcount,rempts,allWk
    (plus alls, alls_byclust when calcsil=True)
    rempts records, per grid point, the iteration at which it was removed
    (NaN if never removed).
    """
    ntime,nlat,nlon = sla.shape
    slain = sla.copy()

    # Preallocate per-iteration result lists
    allclusters = []
    alluncert = []
    allcount = []
    allWk = []
    if calcsil:
        alls = []
        alls_byclust = []
    rempts = np.zeros((nlat*nlon))*np.nan

    # Iterate until no cluster falls below minpts (or maxiter reached)
    flag = True
    it = 0
    while flag and it < maxiter:
        if printmsg:
            print("Iteration %i ========================="%it)
        expname = "iteration%02i" % (it+1)

        # Perform Clustering
        clustoutput = cluster_ssh(slain,lat,lon,nclusters,distthres=distthres,
                                  absmode=absmode,distmode=distmode,uncertmode=uncertmode,
                                  printmsg=printmsg,calcsil=calcsil)
        if calcsil:
            clustered,uncert,cluster_count,Wk,s,s_byclust = clustoutput
            alls.append(s)
            alls_byclust.append(s_byclust)
        else:
            clustered,uncert,cluster_count,Wk = clustoutput

        # Save results
        allclusters.append(clustered)
        alluncert.append(uncert)
        allcount.append(cluster_count)
        allWk.append(Wk)

        if viz:
            # Visualize Results
            fig,ax,fig1,ax1 = plot_results(clustered,uncert,expname,lat,lon,outfigpath)

        # Check cluster counts.
        # BUG FIX: `flag` was previously reset to False on EVERY loop pass,
        # so only the LAST cluster decided whether iteration continued.
        # Set it once before the loop and latch it True whenever any
        # cluster is removed (matches the intent of the old commented-out
        # `removeflag` logic).
        flag = False
        for i in range(nclusters):
            cid = i+1
            if cluster_count[i] < minpts:
                flag = True # At least one cluster removed -> keep iterating
                if printmsg: # Consistency: honor printmsg like other messages
                    print("\tCluster %i (count=%i) will be removed" % (cid,cluster_count[i]))
                clusteredrs = clustered.reshape(nlat*nlon)
                slainrs = slain.reshape(ntime,nlat*nlon)
                slainrs[:,clusteredrs==cid] = np.nan # Assign NaN Values
                rempts[clusteredrs==cid] = it # Record iteration of removal
                slain = slainrs.reshape(ntime,nlat,nlon)
        it += 1

    if printmsg:
        print("COMPLETE after %i iterations"%it)
    rempts = rempts.reshape(nlat,nlon)
    if calcsil:
        return allclusters,alluncert,allcount,rempts,allWk,alls,alls_byclust
    return allclusters,alluncert,allcount,rempts,allWk
#%% Load in the dataset
# Load data (preproc, then anomalized)
st = time.time()
ds = xr.open_dataset("%sSSHA_coarse_PIC.nc"%(datpath))
ssh = ds.SSH.values/100 # Convert to meters (file presumably stores cm -- confirm)
lat5 = ds.lat.values    # 5-degree grid latitudes
lon5 = ds.lon.values    # 5-degree grid longitudes
times = ds.time.values
ntime,nlat5,nlon5 = ssh.shape
print("Loaded data in %.2fs"%(time.time()-st))

# Set up time array of "YYYY-MM" strings for later index lookups
timesmon = np.array(["%04d-%02d"%(t.year,t.month) for t in times])

# Plotting utilities
cmbal = cmocean.cm.balance

#%% Work
# ------------------------------
# Apply land ice mask from aviso
# ------------------------------
mask = np.load(datpath+"AVISO_landice_mask_5deg.npy")
ssha = ssh * mask[None,:,:] # Broadcast the [lat x lon] mask over the time axis

# ------------------
# Remove GMSL Signal
# ------------------
lonf = 330 # Test-point longitude (deg E) for removal diagnostics
latf = 50  # Test-point latitude (deg N)
timesyr = np.arange(0,int(len(times)/12)) # Year indices (n months // 12)
# Remove the global-mean sea level signal before clustering (optional)
if rem_gmsl>0:
    print("Removing GMSL")
    out1 = slutil.remove_GMSL(ssha,lat5,lon5,timesyr,viz=True,testpoint=[lonf,latf],awgt=True)
    if len(out1)>2:
        # viz=True also returns a diagnostic figure/axis for the test point
        ssha,gmslrem,fig,ax = out1
        plt.savefig(expdir+"GMSL_Removal_CESM_ens%i_testpoint_lon%i_lat%i.png"%(ensnum,lonf,latf),dpi=200)
    else:
        # BUG FIX: was `ssha,gmsl=out1`, which left `gmslrem` undefined and
        # raised NameError in the check below.
        ssha,gmslrem = out1
        if np.all(np.abs(gmslrem)>(1e-10)):
            print("Saving GMSL")
            #np.save(datpath+"CESM1_ens%i_GMSL_%s_%s.npy"%(ensnum,start,end),gmslrem)
else:
    print("GMSL Not Removed")
# ---------------------
# Add in the Aviso GMSL
# ---------------------
if add_gmsl:
    # Add the observed (AVISO) global-mean sea level curve onto the anomalies
    gmslav = np.load(datpath+"AVISO_GMSL_1993-01_2013-01.npy")
    ssh_ori = ssha.copy()
    ssha += gmslav[:,None,None] # Broadcast the time series over lat/lon

    fig,ax = plt.subplots(1,1)
    ax.plot(gmslav,label="GMSL")
    ax.plot()

    # Sanity-check the addition at the test point defined above
    klon,klat = proc.find_latlon(lonf,latf,lon5,lat5)
    fig,ax = plt.subplots(1,1)
    #ax.set_xticks(np.arange(0,len(times)+1,12))
    ax.set_xticks(np.arange(0,len(timesyr),12))
    ax.set_xticklabels(timesyr[::12],rotation = 45)
    ax.grid(True,ls='dotted')
    ax.plot(ssh_ori[:,klat,klon],label="Original",color='k')
    ax.plot(ssha[:,klat,klon],label="After Addition")
    ax.plot(gmslav,label="AVISO-GMSL")
    ax.legend()
    ax.set_title("GMSL Addition at Lon %.2f Lat %.2f (%s to %s)" % (lon5[klon],lat5[klat],timesyr[0],timesyr[-1]))
    ax.set_ylabel("SSH (m)")
    plt.savefig(expdir+"GMSL_Addition.png",dpi=150)
else:
    print("No GMSL Added!")
# ----------------------
#%% Design Low Pass Filter
# ----------------------
ntimer = ssha.shape[0]

# ---
# Apply LP Filter
# ---
# Filter Parameters and Additional plotting options
dt = 24*3600*30 # Timestep in seconds (~1 month)
M = 5           # Number of bands to average when checking the transfer function
xtk = [1/(10*12*dt),1/(24*dt),1/(12*dt),1/(3*dt),1/dt] # Frequency axis ticks
xtkl = ['decade','2-yr','year','season','month']
order = 5 # Butterworth filter order
tw = 18 # filter size for time dim (cutoff period in months)
sla_lp = slutil.lp_butter(ssha,tw,order)

#% Remove NaN points and Examine Low pass filter
slars = sla_lp.reshape(ntimer,nlat5*nlon5)

# ---
# Locate points where values are all zero (land/masked points)
# ---
tsum = slars.sum(0)
zero_pts = np.where(tsum==0)[0]
ptmap = np.array(tsum==0)
# NOTE(review): slars is typically a view of sla_lp, so this also NaNs
# sla_lp itself -- confirm that is intended.
slars[:,zero_pts] = np.nan
ptmap = ptmap.reshape(nlat5,nlon5)

# Map removed points
fig,ax = plt.subplots(1,1,subplot_kw={'projection':ccrs.PlateCarree(central_longitude=0)})
ax = slutil.add_coast_grid(ax)
pcm = ax.pcolormesh(lon5,lat5,ptmap,cmap='bone',transform=ccrs.PlateCarree(),alpha=0.88)
fig.colorbar(pcm,ax=ax)
ax.set_title("Removed Zero Points")

# ---
# Visualize Filter Transfer Function
# ---
okdata,knan,okpts = proc.find_nan(slars,0) # Drop all-NaN points; keep indices
npts5 = okdata.shape[1]
lpdata = okdata.copy()
rawdata = ssha.reshape(ntimer,nlat5*nlon5)[:,okpts]
lpspec,rawspec,p24,filtxfer,fig,ax=slutil.check_lpfilter(rawdata,lpdata,xtk[1],M,tw,dt=24*3600*30)
plt.savefig("%sFilter_Transfer_%imonLP_%ibandavg_%s.png"%(expdir,tw,M,expname),dpi=200)

# ---
# Save results
# ---
if savesteps: # Save low-pass-filtered result, right before clustering
    outname = "%sSSHA_LP_%s_order%i_cutoff%i.npz" % (datpath,datname,order,tw)
    print("Saved to: %s"%outname)
    np.savez(outname,**{
        'sla_lp':sla_lp,
        'lon':lon5,
        'lat':lat5,
        'times':times
        })
#%% Making the Regiondict
from scipy import stats
import itertools
import matplotlib as mpl

# Load a previous moving-window clustering experiment's results
ldzo = np.load(datpath+"CESM_PIC_remGMSL0_6clusters_minpts30_maxiters5_Results_winsize240.npz",allow_pickle=True)
test = ldzo["clusters"]

# Set Region IDs
regionids = ("1: Northwest Pacific",
             "2: ST N. Atl",
             "3: SP N. Atl",
             "4: East Pacific",
             "5: Indian-South Pacific Ocean",
             "6: South Atlantic"
             )

# Make Region Colors (RGB 0-255, normalized to 0-1)
regioncolors = np.array(
            [[233,51,35],
            [73,161,68],
            [154,219,232],
            [251,237,79],
            [81,135,195],
            [138,39,113],
            ])/255
cmapn = (mpl.colors.ListedColormap(regioncolors))
cmap = cm.get_cmap("jet",nclusters)

# First cluster map of the first window, for visual reference
testmap = np.array(test[0])[0,:,:]

fig,ax = plt.subplots(1,1,subplot_kw={'projection':ccrs.PlateCarree()})
ax = viz.add_coast_grid(ax)
pcm=ax.pcolormesh(lon5,lat5,testmap[:,:],cmap=cmap)
# NOTE(review): this box uses 179 where regiondict below uses 180 -- confirm.
ax = viz.plot_box([150,179,5,50],ax=ax,leglab="1: Northwest Pacific")
ax = viz.plot_box([280-360,350-360,20,45],ax=ax,leglab="2: ST N. Atl")
ax = viz.plot_box([300-360,360-360,50,75],ax=ax,leglab="3: SP N. Atl")
ax = viz.plot_box([200-360,250-360,0,35],ax=ax,leglab="4: East Pacific")
ax = viz.plot_box([50,105,-30,15],ax=ax,leglab="5: Indian-South Pacific Ocean")
# NOTE(review): 280-330 (= -50) breaks the X-360 pattern of the other
# Atlantic boxes; possibly a typo for 280-360 -- confirm intended bounds.
ax = viz.plot_box([280-330,360-360,-50,-20],ax=ax,leglab="6: South Atlantic")
fig.colorbar(pcm,ax=ax)
#ax.legend()

# Dictionary of Bounding Boxes to search thru ([W, E, S, N], deg)
regiondict = {1:[150,180,5,50],
             2:[280-360,350-360,20,45],
             3:[300-360,360-360,50,75],
             4:[200-360,250-360,0,35],
             5:[50,105,-30,15],
             6:[280-330,360-360,-50,-20]
             }
def remapcluster(inclust,lat5,lon5,regiondict,printmsg=True,returnremap=False):
    """
    Remap an input cluster map [inclust] according to [regiondict]:
    within each region's bounding box, the most frequent not-yet-assigned
    class is relabeled to that region's id. A class can only be assigned
    once; regions that find no unassigned class get the first leftover
    class instead.

    Parameters
    ----------
    inclust    : 2-D array [lat x lon] of integer class ids (NaN allowed)
    lat5,lon5  : 1-D coordinate arrays
    regiondict : {region_id: [lonW,lonE,latS,latN]} bounding boxes
    printmsg   : print each reassignment
    returnremap: also return the {old_class: new_class} dictionary

    Returns
    -------
    clusternew           : remapped 2-D cluster map
    (clusternew,remapdict) when returnremap=True
    """
    nlat,nlon = inclust.shape
    clusternew = inclust.copy()
    clusternewflat = clusternew.flatten()
    clusteroldflat = inclust.flatten()
    assigned = []  # Old class ids already given a new label
    remapdict = {} # old class id -> new region id
    for r in regiondict.keys():
        #print(r)
        # Get Region (copy so the longitude fix below doesn't mutate regiondict)
        bbox = regiondict[r].copy()
        for i in range(2): # Just check Longitudes (convert negative lon to 0-360)
            if bbox[i] < 0:
                bbox[i]+=360
        varr,lonr,latr,=proc.sel_region(inclust.T,lon5,lat5,bbox,warn=printmsg)

        # Get rid of NaNs
        varrok = varr.flatten().copy()
        varrok = varrok[~np.isnan(varrok)]

        # Get unique elements and counts, sort by count (descending)
        eles,freqs = np.unique(varrok,return_counts=True)
        sortid = np.argsort(freqs)[::-1]
        eles = eles[sortid]
        done=False
        for ele in eles:
            if done: # Skip if already assigned
                continue
            if ele in assigned: # Skip if class has already be reassigned
                continue
            # Assign new cluster (based on the ORIGINAL labels, so earlier
            # reassignments do not cascade)
            clusternewflat[clusteroldflat==ele] = r
            if printmsg:
                print("Reassigned Class %i to %i" % (ele,r))
            assigned.append(int(ele))
            remapdict[int(ele)] = r
            done=True
        if done is False: # When no cluster is assigned...
            # Get unassigned regions, and assign first one
            unassigned = np.setdiff1d(list(regiondict.keys()),assigned)
            ele = unassigned[0]
            clusternewflat[clusteroldflat==ele] = r
            assigned.append(int(ele))
            remapdict[int(ele)] = r
            if printmsg:
                print("Reassigned (Leftover) Class %i to %i because nothing was found" % (ele,r))
    clusternew = clusternewflat.reshape(nlat,nlon)
    if returnremap:
        return clusternew,remapdict
    return clusternew
def patterncorr(map1,map2):
    """
    Centered pattern correlation between two same-shaped 2-D maps
    (Taylor 2001, Eqn. 1), ignoring area weights.

    ROBUSTNESS FIX: NaNs are now excluded via a SHARED mask so the two
    flattened samples stay point-aligned. The original dropped NaNs from
    each map independently, which misaligned the samples (or raised a
    shape error) whenever the two maps had different NaN patterns.
    Results are identical to the original when the NaN patterns match,
    which is the case for all in-file uses.

    Parameters
    ----------
    map1,map2 : 2-D arrays of the same shape (NaN allowed)

    Returns
    -------
    R : float, centered pattern correlation in [-1, 1]
    """
    map1 = np.asarray(map1, dtype=float)
    map2 = np.asarray(map2, dtype=float)
    # Keep only points valid in BOTH maps
    okmask = ~np.isnan(map1) & ~np.isnan(map2)
    map1ok = map1[okmask].flatten()
    map2ok = map2[okmask].flatten()
    N = len(map1ok)
    # Anomalize
    map1a = map1ok - map1ok.mean()
    map2a = map2ok - map2ok.mean()
    std1 = np.std(map1ok) # Population std (ddof=0), as in the original
    std2 = np.std(map2ok)
    # calculate
    R = 1/N*np.sum(map1a*map2a)/(std1*std2)
    return R
def make_mapdict(oldclass,newclass):
    """Pair each old class label with the replacement at the same index."""
    pairs = ((oldclass[i], newclass[i]) for i in range(len(oldclass)))
    return dict(pairs)
def reassign_classes(inclust,mapdict,printmsg=True):
    """
    Relabel the classes of a 2-D cluster map.

    Each old label in [mapdict] is replaced by its mapped value. Matching
    is done against a frozen copy of the ORIGINAL labels, so successive
    replacements never cascade (e.g. {1: 2, 2: 1} swaps cleanly).
    """
    nrows, ncols = inclust.shape
    original_flat = inclust.flatten()          # frozen reference labels
    relabeled_flat = inclust.copy().flatten()  # output being rewritten
    for old_label in mapdict.keys():
        new_label = mapdict[old_label]
        relabeled_flat[original_flat == old_label] = new_label
        if printmsg:
            print("Reassigned Class %i to %i "%(old_label,new_label))
    return relabeled_flat.reshape(nrows, ncols)
def calc_cluster_patcorr(inclust,evalclust,oldclass=None,returnmax=True):
    """
    Best pattern correlation between [inclust] and [evalclust] over all
    relabelings of [evalclust]'s classes.

    Every permutation of [oldclass] (default classes 1..6) is tried as a
    relabeling; the pattern correlation against [inclust] is recorded for
    each. Returns the maximum when returnmax=True, otherwise the full
    array of correlations (one per permutation).
    """
    if oldclass is None:
        oldclass = [1,2,3,4,5,6]
    candidate_orders = list(itertools.permutations(oldclass))
    scores = []
    for candidate in tqdm(candidate_orders):
        lookup = make_mapdict(oldclass, candidate)        # Remapping Dictionary
        relabeled = reassign_classes(evalclust, lookup, printmsg=False)
        scores.append(patterncorr(relabeled, inclust))    # Pattern Correlation
    scores = np.array(scores)
    if returnmax:
        return np.nanmax(scores)
    return scores
#%% Testing out the functions
inclust = testmap
# Remapping Clusters
clusternew = remapcluster(testmap,lat5,lon5,regiondict)
# Plot Remapped Result
fig,ax = plt.subplots(1,1,subplot_kw={'projection':ccrs.PlateCarree()})
ax = viz.add_coast_grid(ax)
pcm=ax.pcolormesh(lon5,lat5,clusternew,cmap=cmapn)
for i in np.arange(1,7):
ax,ls = viz.plot_box(regiondict[i],ax=ax,return_line=True,leglab=regionids[i-1])#,color=regioncolors[i-1])
#ax = viz.plot_box(regiondict[1],ax=ax,leglab="1: Northwest Pacific")
#ax = viz.plot_box([280-360,350-360,20,45],ax=ax,leglab="2: ST N. Atl")
#ax = viz.plot_box([300-360,360-360,50,75],ax=ax,leglab="3: SP N. Atl")
#ax = viz.plot_box([200-360,250-360,0,35],ax=ax,leglab="4: East Pacific")
#ax = viz.plot_box([50,105,-30,15],ax=ax,leglab="5: Indian-South Pacific Ocean")
#ax = viz.plot_box([280-330,360-360,-50,-20],ax=ax,leglab="6: South Atlantic")
fig.colorbar(pcm,ax=ax)
# Example here of testing out patterncorr script
map1 = testmap
map2 = clusternew
patterncorr(map2,map1)
# Testing patcorr iteratively
patcorr = calc_cluster_patcorr(inclust,clusternew,returnmax=True)
"""
Some Notes
allclusters,alluncert = [nclusters,nlat,nlon]
allcount,allWk = [niter,nclasses]
rempt = [nlat,nlon]
"""
#
# %% First, Cluster for the whole PIC
#
sla_in = sla_lp[:,:,:]
# Do Clustering
allclusters,alluncert,allcount,rempt,allWk,alls,alls_byclust= slutil.elim_points(sla_in,lat5,lon5,nclusters,minpts,maxiter,expdir,
viz=False,printmsg=True,calcsil=True)
#%% Plot the results
# Dictionary of Bounding Boxes to search thru
# Inputs
clusterin = allclusters[-1]
uncertin = alluncert[-1]
rempts = rempt
vlm = [-10,10]
nclusters = 6
start = '400-01'
end = '2200-01'
sameplot=True
# Make Region Colors
cmapn,regiondict = slutil.get_regions()
# rempts = rempts.flatten()
# rempts[~np.isnan(rempts)] = 1
# rempts = rempts.reshape(nlat5,nlon5)
proj = ccrs.PlateCarree(central_longitude=180)
# Rearrange clustering number
clusternew,remapdict = slutil.remapcluster(clusterin,lat5,lon5,regiondict,returnremap=True)
# -------------
# Plot Clusters
# -------------
if sameplot:
fig,axs = plt.subplots(1,2,subplot_kw={'projection':proj},figsize=(12,4))
ax = axs[0]
else:
fig,ax = plt.subplots(1,1,subplot_kw={'projection':proj})
ax = viz.add_coast_grid(ax)
pcm = ax.pcolormesh(lon5,lat5,clusternew,cmap=cmapn,transform=ccrs.PlateCarree())
#ax.pcolor(lon5,lat5,rempts,cmap='Greys',transform=ccrs.PlateCarree(),hatch=":")
for o in range(nlon5):
for a in range(nlat5):
pt = rempts[a,o]
if np.isnan(pt):
continue
else:
ax.scatter(lon5[o],lat5[a],s=10,marker="x",color="k",transform=ccrs.PlateCarree())
fig.colorbar(pcm,ax=ax,fraction=0.025)
ax.set_title("CESM1 Clusters (%s to %s)"%(start,end))
if sameplot:
ax = axs[1]
else:
plt.savefig("%s%s_ClustersMap.png"%(expdir,expname),dpi=200,bbox_inches='tight')
fig,ax = plt.subplots(1,1,subplot_kw={'projection':proj})
# ------------------
# Plot Uncertainties
# ------------------
ax = viz.add_coast_grid(ax)
pcm=ax.pcolormesh(lon5,lat5,uncertin,vmin=vlm[0],vmax=vlm[-1],cmap=cmocean.cm.balance,transform=ccrs.PlateCarree())
#cl = ax.contour(lon5,lat5,clusternew,levels=np.arange(0,nclusters+2),colors='k',transform=ccrs.PlateCarree())
fig.colorbar(pcm,ax=ax,fraction=0.025)
ax.set_title(r"CESM1 Cluster Uncertainty $(<\sigma^{2}_{x,in}>/<\sigma^{2}_{x,out}>)$")
if sameplot:
plt.savefig("%s%s_Cluster_and_Uncert.png"%(expdir,expname),dpi=200,bbox_inches='tight')
else:
plt.savefig("%s%s_ClustersUncert.png"%(expdir,expname),dpi=200,bbox_inches='tight')
# inclust = np.array(allclusters[0])
# inuncert = np.array(alluncert[0])
# # Adjust classes
# clusterPIC = remapcluster(inclust,lat5,lon5,regiondict)
# # Plot some results (Clusters Themselves)
# proj = ccrs.PlateCarree(central_longitude=180)
# fig,ax = plt.subplots(1,1,subplot_kw={'projection':proj})
# ax = viz.add_coast_grid(ax)
# pcm=ax.pcolormesh(lon5,lat5,clusterPIC,cmap=cmapn,transform=ccrs.PlateCarree())
# fig.colorbar(pcm,ax=ax,fraction=0.025)
# ax.set_title("CESM-PiC Clusters (Year 400 to 2200)")
# plt.savefig("%sCESM1PIC_%s_Clusters_all.png"%(outfigpath,expname),dpi=200,bbox_inches='tight')
# # Now Plot the Uncertainties
# vlm = [-10,10]
# proj = ccrs.PlateCarree(central_longitude=180)
# fig,ax = plt.subplots(1,1,subplot_kw={'projection':proj})
# ax = viz.add_coast_grid(ax)
# pcm=ax.pcolormesh(lon5,lat5,inuncert,vmin=vlm[0],vmax=vlm[-1],cmap=cmocean.cm.balance,transform=ccrs.PlateCarree())
# fig.colorbar(pcm,ax=ax,fraction=0.025)
# ax.set_title(r"CESM-PIC Cluster Uncertainty $(<\sigma^{2}_{in,x}>/<\sigma^{2}_{out,x}>)$"+" \n (Year 400 to 2200) ")
# plt.savefig("%sCESM1PIC_%s_Uncert_all.png"%(outfigpath,expname),dpi=200,bbox_inches='tight')
#%% Plot results again, but this time with the silhouette metric
sigval = 0 #4.115e-3 # Significance Value (Greater than Red-Noise Null Hypothesis)
# Dictionary of Bounding Boxes to search thru
# Inputs
clusterin = allclusters[-1]
uncertin = alluncert[-1]
s_in = alls[-1]
rempts = rempt
vlm = [-10,10]
nclusters = 6
start = '400-01'
end = '2200-01'
sameplot=True
# Make Region Colors
cmapn,regiondict = slutil.get_regions()
# rempts = rempts.flatten()
# rempts[~np.isnan(rempts)] = 1
# rempts = rempts.reshape(nlat5,nlon5)
proj = ccrs.PlateCarree(central_longitude=180)
# Rearrange clustering number
clusternew,remapdict = slutil.remapcluster(clusterin,lat5,lon5,regiondict,returnremap=True)
# -------------
# Plot Clusters
# -------------
if sameplot:
fig,axs = plt.subplots(1,2,subplot_kw={'projection':proj},figsize=(12,4))
ax = axs[0]
else:
fig,ax = plt.subplots(1,1,subplot_kw={'projection':proj})
ax = viz.add_coast_grid(ax)
pcm = ax.pcolormesh(lon5,lat5,clusternew,cmap=cmapn,transform=ccrs.PlateCarree())
#ax.pcolor(lon5,lat5,rempts,cmap='Greys',transform=ccrs.PlateCarree(),hatch=":")
for o in range(nlon5):
for a in range(nlat5):
pt = rempts[a,o]
if np.isnan(pt):
continue
else:
ax.scatter(lon5[o],lat5[a],s=10,marker="x",color="k",transform=ccrs.PlateCarree())
fig.colorbar(pcm,ax=ax,fraction=0.025)
ax.set_title("CESM1 Clusters (%s to %s)"%(start,end))
if sameplot:
ax = axs[1]
else:
plt.savefig("%s%s_ClustersMap.png"%(expdir,expname),dpi=200,bbox_inches='tight')
fig,ax = plt.subplots(1,1,subplot_kw={'projection':proj})
# Plot 1: the silhoutte value map
cmap="RdBu_r"
silmap = np.zeros(nlat5*nlon5)*np.nan
silmap[okpts] = s_in
silmap = silmap.reshape(nlat5,nlon5)
proj = ccrs.PlateCarree(central_longitude=180)
#fig,ax = plt.subplots(1,1,subplot_kw={'projection':proj})
ax = slutil.add_coast_grid(ax)
pcm=ax.pcolormesh(lon5,lat5,silmap,vmin=-.25,vmax=.25,cmap=cmap,transform=ccrs.PlateCarree())
# for o in range(nlon5):
# for a in range(nlat5):
# pt = silmap[a,o]
# if pt > sigval:
# continue
# else:
# ax.scatter(lon5[o],lat5[a],s=10,marker="x",color="k",transform=ccrs.PlateCarree())
ax.contour(lon5,lat5,silmap,levels=[sigval],colors='k',linewidths=0.75,linestyles=":",transform=ccrs.PlateCarree())
ax.pcolormesh(lon5,lat5,silmap,vmin=-.5,vmax=.5,cmap=cmocean.cm.balance,transform=ccrs.PlateCarree())
fig.colorbar(pcm,ax=ax,fraction=0.025)
ax.set_title("Silhouette Map ($s_{avg}=%.2e$)"%(s_in.mean()))
if sameplot:
plt.savefig("%s%s_Cluster_and_Sil.png"%(expdir,expname),dpi=200,bbox_inches='tight')
else:
plt.savefig("%s%s_ClustersUncert.png"%(expdir,expname),dpi=200,bbox_inches='tight')
#%% WTF is happening with uncertainty. Lets take a look
varin = sla_lp[:,:,:]
# distmode
# 0: Default (Distance and Corr)
# 1: Distance Only
# 2: Corr Only
# 3: Red Noise Dist
# uncertmode
# 0: Default (E(Cov_in) / E(Cov_out))
# 1: Median (Med(Cov_in) / M(Cov_out))
# absmode
# 0: Default: Correlation and Covariances, no modification
# 1: Absolute Values: Take abs(corr) and abs(cov)
# 2: Anti: Take -1*corr, -1*cov
distmode = 0
absmode = 0
uncertmode = 0
chardist = 3000
# ------------------
# Calculate Matrices
# ------------------
ntime,nlat,nlon = varin.shape
srho,scov,sdist,okdata,okpts,coords2=slutil.calc_matrices(varin,lon5,lat5,return_all=True)
if absmode == 1:
scov = np.abs(scov)
srho = np.abs(srho)
elif absmode == 2:
scov *= -1
srho *= -1
# --------------------------
# Combine the Matrices
# --------------------------
distthres=3000
# Default Distance Calculation
a_fac = np.sqrt(-distthres/(2*np.log(0.5)))
expterm = np.exp(-sdist/(2*a_fac**2))
if distmode == 0:
distance_matrix = 1-expterm*srho
elif distmode == 1:
distance_matrix = 1-expterm
elif distmode == 2:
distance_matrix = 1-srho
elif distmode == 3:
distance_matrix = 1-expterm*np.exp(-distthres/chardist)
# --------------------------
# Do Clustering (scipy)
# --------------------------
cdist = squareform(distance_matrix,checks=False)
linked = linkage(cdist,'weighted')
clusterout = fcluster(linked, nclusters,criterion='maxclust')
# ----------------------------
# Calculate Silhouette Metrics
# ----------------------------
s_score,s,s_byclust=slutil.calc_silhouette(distance_matrix,clusterout,nclusters)
# --------------------------
# Replace into pull matrix
# --------------------------
clustered = np.zeros(nlat*nlon)*np.nan
silmap = clustered.copy()
clustered[okpts] = clusterout
silmap[okpts] = s
clustered = clustered.reshape(nlat,nlon)
silmap = silmap.reshape(nlat,nlon)
# ---------------------
# Calculate Uncertainty
# ---------------------
uncertout = np.zeros(clusterout.shape)
covins = []
covouts = []
for i in range(len(clusterout)):
covpt = scov[i,:] #
cid = clusterout[i] #
covin = covpt[np.where(clusterout==cid)]
covout = covpt[np.where(clusterout!=cid)]
covins.append(covin)
covouts.append(covout)
if uncertmode == 0:
uncertout[i] = np.mean(covin)/np.mean(covout)
elif uncertmode == 1:
uncertout[i] = np.median(covin)/np.median(covout)
uncert = np.zeros(nlat*nlon)*np.nan
uncert[okpts] = uncertout
uncert = uncert.reshape(nlat,nlon)
# # Reassign to another Map
# clusterPICALL = clusterPIC.copy()
#
# %% Next, Cluster for some specific time period
#
# Cluster a restricted time period and compare against the full-period result
start = '1750-02'
end = '2200-12'
#end = '1300-01'
# start = '1300-02'
# end = '2200-12'

# Convert Datestrings
timesmon = np.array(["%04d-%02d"%(t.year,t.month) for t in times])

# Find indices of the period endpoints
idstart = np.where(timesmon==start)[0][0]
idend = np.where(timesmon==end)[0][0]

# Restrict Data to period
sla_in = sla_lp[idstart:idend,:,:]
timeslim = timesmon[idstart:idend]
timesyr = np.array(["%04d"%(t.year) for t in times])[idstart:idend]
ntimer = sla_in.shape[0]
timestr = "%s_to_%s" % (start,end)
timestrtitle = "%s to %s" % (start[:4],end[:4])

# Do Clustering
allclusters,alluncert,allcount,rempt,allWk = elim_points(sla_in,lat5,lon5,nclusters,minpts,maxiter,expdir,
                                                         viz=False,printmsg=False)

inclust = np.array(allclusters[-1])   # Final-iteration cluster map
inuncert = np.array(alluncert[-1])    # Final-iteration uncertainty map

# Adjust classes
clusterPIC = inclust
#clusterPIC = remapcluster(inclust,lat5,lon5,regiondict)
# NOTE(review): clusterPICALL is only defined in a commented-out line
# above ("clusterPICALL = clusterPIC.copy()") -- as written this raises
# NameError unless it survives from an earlier interactive session.
patcorr = calc_cluster_patcorr(clusterPIC,clusterPICALL,returnmax=True)

# Plot some results (Clusters Themselves)
proj = ccrs.PlateCarree(central_longitude=180)
fig,ax = plt.subplots(1,1,subplot_kw={'projection':proj})
ax = viz.add_coast_grid(ax)
pcm=ax.pcolormesh(lon5,lat5,clusterPIC,cmap=cmapn,transform=ccrs.PlateCarree())
fig.colorbar(pcm,ax=ax,fraction=0.025)
ax.set_title("CESM-PiC Clusters (Year %s) \n Pattern Correlation = %.3f" % (timestrtitle,patcorr))
plt.savefig("%sCESM1PIC_%s_Clusters_%s.png"%(outfigpath,expname,timestr),dpi=200,bbox_inches='tight')

# Now Plot the Uncertainties
vlm = [-10,10]
proj = ccrs.PlateCarree(central_longitude=180)
fig,ax = plt.subplots(1,1,subplot_kw={'projection':proj})
ax = viz.add_coast_grid(ax)
pcm=ax.pcolormesh(lon5,lat5,inuncert,vmin=vlm[0],vmax=vlm[-1],cmap=cmocean.cm.balance,transform=ccrs.PlateCarree())
fig.colorbar(pcm,ax=ax,fraction=0.025)
ax.set_title(r"CESM-PIC Cluster Uncertainty $(<\sigma^{2}_{in,x}>/<\sigma^{2}_{out,x}>)$"+" \n (Year %s) " % (timestrtitle))
plt.savefig("%sCESM1PIC_%s_Uncert_%s.png"%(outfigpath,expname,timestr),dpi=200,bbox_inches='tight')
#%% Calculate Pattern Correlation for each moving 20-year window
npers = len(test)
remapclusts = np.zeros((npers,nlat5,nlon5))*np.nan
for i in tqdm(range(npers)):
inclust = np.array(test[i])[-1,:,:]
clusterPIC = remapcluster(inclust,lat5,lon5,regiondict,printmsg=False)
remapclusts[i,:,:] = clusterPIC.copy()
#%% Make an animation
from matplotlib.animation import FuncAnimation
import matplotlib.animation as animation
# Animation parameters
frames = 12 #Indicate number of frames
figsize = (8,6)
vm = [-5,5]
interval = 0.1
bbox = [-80,0,0,80]
fps= 10
savetype="mp4"
dpi=100
yrstrs = []
for i in tqdm(range(ntime-240)):
rng = np.arange(i,i+winsize+1)
yrstr = "%s to %s" % (timesmon[rng[0]][:4],timesmon[rng[-1]][:4])
yrstrs.append(yrstr)
lon180,remap180 = proc.lon360to180(lon5,remapclusts.transpose(2,1,0))
invar = remap180.transpose(1,0,2)
#invar = remapclusts.transpose(1,2,0) # [lat x lon x time]
def make_figure():
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(1,1,1,projection=ccrs.PlateCarree())
ax = viz.add_coast_grid(ax)
return fig,ax
start = time.time()
fig,ax = make_figure() # Make the basemap
pcm = ax.pcolormesh(lon180,lat5,invar[...,i],cmap=cmapn)
fig.colorbar(pcm,orientation='horizontal',fraction=0.046,pad=0.05)
pcm.set_array(invar[:,:,i].ravel())
def animate(i):
pcm.set_array(invar[...,i].flatten())
ax.set_title("Years %s" % (yrstrs[i]))
print("\rCompleted frame %i"%i,end="\r",flush=True)
anim = FuncAnimation(
fig, animate, interval=interval, frames=frames, blit=False,)
#anim.save('%sForcingAnim.mp4'%outfigpath, writer=animation.FFMpegWriter(fps=fps),dpi=dpi)
anim.save('%ssst_test.gif'%outfigpath, writer='imagemagick',fps=fps,dpi=dpi)
# Pass figure animator and draw on it
# blit = True, redraw only parts that have changed
print("Animation completed in %.2fs"%(time.time()-start))
# ------------------------
#%% Try Silhouette Metric
# -----------------------
start = '0400-01'
end = '2200-12'
# Convert Datestrings
timesmon = np.array(["%04d-%02d"%(t.year,t.month) for t in times])
# Find indices
idstart = np.where(timesmon==start)[0][0]
idend = np.where(timesmon==end)[0][0]
# Restrict Data to period
sla_in = sla_lp[idstart:idend,:,:]
timeslim = timesmon[idstart:idend]
timesyr = np.array(["%04d"%(t.year) for t in times])[idstart:idend]
ntimer = sla_in.shape[0]
timestr = "%s_to_%s" % (start,end)
timestrtitle = "%s to %s" % (start[:4],end[:4])
# Do Clustering
clustered,uncert,cluster_count,Wk,s,s_byclust = cluster_ssh(sla_in,lat5,lon5,6,returnall=True,calcsil=True)
# Set input data
inclust = np.array(clustered)
inuncert = np.array(uncert)
# Adjust classes
clusterPIC = inclust
clusterPIC,remapdict = remapcluster(inclust,lat5,lon5,regiondict,returnremap=True)
#patcorr = calc_cluster_patcorr(clusterPIC,clusterPICALL,returnmax=True)
new_sbyclust = np.zeros(nclusters)
for k in remapdict.keys():
newclass = remapdict[k] # Class that k was remapped to
new_sbyclust[newclass-1] = s_byclust[k-1] # Reassign
print("Reassigned new class %i"%newclass)
# Recover clusterout for silhouette plotting
clusterout,knan,okpts = proc.find_nan(clusterPIC.flatten(),0)
# Plot the silhouette
fig,ax = plt.subplots(1,1)
ax = slutil.plot_silhouette(clusterout,nclusters,s,ax1=ax,cmap=regioncolors)
ax.grid(True,ls='dotted')
ax.set_title("Silhouette Plot for CESM-PiC Clusters (Year %s) \n Mean Silhouette Coefficient = %.3f" % (timestrtitle,s.mean()))
# Add dummy legend
for i in range(nclusters):
cid = i+1
ax.axvline([-100],lw=5,color=regioncolors[i],label="Cluster %i, s = %.3f"%(cid,new_sbyclust[i]))
ax.legend(fontsize=10)
ax.set_xticks(np.arange(-.2,.6,.1))
ax.set_xlim([-.25,.6])
plt.savefig("%sCESM1PIC_%s_SilhouettePlot_%s.png"%(outfigpath,expname,timestr),dpi=200,bbox_inches='tight')
# Replace silhouette into full map
silmap = np.zeros(nlat5*nlon5)*np.nan
silmap[okpts] = s
silmap = silmap.reshape(nlat5,nlon5)
proj = ccrs.PlateCarree(central_longitude=180)
fig,ax = plt.subplots(1,1,subplot_kw={'projection':proj})
ax = viz.add_coast_grid(ax)
pcm=ax.pcolormesh(lon5,lat5,silmap,vmin=-.5,vmax=.5,cmap=cmocean.cm.balance,transform=ccrs.PlateCarree())
ax.contour(lon5,lat5,silmap,levels=[0],colors='k',linewidths=0.75,transform=ccrs.PlateCarree())
#ax.pcolormesh(lon5,lat5,silmap,vmin=-.5,vmax=.5,cmap=cmocean.cm.balance,transform=ccrs.PlateCarree())
fig.colorbar(pcm,ax=ax,fraction=0.025)
ax.set_title("Silhouette Values for CESM-PiC Clusters (Year %s) \n $s_{avg}$ = %.3f" % (timestrtitle,s.mean()))
plt.savefig("%sCESM1PIC_%s_Silhouette_%s.png"%(outfigpath,expname,timestr),dpi=200,bbox_inches='tight')
# Plot some results (Clusters Themselves)
proj = ccrs.PlateCarree(central_longitude=180)
fig,ax = plt.subplots(1,1,subplot_kw={'projection':proj})
ax = viz.add_coast_grid(ax)
pcm=ax.pcolormesh(lon5,lat5,clusterPIC,cmap=cmapn,transform=ccrs.PlateCarree())
fig.colorbar(pcm,ax=ax,fraction=0.025)
ax.set_title("CESM-PiC Clusters (Year %s)" % (timestrtitle))
plt.savefig("%sCESM1PIC_%s_Clusters_%s.png"%(outfigpath,expname,timestr),dpi=200,bbox_inches='tight')
# Now Plot the Uncertainties
vlm = [-10,10]
proj = ccrs.PlateCarree(central_longitude=180)
fig,ax = plt.subplots(1,1,subplot_kw={'projection':proj})
ax = viz.add_coast_grid(ax)
pcm=ax.pcolormesh(lon5,lat5,inuncert,vmin=vlm[0],vmax=vlm[-1],cmap=cmocean.cm.balance,transform=ccrs.PlateCarree())
fig.colorbar(pcm,ax=ax,fraction=0.025)
ax.set_title(r"CESM-PIC Cluster Uncertainty $(<\sigma^{2}_{in,x}>/<\sigma^{2}_{out,x}>)$"+" \n (Year %s) " % (timestrtitle))
plt.savefig("%sCESM1PIC_%s_Uncert_%s_stest.png"%(outfigpath,expname,timestr),dpi=200,bbox_inches='tight')
#%% Sections Below are old/under construction
| [
"glenn.y.liu@gmail.com"
] | glenn.y.liu@gmail.com |
a4057c1687bbd2a89ca72fd789f29938de7b0a68 | c2842053268c1a065f8cbbac476713904da5e12c | /python/PyImporterExample.py | 8242672bd6ad1aeac4a57944a480d2c73176146e | [] | no_license | xiewendan/HelloWorld | 0741776218f44d92e54ca0bfe676401fd3c165cf | eeb11d0a0c3f584d2168d9c459fc54ddca83f654 | refs/heads/master | 2023-02-24T14:43:33.497926 | 2023-02-07T09:29:24 | 2023-02-07T09:29:24 | 82,867,548 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,374 | py | import sys
from importlib.abc import FileLoader
from importlib.machinery import FileFinder, PathFinder
from os import getcwd
from os.path import basename
from sibilant.module import prep_module, exec_module
# File suffixes recognized as sibilant source files
SOURCE_SUFFIXES = [".lspy", ".sibilant"]
# Module-local mirrors of sys.path_importer_cache / sys.path_hooks, kept
# separate so the stock import machinery's caches are not disturbed
_path_importer_cache = {}
_path_hooks = []
class SibilantPathFinder(PathFinder):
    """
    An overridden PathFinder which will hunt for sibilant files in
    sys.path. Uses storage in this module to avoid conflicts with the
    original PathFinder (see _path_importer_cache / _path_hooks above).
    """

    @classmethod
    def invalidate_caches(cls):
        # Forward invalidation to every cached per-path finder that supports it
        for finder in _path_importer_cache.values():
            if hasattr(finder, 'invalidate_caches'):
                finder.invalidate_caches()

    @classmethod
    def _path_hooks(cls, path):
        # Return the result of the first module-local hook that accepts
        # [path]; a hook signals refusal by raising ImportError.
        for hook in _path_hooks:
            try:
                return hook(path)
            except ImportError:
                continue
        else:
            # No hook accepted the path
            return None

    @classmethod
    def _path_importer_cache(cls, path):
        # Get (or create and cache) the finder for one sys.path entry.
        if path == '':
            # '' means the current directory; resolve it now
            try:
                path = getcwd()
            except FileNotFoundError:
                # Don't cache the failure as the cwd can easily change to
                # a valid directory later on.
                return None
        try:
            finder = _path_importer_cache[path]
        except KeyError:
            # Cache miss: build a finder via the hooks (may cache None)
            finder = cls._path_hooks(path)
            _path_importer_cache[path] = finder
        return finder
class SibilantSourceFileLoader(FileLoader):
    """Loads sibilant source files (see SOURCE_SUFFIXES) as modules."""

    def create_module(self, spec):
        # Returning None asks the import machinery to create a default module
        return None

    def get_source(self, fullname):
        # File contents decoded as UTF-8 sibilant source text
        return self.get_data(self.get_filename(fullname)).decode("utf8")

    def exec_module(self, module):
        # Prepare the module object, then execute the sibilant source in it
        # (prep_module / exec_module come from the sibilant package).
        name = module.__name__
        source = self.get_source(name)
        filename = basename(self.get_filename(name))
        prep_module(module)
        exec_module(module, source, filename=filename)
def _get_lspy_file_loader():
    """Return the (loader, suffixes) pair consumed by FileFinder.path_hook."""
    loader_details = (SibilantSourceFileLoader, SOURCE_SUFFIXES)
    return loader_details
def _get_lspy_path_hook():
    """Build a path hook that finds sibilant sources in a directory."""
    details = _get_lspy_file_loader()
    return FileFinder.path_hook(details)
def _install():
    # Factory that builds the real installer as a closure; the [done] flag
    # makes hook registration idempotent across repeated calls.
    done = False
    def install():
        nonlocal done
        if not done:
            # Register the sibilant path hook and meta-path finder once
            _path_hooks.append(_get_lspy_path_hook())
            sys.meta_path.append(SibilantPathFinder)
            done = True
    return install

# Replace the factory with the closure it builds, then register the
# sibilant import hooks immediately at module import time.
_install = _install()
_install()
"jinchunxie@126.com"
] | jinchunxie@126.com |
2a46943042d1f292a584285a1d041fb2aecb17d8 | b1a85a746ab50c9de17a8e2072ee319ad39f7e3e | /Task_2/Solution/Components/Data_Augmentation.py | 8a9781d5e59cf44898a30bd533ea172f4f64f33e | [] | no_license | LukeAndrewSmith/AML | 4087e28ce165bd316d97197f36bb6c12d5e2d791 | 1f30700bf825d88248ed7a38761438f094189539 | refs/heads/master | 2023-02-02T16:37:27.801817 | 2020-12-20T12:13:05 | 2020-12-20T12:13:05 | 298,624,715 | 0 | 0 | null | 2020-12-19T17:55:37 | 2020-09-25T16:27:03 | Jupyter Notebook | UTF-8 | Python | false | false | 181 | py |
import imblearn
from imblearn.over_sampling import SMOTE
def smote_resampling(X, y):
    """Rebalance (X, y) by oversampling minority classes with SMOTE.

    A fixed random_state keeps the resampling reproducible; returns the
    resampled (X, y) pair.
    """
    oversampler = SMOTE(random_state=0)
    return oversampler.fit_resample(X, y)
"boeselfrederic@gmail.com"
] | boeselfrederic@gmail.com |
5389f0f4eeec31e9af90b7b7de046de7ff9bc43a | 9221667562d5d6a88eaf83df683ecc25eb9aac89 | /account/views.py | 436717b358c6808c19b8a86931d60c3802ddf7b1 | [] | no_license | RaihanDewonEman/Online-Exam | 84c5e1969afdb631affe8ffb943945fd70d56ec3 | 5608cff22e67125f2dc805ff266865e027fe7859 | refs/heads/master | 2023-08-26T17:53:44.758653 | 2021-11-12T05:12:07 | 2021-11-12T05:12:07 | 298,227,432 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,742 | py | from django.shortcuts import render, redirect
from django.contrib.auth.models import User
from django.contrib import auth
from django.core.exceptions import ObjectDoesNotExist
# Create your views here.
def Home(request):
    """Render the landing page."""
    context = {'name': 'Home page'}
    return render(request, 'account/home.html', context)
def Signup(request):
    """Handle account registration.

    GET renders the signup form.  POST checks that the two password fields
    match and that the username is free, then creates the user, logs them
    in and redirects to the home page.

    Fix: the original had an unreachable ``print(user)`` after the
    "already registered" ``return``; the dead statement is removed.
    """
    if request.method == 'POST':
        if request.POST['userPassword'] == request.POST['userConfirmPassword']:
            try:
                # get() raising ObjectDoesNotExist means the name is free,
                # which is the path that actually registers the account.
                User.objects.get(username=request.POST['username'])
                return render(request, 'account/signup.html',
                              {'name': 'Signup page', 'error': 'User name has already been registered!'})
            except ObjectDoesNotExist:
                user = User.objects.create_user(request.POST['username'],
                                                password=request.POST['userPassword'])
                auth.login(request, user)
                return redirect('homepage')
        else:
            return render(request, 'account/signup.html',
                          {'name': 'Signup page', 'error': 'Password and confirm password donot match!'})
    else:
        return render(request, 'account/signup.html', {'name': 'Signup page'})
def Login(request):
    """Render the login form (GET) or authenticate the credentials (POST)."""
    if request.method != 'POST':
        return render(request, 'account/login.html', {'name': 'Login page'})
    user = auth.authenticate(username=request.POST['username'],
                             password=request.POST['userPassword'])
    if user is None:
        # Bad credentials: re-render the form with an error message.
        return render(request, 'account/login.html',
                      {'name': 'Login page', 'error': "Your user name and password don't match"})
    auth.login(request, user)
    return redirect('homepage')
def Logout(request):
    """Log the user out on POST, then send them back to the home page."""
    if request.method == "POST":
        auth.logout(request)
    # NOTE(review): the source's indentation is ambiguous; this assumes the
    # redirect applies to every request method so the view always returns
    # an HttpResponse -- confirm against the original file.
    return redirect('homepage')
def Profile(request):
    """Show the profile page for the logged-in user."""
    if not request.user.is_authenticated:
        # Anonymous visitors are bounced to the home page with a hint.
        return render(request, 'account/home.html',
                      {'name': 'Home page', 'error': 'Please login first!'})
    current_user = User.objects.get(username=request.user.username)
    return render(request, 'account/profilepage.html',
                  {'name': 'Profile page', 'user': current_user})
def Update_profile(request):
    """Render the edit form (GET) or persist the profile changes (POST)."""
    if not request.user.is_authenticated:
        return render(request, 'account/home.html',
                      {'name': 'Home page', 'error': 'Please login first!'})
    if request.method != 'POST':
        return render(request, 'account/updateprofile.html', {'name': 'Update Profile'})
    User.objects.filter(username=request.user.username).update(
        email=request.POST['email'],
        first_name=request.POST['fname'],
        last_name=request.POST['lname'],
    )
    return redirect('profilepage')
| [
"raihan11011997@gmail.com"
] | raihan11011997@gmail.com |
8a6670b49666ee77f08d7a315a5866b81dbe64f2 | 4edbab938229e911599706974b5ea00b593a4594 | /le_sheets_essentials.py | 742c5290653bd06c234d7caddbd241885a2cefcf | [] | no_license | there-is-no-sp00n/Google_APIs | 2283004bc734f7f00165c36a17fbd285e24c5b43 | c260d02b086fcf9e2d29d5b296881de341039995 | refs/heads/main | 2023-06-25T03:38:14.187776 | 2021-07-21T10:56:40 | 2021-07-21T10:56:40 | 381,122,091 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,284 | py | '''
Copyright 2021, Aninda Zaman, All rights reserved.
'''
#this will only work with Oshud Delivery
import os
import gspread
from oauth2client.service_account import ServiceAccountCredentials
import pprint
from send_order_info import send_order_info
# tells the program what scopes to look into
scope = ['https://spreadsheets.google.com/feeds', 'https://www.googleapis.com/auth/drive']
# these are the credentials from the API control panel, also shared with the .json file
credentials = ServiceAccountCredentials.from_json_keyfile_name('Oshud_Secret.json', scope)
# authorize with the credential before to start accessing information
gc = gspread.authorize(credentials)
# open the exact sheet with the proper title
sheet = gc.open('Oshud Delivery (COVID-19 Kit): Responses').sheet1
# Module-level snapshot of every response row (dicts keyed by the header
# row); taken once at import time and reused by the functions below.
result = sheet.get_all_records()
def get_active_rows():
    """Return the number of data rows in the sheet, excluding the header row."""
    total_rows = len(sheet.get_all_values())
    # The first row holds the column headers, so it does not count.
    return total_rows - 1
#figure out which of the rows' info haven't been confirmed
def determine_new_rows(x):
    """Return how many rows are new since the last processed total.

    :param x: current number of active (data) rows in the sheet
    :return: ``x`` minus the previously handled row count persisted in
        ``order_totz.txt``

    Fix: the file handle is now managed by a context manager, so it is
    closed even when the int() conversion raises (the original leaked the
    handle on error).
    """
    with open("order_totz.txt", "r") as counter_file:
        previously_handled = int(counter_file.readline())
    return x - previously_handled
def tong_order_prep():
    """Placeholder order-preparation hook; currently always yields 0."""
    return 0
#prepare the individual orders to send out to be confirmed
#x represents active rows
#y represensts the new rows since last check
def covid_order_prep(x, y):
    """Send confirmation info for each not-yet-processed COVID kit order.

    :param x: current number of active (data) rows in the sheet
    :param y: number of new rows since the last run
    :return: the row total that was persisted (equal to ``x``)

    Fix: the persisted-count file is now written inside a context manager
    so the handle is closed even if the write fails; the parameter ``y``
    is no longer reused as the loop counter.
    """
    # Index of the first unprocessed row in the module-level `result` cache.
    row_index = x - y
    while row_index < x:
        print("x, y: ", x, row_index)
        row = result[row_index]
        order_spec = [
            row.get('Name'),
            row.get('Phone'),
            row.get('Address'),
            row.get('Pick How Many COVID-19 Kit You Need'),
        ]
        print(order_spec)
        send_order_info(order_spec)
        row_index += 1
    # Persist how many rows have now been handled for the next run.
    with open("order_totz.txt", "w") as counter_file:
        counter_file.write(str(row_index))
    return x
'''
Copyright 2021, Aninda Zaman, All rights reserved.
'''
| [
"noreply@github.com"
] | noreply@github.com |
63f87d61e8c964d81e856f1e6f01cd937940a20b | 6b8bf10a57e1a85d2281579da9511310e39b9125 | /Exercise5/list_module.py | 653fd5fdce78508a87f475cb1e928e78a0de0a2d | [] | no_license | Hadirback/python | 9c0c5b622b18da50379d4c17df8ba68b67d452c9 | 88e03c34edb1c2f60a1624ee04b5bd975967e8ad | refs/heads/master | 2020-07-20T12:47:48.224472 | 2019-10-11T20:39:12 | 2019-10-11T20:39:12 | 206,643,640 | 0 | 1 | null | 2019-09-05T19:48:10 | 2019-09-05T19:38:58 | null | UTF-8 | Python | false | false | 629 | py | # Lesson 5 Exercise 2
import random
def get_random_elem(list):
if not list:
return None
return random.choice(list)
def fill_list():
my_list = []
while True:
elem = input('Введите элемент списка или Enter чтобы закончить ввод: ')
if not elem:
return my_list
else:
my_list.append(elem)
if __name__ == '__main__':
main_list = fill_list()
print(f'Мой список элементов: {main_list}')
print(f'Мой рандомный элемент из списка - {get_random_elem(main_list)}')
| [
"mail.evgeny.filippov@gmail.com"
] | mail.evgeny.filippov@gmail.com |
812927956fda4aaf42b40e6e663b051499964fbb | cc423a599fae5bdc319b1ede586e97289308ae07 | /libsongtext/lyricwiki.py | bf1d70c41d9c4dff44edaa05bbe89905922b6133 | [
"BSD-2-Clause"
] | permissive | ysim/songtext | 778cc74967029760994f5b6143e6f4a175f3b5db | 1806c18ea9b77dc3f065e19aa0fb88ee533d835c | refs/heads/master | 2022-12-18T12:09:46.381777 | 2019-06-21T20:28:34 | 2019-06-21T20:28:34 | 16,112,047 | 14 | 3 | BSD-2-Clause | 2022-12-08T05:46:12 | 2014-01-21T17:51:54 | Python | UTF-8 | Python | false | false | 2,377 | py | from lxml import html, etree
from lxml.html.clean import clean_html
import requests
from libsongtext.errors import ArgumentError, SearchError
from libsongtext.utils import format_song_info, output_song
# Base endpoint of the LyricWiki REST API.
API_URL = 'http://lyrics.wikia.com/api.php'
# Maps this tool's CLI argument names to the query-string parameter names
# the API expects.
SEARCH_PARAMETERS = {
    'artist': 'artist',
    'title': 'song',
}
class LyricWikiSong(object):
    """One track resolved through the LyricWiki ``getSong`` API call."""

    # Query-string defaults shared by every request.
    PARAMS = {
        'fmt': 'realjson',
        'func': 'getSong'
    }
    # The lyrics live inside this element on the song's HTML page.
    CSS_SELECTOR = '.lyricbox'

    def __init__(self, args):
        """Resolve *args* (a dict with 'artist' and 'title') to a song page.

        Raises ArgumentError when either required argument is missing, and
        SearchError when the API finds no matching page.
        """
        self.args = args
        request_params = self.PARAMS.copy()
        for k, v in SEARCH_PARAMETERS.items():
            if self.args[k] is None:
                print(
                    '\nThis API requires that you search with both the artist '
                    'name (-a, --artist) and the song title (-t, --title). '
                    'All other options will be ignored.\n\n'
                )
                raise ArgumentError
            request_params[v] = self.args[k]
        self.response = requests.get(API_URL, params=request_params)
        self.json = self.response.json()
        # An empty/zero page_id means the API found no matching track.
        if not self.json['page_id']:
            print("\nYour query did not match any tracks.\n\n")
            raise SearchError
        self.url = self.json['url']

    def get_lyrics(self):
        """Scrape, clean and return the lyrics prefixed with the song info."""
        response = requests.get(self.url)
        page_html = html.document_fromstring(response.text)
        element = page_html.cssselect(self.CSS_SELECTOR)[0]
        # Replace <br> tags with \n (prepend it with \n and then remove all
        # occurrences of <br>)
        for br in element.cssselect('br'):
            br.tail = '\n' + br.tail if br.tail else '\n'
        etree.strip_elements(element, 'br', with_tail=False)
        # Remove unneeded tags
        bad_tags = element.cssselect('.rtMatcher') + \
            element.cssselect('.lyricsbreak')
        for tag in bad_tags:
            tag.drop_tree()
        # Remove HTML comments
        real_string = etree.tostring(element, encoding="UTF-8")
        cleaned_html = clean_html(real_string)
        info_output = format_song_info(self.json['artist'], self.json['song'])
        lyric_output = html.fragment_fromstring(cleaned_html).text_content()
        return u'{}{}'.format(info_output, lyric_output)
def get_result(args):
    """Look the track up and print its lyrics (optionally without a pager)."""
    lyrics = LyricWikiSong(args).get_lyrics()
    output_song(lyrics, args['no_pager'])
| [
"opensource@yiqingsim.net"
] | opensource@yiqingsim.net |
f89be28b2adee79ecf8dd448dea45f1e162d5230 | fd1f351d6a768cdfccc5e863951f104b0461201c | /hamcwebc/settings.py | 503e0be296e6535cb18a59c8af34321b34c28d94 | [] | no_license | gurkslask/hamcwebc | b599f0a7796370c4180d0040a887399bdc37313e | 8abbe47cdd58972072d6088b63e1c368a63bb4af | refs/heads/master | 2021-01-10T22:35:52.527879 | 2017-06-01T09:26:22 | 2017-06-01T09:26:22 | 70,381,091 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,949 | py | # -*- coding: utf-8 -*-
"""Application configuration."""
import os
from datetime import timedelta
class Config(object):
    """Base configuration."""

    SECRET_KEY = os.environ.get('HAMCWEBC_SECRET', 'secret-key')  # TODO: Change me
    APP_DIR = os.path.abspath(os.path.dirname(__file__))  # This directory
    PROJECT_ROOT = os.path.abspath(os.path.join(APP_DIR, os.pardir))
    # Work factor for bcrypt password hashing; higher is slower but stronger.
    BCRYPT_LOG_ROUNDS = 13
    ASSETS_DEBUG = False
    DEBUG_TB_ENABLED = False  # Disable Debug toolbar
    DEBUG_TB_INTERCEPT_REDIRECTS = False
    CACHE_TYPE = 'simple'  # Can be "memcached", "redis", etc.
    # Silences SQLAlchemy's modification-tracking overhead warning.
    SQLALCHEMY_TRACK_MODIFICATIONS = False
class ProdConfig(Config):
    """Production configuration."""

    ENV = 'prod'
    DEBUG = False
    SQLALCHEMY_DATABASE_URI = 'postgresql://alex:bit@127.0.0.1:5432/alex'  # TODO: Change me
    DEBUG_TB_ENABLED = False  # Disable Debug toolbar
class DevConfig(Config):
    """Development configuration."""

    ENV = 'dev'
    DEBUG = True
    DB_NAME = 'dev.db'
    # Put the db file in project root
    DB_PATH = os.path.join(Config.PROJECT_ROOT, DB_NAME)
    # SQLALCHEMY_DATABASE_URI = 'sqlite:///{0}'.format(DB_PATH)
    SQLALCHEMY_DATABASE_URI = 'postgresql://alex:bit@127.0.0.1:5432/alex'  # TODO: Change me
    DEBUG_TB_ENABLED = True
    ASSETS_DEBUG = True  # Don't bundle/minify static assets
    CACHE_TYPE = 'simple'  # Can be "memcached", "redis", etc.
    # Celery message broker / result backend for background tasks.
    CELERY_BROKER_URL = 'amqp://localhost'
    CELERY_BACKEND = 'rpc://'
    # NOTE(review): the schedule key says "every_five_seconds" but the
    # timedelta is 20 seconds -- confirm which interval is intended.
    CELERYBEAT_SCHEDULE = {
        'every_five_seconds': {
            'task': 'connect_to_pi',
            'schedule': timedelta(seconds=20)
        }
    }
class TestConfig(Config):
    """Test configuration."""

    TESTING = True
    DEBUG = True
    SQLALCHEMY_DATABASE_URI = 'sqlite:///data.sqlite'
    BCRYPT_LOG_ROUNDS = 4  # For faster tests; needs at least 4 to avoid "ValueError: Invalid rounds"
    WTF_CSRF_ENABLED = False  # Allows form testing
| [
"gurkslask@gmail.com"
] | gurkslask@gmail.com |
8350f11980db9cb44191f5846907f76bee29c0a3 | 99c4d4a6592fded0e8e59652484ab226ac0bd38c | /code/batch-2/vse-naloge-brez-testov/DN13-M-065.py | d4f9aec2e9e2b28eadb12b6390cf9ff7b76a6e9f | [] | no_license | benquick123/code-profiling | 23e9aa5aecb91753e2f1fecdc3f6d62049a990d5 | 0d496d649247776d121683d10019ec2a7cba574c | refs/heads/master | 2021-10-08T02:53:50.107036 | 2018-12-06T22:56:38 | 2018-12-06T22:56:38 | 126,011,752 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,396 | py | from math import fabs
class Minobot:
    """Turtle-style robot on an integer grid, with undo support.

    The robot starts at the origin facing +x.  Headings cycle through
    0: +x, 1: -y, 2: -x, 3: +y; every mutating command first snapshots
    the current pose so it can be undone with razveljavi().
    """

    def __init__(self):
        self.x = 0
        self.y = 0
        self.direction = 0
        # Per-heading unit steps along each axis (index = heading).
        self.x_direction_coefficient = [1, 0, -1, 0]
        self.y_direction_coefficient = [0, -1, 0, 1]
        # Stack of pose snapshots taken before every mutating command.
        self.states = []

    def get_current_state(self):
        """Return the current pose as a snapshot dict."""
        return {'x': self.x, 'y': self.y, 'direction': self.direction}

    def save_current_state(self):
        """Push the current pose onto the undo stack."""
        self.states.append(self.get_current_state())

    def change_direction(self, direction):
        """Rotate by *direction* quarter turns (snapshotting first)."""
        self.save_current_state()
        self.direction = (self.direction + direction) % 4

    def levo(self):
        """Turn left (counter-clockwise)."""
        self.change_direction(-1)

    def desno(self):
        """Turn right (clockwise)."""
        self.change_direction(1)

    def naprej(self, d):
        """Move *d* units forward along the current heading."""
        self.save_current_state()
        step_x = self.x_direction_coefficient[self.direction]
        if step_x:
            self.x += d * step_x
        else:
            self.y += d * self.y_direction_coefficient[self.direction]

    def razveljavi(self):
        """Undo the most recent command; does nothing when there is none."""
        if not self.states:
            return
        snapshot = self.states.pop()
        self.x = snapshot['x']
        self.y = snapshot['y']
        self.direction = snapshot['direction']

    def razdalja(self):
        """Manhattan distance from the origin."""
        return abs(self.x) + abs(self.y)

    def koordinate(self):
        """Return the current (x, y) position."""
        return self.x, self.y
| [
"benjamin.fele@gmail.com"
] | benjamin.fele@gmail.com |
ee2adfc989da6719ce249ae302c01aec142a648f | 1dfc1b538bd5c22468d9cbeaba5e16b9d6985518 | /ws-img/app/views/image_view.py | 6c031e954b03dcfc9e198c593255bbf3b94b0eb2 | [] | no_license | Deyveson/Project-Article | a0dce5a50e42f2ba8b5df74b82eab3b57044f43e | 6324f56985dad63b6b5939599f1a1740b2bd80f3 | refs/heads/master | 2022-11-22T21:10:24.333988 | 2020-06-25T17:09:50 | 2020-06-25T17:09:50 | 216,874,662 | 1 | 0 | null | 2022-11-22T04:59:32 | 2019-10-22T17:44:04 | Python | UTF-8 | Python | false | false | 978 | py | from flask import send_file
from flask_restplus import Resource
from app.py_image import app, ns, api
from app.services import imageService
@ns.route('/version')
class Version(Resource):
    # Read-only endpoint exposing the API version string.
    def get(self):
        """
        Version project.
        """
        return api.version
@ns.route('/get_image/<id>')
class ImageDefault(Resource):
    def get(self, id):
        """Serve the static "ok" image when id is '1', the "error" image otherwise."""
        image_name = 'ok.jpg' if id == '1' else 'error.jpg'
        filename = app.config['DIRETORIO'] + image_name
        return send_file(filename, mimetype='image/jpg')
@ns.route('/findImage/<name>/<codigo>')
class Compact(Resource):
    # Looks an image up (and compacts it) by its name and code.
    def get(self, name, codigo):
        response = imageService.compactImage(name, codigo)
        return response
@ns.route('/save/<name>/<codigo>/<base64>')
class Compact(Resource):
    # NOTE(review): this class reuses the name `Compact` from the route
    # above; the route decorator registers each class at definition time so
    # both endpoints still work, but the duplicate should be renamed (e.g.
    # `Save`) to avoid confusion.
    def post(self, name, codigo, base64):
        # Persists the base64-encoded image under the given name/code.
        response = imageService.imageSave(name, codigo, base64)
        return response
| [
"deyvesonn@outlook.com"
] | deyvesonn@outlook.com |
3d70438dff4ccacb8dc111623cddb2e69d1930fc | 3405ec23e8e1b7fbeffcef144b8994da4dd5b227 | /src/gui.py | 314e67a64fb7b02a216dbfc0896f5edc6920e9e5 | [] | no_license | Trekki03/Titration-Analyzer | 684fd582cd40315a3a892f133a7d3b5b927be6bf | 5542ae1a9b910cc4446e87efd024a341554b650a | refs/heads/main | 2023-08-11T04:17:43.448960 | 2021-10-11T07:47:21 | 2021-10-11T07:47:21 | 415,121,770 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,674 | py | # -*- coding: utf-8 -*-
"""
First created on 10/10/2021
@author: github.com/Trekki03
"""
import tkinter as tk
import matplotlib as plt
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
import plotter
import analyzer
import fileHandler
import linearFunction
class Window:
    """Main application window of the Titration-Analyzer.

    Lets the user pick a titration CSV file, enter the sample volume and
    titer concentration, then computes and plots the equivalence point.

    Fixes: the calculate() guard compared the bound method
    ``self.fileUrl.get`` to a string instead of calling it (so it always
    passed), and the label next to the computed sample concentration
    repeated the titer-concentration caption.
    """

    def __init__(self, root):
        """constructor

        Args:
            root (return of tk.Tk()): root object of the window
        """
        self.root = root
        # Init window
        self.root.title("Titration-Analyzer")
        self.root.resizable(False, False)
        # Add vars
        self.vSample = 0.0             # sample volume in ml
        self.cTiter = 0.0              # titer concentration in mol/l
        self.cSample = tk.StringVar()  # computed sample concentration (display text)
        self.fileUrl = tk.StringVar()  # selected CSV file path
        self.csvData = ([], [])        # (x, y) series loaded from the CSV
        self.fileUrlLable = None
        self.sampleVIdentifierLable = None
        self.titerCIdentifierLable = None
        self.sampleCIdentifierLable = None
        self.sampleCLable = None
        self.selectFileButton = None
        self.calculateButton = None
        self.sampleVEntry = None
        self.titerCEntry = None
        self.chart = None
        # Init vars
        self.cSample.set("not calculated yet")
        self.fileUrl.set("no file selected")
        # Create UI elements
        self.fileUrlLable = tk.Label(self.root, textvariable=self.fileUrl)
        self.sampleVIdentifierLable = tk.Label(self.root, text="Sample Volume (ml):")
        self.titerCIdentifierLable = tk.Label(self.root, text="Titer Concentration (mol/l):")
        # BUGFIX: this label captions the computed *sample* concentration
        # (grid row 4); it previously repeated "Titer Concentration (mol/l):".
        self.sampleCIdentifierLable = tk.Label(self.root, text="Sample Concentration (mol/l):")
        self.sampleCLable = tk.Label(self.root, textvariable=self.cSample)
        self.selectFileButton = tk.Button(self.root, text="selectFile", command=self.selectFile)
        self.calculateButton = tk.Button(self.root, text="Calculate", command=self.calculate)
        self.sampleVEntry = tk.Entry(self.root, width=10)
        self.titerCEntry = tk.Entry(self.root, width=10)
        self.createUiElementFromFigure(plotter.createEmptyFigure())
        # Position UI elements (the chart is gridded in createUiElementFromFigure)
        self.fileUrlLable.grid(row=0, column=0)
        self.selectFileButton.grid(row=0, column=1)
        self.sampleVIdentifierLable.grid(row=1, column=0)
        self.sampleVEntry.grid(row=1, column=1)
        self.titerCIdentifierLable.grid(row=2, column=0)
        self.titerCEntry.grid(row=2, column=1)
        self.calculateButton.grid(row=3, column=1)
        self.sampleCIdentifierLable.grid(row=4, column=0)
        self.sampleCLable.grid(row=4, column=1)

    def createUiElementFromFigure(self, figure: plt.figure):
        """turn an matplotlib figure into an ui element and saves it in self.chart

        Args:
            figure (plt.figure): matplotlib figure
        """
        self.chart = FigureCanvasTkAgg(figure, self.root)
        self.chart.get_tk_widget().grid(row=5, column=0, columnspan=2)
        return None

    def calculate(self):
        """Analyze the loaded CSV data and plot readings, slope line and equivalence point."""
        self.vSample = float(self.sampleVEntry.get())
        self.cTiter = float(self.titerCEntry.get())
        # BUGFIX: the original compared the bound method `self.fileUrl.get`
        # (which can never equal a string) instead of calling it, so this
        # guard always passed even when no file was selected.
        if ((self.fileUrl.get() != "no file selected") and (self.vSample != 0.0)) and (self.cTiter != 0.0):
            pointsOfHighestSlope = analyzer.getPointsOfHighestSlope(self.csvData[0], self.csvData[1])
            linFunction = linearFunction.getFunctionBetweenToPoints(pointsOfHighestSlope[0], pointsOfHighestSlope[1], pointsOfHighestSlope[2], pointsOfHighestSlope[3])
            zeroPoint = linearFunction.getZeroPoint(linFunction[0], linFunction[1])
            highPoint = linearFunction.getXforY(linFunction[0], linFunction[1], self.csvData[1][analyzer.getHighestValueOfArray(self.csvData[0])[0]])
            print(highPoint)
            linPlot = linearFunction.createFunctionValuesBetweenToXValues(linFunction[0], linFunction[1], zeroPoint, highPoint)
            # Equivalence point = midpoint of the steepest segment on the fit line.
            equiPoint = linearFunction.getMiddlePointBetweenTwoPointsOnLinearFunction(pointsOfHighestSlope[0], pointsOfHighestSlope[2], linFunction[0], linFunction[1])
            self.cSample.set(analyzer.calculateConcentrationOfSampleSolution(self.cTiter, equiPoint[0], self.vSample))
            plot = plotter.createEmptyFigureWithSubplot()
            plotter.addDataToSubplot(plot[1], self.csvData[0], self.csvData[1], color="blue", marker="x", label="titration readings")
            plotter.addDataToSubplot(plot[1], linPlot[0], linPlot[1], color="green", linestyle="dashed", label="highest slope")
            plotter.addDataToSubplot(plot[1], equiPoint[0], equiPoint[1], color="red", marker="o", label="equivalence point")
            plotter.enableGridForSubplot(plot[1])
            plotter.enableLegendForSubplot(plot[1])
            self.createUiElementFromFigure(plot[0])
        return None

    def selectFile(self):
        """Open a file dialog, remember the chosen CSV path and load its data."""
        self.fileUrl.set(tk.filedialog.askopenfilename(initialdir="Desktop", title="Select Data", filetypes=[("CSV Files", "*.csv")]))
        self.csvData = fileHandler.loadFilesFromCsv(self.fileUrl.get())
        return None
| [
"33525119+Trekki03@users.noreply.github.com"
] | 33525119+Trekki03@users.noreply.github.com |
6047a87b91eac506db9f9f35c3187806f051ffc0 | d5a1d87ce0162f0767f557c15a8081473d7e167a | /tools/SGD.py | f648b97e33a0fbfdc080ae269f2dcd5970e756a8 | [
"MIT"
] | permissive | Yonghongwei/reid-strong-baseline | bda4a7a6901e7320b4271e6a5a1dd6edd43c33b1 | 64d8f8c1b8d988a598071bb5ed15fd3fcc200dfb | refs/heads/master | 2022-06-19T10:40:53.092057 | 2020-05-07T08:30:31 | 2020-05-07T08:30:31 | 255,493,153 | 0 | 0 | null | 2020-04-14T02:43:17 | 2020-04-14T02:43:16 | null | UTF-8 | Python | false | false | 13,636 | py | import torch
from torch.optim.optimizer import Optimizer, required
class SGD_GCC(Optimizer):
    """SGD with momentum plus Gradient Centralization for conv layers only.

    Before the update, every 4-D (conv) gradient is shifted to zero mean
    over its non-output dimensions; other parameters get plain SGD.
    """

    def __init__(self, params, lr=required, momentum=0, dampening=0,
                 weight_decay=0, nesterov=False):
        # Hyper-parameter validation mirrors torch.optim.SGD.
        if lr is not required and lr < 0.0:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if momentum < 0.0:
            raise ValueError("Invalid momentum value: {}".format(momentum))
        if weight_decay < 0.0:
            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
        defaults = dict(lr=lr, momentum=momentum, dampening=dampening,
                        weight_decay=weight_decay, nesterov=nesterov)
        if nesterov and (momentum <= 0 or dampening != 0):
            raise ValueError("Nesterov momentum requires a momentum and zero dampening")
        super(SGD_GCC, self).__init__(params, defaults)

    def __setstate__(self, state):
        # Older checkpoints may lack the 'nesterov' key; default it off.
        super(SGD_GCC, self).__setstate__(state)
        for group in self.param_groups:
            group.setdefault('nesterov', False)

    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            weight_decay = group['weight_decay']
            momentum = group['momentum']
            dampening = group['dampening']
            nesterov = group['nesterov']

            for p in group['params']:
                if p.grad is None:
                    continue
                d_p = p.grad.data
                if weight_decay != 0:
                    # L2 regularization folded into the gradient (legacy
                    # alpha-first add_ call order).
                    d_p.add_(weight_decay, p.data)

                #GC operation for Conv layers
                if len(list(d_p.data.size()))==4:
                    # Subtract the mean taken over dims 1..3 (everything but
                    # the output-channel dimension).
                    d_p.add_(-d_p.mean(dim = 1, keepdim = True).mean(dim = 2, keepdim = True).mean(dim = 3, keepdim = True))

                if momentum != 0:
                    param_state = self.state[p]
                    if 'momentum_buffer' not in param_state:
                        buf = param_state['momentum_buffer'] = torch.clone(d_p).detach()
                    else:
                        buf = param_state['momentum_buffer']
                        buf.mul_(momentum).add_(1 - dampening, d_p)
                    if nesterov:
                        d_p = d_p.add(momentum, buf)
                    else:
                        d_p = buf

                p.data.add_(-group['lr'], d_p)

        return loss
class SGD_GC(Optimizer):
    """SGD with momentum plus Gradient Centralization for conv AND FC layers.

    Any parameter with more than one dimension has its gradient shifted to
    zero mean over all dimensions except dim 0 before the update.
    """

    def __init__(self, params, lr=required, momentum=0, dampening=0,
                 weight_decay=0, nesterov=False):
        # Hyper-parameter validation mirrors torch.optim.SGD.
        if lr is not required and lr < 0.0:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if momentum < 0.0:
            raise ValueError("Invalid momentum value: {}".format(momentum))
        if weight_decay < 0.0:
            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
        defaults = dict(lr=lr, momentum=momentum, dampening=dampening,
                        weight_decay=weight_decay, nesterov=nesterov)
        if nesterov and (momentum <= 0 or dampening != 0):
            raise ValueError("Nesterov momentum requires a momentum and zero dampening")
        super(SGD_GC, self).__init__(params, defaults)

    def __setstate__(self, state):
        # Older checkpoints may lack the 'nesterov' key; default it off.
        super(SGD_GC, self).__setstate__(state)
        for group in self.param_groups:
            group.setdefault('nesterov', False)

    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            weight_decay = group['weight_decay']
            momentum = group['momentum']
            dampening = group['dampening']
            nesterov = group['nesterov']

            for p in group['params']:
                if p.grad is None:
                    continue
                d_p = p.grad.data
                if weight_decay != 0:
                    d_p.add_(weight_decay, p.data)

                #GC operation for Conv layers and FC layers
                length=len(list(p.data.size()))
                if length>1:
                    # Reduce over every dimension except dim 0 to get the
                    # per-output mean, then subtract it from the gradient.
                    m_grad=d_p
                    for i in range(length-1):
                        m_grad=m_grad.mean(i+1,keepdim=True)
                    d_p.add_(-m_grad)

                if momentum != 0:
                    param_state = self.state[p]
                    if 'momentum_buffer' not in param_state:
                        buf = param_state['momentum_buffer'] = torch.clone(d_p).detach()
                    else:
                        buf = param_state['momentum_buffer']
                        buf.mul_(momentum).add_(1 - dampening, d_p)
                    if nesterov:
                        d_p = d_p.add(momentum, buf)
                    else:
                        d_p = buf

                p.data.add_(-group['lr'], d_p)

        return loss
class SGDW(Optimizer):
    """SGD with momentum and *decoupled* weight decay (SGDW).

    Unlike plain SGD, the weight-decay term is not folded into the
    gradient; instead lr * weight_decay * w_old is subtracted from the
    weights after the gradient step, using a snapshot of the pre-step
    weights.
    """

    def __init__(self, params, lr=required, momentum=0, dampening=0,
                 weight_decay=0, nesterov=False):
        # Hyper-parameter validation mirrors torch.optim.SGD.
        if lr is not required and lr < 0.0:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if momentum < 0.0:
            raise ValueError("Invalid momentum value: {}".format(momentum))
        if weight_decay < 0.0:
            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
        defaults = dict(lr=lr, momentum=momentum, dampening=dampening,
                        weight_decay=weight_decay, nesterov=nesterov)
        if nesterov and (momentum <= 0 or dampening != 0):
            raise ValueError("Nesterov momentum requires a momentum and zero dampening")
        super(SGDW, self).__init__(params, defaults)

    def __setstate__(self, state):
        # Older checkpoints may lack the 'nesterov' key; default it off.
        super(SGDW, self).__setstate__(state)
        for group in self.param_groups:
            group.setdefault('nesterov', False)

    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            weight_decay = group['weight_decay']
            momentum = group['momentum']
            dampening = group['dampening']
            nesterov = group['nesterov']

            for p in group['params']:
                if p.grad is None:
                    continue
                d_p = p.grad.data
                # Snapshot of the pre-update weights for the decoupled
                # weight-decay term below.
                old = torch.clone(p.data).detach()

                # Coupled L2 decay intentionally disabled (decay is applied
                # after the gradient step instead).
                #if weight_decay != 0:
                #    d_p.add_(weight_decay, p.data)

                if momentum != 0:
                    param_state = self.state[p]
                    if 'momentum_buffer' not in param_state:
                        buf = param_state['momentum_buffer'] = torch.zeros_like(p.data)
                        buf.mul_(momentum).add_(d_p)
                    else:
                        buf = param_state['momentum_buffer']
                        buf.mul_(momentum).add_(1 - dampening, d_p)
                    if nesterov:
                        d_p = d_p.add(momentum, buf)
                    else:
                        d_p = buf

                p.data.add_(-group['lr'], d_p)
                if weight_decay != 0:
                    # Decoupled decay: shrink toward zero using the weights
                    # as they were before this step.
                    p.data.add_(-weight_decay*group['lr'], old)

        return loss
class SGD_CC(Optimizer):
    """SGD with momentum plus post-update weight averaging on 3x3 conv kernels.

    After the normal SGD step, for every 4-D weight whose kernel height is
    3 the four corner positions of each 3x3 kernel are replaced by their
    mean, and likewise the four edge positions; the centre is untouched.
    """

    def __init__(self, params, lr=required, momentum=0, dampening=0,
                 weight_decay=0, nesterov=False):
        # Hyper-parameter validation mirrors torch.optim.SGD.
        if lr is not required and lr < 0.0:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if momentum < 0.0:
            raise ValueError("Invalid momentum value: {}".format(momentum))
        if weight_decay < 0.0:
            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
        defaults = dict(lr=lr, momentum=momentum, dampening=dampening,
                        weight_decay=weight_decay, nesterov=nesterov)
        if nesterov and (momentum <= 0 or dampening != 0):
            raise ValueError("Nesterov momentum requires a momentum and zero dampening")
        super(SGD_CC, self).__init__(params, defaults)

    def __setstate__(self, state):
        # Older checkpoints may lack the 'nesterov' key; default it off.
        super(SGD_CC, self).__setstate__(state)
        for group in self.param_groups:
            group.setdefault('nesterov', False)

    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            weight_decay = group['weight_decay']
            momentum = group['momentum']
            dampening = group['dampening']
            nesterov = group['nesterov']

            for p in group['params']:
                if p.grad is None:
                    continue
                d_p = p.grad.data
                if weight_decay != 0:
                    d_p.add_(weight_decay, p.data)

                if momentum != 0:
                    param_state = self.state[p]
                    if 'momentum_buffer' not in param_state:
                        buf = param_state['momentum_buffer'] = torch.clone(d_p).detach()
                    else:
                        buf = param_state['momentum_buffer']
                        buf.mul_(momentum).add_(1 - dampening, d_p)
                    if nesterov:
                        d_p = d_p.add(momentum, buf)
                    else:
                        d_p = buf

                p.data.add_(-group['lr'], d_p)

                weight_size=p.data.size()
                length=len(list(p.data.size()))
                if length==4:
                    if weight_size[2]==3:
                        # Flatten each 3x3 kernel to 9 positions, then tie
                        # the corners ([0,2,6,8]) and the edges ([1,3,5,7])
                        # to their respective means.
                        p.data=p.data.view(weight_size[0],weight_size[1],weight_size[2]*weight_size[3])
                        p.data[:,:,[0,2,6,8]]=p.data[:,:,[0,2,6,8]].mean(dim=2,keepdim=True).expand_as(p.data[:,:,[0,2,6,8]])
                        p.data[:,:,[1,3,5,7]]=p.data[:,:,[1,3,5,7]].mean(dim=2,keepdim=True).expand_as(p.data[:,:,[1,3,5,7]])
                        p.data=p.data.view(weight_size[0],weight_size[1],weight_size[2],weight_size[3])

        return loss
class SGDW_GCC(Optimizer):
    """SGDW (decoupled weight decay) combined with Gradient Centralization
    on conv layers: 4-D gradients are zero-meaned over their non-output
    dimensions before the momentum/update step.
    """

    def __init__(self, params, lr=required, momentum=0, dampening=0,
                 weight_decay=0, nesterov=False):
        # Hyper-parameter validation mirrors torch.optim.SGD.
        if lr is not required and lr < 0.0:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if momentum < 0.0:
            raise ValueError("Invalid momentum value: {}".format(momentum))
        if weight_decay < 0.0:
            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
        defaults = dict(lr=lr, momentum=momentum, dampening=dampening,
                        weight_decay=weight_decay, nesterov=nesterov)
        if nesterov and (momentum <= 0 or dampening != 0):
            raise ValueError("Nesterov momentum requires a momentum and zero dampening")
        super(SGDW_GCC, self).__init__(params, defaults)

    def __setstate__(self, state):
        # Older checkpoints may lack the 'nesterov' key; default it off.
        super(SGDW_GCC, self).__setstate__(state)
        for group in self.param_groups:
            group.setdefault('nesterov', False)

    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            weight_decay = group['weight_decay']
            momentum = group['momentum']
            dampening = group['dampening']
            nesterov = group['nesterov']

            for p in group['params']:
                if p.grad is None:
                    continue
                d_p = p.grad.data
                # Pre-update snapshot for the decoupled weight-decay term.
                old = torch.clone(p.data).detach()

                # Coupled L2 decay intentionally disabled (see the decoupled
                # decay applied after the gradient step below).
                #if weight_decay != 0:
                #    d_p.add_(weight_decay, p.data)

                # GC: zero-mean 4-D (conv) gradients over dims 1..3.
                if len(list(d_p.data.size()))==4:
                    d_p.add_(-d_p.mean(dim = 1, keepdim = True).mean(dim = 2, keepdim = True).mean(dim = 3, keepdim = True))

                if momentum != 0:
                    param_state = self.state[p]
                    if 'momentum_buffer' not in param_state:
                        buf = param_state['momentum_buffer'] = torch.zeros_like(p.data)
                        buf.mul_(momentum).add_(d_p)
                    else:
                        buf = param_state['momentum_buffer']
                        buf.mul_(momentum).add_(1 - dampening, d_p)
                    if nesterov:
                        d_p = d_p.add(momentum, buf)
                    else:
                        d_p = buf

                p.data.add_(-group['lr'], d_p)
                if weight_decay != 0:
                    # Decoupled decay against the pre-step weights.
                    p.data.add_(-weight_decay*group['lr'], old)

        return loss
| [
"noreply@github.com"
] | noreply@github.com |
1cf60a4eca3f70277691e357f9944a1180adcaf2 | 6e5a095b273497368cc63a8a38ca31324b990555 | /test.py | 0e9ddc06d21f650c472cb71f1f912b924b0f306c | [] | no_license | kalanod/bot | ca94b2651ec9e87fd57481f0f241c781b7c05988 | 4ef398b2c15b997b0f8f6f4530d153bd12b7de68 | refs/heads/master | 2023-04-10T16:01:15.763758 | 2021-05-01T17:20:20 | 2021-05-01T17:20:20 | 360,224,726 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 166 | py | import itertools
from random import randint
# All 5-character permutations of the 13 allowed characters:
# 13 * 12 * 11 * 10 * 9 = 154440 of them, valid indices 0..154439.
a = list(itertools.permutations('QFS01LVmOPZ4G', 5))
# BUGFIX: randint's upper bound is inclusive, so the original
# randint(1, 154440) could index one past the end of the list (and could
# never select index 0).  Pick a valid random index instead.
rand = randint(0, len(a) - 1)
key1 = "".join(a[rand])
"336633563aA"
] | 336633563aA |
5789a966ed6ae948e41d390b54e40df3e87ec159 | 86e292a80731c6824fa410df41761db6a07939ca | /prodscripts/QuantStudio_hamilton.py | 095084284e732e78af7f0a2611b6bd4994035796 | [
"MIT"
] | permissive | EdinburghGenomics/clarity_scripts | 447b38ed4c4ff62abb65e421cbfbd43c0286035d | bc9c0ab400d4c47d2c2e43519dfe50360cf88dd4 | refs/heads/master | 2022-12-07T06:29:23.073447 | 2022-03-30T16:30:17 | 2022-03-30T16:30:17 | 50,435,598 | 2 | 0 | MIT | 2022-11-22T10:25:22 | 2016-01-26T14:54:00 | Python | UTF-8 | Python | false | false | 3,265 | py | #!/usr/bin/env python
__author__ = 'dcrawford'
from EPPs import glsapiutil
from xml.dom.minidom import parseString
from optparse import OptionParser
api = None
def limslogic():
    """Build the Hamilton driver CSV mapping each input well to its output well.

    Reads the step details from the Clarity LIMS API (via the module-level
    `api` and `options` set up in main()) and writes the result to
    options.resultfile.
    """

    def writecsv():
        # Emit one row per artifact (sorted below); container names and
        # wells come from the closure variables resolved by the caller.
        content = ['Input Plate, Input Well, Output Plate, Output Well']
        for art in sortedArtifacts:
            inputContainerName = ConName[ inputMap[ art ][0] ]
            inputWell = inputMap[ art ][1]
            outputArt = iomap[ art ]
            outputContainerName = ConName[ inputMap[ outputArt ][0] ]
            outputWell = inputMap[ outputArt ][1]
            content.append( ','.join( [ inputContainerName,
                                        inputWell,
                                        outputContainerName,
                                        outputWell ]) )
        content = '\n'.join( content )
        file = open( options.resultfile , "w" )
        file.write( content )
        file.close()

    # Gather the required Data from LIMS
    stepdetailsXML = api.GET( options.stepURI + "/details" )
    stepdetails = parseString( stepdetailsXML )

    ## Create the input output map
    iomap = {}
    for io in stepdetails.getElementsByTagName( "input-output-map" ):
        output = io.getElementsByTagName( "output" )[0]
        # Only Analyte outputs participate in the plate transfer.
        if output.getAttribute( "type" ) == "Analyte":
            input = io.getElementsByTagName( "input" )[0]
            iomap[ input.getAttribute( "uri" ) ] = output.getAttribute( "uri" )

    artifacts = parseString( api.getArtifacts( list(iomap.keys()) + list(iomap.values())) )

    ## Map the locations of the artfacts
    inputMap = {}
    for art in artifacts.getElementsByTagName( "art:artifact" ):
        # Strip the "?state=..." suffix so URIs match the iomap keys.
        artURI = art.getAttribute( "uri" ).split("?state")[0]
        location = art.getElementsByTagName( "container" )[0].getAttribute( "uri" )
        well = art.getElementsByTagName( "value" )[0].firstChild.data
        inputMap[ artURI ] = [ location, well ]

    # Gather the names of the containers
    ConName = {}
    for c in parseString( api.getContainers( list( set([ c[0] for c in list(inputMap.values()) ])) )).getElementsByTagName("con:container"):
        ConName[ c.getAttribute( "uri" ) ] = c.getElementsByTagName("name")[0].firstChild.data

    ## sort the artifacts by the container first, then the well location
    def con_well(x):
        w = inputMap[x][1]
        if len( inputMap[x][1] ) == 3:
            #w = w[:2] + "0" + w[2:] ## row first
            w = "0" + w[2:] + w[:2] ## column first
        return inputMap[x][0] + w

    sortedArtifacts = sorted( list(iomap.keys()), key=con_well)

    writecsv()
def setupArguments():
Parser = OptionParser()
Parser.add_option('-u', "--username", action='store', dest='username')
Parser.add_option('-p', "--password", action='store', dest='password')
Parser.add_option('-s', "--stepURI", action='store', dest='stepURI')
Parser.add_option('-r', "--resultfile", action='store', dest='resultfile')
return Parser.parse_args()[0]
def main():
global options
options = setupArguments()
global api
api = glsapiutil.glsapiutil2()
api.setURI( options.stepURI )
api.setup( options.username, options.password )
limslogic()
if __name__ == "__main__":
main()
| [
"timothee.cezard@ed.ac.uk"
] | timothee.cezard@ed.ac.uk |
b8317e4db558cc1098bd224c458d1d8f2c3b1ddf | b22de9d0ca277bf9d12cb8bc847cd3964618746c | /get_movie_board.py | 758a94bd773fffde200e1e07953d42a670c3626c | [] | no_license | Proudgeorge/Web-Crawler | e32ea7327449d6598c13264192eb6933891e234f | 5d515511fead4035a8c727a679eaaeebf403cade | refs/heads/master | 2020-03-31T15:38:11.068390 | 2018-10-10T01:56:47 | 2018-10-10T01:56:47 | 152,344,827 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,399 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
__author__ = 'CZH'
import requests
import re
import pprint
import json
import time
from requests.exceptions import RequestException
'''
作者:陈自豪
功能:抓取猫眼上评分前100的电影名称、时间、评分、图片等信息
版本:1.0
日期:09/10/2018
'''
def get_one_page(url):
'''
抓取一页的信息
:param url:
:return:
'''
try:
headers = {
'User-Agent': 'Mozilla/5.0(Macintosh;Intel Mac OS X 10_11_4)AppleWebKit/537.76(KHTML,like Gecko)'
'Chrome/52.0.2743.116 Safari/537.36'
}
response = requests.get(url,headers=headers)
if response.status_code == 200:
return response.text
return None
except RequestException:
return None
def parse_one_page(html):
'''
利用正则化表达式获取电影排名、图片、电影名、主演、上映时间、评分等信息
:param html:
:return:
'''
pattern = re.compile(
'<dd>.*?board-index.*?>(.*?)</i>.*?data-src="(.*?)".*?name.*?a.*?>(.*?)</a>.*?star.*?>(.*?)</p>'
'.*?releasetime.*?>(.*?)</p>.*?integer.*?>(.*?)</i>.*?fraction.*?>(.*?)</i>.*?</dd>',re.S)
items = re.findall(pattern,html)
for item in items:
yield {
'index':item[0],
'image':item[1],
'title':item[2].strip(),
'actor':item[3].strip()[3:] if len(item[3]) > 3 else '',
'time':item[4].strip()[5:] if len(item[4]) > 5 else '',
'score':item[5].strip()+item[6].strip()
}
# pprint.pprint(items)
def write_to_file(content):
'''
将爬取的内容整理好后写入文件
:param content:
:return:
'''
with open('movie_board.txt','a',encoding='utf_8') as f:
print(type(json.dumps(content)))
f.write(json.dumps(content,ensure_ascii=False)+'\n')
def main(offset):
url = 'http://maoyan.com/board/4?offset='+str(offset)
html = get_one_page(url)
# print(html)
parse_one_page(html)
for item in parse_one_page(html):
print(item)
write_to_file(item)
if __name__ == '__main__':
for i in range(10):
main(offset=i*10)
time.sleep(1) #为了应对反爬虫设置延时等待
| [
"noreply@github.com"
] | noreply@github.com |
93b8938a179dfae461fe0665037e5001a40af79d | db9a37dd50b90eb1c910ddf4f9cf97baa2cdb1b8 | /old/vis_html/vis.py | d6e738426b1a2a31dc8d874e30b49e5cbd0b2ce4 | [] | no_license | turbinenreiter/edurocket | f245767645d8187d8a8a1ea2864138affa925278 | b5088ff9a1835f9960ee8869193898322f7ebc21 | refs/heads/master | 2021-01-19T06:35:45.973124 | 2018-06-10T14:12:09 | 2018-06-10T14:12:09 | 25,205,361 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,681 | py | import matplotlib as mpl
#mpl.use('Agg') #'WebAgg'
mpl.rcParams.update({'font.size': 12})
import numpy as np
import matplotlib.pyplot as plt
import mpld3
import sys
def main(filename):
# read file
data = np.genfromtxt(filename,delimiter=';',names=True)
t = data['t_start']-data['t_start'][0]
uax, uay, uaz = [a for a in (data['uax'],data['uay'],data['uaz'])]
h = data['h']
trigger = data['trigger']
# analyze
# max altitude
dh_max = max(h)-min(h)
t_hmax_i = h.argmax()
t_hmax = t[t_hmax_i]
# parachute trigger
t_trigger_i = np.where(trigger==255.)[0][0]
t_trigger = t[t_trigger_i]
# plot
fig, ((accel, uaccel), (altitude, gyro)) = plt.subplots(nrows=2, ncols=2, sharex=True)
fig.set_facecolor('white')
# These are the "Tableau 20" colors
c = {}
tableau20 = [(31, 119, 180), (174, 199, 232), (255, 127, 14), (255, 187, 120),
(44, 160, 44), (152, 223, 138), (214, 39, 40), (255, 152, 150),
(148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148),
(227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199),
(188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229)]
colornames = ['blue','lblue','orange','lorange','green','lgreen','red','lred',
'purple','lpurple','brown','lbrown','pink','lpink','grey','lgrey',
'seaweed','lseaweed','turqois','lturqois']
for i in range(len(tableau20)):
r, g, b = tableau20[i]
c[colornames[i]] = (r / 255., g / 255., b / 255.)
# accel
accel.set_title('accelerations', fontsize=22)
accel.plot(t, data['ax'], color=c['lblue'])
accel.plot(t, data['ay'], color=c['lred'])
accel.plot(t, -data['az'], color=c['green'])
accel.legend(['ax','ay','az'], frameon=False)
accel.spines['top'].set_visible(False)
accel.spines['right'].set_visible(False)
accel.spines['bottom'].set_visible(False)
accel.tick_params(bottom='off', top='off', right='off')
accel.set_ylabel('g', rotation='horizontal')
accel.plot(t_trigger, -data['az'][t_trigger_i], color=c['red'], marker='o')
accel.axhline(color='k')
accel.axvline(t_hmax, color=c['lturqois'])
# uaccel
uaccel.set_title('uaccelerations', fontsize=22)
uaccel.plot(t, uax, color=c['lblue'])
uaccel.plot(t, uay, color=c['lred'])
uaccel.plot(t, uaz, color=c['green'])
uaccel.legend(['uax','uay','uaz'], frameon=False)
uaccel.yaxis.tick_right()
uaccel.yaxis.set_label_position("right")
uaccel.spines['top'].set_visible(False)
uaccel.spines['left'].set_visible(False)
uaccel.spines['bottom'].set_visible(False)
uaccel.tick_params(bottom='off', top='off', right='off')
uaccel.set_ylabel('g', rotation='horizontal')
uaccel.plot(t_trigger, uaz[t_trigger_i], color=c['red'], marker='o')
uaccel.axhline(color='k')
uaccel.axvline(t_hmax, color=c['lturqois'])
# altitude
altitude.set_title('altitude', fontsize=22)
altitude.plot(t, h, color=c['blue'])
altitude.legend(['h'], frameon=False)
altitude.spines['top'].set_visible(False)
altitude.spines['right'].set_visible(False)
altitude.spines['bottom'].set_visible(False)
altitude.tick_params(bottom='off', top='off', right='off')
altitude.set_ylabel('m', rotation='horizontal')
altitude.set_xlabel('s')
altitude.plot(t_trigger, h[t_trigger_i], color=c['red'], marker='o')
altitude.axhline(min(h), color='k')
altitude.axvline(t_hmax, color=c['lturqois'])
# gyro
gyro.set_title('angular acceleration', fontsize=22)
gyro.plot(t, data['gx'], color=c['blue'])
gyro.plot(t, data['gy'], color=c['lred'])
gyro.plot(t, data['gz'], color=c['green'])
gyro.legend(['gx', 'gy', 'gz'], frameon=False)
gyro.yaxis.tick_right()
gyro.yaxis.set_label_position("right")
gyro.spines['top'].set_visible(False)
gyro.spines['left'].set_visible(False)
gyro.spines['bottom'].set_visible(False)
gyro.tick_params(bottom='off', top='off', right='off')
gyro.set_ylabel('Hz/s', rotation='horizontal')
gyro.set_xlabel('s')
gyro.plot(t_trigger, data['gz'][t_trigger_i], color=c['red'], marker='o')
gyro.axhline(color='k')
gyro.axvline(t_hmax, color=c['lturqois'])
# savefig config
sc = 1
fig.set_size_inches(16*sc,9*sc)
plt.tight_layout()
mpld3.save_html(fig, 'fig10.html')
# plt.savefig(filename[:-4]+'.png', dpi=120, format='png', transparent=False, frameon=False)
# plt.show()
if __name__ == "__main__":
import convert
convert.main(sys.argv[1])
main(sys.argv[1][:-4]+'.csv')
| [
"oepse@gmail.com"
] | oepse@gmail.com |
ec349dee68d4714e67b1fc1faef499204a9b868f | 481f4d9c2265360fff330716ef6f2c0daed86740 | /couleur_aleatoire.py | b2d2e65649dabb2e1f8eb6a48849b0347fbc6f68 | [] | no_license | lucas-thong/NSI-project-1 | d1513b7ade96b2085d88a188234927d7e1537b72 | 80089c6cab4e47ef1a7eb7df90bd0029008210ad | refs/heads/main | 2022-12-25T14:52:49.670581 | 2020-10-12T16:31:01 | 2020-10-12T16:31:01 | 303,449,956 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 220 | py | import turtle
from random import randint
def couleur_aleatoire():
'''
renvoie un triplet de 3 nombres entier compris entre 0 et 255
Ce triplet correspond à une couleur codée en RVB
'''
pass | [
"noreply@github.com"
] | noreply@github.com |
72e1e11b35929352a9b8fb4845e93cb148b0df76 | d7d7108dc922c927650b68a65c857d64911c9882 | /eclipse_workspace/DownLoad/src/default/postform.py | 4c6a59eb291e00a0ca600b9fb00a21b288c397dd | [] | no_license | yzqcode/MyWorkSpace | adf310b3d367892e2ec2793abe07948e8ada2129 | 40d659e8e1549fbad8e84497b30f9894d298fc2d | refs/heads/master | 2021-01-18T20:56:10.721277 | 2016-10-14T01:21:10 | 2016-10-14T01:21:10 | 69,458,965 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,066 | py | # -*- coding: utf-8 -*-
'''
Created on 2016-09-27
@author: 鄢真强
'''
import urllib.request
from bs4 import BeautifulSoup
import re
import math
import os
counter = 1
url = "http://disclosure.szse.cn/m/search0425.jsp"
def down(link):
global counter
url = 'http://disclosure.szse.cn/'
url = url + link
print("完整下载链接:"+url)
urllib.request.urlretrieve(url,'./files/'+str(counter)+".pdf")
print('已下载'+str(counter)+'个文件')
counter = counter+1
def post(starttime,endtime):
postdata = urllib.parse.urlencode({'noticeType': '010301', 'startTime': starttime, 'endTime':endtime})
postdata = postdata.encode('gb2312')
res = urllib.request.urlopen(url,postdata)
data = res.read().decode('gb2312')
soup = BeautifulSoup(data,"html.parser")
if None != soup.find(text="没有找到你搜索的公告!"):
print("查询时间段内无报告")
return
pattern = r'(pdf|PDF)'
herflist = soup.find_all('a')
if herflist != []:
for link in herflist:
tmp = link.get('href')
result = re.search(pattern, tmp)
if result:
print(tmp)
down(tmp)
if __name__ == '__main__':
if os.path.exists('files')== False:
os.mkdir('files')
for year in range(2001,2016):
for month in range(1,12):
if month<=9:
startmonth = '0'+str(month)
else:
startmonth = str(month)
if month+1<=9:
endmonth = '0'+str(month+1)
else:
endmonth = str(month+1)
if endmonth == '12':
post(str(year)+'-'+endmonth+'-01',str(year)+'-'+endmonth+'-30')
starttime = str(year)+'-'+startmonth+'-01'
endtime = str(year)+'-'+endmonth+'-01'
print('starttime='+starttime+'\tendtime='+endtime)
post(starttime,endtime)
| [
"鄢真强"
] | 鄢真强 |
1e0b321bd32462e3429bc13aa5cf1eaa4183b6d8 | 05d7ffbde61f195e9ac73f3a73d880891a01bda6 | /shop_config.py | 83c7ab2c706f468639800cd2d495eec949b8fb85 | [] | no_license | m0n1ter/wxxq | 6d09d11022e3fc486e4193a6c6130a513db15e39 | d3fa3362cdbb48e59ff9a866158e108d3c6f1895 | refs/heads/master | 2020-04-07T03:26:56.463869 | 2018-03-14T13:54:23 | 2018-03-14T13:54:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 279 | py | shop_list = [13069819, 13077683, 13079433, 13082387, 14065620, 14830235, 16438854, 18339733, 1865890, 1917598, 19281319, 19419483, 1999657, 2050147, 2245387, 2471101, 2716252, 3024541, 44397898, 44783109, 45075662, 5144657, 59061381, 9495359, 9873641, 9877320]
new_shop_list = [] | [
"chey"
] | chey |
7d26e894c9f765bef6b532e768050fc4c9df08c5 | a73ac0f5ae1f0ac543f0ffe9facd6861a435dc67 | /ex38.py | db5382f446dbcefdfb3d4cbea255f5f3e1843e97 | [] | no_license | suboqi/-python | 438e87bd440cc5b05cf7f884d6f08a46a2b346af | f8844c9dd81e0b9788089d420858891a460e2f39 | refs/heads/master | 2020-05-31T09:13:21.432226 | 2019-06-04T13:30:35 | 2019-06-04T13:30:35 | 190,207,247 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,993 | py | # batch_file_rename.py
# Created: 6th August 2012
"""
This will batch rename a group of files in a given directory,
once you pass the current and new extensions
一旦传递了当前和新的扩展,此批处理将重命名给定目录中的一组文件。
"""
# just checking
__author__ = 'Craig Richards'
__version__ = '1.0'
#导入os模块
import os
#导入argparse模块
import argparse
def batch_rename(work_dir, old_ext, new_ext):
"""
This will batch rename a group of files in a given directory,
once you pass the current and new extensions
一旦传递了当前和新的扩展,此批处理将重命名给定目录中的一组文件。
"""
# files = os.listdir(work_dir)
for filename in os.listdir(work_dir):
# Get the file extension
# 获取文件扩展名
split_file = os.path.splitext(filename)
file_ext = split_file[1]
# Start of the logic to check the file extensions, if old_ext = file_ext
# 开始检查文件扩展名的逻辑,if old_ext = file_ext
if old_ext == file_ext:
# Returns changed name of the file with new extention
newfile = split_file[0] + new_ext
# Write the files
# 写文件
# print(os.rename('c.txt','a.txt'))#修改文件名称
os.rename(
os.path.join(work_dir, filename),
os.path.join(work_dir, newfile)
)
def get_parser():
parser = argparse.ArgumentParser(description='change extension of files in a working directory')
parser.add_argument('work_dir', metavar='WORK_DIR', type=str, nargs=1,help='the directory where to change extension')
parser.add_argument('old_ext', metavar='OLD_EXT', type=str, nargs=1, help='old extension')
parser.add_argument('new_ext', metavar='NEW_EXT', type=str, nargs=1, help='new extension')
return parser
def main():
#This will be called if the script is directly invoked.
#如果直接调用脚本,将调用此函数
# adding command line argument
# 添加命令行参数
parser = get_parser()
args = vars(parser.parse_args())
# Set the variable work_dir with the first argument passed
# 用传递的第一个参数设置变量new_ext
work_dir = args['work_dir'][0]
# Set the variable old_ext with the second argument passed
# 用传递的第二个参数设置变量new_ext
old_ext = args['old_ext'][0]
if old_ext[0] != '.':
old_ext = '.' + old_ext
# Set the variable new_ext with the third argument passed
# 用传递的第三个参数设置变量new_ext
new_ext = args['new_ext'][0]
if new_ext[0] != '.':
new_ext = '.' + new_ext
batch_rename(work_dir, old_ext, new_ext)
#当.py文件被直接运行时,if __name__ ==__main__'之下的代码块将被运行;当.py文件以模块形式被导入时,if __name__ == '__main__'之下的代码块不被运行。
if __name__ == '__main__':
main() | [
"1622653732@qq.com"
] | 1622653732@qq.com |
e883556af74768d33c9d085307796931163d4c7f | 0cf64c67a3eede0d16e15a527a58bc7515659b3e | /Zadania 2/2.19.py | 229c1a5f88a9917d1febe73c295db8de3dd5025c | [] | no_license | StaporBartlomiej/Python | 14cf7dbf3a84fcd3217bc983b6f509571153a6c3 | 2d6922952e02bbe7285feca20f6444e2660fcc51 | refs/heads/master | 2021-01-12T13:28:44.520583 | 2017-01-27T17:41:07 | 2017-01-27T17:41:07 | 69,954,232 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 161 | py | #!/usr/bin/python
# -*- coding: iso-8859-2 -*-
L = ["1","2","3","21","22","23","301","302","303"]
for i in range(len(L)):
print (L[i].zfill(3))
| [
"bartlomiej.stapor@gmail.com"
] | bartlomiej.stapor@gmail.com |
3e5b4e6f763a26884b9e42e10d66924e9ab53b82 | 959f70ef067d446b159a5a69f839afff453d0f49 | /Datasets/circRNA-RBP/FOX2/code.txt | bff8b18440e4ebeae769e8c905d5bacd7999b995 | [] | no_license | houzl3416/iCircRBP-DHN | db2ecded61240dd75c53ffd401eb57b2d168d8b3 | f1b4836aa4f1ffa4055d9cea51cb56d521ba14a2 | refs/heads/master | 2023-03-29T15:24:37.182020 | 2021-03-22T12:49:30 | 2021-03-22T12:49:30 | 290,127,792 | 6 | 2 | null | null | null | null | GB18030 | Python | false | false | 5,691 | txt | #!/usr/bin/env python
# encoding: utf-8
import keras.backend as K
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import KFold
from sklearn.metrics import roc_curve, auc, roc_auc_score
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers import Dense, LSTM, Dropout, Bidirectional, Flatten, GRU, Activation
from keras.layers import Convolution1D, MaxPooling1D, AveragePooling1D
from keras.layers.embeddings import Embedding
from keras.callbacks import ModelCheckpoint, EarlyStopping
from sklearn import metrics
# from ene import reshape_input,Twoclassfy_evalu, avg
from keras.optimizers import Adam, SGD, RMSprop,Adagrad
from keras.models import load_model
import random
from keras import regularizers
import heapq
import math
import scipy.io as sio
from propre import Twoclassfy_evalu, avg
from sklearn.metrics import accuracy_score
def cut_kmer(seq, k, stride):
kmer = []
seq = seq.lower()
l = len(seq)
for i in range(0, l, stride):
if i + k >= l + 1:
break
else:
kmer.append(seq[i:i + k])
return kmer
embdim = 100
num_words = 20000
def tokenize_seqs(texts, num_words):
lens = [len(str(line).strip()) for line in texts]
max_1 = heapq.nlargest(1, lens)
max_len = int(max_1[0])
tokenizer = Tokenizer(num_words=num_words)
tokenizer.fit_on_texts(texts)
sequences = tokenizer.texts_to_sequences(texts)
X = pad_sequences(sequences, maxlen=max_len)
kmer_index = tokenizer.word_index
return X, kmer_index, max_len
def generate_EMatrix(embedim, kmer_index, word2vec):
embedding_dim = int(embedim)
nb_words = min(num_words, len(kmer_index))
kmer2vec = {}
with open(word2vec) as f:
for line in f:
values = line.split()
try:
kmer = values[0]
coefs = np.asarray(values[1:], dtype='float32')
kmer2vec[kmer] = coefs
except:
pass # pass代表什么也不做
embedding_matrix = np.zeros((nb_words + 1, embedding_dim))
for kmer, i in kmer_index.items():
if i > num_words:
continue
vector = kmer2vec.get(kmer)
if vector is not None:
embedding_matrix[i] = vector
return embedding_matrix, nb_words,
def deal_with_data(protein,num_words):
dataX = []
dataY = []
with open('F:/BY/DUT/2019_1/deepnet/circ-rna/CRIP-master/dataset/'+protein+'/positive') as f:
for line in f:
if '>' not in line:
dataX.append(cut_kmer(line.strip(), 6, 1))
dataY.append(1)
with open('F:/BY/DUT/2019_1/deepnet/circ-rna/CRIP-master/dataset/'+protein+'/negative') as f:
for line in f:
if '>' not in line:
dataX.append(cut_kmer(line.strip(), 6, 1))
dataY.append(0)
dataX, kmer_index, max_len = tokenize_seqs(dataX, num_words)
dataX = np.array(dataX)
dataY = np.array(dataY)
train_X, test_X, train_y, test_y = train_test_split(dataX,dataY,shuffle=True,test_size=0.2,stratify=dataY)
return train_X, test_X, train_y, test_y, kmer_index, max_len # lens是一个列表,记录每条序列的分词数
trainXeval, test_X, trainYeval, test_y, kmer_index, max_len = deal_with_data('AGO3',num_words)
embedding_matrix, nb_words = generate_EMatrix(embdim, kmer_index, 'F:/BY/DUT/2019_1/deepnet/circ-rna/word2vec/circ_61.vector')
# test_y = test_y[:, 1]
kf = StratifiedKFold(n_splits=5,shuffle=True)
aucs = []
kaccs = []
klosss = []
for train_index, eval_index in kf.split(trainXeval, trainYeval):
train_X = trainXeval[train_index]
train_y = trainYeval[train_index]
eval_X = trainXeval[eval_index]
eval_y = trainYeval[eval_index]
print('configure cnn network')
model = Sequential()
model.add(Embedding(nb_words + 1, embdim, weights=[embedding_matrix], input_length=max_len, trainable=True))
model.add(Convolution1D(filters=100, kernel_size=5, padding='same', activation='relu', strides=1))
model.add(MaxPooling1D(pool_size=5))
model.add(Dropout(0.3))
model.add(Convolution1D(filters=100, kernel_size=4, padding='same', activation='relu', strides=1))
model.add(MaxPooling1D(pool_size=4))
model.add(Dropout(0.3))
model.add(Bidirectional(LSTM(100,activation='relu',dropout=0.5,recurrent_dropout=0.3))) #recurrent_dropout=0.1
model.add(Dense(50, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(1))
model.add(Activation('sigmoid'))
# sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy']) # Adam(lr=1e-4)
print('model training')
checkpointer = ModelCheckpoint(filepath='model.h5', verbose=1,
save_best_only=True)
earlystopper = EarlyStopping(monitor='val_loss', patience=6, verbose=1)
model.fit(train_X, train_y, batch_size=100, epochs=30, verbose=1, validation_data=(eval_X, eval_y), shuffle=True,
callbacks=[checkpointer, earlystopper])
Model = load_model('model.h5')
predictions = Model.predict_proba(test_X)
auc = roc_auc_score(test_y,predictions)
print('auc',auc)
aucs.append(auc)
kloss,kacc = Model.evaluate(test_X, test_y)
kaccs.append(kacc)
klosss.append(kloss)
print('acc',kacc)
print('loss',kloss)
print('mean_auc', np.mean(aucs))
print('keras_acc',np.mean(kaccs))
print('keras_loss',np.mean(klosss))
| [
"33145210+542121317@users.noreply.github.com"
] | 33145210+542121317@users.noreply.github.com |
b5ed6dd488d878ccd57ad40c55431222f153790d | 13e21198448cd5839d9b5cdba0c62ea4c3b36314 | /basin3d/core/connection.py | 7fffa230e1fe83b836664940e6262b6a0f78abeb | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause-LBNL"
] | permissive | heycatwonton/basin3d | 2f9392a4e205ce427ab1d7581976908b2848c7f9 | 980a52fa8d6c073cd6704950cfd4168c7f4b0f61 | refs/heads/main | 2023-04-12T01:48:25.308923 | 2021-04-23T16:48:02 | 2021-04-23T16:48:02 | 351,609,042 | 0 | 0 | NOASSERTION | 2021-03-26T00:40:15 | 2021-03-25T23:57:17 | Python | UTF-8 | Python | false | false | 13,833 | py | """
`basin3d.core.connection`
************************
.. currentmodule:: basin3d.core.connection
:platform: Unix, Mac
:synopsis: BASIN-3D ``DataSource`` connection classes
:module author: Val Hendrix <vhendrix@lbl.gov>
:module author: Danielle Svehla Christianson <dschristianson@lbl.gov>
.. contents:: Contents
:local:
:backlinks: top
"""
import requests
import yaml
import logging
from basin3d.core.access import get_url, post_url
logger = logging.getLogger(__name__)
class HTTPConnectionDataSource(object):
"""
Class for handling Authentication and authorization of
:class:`basin3d.models.DataSource` over HTTP
:param datasource: the datasource to authenticate and authorize via HTTP
:type datasource: :class:`basin3d.models.DataSource` instance
"""
def __init__(self, datasource, *args, credentials=None, verify_ssl=False, **kwargs):
self.datasource = datasource
self.credentials = credentials
self.verify_ssl = verify_ssl
def login(self):
"""
Login to the :class:`basin3d.models.DataSource`
:return: JSON response
:rtype: dict
"""
raise NotImplementedError
def logout(self):
"""
Login out of the :class:`basin3d.models.DataSource`
:return: None
"""
raise NotImplementedError
def get(self, url_part, params=None, headers=None):
"""
The resources at the spedicfied url
:param url_part:
:param params:
:param headers:
:return:
"""
raise NotImplementedError
def post(self, url_part, params=None, headers=None):
"""
The resources at the spedicfied url
:param url_part:
:param params:
:param headers:
:return:
"""
raise NotImplementedError
@classmethod
def get_credentials_format(cls):
"""
This returnes the format that the credentials are stored in the DB
:return: The format for the credentials
"""
raise NotImplementedError
class InvalidOrMissingCredentials(Exception):
"""The credentials are invalid or missing"""
pass
class HTTPConnectionOAuth2(HTTPConnectionDataSource):
"""
Class for handling Authentication and authorization of
:class:`basin3d.models.DataSource` over HTTP with OAuth2
:param datasource: the datasource to authenticate and authorize via HTTP
:type datasource: :class:`basin3d.models.DataSource` instance
:param auth_token_path: The url part for requesting a token
:param revoke_token_path: The url part for revoking a valid token
:param auth_scope: The scope of the token being requested (e.g read, write, group)
:param grant_type: The type of oauth2 grant (e.g client_credentials, password,
refresh_token, authorization_code)
"""
CREDENTIALS_FORMAT = 'client_id:\nclient_secret:\n'
def __init__(self, datasource, *args, auth_token_path="o/token/",
revoke_token_path="o/revoke_token/", auth_scope="read",
grant_type="client_credentials",
**kwargs):
super(HTTPConnectionOAuth2, self).__init__(datasource, *args, **kwargs)
self.token = None
self.auth_token_path = auth_token_path
self.revoke_token_path = revoke_token_path
self.auth_scope = auth_scope
self.grant_type = grant_type
self.client_id, self.client_secret = self._load_credentials(datasource)
def _validate_credentials(self):
"""
Validate the Data Source credentials
:return: TRUE if the credentials are valid
:rtype: boolean
"""
# There should be a client_id and client secret
return "client_id" in self.credentials.keys() and "client_secret" in self.credentials.keys() \
and self.credentials["client_id"] and self.credentials["client_secret"]
def _load_credentials(self, datasource):
"""
Get the credentials from Data Source. If the
credentials are invalid `None` is returned.
:param datasource: The datasource object
:type datasource: :class:`basin3d.models.DataSource`
:return: tuple of client_id and client_secret
:rtype: tuple
"""
self.credentials = datasource.credentials # Access the credentials
# If there are credentials then make the api call
if self.credentials:
self.credentials = yaml.load(self.credentials)
if self._validate_credentials():
return self.credentials["client_id"], self.credentials["client_secret"]
raise InvalidOrMissingCredentials("client_id and client_secret are missing or invalid")
def login(self):
"""
Get a token
OAuth Client credentials (client_id, client_secret) stored in the
DataSource.
- *Url:* https://:class:`basin3d.models.DataSource.location`<auth_token_path>
- *Scope:* <token_scope>
- *Grant Type:* <grant_type>
- *Client Id:* stored in encrypted :class:`basin3d.models.DataSource` field
- *Client Secret:* stored in encrypted :class:`basin3d.models.DataSource` field
Example JSON Response::
{
"access_token": "<your_access_token>",
"token_type": "Bearer",
"expires_in": 36000,
"refresh_token": "<your_refresh_token>",
"scope": "read"
}
"""
# Build the authentication url
url = '{}{}'.format(self.datasource.location, self.auth_token_path)
try:
# Login to the Data Source
res = requests.post(url, params={"scope": self.auth_scope, "grant_type": self.grant_type},
auth=(self.client_id, self.client_secret),
verify=self.verify_ssl)
# Validate the response
if res.status_code != requests.codes.ok:
logger.error("Authentication error {}: {}".format(url, res.content))
return None
# Get the JSON content (This has the token)
result_json = res.json()
self.token = result_json
except Exception as e:
logger.error("Authentication error {}: {}".format(url, e))
# TODO: create exception for this
# Access is denied!!
raise Exception("AccessDenied")
def get(self, url_part, params=None, headers=None):
"""
Login Data Source if not already logged in.
Access url with the Authorization header and the access token
Authorization Header:
- Authorization": "{token_type} {access_token}
:param url_part: The url part to request
:param params: additional parameters for the request
:type params: dict
:param headers: request headers
:return: None
:raises: PermissionDenied
"""
self._validate_token()
# Prepare the Authorization header
auth_headers = {"Authorization": "{token_type} {access_token}".format(**self.token)}
if headers:
auth_headers.update(headers)
return get_url(url_part, params=params, headers=auth_headers, verify=self.verify_ssl)
def post(self, url_part, params=None, headers=None):
"""
Login Data Source if not already logged in.
Access url with the Authorization header and the access token
Authorization Header:
- Authorization": "{token_type} {access_token}
:param url_part: The url part to request
:param params: additional parameters for the request
:type params: dict
:param headers: request headers
:return: None
:raises: PermissionDenied
"""
self._validate_token()
# Prepare the Authorization header
auth_headers = {"Authorization": "{token_type} {access_token}".format(**self.token)}
if headers:
auth_headers.update(headers)
return post_url(url_part, params=params, headers=auth_headers, verify=self.verify_ssl)
def logout(self):
"""
Revokes atoken
:param token: The current Token
:return: None
"""
# Build the authentication url for revoking the token
url = '{}{}'.format(self.datasource.location, self.revoke_token_path)
# Request the token to be revoked
if self.token:
res = requests.post(url, params={"token": self.token["access_token"],
"client_id": self.client_id},
auth=(self.client_id, self.client_secret),
verify=self.verify_ssl)
# Validate the success of the token revocation
if res.status_code != 200:
logger.warning("Problem encountered revoking token for '{}' HTTP status {} -- {}".format(
self.datasource.name,
res.status_code, res.content.decode('utf-8')))
def _validate_token(self):
"""
Validate the connection token
:return:
"""
if not self.token:
self.login()
if not self.token:
# TODO: create exception for this
# Access is denied!!
raise Exception("AccessDenied")
class HTTPConnectionTokenAuth(HTTPConnectionDataSource):
"""
Class for handling Authentication and authorization of
:class:`basin3d.models.DataSource` over HTTP with Tokens
:param datasource: the datasource to authenticate and authorize via HTTP
:type datasource: :class:`basin3d.models.DataSource` instance
:param login_path: The url part for requesting a token
"""
CREDENTIALS_FORMAT = 'username: \npassword: \n'
def __init__(self, datasource, login_path='api/login'):
"""
Initialize HTTPTokenAuth
:param datasource:
:param login_path:
"""
super(HTTPConnectionTokenAuth, self).__init__(datasource)
self.userpass = self._load_credentials()
self.login_path = login_path
def _load_credentials(self):
"""
Load the credentials
:return:
"""
credentials = self.datasource.credentials # Access the credentials
self.credentials = yaml.load(credentials)
# If there are credentials then get the monitoring features
if self._validate_credentials(self.credentials):
return self.credentials
raise InvalidOrMissingCredentials(
f'Invalid or Missing Credentials - Data Source: {self.datasource.name}')
@staticmethod
def _validate_credentials(credentials):
"""
Validate the credentials
:param credentials:
:return:
"""
return credentials and "username" in credentials.keys() and "password" in credentials.keys() \
and credentials["username"] and credentials["password"]
def login(self):
"""
Get a Token
:return: JSON response
:rtype: dict
"""
try:
res = requests.get(f'{self.datasource.location}{self.login_path}',
params=self.userpass, verify=self.verify_ssl)
result_json = res.json()
# logger.debug(result_json)
if result_json[0] == 0:
return result_json[1]['token']
elif '{' in result_json[0]:
return result_json
else:
raise InvalidOrMissingCredentials(
f'Datasource \'{self.datasource.name}\' Error ({self.login_path}): {result_json}')
except InvalidOrMissingCredentials as imc:
raise imc
except Exception as e:
raise InvalidOrMissingCredentials(
f'Datasource \'{self.datasource.name}\' Error ({self.login_path}): {e}')
def logout(self):
pass
def _submit_url(self, url_part, params=None, headers=None, request_method=get_url):
"""
Interact with the datasource API
:param url_part:
:param params:
:param headers:
:param request_method:
:return:
"""
token_params = [('token', self.login())]
if params:
# loop through parameters
for item in params.items():
# separate lists into tuples
if isinstance(item[1], list):
for v in item[1]:
token_params.append((item[0], v))
else:
token_params.append(item)
# Check if the url_part contains the datasource location
url = url_part
if not url_part.startswith(self.datasource.location):
url = f'{self.datasource.location}{url_part}'
return request_method(url, params=token_params,
headers=headers, verify=self.verify_ssl)
def get(self, url_part, params=None, headers=None):
"""
Get the url
:param url_part: relative location of the requeste URL
:param params: The query parameters
:type params: dict or list of 2-tuples
:param headers:
:return:
"""
return self._submit_url(url_part, params, headers)
def post(self, url_part, params=None, headers=None):
"""
Post to the url
:param url_part: relative location of the requeste URL
:param params: The query parameters
:type params: dict or list of 2-tuples
:param headers:
:return:
"""
return self._submit_url(url_part, params, headers, post_url)
| [
"vchendrix@lbl.gov"
] | vchendrix@lbl.gov |
375457d210d4b2f2727061290fd2565a8958df81 | f1c133caa0d4113589040cfd01e4bafe82ae84fa | /210327/semain_가장 큰 수.py | 7df93d290ded70d882ae71009424558029b467bf | [] | no_license | amane-IT/CodingTest | d5c9d4be9ec88bc9a4bf972b764576631b623242 | 83ce192aceb7ad8626de5a28ef535b3f1ebe1e88 | refs/heads/main | 2023-08-11T18:53:03.984487 | 2021-09-19T09:30:23 | 2021-09-19T09:30:23 | 412,737,107 | 1 | 0 | null | 2021-10-02T08:28:46 | 2021-10-02T08:28:45 | null | UTF-8 | Python | false | false | 1,007 | py | def solution(numbers):
numbers = list(map(str, numbers))
numbers.sort(key=lambda x:x*3, reverse=True)
return str(int(''.join(numbers)))
"""
문제 풀이
힌트: numbers의 원소는 0 이상 1,000 이하
*모든 조합을 구하려고 할 때 시간 복잡도는 (O(numbers)!)
==> 1초를 넘어감
==> 정렬과 탐색은 nlogn의 시간 복잡도를 기준으로 잡아야 함
예시) numbers = [6, 10, 2]
1. 각 문자열을 반복함
numbers = ['6(66)', '10(1010)', '2(22)']
2. 문자열을 기준으로 역순으로 정렬
numbers = ['6(66)', '2(22)', '10(1010)']
(뒤가 뭐가 나오든 앞 자리가 큰 순으로)
(혹은 3자리씩 잘라서 큰 순서대로 정렬)
3. 문자열 리스트를 서로 합침
numbers = ['6210']
고려해야 하는 케이스)
[0, 0, 0, 0]의 경우 '0000'이 아니라 '0'이 나와야 한다.
기억해야 하는 함수 사용법
sort(key = lamda x : 수식 x, reverse = True)
"""
| [
"noreply@github.com"
] | noreply@github.com |
629f5f19bfaed88e1da50cd8780de17afb76811f | 57e649df1e635ab5d7ab4b31caef10ea1d829691 | /DBA/wsgi.py | 36ced47b75f4185978846ca828f12b5d8151b5d2 | [] | no_license | l985654987/hello_world | ed11d2df659294d6de8850fbaedc9c53da9ab74a | bbb58fef5f5416d5146f43251af246709e58a224 | refs/heads/master | 2020-04-11T12:13:00.262471 | 2019-04-20T08:21:20 | 2019-04-20T08:21:20 | 161,773,117 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 383 | py | """
WSGI config for DBA project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'DBA.settings')
application = get_wsgi_application()
| [
"985654987@qq.com"
] | 985654987@qq.com |
31dfa1ea8131220d682fa65569dac25250d32b06 | 832368f4a10376e3a0172bad1cedc5c71807bf32 | /PyPollHomeworkPart2.py | 8235c0588e4f3333f67f644c5fd98a62a056b562 | [] | no_license | elambrano/Python-Challenge-Part-2 | 73023a1e8fbf8c4bdb80dd413d6e58487e9e4c24 | 2201fd8f67564e9df3fe56e088eca61b18ec1b2b | refs/heads/master | 2020-04-13T03:04:05.711533 | 2018-12-23T20:10:20 | 2018-12-23T20:10:20 | 162,920,655 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,385 | py | fn = 'Resources/election_data.csv'
f = open (fn)
contents = f.read()
f.close()
lines = contents.split('\n')
print('Length =', len(lines))
for line in lines[0:10]:
print(line)
lines = lines[1:]
# drop last line if it's empty
if lines[-1]=="":
lines.pop()
#Question 1 (total Votes)
total_votes = len(lines)
print(total_votes)
#Question 2
election_dictionary={}
for line in lines:
items = line.split(',')
candidate = items [2]
if candidate in election_dictionary:
election_dictionary [candidate] +=1
else:
election_dictionary [candidate] = 1
candidates_with_votes = list (election_dictionary.keys())
print(candidates_with_votes)
#Question 3&4 percentage & number of votes
election_results = []
for candidate,count in election_dictionary.items():
percentage_votes = count/total_votes*100
candidate_data = (count,percentage_votes,candidate)
election_results.append(candidate_data)
sorted_results = sorted(election_results,reverse = True)
print(sorted_results)
#Question 5
winner = sorted_results [0][2]
print("Election Results")
print("-" * 20)
print("Total Votes:",total_votes)
print("-" * 20)
for cr in sorted_results:
print('%s: %.3f%% (%d)' % (cr[2], cr[1], cr[0]) )
print("-" * 20)
print("Winner:",winner)
print("-" * 20)
| [
"noreply@github.com"
] | noreply@github.com |
2e194112df53ba147c427b3442ad1038fea71b05 | 480e7bb34c23445497bc95e4b5107d89830148f2 | /Day 45/962. Maximum Width Ramp.py | a89d88a5861bb4b320d90a53b474a840aad0b325 | [] | no_license | ManojKumarPatnaik/100-Days-Code-Challenge | 632b15679557d482eb8e00468aa432c3df5d4d56 | 7be599bc7bbd07b4df576e00c27a55df60a678c4 | refs/heads/main | 2023-06-07T06:25:44.878727 | 2021-07-03T08:14:54 | 2021-07-03T08:14:54 | 382,612,464 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,294 | py | # Github: Shantanugupta1118
# DAY 45 of DAY 100
# 962. Maximum Width Ramp - LeetCode
# https://leetcode.com/problems/maximum-width-ramp/
import sys
# Brute Force -- O(N^2)
'''
class solution:
def max_width_ramp(self, nums):
max_width = -sys.maxsize
for start in range(len(nums)):
for end in range(start, len(nums)):
if nums[start] <= nums[end]:
max_width = max(max_width, end-start)
return max_width
'''
class solution:
def max_width_ramp(self, nums):
store = {}
for i in range(len(nums)):
x = nums[i]
if x in store:
store[x].append(i)
else:
store[x] = [i]
mini, maxi = [float('inf')], [float('-inf')]
for x in sorted(store.keys()):
mini.append(min(mini[-1], min(store[x])))
for x in sorted(store.keys(), reverse=True):
maxi.append(max(mini[-1], max(store[x])))
maxi = maxi[::-1][:-1]
mini = mini[1:]
p = 0
ans = float('-inf')
while p<len(mini):
ans = max(ans, maxi[p]-mini[p])
p+=1
return ans
nums = list(map(int, input().split()))
print(solution().max_width_ramp(nums)) | [
"shantanuguptaknp@gmail.com"
] | shantanuguptaknp@gmail.com |
347021acc8f528e862d6401bb21dfa7d3134cf58 | 8d73ebf53f3d0aa08c3a50f18f47ef7d48e6febf | /CGPA_Calculator/icon.py | 7c01b70363b3922098a63c8e25bc682ad829f7c7 | [
"MIT"
] | permissive | deepdalsania/calculator | 1da25f91feed8723a1faf43a2dffd8a955d7a359 | 1460fc7f91ef9e379bdde240ddbcb0183d7ec092 | refs/heads/master | 2022-12-20T16:42:36.522300 | 2020-10-06T05:03:51 | 2020-10-06T05:03:51 | 300,562,691 | 0 | 5 | MIT | 2020-10-06T05:03:52 | 2020-10-02T09:18:04 | Python | UTF-8 | Python | false | false | 864 | py | import sys
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWebEngineWidgets import QWebEngineView
from PyQt5.QtWidgets import QMainWindow, QLabel, QLineEdit, QPushButton, QApplication
def arrowIcon(self):
self.arrow = QLabel(self)
self.arrow.setPixmap(QPixmap("ARR.png"))
self.arrow.setGeometry(QRect(650, 240, 50, 40))
self.arrow.setScaledContents(True)
self.arrow.setToolTip('Tech-Totes Club.')
self.arrow = QLabel(self)
self.arrow.setPixmap(QPixmap("ARR.png"))
self.arrow.setGeometry(QRect(280, 345, 30, 30))
self.arrow.setScaledContents(True)
self.arrow.setToolTip('Tech-Totes Club.')
self.arrow = QLabel(self)
self.arrow.setPixmap(QPixmap("ARR.png"))
self.arrow.setGeometry(QRect(280, 395, 30, 30))
self.arrow.setScaledContents(True)
self.arrow.setToolTip('Tech-Totes Club.') | [
"deeppatel.dd@gmail.com"
] | deeppatel.dd@gmail.com |
33389fb5d60e6f5adb6b4617ea997145e425efe2 | 02468531b67787532600a27b5a1c1b2db5daf11b | /bullbearetfs/selenium/eventcapture/tests.py | 27e19b345ee89a30ab52acacb5b387c67af59ea6 | [] | no_license | henrimeli/invest2020 | d284af960b34a6ccc830524826fd1be9271168bf | eb699fd3c70ab0e1cffc56cb86855f6a22849aed | refs/heads/master | 2023-01-31T14:54:22.451146 | 2020-12-15T08:44:06 | 2020-12-15T08:44:06 | 312,067,444 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,972 | py | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import unittest, xmlrunner, time
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from django.urls import reverse
from bullbearetfs.selenium.core.browsers import EggheadSeleniumBrowser
#####################################################################################
# Events Capture.
#
#
#
class TestEventsCapture(StaticLiveServerTestCase):
testname ='TestEventsCapture'
@classmethod
def setUp(self):
self.browser = EggheadSeleniumBrowser()
self.driver = self.browser.getDriver()
@classmethod
def tearDown(self):
self.driver.quit()
def test_HomePage(self):
self.driver.maximize_window()
time.sleep(1)
self.driver.get(self.live_server_url)
time.sleep(3)
self.assertEqual(self.driver.title,'This is the home page for Egghead Project.')
dashboard_menu=self.driver.find_element_by_xpath("//*[@id='navbar']/ul[1]/li[2]")
dashboard_menu.click()
time.sleep(3)
home_menu=self.driver.find_element_by_xpath("/html/body/nav/div/div[1]/a")
home_menu.click()
time.sleep(3)
robots_menu=self.driver.find_element_by_xpath("//*[@id='navbar']/ul[1]/li[3]/a")
robots_menu.click()
time.sleep(3)
strategies_menu=self.driver.find_element_by_xpath("//*[@id='navbar']/ul[1]/li[4]/a")
strategies_menu.click()
time.sleep(3)
create_strategy_menu=self.driver.find_element_by_xpath("//*[@id='navbar']/ul[2]/li[1]/a")
create_strategy_menu.click()
time.sleep(3)
create_robot_menu=self.driver.find_element_by_xpath("//*[@id='navbar']/ul[2]/li[2]/a")
create_robot_menu.click()
time.sleep(3)
if __name__ == '__main__':
unittest.main(
testRunner=xmlrunner.XMLTestRunner(),
# these make sure that some options that are not applicable
# remain hidden from the help menu.
failfast=False, buffer=False, catchbreak=False)
| [
"bepanda@gmail.com"
] | bepanda@gmail.com |
32d8b616c7fbf7970c48dbd6370b57e89c4e3ef7 | 2ae5610c145557a6718f2103cc1ce01bc5d40fc7 | /distoptim/FedNova.py | c7cd144c19d90341b6bc49ea9595e3e4f6dc4a59 | [] | no_license | ms116/FedNova | bfc4ee9096194c774a884b25a60e37b84c965316 | 47b4e096dfb19dc43c728896fff335a5befb645d | refs/heads/master | 2023-03-19T10:52:01.438685 | 2020-10-21T21:00:51 | 2020-10-21T21:00:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,888 | py | import torch
import torch.distributed as dist
from torch.optim.optimizer import Optimizer, required
from comm_helpers import communicate, flatten_tensors, unflatten_tensors
import threading
import numpy as np
class FedNova(Optimizer):
r"""Implements federated normalized averaging (FedNova).
Nesterov momentum is based on the formula from
`On the importance of initialization and momentum in deep learning`__.
Args:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
ratio (float): relative sample size of client
gmf (float): global/server/slow momentum factor
mu (float): parameter for proximal local SGD
lr (float): learning rate
momentum (float, optional): momentum factor (default: 0)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
dampening (float, optional): dampening for momentum (default: 0)
nesterov (bool, optional): enables Nesterov momentum (default: False)
Example:
>>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
>>> optimizer.zero_grad()
>>> loss_fn(model(input), target).backward()
>>> optimizer.step()
__ http://www.cs.toronto.edu/%7Ehinton/absps/momentum.pdf
.. note::
The implementation of SGD with Momentum/Nesterov subtly differs from
Sutskever et. al. and implementations in some other frameworks.
Considering the specific case of Momentum, the update can be written as
.. math::
v = \rho * v + g \\
p = p - lr * v
where p, g, v and :math:`\rho` denote the parameters, gradient,
velocity, and momentum respectively.
This is in contrast to Sutskever et. al. and
other frameworks which employ an update of the form
.. math::
v = \rho * v + lr * g \\
p = p - v
The Nesterov version is analogously modified.
"""
def __init__(self, params, ratio, gmf, mu = 0, lr=required, momentum=0, dampening=0,
weight_decay=0, nesterov=False, variance=0):
self.gmf = gmf
self.ratio = ratio
self.momentum = momentum
self.mu = mu
self.local_normalizing_vec = 0
self.local_counter = 0
self.local_steps = 0
if lr is not required and lr < 0.0:
raise ValueError("Invalid learning rate: {}".format(lr))
if momentum < 0.0:
raise ValueError("Invalid momentum value: {}".format(momentum))
if weight_decay < 0.0:
raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
defaults = dict(lr=lr, momentum=momentum, dampening=dampening,
weight_decay=weight_decay, nesterov=nesterov, variance=variance)
if nesterov and (momentum <= 0 or dampening != 0):
raise ValueError("Nesterov momentum requires a momentum and zero dampening")
super(FedNova, self).__init__(params, defaults)
def __setstate__(self, state):
super(FedNova, self).__setstate__(state)
for group in self.param_groups:
group.setdefault('nesterov', False)
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
device = "cuda" if torch.cuda.is_available() else "cpu"
loss = None
if closure is not None:
loss = closure()
# scale = 1**self.itr
for group in self.param_groups:
weight_decay = group['weight_decay']
momentum = group['momentum']
dampening = group['dampening']
nesterov = group['nesterov']
for p in group['params']:
if p.grad is None:
continue
d_p = p.grad.data
if weight_decay != 0:
d_p.add_(weight_decay, p.data)
param_state = self.state[p]
if 'old_init' not in param_state:
param_state['old_init'] = torch.clone(p.data).detach()
local_lr = group['lr']
# apply momentum updates
if momentum != 0:
if 'momentum_buffer' not in param_state:
buf = param_state['momentum_buffer'] = torch.clone(d_p).detach()
else:
buf = param_state['momentum_buffer']
buf.mul_(momentum).add_(1 - dampening, d_p)
if nesterov:
d_p = d_p.add(momentum, buf)
else:
d_p = buf
# apply proximal updates
if self.mu != 0:
d_p.add_(self.mu, p.data - param_state['old_init'])
# update accumalated local updates
if 'cum_grad' not in param_state:
param_state['cum_grad'] = torch.clone(d_p).detach()
param_state['cum_grad'].mul_(local_lr)
else:
param_state['cum_grad'].add_(local_lr, d_p)
p.data.add_(-local_lr, d_p)
# compute local normalizing vector a_i
if self.momentum != 0:
self.local_counter = self.local_counter * self.momentum + 1
self.local_normalizing_vec += self.local_counter
self.etamu = local_lr * self.mu
if self.etamu != 0:
self.local_normalizing_vec *= (1 - self.etamu)
self.local_normalizing_vec += 1
if self.momentum == 0 and self.etamu == 0:
self.local_normalizing_vec += 1
self.local_steps += 1
return loss
def average(self, weight=0, tau_eff=0):
if weight == 0:
weight = self.ratio
if tau_eff == 0:
if self.mu != 0:
tau_eff_cuda = torch.tensor(self.local_steps*self.ratio).cuda()
else:
tau_eff_cuda = torch.tensor(self.local_normalizing_vec*self.ratio).cuda()
dist.all_reduce(tau_eff_cuda, op=dist.ReduceOp.SUM)
tau_eff = tau_eff_cuda.item()
param_list = []
for group in self.param_groups:
for p in group['params']:
param_state = self.state[p]
scale = tau_eff/self.local_normalizing_vec
param_state['cum_grad'].mul_(weight*scale)
param_list.append(param_state['cum_grad'])
communicate(param_list, dist.all_reduce)
for group in self.param_groups:
lr = group['lr']
for p in group['params']:
param_state = self.state[p]
if self.gmf != 0:
if 'global_momentum_buffer' not in param_state:
buf = param_state['global_momentum_buffer'] = torch.clone(param_state['cum_grad']).detach()
buf.div_(lr)
else:
buf = param_state['global_momentum_buffer']
buf.mul_(self.gmf).add_(1/lr, param_state['cum_grad'])
param_state['old_init'].sub_(lr, buf)
else:
param_state['old_init'].sub_(param_state['cum_grad'])
p.data.copy_(param_state['old_init'])
param_state['cum_grad'].zero_()
# Reinitialize momentum buffer
if 'momentum_buffer' in param_state:
param_state['momentum_buffer'].zero_()
self.local_counter = 0
self.local_normalizing_vec = 0
self.local_steps = 0
| [
"jianyuw1@andrew.cmu.edu"
] | jianyuw1@andrew.cmu.edu |
705172b35e4e926f7aaafbb9431f13fc097b88a4 | 54a26bf56aebd604d4dece733f08d7d30cd27f89 | /zdemo/auser/urls.py | 111ae8dee1420e3cac23d71f7714792b65cc4091 | [
"MIT"
] | permissive | zzZaida/django_27 | b78f5ae8bccfa11074221ba32241878d703aa535 | bbbba8be9547fb815c68e94fadb7e8b6eebf75c9 | refs/heads/master | 2020-07-03T19:47:25.037195 | 2019-08-13T12:11:29 | 2019-08-13T12:11:29 | 202,030,146 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 793 | py | """zdemo URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from . import views
urlpatterns = [
url(r'^user/', views.index,name='index'),
]
| [
"xwp_fullstack@163.com"
] | xwp_fullstack@163.com |
dbc9f7579143bb64ff98823e0279763d02cde114 | 6a8644cc47ed31adb60aba0f47551a897fdf8767 | /src/seraing/urban/dataimport/acropole/mappers.py | caa692885e0c32bea9b5dba44a5c0dcf0ed4a1e6 | [] | no_license | IMIO/seraing.urban.dataimport | 4e6ad4340348fc24b8576d4ce1f4c89a03063b88 | c9bd8e49390a14eeb53ce04a3d7b571e34da8a20 | refs/heads/master | 2021-01-20T03:42:12.232635 | 2017-04-27T12:23:47 | 2017-04-27T12:23:47 | 89,576,858 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 46,564 | py | # -*- coding: utf-8 -*-
import unicodedata
import datetime
from seraing.urban.dataimport.acropole.settings import AcropoleImporterSettings
from seraing.urban.dataimport.acropole.utils import get_state_from_licences_dates, get_date_from_licences_dates, \
load_architects, load_geometers, load_notaries, load_parcellings, get_point_and_digits
from imio.urban.dataimport.config import IMPORT_FOLDER_PATH
from imio.urban.dataimport.exceptions import NoObjectToCreateException
from imio.urban.dataimport.factory import BaseFactory
from imio.urban.dataimport.mapper import Mapper, FinalMapper, PostCreationMapper
from imio.urban.dataimport.utils import CadastralReference
from imio.urban.dataimport.utils import cleanAndSplitWord
from imio.urban.dataimport.utils import guess_cadastral_reference
from imio.urban.dataimport.utils import identify_parcel_abbreviations
from imio.urban.dataimport.utils import parse_cadastral_reference
from DateTime import DateTime
from Products.CMFPlone.utils import normalizeString
from Products.CMFPlone.utils import safe_unicode
from plone import api
from plone.i18n.normalizer import idnormalizer
import re
import os
#
# LICENCE
#
# factory
class LicenceFactory(BaseFactory):
    """Factory creating licence objects in the matching urban subfolder."""

    def getCreationPlace(self, factory_args):
        """Return the urban folder holding licences of this portal_type."""
        folder_path = '{}/urban/{}s'.format(
            self.site.absolute_url_path(),
            factory_args['portal_type'].lower(),
        )
        return self.site.restrictedTraverse(folder_path)
# mappers
class IdMapper(Mapper):
    """Map the raw 'id' column to a normalized object id.

    Instantiating this mapper also preloads referential data
    (architects, geometers, notaries, parcellings) used by later mappers.
    """
    def __init__(self, importer, args):
        super(IdMapper, self).__init__(importer, args)
        # Preload referentials once, before any line is imported.
        load_architects()
        load_geometers()
        load_notaries()
        load_parcellings()

    def mapId(self, line):
        # normalizeString makes the raw id usable as a Plone object id.
        return normalizeString(self.getData('id'))
class ReferenceMapper(Mapper):
    """Build the licence reference, suffixed by the source file type."""

    def mapReference(self, line):
        """Return the reference: permit number (old) or 'Reference' (new).

        Old format falls back on 'NC/<id>' when no permit number is encoded.
        """
        if AcropoleImporterSettings.file_type == 'old':
            permit_number = self.getData('Numero Permis')
            # Bug fix: the previous code appended " old" BEFORE testing
            # .strip(), so the test was always truthy and the NC fallback
            # below was dead code. Test the raw permit number instead.
            if permit_number.strip():
                return permit_number + " old"
            return "NC/%s" % self.getData('id') + " old"
        elif AcropoleImporterSettings.file_type == 'new':
            return self.getData('Reference') + " new"
class ReferenceDGO3Mapper(Mapper):
    """Map the DGO3 reference of environment permits (PE1/PE2 types)."""

    def mapReferencedgatlp(self, line):
        licence_type = self.getData('Type')
        # Only class-1 and class-2 environment permits carry a DGO3 reference.
        if licence_type and licence_type.startswith(('PE1', 'PE2')):
            dgo3_reference = self.getData('PENReference DGO3')
            if dgo3_reference:
                return dgo3_reference
class PortalTypeMapper(Mapper):
    """Derive the Plone portal_type (and folder category) of the licence."""

    def mapPortal_type(self, line):
        """Return the portal_type from the 3-character type prefix.

        Raises NoObjectToCreateException when the type column is empty or
        too short to identify a licence kind.
        """
        if AcropoleImporterSettings.file_type == 'old':
            return 'BuildLicence'
        raw_type = self.getData('Type')
        if not raw_type or len(raw_type) < 3:
            raise NoObjectToCreateException
        # The first three significant characters identify the licence kind.
        prefix = raw_type.strip()[0:3]
        type_map = self.getValueMapping('type_map')
        return type_map[prefix]

    def mapFoldercategory(self, line):
        # Folder category is constant for this import.
        return 'uat'
class LicenceSubjectMapper(Mapper):
    """Concatenate the works kind and the misc field as licence subject."""

    def mapLicencesubject(self, line):
        works_kind = self.getData('Genre de Travaux')
        misc = self.getData('Divers')
        return '%s %s' % (works_kind, misc)
class WorklocationMapper(Mapper):
    """Resolve the works address against the 'Street' catalog entries."""

    def mapWorklocations(self, line):
        num = self.getData('AdresseTravauxNumero')
        # Words too common to discriminate a street in a catalog query.
        noisy_words = set(('d', 'du', 'de', 'des', 'le', 'la', 'les', 'à', ',', 'rues', 'terrain', 'terrains', 'garage', 'magasin', 'entrepôt'))
        raw_street = self.getData('AdresseTravauxRue')
        # remove string in () and []
        raw_street = re.sub("[\(\[].*?[\)\]]", "", raw_street)
        street = cleanAndSplitWord(raw_street)
        street_keywords = [word for word in street if word not in noisy_words and len(word) > 1]
        # A trailing 'or' keyword is an artefact of the source data, drop it.
        if len(street_keywords) and street_keywords[-1] == 'or':
            street_keywords = street_keywords[:-1]
        locality = self.getData('AdresseTravauxVille')
        street_keywords.extend(cleanAndSplitWord(locality))
        brains = self.catalog(portal_type='Street', Title=street_keywords)
        # Only an unambiguous single match is kept; anything else is logged.
        if len(brains) == 1:
            return ({'street': brains[0].UID, 'number': num},)
        if street:
            self.logError(self, line, 'Couldnt find street or found too much streets', {
                'address': '%s, %s, %s ' % (num, raw_street, locality),
                'street': street_keywords,
                'search result': len(brains)
            })
        return {}
class WorklocationOldMapper(Mapper):
    """Resolve the works address (old file format) against 'Street' entries."""

    def mapWorklocations(self, line):
        # Words too common to discriminate a street in a catalog query.
        noisy_words = set(('d', 'du', 'de', 'des', 'le', 'la', 'les', 'à', ',', 'rues', 'terrain', 'terrains', 'garage', 'magasin', 'entrepôt'))
        raw_street = self.getData('Lieu de construction')
        # NOTE(review): the house number is collected BEFORE the bracketed
        # parts are stripped below, so digits inside ()/[] also end up in
        # the number — confirm this is intended.
        num = ''.join(ele for ele in raw_street if ele.isdigit())
        # remove string in () and []
        raw_street = re.sub("[\(\[].*?[\)\]]", "", raw_street)
        street = cleanAndSplitWord(raw_street)
        street_keywords = [word for word in street if word not in noisy_words and len(word) > 1]
        # A trailing 'or' keyword is an artefact of the source data, drop it.
        if len(street_keywords) and street_keywords[-1] == 'or':
            street_keywords = street_keywords[:-1]
        brains = self.catalog(portal_type='Street', Title=street_keywords)
        # Only an unambiguous single match is kept; anything else is logged.
        if len(brains) == 1:
            return ({'street': brains[0].UID, 'number': num},)
        if street:
            self.logError(self, line, 'Couldnt find street or found too much streets', {
                'address': '%s' % (raw_street),
                'street': street_keywords,
                'search result': len(brains)
            })
        return {}
class CityMapper(Mapper):
    """Extract the city name (all digits removed) from the applicant address."""

    def mapCity(self, line):
        raw_city = self.getData('Ville Demandeur')
        letters_only = ''.join(char for char in raw_city if not char.isdigit())
        return letters_only.strip()
class PostalCodeMapper(Mapper):
    """Extract the postal code (digits only) from the applicant address."""

    def mapZipcode(self, line):
        raw_city = self.getData('Ville Demandeur')
        digits_only = ''.join(char for char in raw_city if char.isdigit())
        return digits_only.strip()
class ParcellingUIDMapper(Mapper):
    """Find the ParcellingTerm whose title matches the 'Lotissement' column."""

    def mapParcellings(self, line):
        title = self.getData('Lotissement')
        if not title:
            return
        # Drop phase/part suffixes so the title matches the catalog entry.
        for suffix in ("(phase I)", "(partie 1)", "(partie 2)"):
            title = title.replace(suffix, "").strip()
        catalog = api.portal.get_tool('portal_catalog')
        brains = catalog(portal_type='ParcellingTerm', Title=title)
        parcelling_uids = [brain.getObject().UID() for brain in brains]
        # Keep only an unambiguous single match; several matches are logged.
        if len(parcelling_uids) == 1:
            return parcelling_uids
        if parcelling_uids:
            self.logError(self, line, 'Couldnt find parcelling or found too much parcellings', {
                'titre': '%s' % title,
                'search result': len(parcelling_uids)
            })
class IsInSubdivisionMapper(Mapper):
    """A licence is in a subdivision when a 'Lotissement' title is encoded."""

    def mapIsinsubdivision(self, line):
        return bool(self.getData('Lotissement'))
class SubdivisionDetailsMapper(Mapper):
    """Copy the lot number as subdivision details."""

    def mapSubdivisiondetails(self, line):
        return self.getData('Lot')
class WorkTypeMapper(Mapper):
    """Wrap the '220+' work-type code in a one-element list."""

    def mapWorktype(self, line):
        return [self.getData('Code_220+')]
class InquiryStartDateMapper(Mapper):
    """Parse the inquiry start date, encoded as dd/mm/yyyy."""

    def mapInvestigationstart(self, line):
        raw_date = self.getData('DateDebEnq')
        if not raw_date:
            # Keep the falsy raw value (empty string) untouched.
            return raw_date
        return datetime.datetime.strptime(raw_date, "%d/%m/%Y")
class InquiryEndDateMapper(Mapper):
    """Parse the inquiry end date, encoded as dd/mm/yyyy."""

    def mapInvestigationend(self, line):
        raw_date = self.getData('DateFinEnq')
        if not raw_date:
            # Keep the falsy raw value (empty string) untouched.
            return raw_date
        return datetime.datetime.strptime(raw_date, "%d/%m/%Y")
class InvestigationReasonsMapper(Mapper):
    """Render both inquiry particularity fields as HTML paragraphs."""

    def mapInvestigationreasons(self, line):
        part1 = self.getData('ParticularitesEnq1')
        part2 = self.getData('ParticularitesEnq2')
        return '<p>%s</p> <p>%s</p>' % (part1, part2)
class InquiryReclamationNumbersMapper(Mapper):
    """Map the count of written complaints received during the inquiry."""

    def mapInvestigationwritereclamationnumber(self, line):
        return self.getData('NBRec')
class InquiryArticlesMapper(PostCreationMapper):
    """Parse inquiry article numbers and keep those known by the config."""

    def mapInvestigationarticles(self, line, plone_object):
        raw_articles = self.getData('Enquete')
        articles = []
        if raw_articles:
            # Articles are encoded as pairs like '330, 12°'.
            article_regex = '(\d+ ?, ?\d+)°'
            found_articles = re.findall(article_regex, raw_articles)
            if not found_articles:
                self.logError(self, line, 'No investigation article found.', {'articles': raw_articles})
            for art in found_articles:
                # '330, 12' becomes the config id '330-12'.
                article_id = re.sub(' ?, ?', '-', art)
                if not self.article_exists(article_id, licence=plone_object):
                    self.logError(
                        self, line, 'Article %s does not exist in the config',
                        {'article id': article_id, 'articles': raw_articles}
                    )
                else:
                    articles.append(article_id)
        return articles

    def article_exists(self, article_id, licence):
        # Valid article ids live in the licence config folder.
        return article_id in licence.getLicenceConfig().investigationarticles.objectIds()
class AskOpinionsMapper(Mapper):
    """Collect the opinion requests flagged 'VRAI' in columns 60 to 75."""

    def mapSolicitopinionsto(self, line):
        ask_opinions = []
        # Columns 60..75 hold boolean flags; the mapping keys are '1'..'16'.
        for column_index in range(60, 76):
            if line[column_index] == "VRAI":
                opinion_map = self.getValueMapping('solicitOpinionDictionary')
                opinion = opinion_map[str(column_index - 59)]
                if opinion:
                    ask_opinions.append(opinion)
        return ask_opinions
class RubricsMapper(Mapper):
    """Resolve the '/'-separated DENRubrique1 codes to rubric term UIDs."""

    def mapRubrics(self, line):
        """Return the UIDs of EnvironmentRubricTerm matching each code.

        Codes without a catalog match are logged as errors and skipped.
        """
        rubric_list = []
        rubric_raw = self.getData('DENRubrique1')
        if not rubric_raw:
            return rubric_list
        # Bug fix: str.replace returns a new string; the previous code
        # discarded the result, so "//" separators were never collapsed.
        rubric_raw = rubric_raw.replace("//", "/")
        for rubric in rubric_raw.split("/"):
            point_and_digits = get_point_and_digits(rubric)
            # Only dotted codes (e.g. '63.12.01') are valid rubric ids.
            if point_and_digits and '.' in point_and_digits:
                catalog = api.portal.get_tool('portal_catalog')
                rubric_uids = [brain.UID for brain in catalog(portal_type='EnvironmentRubricTerm', id=point_and_digits)]
                if not rubric_uids:
                    self.logError(self, line, 'No rubric found',
                                  {
                                      'rubric': point_and_digits,
                                  })
                else:
                    rubric_list.append(rubric_uids[0])
        return rubric_list
class ObservationsMapper(Mapper):
    """Render both inquiry particularity fields as the description."""

    def mapDescription(self, line):
        part1 = self.getData('ParticularitesEnq1')
        part2 = self.getData('ParticularitesEnq2')
        return '<p>%s</p> <p>%s</p>' % (part1, part2)
class ObservationsOldMapper(Mapper):
    """Render the 'Remarques' field as the description (old file format)."""

    def mapDescription(self, line):
        return '<p>%s</p>' % self.getData('Remarques')
class TechnicalConditionsMapper(Mapper):
    """Concatenate both authorisation memo fields as technical conditions."""

    def mapLocationtechnicalconditions(self, line):
        memo1 = '<p>%s</p>' % self.getData('memo_Autorisation')
        memo2 = '<p>%s</p>' % self.getData('memo_Autorisation2')
        return '%s%s' % (memo1, memo2)
class ArchitectMapper(PostCreationMapper):
    """Link the licence to its architect, creating it on the fly if needed."""

    def mapArchitects(self, line, plone_object):
        # archi_name = '%s %s %s' % (self.getData('Nom Architecte'), self.getData('Prenom Architecte'), self.getData('Societe Architecte'))
        archi_name = ' %s %s' % ( self.getData('Prenom Architecte'), self.getData('Societe Architecte'))
        fullname = cleanAndSplitWord(archi_name)
        if not fullname:
            return []
        # Drop civility/punctuation words before querying the catalog.
        noisy_words = ['monsieur', 'madame', 'architecte', '&', ',', '.', 'or', 'mr', 'mme', '/']
        name_keywords = [word.lower() for word in fullname if word.lower() not in noisy_words]
        architects = self.catalog(portal_type='Architect', Title=name_keywords)
        # Unknown architect: create it, then search the catalog again.
        if len(architects) == 0:
            Utils.createArchitect(archi_name)
            architects = self.catalog(portal_type='Architect', Title=name_keywords)
        # Only an unambiguous single match is linked; anything else is logged.
        if len(architects) == 1:
            return architects[0].getObject()
        self.logError(self, line, 'No architects found or too much architects found',
                      {
                          'raw_name': archi_name,
                          'name': name_keywords,
                          'search_result': len(architects)
                      })
        return []
class FolderZoneTableMapper(Mapper):
    """Translate both sector-plan columns to folder zone vocabulary keys."""

    def mapFolderzone(self, line):
        zone_dictionary = self.getValueMapping('zoneDictionary')
        folder_zones = []
        for column in ('Plan de Secteur 1', 'Plan de Secteur 2'):
            sector = self.getData(column)
            # Unknown sector labels are silently skipped.
            if sector in zone_dictionary:
                folder_zones.append(zone_dictionary[sector])
        return folder_zones
class GeometricianMapper(PostCreationMapper):
    """Link the licence to its geometrician, creating it on the fly if needed."""

    def mapGeometricians(self, line, plone_object):
        name = self.getData('LOTGeoNom')
        firstname = self.getData('LOTGeoPrenom')
        raw_name = firstname + name
        # name = cleanAndSplitWord(name)
        # firstname = cleanAndSplitWord(firstname)
        names = name + ' ' + firstname
        if raw_name:
            geometrician = self.catalog(portal_type='Geometrician', Title=names)
            # Unknown geometrician: create it, then search the catalog again.
            if len(geometrician) == 0:
                Utils.createGeometrician(name, firstname)
                geometrician = self.catalog(portal_type='Geometrician', Title=names)
            # Only an unambiguous single match is linked; anything else is logged.
            if len(geometrician) == 1:
                return geometrician[0].getObject()
            self.logError(self, line, 'no geometricians found or too much geometricians found',
                          {
                              'raw_name': raw_name,
                              'name': name,
                              'firstname': firstname,
                              'search_result': len(geometrician)
                          })
        return []
class PcaUIDMapper(Mapper):
    """Find the PcaTerm id matching the 'PPA' title."""

    def mapPca(self, line):
        """Return the matching PcaTerm id, or [] when none applies."""
        title = self.getData('PPA')
        if title:
            catalog = api.portal.get_tool('portal_catalog')
            brains = catalog(portal_type='PcaTerm', Title=title)
            # Robustness: the previous code indexed brains[0] unconditionally
            # and crashed with IndexError when no PcaTerm matched the title.
            if brains:
                return brains[0].id
        return []
class IsInPcaMapper(Mapper):
    """A licence is in a PCA when a 'PPA' title is encoded."""

    def mapIsinpca(self, line):
        return bool(self.getData('PPA'))
class EnvRubricsMapper(Mapper):
    """Copy the 'LibNat' rubric label, decoded to unicode, as description."""

    def mapDescription(self, line):
        return Utils().convertToUnicode(self.getData('LibNat'))
class CompletionStateMapper(PostCreationMapper):
    """Fire the workflow transition matching the decision of the licence."""

    def map(self, line, plone_object):
        self.line = line
        transition = None
        if AcropoleImporterSettings.file_type == 'old':
            # Old format: only an explicit 'REFUS' decision is a refusal.
            type_decision = self.getData('Type Decision')
            if type_decision == 'REFUS':
                transition = 'refuse'
            else:
                transition = 'accept'
        else:
            if plone_object.portal_type in ['BuildLicence', 'ParcelOutLicence']:
                # The four decision dates (including appeal) determine the state.
                datePermis = self.getData('Date Permis')
                dateRefus = self.getData('Date Refus')
                datePermisRecours = self.getData('Date Permis sur recours')
                dateRefusRecours = self.getData('Date Refus sur recours')
                transition = get_state_from_licences_dates(datePermis, dateRefus, datePermisRecours, dateRefusRecours)
            elif plone_object.portal_type == 'Declaration':
                if self.getData('DURDecision') == 'Favorable':
                    transition = 'accept'
                elif self.getData('DURDecision') == 'Défavorable':
                    transition = 'refuse'
            elif plone_object.portal_type == 'UrbanCertificateTwo':
                if self.getData('CU2Decision') == 'Favorable':
                    transition = 'accept'
                elif self.getData('CU2Decision') == 'Défavorable':
                    transition = 'refuse'
            elif plone_object.portal_type == 'EnvClassThree':
                if self.getData('DENDecision') == 'irrecevable':
                    transition = 'refuse'
                elif self.getData('DENDecision') == 'OK sans conditions' or self.getData('DENDecision') == 'OK avec conditions':
                    transition = 'accept'
        # No transition when the decision columns are empty or unrecognized.
        if transition:
            api.content.transition(plone_object, transition)
            # api.content.transition(plone_object, 'nonapplicable')
class ErrorsMapper(FinalMapper):
    """Prepend a human-readable summary of import errors to the description."""

    def mapDescription(self, line, plone_object):
        """Return the error trace (HTML) followed by the existing description."""
        line_number = self.importer.current_line
        errors = self.importer.errors.get(line_number, None)
        description = plone_object.Description()
        error_trace = []
        if errors:
            for error in errors:
                data = error.data
                if 'streets' in error.message:
                    error_trace.append('<p>adresse : %s</p>' % data['address'])
                elif 'notaries' in error.message:
                    error_trace.append('<p>notaire : %s %s %s</p>' % (data['title'], data['firstname'], data['name']))
                elif 'architects' in error.message:
                    error_trace.append('<p>architecte : %s</p>' % data['raw_name'])
                elif 'geometricians' in error.message:
                    error_trace.append('<p>géomètre : %s</p>' % data['raw_name'])
                elif 'parcels' in error.message and AcropoleImporterSettings.file_type == 'old':
                    error_trace.append('<p>parcels : %s </p>' % data['args'])
                elif 'rubric' in error.message.lower():
                    error_trace.append('<p>Rubrique non trouvée : %s</p>' % (data['rubric']))
                elif 'parcelling' in error.message:
                    # Bug fix: 'search result' is logged as an int (len(...)),
                    # so the previous comparison against the string '0' could
                    # never be true and the 'not found' wording was unreachable.
                    if data['search result'] == 0:
                        error_trace.append('<p>lotissement non trouvé : %s </p>' % data['titre'])
                    else:
                        error_trace.append("<p>lotissement trouvé plus d'une fois: %s : %s fois</p>" % (data['titre'], data['search result']))
                elif 'article' in error.message.lower():
                    error_trace.append('<p>Articles de l\'enquête : %s</p>' % (data['articles']))
        error_trace = ''.join(error_trace)
        return '%s%s' % (error_trace, description)
#
# CONTACT
#
# factory
class ContactFactory(BaseFactory):
    """Create contact objects; the portal type depends on the licence container."""
    def getPortalType(self, container, **kwargs):
        # Certificate and notary-letter procedures hold proprietaries,
        # every other licence type holds an applicant.
        proprietary_containers = ('UrbanCertificateOne', 'UrbanCertificateTwo', 'NotaryLetter')
        return 'Proprietary' if container.portal_type in proprietary_containers else 'Applicant'
# mappers
class ContactIdMapper(Mapper):
    """Build a unique contact id from the applicant's name columns and line id."""
    def mapId(self, line):
        name = '%s%s%s' % (self.getData('NomDemandeur1'), self.getData('PrenomDemandeur1'), self.getData('id'))
        # Strip characters that are not wanted inside a content id.
        name = name.replace(' ', '').replace('-', '')
        return normalizeString(self.site.portal_urban.generateUniqueId(name))
class ContactIdOldMapper(Mapper):
    """Unique contact id for the old file format (single 'Nom Demandeur' column)."""
    def mapId(self, line):
        name = '%s%s' % (self.getData('Nom Demandeur'), self.getData('id'))
        name = name.replace(' ', '').replace('-', '')
        return normalizeString(self.site.portal_urban.generateUniqueId(name))
class ContactTitleMapper(Mapper):
    """Translate the raw civility field into an urban person-title id."""
    def mapPersontitle(self, line):
        # 'Civi' wins over 'Civi2' when both columns are filled.
        title1 = self.getData('Civi').lower()
        title = title1 or self.getData('Civi2').lower()
        title_mapping = self.getValueMapping('titre_map')
        # Unknown titles fall back to 'notitle'.
        return title_mapping.get(title, 'notitle')
class ContactNameMapper(Mapper):
    """Map the applicant name, keeping any non-standard civility as a prefix."""
    def mapName1(self, line):
        """Return the applicant name; an unrecognized 'Civi2' title is
        prepended so no information is lost."""
        civility = self.getData('Civi2')
        applicant_name = self.getData('D_Nom')
        known_titles = (
            'M.',
            'M et Mlle',
            'M et Mme',
            'M. et Mme',
            'M. l\'Architecte',
            'M. le président',
            'Madame',
            'Madame Vve',
            'Mademoiselle',
            'Maître',
            'Mlle et Monsieur',
            'Mesdames',
            'Mesdemoiselles',
            'Messieurs',
            'Mlle',
            'MM',
            'Mme',
            'Mme et M',
            'Monsieur',
            'Monsieur,',
            'Monsieur et Madame',
            'Monsieur l\'Architecte',
        )
        if civility in known_titles:
            return applicant_name
        return '%s %s' % (civility, applicant_name)
class ContactSreetMapper(Mapper):
    """Extract the street part (leading non-digit text) of the raw address.

    NOTE: class name keeps the historical 'Sreet' typo — it is referenced
    by the importer configuration.
    """
    def mapStreet(self, line):
        raw_address = self.getData('D_Adres')
        # Capture everything before the first digit/comma; fall back to the
        # whole raw value when the pattern does not match.
        parsed = re.match('((?:[^\d,]+\s*)+),?', raw_address)
        return parsed.group(1) if parsed else raw_address
class ContactNumberMapper(Mapper):
    """Extract the house number: whatever follows the street text."""
    def mapNumber(self, line):
        raw_address = self.getData('D_Adres')
        parsed = re.match('(?:[^\d,]+\s*)+,?\s*(.*)', raw_address)
        # No match means the address has no recognizable street prefix.
        return parsed.group(1) if parsed else ''
class ContactPhoneMapper(Mapper):
    """Concatenate landline and mobile numbers, skipping empty values."""
    def mapPhone(self, line):
        landline = self.getData('D_Tel')
        mobile = self.getData('D_GSM')
        # Keep only the filled-in numbers, separated by a single space.
        numbers = [value for value in (landline, mobile) if value]
        return ' '.join(numbers)
#
# PARCEL
#
#factory
class ParcelFactory(BaseFactory):
    """Create parcel objects, validating them against the cadastral database."""
    def create(self, parcel, container=None, line=None):
        """Look the parcel up through the 'searchparcels' view and create it,
        flagging whether it matches exactly one official cadastral parcel."""
        searchview = self.site.restrictedTraverse('searchparcels')
        #need to trick the search browser view about the args in its request
        parcel_args = parcel.to_dict()
        parcel_args.pop('partie')
        for k, v in parcel_args.iteritems():
            searchview.context.REQUEST[k] = v
        #check if we can find a parcel in the db cadastre with these infos
        found = searchview.findParcel(**parcel_args)
        if not found:
            # Retry against historical parcels before giving up.
            found = searchview.findParcel(browseoldparcels=True, **parcel_args)
        if len(found) == 1 and parcel.has_same_attribute_values(found[0]):
            # Exactly one matching official parcel.
            parcel_args['divisionCode'] = parcel_args['division']
            parcel_args['isOfficialParcel'] = True
        else:
            self.logError(self, line, 'Too much parcels found or not enough parcels found', {'args': parcel_args, 'search result': len(found)})
            parcel_args['isOfficialParcel'] = False
        parcel_args['id'] = parcel.id
        parcel_args['partie'] = parcel.partie
        return super(ParcelFactory, self).create(parcel_args, container=container)
    def objectAlreadyExists(self, parcel, container):
        """Return the existing object with this parcel's id, or None."""
        existing_object = getattr(container, parcel.id, None)
        return existing_object
# mappers
class ParcelDataMapper(Mapper):
    """Parse the two parcel-reference column groups of a line into
    CadastralReference objects."""
    def map(self, line, **kwargs):
        # First parcel group: keep only the first letter of the section.
        section = self.getData('Parcelle1section', line).upper()
        if len(section) > 0:
            section = section[0]
        remaining_reference = '%s %s' % (self.getData('Parcelle1numero', line), self.getData('Parcelle1numerosuite', line))
        # NOTE(review): this string always contains at least a space, so the
        # emptiness checks below can never trigger as written — confirm intent.
        if not remaining_reference:
            return []
        abbreviations = identify_parcel_abbreviations(remaining_reference)
        # Division code depends on the works locality (Wauthier-Braine or not).
        division = '25111' if self.getData('AdresseTravauxVille', line) == 'Wauthier-Braine' else '25015'
        if not remaining_reference or not section or not abbreviations:
            return []
        # The first abbreviation is a full reference; later ones are guessed
        # relative to it.
        base_reference = parse_cadastral_reference(division + section + abbreviations[0])
        base_reference = CadastralReference(*base_reference)
        parcels = [base_reference]
        for abbreviation in abbreviations[1:]:
            new_parcel = guess_cadastral_reference(base_reference, abbreviation)
            parcels.append(new_parcel)
        # Second parcel group, same logic as the first.
        section2 = self.getData('Parcelle2section', line).upper()
        if section2 :
            section2 = section2[0]
            remaining_reference2 = '%s %s' % (self.getData('Parcelle2numero', line), self.getData('Parcelle2numerosuite', line))
            if not remaining_reference2:
                return []
            abbreviations2 = identify_parcel_abbreviations(remaining_reference2)
            if not remaining_reference2 or not section2:
                return []
            base_reference2 = parse_cadastral_reference(division + section2 + abbreviations2[0])
            base_reference2 = CadastralReference(*base_reference2)
            for abbreviation2 in abbreviations2[1:]:
                new_parcel2 = guess_cadastral_reference(base_reference2, abbreviation2)
                parcels.append(new_parcel2)
        return parcels
class OldParcelDataMapper(Mapper):
    """Old-format parcel mapper: parsing is disabled — the raw cadastral
    reference is only logged as a 'parcels' error and no object is created."""
    def map(self, line, **kwargs):
        raw_parcel = self.getData('Cadastre', line)
        if raw_parcel:
            self.logError(self, line, 'parcels', {'args': raw_parcel})
        # The original parsing logic is kept below for reference.
        # section = raw_parcel[0].upper()
        # remaining_reference = raw_parcel[1:]
        # remaining_reference = remaining_reference.replace("-","").strip()
        # if not remaining_reference:
        #     return []
        # abbreviations = identify_parcel_abbreviations(remaining_reference)
        # division = '25015'
        # if not remaining_reference or not section or not abbreviations:
        #     return []
        # base_reference = parse_cadastral_reference(division + section + abbreviations[0])
        #
        # base_reference = CadastralReference(*base_reference)
        #
        # parcels = [base_reference]
        # for abbreviation in abbreviations[1:]:
        #     new_parcel = guess_cadastral_reference(base_reference, abbreviation)
        #     self.logError(self, line, 'parcels', {'args': new_parcel})
        #     parcels.append(new_parcel)
        #
        # return parcels
        raise NoObjectToCreateException
#
# UrbanEvent deposit
#
# factory
class UrbanEventFactory(BaseFactory):
    """Create UrbanEvent objects on a licence container."""
    def getPortalType(self, **kwargs):
        return 'UrbanEvent'
    def create(self, kwargs, container, line):
        """Create the event for the given event-type UID; a falsy UID means
        nothing to create."""
        eventtype_uid = kwargs['eventtype']
        if not eventtype_uid:
            return []
        del kwargs['eventtype']
        return container.createUrbanEvent(eventtype_uid, **kwargs)
#mappers
class DepositEventMapper(Mapper):
    """Resolve the UID of the deposit event type configured for the licence."""
    def mapEventtype(self, line):
        licence = self.importer.current_containers_stack[-1]
        urban_tool = api.portal.get_tool('portal_urban')
        eventtype_id = self.getValueMapping('eventtype_id_map')[licence.portal_type]['deposit_event']
        config = urban_tool.getUrbanConfig(licence)
        return getattr(config.urbaneventtypes, eventtype_id).UID()
class DepositDateMapper(Mapper):
    """Parse the deposit date (dd/mm/yyyy); missing date means no event."""
    def mapEventdate(self, line):
        date = self.getData('Recepisse')
        if not date:
            # No deposit date recorded: skip creating the event entirely.
            raise NoObjectToCreateException
        date = datetime.datetime.strptime(date, "%d/%m/%Y")
        return date
class DepositEventIdMapper(Mapper):
    """Fixed id for the deposit event object."""
    def mapId(self, line):
        return 'depot-de-la-demande'
# UrbanEvent transmitted decision
class TransmittedIdMapper(Mapper):
    """Fixed id for the 'transmitted decision' event object."""
    def mapId(self, line):
        return 'transmis-decision'
class TransmittedEventMapper(Mapper):
    """Resolve the UID of the 'transmis-decision' event type."""
    def mapEventtype(self, line):
        licence = self.importer.current_containers_stack[-1]
        urban_tool = api.portal.get_tool('portal_urban')
        eventtype_id = 'transmis-decision'
        config = urban_tool.getUrbanConfig(licence)
        return getattr(config.urbaneventtypes, eventtype_id).UID()
class DateTransmissionMapper(Mapper):
    """Parse the transmission date (dd/mm/yyyy); missing date means no event."""
    def mapEventdate(self, line):
        date = self.getData('DURDateTransmission')
        if not date:
            raise NoObjectToCreateException
        date = datetime.datetime.strptime(date, "%d/%m/%Y")
        return date
class DateTransmissionEventIdMapper(Mapper):
    """Fixed id for the transmission event object."""
    def mapId(self, line):
        return 'transmis-decision'
#
# UrbanEvent ask opinions
#
# factory
class OpinionMakersFactory(BaseFactory):
    """Factory for opinion-request events; behavior comes from BaseFactory."""
#mappers
class OpinionMakersTableMapper(Mapper):
    """Map values from the secondary table attached to the current line.

    Only the first secondary line and the first registered sub-mapper are
    ever used: the method returns on the very first inner iteration.
    """
    def map(self, line, **kwargs):
        for secondary_line in self.query_secondary_table(line):
            for mapper in self.mappers:
                # Deliberate early return: apply only the first mapper of the
                # first secondary line. (An unreachable `break` after this
                # return was removed.)
                return mapper.map(secondary_line, **kwargs)
        # No secondary line, or no sub-mapper configured.
        return []
class OpinionMakersMapper(Mapper):
    """Build one event-args dict per consulted organisation (columns Org1..Org10)."""
    def map(self, line):
        opinionmakers_args = []
        for i in range(1, 11):
            opinionmakers_id = self.getData('Org{}'.format(i), line)
            if not opinionmakers_id:
                # Columns are assumed filled contiguously: the first empty one
                # ends the scan.
                # NOTE(review): when Org1 itself is empty this returns []
                # instead of reaching the NoObjectToCreateException below —
                # confirm that is the intended behavior.
                return opinionmakers_args
            event_date = self.getData('Cont{}'.format(i), line)
            receipt_date = self.getData('Rec{}'.format(i), line)
            args = {
                'id': opinionmakers_id,
                'eventtype': opinionmakers_id,
                'eventDate': event_date and DateTime(event_date) or None,
                'transmitDate': event_date and DateTime(event_date) or None,
                'receiptDate': receipt_date and DateTime(receipt_date) or None,
                'receivedDocumentReference': self.getData('Ref{}'.format(i), line),
            }
            opinionmakers_args.append(args)
        # Only reached when all ten columns were filled.
        if not opinionmakers_args:
            raise NoObjectToCreateException
        return opinionmakers_args
class LinkedInquiryMapper(PostCreationMapper):
    """After creation, link the opinion event to the licence's latest inquiry."""
    def map(self, line, plone_object):
        opinion_event = plone_object
        licence = opinion_event.aq_inner.aq_parent
        # Fall back to the licence itself when it holds no inquiry objects.
        inquiry = licence.getInquiries() and licence.getInquiries()[-1] or licence
        opinion_event.setLinkedInquiry(inquiry)
#
# Claimant
#
# factory
class ClaimantFactory(BaseFactory):
    """Factory for inquiry claimant objects."""
    def getPortalType(self, container, **kwargs):
        return 'Claimant'
#mappers
class ClaimantIdMapper(Mapper):
    """Unique id from the claimant's name; an empty name means no claimant."""
    def mapId(self, line):
        name = '%s%s' % (self.getData('RECNom'), self.getData('RECPrenom'))
        name = name.replace(' ', '').replace('-', '')
        if not name:
            raise NoObjectToCreateException
        return normalizeString(self.site.portal_urban.generateUniqueId(name))
class ClaimantTitleMapper(Mapper):
    """Translate the claimant civility into an urban person-title id."""
    def mapPersontitle(self, line):
        title = self.getData('Civi_Rec').lower()
        title_mapping = self.getValueMapping('titre_map')
        return title_mapping.get(title, 'notitle')
class ClaimantSreetMapper(Mapper):
    """Extract the street part (leading non-digit text) of the claimant address."""
    def mapStreet(self, line):
        raw_address = self.getData('RECAdres')
        # Capture everything before the first digit/comma; fall back to the
        # whole raw value when the pattern does not match.
        parsed = re.match('((?:[^\d,]+\s*)+),?', raw_address)
        return parsed.group(1) if parsed else raw_address
class ClaimantNumberMapper(Mapper):
    """Extract the house number of the claimant address."""
    def mapNumber(self, line):
        raw_address = self.getData('RECAdres')
        parsed = re.match('(?:[^\d,]+\s*)+,?\s*(.*)', raw_address)
        return parsed.group(1) if parsed else ''
#
# UrbanEvent second RW
#
#mappers
class SecondRWEventTypeMapper(Mapper):
    """Resolve the UID of the 'second RW file transmission' event type."""
    def mapEventtype(self, line):
        licence = self.importer.current_containers_stack[-1]
        urban_tool = api.portal.get_tool('portal_urban')
        eventtype_id = 'transmis-2eme-dossier-rw'
        config = urban_tool.getUrbanConfig(licence)
        return getattr(config.urbaneventtypes, eventtype_id).UID()
class SecondRWEventDateMapper(Mapper):
    """Transmission date of the second RW file; missing date means no event."""
    def mapEventdate(self, line):
        date = self.getData('UR_Datenv2')
        date = date and DateTime(date) or None
        if not date:
            raise NoObjectToCreateException
        return date
class SecondRWDecisionMapper(Mapper):
    """Translate the RW opinion into the external-decision vocabulary."""
    def mapExternaldecision(self, line):
        raw_decision = self.getData('UR_Avis')
        # Unknown opinions map to an empty value.
        decision = self.getValueMapping('externaldecisions_map').get(raw_decision, [])
        return decision
class SecondRWDecisionDateMapper(Mapper):
    """RW decision date, or None when the column is empty."""
    def mapDecisiondate(self, line):
        date = self.getData('UR_Datpre')
        date = date and DateTime(date) or None
        return date
class SecondRWReceiptDateMapper(Mapper):
    """RW receipt date, or None when the column is empty."""
    def mapReceiptdate(self, line):
        date = self.getData('UR_Datret')
        date = date and DateTime(date) or None
        return date
#
# UrbanEvent decision
#
#mappers
class DecisionEventTypeMapper(Mapper):
    """Resolve the UID of the decision event type configured for the licence."""
    def mapEventtype(self, line):
        licence = self.importer.current_containers_stack[-1]
        urban_tool = api.portal.get_tool('portal_urban')
        eventtype_id = self.getValueMapping('eventtype_id_map')[licence.portal_type]['decision_event']
        config = urban_tool.getUrbanConfig(licence)
        return getattr(config.urbaneventtypes, eventtype_id).UID()
class DecisionEventIdMapper(Mapper):
    """Fixed id for the decision event object."""
    def mapId(self, line):
        return 'decision_event'
class DecisionEventDateMapper(Mapper):
    """Compute the decision date of the decision event from licence-specific columns."""
    def mapDecisiondate(self, line):
        licence = self.importer.current_containers_stack[-1]
        if licence.portal_type in ['BuildLicence', 'ParcelOutLicence', 'EnvClassOne', 'EnvClassTwo']:
            datePermis = self.getData('Date Permis')
            dateRefus = self.getData('Date Refus')
            datePermisRecours = self.getData('Date Permis sur recours')
            dateRefusRecours = self.getData('Date Refus sur recours')
            date = get_date_from_licences_dates(datePermis, dateRefus, datePermisRecours, dateRefusRecours)
            if not date:
                self.logError(self, line, 'No decision date found')
                raise NoObjectToCreateException
        elif licence.portal_type == 'Declaration':
            # Fallback chain: decision date, then transmission date, then the
            # configured default when only the decision label is present.
            date = self.getData('DURDateDecision')
            if not date:
                date = self.getData('DURDateTransmission')
            if not date:
                decision = self.getData('DURDecision')
                if decision:
                    date = self.getValueMapping('default_date_decision')
        elif licence.portal_type == 'UrbanCertificateTwo':
            date = self.getData('CU2DateDecision')
        # NOTE(review): 'date' is unbound for any other portal type and may
        # still be empty for Declaration — strptime would then raise; confirm
        # those cases cannot occur upstream.
        return datetime.datetime.strptime(date, "%d/%m/%Y")
class DecisionEventDecisionMapper(Mapper):
    """Return the decision label of the licence's decision event."""
    def mapDecision(self, line):
        licence = self.importer.current_containers_stack[-1]
        portal_type = licence.portal_type
        if portal_type == 'Declaration':
            return self.getData('DURDecision')
        if portal_type == 'UrbanCertificateTwo':
            return self.getData('CU2Decision')
        if portal_type in ('BuildLicence', 'ParcelOutLicence', 'EnvClassOne', 'EnvClassTwo'):
            # Derive the decision from whichever of the four date columns is set.
            state = get_state_from_licences_dates(
                self.getData('Date Permis'),
                self.getData('Date Refus'),
                self.getData('Date Permis sur recours'),
                self.getData('Date Refus sur recours'),
            )
            if state == 'accept':
                return u'Favorable'
            if state == 'refuse':
                return u'Défavorable'
        # Implicitly returns None for any other portal type / state.
class DecisionEventNotificationDateMapper(Mapper):
    """Compute the notification (event) date of the decision event."""
    def mapEventdate(self, line):
        licence = self.importer.current_containers_stack[-1]
        if licence.portal_type in ['BuildLicence', 'ParcelOutLicence', 'EnvClassOne', 'EnvClassTwo']:
            datePermis = self.getData('Date Permis')
            dateRefus = self.getData('Date Refus')
            datePermisRecours = self.getData('Date Permis sur recours')
            dateRefusRecours = self.getData('Date Refus sur recours')
            eventDate = get_date_from_licences_dates(datePermis, dateRefus, datePermisRecours, dateRefusRecours)
        elif licence.portal_type == 'Declaration':
            # Fallback chain: decision date, then transmission date, then the
            # configured default when only the decision label is present.
            eventDate = self.getData('DURDateDecision')
            if not eventDate:
                eventDate = self.getData('DURDateTransmission')
            decision = self.getData('DURDecision')
            if decision and not eventDate:
                eventDate = self.getValueMapping('default_date_decision')
        elif licence.portal_type == 'UrbanCertificateTwo':
            eventDate = self.getData('CU2DateDecision')
        # NOTE(review): 'eventDate' is unbound for any other portal type —
        # confirm that cannot occur upstream.
        if eventDate:
            return datetime.datetime.strptime(eventDate, "%d/%m/%Y")
        else:
            # No date at all: do not create the event.
            raise NoObjectToCreateException
class EnvClassThreeCondAcceptabilityEventIdMapper(Mapper):
    """Fixed id for the conditional-acceptance event object."""
    def mapId(self, line):
        return 'acceptation-de-la-demande-cond'
class EnvClassThreeCondAcceptabilityEventMapper(Mapper):
    """UID of the conditional-acceptance event type, when present in config."""
    def mapEventtype(self, line):
        licence = self.importer.current_containers_stack[-1]
        urban_tool = api.portal.get_tool('portal_urban')
        eventtype_id = 'acceptation-de-la-demande-cond'
        config = urban_tool.getUrbanConfig(licence)
        # Implicitly returns None when the event type is not configured.
        if hasattr(config.urbaneventtypes, eventtype_id):
            return getattr(config.urbaneventtypes, eventtype_id).UID()
class EventDateEnvClassThreeCondAcceptabilityMapper(Mapper):
    """Event date for a conditional acceptance ('OK avec conditions')."""
    def mapEventdate(self, line):
        """Return the parsed acceptance date.

        Raises NoObjectToCreateException for any other decision value.
        """
        if self.getData('DENDecision') != "OK avec conditions":
            raise NoObjectToCreateException
        event_date = self.getData('DENDatePriseActeAvecConditions')
        if not event_date:
            # No explicit date recorded: use the configured default.
            event_date = self.getValueMapping('default_date_decision')
        # (Removed the original's unreachable trailing `return eventDate`.)
        return datetime.datetime.strptime(event_date, "%d/%m/%Y")
class EnvClassThreeAcceptabilityEventIdMapper(Mapper):
    """Fixed id for the unconditional-acceptance event object."""
    def mapId(self, line):
        return 'acceptation-de-la-demande'
class EnvClassThreeAcceptabilityEventMapper(Mapper):
    """UID of the unconditional-acceptance event type, when present in config."""
    def mapEventtype(self, line):
        licence = self.importer.current_containers_stack[-1]
        urban_tool = api.portal.get_tool('portal_urban')
        eventtype_id = 'acceptation-de-la-demande'
        config = urban_tool.getUrbanConfig(licence)
        # Implicitly returns None when the event type is not configured.
        if hasattr(config.urbaneventtypes, eventtype_id):
            return getattr(config.urbaneventtypes, eventtype_id).UID()
class EventDateEnvClassThreeAcceptabilityMapper(Mapper):
    """Event date for an unconditional acceptance ('OK sans conditions')."""
    def mapEventdate(self, line):
        """Return the parsed acceptance date.

        Raises NoObjectToCreateException for any other decision value.
        """
        if self.getData('DENDecision') != "OK sans conditions":
            raise NoObjectToCreateException
        event_date = self.getData('DENDatePriseActeSansConditions')
        if not event_date:
            # No explicit date recorded: use the configured default.
            event_date = self.getValueMapping('default_date_decision')
        # (Removed the original's unreachable trailing `return eventDate`.)
        return datetime.datetime.strptime(event_date, "%d/%m/%Y")
class EnvClassThreeUnacceptabilityEventIdMapper(Mapper):
    """Id for the refusal event object.

    NOTE(review): this returns 'acceptation-de-la-demande' although the
    matching event type below is 'refus-de-la-demande' — looks like a
    copy/paste leftover; confirm which id downstream code expects before
    changing it.
    """
    def mapId(self, line):
        return 'acceptation-de-la-demande'
class EnvClassThreeUnacceptabilityEventMapper(Mapper):
    """UID of the 'refus-de-la-demande' event type, when present in config."""
    def mapEventtype(self, line):
        licence = self.importer.current_containers_stack[-1]
        urban_tool = api.portal.get_tool('portal_urban')
        eventtype_id = 'refus-de-la-demande'
        config = urban_tool.getUrbanConfig(licence)
        if hasattr(config.urbaneventtypes, eventtype_id):
            return getattr(config.urbaneventtypes, eventtype_id).UID()
class EventDateEnvClassThreeUnacceptabilityMapper(Mapper):
    """Event date for a refused ('irrecevable') declaration."""
    def mapEventdate(self, line):
        """Return the parsed refusal date.

        Raises NoObjectToCreateException for any other decision value.
        """
        if self.getData('DENDecision') != "irrecevable":
            raise NoObjectToCreateException
        event_date = self.getData('DENDateIrrecevable')
        if not event_date:
            # No explicit date recorded: use the configured default.
            event_date = self.getValueMapping('default_date_decision')
        # (Removed the original's unreachable trailing `return eventDate`.)
        return datetime.datetime.strptime(event_date, "%d/%m/%Y")
class OldDecisionEventDateMapper(Mapper):
    """Parse the old-format decision date (dd.mm.yy).

    Returns None when the raw value is not a valid date.
    """
    def mapDecisiondate(self, line):
        raw_date = self.getData('DENDatePriseActeAvecConditions')
        try:
            decision_date = datetime.datetime.strptime(raw_date, "%d.%m.%y")
        except ValueError:
            return None
        if decision_date > datetime.datetime.now():
            # strptime maps two-digit years into 1969-2068, so a "future" date
            # actually belongs to the previous century.
            # BUGFIX: `datetime` is the module here, so the original bare
            # datetime(...) call raised TypeError ('module' is not callable).
            decision_date = datetime.datetime(
                decision_date.year - 100, decision_date.month, decision_date.day)
        return decision_date
class OldDecisionEventDecisionMapper(Mapper):
    """Translate the old 'Type Decision' field into a decision label."""
    def mapDecision(self, line):
        is_refusal = self.getData('Type Decision') == 'REFUS'
        return u'Défavorable' if is_refusal else u'Favorable'
class OldDecisionEventNotificationDateMapper(Mapper):
    """Parse 'Date Permis' (dd.mm.yy) as the notification date.

    Raises NoObjectToCreateException on an unparsable date.
    """
    def mapEventdate(self, line):
        raw_date = self.getData('Date Permis')
        try:
            event_date = datetime.datetime.strptime(raw_date, "%d.%m.%y")
        except ValueError:
            raise NoObjectToCreateException
        if event_date > datetime.datetime.now():
            # Two-digit years parsed into the future belong to the previous
            # century. BUGFIX: use datetime.datetime — the original bare
            # datetime(...) call raised TypeError ('module' is not callable).
            event_date = datetime.datetime(
                event_date.year - 100, event_date.month, event_date.day)
        return event_date
class CollegeReportTypeMapper(Mapper):
    """Resolve the UID of the college report event type for the licence."""
    def mapEventtype(self, line):
        licence = self.importer.current_containers_stack[-1]
        urban_tool = api.portal.get_tool('portal_urban')
        eventtype_id = self.getValueMapping('eventtype_id_map')[licence.portal_type]['college_report_event']
        config = urban_tool.getUrbanConfig(licence)
        return getattr(config.urbaneventtypes, eventtype_id).UID()
class CollegeReportIdMapper(Mapper):
    """Fixed id for the college report event object."""
    def mapId(self, line):
        return 'college_report_event'
class CollegeReportEventDateMapper(Mapper):
    """College report date; skip event creation when missing.

    NOTE(review): unlike the other date mappers this returns the raw value
    without parsing it — confirm downstream accepts it.
    """
    def mapEventdate(self, line):
        eventDate = self.getData('Rapport du College')
        if eventDate:
            return eventDate
        else:
            raise NoObjectToCreateException
class CompleteFolderEventMapper(Mapper):
    """UID of the 'complete folder' event type, when present in config."""
    def mapEventtype(self, line):
        licence = self.importer.current_containers_stack[-1]
        urban_tool = api.portal.get_tool('portal_urban')
        eventtype_id = self.getValueMapping('eventtype_id_map')[licence.portal_type]['complete_folder']
        config = urban_tool.getUrbanConfig(licence)
        # Implicitly returns None when the event type is not configured.
        if hasattr(config.urbaneventtypes, eventtype_id):
            return getattr(config.urbaneventtypes, eventtype_id).UID()
class CompleteFolderDateMapper(Mapper):
    """Parse 'PENDtDossierComplet' (dd/mm/yyyy).

    Raises NoObjectToCreateException when the column is missing or unparsable.
    """
    def mapEventdate(self, line):
        raw_date = self.getData('PENDtDossierComplet')
        if not raw_date:
            raise NoObjectToCreateException
        try:
            event_date = datetime.datetime.strptime(raw_date, "%d/%m/%Y")
        except ValueError:
            raise NoObjectToCreateException
        if event_date > datetime.datetime.now():
            # Defensive century fix (rare with a 4-digit %Y). BUGFIX: use
            # datetime.datetime — the original bare datetime(...) call raised
            # TypeError ('module' is not callable).
            event_date = datetime.datetime(
                event_date.year - 100, event_date.month, event_date.day)
        return event_date
class CompleteFolderEventIdMapper(Mapper):
    """Fixed id for the 'complete folder' event object."""
    def mapId(self, line):
        return 'complete_folder'
class IncompleteFolderEventMapper(Mapper):
    """UID of the 'dossier-incomplet' event type, when present in config."""
    def mapEventtype(self, line):
        licence = self.importer.current_containers_stack[-1]
        urban_tool = api.portal.get_tool('portal_urban')
        eventtype_id = ('dossier-incomplet')
        config = urban_tool.getUrbanConfig(licence)
        # Implicitly returns None when the event type is not configured.
        if hasattr(config.urbaneventtypes, eventtype_id):
            return getattr(config.urbaneventtypes, eventtype_id).UID()
class IncompleteFolderDateMapper(Mapper):
    """Parse 'PENDtDossierIncomplet' (dd/mm/yyyy).

    Raises NoObjectToCreateException when the column is missing or unparsable.
    """
    def mapEventdate(self, line):
        raw_date = self.getData('PENDtDossierIncomplet')
        if not raw_date:
            raise NoObjectToCreateException
        try:
            event_date = datetime.datetime.strptime(raw_date, "%d/%m/%Y")
        except ValueError:
            raise NoObjectToCreateException
        if event_date > datetime.datetime.now():
            # Defensive century fix (rare with a 4-digit %Y). BUGFIX: use
            # datetime.datetime — the original bare datetime(...) call raised
            # TypeError ('module' is not callable).
            event_date = datetime.datetime(
                event_date.year - 100, event_date.month, event_date.day)
        return event_date
class IncompleteFolderEventIdMapper(Mapper):
    """Fixed id for the 'incomplete folder' event object."""
    def mapId(self, line):
        return 'incomplete_folder'
#
# UrbanEvent suspension
#
# factory
class SuspensionEventFactory(UrbanEventFactory):
    """Create suspension events; the suspension reason is set after creation."""
    def create(self, kwargs, container, line):
        if not kwargs['eventtype']:
            return []
        eventtype_uid = kwargs.pop('eventtype')
        # 'suspensionReason' is not a createUrbanEvent argument: pop it first
        # and set it on the created event afterwards.
        suspension_reason = kwargs.pop('suspensionReason')
        urban_event = container.createUrbanEvent(eventtype_uid, **kwargs)
        urban_event.setSuspensionReason(suspension_reason)
        return urban_event
#
# Documents
#
# factory
class DocumentsFactory(BaseFactory):
    """Factory for imported document attachments (plain File objects)."""
    def getPortalType(self, container, **kwargs):
        return 'File'
# *** Utils ***
class Utils():
    """Static helpers shared by the importer mappers (Python 2 code base)."""
    @staticmethod
    def convertToUnicode(string):
        """Decode a str as iso-8859-15 (adds the euro and oe characters over
        iso-8859-1) and NFKC-normalize it; unicode input passes through.

        Returns an empty string for empty, non-str or undecodable input.
        """
        if isinstance(string, unicode):
            return string
        data = ""
        if string and isinstance(string, str):
            try:
                data = unicodedata.normalize('NFKC', unicode(string, "iso-8859-15"))
            except UnicodeDecodeError:
                # BUGFIX: the original dropped into an ipdb breakpoint here;
                # fall back to the empty-string default instead.
                pass
        return data
    @staticmethod
    def createArchitect(name):
        """Create an Architect under /urban/architects unless an object with
        the normalized id already exists."""
        idArchitect = idnormalizer.normalize(name + 'Architect').replace(" ", "")
        containerArchitects = api.content.get(path='/urban/architects')
        # The original checked the same membership twice; once is enough.
        if idArchitect not in containerArchitects.objectIds():
            containerArchitects.invokeFactory('Architect', id=idArchitect,
                                              name1=name)
    @staticmethod
    def createGeometrician(name1, name2):
        """Create a Geometrician under /urban/geometricians unless an object
        with the normalized id already exists."""
        idGeometrician = idnormalizer.normalize(name1 + name2 + 'Geometrician').replace(" ", "")
        containerGeometricians = api.content.get(path='/urban/geometricians')
        if idGeometrician not in containerGeometricians.objectIds():
            containerGeometricians.invokeFactory('Geometrician', id=idGeometrician,
                                                 name1=name1,
                                                 name2=name2)
"julien.jaumotte@imio.be"
] | julien.jaumotte@imio.be |
de317dc6879183437dc69a03ede0e6a143c57d62 | 6d86360bc6ae5c39128d3480c6382c58a6e7c5c1 | /Code_With_Us/src/sim/model/parts/bondburn.py | dcb2aa459e6730d24f39b9ceb366c97d2316b613 | [
"MIT"
] | permissive | CineDAO/Risk-Adjusted-Bonding-Curves | 0c3809f488f818235875d68881a7262b1d35d1df | 56b4f714dc145cca1503b7c80d6a7b38c1e2e3fc | refs/heads/master | 2023-08-16T01:41:23.121213 | 2021-10-02T13:45:47 | 2021-10-02T13:45:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,818 | py |
def update_R(params, substep, state_history, prev_state, policy_input):
    """cadCAD state-update: recompute the bond reserve R after a bond/burn.

    Burning deltaS tokens on the curve releases
    deltaR = R - (S - deltaS)**kappa / V from the reserve; bonding adds
    `amt_to_bond` (tempered by THETA when continuous funding is enabled).
    Returns the ('reserve', new_value) tuple cadCAD expects.
    """
    R = prev_state['reserve']
    S = prev_state['supply']
    V = prev_state['invariant_V']
    kappa = prev_state['kappa']
    deltaS = policy_input['amt_to_burn']
    if V == 0:
        print("V IS ZERO")  # degenerate
        # BUGFIX: deltaR was left unbound here, crashing below with a
        # NameError whenever ENABLE_BURN was set. With no invariant there is
        # no curve to burn against, so no reserve leaves.
        deltaR = 0
    else:
        deltaR = R - (((S - deltaS) ** kappa) / V)
    ## Continuous ##
    if params['ENABLE_CONTINUOUS']:
        # Continuous funding: THETA share of newly bonded funds is skimmed
        # off to project funding, the rest enters the reserve.
        R = R + policy_input['amt_to_bond'] * (1 - params['THETA'])
        if params['ENABLE_BURN']:
            R = R - deltaR
    else:
        if params['ENABLE_BURN']:
            # Burning on the bonding curve is allowed: subtract burned funds.
            R = R + policy_input['amt_to_bond'] - deltaR
        else:
            # Burning happens elsewhere (e.g. uniswap): only add bonded funds.
            R = R + policy_input['amt_to_bond']
    return 'reserve', R
def update_funds(params, substep, state_history, prev_state, policy_input):
    """cadCAD state-update: accumulate project funding F.

    With continuous funding enabled, THETA share of the newly bonded amount
    goes to funding; the monthly instalment is always added.
    Returns the ('funds_from_bond', new_value) tuple cadCAD expects.
    """
    F = prev_state['funds_from_bond']
    V = prev_state['invariant_V']
    monthly_instalment = policy_input['monthly_instalment']
    if V == 0:
        print("V IS ZERO")  # degenerate
        # BUGFIX: deltaF was left unbound here, crashing on `F += deltaF`.
        deltaF = 0
    elif params['ENABLE_CONTINUOUS']:
        ## Continuous ##
        deltaF = policy_input['amt_to_bond'] * (params['THETA'])
    else:
        deltaF = 0
    F += deltaF
    F += monthly_instalment
    return 'funds_from_bond', F
def update_S(params, substep, state_history, prev_state, policy_input):
    """cadCAD state-update: recompute the token supply S after a bond/burn.

    Bonding deltaR mints deltaS = (V * (R + deltaR))**(1/kappa) - S tokens;
    burned tokens are subtracted afterwards.
    """
    reserve = prev_state['reserve']
    supply = prev_state['supply']
    invariant = prev_state['invariant_V']
    kappa = prev_state['kappa']
    bonded = policy_input['amt_to_bond']
    minted = (invariant * (reserve + bonded)) ** (1 / kappa) - supply
    new_supply = supply + minted - policy_input['amt_to_burn']
    return 'supply', new_supply
def update_r(params, substep, state_history, prev_state, policy_input):
    """cadCAD state-update: the chosen agent's reserve after a bond/burn.

    The agent pays `amt_to_bond` and receives the reserve released by
    burning `amt_to_burn` tokens on the curve.
    """
    reserve = prev_state['reserve']
    supply = prev_state['supply']
    invariant = prev_state['invariant_V']
    kappa = prev_state['kappa']
    agent_reserve = prev_state['chosen_agent']['agent_reserve']
    burned = policy_input['amt_to_burn']
    if invariant == 0:
        print("V IS ZERO")
    else:
        released = reserve - ((supply - burned) ** kappa) / invariant
        agent_reserve = agent_reserve - policy_input['amt_to_bond'] + released
    return 'agent_reserve', agent_reserve
def update_s_free_bondburn(params, substep, state_history, prev_state, policy_input):
    """cadCAD state-update: the agent's freely held token supply.

    Tokens minted for the agent's deposit are added; burned tokens are
    subtracted.
    """
    reserve = prev_state['reserve']
    supply = prev_state['supply']
    invariant = prev_state['invariant_V']
    kappa = prev_state['kappa']
    free_supply = prev_state['agent_supply_free']
    bonded = policy_input['amt_to_bond']
    minted = (invariant * (reserve + bonded)) ** (1 / kappa) - supply
    return 'agent_supply_free', free_supply + minted - policy_input['amt_to_burn']
def compute_r(R, S, V, kappa, r, deltaS, policy_input):
    """Return the agent's updated reserve after bonding/burning.

    With a degenerate curve (V == 0) the bonded amount itself becomes the
    holding; otherwise the agent pays the bonded amount and receives the
    reserve released by burning deltaS tokens.
    """
    bonded = policy_input['amt_to_bond']
    if V == 0:
        return bonded
    released = R - ((S - deltaS) ** kappa) / V
    return r - bonded + released
def compute_s_free(R, S, V, kappa, s_free, deltaR, policy_input, timestep):
    """Return the agent's updated free supply after bonding deltaR reserve.

    Tokens minted for the deposit are added and burned tokens subtracted.
    `timestep` is kept for interface compatibility: a disabled experiment
    added a "random drop" on every 20th step, but both branches assigned
    zero, so the dead code was removed.
    """
    deltas = (V * (R + deltaR)) ** (1 / kappa) - S
    return s_free + deltas - policy_input['amt_to_burn']
def update_agent_BC(params, substep, state_history, prev_state, policy_input):
    """cadCAD state-update: refresh the chosen agent's reserve and free supply.

    Note: mutates the agent dict held in prev_state in place, as the
    original did.
    """
    reserve = prev_state['reserve']
    supply = prev_state['supply']
    invariant = prev_state['invariant_V']
    kappa = prev_state['kappa']
    agent = prev_state['chosen_agent']
    agent['agent_reserve'] = compute_r(
        reserve, supply, invariant, kappa,
        agent['agent_reserve'], policy_input['amt_to_burn'], policy_input)
    agent['agent_supply_free'] = compute_s_free(
        reserve, supply, invariant, kappa,
        agent['agent_supply_free'], policy_input['amt_to_bond'],
        policy_input, prev_state['timestep'])
    return 'chosen_agent', agent
def update_P_bondburn(params, substep, state_history, prev_state, policy_input):
    """cadCAD state-update: the spot price P implied by the last bond/burn.

    P is the realized ratio deltaR/deltaS; when the token delta is zero the
    analytic spot price kappa * R**((kappa-1)/kappa) / V**(1/kappa) is used.
    """
    amt_to_bond = policy_input['amt_to_bond']
    amt_to_burn = policy_input['amt_to_burn']
    kappa = prev_state['kappa']
    R = prev_state['reserve']
    S = prev_state['supply']
    V = prev_state['invariant_V']
    if amt_to_bond > 0:  # bond
        deltaR = amt_to_bond
        deltaS = (V*(R+deltaR))**(1/kappa)-S
        if deltaS == 0:
            P = kappa*(R**((kappa-1.0)/kappa)/(float(V) **
                (1.0/float(kappa)))) # Zero handling
            # return 'spot_price', P
        else:
            P = deltaR/deltaS  # deltaR/deltaS
            # return 'spot_price', P
    elif amt_to_burn > 0:  # burn
        deltaS = amt_to_burn
        deltaR = R - (((S-deltaS)**kappa)/V)
        if deltaS == 0:
            P = kappa*(R**((kappa-1.0)/kappa)/(float(V) **
                (1.0/float(kappa)))) # Zero handling
            # return 'spot_price', P
        else:
            P = deltaR/deltaS  # deltaR/deltaS
            # return 'spot_price', P
    elif amt_to_burn == 0:
        # Neither bonding nor burning: analytic spot price.
        P = kappa*(R**((kappa-1.0)/kappa)/(float(V) **
            (1.0/float(kappa)))) # Zero handling
    elif amt_to_bond == 0:
        # Only reachable with a negative burn amount: keep the previous price.
        P = prev_state['spot_price']
    else:
        # Both amounts negative: ratio of the raw (negative) amounts.
        P = amt_to_bond/amt_to_burn
    #print("PRICE (BOND/BURN): ", P)
    # print("SPOT PRICE P (from bondburn update) = ", P)
    return 'spot_price', P
def update_pbar(params, substep, state_history, prev_state, policy_input):
    """cadCAD state-update: realized price pbar = deltaR / deltaS of the
    last action; the previous pbar is kept when no trade moved the curve."""
    R = prev_state['reserve']
    S = prev_state['supply']
    V = prev_state['invariant_V']
    kappa = prev_state['kappa']
    burned = policy_input['amt_to_burn']
    bonded = policy_input['amt_to_bond']
    realized_price = prev_state['pbar']  # fallback when nothing changed
    if burned != 0:
        # Burn: reserve released per token burned.
        released = R - ((S - burned) ** kappa) / V
        if released != 0:
            realized_price = released / burned
    elif bonded != 0:
        # Bond: reserve bonded per token minted.
        minted = (V * (R + bonded)) ** (1 / kappa) - S
        if minted != 0:
            realized_price = bonded / minted
    return 'pbar', realized_price
def update_I_bondburn(params, substep, state_history, prev_state, policy_input):
    """cadCAD state-update: the invariant I = (R + deltaR) + C * alpha."""
    new_reserve = prev_state['reserve'] + policy_input['amt_to_bond']
    attestation_value = params['C'] * prev_state['alpha']
    return 'invariant_I', new_reserve + attestation_value
| [
"andrewtaylorclark@gmail.com"
] | andrewtaylorclark@gmail.com |
3be52c23ed541d99741e87747c2063b628607ae5 | 9c1ec6c3a826a40f9911894b63af6e265b70792c | /src/EmailClient.py | d34c7782fba23f512be609273cd7e8f0c391d5e9 | [] | no_license | Duskamo/beachhouse_email | c84c3907791fcc792ec86a5ebecffcd43aedcf73 | a4d3b30691d9b8c5cf8ae0e606496ec1c11353fd | refs/heads/master | 2020-07-05T01:14:03.636340 | 2020-03-15T16:17:26 | 2020-03-15T16:17:26 | 202,480,086 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,068 | py |
import smtplib, ssl
from .Data import Data
class EmailClient:
    """Thin SMTP-over-SSL client that relays website messages to the owner."""
    def __init__(self):
        # Gmail SSL endpoint; credentials and addresses come from Data.
        self.port = 465
        self.smtp_server = "smtp.gmail.com"
        self.dlandryClientEmail = Data.clientEmail
        self.dlandryClientPassword = Data.clientPassword
        self.dlandryServerEmail = Data.serverEmail
    def addUserEmail(self, userEmail):
        """Record the sender's e-mail address."""
        self.userEmail = userEmail
    def addUserName(self, userName):
        """Record the sender's display name."""
        self.userName = userName
    def addSubject(self, subject):
        """Record the subject line."""
        self.subject = subject
    def addMessage(self, message):
        """Compose the full e-mail body from subject, sender info and text."""
        subject_line = "Subject: {}".format(self.subject)
        body = (
            "\"{}\"".format(self.userName)
            + " with email address: "
            + "\"{}\"".format(self.userEmail)
            + " sent you the below message. \n\n"
            + message
        )
        self.message = subject_line + "\n\n" + body
    def send(self):
        """Deliver the composed message through Gmail's SSL SMTP endpoint."""
        ssl_context = ssl.create_default_context()
        with smtplib.SMTP_SSL(self.smtp_server, self.port, context=ssl_context) as server:
            server.login(self.dlandryClientEmail, self.dlandryClientPassword)
            server.sendmail(self.dlandryClientEmail, self.dlandryServerEmail, self.message)
| [
"dyanownz@yahoo.com"
] | dyanownz@yahoo.com |
d58b48a3b4056d05eabee5719a20f32556470175 | 497ad4c998a60f1aacab0dfccd5c034b9f60c11b | /Agentes/main.py | e108db6c21b55e8f554cb02466300a208d0ca15a | [] | no_license | LazardStrife/agents | b19bd1947dbff67cb36480d3b38254c22bc5a31b | 74ed19f646aac5d4e33a0d6c5bc0eb9abed29f60 | refs/heads/main | 2023-01-19T09:15:45.943878 | 2020-12-01T05:15:08 | 2020-12-01T05:15:08 | 317,379,594 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,060 | py | from simulation_logic import Initial_Scenary
import copy as cpy
def _run_to_completion(environment):
    """Advance *environment* turn by turn until it reports an outcome.

    Returns ``(won, dirtiness)`` where ``won`` is True when the run ended with
    "Kids in cribs and floor cleaned", and ``dirtiness`` is the environment's
    final ``dirtyness_percentage()``.
    """
    while True:
        result = environment.perform_turn()
        if result != "Nothing happened":
            break
    won = result == "Kids in cribs and floor cleaned"
    return won, environment.dirtyness_percentage()


def run_simulations_with_robot_type_A():
    """Run 30 paired simulations per scenario and print win/lose/dirtiness stats.

    For every trial, model B runs on a copy of model A's freshly generated
    environment, so both models face exactly the same initial state.
    The previously duplicated A/B run loops are factored into
    ``_run_to_completion``; printed output is unchanged.
    """
    scenaries = [Initial_Scenary(5, 5, 40, 15, 4, 50),
                 Initial_Scenary(7, 9, 30, 10, 6, 100),
                 Initial_Scenary(8, 10, 25, 30, 8, 150),
                 Initial_Scenary(11, 11, 30, 15, 10, 200),
                 Initial_Scenary(13, 7, 37, 25, 9, 300),
                 Initial_Scenary(19, 16, 30, 15, 12, 400),
                 Initial_Scenary(15, 18, 38, 20, 9, 600),
                 Initial_Scenary(21, 19, 24, 35, 11, 500),
                 Initial_Scenary(12, 26, 50, 20, 12, 600),
                 Initial_Scenary(27, 20, 40, 36, 19, 700)]
    trials = 30
    for i, scenary in enumerate(scenaries):
        win_amount_A = 0
        lose_amount_A = 0
        dirtiness_percent_A = 0
        win_amount_B = 0
        lose_amount_B = 0
        dirtiness_percent_B = 0
        for _ in range(trials):
            env_A = scenary.generate_environment()
            # Model B gets an identical copy so the comparison is fair.
            env_B = env_A.copy_with_alternate_model()
            won, dirtiness = _run_to_completion(env_A)
            if won:
                win_amount_A += 1
            else:
                lose_amount_A += 1
            dirtiness_percent_A += dirtiness
            won, dirtiness = _run_to_completion(env_B)
            if won:
                win_amount_B += 1
            else:
                lose_amount_B += 1
            dirtiness_percent_B += dirtiness
        dirtiness_percent_A /= trials
        dirtiness_percent_B /= trials
        print("Scenary #" + str(i + 1) + ":")
        print("Model A win amount: " + str(win_amount_A))
        print("Model A lose amount: " + str(lose_amount_A))
        print("Model A dirtyness mean: " + str(dirtiness_percent_A))
        print("Model B win amount: " + str(win_amount_B))
        print("Model B lose amount: " + str(lose_amount_B))
        print("Model B dirtyness mean: " + str(dirtiness_percent_B))
        print("")
def print_board(environment):
    """Print the environment's board one row per line.

    Each cell is rendered in a three-character slot: ``-1`` cells as-is,
    the robot's cell prefixed with ``-`` (unless the cell value is 1),
    and every other cell padded with a leading space.
    """
    robot = environment.robot
    for row_idx, row in enumerate(environment.board):
        cells = []
        for col_idx, value in enumerate(row):
            if value == -1:
                cells.append(str(value) + " ")
            elif value != 1 and robot.row == row_idx and robot.column == col_idx:
                cells.append("-" + str(value) + " ")
            else:
                cells.append(" " + str(value) + " ")
        print("".join(cells))
def main():
    """Entry point: run the model-A vs model-B simulation comparison."""
    run_simulations_with_robot_type_A()
if __name__ == "__main__":
main() | [
"lazardstrife@gmail.com"
] | lazardstrife@gmail.com |
a98df5dcac6f3e85ccedc80dfed62a64df0ae5a2 | 0deb8c7dcc24b2901b4dc294faf676eb95f4072a | /source/cronjobs.py | fc27609b85f6b00271e693ef2f21f9ab85812b92 | [
"MIT"
] | permissive | Mikumiku747/Student-Planner-Online-GAE | a295a236bf75089b6237288029eecaf07fe5f415 | e923c32715476323324b52f9f2d29cf9ecabc90f | refs/heads/master | 2021-01-22T03:14:04.211192 | 2014-11-02T05:47:32 | 2014-11-02T05:47:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 534 | py | #Cronjobs
#Handles automated scheduled events, such as the deletion of past events and switching between weeks a and b.
from google.appengine.api import users
from google.appengine.ext import ndb
import webapp2
import jinja2
from timetable import Timetable
class WeekSwitchingScript(webapp2.RequestHandler):
    """Cron-triggered handler that flips every timetable between week A and
    week B by toggling its ``isweeka`` flag."""

    def get(self):
        # Toggle and persist each stored timetable in turn.
        for timetable in Timetable.query().fetch():
            timetable.isweeka = not timetable.isweeka
            timetable.put()
# WSGI route table: the cron job hits /admin/weekswitch to trigger the toggle.
app = webapp2.WSGIApplication([
    ('/admin/weekswitch', WeekSwitchingScript)],
    debug=True)
| [
"Mikumiku747@gmail.com"
] | Mikumiku747@gmail.com |
ecb5492cf6eb5cfe2f5ed4a178027f8f18d979ae | c80a1a5414f18ed039cd83e7f88d567f40375aad | /get_data.py | 82ca654dbbc170df3ddf31e05dc0cf995bd7aea7 | [] | no_license | jorbau/NetflixAR | c5e222326b89647d1cfc360c7890f66a2b855147 | 7a65a757dd00e0a6dead072824a94209a2a80ed4 | refs/heads/main | 2023-05-07T08:01:10.988178 | 2021-06-01T00:12:51 | 2021-06-01T00:12:51 | 372,163,533 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,740 | py | from justwatch import JustWatch
import json
# JustWatch client scoped to the Spanish ("ES") catalogue.
just_watch = JustWatch(country="ES")
l = []
# First request only supplies the page count; items are collected per page below.
# NOTE(review): entries appear to be kept only when a poster exists — confirm intended.
results_by_providers = just_watch.search_for_item(providers=['nfx'], page=0, content_types=['movie'], release_year_from = 2014)
for x in range(results_by_providers["total_pages"]):
    results_by_providers = just_watch.search_for_item(providers=['nfx'], page=x, content_types=['movie'], release_year_from = 2014)
    for i in results_by_providers["items"]:
        dic = dict()
        if "poster" in i:
            # Poster path ends with a size placeholder; replace it with "s592".
            dic["url_image"] = "https://images.justwatch.com"+i["poster"][:-9]+"s592"
            for s in i["scoring"]:
                if s["provider_type"] == 'imdb:score':
                    dic["score"] = s["value"]
                    break
            dic["title"] = i["title"]
            dic["obj_type"] = i["object_type"]
            dic["id"] = i["id"]
            l.append(dic)
# Second pass: movies released up to 2013, complementing the >= 2014 query above.
results_by_providers = just_watch.search_for_item(providers=['nfx'], page=0, content_types=['movie'], release_year_until = 2013)
for x in range(results_by_providers["total_pages"]):
    results_by_providers = just_watch.search_for_item(providers=['nfx'], page=x, content_types=['movie'], release_year_until = 2013)
    for i in results_by_providers["items"]:
        dic = dict()
        if "poster" in i:
            # Same entry shape as the first pass.
            dic["url_image"] = "https://images.justwatch.com"+i["poster"][:-9]+"s592"
            for s in i["scoring"]:
                if s["provider_type"] == 'imdb:score':
                    dic["score"] = s["value"]
                    break
            dic["title"] = i["title"]
            dic["obj_type"] = i["object_type"]
            dic["id"] = i["id"]
            l.append(dic)
with open('Netflix_data.json', 'w') as json_file:
json.dump(l, json_file) | [
"noreply@github.com"
] | noreply@github.com |
e6ea4a04ec64beed34cd77b78d93ca77f9f6c4b0 | ae1976bc9db8134156027aeae1481b6ee6ae776d | /day1/Day1_PythonCode/day1_python_programming_16.py | ac7d85c91a936ca365a9765d452e9bee9ae27a73 | [] | no_license | curieuxjy/DS-for-PPM | d9f6f94c3c01e37b81a19b11e52d458aad102fb3 | 70f37e8cf72bbf6351ba8ee657218758e2bd182f | refs/heads/master | 2021-01-03T11:43:01.220817 | 2020-02-15T17:45:53 | 2020-02-15T17:45:53 | 240,068,518 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 538 | py |
# [Exercise 5] Stock price lookup
import bs4
from urllib.request import urlopen
# Stock code appended to the Naver Finance quote ("sise") URL.
idx_stock = '004170'
naver_index = 'https://finance.naver.com/item/sise.nhn?code=' + idx_stock
source = urlopen(naver_index).read()
source = bs4.BeautifulSoup(source, 'lxml')
#print(source.prettify())
#curPrice1 = source.find_all('em', class_='no_up')
# First <em class="no_up"> element carries the current-price markup.
curPrice1 = source.find_all('em', class_='no_up')[0]
print(curPrice1)
# Text of its first <span> child, e.g. "1,234,000".
curPrice2 = curPrice1.find_all('span')[0].text
print(curPrice2)
#
# Strip thousands separators to obtain an integer price.
curPrice3 = int(curPrice2.replace(',',''))
print(curPrice3)
| [
"skyfiower9721@gmail.com"
] | skyfiower9721@gmail.com |
ef3fd60375149ee9caa3c39d0bb96692633d0fe4 | 4fbc275e56b70eaaa636d2ec43bd47bb26e107de | /tests/format/fistr/test_write_fistr.py | 9ddcd0c8226f6f7fb3fbf33d38a1514451a5048d | [
"Apache-2.0"
] | permissive | yas/femio | 1d0cce174a438fc3b51899a95f28445e076f03a1 | 1b9b07ebea04edcd39ded6c556e707bc4ad58a45 | refs/heads/master | 2023-06-09T03:13:11.420691 | 2021-07-01T09:47:35 | 2021-07-01T09:47:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 39,166 | py | from glob import glob
import os
from pathlib import Path
import subprocess
import shutil
import unittest
import numpy as np
from femio.fem_attribute import FEMAttribute
# from femio.fem_attributes import FEMAttributes
from femio.fem_elemental_attribute import FEMElementalAttribute
from femio.fem_data import FEMData
RUN_FISTR = True
class TestWriteFistr(unittest.TestCase):
    def test_write_fistr_static(self):
        """Round-trip a static model through write/read; check orientations,
        element groups and (when RUN_FISTR) re-solved displacements."""
        fem_data = FEMData.read_files('fistr', [
            'tests/data/fistr/thermal/hex.msh',
            'tests/data/fistr/thermal/hex.cnt',
            'tests/data/fistr/thermal/hex.res.0.1'])

        write_file_name = 'tests/data/fistr/write_static/mesh'
        if os.path.isfile(write_file_name + '.msh'):
            os.remove(write_file_name + '.msh')
        if os.path.isfile(write_file_name + '.cnt'):
            os.remove(write_file_name + '.cnt')
        fem_data.write(
            'fistr', file_name=write_file_name, overwrite=True)

        written_fem_data = FEMData.read_files('fistr', [
            write_file_name + '.msh',
            write_file_name + '.cnt'])
        np.testing.assert_almost_equal(
            written_fem_data.elemental_data['ORIENTATION'].data,
            fem_data.elemental_data['ORIENTATION'].data)
        np.testing.assert_array_equal(
            list(fem_data.element_groups.keys()),
            list(written_fem_data.element_groups.keys()))
        for v1, v2 in zip(
                list(fem_data.element_groups.values()),
                list(written_fem_data.element_groups.values())):
            np.testing.assert_array_equal(v1, v2)

        if RUN_FISTR:
            subprocess.check_call(
                "fistr1", cwd=os.path.dirname(write_file_name), shell=True)
            written_fem_data_with_res = FEMData.read_files('fistr', [
                write_file_name + '.msh',
                write_file_name + '.cnt',
                write_file_name + '.res.0.1'])
            np.testing.assert_almost_equal(
                written_fem_data_with_res.nodal_data['DISPLACEMENT'].data,
                fem_data.nodal_data['DISPLACEMENT'].data, decimal=5)
    def test_write_fistr_static_id_not_from_1(self):
        """Same round-trip as test_write_fistr_static, but with node/element
        IDs that do not start at 1."""
        fem_data = FEMData.read_files('fistr', [
            'tests/data/fistr/thermal_id_not_from_1/hex.msh',
            'tests/data/fistr/thermal_id_not_from_1/hex.cnt',
            'tests/data/fistr/thermal_id_not_from_1/hex.res.0.1'])

        write_file_name = 'tests/data/fistr/write_static_id_not_from_1/mesh'
        if os.path.isfile(write_file_name + '.msh'):
            os.remove(write_file_name + '.msh')
        if os.path.isfile(write_file_name + '.cnt'):
            os.remove(write_file_name + '.cnt')
        fem_data.write(
            'fistr', file_name=write_file_name, overwrite=True)

        written_fem_data = FEMData.read_files('fistr', [
            write_file_name + '.msh',
            write_file_name + '.cnt'])
        np.testing.assert_almost_equal(
            written_fem_data.elemental_data['ORIENTATION'].data,
            fem_data.elemental_data['ORIENTATION'].data)
        np.testing.assert_array_equal(
            list(fem_data.element_groups.keys()),
            list(written_fem_data.element_groups.keys()))
        for v1, v2 in zip(
                list(fem_data.element_groups.values()),
                list(written_fem_data.element_groups.values())):
            np.testing.assert_array_equal(v1, v2)

        if RUN_FISTR:
            subprocess.check_call(
                "fistr1", cwd=os.path.dirname(write_file_name), shell=True)
            written_fem_data_with_res = FEMData.read_files('fistr', [
                write_file_name + '.msh',
                write_file_name + '.cnt',
                write_file_name + '.res.0.1'])
            np.testing.assert_almost_equal(
                written_fem_data_with_res.nodal_data['DISPLACEMENT'].data,
                fem_data.nodal_data['DISPLACEMENT'].data, decimal=5)
    def test_write_fistr_heat(self):
        """Round-trip a transient heat model; verify initial temperatures and
        (when RUN_FISTR) the re-solved temperature field."""
        fem_data = FEMData.read_files('fistr', [
            'tests/data/fistr/heat/hex.msh',
            'tests/data/fistr/heat/hex.cnt',
            'tests/data/fistr/heat/hex.res.0.100'])

        write_file_name = 'tests/data/fistr/write_heat/mesh'
        if os.path.isfile(write_file_name + '.msh'):
            os.remove(write_file_name + '.msh')
        if os.path.isfile(write_file_name + '.cnt'):
            os.remove(write_file_name + '.cnt')
        fem_data.write(
            'fistr', file_name=write_file_name, overwrite=True)

        written_fem_data = FEMData.read_files('fistr', [
            write_file_name + '.msh',
            write_file_name + '.cnt'])
        np.testing.assert_almost_equal(
            written_fem_data.nodal_data['INITIAL_TEMPERATURE'].data,
            fem_data.nodal_data['INITIAL_TEMPERATURE'].data
        )

        if RUN_FISTR:
            subprocess.check_call(
                "fistr1", cwd=os.path.dirname(write_file_name), shell=True)
            written_fem_data_with_res = FEMData.read_files('fistr', [
                write_file_name + '.msh',
                write_file_name + '.cnt',
                write_file_name + '.res.0.100'])
            np.testing.assert_almost_equal(
                written_fem_data_with_res.nodal_data['TEMPERATURE'].data,
                fem_data.nodal_data['TEMPERATURE'].data)
    def test_write_fistr_overwrite_error(self):
        """write() onto existing files without overwrite=True raises ValueError."""
        fem_data = FEMData.read_files('fistr', [
            'tests/data/fistr/heat/hex.msh',
            'tests/data/fistr/heat/hex.cnt',
            'tests/data/fistr/heat/hex.res.0.100'])
        with self.assertRaises(ValueError):
            fem_data.write(
                'fistr', file_name='tests/data/fistr/heat/hex')
    def test_write_fistr_heat_steady(self):
        """Round-trip a steady-state heat model; verify fixtemp constraints and
        (when RUN_FISTR) the re-solved temperatures."""
        fem_data = FEMData.read_files('fistr', [
            'tests/data/fistr/heat_steady/hex.msh',
            'tests/data/fistr/heat_steady/hex.cnt',
            'tests/data/fistr/heat_steady/hex.res.0.1'])

        write_file_name = 'tests/data/fistr/write_heat_steady/mesh'
        if os.path.isfile(write_file_name + '.msh'):
            os.remove(write_file_name + '.msh')
        if os.path.isfile(write_file_name + '.cnt'):
            os.remove(write_file_name + '.cnt')
        fem_data.write(
            'fistr', file_name=write_file_name, overwrite=True)

        written_fem_data = FEMData.read_files('fistr', [
            write_file_name + '.msh',
            write_file_name + '.cnt'])
        np.testing.assert_almost_equal(
            written_fem_data.constraints['fixtemp'].data,
            fem_data.constraints['fixtemp'].data
        )

        if RUN_FISTR:
            subprocess.check_call(
                "fistr1", cwd=os.path.dirname(write_file_name), shell=True)
            written_fem_data_with_res = FEMData.read_files('fistr', [
                write_file_name + '.msh',
                write_file_name + '.cnt',
                write_file_name + '.res.0.1'])
            np.testing.assert_almost_equal(
                written_fem_data_with_res.nodal_data['TEMPERATURE'].data,
                fem_data.nodal_data['TEMPERATURE'].data)
    def test_write_fistr_cload(self):
        """Round-trip a model carrying concentrated-load (cload) constraints."""
        fem_data = FEMData.read_files('fistr', [
            'tests/data/fistr/cload/hex.msh',
            'tests/data/fistr/cload/hex.cnt'])

        write_file_name = 'tests/data/fistr/write_cload/mesh'
        if os.path.isfile(write_file_name + '.msh'):
            os.remove(write_file_name + '.msh')
        if os.path.isfile(write_file_name + '.cnt'):
            os.remove(write_file_name + '.cnt')
        fem_data.write(
            'fistr', file_name=write_file_name, overwrite=True)

        written_fem_data = FEMData.read_files('fistr', [
            write_file_name + '.msh',
            write_file_name + '.cnt'])
        np.testing.assert_almost_equal(
            written_fem_data.constraints['cload'].data,
            fem_data.constraints['cload'].data
        )

        # if RUN_FISTR:
        #     os.system(f"cd {os.path.dirname(write_file_name)} "
        #               + "&& fistr1 > /dev/null 2>&1")
        #     written_fem_data_with_res = FEMData.read_files('fistr', [
        #         write_file_name + '.msh',
        #         write_file_name + '.cnt',
        #         write_file_name + '.res.0.1'])
        #     np.testing.assert_almost_equal(
        #         written_fem_data_with_res.nodal_data['TEMPERATURE'].data,
        #         fem_data.nodal_data['TEMPERATURE'].data)
    def test_write_fistr_cload_full(self):
        """Attach random cloads to every node/DOF, write, and check each DOF
        column survives the round trip."""
        fem_data = FEMData.read_files('fistr', [
            'tests/data/fistr/cload/hex.msh'])
        cload_data = np.random.rand(len(fem_data.nodes), 3)
        fem_data.constraints.update_data(
            fem_data.nodes.ids, {'cload': cload_data})

        write_file_name = 'tests/data/fistr/write_cload_full/mesh'
        if os.path.isfile(write_file_name + '.msh'):
            os.remove(write_file_name + '.msh')
        if os.path.isfile(write_file_name + '.cnt'):
            os.remove(write_file_name + '.cnt')
        fem_data.write(
            'fistr', file_name=write_file_name, overwrite=True)

        written_fem_data = FEMData.read_files('fistr', [
            write_file_name + '.msh',
            write_file_name + '.cnt'])
        # Written cloads come back grouped per DOF (one block of n rows each).
        n = len(fem_data.nodes)
        for dof in range(3):
            np.testing.assert_almost_equal(
                written_fem_data.constraints['cload'].data[
                    dof*n:(dof+1)*n, dof], cload_data[:, dof])
    def test_write_fistr_static_overwrite(self):
        """Overwrite lte and modulus with random data and verify the written
        files contain the new values."""
        fem_data = FEMData.read_files('fistr', [
            'tests/data/fistr/thermal/hex.msh',
            'tests/data/fistr/thermal/hex.cnt',
            'tests/data/fistr/thermal/hex.res.0.1'])

        write_file_name = 'tests/data/fistr/write_static_overwrite/mesh'
        if os.path.isfile(write_file_name + '.msh'):
            os.remove(write_file_name + '.msh')
        if os.path.isfile(write_file_name + '.cnt'):
            os.remove(write_file_name + '.cnt')

        data = np.random.rand(*fem_data.elemental_data.get_attribute_data(
            'lte').shape)
        fem_data.elemental_data.overwrite('lte', data)
        data = np.random.rand(*fem_data.elemental_data.get_attribute_data(
            'modulus').shape)
        fem_data.elemental_data.overwrite('modulus', data)

        fem_data.write(
            'fistr', file_name=write_file_name, overwrite=True)

        written_fem_data = FEMData.read_files('fistr', [
            write_file_name + '.msh',
            write_file_name + '.cnt'])
        np.testing.assert_almost_equal(
            written_fem_data.elemental_data.get_attribute_data('lte'),
            fem_data.elemental_data.get_attribute_data('lte'))
        np.testing.assert_almost_equal(
            written_fem_data.elemental_data.get_attribute_data('modulus'),
            fem_data.elemental_data.get_attribute_data('modulus'))
    def test_write_boundary(self):
        """Round-trip boundary constraints of a tet2 mesh (data and IDs)."""
        fem_data = FEMData.read_directory(
            'fistr', 'tests/data/fistr/tet2_bnd', read_npy=False, save=False)
        write_file_name = 'tests/data/fistr/write_tet2_bnd/mesh'
        write_dir_name = os.path.dirname(write_file_name)
        if os.path.exists(write_dir_name):
            shutil.rmtree(write_dir_name)
        fem_data.write(
            'fistr', file_name=write_file_name, overwrite=True)

        written_fem_data = FEMData.read_directory(
            'fistr', write_dir_name, read_npy=False, save=False)
        np.testing.assert_almost_equal(
            written_fem_data.constraints['boundary'].data,
            fem_data.constraints['boundary'].data)
        np.testing.assert_array_equal(
            written_fem_data.constraints['boundary'].ids,
            fem_data.constraints['boundary'].ids)
    def test_write_fistr_lte_full(self):
        """Round-trip full (anisotropic) linear thermal expansion; re-solve and
        compare displacements when RUN_FISTR."""
        fem_data = FEMData.read_directory(
            'fistr', 'tests/data/fistr/thermal_full', read_npy=False,
            save=False)
        write_dir_name = 'tests/data/fistr/write_thermal_full'
        if os.path.exists(write_dir_name):
            shutil.rmtree(write_dir_name)
        fem_data.write('fistr', write_dir_name + '/mesh')
        written_fem_data = FEMData.read_directory(
            'fistr', write_dir_name, read_npy=False, save=False)
        np.testing.assert_almost_equal(
            written_fem_data.elemental_data.get_attribute_data('lte_full'),
            fem_data.elemental_data.get_attribute_data('lte_full'),
        )
        if RUN_FISTR:
            subprocess.check_call("fistr1", cwd=write_dir_name, shell=True)
            written_fem_data_with_res = FEMData.read_directory(
                'fistr', write_dir_name, read_npy=False, save=False)
            np.testing.assert_almost_equal(
                written_fem_data_with_res.nodal_data['DISPLACEMENT'].data,
                fem_data.nodal_data['DISPLACEMENT'].data, decimal=5)
    def test_write_fistr_convert_lte_full_to_lte_local_1(self):
        """Convert global lte_full to local lte on an easy model, checking the
        known converted values, then re-solve and compare elemental strain."""
        fem_data = FEMData.read_directory(
            'fistr', 'tests/data/fistr/thermal_full_easy', read_npy=False,
            save=False)
        write_dir_name = 'tests/data/fistr/write_thermal_convert_easy'
        if os.path.exists(write_dir_name):
            shutil.rmtree(write_dir_name)
        fem_data.convert_lte_global2local()
        np.testing.assert_almost_equal(
            fem_data.elemental_data.get_attribute_data('lte')[0] * 1e7,
            np.array([1.0, 2.0, 3.0])
        )
        fem_data.write('fistr', write_dir_name + '/mesh')
        if RUN_FISTR:
            subprocess.check_call("fistr1", cwd=write_dir_name, shell=True)
            written_fem_data_with_res = FEMData.read_directory(
                'fistr', write_dir_name, read_npy=False, save=False)
            np.testing.assert_almost_equal(
                written_fem_data_with_res.elemental_data.get_attribute_data(
                    'ElementalSTRAIN'),
                fem_data.elemental_data.get_attribute_data(
                    'ElementalSTRAIN'), decimal=5)
    def test_write_fistr_convert_lte_full_to_lte_local_2(self):
        """Same global-to-local lte conversion on the general thermal_full
        model; compare elemental strain after re-solving."""
        fem_data = FEMData.read_directory(
            'fistr', 'tests/data/fistr/thermal_full', read_npy=False,
            save=False)
        write_dir_name = 'tests/data/fistr/write_thermal_convert'
        if os.path.exists(write_dir_name):
            shutil.rmtree(write_dir_name)
        fem_data.convert_lte_global2local()
        fem_data.write('fistr', write_dir_name + '/mesh')
        if RUN_FISTR:
            subprocess.check_call("fistr1", cwd=write_dir_name, shell=True)
            written_fem_data_with_res = FEMData.read_directory(
                'fistr', write_dir_name, read_npy=False, save=False)
            np.testing.assert_almost_equal(
                written_fem_data_with_res.elemental_data.get_attribute_data(
                    'ElementalSTRAIN'),
                fem_data.elemental_data.get_attribute_data(
                    'ElementalSTRAIN'), decimal=5)
    def test_write_fistr_quad(self):
        """Round-trip a quad-element model and compare solved displacements."""
        fem_data = FEMData.read_directory(
            'fistr', 'tests/data/fistr/quad', read_npy=False,
            save=False)
        write_dir_name = 'tests/data/fistr/write_quad'
        if os.path.exists(write_dir_name):
            shutil.rmtree(write_dir_name)
        fem_data.write('fistr', write_dir_name + '/mesh')
        if RUN_FISTR:
            subprocess.check_call("fistr1", cwd=write_dir_name, shell=True)
            written_fem_data_with_res = FEMData.read_directory(
                'fistr', write_dir_name, read_npy=False, save=False)
            np.testing.assert_almost_equal(
                written_fem_data_with_res.nodal_data.get_attribute_data(
                    'DISPLACEMENT'),
                fem_data.nodal_data.get_attribute_data(
                    'DISPLACEMENT'), decimal=5)
    def test_write_fistr_no_solition_type(self):
        """Writing still works after 'solution_type' is removed from settings;
        round-trip data and (when RUN_FISTR) displacements are preserved."""
        fem_data = FEMData.read_directory(
            'fistr', 'tests/data/fistr/thermal', read_npy=False, save=False)
        fem_data.settings.pop('solution_type')

        write_dir_name = 'tests/data/fistr/write_static_wo_solution'
        if os.path.exists(write_dir_name):
            shutil.rmtree(write_dir_name)
        fem_data.write(
            'fistr', write_dir_name + '/mesh', overwrite=True)

        written_fem_data = FEMData.read_directory(
            'fistr', write_dir_name, read_npy=False, save=False)
        np.testing.assert_almost_equal(
            written_fem_data.elemental_data['ORIENTATION'].data,
            fem_data.elemental_data['ORIENTATION'].data)
        np.testing.assert_array_equal(
            list(fem_data.element_groups.keys()),
            list(written_fem_data.element_groups.keys()))
        for v1, v2 in zip(
                list(fem_data.element_groups.values()),
                list(written_fem_data.element_groups.values())):
            np.testing.assert_array_equal(v1, v2)

        if RUN_FISTR:
            subprocess.check_call("fistr1", cwd=write_dir_name, shell=True)
            written_fem_data_with_res = FEMData.read_directory(
                'fistr', write_dir_name, read_npy=False, save=False)
            np.testing.assert_almost_equal(
                written_fem_data_with_res.nodal_data['DISPLACEMENT'].data,
                fem_data.nodal_data['DISPLACEMENT'].data, decimal=5)
    def test_write_fistr_spring(self):
        """Round-trip spring constraints and compare the constraint data."""
        fem_data = FEMData.read_directory(
            'fistr', 'tests/data/fistr/spring', read_npy=False, save=False)
        write_dir_name = 'tests/data/fistr/write_spring'
        if os.path.exists(write_dir_name):
            shutil.rmtree(write_dir_name)
        fem_data.write(
            'fistr', file_name=write_dir_name + '/mesh', overwrite=True)

        written_fem_data = FEMData.read_directory(
            'fistr', write_dir_name, read_npy=False, save=False)
        np.testing.assert_almost_equal(
            written_fem_data.constraints['spring'].data,
            fem_data.constraints['spring'].data
        )
    def test_write_fistr_mixed_shell(self):
        """Round-trip a mixed shell-element model; compare nodes and, when
        RUN_FISTR, the re-solved displacements."""
        fem_data = FEMData.read_directory(
            'fistr', 'tests/data/fistr/mixture_shell',
            read_npy=False, save=False)
        write_dir_name = 'tests/data/fistr/write_mixture_shell'
        if os.path.exists(write_dir_name):
            shutil.rmtree(write_dir_name)
        fem_data.write(
            'fistr', write_dir_name + '/mesh', overwrite=True)

        written_fem_data = FEMData.read_directory(
            'fistr', write_dir_name, read_npy=False, save=False)
        np.testing.assert_almost_equal(
            written_fem_data.nodes.data, fem_data.nodes.data)

        if RUN_FISTR:
            subprocess.check_call("fistr1", cwd=write_dir_name, shell=True)
            written_fem_data_with_res = FEMData.read_directory(
                'fistr', write_dir_name, read_npy=False, save=False)
            np.testing.assert_almost_equal(
                written_fem_data_with_res.nodal_data['DISPLACEMENT'].data,
                fem_data.nodal_data['DISPLACEMENT'].data, decimal=5)
    def test_write_fistr_6dof_boundary(self):
        """Prescribe random 6-DOF boundary displacements on three nodes and,
        when RUN_FISTR, check the solver reproduces them."""
        fem_data = FEMData.read_directory(
            'fistr', 'tests/data/fistr/mixture_shell',
            read_npy=False, save=False)
        disp = np.random.rand(3, 6)
        fem_data.constraints.pop('boundary')
        fem_data.constraints.update_data(
            np.array([1, 2, 3]), {'boundary': disp})
        write_dir_name = 'tests/data/fistr/write_6dof_boundary'
        if os.path.exists(write_dir_name):
            shutil.rmtree(write_dir_name)
        fem_data.write(
            'fistr', write_dir_name + '/mesh', overwrite=True)

        written_fem_data = FEMData.read_directory(
            'fistr', write_dir_name, read_npy=False, save=False)
        np.testing.assert_almost_equal(
            written_fem_data.nodes.data, fem_data.nodes.data)

        if RUN_FISTR:
            subprocess.check_call("fistr1", cwd=write_dir_name, shell=True)
            written_fem_data_with_res = FEMData.read_directory(
                'fistr', write_dir_name, read_npy=False, save=False)
            np.testing.assert_almost_equal(
                written_fem_data_with_res.nodal_data['DISPLACEMENT'].data[:3],
                disp, decimal=5)
    def test_write_fistr_mixed_solid(self):
        """Round-trip a mixed solid-element model; compare nodes and, when
        RUN_FISTR, the re-solved displacements."""
        fem_data = FEMData.read_directory(
            'fistr', 'tests/data/fistr/mixture_solid',
            read_npy=False, save=False)
        write_dir_name = 'tests/data/fistr/write_mixture_solid'
        if os.path.exists(write_dir_name):
            shutil.rmtree(write_dir_name)
        fem_data.write(
            'fistr', write_dir_name + '/mesh', overwrite=True)

        written_fem_data = FEMData.read_directory(
            'fistr', write_dir_name, read_npy=False, save=False)
        np.testing.assert_almost_equal(
            written_fem_data.nodes.data, fem_data.nodes.data)

        if RUN_FISTR:
            subprocess.check_call("fistr1", cwd=write_dir_name, shell=True)
            written_fem_data_with_res = FEMData.read_directory(
                'fistr', write_dir_name, read_npy=False, save=False)
            np.testing.assert_almost_equal(
                written_fem_data_with_res.nodal_data['DISPLACEMENT'].data,
                fem_data.nodal_data['DISPLACEMENT'].data, decimal=5)
    def test_write_spring_from_array(self):
        """Build spring constraints from a raw FEMAttribute array, write, and
        check the non-NaN values round-trip; solver output files must appear."""
        fem_data = FEMData.read_directory(
            'fistr', 'tests/data/fistr/tet_3', read_npy=False, save=False)
        fem_data.constraints = {
            'spring': FEMAttribute(
                'spring',
                fem_data.elements.data[0, :3], np.ones((3, 3)) * 1e-6)}
        write_dir_name = Path('tests/data/fistr/write_spring_from_array')
        if os.path.exists(write_dir_name):
            shutil.rmtree(write_dir_name)
        fem_data.write(
            'fistr', file_name=write_dir_name / 'mesh')
        written_fem_data = FEMData.read_directory(
            'fistr', write_dir_name, read_npy=False, save=False)
        original_constraints_data = fem_data.constraints['spring'].data
        written_constraints_data = written_fem_data.constraints['spring'].data
        # Compare only the defined entries (NaNs mark unset DOFs).
        np.testing.assert_almost_equal(
            original_constraints_data[~np.isnan(original_constraints_data)],
            written_constraints_data[~np.isnan(written_constraints_data)])
        if RUN_FISTR:
            subprocess.check_call(
                "fistr1", cwd=write_dir_name, shell=True)
            vis_files = glob(str(write_dir_name / '*.inp'))
            self.assertTrue(len(vis_files) > 0)
    def test_write_fistr_from_npy(self):
        """Read a model from its .npy cache, write FrontISTR files, and check
        nodes plus (when RUN_FISTR) the re-solved displacements."""
        fem_data = FEMData.read_directory(
            'fistr', 'tests/data/npy/mixture_solid',
            read_npy=True, save=False)
        write_dir_name = 'tests/data/fistr/write_from_npy'
        if os.path.exists(write_dir_name):
            shutil.rmtree(write_dir_name)
        fem_data.write(
            'fistr', write_dir_name + '/mesh', overwrite=True)

        written_fem_data = FEMData.read_directory(
            'fistr', write_dir_name, read_npy=False, save=False)
        np.testing.assert_almost_equal(
            written_fem_data.nodes.data, fem_data.nodes.data)

        if RUN_FISTR:
            subprocess.check_call("fistr1", cwd=write_dir_name, shell=True)
            written_fem_data_with_res = FEMData.read_directory(
                'fistr', write_dir_name, read_npy=False, save=False)
            np.testing.assert_almost_equal(
                written_fem_data_with_res.nodal_data['DISPLACEMENT'].data,
                fem_data.nodal_data['DISPLACEMENT'].data, decimal=5)
    def test_write_fistr_thermal_wo_density(self):
        """A material without density still round-trips (lte preserved) and,
        when RUN_FISTR, still solves to the same displacements."""
        fem_data = FEMData.read_directory(
            'fistr', 'tests/data/fistr/thermal_wo_density',
            read_npy=False, save=False)
        write_dir_name = 'tests/data/fistr/write_thermal_wo_density'
        if os.path.exists(write_dir_name):
            shutil.rmtree(write_dir_name)
        fem_data.write(
            'fistr', file_name=os.path.join(write_dir_name, 'mesh'))

        written_fem_data = FEMData.read_directory(
            'fistr', write_dir_name, read_npy=False, save=False)
        np.testing.assert_almost_equal(
            written_fem_data.elemental_data.get_attribute_data('lte'),
            fem_data.elemental_data.get_attribute_data('lte'))

        if RUN_FISTR:
            subprocess.check_call("fistr1", cwd=write_dir_name, shell=True)
            written_fem_data_with_res = FEMData.read_directory(
                'fistr', write_dir_name, read_npy=False, save=False)
            np.testing.assert_almost_equal(
                written_fem_data_with_res.nodal_data['DISPLACEMENT'].data,
                fem_data.nodal_data['DISPLACEMENT'].data, decimal=5)
    def test_write_fistr_thermal_wo_density_material_overwritten(self):
        """Same as the density-less round trip, but after perturbing the
        Young's modulus via elemental_data.overwrite."""
        fem_data = FEMData.read_directory(
            'fistr', 'tests/data/fistr/thermal_wo_density',
            read_npy=False, save=False)
        write_dir_name = 'tests/data/fistr/write_thermal_wo_density_overwrite'
        if os.path.exists(write_dir_name):
            shutil.rmtree(write_dir_name)
        fem_data.elemental_data.overwrite(
            'Young_modulus',
            fem_data.elemental_data.get_attribute_data('Young_modulus') + .1)
        fem_data.write(
            'fistr', file_name=os.path.join(write_dir_name, 'mesh'))

        written_fem_data = FEMData.read_directory(
            'fistr', write_dir_name, read_npy=False, save=False)
        np.testing.assert_almost_equal(
            written_fem_data.elemental_data.get_attribute_data('lte'),
            fem_data.elemental_data.get_attribute_data('lte'))

        if RUN_FISTR:
            subprocess.check_call("fistr1", cwd=write_dir_name, shell=True)
            written_fem_data_with_res = FEMData.read_directory(
                'fistr', write_dir_name, read_npy=False, save=False)
            np.testing.assert_almost_equal(
                written_fem_data_with_res.nodal_data['DISPLACEMENT'].data,
                fem_data.nodal_data['DISPLACEMENT'].data, decimal=5)
    def test_write_fistr_heat_no_visual(self):
        """With settings['write_visual'] False, running the solver must not
        produce any *.inp visualization files."""
        fem_data = FEMData.read_files('fistr', [
            'tests/data/fistr/heat/hex.msh',
            'tests/data/fistr/heat/hex.cnt',
            'tests/data/fistr/heat/hex.res.0.100'])
        fem_data.settings['write_visual'] = False

        write_file_name = 'tests/data/fistr/write_heat_no_vis/mesh'
        if os.path.isfile(write_file_name + '.msh'):
            os.remove(write_file_name + '.msh')
        if os.path.isfile(write_file_name + '.cnt'):
            os.remove(write_file_name + '.cnt')
        fem_data.write(
            'fistr', file_name=write_file_name, overwrite=True)

        if RUN_FISTR:
            subprocess.check_call(
                "fistr1", cwd=os.path.dirname(write_file_name), shell=True)
            vis_files = glob(write_file_name + '*.inp')
            self.assertTrue(len(vis_files) == 0)
    def test_write_fistr_tet_tet2(self):
        """The tet_tet2 setting must emit a '!TET_TET2, ON' line in
        hecmw_ctrl.dat, and the solver run must produce vis files."""
        fem_data = FEMData.read_directory(
            'fistr', 'tests/data/fistr/tet_3', read_npy=False, save=False)
        fem_data.settings['tet_tet2'] = True
        write_dir_name = Path('tests/data/fistr/write_tet_tet2')
        if os.path.exists(write_dir_name):
            shutil.rmtree(write_dir_name)
        fem_data.write(
            'fistr', file_name=write_dir_name / 'mesh')
        with open(write_dir_name / 'hecmw_ctrl.dat') as f:
            lines = f.readlines()
        self.assertTrue(
            ('!TET_TET2, ON\n' in lines) or ('!TET_TET2,ON\n' in lines))
        if RUN_FISTR:
            subprocess.check_call(
                "fistr1", cwd=write_dir_name, shell=True)
            vis_files = glob(str(write_dir_name / '*.inp'))
            self.assertTrue(len(vis_files) > 0)
    def test_write_fistr_overwrite_material(self):
        """Scale lte_full per element by mean nodal position, round-trip the
        overwritten values, and expect exactly two vis files after solving."""
        fem_data = FEMData.read_directory(
            'fistr', 'tests/data/fistr/thermal_to_overwrite',
            read_npy=False, save=False)
        mean_pos = fem_data.convert_nodal2elemental(
            fem_data.nodal_data.get_attribute_data('node'), calc_average=True)
        new_lte_full = np.einsum(
            'ij,i->ij',
            fem_data.elemental_data.get_attribute_data('lte_full'),
            mean_pos[:, 0] + mean_pos[:, 1])
        fem_data.elemental_data.overwrite('lte_full', new_lte_full)
        write_dir_name = Path('tests/data/fistr/write_overtewrite')
        if os.path.exists(write_dir_name):
            shutil.rmtree(write_dir_name)
        fem_data.write(
            'fistr', file_name=write_dir_name / 'mesh')
        written_fem_data = FEMData.read_directory(
            'fistr', write_dir_name, read_npy=False, save=False)
        np.testing.assert_almost_equal(
            written_fem_data.elemental_data.get_attribute_data(
                'lte_full'), new_lte_full)
        if RUN_FISTR:
            subprocess.check_call(
                "fistr1", cwd=write_dir_name, shell=True)
            vis_files = glob(str(write_dir_name / '*.inp'))
            self.assertTrue(len(vis_files) == 2)
    def test_read_heat_nl_material(self):
        """Round-trip a nonlinear (table-valued) thermal conductivity and
        compare re-solved temperatures against the reference UCD result."""
        fem_data = FEMData.read_directory(
            'fistr', 'tests/data/fistr/heat_nl', read_npy=False, save=False)
        heat_res = FEMData.read_files(
            'ucd', 'tests/data/fistr/heat_nl/hex_vis_psf.0100.inp')
        write_dir_name = Path('tests/data/fistr/write_heat_nl')
        if os.path.exists(write_dir_name):
            shutil.rmtree(write_dir_name)
        fem_data.write(
            'fistr', file_name=write_dir_name / 'mesh')
        written_fem_data = FEMData.read_directory(
            'fistr', write_dir_name, read_npy=False, save=False)
        np.testing.assert_almost_equal(
            written_fem_data.materials['thermal_conductivity'].values[0],
            fem_data.materials['thermal_conductivity'].values[0])
        if RUN_FISTR:
            subprocess.check_call(
                "fistr1", cwd=write_dir_name, shell=True)
            written_res_data = FEMData.read_files(
                'ucd', write_dir_name / 'mesh_vis_psf.0100.inp')
            np.testing.assert_almost_equal(
                written_res_data.nodal_data.get_attribute_data('TEMPERATURE'),
                heat_res.nodal_data.get_attribute_data('TEMPERATURE')
            )
    def test_read_heat_nl_tensor_material(self):
        """Round-trip a nonlinear tensor thermal conductivity and compare
        re-solved temperatures against the reference UCD result."""
        fem_data = FEMData.read_directory(
            'fistr', 'tests/data/fistr/heat_nl_tensor',
            read_npy=False, save=False)
        heat_res = FEMData.read_files(
            'ucd', 'tests/data/fistr/heat_nl_tensor/hex_vis_psf.0100.inp')
        write_dir_name = Path('tests/data/fistr/write_heat_nl_tensor')
        if os.path.exists(write_dir_name):
            shutil.rmtree(write_dir_name)
        fem_data.write(
            'fistr', file_name=write_dir_name / 'mesh')
        written_fem_data = FEMData.read_directory(
            'fistr', write_dir_name, read_npy=False, save=False)
        np.testing.assert_almost_equal(
            written_fem_data.materials['thermal_conductivity_full'].values[
                0, 0],
            fem_data.materials['thermal_conductivity_full'].values[0, 0])
        if RUN_FISTR:
            subprocess.check_call(
                "fistr1", cwd=write_dir_name, shell=True)
            written_res_data = FEMData.read_files(
                'ucd', write_dir_name / 'mesh_vis_psf.0100.inp')
            np.testing.assert_almost_equal(
                written_res_data.nodal_data.get_attribute_data('TEMPERATURE'),
                heat_res.nodal_data.get_attribute_data('TEMPERATURE')
            )
def test_read_heat_nl_tensor_material_long_value(self):
    """Round-trip a hand-injected, many-valued tensor conductivity table.

    Overwrites the material with a randomly scaled 2x7 table (so the
    written values differ every run) and checks it survives the
    write/read cycle; with RUN_FISTR, also checks the solver still runs.
    """
    fem_data = FEMData.read_directory(
        'fistr', 'tests/data/fistr/heat_nl_tensor',
        read_npy=False, save=False)
    write_dir_name = Path('tests/data/fistr/write_heat_nl_tensor_long')
    if os.path.exists(write_dir_name):
        shutil.rmtree(write_dir_name)  # start from a clean output directory
    # Temperature-dependent conductivity table; last column presumably the
    # temperature break points -- TODO confirm against the FISTR format.
    raw_conductivity = np.array([
        [1., 2., 3., .01, .02, .03, -1.],
        [2., 4., 6., .02, .04, .06, 1.],
    ]) * 1e-3 * np.random.rand()  # random scale: exercises long float output
    # Wrap the table in a (1, 1) object array, as the materials API expects.
    conductivity = np.array(
        [[raw_conductivity, 0]], dtype=object)[:, [0]]
    fem_data.materials.update_data(
        'STEEL', {'thermal_conductivity_full': conductivity},
        allow_overwrite=True)
    fem_data.write('fistr', file_name=write_dir_name / 'mesh')
    written_fem_data = FEMData.read_directory(
        'fistr', write_dir_name, read_npy=False, save=False)
    np.testing.assert_almost_equal(
        written_fem_data.materials['thermal_conductivity_full'].values[
            0, 0], conductivity[0, 0])
    if RUN_FISTR:
        subprocess.check_call(
            "fistr1", cwd=write_dir_name, shell=True)
        # Solver success is detected via its visualization output file.
        self.assertTrue(
            (write_dir_name / 'mesh_vis_psf.0100.inp').exists())
def test_write_fistr_heat_static(self):
    """Round-trip a thermal-stress (heat + static) FISTR data set.

    Checks the full linear-thermal-expansion tensor survives writing,
    and, with RUN_FISTR, that displacements match the reference result.
    """
    fem_data = FEMData.read_directory(
        'fistr', 'tests/data/fistr/heat_static', read_npy=False,
        save=False)
    write_dir_name = Path('tests/data/fistr/write_heat_static')
    if write_dir_name.exists():
        shutil.rmtree(write_dir_name)  # start from a clean output directory
    fem_data.write('fistr', write_dir_name / 'mesh')
    written_fem_data = FEMData.read_directory(
        'fistr', write_dir_name, read_npy=False, save=False)
    np.testing.assert_almost_equal(
        written_fem_data.elemental_data.get_attribute_data('lte_full'),
        fem_data.elemental_data.get_attribute_data('lte_full'),
    )
    if RUN_FISTR:
        subprocess.check_call("fistr1", cwd=write_dir_name, shell=True)
        written_fem_data_with_res = FEMData.read_directory(
            'fistr', write_dir_name, read_npy=False, save=False)
        # decimal=5: solver output is only written with limited precision.
        np.testing.assert_almost_equal(
            written_fem_data_with_res.nodal_data['DISPLACEMENT'].data,
            fem_data.nodal_data['DISPLACEMENT'].data, decimal=5)
def test_write_spring_boundary(self):
    """Round-trip a mesh with spring boundary conditions.

    Verifies node coordinates survive the write/read cycle and, with
    RUN_FISTR, that displacements match the reference solution.
    """
    fem_data = FEMData.read_directory(
        'fistr', 'tests/data/fistr/spring_boundary',
        read_npy=False, save=False)
    # NOTE: plain str path here (other tests use pathlib.Path).
    write_dir_name = 'tests/data/fistr/write_spring_boundary'
    if os.path.exists(write_dir_name):
        shutil.rmtree(write_dir_name)  # start from a clean output directory
    fem_data.write(
        'fistr', write_dir_name + '/mesh', overwrite=True)
    written_fem_data = FEMData.read_directory(
        'fistr', write_dir_name, read_npy=False, save=False)
    np.testing.assert_almost_equal(
        written_fem_data.nodes.data, fem_data.nodes.data)
    if RUN_FISTR:
        subprocess.check_call("fistr1", cwd=write_dir_name, shell=True)
        written_fem_data_with_res = FEMData.read_directory(
            'fistr', write_dir_name, read_npy=False, save=False)
        # decimal=5: solver output is only written with limited precision.
        np.testing.assert_almost_equal(
            written_fem_data_with_res.nodal_data['DISPLACEMENT'].data,
            fem_data.nodal_data['DISPLACEMENT'].data, decimal=5)
def test_write_static_with_hand_created_data(self):
    """Build a tiny FEMData (one tet + one spring) in memory and solve it.

    Nodes 4 and 5 are coincident and joined by a stiff spring element;
    node 5 is displaced by 0.1 in z, so node 4's z-displacement should
    come out close to 0.1 when the solver runs.
    """
    write_fem_data = FEMData(
        nodes=FEMAttribute(
            'NODE',
            ids=[1, 2, 3, 4, 5],
            data=np.array([
                [0., 0., 0.],
                [1., 0., 0.],
                [0., 1., 0.],
                [0., 0., 1.],
                [0., 0., 1.],  # same position as node 4 (spring endpoint)
            ])),
        elements=FEMElementalAttribute(
            'ELEMENT', {
                'tet': FEMAttribute('TET', ids=[1], data=[[1, 2, 3, 4]]),
                'spring': FEMAttribute('SPRING', ids=[2], data=[[4, 5]])}))
    write_fem_data.settings['solution_type'] = 'STATIC'
    # np.nan marks an unconstrained degree of freedom on that node.
    write_fem_data.constraints['boundary'] = FEMAttribute(
        'boundary', ids=[1, 2, 3, 5], data=np.array([
            [0., 0., 0.],
            [np.nan, 0., 0.],
            [np.nan, np.nan, 0.],
            [0., 0., .1],  # prescribed 0.1 z-displacement on node 5
        ]))
    write_fem_data.element_groups.update({'E_SOLID': [1], 'E_LINE': [2]})
    write_fem_data.materials.update_data(
        'M_SOLID', {
            'Young_modulus': np.array([[10.0]]),
            'Poisson_ratio': np.array([[0.4]])})
    # The spring material is much stiffer than the solid.
    write_fem_data.materials.update_data(
        'M_LINE', {
            'Young_modulus': np.array([[10000.0]]),
            'Poisson_ratio': np.array([[0.45]])})
    write_fem_data.sections.update_data(
        'M_SOLID', {'TYPE': 'SOLID', 'EGRP': 'E_SOLID'})
    write_fem_data.sections.update_data(
        'M_LINE', {'TYPE': 'SOLID', 'EGRP': 'E_LINE'})
    write_dir_name = Path('tests/data/fistr/write_hand_created')
    if write_dir_name.exists():
        shutil.rmtree(write_dir_name)  # start from a clean output directory
    write_fem_data.write('fistr', write_dir_name / 'mesh')
    if RUN_FISTR:
        subprocess.check_call("fistr1", cwd=write_dir_name, shell=True)
        written_fem_data_with_res = FEMData.read_directory(
            'fistr', write_dir_name, read_npy=False, save=False)
        # Node index 3 (id 4), last component (z) should follow node 5.
        np.testing.assert_almost_equal(
            written_fem_data_with_res.nodal_data[
                'DISPLACEMENT'].data[3, -1], .1, decimal=3)
def test_write_fistr_heat_cflux(self):
    """Round-trip a heat analysis with concentrated flux (CFLUX) loads."""
    fem_data = FEMData.read_directory(
        'fistr', 'tests/data/fistr/heat_cflux',
        read_npy=False, save=False)
    write_dir_name = 'tests/data/fistr/write_heat_cflux'
    if os.path.exists(write_dir_name):
        shutil.rmtree(write_dir_name)  # start from a clean output directory
    fem_data.write(
        'fistr', file_name=os.path.join(write_dir_name, 'mesh'))
    if RUN_FISTR:
        subprocess.check_call("fistr1", cwd=write_dir_name, shell=True)
        written_fem_data_with_res = FEMData.read_directory(
            'fistr', write_dir_name, read_npy=False, save=False)
        # decimal=5: solver output is only written with limited precision.
        np.testing.assert_almost_equal(
            written_fem_data_with_res.nodal_data['TEMPERATURE'].data,
            fem_data.nodal_data['TEMPERATURE'].data, decimal=5)
def test_write_fistr_heat_pure_cflux(self):
    """Round-trip a heat analysis driven only by CFLUX loads (no fixed T)."""
    fem_data = FEMData.read_directory(
        'fistr', 'tests/data/fistr/heat_pure_cflux',
        read_npy=False, save=False)
    write_dir_name = 'tests/data/fistr/write_heat_pure_cflux'
    if os.path.exists(write_dir_name):
        shutil.rmtree(write_dir_name)  # start from a clean output directory
    fem_data.write(
        'fistr', file_name=os.path.join(write_dir_name, 'mesh'))
    if RUN_FISTR:
        subprocess.check_call("fistr1", cwd=write_dir_name, shell=True)
        written_fem_data_with_res = FEMData.read_directory(
            'fistr', write_dir_name, read_npy=False, save=False)
        # decimal=5: solver output is only written with limited precision.
        np.testing.assert_almost_equal(
            written_fem_data_with_res.nodal_data['TEMPERATURE'].data,
            fem_data.nodal_data['TEMPERATURE'].data, decimal=5)
| [
"yellowshippo@gmail.com"
] | yellowshippo@gmail.com |
6f3281175ab81b728476fb5171d77260cd8d394d | 73f5461ea52354ea8caa6e08a3989f833fc9d5d0 | /src/python/fsqio/pants/buildgen/jvm/map_third_party_jar_symbols.py | c581fd1cf759f63584ab20647a192c01cd433beb | [
"Apache-2.0"
] | permissive | OpenGeoscience/fsqio | 52b674b3e2d1742916fcec83bbb831ddbd58d1f2 | aaee25552b602712e8ca3d8b02e0d28e4262e53e | refs/heads/master | 2021-01-15T20:23:18.180635 | 2017-06-05T20:25:18 | 2017-06-05T20:25:18 | 66,481,281 | 3 | 0 | null | 2017-06-05T20:25:18 | 2016-08-24T16:36:46 | Scala | UTF-8 | Python | false | false | 4,428 | py | # coding=utf-8
# Copyright 2014 Foursquare Labs Inc. All Rights Reserved.
from __future__ import (
absolute_import,
division,
generators,
nested_scopes,
print_function,
unicode_literals,
with_statement,
)
from contextlib import closing
from itertools import chain
import json
import os
import re
from zipfile import ZipFile
from pants.backend.jvm.targets.jar_library import JarLibrary
from pants.invalidation.cache_manager import VersionedTargetSet
from pants.task.task import Task
from pants.util.dirutil import safe_mkdir
class MapThirdPartyJarSymbols(Task):
    """Pants task: map every third-party jar on the compile classpath to the
    fully qualified class names it exports.

    The result is published as the 'third_party_jar_symbols' product (a flat
    set of dotted class names) for downstream buildgen tasks.  The analysis
    is cached per target-set fingerprint as a JSON file in the workdir.
    """

    @classmethod
    def product_types(cls):
        return [
            'third_party_jar_symbols',
        ]

    @classmethod
    def prepare(cls, options, round_manager):
        super(MapThirdPartyJarSymbols, cls).prepare(options, round_manager)
        # NOTE(mateo): This is a deprecated concept upstream - everything is in the classpath now. So it will take some
        # fiddling to get the jar symbols for anyone not using pom-resolve.
        round_manager.require_data('compile_classpath')
        round_manager.require_data('java')
        round_manager.require_data('scala')

    # Matches zip entries like 'com/foo/Bar.class', capturing the package
    # path ('com/foo/') and the file stem ('Bar') separately.
    CLASSFILE_RE = re.compile(r'(?P<path_parts>(?:\w+/)+)'
                              r'(?P<file_part>.*?)'
                              r'\.class')
    # A valid (non-synthetic) Java identifier: starts with a letter.
    CLASS_NAME_RE = re.compile(r'[a-zA-Z]\w*')

    def fully_qualified_classes_from_jar(self, jar_abspath):
        """Yield the dotted class names defined in the jar at jar_abspath.

        For nested classes ('Outer$Inner') every enclosing prefix is yielded
        ('pkg.Outer', 'pkg.Outer.Inner'); anonymous/synthetic suffixes after
        '$$' are dropped, and scanning stops at the first segment that is not
        a plain identifier (e.g. anonymous class numbers).
        """
        with closing(ZipFile(jar_abspath)) as dep_zip:
            for qualified_file_name in dep_zip.namelist():
                match = self.CLASSFILE_RE.match(qualified_file_name)
                if match is not None:
                    file_part = match.groupdict()['file_part']
                    path_parts = match.groupdict()['path_parts']
                    path_parts = filter(None, path_parts.split('/'))
                    package = '.'.join(path_parts)
                    non_anon_file_part = file_part.split('$$')[0]
                    nested_classes = non_anon_file_part.split('$')
                    for i in range(len(nested_classes)):
                        if not self.CLASS_NAME_RE.match(nested_classes[i]):
                            break
                        nested_class_name = '.'.join(nested_classes[:i + 1])
                        fully_qualified_class = '.'.join([package, nested_class_name])
                        yield fully_qualified_class

    def execute(self):
        """Build (or load from cache) the jar -> exported-symbols analysis and
        publish the union of all symbols as a product."""
        products = self.context.products
        targets = self.context.targets(lambda t: isinstance(t, JarLibrary))
        with self.invalidated(targets, invalidate_dependents=False) as invalidation_check:
            # The analysis is keyed on the fingerprint of the WHOLE target set
            # (see check_artifact_cache_for below).
            global_vts = VersionedTargetSet.from_versioned_targets(invalidation_check.all_vts)
            vts_workdir = os.path.join(self._workdir, global_vts.cache_key.hash)
            vts_analysis_file = os.path.join(vts_workdir, 'buildgen_analysis.json')
            if invalidation_check.invalid_vts or not os.path.exists(vts_analysis_file):
                # Cache miss: rescan every .jar on the compile classpath.
                classpath = self.context.products.get_data('compile_classpath')
                jar_entries = classpath.get_for_targets(targets)
                all_jars = [jar for _, jar in jar_entries]
                calculated_analysis = {}
                calculated_analysis['hash'] = global_vts.cache_key.hash
                calculated_analysis['jar_to_symbols_exported'] = {}
                for jar_path in sorted(all_jars):
                    if os.path.splitext(jar_path)[1] != '.jar':
                        continue
                    fully_qualified_classes = list(set(self.fully_qualified_classes_from_jar(jar_path)))
                    calculated_analysis['jar_to_symbols_exported'][jar_path] = {
                        'fully_qualified_classes': fully_qualified_classes,
                    }
                calculated_analysis_json = json.dumps(calculated_analysis)
                safe_mkdir(vts_workdir)
                with open(vts_analysis_file, 'wb') as f:
                    f.write(calculated_analysis_json)
                if self.artifact_cache_writes_enabled():
                    self.update_artifact_cache([(global_vts, [vts_analysis_file])])
            # Always (re)load from the JSON file so cache hits and fresh
            # computations follow the same code path.
            with open(vts_analysis_file, 'rb') as f:
                analysis = json.loads(f.read())
            third_party_jar_symbols = set(chain.from_iterable(
                v['fully_qualified_classes'] for v in analysis['jar_to_symbols_exported'].values()
            ))
            products.safe_create_data('third_party_jar_symbols', lambda: third_party_jar_symbols)

    def check_artifact_cache_for(self, invalidation_check):
        # Pom-resolve is an output dependent on the entire target set, and is not divisible
        # by target. So we can only cache it keyed by the entire target set.
        global_vts = VersionedTargetSet.from_versioned_targets(invalidation_check.all_vts)
        return [global_vts]
| [
"mateo@foursquare.com"
] | mateo@foursquare.com |
69f0731a3f3b46ba02834dac5649482141a78a69 | f0122f063e348c6279448e0768113c885e81a1e0 | /convert_output_smart-v2020.01.28.py | 388a7e377e2d3f2d7f5a62f62a0386b649c36268 | [] | no_license | jherfson/Convert-Output-Smart | e0501ac69342533587e030c677d8acf69f6d27a5 | e93d2d76ea7efcf468bed95159bcfc35b3c5eee1 | refs/heads/master | 2022-06-14T07:20:48.990781 | 2020-05-10T06:22:09 | 2020-05-10T06:22:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,606 | py | # Change log
# v2020.01.28
import numpy as np

# Interactive converter for impedance-spectroscopy export files: splits one
# multi-temperature data file into one whitespace-separated .txt per
# temperature, converting magnitude/phase to Re/Im when necessary.
var = 1
while var == 1:
    # Prompt (Portuguese): "Enter the file name".  Loop until a readable
    # file is given.
    name = input("Digite o nome do arquivo: ")
    try:
        with open(name, 'r')as row:
            characters = row.read()
        # Sniff the column separator from a fixed byte offset.
        # NOTE(review): position 309 is format-specific and fragile --
        # presumably it always falls inside the first data row; verify.
        separator = characters[309]
        if separator != ';':
            delimiters = ','
        else:
            # ';'-separated files use ',' as decimal mark: normalize to '.'
            # and rewrite the file in place before parsing.
            characters = characters.replace(',', '.')
            with open(name, "w") as row:
                row.write(characters)
            delimiters = ';'
        var = 0
    except (IOError, OSError, NameError) as err:
        # Message (Portuguese): "file not found! Type again".
        print(err, 'arquivo não encontrado!. Digite novamente')
        var = 1
# Columns: 8 = temperature, 4 = frequency, 10/11 = impedance components
# (either Re/Im or magnitude/phase depending on the export mode).
all_data = np.loadtxt(name, delimiter=delimiters,
                      skiprows=4, usecols=(8, 4, 10, 11))
# The header tells us whether columns 10/11 are magnitude/phase.
magnitude_and_phase_are_given = characters.find(
    'Impedance Magnitude (Ohms)') != -1
current_index = 0  # NOTE(review): unused
current_temperature = 0
# Collect the row index where each new temperature block starts.
vector = []
for index in range(len(all_data)):
    temperature = all_data[index][0]
    if temperature != current_temperature:
        current_temperature = temperature
        vector.append(index)
vector.append(len(all_data) - 1)
# Write one file per temperature block, named '<temperature>.txt'.
# NOTE(review): 'len(vector) - 2' skips the final block -- confirm whether
# the last temperature is intentionally dropped.
for i in range(len(vector) - 2):
    matriz = all_data[vector[i]: vector[i+1], 1:4]
    if magnitude_and_phase_are_given:
        # Convert |Z| / phase(deg) to Z_re / Z_im.
        z_re = matriz[:, 1]*np.cos(np.radians(matriz[:, 2]))
        z_im = matriz[:, 1]*np.sin(np.radians(matriz[:, 2]))
        matriz[:, 1] = z_re
        matriz[:, -1] = z_im
        #print(all_data[vector[i]][0])
        np.savetxt(str(all_data[vector[i]][0])+'.txt', matriz, delimiter=' ')
    else:
        np.savetxt(str(all_data[vector[i]][0])+'.txt', matriz, delimiter=' ')
| [
"noreply@github.com"
] | noreply@github.com |
bf4d47352d1dfddd94469e606117a7c21721ac05 | aa38ec617026e708c9eb0ceb63f899aa39e71918 | /estpeople/admin.py | 5b2d30a786cdf4803d4cda13d31bfceab4f3a2b7 | [] | no_license | segremelnikov/rep-test | 1cd601f87b7b9660d1d54a101ad3a85567d8f5da | a84a6b9db95b9fa0f8200d5935ad4a72a6340146 | refs/heads/main | 2023-08-31T21:58:48.709309 | 2021-10-16T01:23:48 | 2021-10-16T01:23:48 | 417,662,276 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 280 | py | from django.contrib import admin
from .models import Page, Estimation
# Register your models here.
@admin.register(Page)
class PageAdmin(admin.ModelAdmin):
    """Default admin configuration for the Page model."""


@admin.register(Estimation)
class EstimationAdmin(admin.ModelAdmin):
    """Default admin configuration for the Estimation model."""
"sergemelnikov86@gmail.com"
] | sergemelnikov86@gmail.com |
95278a678b61e8131be98bc6b397defcfe8925f1 | 49a884c88b040414665ab6484bf530427e31f0d7 | /venv/Scripts/pip3.7-script.py | 8941e18fe6b90f189bb924061500f88b9c91da40 | [] | no_license | JojoPalambas/TSStatMaker | 1835d28b6eddc109c5ef17955739c92b32d91be1 | 94ed5aaeade092bc2caee6f23b5719575ec0d305 | refs/heads/master | 2022-03-28T10:32:39.846189 | 2020-01-13T09:46:12 | 2020-01-13T09:46:12 | null | 0 | 0 | null | null | null | null | ISO-8859-1 | Python | false | false | 420 | py | #!E:\Données\Programmes\Django\TSStatMaker\venv\Scripts\python.exe -x
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==9.0.3','console_scripts','pip3.7'
# Auto-generated console-script wrapper (setuptools/easy_install): resolves
# the 'pip3.7' entry point of the pinned pip distribution and runs it.
__requires__ = 'pip==9.0.3'
import re
import sys
from pkg_resources import load_entry_point

if __name__ == '__main__':
    # Strip a trailing '-script.py(w)' / '.exe' so argv[0] shows plain 'pip3.7'.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==9.0.3', 'console_scripts', 'pip3.7')()
    )
| [
"depott_g@epita.fr"
] | depott_g@epita.fr |
70477943b2e12cf9034eb2a84630f09be1aa398e | cd771773bf6b8d966a4d831d89e6b405701c2948 | /backend/urls.py | 072b5188d835664de0bd923c48fb91c888dd35a5 | [] | no_license | cti21/CQCRM22 | f4160ee27e52ccc1ddd2771b7cd6674574314b1b | 53b30ea1a0c04f97d29a34333df8875b06ab5cb3 | refs/heads/master | 2022-02-28T20:23:20.140170 | 2019-10-13T08:43:34 | 2019-10-13T08:43:34 | 214,786,456 | 0 | 2 | null | 2022-01-15T05:28:56 | 2019-10-13T08:33:31 | Vue | UTF-8 | Python | false | false | 3,435 | py | from django.conf.urls import url
from backend.views import *
# Backend URL configuration.  Most analysis endpoints are currently
# disabled (commented out); only the routes below are live.
urlpatterns = [
    url(r'^info$',info,name='info'),
    url(r'^logout$',logout,name='logout'),
    url(r'^getTcTreatHistroryData/',getTcTreatHistroryData,name='getTcTreatHistroryData'),
    url(r'^getOrderSelectData/',getOrderSelectData,name='getOrderSelectData'),
    url(r'^treat_histrorys/',treat_histrorys,name='treat_histrorys'),
    url(r'^treat_subhistorys/',treat_subhistorys,name='treat_subhistorys'),
    # url(r'^txDeviceBymonth/',countTxDeviceBymonth,name='txDeviceBymonth'),
    # url(r'^operateByYear/',operateByYear,name='operateByYear'),
    # url(r'^analyseBykind/',analyseBykind,name='analyseBykind'),
    # url(r'^analyseFirstTx/',analyseFirstTx,name='analyseFirstTx'),
    # url(r'^analysePTH/',analysePTH,name='analysePTH'),
    # url(r'^analyseCaP/',analyseCaP,name='analyseCaP'),
    # url(r'^AlarmLab/',AlarmLab,name='AlarmLab'),
    # url(r'^analyseBfzBymonth/',analyseBfzBymonth,name='analyseBfzBymonth'),
    # url(r'^analyseBloodPressure/',analyseBloodPressure,name='analyseBloodPressure'),
    # url(r'^analyseByYuanfa/',analyseByYuanfa,name='analyseByYuanfa'),
    # url(r'^analyseZhuangui/',analyseZhuangui,name='analyseZhuangui'),
    # url(r'^analyseZhengzhuang/',analyseZhengzhuang,name='analyseZhengzhuang'),
    # url(r'^analyseDrugByyear/',analyseDrugByyear,name='analyseDrugByyear'),
    # url(r'^analyseMaterialByyear/',analyseMaterialByyear,name='analyseMaterialByyear'),
    # url(r'^analysedrug/',analysedrug,name='analysedrug'),
    # url(r'^analysedrugDetail/',analysedrugDetail,name='analysedrugDetail'),
    # url(r'^analysematerial/',analysematerial,name='analysematerial'),
    # url(r'^analysematerialDetail/',analysematerialDetail,name='analysematerialDetail'),
    # url(r'^analyseDrugstock/',analyseDrugstock,name='analyseDrugstock'),
    # url(r'^analyseMaterialstock/',analyseMaterialstock,name='analyseMaterialstock'),
    # url(r'^analyseDeptCharge/',analyseDeptCharge,name='analyseDeptCharge'),
    # url(r'^deptChargeBymonth/',DeptChargeBymonth,name='deptChargeBymonth'),
    # url(r'^getLabreason/',getLabreason,name='getLabreason'),
    # url(r'^getLabItems/',getLabItems,name='getLabItems'),
    # url(r'^analyseLabResultData/',analyseLabResultData,name='analyseLabResultData'),
    # url(r'^getPatient_yz/',getPatient_yz,name='getPatient_yz'),
    # url(r'^getTimeIntersect/',getTimeIntersect,name='getTimeIntersect'),
    # url(r'^getXgtonglu/',getXgtonglu,name='getXgtonglu'),
    # url(r'^getXgtongluType/',getXgtongluType,name='getXgtongluType'),
    # url(r'^getMaterialType/',getMaterialType,name='getMaterialType'),
    # url(r'^PatientsToRegister/',PatientsToRegister,name='PatientsToRegister'),
    # url(r'^copyTotouxi_drug/',copyTotouxi_drug,name='copyTotouxi_drug'),
    # url(r'^getNurses/',getNurses,name='getNurses'),
    # url(r'^getProPostRecord/',getProPostRecord,name='getProPostRecord'),
    # url(r'^getExpenseRecord/',getExpenseRecord,name='getExpenseRecord'),
    # url(r'^getDrugAlert/',getDrugAlert,name='getDrugAlert'),
    # url(r'^getLab_Test_Items/',getLab_Test_Items,name='getLab_Test_Items'),
    # url(r'^getExam_Items/',getExam_Items,name='getExam_Items'),
    url(r'^getOrganization/',getOrganization,name='getOrganization'),
    url(r'^getAllSelectData/',getAllSelectData,name='getAllSelectData'),
    # url(r'^down_file/', down_file,name='down_file'),
]
"cti21@163.com"
] | cti21@163.com |
174ac64b4968531a363c2eb2da3343b168fc4a5d | b8a914a6a6e30c3b980f987b9ca650f7d50ad743 | /RoomFileParser.py | 73f4b5f942949542721494e1fc66ec3ae5fc8662 | [] | no_license | estiben/Rob3 | daf2b9af8a067cd5f7c95248026c096ecc985469 | 7d5fa1fb44f668991fe43cfd7e91c3dd6a29908c | refs/heads/master | 2021-01-01T19:16:06.359588 | 2012-04-19T18:48:33 | 2012-04-19T18:48:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 907 | py | import xml.sax
class RoomSAXContentHandler(xml.sax.ContentHandler):
    """SAX handler that instantiates game objects described in a room XML file.

    Each <object class="Foo"> element is instantiated when its end tag is
    reached; <param> text content is accumulated in self.paramList.
    """

    def __init__(self):
        xml.sax.ContentHandler.__init__(self)
        self.paramList = []        # text of <param> elements of the current object
        self.currentElement = ''   # name of the element being parsed
        self.currentClass = ''     # value of the 'class' attribute of the last <object>

    def startElement(self, name, attrs):
        self.currentElement = name
        if name == 'tile':
            print('tile')
        elif name == 'object':
            self.currentClass = attrs.getValue('class')

    def endElement(self, name):
        if name == 'object':
            # SECURITY NOTE(review): eval() executes an expression taken
            # straight from the XML attribute -- arbitrary code execution on
            # untrusted files.  Also, paramList is collected but never passed
            # to the constructor; presumably the intent was
            # eval(self.currentClass)(*self.paramList) -- confirm.
            eval(self.currentClass + '()')
            self.paramList = []

    def characters(self, content):
        # NOTE(review): SAX may deliver one text node in several chunks, so a
        # single <param> can append multiple fragments here.
        if self.currentElement == 'param':
            self.paramList.append(content)
def loadRoom(name):
    """Parse data/rooms/<name>.xml, instantiating its objects via the SAX handler.

    :param name: room name without extension
    """
    # Local import: 'os' is not imported at module level in this file, so the
    # original raised NameError at the first call.
    import os
    roomFilePath = os.path.join('data', 'rooms', name) + '.xml'
    # 'with' guarantees the file is closed even if parsing raises.
    with open(roomFilePath, 'r') as source:
        xml.sax.parse(source, RoomSAXContentHandler())
| [
"steven.douglas2@yahoo.com"
] | steven.douglas2@yahoo.com |
150efe4399f75590dbde7ec1c2bc8fa2d003fb42 | 00bcc077360eca2b4a807ae5b83fdddeb076bbfd | /comments/serializers.py | ff483acbedd1e150da6b8a1468164d5d70485b61 | [] | no_license | LucasCarrias/basic-web-api | 499416f2151749fd0c2afe15b4fcc56857bbbf86 | 13f664a9b561910013c72a338a371098acaf7481 | refs/heads/main | 2023-02-02T12:06:26.555455 | 2020-12-22T02:17:25 | 2020-12-22T02:17:25 | 322,755,833 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 274 | py | from rest_framework import serializers
from .models import Comment
class CommentSerializer(serializers.ModelSerializer):
    """Serializes Comment, exposing the model's post_id under the JSON
    (camelCase) name 'postId'."""
    # Map the camelCase API field onto the snake_case model field.
    postId = serializers.CharField(source="post_id")

    class Meta:
        model = Comment
        fields = ['id', 'name', 'email', 'body', 'postId']
"lucascarrias@outlook.com"
] | lucascarrias@outlook.com |
5de082c8ad08281fbec4a9d451210e4fa47fd140 | 6d311039c0ce561c00f0b5dfd9305798baaeca8f | /Exercicios python - IF,ELIF,ELSE/calculadora.py | 3a214dbdb40b5401428eb9566822d09a26789a5a | [] | no_license | WwAzevedo/Python-Exercises | d91d14133beeb2917bbb07cfa69fff67d4048c73 | 454b1b10b542659a60ca3a5d06fa38b4a6638467 | refs/heads/master | 2022-12-18T07:16:45.978735 | 2020-09-17T01:53:37 | 2020-09-17T01:53:37 | 296,187,396 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 432 | py | n1 = int(input("Insira o primeiro valor:"))
# Simple four-operation calculator exercise (prompts in Portuguese).
n2 = int(input("Insira o segundo valor:"))
op = input("Qual operação realizar?")
if op == "+":
    soma = n1 + n2
    print(soma)
elif op == "-":
    menos = n1 - n2
    print(menos)
elif op == "*":
    mult = n1 * n2
    print(mult)
elif op == "/":
    # Fix: guard against ZeroDivisionError, which crashed the script
    # whenever the second value was 0.
    if n2 == 0:
        print("Não é possível dividir por zero. Insira um segundo valor diferente de zero")
    else:
        div = n1 / n2
        print(div)
else:
    print("Insira + para soma, - para subtração, * para multiplicação, / para divisão")
| [
"wesley.wazevedo@fco.net.br"
] | wesley.wazevedo@fco.net.br |
057689d1fa8f8c16acf59a0f0e342efca11d8cde | cb9281a34c3c5a36d4b3a846fb6ff22ede12f2f6 | /annotate_communities.py | c792c346aaae78ae95b967b3522d7c87354ffd69 | [] | no_license | juliettapc/CalorieKing | 9cb9f35ae9b239d2284175b0802cf2c60dc79d1d | 5f80bffb65fe4644a81ae2ab0b1738861e028331 | refs/heads/master | 2022-02-10T07:52:24.133379 | 2022-02-08T01:25:18 | 2022-02-08T01:25:18 | 153,174,891 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,823 | py | import networkx as nx
from transform_labels_to_nx import transform_labels_to_nx
import sys, os
from numpy import *
import itertools
def annotate_communities(G, num_points, filename, communitylist, dbdate = '2010'):
    '''
    Created by Rufaro Mukogo on 2011-03-31.
    Copyright (c) 2010 __Northwestern University__. All rights reserved.
    This script takes a GML file and the number of points and reads the a dat file that contains the
    list of lists for the communties and then annotates the GML file with a community attribute for
    each node that belongs to a community, the communities are odered from the largest to the smallest
    the identifies is "n_s" where n is the number of the communitiy (zero is the largest) and s is the size of the community

    NOTE(review): num_points is accepted but never used; communitylist is a
    ';'-separated list of ','-separated node ids (Python 2 code).
    '''
    for n in G.nodes(): # because the type of the labels or ids in some gml files is diff, and otherwise it gives me an error
        G.node[n]['label']=str(G.node[n]['label'])
        G.node[n]['id']=str(G.node[n]['id'])
    if dbdate =="2010":
        G = transform_labels_to_nx(G)
        #open file with the list of communities
        f = open(str(communitylist)).readlines()
    else:
        print "You need to generate a gml file that has only 2009 data"
        sys.exit()
    #extract list of communities should return a list of list
    communities = [x.strip().split(";") for x in f]
    # print communities,"\n"
    # The whole file is effectively one line: split its first entry on ','.
    communities = [x.strip().split(",") for x in communities[0]]
    #print communities,"\n"
    #sort communities
    communities = sorted(communities, key=len, reverse=True)
    #lisf of all the nodes that are in a community
    com_nodes= itertools.chain(*communities)
    #convert to integers to avoid key errors
    com_nodes =map(int, list(com_nodes))
    # Nodes outside every community get an empty 'community' attribute.
    for n in G.nodes():
        if n not in com_nodes:
            G.node[n]["community"] = ""
            #print n
    # Label each community "<rank>_<size>", largest community ranked 0.
    ii = 0
    for co in communities:
        s = str(ii)+"_"+str(len(co))
        #print "community_size", len(co), "s:",s
        for n in co:
            #add attribute to the main GML file
            n=str(n)
            G.node[n]["community"] = s
        ii+=1
    nx.write_gml(G,str(filename)+".gml")
    return G
if __name__ =="__main__":
    # CLI: annotate_communities.py <communitylist> <gml-basename>
    # NOTE(review): when an argument is missing, only a prompt is printed and
    # execution continues, so the later use of the variable raises NameError.
    if len(sys.argv)>1:
        communitylist = sys.argv[1]
    else:
        print "Enter the name of the list of communities"
    if len(sys.argv)>2:
        filename = sys.argv[2]
    else:
        print "Enter the name of the name of the .gml file"
    num_points = 5
    M = nx.read_gml(str(filename)+".gml")
    # Reset any existing community annotation before re-annotating.
    for n in M.nodes():
        M.node[n]["community"] = ""
    H = annotate_communities(M,num_points, filename, communitylist)
| [
"julia@chem-eng.northwestern.edu"
] | julia@chem-eng.northwestern.edu |
29c701073c352ca514237ca759457082a6ccd2f6 | 69c161371df4a8e85cef1524f3b17703ffd3588c | /edq_shared.py | 5d3e56fa9071b1a4f49deb4f525d3f1e61078013 | [
"MIT"
] | permissive | r-zemblys/windowselect | 479038dbfa7f7ad05ac1586d2ee31da3fa2a63ff | 9c2e25058b9faa145ee8dcacf68afb4ec38e9604 | refs/heads/master | 2021-01-19T07:20:38.676416 | 2014-11-14T10:13:57 | 2014-11-14T10:13:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25,441 | py | # -*- coding: utf-8 -*-
import os
import numpy as np
import numpy.ma as ma
import matplotlib.pylab as plt
plt.ion()
from constants import *
def nabs(file_path):
    """Return *file_path* as an absolute, normalized, case-normalized path.

    :param file_path: relative or absolute path string
    :return: the canonical form of the path
    """
    absolute = os.path.abspath(file_path)
    collapsed = os.path.normpath(absolute)
    return os.path.normcase(collapsed)
# get git rev info for current working dir git repo
def get_git_local_changed():
    """Return True when `git status` in the CWD reports local changes.

    NOTE(review): parses the human-readable `git status` output, whose
    wording varies across git versions -- `git status --porcelain` would be
    robust.  Also note the condition is True unless one of the two "clean"
    phrases appears.
    """
    import subprocess
    local_repo_status = subprocess.check_output(['git', 'status'])
    return local_repo_status.find(
        'nothing to commit (working directory clean)') == -1 and \
        local_repo_status.find('branch is up-to-date') == -1
def get_git_revision_hash(branch_name='HEAD'):
    """Return the full commit hash that *branch_name* resolves to (CWD repo)."""
    import subprocess
    rev_parse_cmd = ['git', 'rev-parse', branch_name]
    return subprocess.check_output(rev_parse_cmd)
def get_git_revision_short_hash(branch_name='HEAD'):
    """Return the abbreviated commit hash for *branch_name* (CWD repo)."""
    import subprocess
    rev_parse_cmd = ['git', 'rev-parse', '--short', branch_name]
    return subprocess.check_output(rev_parse_cmd)
def getFullOutputFolderPath(out_folder):
    """Return <out_folder>/rev_<short-hash>[_UNSYNCED], normalized via nabs().

    The '_UNSYNCED' suffix flags output produced from a dirty git tree.
    """
    output_folder_postfix = "rev_{0}".format(get_git_revision_short_hash().strip())
    if get_git_local_changed():
        output_folder_postfix = output_folder_postfix+"_UNSYNCED"
    return nabs(os.path.join(out_folder, output_folder_postfix))
def save_as_txt(fname, data):
    """Write a numpy structured array to *fname* as tab-separated text.

    The first line is a '#'-prefixed header listing the field names; each
    following line holds one record, fields separated by tabs.

    :param fname: output file path
    :param data: numpy structured (record) array with named fields
    """
    col_count = len(data.dtype.names)
    # One '{}' placeholder per column, tab-separated, newline-terminated.
    format_str = "{}\t" * col_count
    format_str = format_str[:-1] + "\n"
    header = '#' + '\t'.join(data.dtype.names) + '\n'
    # Fix: use a context manager so the file is closed even if formatting
    # one of the rows raises (the original leaked the handle on error).
    with open(fname, 'w') as txtf:
        txtf.write(header)
        for s in data.tolist():
            txtf.write(format_str.format(*s))
def parseTrackerMode(eyetracker_mode):
    """Map an eye-tracker mode label to the list of recorded eyes.

    'Binocular' yields both eyes; any other label (e.g. 'Left Eye') yields
    its first word, lower-cased.
    """
    if eyetracker_mode != 'Binocular':
        first_word = eyetracker_mode.split(' ')[0]
        return [first_word.lower()]
    return ['left', 'right']
def rolling_window(a, window):
    """Return a zero-copy view of *a* with a sliding window over its last axis.

    The result has one extra trailing axis of length *window*; element
    [..., i, :] equals a[..., i:i + window].  No data is copied (numpy
    stride trick), so the view must not be written to.
    """
    n_windows = a.shape[-1] - window + 1
    out_shape = a.shape[:-1] + (n_windows, window)
    out_strides = a.strides + (a.strides[-1],)
    return np.lib.stride_tricks.as_strided(a, shape=out_shape, strides=out_strides)
def add_field(a, descr):
    """Return a new array that is like "a", but has additional fields.

    Arguments:
      a     -- a structured numpy array
      descr -- a numpy type description of the new fields

    The contents of "a" are copied over to the appropriate fields in
    the new array; the new fields are zero-initialized (np.zeros).
    The arguments are not modified.

    >>> sa = numpy.array([(1, 'Foo'), (2, 'Bar')], \
                         dtype=[('id', int), ('name', 'S3')])
    >>> sa.dtype.descr == numpy.dtype([('id', int), ('name', 'S3')])
    True
    >>> sb = add_field(sa, [('score', float)])
    >>> sb.dtype.descr == numpy.dtype([('id', int), ('name', 'S3'), \
                                       ('score', float)])
    True
    >>> numpy.all(sa['id'] == sb['id'])
    True
    >>> numpy.all(sa['name'] == sb['name'])
    True
    """
    if a.dtype.fields is None:
        # Python 2 raise syntax (this module predates Python 3).
        raise ValueError, "`A' must be a structured numpy array"
    b = np.zeros(a.shape, dtype=a.dtype.descr + descr)
    # Copy each existing field column-by-column into the widened array.
    for name in a.dtype.names:
        b[name] = a[name]
    return b
### VisualAngleCalc
"""
Copied code from iohub
(trying to keep dependencies low for this conversion script)
__author__ = 'Sol'
"""
# Module-level shorthands for the numpy routines used by VisualAngleCalc.
arctan = np.arctan2
rad2deg = np.rad2deg
hypot = np.hypot
np_abs = np.abs      # NOTE(review): unused in the visible code
np_sqrt = np.sqrt    # NOTE(review): unused in the visible code
class VisualAngleCalc(object):
    """Convert pixel coordinates on a calibrated display into visual degrees.

    Pixel positions follow the PsychoPy 'pix' convention: (0, 0) is the
    center of the display.
    """

    def __init__(self, display_size_mm, display_res_pix, eye_distance_mm=None):
        """Store display geometry and the default eye-to-display distance.

        display_size_mm -- (width, height) of the display surface, in mm
        display_res_pix -- (horizontal, vertical) resolution, in pixels
        eye_distance_mm -- default eye distance used when pix2deg is not
                           given per-sample distances

        Note: The information for display_size_mm, display_res_pix, and the
        default eye_distance_mm could all be read automatically when opening
        an ioDataStore file; that automation is left for a future release.
        """
        width_mm, height_mm = display_size_mm[0], display_size_mm[1]
        res_x, res_y = display_res_pix[0], display_res_pix[1]
        self._display_width = width_mm
        self._display_height = height_mm
        self._display_x_resolution = res_x
        self._display_y_resolution = res_y
        self._eye_distance_mm = eye_distance_mm
        # Millimeters per pixel along each axis.
        self.mmpp_x = self._display_width / self._display_x_resolution
        self.mmpp_y = self._display_height / self._display_y_resolution

    def pix2deg(self, pixel_x, pixel_y=None, eye_distance_mm=None):
        """Convert pixel positions to (horizontal, vertical) visual degrees.

        Vectorized: pixel_x / pixel_y may be numpy arrays, in which case the
        conversion runs on every element at once.  A per-sample
        eye_distance_mm array (same length as the pixel arrays) may override
        the fixed default distance.
        """
        eye_dist_mm = eye_distance_mm if eye_distance_mm is not None else self._eye_distance_mm
        x_mm = self.mmpp_x * pixel_x
        y_mm = self.mmpp_y * pixel_y
        horz_rad = np.arctan2(x_mm, np.hypot(eye_dist_mm, y_mm))
        vert_rad = np.arctan2(y_mm, np.hypot(eye_dist_mm, x_mm))
        return np.rad2deg(horz_rad), np.rad2deg(vert_rad)
###
def initStim(data, n):
    """
    Creates /one-row-per-target/ data matrix
    @author: Raimondas Zemblys
    @email: raimondas.zemblys@humlab.lu.se

    Parameters: data -- structured sample array (one row per sample);
    n -- number of real targets (trial_id 0 and n+1 are warm-up/cool-down
    stimuli and are dropped from the result).
    """
    #Independent variables
    INCLUDE_IVs = ['eyetracker_model',
                   'eyetracker_sampling_rate',
                   'eyetracker_mode',
                   'operator',
                   'subject_id',
                   'exp_date',
                   'trial_id',
                   'ROW_INDEX',
                   'dt',
                   'TRIAL_START',
                   'TRIAL_END',
                   'posx',
                   'posy',
                   'target_angle_x',
                   'target_angle_y',
    ]
    # Indices where trial_id changes; the prepended 1 marks the very first
    # sample as a stimulus onset as well.
    stim_change_ind = np.where(np.hstack((1,np.diff(data['trial_id']))) == 1)
    stim_change_count = len(stim_change_ind[0])
    # One NaN-initialized row per detected stimulus.
    stim = np.array( np.ones(stim_change_count)*np.nan, dtype=stim_dtype)
    for stim_key_IV in INCLUDE_IVs:
        stim[stim_key_IV][:] = data[stim_key_IV][stim_change_ind]
    stim['px2deg'] = getGeometry(data[0])
    #Remove first and last stimuli
    ind = (stim['trial_id']==0) | (stim['trial_id']==n+1)
    return stim[~ind]
def detect_rollingWin(data, **args):
    """
    Fills /one-row-per-target/ data matrix
    @author: Raimondas Zemblys
    @email: raimondas.zemblys@humlab.lu.se

    For every target and every window-selection algorithm in args['wsa'],
    slides a fixed-size window over the 1 s analysis interval of the target,
    scores each window start with the algorithm's quality measure and keeps
    the best (minimum-score) window's measures in the output row.  Expected
    args keys: win_size, win_type ('sample'|'time'), window_skip, wsa
    (list of algorithm names), target_count.
    """
    win_size = args['win_size']
    win_type = args['win_type']
    # Window length in samples (win_size is in seconds); +1 includes both ends.
    win_size_sample = np.int16(win_size*data['eyetracker_sampling_rate'][0])+1
    window_skip = args['window_skip']
    selection_algorithms = args['wsa']
    target_count = args['target_count']
    measures=dict()
    stim_full=[]
    # Precompute per-sample rolling-window measures once per eye/unit; they
    # are shared by all selection algorithms below.
    for eye in parseTrackerMode(data['eyetracker_mode'][0]):
        if win_type == 'sample':
            measures.update(rolling_measures_sample(data, eye, 'gaze', win_size_sample))
            measures.update(rolling_measures_sample(data, eye, 'angle', win_size_sample))
        if win_type == 'time':
            measures.update(rolling_measures_time(data, eye, 'gaze', win_size))
            measures.update(rolling_measures_time(data, eye, 'angle', win_size))
    for wsa in selection_algorithms:
        stim = initStim(data, target_count)
        stim['wsa'][:] = wsa
        stim['win_size'][:] = win_size
        stim['window_skip'][:] = window_skip
        for eye in parseTrackerMode(data['eyetracker_mode'][0]):
            for stim_ind, stim_row in enumerate(stim):
                # Candidate window onsets: skip the first window_skip seconds
                # and stop early enough for a whole window to fit in the 1 s
                # presentation interval.
                analysis_range = (data['time'] >= stim_row['TRIAL_START']+window_skip) \
                & (data['time'] <= stim_row['TRIAL_START']+1-win_size)
                #needs to be a number to identify starting sample of a window
                analysis_range=np.squeeze(np.argwhere(analysis_range==True))
                analysis_range_full= (data['time'] >= stim_row['TRIAL_START']) \
                & (data['time'] <= stim_row['TRIAL_START']+1)
                stim['total_sample_count'][stim_ind] = np.sum(analysis_range_full)
                #invalid_sample_count should be the same for gaze and angle
                stim['_'.join((eye, 'invalid_sample_count'))][stim_ind] = np.sum(np.isnan(data[eye+'_gaze_x'][analysis_range_full]) |
                                                                                 np.isnan(data[eye+'_gaze_y'][analysis_range_full])
                )
                for units in ['gaze', 'angle']:
                    measures_rangeRMS = measures['_'.join((eye, units, 'RMS'))][analysis_range]
                    measures_rangeACC = measures['_'.join((eye, units, 'ACC'))][analysis_range]
                    measures_rangeSTD = measures['_'.join((eye, units, 'STD'))][analysis_range]
                    # Window quality score; lower is better for every wsa.
                    if wsa == 'fiona':
                        measures_range = measures_rangeRMS #Fiona RMS
                    elif wsa == 'dixon1':
                        measures_range = measures_rangeACC*measures_rangeSTD #Dixons's measure No. 1
                    elif wsa == 'dixon2':
                        measures_range = measures_rangeACC+measures_rangeSTD #Dixons's measure No. 2
                    elif wsa == 'dixon3':
                        measures_range = measures_rangeACC**2+measures_rangeSTD**2 #Dixons's measure No. 3
                    elif wsa == 'jeff':
                        #Jeff's measure: accuracy, but only where precision
                        #(STD) is within 5x of the best window's STD.
                        std_thres = np.nanmin(measures_rangeSTD)*5
                        measures_rangeACC[measures_rangeSTD>std_thres] = np.nan
                        measures_range = measures_rangeACC
                    if np.sum(np.isfinite(measures_range)) > 0: #handle all-nan slice
                        if np.size(measures_range) == 1:#handle one sample slice
                            measures_range = np.array([measures_range])
                            analysis_range = np.array([analysis_range])
                        # Sample index where the best-scoring window starts.
                        IND = analysis_range[np.nanargmin(measures_range)]
                        #save measures to stim
                        stim['_'.join((eye, units, 'ind'))][stim_ind] = IND
                        stim['_'.join((eye, units, 'window_onset'))][stim_ind] = data['time'][IND]-stim_row['TRIAL_START']
                        for key in filter(lambda x: '_'.join((eye, units)) in x, measures.keys()):
                            stim[key][stim_ind]=measures[key][IND]
        stim_full.extend(stim.tolist())
    return np.array(stim_full, dtype=stim_dtype)
def rolling_measures_sample(data, eye, units, win_size_sample):
    """Compute sample-based rolling-window data-quality measures.

    For every window of ``win_size_sample`` consecutive samples this builds a
    dict of per-window measures keyed as ``'<eye>_<units>_<measure>[_<axis>]'``:
    RMS of inter-sample distances, RMS of the position error (PE), standard
    deviation of position and of PE, median accuracy (signed and absolute),
    and the median fixation position.

    Args:
        data: structured array holding gaze traces; assumes fields named
            ``'<eye>_<units>_x'/'<eye>_<units>_y'`` plus a ``'time'`` field
            and the target-position fields referenced by ``stim_pos_mappings``
            (module-level mapping) — TODO confirm field layout against caller.
        eye: eye label used in the field names (e.g. ``'left'``/``'right'``).
        units: ``'gaze'`` or ``'angle'``.
        win_size_sample: window length in samples.

    Returns:
        dict mapping measure names to 1-D arrays (one value per window).

    @author: Raimondas Zemblys
    @email: raimondas.zemblys@humlab.lu.se
    """
    measures=dict()
    #Data
    rolling_data_x = rolling_window(data['_'.join((eye, units, 'x'))], win_size_sample)
    rolling_data_y = rolling_window(data['_'.join((eye, units, 'y'))], win_size_sample)
    #Position error
    err_x = data['_'.join((eye, units, 'x'))] - data[stim_pos_mappings[units]+'x']
    err_y = data['_'.join((eye, units, 'y'))] - data[stim_pos_mappings[units]+'y']
    rolling_err_x = rolling_window(err_x, win_size_sample)
    rolling_err_y = rolling_window(err_y, win_size_sample)
    #Time
    rolling_time = rolling_window(data['time'], win_size_sample)
    # Sample count is constant by construction in the sample-based variant.
    measures['_'.join((eye, units, 'sample_count'))] = np.ones(len(rolling_time)) * win_size_sample
    # Actual window duration: last minus first timestamp within each window.
    measures['_'.join((eye, units, 'actual_win_size'))] = rolling_time[:,-1]-rolling_time[:,0]
    #RMS
    # Inter-sample differences: one fewer element than samples, hence the
    # win_size_sample-1 window length below.
    isd = np.diff([data['_'.join((eye, units, 'x'))],
                   data['_'.join((eye, units, 'y'))]], axis=1).T
    measures['_'.join((eye, units, 'RMS', 'x'))] = np.sqrt(np.mean(np.square(rolling_window(isd[:,0], win_size_sample-1)), 1))
    measures['_'.join((eye, units, 'RMS', 'y'))] = np.sqrt(np.mean(np.square(rolling_window(isd[:,1], win_size_sample-1)), 1))
    measures['_'.join((eye, units, 'RMS'))] = np.hypot(measures['_'.join((eye, units, 'RMS', 'x'))],
                                                       measures['_'.join((eye, units, 'RMS', 'y'))]
                                                       )
    #RMS of PE
    isd = np.diff([err_x, err_y], axis=1).T
    measures['_'.join((eye, units, 'RMS_PE', 'x'))] = np.sqrt(np.mean(np.square(rolling_window(isd[:,0], win_size_sample-1)), 1))
    measures['_'.join((eye, units, 'RMS_PE', 'y'))] = np.sqrt(np.mean(np.square(rolling_window(isd[:,1], win_size_sample-1)), 1))
    measures['_'.join((eye, units, 'RMS_PE'))] = np.hypot(measures['_'.join((eye, units, 'RMS_PE', 'x'))],
                                                          measures['_'.join((eye, units, 'RMS_PE', 'y'))]
                                                          )
    ###
    ### STD
    measures['_'.join((eye, units, 'STD', 'x'))] = np.std(rolling_data_x, axis=1)
    measures['_'.join((eye, units, 'STD', 'y'))] = np.std(rolling_data_y, axis=1)
    measures['_'.join((eye, units, 'STD'))] = np.hypot(measures['_'.join((eye, units, 'STD', 'x'))],
                                                       measures['_'.join((eye, units, 'STD', 'y'))]
                                                       )
    #STD of PE
    measures['_'.join((eye, units, 'STD_PE', 'x'))] = np.std(rolling_err_x, axis=1)
    measures['_'.join((eye, units, 'STD_PE', 'y'))] = np.std(rolling_err_y, axis=1)
    measures['_'.join((eye, units, 'STD_PE'))] = np.hypot(measures['_'.join((eye, units, 'STD_PE', 'x'))],
                                                          measures['_'.join((eye, units, 'STD_PE', 'y'))]
                                                          )
    ###
    ###ACC
    # Signed accuracy: median position error per window.
    measures['_'.join((eye, units, 'ACC', 'x'))] = np.median(rolling_err_x, axis=1)
    measures['_'.join((eye, units, 'ACC', 'y'))] = np.median(rolling_err_y, axis=1)
    measures['_'.join((eye, units, 'ACC'))] = np.hypot(measures['_'.join((eye, units, 'ACC', 'x'))],
                                                       measures['_'.join((eye, units, 'ACC', 'y'))]
                                                       )
    #Absolute accuracy
    measures['_'.join((eye, units, 'ACC_abs', 'x'))] = np.median(np.abs(rolling_err_x), axis=1)
    measures['_'.join((eye, units, 'ACC_abs', 'y'))] = np.median(np.abs(rolling_err_y), axis=1)
    measures['_'.join((eye, units, 'ACC_abs'))] = np.hypot(measures['_'.join((eye, units, 'ACC_abs', 'x'))],
                                                           measures['_'.join((eye, units, 'ACC_abs', 'y'))]
                                                           )
    #Fix
    # Estimated fixation location: per-window median of the raw position.
    measures['_'.join((eye, units, 'fix', 'x'))] = np.median(rolling_data_x, axis=1)
    measures['_'.join((eye, units, 'fix', 'y'))] = np.median(rolling_data_y, axis=1)
    ###
    return measures
def rolling_measures_time(data, eye, units, win_size):
    """Compute time-based rolling-window data-quality measures.

    Same measure set as ``rolling_measures_sample`` but each window covers a
    fixed time span ``win_size`` (seconds) instead of a fixed sample count.
    Windows are built twice the nominal sample length and then trimmed with a
    mask derived from cumulative inter-sample intervals, so that dropped
    samples / irregular sampling do not silently widen the window.

    Args:
        data: structured array with ``'time'``, ``'eyetracker_sampling_rate'``
            and the ``'<eye>_<units>_{x,y}'`` / target fields (via the
            module-level ``stim_pos_mappings``) — TODO confirm against caller.
        eye: eye label used in the field names.
        units: ``'gaze'`` or ``'angle'``.
        win_size: window duration in seconds.

    Returns:
        dict mapping measure names to 1-D arrays; positions that fall outside
        a valid window are reported as NaN.

    @author: Raimondas Zemblys
    @email: raimondas.zemblys@humlab.lu.se
    """
    measures=dict()
    fs = data['eyetracker_sampling_rate'][0]
    # Nominal number of samples covering win_size at the tracker's rate.
    win_size_sample = np.int16(win_size*fs)+1
    #adjust window size to account for the uncertainty of a measurement
    win_size+=1.0/fs/2
    #get masks for windows based on time
    # temp_acc: inter-sample intervals; their cumulative sums give the elapsed
    # time from the window start, used to mask out samples beyond win_size.
    temp_acc=np.diff(data['time'])
    # The inserted zero column keeps the first sample of each window unmasked.
    rolling_temp_acc_for_data=np.cumsum(np.insert(rolling_window(temp_acc,win_size_sample*2-1), 0, np.zeros(len(temp_acc)-(win_size_sample-1)*2), axis=1), axis=1)
    rolling_temp_acc_for_isd=np.cumsum(rolling_window(temp_acc,win_size_sample*2-1), axis=1)
    mask_data=ma.getmaskarray(ma.masked_greater(rolling_temp_acc_for_data, win_size))
    mask_isd=ma.getmaskarray(ma.masked_greater(rolling_temp_acc_for_isd, win_size))
    #Data
    rolling_data_x = ma.array(rolling_window(data['_'.join((eye, units, 'x'))], win_size_sample*2),mask=mask_data)
    rolling_data_y = ma.array(rolling_window(data['_'.join((eye, units, 'y'))], win_size_sample*2),mask=mask_data)
    #Position error
    err_x = data['_'.join((eye, units, 'x'))] - data[stim_pos_mappings[units]+'x']
    err_y = data['_'.join((eye, units, 'y'))] - data[stim_pos_mappings[units]+'y']
    rolling_err_x = ma.array(rolling_window(err_x, win_size_sample*2),mask=mask_data)
    rolling_err_y = ma.array(rolling_window(err_y, win_size_sample*2),mask=mask_data)
    #Time
    rolling_time = ma.array(rolling_window(data['time'], win_size_sample*2),mask=mask_data)
    # NOTE(review): this counts the *masked* (excluded) samples per window —
    # presumably intended as the included-sample count; confirm against usage.
    measures['_'.join((eye, units, 'sample_count'))] = np.sum(mask_data, axis=1)
    # Actual window duration: first to last unmasked timestamp per window.
    notmasked_edges=ma.notmasked_edges(rolling_time, axis=1)
    start_times = ma.getdata(rolling_time[notmasked_edges[0][0],notmasked_edges[0][1]])
    end_times = ma.getdata(rolling_time[notmasked_edges[1][0],notmasked_edges[1][1]])
    measures['_'.join((eye, units, 'actual_win_size'))] = end_times-start_times
    ### RMS
    isd = np.diff([data['_'.join((eye, units, 'x'))],
                   data['_'.join((eye, units, 'y'))]], axis=1).T
    rolling_isd_x = ma.array(rolling_window(isd[:,0], win_size_sample*2-1),mask=mask_isd)
    rolling_isd_y = ma.array(rolling_window(isd[:,1], win_size_sample*2-1),mask=mask_isd)
    RMS=[]
    # Fully-masked windows yield masked results; convert those to NaN so the
    # downstream nan-aware reductions handle them uniformly.
    for rms in [np.sqrt(np.mean(np.square(rolling_isd_x), 1)),
                np.sqrt(np.mean(np.square(rolling_isd_y), 1)),
               ]:
        rms_tmp = ma.getdata(rms)
        mask = ma.getmask(rms)
        rms_tmp[mask]=np.nan
        RMS.append(rms_tmp)
    measures['_'.join((eye, units, 'RMS', 'x'))] = RMS[0]
    measures['_'.join((eye, units, 'RMS', 'y'))] = RMS[1]
    measures['_'.join((eye, units, 'RMS'))] = np.hypot(RMS[0], RMS[1])
    ###
    #RMS of PE
    isd = np.diff([err_x, err_y], axis=1).T
    rolling_isd_x = ma.array(rolling_window(isd[:,0], win_size_sample*2-1),mask=mask_isd)
    rolling_isd_y = ma.array(rolling_window(isd[:,1], win_size_sample*2-1),mask=mask_isd)
    RMS=[]
    for rms in [np.sqrt(np.mean(np.square(rolling_isd_x), 1)),
                np.sqrt(np.mean(np.square(rolling_isd_y), 1)),
               ]:
        rms_tmp = ma.getdata(rms)
        mask = ma.getmask(rms)
        rms_tmp[mask]=np.nan
        RMS.append(rms_tmp)
    measures['_'.join((eye, units, 'RMS_PE', 'x'))] = RMS[0]
    measures['_'.join((eye, units, 'RMS_PE', 'y'))] = RMS[1]
    measures['_'.join((eye, units, 'RMS_PE'))] = np.hypot(RMS[0], RMS[1])
    ###
    ###STD
    STD=[]
    for std in [np.std(rolling_data_x, axis=1), np.std(rolling_data_y, axis=1)]:
        std_tmp = ma.getdata(std)
        mask = ma.getmask(std)
        std_tmp[mask]=np.nan
        STD.append(std_tmp)
    measures['_'.join((eye, units, 'STD', 'x'))] = STD[0]
    measures['_'.join((eye, units, 'STD', 'y'))] = STD[1]
    measures['_'.join((eye, units, 'STD'))] = np.hypot(STD[0], STD[1])
    #STD of PE
    STD=[]
    for std in [np.std(rolling_err_x, axis=1), np.std(rolling_err_y, axis=1)]:
        std_tmp = ma.getdata(std)
        mask = ma.getmask(std)
        std_tmp[mask]=np.nan
        STD.append(std_tmp)
    measures['_'.join((eye, units, 'STD_PE', 'x'))] = STD[0]
    measures['_'.join((eye, units, 'STD_PE', 'y'))] = STD[1]
    measures['_'.join((eye, units, 'STD_PE'))] = np.hypot(STD[0], STD[1])
    ###ACC
    # Signed accuracy: per-window median of the position error.
    ACC=[]
    for acc in [np.median(rolling_err_x, axis=1), np.median(rolling_err_y, axis=1)]:
        acc_tmp = ma.getdata(acc)
        mask = ma.getmask(acc)
        acc_tmp[mask]=np.nan
        ACC.append(acc_tmp)
    measures['_'.join((eye, units, 'ACC', 'x'))] = ACC[0]
    measures['_'.join((eye, units, 'ACC', 'y'))] = ACC[1]
    measures['_'.join((eye, units, 'ACC'))] = np.hypot(ACC[0], ACC[1])
    #Absolute accuracy
    ACC=[]
    for acc in [np.median(np.abs(rolling_err_x), axis=1), np.median(np.abs(rolling_err_y), axis=1)]:
        acc_tmp = ma.getdata(acc)
        mask = ma.getmask(acc)
        acc_tmp[mask]=np.nan
        ACC.append(acc_tmp)
    measures['_'.join((eye, units, 'ACC_abs', 'x'))] = ACC[0]
    measures['_'.join((eye, units, 'ACC_abs', 'y'))] = ACC[1]
    measures['_'.join((eye, units, 'ACC_abs'))] = np.hypot(ACC[0], ACC[1])
    #Fix
    # Estimated fixation location: per-window median of the raw position.
    FIX=[]
    for fix in [np.median(rolling_data_x, axis=1), np.median(rolling_data_y, axis=1)]:
        fix_tmp = ma.getdata(fix)
        mask = ma.getmask(fix)
        fix_tmp[mask]=np.nan
        FIX.append(fix_tmp)
    measures['_'.join((eye, units, 'fix', 'x'))] = FIX[0]
    measures['_'.join((eye, units, 'fix', 'y'))] = FIX[1]
    ###
    return measures
def getGeometry(data):
    """Return the mean pixels-per-degree value derived from display geometry.

    Combines the physical screen size, the eye-to-screen distance and the
    display resolution to determine how many pixels correspond to one degree
    of visual angle, averaged over the horizontal and vertical dimensions.

    @author: Raimondas Zemblys
    @email: raimondas.zemblys@humlab.lu.se
    """
    def pix_per_deg(size, resolution_pix):
        # Full visual angle (degrees) subtended by one screen dimension.
        full_angle = np.degrees(2 * np.arctan(size / (2 * data['eye_distance'])))
        return resolution_pix / full_angle

    horizontal = pix_per_deg(data['screen_width'], data['display_width_pix'])
    vertical = pix_per_deg(data['screen_height'], data['display_height_pix'])
    return np.mean((horizontal, vertical))
def filter_trackloss(data_wide, et_model=None, fill=np.nan):
    """
    Trackloss filter. Replaces invalid samples with /fill/

    A sample counts as trackloss when its gaze coordinates equal the
    tracker-specific sentinel values in the module-level ``et_nan_values``
    table, or are already NaN. Both gaze and angle channels are overwritten.
    Operates on (and returns) a copy of ``data_wide``.

    Returns:
        tuple of (filtered copy of the data, dict with per-eye invalid-sample
        counts plus an 'avg' entry).

    @author: Raimondas Zemblys
    @email: raimondas.zemblys@humlab.lu.se
    """
    loss_count = dict()
    data = np.copy(data_wide)
    for eye in ['left', 'right']:
        #TODO: Filter off-screen, off-pshysical limit samples
        # eyetribe dynamic et_nan_value
        trackloss = (data['_'.join((eye, 'gaze_x'))] == et_nan_values[et_model]['x']) | \
                    (data['_'.join((eye, 'gaze_y'))] == et_nan_values[et_model]['y']) | \
                    (np.isnan(data['_'.join((eye, 'gaze_x'))])) | \
                    (np.isnan(data['_'.join((eye, 'gaze_y'))]))
        if et_model == 'dpi':
            # DPI tracker: a status below 4.0 also marks an invalid sample
            # (assumption based on this check only — confirm with DPI docs).
            trackloss = np.bitwise_or(trackloss, data['status'] < 4.0)
        data['_'.join((eye, 'gaze_x'))][trackloss] = fill
        data['_'.join((eye, 'gaze_y'))][trackloss] = fill
        data['_'.join((eye, 'angle_x'))][trackloss] = fill
        data['_'.join((eye, 'angle_y'))][trackloss] = fill
        loss_count[eye] = np.sum(trackloss)
    if data['eyetracker_mode'][0] == 'Binocular':
        loss_count['avg'] = np.mean((loss_count['right'], loss_count['left']))
    else:
        # Monocular recording: the average equals the single tracked eye.
        eye = parseTrackerMode(data['eyetracker_mode'][0])[0]
        loss_count['avg'] = loss_count[eye]
    return data, loss_count
def filter_offscreen(data_wide, limit=0, fill=np.nan):
    """Replace gaze samples falling outside the stimulus area with *fill*.

    The valid range per axis is the span of the target angles, expanded by
    *limit* degrees on each side. Samples outside that range are overwritten
    with *fill* in both the angle and gaze channels. Operates on (and
    returns) a copy of ``data_wide``.

    @author: Raimondas Zemblys
    @email: raimondas.zemblys@humlab.lu.se
    """
    filtered = np.copy(data_wide)
    eyes = parseTrackerMode(filtered['eyetracker_mode'][0])
    for axis in ('x', 'y'):
        targets = filtered['_'.join(('target_angle', axis))]
        lower = np.min(targets) - limit
        upper = np.max(targets) + limit
        for eye in eyes:
            angle_key = '_'.join((eye, 'angle', axis))
            gaze_key = '_'.join((eye, 'gaze', axis))
            # Mask marks every sample outside the allowed interval.
            outside = ma.getmask(ma.masked_outside(filtered[angle_key],
                                                   lower, upper))
            filtered[angle_key][outside] = fill
            filtered[gaze_key][outside] = fill
    return filtered
def plot_data(data, measure='angle', title=None, ylim=None, fname=None, keep=False, stim=None):
    """
    Plots gaze data

    Draws horizontal and vertical position traces (one subplot each) for
    every tracked eye, overlays the target trace in black, and — when a
    ``stim`` record array is given — highlights the selected analysis
    windows in red. Optionally saves the figure to ``fname`` and closes it
    unless ``keep`` is True.

    @author: Raimondas Zemblys
    @email: raimondas.zemblys@humlab.lu.se
    """
    fig = plt.figure()
    plt.suptitle(title)
    dir_label = dict()
    dir_label['x'] = 'Horizontal position, deg'
    dir_label['y'] = 'Vertical position, deg'
    for subplot_ind, _dir in enumerate(['x', 'y']):
        for eye in parseTrackerMode(data['eyetracker_mode'][0]):
            plt.subplot(2,1,subplot_ind+1)
            plt.plot(data['time'], data['_'.join((eye, measure, _dir))])
            # Target trace in black for reference.
            plt.plot(data['time'], data['_'.join(('target', measure, _dir))], 'k-')
            plt.xlim(data['time'][0], data['time'][-1])
            plt.ylabel(dir_label[_dir])
            if ylim:
                plt.ylim(ylim[0], ylim[1])
            if type(stim) is np.ndarray:
                # Highlight each selected window (start index + sample count).
                for stim_row in stim:
                    s = stim_row['_'.join((eye, measure, 'ind'))]
                    e = s+stim_row['_'.join((eye, measure, 'sample_count'))]
                    r = np.arange(s, e, dtype=np.int32)
                    plt.plot(data['time'][r], data['_'.join((eye, measure, _dir))][r], 'r-', linewidth=3)
    plt.xlabel('Time, s')
    if fname:
        plt.savefig(fname, dpi=fig.dpi*4)
    if not(keep):
        fig.clf()
        plt.close(fig)
fig = None | [
"raimondas.zemblys@gmail.com"
] | raimondas.zemblys@gmail.com |
8eff51fa665518a50f44a8c513d5f865fc70e37a | f512437ea436cdf7667435f1bea661b8b7d4322c | /downloadfileV2.py | c150dbf3a11656a8545dfc1ba5dc842ab013a039 | [] | no_license | siyiwolf/python | 5a75c9d072aa94a05d46219f0551569fc182695e | 595099f6d4c183c2fc5da7399a443e37a6a8b94d | refs/heads/master | 2021-05-05T04:26:50.262683 | 2018-02-27T03:41:14 | 2018-02-27T03:41:14 | 118,591,925 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,061 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from bs4 import BeautifulSoup
import urllib.request
import requests
import re
import os
# Bookkeeping class: download statistics per URL, also intended for
# restart/recovery support.
class staticLoadNum():
    """Statistics record for one crawled URL at a given crawl depth.

    Records form a tree: each node tracks the files found directly at its
    URL and aggregates the totals of the child records attached through
    data_add().
    """

    def __init__(self, url, level_num):
        self.url = url
        self.level_num = level_num
        self.list = []               # child staticLoadNum records
        self.file_num = 0            # files found directly at this URL
        self.sum_num = 0             # files here plus in all children
        self.file_failed_num = 0     # reserved: failed downloads at this URL
        self.sum_failed_num = 0      # reserved: aggregated failed downloads

    def set_file_num(self, file_num):
        """Record the file count for this URL and add it to the running total."""
        self.file_num = file_num
        self.sum_num += file_num

    def data_add(self, sub_data):
        """Attach a child record and fold its total into this node's total."""
        # A type check on sub_data could be added here.
        self.list.append(sub_data)
        self.sum_num += sub_data.sum_num

    def __str__(self):
        return ('The URL is:' + self.url + '\n'
                + 'Level_num:' + str(self.level_num) + '\n'
                + 'Num of Sons:' + str(len(self.list)) + '\n'
                + 'File Num:' + str(self.file_num) + '\n'
                + 'Sum File Num:' + str(self.sum_num))
# Downloader class: crawls a page, collects links to files of a given
# extension, downloads them, and recurses into sub-links up to max_level.
class downloadfile():
    """Recursive file downloader.

    Each instance handles one URL: it fetches the page, extracts candidate
    anchors, downloads files matching ``form_str`` (e.g. '.pdf') into a
    per-level directory, and spawns child instances for sub-links.

    WARNING: the constructor changes the process working directory
    (os.chdir) as a side effect, and ``ulr_set`` is shared across all
    instances to avoid revisiting URLs.
    """
    # Class-wide set of already-seen URLs (note: attribute name is a typo
    # for "url_set", kept for compatibility).
    ulr_set = set();
    def __init__(self, url, max_level, form_str, father_dir, level_num = 0):
        """Set up state and create/enter the per-level download directory.

        Args:
            url: page to crawl.
            max_level: maximum recursion depth.
            form_str: file extension to download, including the dot.
            father_dir: parent directory for this level's download folder.
            level_num: current recursion depth (0 for the root).
        """
        self.url = url
        self.max_level = max_level
        self.level_num = level_num
        self.form_str = form_str
        self.form_file_list = list()
        self.sub_url_list = list()
        self.file_num = 0;
        self.localData = staticLoadNum(url, level_num) # bundled statistics record
        downloadfile.ulr_set.add(url);
        # Lay out the storage directory hierarchy for downloaded files.
        os.chdir(father_dir)
        load_dir = str(level_num) + '_load_' + form_str[1:]
        if not os.path.exists(load_dir):
            os.mkdir(load_dir)
        os.chdir(os.path.join(os.getcwd(), load_dir))
        self.dir_name = os.getcwd()
    def get_local_data(self):
        """Return this node's staticLoadNum statistics record."""
        return self.localData
    # Basic network-connection helper.
    def connect_to_url(self):
        """GET self.url; return the response, or None on any failure."""
        try:
            response = requests.get(self.url, timeout = 30)
            response.raise_for_status()
        except requests.RequestException as e:
            print('Conneted failed')
            return
        except:
            print('Socket failed!')
            return
        else:
            print(response)
            return response
    # Link-discovery helper: fills form_file_list and sub_url_list.
    def get_a_href(self, response):
        """Parse *response* and classify anchors into files and sub-links."""
        if (response):
            soup = BeautifulSoup(response.content, "html.parser",from_encoding="GBK")
            a_list = self.parser_div_element(soup)
            #a_list = soup.find_all('a')
            for a_href in a_list:
                # Filter condition on the anchor text.
                if (self.filter_a_condition(a_href.string) == False):
                    #print('No Match Condition!,We Pass')
                    continue
                try:
                    href_url = a_href['href']
                except:
                    print('a-label have not a vaule of ATTR href')
                    continue
                if href_url in downloadfile.ulr_set:
                    continue
                # Check whether href is just '#': the real target is then
                # embedded in the onclick handler.
                fig_str = r'#$'
                pad_fig = re.compile(fig_str)
                if(pad_fig.match(href_url)):
                    print('Match # Success!')
                    try:
                        str1 = a_href['onclick']
                    except:
                        print('a label have not a vaule of ATTR onclick')
                        continue
                    # First single-quoted argument of the onclick call.
                    m = re.findall("'([^']+)'", str1)
                    href_url = m[0]
                #print(a_href)
                #print(href_url)
                # File-extension match.
                str_form = r'.+' + str(self.form_str) + '$'
                pad = re.compile(str_form)
                # Parent-directory style link match (ends with '/').
                sub_dir = r'.*/'
                pad_dir = re.compile(sub_dir)
                if(pad.match(href_url)): # matched a downloadable file
                    #print(debug_href_url)
                    if href_url not in self.form_file_list:
                        self.form_file_list.append(href_url)
                elif (re.match(r'http', href_url)): # matched an absolute sub-link
                    #print('debug http')
                    if href_url not in self.sub_url_list:
                        self.sub_url_list.append(href_url)
                elif (pad_dir.match(href_url)): # possibly a parent-directory link
                    print('debug sub_dir')
                    if(re.match(href_url.split('/')[-2],self.url.split('/')[-3])): # would navigate back to origin
                        print('continue')
                        print(href_url.split('/'))
                        print(self.url.split('/'))
                        continue
                    # NOTE(review): temp_url is computed but never queued — the
                    # append below is commented out, so relative directory links
                    # are currently ignored. Confirm whether this is deliberate.
                    rHtml_str = self.remove_html()
                    temp_url = rHtml_str + href_url
                    # site-specific handling, disabled for now
##                    temp_url = 'http://www.bzmfxz.com' + href_url
##                    print(temp_url)
##                    if temp_url not in self.sub_url_list:
##                        self.sub_url_list.append(temp_url)
    # Strip the .htm(l) page name from the URL.
    def remove_html(self):
        """Return self.url truncated when its last segment is an .htm page.

        NOTE(review): on a match this returns the FIRST len(last-segment)
        characters of the URL (and '' when there is no match) — this looks
        like it was meant to strip the trailing page name instead; verify.
        """
        html_form = r'.+\.htm'
        pad = re.compile(html_form)
        end_index = 0
        if(pad.match(self.url.split('/')[-1])):
            end_index = end_index = len(self.url.split('/')[-1])
        return self.url[:end_index]
    def parser_div_element(self, soup):
        """Collect unique anchors from all <div>s passing filter_div_condition."""
        a_list = list();
        if (soup == None):
            return a_list
        div_list = soup.find_all('div')
        for div_href in div_list:
            #print(div_href)
            if (self.filter_div_condition(div_href)):
                a_temp_list = div_href.find_all('a');
                #print(a_temp_list)
                for a_href in a_temp_list:
                    if (a_href not in a_list):
                        a_list.append(a_href)
        return a_list
    def filter_div_condition(self, div_str):
        """At level 0 only accept <div>s whose class contains 'pub_element'."""
        if (div_str == None):
            return False
        if (self.level_num == 0):
            try:
                div_class = div_str['class']
            except:
                print('div-label have not value of ATTR class')
                return False
            if (re.match(r'.*pub_element', str(div_class))):
                return True
            else:
                print('Match class Failed!')
                return False
        return True
    # Disabled site-specific anchor filter (kept for reference).
##    #Case One
##    def filter_a_condition(self, a_href_str):
##        if (a_href_str == None):
##            return False
##
##        if (self.level_num == 0):
##            gjb_str = r'.*\r\nGJB'
##            pad = re.compile(gjb_str)
##            if (pad.match(a_href_str)):
##                print('Match GJB Success!')
##                return True
##            else:
##                return False
##        elif (self.level_num == 1):
##            load_str = r'.*进入下载页面'
##            pad_load = re.compile(load_str)
##            if (pad_load.match(a_href_str)):
##                print ('Match load page Success!')
##                return True
##            else:
##                return False
##        elif (self.level_num == 2):
##            load_str = '.*点击下载'
##            if (re.match(load_str, a_href_str)):
##                print ('Match LOAD Success!')
##                return True
##            else:
##                return False
    #Case Two
    def filter_a_condition(self, a_href_str):
        """At level 1 only accept anchors whose text starts 'Download Publication'."""
        if (a_href_str == None):
            return False
        if (self.level_num == 1):
            load_str = r'Download Publication'
            pad_load = re.compile(load_str)
            if (pad_load.match(a_href_str)):
                #print ('Match load page Success!')
                return True
            else:
                return False
        return True
    # File download helpers.
    def get_dir_name(self):
        """Build a filesystem-safe directory name from the URL path (<=64 chars)."""
##        html_form = r'.+\.html$'
##        pad = re.compile(html_form)
##        if(pad.match(self.url.split('/')[-1])):
##            dir_list = self.url.split('/')[3:-1]
##        else:
##            dir_list = self.url.split('/')[3:]
        # NOTE(review): dirl is unused.
        dirl = self.remove_html()
        dir_list = self.url.split('/')[3:]
        dir_name = str(self.level_num)
        for dir_str in dir_list:
            if (len(dir_name) + len(dir_str) > 64):
                return dir_name;
            # Replace characters that are invalid in directory names.
            dir_str = re.sub('[\/:*?"<>|]','-',dir_str)
            dir_name = dir_name + '_' + dir_str
        return dir_name
    def getFile(self, url):
        """Download *url* into the current directory, skipping existing files."""
        file_name = url.split('/')[-1]
        #print(file_name)
        try:
            u = urllib.request.urlopen(url)
        except urllib.error.HTTPError:
            print(url, "url file not found")
            return
        except:
            print('File Socket failed!')
            return
        # Skip writing if the file already exists.
        if os.path.exists(file_name):
            print(file_name + 'existed, we passed')
            return
        block_sz = 8192
        with open(file_name, 'wb') as f:
            while True:
                buffer = u.read(block_sz)
                if buffer:
                    f.write(buffer)
                else:
                    break
        print ("Sucessful to download" + " " + file_name)
    def loadFile(self):
        """Download every collected file into a per-URL subdirectory."""
        #self.form_file_list.sort()
        form_num = len(self.form_file_list)
        self.localData.set_file_num(form_num)
        print('The total Form num is', form_num)
        if (form_num != 0):
            dir_name = self.get_dir_name()
            if not os.path.exists(dir_name):
                os.mkdir(dir_name)
            cur_dir = os.getcwd()
            os.chdir(os.path.join(cur_dir, dir_name))
            i = 0
            for url in self.form_file_list:
                # Resolve relative file links against self.url.
                if (re.match(r'http', url)):
                    url = url
                elif(self.url[-5:-1] == '.htm'):
                    # NOTE(review): this slice uses the *count of path
                    # segments*, not the page-name length — likely a bug;
                    # confirm against the sites this was written for.
                    url = self.url[:-len(self.url.split('/')[:-1])] + url
                else:
                    url = self.url + url
                print(url)
                self.getFile(url)
                i = i + 1
                rate = format(i/form_num, '.0%')
                print('have load num is:', i, 'and rate:', rate)
            os.chdir(cur_dir)
    # Driver; candidate for process-based scheduling to improve performance.
    def process_load_file(self):
        """Crawl self.url, download its files, then recurse into sub-links."""
        response = self.connect_to_url()
        self.get_a_href(response)
        self.loadFile()
        if self.level_num > self.max_level:
            return
        for sub_url in self.sub_url_list:
            sub_load_class = downloadfile(sub_url, self.max_level, self.form_str, self.dir_name, self.level_num+1)
            sub_load_class.process_load_file()
            # Aggregate the child's statistics into this node.
            self.localData.data_add(sub_load_class.get_local_data())
            print(sub_load_class);
    def __str__(self):
        info_str = 'LoadFile Infomation:\n' +\
                   'url:'+ self.url + '\n' +\
                   'level_num:' + str(self.level_num) + '\n' +\
                   'have load file:' + str(len(self.form_file_list)) + '\n' +\
                   'local data:' + str(self.localData)
        return info_str
if __name__=='__main__':
    # Interactive entry point: prompt for the start page, recursion depth
    # and file extension (e.g. '.pdf'), then crawl and download.
    d_ulr = input('Please input the pdf webSite:')
    level_max = int(input('Please input the max level:'))
    form_str = input('Please input the file form:')
    it_downloadfile = downloadfile(d_ulr, level_max, form_str, os.getcwd())
    print(os.getcwd())
    it_downloadfile.process_load_file()
    # Print the aggregated download statistics.
    print(it_downloadfile)
| [
"392567441@qq.com"
] | 392567441@qq.com |
8f65ce4f1efe7352ca4a791c2c1bd22c36fb5af7 | 61459ef393c252cc52dc56d5ebafff7e055579aa | /cui-spider/chapter04-框架/quotetutorial/quotetutorial/pipelines.py | 97599a9df5d0431e15803b4bee4515b4767227e9 | [] | no_license | Yan199405/Python_note | 5e56824b6ec347ab8af4f04b5070bdc5e6685b80 | d8fd0a83da280f80e7a3e9c535787afa7722e140 | refs/heads/master | 2020-06-13T01:17:33.696802 | 2019-08-12T00:28:17 | 2019-08-12T00:28:17 | 194,485,922 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,320 | py | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import pymongo
from scrapy.exceptions import DropItem
class TextPipeline(object):
    """Scrapy pipeline: truncates long quote text and drops items without text."""

    def __init__(self):
        # Maximum number of characters of quote text to keep.
        self.limit = 50

    def process_item(self, item, spider):
        """Trim ``item['text']`` to ``self.limit`` characters.

        Returns:
            The (possibly truncated) item.

        Raises:
            DropItem: if the item has no text (Scrapy discards an item only
                when DropItem is *raised*; the original ``return DropItem(...)``
                silently passed the exception object on as the item).
        """
        if item.get('text'):
            if len(item['text']) > self.limit:
                item['text'] = item['text'][0:self.limit].rstrip() + '...'
            return item
        raise DropItem("Missing Text")
class MongoPipeline(object):
    """Scrapy pipeline: stores each item in MongoDB, one collection per item class."""

    def __init__(self, mongo_uri, mongo_db):
        self.mongo_uri = mongo_uri
        self.mongo_db = mongo_db

    @classmethod
    def from_crawler(cls, crawler):
        """Alternate constructor used by Scrapy: reads MONGO_* from settings."""
        settings = crawler.settings
        return cls(mongo_uri=settings.get('MONGO_URI'),
                   mongo_db=settings.get('MONGO_DB'))

    def open_spider(self, spider):
        # Connect once when the spider starts.
        self.client = pymongo.MongoClient(self.mongo_uri)
        self.db = self.client[self.mongo_db]

    def process_item(self, item, spider):
        # Collection name mirrors the item class name.
        collection = item.__class__.__name__
        self.db[collection].insert(dict(item))
        return item

    def close_spider(self, spider):
        self.client.close()
| [
"17793217774@163.com"
] | 17793217774@163.com |
106da989a8dff66103d031dc2b5908e522088065 | c67708a84466dedddbf446e2053c72a58dcb7a4d | /composer/airflow_1_samples/hadoop_tutorial.py | 5e4541d79557b068a75c053bce5f7d9f0843ebb5 | [
"Apache-2.0"
] | permissive | Matthelonianxl/python-docs-samples | 205d0ebaabf7d6e500fc36950a890c030a2c8882 | 8540237bb81e0b6ff87fa405abdfadd2e164c14c | refs/heads/master | 2022-02-14T12:36:30.927438 | 2022-01-19T17:58:05 | 2022-01-19T17:58:05 | 232,271,126 | 1 | 0 | Apache-2.0 | 2022-02-04T20:11:25 | 2020-01-07T07:44:42 | Python | UTF-8 | Python | false | false | 4,474 | py | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START composer_hadoop_tutorial_airflow_1]
"""Example Airflow DAG that creates a Cloud Dataproc cluster, runs the Hadoop
wordcount example, and deletes the cluster.
This DAG relies on three Airflow variables
https://airflow.apache.org/docs/apache-airflow/stable/concepts/variables.html
* gcp_project - Google Cloud Project to use for the Cloud Dataproc cluster.
* gce_zone - Google Compute Engine zone where Cloud Dataproc cluster should be
created.
* gcs_bucket - Google Cloud Storage bucket to use for result of Hadoop job.
See https://cloud.google.com/storage/docs/creating-buckets for creating a
bucket.
"""
import datetime
import os
from airflow import models
from airflow.contrib.operators import dataproc_operator
from airflow.utils import trigger_rule
# Output file for Cloud Dataproc job.
# If you are running Airflow in more than one time zone
# see https://airflow.apache.org/docs/apache-airflow/stable/timezone.html
# for best practices
# NOTE: this path is computed at DAG-parse time; the trailing os.sep makes
# Hadoop treat it as a directory.
output_file = os.path.join(
    models.Variable.get('gcs_bucket'), 'wordcount',
    datetime.datetime.now().strftime('%Y%m%d-%H%M%S')) + os.sep
# Path to Hadoop wordcount example available on every Dataproc cluster.
WORDCOUNT_JAR = (
    'file:///usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'
)
# Arguments to pass to Cloud Dataproc job.
input_file = 'gs://pub/shakespeare/rose.txt'
# Order is: example name, input path, output path.
wordcount_args = ['wordcount', input_file, output_file]
yesterday = datetime.datetime.combine(
    datetime.datetime.today() - datetime.timedelta(1),
    datetime.datetime.min.time())
default_dag_args = {
    # Setting start date as yesterday starts the DAG immediately when it is
    # detected in the Cloud Storage bucket.
    'start_date': yesterday,
    # To email on failure or retry set 'email' arg to your email and enable
    # emailing here.
    'email_on_failure': False,
    'email_on_retry': False,
    # If a task fails, retry it once after waiting at least 5 minutes
    'retries': 1,
    'retry_delay': datetime.timedelta(minutes=5),
    'project_id': models.Variable.get('gcp_project')
}
# [START composer_hadoop_schedule_airflow_1]
with models.DAG(
        'composer_hadoop_tutorial',
        # Continue to run DAG once per day
        schedule_interval=datetime.timedelta(days=1),
        default_args=default_dag_args) as dag:
    # [END composer_hadoop_schedule_airflow_1]

    # Create a Cloud Dataproc cluster.
    create_dataproc_cluster = dataproc_operator.DataprocClusterCreateOperator(
        task_id='create_dataproc_cluster',
        # Give the cluster a unique name by appending the date scheduled.
        # See https://airflow.apache.org/docs/apache-airflow/stable/macros-ref.html
        cluster_name='composer-hadoop-tutorial-cluster-{{ ds_nodash }}',
        num_workers=2,
        zone=models.Variable.get('gce_zone'),
        master_machine_type='n1-standard-2',
        worker_machine_type='n1-standard-2')

    # Run the Hadoop wordcount example installed on the Cloud Dataproc cluster
    # master node.
    run_dataproc_hadoop = dataproc_operator.DataProcHadoopOperator(
        task_id='run_dataproc_hadoop',
        main_jar=WORDCOUNT_JAR,
        cluster_name='composer-hadoop-tutorial-cluster-{{ ds_nodash }}',
        arguments=wordcount_args)

    # Delete Cloud Dataproc cluster.
    delete_dataproc_cluster = dataproc_operator.DataprocClusterDeleteOperator(
        task_id='delete_dataproc_cluster',
        cluster_name='composer-hadoop-tutorial-cluster-{{ ds_nodash }}',
        # Setting trigger_rule to ALL_DONE causes the cluster to be deleted
        # even if the Dataproc job fails.
        trigger_rule=trigger_rule.TriggerRule.ALL_DONE)

    # [START composer_hadoop_steps_airflow_1]
    # Define DAG dependencies.
    create_dataproc_cluster >> run_dataproc_hadoop >> delete_dataproc_cluster
    # [END composer_hadoop_steps_airflow_1]
# [END composer_hadoop_tutorial_airflow_1]
| [
"noreply@github.com"
] | noreply@github.com |
8a900fcc1c9f2cb65f9dd2a6b7c15eef2898558d | 1b9bd441c500e79042c48570035071dc20bfaf44 | /sources/Content_Quality/mekhilta.py | 6ded5ff121376d5bb37ff8e30b43ebf4f016f14d | [] | no_license | Sefaria/Sefaria-Data | ad2d1d38442fd68943535ebf79e2603be1d15b2b | 25bf5a05bf52a344aae18075fba7d1d50eb0713a | refs/heads/master | 2023-09-05T00:08:17.502329 | 2023-08-29T08:53:40 | 2023-08-29T08:53:40 | 5,502,765 | 51 | 52 | null | 2023-08-29T11:42:31 | 2012-08-22T00:18:38 | null | UTF-8 | Python | false | false | 1,737 | py | from sources.functions import *
alt_toc = """Massekta dePesah / מסכתא דפסחא
Exodus 12:1–13:16
Massekta deVayehi Beshalach / מסכתא דויהי בשלח
Exodus 13:17-14:31
Massekta deShirah / מסכתא דשירה
Exodus 15:1-15:21
Massekta deVayassa / מסכתא דויסע
Exodus 15:22-17:7
Massekta deAmalek / מסכתא דעמלק
Exodus 17:8- 18:27
Massekta deBahodesh / מסכתא דבחודש
Exodus 19:1-20:26
Massekta deNezikin / מסכתא דנזיקין
Exodus 21:1-22:23
Massekta deKaspa / מסכתא דכספא
Exodus 22:24-23:19
Massekta deShabbeta / מסכתא דשבתא
Exodus 31:12-35:3"""
# Build one ArrayMapNode per parasha: alt_toc alternates title rows
# ("English / Hebrew") with the Exodus verse-range rows, so even-indexed
# rows create a node and the following odd row completes it.
nodes = []
alt_toc = alt_toc.splitlines()
for r, row in enumerate(alt_toc):
    if r % 2 == 0:
        node = ArrayMapNode()
        en, he = row.strip().split(" / ")
        node.add_primary_titles(en, he)
        node.depth = 0
        node.refs = []
    else:
        # The ref row maps the Exodus range onto the Mekhilta's own sections.
        node.wholeRef = row.strip().replace("Exodus", "Mekhilta d'Rabbi Yishmael")
        node.validate()
        nodes.append(node.serialize())
# Attach the alternate "Parasha" structure to the index record.
index = get_index_api("Mekhilta d'Rabbi Yishmael", server="https://germantalmud.cauldron.sefaria.org")
index["alt_structs"] = {"Parasha": {"nodes": nodes}}
#post_index(index, server="https://www.sefaria.org")
# Generate commentary links: each Mekhilta section is linked to the Exodus
# verse it comments on (same chapter:verse numbering by construction).
links = []
for sec_ref in library.get_index("Mekhilta d'Rabbi Yishmael").all_section_refs():
    seg_ref = sec_ref.as_ranged_segment_ref().normal()
    exodus_ref = sec_ref.normal().replace("Mekhilta d'Rabbi Yishmael", "Exodus")
    print(exodus_ref)
    print(seg_ref)
    print("***")
    links.append({"refs": [exodus_ref, seg_ref], "generated_by": "mekhilta_to_exodus", "auto": True, "type": "Commentary"})
post_link_in_steps(links, server="https://www.sefaria.org", step=100, sleep_amt=10) | [
"steve@sefaria.org"
] | steve@sefaria.org |
8a47069ad08ff4a25b593f7f933e7207a34c9c81 | e6b1ad137a9bd3d39ae7c61cb5c7f7956ce095b9 | /bruteforce/first_and_last.py | 254541adec5d55d00b4b5ecdb2ee1dce8ea5e268 | [] | no_license | jadenpadua/Data-Structures-and-Algorithms | d9ba8ece779a2d564a7d66fcbacc9fb7fa1f7205 | 838c29112fec4beb9d9cc3f54db00492b4a480b0 | refs/heads/master | 2021-07-17T13:10:52.029327 | 2020-07-08T02:00:14 | 2020-07-08T02:00:14 | 186,896,842 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 766 | py | Write a function that returns the lexicographically first and lexicographically last rearrangements of a string. Output the results in the following manner:
first_and_last(string) ➞ [first, last]
Examples
first_and_last("marmite") ➞ ["aeimmrt", "trmmiea"]
first_and_last("bench") ➞ ["bcehn", "nhecb"]
first_and_last("scoop") ➞ ["coops", "spooc"]
def first_and_last(s):
    """Return the lexicographically first and last rearrangements of *s*.

    The first rearrangement is the characters of *s* sorted in ascending
    order; the last is the same characters in descending order (i.e. the
    first one reversed).

    Args:
        s: input string (may be empty).

    Returns:
        list ``[first, last]`` of the two rearrangements.
    """
    first = ''.join(sorted(s))
    # The largest permutation is simply the smallest one reversed.
    return [first, first[::-1]]
| [
"noreply@github.com"
] | noreply@github.com |
035e339aacf1489757af5dacb964d6d303478870 | 7fb650e65c59f9c379040ab5558704ee9be5959f | /assignments/assignment4/fifa.py | a62c15eb92dd40b070f4fba6261b7902eff6d9eb | [] | no_license | rocket-ron/MIDS-W205 | 83d245062ab6fc9be2b1267e0ebcf8e94aaec3b0 | c275e4ab841070e2d2e9fbe1db737ef0ce353f68 | refs/heads/master | 2022-02-02T14:06:45.793627 | 2015-11-23T17:17:33 | 2015-11-23T17:17:33 | 36,993,228 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,141 | py | import requests
from bs4 import BeautifulSoup
import time
import csv
query = '#FIFAWWC AND (#USA OR #CAN OR #MEX OR #CRC OR #COL OR #ECU OR #BRA OR #NGA OR #CMR OR ' \
'#CIV OR #GER OR #ESP OR #ENG OR #FRA OR #SUI OR #NED OR #SWE OR #NOR OR #JPN OR #KOR OR ' \
'#CHN OR #THA OR #AUS OR #NZL) lang:en since:2015-06-05 until:2015-07-06'
url = 'https://twitter.com/i/search/timeline'
payload = { 'q' : query,
'f':'tweets',
'src':'typd',
'lang':'en'
}
r = requests.get(url, payload)
tweetdata = r.json()
tweetSoup = BeautifulSoup(tweetdata['items_html'], 'html.parser')
first_tweet = None
last_tweet = ''
fieldnames = ['tweet_id', 'user_id', 'user_name','screen_name','text','time']
with open('WC2015.csv','w') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=fieldnames, delimiter = '|')
writer.writeheader()
for item in tweetSoup.find_all("li", attrs = {"data-item-type":"tweet"}):
try:
tweet = {}
if not first_tweet:
first_tweet = item['data-item-id']
last_tweet = item['data-item-id']
tweet['tweet_id'] = item['data-item-id']
tweet['user_id'] = item.div['data-user-id']
tweet['user_name'] = item.div['data-name'].encode('ascii','ignore')
tweet['screen_name'] = item.div['data-screen-name'].encode('ascii','ignore')
tweet['text'] = item.find("div", attrs = {"class":"content"}).p.text.encode('ascii','ignore')
tweet['time'] = item.find("div", attrs = {"class":"stream-item-header"}).find("a", attrs={"class":"tweet-timestamp js-permalink js-nav js-tooltip"}).span['data-time']
writer.writerow(tweet)
except:
pass # if we get an error, skip this one and move on
page = 1
while True:
position = 'TWEET-{0}-{1}'.format(last_tweet,first_tweet)
payload = { 'q' : query,
'f':'tweets',
'src':'typd',
'lang':'en',
'max_position' : position
}
r = requests.get(url, payload)
if r.status_code == requests.codes.ok:
tweetdata = r.json()
tweetSoup = BeautifulSoup(tweetdata['items_html'], 'html.parser')
for item in tweetSoup.find_all("li", attrs = {"data-item-type":"tweet"}):
try:
tweet = {}
if not first_tweet:
first_tweet = item['data-item-id']
last_tweet = item['data-item-id']
tweet['tweet_id'] = item['data-item-id']
tweet['user_id'] = item.div['data-user-id']
tweet['user_name'] = item.div['data-name'].encode('ascii','ignore')
tweet['screen_name'] = item.div['data-screen-name'].encode('ascii','ignore')
tweet['text'] = item.find("div", attrs = {"class":"content"}).p.text.encode('ascii','ignore')
tweet['time'] = item.find("div", attrs = {"class":"stream-item-header"}).find("a", attrs={"class":"tweet-timestamp js-permalink js-nav js-tooltip"}).span['data-time']
writer.writerow(tweet)
except:
pass # skip and move on
else:
print "Recieved NOT OK status response {0} ... exiting".format(r.status_code)
break
print 'Page {0} processed... last tweet id {1}'.format(page, last_tweet)
page += 1
time.sleep(5)
| [
"ron.cordell@berkeley.edu"
] | ron.cordell@berkeley.edu |
bff401b4024d18170f711c74f11a60dbba29c93a | 370cbf1c31ae62bd17f96db5f8e68f705a2876cb | /facebook/settings.py | 26c583b2475041f29b7d2c4b2dac26bd0fca4669 | [] | no_license | shyamsundar1129official/facebook | f57172e9a745b8c736dbb7938142ad70711a757a | 69667bfeed351fcba1e5f0574119c3cfec8f1602 | refs/heads/master | 2023-05-05T16:27:04.701186 | 2019-10-29T07:05:20 | 2019-10-29T07:05:20 | 218,208,283 | 0 | 0 | null | 2023-04-21T20:39:24 | 2019-10-29T05:06:18 | Python | UTF-8 | Python | false | false | 3,133 | py | """
Django settings for facebook project.
Generated by 'django-admin startproject' using Django 2.2.6.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'u_38x*s)b_pc9p_)o49r6gu9!8(d(bxgr%^3(b=5v_50hbro#*'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ["shyamsundar1129official.herokuapp.com"]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'facebook.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'facebook.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
| [
"shyamsundar1129official@gmail.com"
] | shyamsundar1129official@gmail.com |
5d7cb01dad0c0b9d2f4d9ca5afd5a9e6c680224d | f8dbd8b0fb69bee5eed6560e7eac9625e568bc1f | /tests/test_callbacks.py | d3964c0200c0148b041965705e7f650d676dd68a | [
"MIT"
] | permissive | tesera/pygypsy | 4ab81d94a912eb10e3a5cc486343ac22a95ce48c | cae6ac808e53ebb2465ad28b4787c20e8a452bd0 | refs/heads/dev | 2021-05-01T10:30:57.500910 | 2017-02-03T18:48:00 | 2017-02-03T18:48:00 | 58,996,041 | 3 | 0 | null | 2017-02-01T00:40:27 | 2016-05-17T06:23:55 | Jupyter Notebook | UTF-8 | Python | false | false | 359 | py | import os
import pytest
from pygypsy.scripts.callbacks import _load_and_validate_config
@pytest.mark.skipif(os.getenv('GYPSY_BUCKET') is None,
reason="S3 tests are not configured locally")
def test_load_and_validate_s3_config(config_on_s3):
conf = _load_and_validate_config(None, None, config_on_s3)
assert isinstance(conf, dict)
| [
"jotham.apaloo@tesera.com"
] | jotham.apaloo@tesera.com |
a8e229e9ea24d1c73a3eca12ed966665e0e3d6b9 | a2a630d9fa403f22be92be4869bef21bd6b87544 | /example_2d.py | b051679e49456baac5a0fa49455863980e1be7b0 | [] | no_license | robert-richardson/GP-Derivative-Observations | 1cb51a923308ea72d52458de36a9aaa89607e7d0 | f8ef503d8d3fdee0bb142ca4056350985e15fe88 | refs/heads/master | 2021-07-05T08:16:08.537966 | 2017-09-30T16:45:18 | 2017-09-30T16:45:18 | 105,381,947 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,543 | py | #%% A simple 2d example
#%% Imports
import numpy as np
import matplotlib.pyplot as plt
import GPy
from GPy.util.multioutput import build_XY
from gp_derivative_observations import GPDerivativeObservations
#%%
plt.close('all')
#%% Options
deriv_dim = 1 # 0 or 1
#%%
seed=1
np.random.seed(seed=seed)
#%% Generate data
# sample inputs and outputs
plot_lims = [0,7]
nX = 20
X = np.random.uniform(0,7,(nX,2))
R = np.sqrt(np.sum(X**2, axis=1)).reshape(-1,1)
c1 = 1; c2 = 0.4
Y0 = c2*X[:,0,None] + np.sin(c1*X[:,1,None]) + np.random.randn(nX,1)*0.05 # y
Y12 = c1*np.cos(c1*X[:,1,None]) + np.random.randn(nX,1)*0.05 # dy_dx1
Y11 = c2 + np.random.randn(nX,1)*0.05 # dy_dx2
#%% Fit a simple GP model
ker = GPy.kern.ExpQuad(2)
m_simple = GPy.models.GPRegression(X,Y0,ker)
m_simple.optimize(max_f_eval = 1000)
print(m_simple)
ax = m_simple.plot()
ax.set_xlabel('x1')
ax.set_ylabel('x2')
#%% Fit derivative GP
index=[0,1,2]
m = GPDerivativeObservations(X_list=[X,X,X], Y_list=[Y0,Y11,Y12], index=index)
m.optimize('bfgs', max_iters=100)
print(m)
#%% 1D plot slices
slices = [0, 4]
fig, ax = plt.subplots(1, len(slices), figsize=(8,3))
nXs=200 # dense sampling for plotting
for i, y in zip(range(len(slices)), slices):
Xs = np.linspace(*plot_lims,nXs)
Xs = (np.concatenate((Xs,Xs))).reshape(-1,2)
Xs[:,1-deriv_dim] = slices[i]*np.ones_like(Xs[:,0])
Xs_, _, _ = build_XY([Xs,Xs,Xs],index=index)
_,id_u = np.unique(Xs_[:,-1,None],return_inverse=True)
output_index = (id_u.min() + id_u).reshape(Xs_[:,-1,None].shape)
# Truth
Ys_true = c2*Xs[:,0,None] + np.sin(c1*Xs[:,1,None])
ax[i].plot(Xs[:int(nXs/2),deriv_dim], Ys_true[:int(nXs/2),0], 'k-', lw= 0.7,
label='Truth')
ax[i].set_xlim(plot_lims)
# Simple GP
Ys, _ = m_simple.predict(Xs)
ax[i].plot(Xs[:int(nXs/2),deriv_dim], Ys[:int(nXs/2),0],'--',color='C0',
label='Pred (w/o derivs)')
# Derivative observations GP
Ys, _ = m.predict(Xs_, Y_metadata={'output_index':output_index})
ax[i].plot(Xs[:int(nXs/2),deriv_dim], Ys[:int(nXs/2),0],'--',color='C3',
label='Pred (with derivs)')
# Title, axis labels
ax[i].set_title('x1 = {}'.format(slices[i]))
ax[i].set_xlabel('x2')
ax[i].set_ylabel('f(x1,x2)')
ax[0].legend(loc='lower left')
fig.tight_layout()
| [
"noreply@github.com"
] | noreply@github.com |
20663f1e70d2a884a70b48748a48d95537d7616f | 74845e4f5aca34d2c5f2ec6cb1309653cda5da6e | /samples/batch_qtransform.py | d153d56b6aea8ae7b1f3b7016b04748ad65d5118 | [] | no_license | gw-detchar/Kozapy | 3e5fe53019e53c0f4c7e7fd43164c32d075f3740 | 82d31e18eca1658a35265f2ff830f1c9b81d302b | refs/heads/master | 2021-07-22T00:51:36.153126 | 2020-05-28T03:05:21 | 2020-05-28T03:05:21 | 177,755,889 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,292 | py | '''Make q-transform plot.
'''
__author__ = "Chihiro Kozakai"
import os
import matplotlib
matplotlib.use('Agg') # this line is required for the batch job before importing other matplotlib modules.
import subprocess
import glob
from gwpy.timeseries import TimeSeries
from gwpy.timeseries import TimeSeriesDict
from gwpy.segments import DataQualityFlag
from matplotlib import pylab as pl
from gwpy.detector import Channel
from matplotlib import cm
from mylib import mylib
from matplotlib import pylab as pl
pl.rcParams['font.size'] = 16
pl.rcParams['font.family'] = 'Verdana'
# argument processing
import argparse
parser = argparse.ArgumentParser(description='Make coherencegram.')
parser.add_argument('-o','--outdir',help='output directory.',default='/tmp')
parser.add_argument('-c','--channel',help='channel.',required=True)
parser.add_argument('-s','--gpsstart',help='GPS starting time.',required=True)
parser.add_argument('-e','--gpsend',help='GPS ending time.',required=True)
parser.add_argument('-t','--time',help='Plot time duration.',type=float,default=None)
parser.add_argument('-f','--fmin',help='frequency range.',default=8 )
parser.add_argument('-q','--q',help='Q range.',type=float, default=-1 )
parser.add_argument('-i','--index',help='It will be added to the output file name.',default='test')
parser.add_argument('-l','--lchannel',help='Make locked segment bar plot.',default='')
parser.add_argument('--llabel',help='Label of the locked segment bar plot.',default='')
parser.add_argument('-n','--lnumber',help='The requirement for judging locked. lchannel==lnumber will be used as locked.',default=99,type=int)
parser.add_argument('-k','--kamioka',help='Flag to run on Kamioka server.',action='store_true')
parser.add_argument('--dpi',help='Plot resolution. dot per inch.',type=int,default=100)
parser.add_argument('--ll',help='Flag to use low latency files. Valid only in Kashiwa.',action='store_true')
parser.add_argument('-g','--geo',help='Flag to use GEO files. Valid only in Kashiwa.',action='store_true')
# GEO h(t) channel G1:DER_DATA_H 16384
# define variables
args = parser.parse_args()
outdir=args.outdir
channel=args.channel
#latexchname = channel.replace('_','\_')
latexchname = channel
gpsstart=args.gpsstart
gpsend=args.gpsend
fmin=args.fmin
dpi=args.dpi
#margin=40
margin=4
# Adjust margin to match the requirement from Q and fmin.
if (float(gpsend)-float(gpsstart)+2.*margin) * float(fmin) < float(args.q):
margin = (float(args.q)/float(fmin)-(float(gpsend)-float(gpsstart))) /2.
# for safety.
margin+=1
gpsstartmargin=float(gpsstart)-margin
gpsendmargin=float(gpsend)+margin
qmin = 4
qmax = 100
if args.q > 0:
qmin = int(args.q)
qmax = int(args.q)
index=args.index
lchannel=args.lchannel
lnumber=args.lnumber
llabel=args.llabel
lflag=bool(llabel)
kamioka = args.kamioka
ll = args.ll
GEO = args.geo
unit = "Normalized energy"
# Get data from frame files
if kamioka:
sources = mylib.GetFilelist_Kamioka(gpsstartmargin,gpsendmargin)
elif ll:
sources = mylib.GetLLFilelist(gpsstartmargin,gpsendmargin)
elif GEO:
sources = mylib.GetGeoFilelist(gpsstart,gpsend)
else:
sources = mylib.GetFilelist(gpsstartmargin,gpsendmargin)
data = TimeSeries.read(sources,channel,format='gwf.lalframe',start=float(gpsstartmargin),end=float(gpsendmargin))
#maxf=1024
#if maxf > 1./data.dt.value/4.:
fmax=1./data.dt.value/4.
if data.value[0] == data.value[1]:
print("Warning !!! : The data is constant. Plot is not provided.")
# following is for successcheck.sh to take it as OK.
if kamioka:
print("/users/DET/tools/GlitchPlot/Script/Kozapy/samples/dummy")
else:
print("/home/chihiro.kozakai/detchar/analysis/code/gwpy/Kozapy/samples/dummy")
print('Successfully finished !')
exit()
qgram = data.q_transform(outseg=[float(gpsstart),float(gpsend)],frange=(fmin,fmax),qrange=(qmin,qmax),gps=float(gpsstart)/2.+float(gpsend)/2.,logf=True)
#qgram = data.q_transform(outseg=[float(1267205087),float(1267205098)],frange=(fmin,fmax),qrange=(qmin,qmax),gps=float(gpsstart)/2.+float(gpsend)/2.,logf=True)
# default parameter
#qrange=(4, 64), frange=(0, inf), gps=None, search=0.5, tres='<default>', fres='<default>', logf=False, norm='median', mismatch=0.2, outseg=None, whiten=True, fduration=2, highpass=None, **asd_kw
#plot=qgram.plot(figsize = (12, 8),vmin=0.,vmax=25.)
plot=qgram.plot(figsize = (12, 8))
ax = plot.gca()
ax.set_ylabel('Frequency [Hz]')
ax.set_yscale('log')
ax.set_title(latexchname+" Q-transform")
ax.grid(b=True, which='both', axis='y')
ax.set_xlim(float(gpsstart),float(gpsend))
plot.add_colorbar(cmap='YlGnBu_r',label="Normalized energy",log=True, clim=[0.1,None])
fname = outdir + '/' + channel + '_qtransform_'+ gpsstart + '_' + gpsend +'_' + index +'.png'
if lflag:
flag = mylib.GetDQFlag(float(gpsstart),float(gpsend),config=llabel, kamioka=kamioka)
if flag == None:
ldata = TimeSeries.read(sources,lchannel,format='gwf.lalframe',start=float(gpsstart),end=float(gpsend))
locked = ldata == lnumber
flag = locked.to_dqflag(name = '', label = llabel)
plot.add_state_segments(flag)
else:
pass
plot.savefig(fname,dpi=dpi)
plot.clf()
plot.close()
print(fname)
print('Successfully finished !')
| [
"38804436+ckozakai@users.noreply.github.com"
] | 38804436+ckozakai@users.noreply.github.com |
e6d1f32ef39100eaa119842196cb660e7ec7fb85 | 16bf7df8f176fd1e57b1d3fdf085762c0a6c9ac3 | /gomoku_game.py | 12f4de8ac9a42a29bb8cc300720bfc8eb41118fc | [] | no_license | AaronYALai/Reinforcement_Learning_Project | 69c35c2dfdbe445f08d5d6d9ab24cd080574e735 | 0bb7a2b0ab0d0d1bb94964ab34b978eb98c8b5dc | refs/heads/master | 2020-02-26T15:20:30.100952 | 2016-10-08T08:59:24 | 2016-10-08T08:59:24 | 70,220,858 | 25 | 6 | null | null | null | null | UTF-8 | Python | false | false | 3,933 | py | # -*- coding: utf-8 -*-
# @Author: aaronlai
# @Date: 2016-10-07 15:03:47
# @Last Modified by: AaronLai
# @Last Modified time: 2016-10-07 17:30:26
import numpy as np
def initGame(width=19):
"""Initialize width x width new game"""
state = np.zeros((width, width, 2))
available = np.zeros((width, width))
return state, available
def makeMove(state, available, action, actor):
"""specify the actor and the location of the new stone"""
available_ret = np.zeros(available.shape)
available_ret[:] = available[:]
if available_ret[action] == 0:
state[action][actor] = 1
available_ret[action] = float("-inf")
return state, available_ret
else:
return None, available_ret
def winGame(sub_state):
"""check if the game winning criteria is met"""
for i in range(sub_state.shape[0] - 4):
for j in range(sub_state.shape[1] - 4):
horizontal = sub_state[i][j: j+5]
if (horizontal == 1).all():
return True
vertical = [sub_state[i+k, j] for k in range(5)]
if (np.array(vertical) == 1).all():
return True
diagonal = [sub_state[(i+k, j+k)] for k in range(5)]
if (np.array(diagonal) == 1).all():
return True
return False
def fullGrid(state):
"""check if the chessboard is full"""
return not ((state[:, :, 0] + state[:, :, 1]) == 0).any()
def getReward(state, whose_turn, win_reward=500, lose_reward=-1000,
even_reward=-100, keepgoing_reward=-10):
"""calculate the reward given to whom just moved"""
reward = [0, 0]
if winGame(state[:, :, whose_turn]):
reward[whose_turn] = win_reward
reward[1 - whose_turn] = lose_reward
elif fullGrid(state):
reward = [even_reward, even_reward]
else:
reward[whose_turn] = keepgoing_reward
return reward
def drawGrid(state):
"""visualize the chessboard"""
grid = np.zeros(state.shape[:2], dtype='<U2')
grid[:] = ' '
for i in range(state.shape[0]):
for j in range(state.shape[1]):
if (state[(i, j)] > 0).any():
if (state[(i, j)] == 1).all():
raise
elif state[(i, j)][0] == 1:
grid[(i, j)] = 'O'
else:
grid[(i, j)] = 'X'
return grid
def displayGrid(grid):
"""print out the chessboard"""
wid = grid.shape[0]
show_num = 9 if wid > 9 else wid
# chessboard
line = '\n' + '- + ' * (wid - 1) + '- {}\n'
line = line.join([' | '.join(grid[i]) for i in range(wid)])
# mark the number of its lines
bottom = ('\n' + ' {} ' * show_num)
bottom = bottom.format(*[i+1 for i in range(show_num)])
if show_num == 9:
part = (' {} '*(wid - show_num))
part = part.format(*[i+1 for i in range(show_num, wid)])
bottom += part
print(line.format(*[i+1 for i in range(wid)]) + bottom)
def try_display(width=19):
state, avai = initGame(width)
terminate = False
print('Start')
for i in range(int(width**2 / 2)):
for actor in [0, 1]:
new_state = None
while new_state is None:
x = np.random.randint(width)
y = np.random.randint(width)
move = (x, y)
new_state, avai = makeMove(state, avai, move, actor)
state = new_state
reward = getReward(state, actor)
if 500 in reward:
print('\tterminal: %d\n' % i)
terminate = True
break
elif -100 in reward:
print('\tchessboard is full.\n')
terminate = True
break
if terminate:
break
displayGrid(drawGrid(state))
def main():
try_display(11)
if __name__ == '__main__':
main()
| [
"aaronlaiya@icloud.com"
] | aaronlaiya@icloud.com |
168a343047d22e06e3684322144074b16a173713 | 133859e4825b22c71bc79990c0ecc1139a74178f | /src/north/cli/gscli/tai.py | a1709a3817ae0f7f42f06d7e392f570c4f60468e | [
"Apache-2.0"
] | permissive | falcacicd/goldstone-mgmt | 9d99eec7e7f4f71f96321cf67c062715450b8395 | e7348011180e3c2dcd0558636ddc5c21779c7a3f | refs/heads/master | 2022-12-05T09:28:46.792508 | 2020-09-03T06:30:34 | 2020-09-03T06:30:34 | 290,127,258 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,306 | py | import sys
import os
import re
from .base import Object, InvalidInput, Completer
from pyang import repository, context
import sysrepo as sr
import base64
import struct
from prompt_toolkit.document import Document
from prompt_toolkit.completion import WordCompleter, Completion, FuzzyCompleter
_FREQ_RE = re.compile(r'.+[kmgt]?hz$')
class TAICompleter(Completer):
def __init__(self, config, state=None):
self.config = config
self.state = state
# when self.state != None, this completer is used for get() command
# which doesn't take value. so we don't need to do any completion
hook = lambda : self.state != None
super(TAICompleter, self).__init__(self.attrnames, self.valuenames, hook)
def attrnames(self):
l = [v.arg for v in self.config.substmts]
if self.state:
l += [v.arg for v in self.state.substmts]
return l
def valuenames(self, attrname):
for v in self.config.substmts:
if attrname == v.arg:
t = v.search_one('type')
if t.arg == 'boolean':
return ['true', 'false']
elif t.arg == 'enumeration':
return [e.arg for e in t.substmts]
else:
return []
return []
class TAIObject(Object):
def __init__(self, session, parent, name, type_):
self.type_ = type_
self.name = name
self._get_hook = {}
self._set_hook = {}
self.session = session
super(TAIObject, self).__init__(parent)
d = self.session.get_ly_ctx().get_searchdirs()
repo = repository.FileRepository(d[0])
ctx = context.Context(repo)
m = self.session.get_ly_ctx().get_module('goldstone-tai')
v = m.print_mem("yang")
ctx.add_module(None, v)
mod = ctx.get_module('goldstone-tai')
self.config = mod.search_one('grouping', 'tai-{}-config'.format(type_))
self.state = mod.search_one('grouping', 'tai-{}-state'.format(type_))
@self.command(FuzzyCompleter(TAICompleter(self.config, self.state)))
def get(args):
if len(args) != 1:
raise InvalidInput('usage: get <name>')
self.session.switch_datastore('operational')
try:
items = self.session.get_items('{}/state/{}'.format(self.xpath(), args[0]))
for item in items:
if args[0] in self._get_hook:
print(self._get_hook[args[0]](item.value))
else:
print(item.value)
except sr.errors.SysrepoCallbackFailedError as e:
print(e)
self.session.switch_datastore('running')
@self.command(TAICompleter(self.config))
def set(args):
if len(args) != 2:
raise InvalidInput('usage: set <name> <value>')
if args[0] in self._set_hook:
v = self._set_hook[args[0]](args[1])
else:
v = args[1]
self.session.set_item('{}/config/{}'.format(self.xpath(), args[0]), v)
self.session.apply_changes()
@self.command()
def show(args):
if len(args) != 0:
raise InvalidInput('usage: show[cr]')
self.session.switch_datastore('operational')
print(self.session.get_data(self.xpath()))
self.session.switch_datastore('running')
def human_freq(item):
if type(item) == str:
try:
int(item)
return item
except ValueError:
item = item.lower()
if not _FREQ_RE.match(item):
raise InvalidInput('invalid frequency input. (e.g 193.50THz)')
item = item[:-2]
v = 1
if item[-1] == 't':
v = 1e12
elif item[-1] == 'g':
v = 1e9
elif item[-1] == 'm':
v = 1e6
elif item[-1] == 'k':
v = 1e3
return str(round(float(item[:-1]) * v))
else:
return '{0:.2f}THz'.format(int(item) / 1e12)
def human_ber(item):
return '{0:.2e}'.format(struct.unpack('>f', base64.b64decode(item))[0])
class HostIf(TAIObject):
def xpath(self):
return "{}/host-interface[name='{}']".format(self.parent.xpath(), self.name)
def __init__(self, session, parent, name):
super(HostIf, self).__init__(session, parent, name, 'host-interface')
def __str__(self):
return 'hostif({})'.format(self.name)
class NetIf(TAIObject):
def xpath(self):
return "{}/network-interface[name='{}']".format(self.parent.xpath(), self.name)
def __init__(self, session, parent, name):
super(NetIf, self).__init__(session, parent, name, 'network-interface')
self._get_hook = {
'tx-laser-freq': human_freq,
'ch1-freq': human_freq,
'min-laser-freq': human_freq,
'max-laser-freq': human_freq,
'current-tx-laser-freq': human_freq,
'current-pre-fec-ber': human_ber,
'current-post-fec-ber': human_ber,
'current-prbs-ber': human_ber,
}
self._set_hook = {
'tx-laser-freq': human_freq,
}
def __str__(self):
return 'netif({})'.format(self.name)
class Module(TAIObject):
XPATH = '/goldstone-tai:modules/module'
def xpath(self):
return "{}[name='{}']".format(self.XPATH, self.name)
def __init__(self, session, parent, name):
super(Module, self).__init__(session, parent, name, 'module')
d = self.session.get_data("{}[name='{}']".format(self.XPATH, self.name))
self._map = d['modules']['module'][self.name]
@self.command(WordCompleter(self._components('network-interface')))
def netif(args):
if len(args) != 1:
raise InvalidInput('usage: netif <name>')
return NetIf(self.session, self, args[0])
@self.command(WordCompleter(self._components('host-interface')))
def hostif(args):
if len(args) != 1:
raise InvalidInput('usage: hostif <name>')
return HostIf(self.session, self, args[0])
def __str__(self):
return 'module({})'.format(self.name)
def _components(self, type_):
d = self._map
return [v['name'] for v in d[type_]]
class Transponder(Object):
XPATH = '/goldstone-tai:modules'
def __init__(self, session, parent):
self.session = session
super(Transponder, self).__init__(parent)
self._module_map = self.session.get_data(self.XPATH)
@self.command()
def show(args):
if len(args) != 0:
raise InvalidInput('usage: show[cr]')
print(self._module_map)
@self.command(WordCompleter(self._modules()))
def module(args):
if len(args) != 1:
raise InvalidInput('usage: module <name>')
return Module(self.session, self, args[0])
def __str__(self):
return 'transponder'
def _modules(self):
d = self._module_map
return [v['name'] for v in d.get('modules', {}).get('module', {})]
| [
"wataru.ishid@gmail.com"
] | wataru.ishid@gmail.com |
7eb2cebea2ae37abaeaff05644a4e31c4d321648 | 603db646b1ec2a9c9642366cc0da552611560786 | /results/nLoading.py | 0c42f6acc473cca05bb1078a35c6c754fdafbcbc | [] | no_license | yazoo178/08_JUL_TP | 3f6a73795229d35f7ae7fc18246b73de888a4eb0 | 2c020a66bbf6378487cc71525b9235042803f38c | refs/heads/master | 2020-12-02T12:43:58.705785 | 2017-07-07T23:04:46 | 2017-07-07T23:04:46 | 96,582,604 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,865 | py | import re
import pylab as p
import numpy as np
import matplotlib.pyplot as plt
import re
file = open("cacm_gold_std.txt")
goldDocs = {}
def frange(x, y, jump):
while x < y:
yield x
x += jump
for line in file:
line = re.search("(\d+) (\d+)", line)
if line:
queryId = int(line.group(1))
docId = int(line.group(2))
if queryId in goldDocs:
goldDocs[queryId].append(int(docId))
else:
goldDocs[queryId] = []
goldDocs[queryId].append(int(docId))
results = []
for i in range(500, 5000, 10):
fileResults = open("nPer/" + "percent_" + str(i) + "_" + "5" + "_" + "run_results.txt")
fileDocs = {}
for line in fileResults:
line = re.search("(\d+) (\d+)", line)
if line:
queryId = int(line.group(1))
docId = line.group(2)
if queryId in fileDocs:
fileDocs[queryId].append(int(docId))
else:
fileDocs[queryId] = []
fileDocs[queryId].append(int(docId))
results.append(fileDocs)
recallRates = []
presRates = []
for index,outputFile in enumerate(results):
recall = []
pres = []
for goldDoc in goldDocs:
inCount = 0
for line in outputFile:
for element in outputFile[line]:
if element in goldDocs[goldDoc] and goldDoc == line:
inCount+=1
recall.append(inCount / len(goldDocs[goldDoc]))
pres.append(inCount/ len(outputFile[line]))
recallRates.append(recall)
presRates.append(pres)
averageRecalls = []
for item in recallRates:
total = 0
for rate in item:
total +=rate
averageRecalls.append((total / len(item)))
averagePres = []
for item in presRates:
total = 0
for rate in item:
total +=rate
averagePres.append((total / len(item)))
averageFMeasures = []
for pres, recall in zip(presRates, recallRates):
totalPres = 0
for ratePres in pres:
totalPres += ratePres
totalRecall = 0
for rateRecall in recall:
totalRecall += rateRecall
totalPres = totalPres / len(pres)
totalRecall = totalRecall / len(recall)
averageFMeasures.append((2 * (totalPres * totalRecall)) / (totalPres + totalRecall))
plt.show()
X = [x/100 for x in range(500, 5000, 10)]
#Y = averageRecalls
#Y2 = averagePres
#p.plot(X,Y, label = "Average Recall")
#p.plot(X,Y2, label = "Average Precision")
p.plot(X, averageFMeasures, label= "Harmonic Mean")
p.xlabel("Permitted % difference between vector simularity rates", fontsize=13)
p.ylabel('Rate (0-1)', fontsize=13)
p.legend( loc='lower right')
p.suptitle("The F-Measure rate for increasing the percentage before cut off (-s-m-tf:n-idf:t)", fontsize=14)
p.show()
| [
"wbriggs2@sheffield.ac.uk"
] | wbriggs2@sheffield.ac.uk |
0a3072ac7da71bc957882478316909c65e5aa868 | c098085eedb3a1534f723ee770a8837067c54281 | /chapter03/studying2.py | 69e01309d518509934fbbfa190391c2a245784ea | [] | no_license | haitian-zhang/PythonScrapyingStudying | 14eb66fe94df1061859db2c69c7a5780d9c9687c | e22beefeb44aa263cca9b1f0113f8eaa2968ba74 | refs/heads/master | 2021-10-28T03:37:59.084766 | 2019-04-21T13:23:54 | 2019-04-21T13:23:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,341 | py | # 豆瓣爬虫
import time
from urllib.request import urlopen
from bs4 import BeautifulSoup
import re
# 整合mysql
import pymysql
conn = pymysql.connect(host='127.0.0.1', user='root', passwd='123456', db='python')
cur = conn.cursor()
cur.execute("USE python")
def getLinks(pageUrl="https://book.douban.com", pages = []):
try:
html = urlopen(pageUrl)
bsObj = BeautifulSoup(html, "html.parser")
for link in bsObj.findAll("a", href=re.compile("^(https://book.douban.com/subject/)[0-9]*[/]$")):
if link.attrs['href'] not in pages:
newPage = link.attrs['href']
name = link.parent.next_sibling.next_sibling.get_text().strip()
pages.append(newPage)
print(newPage, name)
# 插入数据库
sql = "INSERT INTO books (URI, name) VALUES ('" + newPage + "','" + name + "')"
try:
cur.execute(sql)
conn.commit()
except:
# 重复不提交
print("重复")
getLinks(pageUrl=newPage, pages=pages)
except:
print("禁止访问,10秒后重试。。。")
time.sleep(10)
getLinks(pageUrl=pageUrl, pages=pages)
getLinks()
cur.close()
conn.close()
print("运行结束") | [
"a13207123727@gmail,com"
] | a13207123727@gmail,com |
f9e0930f0105b1e7ffa5cf93463939b85918a75f | e6cf2817154c6764d308503a3b06f798e5854799 | /4. Comprehension/EXERCISE/01_word_filter.py | 95cd69c9e5aa75a075274ce5a05d8b05b36d8346 | [] | no_license | dimDamyanov/PythonAdvanced | dca1629cac2ee7ae113b82f19276e1c5e0e07023 | 00ca5148c4d57c8cdd13f069315942e81902f746 | refs/heads/main | 2023-04-06T18:16:11.240712 | 2021-04-22T20:57:28 | 2021-04-22T20:57:28 | 328,765,865 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 122 | py | text = input()
even_words = [word for word in text.split() if len(word) % 2 == 0]
for word in even_words:
print(word)
| [
"dim.damianov@gmail.com"
] | dim.damianov@gmail.com |
cd1bb9f006222ba1fa141e47c1acdc1149465d89 | 033c991b1ccc49ab12eb3cc3dca45c8fb3714b83 | /__main__.py | d064802c2bd6d5fb044ac8f8b9c7b86f1627e92a | [
"MIT"
] | permissive | SKU-IT/SESS-webservice-client | 2b78a5fdd2a4564013d8e61286a454599bfbb80a | 2101502c39938a5b2f2278e20b14f1c83df1cb0e | refs/heads/master | 2020-03-25T21:01:28.842853 | 2018-08-14T08:53:28 | 2018-08-14T08:53:28 | 144,155,945 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 264 | py | #!/usr/bin/env python3
from getpass import getpass
from personal_info import PersonalInfo
def main():
gate = PersonalInfo(input('Username: '), getpass())
print(gate.get_data('CurrentAcadState=1', max_length=10))
if __name__ == '__main__':
main()
| [
"hadi@hadisafari.ir"
] | hadi@hadisafari.ir |
336d5186b1c0f8624957237b93107c3b3eab86b0 | 55f7ed471c7878c04d14cf69c472c36bd128881b | /L2jFrozen_GameServer/build/L2JSegara_GameServer/gameserver/data/scripts/custom/8871_gve/__init__.py | 3d373999e9b389d4982c3184efb41a30e1a5425d | [] | no_license | doulu99/L2Segara | a8b4c23228300bb01bfd92ebbd65c1c90c1e7a54 | 82e646440ee94109176240ee2baa0d75aa14d735 | refs/heads/master | 2021-04-01T09:47:45.208358 | 2020-03-21T10:03:15 | 2020-03-21T10:03:15 | 248,179,108 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,108 | py | # Author ProGramMoS, Scoria Dev
# Version 0.2b
import sys
from com.l2jfrozen.gameserver.model.actor.instance import L2PcInstance
from com.l2jfrozen.util.database import L2DatabaseFactory
from com.l2jfrozen.gameserver.model.quest import State
from com.l2jfrozen.gameserver.model.quest import QuestState
from com.l2jfrozen.gameserver.model.quest.jython import QuestJython as JQuest
qn = "8871_gve"
class Quest (JQuest) :
    """Custom quest 8871: sets or clears the player's good/evil faction flags.

    Events (dialog bypass values):
      "1" -> join the good faction    "3" -> leave the good faction
      "2" -> join the evil faction    "4" -> leave the evil faction
    """
    def __init__(self,id,name,descr): JQuest.__init__(self,id,name,descr)

    def onEvent(self,event,st):
        # Re-target the player on themselves before touching faction state.
        st.getPlayer().setTarget(st.getPlayer())
        # BUGFIX: the original called `st.getPlayer.setGood(true)` — missing
        # call parentheses on getPlayer() and Java-style `true`/`false`, both
        # NameError/AttributeError at runtime. Use getPlayer() and True/False.
        player = st.getPlayer()
        if event == "1":    # become good
            player.setGood(True)
            st.setState(COMPLETED)  # COMPLETED is the module-level state defined below
        elif event == "2":  # become evil
            player.setEvil(True)
            st.setState(COMPLETED)
        elif event == "3":  # renounce good
            player.setGood(False)
            st.setState(COMPLETED)
        elif event == "4":  # renounce evil
            player.setEvil(False)
            st.setState(COMPLETED)
        return
# Instantiate and register the quest (id 8871, category "custom").
QUEST = Quest(8871,qn,"custom")
# Quest state machine: Start -> Started -> Completed. COMPLETED is only
# looked up inside onEvent at call time, so defining it here is safe.
CREATED = State('Start',QUEST)
STARTED = State('Started',QUEST)
COMPLETED = State('Completed',QUEST)
# Every new quest state for a player begins in CREATED.
QUEST.setInitialState(CREATED)
"ghemax@ghemax.wvmwp4m0qdrufdbhm41aqoexvg.ux.internal.cloudapp.net"
] | ghemax@ghemax.wvmwp4m0qdrufdbhm41aqoexvg.ux.internal.cloudapp.net |
fb27c227f73b008cfdfe76d6ecbf32ec958e3170 | 11c46400a55f204d78dd409f229626e8b1eea656 | /coding_challenge_1.py | b5bf4e5491f5adf365996c2f612980333dafb938 | [] | no_license | azurewillgit/code_snippets | acfd26f08f48d0cea78529efae5e0bd25916cf86 | 5914cc3d9dc6af8b8d6da976ad52fd0945d30b9c | refs/heads/master | 2020-12-03T04:29:16.904409 | 2020-01-07T06:43:59 | 2020-01-07T06:43:59 | 231,201,996 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 64 | py |
# Input data for the coding challenge: a fixed list of first names.
names = ['Paul', 'Yvonne', 'Tim', 'Hugo', 'Oliver', 'Nadine']
| [
"dennis.will37@gmail.com"
] | dennis.will37@gmail.com |
de85e910fc8b9cf34cf5556e8809318730b73fd4 | 145fd80224998d43cec8de1fe19fbaf589852578 | /Lab 1/L1/main_volume.py | bbfdd575753c48fc64ff5532d9d51235efb46c1a | [] | no_license | andres2631996/CM2004-Medical-Visualization | a0c58de60b11a7f97ee721dcb03fe95be8f3b256 | ec7aa918a1485652f4543cf8a80f4e0cfded6927 | refs/heads/master | 2020-12-09T09:45:39.526785 | 2020-01-11T17:17:08 | 2020-01-11T17:17:08 | 233,266,654 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,522 | py | __author__ = 'fabian sinzinger'
__email__ = 'fabiansi@kth.se'
import vtk
import sys
import os
if __name__ == '__main__':
    # Path to the NIfTI volume comes from the first command-line argument.
    nifti_path = sys.argv[1]

    # Source: read the NIfTI image file.
    reader = vtk.vtkNIFTIImageReader()
    reader.SetFileName(nifti_path)

    # GPU ray-cast volume mapper fed directly by the reader.
    mapper = vtk.vtkGPUVolumeRayCastMapper()
    mapper.SetInputConnection(reader.GetOutputPort())

    # Grayscale color transfer function: scalar 0 -> black, 512 -> white.
    color_tf = vtk.vtkColorTransferFunction()
    color_tf.SetColorSpaceToRGB()
    color_tf.AddRGBPoint(0, 0., 0., 0.)
    color_tf.AddRGBPoint(512, 1., 1., 1.)

    # Opacity transfer function: fully transparent at 0, nearly so at 256.
    opacity_tf = vtk.vtkPiecewiseFunction()
    opacity_tf.AddPoint(0, 0.0)
    opacity_tf.AddPoint(256, 0.01)

    # Volume properties: attach both transfer functions, linear interpolation.
    vol_props = vtk.vtkVolumeProperty()
    vol_props.SetColor(0, color_tf)
    vol_props.SetScalarOpacity(0, opacity_tf)
    vol_props.SetInterpolationTypeToLinear()

    # Volume actor wired to the mapper and its properties.
    volume = vtk.vtkVolume()
    volume.SetMapper(mapper)
    volume.SetProperty(vol_props)

    # Camera looking at the volume from the side:
    # up-vector (0,1,0), position (-500,100,100), focal point (100,100,100).
    camera = vtk.vtkCamera()
    camera.SetViewUp(0., 1., 0.)
    camera.SetPosition(-500, 100, 100)
    camera.SetFocalPoint(100, 100, 100)

    # Renderer with a black background, our camera active, and the volume added.
    renderer = vtk.vtkRenderer()
    renderer.SetBackground(0., 0., 0.)
    renderer.SetActiveCamera(camera)
    renderer.AddActor(volume)

    # Render window hosting the renderer.
    window = vtk.vtkRenderWindow()
    window.AddRenderer(renderer)

    # Interactor attached to the window for mouse/keyboard control.
    interactor = vtk.vtkRenderWindowInteractor()
    interactor.SetRenderWindow(window)

    # Draw the scene, then hand control to the interactive event loop.
    window.Render()
    interactor.Start()
"noreply@github.com"
] | noreply@github.com |
4458795c392ba0ab3f81e2d130be56272b33e8c0 | ee00ebe5e71c36b05fbff993b19e9723b963313f | /280_Wiggle_Sort.py | 09fa9084f0ab202059ebdd2af873de234323560f | [] | no_license | 26XINXIN/leetcode | f365560d93604a28abf399707b333f3c11f924ec | 78ed11f34fd03e9a188c9c6cb352e883016d05d9 | refs/heads/master | 2021-06-28T16:31:45.103879 | 2020-09-19T20:33:55 | 2020-09-19T20:33:55 | 144,975,903 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 526 | py | class Solution:
def wiggleSort(self, nums: List[int]) -> None:
"""
Do not return anything, modify nums in-place instead.
"""
if len(nums) <= 1:
return
for i in range(1, len(nums)):
if i % 2 == 1: # increasing
if nums[i] < nums[i-1]:
nums[i-1], nums[i] = nums[i], nums[i-1]
else: # decreasing
if nums[i] > nums[i-1]:
nums[i-1], nums[i] = nums[i], nums[i-1]
| [
"yangxin.nlp@bytedance.com"
] | yangxin.nlp@bytedance.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.