| hexsha (string, 40) | size (int64, 3-1.03M) | ext (string, 10 classes) | lang (string, 1 class) | max_stars_repo_path (string, 3-972) | max_stars_repo_name (string, 6-130) | max_stars_repo_head_hexsha (string, 40-78) | max_stars_repo_licenses (list, 1-10) | max_stars_count (int64, 1-191k, nullable) | max_stars_repo_stars_event_min_datetime (string, 24, nullable) | max_stars_repo_stars_event_max_datetime (string, 24, nullable) | max_issues_repo_path (string, 3-972) | max_issues_repo_name (string, 6-130) | max_issues_repo_head_hexsha (string, 40-78) | max_issues_repo_licenses (list, 1-10) | max_issues_count (int64, 1-116k, nullable) | max_issues_repo_issues_event_min_datetime (string, 24, nullable) | max_issues_repo_issues_event_max_datetime (string, 24, nullable) | max_forks_repo_path (string, 3-972) | max_forks_repo_name (string, 6-130) | max_forks_repo_head_hexsha (string, 40-78) | max_forks_repo_licenses (list, 1-10) | max_forks_count (int64, 1-105k, nullable) | max_forks_repo_forks_event_min_datetime (string, 24, nullable) | max_forks_repo_forks_event_max_datetime (string, 24, nullable) | content (string, 3-1.03M) | avg_line_length (float64, 1.13-941k) | max_line_length (int64, 2-941k) | alphanum_fraction (float64, 0-1) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| ed7a1c0d7a4492dad9e31bba9aa2a19bf2c67236 | 2,241 | py | Python | pandas_funcs/sort_by_multiple_indexes.py | SecTraversl/Toolbox_Python_3.8 | 0ad1d92d3a12225ea60e4eef3f263aecfffd1b65 | ["MIT"] | null | null | null | pandas_funcs/sort_by_multiple_indexes.py | SecTraversl/Toolbox_Python_3.8 | 0ad1d92d3a12225ea60e4eef3f263aecfffd1b65 | ["MIT"] | null | null | null | pandas_funcs/sort_by_multiple_indexes.py | SecTraversl/Toolbox_Python_3.8 | 0ad1d92d3a12225ea60e4eef3f263aecfffd1b65 | ["MIT"] | null | null | null |
# %%
#######################################
def sort_by_multiple_indexes(lst: list, *index_nums: int, reverse=False):
"""With a two dimensional array, returns the rows sorted by one or more column index numbers.
Example:
>>> mylst = []
# create the table (name, age, job)
>>> mylst.append(["Nick", 30, "Doctor"])
>>> mylst.append(["John", 8, "Student"])
>>> mylst.append(["Paul", 22, "Car Dealer"])
>>> mylst.append(["Mark", 66, "Retired"])
>>> mylst.append(['Yolo', 22, 'Student'])
>>> mylst.append(['Mark', 66, 'Doctor'])
# Sort by the "Name"
>>> sort_by_multiple_indexes(mylst, 0)\n
[['John', 8, 'Student'], ['Mark', 66, 'Retired'], ['Mark', 66, 'Doctor'], ['Nick', 30, 'Doctor'], ['Paul', 22, 'Car Dealer'], ['Yolo', 22, 'Student']]
# Sort by the "Name", then the "Job"
>>> sort_by_multiple_indexes(mylst, 0,2)\n
[['John', 8, 'Student'], ['Mark', 66, 'Doctor'], ['Mark', 66, 'Retired'], ['Nick', 30, 'Doctor'], ['Paul', 22, 'Car Dealer'], ['Yolo', 22, 'Student']]
# Sort by the "Job"
>>> sort_by_multiple_indexes(mylst, 2)\n
[['Paul', 22, 'Car Dealer'], ['Nick', 30, 'Doctor'], ['Mark', 66, 'Doctor'], ['Mark', 66, 'Retired'], ['John', 8, 'Student'], ['Yolo', 22, 'Student']]
# Sort by the "Job", then the "Age"
>>> sort_by_multiple_indexes(mylst, 2,1)\n
[['Paul', 22, 'Car Dealer'], ['Nick', 30, 'Doctor'], ['Mark', 66, 'Doctor'], ['Mark', 66, 'Retired'], ['John', 8, 'Student'], ['Yolo', 22, 'Student']]
# Sort by age in descending order
>>> sort_by_multiple_indexes(mylst, 1, reverse=True)\n
[['Mark', 66, 'Retired'], ['Mark', 66, 'Doctor'], ['Nick', 30, 'Doctor'], ['Paul', 22, 'Car Dealer'], ['Yolo', 22, 'Student'], ['John', 8, 'Student']]
References:
https://stackoverflow.com/questions/18595686/how-do-operator-itemgetter-and-sort-work
https://docs.python.org/3/library/operator.html#operator.itemgetter
"""
import operator
if reverse:
return sorted(lst, key=operator.itemgetter(*index_nums), reverse=True)
else:
return sorted(lst, key=operator.itemgetter(*index_nums))
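# Illustrative note (not part of the original function): operator.itemgetter(0, 2)
# returns a callable that extracts a tuple of fields, e.g.
#   operator.itemgetter(0, 2)(["Nick", 30, "Doctor"])  ->  ("Nick", "Doctor")
# and sorted() compares those tuples lexicographically, which is why passing
# several index numbers sorts by the first index and breaks ties with the next.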
| 47.680851 | 158 | 0.54797 |
| 8f366521d4ae8fcbcf3812f935d737eb027efe57 | 3,270 | py | Python | django_apscheduler/migrations/0001_initial.py | calledbert/django-apscheduler | 8947bb55976718b634e81ad54b64f53e300d12df | ["MIT"] | 331 | 2016-07-12T07:03:08.000Z | 2021-01-26T23:23:36.000Z | django_apscheduler/migrations/0001_initial.py | calledbert/django-apscheduler | 8947bb55976718b634e81ad54b64f53e300d12df | ["MIT"] | 115 | 2016-07-07T15:23:25.000Z | 2021-01-21T17:16:10.000Z | django_apscheduler/migrations/0001_initial.py | calledbert/django-apscheduler | 8947bb55976718b634e81ad54b64f53e300d12df | ["MIT"] | 92 | 2016-11-01T16:10:06.000Z | 2021-01-25T03:59:58.000Z |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-11-05 16:07
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = []
operations = [
migrations.CreateModel(
name="DjangoJob",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=255, unique=True)),
("next_run_time", models.DateTimeField(db_index=True)),
("job_state", models.BinaryField()),
],
options={
"ordering": ("next_run_time",),
},
),
migrations.CreateModel(
name="DjangoJobExecution",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"status",
models.CharField(
choices=[
["Added", "Added"],
["Started execution", "Started execution"],
["Max instances reached!", "Max instances reached!"],
["Missed!", "Missed!"],
["Modified!", "Modified!"],
["Removed!", "Removed!"],
["Error!", "Error!"],
["Executed", "Executed"],
],
max_length=50,
),
),
("run_time", models.DateTimeField(db_index=True)),
(
"duration",
models.DecimalField(
decimal_places=2, default=None, max_digits=15, null=True
),
),
(
"started",
models.DecimalField(
decimal_places=2, default=None, max_digits=15, null=True
),
),
(
"finished",
models.DecimalField(
decimal_places=2, default=None, max_digits=15, null=True
),
),
("exception", models.CharField(max_length=1000, null=True)),
("traceback", models.TextField(null=True)),
(
"job",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="django_apscheduler.DjangoJob",
),
),
],
options={
"ordering": ("-run_time",),
},
),
]
| 33.367347 | 81 | 0.372783 |
| 0a5fb2b9a49b76d55d5032b0b48f957f3c861110 | 1,181 | py | Python | ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/scripts/hive_client.py | likenamehaojie/Apache-Ambari-ZH | 5973025bd694cdbb4b49fb4c4e0d774782811ff6 | ["Apache-2.0"] | 1,664 | 2015-01-03T09:35:21.000Z | 2022-03-31T04:55:24.000Z | ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/scripts/hive_client.py | likenamehaojie/Apache-Ambari-ZH | 5973025bd694cdbb4b49fb4c4e0d774782811ff6 | ["Apache-2.0"] | 3,018 | 2015-02-19T20:16:10.000Z | 2021-11-13T20:47:48.000Z | ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/scripts/hive_client.py | likenamehaojie/Apache-Ambari-ZH | 5973025bd694cdbb4b49fb4c4e0d774782811ff6 | ["Apache-2.0"] | 1,673 | 2015-01-06T14:14:42.000Z | 2022-03-31T07:22:30.000Z |
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
from resource_management import *
from hive import hive
class HiveClient(Script):
def install(self, env):
self.install_packages(env)
self.configure(env)
def configure(self, env):
import params
env.set_params(params)
hive(name='client')
def status(self, env):
raise ClientComponentHasNoStatus()
if __name__ == "__main__":
HiveClient().execute()
| 28.119048 | 72 | 0.762913 |
| bfe40b55084c5530a868aa2e821bc70495757119 | 29,526 | py | Python | src/t5s.py | adfre18/t5s | 7b35b14e55b829f0a4c749e321d174e9aad4e780 | ["Apache-2.0"] | 5 | 2021-03-03T14:03:02.000Z | 2021-11-08T07:12:19.000Z | src/t5s.py | adfre18/t5s | 7b35b14e55b829f0a4c749e321d174e9aad4e780 | ["Apache-2.0"] | null | null | null | src/t5s.py | adfre18/t5s | 7b35b14e55b829f0a4c749e321d174e9aad4e780 | ["Apache-2.0"] | 1 | 2021-09-03T10:06:55.000Z | 2021-09-03T10:06:55.000Z |
from tensorflow_text.python.ops.sentencepiece_tokenizer import SentencepieceTokenizer as TFSentencepieceTokenizer
import tensorflow as tf
from transformers import (T5Tokenizer,
TFT5ForConditionalGeneration,
generation_tf_utils as _tfu)
import transformers
from tensorflow.keras.callbacks import LearningRateScheduler, Callback, EarlyStopping
from sklearn.metrics import precision_recall_fscore_support
import logging
import yaml
import numpy as np
import sys
import os
import re
from collections import Counter
def remove_last_ext(fn):
"Returns the filename with the last extension removed"
return fn.rsplit(".", 1)[0]
# SentencePiece ids as required in the T5 training code
PAD_ID = 0
EOS_ID = 1
UNK_ID = 2
def sparse_from_dense(t):
"""Helper function for edit_accuracy()
Args:
t: Tensor of type tf.int32
Returns:
SparseTensor without padding and eos tokens
"""
idx = tf.where(tf.logical_and(tf.not_equal(t, PAD_ID), tf.not_equal(t, EOS_ID)))
shape = tf.shape(t)
shape = tf.cast(shape, dtype=tf.int64)
return tf.SparseTensor(idx, tf.gather_nd(t, idx), shape)
def edit_accuracy(y_true, y_pred):
y_true = sparse_from_dense(y_true)
y_pred = sparse_from_dense(y_pred)
dist = tf.edit_distance(y_true, y_pred)
acc = tf.map_fn(lambda d: tf.cond(tf.math.is_finite(d), lambda: 1-d, lambda: 0.),
dist)
return acc
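# --- Illustrative sketch (not part of the original module) -------------------
# A minimal, hypothetical check of edit_accuracy() on a toy batch; the token
# ids below are invented (PAD=0 and EOS=1 are stripped by sparse_from_dense).
def _edit_accuracy_demo():
    y_true = tf.constant([[5, 6, 7, 1, 0], [8, 9, 1, 0, 0]], dtype=tf.int32)
    y_pred = tf.constant([[5, 6, 7, 1, 0], [8, 3, 1, 0, 0]], dtype=tf.int32)
    # First row matches exactly (accuracy 1.0); the second differs in one of
    # its two non-padding tokens (accuracy 0.5).
    return edit_accuracy(y_true, y_pred)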
class EditAccuracy(tf.python.keras.metrics.MeanMetricWrapper):
def __init__(self, name='edit_accuracy', dtype=None):
super(EditAccuracy, self).__init__(edit_accuracy, name, dtype=dtype)
def sent_accuracy(y_true, y_pred, mask=None):
y_true = tf.cast(y_true, tf.int32)
y_pred = tf.cast(y_pred, tf.int32)
if mask is None:
mask = tf.cast(y_true != 0, tf.int32)
y_pred = y_pred * mask
equal = tf.cast(y_true == y_pred, tf.int32)
mul = tf.math.reduce_prod(equal, axis=-1)
return mul
class SentAccuracy(tf.python.keras.metrics.MeanMetricWrapper):
def __init__(self, name='sent_accuracy', dtype=None):
super(SentAccuracy, self).__init__(sent_accuracy, name, dtype=dtype)
class T5Training(TFT5ForConditionalGeneration):
# https://github.com/snapthat/TF-T5-text-to-text/blob/master/snapthatT5/notebooks/TF-T5-%20Training.ipynb
def __init__(self, *args, log_dir=None, cache_dir=None, **kwargs):
super().__init__(*args, **kwargs)
self.loss_tracker = tf.keras.metrics.Mean(name='loss')
@tf.function
def train_step(self, data):
x, _ = data
y = x["labels"]
# mask = x["decoder_attention_mask"]
with tf.GradientTape() as tape:
outputs = self(x, training=True)
loss = outputs[0]
logits = outputs[1]
loss = tf.reduce_mean(loss)
grads = tape.gradient(loss, self.trainable_variables)
self.optimizer.apply_gradients(zip(grads, self.trainable_variables))
self.loss_tracker.update_state(loss)
self.compiled_metrics.update_state(y, tf.math.argmax(logits, axis=-1, output_type=tf.int32))
metrics = {m.name: m.result() for m in self.metrics}
return metrics
def test_step(self, data):
x, _ = data
y = x["labels"]
# mask = x["decoder_attention_mask"]
output = self(x, training=False)
loss = output[0]
loss = tf.reduce_mean(loss)
logits = output[1]
self.loss_tracker.update_state(loss)
self.compiled_metrics.update_state(y, tf.math.argmax(logits, axis=-1, output_type=tf.int32))
return {m.name: m.result() for m in self.metrics}
def tsv_dataset(fn, tf_tokenizer, input_size=1024, output_size=1280, min_batch_size=2,
shuffle_window=None, line_counter=None, skip=None, repeat=False):
"""Creates TF dataset from TSV file
The dataset uses variable-length and variable-sized batches not exceeding
input_size tokens. Each batch has at least min_batch_size samples, i.e.
the maximum sequence length is limited to input_size / min_batch_size.
The dataset is optionally shuffled if shuffle_window is a positive integer.
"""
input_size = tf.constant(input_size, tf.int32)
output_size = tf.constant(output_size, tf.int32)
min_batch_size = tf.constant(min_batch_size, tf.int64)
if line_counter is not None:
line_counter.assign(0 if skip is None else skip)
def split_line(line):
if line_counter is not None:
line_counter.assign_add(1)
parts = tf.strings.split(line, "\t", 1)
parts = tf.cond(tf.shape(parts)[0] == 2, lambda: parts, lambda: tf.stack([parts[0], tf.constant("")]))
text = parts[0]
label = parts[1]
return (text, label)
def filter_labels(text, label):
# TODO: add counter of ignored examples
return tf.strings.length(label) > 0
def tokenize(text, label):
text = tf_tokenizer.tokenize(text)
text_att = tf.cast(tf.math.not_equal(text, 0), tf.int32)
label = tf_tokenizer.tokenize(label)
label_att = tf.cast(tf.math.not_equal(label, 0), tf.int32)
return text, text_att, label, label_att
def to_dict(text, text_att, label, label_att):
batch_size = tf.shape(text)[0]
input_len = input_size // batch_size
output_len = output_size // batch_size
return ({
"input_ids": text[:, :input_len],
"attention_mask": text_att[:, :input_len],
"labels": label[:, :output_len],
"decoder_attention_mask": label_att[:, :output_len],
}, None)
def key_func(text, text_att, label, label_att):
in_len = tf.cast(tf.shape(text)[0], tf.int64)
in_per_batch = tf.cast(input_size, tf.int64) // in_len
out_len = tf.cast(tf.shape(label)[0], tf.int64)
out_per_batch = tf.cast(output_size, tf.int64) // out_len
return tf.maximum(min_batch_size, tf.minimum(in_per_batch, out_per_batch))
def reduce_func(key, dataset):
return dataset.padded_batch(key)
def window_size_func(key):
return key
if isinstance(fn, list):
dataset = tf.data.TextLineDataset(fn, num_parallel_reads=len(fn))
else:
dataset = tf.data.TextLineDataset(fn)
if repeat:
dataset = dataset.repeat()
if skip:
dataset = dataset.skip(skip)
dataset = (dataset.map(split_line)
.filter(filter_labels)
.map(tokenize))
if shuffle_window is not None:
dataset = dataset.shuffle(shuffle_window, reshuffle_each_iteration=True)
dataset = (dataset.apply(tf.data.experimental.group_by_window(
key_func=key_func,
reduce_func=reduce_func,
window_size_func=window_size_func
))
.map(to_dict)
.prefetch(tf.data.experimental.AUTOTUNE)
)
return dataset
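# --- Illustrative sketch (not part of the original module) -------------------
# Hypothetical wiring of tsv_dataset(); "model.spm" and "train.tsv" are invented
# paths used only for illustration.
def _tsv_dataset_demo():
    with open("model.spm", "rb") as f:
        tf_tokenizer = TFSentencepieceTokenizer(f.read(), add_eos=True)
    # With input_size=1024 and min_batch_size=2, a batch never exceeds roughly
    # 1024 input tokens and holds at least 2 samples, so per-sample length is
    # capped at about 512 tokens by to_dict() above.
    dataset = tsv_dataset("train.tsv", tf_tokenizer,
                          input_size=1024, min_batch_size=2,
                          shuffle_window=10000)
    for features, _ in dataset.take(1):
        print({k: v.shape for k, v in features.items()})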
def SqrtScheduler(learning_rate, verbose=0):
return LearningRateScheduler(lambda n: learning_rate*1/((n+1)**0.5), verbose=verbose)
class CheckpointSaver(Callback):
logger = logging.getLogger("t5s.CheckpointSaver")
def __init__(self, model, config):
super(CheckpointSaver, self).__init__()
self.model = model
self.config = config
self.freq = config["t5_model"].get("save_checkpoint_every", None)
self.epoch = None
self.last_saved_epoch = None
self.line_counter = tf.Variable(0, trainable=False, name="line_counter")
def on_train_end(self, logs=None):
self.save()
def on_epoch_end(self, epoch, logs=None):
self.epoch = epoch
print()
self.logger.info("Consumed %d training examples", self.line_counter.value().numpy())
if self.freq is not None and epoch % self.freq == 0:
self.save()
def save(self):
if self.last_saved_epoch == self.epoch:
# skip save on_train_end, it was already done on_epoch_end
return
out_fn = self.config["t5_model"]["save_checkpoint"]
self.logger.info("Saving checkpoint to %s", out_fn)
self.model.save_pretrained(out_fn)
self.config["t5_model"]["load_checkpoint"] = self.config["t5_model"]["save_checkpoint"]
if self.epoch is not None:
self.config["training"]["initial_epoch"] = self.epoch+1
if "steps_per_epoch" in self.config["training"]:
self.config["training"]["skip_samples"] = self.line_counter.value().numpy().item()
out_yaml_fn = self.config["t5_model"]["save_checkpoint"]+".yaml"
with open(out_yaml_fn, "w", encoding="utf-8") as fw:
yaml.dump(self.config, fw, default_flow_style=False, sort_keys=True)
self.last_saved_epoch = self.epoch
class T5(object):
logger = logging.getLogger("t5s.T5")
def __init__(self, config):
"""
"""
if isinstance(config, str):
self.config_fn = config
with open(config, "r", encoding="utf-8") as fr:
self.config = yaml.safe_load(fr)
else:
self.config_fn = None
self.config = config
self.model = None
self.predict_tokenizers = None
def get_transformers_lib_version(self):
print(transformers.__version__)
def load_tokenizer(self, type="predict"):
assert type == "predict"
self.logger.info("Loaded tokenizer from: %s", self.config["tokenizer"]["spm"])
tokenizer = T5Tokenizer(self.config["tokenizer"]["spm"])
with open(self.config["tokenizer"]["spm"], "rb") as f:
tf_tokenizer = TFSentencepieceTokenizer(f.read(), add_eos=True)
return tokenizer, tf_tokenizer
def load_model(self):
if self.model is not None:
return self.model
# Load the pre-trained model
model_config = self.config["t5_model"]
if "load_checkpoint" in model_config:
model_fn = model_config["load_checkpoint"]
else:
model_fn = model_config["pre_trained"]
self.logger.info("Loading model from %s", model_fn)
self.model = T5Training.from_pretrained(model_fn)
return self.model
def predict(self, batch, generate_hidden_states=False):
if self.predict_tokenizers is None:
self.predict_tokenizers = self.load_tokenizer("predict")
self.load_model()
predict_config = self.config.get("predict", {})
max_input_length = predict_config.get("max_input_length", None)
min_output_length = predict_config.get("min_output_length", None)
max_output_length = predict_config.get("max_output_length", None)
no_repeat_ngram_size = predict_config.get("no_repeat_ngram_size", 0)
length_penalty = predict_config.get("length_penalty", 1.0)
tokenizer, tf_tokenizer = self.predict_tokenizers
sentences = tokenizer(batch, padding="longest", max_length=max_input_length, truncation=True)
input_ids = tf.constant(sentences["input_ids"])
try:
self.model.config.generate_hidden_states = generate_hidden_states
outputs_dict = self.model.generate(input_ids,
min_length=min_output_length,
max_length=max_output_length,
early_stopping=True,
no_repeat_ngram_size=no_repeat_ngram_size,
length_penalty=length_penalty,
output_hidden_states=True,
return_dict_in_generate=True)
outputs = outputs_dict['sequences']
if generate_hidden_states:
# Split the hidden states from the outputs
hidden_states = self.concatenate_hidden_states(outputs_dict['decoder_hidden_states'])
else:
# The hidden states were not required
hidden_states = None
finally:
self.model.config.generate_hidden_states = False
preds = tf_tokenizer.detokenize(outputs).numpy()
preds = [i.decode() for i in preds]
if hidden_states:
# Also return the generated hidden states
return preds, hidden_states
else:
return preds
def concatenate_hidden_states(self, hidden_states):
assert len(hidden_states) != 0
# The length of output
length_output = len(hidden_states)
# Number of layers
num_of_layers = len(hidden_states[0])
# List of Nones whose length is the same as the number of layers
concatenated_hidden_states = [None] * num_of_layers
# Easily concatenate all hidden_states
for idx_layer in range(0, num_of_layers):
for idx_output in range(0, length_output):
if concatenated_hidden_states[idx_layer] is None:
concatenated_hidden_states[idx_layer] = hidden_states[0][idx_layer]
else:
concatenated_hidden_states[idx_layer] = tf.concat([concatenated_hidden_states[idx_layer], hidden_states[idx_output][idx_layer]], 1)
return concatenated_hidden_states
def predict_tsv(self, tsv_in, tsv_out):
batch_size = self.config.get("predict", {}).get("batch_size", 400)
with open(tsv_in, "r", encoding="utf-8") as fr, \
open(tsv_out, "w", encoding="utf-8") as fw:
batch = []
def flush(n_predicted=[0]):
preds = self.predict(batch)
for input_sent, output_sent in zip(batch, preds):
n_predicted[0] += 1
print(input_sent, output_sent, sep="\t", file=fw)
fw.flush()
del batch[:]
self.logger.info("Processed %d items", n_predicted[0])
for line in fr:
items = line.strip().split("\t")
input_sent = items[0]
batch.append(input_sent)
if len(batch) >= batch_size:
flush()
else:
if batch:
flush()
def fine_tune(self):
# Initialize configuration variables
dataset_config = self.config.get("dataset", {})
training_config = self.config.get("training", {})
train_tsv = dataset_config["train_tsv"]
devel_tsv = dataset_config["devel_tsv"]
dataset_kwargs = dataset_config.get("loader", {})
steps_per_epoch = training_config.get("steps_per_epoch", None)
skip_samples = training_config.get("skip_samples", None)
learning_rate = training_config.get("learning_rate", 1e-4)
learning_rate_schedule = training_config.get("learning_rate_schedule", True)
early_stopping = training_config.get("early_stopping", False)
if isinstance(early_stopping, dict):
# We have the configuration section for early stopping
# enable early_stopping and use the dict with details
early_stopping, early_stopping_config = True, early_stopping
else:
# No detailed configuration for early stopping, use empty dict
early_stopping_config = {}
# Load the SentencePiece tokenizer
tokenizer, tf_tokenizer = self.load_tokenizer()
model = self.load_model()
# Configure trainable variables
model.shared.trainable = training_config["shared_trainable"]
model.encoder.trainable = training_config["encoder_trainable"]
# Initialize metrics
metrics = [SentAccuracy(), EditAccuracy()]
# Initialize optimizer
optimizer = "adam"
model.compile(optimizer=optimizer, metrics=metrics)
callbacks = []
if learning_rate_schedule:
callbacks.append(SqrtScheduler(learning_rate, verbose=1))
if early_stopping:
# Configure early stopping
self.logger.info("Using early stopping config: %s", early_stopping_config)
early_stopping_quantity = early_stopping_config.get("quantity", "val_loss")
early_stopping_patience = early_stopping_config.get("patience", 0)
callbacks.append(EarlyStopping(monitor=early_stopping_quantity,
restore_best_weights=True,
verbose=True,
patience=early_stopping_patience))
# Automatically generate t5_model.save_checkpoint
if "save_checkpoint" not in self.config["t5_model"]:
if self.config_fn is not None and self.config_fn.endswith(".init.yaml"):
save_checkpoint = remove_last_ext(self.config_fn)
save_checkpoint = remove_last_ext(save_checkpoint)
self.config["t5_model"]["save_checkpoint"] = save_checkpoint
else:
raise ValueError("Cannot determine the value of missing t5_model.save_checkpoint")
self.logger.info("Trained model will be saved into %s", self.config["t5_model"]["save_checkpoint"])
checkpoint_saver = CheckpointSaver(model, self.config)
callbacks.append(checkpoint_saver)
# Instantiate datasets
self.logger.debug("Dataset loader parameters: %s", dataset_kwargs)
self.logger.info("Training dataset: %s", train_tsv)
train_dataset_kwargs = dataset_kwargs.copy()
train_dataset_kwargs.pop("devel_samples", None)
if steps_per_epoch:
train_dataset_kwargs["repeat"] = True
if skip_samples:
self.logger.info("Skipping initial %d samples, training starts from epoch %d",
skip_samples, training_config["initial_epoch"]+1)
train_dataset_kwargs["skip"] = skip_samples
train_dataset = tsv_dataset(train_tsv, tf_tokenizer,
line_counter=checkpoint_saver.line_counter,
**train_dataset_kwargs)
self.logger.info("Development dataset: %s", devel_tsv)
dev_dataset_kwargs = dataset_kwargs.copy()
dev_dataset_kwargs.pop("repeat", None)
dev_dataset_kwargs.pop("shuffle_window", None)
devel_samples = dev_dataset_kwargs.pop("devel_samples", None)
dev_dataset = tsv_dataset(devel_tsv, tf_tokenizer, **dev_dataset_kwargs)
if devel_samples is not None:
self.logger.info("Limit development dataset to %s samples", devel_samples)
dev_dataset = dev_dataset.take(devel_samples)
self.model.fit(train_dataset,
validation_data=dev_dataset,
steps_per_epoch=steps_per_epoch,
callbacks=callbacks,
initial_epoch=training_config["initial_epoch"],
epochs=training_config["n_epochs"])
if "evaluation" in self.config:
self.evaluate()
def predict_dataset(self, dataset):
tsv = self.config["dataset"].get("{}_tsv".format(dataset))
if tsv is None:
raise ValueError("No such dataset: {}".format(dataset))
if not isinstance(tsv, list):
tsv = [tsv]
model_base = os.path.split(self.config["t5_model"]["save_checkpoint"])[-1]
ref_fns = []
hyp_fns = []
for ref_fn in tsv:
hyp_fn = "{ref_base}.{model_base}.tsv".format(
ref_base=remove_last_ext(ref_fn),
model_base=model_base,
)
self.logger.info("Predicting %s into %s", ref_fn, hyp_fn)
self.predict_tsv(ref_fn, hyp_fn)
ref_fns.append(ref_fn)
hyp_fns.append(hyp_fn)
return ref_fns, hyp_fns
def evaluate(self, datasets=None):
"""Executes the evaluation of the model
The evaluation is performed for each dataset under the "dataset"
section, with the exception of the train dataset. The dataset's key must
end with the "_tsv" suffix; the dataset name is the key without this suffix.
The result is stored in YAML file with the following filename:
"{model_base}.eval.{dataset}.yaml", where "model_base" is the path to
model checkpoint (see "save_checkpoint" in configuration YAML) and
"dataset" is the name of the dataset.
The evaluation datasets could be limited with the configuration key
"evaluation.datasets".
Args:
datasets: an override for "evaluation.datasets" configuration key
"""
evaluation_cfg = self.config["evaluation"]
metric_name = evaluation_cfg["metric"]
metric = EVAL_METRICS[metric_name]
if datasets is None:
default_eval_datasets = [i[:-4] for i in self.config["dataset"] if i.endswith("_tsv") and i != "train_tsv"]
datasets = evaluation_cfg.get("datasets", default_eval_datasets)
for dataset in datasets:
ref_fns, hyp_fns = self.predict_dataset(dataset)
eval_results = eval_tsv(metric, ref_fns, hyp_fns)
eval_fn = "{model_base}.eval.{dataset}.yaml".format(
model_base=self.config["t5_model"]["save_checkpoint"],
dataset=dataset,
)
self.logger.info("Evaluation results for dataset %s:", dataset)
with open(eval_fn, "w", encoding="utf-8") as fw:
yaml_dump_result(eval_results, sys.stdout)
yaml_dump_result(eval_results, fw)
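# --- Illustrative sketch (not part of the original module) -------------------
# Hypothetical end-to-end driver; "model.init.yaml" and the dataset name are
# invented and only show how the T5 class above is typically used.
def _t5_demo():
    t5 = T5("model.init.yaml")
    t5.fine_tune()                   # trains and saves t5_model.save_checkpoint
    t5.evaluate(datasets=["devel"])  # writes <save_checkpoint>.eval.devel.yaml
    print(t5.predict(["some input text"]))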
# Evaluation metrics definition
def f1_multilabel(pairs):
def slash_split(item):
return tuple(i.strip() for i in item.split("/"))
ref, hyp = zip(*pairs)
ref = [slash_split(i) for i in ref]
hyp = [slash_split(i) for i in hyp]
kwds_set = set()
for lst in [ref, hyp]:
for kws in lst:
kwds_set |= set(kws)
kwds_list = {kw: idx for idx, kw in enumerate(list(kwds_set))}
def to_array(lst):
ret = np.zeros((len(lst), len(kwds_list)))
for idx, item in enumerate(lst):
for kw in item:
ret[idx, kwds_list[kw]] = 1
return ret
ref = to_array(ref)
hyp = to_array(hyp)
P, R, F, _ = precision_recall_fscore_support(ref, hyp, average="samples")
return {"P": float(P), "R": float(R), "F": float(F)}
def match(pairs):
n = 0
ok = 0
w_n = 0
w_ok = 0
for ref, hyp in pairs:
if ref == hyp:
ok += 1
n += 1
ref = ref.split()
hyp = hyp.split()
w_n += len(ref)
for r1, h1 in zip(ref, hyp):
if r1 == h1:
w_ok += 1
return {"SAcc": ok/n, "WAcc": w_ok/w_n, "W_N": w_n, "W_OK": w_ok, "S_N": n, "S_OK": ok, "W_Err": w_n-w_ok, "S_Err": n-ok}
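# --- Illustrative check (not part of the original module) --------------------
# Invented reference/hypothesis pairs showing what match() reports.
def _match_demo():
    pairs = [("a b c", "a b c"), ("a b c", "a x c")]
    # SAcc = 1/2 (one exact sentence match); WAcc = 5/6 (one wrong word of six).
    return match(pairs)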
def binary_lab(pairs):
TP = 0
FN = 0
FP = 0
for ref, hyp in pairs:
ref = ref.split()
hyp = hyp.split()
for r, h in zip(ref, hyp):
if r == h == "1":
TP += 1
elif r == "1" and h == "0":
FN += 1
elif r == "0" and h == "1":
FP += 1
P = TP / (TP+FP)
R = TP / (TP+FN)
F = 2 * P * R / (P+R)
return {"TP": TP, "FN": FN, "FP": FP, "P": P, "R": R, "F": F}
def get_tag_nodes(output, strip=True):
def stringify_tokens(tokens):
if strip:
# Strip the resulting tokens and replace multiple whitespaces
tokens = " ".join(tokens)
return re.sub(r"\s+", " ", tokens).strip()
else:
return "".join(tokens)
def add_tag_value(tag, value):
ret.setdefault(tag, [])
ret[tag].append(value)
ret = {}
parts = re.split("(</?[^>]*>)", output)
stack = []
for i in parts:
if i.startswith("</") and i.endswith(">"):
# This is a closing tag
tag = i[2:-1]
for idx, (stack_tag, tokens) in reversed(list(enumerate(stack))):
if tag == stack_tag:
# We are closing a tag, so we add it to the returned values
add_tag_value(tag, stringify_tokens(tokens))
del stack[idx]
break
elif i.startswith("<") and i.endswith("/>"):
# This is a singleton tag
tag = i[1:-2]
add_tag_value(tag, None)
elif i.startswith("<") and i.endswith(">"):
# This is an opening tag
tag = i[1:-1]
stack.append((tag, []))
else:
# This is a token, add it to all tags on the stack
token = i.strip() if strip else i
for tag, tokens in stack:
tokens.append(token)
# Add remaining (non-closed) tags,
# we assume that they span until the end of the string
for tag, tokens in stack:
add_tag_value(tag, stringify_tokens(tokens))
return ret
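# --- Illustrative check (not part of the original module) --------------------
# Invented tagged output showing what get_tag_nodes() returns: values grouped
# per tag, with nested and singleton ("<tag/>") tags handled by the stack above.
def _get_tag_nodes_demo():
    parsed = get_tag_nodes("<name>John <surname>Smith</surname></name> <sep/>")
    # -> {"surname": ["Smith"], "name": ["John Smith"], "sep": [None]}
    return parsed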
def f1_tagged(pairs):
TP = Counter()
FN = Counter()
FP = Counter()
tag_set = set()
for ref, hyp in pairs:
ref = get_tag_nodes(ref)
hyp = get_tag_nodes(hyp)
tags = set(ref) | set(hyp)
tag_set |= tags
for tag in tags:
ref_values = ref.get(tag, [])
hyp_values = hyp.get(tag, [])
# Take reference values
while ref_values:
ref_val = ref_values.pop(0)
try:
idx = hyp_values.index(ref_val)
# We have a match, remove it from hypothesis
del hyp_values[idx]
TP[tag] += 1
except ValueError:
# We have a false negative
FN[tag] += 1
# Take hypothesis values
for hyp_value in hyp_values:
FP[tag] += 1
P = {}
R = {}
F = {}
for tag in tag_set:
try:
P[tag] = TP[tag] / (TP[tag]+FP[tag])
except ZeroDivisionError:
P[tag] = 0.
try:
R[tag] = TP[tag] / (TP[tag]+FN[tag])
except ZeroDivisionError:
R[tag] = 0.
try:
F[tag] = 2 * P[tag] * R[tag] / (P[tag]+R[tag])
except ZeroDivisionError:
F[tag] = 0.
TP = sum(TP.values())
FP = sum(FP.values())
FN = sum(FN.values())
try:
P_total = TP / (TP+FP)
except ZeroDivisionError:
P_total = 0.
try:
R_total = TP / (TP+FN)
except ZeroDivisionError:
R_total = 0.
try:
F_total = 2 * P_total * R_total / (P_total+R_total)
except ZeroDivisionError:
F_total = 0.
return {"P_tag": P, "R_tag": R, "F_tag": F,
"P": P_total, "R": R_total, "F": F_total}
def f1_tagged_sum(pairs):
return {key: value for (key, value) in f1_tagged(pairs).items() if key in "PRF"}
EVAL_METRICS = {
"f1_multilabel": f1_multilabel,
"f1_tagged": f1_tagged,
"f1_tagged_sum": f1_tagged_sum,
"match": match,
"binary_lab": binary_lab,
}
TOTAL_METRIC = "__total__"
def eval_tsv(metric, ref, hyp):
"""Evaluates the prediction results using reference and (optionally) multiple hypothesis
Args:
metric: Metric function to evaluate, choose from EVAL_METRICS dictionary
ref: Reference TSV
hyp: Single string or list of strings, hypothesis TSV
"""
logger = logging.getLogger("t5s.eval_tsv")
if not isinstance(ref, list):
ref = [ref]
if not isinstance(hyp, list):
hyp = [hyp]
assert len(ref) == len(hyp)
ret = {}
all_pairs = []
for ref_fn, hyp_fn in zip(ref, hyp):
pairs = []
with open(ref_fn, "r", encoding="utf-8") as fr_ref, \
open(hyp_fn, "r", encoding="utf-8") as fr_hyp:
for idx, (ref_line, hyp_line) in enumerate(zip(fr_ref, fr_hyp)):
ref_in, ref_out = ref_line.split("\t")[:2]
hyp_in, hyp_out = hyp_line.split("\t")[:2]
if ref_in != hyp_in:
logger.warning("Reference and hypothesis inputs mismatch on line: %d", idx)
pairs.append((ref_out, hyp_out))
logger.info("Loaded %d examples", len(pairs))
all_pairs.extend(pairs)
if len(hyp) != 1:
# Store partial results only if we have multiple files
ret[hyp_fn] = metric(pairs)
# Compute total metric value
ret[TOTAL_METRIC] = metric(all_pairs)
return ret
def yaml_dump_result(obj, stream):
"""Redefinition of yaml.safe_dump with added float representer
The float representer uses float precision of four decimal digits
"""
def float_representer(dumper, value):
text = '{0:.4f}'.format(value)
return dumper.represent_scalar(u'tag:yaml.org,2002:float', text)
class ResultDumper(yaml.SafeDumper):
def __init__(self, *args, **kwargs):
super(ResultDumper, self).__init__(*args, **kwargs)
self.add_representer(float, float_representer)
yaml.dump(obj, stream, Dumper=ResultDumper, default_flow_style=False, sort_keys=True)
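# --- Illustrative sketch (not part of the original module) -------------------
# Hypothetical call comparing a reference TSV with a predicted TSV using the
# "match" metric; "devel.tsv" and "devel.model.tsv" are invented filenames.
def _eval_tsv_demo():
    results = eval_tsv(EVAL_METRICS["match"], "devel.tsv", "devel.model.tsv")
    yaml_dump_result(results, sys.stdout)  # floats printed with 4 decimal digits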
| 35.445378 | 149 | 0.596017 |
| af81e5ec04ef366dddd9deafcbb420ec67402c4c | 4,665 | py | Python | strategy/indicator/stochastic/stochastic.py | firebird631/siis | 8d64e8fb67619aaa5c0a62fda9de51dedcd47796 | ["PostgreSQL"] | null | null | null | strategy/indicator/stochastic/stochastic.py | firebird631/siis | 8d64e8fb67619aaa5c0a62fda9de51dedcd47796 | ["PostgreSQL"] | null | null | null | strategy/indicator/stochastic/stochastic.py | firebird631/siis | 8d64e8fb67619aaa5c0a62fda9de51dedcd47796 | ["PostgreSQL"] | null | null | null |
# @date 2018-09-02
# @author Frederic Scherma, All rights reserved without prejudices.
# @author Xavier BONNIN
# @license Copyright (c) 2018 Dream Overflow
# Stochastic indicator
from strategy.indicator.indicator import Indicator
from strategy.indicator.utils import down_sample, MM_n # , MMexp_n
import numpy as np
from talib import STOCH as ta_STOCH # , STOCHF as to_STOCHF
class StochasticIndicator(Indicator):
"""
Stochastic indicator.
https://www.fidelity.com/learning-center/trading-investing/technical-analysis/technical-indicator-guide/slow-stochastic
"""
__slots__ = '_length', '_len_K', '_len_D', '_prev_k', '_last_k', '_prev_d', '_last_d', '_ks', '_ds'
@classmethod
def indicator_type(cls):
return Indicator.TYPE_MOMENTUM
@classmethod
def indicator_class(cls):
return Indicator.CLS_OSCILLATOR
def __init__(self, timeframe, length=9, len_K=3, len_D=3):
super().__init__("stochastic", timeframe)
self._length = length # periods number for the K
self._len_K = len_K # periods number for the K smooth
self._len_D = len_D # periods number for the D smooth
self._prev_k = 0.0
self._last_k = 0.0
self._prev_d = 0.0
self._last_d = 0.0
self._ks = np.array([])
self._ds = np.array([])
@property
def length(self):
return self._length
@length.setter
def length(self, length):
self._length = length
@property
def prev_k(self):
return self._prev_k
@property
def last_k(self):
return self._last_k
@property
def prev_d(self):
return self._prev_d
@property
def last_d(self):
return self._last_d
@property
def len_K(self):
return self._len_K
@len_K.setter
def len_K(self, len_K):
self._len_K = len_K
@property
def len_D(self):
return self._len_D
@len_D.setter
def len_D(self, len_D):
self._len_D = len_D
@property
def ks(self):
return self._ks
@property
def ds(self):
return self._ds
def cross(self):
if (self._prev_k > self._prev_d and self._last_k < self._last_d):
return -1
elif (self._prev_k < self._prev_d and self._last_k > self._last_d):
return 1
return 0
@staticmethod
def Stochastic(N, data, N_D=3):
K = np.zeros(len(data))
for (j,d) in enumerate(data):
i=min(j,N)
highest = max(data[j-i:j+1])
lowest = min(data[j-i:j+1])
if highest == lowest:
highest += 0.000000001
K[j]=(d-lowest)/(highest-lowest) # +epsilon to avoid 0
D = MM_n(N_D, K)
return (K, D)
@staticmethod
def Stochastic_sf(N, data, N_D=3, step=1, filtering=False):
"""
Computes the stochastic oscillator.
N is the number of periods to look back for the minimum and maximum of the price.
N_D is the number of K samples used to compute D.
step selects only one sample out of every step samples of data.
filtering controls whether the data is filtered before the sub-sampling is applied.
Returns the stochastics K, D linearly interpolated back to the same length as data.
"""
sub_data = down_sample(data, step) if filtering else data [::step]
K = np.zeros(len(sub_data))
t_subdata = range(0,len(data),step)
for (j,d) in enumerate(sub_data):
i=min(j,N)
highest = max(sub_data[j-i:j+1])
lowest = min(sub_data[j-i:j+1])
if highest == lowest:
highest += 0.000000001
K[j]=(d-lowest)/(highest-lowest) # +epsilon to avoid 0
D = MM_n(N_D, K)
return np.interp(range(len(data)), t_subdata, K), np.interp(range(len(data)), t_subdata, D)
def compute(self, timestamp, high, low, close):
self._prev_k = self._last_k
self._prev_d = self._last_d
# self._ks, self._ds = StochasticIndicator.Stochastic_sf(self._len_K, close, self._len_D) # , self._step, self._filtering)
self._ks, self._ds = ta_STOCH(high, low, close, fastk_period=self._length, slowk_period=self._len_K, slowk_matype=0, slowd_period=self._len_D, slowd_matype=0)
# self._ks, self._ds = to_STOCHF(high, low, close, fastk_period=self._len_K, fastd_period=self._len_D, fastd_matype=0)
self._last_k = self._ks[-1]
self._last_d = self._ds[-1]
self._last_timestamp = timestamp
return self._ks, self._ds
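# --- Illustrative sketch (not part of the original module) -------------------
# Toy price series showing the pure-python StochasticIndicator.Stochastic()
# helper; the values are invented and only illustrate the (K, D) output.
def _stochastic_demo():
    data = np.array([1.0, 2.0, 3.0, 2.5, 2.0, 3.5, 4.0])
    K, D = StochasticIndicator.Stochastic(5, data, N_D=3)
    return K[-1], D[-1]  # last raw %K and its 3-period moving-average %D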
| 28.445122 | 166 | 0.614577 |
| 991e3f5957105a7ad5fffdad8a8b7e5e3e9fd805 | 3,808 | py | Python | serial_scripts/sriov/base.py | hkgopal/tf-test | dfb00ce26fb159ab5a91726f9647b68d905cfbad | ["Apache-2.0"] | 5 | 2020-09-29T00:36:57.000Z | 2022-02-16T06:51:32.000Z | serial_scripts/sriov/base.py | hkgopal/tf-test | dfb00ce26fb159ab5a91726f9647b68d905cfbad | ["Apache-2.0"] | 27 | 2019-11-02T02:18:34.000Z | 2022-02-24T18:49:08.000Z | serial_scripts/sriov/base.py | hkgopal/tf-test | dfb00ce26fb159ab5a91726f9647b68d905cfbad | ["Apache-2.0"] | 20 | 2019-11-28T16:02:25.000Z | 2022-01-06T05:56:58.000Z |
from builtins import range
import test_v1
import time
import struct
import socket
import fixtures
class BaseSriovTest(test_v1.BaseTestCase_v1):
@classmethod
def setUpClass(cls):
super(BaseSriovTest, cls).setUpClass()
cls.orch = cls.connections.orch
cls.quantum_h= cls.connections.quantum_h
cls.nova_h = cls.connections.nova_h
cls.vnc_lib= cls.connections.vnc_lib
cls.agent_inspect= cls.connections.agent_inspect
cls.cn_inspect= cls.connections.cn_inspect
cls.analytics_obj=cls.connections.analytics_obj
#end setUpClass
@classmethod
def tearDownClass(cls):
super(BaseSriovTest, cls).tearDownClass()
#end tearDownClass
#TODO: add autodetection for interface name (kind of ip link show | grep NO-CARRIER | cut -d ':' -f2 | sed -e 's/^[[:space:]]*//')
def bringup_interface_forcefully(self, vm_fixture, intf='ens6'):
cmd = 'ifconfig %s up'%(intf)
for i in range (5):
cmd_to_pass = [cmd]
vm_fixture.run_cmd_on_vm(cmds=cmd_to_pass, as_sudo=True, timeout=60)
vm_fixture.run_cmd_on_vm(cmds=['ifconfig'], as_sudo=True, timeout=60)
output = vm_fixture.return_output_cmd_dict['ifconfig']
if output and intf in output:
break
else:
time.sleep(3)
def get_sriov_enabled_compute_list(self):
sriov_host_name_list=[]
sriov_host_list=list(self.inputs.sriov_data[0].keys())
for item in sriov_host_list:
sriov_host_name_list.append(self.inputs.host_data[item.split('@')[1]]['fqname'])
return sriov_host_name_list
def get_sriov_physnets(self,compute_name):
host_key=self.inputs.host_data[compute_name]['username'] + '@' + self.inputs.host_data[compute_name]['host_ip']
physnets_list={}
physnets_list=self.inputs.sriov_data[0][host_key][0]['physnets']
return physnets_list
def get_sriov_vf_number(self,compute_name):
host_key=self.inputs.host_data[compute_name]['username'] + '@' + self.inputs.host_data[compute_name]['host_ip']
vf_number=None
vf_number=self.inputs.sriov_data[0][host_key][0]['VF']
return vf_number
def get_sriov_pf(self,compute_name):
host_key=self.inputs.host_data[compute_name]['username'] + '@' + self.inputs.host_data[compute_name]['host_ip']
pf_intf=None
pf_intf=self.inputs.sriov_data[0][host_key][0]['interface']
return pf_intf
def ip_increment(self,base_ip,increase_by):
ip2int = lambda ipstr: struct.unpack('!I', socket.inet_aton(ipstr))[0]
ip_num=ip2int(base_ip)
ip_num=ip_num + int(increase_by)
int2ip = lambda n: socket.inet_ntoa(struct.pack('!I', n))
new_ip=int2ip(ip_num)
return new_ip
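    # Illustrative note (not part of the original fixture): ip_increment() does
    # a plain numeric add on the packed address, so the increment carries over
    # byte boundaries, e.g. ip_increment("10.0.0.250", 10) -> "10.0.1.4".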
def get_sriov_mac(self,vm_fix,interface):
intf_cmd='ip link show dev %s| grep ether'%(interface)
output=vm_fix.run_cmd_on_vm(cmds=[intf_cmd], as_sudo=True)
return output[intf_cmd].split(" ")[1]
def get_vf_in_use(self,vm_fix,interface,mac):
host = self.inputs.get_host_ip(vm_fix.vm_node_ip)
cmd='ip link show dev %s| grep %s'%(interface,mac)
output=self.inputs.run_cmd_on_server(host, cmd)
return output.split(" ")[1]
def set_mtu_on_vf(self,vm_fix,intf,vf_num,vlan_num,mtu):
host = self.inputs.get_host_ip(vm_fix.vm_node_ip)
cmd='ip link set %s vf %s vlan %s mtu %s'%(intf,vf_num,vlan_num,mtu)
output=self.inputs.run_cmd_on_server(host, cmd)
return output
def remove_from_cleanups(self, fix):
for cleanup in self._cleanups:
if fix.cleanUp in cleanup:
self._cleanups.remove(cleanup)
break
#end remove_from_cleanups
| 37.70297 | 134 | 0.664653 |
| dc7fe316b9dbd9788a6a44099558bd643ac8d07d | 1,777 | py | Python | pytmt/get_spec.py | Molecular-Proteomics/TMT_quantifier | 32eeaac10bbc423904cf268fdb83cd317aee5203 | ["MIT"] | null | null | null | pytmt/get_spec.py | Molecular-Proteomics/TMT_quantifier | 32eeaac10bbc423904cf268fdb83cd317aee5203 | ["MIT"] | null | null | null | pytmt/get_spec.py | Molecular-Proteomics/TMT_quantifier | 32eeaac10bbc423904cf268fdb83cd317aee5203 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
""" Reads in mzml file using pymzml and get list of ms2 scans """
import logging
import os
import pymzml as mz
class Mzml(object):
def __init__(
self,
path,
precision
):
"""
This class reads mzml files using pymzml
:param path: path of the mzml file to be loaded, e.g., "~/Desktop/example.mzml"
:param precision: integer that determines the reading precision as well as the mass tolerance of peak integration (ppm)
"""
self.path = path
self.msdata = {}
self.rt_idx = {}
self.mslvl_idx = {}
self.precision = precision
self.logger = logging.getLogger('pytmt.mzml')
def parse_mzml_ms2(self):
"""
Read the mzml file and create data dictionary for all ms2 peaks
:return:
"""
run = mz.run.Reader(self.path,
MS_precision={
1: self.precision*1e-6,
2: self.precision*1e-6
})
for n, spec in enumerate(run):
#if n % 1000 == 0:
# print(
# 'Loading spectrum {0} at retention time {scan_time:1.2f}'.format(
# spec.ID,
# scan_time=spec.scan_time
# )
# )
self.mslvl_idx[n + 1] = spec.ms_level
self.rt_idx[n + 1] = spec.scan_time
if spec.ms_level == 2:
self.msdata[n + 1] = spec.peaks("centroided")
self.logger.info(
'Parsed {0} spectra from file {1}'.format(
n + 1,
self.path)
)
return True
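# --- Illustrative sketch (not part of the original module) -------------------
# Hypothetical usage: parse an mzML file at 20 ppm precision and count the MS2
# spectra that were indexed; "example.mzml" is an invented path.
def _mzml_demo():
    mzml = Mzml("example.mzml", precision=20)
    mzml.parse_mzml_ms2()
    return len(mzml.msdata)  # number of MS2 scans with centroided peak lists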
| 25.028169 | 117 | 0.471581 |
| 6c6d3a2b7f1fee479c17145c51dc31dbeedb6d87 | 8,269 | py | Python | leetcode_python/Dynamic_Programming/longest-common-subsequence.py | yennanliu/Python_basics | 6a597442d39468295946cefbfb11d08f61424dc3 | ["Unlicense"] | null | null | null | leetcode_python/Dynamic_Programming/longest-common-subsequence.py | yennanliu/Python_basics | 6a597442d39468295946cefbfb11d08f61424dc3 | ["Unlicense"] | null | null | null | leetcode_python/Dynamic_Programming/longest-common-subsequence.py | yennanliu/Python_basics | 6a597442d39468295946cefbfb11d08f61424dc3 | ["Unlicense"] | null | null | null |
"""
1143. Longest Common Subsequence
Medium
Given two strings text1 and text2, return the length of their longest common subsequence. If there is no common subsequence, return 0.
A subsequence of a string is a new string generated from the original string with some characters (can be none) deleted without changing the relative order of the remaining characters.
For example, "ace" is a subsequence of "abcde".
A common subsequence of two strings is a subsequence that is common to both strings.
Example 1:
Input: text1 = "abcde", text2 = "ace"
Output: 3
Explanation: The longest common subsequence is "ace" and its length is 3.
Example 2:
Input: text1 = "abc", text2 = "abc"
Output: 3
Explanation: The longest common subsequence is "abc" and its length is 3.
Example 3:
Input: text1 = "abc", text2 = "def"
Output: 0
Explanation: There is no such common subsequence, so the result is 0.
Constraints:
1 <= text1.length, text2.length <= 1000
text1 and text2 consist of only lowercase English characters.
"""
# V0
# V1
# IDEA : DP
# https://leetcode.com/problems/longest-common-subsequence/discuss/794472/Simple-python
class Solution:
def longestCommonSubsequence(self, s1: str, s2: str) -> int:
n1, n2 = len(s1), len(s2)
dp = [[0] * n2 for _ in range(n1)]
for i in range(n1):
for j in range(n2):
if s1[i] == s2[j]:
dp[i][j] = 1 + (dp[i-1][j-1] if i > 0 and j > 0 else 0)
else:
dp[i][j] = max(dp[i][j-1] if j > 0 else 0, dp[i-1][j] if i > 0 else 0)
return dp[-1][-1]
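# Illustrative usage (not part of the original notes): a quick check of the DP
# solution above against the examples from the problem statement.
assert Solution().longestCommonSubsequence("abcde", "ace") == 3
assert Solution().longestCommonSubsequence("abc", "def") == 0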
# V1'
# IDEA : Memoization
# https://leetcode.com/problems/longest-common-subsequence/discuss/598739/Memoization-Python
class Solution:
def longestCommonSubsequence(self, text1: str, text2: str) -> int:
self.cache = [[-1 for i in range(len(text2)+1)] for j in range(len(text1)+1)]
def LCS(i,j):
if i == len(text1) or j == len(text2):
return 0
if self.cache[i][j] != -1:
return self.cache[i][j]
if text1[i] == text2[j]:
self.cache[i][j] = 1 + LCS(i+1,j+1)
else:
self.cache[i][j] = max(LCS(i+1,j),LCS(i,j+1))
return self.cache[i][j]
return LCS(0,0)
# V1''
# IDEA : 2D dynamic programming:
# https://leetcode.com/problems/longest-common-subsequence/discuss/598687/Python-O(-m*n-)-2D-DP.-85%2B-w-Hint
class Solution:
def longestCommonSubsequence(self, text1: str, text2: str) -> int:
# padding one space for empty string representation
text1 = ' ' + text1
text2 = ' ' + text2
w, h = len(text1), len(text2)
dp_table = [ [ 0 for x in range(w) ] for y in range(h) ]
# update dynamic programming table with optimal substructure
for y in range(1, h):
for x in range(1, w):
if text1[x] == text2[y]:
# with the same character
# extend the length of common subsequence
dp_table[y][x] = dp_table[y-1][x-1] + 1
else:
# with different characters
# choose the optimal subsequence
dp_table[y][x] = max( dp_table[y-1][x], dp_table[y][x-1] )
return dp_table[-1][-1]
# V1'''
# IDEA : Memoization
# https://leetcode.com/problems/longest-common-subsequence/solution/
from functools import lru_cache
class Solution:
def longestCommonSubsequence(self, text1: str, text2: str) -> int:
@lru_cache(maxsize=None)
def memo_solve(p1, p2):
# Base case: If either string is now empty, we can't match
# up anymore characters.
if p1 == len(text1) or p2 == len(text2):
return 0
# Option 1: We don't include text1[p1] in the solution.
option_1 = memo_solve(p1 + 1, p2)
# Option 2: We include text1[p1] in the solution, as long as
# a match for it in text2 at or after p2 exists.
first_occurence = text2.find(text1[p1], p2)
option_2 = 0
if first_occurence != -1:
option_2 = 1 + memo_solve(p1 + 1, first_occurence + 1)
# Return the best option.
return max(option_1, option_2)
return memo_solve(0, 0)
# V1'''''
# IDEA : Improved Memoization
# https://leetcode.com/problems/longest-common-subsequence/solution/
from functools import lru_cache
class Solution:
def longestCommonSubsequence(self, text1: str, text2: str) -> int:
@lru_cache(maxsize=None)
def memo_solve(p1, p2):
# Base case: If either string is now empty, we can't match
# up anymore characters.
if p1 == len(text1) or p2 == len(text2):
return 0
# Recursive case 1.
if text1[p1] == text2[p2]:
return 1 + memo_solve(p1 + 1, p2 + 1)
# Recursive case 2.
else:
return max(memo_solve(p1, p2 + 1), memo_solve(p1 + 1, p2))
return memo_solve(0, 0)
# V1''''''
# IDEA : DP
# https://leetcode.com/problems/longest-common-subsequence/solution/
class Solution:
def longestCommonSubsequence(self, text1: str, text2: str) -> int:
# Make a grid of 0's with len(text2) + 1 columns
# and len(text1) + 1 rows.
dp_grid = [[0] * (len(text2) + 1) for _ in range(len(text1) + 1)]
# Iterate up each column, starting from the last one.
for col in reversed(range(len(text2))):
for row in reversed(range(len(text1))):
# If the corresponding characters for this cell are the same...
if text2[col] == text1[row]:
dp_grid[row][col] = 1 + dp_grid[row + 1][col + 1]
# Otherwise they must be different...
else:
dp_grid[row][col] = max(dp_grid[row + 1][col], dp_grid[row][col + 1])
# The original problem's answer is in dp_grid[0][0]. Return it.
return dp_grid[0][0]
# V1'''''''
# IDEA : DP WITH SPACE OPTIMIZATION
# https://leetcode.com/problems/longest-common-subsequence/solution/
class Solution:
def longestCommonSubsequence(self, text1: str, text2: str) -> int:
# If text1 doesn't reference the shortest string, swap them.
if len(text2) < len(text1):
text1, text2 = text2, text1
# The previous column starts with all 0's and like before is 1
# more than the length of the first word.
previous = [0] * (len(text1) + 1)
# Iterate up each column, starting from the last one.
for col in reversed(range(len(text2))):
# Create a new array to represent the current column.
current = [0] * (len(text1) + 1)
for row in reversed(range(len(text1))):
if text2[col] == text1[row]:
current[row] = 1 + previous[row + 1]
else:
current[row] = max(previous[row], current[row + 1])
# The current column becomes the previous one.
previous = current
# The original problem's answer is in previous[0]. Return it.
return previous[0]
# V1'''''''''
# IDEA : DP
# https://leetcode.com/problems/longest-common-subsequence/discuss/1496789/python
class Solution:
def longestCommonSubsequence(self, text1: str, text2: str) -> int:
n=len(text1)
m=len(text2)
dp={}
def helper(i,j):
if i==n or j==m:
return 0
if (i,j) in dp:
return dp[(i,j)]
elif text1[i]==text2[j]:
temp=1+helper(i+1,j+1)
dp[(i,j)]=temp
else:
temp=max(helper(i,j+1),helper(i+1,j))
dp[(i,j)]=temp
return dp[(i,j)]
return helper(0,0)
# V2
| 35.337607 | 184 | 0.548192 |
| 9d5133d5ed966f5b1f7041d445e563b07af0bd92 | 6,635 | py | Python | examples/helpers/mail/mail_example.py | scomert/sendgrid-python | 3e81ffc7a94007d894c1e2364e06d628c46dd117 | ["MIT"] | null | null | null | examples/helpers/mail/mail_example.py | scomert/sendgrid-python | 3e81ffc7a94007d894c1e2364e06d628c46dd117 | ["MIT"] | null | null | null | examples/helpers/mail/mail_example.py | scomert/sendgrid-python | 3e81ffc7a94007d894c1e2364e06d628c46dd117 | ["MIT"] | null | null | null |
import json
import os
import urllib2
from sendgrid.helpers.mail import *
from sendgrid import *
# NOTE: you will need to move this file to the root directory of this project for it to execute properly.
def build_hello_email():
"""Minimum required to send an email"""
from_email = Email("test@example.com")
subject = "Hello World from the SendGrid Python Library"
to_email = Email("test@example.com")
content = Content("text/plain", "some text here")
mail = Mail(from_email, subject, to_email, content)
mail.personalizations[0].add_to(Email("test2@example.com"))
return mail.get()
def build_kitchen_sink():
"""All settings set"""
mail = Mail()
mail.from_email = Email("test@example.com", "Example User")
mail.subject = "Hello World from the SendGrid Python Library"
personalization = Personalization()
personalization.add_to(Email("test1@example.com", "Example User"))
personalization.add_to(Email("test2@example.com", "Example User"))
personalization.add_cc(Email("test3@example.com", "Example User"))
personalization.add_cc(Email("test4@example.com", "Example User"))
personalization.add_bcc(Email("test5@example.com"))
personalization.add_bcc(Email("test6@example.com"))
personalization.subject = "Hello World from the Personalized SendGrid Python Library"
personalization.add_header(Header("X-Test", "test"))
personalization.add_header(Header("X-Mock", "true"))
personalization.add_substitution(Substitution("%name%", "Example User"))
personalization.add_substitution(Substitution("%city%", "Denver"))
personalization.add_custom_arg(CustomArg("user_id", "343"))
personalization.add_custom_arg(CustomArg("type", "marketing"))
personalization.send_at = 1443636843
mail.add_personalization(personalization)
personalization2 = Personalization()
personalization2.add_to(Email("test1@example.com", "Example User"))
personalization2.add_to(Email("test2@example.com", "Example User"))
personalization2.add_cc(Email("test3@example.com", "Example User"))
personalization2.add_cc(Email("test4@example.com", "Eric Shallock"))
personalization2.add_bcc(Email("test5@example.com"))
personalization2.add_bcc(Email("test6@example.com"))
personalization2.subject = "Hello World from the Personalized SendGrid Python Library"
personalization2.add_header(Header("X-Test", "test"))
personalization2.add_header(Header("X-Mock", "true"))
personalization2.add_substitution(Substitution("%name%", "Example User"))
personalization2.add_substitution(Substitution("%city%", "Denver"))
personalization2.add_custom_arg(CustomArg("user_id", "343"))
personalization2.add_custom_arg(CustomArg("type", "marketing"))
personalization2.send_at = 1443636843
mail.add_personalization(personalization2)
mail.add_content(Content("text/plain", "some text here"))
mail.add_content(Content("text/html", "<html><body>some text here</body></html>"))
attachment = Attachment()
attachment.content = "TG9yZW0gaXBzdW0gZG9sb3Igc2l0IGFtZXQsIGNvbnNlY3RldHVyIGFkaXBpc2NpbmcgZWxpdC4gQ3JhcyBwdW12"
attachment.type = "application/pdf"
attachment.filename = "balance_001.pdf"
attachment.disposition = "attachment"
attachment.content_id = "Balance Sheet"
mail.add_attachment(attachment)
attachment2 = Attachment()
attachment2.content = "BwdW"
attachment2.type = "image/png"
attachment2.filename = "banner.png"
attachment2.disposition = "inline"
attachment2.content_id = "Banner"
mail.add_attachment(attachment2)
mail.template_id = "13b8f94f-bcae-4ec6-b752-70d6cb59f932"
mail.add_section(Section("%section1%", "Substitution Text for Section 1"))
mail.add_section(Section("%section2%", "Substitution Text for Section 2"))
mail.add_header(Header("X-Test1", "test1"))
mail.add_header(Header("X-Test3", "test2"))
mail.add_category(Category("May"))
mail.add_category(Category("2016"))
mail.add_custom_arg(CustomArg("campaign", "welcome"))
mail.add_custom_arg(CustomArg("weekday", "morning"))
mail.send_at = 1443636842
# This must be a valid [batch ID](https://sendgrid.com/docs/API_Reference/SMTP_API/scheduling_parameters.html) to work
# mail.set_batch_id("N2VkYjBjYWItMGU4OC0xMWU2LWJhMzYtZjQ1Yzg5OTBkNzkxLWM5ZTUyZjNhOA")
mail.asm = ASM(99, [4, 5, 6, 7, 8])
mail.ip_pool_name = "24"
mail_settings = MailSettings()
mail_settings.bcc_settings = BCCSettings(True, Email("test@example.com"))
mail_settings.bypass_list_management = BypassListManagement(True)
mail_settings.footer_settings = FooterSettings(True, "Footer Text", "<html><body>Footer Text</body></html>")
mail_settings.sandbox_mode = SandBoxMode(True)
mail_settings.spam_check = SpamCheck(True, 1, "https://spamcatcher.sendgrid.com")
mail.mail_settings = mail_settings
tracking_settings = TrackingSettings()
tracking_settings.click_tracking = ClickTracking(True, True)
tracking_settings.open_tracking = OpenTracking(True, "Optional tag to replace with the open image in the body of the message")
tracking_settings.subscription_tracking = SubscriptionTracking(True, "text to insert into the text/plain portion of the message", "<html><body>html to insert into the text/html portion of the message</body></html>", "Optional tag to replace with the open image in the body of the message")
tracking_settings.ganalytics = Ganalytics(True, "some source", "some medium", "some term", "some_content", "some_campaign")
mail.tracking_settings = tracking_settings
mail.reply_to = Email("test@example.com")
return mail.get()
def send_hello_email():
# Assumes you set your environment variable:
# https://github.com/sendgrid/sendgrid-python/blob/master/TROUBLESHOOTING.md#environment-variables-and-your-sendgrid-api-key
sg = SendGridAPIClient()
data = build_hello_email()
response = sg.client.mail.send.post(request_body=data)
print(response.status_code)
print(response.headers)
print(response.body)
def send_kitchen_sink():
# Assumes you set your environment variable:
# https://github.com/sendgrid/sendgrid-python/blob/master/TROUBLESHOOTING.md#environment-variables-and-your-sendgrid-api-key
sg = SendGridAPIClient()
data = build_kitchen_sink()
response = sg.client.mail.send.post(request_body=data)
print(response.status_code)
print(response.headers)
print(response.body)
send_hello_email() # this will actually send an email
send_kitchen_sink() # this will only send an email if you set SandBox Mode to False
| 45.758621 | 293 | 0.740467 |
| d506893d2011d021aef767e066b9d3a53cd97c54 | 1,252 | py | Python | yolov5/utils/draw_name.py | Matrix-King-Studio/MaskDetection | 5fed65833a8c08380299d606f66e14df814b022f | ["MIT"] | null | null | null | yolov5/utils/draw_name.py | Matrix-King-Studio/MaskDetection | 5fed65833a8c08380299d606f66e14df814b022f | ["MIT"] | null | null | null | yolov5/utils/draw_name.py | Matrix-King-Studio/MaskDetection | 5fed65833a8c08380299d606f66e14df814b022f | ["MIT"] | null | null | null |
import base64
from aip import AipFace
import cv2
APP_ID = '23167253'
API_KEY = '38jgdZXVu0azpywqCw4Z7NAZ'
SECRET_KEY = 'KrQZFsjoPNY44BD6qghUCDTuE9FyjV6n'
client = AipFace(APP_ID, API_KEY, SECRET_KEY)
options = {'max_face_num': '10'}
def get_name():
with open('img.jpg', 'rb') as f:
data = base64.b64encode(f.read())
image = str(data, 'UTF-8')
result = client.multiSearch(image, "BASE64", "Matrix_studio", options=options)
# print(result)
if result["error_msg"] == "SUCCESS":
dic = {}
for i in result["result"]["face_list"]:
try:
dic[i["user_list"][0]["user_id"]] = i["location"]
except IndexError:
return dic
return dic
else:
return False
def draw_name(img, color):
dic = get_name()
if dic:
for name in dic:
cv2.putText(img, name, (int(dic[name]['left']) + 12, int(dic[name]['top']) - 80 + 12),
cv2.FONT_HERSHEY_SIMPLEX, 0.7, color, 2)
return img
if __name__ == '__main__':
# get_name()
img = cv2.imread('img.jpg')
img = draw_name(img, (255, 255, 255))
cv2.imshow("add_text", img)
cv2.waitKey()
| 26.083333
| 99
| 0.558307
|
0ac820b3e5b1b903e6827e56bfb522e6ba2f3819
| 14,771
|
py
|
Python
|
test/dlc_tests/eks/pytorch/training/test_eks_pytorch_training.py
|
Yixiao99/deep-learning-containers
|
01f078adf5abfb92e802b326511981bdd4a8c85c
|
[
"Apache-2.0"
] | 1
|
2021-10-22T04:36:45.000Z
|
2021-10-22T04:36:45.000Z
|
test/dlc_tests/eks/pytorch/training/test_eks_pytorch_training.py
|
Yixiao99/deep-learning-containers
|
01f078adf5abfb92e802b326511981bdd4a8c85c
|
[
"Apache-2.0"
] | 32
|
2021-06-10T21:21:29.000Z
|
2021-08-06T22:37:37.000Z
|
test/dlc_tests/eks/pytorch/training/test_eks_pytorch_training.py
|
Yixiao99/deep-learning-containers
|
01f078adf5abfb92e802b326511981bdd4a8c85c
|
[
"Apache-2.0"
] | 1
|
2021-04-20T05:05:11.000Z
|
2021-04-20T05:05:11.000Z
|
import json
import os
import random
import datetime
import pytest
from invoke import run
from invoke.context import Context
from retrying import retry
import test.test_utils.eks as eks_utils
from test.test_utils import is_pr_context, SKIP_PR_REASON, is_below_framework_version
from test.test_utils import get_framework_and_version_from_tag, get_cuda_version_from_tag
from packaging.version import Version
LOGGER = eks_utils.LOGGER
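# Each test below renders a pod/job spec from a YAML template, creates it with kubectl,
# and inspects the pod logs for an accuracy string to decide pass/fail.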
@pytest.mark.skipif(not is_pr_context(), reason="Skip this test. It is already tested under PR context and we do not have enough resources to test it again on the mainline pipeline")
@pytest.mark.model("mnist")
def test_eks_pytorch_single_node_training(pytorch_training):
"""
    Function to create a pod using kubectl and a given container image, and run PyTorch training
Args:
:param setup_utils: environment in which EKS tools are setup
:param pytorch_training: the ECR URI
"""
training_result = False
rand_int = random.randint(4001, 6000)
yaml_path = os.path.join(os.sep, "tmp", f"pytorch_single_node_training_{rand_int}.yaml")
pod_name = f"pytorch-single-node-training-{rand_int}"
# Workaround for https://github.com/pytorch/vision/issues/1938 and https://github.com/pytorch/vision/issues/3549
mnist_dataset_download_config = '''
FILE=new_main.py &&
echo "from __future__ import print_function" > $FILE &&
echo "from six.moves import urllib" >> $FILE &&
echo "from packaging.version import Version" >> $FILE &&
echo "opener = urllib.request.build_opener()" >> $FILE &&
echo "opener.addheaders = [('User-agent', 'Mozilla/5.0')]" >> $FILE &&
echo "urllib.request.install_opener(opener)" >> $FILE &&
echo "import torchvision" >> $FILE &&
echo "from torchvision import datasets, transforms" >> $FILE &&
echo "# from torchvision 0.9.1, 2 candidate mirror website links will be added before resources items automatically" >> $FILE &&
echo "# Reference PR https://github.com/pytorch/vision/pull/3559" >> $FILE &&
echo "TORCHVISION_VERSION = '0.9.1'" >> $FILE &&
echo "if Version(torchvision.__version__) < Version(TORCHVISION_VERSION):" >> $FILE &&
echo " datasets.MNIST.resources = [" >> $FILE &&
echo " ('https://dlinfra-mnist-dataset.s3-us-west-2.amazonaws.com/mnist/train-images-idx3-ubyte.gz', 'f68b3c2dcbeaaa9fbdd348bbdeb94873')," >> $FILE &&
echo " ('https://dlinfra-mnist-dataset.s3-us-west-2.amazonaws.com/mnist/train-labels-idx1-ubyte.gz', 'd53e105ee54ea40749a09fcbcd1e9432')," >> $FILE &&
echo " ('https://dlinfra-mnist-dataset.s3-us-west-2.amazonaws.com/mnist/t10k-images-idx3-ubyte.gz', '9fb629c4189551a2d022fa330f9573f3')," >> $FILE &&
echo " ('https://dlinfra-mnist-dataset.s3-us-west-2.amazonaws.com/mnist/t10k-labels-idx1-ubyte.gz', 'ec29112dd5afa0611ce80d1b7f02629c')" >> $FILE &&
echo " ]" >> $FILE &&
sed -i '1d' examples/mnist/main.py &&
sed -i '6d' examples/mnist/main.py &&
cat examples/mnist/main.py >> $FILE &&
rm examples/mnist/main.py &&
mv $FILE examples/mnist/main.py
'''
args = f"git clone https://github.com/pytorch/examples.git && {mnist_dataset_download_config} && python examples/mnist/main.py"
# TODO: Change hardcoded value to read a mapping from the EKS cluster instance.
cpu_limit = 72
cpu_limit = str(int(cpu_limit) / 2)
search_replace_dict = {
"<POD_NAME>": pod_name,
"<CONTAINER_NAME>": pytorch_training,
"<ARGS>": args,
"<CPU_LIMIT>": cpu_limit,
}
eks_utils.write_eks_yaml_file_from_template(
eks_utils.SINGLE_NODE_TRAINING_TEMPLATE_PATH, yaml_path, search_replace_dict
)
try:
run("kubectl create -f {}".format(yaml_path))
if eks_utils.is_eks_training_complete(pod_name):
pytorch_out = run("kubectl logs {}".format(pod_name)).stdout
if "Accuracy" in pytorch_out:
training_result = True
else:
eks_utils.LOGGER.info("**** training output ****")
eks_utils.LOGGER.debug(pytorch_out)
        assert training_result, "Training failed"
finally:
run("kubectl delete pods {}".format(pod_name))
@pytest.mark.skipif(not is_pr_context(), reason="Skip this test. It is already tested under PR context and we do not have enough resources to test it again on the mainline pipeline")
@pytest.mark.model("resnet18")
@pytest.mark.integration("pt_s3_plugin")
def test_eks_pt_s3_plugin_single_node_training(pytorch_training, pt17_and_above_only):
"""
    Function to create a pod using kubectl and a given container image, and run PyTorch training
Args:
:param setup_utils: environment in which EKS tools are setup
:param pytorch_training: the ECR URI
"""
_, image_framework_version = get_framework_and_version_from_tag(pytorch_training)
if Version(image_framework_version) < Version("1.8"):
pytest.skip("S3 plugin is supported on PyTorch version >=1.8")
training_result = False
rand_int = random.randint(4001, 6000)
yaml_path = os.path.join(os.sep, "tmp", f"pytorch_s3_single_node_training_{rand_int}.yaml")
pod_name = f"pytorch-s3-single-node-training-{rand_int}"
args = f"git clone https://github.com/aws/amazon-s3-plugin-for-pytorch.git && python amazon-s3-plugin-for-pytorch/examples/s3_imagenet_example.py"
# TODO: Change hardcoded value to read a mapping from the EKS cluster instance.
cpu_limit = 96
cpu_limit = str(int(cpu_limit) / 2)
if "gpu" in pytorch_training:
args = args + " --gpu 0"
search_replace_dict = {
"<POD_NAME>": pod_name,
"<CONTAINER_NAME>": pytorch_training,
"<ARGS>": args,
"<CPU_LIMIT>": cpu_limit,
}
eks_utils.write_eks_yaml_file_from_template(
eks_utils.SINGLE_NODE_TRAINING_TEMPLATE_PATH, yaml_path, search_replace_dict
)
try:
run("kubectl create -f {}".format(yaml_path))
if eks_utils.is_eks_training_complete(pod_name):
pytorch_out = run("kubectl logs {}".format(pod_name)).stdout
if "Acc" in pytorch_out:
training_result = True
else:
eks_utils.LOGGER.info("**** training output ****")
eks_utils.LOGGER.debug(pytorch_out)
        assert training_result, "Training failed"
finally:
run("kubectl delete pods {}".format(pod_name))
@pytest.mark.skipif(not is_pr_context(), reason="Skip this test. It is already tested under PR context")
@pytest.mark.integration("dgl")
@pytest.mark.model("gcn")
def test_eks_pytorch_dgl_single_node_training(pytorch_training, py3_only):
"""
Function to create a pod using kubectl and given container image, and run
DGL training with PyTorch backend
Args:
:param pytorch_training: the ECR URI
"""
_, image_framework_version = get_framework_and_version_from_tag(pytorch_training)
image_cuda_version = get_cuda_version_from_tag(pytorch_training)
if Version(image_framework_version) == Version("1.6") and image_cuda_version == "cu110":
pytest.skip("DGL does not suport CUDA 11 for PyTorch 1.6")
training_result = False
rand_int = random.randint(4001, 6000)
yaml_path = os.path.join(os.sep, "tmp", f"pytorch_single_node_training_dgl_{rand_int}.yaml")
pod_name = f"pytorch-single-node-training-dgl-{rand_int}"
if is_below_framework_version("1.7", pytorch_training, "pytorch"):
dgl_branch = "0.4.x"
else:
dgl_branch = "0.5.x"
args = (
f"git clone -b {dgl_branch} https://github.com/dmlc/dgl.git && "
f"cd /dgl/examples/pytorch/gcn/ && DGLBACKEND=pytorch python train.py --dataset cora"
)
# TODO: Change hardcoded value to read a mapping from the EKS cluster instance.
cpu_limit = 72
cpu_limit = str(int(cpu_limit) / 2)
if "gpu" in pytorch_training:
args = args + " --gpu 0"
else:
args = args + " --gpu -1"
search_replace_dict = {
"<POD_NAME>": pod_name,
"<CONTAINER_NAME>": pytorch_training,
"<ARGS>": args,
"<CPU_LIMIT>": cpu_limit,
}
eks_utils.write_eks_yaml_file_from_template(
eks_utils.SINGLE_NODE_TRAINING_TEMPLATE_PATH, yaml_path, search_replace_dict
)
try:
run("kubectl create -f {}".format(yaml_path))
if eks_utils.is_eks_training_complete(pod_name):
dgl_out = run("kubectl logs {}".format(pod_name)).stdout
if "Test accuracy" in dgl_out:
training_result = True
else:
eks_utils.LOGGER.info("**** training output ****")
eks_utils.LOGGER.debug(dgl_out)
        assert training_result, "Training failed"
finally:
run("kubectl delete pods {}".format(pod_name))
@pytest.mark.skipif(is_pr_context(), reason=SKIP_PR_REASON)
@pytest.mark.model("mnist")
@pytest.mark.multinode(4)
def test_eks_pytorch_multinode_node_training(pytorch_training, example_only):
"""
    Function to create multiple pods using kubectl and a given container image, and run PyTorch training
Args:
:param setup_utils: environment in which EKS tools are setup
:param pytorch_training: the ECR URI
"""
# TODO: Change hardcoded value to read a mapping from the EKS cluster instance.
random.seed(f"{pytorch_training}-{datetime.datetime.now().strftime('%Y%m%d%H%M%S%f')}")
unique_id = random.randint(1, 6000)
namespace = f"pytorch-multi-node-training-{unique_id}"
app_name = f"eks-pytorch-mnist-app-{unique_id}"
job_name = f"kubeflow-pytorch-gpu-dist-job-{unique_id}"
num_masters = "1"
num_workers = "3"
gpu_limit = "1"
backend = "gloo"
epochs = '"10"'
local_template_file_path = os.path.join(
"eks",
"eks_manifest_templates",
"pytorch",
"training",
"multi_node_gpu_training.yaml"
)
remote_yaml_path = os.path.join(os.sep, "tmp", f"pytorch_multinode_node_training_{unique_id}.yaml")
replace_dict = {
"<JOB_NAME>": job_name,
"<NUM_MASTERS>": num_masters,
"<NUM_WORKERS>": num_workers,
"<CONTAINER_IMAGE>": pytorch_training,
"<BACKEND>": backend,
"<EPOCHS>": epochs,
"<GPU_LIMIT>": gpu_limit
}
eks_utils.write_eks_yaml_file_from_template(local_template_file_path, remote_yaml_path, replace_dict)
run_eks_pytorch_multi_node_training(namespace, job_name, remote_yaml_path)
def run_eks_pytorch_multi_node_training(namespace, job_name, remote_yaml_file_path):
"""Run PyTorch distributed training on EKS using PyTorch Operator
Args:
namespace, job_name, remote_yaml_file_path
"""
    # Namespaces allow parallel runs on the same cluster. Create the namespace if it doesn't exist.
does_namespace_exist = run(f"kubectl get namespace | grep {namespace}",
warn=True)
if not does_namespace_exist:
run(f"kubectl create namespace {namespace}")
try:
run(f"kubectl delete -f {remote_yaml_file_path}", warn=True)
run(f"kubectl create -f {remote_yaml_file_path} -n {namespace}")
training_result = is_pytorch_eks_multinode_training_complete(job_name, namespace)
if training_result:
run_out = run(f"kubectl logs {job_name}-master-0 -n {namespace}", warn=True).stdout
if "accuracy" in run_out:
training_result = True
else:
eks_utils.LOGGER.info("**** training output ****")
eks_utils.LOGGER.debug(run_out)
assert training_result, f"Training for eks pytorch multinode failed"
finally:
eks_utils.eks_multinode_cleanup(remote_yaml_file_path, namespace)
def retry_if_value_error(exception):
"""Return True if we should retry (in this case when it's an ValueError), False otherwise"""
return isinstance(exception, ValueError)
@retry(stop_max_attempt_number=40, wait_fixed=60000, retry_on_exception=retry_if_value_error)
def is_pytorch_eks_multinode_training_complete(job_name, namespace):
"""Function to check job and pod status for multinode training.
A separate method is required because kubectl commands for logs and status are different with namespaces.
Args:
job_name: str
"""
run_out = run(f"kubectl get pytorchjobs -n {namespace} {job_name} -o json", warn=True)
    if run_out.stdout is not None and run_out.stdout != "":
job_info = json.loads(run_out.stdout)
LOGGER.debug(f"Job info: {job_info}")
if 'status' not in job_info:
raise ValueError("Waiting for job to launch...")
job_status = job_info['status']
if 'conditions' not in job_status:
raise ValueError("Waiting for job to launch...")
job_conditions = job_status['conditions']
if len(job_conditions) == 0:
raise ValueError("Waiting for job to launch...")
else:
# job_conditions at least with length 1
if 'status' in job_conditions[0]:
job_created = job_conditions[0]['status']
if 'message' in job_conditions[0] and len(job_conditions) == 1:
LOGGER.info(job_conditions[0]['message'])
if not job_created:
raise ValueError("Waiting for job to be created...")
if len(job_conditions) == 1:
raise ValueError("Waiting for job to run...")
# job_conditions at least with length 2
if 'status' in job_conditions[1]:
job_running = job_conditions[1]['status']
if 'message' in job_conditions[1] and len(job_conditions) == 2:
LOGGER.info(job_conditions[1]['message'])
if not job_running:
raise ValueError("Waiting for job to run...")
if len(job_conditions) == 2:
raise ValueError("Waiting for job to complete...")
# job_conditions at least with length 3
if 'status' in job_conditions[2]:
job_succeed = job_conditions[2]['status']
if 'message' in job_conditions[2]:
LOGGER.info(job_conditions[2]['message'])
if not job_succeed:
if job_running:
raise ValueError("Waiting for job to complete...")
else:
return False
return True
else:
raise ValueError("Waiting for job to run...")
else:
raise ValueError("Waiting for job to launch...")
return False
| 41.844193
| 177
| 0.657708
|
20b99314962ebeb3eed7f12930a2ea191607ff1c
| 500
|
py
|
Python
|
autodidaqt_receiver/__init__.py
|
chstan/autodidaqt-receiver
|
ff5ab88b8d759a4441dbe52619820fbce67fdc1b
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
autodidaqt_receiver/__init__.py
|
chstan/autodidaqt-receiver
|
ff5ab88b8d759a4441dbe52619820fbce67fdc1b
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
autodidaqt_receiver/__init__.py
|
chstan/autodidaqt-receiver
|
ff5ab88b8d759a4441dbe52619820fbce67fdc1b
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
# type: ignore[attr-defined]
"""Analyis-side bridge for autodiDAQt."""
import sys
from .receiver import *
if sys.version_info >= (3, 8):
from importlib import metadata as importlib_metadata
else:
import importlib_metadata
def get_version() -> str:
try:
return importlib_metadata.version(__name__)
except importlib_metadata.PackageNotFoundError: # pragma: no cover
return "unknown"
version: str = get_version()
VERSION: str = version
__version__: str = version
| 20.833333
| 71
| 0.72
|
68fb997d400a8754930e7ee9de11a07073dfe57a
| 12,440
|
py
|
Python
|
argopy/tests/test_index_fetchers.py
|
tylertucker202/argopy
|
c22f8c9803cfcf2ccfde3d93269f48a0e8e60832
|
[
"Apache-2.0"
] | null | null | null |
argopy/tests/test_index_fetchers.py
|
tylertucker202/argopy
|
c22f8c9803cfcf2ccfde3d93269f48a0e8e60832
|
[
"Apache-2.0"
] | null | null | null |
argopy/tests/test_index_fetchers.py
|
tylertucker202/argopy
|
c22f8c9803cfcf2ccfde3d93269f48a0e8e60832
|
[
"Apache-2.0"
] | 2
|
2020-06-29T19:40:00.000Z
|
2021-08-01T10:35:09.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Test data fetchers
#
import os
import xarray as xr
import shutil
import pytest
import unittest
from unittest import TestCase
import argopy
from argopy import IndexFetcher as ArgoIndexFetcher
from argopy.errors import InvalidFetcherAccessPoint, InvalidFetcher, \
FileSystemHasNoCache, CacheFileNotFound, ErddapServerError, DataNotFound
from argopy.utilities import list_available_index_src, isconnected, erddap_ds_exists
AVAILABLE_INDEX_SOURCES = list_available_index_src()
CONNECTED = isconnected()
if CONNECTED:
DSEXISTS = erddap_ds_exists(ds="ArgoFloats-index")
else:
DSEXISTS = False
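# The network-dependent tests below are skipped when offline or when the erddap index dataset is unavailable.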
def test_invalid_accesspoint():
src = list(AVAILABLE_INDEX_SOURCES.keys())[0] # Use the first valid data source
with pytest.raises(InvalidFetcherAccessPoint):
ArgoIndexFetcher(src=src).invalid_accesspoint.to_xarray() # Can't get data if access point not defined first
with pytest.raises(InvalidFetcherAccessPoint):
ArgoIndexFetcher(src=src).to_xarray() # Can't get data if access point not defined first
def test_invalid_fetcher():
with pytest.raises(InvalidFetcher):
ArgoIndexFetcher(src='invalid_fetcher').to_xarray()
# @unittest.skipUnless('localftp' in AVAILABLE_SOURCES, "requires localftp data fetcher")
# def test_unavailable_accesspoint():
# with pytest.raises(InvalidFetcherAccessPoint):
# ArgoIndexFetcher((src=self.src).region([-85., -45., 10., 20., 0., 100.]).to_xarray()
class EntryPoints_AllBackends(TestCase):
""" Test main API facade for all available index fetching backends """
def setUp(self):
# todo Determine the list of output format to test
# what else beyond .to_xarray() ?
self.fetcher_opts = {}
# Define API entry point options to tests:
# These should be available online and with the argopy-data dummy gdac ftp
self.args = {}
self.args['float'] = [[2901623],
[6901929, 2901623]]
self.args['region'] = [[-60, -40, 40., 60.],
[-60, -40, 40., 60., '2007-08-01', '2007-09-01']]
self.args['profile'] = [[2901623, 2],
[6901929, [5, 45]]]
def __test_float(self, bk, **ftc_opts):
""" Test float index fetching for a given backend """
for arg in self.args['float']:
options = {**self.fetcher_opts, **ftc_opts}
ds = ArgoIndexFetcher(src=bk, **options).float(arg).to_xarray()
assert isinstance(ds, xr.Dataset)
def __test_profile(self, bk, **ftc_opts):
""" Test profile index fetching for a given backend """
for arg in self.args['profile']:
options = {**self.fetcher_opts, **ftc_opts}
ds = ArgoIndexFetcher(src=bk, **options).profile(*arg).to_xarray()
assert isinstance(ds, xr.Dataset)
def __test_region(self, bk, **ftc_opts):
""" Test float index fetching for a given backend """
for arg in self.args['region']:
options = {**self.fetcher_opts, **ftc_opts}
ds = ArgoIndexFetcher(src=bk, **options).region(arg).to_xarray()
assert isinstance(ds, xr.Dataset)
@unittest.skipUnless('erddap' in AVAILABLE_INDEX_SOURCES, "requires erddap index fetcher")
@unittest.skipUnless(CONNECTED, "erddap requires an internet connection")
@unittest.skipUnless(DSEXISTS, "erddap requires a valid core index Argo dataset from Ifremer server")
@unittest.skipUnless(False, "Waiting for https://github.com/euroargodev/argopy/issues/16")
def test_float_index_erddap(self):
self.__test_float('erddap')
@unittest.skipUnless('erddap' in AVAILABLE_INDEX_SOURCES, "requires erddap index fetcher")
@unittest.skipUnless(CONNECTED, "erddap requires an internet connection")
@unittest.skipUnless(DSEXISTS, "erddap requires a valid core index Argo dataset from Ifremer server")
@unittest.skipUnless(False, "Waiting for https://github.com/euroargodev/argopy/issues/16")
def test_region_index_erddap(self):
self.__test_region('erddap')
@unittest.skipUnless('localftp' in AVAILABLE_INDEX_SOURCES, "requires localftp index fetcher")
def test_float_index_localftp(self):
ftproot, findex = argopy.tutorial.open_dataset('global_index_prof')
with argopy.set_options(local_ftp=ftproot):
self.__test_float('localftp', index_file='ar_index_global_prof.txt')
@unittest.skipUnless('localftp' in AVAILABLE_INDEX_SOURCES, "requires localftp index fetcher")
def test_profile_index_localftp(self):
ftproot, findex = argopy.tutorial.open_dataset('global_index_prof')
with argopy.set_options(local_ftp=ftproot):
self.__test_profile('localftp', index_file='ar_index_global_prof.txt')
@unittest.skipUnless('localftp' in AVAILABLE_INDEX_SOURCES, "requires localftp index fetcher")
def test_region_index_localftp(self):
ftproot, findex = argopy.tutorial.open_dataset('global_index_prof')
with argopy.set_options(local_ftp=ftproot):
self.__test_region('localftp', index_file='ar_index_global_prof.txt')
@unittest.skipUnless('erddap' in AVAILABLE_INDEX_SOURCES, "requires erddap index fetcher")
@unittest.skipUnless(CONNECTED, "erddap requires an internet connection")
@unittest.skipUnless(DSEXISTS, "erddap requires a valid core index Argo dataset from Ifremer server")
# @unittest.skipUnless(False, "Waiting for https://github.com/euroargodev/argopy/issues/16")
class Erddap(TestCase):
""" Test main API facade for all available dataset of the ERDDAP index fetching backend """
testcachedir = os.path.expanduser(os.path.join("~", ".argopytest_tmp"))
def test_cachepath_notfound(self):
with argopy.set_options(cachedir=self.testcachedir):
loader = ArgoIndexFetcher(src='erddap', cache=True).float(6902746)
with pytest.raises(CacheFileNotFound):
loader.fetcher.cachepath
shutil.rmtree(self.testcachedir) # Make sure the cache is empty
@unittest.skipUnless(False, "Waiting for https://github.com/euroargodev/argopy/issues/16")
def test_nocache(self):
with argopy.set_options(cachedir=self.testcachedir):
loader = ArgoIndexFetcher(src='erddap', cache=False).float(6902746)
loader.to_xarray()
with pytest.raises(FileSystemHasNoCache):
loader.fetcher.cachepath
shutil.rmtree(self.testcachedir) # Make sure the cache is empty
@unittest.skipUnless(False, "Waiting for https://github.com/euroargodev/argopy/issues/16")
def test_caching_index(self):
with argopy.set_options(cachedir=self.testcachedir):
try:
loader = ArgoIndexFetcher(src='erddap', cache=True).float(6902746)
# 1st call to load from erddap and save to cachedir:
ds = loader.to_xarray()
# 2nd call to load from cached file:
ds = loader.to_xarray()
assert isinstance(ds, xr.Dataset)
assert isinstance(loader.fetcher.cachepath, str)
shutil.rmtree(self.testcachedir)
            except ErddapServerError:  # The test passes when the failure comes from the erddap server itself, not from argopy
shutil.rmtree(self.testcachedir)
pass
except Exception:
shutil.rmtree(self.testcachedir)
raise
def test_url(self):
loader = ArgoIndexFetcher(src='erddap', cache=True).float(2901623)
assert isinstance(loader.fetcher.url, str)
# loader = ArgoIndexFetcher(src='erddap', cache=True).profile(2901623, 12)
# assert isinstance(loader.fetcher.url, str)
loader = ArgoIndexFetcher(src='erddap', cache=True).region([-60, -40, 40., 60., '2007-08-01', '2007-09-01'])
assert isinstance(loader.fetcher.url, str)
@unittest.skipUnless('localftp' in AVAILABLE_INDEX_SOURCES, "requires localftp index fetcher")
class LocalFTP(TestCase):
""" Test localftp index fetcher """
src = 'localftp'
ftproot, flist = argopy.tutorial.open_dataset('localftp')
local_ftp = ftproot
def test_cachepath_notfound(self):
testcachedir = os.path.expanduser(os.path.join("~", ".argopytest_tmp"))
with argopy.set_options(cachedir=testcachedir, local_ftp=self.local_ftp):
loader = ArgoIndexFetcher(src=self.src, cache=True).profile(2901623, 2)
with pytest.raises(CacheFileNotFound):
loader.fetcher.cachepath
shutil.rmtree(testcachedir) # Make sure the cache folder is cleaned
def test_nocache(self):
with argopy.set_options(cachedir="dummy", local_ftp=self.local_ftp):
loader = ArgoIndexFetcher(src=self.src, cache=False).profile(2901623, 2)
loader.to_xarray()
with pytest.raises(FileSystemHasNoCache):
loader.fetcher.cachepath
def test_caching_float(self):
testcachedir = os.path.expanduser(os.path.join("~", ".argopytest_tmp"))
with argopy.set_options(cachedir=testcachedir, local_ftp=self.local_ftp):
try:
loader = ArgoIndexFetcher(src=self.src, cache=True).float(6901929)
# 1st call to load from erddap and save to cachedir:
ds = loader.to_xarray()
# 2nd call to load from cached file:
ds = loader.to_xarray()
assert isinstance(ds, xr.Dataset)
assert isinstance(loader.fetcher.cachepath, str)
shutil.rmtree(testcachedir)
except Exception:
shutil.rmtree(testcachedir)
raise
def test_noresults(self):
with argopy.set_options(local_ftp=self.local_ftp):
with pytest.raises(DataNotFound):
ArgoIndexFetcher(src=self.src).region([-70, -65, 30., 35., '2030-01-01', '2030-06-30']).to_dataframe()
def __testthis(self, dataset):
for access_point in self.args:
if access_point == 'profile':
for arg in self.args['profile']:
with argopy.set_options(local_ftp=self.local_ftp):
try:
ds = ArgoIndexFetcher(src=self.src).profile(*arg).to_xarray()
assert isinstance(ds, xr.Dataset)
except Exception:
print("ERROR LOCALFTP request:\n",
ArgoIndexFetcher(src=self.src).profile(*arg).fetcher.cname())
pass
if access_point == 'float':
for arg in self.args['float']:
with argopy.set_options(local_ftp=self.local_ftp):
try:
ds = ArgoIndexFetcher(src=self.src).float(arg).to_xarray()
assert isinstance(ds, xr.Dataset)
except Exception:
print("ERROR LOCALFTP request:\n",
ArgoIndexFetcher(src=self.src).float(arg).fetcher.cname())
pass
if access_point == 'region':
for arg in self.args['region']:
with argopy.set_options(local_ftp=self.local_ftp):
try:
ds = ArgoIndexFetcher(src=self.src).region(arg).to_xarray()
assert isinstance(ds, xr.Dataset)
except Exception:
print("ERROR LOCALFTP request:\n",
ArgoIndexFetcher(src=self.src).region(arg).fetcher.cname())
pass
def test_phy_float(self):
self.args = {}
self.args['float'] = [[2901623],
[2901623, 6901929]]
self.__testthis('phy')
def test_phy_profile(self):
self.args = {}
self.args['profile'] = [[6901929, 36],
[6901929, [5, 45]]]
self.__testthis('phy')
def test_phy_region(self):
self.args = {}
self.args['region'] = [[-60, -40, 40., 60.],
[-60, -40, 40., 60., '2007-08-01', '2007-09-01']]
self.__testthis('phy')
if __name__ == '__main__':
unittest.main()
| 45.735294
| 127
| 0.630788
|
86c014b076bfaa1d7da143bdd7cb29c10438d2f3
| 8,184
|
py
|
Python
|
youtube_dl/extractor/ivi.py
|
MOODesign/Youtube-videos-Download
|
730c0d12a06f349907481570f1f2890251f7a181
|
[
"Unlicense"
] | 16
|
2020-12-01T15:26:58.000Z
|
2022-02-24T23:12:14.000Z
|
youtube_dl/extractor/ivi.py
|
MOODesign/Youtube-videos-Download
|
730c0d12a06f349907481570f1f2890251f7a181
|
[
"Unlicense"
] | 5
|
2021-02-20T10:30:00.000Z
|
2021-06-01T21:12:31.000Z
|
youtube_dl/extractor/ivi.py
|
MOODesign/Youtube-videos-Download
|
730c0d12a06f349907481570f1f2890251f7a181
|
[
"Unlicense"
] | 7
|
2020-12-01T15:27:04.000Z
|
2022-01-09T23:21:53.000Z
|
# coding: utf-8
from __future__ import unicode_literals
import re
import json
from .common import InfoExtractor
from ..utils import (
ExtractorError,
int_or_none,
qualities,
)
class IviIE(InfoExtractor):
IE_DESC = 'ivi.ru'
IE_NAME = 'ivi'
_VALID_URL = r'https?://(?:www\.)?ivi\.(?:ru|tv)/(?:watch/(?:[^/]+/)?|video/player\?.*?videoId=)(?P<id>\d+)'
_GEO_BYPASS = False
_GEO_COUNTRIES = ['RU']
_TESTS = [
# Single movie
{
'url': 'http://www.ivi.ru/watch/53141',
'md5': '6ff5be2254e796ed346251d117196cf4',
'info_dict': {
'id': '53141',
'ext': 'mp4',
'title': 'Иван Васильевич меняет профессию',
'description': 'md5:b924063ea1677c8fe343d8a72ac2195f',
'duration': 5498,
'thumbnail': r're:^https?://.*\.jpg$',
},
'skip': 'Only works from Russia',
},
# Serial's series
{
'url': 'http://www.ivi.ru/watch/dvoe_iz_lartsa/9549',
'md5': '221f56b35e3ed815fde2df71032f4b3e',
'info_dict': {
'id': '9549',
'ext': 'mp4',
'title': 'Двое из ларца - Дело Гольдберга (1 часть)',
'series': 'Двое из ларца',
'season': 'Сезон 1',
'season_number': 1,
'episode': 'Дело Гольдберга (1 часть)',
'episode_number': 1,
'duration': 2655,
'thumbnail': r're:^https?://.*\.jpg$',
},
'skip': 'Only works from Russia',
},
{
# with MP4-HD720 format
'url': 'http://www.ivi.ru/watch/146500',
'md5': 'd63d35cdbfa1ea61a5eafec7cc523e1e',
'info_dict': {
'id': '146500',
'ext': 'mp4',
'title': 'Кукла',
'description': 'md5:ffca9372399976a2d260a407cc74cce6',
'duration': 5599,
'thumbnail': r're:^https?://.*\.jpg$',
},
'skip': 'Only works from Russia',
},
{
'url': 'https://www.ivi.tv/watch/33560/',
'only_matching': True,
},
]
# Sorted by quality
_KNOWN_FORMATS = (
'MP4-low-mobile', 'MP4-mobile', 'FLV-lo', 'MP4-lo', 'FLV-hi', 'MP4-hi',
'MP4-SHQ', 'MP4-HD720', 'MP4-HD1080')
def _real_extract(self, url):
video_id = self._match_id(url)
data = {
'method': 'da.content.get',
'params': [
video_id, {
'site': 's183',
'referrer': 'http://www.ivi.ru/watch/%s' % video_id,
'contentid': video_id
}
]
}
video_json = self._download_json(
'http://api.digitalaccess.ru/api/json/', video_id,
'Downloading video JSON', data=json.dumps(data))
if 'error' in video_json:
error = video_json['error']
origin = error['origin']
if origin == 'NotAllowedForLocation':
self.raise_geo_restricted(
msg=error['message'], countries=self._GEO_COUNTRIES)
elif origin == 'NoRedisValidData':
raise ExtractorError('Video %s does not exist' % video_id, expected=True)
raise ExtractorError(
'Unable to download video %s: %s' % (video_id, error['message']),
expected=True)
result = video_json['result']
quality = qualities(self._KNOWN_FORMATS)
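        # qualities() ranks formats by their position in _KNOWN_FORMATS, so later entries sort as higher quality.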
formats = [{
'url': x['url'],
'format_id': x.get('content_format'),
'quality': quality(x.get('content_format')),
} for x in result['files'] if x.get('url')]
self._sort_formats(formats)
title = result['title']
duration = int_or_none(result.get('duration'))
compilation = result.get('compilation')
episode = title if compilation else None
title = '%s - %s' % (compilation, title) if compilation is not None else title
thumbnails = [{
'url': preview['url'],
'id': preview.get('content_format'),
} for preview in result.get('preview', []) if preview.get('url')]
webpage = self._download_webpage(url, video_id)
season = self._search_regex(
r'<li[^>]+class="season active"[^>]*><a[^>]+>([^<]+)',
webpage, 'season', default=None)
season_number = int_or_none(self._search_regex(
r'<li[^>]+class="season active"[^>]*><a[^>]+data-season(?:-index)?="(\d+)"',
webpage, 'season number', default=None))
episode_number = int_or_none(self._search_regex(
r'[^>]+itemprop="episode"[^>]*>\s*<meta[^>]+itemprop="episodeNumber"[^>]+content="(\d+)',
webpage, 'episode number', default=None))
description = self._og_search_description(webpage, default=None) or self._html_search_meta(
'description', webpage, 'description', default=None)
return {
'id': video_id,
'title': title,
'series': compilation,
'season': season,
'season_number': season_number,
'episode': episode,
'episode_number': episode_number,
'thumbnails': thumbnails,
'description': description,
'duration': duration,
'formats': formats,
}
class IviCompilationIE(InfoExtractor):
IE_DESC = 'ivi.ru compilations'
IE_NAME = 'ivi:compilation'
_VALID_URL = r'https?://(?:www\.)?ivi\.ru/watch/(?!\d+)(?P<compilationid>[a-z\d_-]+)(?:/season(?P<seasonid>\d+))?$'
_TESTS = [{
'url': 'http://www.ivi.ru/watch/dvoe_iz_lartsa',
'info_dict': {
'id': 'dvoe_iz_lartsa',
'title': 'Двое из ларца (2006 - 2008)',
},
'playlist_mincount': 24,
}, {
'url': 'http://www.ivi.ru/watch/dvoe_iz_lartsa/season1',
'info_dict': {
'id': 'dvoe_iz_lartsa/season1',
'title': 'Двое из ларца (2006 - 2008) 1 сезон',
},
'playlist_mincount': 12,
}]
def _extract_entries(self, html, compilation_id):
return [
self.url_result(
'http://www.ivi.ru/watch/%s/%s' % (compilation_id, serie), IviIE.ie_key())
for serie in re.findall(
r'<a href="/watch/%s/(\d+)"[^>]+data-id="\1"' % compilation_id, html)]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
compilation_id = mobj.group('compilationid')
season_id = mobj.group('seasonid')
if season_id is not None: # Season link
season_page = self._download_webpage(
url, compilation_id, 'Downloading season %s web page' % season_id)
playlist_id = '%s/season%s' % (compilation_id, season_id)
playlist_title = self._html_search_meta('title', season_page, 'title')
entries = self._extract_entries(season_page, compilation_id)
else: # Compilation link
compilation_page = self._download_webpage(url, compilation_id, 'Downloading compilation web page')
playlist_id = compilation_id
playlist_title = self._html_search_meta('title', compilation_page, 'title')
seasons = re.findall(
r'<a href="/watch/%s/season(\d+)' % compilation_id, compilation_page)
if not seasons: # No seasons in this compilation
entries = self._extract_entries(compilation_page, compilation_id)
else:
entries = []
for season_id in seasons:
season_page = self._download_webpage(
'http://www.ivi.ru/watch/%s/season%s' % (compilation_id, season_id),
compilation_id, 'Downloading season %s web page' % season_id)
entries.extend(self._extract_entries(season_page, compilation_id))
return self.playlist_result(entries, playlist_id, playlist_title)
| 37.031674
| 119
| 0.527981
|
2c5346e665cece1a46c4ff2b17f9543608c2517c
| 13,333
|
py
|
Python
|
scormxblock/scormxblock.py
|
eduNEXT/edx_xblock_scorm
|
d40309606eca36af72a59cc3c90abe1700076c5d
|
[
"Apache-2.0"
] | null | null | null |
scormxblock/scormxblock.py
|
eduNEXT/edx_xblock_scorm
|
d40309606eca36af72a59cc3c90abe1700076c5d
|
[
"Apache-2.0"
] | 2
|
2017-11-21T20:38:42.000Z
|
2021-03-11T03:35:20.000Z
|
scormxblock/scormxblock.py
|
eduNEXT/edx_xblock_scorm
|
d40309606eca36af72a59cc3c90abe1700076c5d
|
[
"Apache-2.0"
] | 1
|
2021-06-07T09:03:19.000Z
|
2021-06-07T09:03:19.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import mimetypes
import re
import pkg_resources
import zipfile
import xml.etree.ElementTree as ET
from urllib.parse import urljoin, urlparse, unquote
import boto3
from os import path, walk
from django.conf import settings
from django.template import Context, Template
from webob import Response
from celery.task import task
from fs.copy import copy_fs
from fs.tempfs import TempFS
from djpyfs import djpyfs
from xblock.core import XBlock
from xblock.fields import Scope, String, Float, Boolean, Dict
from xblock.fragment import Fragment
# Make '_' a no-op so we can scrape strings
_ = lambda text: text
FILES_THRESHOLD_FOR_ASYNC = getattr(settings, 'SCORMXBLOCK_ASYNC_THRESHOLD', 150)
DEFAULT_CONTENT_TYPE = 'application/octet-stream'
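# Packages with fewer files than the threshold are uploaded synchronously; larger ones are
# handed off to the s3_upload celery task so the studio request does not time out.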
@task(name='scormxblock.scormxblock.s3_upload', routing_key=settings.HIGH_PRIORITY_QUEUE)
def s3_upload(all_content, temp_directory, dest_dir):
"""
Actual handling of the s3 uploads.
"""
s3 = boto3.resource('s3',
aws_access_key_id=settings.DJFS.get('aws_access_key_id'),
aws_secret_access_key=settings.DJFS.get('aws_secret_access_key'),
endpoint_url=settings.DJFS.get('endpoint_url'),
region_name=settings.DJFS.get('region_name'),
)
bucket = s3.Bucket(settings.DJFS.get('bucket'))
for filepath in all_content:
sourcepath = path.normpath(path.join(temp_directory.root_path, filepath))
destpath = path.normpath(path.join(dest_dir, filepath))
# It's possible that the type is not in the mimetypes list.
content_type = mimetypes.guess_type(sourcepath)[0] or DEFAULT_CONTENT_TYPE
        if isinstance(content_type, bytes):  # In some Python versions, guess_type returns bytes instead of str.
content_type = content_type.decode('utf-8')
bucket.upload_file(
sourcepath,
destpath,
ExtraArgs={'ACL': 'public-read', 'ContentType': content_type},
)
def upload_all_content(temp_directory, fs):
"""
This standalone function handles the bulk upload of unzipped content.
"""
if not settings.DJFS.get('type', 'osfs') == "s3fs":
copy_fs(temp_directory, fs)
return
dest_dir = fs.dir_path
all_content = []
for dir_, _, files in walk(temp_directory.root_path):
for filename in files:
rel_dir = path.relpath(dir_, temp_directory.root_path)
rel_file = path.join(rel_dir, filename)
all_content.append(rel_file)
if len(all_content) < FILES_THRESHOLD_FOR_ASYNC:
# We estimate no problem here, just upload the files
s3_upload(all_content, temp_directory, dest_dir)
else:
# The raw number of files is going to make this request time out. Use celery instead
s3_upload.apply_async((all_content, temp_directory, dest_dir), serializer='pickle')
class ScormXBlock(XBlock):
display_name = String(
display_name=_("Display Name"),
help=_("Display name for this module"),
default="Scorm",
scope=Scope.settings,
)
scorm_file = String(
display_name=_("Upload scorm file"),
scope=Scope.settings,
)
version_scorm = String(
default="SCORM_12",
scope=Scope.settings,
)
# save completion_status for SCORM_2004
lesson_status = String(
scope=Scope.user_state,
default='not attempted'
)
success_status = String(
scope=Scope.user_state,
default='unknown'
)
lesson_location = String(
scope=Scope.user_state,
default=''
)
suspend_data = String(
scope=Scope.user_state,
default=''
)
data_scorm = Dict(
scope=Scope.user_state,
default={}
)
lesson_score = Float(
scope=Scope.user_state,
default=0
)
weight = Float(
default=1,
scope=Scope.settings
)
has_score = Boolean(
display_name=_("Scored"),
help=_("Select True if this component will receive a numerical score from the Scorm"),
default=False,
scope=Scope.settings
)
icon_class = String(
default="video",
scope=Scope.settings,
)
has_author_view = True
def resource_string(self, path):
"""Handy helper for getting resources from our kit."""
data = pkg_resources.resource_string(__name__, path)
return data.decode("utf8")
def student_view(self, context=None):
context_html = self.get_context_student()
template = self.render_template('static/html/scormxblock.html', context_html)
frag = Fragment(template)
frag.add_css(self.resource_string("static/css/scormxblock.css"))
frag.add_javascript(self.resource_string("static/js/src/scormxblock.js"))
settings = {
'version_scorm': self.version_scorm
}
frag.initialize_js('ScormXBlock', json_args=settings)
return frag
def studio_view(self, context=None):
context_html = self.get_context_studio()
template = self.render_template('static/html/studio.html', context_html)
frag = Fragment(template)
frag.add_css(self.resource_string("static/css/scormxblock.css"))
frag.add_javascript(self.resource_string("static/js/src/studio.js"))
frag.initialize_js('ScormStudioXBlock')
return frag
def author_view(self, context):
context_html = self.get_context_student()
html = self.render_template('static/html/author_view.html', context_html)
frag = Fragment(html)
return frag
@XBlock.handler
def studio_submit(self, request, suffix=''):
self.display_name = request.params['display_name']
self.has_score = request.params['has_score']
self.icon_class = 'problem' if self.has_score == 'True' else 'video'
if hasattr(request.params['file'], 'file'):
file = request.params['file'].file
zip_file = zipfile.ZipFile(file, 'r')
            # Create a temporary directory where the zip will be extracted.
temp_directory = TempFS()
# Extract the files in the temp directory just created.
zip_file.extractall(temp_directory.root_path)
manifest_path = '{}/imsmanifest.xml'.format(temp_directory.root_path)
with open(manifest_path, 'r') as manifest_file:
manifest = manifest_file.read()
self.set_fields_xblock(manifest)
# Now the part where we copy the data fast fast fast
fs = djpyfs.get_filesystem(self.location.block_id)
            upload_all_content(temp_directory, fs)
# Destroy temp directory after all files are copied.
temp_directory.close()
return Response({'result': 'success'}, content_type='application/json')
@XBlock.json_handler
def scorm_get_value(self, data, suffix=''):
name = data.get('name')
if name in ['cmi.core.lesson_status', 'cmi.completion_status']:
return {'value': self.lesson_status}
elif name == 'cmi.success_status':
return {'value': self.success_status}
elif name == 'cmi.core.lesson_location':
return {'value': self.lesson_location}
elif name == 'cmi.suspend_data':
return {'value': self.suspend_data}
else:
return {'value': self.data_scorm.get(name, '')}
@XBlock.json_handler
def scorm_set_value(self, data, suffix=''):
context = {'result': 'success'}
name = data.get('name')
if name in ['cmi.core.lesson_status', 'cmi.completion_status']:
self.lesson_status = data.get('value')
if self.has_score and data.get('value') in ['completed', 'failed', 'passed']:
self.publish_grade()
context.update({"lesson_score": self.format_lesson_score})
elif name == 'cmi.success_status':
self.success_status = data.get('value')
if self.has_score:
if self.success_status == 'unknown':
self.lesson_score = 0
self.publish_grade()
context.update({"lesson_score": self.format_lesson_score})
elif name in ['cmi.core.score.raw', 'cmi.score.raw'] and self.has_score:
self.lesson_score = float(data.get('value', 0))/100.0
context.update({"lesson_score": self.format_lesson_score})
elif name == 'cmi.core.lesson_location':
self.lesson_location = data.get('value', '')
elif name == 'cmi.suspend_data':
self.suspend_data = data.get('value', '')
else:
self.data_scorm[name] = data.get('value', '')
context.update({"completion_status": self.get_completion_status()})
return context
def publish_grade(self):
if self.lesson_status == 'failed' or (self.version_scorm == 'SCORM_2004' and self.success_status in ['failed', 'unknown']):
self.runtime.publish(
self,
'grade',
{
'value': 0,
'max_value': self.weight,
})
else:
self.runtime.publish(
self,
'grade',
{
'value': self.lesson_score,
'max_value': self.weight,
})
def max_score(self):
"""
Return the maximum score possible.
"""
return self.weight if self.has_score else None
def get_context_studio(self):
return {
'field_display_name': self.fields['display_name'],
'display_name_value': self.display_name,
'field_scorm_file': self.fields['scorm_file'],
'field_has_score': self.fields['has_score'],
'has_score_value': self.has_score
}
def get_context_student(self):
"""
Returns the necessary context to display the units when in the LMS
"""
fs = djpyfs.get_filesystem(self.location.block_id)
scorm_file_path = ''
if self.scorm_file:
scorm_file_path = fs.get_url(self.scorm_file)
            # Required when working with an S3 djfs configuration and a proxy for the files
# so that the Same-origin security policy does not block the files
if settings.DJFS.get('use_proxy', False):
proxy_file = scorm_file_path.split(settings.DJFS.get('prefix'))[-1]
scorm_file_path = "/{}{}".format(settings.DJFS.get('proxy_root'), proxy_file)
if settings.DJFS.get('remove_signature', False):
scorm_file_path = urljoin(scorm_file_path, urlparse(scorm_file_path).path)
scorm_file_path = unquote(scorm_file_path)
return {
'scorm_file_path': scorm_file_path,
'lesson_score': self.format_lesson_score,
'weight': self.weight,
'has_score': self.has_score,
'completion_status': self.get_completion_status()
}
def render_template(self, template_path, context):
template_str = self.resource_string(template_path)
template = Template(template_str)
return template.render(Context(context))
def set_fields_xblock(self, manifest):
path_index_page = 'index.html'
try:
tree = ET.fromstring(manifest)
# Getting the namespace from the tree does not have a clean API.
# We use the simplest method outlined here: https://stackoverflow.com/a/28283119/2072496
namespace = tree.tag.split('}')[0].strip('{')
            # By the standard, a namespace is a URI.
            # We check that the namespace found in the tree is in fact a URL;
            # if not, we fall back to an empty namespace and look for the resource tag directly.
namespace = namespace if namespace.startswith("http") else None
if namespace:
resource = tree.find('{{{0}}}resources/{{{0}}}resource'.format(namespace))
schemaversion = tree.find('{{{0}}}metadata/{{{0}}}schemaversion'.format(namespace))
else:
resource = tree.find('resources/resource')
schemaversion = tree.find('metadata/schemaversion')
        if (schemaversion is not None) and (re.match(r'^1\.2$', schemaversion.text) is None):
self.version_scorm = 'SCORM_2004'
path_index_page = resource.get("href")
except IOError:
pass
self.scorm_file = path_index_page
def get_completion_status(self):
completion_status = self.lesson_status
if self.version_scorm == 'SCORM_2004' and self.success_status != 'unknown':
completion_status = self.success_status
return completion_status
@property
def format_lesson_score(self):
return '{:.2f}'.format(self.lesson_score)
@staticmethod
def workbench_scenarios():
"""A canned scenario for display in the workbench."""
return [
("ScormXBlock",
"""<vertical_demo>
<scormxblock/>
</vertical_demo>
"""),
]
| 35.460106
| 131
| 0.621091
|
c69baeb3bf94cf2cf4e276a525d4a23bae925b6e
| 734
|
py
|
Python
|
zengo/migrations/0005_ticket_priority.py
|
lukeburden/django-zengo
|
7235379f8d740bedd76412c8d6aaca076300fac9
|
[
"MIT"
] | 10
|
2019-02-11T19:13:41.000Z
|
2021-12-10T21:23:51.000Z
|
zengo/migrations/0005_ticket_priority.py
|
lukeburden/django-zengo
|
7235379f8d740bedd76412c8d6aaca076300fac9
|
[
"MIT"
] | 4
|
2019-01-03T00:02:31.000Z
|
2020-11-11T01:31:06.000Z
|
zengo/migrations/0005_ticket_priority.py
|
lukeburden/django-zengo
|
7235379f8d740bedd76412c8d6aaca076300fac9
|
[
"MIT"
] | 3
|
2019-02-28T15:58:24.000Z
|
2020-06-09T02:45:42.000Z
|
# Generated by Django 2.2.10 on 2020-02-28 18:25
from django.db import migrations
import konst.models.fields
class Migration(migrations.Migration):
dependencies = [
("zengo", "0004_zendeskuser_alias"),
]
operations = [
migrations.AddField(
model_name="ticket",
name="priority",
field=konst.models.fields.ConstantChoiceCharField(
choices=[
("urgent", "urgent"),
("high", "high"),
("normal", "normal"),
("low", "low"),
],
default="normal",
max_length=8,
),
preserve_default=False,
),
]
| 23.677419
| 62
| 0.475477
|
5227ffe11653a0053cd8fb3716c3853421d9559f
| 5,127
|
py
|
Python
|
server/timing_engine.py
|
cmusatyalab/openscout
|
87bec509b8c1e343dbeb6f7f241b143b3686813b
|
[
"Apache-2.0"
] | 5
|
2020-07-31T12:49:04.000Z
|
2021-09-26T23:09:20.000Z
|
server/timing_engine.py
|
cmusatyalab/openscout
|
87bec509b8c1e343dbeb6f7f241b143b3686813b
|
[
"Apache-2.0"
] | 6
|
2020-08-20T16:44:55.000Z
|
2022-02-10T02:12:26.000Z
|
server/timing_engine.py
|
cmusatyalab/openscout
|
87bec509b8c1e343dbeb6f7f241b143b3686813b
|
[
"Apache-2.0"
] | 2
|
2020-09-07T05:47:30.000Z
|
2021-10-01T14:28:53.000Z
|
# OpenScout
# - Distributed Automated Situational Awareness
#
# Author: Thomas Eiszler <teiszler@andrew.cmu.edu>
#
# Copyright (C) 2020 Carnegie Mellon University
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
from openscout_object_engine import OpenScoutObjectEngine
from openscout_face_engine import OpenFaceEngine, MSFaceEngine
import time
# TODO: these timing engines need work, as the metrics here are still inherited from OpenRTiST
class TimingOpenFaceEngine(OpenFaceEngine):
def __init__(self, args):
        super().__init__(args)
self.count = 0
self.lasttime = time.time()
self.lastcount = 0
self.lastprint = self.lasttime
def handle(self, from_client):
self.t0 = time.time()
result = super().handle(from_client)
self.t3 = time.time()
self.count += 1
if self.t3 - self.lastprint > 5:
print("pre {0:.1f} ms, ".format((self.t1 - self.t0) * 1000), end="")
print("infer {0:.1f} ms, ".format((self.t2 - self.t1) * 1000), end="")
print("post {0:.1f} ms, ".format((self.t3 - self.t2) * 1000), end="")
print("wait {0:.1f} ms, ".format((self.t0 - self.lasttime) * 1000), end="")
print("fps {0:.2f}".format(1.0 / (self.t3 - self.lasttime)))
print(
"avg fps: {0:.2f}".format(
(self.count - self.lastcount) / (self.t3 - self.lastprint)
)
)
print()
self.lastcount = self.count
self.lastprint = self.t3
self.lasttime = self.t3
return result
def infer(self, image):
self.t1 = time.time()
results = super().infer(image)
self.t2 = time.time()
return results
class TimingMSFaceEngine(MSFaceEngine):
def __init__(self, args):
        super().__init__(args)
self.count = 0
self.lasttime = time.time()
self.lastcount = 0
self.lastprint = self.lasttime
def handle(self, from_client):
self.t0 = time.time()
result = super().handle(from_client)
self.t3 = time.time()
self.count += 1
if self.t3 - self.lastprint > 5:
print("pre {0:.1f} ms, ".format((self.t1 - self.t0) * 1000), end="")
print("infer {0:.1f} ms, ".format((self.t2 - self.t1) * 1000), end="")
print("post {0:.1f} ms, ".format((self.t3 - self.t2) * 1000), end="")
print("wait {0:.1f} ms, ".format((self.t0 - self.lasttime) * 1000), end="")
print("fps {0:.2f}".format(1.0 / (self.t3 - self.lasttime)))
print(
"avg fps: {0:.2f}".format(
(self.count - self.lastcount) / (self.t3 - self.lastprint)
)
)
print()
self.lastcount = self.count
self.lastprint = self.t3
self.lasttime = self.t3
return result
def detection(self, image):
self.t1 = time.time()
results = super().detection(image)
self.t2 = time.time()
return results
def recognition(self, image):
self.t2 = time.time()
        results = super().recognition(image)
self.t3 = time.time()
return results
class TimingObjectEngine(OpenScoutObjectEngine):
def __init__(self, args):
        super().__init__(args)
self.count = 0
self.lasttime = time.time()
self.lastcount = 0
self.lastprint = self.lasttime
def handle(self, from_client):
self.t0 = time.time()
result = super().handle(from_client)
self.t3 = time.time()
self.count += 1
if self.t3 - self.lastprint > 5:
print("pre {0:.1f} ms, ".format((self.t1 - self.t0) * 1000), end="")
print("infer {0:.1f} ms, ".format((self.t2 - self.t1) * 1000), end="")
print("post {0:.1f} ms, ".format((self.t3 - self.t2) * 1000), end="")
print("wait {0:.1f} ms, ".format((self.t0 - self.lasttime) * 1000), end="")
print("fps {0:.2f}".format(1.0 / (self.t3 - self.lasttime)))
print(
"avg fps: {0:.2f}".format(
(self.count - self.lastcount) / (self.t3 - self.lastprint)
)
)
print()
self.lastcount = self.count
self.lastprint = self.t3
self.lasttime = self.t3
return result
def inference(self, preprocessed):
self.t1 = time.time()
results = super().inference(preprocessed)
self.t2 = time.time()
return results
| 33.730263
| 92
| 0.560952
|
bad6745951332a46328227c7bb075b22ba409ff5
| 1,022
|
py
|
Python
|
src/sage/symbolic/all.py
|
UCD4IDS/sage
|
43474c96d533fd396fe29fe0782d44dc7f5164f7
|
[
"BSL-1.0"
] | 1,742
|
2015-01-04T07:06:13.000Z
|
2022-03-30T11:32:52.000Z
|
src/sage/symbolic/all.py
|
UCD4IDS/sage
|
43474c96d533fd396fe29fe0782d44dc7f5164f7
|
[
"BSL-1.0"
] | 66
|
2015-03-19T19:17:24.000Z
|
2022-03-16T11:59:30.000Z
|
src/sage/symbolic/all.py
|
UCD4IDS/sage
|
43474c96d533fd396fe29fe0782d44dc7f5164f7
|
[
"BSL-1.0"
] | 495
|
2015-01-10T10:23:18.000Z
|
2022-03-24T22:06:11.000Z
|
from sage.misc.lazy_import import lazy_import
lazy_import("sage.symbolic.expression", "I", deprecation=(18036,
"import I from sage.symbolic.constants for the imaginary unit viewed as an element of SR, or from sage.rings.imaginary_unit for the element of ZZ[i]"))
lazy_import("sage.symbolic.expression", "I", as_="i", deprecation=(18036,
"import I from sage.symbolic.constants for the imaginary unit viewed as an element of SR, or from sage.rings.imaginary_unit for the element of ZZ[i]"))
import sage.symbolic.expression # initialize pynac before .ring
from .ring import SR
from .constants import (pi, e, NaN, golden_ratio, log2, euler_gamma, catalan,
khinchin, twinprime, mertens, glaisher)
from .expression import Expression, solve_diophantine, hold
from .callable import CallableSymbolicExpressionRing
from sage.symbolic.relation import solve, solve_mod, solve_ineq
from sage.symbolic.assumptions import assume, forget, assumptions, assuming
from .units import units
π = pi
| 48.666667
| 159
| 0.765166
|
26ca150429aed911f693600271272acf178310dc
| 69
|
py
|
Python
|
user_modified/sample_testcases.py
|
stevennovaryo/tctune
|
56d64ea70c544927e6749d1b0b8a25ad51cf71f4
|
[
"MIT"
] | null | null | null |
user_modified/sample_testcases.py
|
stevennovaryo/tctune
|
56d64ea70c544927e6749d1b0b8a25ad51cf71f4
|
[
"MIT"
] | null | null | null |
user_modified/sample_testcases.py
|
stevennovaryo/tctune
|
56d64ea70c544927e6749d1b0b8a25ad51cf71f4
|
[
"MIT"
] | null | null | null |
sample1 = [
'5 5',
'1 2 3 4 5',
]
sample2 = [
'1 1',
'10',
]
| 7.666667
| 14
| 0.362319
|
aee50159ccadd369338014364bbd6ca4193fdaaf
| 429
|
py
|
Python
|
src/backend/socialapp/jinja2.py
|
shouyang/group-CMPUT404-project
|
a8d114b493ad2786c90e8b1f10c087fa4d5c81de
|
[
"MIT"
] | 2
|
2019-02-04T17:55:48.000Z
|
2019-03-11T23:22:14.000Z
|
src/backend/socialapp/jinja2.py
|
shouyang/group-CMPUT404-project
|
a8d114b493ad2786c90e8b1f10c087fa4d5c81de
|
[
"MIT"
] | 7
|
2019-02-27T17:16:19.000Z
|
2019-03-19T20:13:56.000Z
|
src/backend/socialapp/jinja2.py
|
shouyang/group-CMPUT404-project
|
a8d114b493ad2786c90e8b1f10c087fa4d5c81de
|
[
"MIT"
] | 2
|
2019-04-10T17:01:07.000Z
|
2019-04-17T01:08:16.000Z
|
""" This file configures the environment settings for jinja2 templates. See the jinja 2 folder for actual templates.
"""
from django.contrib.staticfiles.storage import staticfiles_storage
from django.urls import reverse
from jinja2 import Environment
def environment(**options):
env = Environment(**options)
env.globals.update({
'static': staticfiles_storage.url,
'url': reverse,
})
return env
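# A minimal usage sketch (assumed settings module, not part of this file): point Django's
# Jinja2 backend at this factory via
#   TEMPLATES = [{"BACKEND": "django.template.backends.jinja2.Jinja2",
#                 "OPTIONS": {"environment": "socialapp.jinja2.environment"}}]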
| 26.8125
| 116
| 0.729604
|
8cf20e68234b76dcdd56e15980d95a9221d9bb00
| 4,062
|
py
|
Python
|
python/bin/changename.py
|
beckerrh/simfemsrc
|
d857eb6f6f8627412d4f9d89a871834c756537db
|
[
"MIT"
] | null | null | null |
python/bin/changename.py
|
beckerrh/simfemsrc
|
d857eb6f6f8627412d4f9d89a871834c756537db
|
[
"MIT"
] | 1
|
2019-01-31T10:59:11.000Z
|
2019-01-31T10:59:11.000Z
|
python/bin/changename.py
|
beckerrh/simfemsrc
|
d857eb6f6f8627412d4f9d89a871834c756537db
|
[
"MIT"
] | null | null | null |
import sys, os, subprocess, shutil
import argparse
# ------------------------------------- #
def main():
parser = argparse.ArgumentParser(description='change name')
# parser.add_argument('scriptandargs', help='script and args to launch', nargs='*')
parser.add_argument('-old', type=str, help='old name', required=True)
parser.add_argument('-new', type=str, help='new name', required=True)
parser.add_argument('-dirname', type=str, help='directory to change name', required=True)
    parser.add_argument('--dry', default=False, action="store_true", help='dry run')
args = vars(parser.parse_args(sys.argv[1:]))
changename(args)
def is_binary(filename):
fin = open(filename, 'rb')
try:
CHUNKSIZE = 1024
while 1:
chunk = fin.read(CHUNKSIZE)
            if b'\0' in chunk:  # found a null byte: treat the file as binary
return True
if len(chunk) < CHUNKSIZE:
break # done
finally:
fin.close()
return False
class Replace(object):
def __init__(self, oldname, newname):
self.old = oldname
self.new = newname
self.oldupper = self.old.upper()
self.oldlower = self.old.lower()
self.newupper = self.new.upper()
self.newlower = self.new.lower()
print ("replace {} ==> {}".format(self.old, self.new))
print ("replace {} ==> {}".format(self.oldlower, self.oldlower))
print ("replace {} ==> {}".format(self.oldupper, self.oldupper))
# sys.exit(1)
def __call__(self, toto):
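        # Replace the exact, lower-case and upper-case variants of the old name in one pass.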
# return toto.replace(self.old,self.new)
return toto.replace(self.oldupper,self.newupper).replace(self.oldlower,self.newlower).replace(self.old,self.new)
def changename(args):
directory = args['dirname']
oldname = args['old']
newname = args['new']
dry = args['dry']
exclude = ['.svn', '.DS_Store']
if not os.path.isdir(directory):
raise ValueError("directory does not exists " + directory)
if dry:
replace = Replace(oldname, newname)
for root, dirs, files in os.walk(directory, topdown=True):
dirs[:] = [d for d in dirs if d not in exclude]
if root.find(".svn") !=-1:
raise NameError("@@@ ERROR in toto: subfolders must not contain .svn "+ root)
rootnew = replace(root)
print('directory {} --> {}'.format(root,rootnew))
for file in files:
fileroot = root + '/' + file
filenew = replace(fileroot)
print('file {} --> {}'.format(fileroot,filenew))
return
backupdirectory = directory + '.old'
if os.path.isdir(backupdirectory):
raise ValueError("directory exists " + backupdirectory)
shutil.copytree(directory, backupdirectory)
shutil.rmtree(directory)
replace = Replace(oldname, newname)
for root, dirs, files in os.walk(backupdirectory, topdown=True):
dirs[:] = [d for d in dirs if d not in exclude]
if root.find(".svn") !=-1:
raise NameError("@@@ ERROR in toto: subfolders must not contain .svn "+ root)
rootnew = replace(root.replace(backupdirectory,directory))
print('rootnew', rootnew)
os.mkdir(rootnew)
for file in files:
fileroot = root + '/' + file
# print 'fileroot: ', fileroot
# continue
filenew = replace(fileroot.replace(backupdirectory,directory))
            # Copy archives, images and other binary files verbatim instead of rewriting them.
            if fileroot.find('.tgz') != -1 or fileroot.find('.png') != -1 or is_binary(fileroot):
shutil.copyfile(fileroot, filenew)
continue
print('filenew', filenew)
infile = open(fileroot, 'r')
outfile = open(filenew, 'w')
            try:
                toto = infile.read()
            except Exception:
                print("cannot read file", fileroot)
                infile.close()
                outfile.close()
                continue
totonew = replace(toto)
# print 'totonew', totonew
outfile.write(totonew)
infile.close()
outfile.close()
# ------------------------------------- #
if __name__ == '__main__':
main()
| 37.266055
| 117
| 0.577794
|
fd311817651d876f88e4e6f8595578d66b52e55c
| 429
|
py
|
Python
|
2.py
|
juandarr/ProjectEuler
|
951705ac62f550d7fbecdc3f35ab8c38b53b9225
|
[
"MIT"
] | null | null | null |
2.py
|
juandarr/ProjectEuler
|
951705ac62f550d7fbecdc3f35ab8c38b53b9225
|
[
"MIT"
] | null | null | null |
2.py
|
juandarr/ProjectEuler
|
951705ac62f550d7fbecdc3f35ab8c38b53b9225
|
[
"MIT"
] | null | null | null |
"""
Adds the even-valued terms of the Fibonacci series below the limit
Author: Juan Rios
"""
def fibonacci_sum(limit):
a = 1
b = 2
sum = 0
while (b<limit):
if (b%2==0):
sum += b
temp = b
b += a
a = temp
return sum
if __name__ == "__main__":
limit = 4000000
    print('The sum of even-valued Fibonacci terms below {0} is {1}'.format(limit, fibonacci_sum(limit)))
| 21.45
| 104
| 0.566434
|
78debf21f3888e71a11ea8c35ab035591d0b947a
| 2,870
|
py
|
Python
|
models/bigru_attn/model.py
|
mjc92/JavaScriptAutoComplete
|
6eed02a9f8ef084208bf45d8a81a52d86e247810
|
[
"CNRI-Python"
] | null | null | null |
models/bigru_attn/model.py
|
mjc92/JavaScriptAutoComplete
|
6eed02a9f8ef084208bf45d8a81a52d86e247810
|
[
"CNRI-Python"
] | null | null | null |
models/bigru_attn/model.py
|
mjc92/JavaScriptAutoComplete
|
6eed02a9f8ef084208bf45d8a81a52d86e247810
|
[
"CNRI-Python"
] | null | null | null |
import torch
import torch.nn as nn
import numpy as np
from torch.autograd import Variable
import torch.nn.functional as F
# RNN Based Language Model
class biGRU_attn(nn.Module):
def __init__(self, vocab_size, embed_size, hidden_size, num_layers, sos):
super(biGRU_attn, self).__init__()
self.embed = nn.Embedding(vocab_size, embed_size)
self.encoder = nn.GRU(embed_size, hidden_size, num_layers,
batch_first=True, bidirectional=True)
self.decoder = nn.GRU(embed_size + hidden_size*2, hidden_size, num_layers,
batch_first=True)
self.W1 = nn.Linear(hidden_size*2, hidden_size)
self.W2 = nn.Linear(hidden_size, hidden_size*2)
self.linear = nn.Linear(hidden_size, vocab_size)
self.init_weights()
self.sos = sos
self.hidden_size = hidden_size
def init_weights(self):
self.embed.weight.data.uniform_(-0.1, 0.1)
self.linear.bias.data.fill_(0)
self.linear.weight.data.uniform_(-0.1, 0.1)
def forward(self, enc_in, dec_in, teacher_forcing=True):
# Encoder
enc_embedded = self.embed(enc_in)
encoded, _ = self.encoder(enc_embedded)
# Decoder
sos = Variable(torch.LongTensor(np.ones([dec_in.size(0),1],dtype=int)*self.sos)).cuda()
dec_in = torch.cat([sos,dec_in[:,:-1]],dim=1)
dec_embedded = self.embed(dec_in)
state = self.W1(encoded[:,-1]).unsqueeze(0)
outputs = []
context = Variable(torch.FloatTensor(dec_in.size(0),
1,self.hidden_size*2).zero_()).cuda()# get initial context, [b x 1 x h*2]
for i in range(dec_in.size(1)):
if teacher_forcing==True:
input = torch.cat([context,dec_embedded[:,i].unsqueeze(1)],dim=2)
else:
if i==0:
input = dec_embedded[:,0].unsqueeze(1)
else:
next_words = self.linear(out.squeeze())
next_idx = next_words.max(1)[1]
input = self.embed(next_idx).unsqueeze(1)
input = torch.cat([context,input],dim=2)
out, state = self.decoder(input, state)
comp = self.W2(state) # [batch x hidden*2]
scores = torch.bmm(encoded,comp.view(comp.size(1),-1,1)) # [b x seq x 1]
scores = F.softmax(scores)
context = torch.bmm(scores.view(scores.size(0),1,-1),encoded) # [b x 1 x h*2]
outputs.append(out)
outputs = torch.cat(outputs,dim=1) # [b x seq x h]
# outputs = outputs.contiguous().view(-1, outputs.size(2))
# Decode hidden states of all time step
outputs = self.linear(outputs)
outputs = outputs.view(dec_in.size(0),dec_in.size(1),-1)
return outputs
| 44.84375
| 95
| 0.583275
|
268182fbd42d98a50743f5e2c61ce34f101b0778
| 201
|
py
|
Python
|
frappe/core/doctype/data_export/data_export.py
|
oryxsolutions/frappe
|
d193ea22d17ca40d57432040a8afad72287d9e23
|
[
"MIT"
] | null | null | null |
frappe/core/doctype/data_export/data_export.py
|
oryxsolutions/frappe
|
d193ea22d17ca40d57432040a8afad72287d9e23
|
[
"MIT"
] | null | null | null |
frappe/core/doctype/data_export/data_export.py
|
oryxsolutions/frappe
|
d193ea22d17ca40d57432040a8afad72287d9e23
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and contributors
# License: MIT. See LICENSE
from frappe.model.document import Document
class DataExport(Document):
pass
| 20.1
| 68
| 0.731343
|
5408355367545c3ecfb6f1d71d3436f78737dd8e
| 1,212
|
py
|
Python
|
tools/split.py
|
thangnx183/kaggle-understanding-clouds
|
15ad2a9029958262437b899cb00525579da23911
|
[
"BSD-2-Clause"
] | 207
|
2019-11-21T19:07:17.000Z
|
2022-03-28T10:53:57.000Z
|
tools/split.py
|
ChasingStar95/kaggle-understanding-clouds
|
898319b564deab02b4267cc658bbebdbb15c49de
|
[
"BSD-2-Clause"
] | 12
|
2019-12-04T11:32:30.000Z
|
2022-03-12T00:06:11.000Z
|
tools/split.py
|
ChasingStar95/kaggle-understanding-clouds
|
898319b564deab02b4267cc658bbebdbb15c49de
|
[
"BSD-2-Clause"
] | 60
|
2019-11-21T17:32:56.000Z
|
2022-03-28T10:53:58.000Z
|
import tqdm
import numpy as np
import pandas as pd
from iterstrat.ml_stratifiers import MultilabelStratifiedKFold
N_SPLITS = 11
TRAIN_CSV_PATH = 'data/train.csv'
LABEL_MAP = {
'Fish': 0,
'Flower': 1,
'Gravel': 2,
'Sugar': 3
}
df_train = pd.read_csv(TRAIN_CSV_PATH)
df_train['Image'] = df_train.Image_Label.map(lambda v: v[:v.find('_')])
df_train['Label'] = df_train.Image_Label.map(lambda v: v[v.find('_')+1:])
df_train['LabelIndex'] = df_train.Label.map(lambda v: LABEL_MAP[v])
X = []
y = []
image_ids = []
df_group = df_train.groupby('Image')
for i, (key, df) in tqdm.tqdm(enumerate(df_group), total=len(df_group)):
X.append([i])
ml = np.array([0,0,0,0])
df = df.dropna()
ml[np.array(df.LabelIndex)-1] = 1
y.append(ml)
image_ids.append(key)
random_state = 1234
mskf = MultilabelStratifiedKFold(n_splits=N_SPLITS, random_state=random_state)
df_train['Fold'] = 0
df_train = df_train.set_index('Image')
for f, (train_index, test_index) in enumerate(mskf.split(X, y)):
for i in tqdm.tqdm(test_index):
df_train.loc[image_ids[i], 'Fold'] = f
df_train = df_train.reset_index()
df_train.to_csv(f'data/train.ver0.csv', index=False)
| 25.25
| 78
| 0.673267
|
a51470f31981901136165bf0a4b71aac1611ff29
| 6,024
|
py
|
Python
|
app/views.py
|
Evohmike/Awwards
|
a21c669ac4b76047bc4d6e960fd91258ea2c951d
|
[
"MIT"
] | null | null | null |
app/views.py
|
Evohmike/Awwards
|
a21c669ac4b76047bc4d6e960fd91258ea2c951d
|
[
"MIT"
] | 3
|
2021-03-18T21:16:10.000Z
|
2021-09-08T00:32:46.000Z
|
app/views.py
|
Evohmike/Awwards
|
a21c669ac4b76047bc4d6e960fd91258ea2c951d
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render, redirect , get_object_or_404
from django.http import HttpResponse, Http404
from django.contrib.auth import login, authenticate
from .forms import *
from django.contrib.sites.shortcuts import get_current_site
from django.utils.encoding import force_bytes, force_text
from django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode
from django.template.loader import render_to_string
from .tokens import account_activation_token
from django.contrib.auth.models import User
from django.core.mail import EmailMessage
import datetime as dt
from django.contrib.auth.decorators import login_required
from .models import *
# Create your views here.
def signup(request):
if request.method == 'POST':
form = SignupForm(request.POST)
if form.is_valid():
user = form.save(commit=False)
user.is_active = False
user.save()
current_site = get_current_site(request)
mail_subject = 'Activate your blog account.'
message = render_to_string('acc_active_email.html', {
'user': user,
'domain': current_site.domain,
'uid':urlsafe_base64_encode(force_bytes(user.pk)),
'token':account_activation_token.make_token(user),
})
to_email = form.cleaned_data.get('email')
email = EmailMessage(
mail_subject, message, to=[to_email]
)
email.send()
return HttpResponse('Please confirm your email address to complete the registration')
else:
form = SignupForm()
return render(request, 'signup.html', {'form': form})
def activate(request, uidb64, token):
try:
uid = force_text(urlsafe_base64_decode(uidb64))
user = User.objects.get(pk=uid)
except(TypeError, ValueError, OverflowError, User.DoesNotExist):
user = None
if user is not None and account_activation_token.check_token(user, token):
user.is_active = True
user.save()
login(request, user)
# return redirect('home')
return HttpResponse('Thank you for your email confirmation. Now you can login your account.')
else:
return HttpResponse('Activation link is invalid!')
@login_required(login_url='/signup/')
def home(request):
projects=Post.objects.all()
return render(request, 'home.html',{"projects":projects})
@login_required(login_url='/signup/')
def new_image(request):
current_user = request.user
if request.method == 'POST':
form = ImageForm(request.POST, request.FILES)
if form.is_valid():
image = form.save(commit=False)
# image.profile = profile
image.save()
return redirect('home')
else:
form = ImageForm()
return render(request, 'image.html', locals())
@login_required(login_url='/signup/')
def profile(request, user_id):
title = "Profile"
projo= Post.get_project_by_user(id= user_id).order_by('-posted_on')
profiles = Profile.objects.get(user_id=user_id)
users = User.objects.get(id=user_id)
return render(request, 'profile/profile.html', locals())
def view_project(request, id):
title = "View Project"
project = Project.get_project_by_id(id=id)
return render(request, 'view_project.html', locals())
@login_required(login_url='/signup/')
def post(request,post_id):
comment_form = CommentForm()
form = DesignForm()
form = UsabilityForm()
form = ContentForm()
try:
post = Post.objects.get(id = post_id)
except Post.DoesNotExist:
raise Http404()
return render(request,"post.html",locals())
def add_design(request, id):
project = get_object_or_404(Post, pk=id)
if request.method == 'POST':
form = DesignForm(request.POST)
if form.is_valid():
rate = form.save(commit=False)
rate.project = project
rate.user_name = request.user
rate.profile = request.user.profile
rate.save()
return redirect('post', id)
else:
form = DesignForm()
return render(request, 'post.html',{'form': form})
def add_usability(request, id):
project = get_object_or_404(Post, pk=id)
if request.method == 'POST':
form = UsabilityForm(request.POST)
if form.is_valid():
rate = form.save(commit=False)
rate.project = project
rate.user_name = request.user
rate.profile = request.user.profile
rate.save()
return redirect('post',id)
else:
form = UsabilityForm()
return render(request, 'post.html',{'form': form})
def add_content(request, id):
project = get_object_or_404(Post, pk=id)
if request.method == 'POST':
form = ContentForm(request.POST)
if form.is_valid():
rate = form.save(commit=False)
rate.project = project
rate.user_name = request.user
rate.profile = request.user.profile
rate.save()
return redirect('post',id)
else:
form = ContentForm()
return render(request, 'post.html',{'form': form})
def search_projects(request):
if 'post' in request.GET and request.GET["post"]:
search_term = request.GET.get("post")
searched_projects = Project.search_by_title(search_term)
message = f"{search_term}"
return render(request, 'search.html',{"message":message,"post": searched_projects})
else:
message = "You haven't searched for any term"
return render(request, 'search.html',{"message":message})
def comment(request,id):
upload = Post.objects.get(id=id)
if request.method == 'POST':
comment_form = CommentForm(request.POST)
if comment_form.is_valid():
comment = comment_form.save(commit=False)
# comment.user = request.user
comment.post= upload
comment.save()
print(comment)
return redirect('post',id)
| 31.375
| 101
| 0.643924
|
c45f0f9f11abc783f8a5e23ca0d0cb0262379d39
| 4,511
|
py
|
Python
|
lib/calculate_prices.py
|
AlvBarros/evse_charging
|
74f26b03d6b86d54823878aa84b761728e3f6c7c
|
[
"MIT"
] | null | null | null |
lib/calculate_prices.py
|
AlvBarros/evse_charging
|
74f26b03d6b86d54823878aa84b761728e3f6c7c
|
[
"MIT"
] | null | null | null |
lib/calculate_prices.py
|
AlvBarros/evse_charging
|
74f26b03d6b86d54823878aa84b761728e3f6c7c
|
[
"MIT"
] | null | null | null |
import math
import json
from lib.business.supplierPrice import SupplierPrice
from lib.business.charge import Charge
from lib.business.price import Price
def calculatePricesFromCleanData(clean_data):
# Initialize empty list
result = []
# For every Charge inside clean result
for charge in clean_data['transactions']:
# TODO: Not every supplier price is found
# Every transaction (supposedly) has a EVSE ID that identifies which Supplier Price is it from
# This method searches for such a SupplierPrice
sp = getSupplierPriceFromEvseId(clean_data['supplier_prices'], charge.evseId)
if (sp is not None):
# Instantiate a Price object
price = Price(sp.identifier, charge.sessionId)
# If SupplierPrice is in Fee category
if (sp.fee is not None):
# Set its price as the one calculated below
price.set_feePrice(calculateFeePrice(sp.fee))
# If SupplierPrice is in Time category
if (sp.time is not None):
# Set its price as the one calculated below
price.set_timePrice(calculateTimePrice(sp.time, charge))
# If SupplierPrice is in kWh category
if (sp.kwh is not None):
# Set its price as the one calculated below
price.set_kwhPrice(calculatekWhPrice(sp.kwh, charge))
# Add to Dictionary result (which is pretty much like a JSON already)
result.append({
"fee_price": price.get_feePrice(),
"time_price": price.get_timePrice(),
"kwh_price": price.get_kwhPrice(),
"total_price": price.get_totalPrice(),
"session_id": price.sessionId,
"supplier_price_id": price.supplierPriceId
})
# Return list of calculated prices
return result
def getSupplierPriceFromEvseId(supplier_prices, evseId):
# Search through the list for the one that has this EVSE ID
for sp in supplier_prices:
if (sp.evseId == evseId):
return sp
return None
def calculateFeePrice(feePrice):
# The minimum/maximum calculations are done at Price.get_totalPrice method
return feePrice.sessionFee
def calculateTimePrice(timePrice, charge):
if (timePrice.complexity == 'simple'):
# The formula is:
# Duration (in minutes) * Minute Price
# *Be aware of the minimum duration
if (
timePrice.simpleMinutePrice is None
):
raise ValueError('Invalid values for Simple Time Price')
else:
duration = charge.durationInMinutes()
if (timePrice.minDuration is not None and duration < timePrice.minDuration):
# If duration is less than the minimum, sets the minimum instead
duration = timePrice.minDuration
return (duration * timePrice.simpleMinutePrice)
elif (timePrice.complexity == 'complex'):
# TODO: Finish complex calculation
return 0
# >> Just ignore this code below.
# Calculate using the minute price for that time price
# total_hours_charging_time = timePrice.durationInMinutes()/60
# if (timePrice.interval == 'start'):
# total_hours_charging_time = math.ceil(total_hours_charging_time)
# elif (timePrice.interval == 'end'):
# total_hours_charging_time = math.floor(total_hours_charging_time)
# else:
# raise 'Invalid interval'
else:
raise ValueError('Invalid complexity for TimePrice calculation')
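# Worked example with hypothetical numbers: a 12-minute charge with
# simpleMinutePrice=0.5 and minDuration=30 is billed for the minimum
# duration, i.e. 30 * 0.5 = 15.0.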
def calculatekWhPrice(kWhPrice, charge):
if (kWhPrice.complexity == 'simple'):
# Consumed kWh * kWh price
# Be aware of the minimum consumption
if (
kWhPrice.kwhPrice is None
):
raise ValueError('Invalid kWhPrice')
else:
duration = charge.durationInMinutes()/60
if (kWhPrice.minConsumption is not None and duration < kWhPrice.minConsumption):
# If duration is less than the minimum, sets the minimum instead
# Duration is in hours, but minConsumption is in kWh, kW or hours?
duration = kWhPrice.minConsumption
return duration * kWhPrice.kwhPrice
elif (kWhPrice.complexity == 'complex'):
# TODO: Finish complex calculation
return 0
else:
raise ValueError('Invalid complexity for kWhPrice calculation')
| 41.009091
| 102
| 0.627799
|
c05cbc062882a75f1bfb53d2bc8edaa27eb9059f
| 785
|
py
|
Python
|
games/pacman.py
|
aphawkins/microbit
|
8fa6a56504ef75082c3c4de7d962419eb8848b7a
|
[
"MIT"
] | null | null | null |
games/pacman.py
|
aphawkins/microbit
|
8fa6a56504ef75082c3c4de7d962419eb8848b7a
|
[
"MIT"
] | null | null | null |
games/pacman.py
|
aphawkins/microbit
|
8fa6a56504ef75082c3c4de7d962419eb8848b7a
|
[
"MIT"
] | null | null | null |
from microbit import *
import music
pacman_1 = Image(
"07970:"
"79997:"
"99999:"
"79997:"
"07970")
pacman_2 = Image(
"07970:"
"79985:"
"99972:"
"79985:"
"07970")
pacman_3 = Image(
"07970:"
"79970:"
"99700:"
"79970:"
"07970")
pacman_4 = Image(
"07950:"
"79700:"
"99200:"
"79700:"
"07950")
intro = [
'b1:2', 'b2:2', 'b1:2', 'b2:2',
'c2:2', 'c3:2', 'c2:2', 'c3:2',
'b1:2', 'b2:2', 'b1:2', 'b2:2',
'f#2:2', 'g#2:2', 'a#1:2', 'b2:2',
]
pacman_eat = [pacman_1, pacman_2, pacman_3, pacman_4, pacman_3, pacman_2]
music.play(intro)
while True:
display.show(pacman_eat, delay=100)
| 18.690476
| 73
| 0.438217
|
e699a814dc952df24b8bc8d44d416f7c99a9c840
| 2,833
|
py
|
Python
|
google/cloud/gkehub/v1/gkehub-v1-py/google/cloud/gkehub_v1/__init__.py
|
googleapis/googleapis-gen
|
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
|
[
"Apache-2.0"
] | 7
|
2021-02-21T10:39:41.000Z
|
2021-12-07T07:31:28.000Z
|
google/cloud/gkehub/v1/gkehub-v1-py/google/cloud/gkehub_v1/__init__.py
|
googleapis/googleapis-gen
|
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
|
[
"Apache-2.0"
] | 6
|
2021-02-02T23:46:11.000Z
|
2021-11-15T01:46:02.000Z
|
google/cloud/gkehub/v1/gkehub-v1-py/google/cloud/gkehub_v1/__init__.py
|
googleapis/googleapis-gen
|
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
|
[
"Apache-2.0"
] | 4
|
2021-01-28T23:25:45.000Z
|
2021-08-30T01:55:16.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .services.gke_hub import GkeHubClient
from .services.gke_hub import GkeHubAsyncClient
from .types.feature import CommonFeatureSpec
from .types.feature import CommonFeatureState
from .types.feature import Feature
from .types.feature import FeatureResourceState
from .types.feature import FeatureState
from .types.feature import MembershipFeatureSpec
from .types.feature import MembershipFeatureState
from .types.membership import Authority
from .types.membership import GkeCluster
from .types.membership import KubernetesMetadata
from .types.membership import Membership
from .types.membership import MembershipEndpoint
from .types.membership import MembershipState
from .types.service import ConnectAgentResource
from .types.service import CreateFeatureRequest
from .types.service import CreateMembershipRequest
from .types.service import DeleteFeatureRequest
from .types.service import DeleteMembershipRequest
from .types.service import GenerateConnectManifestRequest
from .types.service import GenerateConnectManifestResponse
from .types.service import GetFeatureRequest
from .types.service import GetMembershipRequest
from .types.service import ListFeaturesRequest
from .types.service import ListFeaturesResponse
from .types.service import ListMembershipsRequest
from .types.service import ListMembershipsResponse
from .types.service import OperationMetadata
from .types.service import TypeMeta
from .types.service import UpdateFeatureRequest
from .types.service import UpdateMembershipRequest
__all__ = (
'GkeHubAsyncClient',
'Authority',
'CommonFeatureSpec',
'CommonFeatureState',
'ConnectAgentResource',
'CreateFeatureRequest',
'CreateMembershipRequest',
'DeleteFeatureRequest',
'DeleteMembershipRequest',
'Feature',
'FeatureResourceState',
'FeatureState',
'GenerateConnectManifestRequest',
'GenerateConnectManifestResponse',
'GetFeatureRequest',
'GetMembershipRequest',
'GkeCluster',
'GkeHubClient',
'KubernetesMetadata',
'ListFeaturesRequest',
'ListFeaturesResponse',
'ListMembershipsRequest',
'ListMembershipsResponse',
'Membership',
'MembershipEndpoint',
'MembershipFeatureSpec',
'MembershipFeatureState',
'MembershipState',
'OperationMetadata',
'TypeMeta',
'UpdateFeatureRequest',
'UpdateMembershipRequest',
)
| 33.329412
| 74
| 0.82845
|
3ebfd1b4388fe58d2ca60000b496495c3b9c0de3
| 785
|
py
|
Python
|
flyvec/utils.py
|
bhoov/flyvec
|
3d5c1ef265c7f988c5dc72a74055479ed067a7f6
|
[
"Apache-2.0"
] | 31
|
2021-02-04T12:11:19.000Z
|
2022-01-11T16:02:33.000Z
|
flyvec/utils.py
|
bhoov/flyvec
|
3d5c1ef265c7f988c5dc72a74055479ed067a7f6
|
[
"Apache-2.0"
] | 7
|
2021-01-16T18:30:52.000Z
|
2022-01-18T06:42:23.000Z
|
flyvec/utils.py
|
bhoov/flyvec
|
3d5c1ef265c7f988c5dc72a74055479ed067a7f6
|
[
"Apache-2.0"
] | 6
|
2021-02-16T17:42:01.000Z
|
2022-02-23T07:06:54.000Z
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/04_utils.ipynb (unless otherwise specified).
__all__ = ['delegates']
# Cell
import inspect
def delegates(to=None, keep=False):
"Decorator: replace `**kwargs` in signature with params from `to`"
def _f(f):
if to is None: to_f,from_f = f.__base__.__init__,f.__init__
else: to_f,from_f = to,f
sig = inspect.signature(from_f)
sigd = dict(sig.parameters)
k = sigd.pop('kwargs')
s2 = {k:v for k,v in inspect.signature(to_f).parameters.items()
if v.default != inspect.Parameter.empty and k not in sigd}
sigd.update(s2)
if keep: sigd['kwargs'] = k
from_f.__signature__ = sig.replace(parameters=sigd.values())
return f
return _f
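# Illustrative usage sketch (made-up functions): the keyword defaults of `base`
# replace `**kwargs` in the reported signature of `wrapper`.
# >>> def base(x, y=1, z=2): return x + y + z
# >>> @delegates(base)
# ... def wrapper(x, **kwargs): return base(x, **kwargs)
# >>> str(inspect.signature(wrapper))
# '(x, y=1, z=2)'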
| 35.681818
| 92
| 0.628025
|
e0620f38e789d83c4f634185eea670aabfc70e36
| 2,770
|
py
|
Python
|
saleor/site/models.py
|
glosoftgroup/ps254-backend
|
f9c9d798ae8eba29a3a502c6913c2238c4d3906c
|
[
"BSD-3-Clause"
] | null | null | null |
saleor/site/models.py
|
glosoftgroup/ps254-backend
|
f9c9d798ae8eba29a3a502c6913c2238c4d3906c
|
[
"BSD-3-Clause"
] | 6
|
2021-02-08T20:20:06.000Z
|
2022-03-11T23:18:59.000Z
|
saleor/site/models.py
|
glosoftgroup/ps254-backend
|
f9c9d798ae8eba29a3a502c6913c2238c4d3906c
|
[
"BSD-3-Clause"
] | null | null | null |
from django.contrib.sites.models import _simple_domain_name_validator
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import pgettext_lazy
from . import AuthenticationBackends
from decimal import Decimal
from django.core.validators import MinValueValidator
from datetime import datetime
@python_2_unicode_compatible
class SiteSettings(models.Model):
domain = models.CharField(
pgettext_lazy('Site field', 'domain'), max_length=100,
validators=[_simple_domain_name_validator], unique=True)
name = models.CharField(pgettext_lazy('Site field', 'name'), max_length=50)
header_text = models.CharField(
pgettext_lazy('Site field', 'header text'), max_length=200, blank=True)
description = models.CharField(
pgettext_lazy('Site field', 'site description'), max_length=500,
blank=True)
loyalty_point_equiv = models.IntegerField( pgettext_lazy('Site field', 'loyalty points equivalency'),
validators=[MinValueValidator(0)], default=Decimal(0))
opening_time = models.TimeField(pgettext_lazy('Site field', 'opening time'),
auto_now=False, null=True, blank=True)
closing_time = models.TimeField(pgettext_lazy('Site field', 'closing time'),
auto_now=False, null=True, blank=True)
def __str__(self):
return self.name
def available_backends(self):
return self.authorizationkey_set.values_list('name', flat=True)
@python_2_unicode_compatible
class AuthorizationKey(models.Model):
site_settings = models.ForeignKey(SiteSettings)
name = models.CharField(
pgettext_lazy('Authentication field', 'name'), max_length=20,
choices=AuthenticationBackends.BACKENDS)
key = models.TextField(pgettext_lazy('Authentication field', 'key'))
password = models.TextField(
pgettext_lazy('Authentication field', 'password'))
class Meta:
unique_together = (('site_settings', 'name'),)
def __str__(self):
return self.name
def key_and_secret(self):
return self.key, self.password
class Bank(models.Model):
name = models.CharField(max_length=100, null=True, blank=True)
def __str__(self):
return str(self.name)
class BankBranch(models.Model):
name = models.CharField(max_length=100, null=True, blank=True)
bank = models.ForeignKey(Bank, related_name='branch', max_length=100, null=True, blank=True)
def __str__(self):
return str(self.name)
class Department(models.Model):
name = models.CharField(max_length=100, null=True, blank=True)
def __str__(self):
return str(self.name)
class UserRole(models.Model):
name = models.CharField(max_length=100, null=True, blank=True)
def __str__(self):
return str(self.name)
| 35.512821
| 105
| 0.736462
|
8d8b4d3883f2314aff89a3b787fe6557d537baf7
| 8,157
|
py
|
Python
|
cloudtts/aws.py
|
setomits/cloudtts
|
701c9f95d9926a8925b6dd2f2526b0b28c9a0fd7
|
[
"MIT"
] | 3
|
2018-08-29T23:27:14.000Z
|
2018-08-30T03:44:20.000Z
|
cloudtts/aws.py
|
setomits/cloudtts
|
701c9f95d9926a8925b6dd2f2526b0b28c9a0fd7
|
[
"MIT"
] | 1
|
2018-09-18T08:51:08.000Z
|
2018-09-18T08:51:08.000Z
|
cloudtts/aws.py
|
setomits/cloudtts
|
701c9f95d9926a8925b6dd2f2526b0b28c9a0fd7
|
[
"MIT"
] | 2
|
2018-10-19T07:48:56.000Z
|
2018-10-19T08:57:52.000Z
|
from contextlib import closing
import re
from boto3 import Session
from .client import AudioFormat
from .client import Client
from .client import CloudTTSError
from .client import Gender
from .client import Language
from .client import VoiceConfig
class PollyCredential:
def __init__(self, region_name,
aws_access_key_id='', aws_secret_access_key=''):
self.region_name = region_name
self.aws_access_key_id = aws_access_key_id
self.aws_secret_access_key = aws_secret_access_key
def has_access_key(self):
return self.aws_access_key_id and self.aws_secret_access_key
class PollyClient(Client):
'''
This is a client class for Amazon Polly API
>>> from cloudtts import PollyClient, PollyCredential
>>> cred = PollyCredential(
... aws_access_key_id=YOUR_ACCESS_KEY_ID,
... aws_secret_access_key=YOUR_SECRET_ACCESS_KEY,
... region_name=AWS_REGION_NAME
... )
>>> c = PollyClient(cred)
>>> audio = c.tts('Hello world!')
>>> open('/path/to/save/audio', 'wb') as f:
... f.write(audio)
'''
MAX_TEXT_LENGTH = 3000
AVAILABLE_SAMPLE_RATES = {
'mp3': ('8000', '16000', '22050'),
'ogg_vorbis': ('8000', '16000', '22050'),
'pcm': ('8000', '16000'),
}
AVAILABLE_VOICE_IDS = ('Aditi', 'Aditi', 'Amy', 'Astrid',
'Brian',
'Carla', 'Carmen', 'Celine', 'Chantal', 'Conchita',
'Cristiano', 'Céline',
'Dora', 'Dóra',
'Emma', 'Enrique', 'Ewa',
'Filiz',
'Geraint', 'Giorgio', 'Gwyneth',
'Hans',
'Ines', 'Inês', 'Ivy',
'Jacek', 'Jan', 'Joanna', 'Joey', 'Justin',
'Karl', 'Kendra', 'Kimberly',
'Liv', 'Lotte', 'Léa',
'Mads', 'Maja', 'Marlene', 'Mathieu', 'Matthew',
'Maxim', 'Miguel', 'Mizuki',
'Naja', 'Nicole',
'Penelope', 'Penélope',
'Raveena', 'Ricardo', 'Ruben', 'Russell',
'Salli', 'Seoyeon',
'Takumi', 'Tatyana',
'Vicki', 'Vitoria', 'Vitória',
)
AUDIO_FORMAT_DICT = {
AudioFormat.mp3: ('mp3', '22050'),
AudioFormat.ogg_vorbis: ('ogg_vorbis', '22050'),
AudioFormat.pcm: ('pcm', '16000'),
}
LANG_GENDER_DICT = {
(Language.da_DK, Gender.female): 'Naja',
(Language.da_DK, Gender.male): 'Mads',
(Language.de_DE, Gender.female): 'Marlene',
(Language.de_DE, Gender.male): 'Hans',
(Language.en_AU, Gender.female): 'Nicole',
(Language.en_AU, Gender.male): 'Russell',
(Language.en_GB, Gender.female): 'Amy',
(Language.en_GB, Gender.male): 'Brian',
(Language.en_IN, Gender.female): 'Aditi', # bilingual
(Language.en_US, Gender.female): 'Joanna',
(Language.en_US, Gender.male): 'Joey',
(Language.es_ES, Gender.female): 'Conchita',
(Language.es_ES, Gender.male): 'Enrique',
(Language.es_US, Gender.female): 'Penelope',
(Language.es_US, Gender.male): 'Miguel',
(Language.fr_CA, Gender.female): 'Chantal',
(Language.fr_FR, Gender.female): 'Celine',
(Language.fr_FR, Gender.male): 'Mathieu',
(Language.hi_IN, Gender.female): 'Aditi', # bilingual
(Language.it_IT, Gender.female): 'Carla',
(Language.it_IT, Gender.male): 'Giorgio',
(Language.ja_JP, Gender.female): 'Mizuki',
(Language.ja_JP, Gender.male): 'Takumi',
(Language.ko_KR, Gender.female): 'Seoyeon',
(Language.nb_NO, Gender.female): 'Liv',
(Language.nl_NL, Gender.female): 'Lotte',
(Language.nl_NL, Gender.male): 'Ruben',
(Language.pl_PL, Gender.female): 'Ewa',
(Language.pl_PL, Gender.male): 'Jacek',
(Language.pt_BR, Gender.female): 'Vitoria',
(Language.pt_BR, Gender.male): 'Ricardo',
(Language.pt_PT, Gender.female): 'Ines',
(Language.pt_PT, Gender.male): 'Cristiano',
(Language.ro_RO, Gender.female): 'Carmen',
(Language.ru_RU, Gender.female): 'Tatyana',
(Language.ru_RU, Gender.male): 'Maxim',
(Language.sv_SE, Gender.female): 'Astrid',
(Language.tr_TR, Gender.female): 'Filiz',
}
def _voice_config_to_dict(self, vc):
d = {}
if vc.audio_format in PollyClient.AUDIO_FORMAT_DICT:
d['output_format'], d['sample_rate'] = \
PollyClient.AUDIO_FORMAT_DICT[vc.audio_format]
if (vc.language, vc.gender) in PollyClient.LANG_GENDER_DICT:
voice_id = PollyClient.LANG_GENDER_DICT[(vc.language, vc.gender)]
d['voice_id'] = voice_id
return d
def _is_valid_output_format(self, params):
if not 'output_format' in params:
return False
return params['output_format'] in PollyClient.AVAILABLE_SAMPLE_RATES
def _is_valid_sample_rate(self, params):
if params['output_format'] not in PollyClient.AVAILABLE_SAMPLE_RATES:
return False
if 'sample_rate' not in params:
return False
rates = PollyClient.AVAILABLE_SAMPLE_RATES[params['output_format']]
return params['sample_rate'] in rates
def _is_valid_voice_id(self, params):
if 'voice_id' not in params:
return False
return params['voice_id'] in PollyClient.AVAILABLE_VOICE_IDS
def _is_valid_params(self, params):
return self._is_valid_output_format(params) and \
self._is_valid_sample_rate(params) and \
self._is_valid_voice_id(params)
def tts(self, text='', ssml='', voice_config=None, detail=None):
'''
Synthesizes audio data for text.
Args:
text: string / target to be synthesized(plain text)
ssml: string / target to be synthesized(SSML)
voice_config: VoiceConfig / parameters for voice and audio
detail: dict / detail parameters for voice and audio
Returns:
binary
'''
if self.credential:
if isinstance(self.credential, PollyCredential):
pass
else:
raise TypeError('Invalid credential')
else:
raise CloudTTSError('No Authentication yet')
if self.credential.has_access_key():
sess = Session(
region_name=self.credential.region_name,
aws_access_key_id=self.credential.aws_access_key_id,
aws_secret_access_key=self.credential.aws_secret_access_key
)
else:
sess = Session(region_name=self.credential.region_name)
if text:
if len(text) > PollyClient.MAX_TEXT_LENGTH:
msg = Client.TOO_LONG_DATA_MSG.format(
PollyClient.MAX_TEXT_LENGTH, len(text))
raise CloudTTSError(msg)
elif ssml:
_text = re.compile('</?speak>').sub('', ssml)
if len(_text) > PollyClient.MAX_TEXT_LENGTH:
msg = Client.TOO_LONG_DATA_MSG.format(
PollyClient.MAX_TEXT_LENGTH, len(_text))
raise CloudTTSError(msg)
else:
raise ValueError('No text or ssml is passed')
polly = sess.client('polly')
params = self._make_params(voice_config, detail)
response = polly.synthesize_speech(
Text=ssml if ssml else text,
TextType='ssml' if ssml else 'text',
OutputFormat=params['output_format'],
VoiceId=params['voice_id'],
SampleRate=params['sample_rate'],
)
audio = None
if 'AudioStream' in response:
with closing(response['AudioStream']) as stream:
audio = stream.read()
return audio
| 37.077273
| 78
| 0.566262
|
cb52ecaa6f4143fbbb8dc4ab36a7df8afa751154
| 7,409
|
py
|
Python
|
bluesky_queueserver/manager/tests/test_manager_options.py
|
ksunden/bluesky-queueserver
|
49a4b22d942f5f0593dedb6afa57e0929b459df4
|
[
"BSD-3-Clause"
] | null | null | null |
bluesky_queueserver/manager/tests/test_manager_options.py
|
ksunden/bluesky-queueserver
|
49a4b22d942f5f0593dedb6afa57e0929b459df4
|
[
"BSD-3-Clause"
] | null | null | null |
bluesky_queueserver/manager/tests/test_manager_options.py
|
ksunden/bluesky-queueserver
|
49a4b22d942f5f0593dedb6afa57e0929b459df4
|
[
"BSD-3-Clause"
] | null | null | null |
import pytest
import shutil
import os
import glob
import subprocess
import time as ttime
import multiprocessing
from bluesky.callbacks.zmq import RemoteDispatcher
from bluesky_queueserver.manager.profile_ops import gen_list_of_plans_and_devices
from bluesky_queueserver.manager.comms import zmq_single_request
from ._common import (
copy_default_profile_collection,
append_code_to_last_startup_file,
wait_for_condition,
condition_environment_created,
condition_queue_processing_finished,
condition_environment_closed,
)
from ._common import re_manager_cmd # noqa: F401
# User name and user group name used throughout most of the tests.
_user, _user_group = "Testing Script", "admin"
_plan1 = {"name": "count", "args": [["det1", "det2"]], "item_type": "plan"}
_sample_plan1 = """
def simple_sample_plan():
'''
Simple plan for tests.
'''
yield from count([det1, det2])
"""
# fmt: off
@pytest.mark.parametrize("option", ["startup_dir", "profile", "multiple"])
# fmt: on
def test_manager_options_startup_profile(re_manager_cmd, tmp_path, monkeypatch, option): # noqa: F811
pc_path = copy_default_profile_collection(tmp_path)
# Add extra plan. The original set of startup files will not contain this plan.
append_code_to_last_startup_file(pc_path, additional_code=_sample_plan1)
# Generate the new list of allowed plans and devices and reload them
gen_list_of_plans_and_devices(startup_dir=pc_path, file_dir=pc_path, overwrite=True)
# Start manager
if option == "startup_dir":
re_manager_cmd(["--startup-dir", pc_path])
elif option == "profile":
# This option is more complicated: we want to recreate the structure of IPython startup
# directory: <some root dir>/profile_<profile_name>/startup.
root_dir = os.path.split(pc_path)[0]
monkeypatch.setenv("IPYTHONDIR", root_dir)
profile_name = "testing"
startup_path = os.path.join(root_dir, f"profile_{profile_name}", "startup")
os.makedirs(startup_path)
file_pattern = os.path.join(pc_path, "*")
for fl_path in glob.glob(file_pattern):
shutil.move(fl_path, startup_path)
os.rmdir(pc_path)
# We pass only profile name as a parameter.
re_manager_cmd(["--startup-profile", profile_name])
elif option == "multiple":
# Expected to fail if multiple options are selected.
with pytest.raises(TimeoutError, match="RE Manager failed to start"):
re_manager_cmd(["--startup-dir", pc_path, "--startup-profile", "some_name"])
return
else:
assert False, f"Unknown option '{option}'"
# Open the environment (make sure that the environment loads)
resp1, _ = zmq_single_request("environment_open")
assert resp1["success"] is True
assert wait_for_condition(time=10, condition=condition_environment_created)
# Add the plan to the queue (will fail if incorrect environment is loaded)
plan = {"name": "simple_sample_plan", "item_type": "plan"}
params = {"item": plan, "user": _user, "user_group": _user_group}
resp2, _ = zmq_single_request("queue_item_add", params)
assert resp2["success"] is True, f"resp={resp2}"
# Start the queue
resp3, _ = zmq_single_request("queue_start")
assert resp3["success"] is True
assert wait_for_condition(time=10, condition=condition_queue_processing_finished)
# Make sure that the plan was executed
resp4, _ = zmq_single_request("status")
assert resp4["items_in_queue"] == 0
assert resp4["items_in_history"] == 1
# Close the environment
resp5, _ = zmq_single_request("environment_close")
assert resp5["success"] is True, f"resp={resp5}"
assert wait_for_condition(time=5, condition=condition_environment_closed)
monkeypatch.setenv("IPYTHONDIR", "abc")
@pytest.fixture
def zmq_proxy():
cmd = ["bluesky-0MQ-proxy", "5567", "5568"]
p = subprocess.Popen(cmd, universal_newlines=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
yield
p.kill()
@pytest.fixture
def zmq_dispatcher():
# The following code was mostly borrowed from 'bluesky.tests.test_zmq.py' (test_zmq_no_RE)
def make_and_start_dispatcher(queue):
def put_in_queue(name, doc):
print("putting ", name, "in queue")
queue.put((name, doc))
d = RemoteDispatcher("127.0.0.1:5568")
d.subscribe(put_in_queue)
print("REMOTE IS READY TO START")
d.loop.call_later(9, d.stop)
d.start()
queue = multiprocessing.Queue()
dispatcher_proc = multiprocessing.Process(target=make_and_start_dispatcher, daemon=True, args=(queue,))
dispatcher_proc.start()
ttime.sleep(2) # As above, give this plenty of time to start.
yield queue
dispatcher_proc.terminate()
dispatcher_proc.join()
def test_manager_acq_with_0MQ_proxy(re_manager_cmd, zmq_proxy, zmq_dispatcher): # noqa: F811
re_manager_cmd(["--zmq-data-proxy-addr", "localhost:5567"])
# Open the environment (make sure that the environment loads)
resp1, _ = zmq_single_request("environment_open")
assert resp1["success"] is True
assert wait_for_condition(time=10, condition=condition_environment_created)
# Add the plan to the queue (will fail if incorrect environment is loaded)
params = {"item": _plan1, "user": _user, "user_group": _user_group}
resp2, _ = zmq_single_request("queue_item_add", params)
assert resp2["success"] is True, f"resp={resp2}"
# Start the queue
resp3, _ = zmq_single_request("queue_start")
assert resp3["success"] is True
assert wait_for_condition(time=10, condition=condition_queue_processing_finished)
# Make sure that the plan was executed
resp4, _ = zmq_single_request("status")
assert resp4["items_in_queue"] == 0
assert resp4["items_in_history"] == 1
# Close the environment
resp5, _ = zmq_single_request("environment_close")
assert resp5["success"] is True, f"resp={resp5}"
assert wait_for_condition(time=5, condition=condition_environment_closed)
# Test if the data was delivered to the consumer.
# Simple test: check if 'start' and 'stop' documents were delivered.
queue = zmq_dispatcher
remote_accumulator = []
while not queue.empty(): # Since queue is used by one process at a time, queue.empty() should work reliably
remote_accumulator.append(queue.get(timeout=2))
assert len(remote_accumulator) >= 2
assert remote_accumulator[0][0] == "start" # Start document
assert remote_accumulator[-1][0] == "stop" # Stop document
# fmt: off
@pytest.mark.parametrize("redis_addr, success", [
("localhost", True),
("localhost:6379", True),
("localhost:6378", False)]) # Incorrect port.
# fmt: on
def test_manager_redis_addr_parameter(re_manager_cmd, redis_addr, success): # noqa: F811
if success:
re_manager_cmd(["--redis-addr", redis_addr])
# Try to communicate with the server to make sure Redis is configure correctly.
# RE Manager has to access Redis in order to prepare 'status'.
resp1, _ = zmq_single_request("status")
assert resp1["items_in_queue"] == 0
assert resp1["items_in_history"] == 0
else:
with pytest.raises(TimeoutError, match="RE Manager failed to start"):
re_manager_cmd(["--redis-addr", redis_addr])
| 37.609137
| 112
| 0.703199
|
9a10ae7139de177ee30f99e43a99ca4a7883431e
| 1,889
|
py
|
Python
|
configs/_base_/datasets/coco_detection.py
|
aanna0701/swin_object_SCL
|
b1a8aa53fff0d6657fe3038fb08da260e780d760
|
[
"Apache-2.0"
] | null | null | null |
configs/_base_/datasets/coco_detection.py
|
aanna0701/swin_object_SCL
|
b1a8aa53fff0d6657fe3038fb08da260e780d760
|
[
"Apache-2.0"
] | null | null | null |
configs/_base_/datasets/coco_detection.py
|
aanna0701/swin_object_SCL
|
b1a8aa53fff0d6657fe3038fb08da260e780d760
|
[
"Apache-2.0"
] | null | null | null |
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=2,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
# test=dict(
# type=dataset_type,
# ann_file=data_root + 'annotations/image_info_test-dev2017.json',
# img_prefix=data_root + 'test2017/',
# pipeline=test_pipeline))
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
evaluation = dict(interval=1, metric='bbox')
| 34.981481
| 77
| 0.627316
|
f619b57241a669375d5b41049544917cdce0384a
| 257
|
py
|
Python
|
sandbox/lib/pythonbin/libtmux/__about__.py
|
Jumpscale/sandbox_linux
|
2aacd36b467ef30ac83718abfa82c6883b67a02f
|
[
"Apache-2.0"
] | null | null | null |
sandbox/lib/pythonbin/libtmux/__about__.py
|
Jumpscale/sandbox_linux
|
2aacd36b467ef30ac83718abfa82c6883b67a02f
|
[
"Apache-2.0"
] | 1
|
2018-04-04T12:13:40.000Z
|
2018-05-03T07:57:52.000Z
|
sandbox/lib/pythonbin/libtmux/__about__.py
|
Jumpscale/sandbox_linux
|
2aacd36b467ef30ac83718abfa82c6883b67a02f
|
[
"Apache-2.0"
] | null | null | null |
__title__ = 'libtmux'
__package_name__ = 'libtmux'
__version__ = '0.8.0'
__description__ = 'scripting library / orm for tmux'
__email__ = 'tony@git-pull.com'
__author__ = 'Tony Narlock'
__license__ = 'MIT'
__copyright__ = 'Copyright 2016-2018 Tony Narlock'
| 28.555556
| 52
| 0.750973
|
225c330f8649643c1a9132171f6c38e0e6ac7b07
| 2,139
|
py
|
Python
|
indra/tests/test_acsn.py
|
samuelbunga/indra
|
54bfa0f01f0825118a7873ef72762ef66eb2062f
|
[
"BSD-2-Clause"
] | null | null | null |
indra/tests/test_acsn.py
|
samuelbunga/indra
|
54bfa0f01f0825118a7873ef72762ef66eb2062f
|
[
"BSD-2-Clause"
] | null | null | null |
indra/tests/test_acsn.py
|
samuelbunga/indra
|
54bfa0f01f0825118a7873ef72762ef66eb2062f
|
[
"BSD-2-Clause"
] | null | null | null |
import pandas
import requests
from indra.sources.acsn import api
from indra.ontology.standardize import get_standard_agent
from indra.sources.acsn.processor import get_stmt_type, AcsnProcessor
relations_df = pandas.read_csv(api.ACSN_RELATIONS_URL, sep='\t')
gmt_file = requests.get(api.ACSN_CORRESPONDENCE_URL).text.split('\n')
correspondence_dict = api._transform_gmt(gmt_file)
ap = AcsnProcessor(relations_df, correspondence_dict)
def test_transform_gmt():
gmt_dict = api._transform_gmt(gmt_file)
assert 'C3' in gmt_dict['C3B*']
assert 'ZO4*' not in gmt_dict
assert not gmt_dict['SLC2A1'][0].endswith('\t')
assert not gmt_dict['SLC2A1'][0].startswith('\t')
def test_famplex_lookup():
fplx_lookup = ap.fplx_lookup
assert 'USPL' in fplx_lookup[('CYLD', 'USPL1')]
assert 'VEGFRR' not in fplx_lookup[('FLT1', 'FLT4', 'KDR')]
def test_get_stmt_type():
assert get_stmt_type('CATALYSIS').__name__ == 'Activation'
assert get_stmt_type('INHIBITION').__name__ == 'Inhibition'
assert get_stmt_type('HETERODIMER_ASSOCIATION').__name__ == 'Complex'
assert get_stmt_type('CATALYSIS;HETERODIMER_ASSOCIATION').__name__ == \
'Complex'
assert not get_stmt_type('Activation')
def test_get_agent():
# Agents
VEGFA = get_standard_agent('VEGFA', db_refs={'HGNC': '12680'})
MIRLET7A = get_standard_agent('MIRLET7A', db_refs={'FPLX': 'MIRLET7A'})
assert ap.get_agent('VEGFA').db_refs == VEGFA.db_refs, VEGFA.db_refs
assert ap.get_agent('MIRLET7A*').db_refs == \
MIRLET7A.db_refs, MIRLET7A.db_refs
assert ap.get_agent('XyZ') is None
def test_extract_statements():
ap.extract_statements()
stmts = ap.statements
assert stmts[345].evidence[0].source_api == 'acsn'
test_stmt = [stmt for stmt in stmts if(any(ag.name == 'SIVA1'
for ag in stmt.agent_list()) and
any(ag.name == 'TRAF2'
for ag in stmt.agent_list()))]
assert test_stmt[0] in stmts
assert '19392652' in test_stmt[0].evidence[0].pmid
| 36.87931
| 79
| 0.673212
|
cafea141416d6182f2e0a5ef0fb35960727f4a97
| 59,181
|
py
|
Python
|
src/kmol/vendor/dgllife/utils/featurizers.py
|
elix-tech/kmol
|
f7fb610d0cbe958891ed15032bb301fb664e4e25
|
[
"MIT"
] | 27
|
2021-11-18T12:12:38.000Z
|
2022-03-15T06:34:55.000Z
|
src/kmol/vendor/dgllife/utils/featurizers.py
|
elix-tech/kmol
|
f7fb610d0cbe958891ed15032bb301fb664e4e25
|
[
"MIT"
] | 3
|
2022-01-25T05:19:54.000Z
|
2022-02-03T01:30:29.000Z
|
src/kmol/vendor/dgllife/utils/featurizers.py
|
elix-tech/kmol
|
f7fb610d0cbe958891ed15032bb301fb664e4e25
|
[
"MIT"
] | 3
|
2022-02-08T16:12:14.000Z
|
2022-03-31T05:47:57.000Z
|
import itertools
import os.path as osp
from collections import defaultdict
from functools import partial
import numpy as np
import torch
from rdkit import Chem, RDConfig
from rdkit.Chem import AllChem, ChemicalFeatures
__all__ = [
'one_hot_encoding',
'atom_type_one_hot',
'atomic_number_one_hot',
'atomic_number',
'atom_degree_one_hot',
'atom_degree',
'atom_total_degree_one_hot',
'atom_total_degree',
'atom_explicit_valence_one_hot',
'atom_explicit_valence',
'atom_implicit_valence_one_hot',
'atom_implicit_valence',
'atom_hybridization_one_hot',
'atom_total_num_H_one_hot',
'atom_total_num_H',
'atom_formal_charge_one_hot',
'atom_formal_charge',
'atom_num_radical_electrons_one_hot',
'atom_num_radical_electrons',
'atom_is_aromatic_one_hot',
'atom_is_aromatic',
'atom_is_in_ring_one_hot',
'atom_is_in_ring',
'atom_chiral_tag_one_hot',
'atom_chirality_type_one_hot',
'atom_mass',
'atom_is_chiral_center',
'ConcatFeaturizer',
'BaseAtomFeaturizer',
'CanonicalAtomFeaturizer',
'WeaveAtomFeaturizer',
'PretrainAtomFeaturizer',
'AttentiveFPAtomFeaturizer',
'bond_type_one_hot',
'bond_is_conjugated_one_hot',
'bond_is_conjugated',
'bond_is_in_ring_one_hot',
'bond_is_in_ring',
'bond_stereo_one_hot',
'bond_direction_one_hot',
'BaseBondFeaturizer',
'CanonicalBondFeaturizer',
'WeaveEdgeFeaturizer',
'PretrainBondFeaturizer',
'AttentiveFPBondFeaturizer'
]
def one_hot_encoding(x, allowable_set, encode_unknown=False):
"""One-hot encoding.
Parameters
----------
x
Value to encode.
allowable_set : list
The elements of the allowable_set should be of the
same type as x.
encode_unknown : bool
If True, map inputs not in the allowable set to the
additional last element.
Returns
-------
list
List of boolean values where at most one value is True.
The list is of length ``len(allowable_set)`` if ``encode_unknown=False``
and ``len(allowable_set) + 1`` otherwise.
"""
if encode_unknown and (allowable_set[-1] is not None):
allowable_set.append(None)
if encode_unknown and (x not in allowable_set):
x = None
return list(map(lambda s: x == s, allowable_set))
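# Illustrative usage sketch (made-up allowable set):
# >>> one_hot_encoding('C', ['C', 'N', 'O'])
# [True, False, False]
# >>> one_hot_encoding('S', ['C', 'N', 'O'], encode_unknown=True)
# [False, False, False, True]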
#################################################################
# Atom featurization
#################################################################
def atom_type_one_hot(atom, allowable_set=None, encode_unknown=False):
"""One hot encoding for the type of an atom.
Parameters
----------
atom : rdkit.Chem.rdchem.Atom
RDKit atom instance.
allowable_set : list of str
Atom types to consider. Default: ``C``, ``N``, ``O``, ``S``, ``F``, ``Si``, ``P``,
``Cl``, ``Br``, ``Mg``, ``Na``, ``Ca``, ``Fe``, ``As``, ``Al``, ``I``, ``B``, ``V``,
``K``, ``Tl``, ``Yb``, ``Sb``, ``Sn``, ``Ag``, ``Pd``, ``Co``, ``Se``, ``Ti``, ``Zn``,
``H``, ``Li``, ``Ge``, ``Cu``, ``Au``, ``Ni``, ``Cd``, ``In``, ``Mn``, ``Zr``, ``Cr``,
``Pt``, ``Hg``, ``Pb``.
encode_unknown : bool
If True, map inputs not in the allowable set to the
additional last element. (Default: False)
Returns
-------
list
List of boolean values where at most one value is True.
See Also
--------
one_hot_encoding
atomic_number
atomic_number_one_hot
"""
if allowable_set is None:
allowable_set = ['C', 'N', 'O', 'S', 'F', 'Si', 'P', 'Cl', 'Br', 'Mg', 'Na', 'Ca',
'Fe', 'As', 'Al', 'I', 'B', 'V', 'K', 'Tl', 'Yb', 'Sb', 'Sn',
'Ag', 'Pd', 'Co', 'Se', 'Ti', 'Zn', 'H', 'Li', 'Ge', 'Cu', 'Au',
'Ni', 'Cd', 'In', 'Mn', 'Zr', 'Cr', 'Pt', 'Hg', 'Pb']
return one_hot_encoding(atom.GetSymbol(), allowable_set, encode_unknown)
def atomic_number_one_hot(atom, allowable_set=None, encode_unknown=False):
"""One hot encoding for the atomic number of an atom.
Parameters
----------
atom : rdkit.Chem.rdchem.Atom
RDKit atom instance.
allowable_set : list of int
Atomic numbers to consider. Default: ``1`` - ``100``.
encode_unknown : bool
If True, map inputs not in the allowable set to the
additional last element. (Default: False)
Returns
-------
list
List of boolean values where at most one value is True.
See Also
--------
one_hot_encoding
atomic_number
atom_type_one_hot
"""
if allowable_set is None:
allowable_set = list(range(1, 101))
return one_hot_encoding(atom.GetAtomicNum(), allowable_set, encode_unknown)
def atomic_number(atom):
"""Get the atomic number for an atom.
Parameters
----------
atom : rdkit.Chem.rdchem.Atom
RDKit atom instance.
Returns
-------
list
List containing one int only.
See Also
--------
atomic_number_one_hot
atom_type_one_hot
"""
return [atom.GetAtomicNum()]
def atom_degree_one_hot(atom, allowable_set=None, encode_unknown=False):
"""One hot encoding for the degree of an atom.
Note that the result will be different depending on whether the Hs are
explicitly modeled in the graph.
Parameters
----------
atom : rdkit.Chem.rdchem.Atom
RDKit atom instance.
allowable_set : list of int
Atom degrees to consider. Default: ``0`` - ``10``.
encode_unknown : bool
If True, map inputs not in the allowable set to the
additional last element. (Default: False)
Returns
-------
list
List of boolean values where at most one value is True.
See Also
--------
one_hot_encoding
atom_degree
atom_total_degree
atom_total_degree_one_hot
"""
if allowable_set is None:
allowable_set = list(range(11))
return one_hot_encoding(atom.GetDegree(), allowable_set, encode_unknown)
def atom_degree(atom):
"""Get the degree of an atom.
Note that the result will be different depending on whether the Hs are
explicitly modeled in the graph.
Parameters
----------
atom : rdkit.Chem.rdchem.Atom
RDKit atom instance.
Returns
-------
list
List containing one int only.
See Also
--------
atom_degree_one_hot
atom_total_degree
atom_total_degree_one_hot
"""
return [atom.GetDegree()]
def atom_total_degree_one_hot(atom, allowable_set=None, encode_unknown=False):
"""One hot encoding for the degree of an atom including Hs.
Parameters
----------
atom : rdkit.Chem.rdchem.Atom
RDKit atom instance.
allowable_set : list
Total degrees to consider. Default: ``0`` - ``5``.
encode_unknown : bool
If True, map inputs not in the allowable set to the
additional last element. (Default: False)
See Also
--------
one_hot_encoding
atom_degree
atom_degree_one_hot
atom_total_degree
"""
if allowable_set is None:
allowable_set = list(range(6))
return one_hot_encoding(atom.GetTotalDegree(), allowable_set, encode_unknown)
def atom_total_degree(atom):
"""The degree of an atom including Hs.
Returns
-------
list
List containing one int only.
See Also
--------
atom_total_degree_one_hot
atom_degree
atom_degree_one_hot
"""
return [atom.GetTotalDegree()]
def atom_explicit_valence_one_hot(atom, allowable_set=None, encode_unknown=False):
"""One hot encoding for the explicit valence of an aotm.
Parameters
----------
atom : rdkit.Chem.rdchem.Atom
RDKit atom instance.
allowable_set : list of int
Atom explicit valences to consider. Default: ``1`` - ``6``.
encode_unknown : bool
If True, map inputs not in the allowable set to the
additional last element. (Default: False)
Returns
-------
list
List of boolean values where at most one value is True.
See Also
--------
one_hot_encoding
atom_explicit_valence
"""
if allowable_set is None:
allowable_set = list(range(1, 7))
return one_hot_encoding(atom.GetExplicitValence(), allowable_set, encode_unknown)
def atom_explicit_valence(atom):
"""Get the explicit valence of an atom.
Parameters
----------
atom : rdkit.Chem.rdchem.Atom
RDKit atom instance.
Returns
-------
list
List containing one int only.
See Also
--------
atom_explicit_valence_one_hot
"""
return [atom.GetExplicitValence()]
def atom_implicit_valence_one_hot(atom, allowable_set=None, encode_unknown=False):
"""One hot encoding for the implicit valence of an atom.
Parameters
----------
atom : rdkit.Chem.rdchem.Atom
RDKit atom instance.
allowable_set : list of int
Atom implicit valences to consider. Default: ``0`` - ``6``.
encode_unknown : bool
If True, map inputs not in the allowable set to the
additional last element. (Default: False)
Returns
-------
list
List of boolean values where at most one value is True.
See Also
--------
atom_implicit_valence
"""
if allowable_set is None:
allowable_set = list(range(7))
return one_hot_encoding(atom.GetImplicitValence(), allowable_set, encode_unknown)
def atom_implicit_valence(atom):
"""Get the implicit valence of an atom.
Parameters
----------
atom : rdkit.Chem.rdchem.Atom
RDKit atom instance.
Returns
-------
list
List containing one int only.
See Also
--------
atom_implicit_valence_one_hot
"""
return [atom.GetImplicitValence()]
# pylint: disable=I1101
def atom_hybridization_one_hot(atom, allowable_set=None, encode_unknown=False):
"""One hot encoding for the hybridization of an atom.
Parameters
----------
atom : rdkit.Chem.rdchem.Atom
RDKit atom instance.
allowable_set : list of rdkit.Chem.rdchem.HybridizationType
Atom hybridizations to consider. Default: ``Chem.rdchem.HybridizationType.SP``,
``Chem.rdchem.HybridizationType.SP2``, ``Chem.rdchem.HybridizationType.SP3``,
``Chem.rdchem.HybridizationType.SP3D``, ``Chem.rdchem.HybridizationType.SP3D2``.
encode_unknown : bool
If True, map inputs not in the allowable set to the
additional last element. (Default: False)
Returns
-------
list
List of boolean values where at most one value is True.
See Also
--------
one_hot_encoding
"""
if allowable_set is None:
allowable_set = [Chem.rdchem.HybridizationType.SP,
Chem.rdchem.HybridizationType.SP2,
Chem.rdchem.HybridizationType.SP3,
Chem.rdchem.HybridizationType.SP3D,
Chem.rdchem.HybridizationType.SP3D2]
return one_hot_encoding(atom.GetHybridization(), allowable_set, encode_unknown)
def atom_total_num_H_one_hot(atom, allowable_set=None, encode_unknown=False):
"""One hot encoding for the total number of Hs of an atom.
Parameters
----------
atom : rdkit.Chem.rdchem.Atom
RDKit atom instance.
allowable_set : list of int
Total number of Hs to consider. Default: ``0`` - ``4``.
encode_unknown : bool
If True, map inputs not in the allowable set to the
additional last element. (Default: False)
Returns
-------
list
List of boolean values where at most one value is True.
See Also
--------
one_hot_encoding
atom_total_num_H
"""
if allowable_set is None:
allowable_set = list(range(5))
return one_hot_encoding(atom.GetTotalNumHs(), allowable_set, encode_unknown)
def atom_total_num_H(atom):
"""Get the total number of Hs of an atom.
Parameters
----------
atom : rdkit.Chem.rdchem.Atom
RDKit atom instance.
Returns
-------
list
List containing one int only.
See Also
--------
atom_total_num_H_one_hot
"""
return [atom.GetTotalNumHs()]
def atom_formal_charge_one_hot(atom, allowable_set=None, encode_unknown=False):
"""One hot encoding for the formal charge of an atom.
Parameters
----------
atom : rdkit.Chem.rdchem.Atom
RDKit atom instance.
allowable_set : list of int
Formal charges to consider. Default: ``-2`` - ``2``.
encode_unknown : bool
If True, map inputs not in the allowable set to the
additional last element. (Default: False)
Returns
-------
list
List of boolean values where at most one value is True.
See Also
--------
one_hot_encoding
atom_formal_charge
"""
if allowable_set is None:
allowable_set = list(range(-2, 3))
return one_hot_encoding(atom.GetFormalCharge(), allowable_set, encode_unknown)
def atom_formal_charge(atom):
"""Get formal charge for an atom.
Parameters
----------
atom : rdkit.Chem.rdchem.Atom
RDKit atom instance.
Returns
-------
list
List containing one int only.
See Also
--------
atom_formal_charge_one_hot
"""
return [atom.GetFormalCharge()]
def atom_partial_charge(atom):
"""Get Gasteiger partial charge for an atom.
For using this function, you must have called ``AllChem.ComputeGasteigerCharges(mol)``
to compute Gasteiger charges.
Occasionally, we can get nan or infinity Gasteiger charges, in which case we will set
the result to be 0.
Parameters
----------
atom : rdkit.Chem.rdchem.Atom
RDKit atom instance.
Returns
-------
list
List containing one float only.
"""
gasteiger_charge = atom.GetProp('_GasteigerCharge')
if gasteiger_charge in ['-nan', 'nan', '-inf', 'inf']:
gasteiger_charge = 0
return [float(gasteiger_charge)]
def atom_num_radical_electrons_one_hot(atom, allowable_set=None, encode_unknown=False):
"""One hot encoding for the number of radical electrons of an atom.
Parameters
----------
atom : rdkit.Chem.rdchem.Atom
RDKit atom instance.
allowable_set : list of int
Number of radical electrons to consider. Default: ``0`` - ``4``.
encode_unknown : bool
If True, map inputs not in the allowable set to the
additional last element. (Default: False)
Returns
-------
list
List of boolean values where at most one value is True.
See Also
--------
one_hot_encoding
atom_num_radical_electrons
"""
if allowable_set is None:
allowable_set = list(range(5))
return one_hot_encoding(atom.GetNumRadicalElectrons(), allowable_set, encode_unknown)
def atom_num_radical_electrons(atom):
"""Get the number of radical electrons for an atom.
Parameters
----------
atom : rdkit.Chem.rdchem.Atom
RDKit atom instance.
Returns
-------
list
List containing one int only.
See Also
--------
atom_num_radical_electrons_one_hot
"""
return [atom.GetNumRadicalElectrons()]
def atom_is_aromatic_one_hot(atom, allowable_set=None, encode_unknown=False):
"""One hot encoding for whether the atom is aromatic.
Parameters
----------
atom : rdkit.Chem.rdchem.Atom
RDKit atom instance.
allowable_set : list of bool
Conditions to consider. Default: ``False`` and ``True``.
encode_unknown : bool
If True, map inputs not in the allowable set to the
additional last element. (Default: False)
Returns
-------
list
List of boolean values where at most one value is True.
See Also
--------
one_hot_encoding
atom_is_aromatic
"""
if allowable_set is None:
allowable_set = [False, True]
return one_hot_encoding(atom.GetIsAromatic(), allowable_set, encode_unknown)
def atom_is_aromatic(atom):
"""Get whether the atom is aromatic.
Parameters
----------
atom : rdkit.Chem.rdchem.Atom
RDKit atom instance.
Returns
-------
list
List containing one bool only.
See Also
--------
atom_is_aromatic_one_hot
"""
return [atom.GetIsAromatic()]
def atom_is_in_ring_one_hot(atom, allowable_set=None, encode_unknown=False):
"""One hot encoding for whether the atom is in ring.
Parameters
----------
atom : rdkit.Chem.rdchem.Atom
RDKit atom instance.
allowable_set : list of bool
Conditions to consider. Default: ``False`` and ``True``.
encode_unknown : bool
If True, map inputs not in the allowable set to the
additional last element. (Default: False)
Returns
-------
list
List of boolean values where at most one value is True.
See Also
--------
one_hot_encoding
atom_is_in_ring
"""
if allowable_set is None:
allowable_set = [False, True]
return one_hot_encoding(atom.IsInRing(), allowable_set, encode_unknown)
def atom_is_in_ring(atom):
"""Get whether the atom is in ring.
Parameters
----------
atom : rdkit.Chem.rdchem.Atom
RDKit atom instance.
Returns
-------
list
List containing one bool only.
See Also
--------
atom_is_in_ring_one_hot
"""
return [atom.IsInRing()]
def atom_chiral_tag_one_hot(atom, allowable_set=None, encode_unknown=False):
"""One hot encoding for the chiral tag of an atom.
Parameters
----------
atom : rdkit.Chem.rdchem.Atom
RDKit atom instance.
allowable_set : list of rdkit.Chem.rdchem.ChiralType
Chiral tags to consider. Default: ``rdkit.Chem.rdchem.ChiralType.CHI_UNSPECIFIED``,
``rdkit.Chem.rdchem.ChiralType.CHI_TETRAHEDRAL_CW``,
``rdkit.Chem.rdchem.ChiralType.CHI_TETRAHEDRAL_CCW``,
``rdkit.Chem.rdchem.ChiralType.CHI_OTHER``.
encode_unknown : bool
If True, map inputs not in the allowable set to the
additional last element. (Default: False)
Returns
-------
list
List of boolean values where at most one value is True.
See Also
--------
one_hot_encoding
atom_chirality_type_one_hot
"""
if allowable_set is None:
allowable_set = [Chem.rdchem.ChiralType.CHI_UNSPECIFIED,
Chem.rdchem.ChiralType.CHI_TETRAHEDRAL_CW,
Chem.rdchem.ChiralType.CHI_TETRAHEDRAL_CCW,
Chem.rdchem.ChiralType.CHI_OTHER]
return one_hot_encoding(atom.GetChiralTag(), allowable_set, encode_unknown)
def atom_chirality_type_one_hot(atom, allowable_set=None, encode_unknown=False):
"""One hot encoding for the chirality type of an atom.
Parameters
----------
atom : rdkit.Chem.rdchem.Atom
RDKit atom instance.
allowable_set : list of str
Chirality types to consider. Default: ``R``, ``S``.
encode_unknown : bool
If True, map inputs not in the allowable set to the
additional last element. (Default: False)
Returns
-------
list
List of boolean values where at most one value is True.
See Also
--------
one_hot_encoding
atom_chiral_tag_one_hot
"""
if not atom.HasProp('_CIPCode'):
return [False, False]
if allowable_set is None:
allowable_set = ['R', 'S']
return one_hot_encoding(atom.GetProp('_CIPCode'), allowable_set, encode_unknown)
def atom_mass(atom, coef=0.01):
"""Get the mass of an atom and scale it.
Parameters
----------
atom : rdkit.Chem.rdchem.Atom
RDKit atom instance.
coef : float
The mass will be multiplied by ``coef``.
Returns
-------
list
List containing one float only.
"""
return [atom.GetMass() * coef]
def atom_is_chiral_center(atom):
"""Get whether the atom is chiral center
Parameters
----------
atom : rdkit.Chem.rdchem.Atom
RDKit atom instance.
Returns
-------
list
List containing one bool only.
"""
return [atom.HasProp('_ChiralityPossible')]
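# --- Usage sketch (illustrative, not part of the original module) ---
# A minimal example of calling the atom descriptor functions above on an RDKit
# atom. RDKit is imported locally so the sketch stands alone; AllChem is only
# needed because atom_partial_charge expects Gasteiger charges to be precomputed.
def _demo_atom_descriptors():
    from rdkit import Chem
    from rdkit.Chem import AllChem

    mol = Chem.MolFromSmiles('c1ccccc1O')  # phenol
    AllChem.ComputeGasteigerCharges(mol)
    atom = mol.GetAtomWithIdx(0)  # an aromatic carbon
    return {
        'total_num_H': atom_total_num_H(atom),        # [1]
        'formal_charge': atom_formal_charge(atom),    # [0]
        'is_aromatic': atom_is_aromatic(atom),        # [True]
        'is_in_ring': atom_is_in_ring(atom),          # [True]
        'mass': atom_mass(atom),                      # [~0.12], i.e. 12.011 * 0.01
        'partial_charge': atom_partial_charge(atom),  # [float]
    }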
class ConcatFeaturizer(object):
"""Concatenate the evaluation results of multiple functions as a single feature.
Parameters
----------
func_list : list
List of functions for computing molecular descriptors from objects of the same
data type, e.g. ``rdkit.Chem.rdchem.Atom``. Each function is of signature
``func(data_type) -> list of float or bool or int``. The resulting order of
the features will follow that of the functions in the list.
"""
def __init__(self, func_list):
self.func_list = func_list
def __call__(self, x):
"""Featurize the input data.
Parameters
----------
x :
Data to featurize.
Returns
-------
list
List of feature values, which can be of type bool, float or int.
"""
return list(itertools.chain.from_iterable(
[func(x) for func in self.func_list]))
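# --- Usage sketch (illustrative, not part of the original module) ---
# A minimal example of composing atom descriptor functions defined above with
# ConcatFeaturizer, assuming RDKit is available (imported locally so the sketch
# is self-contained).
def _demo_concat_featurizer():
    from rdkit import Chem

    atom = Chem.MolFromSmiles('CCO').GetAtomWithIdx(0)
    featurizer = ConcatFeaturizer(
        [atom_formal_charge, atom_is_aromatic, atom_total_num_H_one_hot])
    # Returns a flat list: [0, False, one-hot over 0-4 total Hs] -> 7 values in total.
    return featurizer(atom)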
class BaseAtomFeaturizer(object):
"""An abstract class for atom featurizers.
Loop over all atoms in a molecule and featurize them with the ``featurizer_funcs``.
**We assume the resulting DGLGraph will not contain any virtual nodes and a node i in the
graph corresponds to exactly atom i in the molecule.**
Parameters
----------
featurizer_funcs : dict
Mapping feature name to the featurization function.
Each function is of signature ``func(rdkit.Chem.rdchem.Atom) -> list or 1D numpy array``.
feat_sizes : dict
Mapping feature name to the size of the corresponding feature. If None, they will be
computed when needed. Default: None.
See Also
--------
CanonicalAtomFeaturizer
WeaveAtomFeaturizer
PretrainAtomFeaturizer
AttentiveFPAtomFeaturizer
"""
def __init__(self, featurizer_funcs, feat_sizes=None):
self.featurizer_funcs = featurizer_funcs
if feat_sizes is None:
feat_sizes = dict()
self._feat_sizes = feat_sizes
def feat_size(self, feat_name=None):
"""Get the feature size for ``feat_name``.
When there is only one feature, users do not need to provide ``feat_name``.
Parameters
----------
feat_name : str
Feature for query. Default to None.
Returns
-------
int
Feature size for the feature with name ``feat_name``.
"""
if feat_name is None:
assert len(self.featurizer_funcs) == 1, \
'feat_name should be provided if there are more than one features'
feat_name = list(self.featurizer_funcs.keys())[0]
if feat_name not in self.featurizer_funcs:
raise ValueError('Expect feat_name to be in {}, got {}'.format(
list(self.featurizer_funcs.keys()), feat_name))
if feat_name not in self._feat_sizes:
atom = Chem.MolFromSmiles('C').GetAtomWithIdx(0)
self._feat_sizes[feat_name] = len(self.featurizer_funcs[feat_name](atom))
return self._feat_sizes[feat_name]
def __call__(self, mol):
"""Featurize all atoms in a molecule.
Parameters
----------
mol : rdkit.Chem.rdchem.Mol
RDKit molecule instance.
Returns
-------
dict
For each function in self.featurizer_funcs with the key ``k``, store the computed
feature under the key ``k``. Each feature is a tensor of dtype float32 and shape
(N, M), where N is the number of atoms in the molecule.
"""
num_atoms = mol.GetNumAtoms()
atom_features = defaultdict(list)
# Compute features for each atom
for i in range(num_atoms):
atom = mol.GetAtomWithIdx(i)
for feat_name, feat_func in self.featurizer_funcs.items():
atom_features[feat_name].append(feat_func(atom))
# Stack the features and convert them to float arrays
processed_features = dict()
for feat_name, feat_list in atom_features.items():
feat = np.stack(feat_list)
processed_features[feat_name] = torch.as_tensor(feat.astype(np.float32))
return processed_features
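# --- Usage sketch (illustrative, not part of the original module) ---
# A minimal custom featurizer built directly on BaseAtomFeaturizer, assuming
# RDKit is available. Each atom gets a 2-dimensional feature: scaled mass and
# an aromaticity flag.
def _demo_base_atom_featurizer():
    from rdkit import Chem

    featurizer = BaseAtomFeaturizer(
        featurizer_funcs={'hv': ConcatFeaturizer([atom_mass, atom_is_aromatic])})
    feats = featurizer(Chem.MolFromSmiles('CCO'))
    # feats['hv'] is a float32 tensor of shape (3, 2); feat_size('hv') returns 2.
    return feats['hv'].shape, featurizer.feat_size('hv')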
class CanonicalAtomFeaturizer(BaseAtomFeaturizer):
"""A default featurizer for atoms.
The atom features include:
* **One hot encoding of the atom type**. The supported atom types include
``C``, ``N``, ``O``, ``S``, ``F``, ``Si``, ``P``, ``Cl``, ``Br``, ``Mg``,
``Na``, ``Ca``, ``Fe``, ``As``, ``Al``, ``I``, ``B``, ``V``, ``K``, ``Tl``,
``Yb``, ``Sb``, ``Sn``, ``Ag``, ``Pd``, ``Co``, ``Se``, ``Ti``, ``Zn``,
``H``, ``Li``, ``Ge``, ``Cu``, ``Au``, ``Ni``, ``Cd``, ``In``, ``Mn``, ``Zr``,
``Cr``, ``Pt``, ``Hg``, ``Pb``.
* **One hot encoding of the atom degree**. The supported possibilities
include ``0 - 10``.
* **One hot encoding of the number of implicit Hs on the atom**. The supported
possibilities include ``0 - 6``.
* **Formal charge of the atom**.
* **Number of radical electrons of the atom**.
* **One hot encoding of the atom hybridization**. The supported possibilities include
``SP``, ``SP2``, ``SP3``, ``SP3D``, ``SP3D2``.
* **Whether the atom is aromatic**.
* **One hot encoding of the number of total Hs on the atom**. The supported possibilities
include ``0 - 4``.
**We assume the resulting DGLGraph will not contain any virtual nodes.**
Parameters
----------
atom_data_field : str
Name for storing atom features in DGLGraphs, default to 'h'.
See Also
--------
BaseAtomFeaturizer
WeaveAtomFeaturizer
PretrainAtomFeaturizer
AttentiveFPAtomFeaturizer
"""
def __init__(self, atom_data_field='h'):
super(CanonicalAtomFeaturizer, self).__init__(
featurizer_funcs={atom_data_field: ConcatFeaturizer(
[atom_type_one_hot,
atom_degree_one_hot,
atom_implicit_valence_one_hot,
atom_formal_charge,
atom_num_radical_electrons,
atom_hybridization_one_hot,
atom_is_aromatic,
atom_total_num_H_one_hot]
)})
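# --- Usage sketch (illustrative, not part of the original module) ---
# Using the default featurizer on a small molecule; with the default feature
# set listed in the class docstring, the per-atom feature size works out to 74.
def _demo_canonical_atom_featurizer():
    from rdkit import Chem

    featurizer = CanonicalAtomFeaturizer(atom_data_field='h')
    feats = featurizer(Chem.MolFromSmiles('CCO'))['h']
    # One row per atom: shape (3, 74).
    return feats.shape, featurizer.feat_size()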
class WeaveAtomFeaturizer(object):
"""Atom featurizer in Weave.
The atom featurization performed in `Molecular Graph Convolutions: Moving Beyond Fingerprints
<https://arxiv.org/abs/1603.00856>`__, which considers:
* atom types
* chirality
* formal charge
* partial charge
* aromatic atom
* hybridization
* hydrogen bond donor
* hydrogen bond acceptor
* the number of rings the atom belongs to for ring size between 3 and 8
**We assume the resulting DGLGraph will not contain any virtual nodes.**
Parameters
----------
atom_data_field : str
Name for storing atom features in DGLGraphs, default to 'h'.
atom_types : list of str or None
Atom types to consider for one-hot encoding. If None, we will use a default
choice of ``'H', 'C', 'N', 'O', 'F', 'P', 'S', 'Cl', 'Br', 'I'``.
chiral_types : list of Chem.rdchem.ChiralType or None
Atom chirality to consider for one-hot encoding. If None, we will use a default
choice of ``Chem.rdchem.ChiralType.CHI_TETRAHEDRAL_CW,
Chem.rdchem.ChiralType.CHI_TETRAHEDRAL_CCW``.
hybridization_types : list of Chem.rdchem.HybridizationType or None
Atom hybridization types to consider for one-hot encoding. If None, we will use a
default choice of ``Chem.rdchem.HybridizationType.SP, Chem.rdchem.HybridizationType.SP2,
Chem.rdchem.HybridizationType.SP3``.
See Also
--------
BaseAtomFeaturizer
CanonicalAtomFeaturizer
PretrainAtomFeaturizer
AttentiveFPAtomFeaturizer
"""
def __init__(self, atom_data_field='h', atom_types=None, chiral_types=None,
hybridization_types=None):
super(WeaveAtomFeaturizer, self).__init__()
self._atom_data_field = atom_data_field
if atom_types is None:
atom_types = ['H', 'C', 'N', 'O', 'F', 'P', 'S', 'Cl', 'Br', 'I']
self._atom_types = atom_types
if chiral_types is None:
chiral_types = [Chem.rdchem.ChiralType.CHI_TETRAHEDRAL_CW,
Chem.rdchem.ChiralType.CHI_TETRAHEDRAL_CCW]
self._chiral_types = chiral_types
if hybridization_types is None:
hybridization_types = [Chem.rdchem.HybridizationType.SP,
Chem.rdchem.HybridizationType.SP2,
Chem.rdchem.HybridizationType.SP3]
self._hybridization_types = hybridization_types
self._featurizer = ConcatFeaturizer([
partial(atom_type_one_hot, allowable_set=atom_types, encode_unknown=True),
partial(atom_chiral_tag_one_hot, allowable_set=chiral_types),
atom_formal_charge, atom_partial_charge, atom_is_aromatic,
partial(atom_hybridization_one_hot, allowable_set=hybridization_types)
])
def feat_size(self):
"""Get the feature size.
Returns
-------
int
Feature size.
"""
mol = Chem.MolFromSmiles('C')
feats = self(mol)[self._atom_data_field]
return feats.shape[-1]
def get_donor_acceptor_info(self, mol_feats):
"""Bookkeep whether an atom is donor/acceptor for hydrogen bonds.
Parameters
----------
mol_feats : tuple of rdkit.Chem.rdMolChemicalFeatures.MolChemicalFeature
Features for molecules.
Returns
-------
is_donor : dict
Mapping atom ids to binary values indicating whether atoms
are donors for hydrogen bonds
is_acceptor : dict
Mapping atom ids to binary values indicating whether atoms
are acceptors for hydrogen bonds
"""
is_donor = defaultdict(bool)
is_acceptor = defaultdict(bool)
# Get hydrogen bond donor/acceptor information
for feats in mol_feats:
if feats.GetFamily() == 'Donor':
nodes = feats.GetAtomIds()
for u in nodes:
is_donor[u] = True
elif feats.GetFamily() == 'Acceptor':
nodes = feats.GetAtomIds()
for u in nodes:
is_acceptor[u] = True
return is_donor, is_acceptor
def __call__(self, mol):
"""Featurizes the input molecule.
Parameters
----------
mol : rdkit.Chem.rdchem.Mol
RDKit molecule instance.
Returns
-------
dict
Mapping atom_data_field as specified in the input argument to the atom
features, which is a float32 tensor of shape (N, M), N is the number of
atoms and M is the feature size.
"""
atom_features = []
AllChem.ComputeGasteigerCharges(mol)
num_atoms = mol.GetNumAtoms()
# Get information for donor and acceptor
fdef_name = osp.join(RDConfig.RDDataDir, 'BaseFeatures.fdef')
mol_featurizer = ChemicalFeatures.BuildFeatureFactory(fdef_name)
mol_feats = mol_featurizer.GetFeaturesForMol(mol)
is_donor, is_acceptor = self.get_donor_acceptor_info(mol_feats)
# Get a symmetrized smallest set of smallest rings
# Following the practice from Chainer Chemistry (https://github.com/chainer/
# chainer-chemistry/blob/da2507b38f903a8ee333e487d422ba6dcec49b05/chainer_chemistry/
# dataset/preprocessors/weavenet_preprocessor.py)
sssr = Chem.GetSymmSSSR(mol)
for i in range(num_atoms):
atom = mol.GetAtomWithIdx(i)
# Features that can be computed directly from RDKit atom instances, which is a list
feats = self._featurizer(atom)
# Donor/acceptor indicator
feats.append(float(is_donor[i]))
feats.append(float(is_acceptor[i]))
# Count the number of rings the atom belongs to for ring size between 3 and 8
count = [0 for _ in range(3, 9)]
for ring in sssr:
ring_size = len(ring)
if i in ring and 3 <= ring_size <= 8:
count[ring_size - 3] += 1
feats.extend(count)
atom_features.append(feats)
atom_features = np.stack(atom_features)
return {self._atom_data_field: torch.as_tensor(atom_features.astype(np.float32))}
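# --- Usage sketch (illustrative, not part of the original module) ---
# The Weave featurizer needs no precomputation by the caller: Gasteiger charges
# and hydrogen-bond donor/acceptor information are derived inside __call__.
def _demo_weave_atom_featurizer():
    from rdkit import Chem

    featurizer = WeaveAtomFeaturizer(atom_data_field='h')
    feats = featurizer(Chem.MolFromSmiles('c1ccccc1O'))['h']
    # One row per atom; the column count equals featurizer.feat_size().
    return feats.shape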
class PretrainAtomFeaturizer(object):
"""AtomFeaturizer in Strategies for Pre-training Graph Neural Networks.
The atom featurization performed in `Strategies for Pre-training Graph Neural Networks
<https://arxiv.org/abs/1905.12265>`__, which considers:
* atomic number
* chirality
**We assume the resulting DGLGraph will not contain any virtual nodes.**
Parameters
----------
atomic_number_types : list of int or None
Atomic number types to consider for one-hot encoding. If None, we will use a default
choice of 1-118.
chiral_types : list of Chem.rdchem.ChiralType or None
Atom chirality to consider for one-hot encoding. If None, we will use a default
choice of ``Chem.rdchem.ChiralType.CHI_UNSPECIFIED,
Chem.rdchem.ChiralType.CHI_TETRAHEDRAL_CW,
Chem.rdchem.ChiralType.CHI_TETRAHEDRAL_CCW, Chem.rdchem.ChiralType.CHI_OTHER``.
See Also
--------
BaseAtomFeaturizer
CanonicalAtomFeaturizer
WeaveAtomFeaturizer
AttentiveFPAtomFeaturizer
"""
def __init__(self, atomic_number_types=None, chiral_types=None):
if atomic_number_types is None:
atomic_number_types = list(range(1, 119))
self._atomic_number_types = atomic_number_types
if chiral_types is None:
chiral_types = [
Chem.rdchem.ChiralType.CHI_UNSPECIFIED,
Chem.rdchem.ChiralType.CHI_TETRAHEDRAL_CW,
Chem.rdchem.ChiralType.CHI_TETRAHEDRAL_CCW,
Chem.rdchem.ChiralType.CHI_OTHER
]
self._chiral_types = chiral_types
def __call__(self, mol):
"""Featurizes the input molecule.
Parameters
----------
mol : rdkit.Chem.rdchem.Mol
RDKit molecule instance.
Returns
-------
dict
Mapping 'atomic_number' and 'chirality_type' each to an int64 tensor
of shape (N,), where N is the number of atoms.
"""
atom_features = []
num_atoms = mol.GetNumAtoms()
for i in range(num_atoms):
atom = mol.GetAtomWithIdx(i)
atom_features.append([
self._atomic_number_types.index(atom.GetAtomicNum()),
self._chiral_types.index(atom.GetChiralTag())
])
atom_features = np.stack(atom_features)
atom_features = torch.as_tensor(atom_features.astype(np.int64))
return {
'atomic_number': atom_features[:, 0],
'chirality_type': atom_features[:, 1]
}
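# --- Usage sketch (illustrative, not part of the original module) ---
# The pre-training featurizer stores indices rather than one-hot vectors:
# carbon (atomic number 6) maps to index 5 in the default 1-118 range.
def _demo_pretrain_atom_featurizer():
    from rdkit import Chem

    featurizer = PretrainAtomFeaturizer()
    feats = featurizer(Chem.MolFromSmiles('CCO'))
    # feats['atomic_number'] -> tensor([5, 5, 7]); feats['chirality_type'] -> tensor([0, 0, 0])
    return feats['atomic_number'], feats['chirality_type']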
class AttentiveFPAtomFeaturizer(BaseAtomFeaturizer):
"""The atom featurizer used in AttentiveFP
AttentiveFP is introduced in
`Pushing the Boundaries of Molecular Representation for Drug Discovery with the Graph
Attention Mechanism. <https://www.ncbi.nlm.nih.gov/pubmed/31408336>`__
The atom features include:
* **One hot encoding of the atom type**. The supported atom types include
``B``, ``C``, ``N``, ``O``, ``F``, ``Si``, ``P``, ``S``, ``Cl``, ``As``,
``Se``, ``Br``, ``Te``, ``I``, ``At``, and ``other``.
* **One hot encoding of the atom degree**. The supported possibilities
include ``0 - 5``.
* **Formal charge of the atom**.
* **Number of radical electrons of the atom**.
* **One hot encoding of the atom hybridization**. The supported possibilities include
``SP``, ``SP2``, ``SP3``, ``SP3D``, ``SP3D2``, and ``other``.
* **Whether the atom is aromatic**.
* **One hot encoding of the number of total Hs on the atom**. The supported possibilities
include ``0 - 4``.
* **Whether the atom is a chiral center**
* **One hot encoding of the atom chirality type**. The supported possibilities include
``R``, and ``S``.
**We assume the resulting DGLGraph will not contain any virtual nodes.**
Parameters
----------
atom_data_field : str
Name for storing atom features in DGLGraphs, default to 'h'.
See Also
--------
BaseAtomFeaturizer
CanonicalAtomFeaturizer
WeaveAtomFeaturizer
PretrainAtomFeaturizer
"""
def __init__(self, atom_data_field='h'):
super(AttentiveFPAtomFeaturizer, self).__init__(
featurizer_funcs={atom_data_field: ConcatFeaturizer(
[partial(atom_type_one_hot, allowable_set=[
'B', 'C', 'N', 'O', 'F', 'Si', 'P', 'S',
'Cl', 'As', 'Se', 'Br', 'Te', 'I', 'At'], encode_unknown=True),
partial(atom_degree_one_hot, allowable_set=list(range(6))),
atom_formal_charge,
atom_num_radical_electrons,
partial(atom_hybridization_one_hot, encode_unknown=True),
atom_is_aromatic,
atom_total_num_H_one_hot,
atom_is_chiral_center,
atom_chirality_type_one_hot]
)})
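# --- Usage sketch (illustrative, not part of the original module) ---
# Same calling convention as CanonicalAtomFeaturizer; the chirality columns
# fall back to all-False when no CIP code has been assigned to an atom.
def _demo_attentivefp_atom_featurizer():
    from rdkit import Chem

    featurizer = AttentiveFPAtomFeaturizer(atom_data_field='h')
    # One row per atom; the column count equals featurizer.feat_size().
    return featurizer(Chem.MolFromSmiles('CCO'))['h'].shape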
def bond_type_one_hot(bond, allowable_set=None, encode_unknown=False):
"""One hot encoding for the type of a bond.
Parameters
----------
bond : rdkit.Chem.rdchem.Bond
RDKit bond instance.
allowable_set : list of Chem.rdchem.BondType
Bond types to consider. Default: ``Chem.rdchem.BondType.SINGLE``,
``Chem.rdchem.BondType.DOUBLE``, ``Chem.rdchem.BondType.TRIPLE``,
``Chem.rdchem.BondType.AROMATIC``.
encode_unknown : bool
If True, map inputs not in the allowable set to the
additional last element. (Default: False)
Returns
-------
list
List of boolean values where at most one value is True.
See Also
--------
one_hot_encoding
"""
if allowable_set is None:
allowable_set = [Chem.rdchem.BondType.SINGLE,
Chem.rdchem.BondType.DOUBLE,
Chem.rdchem.BondType.TRIPLE,
Chem.rdchem.BondType.AROMATIC]
return one_hot_encoding(bond.GetBondType(), allowable_set, encode_unknown)
def bond_is_conjugated_one_hot(bond, allowable_set=None, encode_unknown=False):
"""One hot encoding for whether the bond is conjugated.
Parameters
----------
bond : rdkit.Chem.rdchem.Bond
RDKit bond instance.
allowable_set : list of bool
Conditions to consider. Default: ``False`` and ``True``.
encode_unknown : bool
If True, map inputs not in the allowable set to the
additional last element. (Default: False)
Returns
-------
list
List of boolean values where at most one value is True.
See Also
--------
one_hot_encoding
bond_is_conjugated
"""
if allowable_set is None:
allowable_set = [False, True]
return one_hot_encoding(bond.GetIsConjugated(), allowable_set, encode_unknown)
def bond_is_conjugated(bond):
"""Get whether the bond is conjugated.
Parameters
----------
bond : rdkit.Chem.rdchem.Bond
RDKit bond instance.
Returns
-------
list
List containing one bool only.
See Also
--------
bond_is_conjugated_one_hot
"""
return [bond.GetIsConjugated()]
def bond_is_in_ring_one_hot(bond, allowable_set=None, encode_unknown=False):
"""One hot encoding for whether the bond is in a ring of any size.
Parameters
----------
bond : rdkit.Chem.rdchem.Bond
RDKit bond instance.
allowable_set : list of bool
Conditions to consider. Default: ``False`` and ``True``.
encode_unknown : bool
If True, map inputs not in the allowable set to the
additional last element. (Default: False)
Returns
-------
list
List of boolean values where at most one value is True.
See Also
--------
one_hot_encoding
bond_is_in_ring
"""
if allowable_set is None:
allowable_set = [False, True]
return one_hot_encoding(bond.IsInRing(), allowable_set, encode_unknown)
def bond_is_in_ring(bond):
"""Get whether the bond is in a ring of any size.
Parameters
----------
bond : rdkit.Chem.rdchem.Bond
RDKit bond instance.
Returns
-------
list
List containing one bool only.
See Also
--------
bond_is_in_ring_one_hot
"""
return [bond.IsInRing()]
def bond_stereo_one_hot(bond, allowable_set=None, encode_unknown=False):
"""One hot encoding for the stereo configuration of a bond.
Parameters
----------
bond : rdkit.Chem.rdchem.Bond
RDKit bond instance.
allowable_set : list of rdkit.Chem.rdchem.BondStereo
Stereo configurations to consider. Default: ``rdkit.Chem.rdchem.BondStereo.STEREONONE``,
``rdkit.Chem.rdchem.BondStereo.STEREOANY``, ``rdkit.Chem.rdchem.BondStereo.STEREOZ``,
``rdkit.Chem.rdchem.BondStereo.STEREOE``, ``rdkit.Chem.rdchem.BondStereo.STEREOCIS``,
``rdkit.Chem.rdchem.BondStereo.STEREOTRANS``.
encode_unknown : bool
If True, map inputs not in the allowable set to the
additional last element. (Default: False)
Returns
-------
list
List of boolean values where at most one value is True.
See Also
--------
one_hot_encoding
"""
if allowable_set is None:
allowable_set = [Chem.rdchem.BondStereo.STEREONONE,
Chem.rdchem.BondStereo.STEREOANY,
Chem.rdchem.BondStereo.STEREOZ,
Chem.rdchem.BondStereo.STEREOE,
Chem.rdchem.BondStereo.STEREOCIS,
Chem.rdchem.BondStereo.STEREOTRANS]
return one_hot_encoding(bond.GetStereo(), allowable_set, encode_unknown)
def bond_direction_one_hot(bond, allowable_set=None, encode_unknown=False):
"""One hot encoding for the direction of a bond.
Parameters
----------
bond : rdkit.Chem.rdchem.Bond
RDKit bond instance.
allowable_set : list of Chem.rdchem.BondDir
Bond directions to consider. Default: ``Chem.rdchem.BondDir.NONE``,
``Chem.rdchem.BondDir.ENDUPRIGHT``, ``Chem.rdchem.BondDir.ENDDOWNRIGHT``.
encode_unknown : bool
If True, map inputs not in the allowable set to the
additional last element. (Default: False)
Returns
-------
list
List of boolean values where at most one value is True.
See Also
--------
one_hot_encoding
"""
if allowable_set is None:
allowable_set = [Chem.rdchem.BondDir.NONE,
Chem.rdchem.BondDir.ENDUPRIGHT,
Chem.rdchem.BondDir.ENDDOWNRIGHT]
return one_hot_encoding(bond.GetBondDir(), allowable_set, encode_unknown)
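# --- Usage sketch (illustrative, not part of the original module) ---
# A minimal example of the bond descriptor functions above on an RDKit bond,
# with RDKit imported locally so the sketch stands alone.
def _demo_bond_descriptors():
    from rdkit import Chem

    bond = Chem.MolFromSmiles('C=CC').GetBondWithIdx(0)  # the double bond
    return {
        'type_one_hot': bond_type_one_hot(bond),      # [False, True, False, False]
        'is_conjugated': bond_is_conjugated(bond),    # [False]
        'is_in_ring': bond_is_in_ring(bond),          # [False]
        'stereo_one_hot': bond_stereo_one_hot(bond),  # STEREONONE -> first slot True
    }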
class BaseBondFeaturizer(object):
"""An abstract class for bond featurizers.
Loop over all bonds in a molecule and featurize them with the ``featurizer_funcs``.
We assume the constructed ``DGLGraph`` is a bi-directed graph where the **i** th bond in the
molecule, i.e. ``mol.GetBondWithIdx(i)``, corresponds to the **(2i)**-th and **(2i+1)**-th edges
in the DGLGraph.
**We assume the resulting DGLGraph will be created with :func:`smiles_to_bigraph` without
self loops.**
Parameters
----------
featurizer_funcs : dict
Mapping feature name to the featurization function.
Each function is of signature ``func(rdkit.Chem.rdchem.Bond) -> list or 1D numpy array``.
feat_sizes : dict
Mapping feature name to the size of the corresponding feature. If None, they will be
computed when needed. Default: None.
self_loop : bool
Whether self loops will be added. Default to False. If True, it will use an additional
column of binary values to indicate the identity of self loops in each bond feature.
The features of the self loops will be zero except for the additional columns.
See Also
--------
CanonicalBondFeaturizer
WeaveEdgeFeaturizer
PretrainBondFeaturizer
AttentiveFPBondFeaturizer
"""
def __init__(self, featurizer_funcs, feat_sizes=None, self_loop=False):
self.featurizer_funcs = featurizer_funcs
if feat_sizes is None:
feat_sizes = dict()
self._feat_sizes = feat_sizes
self._self_loop = self_loop
def feat_size(self, feat_name=None):
"""Get the feature size for ``feat_name``.
When there is only one feature, users do not need to provide ``feat_name``.
Parameters
----------
feat_name : str
Feature for query. Default to None.
Returns
-------
int
Feature size for the feature with name ``feat_name``.
"""
if feat_name is None:
assert len(self.featurizer_funcs) == 1, \
'feat_name should be provided if there are more than one features'
feat_name = list(self.featurizer_funcs.keys())[0]
if feat_name not in self.featurizer_funcs:
raise ValueError('Expect feat_name to be in {}, got {}'.format(
list(self.featurizer_funcs.keys()), feat_name))
mol = Chem.MolFromSmiles('CCO')
feats = self(mol)
return feats[feat_name].shape[1]
def __call__(self, mol):
"""Featurize all bonds in a molecule.
Parameters
----------
mol : rdkit.Chem.rdchem.Mol
RDKit molecule instance.
Returns
-------
dict
For each function in self.featurizer_funcs with the key ``k``, store the computed
feature under the key ``k``. Each feature is a tensor of dtype float32 and shape
(N, M), where N is the number of edges (twice the number of bonds in the
molecule, plus one self loop per atom if ``self_loop`` is True).
"""
num_bonds = mol.GetNumBonds()
bond_features = defaultdict(list)
# Compute features for each bond
for i in range(num_bonds):
bond = mol.GetBondWithIdx(i)
for feat_name, feat_func in self.featurizer_funcs.items():
feat = feat_func(bond)
bond_features[feat_name].extend([feat, feat.copy()])
# Stack the features and convert them to float arrays
processed_features = dict()
for feat_name, feat_list in bond_features.items():
feat = np.stack(feat_list)
processed_features[feat_name] = torch.as_tensor(feat.astype(np.float32))
if self._self_loop and num_bonds > 0:
num_atoms = mol.GetNumAtoms()
for feat_name in processed_features:
feats = processed_features[feat_name]
feats = torch.cat([feats, torch.zeros(feats.shape[0], 1)], dim=1)
self_loop_feats = torch.zeros(num_atoms, feats.shape[1])
self_loop_feats[:, -1] = 1
feats = torch.cat([feats, self_loop_feats], dim=0)
processed_features[feat_name] = feats
if self._self_loop and num_bonds == 0:
num_atoms = mol.GetNumAtoms()
toy_mol = Chem.MolFromSmiles('CO')
processed_features = self(toy_mol)
for feat_name in processed_features:
feats = processed_features[feat_name]
feats = torch.zeros(num_atoms, feats.shape[1])
feats[:, -1] = 1
processed_features[feat_name] = feats
return processed_features
class CanonicalBondFeaturizer(BaseBondFeaturizer):
"""A default featurizer for bonds.
The bond features include:
* **One hot encoding of the bond type**. The supported bond types include
``SINGLE``, ``DOUBLE``, ``TRIPLE``, ``AROMATIC``.
* **Whether the bond is conjugated.**.
* **Whether the bond is in a ring of any size.**
* **One hot encoding of the stereo configuration of a bond**. The supported bond stereo
configurations include ``STEREONONE``, ``STEREOANY``, ``STEREOZ``, ``STEREOE``,
``STEREOCIS``, ``STEREOTRANS``.
**We assume the resulting DGLGraph will be created with :func:`smiles_to_bigraph` without
self loops.**
Parameters
----------
bond_data_field : str
Name for storing bond features in DGLGraphs, default to ``'e'``.
self_loop : bool
Whether self loops will be added. Default to False. If True, it will use an additional
column of binary values to indicate the identity of self loops. The feature of the
self loops will be zero except for the additional column.
See Also
--------
BaseBondFeaturizer
WeaveEdgeFeaturizer
PretrainBondFeaturizer
AttentiveFPBondFeaturizer
"""
def __init__(self, bond_data_field='e', self_loop=False):
super(CanonicalBondFeaturizer, self).__init__(
featurizer_funcs={bond_data_field: ConcatFeaturizer(
[bond_type_one_hot,
bond_is_conjugated,
bond_is_in_ring,
bond_stereo_one_hot]
)}, self_loop=self_loop)
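# --- Usage sketch (illustrative, not part of the original module) ---
# Each chemical bond contributes two identical rows (one per edge direction),
# so ethanol's two bonds give a (4, 12) feature matrix with the defaults above.
def _demo_canonical_bond_featurizer():
    from rdkit import Chem

    featurizer = CanonicalBondFeaturizer(bond_data_field='e')
    return featurizer(Chem.MolFromSmiles('CCO'))['e'].shape  # torch.Size([4, 12])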
# pylint: disable=E1102
class WeaveEdgeFeaturizer(object):
"""Edge featurizer in Weave.
The edge featurization is introduced in `Molecular Graph Convolutions:
Moving Beyond Fingerprints <https://arxiv.org/abs/1603.00856>`__.
This featurization is performed for a complete graph of atoms with self loops added,
which considers:
* Number of bonds between each pair of atoms
* One-hot encoding of bond type if a bond exists between a pair of atoms
* Whether a pair of atoms belongs to a same ring
Parameters
----------
edge_data_field : str
Name for storing edge features in DGLGraphs, default to ``'e'``.
max_distance : int
Maximum number of bonds to consider between each pair of atoms.
Default to 7.
bond_types : list of Chem.rdchem.BondType or None
Bond types to consider for one hot encoding. If None, we consider by
default single, double, triple and aromatic bonds.
See Also
--------
BaseBondFeaturizer
CanonicalBondFeaturizer
PretrainBondFeaturizer
AttentiveFPBondFeaturizer
"""
def __init__(self, edge_data_field='e', max_distance=7, bond_types=None):
super(WeaveEdgeFeaturizer, self).__init__()
self._edge_data_field = edge_data_field
self._max_distance = max_distance
if bond_types is None:
bond_types = [Chem.rdchem.BondType.SINGLE,
Chem.rdchem.BondType.DOUBLE,
Chem.rdchem.BondType.TRIPLE,
Chem.rdchem.BondType.AROMATIC]
self._bond_types = bond_types
def feat_size(self):
"""Get the feature size.
Returns
-------
int
Feature size.
"""
mol = Chem.MolFromSmiles('C')
feats = self(mol)[self._edge_data_field]
return feats.shape[-1]
def __call__(self, mol):
"""Featurizes the input molecule.
Parameters
----------
mol : rdkit.Chem.rdchem.Mol
RDKit molecule instance.
Returns
-------
dict
Mapping self._edge_data_field to a float32 tensor of shape (N, M), where
N is the number of atom pairs and M is the feature size.
"""
# Part 1 based on number of bonds between each pair of atoms
distance_matrix = torch.from_numpy(Chem.GetDistanceMatrix(mol))
# Change shape from (V, V, 1) to (V^2, 1)
distance_matrix = distance_matrix.float().reshape(-1, 1)
# Elementwise compare if distance is bigger than 0, 1, ..., max_distance - 1
distance_indicators = (distance_matrix >
torch.arange(0, self._max_distance).float()).float()
# Part 2 for one hot encoding of bond type.
num_atoms = mol.GetNumAtoms()
bond_indicators = torch.zeros(num_atoms, num_atoms, len(self._bond_types))
for bond in mol.GetBonds():
bond_type_encoding = torch.tensor(
bond_type_one_hot(bond, allowable_set=self._bond_types)).float()
begin_atom_idx, end_atom_idx = bond.GetBeginAtomIdx(), bond.GetEndAtomIdx()
bond_indicators[begin_atom_idx, end_atom_idx] = bond_type_encoding
bond_indicators[end_atom_idx, begin_atom_idx] = bond_type_encoding
# Reshape from (V, V, num_bond_types) to (V^2, num_bond_types)
bond_indicators = bond_indicators.reshape(-1, len(self._bond_types))
# Part 3 for whether a pair of atoms belongs to a same ring.
sssr = Chem.GetSymmSSSR(mol)
ring_mate_indicators = torch.zeros(num_atoms, num_atoms, 1)
for ring in sssr:
ring = list(ring)
num_atoms_in_ring = len(ring)
for i in range(num_atoms_in_ring):
ring_mate_indicators[ring[i], torch.tensor(ring)] = 1
ring_mate_indicators = ring_mate_indicators.reshape(-1, 1)
return {self._edge_data_field: torch.cat([distance_indicators,
bond_indicators,
ring_mate_indicators], dim=1)}
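# --- Usage sketch (illustrative, not part of the original module) ---
# Edge features are produced for every ordered atom pair (including self pairs),
# so a 3-atom molecule yields 9 rows; with max_distance=7 and 4 bond types the
# feature size is 7 + 4 + 1 = 12.
def _demo_weave_edge_featurizer():
    from rdkit import Chem

    featurizer = WeaveEdgeFeaturizer(edge_data_field='e', max_distance=7)
    return featurizer(Chem.MolFromSmiles('CCO'))['e'].shape  # torch.Size([9, 12])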
class PretrainBondFeaturizer(object):
"""BondFeaturizer in Strategies for Pre-training Graph Neural Networks.
The bond featurization performed in `Strategies for Pre-training Graph Neural Networks
<https://arxiv.org/abs/1905.12265>`__, which considers:
* bond type
* bond direction
Parameters
----------
bond_types : list of Chem.rdchem.BondType or None
Bond types to consider. Default to ``Chem.rdchem.BondType.SINGLE``,
``Chem.rdchem.BondType.DOUBLE``, ``Chem.rdchem.BondType.TRIPLE``,
``Chem.rdchem.BondType.AROMATIC``.
bond_direction_types : list of Chem.rdchem.BondDir or None
Bond directions to consider. Default to ``Chem.rdchem.BondDir.NONE``,
``Chem.rdchem.BondDir.ENDUPRIGHT``, ``Chem.rdchem.BondDir.ENDDOWNRIGHT``.
self_loop : bool
Whether self loops will be added. Default to True.
"""
def __init__(self, bond_types=None, bond_direction_types=None, self_loop=True):
if bond_types is None:
bond_types = [
Chem.rdchem.BondType.SINGLE, Chem.rdchem.BondType.DOUBLE,
Chem.rdchem.BondType.TRIPLE, Chem.rdchem.BondType.AROMATIC
]
self._bond_types = bond_types
if bond_direction_types is None:
bond_direction_types = [
Chem.rdchem.BondDir.NONE,
Chem.rdchem.BondDir.ENDUPRIGHT,
Chem.rdchem.BondDir.ENDDOWNRIGHT
]
self._bond_direction_types = bond_direction_types
self._self_loop = self_loop
def __call__(self, mol):
"""Featurizes the input molecule.
Parameters
----------
mol : rdkit.Chem.rdchem.Mol
RDKit molecule instance.
Returns
-------
dict
Mapping 'bond_type' and 'bond_direction_type' each to an int64
tensor of shape (N,), where N is the number of edges.
"""
edge_features = []
num_bonds = mol.GetNumBonds()
if num_bonds == 0:
assert self._self_loop, \
'self_loop must be True when the molecule has no bonds.'
# Compute features for each bond
for i in range(num_bonds):
bond = mol.GetBondWithIdx(i)
bond_feats = [
self._bond_types.index(bond.GetBondType()),
self._bond_direction_types.index(bond.GetBondDir())
]
edge_features.extend([bond_feats, bond_feats.copy()])
if self._self_loop:
self_loop_features = torch.zeros((mol.GetNumAtoms(), 2), dtype=torch.int64)
self_loop_features[:, 0] = len(self._bond_types)
if num_bonds == 0:
edge_features = self_loop_features
else:
edge_features = np.stack(edge_features)
edge_features = torch.as_tensor(edge_features.astype(np.int64))
if self._self_loop:
edge_features = torch.cat([edge_features, self_loop_features], dim=0)
return {'bond_type': edge_features[:, 0], 'bond_direction_type': edge_features[:, 1]}
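# --- Usage sketch (illustrative, not part of the original module) ---
# Bond features are index-encoded; with self loops enabled, ethanol's 2 bonds
# give 2 * 2 directed edges plus 3 self-loop entries (typed as len(bond_types)).
def _demo_pretrain_bond_featurizer():
    from rdkit import Chem

    featurizer = PretrainBondFeaturizer(self_loop=True)
    feats = featurizer(Chem.MolFromSmiles('CCO'))
    return feats['bond_type'].shape  # torch.Size([7])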
class AttentiveFPBondFeaturizer(BaseBondFeaturizer):
"""The bond featurizer used in AttentiveFP
AttentiveFP is introduced in
`Pushing the Boundaries of Molecular Representation for Drug Discovery with the Graph
Attention Mechanism. <https://www.ncbi.nlm.nih.gov/pubmed/31408336>`__
The bond features include:
* **One hot encoding of the bond type**. The supported bond types include
``SINGLE``, ``DOUBLE``, ``TRIPLE``, ``AROMATIC``.
* **Whether the bond is conjugated.**.
* **Whether the bond is in a ring of any size.**
* **One hot encoding of the stereo configuration of a bond**. The supported bond stereo
configurations include ``STEREONONE``, ``STEREOANY``, ``STEREOZ``, ``STEREOE``.
**We assume the resulting DGLGraph will be created with :func:`smiles_to_bigraph` without
self loops.**
Parameters
----------
bond_data_field : str
Name for storing bond features in DGLGraphs, default to ``'e'``.
self_loop : bool
Whether self loops will be added. Default to False. If True, it will use an additional
column of binary values to indicate the identity of self loops. The feature of the
self loops will be zero except for the additional column.
See Also
--------
BaseBondFeaturizer
CanonicalBondFeaturizer
WeaveEdgeFeaturizer
PretrainBondFeaturizer
"""
def __init__(self, bond_data_field='e', self_loop=False):
super(AttentiveFPBondFeaturizer, self).__init__(
featurizer_funcs={bond_data_field: ConcatFeaturizer(
[bond_type_one_hot,
bond_is_conjugated,
bond_is_in_ring,
partial(bond_stereo_one_hot, allowable_set=[Chem.rdchem.BondStereo.STEREONONE,
Chem.rdchem.BondStereo.STEREOANY,
Chem.rdchem.BondStereo.STEREOZ,
Chem.rdchem.BondStereo.STEREOE])]
)}, self_loop=self_loop)
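# --- Usage sketch (illustrative, not part of the original module) ---
# Same layout as CanonicalBondFeaturizer but with a 4-way stereo encoding,
# giving 4 + 1 + 1 + 4 = 10 features per directed edge.
def _demo_attentivefp_bond_featurizer():
    from rdkit import Chem

    featurizer = AttentiveFPBondFeaturizer(bond_data_field='e')
    return featurizer(Chem.MolFromSmiles('CCO'))['e'].shape  # torch.Size([4, 10])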
| 32.111232
| 100
| 0.620284
|
d2dcc5b325790b9efe72c206e0f5c5a1c7e02336
| 738
|
py
|
Python
|
packages/python/m/cli/commands/git/status.py
|
LaudateCorpus1/m
|
57e258ddb1347f8e29673410e12575d203bb19c8
|
[
"MIT"
] | null | null | null |
packages/python/m/cli/commands/git/status.py
|
LaudateCorpus1/m
|
57e258ddb1347f8e29673410e12575d203bb19c8
|
[
"MIT"
] | null | null | null |
packages/python/m/cli/commands/git/status.py
|
LaudateCorpus1/m
|
57e258ddb1347f8e29673410e12575d203bb19c8
|
[
"MIT"
] | 1
|
2021-12-31T13:25:33.000Z
|
2021-12-31T13:25:33.000Z
|
import inspect
from ...utils import call_main
def add_parser(sub_parser, raw):
desc = """
Display a single word representing the current git status.
$ m git status
clean
Statuses:
unknown
untracked
stash
clean
ahead
behind
staged
dirty
diverged
?
"""
sub_parser.add_parser(
'status',
help='display the current git status',
formatter_class=raw,
description=inspect.cleandoc(desc),
)
def run(_):
# pylint: disable=import-outside-toplevel
from .... import git
return call_main(git.get_status, [], print_raw=True)
| 19.421053
| 66
| 0.533875
|
e5fe29920c5676c9a908ee8c5754fa5bdfc2c9e2
| 91
|
py
|
Python
|
app/controllers/calls_log/__init__.py
|
voxity/vox-ui-api
|
9da442a2ae8e5fec92485cf7dc4adf1a560aa8f5
|
[
"MIT"
] | null | null | null |
app/controllers/calls_log/__init__.py
|
voxity/vox-ui-api
|
9da442a2ae8e5fec92485cf7dc4adf1a560aa8f5
|
[
"MIT"
] | null | null | null |
app/controllers/calls_log/__init__.py
|
voxity/vox-ui-api
|
9da442a2ae8e5fec92485cf7dc4adf1a560aa8f5
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from .views import CALLS_LOG
| 30.333333
| 38
| 0.758242
|
56edb70e56a9c0bba532c99251bb043cbc1714a3
| 1,971
|
py
|
Python
|
hetest/python/circuit_generation/ibm/ibm_gate_rotate.py
|
y4n9squared/HEtest
|
264459a8fa1480c7b65d946f88d94af1a038fbf1
|
[
"BSD-2-Clause"
] | 6
|
2017-12-05T14:46:47.000Z
|
2021-08-09T07:23:44.000Z
|
hetest/python/circuit_generation/int_circuit/ibm_gate_rotate.py
|
CianLevy/Modified-HEtest
|
4f4422bba48f269e953ca0d3863f774f66ba2247
|
[
"BSD-2-Clause"
] | 1
|
2018-02-22T14:55:07.000Z
|
2018-02-22T14:55:07.000Z
|
hetest/python/circuit_generation/int_circuit/ibm_gate_rotate.py
|
CianLevy/Modified-HEtest
|
4f4422bba48f269e953ca0d3863f774f66ba2247
|
[
"BSD-2-Clause"
] | 2
|
2018-02-19T20:58:58.000Z
|
2021-06-23T05:38:53.000Z
|
# *****************************************************************
# Copyright (c) 2013 Massachusetts Institute of Technology
#
# Developed exclusively at US Government expense under US Air Force contract
# FA8721-05-C-002. The rights of the United States Government to use, modify,
# reproduce, release, perform, display or disclose this computer software and
# computer software documentation in whole or in part, in any manner and for
# any purpose whatsoever, and to have or authorize others to do so, are
# Unrestricted and Unlimited.
#
# Licensed for use under the BSD License as described in the BSD-LICENSE.txt
# file in the root directory of this release.
#
# Project: SPAR
# Authors: SY
# Description: IBM TA2 rotate gate class
#
# Modifications:
# Date Name Modification
# ---- ---- ------------
# 8 Nov 2012 SY Original Version
# *****************************************************************
import ibm_gate_one_inp_and_const as igoiac
import ibm_batch as ib
class IBMRotateGate(igoiac.IBMGateOneInpAndConst):
"""
This class represents a rotate gate.
"""
def __init__(self, displayname, input1, const, circuit):
"""Initializes the gate."""
if input1.get_batch_size() == 600:
additional_depth = .75
elif input1.get_batch_size() > 600:
# possible batch sizes greater than 600 are 682 and 630.
additional_depth = .25
elif input1.get_batch_size() < 600:
# possible batch sizes smaller than 600 are 256 and 378.
additional_depth = .5
D = input1.get_depth() + additional_depth
igoiac.IBMGateOneInpAndConst.__init__(self, displayname, D, input1,
int(const), circuit)
def get_func_name(self):
"""Returns the name of the function which this gate evaluates."""
return "LROTATE"
| 41.0625
| 77
| 0.599696
|
04e57d04f6aa8bd77488ab0df571d00e62c0107d
| 5,075
|
py
|
Python
|
aws_managers/feature_store/features_metadata.py
|
vahndi/aws-managers
|
bdbfb2b8a9258a53e3ea4dfbbfe5491a34113899
|
[
"MIT"
] | null | null | null |
aws_managers/feature_store/features_metadata.py
|
vahndi/aws-managers
|
bdbfb2b8a9258a53e3ea4dfbbfe5491a34113899
|
[
"MIT"
] | null | null | null |
aws_managers/feature_store/features_metadata.py
|
vahndi/aws-managers
|
bdbfb2b8a9258a53e3ea4dfbbfe5491a34113899
|
[
"MIT"
] | null | null | null |
from pathlib import Path
from typing import Optional, Dict, List
from pandas import DataFrame, read_excel, Series, isnull
from aws_managers.utils.dtype_mappings import FS_NAME_TO_ATHENA_NAME
class FeaturesMetadata(object):
def __init__(self, metadata_fn: Path, dataset_name: str):
"""
Class to rename columns based on dataset metadata.
:param metadata_fn: Path to the metadata spreadsheet (Excel workbook).
:param dataset_name: Name of the dataset in the metadata spreadsheet.
"""
# read metadata
self.columns: DataFrame = read_excel(
metadata_fn,
sheet_name=dataset_name, engine='openpyxl'
).set_index('original_name')
attributes = read_excel(
metadata_fn,
sheet_name='attributes', engine='openpyxl'
)
self.attributes: Series = attributes.loc[
attributes['dataset'] == dataset_name
].set_index('attribute_name')['attribute_type']
self._name_mapping: Optional[Dict[str, str]] = None
def check_d_types(self, data: DataFrame):
"""
Check that the data can be converted to the d-types in the metadata.
:param data: The data whose d-types to check.
"""
for old_name, attribute_values in self.columns.iterrows():
print(f'\rChecking d-type for column {old_name}' + ' ' * 256,
end='')
if attribute_values['data_type'] == 'Integral':
_ = data[old_name].dropna().astype(int)
elif attribute_values['data_type'] == 'Fractional':
_ = data[old_name].dropna().astype(float)
elif attribute_values['data_type'] == 'String':
_ = data[old_name].dropna().astype('string')
print('\nAll checks passed.')
@property
def name_mapping(self) -> Dict[str, str]:
"""
Return a dictionary that maps old feature names to new ones.
"""
# return mapping if it already exists
if self._name_mapping is not None:
return self._name_mapping
# build mapping
old_to_new_name = {}
old_name: str
for old_name, attr_values in self.columns.iterrows():
new_name = f"{attr_values['feature']}___{attr_values['metric']}"
for attr_name, attr_type in self.attributes.items():
attribute_value = self.columns.loc[old_name, attr_name]
if isnull(attribute_value):
continue
if attr_type == 'string':
new_name += f'___{attr_name}__{attribute_value}'
elif attr_type == 'bool':
if attribute_value == True:
new_name += f'___{attr_name}'
elif attribute_value == False:
new_name += f'___not_{attr_name}'
else:
raise ValueError(
f'{attr_name} should be equal to True or False '
f'but is {attribute_value}'
)
elif attr_type == 'int_range':
new_name += f'___{attr_name}__{attribute_value}'
else:
raise ValueError(
f'Invalid attribute type for attribute '
f'{attr_name} ({attr_type})'
)
# set mapping
old_to_new_name[old_name] = new_name
# return created mapping
self._name_mapping = old_to_new_name
return self._name_mapping
@property
def old_names(self) -> List[str]:
"""
Return the old names of the dataset, as listed in the metadata.
"""
return self.columns.index.to_list()
@property
def new_names(self) -> List[str]:
"""
Return the new names of the dataset, derived from the metadata name mapping.
"""
mapping = self.name_mapping
return [mapping[old_name] for old_name in self.old_names]
@property
def feature_types(self) -> Dict[str, str]:
"""
Return a dictionary that maps new feature names to their types.
"""
mapping = self.name_mapping
return {
mapping[old_name]: data_type
for old_name, data_type in self.columns['data_type'].items()
}
def athena_schema(self, identifier_name: str, identifier_type: str) -> str:
"""
Return a string of pairs of new column name and Athena data type.
:param identifier_name: Name of the FeatureStore record identifier.
:param identifier_type: Data type of the FeatureStore record identifier.
One of {'String', 'Integral', 'Fractional'}
"""
str_out = (
f'{identifier_name} {FS_NAME_TO_ATHENA_NAME[identifier_type]},\n'
)
mapping = self.name_mapping
str_out += ',\n'.join([
f'{mapping[old_name]} {FS_NAME_TO_ATHENA_NAME[data_type]}'
for old_name, data_type in self.columns['data_type'].items()
]) + '\n'
return str_out
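# --- Usage sketch (illustrative, not part of the original module) ---
# Hypothetical example: 'metadata.xlsx' and 'my_dataset' are placeholders; the
# spreadsheet must contain one sheet per dataset plus an 'attributes' sheet,
# as read in __init__ above.
def _demo_features_metadata():
    metadata = FeaturesMetadata(
        metadata_fn=Path('metadata.xlsx'),  # placeholder path
        dataset_name='my_dataset',          # placeholder sheet name
    )
    # Old-to-new column mapping and the Athena schema string derived from it.
    return metadata.name_mapping, metadata.athena_schema('record_id', 'String')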
| 38.157895
| 80
| 0.565517
|
2311ce62f850ebd1847c70faf7c44e746ba724eb
| 1,007
|
py
|
Python
|
oidc_example/op2/venvOidc/Scripts/pasteurize-script.py
|
State-xyz/pyoidc
|
cfbe40e43b7acb0004900520d50ede60858208d4
|
[
"Apache-2.0"
] | null | null | null |
oidc_example/op2/venvOidc/Scripts/pasteurize-script.py
|
State-xyz/pyoidc
|
cfbe40e43b7acb0004900520d50ede60858208d4
|
[
"Apache-2.0"
] | null | null | null |
oidc_example/op2/venvOidc/Scripts/pasteurize-script.py
|
State-xyz/pyoidc
|
cfbe40e43b7acb0004900520d50ede60858208d4
|
[
"Apache-2.0"
] | null | null | null |
#!e:\datn\pyoidc\oidc_example\op2\venvoidc\scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'future==0.17.1','console_scripts','pasteurize'
import re
import sys
# for compatibility with easy_install; see #2198
__requires__ = 'future==0.17.1'
try:
from importlib.metadata import distribution
except ImportError:
try:
from importlib_metadata import distribution
except ImportError:
from pkg_resources import load_entry_point
def importlib_load_entry_point(spec, group, name):
dist_name, _, _ = spec.partition('==')
matches = (
entry_point
for entry_point in distribution(dist_name).entry_points
if entry_point.group == group and entry_point.name == name
)
return next(matches).load()
globals().setdefault('load_entry_point', importlib_load_entry_point)
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(load_entry_point('future==0.17.1', 'console_scripts', 'pasteurize')())
| 29.617647
| 83
| 0.706058
|
ffab59426370f187a27bf66453ae67b0141b5dca
| 508
|
py
|
Python
|
forums/models.py
|
akiyoss-git/MineLearningMirror
|
bf183738f6a95e6717f7b22081628279f9d6f20b
|
[
"MIT"
] | null | null | null |
forums/models.py
|
akiyoss-git/MineLearningMirror
|
bf183738f6a95e6717f7b22081628279f9d6f20b
|
[
"MIT"
] | null | null | null |
forums/models.py
|
akiyoss-git/MineLearningMirror
|
bf183738f6a95e6717f7b22081628279f9d6f20b
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.template.defaultfilters import slugify
class Forum(models.Model):
name = models.CharField(max_length=30, unique=True)
description = models.CharField(max_length=200)
slug = models.SlugField(unique=True)
def __str__(self):
return self.slug
def save(self, *args, **kwargs):
if not self.id:
# Newly created object, so set slug
self.slug = slugify(self.name)
super(Forum, self).save(*args, **kwargs)
| 28.222222
| 55
| 0.663386
|
4d19c81bd51977d38eefd3dacc82b461c2de75ef
| 22,605
|
py
|
Python
|
selfdrive/car/volkswagen/values.py
|
SamuelSandoval/openpilot
|
a337097b5ee515560e9f1a804b997753767d3c9a
|
[
"MIT"
] | 2
|
2021-06-09T13:13:39.000Z
|
2021-09-24T16:12:52.000Z
|
selfdrive/car/volkswagen/values.py
|
SamuelSandoval/openpilot
|
a337097b5ee515560e9f1a804b997753767d3c9a
|
[
"MIT"
] | null | null | null |
selfdrive/car/volkswagen/values.py
|
SamuelSandoval/openpilot
|
a337097b5ee515560e9f1a804b997753767d3c9a
|
[
"MIT"
] | null | null | null |
# flake8: noqa
from selfdrive.car import dbc_dict
from cereal import car
Ecu = car.CarParams.Ecu
class CarControllerParams:
HCA_STEP = 2 # HCA_01 message frequency 50Hz
LDW_STEP = 10 # LDW_02 message frequency 10Hz
GRA_ACC_STEP = 3 # GRA_ACC_01 message frequency 33Hz
GRA_VBP_STEP = 100 # Send ACC virtual button presses once a second
GRA_VBP_COUNT = 16 # Send VBP messages for ~0.5s (GRA_ACC_STEP * 16)
# Observed documented MQB limits: 3.00 Nm max, rate of change 5.00 Nm/sec.
# Limiting rate-of-change based on real-world testing and Comma's safety
# requirements for minimum time to lane departure.
STEER_MAX = 300 # Max heading control assist torque 3.00 Nm
STEER_DELTA_UP = 4 # Max HCA reached in 1.50s (STEER_MAX / (50Hz * 1.50))
STEER_DELTA_DOWN = 10 # Min HCA reached in 0.60s (STEER_MAX / (50Hz * 0.60))
STEER_DRIVER_ALLOWANCE = 80
STEER_DRIVER_MULTIPLIER = 3 # weight driver torque heavily
STEER_DRIVER_FACTOR = 1 # from dbc
class CANBUS:
pt = 0
cam = 2
TransmissionType = car.CarParams.TransmissionType
GearShifter = car.CarState.GearShifter
BUTTON_STATES = {
"accelCruise": False,
"decelCruise": False,
"cancel": False,
"setCruise": False,
"resumeCruise": False,
"gapAdjustCruise": False
}
MQB_LDW_MESSAGES = {
"none": 0, # Nothing to display
"laneAssistUnavailChime": 1, # "Lane Assist currently not available." with chime
"laneAssistUnavailNoSensorChime": 3, # "Lane Assist not available. No sensor view." with chime
"laneAssistTakeOverUrgent": 4, # "Lane Assist: Please Take Over Steering" with urgent beep
"emergencyAssistUrgent": 6, # "Emergency Assist: Please Take Over Steering" with urgent beep
"laneAssistTakeOverChime": 7, # "Lane Assist: Please Take Over Steering" with chime
"laneAssistTakeOverSilent": 8, # "Lane Assist: Please Take Over Steering" silent
"emergencyAssistChangingLanes": 9, # "Emergency Assist: Changing lanes..." with urgent beep
"laneAssistDeactivated": 10, # "Lane Assist deactivated." silent with persistent icon afterward
}
# Check the 7th and 8th characters of the VIN before adding a new CAR. If the
# chassis code is already listed below, don't add a new CAR, just add to the
# FW_VERSIONS for that existing CAR.
class CAR:
GOLF_MK7 = "VOLKSWAGEN GOLF 7TH GEN" # Chassis 5G/AU/BA/BE, Mk7 VW Golf and variants
JETTA_MK7 = "VOLKSWAGEN JETTA 7TH GEN" # Chassis BU, Mk7 Jetta
PASSAT_MK8 = "VOLKSWAGEN PASSAT 8TH GEN" # Chassis 3G, Mk8 Passat and variants
TIGUAN_MK2 = "VOLKSWAGEN TIGUAN 2ND GEN" # Chassis AD/BW, Mk2 VW Tiguan and variants
AUDI_A3_MK3 = "AUDI A3 3RD GEN" # Chassis 8V/FF, Mk3 Audi A3 and variants
SEAT_ATECA_MK1 = "SEAT ATECA 1ST GEN" # Chassis 5F, Mk1 SEAT Ateca and CUPRA Ateca
SKODA_KODIAQ_MK1 = "SKODA KODIAQ 1ST GEN" # Chassis NS, Mk1 Skoda Kodiaq
SKODA_SCALA_MK1 = "SKODA SCALA 1ST GEN" # Chassis NW, Mk1 Skoda Scala and Skoda Kamiq
SKODA_SUPERB_MK3 = "SKODA SUPERB 3RD GEN" # Chassis 3V/NP, Mk3 Skoda Superb and variants
FINGERPRINTS = {
CAR.GOLF_MK7: [{
64: 8, 134: 8, 159: 8, 173: 8, 178: 8, 253: 8, 257: 8, 260: 8, 262: 8, 264: 8, 278: 8, 279: 8, 283: 8, 286: 8, 288: 8, 289: 8, 290: 8, 294: 8, 299: 8, 302: 8, 346: 8, 385: 8, 418: 8, 427: 8, 668: 8, 679: 8, 681: 8, 695: 8, 779: 8, 780: 8, 783: 8, 792: 8, 795: 8, 804: 8, 806: 8, 807: 8, 808: 8, 809: 8, 870: 8, 896: 8, 897: 8, 898: 8, 901: 8, 917: 8, 919: 8, 927: 8, 949: 8, 958: 8, 960: 4, 981: 8, 987: 8, 988: 8, 991: 8, 997: 8, 1000: 8, 1019: 8, 1120: 8, 1122: 8, 1123: 8, 1124: 8, 1153: 8, 1162: 8, 1175: 8, 1312: 8, 1385: 8, 1413: 8, 1440: 5, 1514: 8, 1515: 8, 1520: 8, 1529: 8, 1600: 8, 1601: 8, 1603: 8, 1605: 8, 1624: 8, 1626: 8, 1629: 8, 1631: 8, 1646: 8, 1648: 8, 1712: 6, 1714: 8, 1716: 8, 1717: 8, 1719: 8, 1720: 8, 1721: 8
}],
CAR.JETTA_MK7: [{
64: 8, 134: 8, 159: 8, 173: 8, 178: 8, 253: 8, 257: 8, 260: 8, 262: 8, 264: 8, 278: 8, 279: 8, 283: 8, 286: 8, 288: 8, 289: 8, 290: 8, 294: 8, 299: 8, 302: 8, 346: 8, 376: 8, 418: 8, 427: 8, 679: 8, 681: 8, 695: 8, 779: 8, 780: 8, 783: 8, 792: 8, 795: 8, 804: 8, 806: 8, 807: 8, 808: 8, 809: 8, 828: 8, 870: 8, 879: 8, 884: 8, 888: 8, 891: 8, 901: 8, 913: 8, 919: 8, 949: 8, 958: 8, 960: 4, 981: 8, 987: 8, 988: 8, 991: 8, 997: 8, 1000: 8, 1019: 8, 1122: 8, 1123: 8, 1124: 8, 1153: 8, 1156: 8, 1157: 8, 1158: 8, 1162: 8, 1312: 8, 1343: 8, 1385: 8, 1413: 8, 1440: 5, 1471: 4, 1514: 8, 1515: 8, 1520: 8, 1600: 8, 1601: 8, 1603: 8, 1605: 8, 1624: 8, 1626: 8, 1629: 8, 1631: 8, 1635: 8, 1646: 8, 1648: 8, 1712: 6, 1714: 8, 1716: 8, 1717: 8, 1719: 8, 1720: 8
}],
CAR.PASSAT_MK8: [{
64: 8, 134: 8, 159: 8, 173: 8, 178: 8, 253: 8, 257: 8, 260: 8, 262: 8, 264: 8, 278: 8, 279: 8, 283: 8, 286: 8, 288: 8, 289: 8, 290: 8, 294: 8, 295: 8, 299: 8, 302: 8, 346: 8, 385: 8, 391: 8, 427: 8, 668: 8, 679: 8, 681: 8, 695: 8, 779: 8, 780: 8, 783: 8, 787: 8, 788: 8, 789: 8, 791: 8, 792: 8, 799: 8, 802: 8, 804: 8, 806: 8, 807: 8, 808: 8, 809: 8, 828: 8, 838: 8, 839: 8, 840: 8, 841: 8, 842: 8, 843: 8, 844: 8, 845: 8, 870: 8, 896: 8, 897: 8, 898: 8, 901: 8, 917: 8, 919: 8, 927: 8, 949: 8, 958: 8, 960: 4, 981: 8, 987: 8, 988: 8, 991: 8, 997: 8, 1000: 8, 1019: 8, 1120: 8, 1122: 8, 1123: 8, 1124: 8, 1153: 8, 1162: 8, 1175: 8, 1312: 8, 1385: 8, 1413: 8, 1438: 8, 1440: 5, 1461: 8, 1514: 8, 1515: 8, 1520: 8, 1529: 8, 1600: 8, 1601: 8, 1603: 8, 1624: 8, 1629: 8, 1631: 8, 1646: 8, 1648: 8, 1712: 6, 1714: 8, 1716: 8, 1717: 8, 1719: 8, 1720: 8, 1721: 8
}],
CAR.TIGUAN_MK2: [{
64: 8, 134: 8, 159: 8, 173: 8, 178: 8, 253: 8, 257: 8, 260: 8, 262: 8, 278: 8, 279: 8, 283: 8, 286: 8, 288: 8, 289: 8, 290: 8, 294: 8, 299: 8, 302: 8, 346: 8, 376: 8, 418: 8, 427: 8, 573: 8, 679: 8, 681: 8, 684: 8, 695: 8, 779: 8, 780: 8, 783: 8, 787: 8, 788: 8, 789: 8, 792: 8, 795: 8, 804: 8, 806: 8, 807: 8, 808: 8, 809: 8, 828: 8, 870: 8, 879: 8, 884: 8, 888: 8, 891: 8, 896: 8, 897: 8, 898: 8, 901: 8, 913: 8, 917: 8, 919: 8, 949: 8, 958: 8, 960: 4, 981: 8, 987: 8, 988: 8, 991: 8, 997: 8, 1000: 8, 1019: 8, 1122: 8, 1123: 8, 1124: 8, 1153: 8, 1156: 8, 1157: 8, 1158: 8, 1162: 8, 1175: 8, 1312: 8, 1343: 8, 1385: 8, 1413: 8, 1440: 5, 1471: 4, 1514: 8, 1515: 8, 1520: 8, 1600: 8, 1601: 8, 1603: 8, 1605: 8, 1624: 8, 1626: 8, 1629: 8, 1631: 8, 1635: 8, 1646: 8, 1648: 8, 1712: 6, 1714: 8, 1716: 8, 1717: 8, 1719: 8, 1720: 8, 1721: 8
}],
CAR.AUDI_A3_MK3: [{
64: 8, 134: 8, 159: 8, 173: 8, 178: 8, 253: 8, 257: 8, 260: 8, 262: 8, 278: 8, 279: 8, 283: 8, 285: 8, 286: 8, 288: 8, 289: 8, 290: 8, 294: 8, 295: 8, 299: 8, 302: 8, 346: 8, 418: 8, 427: 8, 506: 8, 679: 8, 681: 8, 695: 8, 779: 8, 780: 8, 783: 8, 787: 8, 788: 8, 789: 8, 792: 8, 802: 8, 804: 8, 806: 8, 807: 8, 808: 8, 809: 8, 846: 8, 847: 8, 870: 8, 896: 8, 897: 8, 898: 8, 901: 8, 917: 8, 919: 8, 949: 8, 958: 8, 960: 4, 981: 8, 987: 8, 988: 8, 991: 8, 997: 8, 1000: 8, 1019: 8, 1122: 8, 1123: 8, 1124: 8, 1153: 8, 1162: 8, 1175: 8, 1312: 8, 1385: 8, 1413: 8, 1440: 5, 1514: 8, 1515: 8, 1520: 8, 1600: 8, 1601: 8, 1603: 8, 1624: 8, 1629: 8, 1631: 8, 1646: 8, 1648: 8, 1712: 6, 1714: 8, 1716: 8, 1717: 8, 1719: 8, 1720: 8, 1721: 8, 1792: 8, 1872: 8, 1976: 8, 1977: 8, 1982: 8, 1985: 8
}],
CAR.SEAT_ATECA_MK1: [{
64: 8, 134: 8, 159: 8, 173: 8, 178: 8, 253: 8, 257: 8, 260: 8, 262: 8, 278: 8, 279: 8, 283: 8, 286: 8, 288: 8, 289: 8, 290: 8, 294: 8, 299: 8, 302: 8, 346: 8, 385: 8, 418: 8, 427: 8, 668: 8, 679: 8, 681: 8, 684: 8, 779: 8, 780: 8, 792: 8, 795: 8, 804: 8, 806: 8, 807: 8, 808: 8, 809: 8, 870: 8, 901: 8, 917: 8, 919: 8, 927: 8, 949: 8, 958: 8, 960: 4, 981: 8, 987: 8, 988: 8, 991: 8, 997: 8, 1000: 8, 1019: 8, 1120: 8, 1122: 8, 1123: 8, 1124: 8, 1153: 8, 1162: 8, 1175: 8, 1312: 8, 1385: 8, 1413: 8, 1440: 5, 1514: 8, 1515: 8, 1520: 8, 1600: 8, 1601: 8, 1603: 8, 1605: 8, 1624: 8, 1626: 8, 1629: 8, 1631: 8, 1646: 8, 1648: 8, 1712: 6, 1714: 8, 1716: 8, 1717: 8, 1719: 8, 1720: 8, 1721: 8
}],
CAR.SKODA_KODIAQ_MK1: [{
64: 8, 134: 8, 159: 8, 173: 8, 178: 8, 253: 8, 257: 8, 260: 8, 262: 8, 278: 8, 279: 8, 283: 8, 286: 8, 288: 8, 289: 8, 290: 8, 294: 8, 299: 8, 302: 8, 346: 8, 385: 8, 418: 8, 427: 8, 573: 8, 668: 8, 679: 8, 681: 8, 684: 8, 695: 8, 779: 8, 780: 8, 783: 8, 787: 8, 788: 8, 789: 8, 792: 8, 795: 8, 802: 8, 804: 8, 806: 8, 807: 8, 808: 8, 809: 8, 828: 8, 870: 8, 896: 8, 897: 8, 898: 8, 901: 8, 917: 8, 919: 8, 949: 8, 958: 8, 960: 4, 981: 8, 987: 8, 988: 8, 991: 8, 997: 8, 1000: 8, 1019: 8, 1120: 8, 1153: 8, 1162: 8, 1175: 8, 1312: 8, 1385: 8, 1413: 8, 1440: 5, 1514: 8, 1515: 8, 1520: 8, 1529: 8, 1600: 8, 1601: 8, 1603: 8, 1605: 8, 1624: 8, 1626: 8, 1629: 8, 1631: 8, 1646: 8, 1648: 8, 1712: 6, 1714: 8, 1716: 8, 1717: 8, 1719: 8, 1720: 8, 1721: 8, 1792: 8, 1871: 8, 1872: 8, 1879: 8, 1909: 8, 1976: 8, 1977: 8, 1985: 8
}],
CAR.SKODA_SCALA_MK1: [{
64: 8, 134: 8, 159: 8, 173: 8, 178: 8, 253: 8, 257: 8, 262: 8, 278: 8, 279: 8, 283: 8, 286: 8, 288: 8, 289: 8, 290: 8, 294: 8, 299: 8, 302: 8, 346: 8, 418: 8, 427: 8, 506: 8, 568: 8, 569: 8, 572: 8, 573: 8, 679: 8, 681: 8, 684: 8, 695: 8, 779: 8, 780: 8, 783: 8, 787: 8, 788: 8, 789: 8, 792: 8, 795: 8, 804: 8, 806: 8, 807: 8, 808: 8, 809: 8, 826: 8, 827: 8, 828: 8, 870: 8, 879: 8, 884: 8, 888: 8, 891: 8, 901: 8, 913: 8, 917: 8, 919: 8, 949: 8, 958: 8, 960: 4, 981: 8, 987: 8, 988: 8, 991: 8, 997: 8, 1000: 8, 1019: 8, 1122: 8, 1123: 8, 1124: 8, 1153: 8, 1156: 8, 1157: 8, 1158: 8, 1162: 8, 1175: 8, 1312: 8, 1343: 8, 1385: 8, 1413: 8, 1440: 5, 1514: 8, 1515: 8, 1520: 8, 1600: 8, 1601: 8, 1603: 8, 1605: 8, 1624: 8, 1626: 8, 1629: 8, 1631: 8, 1635: 8, 1646: 8, 1648: 8, 1712: 6, 1714: 8, 1716: 8, 1717: 8, 1719: 8, 1720: 8, 1721: 8, 1792: 8, 1872: 8, 1879: 8, 1976: 8, 1977: 8, 1982: 8, 1985: 8
}],
CAR.SKODA_SUPERB_MK3: [{
64: 8, 134: 8, 159: 8, 178: 8, 253: 8, 257: 8, 260: 8, 262: 8, 278: 8, 279: 8, 283: 8, 286: 8, 288: 8, 289: 8, 290: 8, 294: 8, 295: 8, 299: 8, 302: 8, 346: 8, 418: 8, 427: 8, 679: 8, 681: 8, 695: 8, 779: 8, 780: 8, 783: 8, 791: 8, 792: 8, 795: 8, 799: 8, 804: 8, 806: 8, 807: 8, 808: 8, 809: 8, 838: 8, 839: 8, 840: 8, 841: 8, 842: 8, 843: 8, 844: 8, 845: 8, 870: 8, 896: 8, 897: 8, 898: 8, 901: 8, 917: 8, 919: 8, 949: 8, 958: 8, 960: 4, 981: 8, 987: 8, 988: 8, 991: 8, 997: 8, 1000: 8, 1019: 8, 1153: 8, 1162: 8, 1175: 8, 1312: 8, 1385: 8, 1413: 8, 1440: 5, 1514: 8, 1515: 8, 1520: 8, 1600: 8, 1601: 8, 1603: 8, 1624: 8, 1626: 8, 1629: 8, 1631: 8, 1646: 8, 1648: 8, 1712: 6, 1714: 8, 1716: 8, 1717: 8, 1719: 8, 1720: 8, 1792: 8, 1872: 8, 1879: 8, 1976: 8, 1977: 8, 1985: 8, 2017: 8
}],
}
# All VW should be here
IGNORED_FINGERPRINTS = [CAR.JETTA_MK7, CAR.PASSAT_MK8, CAR.TIGUAN_MK2, CAR.AUDI_A3_MK3,
CAR.SEAT_ATECA_MK1, CAR.SKODA_KODIAQ_MK1, CAR.SKODA_SCALA_MK1,
CAR.SKODA_SUPERB_MK3, CAR.GOLF_MK7]
FW_VERSIONS = {
CAR.GOLF_MK7: {
(Ecu.engine, 0x7e0, None): [
b'\xf1\x8704E906016A \xf1\x897697',
b'\xf1\x8704E906016AD\xf1\x895758',
b'\xf1\x8704E906023AG\xf1\x891726',
b'\xf1\x8704E906023BN\xf1\x894518',
b'\xf1\x8704E906027GR\xf1\x892394',
b'\xf1\x8704L906026NF\xf1\x899528',
b'\xf1\x8704L906056CR\xf1\x895813',
b'\xf1\x8704L906056HE\xf1\x893758',
b'\xf1\x870EA906016A \xf1\x898343',
b'\xf1\x870EA906016F \xf1\x895002',
b'\xf1\x870EA906016S \xf1\x897207',
b'\xf1\x875G0906259 \xf1\x890007',
b'\xf1\x875G0906259J \xf1\x890002',
b'\xf1\x875G0906259L \xf1\x890002',
b'\xf1\x875G0906259Q \xf1\x890002',
b'\xf1\x875G0906259Q \xf1\x892313',
b'\xf1\x878V0906259J \xf1\x890003',
b'\xf1\x878V0906259P \xf1\x890001',
b'\xf1\x878V0906259Q \xf1\x890002',
b'\xf1\x878V0906264F \xf1\x890003',
],
(Ecu.transmission, 0x7e1, None): [
b'\xf1\x8709G927749AP\xf1\x892943',
b'\xf1\x870CW300042F \xf1\x891604',
b'\xf1\x870CW300045 \xf1\x894531',
b'\xf1\x870CW300047D \xf1\x895261',
b'\xf1\x870CW300048J \xf1\x890611',
b'\xf1\x870D9300012 \xf1\x894913',
b'\xf1\x870D9300020S \xf1\x895201',
b'\xf1\x870D9300040S \xf1\x894311',
b'\xf1\x870DD300045K \xf1\x891120',
b'\xf1\x870DD300046F \xf1\x891601',
b'\xf1\x870GC300012A \xf1\x891403',
b'\xf1\x870GC300014B \xf1\x892401',
b'\xf1\x870GC300014B \xf1\x892405',
b'\xf1\x870GC300020G \xf1\x892403',
b'\xf1\x870GC300020G \xf1\x892404',
b'\xf1\x870GC300043T \xf1\x899999',
],
(Ecu.srs, 0x715, None): [
b'\xf1\x875Q0959655AA\xf1\x890386\xf1\x82\0211413001113120043114317121C111C9113',
b'\xf1\x875Q0959655AA\xf1\x890386\xf1\x82\0211413001113120053114317121C111C9113',
b'\xf1\x875Q0959655AA\xf1\x890388\xf1\x82\0211413001113120043114317121C111C9113',
b'\xf1\x875Q0959655AA\xf1\x890388\xf1\x82\0211413001113120043114417121411149113',
b'\xf1\x875Q0959655AA\xf1\x890388\xf1\x82\0211413001113120053114317121C111C9113',
b'\xf1\x875Q0959655BH\xf1\x890336\xf1\x82\02314160011123300314211012230229333463100',
b'\xf1\x875Q0959655BT\xf1\x890403\xf1\x82\023141600111233003142405A2252229333463100',
b'\xf1\x875Q0959655J \xf1\x890830\xf1\x82\023271212111312--071104171838103891131211',
b'\xf1\x875Q0959655J \xf1\x890830\xf1\x82\023341512112212--071104172328102891131211',
b'\xf1\x875Q0959655J \xf1\x890830\xf1\x82\x13272512111312--07110417182C102C91131211',
b'\xf1\x875Q0959655M \xf1\x890361\xf1\x82\0211413001112120041114115121611169112',
b'\xf1\x875Q0959655S \xf1\x890870\xf1\x82\02315120011211200621143171717111791132111',
b'\xf1\x875Q0959655S \xf1\x890870\xf1\x82\02324230011211200061104171724102491132111',
b'\xf1\x875Q0959655S \xf1\x890870\xf1\x82\02324230011211200621143171724112491132111',
b'\xf1\x875Q0959655S \xf1\x890870\xf1\x82\x1315120011211200061104171717101791132111',
],
(Ecu.eps, 0x712, None): [
b'\xf1\x873Q0909144F \xf1\x895043\xf1\x82\00561A01612A0',
b'\xf1\x873Q0909144H \xf1\x895061\xf1\x82\00566A0J612A1',
b'\xf1\x873Q0909144J \xf1\x895063\xf1\x82\00566A00514A1',
b'\xf1\x873Q0909144K \xf1\x895072\xf1\x82\00571A0J714A1',
b'\xf1\x873Q0909144L \xf1\x895081\xf1\x82\x0571A0JA15A1',
b'\xf1\x873Q0909144M \xf1\x895082\xf1\x82\00571A0JA16A1',
b'\xf1\x875Q0909143K \xf1\x892033\xf1\x820519A9040203',
b'\xf1\x875Q0909144AA\xf1\x891081\xf1\x82\00521A00441A1',
b'\xf1\x875Q0909144AA\xf1\x891081\xf1\x82\x0521A00641A1',
b'\xf1\x875Q0909144AB\xf1\x891082\xf1\x82\00521A00642A1',
b'\xf1\x875Q0909144AB\xf1\x891082\xf1\x82\00521A07B05A1',
b'\xf1\x875Q0909144L \xf1\x891021\xf1\x82\00522A00402A0',
b'\xf1\x875Q0909144P \xf1\x891043\xf1\x82\00511A00403A0',
b'\xf1\x875Q0909144R \xf1\x891061\xf1\x82\00516A00604A1',
b'\xf1\x875Q0909144S \xf1\x891063\xf1\x82\00516A07A02A1',
b'\xf1\x875QN909144A \xf1\x895081\xf1\x82\00571A01A18A1',
b'\xf1\x875QN909144A \xf1\x895081\xf1\x82\x0571A01A17A1',
],
(Ecu.fwdRadar, 0x757, None): [
b'\xf1\x875Q0907572A \xf1\x890141\xf1\x82\00101',
b'\xf1\x875Q0907572B \xf1\x890200\xf1\x82\00101',
b'\xf1\x875Q0907572C \xf1\x890210\xf1\x82\00101',
b'\xf1\x875Q0907572D \xf1\x890304\xf1\x82\00101',
b'\xf1\x875Q0907572F \xf1\x890400\xf1\x82\00101',
b'\xf1\x875Q0907572H \xf1\x890620',
b'\xf1\x875Q0907572J \xf1\x890654',
b'\xf1\x875Q0907572P \xf1\x890682',
],
},
CAR.JETTA_MK7: {
(Ecu.engine, 0x7e0, None): [
b'\xf1\x8704E906024AK\xf1\x899937',
b'\xf1\x8704E906024B \xf1\x895594',
b'\xf1\x8704E906024L \xf1\x895595',
b'\xf1\x875G0906259T \xf1\x890003',
],
(Ecu.transmission, 0x7e1, None): [
b'\xf1\x8709S927158R \xf1\x893552',
b'\xf1\x8709S927158R \xf1\x893587',
b'\xf1\x870GC300020N \xf1\x892803',
],
(Ecu.srs, 0x715, None): [
b'\xf1\x875Q0959655AG\xf1\x890336\xf1\x82\02314171231313500314611011630169333463100',
b'\xf1\x875Q0959655BM\xf1\x890403\xf1\x82\02314171231313500314643011650169333463100',
b'\xf1\x875Q0959655BR\xf1\x890403\xf1\x82\02311170031313300314240011150119333433100',
],
(Ecu.eps, 0x712, None): [
b'\xf1\x875QM909144B \xf1\x891081\xf1\x82\00521A10A01A1',
b'\xf1\x875QM909144B \xf1\x891081\xf1\x82\x0521B00404A1',
b'\xf1\x875QM909144C \xf1\x891082\xf1\x82\00521A10A01A1',
b'\xf1\x875QN909144B \xf1\x895082\xf1\x82\00571A10A11A1',
],
(Ecu.fwdRadar, 0x757, None): [
b'\xf1\x875Q0907572N \xf1\x890681',
b'\xf1\x875Q0907572R \xf1\x890771',
],
},
CAR.PASSAT_MK8: {
(Ecu.engine, 0x7e0, None): [
b'\xf1\x8704E906023AH\xf1\x893379',
b'\xf1\x8704L906026GA\xf1\x892013',
],
(Ecu.transmission, 0x7e1, None): [
b'\xf1\x870D9300014L \xf1\x895002',
b'\xf1\x870DD300045T \xf1\x891601',
],
(Ecu.srs, 0x715, None): [
b'\xf1\x873Q0959655AN\xf1\x890306\xf1\x82\r58160058140013036914110311',
b'\xf1\x875Q0959655S \xf1\x890870\xf1\x82\02315120011111200631145171716121691132111',
],
(Ecu.eps, 0x712, None): [
b'\xf1\x875Q0909143M \xf1\x892041\xf1\x820522B0080803',
b'\xf1\x875Q0909144T \xf1\x891072\xf1\x82\00521B00703A1',
],
(Ecu.fwdRadar, 0x757, None): [
b'\xf1\x873Q0907572C \xf1\x890195',
b'\xf1\x875Q0907572R \xf1\x890771',
],
},
CAR.TIGUAN_MK2: {
(Ecu.engine, 0x7e0, None): [
b'\xf1\x8783A907115B \xf1\x890005',
],
(Ecu.transmission, 0x7e1, None): [
b'\xf1\x8709G927158DT\xf1\x893698',
],
(Ecu.srs, 0x715, None): [
b'\xf1\x875Q0959655BM\xf1\x890403\xf1\x82\02316143231313500314641011750179333423100',
],
(Ecu.eps, 0x712, None): [
b'\xf1\x875QM909144C \xf1\x891082\xf1\x82\00521A60804A1',
],
(Ecu.fwdRadar, 0x757, None): [
b'\xf1\x872Q0907572R \xf1\x890372',
],
},
CAR.AUDI_A3_MK3: {
(Ecu.engine, 0x7e0, None): [
b'\xf1\x8704E906023AN\xf1\x893695',
b'\xf1\x8704E906023AR\xf1\x893440',
b'\xf1\x8704E906023BL\xf1\x895190',
b'\xf1\x8704L997022N \xf1\x899459',
b'\xf1\x875G0906259L \xf1\x890002',
b'\xf1\x878V0906264B \xf1\x890003',
],
(Ecu.transmission, 0x7e1, None): [
b'\xf1\x870CW300048 \xf1\x895201',
b'\xf1\x870D9300013B \xf1\x894931',
b'\xf1\x870D9300041N \xf1\x894512',
b'\xf1\x870DD300046A \xf1\x891602',
b'\xf1\x870DD300046F \xf1\x891602',
b'\xf1\x870DD300046G \xf1\x891601',
],
(Ecu.srs, 0x715, None): [
b'\xf1\x875Q0959655AM\xf1\x890315\xf1\x82\x1311111111111111311411011231129321212100',
b'\xf1\x875Q0959655J \xf1\x890825\xf1\x82\023111112111111--171115141112221291163221',
b'\xf1\x875Q0959655J \xf1\x890830\xf1\x82\023121111111211--261117141112231291163221',
b'\xf1\x875Q0959655J \xf1\x890830\xf1\x82\x13121111111111--341117141212231291163221',
b'\xf1\x875Q0959655N \xf1\x890361\xf1\x82\0211212001112111104110411111521159114',
],
(Ecu.eps, 0x712, None): [
b'\xf1\x875Q0909144P \xf1\x891043\xf1\x82\00503G00803A0',
b'\xf1\x875Q0909144R \xf1\x891061\xf1\x82\00516G00804A1',
b'\xf1\x875Q0909144T \xf1\x891072\xf1\x82\00521G00807A1',
],
(Ecu.fwdRadar, 0x757, None): [
b'\xf1\x875Q0907572D \xf1\x890304\xf1\x82\00101',
b'\xf1\x875Q0907572G \xf1\x890571',
],
},
CAR.SEAT_ATECA_MK1: {
(Ecu.engine, 0x7e0, None): [
b'\xf1\x8704E906027KA\xf1\x893749',
],
(Ecu.transmission, 0x7e1, None): [
b'\xf1\x870D9300014S \xf1\x895202',
],
(Ecu.srs, 0x715, None): [
b'\xf1\x873Q0959655BH\xf1\x890703\xf1\x82\0161212001211001305121211052900',
],
(Ecu.eps, 0x712, None): [
b'\xf1\x873Q0909144L \xf1\x895081\xf1\x82\00571N60511A1',
],
(Ecu.fwdRadar, 0x757, None): [
b'\xf1\x872Q0907572M \xf1\x890233',
],
},
CAR.SKODA_KODIAQ_MK1: {
(Ecu.engine, 0x7e0, None): [
b'\xf1\x8704E906027DD\xf1\x893123',
b'\xf1\x875NA907115E \xf1\x890003',
],
(Ecu.transmission, 0x7e1, None): [
b'\xf1\x870D9300043 \xf1\x895202',
b'\xf1\x870DL300012M \xf1\x892107',
],
(Ecu.srs, 0x715, None): [
b'\xf1\x873Q0959655BJ\xf1\x890703\xf1\x82\0161213001211001205212111052100',
],
(Ecu.eps, 0x712, None): [
b'\xf1\x875Q0909143P \xf1\x892051\xf1\x820527T6050405',
b'\xf1\x875Q0909143P \xf1\x892051\xf1\x820527T6060405',
],
(Ecu.fwdRadar, 0x757, None): [
b'\xf1\x872Q0907572R \xf1\x890372',
],
},
CAR.SKODA_SCALA_MK1: {
(Ecu.engine, 0x7e0, None): [
b'\xf1\x8704C906025AK\xf1\x897053',
],
(Ecu.transmission, 0x7e1, None): [
b'\xf1\x870CW300050 \xf1\x891709',
],
(Ecu.srs, 0x715, None): [
b'\xf1\x872Q0959655AM\xf1\x890351\xf1\x82\022111104111104112104040404111111112H14',
],
(Ecu.eps, 0x712, None): [
b'\xf1\x872Q1909144M \xf1\x896041',
],
(Ecu.fwdRadar, 0x757, None): [
b'\xf1\x872Q0907572R \xf1\x890372',
],
},
CAR.SKODA_SUPERB_MK3: {
(Ecu.engine, 0x7e0, None): [
b'\xf1\x8704L906026KB\xf1\x894071',
b'\xf1\x873G0906259B \xf1\x890002',
b'\xf1\x8704L906026FP\xf1\x891196',
],
(Ecu.transmission, 0x7e1, None): [
b'\xf1\x870D9300012 \xf1\x894940',
b'\xf1\x870D9300011T \xf1\x894801',
],
(Ecu.srs, 0x715, None): [
b'\xf1\x875Q0959655AE\xf1\x890130\xf1\x82\022111200111121001121118112231292221111',
b'\xf1\x875Q0959655BH\xf1\x890336\xf1\x82\02331310031313100313131013141319331413100',
b'\xf1\x875Q0959655AK\xf1\x890130\xf1\x82\022111200111121001121110012211292221111',
],
(Ecu.eps, 0x712, None): [
b'\xf1\x875Q0909143M \xf1\x892041\xf1\x820522UZ070303',
b'\xf1\x875Q0910143B \xf1\x892201\xf1\x82\00563UZ060700',
b'\xf1\x875Q0909143K \xf1\x892033\xf1\x820514UZ070203',
],
(Ecu.fwdRadar, 0x757, None): [
b'\xf1\x873Q0907572B \xf1\x890194',
b'\xf1\x873Q0907572C \xf1\x890195',
b'\xf1\x873Q0907572B \xf1\x890192',
],
},
}
DBC = {
CAR.GOLF_MK7: dbc_dict('vw_mqb_2010', None),
CAR.JETTA_MK7: dbc_dict('vw_mqb_2010', None),
CAR.PASSAT_MK8: dbc_dict('vw_mqb_2010', None),
CAR.TIGUAN_MK2: dbc_dict('vw_mqb_2010', None),
CAR.AUDI_A3_MK3: dbc_dict('vw_mqb_2010', None),
CAR.SEAT_ATECA_MK1: dbc_dict('vw_mqb_2010', None),
CAR.SKODA_KODIAQ_MK1: dbc_dict('vw_mqb_2010', None),
CAR.SKODA_SCALA_MK1: dbc_dict('vw_mqb_2010', None),
CAR.SKODA_SUPERB_MK3: dbc_dict('vw_mqb_2010', None),
}
| 58.562176
| 901
| 0.623446
|
2f7802a4a5bff0a74bd38886f3d9f38d640fe2eb
| 9,435
|
py
|
Python
|
hfc/protos/peer/resources_pb2.py
|
roviso/hyberledger-py
|
908dd597e0822f99cf618f235dd517824ba44bc4
|
[
"Apache-2.0"
] | 389
|
2016-09-18T11:50:10.000Z
|
2022-03-29T21:45:40.000Z
|
hfc/protos/peer/resources_pb2.py
|
roviso/hyberledger-py
|
908dd597e0822f99cf618f235dd517824ba44bc4
|
[
"Apache-2.0"
] | 112
|
2017-08-18T00:32:21.000Z
|
2022-02-25T18:55:57.000Z
|
hfc/protos/peer/resources_pb2.py
|
roviso/hyberledger-py
|
908dd597e0822f99cf618f235dd517824ba44bc4
|
[
"Apache-2.0"
] | 268
|
2016-10-12T02:56:58.000Z
|
2022-03-30T09:50:54.000Z
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: hfc/protos/peer/resources.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from hfc.protos.common import configtx_pb2 as hfc_dot_protos_dot_common_dot_configtx__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='hfc/protos/peer/resources.proto',
package='protos',
syntax='proto3',
serialized_options=b'\n\"org.hyperledger.fabric.protos.peerZ,github.com/hyperledger/fabric-protos-go/peer',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x1fhfc/protos/peer/resources.proto\x12\x06protos\x1a hfc/protos/common/configtx.proto\"4\n\x13\x43haincodeIdentifier\x12\x0c\n\x04hash\x18\x01 \x01(\x0c\x12\x0f\n\x07version\x18\x02 \x01(\t\"5\n\x13\x43haincodeValidation\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x10\n\x08\x61rgument\x18\x02 \x01(\x0c\"*\n\x08VSCCArgs\x12\x1e\n\x16\x65ndorsement_policy_ref\x18\x01 \x01(\t\"$\n\x14\x43haincodeEndorsement\x12\x0c\n\x04name\x18\x01 \x01(\t\"^\n\nConfigTree\x12&\n\x0e\x63hannel_config\x18\x01 \x01(\x0b\x32\x0e.common.Config\x12(\n\x10resources_config\x18\x02 \x01(\x0b\x32\x0e.common.ConfigBR\n\"org.hyperledger.fabric.protos.peerZ,github.com/hyperledger/fabric-protos-go/peerb\x06proto3'
,
dependencies=[hfc_dot_protos_dot_common_dot_configtx__pb2.DESCRIPTOR,])
_CHAINCODEIDENTIFIER = _descriptor.Descriptor(
name='ChaincodeIdentifier',
full_name='protos.ChaincodeIdentifier',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='hash', full_name='protos.ChaincodeIdentifier.hash', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='version', full_name='protos.ChaincodeIdentifier.version', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=77,
serialized_end=129,
)
_CHAINCODEVALIDATION = _descriptor.Descriptor(
name='ChaincodeValidation',
full_name='protos.ChaincodeValidation',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='protos.ChaincodeValidation.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='argument', full_name='protos.ChaincodeValidation.argument', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=131,
serialized_end=184,
)
_VSCCARGS = _descriptor.Descriptor(
name='VSCCArgs',
full_name='protos.VSCCArgs',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='endorsement_policy_ref', full_name='protos.VSCCArgs.endorsement_policy_ref', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=186,
serialized_end=228,
)
_CHAINCODEENDORSEMENT = _descriptor.Descriptor(
name='ChaincodeEndorsement',
full_name='protos.ChaincodeEndorsement',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='protos.ChaincodeEndorsement.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=230,
serialized_end=266,
)
_CONFIGTREE = _descriptor.Descriptor(
name='ConfigTree',
full_name='protos.ConfigTree',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='channel_config', full_name='protos.ConfigTree.channel_config', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='resources_config', full_name='protos.ConfigTree.resources_config', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=268,
serialized_end=362,
)
_CONFIGTREE.fields_by_name['channel_config'].message_type = hfc_dot_protos_dot_common_dot_configtx__pb2._CONFIG
_CONFIGTREE.fields_by_name['resources_config'].message_type = hfc_dot_protos_dot_common_dot_configtx__pb2._CONFIG
DESCRIPTOR.message_types_by_name['ChaincodeIdentifier'] = _CHAINCODEIDENTIFIER
DESCRIPTOR.message_types_by_name['ChaincodeValidation'] = _CHAINCODEVALIDATION
DESCRIPTOR.message_types_by_name['VSCCArgs'] = _VSCCARGS
DESCRIPTOR.message_types_by_name['ChaincodeEndorsement'] = _CHAINCODEENDORSEMENT
DESCRIPTOR.message_types_by_name['ConfigTree'] = _CONFIGTREE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ChaincodeIdentifier = _reflection.GeneratedProtocolMessageType('ChaincodeIdentifier', (_message.Message,), {
'DESCRIPTOR' : _CHAINCODEIDENTIFIER,
'__module__' : 'hfc.protos.peer.resources_pb2'
# @@protoc_insertion_point(class_scope:protos.ChaincodeIdentifier)
})
_sym_db.RegisterMessage(ChaincodeIdentifier)
ChaincodeValidation = _reflection.GeneratedProtocolMessageType('ChaincodeValidation', (_message.Message,), {
'DESCRIPTOR' : _CHAINCODEVALIDATION,
'__module__' : 'hfc.protos.peer.resources_pb2'
# @@protoc_insertion_point(class_scope:protos.ChaincodeValidation)
})
_sym_db.RegisterMessage(ChaincodeValidation)
VSCCArgs = _reflection.GeneratedProtocolMessageType('VSCCArgs', (_message.Message,), {
'DESCRIPTOR' : _VSCCARGS,
'__module__' : 'hfc.protos.peer.resources_pb2'
# @@protoc_insertion_point(class_scope:protos.VSCCArgs)
})
_sym_db.RegisterMessage(VSCCArgs)
ChaincodeEndorsement = _reflection.GeneratedProtocolMessageType('ChaincodeEndorsement', (_message.Message,), {
'DESCRIPTOR' : _CHAINCODEENDORSEMENT,
'__module__' : 'hfc.protos.peer.resources_pb2'
# @@protoc_insertion_point(class_scope:protos.ChaincodeEndorsement)
})
_sym_db.RegisterMessage(ChaincodeEndorsement)
ConfigTree = _reflection.GeneratedProtocolMessageType('ConfigTree', (_message.Message,), {
'DESCRIPTOR' : _CONFIGTREE,
'__module__' : 'hfc.protos.peer.resources_pb2'
# @@protoc_insertion_point(class_scope:protos.ConfigTree)
})
_sym_db.RegisterMessage(ConfigTree)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 36.712062
| 706
| 0.765872
|
eac623bb90e55bf60b5ee3411d60a6c67113b384
| 619
|
py
|
Python
|
tests/test_global_phot.py
|
temuller/cosmo_phot
|
011333f84486614cb9339d3874dc072c45ebed23
|
[
"MIT"
] | null | null | null |
tests/test_global_phot.py
|
temuller/cosmo_phot
|
011333f84486614cb9339d3874dc072c45ebed23
|
[
"MIT"
] | null | null | null |
tests/test_global_phot.py
|
temuller/cosmo_phot
|
011333f84486614cb9339d3874dc072c45ebed23
|
[
"MIT"
] | null | null | null |
import unittest
from hostphot.cutouts import download_images
import hostphot.global_photometry as gp
class TestHostPhot(unittest.TestCase):
def test_global_phot(self):
sn_name = 'SN2004eo'
host_ra = 308.2092
host_dec = 9.92755
z = 0.0157
survey = 'PS1'
download_images(sn_name, host_ra, host_dec, survey=survey)
gp.multi_band_phot(sn_name, host_ra, host_dec, survey=survey,
use_mask=False, common_aperture=False,
optimze_kronrad=True, save_plots=True)
if __name__ == '__main__':
unittest.main()
| 29.47619
| 69
| 0.644588
|
2b1341c2a2ab4e7f443b959e8416a9ebda35a897
| 2,325
|
py
|
Python
|
line_search/goldstein.py
|
Hintonthu/opt_methods
|
e711ca708479c6fc99b7cad8fa2a078bd8d48cfd
|
[
"MIT"
] | 1
|
2020-07-17T08:46:18.000Z
|
2020-07-17T08:46:18.000Z
|
line_search/goldstein.py
|
Hintonthu/opt_methods
|
e711ca708479c6fc99b7cad8fa2a078bd8d48cfd
|
[
"MIT"
] | null | null | null |
line_search/goldstein.py
|
Hintonthu/opt_methods
|
e711ca708479c6fc99b7cad8fa2a078bd8d48cfd
|
[
"MIT"
] | null | null | null |
import copy
from .line_search import LineSearch
class Armijo(LineSearch):
"""
    Armijo line search with optional resetting of the initial stepsize
    at each iteration. If resetting is used, the previous value multiplied
    by 1/backtracking is used as the first stepsize to try at this iteration.
    Otherwise, it starts with the maximal stepsize.
    Arguments:
        armijo_const (float, optional): proportionality constant in the Armijo sufficient-decrease condition (default: 0.5)
        start_with_prev_lr (boolean, optional): if True, reuse the previous stepsize divided by backtracking as the first stepsize to try at each iteration; if False, start from the maximal stepsize lr0 (default: True)
        backtracking (float, optional): factor by which the stepsize is multiplied when the Armijo condition fails (default: 0.5)
"""
def __init__(self, armijo_const=0.5, start_with_prev_lr=True, backtracking=0.5, *args, **kwargs):
super(Armijo, self).__init__(*args, **kwargs)
self.armijo_const = armijo_const
self.start_with_prev_lr = start_with_prev_lr
self.backtracking = backtracking
self.x_prev = None
self.val_prev = None
def condition(self, gradient, x, x_new):
new_value = self.loss.value(x_new)
self.x_prev = copy.deepcopy(x_new)
self.val_prev = new_value
descent = self.armijo_const * self.loss.inner_prod(gradient, x - x_new)
return new_value <= self.current_value - descent
def __call__(self, gradient=None, direction=None, x=None, x_new=None):
if gradient is None:
gradient = self.optimizer.grad
if x is None:
x = self.optimizer.x
self.lr = self.lr / self.backtracking if self.start_with_prev_lr else self.lr0
if direction is None:
direction = (x_new - x) / self.lr
if x_new is None:
x_new = x + direction * self.lr
if x is self.x_prev:
self.current_value = self.val_prev
else:
self.current_value = self.loss.value(x)
armijo_condition_met = self.condition(gradient, x, x_new)
it_extra = 0
while not armijo_condition_met:
self.lr *= self.backtracking
x_new = x + self.lr * direction
armijo_condition_met = self.condition(gradient, x, x_new)
it_extra += 1
self.it += self.it_per_call + it_extra
return x_new
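# --- Illustrative sketch (not part of the original module) ---
# A minimal, self-contained demonstration of the backtracking rule the Armijo
# class above implements: shrink the stepsize by `backtracking` until
# f(x - lr * grad) <= f(x) - armijo_const * lr * grad**2 holds.
# The quartic objective and the starting values are assumptions chosen purely
# for illustration; they do not come from this repository.
def _armijo_backtracking_demo(x=2.0, lr=1.0, armijo_const=0.5, backtracking=0.5):
    f = lambda z: 0.25 * z ** 4        # toy objective, f'(z) = z**3
    grad = x ** 3                      # gradient of f at x
    while f(x - lr * grad) > f(x) - armijo_const * lr * grad ** 2:
        lr *= backtracking             # backtrack until sufficient decrease holds
    return x - lr * grad, lr           # e.g. the defaults give (1.5, 0.0625)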
| 40.789474
| 103
| 0.64129
|
e80fa07154d2832ba36bdffbe25ef7e50a6a1c82
| 4,542
|
py
|
Python
|
src/etools_permissions/backends.py
|
unicef/etools-permissions
|
7a6da87c9829290af3cea458314e60dd6d1239fd
|
[
"Apache-2.0"
] | null | null | null |
src/etools_permissions/backends.py
|
unicef/etools-permissions
|
7a6da87c9829290af3cea458314e60dd6d1239fd
|
[
"Apache-2.0"
] | null | null | null |
src/etools_permissions/backends.py
|
unicef/etools-permissions
|
7a6da87c9829290af3cea458314e60dd6d1239fd
|
[
"Apache-2.0"
] | null | null | null |
from django.contrib.auth.backends import ModelBackend
from django.core.exceptions import PermissionDenied
from etools_permissions.models import Permission, Realm
class RealmBackend(ModelBackend):
def _get_realm(self, user):
try:
return Realm.objects.get(user=user)
except Realm.DoesNotExist:
raise PermissionDenied
def _get_realm_permissions(self, realm):
return realm.permissions.all()
def _get_group_permissions(self, realm):
realm_groups_field = Realm._meta.get_field('groups')
realm_groups_query = 'group__{}'.format(
realm_groups_field.related_query_name()
)
return Permission.objects.filter(**{realm_groups_query: realm})
def _get_permissions(self, realm, obj, from_name):
"""
Return the permissions of `realm` from `from_name`. `from_name` can
be either "group" or "realm" to return permissions from
`_get_group_permissions` or `_get_realm_permissions` respectively.
"""
if not realm.user.is_active or realm.user.is_anonymous or obj is not None:
return set()
perm_cache_name = '_{}_perm_cache'.format(from_name)
if not hasattr(realm, perm_cache_name):
if realm.user.is_superuser:
perms = Permission.objects.all()
else:
perms = getattr(
self,
'_get_{}_permissions'.format(from_name)
)(realm)
perms = perms.values_list(
'permission',
'permission_type',
'target',
).order_by()
setattr(
realm,
perm_cache_name,
{"{}.{}.{}".format(
perm_type,
perm,
target,
) for perm, perm_type, target in perms}
)
return getattr(realm, perm_cache_name)
def get_realm_permissions(self, realm, obj=None):
"""
Return a set of permission strings the `realm` has from their
`permissions`.
"""
return self._get_permissions(realm, obj, 'realm')
def get_group_permissions(self, realm, obj=None):
"""
Return a set of permission strings the `realm` has from the
groups they belong.
"""
return self._get_permissions(realm, obj, 'group')
def get_all_permissions(self, realm, obj=None):
if not realm.user.is_active or realm.user.is_anonymous:
return set()
if not hasattr(realm, '_perm_cache'):
realm._perm_cache = set()
realm._perm_cache.update(self.get_realm_permissions(realm, obj))
realm._perm_cache.update(self.get_group_permissions(realm, obj))
return realm._perm_cache
def _parse_target(self, target):
"""Target may have preceding data"""
perm, actual_target = target.split(".", 1)
# If we have any packages named "edit" or "view"
# this falls apart! Doh!
if perm in [Permission.EDIT, Permission.VIEW]:
return perm, actual_target
else:
return None, target
def perm_valid(self, permissions, target):
"""Check if target matches any permissions user has"""
target_perm, target = self._parse_target(target)
for permission in permissions:
perm_type, perm, perm_target = permission.split(".", 2)
if perm_type == Permission.TYPE_DISALLOW:
continue
if target_perm == Permission.EDIT and perm_type == Permission.VIEW:
continue
if perm_target[-1] == '*':
if target.startswith(perm_target[:-1]):
return True
else:
if perm_target == target:
return True
return False
def has_perm(self, user, perm, obj=None):
if not user.is_active:
return False
permissions = self.get_all_permissions(self._get_realm(user), obj)
return self.perm_valid(permissions, perm)
# def has_module_perms(self, realm_obj, app_label):
# """
# Return True if realm_obj has any permissions in the given app_label.
# """
# if not realm_obj.user.is_active:
# return False
# for perm in self.get_all_permissions(realm_obj):
# if perm[:perm.index('.')] == app_label:
# return True
# return False
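# --- Illustrative sketch (not part of the original module) ---
# The core of perm_valid() above is the comparison between a granted target and
# the requested target, where a trailing '*' on the granted target acts as a
# prefix wildcard. A standalone version of just that rule, with made-up target
# strings for illustration:
def _target_matches(perm_target, target):
    if perm_target.endswith('*'):
        return target.startswith(perm_target[:-1])  # prefix wildcard match
    return perm_target == target                    # exact match
# e.g. _target_matches('reports.*', 'reports.monthly') is True,
#      _target_matches('reports.monthly', 'reports.weekly') is False.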
| 35.484375
| 82
| 0.584544
|
eaed71fe04775f233a01f0f52d299da0143fad33
| 7,393
|
py
|
Python
|
tests/unit/test_views.py
|
hershman/server
|
5344ccc45249ef0771b9ca4d8d585312a6e04231
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/test_views.py
|
hershman/server
|
5344ccc45249ef0771b9ca4d8d585312a6e04231
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/test_views.py
|
hershman/server
|
5344ccc45249ef0771b9ca4d8d585312a6e04231
|
[
"Apache-2.0"
] | null | null | null |
"""
Unit tests for the frontend code.
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import ga4gh.frontend as frontend
import ga4gh.protocol as protocol
import tests.utils as utils
_app = None
def setUp(self):
config = {
"DATA_SOURCE": "__SIMULATED__",
"SIMULATED_BACKEND_RANDOM_SEED": 1111,
"SIMULATED_BACKEND_NUM_CALLS": 1,
"SIMULATED_BACKEND_VARIANT_DENSITY": 1.0,
"SIMULATED_BACKEND_NUM_VARIANT_SETS": 1,
# "DEBUG" : True
}
frontend.configure(
baseConfig="TestConfig", extraConfig=config)
global _app
_app = frontend.app.test_client()
def tearDown(self):
global _app
_app = None
class TestFrontend(unittest.TestCase):
"""
Tests the basic routing and HTTP handling for the Flask app.
"""
exampleUrl = 'www.example.com'
def setUp(self):
global _app
self.app = _app
def sendRequest(self, path, request):
"""
Sends the specified GA request object and returns the response.
"""
versionedPath = utils.applyVersion(path)
headers = {
'Content-type': 'application/json',
'Origin': self.exampleUrl,
}
return self.app.post(
versionedPath, headers=headers,
data=request.toJsonString())
def sendVariantsSearch(self):
response = self.sendVariantSetsSearch()
variantSets = protocol.GASearchVariantSetsResponse().fromJsonString(
response.data).variantSets
request = protocol.GASearchVariantsRequest()
request.variantSetIds = [variantSets[0].id]
request.referenceName = "1"
request.start = 0
request.end = 1
return self.sendRequest('/variants/search', request)
def sendVariantSetsSearch(self, datasetIds=[""]):
request = protocol.GASearchVariantSetsRequest()
request.datasetIds = datasetIds
return self.sendRequest('/variantsets/search', request)
def sendCallSetsSearch(self):
response = self.sendVariantSetsSearch()
variantSets = protocol.GASearchVariantSetsResponse().fromJsonString(
response.data).variantSets
request = protocol.GASearchCallSetsRequest()
request.variantSetIds = [variantSets[0].id]
return self.sendRequest('/callsets/search', request)
def sendReadsSearch(self, readGroupIds=None):
if readGroupIds is None:
readGroupIds = ['aReadGroupSet:one']
request = protocol.GASearchReadsRequest()
request.readGroupIds = readGroupIds
return self.sendRequest('/reads/search', request)
def test404sReturnJson(self):
path = utils.applyVersion('/doesNotExist')
response = self.app.get(path)
protocol.GAException.fromJsonString(response.get_data())
self.assertEqual(404, response.status_code)
def testCors(self):
def assertHeaders(response):
self.assertEqual(self.exampleUrl,
response.headers['Access-Control-Allow-Origin'])
self.assertTrue('Content-Type' in response.headers)
assertHeaders(self.sendVariantsSearch())
assertHeaders(self.sendVariantSetsSearch())
assertHeaders(self.sendReadsSearch())
# TODO: Test other methods as they are implemented
def verifySearchRouting(self, path, getDefined=False):
"""
Verifies that the specified path has the correct routing for a search
        command. If getDefined is False we also check that a GET request
        returns the correct error status code.
"""
versionedPath = utils.applyVersion(path)
response = self.app.post(versionedPath)
protocol.GAException.fromJsonString(response.get_data())
self.assertEqual(415, response.status_code)
if not getDefined:
getResponse = self.app.get(versionedPath)
protocol.GAException.fromJsonString(getResponse.get_data())
self.assertEqual(405, getResponse.status_code)
# Malformed requests should return 400
for badJson in ["", None, "JSON", "<xml/>", "{]"]:
badResponse = self.app.post(
versionedPath, data=badJson,
headers={'Content-type': 'application/json'})
self.assertEqual(400, badResponse.status_code)
# OPTIONS should return success
self.assertEqual(200, self.app.options(versionedPath).status_code)
def testRouteReferences(self):
paths = ['/references/1', 'references/1/bases', 'referencesets/1']
for path in paths:
versionedPath = utils.applyVersion(path)
self.assertEqual(404, self.app.get(versionedPath).status_code)
paths = ['/references/search']
for path in paths:
versionedPath = utils.applyVersion(path)
self.assertEqual(404, self.app.get(versionedPath).status_code)
self.verifySearchRouting('/referencesets/search', True)
def testRouteCallsets(self):
path = utils.applyVersion('/callsets/search')
self.assertEqual(415, self.app.post(path).status_code)
self.assertEqual(200, self.app.options(path).status_code)
self.assertEqual(405, self.app.get(path).status_code)
def testRouteReads(self):
paths = ['/reads/search', '/readgroupsets/search']
for path in paths:
self.verifySearchRouting(path)
def testRouteVariants(self):
for path in ['/variantsets/search', '/variants/search']:
self.verifySearchRouting(path)
def testRouteIndex(self):
response = self.app.get("/")
self.assertEqual(200, response.status_code)
self.assertEqual("text/html", response.mimetype)
self.assertGreater(len(response.data), 0)
def testVariantsSearch(self):
response = self.sendVariantsSearch()
self.assertEqual(200, response.status_code)
responseData = protocol.GASearchVariantsResponse.fromJsonString(
response.data)
self.assertEqual(len(responseData.variants), 1)
def testVariantSetsSearch(self):
response = self.sendVariantSetsSearch()
self.assertEqual(200, response.status_code)
responseData = protocol.GASearchVariantSetsResponse.fromJsonString(
response.data)
self.assertEqual(len(responseData.variantSets), 1)
def testCallSetsSearch(self):
response = self.sendCallSetsSearch()
self.assertEqual(200, response.status_code)
responseData = protocol.GASearchCallSetsResponse.fromJsonString(
response.data)
self.assertEqual(len(responseData.callSets), 1)
def testReadsSearch(self):
response = self.sendReadsSearch()
self.assertEqual(200, response.status_code)
responseData = protocol.GASearchReadsResponse.fromJsonString(
response.data)
self.assertEqual(len(responseData.alignments), 2)
self.assertEqual(
responseData.alignments[0].id,
"aReadGroupSet:one:simulated0")
self.assertEqual(
responseData.alignments[1].id,
"aReadGroupSet:one:simulated1")
def testWrongVersion(self):
path = '/v0.1.2/variantsets/search'
self.assertEqual(404, self.app.options(path).status_code)
| 36.59901
| 77
| 0.661301
|
af9f26a74c40b99a0cddff26607a754e0a6dd05e
| 5,648
|
py
|
Python
|
examples/model_compress/pruning/v2/norm_pruning_torch.py
|
JiahangXu/nni
|
eb577361841ecd210aa2933d2f0500d981d02230
|
[
"MIT"
] | 1
|
2022-01-27T01:42:41.000Z
|
2022-01-27T01:42:41.000Z
|
examples/model_compress/pruning/v2/norm_pruning_torch.py
|
JiahangXu/nni
|
eb577361841ecd210aa2933d2f0500d981d02230
|
[
"MIT"
] | null | null | null |
examples/model_compress/pruning/v2/norm_pruning_torch.py
|
JiahangXu/nni
|
eb577361841ecd210aa2933d2f0500d981d02230
|
[
"MIT"
] | null | null | null |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
'''
NNI example for supported l1norm and l2norm pruning algorithms.
In this example, we show the end-to-end pruning process: pre-training -> pruning -> fine-tuning.
Note that pruners use masks to simulate the real pruning. In order to obtain a real compressed model, model speed up is required.
'''
import argparse
import sys
import torch
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import MultiStepLR
from nni.compression.pytorch import ModelSpeedup
from nni.compression.pytorch.utils.counter import count_flops_params
from nni.algorithms.compression.v2.pytorch.pruning.basic_pruner import L1NormPruner, L2NormPruner
sys.path.append('../../models')
from cifar10.vgg import VGG
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
normalize = transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
g_epoch = 0
train_loader = torch.utils.data.DataLoader(
datasets.CIFAR10('./data', train=True, transform=transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.RandomCrop(32, 4),
transforms.ToTensor(),
normalize,
]), download=True),
batch_size=128, shuffle=True)
test_loader = torch.utils.data.DataLoader(
datasets.CIFAR10('./data', train=False, transform=transforms.Compose([
transforms.ToTensor(),
normalize,
])),
batch_size=128, shuffle=False)
def trainer(model, optimizer, criterion):
global g_epoch
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data)
loss = criterion(output, target)
loss.backward()
optimizer.step()
if batch_idx and batch_idx % 100 == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
g_epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item()))
g_epoch += 1
def evaluator(model):
model.eval()
correct = 0.0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
acc = 100 * correct / len(test_loader.dataset)
print('Accuracy: {}%\n'.format(acc))
return acc
def optimizer_scheduler_generator(model, _lr=0.1, _momentum=0.9, _weight_decay=5e-4):
optimizer = torch.optim.SGD(model.parameters(), lr=_lr, momentum=_momentum, weight_decay=_weight_decay)
scheduler = MultiStepLR(optimizer, milestones=[int(args.pretrain_epochs * 0.5), int(args.pretrain_epochs * 0.75)], gamma=0.1)
return optimizer, scheduler
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='PyTorch Example for model compression')
parser.add_argument('--pruner', type=str, default='l1norm',
choices=['l1norm', 'l2norm'],
help='pruner to use')
parser.add_argument('--pretrain-epochs', type=int, default=20,
help='number of epochs to pretrain the model')
parser.add_argument('--fine-tune-epochs', type=int, default=20,
help='number of epochs to fine tune the model')
args = parser.parse_args()
print('\n' + '=' * 50 + ' START TO TRAIN THE MODEL ' + '=' * 50)
model = VGG().to(device)
optimizer, scheduler = optimizer_scheduler_generator(model)
criterion = torch.nn.CrossEntropyLoss()
pre_best_acc = 0.0
best_state_dict = None
for i in range(args.pretrain_epochs):
trainer(model, optimizer, criterion)
scheduler.step()
acc = evaluator(model)
if acc > pre_best_acc:
pre_best_acc = acc
best_state_dict = model.state_dict()
print("Best accuracy: {}".format(pre_best_acc))
model.load_state_dict(best_state_dict)
pre_flops, pre_params, _ = count_flops_params(model, torch.randn([128, 3, 32, 32]).to(device))
g_epoch = 0
# Start to prune and speedup
print('\n' + '=' * 50 + ' START TO PRUNE THE BEST ACCURACY PRETRAINED MODEL ' + '=' * 50)
config_list = [{
'sparsity': 0.5,
'op_types': ['Conv2d']
}]
if 'l1' in args.pruner:
pruner = L1NormPruner(model, config_list)
else:
pruner = L2NormPruner(model, config_list)
_, masks = pruner.compress()
pruner.show_pruned_weights()
pruner._unwrap_model()
ModelSpeedup(model, dummy_input=torch.rand([10, 3, 32, 32]).to(device), masks_file=masks).speedup_model()
print('\n' + '=' * 50 + ' EVALUATE THE MODEL AFTER SPEEDUP ' + '=' * 50)
evaluator(model)
    # The optimizer used in the pruner might be patched, so it is recommended to create a new optimizer for the fine-tuning stage.
print('\n' + '=' * 50 + ' START TO FINE TUNE THE MODEL ' + '=' * 50)
optimizer, scheduler = optimizer_scheduler_generator(model, _lr=0.01)
best_acc = 0.0
for i in range(args.fine_tune_epochs):
trainer(model, optimizer, criterion)
scheduler.step()
best_acc = max(evaluator(model), best_acc)
flops, params, results = count_flops_params(model, torch.randn([128, 3, 32, 32]).to(device))
print(f'Pretrained model FLOPs {pre_flops/1e6:.2f} M, #Params: {pre_params/1e6:.2f}M, Accuracy: {pre_best_acc: .2f}%')
print(f'Finetuned model FLOPs {flops/1e6:.2f} M, #Params: {params/1e6:.2f}M, Accuracy: {best_acc: .2f}%')
| 41.226277
| 129
| 0.659525
|
cb72137d9bb93d9de3041e6104026def7c9bcb58
| 4,017
|
py
|
Python
|
tests/test_struct/test_transformations/test_transfer.py
|
cthoyt/pybel
|
ed66f013a77f9cbc513892b0dad1025b8f68bb46
|
[
"Apache-2.0"
] | null | null | null |
tests/test_struct/test_transformations/test_transfer.py
|
cthoyt/pybel
|
ed66f013a77f9cbc513892b0dad1025b8f68bb46
|
[
"Apache-2.0"
] | 11
|
2017-12-28T08:03:14.000Z
|
2019-01-15T02:13:58.000Z
|
tests/test_struct/test_transformations/test_transfer.py
|
cthoyt/pybel
|
ed66f013a77f9cbc513892b0dad1025b8f68bb46
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Tests for transfer of knowledge and inference functions."""
import unittest
from pybel.examples.statin_example import (
avorastatin,
ec_11134,
ec_11188,
fluvastatin,
hmgcr,
hmgcr_inhibitor,
mevinolinic_acid,
statin,
statin_graph,
synthetic_statin,
)
from pybel.struct.mutation import infer_child_relations
from pybel.struct.mutation.inference.transfer import iter_children
class TestTransfer(unittest.TestCase):
"""Tests for transfer of knowledge and inference functions."""
def test_get_children(self):
"""Test iterating over the children of a node."""
children = list(iter_children(statin_graph, hmgcr_inhibitor))
self.assertNotEqual(0, len(children), msg="no children found")
self.assertIn(mevinolinic_acid, children, msg="direct child not found")
def test_infer(self):
"""Test inferring child relations."""
graph = statin_graph.copy()
self.assertEqual(9, graph.number_of_nodes())
self.assertEqual(8, graph.number_of_edges())
self.assertNotIn(ec_11134, graph[fluvastatin])
self.assertNotIn(ec_11188, graph[fluvastatin])
self.assertNotIn(ec_11134, graph[avorastatin])
self.assertNotIn(ec_11188, graph[avorastatin])
self.assertNotIn(ec_11134, graph[synthetic_statin])
self.assertNotIn(ec_11188, graph[synthetic_statin])
self.assertNotIn(ec_11134, graph[statin])
self.assertNotIn(ec_11188, graph[statin])
self.assertNotIn(ec_11134, graph[mevinolinic_acid])
self.assertNotIn(ec_11188, graph[mevinolinic_acid])
self.assertIn(ec_11134, graph[hmgcr_inhibitor])
self.assertIn(ec_11188, graph[hmgcr_inhibitor])
infer_child_relations(graph, hmgcr_inhibitor)
self.assertIn(ec_11134, graph[fluvastatin])
self.assertIn(ec_11188, graph[fluvastatin])
self.assertIn(ec_11134, graph[avorastatin])
self.assertIn(ec_11188, graph[avorastatin])
self.assertIn(ec_11134, graph[synthetic_statin])
self.assertIn(ec_11188, graph[synthetic_statin])
self.assertIn(ec_11134, graph[statin])
self.assertIn(ec_11188, graph[statin])
self.assertIn(ec_11134, graph[mevinolinic_acid])
self.assertIn(ec_11188, graph[mevinolinic_acid])
self.assertIn(ec_11134, graph[hmgcr_inhibitor])
self.assertIn(ec_11188, graph[hmgcr_inhibitor])
self.assertEqual(9, graph.number_of_nodes())
self.assertEqual(18, graph.number_of_edges())
infer_child_relations(graph, ec_11134)
self.assertIn(hmgcr, graph[fluvastatin])
self.assertIn(hmgcr, graph[avorastatin])
self.assertIn(hmgcr, graph[synthetic_statin])
self.assertIn(hmgcr, graph[statin])
self.assertIn(hmgcr, graph[mevinolinic_acid])
self.assertIn(hmgcr, graph[hmgcr_inhibitor])
self.assertEqual(9, graph.number_of_nodes())
self.assertEqual(24, graph.number_of_edges())
self.assertEqual(
9,
statin_graph.number_of_nodes(),
msg="original graph nodes should not be modified",
)
self.assertEqual(
8,
statin_graph.number_of_edges(),
msg="original graph edges should not be modified",
)
def test_does_not_redo(self):
"""Test that :func:`propagate_node_relations` does not add the same edges twice."""
graph = statin_graph.copy()
self.assertEqual(9, graph.number_of_nodes())
self.assertEqual(8, graph.number_of_edges())
infer_child_relations(graph, hmgcr_inhibitor)
self.assertEqual(9, graph.number_of_nodes())
self.assertEqual(18, graph.number_of_edges())
infer_child_relations(graph, hmgcr_inhibitor)
self.assertEqual(9, graph.number_of_nodes())
self.assertEqual(18, graph.number_of_edges(), msg="edges should not be added again")
if __name__ == "__main__":
unittest.main()
| 36.518182
| 92
| 0.685337
|
961d7fe963e0de1296e9fb54c77b5e6d60125e06
| 988
|
py
|
Python
|
tests/test_urlparse4.py
|
mladenangel/pyurlparser
|
fda910309aa189d57473dbb12e2d2acde49c1736
|
[
"Apache-2.0"
] | 45
|
2016-07-07T20:15:13.000Z
|
2021-03-28T22:59:13.000Z
|
tests/test_urlparse4.py
|
mladenangel/pyurlparser
|
fda910309aa189d57473dbb12e2d2acde49c1736
|
[
"Apache-2.0"
] | 14
|
2016-07-09T18:32:13.000Z
|
2020-07-01T16:33:59.000Z
|
tests/test_urlparse4.py
|
mladenangel/pyurlparser
|
fda910309aa189d57473dbb12e2d2acde49c1736
|
[
"Apache-2.0"
] | 8
|
2016-07-07T20:21:50.000Z
|
2019-12-10T22:16:30.000Z
|
# https://github.com/python/cpython/blob/40dac3272231773af0015fc35df5353783d77c4e/Lib/test/test_urlparse.py
import sys
import os
sys.path.insert(-1, os.path.dirname(os.path.dirname(__file__)))
from test import test_support
import unittest
import urlparse4 as urlparse
urlsplit_testcases = [
["mailto:webtechs@oltn.odl.state.ok.us", ("mailto", "webtechs@oltn.odl.state.ok.us", "", "", "")],
["mailto:mailto:webtechs@oltn.odl.state.ok.us", ("mailto", "mailto:webtechs@oltn.odl.state.ok.us", "", "", "")],
["http://a@example.com:80", ("http", "a@example.com:80", "", "", "")],
]
urljoin_testcases = [
[("", "http://example.com"), "http://example.com"]
]
class UrlParse4TestCase(unittest.TestCase):
def test_urlsplit(self):
for case in urlsplit_testcases:
self.assertEqual(urlparse.urlsplit(case[0]), case[1])
def test_urljoin(self):
for case in urljoin_testcases:
self.assertEqual(urlparse.urljoin(*case[0]), case[1])
| 30.875
| 116
| 0.671053
|
ab0032a2a324a944050d61ed00f47e160305c60e
| 2,302
|
py
|
Python
|
qal/dataset/tests/test_files.py
|
OptimalBPM/qal
|
4d7a31c0d68042b4110e1fa3e733711e0fdd473e
|
[
"Unlicense"
] | 3
|
2016-05-02T14:35:55.000Z
|
2021-08-31T14:19:15.000Z
|
qal/dataset/tests/test_files.py
|
OptimalBPM/qal
|
4d7a31c0d68042b4110e1fa3e733711e0fdd473e
|
[
"Unlicense"
] | null | null | null |
qal/dataset/tests/test_files.py
|
OptimalBPM/qal
|
4d7a31c0d68042b4110e1fa3e733711e0fdd473e
|
[
"Unlicense"
] | 1
|
2018-03-18T13:19:52.000Z
|
2018-03-18T13:19:52.000Z
|
"""
Created on Dec 17, 2013
@author: Nicklas Boerjesson
"""
import json
import unittest
from shutil import copyfile
import os
from qal.dataset.files import FilesDataset
from qal.common.resources import Resources
from qal.dataset.custom import DATASET_LOGLEVEL_DETAIL
Test_Script_Dir = os.path.dirname(__file__)
Test_Resource_Dir = os.path.join(Test_Script_Dir, 'resources')
class Test(unittest.TestCase):
def __init__(self, _method_name='runTest'):
self.maxDiff = None
super(Test, self).__init__(_method_name)
def test_load_into_db(self):
"""
        This test loads files into a dataset structure and then writes them back to files
"""
# Use the XML dest_in-file to have something to compare
copyfile(Test_Resource_Dir + "/xml_dest_in.xml", Test_Resource_Dir + "/files_data_xml.xml")
copyfile(Test_Resource_Dir + "/jpeg_source.jpg", Test_Resource_Dir + "/files_data_jpeg.jpg")
_f_r = open(Test_Resource_Dir + "/resources.json", "r")
_resources_list = json.load(_f_r)
_resources = Resources(_resources_list=_resources_list, _base_path=Test_Resource_Dir)
# Init tests
_source = FilesDataset(_resource=_resources.get_resource("{42446be5-12a0-4781-aef6-04d52e6d47d6}"))
_source._log_level = DATASET_LOGLEVEL_DETAIL
_source.load()
# Remove temporary source
os.remove(Test_Resource_Dir + "/files_data_xml.xml")
os.remove(Test_Resource_Dir + "/files_data_jpeg.jpg")
# Write back
_source.save()
# Compare XML
_f_a = open(Test_Resource_Dir + "/xml_dest_in.xml", "r")
_f_b = open(Test_Resource_Dir + "/files_data_xml.xml", "r")
_a = _f_a.read()
_b = _f_b.read()
_f_a.close()
_f_b.close()
self.assertEqual(_a, _b, "test_1_Load_Save: XML-File doesn't match")
# Compare binary JPG
_f_a = open(Test_Resource_Dir + "/jpeg_source.jpg", "rb")
_f_b = open(Test_Resource_Dir + "/files_data_jpeg.jpg", "rb")
_a = _f_a.read()
_b = _f_b.read()
_f_a.close()
_f_b.close()
self.assertEqual(_a, _b, "test_1_Load_Save: JPEG-File doesn't match")
if __name__ == "__main__":
# import sys;sys.argv = ['', 'Test.testName']
unittest.main()
| 31.534247
| 107
| 0.662467
|
864eb8bef4bad09c1f6a91b10224fe72769b8c69
| 1,775
|
py
|
Python
|
main.py
|
Raisahab77/library_management_system
|
7b0f74029b1424d37b715e9a43eb78478c07a8e8
|
[
"MIT"
] | null | null | null |
main.py
|
Raisahab77/library_management_system
|
7b0f74029b1424d37b715e9a43eb78478c07a8e8
|
[
"MIT"
] | null | null | null |
main.py
|
Raisahab77/library_management_system
|
7b0f74029b1424d37b715e9a43eb78478c07a8e8
|
[
"MIT"
] | null | null | null |
from tkinter import *
from PIL import ImageTk,Image
from tkinter import messagebox
from AddBook import *
from DeleteBook import *
from ViewBooks import *
from IssueBook import *
from ReturnBook import *
import sqlite3
con = sqlite3.connect(mydatabase)
cur = con.cursor()
cur.execute("""CREATE TABLE IF NOT EXISTS bookTable (book_id varchar(20) primary key, book_title varchar(30), author varchar(30), status varchar(30))""")
root = Tk()
root.title("Library")
root.minsize(width=400,height=400)
root.geometry("1366x720")
frame = Frame(root,bg="white")
frame.place(relheight=1,relwidth=1)
bg = ImageTk.PhotoImage(file="library.jpg")
photo = PhotoImage(file="buttons.png")
resizeImg = photo.subsample(5,5)
# Create a Canvas
canvas = Canvas(frame, width=700, height=3500)
canvas.pack(fill=BOTH, expand=True)
# Add Image inside the Canvas
canvas.create_image(0, 0, image=bg, anchor='nw')
btn1 = Button(frame,text="Add Book Details",bg='black',image=resizeImg, fg='white', command=addBook,relief=FLAT,compound=CENTER)
btn1.place(relx=0.45,rely=0.2)
btn2 = Button(frame,text="Delete Book",bg='black', fg='white',image=resizeImg, command=delete,relief=FLAT,compound=CENTER)
btn2.place(relx=0.45,rely=0.35)
btn3 = Button(frame,text="View Book List",bg='black', fg='white',image=resizeImg, command=View,relief=FLAT,compound=CENTER)
btn3.place(relx=0.45,rely=0.5)
btn4 = Button(frame,text="Issue Book to Student",bg='black', fg='white',image=resizeImg, command = issueBook,relief=FLAT,compound=CENTER)
btn4.place(relx=0.45,rely=0.65)
btn5 = Button(frame,text="Return Book",bg='black', fg='white',image=resizeImg, command = returnBook,relief=FLAT,compound=CENTER)
btn5.place(relx=0.45,rely=0.80)
root.mainloop()
| 36.22449
| 154
| 0.725634
|
3c59f30c54696b2c74e67740c78e54f5026307f0
| 1,452
|
py
|
Python
|
Linear/ShearCenter/test.py
|
mkraska/CalculiX-Examples
|
5e28b3d06331f3a958b1758b31cb84b2e253553b
|
[
"MIT"
] | 177
|
2016-01-03T14:33:44.000Z
|
2022-02-15T11:29:48.000Z
|
Linear/ShearCenter/test.py
|
xyg1996/CalculiX-Examples
|
5e28b3d06331f3a958b1758b31cb84b2e253553b
|
[
"MIT"
] | 52
|
2016-01-05T20:37:04.000Z
|
2022-02-06T15:30:29.000Z
|
Linear/ShearCenter/test.py
|
xyg1996/CalculiX-Examples
|
5e28b3d06331f3a958b1758b31cb84b2e253553b
|
[
"MIT"
] | 73
|
2016-02-09T12:17:06.000Z
|
2022-01-22T13:31:00.000Z
|
#!/usr/bin/python
import os
import multiprocessing
import shutil
# Provide access to the helper scripts
def modify_path():
scripts_dir = os.path.dirname(__file__)
while not 'Scripts' in os.listdir(scripts_dir):
scripts_dir = os.path.abspath(os.path.join(scripts_dir, '..'))
scripts_dir = os.path.join(scripts_dir, 'Scripts')
if not scripts_dir in os.environ['PATH']:
os.environ['PATH'] += os.pathsep + scripts_dir
print '\nPATH = {}\n'.format(os.environ['PATH'])
# Move new files and folders to 'Refs'
def move(old_snap):
new_snap = os.listdir(os.curdir)
if not os.path.exists('Refs'):
os.mkdir('Refs')
for f in new_snap:
if not f in old_snap:
fname = os.path.basename(f)
new_name = os.path.join(os.curdir, 'Refs', fname)
if os.path.isfile(new_name):
os.remove(new_name)
if os.path.isdir(new_name):
shutil.rmtree(new_name)
os.rename(f, new_name)
if __name__ == '__main__':
# Enable multithreading for ccx
os.environ['OMP_NUM_THREADS'] = str(multiprocessing.cpu_count())
# Explicitly move to example's directory
os.chdir(os.path.dirname(__file__))
# Run the example
modify_path()
snap = os.listdir(os.curdir)
os.system("param.py par.II-pre.fbl")
os.system("cgx -b II-pre.fbl")
os.system("ccx II")
os.system("cgx -b II-post.fbl")
move(snap)
| 29.04
| 70
| 0.630165
|
6b1a2e2efd8dd3d6bdd04a6f1f83ab9ad73f826e
| 865
|
py
|
Python
|
byceps/services/email/transfer/models.py
|
GyBraLAN/byceps
|
b53087849c10a531b66d08999116fa1bef312a7f
|
[
"BSD-3-Clause"
] | 33
|
2018-01-16T02:04:51.000Z
|
2022-03-22T22:57:29.000Z
|
byceps/services/email/transfer/models.py
|
GyBraLAN/byceps
|
b53087849c10a531b66d08999116fa1bef312a7f
|
[
"BSD-3-Clause"
] | 7
|
2019-06-16T22:02:03.000Z
|
2021-10-02T13:45:31.000Z
|
byceps/services/email/transfer/models.py
|
GyBraLAN/byceps
|
b53087849c10a531b66d08999116fa1bef312a7f
|
[
"BSD-3-Clause"
] | 14
|
2019-06-01T21:39:24.000Z
|
2022-03-14T17:56:43.000Z
|
"""
byceps.services.email.transfer.models
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2021 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
from __future__ import annotations
from dataclasses import dataclass
from email.utils import formataddr
from typing import Optional
from ....typing import BrandID
@dataclass(frozen=True)
class NameAndAddress:
name: Optional[str]
address: str
def format(self):
"""Format the name and address as a string value suitable for an
e-mail header.
"""
return formataddr((self.name, self.address))
@dataclass(frozen=True)
class EmailConfig:
brand_id: BrandID
sender: NameAndAddress
contact_address: str
@dataclass(frozen=True)
class Message:
sender: NameAndAddress
recipients: list[str]
subject: str
body: str
| 20.595238
| 72
| 0.685549
|
624d58b1dd34ca5a94b424b02f1eb8db3ab2bb9c
| 1,598
|
py
|
Python
|
renzongxian/0022/0022.py
|
saurabh896/python-1
|
f8d3aedf4c0fe6e24dfa3269ea7e642c9f7dd9b7
|
[
"MIT"
] | 3,976
|
2015-01-01T15:49:39.000Z
|
2022-03-31T03:47:56.000Z
|
renzongxian/0022/0022.py
|
dwh65416396/python
|
1a7e3edd1cd3422cc0eaa55471a0b42e004a9a1a
|
[
"MIT"
] | 97
|
2015-01-11T02:59:46.000Z
|
2022-03-16T14:01:56.000Z
|
renzongxian/0022/0022.py
|
dwh65416396/python
|
1a7e3edd1cd3422cc0eaa55471a0b42e004a9a1a
|
[
"MIT"
] | 3,533
|
2015-01-01T06:19:30.000Z
|
2022-03-28T13:14:54.000Z
|
# Source:https://github.com/Show-Me-the-Code/show-me-the-code
# Author:renzongxian
# Date:2014-12-24
# Python 3.4
"""
Problem 0022: The iPhone 6 and iPhone 6 Plus have long been on sale. Check whether your code for Problem 0005 can be reused.
"""
from PIL import Image
import os
import sys
def resize_image(image, t_weight, t_height):
im = Image.open(image)
weight, height = im.size
if weight > t_weight or height > t_height:
dw = weight / t_weight
dh = height / t_height
ds = max(dw, dh)
new_weight = int(weight / ds)
new_height = int(height / ds)
im = im.resize((new_weight, new_height))
print("Succeed to resize the image %s to %s*%s " % (image, new_weight, new_height))
im.save(image)
else:
print("The image %s doesn't need to be resized." % image)
if __name__ == "__main__":
trans_weight = 0
trans_height = 0
if len(sys.argv) <= 1:
print("Need at least 1 parameter. Try to execute 'python 0022.py $dir_path'")
else:
while True:
flag = input("请选择你要转换的分辨率: 1. iPhone 6 2. iPhone 6 Plus:")
if flag == '1':
trans_weight = 750
trans_height = 1334
break
elif flag == '2':
trans_weight = 1080
trans_height = 1920
break
else:
print("输入有误,重新选择!")
for dir_path in sys.argv[1:]:
for image_name in os.listdir(dir_path):
image_path = os.path.join(dir_path, image_name)
resize_image(image_path, trans_weight, trans_height)
| 28.535714
| 92
| 0.56821
|
6a3f34e8a5aaae7b7164afc25f4bdb686e3a31a7
| 3,155
|
py
|
Python
|
koku/masu/prometheus_stats.py
|
Vasyka/koku
|
b5aa9ec41c3b0821e74afe9ff3a5ffaedb910614
|
[
"Apache-2.0"
] | 2
|
2022-01-12T03:42:39.000Z
|
2022-01-12T03:42:40.000Z
|
koku/masu/prometheus_stats.py
|
Vasyka/koku
|
b5aa9ec41c3b0821e74afe9ff3a5ffaedb910614
|
[
"Apache-2.0"
] | null | null | null |
koku/masu/prometheus_stats.py
|
Vasyka/koku
|
b5aa9ec41c3b0821e74afe9ff3a5ffaedb910614
|
[
"Apache-2.0"
] | 1
|
2021-07-21T09:33:59.000Z
|
2021-07-21T09:33:59.000Z
|
#
# Copyright 2021 Red Hat Inc.
# SPDX-License-Identifier: Apache-2.0
#
"""Prometheus Stats."""
from prometheus_client import CollectorRegistry
from prometheus_client import Counter
from prometheus_client import Gauge
from prometheus_client import multiprocess
WORKER_REGISTRY = CollectorRegistry()
multiprocess.MultiProcessCollector(WORKER_REGISTRY)
GET_REPORT_ATTEMPTS_COUNTER = Counter(
"get_report_files_attempts_count", "Number of ingest attempts", ["provider_type"], registry=WORKER_REGISTRY
)
REPORT_FILE_DOWNLOAD_ERROR_COUNTER = Counter(
"report_file_download_error_count",
"Number of report file download errors",
["provider_type"],
registry=WORKER_REGISTRY,
)
PROCESS_REPORT_ATTEMPTS_COUNTER = Counter(
"process_report_attempts_count",
"Number of report files attempted processing",
["provider_type"],
registry=WORKER_REGISTRY,
)
PROCESS_REPORT_ERROR_COUNTER = Counter(
"process_report_error_count",
"Number of report files attempted processing",
["provider_type"],
registry=WORKER_REGISTRY,
)
REPORT_SUMMARY_ATTEMPTS_COUNTER = Counter(
"report_summary_attempts_count", "Number of report summary attempts", ["provider_type"], registry=WORKER_REGISTRY
)
COST_MODEL_COST_UPDATE_ATTEMPTS_COUNTER = Counter(
"charge_update_attempts_count", "Number of derivied cost update attempts", registry=WORKER_REGISTRY
)
COST_SUMMARY_ATTEMPTS_COUNTER = Counter(
"cost_summary_attempts_count", "Number of cost summary update attempts", registry=WORKER_REGISTRY
)
KAFKA_CONNECTION_ERRORS_COUNTER = Counter(
"kafka_connection_errors", "Number of Kafka connection errors", registry=WORKER_REGISTRY
)
CELERY_ERRORS_COUNTER = Counter("celery_errors", "Number of celery errors", registry=WORKER_REGISTRY)
DOWNLOAD_BACKLOG = Gauge("download_backlog", "Number of celery tasks in the download queue", registry=WORKER_REGISTRY)
SUMMARY_BACKLOG = Gauge("summary_backlog", "Number of celery tasks in the summary queue", registry=WORKER_REGISTRY)
PRIORITY_BACKLOG = Gauge("priority_backlog", "Number of celery tasks in the priority queue", registry=WORKER_REGISTRY)
REFRESH_BACKLOG = Gauge("refresh_backlog", "Number of celery tasks in the refresh queue", registry=WORKER_REGISTRY)
COST_MODEL_BACKLOG = Gauge(
"cost_model_backlog", "Number of celery tasks in the cost model queue", registry=WORKER_REGISTRY
)
DEFAULT_BACKLOG = Gauge("default_backlog", "Number of celery tasks in the default queue", registry=WORKER_REGISTRY)
QUEUES = {
"download": DOWNLOAD_BACKLOG,
"summary": SUMMARY_BACKLOG,
"priority": PRIORITY_BACKLOG,
"refresh": REFRESH_BACKLOG,
"cost_model": COST_MODEL_BACKLOG,
"celery": DEFAULT_BACKLOG,
}
SOURCES_KAFKA_LOOP_RETRY = Counter(
"sources_kafka_retry_errors", "Number of sources kafka retry errors", registry=WORKER_REGISTRY
)
SOURCES_PROVIDER_OP_RETRY_LOOP_COUNTER = Counter(
"sources_provider_op_retry_errors", "Number of sources provider operation retry errors", registry=WORKER_REGISTRY
)
SOURCES_HTTP_CLIENT_ERROR_COUNTER = Counter(
"sources_http_client_errors", "Number of sources http client errors", registry=WORKER_REGISTRY
)
| 38.950617
| 118
| 0.796513
|
b65d1ce0d1d63d72885fff0f14ddd2e2bfa6f2e8
| 1,140
|
py
|
Python
|
command_helper/utils/util.py
|
wasteland-rider/sublime-robot-framework-assistant
|
7d75b85c5364995f1012a55ac9f67600aa8ba5e3
|
[
"MIT"
] | null | null | null |
command_helper/utils/util.py
|
wasteland-rider/sublime-robot-framework-assistant
|
7d75b85c5364995f1012a55ac9f67600aa8ba5e3
|
[
"MIT"
] | null | null | null |
command_helper/utils/util.py
|
wasteland-rider/sublime-robot-framework-assistant
|
7d75b85c5364995f1012a55ac9f67600aa8ba5e3
|
[
"MIT"
] | null | null | null |
import re
from json import load as json_load
def get_data_from_json(json_file):
f = open(json_file)
data = json_load(f)
f.close()
return data
def _keyword_with_embedded_arg(kw, kw_candite):
kw = kw.lower().replace(' ', '').replace('_', '')
kw_candite = kw_candite.lower().replace(' ', '').replace('_', '')
kw_re = re.sub(r'(?i)(\$\{[\w ]*\})', r'(?i)(\\S+)', kw_candite)
return re.search(kw_re, kw)
def _keyword_no_embedded_arg(kw, kw_candite):
kw = kw.lower().replace(' ', '').replace('_', '')
kw_candite = kw_candite.lower().replace(' ', '').replace('_', '')
kw_candite = kw_candite.lstrip('.')
return kw == kw_candite
def kw_equals_kw_candite(kw, kw_candite):
"""Returns True if kw == kw_canditate
Spaces, under score are removed and
strings are converted to lower before validation.
Also support keyword conditate with emedded args
"""
if '$' in kw_candite:
return _keyword_with_embedded_arg(kw, kw_candite)
else:
return _keyword_no_embedded_arg(kw, kw_candite)
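# Illustrative usage (a sketch, not part of the original module): keyword names
# are compared case-insensitively with spaces and underscores stripped, and a
# candidate may also carry embedded ${...} arguments.
if __name__ == '__main__':
    print(kw_equals_kw_candite('Run Keyword If', 'run_keyword_if'))      # True
    print(kw_equals_kw_candite('Run Keyword If', 'run keyword unless'))  # False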
| 30
| 70
| 0.604386
|
9e9ba0a0249d19c84179b6049ea72dc7b56979e6
| 494
|
py
|
Python
|
pset_classes/vehicles/p6.py
|
mottaquikarim/pydev-psets
|
9749e0d216ee0a5c586d0d3013ef481cc21dee27
|
[
"MIT"
] | 5
|
2019-04-08T20:05:37.000Z
|
2019-12-04T20:48:45.000Z
|
pset_classes/vehicles/p6.py
|
mottaquikarim/pydev-psets
|
9749e0d216ee0a5c586d0d3013ef481cc21dee27
|
[
"MIT"
] | 8
|
2019-04-15T15:16:05.000Z
|
2022-02-12T10:33:32.000Z
|
pset_classes/vehicles/p6.py
|
mottaquikarim/pydev-psets
|
9749e0d216ee0a5c586d0d3013ef481cc21dee27
|
[
"MIT"
] | 2
|
2019-04-10T00:14:42.000Z
|
2020-02-26T20:35:21.000Z
|
"""
Vehicles VI
"""
# Next, define a new method called "check_fuel_level()" for your newly expanded Car class. If the fuel_level attribute is < 15, the method should reset fuel_level to 50 and print out how many units it refueled the car by, e.g. 'Refueled 38 units.' Otherwise, it should simply print 'No need to refuel right now.'
# Create at least TWO instances of Car, one of which has a fuel level below 15. Access the new attributes and call the check_fuel_level() method for each instance.
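# A minimal sketch of one possible solution (not part of the original prompt).
# It assumes the expanded Car class from the earlier problems exposes a fuel_level
# attribute; the make/model attributes below are illustrative placeholders.
class Car:
    def __init__(self, make, model, fuel_level):
        self.make = make
        self.model = model
        self.fuel_level = fuel_level
    def check_fuel_level(self):
        # Refill to 50 units when running low, otherwise report that no refuel is needed.
        if self.fuel_level < 15:
            refueled = 50 - self.fuel_level
            self.fuel_level = 50
            print('Refueled %d units.' % refueled)
        else:
            print('No need to refuel right now.')
car_low = Car('Toyota', 'Corolla', 12)   # below the refuel threshold
car_ok = Car('Honda', 'Civic', 40)
car_low.check_fuel_level()               # prints: Refueled 38 units.
car_ok.check_fuel_level()                # prints: No need to refuel right now.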
| 61.75
| 307
| 0.761134
|
50f10e5e96fe62ba91f541147c5d8e79baa41e06
| 4,815
|
py
|
Python
|
cipher.py
|
peterwilliams97/compression
|
dd40032a24cc111d4a881f4f8aa2389df519ddd0
|
[
"MIT"
] | null | null | null |
cipher.py
|
peterwilliams97/compression
|
dd40032a24cc111d4a881f4f8aa2389df519ddd0
|
[
"MIT"
] | null | null | null |
cipher.py
|
peterwilliams97/compression
|
dd40032a24cc111d4a881f4f8aa2389df519ddd0
|
[
"MIT"
] | null | null | null |
from collections import defaultdict
from letter_frequencies import letter_frequencies
def xor(x, y):
return [a ^ b for a, b in zip(x, y)]
def from_h(s):
"""Returns list of bytes corresponding to hex string `s`"""
assert len(s) % 2 == 0
return [int(s[2 * i: 2 * i + 2], 16) for i in range(len(s) // 2)]
ciphertexts = [
# ciphertext #1:
'315c4eeaa8b5f8aaf9174145bf43e1784b8fa00dc71d885a804e5ee9fa40b16349c146fb778cdf2d3aff021dfff5b403b510d0d0455468aeb98622b137dae857553ccd8883a7bc37520e06e515d22c954eba5025b8cc57ee59418ce7dc6bc41556bdb36bbca3e8774301fbcaa3b83b220809560987815f65286764703de0f3d524400a19b159610b11ef3e',
# ciphertext #2:
'234c02ecbbfbafa3ed18510abd11fa724fcda2018a1a8342cf064bbde548b12b07df44ba7191d9606ef4081ffde5ad46a5069d9f7f543bedb9c861bf29c7e205132eda9382b0bc2c5c4b45f919cf3a9f1cb74151f6d551f4480c82b2cb24cc5b028aa76eb7b4ab24171ab3cdadb8356f',
# ciphertext #3:
'32510ba9a7b2bba9b8005d43a304b5714cc0bb0c8a34884dd91304b8ad40b62b07df44ba6e9d8a2368e51d04e0e7b207b70b9b8261112bacb6c866a232dfe257527dc29398f5f3251a0d47e503c66e935de81230b59b7afb5f41afa8d661cb',
# ciphertext #4:
'32510ba9aab2a8a4fd06414fb517b5605cc0aa0dc91a8908c2064ba8ad5ea06a029056f47a8ad3306ef5021eafe1ac01a81197847a5c68a1b78769a37bc8f4575432c198ccb4ef63590256e305cd3a9544ee4160ead45aef520489e7da7d835402bca670bda8eb775200b8dabbba246b130f040d8ec6447e2c767f3d30ed81ea2e4c1404e1315a1010e7229be6636aaa',
# ciphertext #5:
'3f561ba9adb4b6ebec54424ba317b564418fac0dd35f8c08d31a1fe9e24fe56808c213f17c81d9607cee021dafe1e001b21ade877a5e68bea88d61b93ac5ee0d562e8e9582f5ef375f0a4ae20ed86e935de81230b59b73fb4302cd95d770c65b40aaa065f2a5e33a5a0bb5dcaba43722130f042f8ec85b7c2070',
# ciphertext #6:
'32510bfbacfbb9befd54415da243e1695ecabd58c519cd4bd2061bbde24eb76a19d84aba34d8de287be84d07e7e9a30ee714979c7e1123a8bd9822a33ecaf512472e8e8f8db3f9635c1949e640c621854eba0d79eccf52ff111284b4cc61d11902aebc66f2b2e436434eacc0aba938220b084800c2ca4e693522643573b2c4ce35050b0cf774201f0fe52ac9f26d71b6cf61a711cc229f77ace7aa88a2f19983122b11be87a59c355d25f8e4',
# ciphertext #7:
'32510bfbacfbb9befd54415da243e1695ecabd58c519cd4bd90f1fa6ea5ba47b01c909ba7696cf606ef40c04afe1ac0aa8148dd066592ded9f8774b529c7ea125d298e8883f5e9305f4b44f915cb2bd05af51373fd9b4af511039fa2d96f83414aaaf261bda2e97b170fb5cce2a53e675c154c0d9681596934777e2275b381ce2e40582afe67650b13e72287ff2270abcf73bb028932836fbdecfecee0a3b894473c1bbeb6b4913a536ce4f9b13f1efff71ea313c8661dd9a4ce',
# ciphertext #8:
'315c4eeaa8b5f8bffd11155ea506b56041c6a00c8a08854dd21a4bbde54ce56801d943ba708b8a3574f40c00fff9e00fa1439fd0654327a3bfc860b92f89ee04132ecb9298f5fd2d5e4b45e40ecc3b9d59e9417df7c95bba410e9aa2ca24c5474da2f276baa3ac325918b2daada43d6712150441c2e04f6565517f317da9d3',
# ciphertext #9:
'271946f9bbb2aeadec111841a81abc300ecaa01bd8069d5cc91005e9fe4aad6e04d513e96d99de2569bc5e50eeeca709b50a8a987f4264edb6896fb537d0a716132ddc938fb0f836480e06ed0fcd6e9759f40462f9cf57f4564186a2c1778f1543efa270bda5e933421cbe88a4a52222190f471e9bd15f652b653b7071aec59a2705081ffe72651d08f822c9ed6d76e48b63ab15d0208573a7eef027',
# ciphertext #10:
'466d06ece998b7a2fb1d464fed2ced7641ddaa3cc31c9941cf110abbf409ed39598005b3399ccfafb61d0315fca0a314be138a9f32503bedac8067f03adbf3575c3b8edc9ba7f537530541ab0f9f3cd04ff50d66f1d559ba520e89a2cb2a83',
# target ciphertext (decrypt this one):
'32510ba9babebbbefd001547a810e67149caee11d945cd7fc81a05e9f85aac650e9052ba6a8cd8257bf14d13e6f0a803b54fde9e77472dbff89d71b57bddef121336cb85ccb8f3315f4b52e301d16e9f52f904'
]
ciphers = [from_h(s) for s in ciphertexts]
test = ciphers[-1]
train = ciphers[:-1]
# alphabet is the plaintext alphabet as numbers (not chars)
alphabet = list(range(32, 256))
def prob_xor(c):
"""Returns dict {a: p} where `a` is a character from `alphabet` and p is the probability of `a`
being xor'd with another character to give `c`
"""
prob = {}
for a in alphabet:
v = c ^ a
if v < 32 or v >= 128:
continue
aa = chr(a)
vv = chr(v)
if aa == ' ':
prob[vv] = letter_frequencies['Space']
else:
A = aa.upper()
if A in letter_frequencies:
prob[vv] = letter_frequencies[A]
return prob
letter_prob_alphabet = {c: prob_xor(c) for c in alphabet}
def most_probable(i):
"""Returns most probable value of ith character in plaintext of ciphertext `test`"""
probabilities = defaultdict(float)
for cip in train:
v = test[i] ^ cip[i]
letter_prob = letter_prob_alphabet.get(v, {})
for a, p in letter_prob.items():
probabilities[a] += p
return max(probabilities, key=lambda k: probabilities[k])
guess = [most_probable(i) for i in range(len(test))]
print(''.join(guess))
| 49.639175
| 379
| 0.82243
|
4ca151ba9d7c5e807fc0a3dd9deb223c22ad77bf
| 58
|
py
|
Python
|
database.py
|
monologid/paperboy
|
dfb427af50c576057a350e4bab60ca5c56136932
|
[
"Apache-2.0"
] | null | null | null |
database.py
|
monologid/paperboy
|
dfb427af50c576057a350e4bab60ca5c56136932
|
[
"Apache-2.0"
] | null | null | null |
database.py
|
monologid/paperboy
|
dfb427af50c576057a350e4bab60ca5c56136932
|
[
"Apache-2.0"
] | null | null | null |
from core.storage import Storage
DB = Storage().connect()
| 19.333333
| 32
| 0.758621
|
3bc9e50ffe4dd0e4944421dbc0f306f1e8c8cc2d
| 22,915
|
py
|
Python
|
qctrlopencontrols/driven_controls/driven_control.py
|
kingjoseph223/python-open-controls
|
b21431c039aea2d466996f33baec01a833f55eaf
|
[
"Apache-2.0"
] | null | null | null |
qctrlopencontrols/driven_controls/driven_control.py
|
kingjoseph223/python-open-controls
|
b21431c039aea2d466996f33baec01a833f55eaf
|
[
"Apache-2.0"
] | 1
|
2020-07-22T00:57:48.000Z
|
2020-07-22T00:57:48.000Z
|
qctrlopencontrols/driven_controls/driven_control.py
|
kingjoseph223/python-open-controls
|
b21431c039aea2d466996f33baec01a833f55eaf
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Q-CTRL Pty Ltd & Q-CTRL Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Driven control module.
"""
import json
import numpy as np
from ..driven_controls import (
LOWER_BOUND_DURATION,
UPPER_BOUND_DETUNING_RATE,
UPPER_BOUND_DURATION,
UPPER_BOUND_RABI_RATE,
UPPER_BOUND_SEGMENTS,
)
from ..exceptions import ArgumentsValueError
from ..utils import (
Coordinate,
FileFormat,
FileType,
create_repr_from_attributes,
)
class DrivenControl:
"""
    Creates a driven control. A driven control is a set of segments made up of amplitude vectors
and corresponding durations.
Parameters
----------
rabi_rates : numpy.ndarray, optional
1-D array of size nx1 where n is number of segments;
Each entry is the rabi rate for the segment. Defaults to None
azimuthal_angles : numpy.ndarray, optional
1-D array of size nx1 where n is the number of segments;
Each entry is the azimuthal angle for the segment; Defaults to None
detunings : numpy.ndarray, optional
1-D array of size nx1 where n is the number of segments;
Each entry is the detuning angle for the segment; Defaults to None
durations : numpy.ndarray, optional
1-D array of size nx1 where n is the number of segments;
Each entry is the duration of the segment (in seconds); Defaults to None
name : string, optional
An optional string to name the driven control. Defaults to None.
Raises
------
ArgumentsValueError
Raised when an argument is invalid.
"""
def __init__(
self,
rabi_rates=None,
azimuthal_angles=None,
detunings=None,
durations=None,
name=None,
):
self.name = name
if self.name is not None:
self.name = str(self.name)
check_none_values = [
(rabi_rates is None),
(azimuthal_angles is None),
(detunings is None),
(durations is None),
]
all_are_none = all(value is True for value in check_none_values)
if all_are_none:
rabi_rates = np.array([np.pi])
azimuthal_angles = np.array([0.0])
detunings = np.array([0.0])
durations = np.array([1.0])
else:
# some may be None while others are not
input_array_lengths = []
if not check_none_values[0]:
rabi_rates = np.array(rabi_rates, dtype=np.float).reshape((-1,))
input_array_lengths.append(rabi_rates.shape[0])
if not check_none_values[1]:
azimuthal_angles = np.array(azimuthal_angles, dtype=np.float).reshape(
(-1,)
)
input_array_lengths.append(len(azimuthal_angles))
if not check_none_values[2]:
detunings = np.array(detunings, dtype=np.float).reshape((-1,))
input_array_lengths.append(len(detunings))
if not check_none_values[3]:
durations = np.array(durations, dtype=np.float).reshape((-1,))
input_array_lengths.append(len(durations))
# check all valid array lengths are equal
if max(input_array_lengths) != min(input_array_lengths):
raise ArgumentsValueError(
"Rabi rates, Azimuthal angles, Detunings and Durations "
"must be of same length",
{
"rabi_rates": rabi_rates,
"azimuthal_angles": azimuthal_angles,
"detunings": detunings,
"durations": durations,
},
)
valid_input_length = max(input_array_lengths)
if check_none_values[0]:
rabi_rates = np.zeros((valid_input_length,))
if check_none_values[1]:
azimuthal_angles = np.zeros((valid_input_length,))
if check_none_values[2]:
detunings = np.zeros((valid_input_length,))
if check_none_values[3]:
durations = np.ones((valid_input_length,))
self.rabi_rates = rabi_rates
self.azimuthal_angles = azimuthal_angles
self.detunings = detunings
self.durations = durations
# check if all the rabi_rates are greater than zero
if np.any(rabi_rates < 0.0):
raise ArgumentsValueError(
"All rabi rates must be greater than zero.",
{"rabi_rates": rabi_rates},
extras={
"azimuthal_angles": azimuthal_angles,
"detunings": detunings,
"durations": durations,
},
)
# check if all the durations are greater than zero
if np.any(durations <= 0):
raise ArgumentsValueError(
"Duration of driven control segments must all be greater"
+ " than zero.",
{"durations": self.durations},
)
if self.number_of_segments > UPPER_BOUND_SEGMENTS:
raise ArgumentsValueError(
"The number of segments must be smaller than the upper bound:"
+ str(UPPER_BOUND_SEGMENTS),
{"number_of_segments": self.number_of_segments},
)
if self.maximum_rabi_rate > UPPER_BOUND_RABI_RATE:
raise ArgumentsValueError(
"Maximum rabi rate of segments must be smaller than the upper bound: "
+ str(UPPER_BOUND_RABI_RATE),
{"maximum_rabi_rate": self.maximum_rabi_rate},
)
if self.maximum_detuning > UPPER_BOUND_DETUNING_RATE:
raise ArgumentsValueError(
"Maximum detuning of segments must be smaller than the upper bound: "
+ str(UPPER_BOUND_DETUNING_RATE),
{"maximum_detuning": self.maximum_detuning},
)
if self.maximum_duration > UPPER_BOUND_DURATION:
raise ArgumentsValueError(
"Maximum duration of segments must be smaller than the upper bound: "
+ str(UPPER_BOUND_DURATION),
{"maximum_duration": self.maximum_duration},
)
if self.minimum_duration < LOWER_BOUND_DURATION:
raise ArgumentsValueError(
"Minimum duration of segments must be larger than the lower bound: "
+ str(LOWER_BOUND_DURATION),
{"minimum_duration": self.minimum_duration},
)
@property
def number_of_segments(self):
"""Returns the number of segments
Returns
-------
int
The number of segments in the driven control
"""
return self.rabi_rates.shape[0]
@property
def maximum_rabi_rate(self):
"""Returns the maximum rabi rate of the control
Returns
-------
float
The maximum rabi rate of the control
"""
return np.amax(self.rabi_rates)
@property
def maximum_detuning(self):
"""Returns the maximum detuning of the control
Returns
-------
float
The maximum detuning of the control
"""
return np.amax(self.detunings)
@property
def amplitude_x(self):
"""Return the X-Amplitude
Returns
-------
numpy.ndarray
X-Amplitude of each segment
"""
return self.rabi_rates * np.cos(self.azimuthal_angles)
@property
def amplitude_y(self):
"""Return the Y-Amplitude
Returns
-------
numpy.ndarray
Y-Amplitude of each segment
"""
return self.rabi_rates * np.sin(self.azimuthal_angles)
@property
def angles(self):
"""Returns the angles
Returns
-------
        numpy.ndarray
Angles as 1-D array of floats
"""
amplitudes = np.sqrt(
self.amplitude_x ** 2 + self.amplitude_y ** 2 + self.detunings ** 2
)
angles = amplitudes * self.durations
return angles
@property
def directions(self):
"""Returns the directions
Returns
-------
numpy.ndarray
Directions as 1-D array of floats
"""
amplitudes = np.sqrt(
self.amplitude_x ** 2 + self.amplitude_y ** 2 + self.detunings ** 2
)
# Reduces tolerance of the comparison to zero in case the units chosen
# make the amplitudes very small, but never allows it to be higher than the
# default atol value of 1e-8
tolerance = min(1e-20 * np.max(amplitudes), 1e-8)
safe_amplitudes = np.where(
np.isclose(amplitudes, 0, atol=tolerance), 1.0, amplitudes
)
normalized_amplitude_x = self.amplitude_x / safe_amplitudes
normalized_amplitude_y = self.amplitude_y / safe_amplitudes
normalized_detunings = self.detunings / safe_amplitudes
directions = np.hstack(
(
normalized_amplitude_x[:, np.newaxis],
normalized_amplitude_y[:, np.newaxis],
normalized_detunings[:, np.newaxis],
)
)
return directions
@property
def times(self):
"""Returns the time of each segment within the duration
of the control
Returns
------
numpy.ndarray
Segment times as 1-D array of floats
"""
return np.insert(np.cumsum(self.durations), 0, 0.0)
@property
def maximum_duration(self):
"""Returns the maximum duration of all the control segments
Returns
-------
float
The maximum duration of all the control segments
"""
return np.amax(self.durations)
@property
def minimum_duration(self):
"""Returns the minimum duration of all the control segments
Returns
-------
float
            The minimum duration of all the control segments
"""
return np.amin(self.durations)
@property
def duration(self):
"""Returns the total duration of the control
Returns
-------
float
Total duration of the control
"""
return np.sum(self.durations)
def _qctrl_expanded_export_content(self, file_type, coordinates):
"""Private method to prepare the content to be saved in Q-CTRL expanded format
Parameters
----------
file_type : str, optional
One of 'CSV' or 'JSON'; defaults to 'CSV'.
coordinates : str, optional
Indicates the co-ordinate system requested. Must be one of
'cylindrical', 'cartesian' or 'polar'; defaults to 'cylindrical'
Returns
-------
list or dict
Based on file_type; list if 'CSV', dict if 'JSON'
"""
control_info = None
amplitude_x = self.amplitude_x
amplitude_y = self.amplitude_y
if coordinates == Coordinate.CYLINDRICAL.value:
if file_type == FileType.CSV.value:
control_info = list()
control_info.append(
"amplitude_x,amplitude_y,detuning,duration,maximum_rabi_rate"
)
for segment_idx in range(self.number_of_segments):
control_info.append(
"{},{},{},{},{}".format(
amplitude_x[segment_idx],
amplitude_y[segment_idx],
self.detunings[segment_idx],
self.durations[segment_idx],
self.maximum_rabi_rate,
)
)
else:
control_info = dict()
if self.name is not None:
control_info["name"] = self.name
control_info["maximum_rabi_rate"] = self.maximum_rabi_rate
control_info["amplitude_x"] = list(amplitude_x)
control_info["amplitude_y"] = list(amplitude_y)
control_info["detuning"] = list(self.detunings)
control_info["duration"] = list(self.durations)
else:
if file_type == FileType.CSV.value:
control_info = list()
control_info.append(
"rabi_rate,azimuthal_angle,detuning,duration,maximum_rabi_rate"
)
for segment_idx in range(self.number_of_segments):
control_info.append(
"{},{},{},{},{}".format(
self.rabi_rates[segment_idx] / self.maximum_rabi_rate,
np.arctan2(
amplitude_y[segment_idx], amplitude_x[segment_idx]
),
self.detunings[segment_idx],
self.durations[segment_idx],
self.maximum_rabi_rate,
)
)
else:
control_info = dict()
if self.name is not None:
control_info["name"] = self.name
control_info["maximum_rabi_rate"] = self.maximum_rabi_rate
control_info["rabi_rates"] = list(
self.rabi_rates / self.maximum_rabi_rate
)
control_info["azimuthal_angles"] = list(
np.arctan2(amplitude_y, amplitude_x)
)
control_info["detuning"] = list(self.detunings)
control_info["duration"] = list(self.durations)
return control_info
def _export_to_qctrl_expanded_format(
self,
filename=None,
file_type=FileType.CSV.value,
coordinates=Coordinate.CYLINDRICAL.value,
):
"""Private method to save control in qctrl_expanded_format
Parameters
----------
filename : str, optional
Name and path of the file to save the control into.
Defaults to None
file_type : str, optional
One of 'CSV' or 'JSON'; defaults to 'CSV'.
coordinates : str, optional
Indicates the co-ordinate system requested. Must be one of
'cylindrical', 'cartesian'; defaults to 'cylindrical'
"""
control_info = self._qctrl_expanded_export_content(
file_type=file_type, coordinates=coordinates
)
if file_type == FileType.CSV.value:
with open(filename, "wt") as handle:
control_info = "\n".join(control_info)
handle.write(control_info)
else:
with open(filename, "wt") as handle:
json.dump(control_info, handle, sort_keys=True, indent=4)
def export_to_file(
self,
filename=None,
file_format=FileFormat.QCTRL.value,
file_type=FileType.CSV.value,
coordinates=Coordinate.CYLINDRICAL.value,
):
"""Prepares and saves the driven control in a file.
Parameters
----------
filename : str, optional
Name and path of the file to save the control into.
Defaults to None
file_format : str
Specified file format for saving the control. Defaults to
'Q-CTRL expanded'; Currently it does not support any other format.
For detail of the `Q-CTRL Expanded Format` consult
`Q-CTRL Control Data Format
<https://docs.q-ctrl.com/wiki/output-data-formats#q-ctrl-hardware>` _.
file_type : str, optional
One of 'CSV' or 'JSON'; defaults to 'CSV'.
coordinates : str, optional
Indicates the co-ordinate system requested. Must be one of
'cylindrical', 'cartesian'; defaults to 'cylindrical'
References
----------
`Q-CTRL Control Data Format
<https://docs.q-ctrl.com/wiki/output-data-formats#q-ctrl-hardware>` _.
Raises
------
ArgumentsValueError
Raised if some of the parameters are invalid.
"""
_file_types = [v.value for v in FileType]
_file_formats = [v.value for v in FileFormat]
_coordinate_systems = [v.value for v in Coordinate]
if filename is None:
raise ArgumentsValueError(
"Invalid filename provided.", {"filename": filename}
)
if file_format not in _file_formats:
raise ArgumentsValueError(
"Requested file format is not supported. Please use "
"one of {}".format(_file_formats),
{"file_format": file_format},
)
if file_type not in _file_types:
raise ArgumentsValueError(
"Requested file type is not supported. Please use "
"one of {}".format(_file_types),
{"file_type": file_type},
)
if coordinates not in _coordinate_systems:
raise ArgumentsValueError(
"Requested coordinate type is not supported. Please use "
"one of {}".format(_coordinate_systems),
{"coordinates": coordinates},
)
if file_format == FileFormat.QCTRL.value:
self._export_to_qctrl_expanded_format(
filename=filename, file_type=file_type, coordinates=coordinates
)
def export(
self, coordinates=Coordinate.CYLINDRICAL.value, dimensionless_rabi_rate=True
):
""" Returns a dictionary formatted for plotting using the qctrl-visualizer package.
Parameters
----------
dimensionless_rabi_rate: boolean
If True, normalizes the Rabi rate so that its largest absolute value is 1.
coordinates: string
Indicates whether the Rabi frequency should be plotted in terms of its
'cylindrical' or 'cartesian' components.
Returns
-------
dict
Dictionary with plot data that can be used by the plot_controls
method of the qctrl-visualizer package. It has keywords 'Rabi rate'
and 'Detuning' for 'cylindrical' coordinates and 'X amplitude', 'Y amplitude',
and 'Detuning' for 'cartesian' coordinates.
Raises
------
ArgumentsValueError
Raised when an argument is invalid.
"""
if coordinates not in [v.value for v in Coordinate]:
raise ArgumentsValueError(
"Unsupported coordinates provided: ",
arguments={"coordinates": coordinates},
)
if dimensionless_rabi_rate:
normalizer = self.maximum_rabi_rate
else:
normalizer = 1
plot_dictionary = {}
plot_x = self.amplitude_x / normalizer
plot_y = self.amplitude_y / normalizer
plot_r = self.rabi_rates / normalizer
plot_theta = self.azimuthal_angles
plot_durations = self.durations
plot_detunings = self.detunings
if coordinates == Coordinate.CARTESIAN.value:
plot_dictionary["X amplitude"] = [
{"value": v, "duration": t} for v, t in zip(plot_x, plot_durations)
]
plot_dictionary["Y amplitude"] = [
{"value": v, "duration": t} for v, t in zip(plot_y, plot_durations)
]
if coordinates == Coordinate.CYLINDRICAL.value:
plot_dictionary["Rabi rate"] = [
{"value": r * np.exp(1.0j * theta), "duration": t}
for r, theta, t in zip(plot_r, plot_theta, plot_durations)
]
plot_dictionary["Detuning"] = [
{"value": v, "duration": t} for v, t in zip(plot_detunings, plot_durations)
]
return plot_dictionary
def __str__(self):
"""Prepares a friendly string format for a Driven Control
"""
driven_control_string = list()
if self.name is not None:
driven_control_string.append("{}:".format(self.name))
pretty_rabi_rates = [
str(rabi_rate / self.maximum_rabi_rate)
if self.maximum_rabi_rate != 0
else "0"
for rabi_rate in list(self.rabi_rates)
]
pretty_rabi_rates = ",".join(pretty_rabi_rates)
pretty_azimuthal_angles = [
str(azimuthal_angle / np.pi) for azimuthal_angle in self.azimuthal_angles
]
pretty_azimuthal_angles = ",".join(pretty_azimuthal_angles)
pretty_detuning = [
str(detuning / self.maximum_detuning) if self.maximum_detuning != 0 else "0"
for detuning in list(self.detunings)
]
pretty_detuning = ",".join(pretty_detuning)
pretty_durations = [
str(duration / self.duration) for duration in self.durations
]
pretty_durations = ",".join(pretty_durations)
driven_control_string.append(
"Rabi Rates = [{}] x {}".format(pretty_rabi_rates, self.maximum_rabi_rate)
)
driven_control_string.append(
"Azimuthal Angles = [{}] x pi".format(pretty_azimuthal_angles)
)
driven_control_string.append(
"Detunings = [{}] x {}".format(pretty_detuning, self.maximum_detuning)
)
driven_control_string.append(
"Durations = [{}] x {}".format(pretty_durations, self.duration)
)
driven_control_string = "\n".join(driven_control_string)
return driven_control_string
def __repr__(self):
"""Returns a string representation for the object. The returned string looks like a valid
Python expression that could be used to recreate the object, including default arguments.
Returns
-------
str
String representation of the object including the values of the arguments.
"""
attributes = [
"rabi_rates",
"azimuthal_angles",
"detunings",
"durations",
"name",
]
return create_repr_from_attributes(self, attributes)
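# Illustrative usage (a sketch, not part of the original module): a two-segment
# control built with placeholder values that are assumed to sit within the
# package's validation bounds, then exported for plotting.
if __name__ == "__main__":
    example_control = DrivenControl(
        rabi_rates=[np.pi, np.pi / 2],
        azimuthal_angles=[0.0, np.pi / 2],
        detunings=[0.0, 0.0],
        durations=[1e-6, 2e-6],
        name="example",
    )
    print(example_control)
    plot_data = example_control.export(coordinates="cylindrical")
    print(sorted(plot_data.keys()))  # ['Detuning', 'Rabi rate']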
| 34.099702
| 97
| 0.569932
|
ed4605f92a4dd6326e777e11fa1fdd88969c47ac
| 16,275
|
py
|
Python
|
gplpy/gggp/derivation.py
|
Brisingeros/GPLpy
|
f7202027cd86e867f250d2c63c39353df9da452a
|
[
"Apache-2.0"
] | 2
|
2020-04-12T06:04:21.000Z
|
2021-12-19T00:45:25.000Z
|
gplpy/gggp/derivation.py
|
Brisingeros/GPLpy
|
f7202027cd86e867f250d2c63c39353df9da452a
|
[
"Apache-2.0"
] | null | null | null |
gplpy/gggp/derivation.py
|
Brisingeros/GPLpy
|
f7202027cd86e867f250d2c63c39353df9da452a
|
[
"Apache-2.0"
] | 3
|
2020-12-03T23:05:45.000Z
|
2021-11-07T15:35:58.000Z
|
"""
This module includes all the logic for Grammar Guided Genetic Programming, including Derivation Trees, Whigham and
Depth Control Crossovers, and mutation.
"""
from functools import reduce
import numpy as np
from gplpy.gggp.grammar import Terminal, Variable, NonTerminal, ProbabilisticModel
class Derivation:
"""Derivation tree"""
grammar = None
probabilistic_model=ProbabilisticModel.uniform
def __init__(self, max_recursions=0, tree=None, crossover_node=None, subtree=None):
"""Derivation tree constructor
Keyword arguments:
grammar -- derivation tree grammar
max_recursions -- maximum recursions applied on the derivation tree (default math.inf)
tree -- main subtree for offspring derivation tree creation (default None)
crossover_node -- non terminal symbol where crossover takes place and subtree must be replaced (default None)
subtree -- subtree to replace in crossover node (default None)
mutation_rate -- mutation rate to be applied on crossover creation (default 0.02)
        probabilistic model -- probabilistic model type for new derivation trees initialization (default ProbabilisticModel.uniform)
"""
self.max_recursions = max_recursions
self._depth = None
self._recursions = None
self._word = None
self._str_word = None
# Create a new derivation tree
if tree is None:
remaining_recursions = np.random.random_integers(0, max_recursions) if Derivation.probabilistic_model is ProbabilisticModel.uniform else max_recursions
self.tree = Tree(derivation=self,
remaining_recursions=remaining_recursions,
node=Derivation.grammar.axiom)
# Copy a derivation tree
else:
# TODO check root variable
self.tree = Tree(derivation=self,
node=Derivation.grammar.axiom,
tree=tree,
crossover_node=crossover_node,
subtree=subtree)
@property
def depth(self):
"""Returns derivation tree depth"""
if self._depth is None:
self._depth = self.tree.depth
return self._depth
@property
def recursions(self):
"""Returns number of recursive derivations applied"""
if self._recursions is None:
self._recursions = self.tree.recursions
return self._recursions
@property
def word(self):
"""Returns the word generated by the derivation tree"""
if self._word is None:
self._word = self.tree.word
return self._word
def __str__(self):
"""Returns the word generated by the derivation tree in string format"""
if self._str_word is None:
self._str_word = ' '.join(map(str, self.word))
return self._str_word
def __iter__(self):
"""Iterator of the elements of the word generated by the derivation tree"""
return iter(self.word)
def __len__(self):
"""Length of the word generated by the derivation tree"""
return len(self.word)
class Tree:
"""Tree structure for derivation tree"""
def __init__(self, derivation, remaining_recursions=0, max_recursions=0,
root=None, node=None, tree=None,
mutation_rate=0.0, crossover_node=None, subtree=None):
"""Tree constructor"""
        # Store derivation information
self.derivation = derivation
# Store node information
self.root = root
self.leaves = None
# Information generated during properties method calls
self._word = None
self.str_word = None
self._non_terminal_nodes = None
self._non_terminal_nodes_by_depth = None
self._depth = None
self._recursions = None
self._height = None
# If the node is a Terminal or a Critical Terminal there is nothing else to do
        # Variables have their own value; a copy of the Critical Terminal is created
if isinstance(node, Variable):
self.node = node
self._word = [self.node]
return
# Terminal
if isinstance(node, Terminal):
self.node = node
self._word = [self.node]
return
# Axiom & Non terminal
self.node = node
# Create a derivation tree using parent's derivation trees, if it doesn't mutate
if tree:
self.leaves = []
if crossover_node:
# If the root node of the tree is the crossover node we continue copying the subtree
if tree is crossover_node:
tree = subtree
self.production = tree.production
for leave in tree.leaves:
self.leaves.append(Tree(derivation=derivation,
remaining_recursions=remaining_recursions-1 if self.production.left in self.production.right else remaining_recursions,
root=self,
node=leave.node,
tree=leave,
mutation_rate=mutation_rate))
# Continue copying the main tree
else:
self.production = tree.production
for leave in tree.leaves:
self.leaves.append(Tree(derivation=derivation,
remaining_recursions=remaining_recursions-1 if self.production.left in self.production.right else remaining_recursions,
root=self,
node=leave.node,
tree=leave,
crossover_node=crossover_node,
subtree=subtree,
mutation_rate=mutation_rate))
# Continue copying the subtree
else:
self.production = tree.production
for leave in tree.leaves:
self.leaves.append(Tree(derivation=derivation,
remaining_recursions=remaining_recursions-1 if self.production.left in self.production.right else remaining_recursions,
root=self,
node=leave.node,
tree=leave,
mutation_rate=mutation_rate))
# Create a new derivation tree or mutate a tree
else:
self.production = self.derivation.grammar.get_production_with_probabilistic_model(symbol=self.node.symbol,
model=self.derivation.probabilistic_model,
remaining_recursions=remaining_recursions)
if self.production.left in self.production.right:
remaining_recursions -= 1
self.leaves = []
# Split remaining recursions between recursive nonterminal symbols
# TODO WARNING It can reduce or increase by 1 the remaining recursions
remaining_recursions_per_recursive_nt = np.round(np.random.dirichlet(np.ones(self.production.recursion_arity)) * remaining_recursions).tolist()
for leave in self.production.right:
self.leaves.append(Tree(derivation=derivation,
remaining_recursions=remaining_recursions_per_recursive_nt.pop() if leave.recursive else 0, root=self,
node=leave))
@property
def depth(self):
if self._depth is None:
if self.leaves is None:
self._depth = 0
else:
self._depth = reduce(lambda x, y: x if (x.depth > y.depth) else y, self.leaves).depth + 1
return self._depth
@property
def recursions(self):
if self._recursions is None:
if self.leaves is None:
self._recursions = 0
else:
self._recursions = reduce(lambda x, y: x + y.recursions, self.leaves, 1 if self.production.left in self.production.right else 0)
return self._recursions
@property
def height(self):
if self._height is None:
if self.root is None:
self._height = 0
else:
self._height = self.root.height + 1
return self._height
@property
def word(self):
if self._word is None:
self._word = []
for leave in self.leaves:
self._word += leave.word
return self._word
def __str__(self):
if self.str_word is None:
self.str_word = str()
for letter in self.word:
self.str_word += ' ' + letter.__str__()
return self.str_word
@property
def non_terminal_subtrees_by_node(self):
if self._non_terminal_nodes is None:
self._non_terminal_nodes = {}
if isinstance(self.node, NonTerminal):
# Axiom is included as a crossover node
#if not self.node.axiom:
self._non_terminal_nodes[self.node] = [self]
if self.leaves is not None:
for leave in self.leaves:
for nt in leave.non_terminal_subtrees_by_node:
if nt in self._non_terminal_nodes:
self._non_terminal_nodes[nt] += leave.non_terminal_subtrees_by_node[nt]
else:
self._non_terminal_nodes[nt] = leave.non_terminal_subtrees_by_node[nt]
return self._non_terminal_nodes
def reset_non_terminal_nodes(self):
        self._non_terminal_nodes = None
if self.leaves is not None:
for leave in self.leaves:
leave.reset_non_terminal_nodes()
@property
def non_terminal_nodes_by_depth(self):
if self._non_terminal_nodes_by_depth is None:
self._non_terminal_nodes_by_depth = {}
if isinstance(self.node, NonTerminal):
if not self.node.axiom:
self._non_terminal_nodes_by_depth[self.node][self.node.depth] = [self]
if self.leaves is not None:
for leaf in self.leaves:
for nt in leaf.non_terminal_nodes_by_depth:
if nt in self._non_terminal_nodes_by_depth:
for d in self._non_terminal_nodes_by_depth[nt]:
if d in self._non_terminal_nodes_by_depth[nt]:
self._non_terminal_nodes_by_depth[nt][d] += \
leaf.non_terminal_nodes_by_depth[nt][d]
else:
self._non_terminal_nodes_by_depth[nt][d] = \
leaf.non_terminal_nodes_by_depth[nt][d]
else:
self._non_terminal_nodes_by_depth[nt] = leaf.non_terminal_nodes_by_depth[nt]
return self._non_terminal_nodes_by_depth
class WX:
@staticmethod
def crossover(derivations, max_recursions=0):
common_non_terminals = derivations[0].tree.non_terminal_subtrees_by_node.keys() & derivations[1].tree.non_terminal_subtrees_by_node.keys()
choosing_pool = []
if common_non_terminals:
for nt in common_non_terminals:
choosing_pool += (
[nt] * (len(derivations[0].tree.non_terminal_subtrees_by_node[nt]) + len(derivations[1].tree.non_terminal_subtrees_by_node[nt])))
while True:
crossover_non_terminal = np.random.choice(choosing_pool)
genome_0_crossover_node = np.random.choice(derivations[0].tree.non_terminal_subtrees_by_node[crossover_non_terminal])
genome_1_crossover_node = np.random.choice(derivations[1].tree.non_terminal_subtrees_by_node[crossover_non_terminal])
if (genome_0_crossover_node.recursions + derivations[1].recursions - genome_1_crossover_node.recursions) <= max_recursions and \
(genome_1_crossover_node.recursions + derivations[0].recursions - genome_0_crossover_node.recursions) <= max_recursions:
break
son_1 = Derivation(max_recursions=max_recursions, tree=derivations[0].tree, crossover_node=genome_0_crossover_node, subtree=genome_1_crossover_node)
son_2 = Derivation(max_recursions=max_recursions, tree=derivations[1].tree, crossover_node=genome_1_crossover_node, subtree=genome_0_crossover_node)
return son_1, son_2
else:
return None, None
class OnePointMutation:
@staticmethod
def mutate(individuals, mutation_rate=0.05):
for i in individuals:
d = i.derivation
if np.random.random() < mutation_rate:
tree = np.random.choice([tree for nt in d.tree.non_terminal_subtrees_by_node.values() for tree in nt])
d.tree.reset_non_terminal_nodes()
node_remaining_recursions = d.max_recursions - d.recursions + tree.recursions
if node_remaining_recursions<0:
print("max" + str(d.max_recursions))
print(d.recursions)
print(d)
print(tree.recursions)
print(tree)
node_remaining_recursions=0
if tree.node.recursive:
remaining_recursions = np.random.random_integers(0, node_remaining_recursions) if d.probabilistic_model is ProbabilisticModel.uniform else node_remaining_recursions
else:
remaining_recursions = 0
tree.production = d.grammar.get_production_with_probabilistic_model(symbol=tree.node.symbol,
model=d.probabilistic_model,
remaining_recursions=remaining_recursions)
# Information generated during properties method calls
tree._word = None
tree.str_word = None
tree._non_terminal_nodes = None
tree._non_terminal_nodes_by_depth = None
tree._depth = None
tree._recursions = None
tree._height = None
tree.leaves = []
if tree.production.left in tree.production.right:
remaining_recursions -= 1
# Split remaining recursions between recursive nonterminal symbols
remaining_recursions = np.round(np.random.dirichlet(np.ones(tree.production.recursion_arity)) * remaining_recursions).tolist()
for leave in tree.production.right:
tree.leaves.append(Tree(derivation=d,
remaining_recursions=remaining_recursions.pop() if leave.recursive else 0,
root=tree,
node=leave))
return individuals
if __name__ == "__main__":
from gplpy.gggp.grammar import CFG
gr = CFG("../../gr/symbolic_regression_problem.gr")
Derivation.grammar = gr
b = Derivation(max_recursions=50)
print(b.recursions, b)
__author__ = "aturing"
__license__ = "Apache License 2.0"
__version__ = "1.1.0"
__maintainer__ = "Pablo Ramos"
__email__ = "pablo.ramos@aturing.com"
__status__ = "Production"
| 45.460894
| 184
| 0.567988
|
6dea2744430074b20f92b63b4f4aa2f639ed964f
| 17,092
|
py
|
Python
|
SLRParser.py
|
fenilgmehta/SLR-Parser
|
c4d873ba27e85592282427cde5714feb86db6385
|
[
"MIT"
] | null | null | null |
SLRParser.py
|
fenilgmehta/SLR-Parser
|
c4d873ba27e85592282427cde5714feb86db6385
|
[
"MIT"
] | null | null | null |
SLRParser.py
|
fenilgmehta/SLR-Parser
|
c4d873ba27e85592282427cde5714feb86db6385
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
from typing import Set, List, Union, Dict, Tuple
from graphviz import Digraph
# -----------------------------------------------------------------------------------------
def parse_grammar(file_name: str) -> Tuple[
Dict[str, List[List[str]]],
List[List[Union[str, List[str]]]],
str,
List[str],
List[str],
List[str]
]:
# Grammar where lhs is key and rhs is list of productions
G_prime: Dict[str, List[List[str]]] = {} # Dict[str, Union[List[List[str]], list]]
# Grammar where index of the production is used for writing reduce operations
G_indexed: List[List[Union[str, List[str]]]] = [['', '']] # List[Union[List[str], List[Union[str, List[str]]]]]
start: str = ''
terminals: Set[str] = set([])
nonterminals: Set[str] = set([])
# Open and read the grammar file
with open(file_name) as grammar_file:
# filter ensure blank lines are discarded
grammar = list(filter(None, grammar_file.read().splitlines()))
for g in grammar:
head, _, prods = g.partition(' -> ')
prods = [prod.split() for prod in ' '.join(prods.split()).split('|')]
# prods = [prod.split() for prod in prods.split('|')]
if not start: # If first production, then make the grammar Augmented Grammar
start = f"{head}'" # Create augmented production
G_prime[start] = [[head]]
if head not in G_prime:
G_prime[head] = []
nonterminals.add(head)
for prod in prods:
G_prime[head].append(prod)
G_indexed.append([head, prod])
for symbol in prod:
if not symbol.isupper() and symbol != '^': # ^ denotes epsilon, i.e. empty string
terminals.add(symbol)
elif symbol.isupper():
nonterminals.add(symbol)
# else: # This else will be executed if the symbol is '^', i.e. epsilon = empty string
# print(f"WARNING: check the symbol = '{symbol}'")
return G_prime, G_indexed, start, list(terminals), list(nonterminals), list(terminals | nonterminals)
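# Illustrative grammar file layout accepted by parse_grammar (an assumed example
# inferred from the parsing logic above, not a file shipped with the repo):
#     E -> E + T | T
#     T -> T * F | F
#     F -> ( E ) | id
# Uppercase symbols become nonterminals, everything else is a terminal, and '^'
# denotes the empty string (epsilon); the first head is used as the start symbol.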
# -----------------------------------------------------------------------------------------
first_seen: List[str] = []
def FIRST(X: str) -> Set[str]:
global global_terminals
if X in global_terminals: # CASE 1
return {X}
else:
global first_seen, global_G_prime
first = set()
while True:
first_seen.append(X)
first_len = len(first)
for prod in global_G_prime[X]: # Grammar Productions: Dict[str, List[List[str]]], prod: List[str]
if prod != ['^']: # CASE 2 # `^` is empty string, i.e. Epsilon
for symbol in prod:
if symbol == X and '^' in first:
continue
if symbol in first_seen:
break
symbol_first: Set[str] = FIRST(symbol)
first = first | (symbol_first - set('^'))
if '^' not in symbol_first:
break
first.add('^') # TODO: check if this is right or wrong
else: # CASE 3
first.add('^')
first_seen.remove(X)
if first_len == len(first):
return first
follow_seen: List[str] = []
def FOLLOW(A: str):
global follow_seen
follow = set()
follow_seen.append(A)
if A == global_start: # CASE 1
follow.add('$')
for head, prods in global_G_prime.items():
for prod in prods:
if A in prod[:-1]: # CASE 2
first = FIRST(prod[prod.index(A) + 1])
follow |= (first - set('^')) # `^` is empty string, i.e. Epsilon
if ('^' in first) and (head not in follow_seen): # CASE 3
follow |= FOLLOW(head)
elif (A in prod[-1]) and (head not in follow_seen): # CASE 3
follow |= FOLLOW(head)
follow_seen.remove(A)
return follow
def CLOSURE(I: Dict[str, List[List[str]]]) -> Dict[str, List[List[str]]]:
"""
Used to find the whole state from the first production ONLY
:param I: A partially filled state of the Finite Automata
:return: Completely filled state
"""
global global_G_prime, global_nonterminals
J = I
while True:
item_len: int = len(J)
for head, prods in J.copy().items():
for prod in prods:
if '.' not in prod[:-1]:
continue
symbol_after_dot: str = prod[prod.index('.') + 1]
if symbol_after_dot not in global_nonterminals:
continue
for G_prod in global_G_prime[symbol_after_dot]:
if G_prod == ['^']:
if symbol_after_dot not in J.keys():
J[symbol_after_dot] = [['.']]
elif ['.'] not in J[symbol_after_dot]:
J[symbol_after_dot].append(['.'])
else:
if symbol_after_dot not in J.keys():
J[symbol_after_dot] = [['.'] + G_prod]
elif ['.'] + G_prod not in J[symbol_after_dot]:
J[symbol_after_dot].append(['.'] + G_prod)
if item_len == len(J):
return J
def GOTO(I: Dict[str, List[List[str]]], X: str) -> Dict[str, List[List[str]]]:
"""
On reading `X` from state `I`, we get a new state.
:param I: state of the Finite Automata
:param X: variable/terminal which is read from state `I`
:return: The new state is returned
"""
goto = {}
for head, prods in I.items():
for prod in prods:
if '.' not in prod[:-1]: # This is true if all the Symbols of the production are read
# print(f'WARNING: Inside GOTO -> head={head}, prod={prod}, X={X}');
continue
dot_pos = prod.index('.')
if prod[dot_pos + 1] != X: # If the symbol after `.` is not `X`, then skip this production
continue
for C_head, C_prods in CLOSURE({head: [
prod[:dot_pos] + [X, '.'] + prod[dot_pos + 2:]
]}).items(): # Swap the position of `.` and `X`, and find the closure
if C_head not in goto.keys():
goto[C_head] = C_prods
else:
for C_prod in C_prods:
if C_prod not in goto[C_head]:
goto[C_head].append(C_prod)
return goto
# -----------------------------------------------------------------------------------------
def items1(): # Generate all the state of the Finite Automata
global global_start, global_symbols
# Stores all the states of the Finite Automata
C = [CLOSURE({global_start: [['.'] + [global_start[:-1]]]})]
while True:
item_len = len(C)
for Ith_state in C.copy():
for X in global_symbols:
goto_i_x_state = GOTO(Ith_state, X)
if len(goto_i_x_state) > 0 and (goto_i_x_state not in C):
C.append(goto_i_x_state)
if item_len == len(C):
return C
def construct_table() -> Dict[int, Dict[str, str]]:
global global_start, global_G_indexed, global_terminals, global_nonterminals, global_C
parse_table = {
r: {
c: '' for c in global_terminals + ['$'] + global_nonterminals
} for r in range(len(global_C))
}
for i, I in enumerate(global_C):
for head, prods in I.items():
for prod in prods:
if '.' in prod[:-1]: # CASE 2 a # Add reduce operations to the Parsing table
symbol_after_dot = prod[prod.index('.') + 1]
if symbol_after_dot in global_terminals:
s = f's{global_C.index(GOTO(I, symbol_after_dot))}'
if s not in parse_table[i][symbol_after_dot]:
# This IF is done to avoid redundant addition of the same shift operation
if 'r' in parse_table[i][symbol_after_dot]:
parse_table[i][symbol_after_dot] += '/'
parse_table[i][symbol_after_dot] += s
elif prod[-1] == '.' and head != global_start:
# CASE 2 b # Add reduce operations to the Parsing Table # Executes if `I` is not the starting state
for j, (G_head, G_prod) in enumerate(global_G_indexed):
# This loop is used to find the index of the production `head -> prod`
if G_head == head and (G_prod == prod[:-1] or (G_prod == ['^'] and prod == ['.'])):
for f in FOLLOW(head):
if parse_table[i][f] != '':
parse_table[i][f] += '/'
parse_table[i][f] += f'r{j}'
break
else: # CASE 2 c # Add accept to the Parsing Table
parse_table[i]['$'] = 'acc'
for A in global_nonterminals: # CASE 3 # Add state number under the columns of Variables/NonTerminals
j = GOTO(I, A)
if j in global_C:
parse_table[i][A] = global_C.index(j)
return parse_table
def print_info():
global global_G_prime, global_terminals, global_nonterminals, global_symbols
max_G_prime = len(max(global_G_prime.keys(), key=len)) # Stores the max length of a Variable/NonTerminal string
print('Augmented Grammar:')
i = 0
for head, prods in global_G_prime.items():
for prod in prods:
print(f' {i:>{len(str(len(global_G_indexed) - 1))}}: {head:>{max_G_prime}} -> {" ".join(prod)}')
i += 1
print()
print(f'{"Terminals:":>15} {", ".join(global_terminals)}')
print(f'{"NonTerminals:":>15} {", ".join(global_nonterminals)}')
print(f'{"Symbols:":>15} {", ".join(global_symbols)}')
print('\nFIRST:')
for head in global_G_prime.keys():
print(f' {head:>{max_G_prime}} = {{ {", ".join(FIRST(head))} }}')
print('\nFOLLOW:')
for head in global_G_prime.keys():
print(f' {head:>{max_G_prime}} = {{ {", ".join(FOLLOW(head))} }}')
width = max(len(c) for c in ['ACTION'] + global_symbols) + 2 # It is single column width
for r in range(len(global_C)):
max_len = max([len(str(c)) for c in global_parse_table[r].values()])
if width < (max_len + 2):
width = max_len + 2
print('\nParsing Table:')
print(f'+{"-" * width}+{"-" * ((width + 1) * len(global_terminals + ["$"]) - 1)}+{"-" * ((width + 1) * len(global_nonterminals) - 1)}+')
print(f'|{"":{width}}|{"ACTION":^{(width + 1) * len(global_terminals + ["$"]) - 1}}|{"GOTO":^{(width + 1) * len(global_nonterminals) - 1}}|')
print(f'|{"STATE":^{width}}+{("-" * width + "+") * len(global_symbols + ["$"])}')
print(f'|{"":^{width}}|', end=' ')
for symbol in global_terminals + ['$'] + global_nonterminals:
print(f'{symbol:^{width - 1}}|', end=' ')
print(f'\n+{("-" * width + "+") * (len(global_symbols + ["$"]) + 1)}')
for r in range(len(global_C)):
print(f'|{r:^{width}}|', end=' ')
for c in global_terminals + ['$'] + global_nonterminals:
print(f'{global_parse_table[r][c]:^{width - 1}}|', end=' ')
print()
print(f'+{("-" * width + "+") * (len(global_symbols + ["$"]) + 1)}')
# -----------------------------------------------------------------------------------------
def generate_automaton():
automaton = Digraph('automaton', node_attr={'shape': 'record'})
max_G_prime = len(max(global_G_prime.keys(), key=len))
for i, I in enumerate(global_C):
I_str = f'<<I>I</I><SUB>{i}</SUB><BR/>'
for (head, prods) in I.items():
for prod in prods:
I_str += f'<I>{head:>{max_G_prime}}</I> →'
for symbol in prod:
if symbol in global_nonterminals:
I_str += f' <I>{symbol}</I>'
elif symbol in global_terminals:
I_str += f' <B>{symbol}</B>'
else:
I_str += f' {symbol}'
I_str += '<BR ALIGN="LEFT"/>'
automaton.node(f'I{i}', f'{I_str}>')
for r in range(len(global_C)):
for c in global_terminals + ['$'] + global_nonterminals:
if isinstance(global_parse_table[r][c], int):
automaton.edge(f'I{r}', f'I{global_parse_table[r][c]}', label=f'<<I>{c}</I>>')
elif 's' in global_parse_table[r][c]:
i = global_parse_table[r][c][global_parse_table[r][c].index('s') + 1:]
if '/' in i:
i = i[:i.index('/')]
automaton.edge(f'I{r}', f'I{i}', label=f'<<B>{c}</B>>' if c in global_terminals else c)
elif global_parse_table[r][c] == 'acc':
automaton.node('acc', '<<B>accept</B>>', shape='none')
automaton.edge(f'I{r}', 'acc', label='$')
automaton.view()
def LR_parser(w: str):
def print_line():
print(f'{"".join(["+" + ("-" * (max_len + 2)) for max_len in max_lens.values()])}+')
buffer = f'{w} $'.split()
pointer = 0
a = buffer[pointer]
stack = ['0']
symbols = ['']
histories = {'step': [''], 'stack': ['STACK'] + stack, 'symbols': ['SYMBOLS'] + symbols, 'input': ['INPUT'], 'action': ['ACTION']}
step = 0
while True:
s = int(stack[-1])
step += 1
histories['step'].append(f'({step})')
histories['input'].append(' '.join(buffer[pointer:]))
if a not in global_parse_table[s].keys():
histories['action'].append(f'ERROR: unrecognized symbol {a}')
break
elif not global_parse_table[s][a]:
histories['action'].append('ERROR: input cannot be parsed by given grammar')
break
elif '/' in global_parse_table[s][a]:
if global_parse_table[s][a].count('r') > 1:
histories['action'].append(f'ERROR: reduce-reduce conflict at state {s}, symbol {a}')
else:
histories['action'].append(f'ERROR: shift-reduce conflict at state {s}, symbol {a}')
break
elif global_parse_table[s][a].startswith('s'):
histories['action'].append('shift')
stack.append(global_parse_table[s][a][1:])
symbols.append(a)
histories['stack'].append(' '.join(stack))
histories['symbols'].append(' '.join(symbols))
pointer += 1
a = buffer[pointer]
elif global_parse_table[s][a].startswith('r'):
head, prod = global_G_indexed[int(global_parse_table[s][a][1:])]
histories['action'].append(f'reduce by {head} -> {" ".join(prod)}')
if prod != ['^']:
stack = stack[:-len(prod)]
symbols = symbols[:-len(prod)]
stack.append(str(global_parse_table[int(stack[-1])][head]))
symbols.append(head)
histories['stack'].append(' '.join(stack))
histories['symbols'].append(' '.join(symbols))
elif global_parse_table[s][a] == 'acc':
histories['action'].append('accept')
break
max_lens = {key: max(len(value) for value in histories[key]) for key in histories.keys()}
justs = {'step': '>', 'stack': '', 'symbols': '', 'input': '>', 'action': ''}
print_line()
    print(''.join([f'| {history[0]:^{max_len}} ' for history, max_len in zip(histories.values(), max_lens.values())]) + '|')
print_line()
for i, step in enumerate(histories['step'][:-1], 1):
        print(''.join([f'| {history[i]:{just}{max_len}} ' for history, just, max_len in
                       zip(histories.values(), justs.values(), max_lens.values())]) + '|')
print_line()
# -----------------------------------------------------------------------------------------
if __name__ == '__main__':
import sys
file_name = 'grammar.txt'
if len(sys.argv) == 2:
file_name = sys.argv[1]
elif len(sys.argv) != 1:
print("Usage:")
print(" python SLRParse.py [FILENAME]")
print("\nWARNING: Only 1 grammar file as input can be passed")
exit(1)
print(f"Grammar file = '{file_name}'")
# all the variables are list except G_prime # global_start stores `E'` i.e. `E prime`
global_G_prime, global_G_indexed, global_start, global_terminals, global_nonterminals, global_symbols = parse_grammar(file_name)
# Find all the states of the Finite Automata for the grammar
global_C = items1()
global_parse_table = construct_table()
print_info()
generate_automaton()
LR_parser(input('\nEnter Input: '))
| 40.889952
| 145
| 0.512169
|
62aa31761ab0cce7ffb8543583be9778b70f0c42
| 3,231
|
py
|
Python
|
stdplugins/carbon1.py
|
ppppspsljdhdd/Pepe
|
1e57825ddb0ab3ba15a19cad0ecfbf2622f6b851
|
[
"Apache-2.0"
] | 1
|
2020-11-06T14:14:24.000Z
|
2020-11-06T14:14:24.000Z
|
stdplugins/carbon1.py
|
ishaizz/PepeBot
|
7440cadc8228106d221fc8e436a0809a86be5159
|
[
"Apache-2.0"
] | null | null | null |
stdplugins/carbon1.py
|
ishaizz/PepeBot
|
7440cadc8228106d221fc8e436a0809a86be5159
|
[
"Apache-2.0"
] | null | null | null |
"""
Carbon Scraper Plugin for Userbot. //text in creative way.
usage: .kar00 //as a reply to any text message
Thanks to @NeoMatrix90 for vars
Type : .kar00
"""
import asyncio
import os
from urllib.parse import quote_plus
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from uniborg.util import admin_cmd
CARBONLANG = "auto"
@borg.on(admin_cmd(pattern="crblang ?(.*)"))
async def setlang(prog):
global CARBONLANG
CARBONLANG = prog.pattern_match.group(1)
await prog.edit(f"Language for carbon.now.sh set to {CARBONLANG}")
@borg.on(admin_cmd(pattern="kar00 ?(.*)"))
async def carbon_api(e):
""" A Wrapper for carbon.now.sh """
await e.edit("`Processing..`")
CARBON = "https://carbon.now.sh/?l={lang}&code={code}"
global CARBONLANG
textx = await e.get_reply_message()
pcode = e.text
if pcode[8:]:
pcode = str(pcode[8:])
elif textx:
pcode = str(textx.message) # Importing message to module
code = quote_plus(pcode) # Converting to urlencoded
await e.edit("`Processing..\n25%`")
if os.path.isfile("./carbon.png"):
os.remove("./carbon.png")
url = CARBON.format(code=code, lang=CARBONLANG)
chrome_options = Options()
chrome_options.add_argument("--headless")
chrome_options.binary_location = Config.CHROME_BIN
chrome_options.add_argument("--window-size=1920x1080")
chrome_options.add_argument("--disable-dev-shm-usage")
chrome_options.add_argument("--no-sandbox")
chrome_options.add_argument("--disable-gpu")
prefs = {"download.default_directory": "./"}
chrome_options.add_experimental_option("prefs", prefs)
driver = webdriver.Chrome(
executable_path=Config.CHROME_DRIVER, options=chrome_options
)
driver.get(url)
await e.edit("`Processing..\n50%`")
download_path = "./"
driver.command_executor._commands["send_command"] = (
"POST",
"/session/$sessionId/chromium/send_command",
)
params = {
"cmd": "Page.setDownloadBehavior",
"params": {"behavior": "allow", "downloadPath": download_path},
}
driver.execute("send_command", params)
driver.find_element_by_xpath(
'//*[@id="__next"]/main/div[3]/div[2]/div[1]/div[1]/div/span[2]'
).click()
driver.find_element_by_id("export-menu").click()
# driver.find_element_by_xpath("//button[contains(text(),'Export')]").click()
driver.find_element_by_xpath("//button[contains(text(),'4x')]").click()
driver.find_element_by_xpath("//button[contains(text(),'PNG')]").click()
await e.edit("`Processing..\n75%`")
# Waiting for downloading
while not os.path.isfile("./carbon.png"):
await asyncio.sleep(0.5)
await e.edit("`Processing..\n100%`")
file = "./carbon.png"
await e.edit("`Uploading..`")
await e.client.send_file(
e.chat_id,
file,
caption="Made using [Carbon](https://carbon.now.sh/about/),\
\na project by [Dawn Labs](https://dawnlabs.io/)",
force_document=True,
reply_to=e.message.reply_to_msg_id,
)
os.remove("./carbon.png")
driver.quit()
# Removing carbon.png after uploading
await e.delete() # Deleting msg
| 33.309278
| 81
| 0.660477
|
e913f7e27994882a1491ce72556f1496610badea
| 726
|
py
|
Python
|
python_boilerplate/commandline.py
|
lincoln-harris/python_boilerplate
|
79cb48219f5a1ac822cd3c9ffa0c97b6342b1ac5
|
[
"MIT"
] | null | null | null |
python_boilerplate/commandline.py
|
lincoln-harris/python_boilerplate
|
79cb48219f5a1ac822cd3c9ffa0c97b6342b1ac5
|
[
"MIT"
] | null | null | null |
python_boilerplate/commandline.py
|
lincoln-harris/python_boilerplate
|
79cb48219f5a1ac822cd3c9ffa0c97b6342b1ac5
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Import modified 'os' module with LC_LANG set so click doesn't complain
from .os_utils import os # noqa: F401
# Python standard library imports
from functools import partial
# 3rd party libraries
import click
# Within-module imports
from python_boilerplate.hello import hello
click.option = partial(click.option, show_default=True)
settings = dict(help_option_names=['-h', '--help'])
@click.group(options_metavar='', subcommand_metavar='<command>',
context_settings=settings)
def cli():
"""
python boilerplate contains all the boilerplate you need to create a Python package.
"""
pass
cli.add_command(hello, name='hello')
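# Editor's note: minimal self-check sketch, not part of the original module and
# never called automatically. It uses click's CliRunner, so no console-script
# entry point needs to be installed; the 'hello' subcommand name comes from the
# add_command() call above.
def _smoke_test_cli():
    from click.testing import CliRunner

    runner = CliRunner()
    # Because click.option is wrapped with partial(show_default=True) above,
    # any option a subcommand declares will print its default in this help text.
    result = runner.invoke(cli, ['hello', '--help'])
    print(result.output)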
if __name__ == "__main__":
cli()
| 21.352941
| 88
| 0.712121
|
eb60539c0033e8ed98c7c0813aee37b50851b11f
| 19,919
|
py
|
Python
|
pytests/castest/opschangecas.py
|
sumedhpb/TAF
|
fc6f4cb8dc0b8234393f2e52a7b4a1aa723d9449
|
[
"Apache-2.0"
] | 9
|
2019-02-19T05:55:00.000Z
|
2022-01-20T10:37:28.000Z
|
pytests/castest/opschangecas.py
|
sumedhpb/TAF
|
fc6f4cb8dc0b8234393f2e52a7b4a1aa723d9449
|
[
"Apache-2.0"
] | 2
|
2019-02-19T07:28:54.000Z
|
2019-06-18T11:22:29.000Z
|
pytests/castest/opschangecas.py
|
sumedhpb/TAF
|
fc6f4cb8dc0b8234393f2e52a7b4a1aa723d9449
|
[
"Apache-2.0"
] | 155
|
2018-11-13T14:57:07.000Z
|
2022-03-28T11:53:22.000Z
|
import json
from cb_tools.cbstats import Cbstats
from castest.cas_base import CasBaseTest
from couchbase_helper.documentgenerator import doc_generator
from remote.remote_util import RemoteMachineShellConnection
from sdk_client3 import SDKClient
from sdk_exceptions import SDKException
class OpsChangeCasTests(CasBaseTest):
def setUp(self):
super(OpsChangeCasTests, self).setUp()
self.key = 'test_cas_docs'.rjust(self.key_size, '0')
self.log.info("=========Finished OpsChangeCasTests base setup=======")
def tearDown(self):
super(OpsChangeCasTests, self).tearDown()
def _load_all_buckets(self, generator, op_type):
for bucket in self.cluster.buckets:
task = self.task.async_load_gen_docs(
self.cluster, bucket, generator, op_type, 0,
flag=self.item_flag,
batch_size=10,
process_concurrency=8,
replicate_to=self.replicate_to,
persist_to=self.persist_to,
durability=self.durability_level,
timeout_secs=self.sdk_timeout,
compression=self.sdk_compression)
self.task.jython_task_manager.get_task_result(task)
def verify_cas(self, ops, generator):
"""
Verify CAS value manipulation.
        For update, we reuse the latest CAS value returned by the previous
        mutation and mutate again; with a current CAS the mutation must
        succeed without any exception.
        For delete, after the delete is performed we try to mutate the item
        with the pre-delete CAS value; the SDK must raise an error, otherwise
        the test fails.
        For expire, we verify that the latest known CAS value can no longer
        be used to mutate the item once it has expired.
"""
for bucket in self.cluster.buckets:
client = SDKClient([self.cluster.master], bucket)
gen = generator
while gen.has_next():
key, value = gen.next()
vb_of_key = self.bucket_util.get_vbucket_num_for_key(key)
active_node_ip = None
for node_ip in self.shell_conn.keys():
if vb_of_key in self.vb_details[node_ip]["active"]:
active_node_ip = node_ip
break
self.log.info("Performing %s on key %s" % (ops, key))
if ops in ["update", "touch"]:
for x in range(self.mutate_times):
old_cas = client.crud("read", key, timeout=10)["cas"]
# value = {"val": "mysql-new-value-%s" % x}
if ops == 'update':
result = client.crud(
"replace", key, value,
durability=self.durability_level,
cas=old_cas)
else:
prev_exp = 0
for exp in [0, 60, 0, 0]:
result = client.touch(
key, exp,
durability=self.durability_level,
timeout=self.sdk_timeout)
if exp == prev_exp:
if result["cas"] != old_cas:
self.log_failure(
"CAS updated for "
"touch with same exp: %s"
% result)
else:
if result["cas"] == old_cas:
self.log_failure(
"CAS not updated %s == %s"
% (old_cas, result["cas"]))
old_cas = result["cas"]
prev_exp = exp
if result["status"] is False:
client.close()
self.log_failure("Touch / replace with cas failed")
return
new_cas = result["cas"]
if ops == 'update':
if old_cas == new_cas:
self.log_failure("CAS old (%s) == new (%s)"
% (old_cas, new_cas))
if result["value"] != value:
self.log_failure("Value mismatch. "
"%s != %s"
% (result["value"], value))
else:
self.log.debug(
"Mutate %s with CAS %s successfully! "
"Current CAS: %s"
% (key, old_cas, new_cas))
active_read = client.crud("read", key,
timeout=self.sdk_timeout)
active_cas = active_read["cas"]
replica_cas = -1
cas_in_active_node = \
self.cb_stat[active_node_ip].vbucket_details(
bucket.name)[str(vb_of_key)]["max_cas"]
if str(cas_in_active_node) != str(new_cas):
self.log_failure("CbStats CAS mismatch. %s != %s"
% (cas_in_active_node, new_cas))
poll_count = 0
max_retry = 5
while poll_count < max_retry:
replica_read = client.get_from_all_replicas(key)[0]
replica_cas = replica_read["cas"]
if active_cas == replica_cas \
or self.durability_level:
break
poll_count = poll_count + 1
self.sleep(1, "Retry read CAS from replica..")
if active_cas != replica_cas:
self.log_failure("Replica cas mismatch. %s != %s"
% (new_cas, replica_cas))
elif ops == "delete":
old_cas = client.crud("read", key, timeout=10)["cas"]
result = client.crud("delete", key,
durability=self.durability_level,
timeout=self.sdk_timeout)
self.log.info("CAS after delete of key %s: %s"
% (key, result["cas"]))
result = client.crud("replace", key, "test",
durability=self.durability_level,
timeout=self.sdk_timeout,
cas=old_cas)
if result["status"] is True:
self.log_failure("The item should already be deleted")
if SDKException.DocumentNotFoundException \
not in result["error"]:
self.log_failure("Invalid Exception: %s" % result)
if result["cas"] != 0:
self.log_failure("Delete returned invalid cas: %s, "
"Expected 0" % result["cas"])
if result["cas"] == old_cas:
self.log_failure("Deleted doc returned old cas: %s "
% old_cas)
elif ops == "expire":
old_cas = client.crud("read", key, timeout=10)["cas"]
result = client.crud("touch", key, exp=self.expire_time)
if result["status"] is True:
if result["cas"] == old_cas:
self.log_failure("Touch failed to update CAS")
else:
self.log_failure("Touch operation failed")
self.sleep(self.expire_time+1, "Wait for item to expire")
result = client.crud("replace", key, "test",
durability=self.durability_level,
timeout=self.sdk_timeout,
cas=old_cas)
if result["status"] is True:
self.log_failure("Able to mutate %s with old cas: %s"
% (key, old_cas))
if SDKException.DocumentNotFoundException \
not in result["error"]:
self.log_failure("Invalid error after expiry: %s"
% result)
def ops_change_cas(self):
"""
        CAS value manipulation by update, delete and expire operations.
        We load a set of items and then, through verify_cas(), mutate half of
        them to change their CAS values, delete a quarter of them and expire
        the remaining quarter, verifying the CAS semantics of each operation.
"""
gen_load = doc_generator('nosql', 0, self.num_items,
doc_size=self.doc_size)
gen_update = doc_generator('nosql', 0, self.num_items/2,
doc_size=self.doc_size)
gen_delete = doc_generator('nosql',
self.num_items/2,
(self.num_items * 3 / 4),
doc_size=self.doc_size)
gen_expire = doc_generator('nosql',
(self.num_items * 3 / 4),
self.num_items,
doc_size=self.doc_size)
self._load_all_buckets(gen_load, "create")
self.bucket_util.verify_stats_all_buckets(self.cluster, self.num_items)
self.bucket_util._wait_for_stats_all_buckets(self.cluster,
self.cluster.buckets)
# Create cbstat objects
self.shell_conn = dict()
self.cb_stat = dict()
self.vb_details = dict()
for node in self.cluster_util.get_kv_nodes(self.cluster):
self.vb_details[node.ip] = dict()
self.vb_details[node.ip]["active"] = list()
self.vb_details[node.ip]["replica"] = list()
self.shell_conn[node.ip] = RemoteMachineShellConnection(node)
self.cb_stat[node.ip] = Cbstats(self.shell_conn[node.ip])
self.vb_details[node.ip]["active"] = \
self.cb_stat[node.ip].vbucket_list(self.bucket.name, "active")
self.vb_details[node.ip]["replica"] = \
self.cb_stat[node.ip].vbucket_list(self.bucket.name, "replica")
if self.doc_ops is not None:
if "update" in self.doc_ops:
self.verify_cas("update", gen_update)
if "touch" in self.doc_ops:
self.verify_cas("touch", gen_update)
if "delete" in self.doc_ops:
self.verify_cas("delete", gen_delete)
if "expire" in self.doc_ops:
self.verify_cas("expire", gen_expire)
self.bucket_util._wait_for_stats_all_buckets(self.cluster,
self.cluster.buckets)
self.validate_test_failure()
def touch_test(self):
self.log.info("1. Loading initial set of documents")
load_gen = doc_generator(self.key, 0, self.num_items,
doc_size=self.doc_size)
self._load_all_buckets(load_gen, "create")
self.bucket_util.verify_stats_all_buckets(self.cluster, self.num_items)
self.bucket_util._wait_for_stats_all_buckets(self.cluster,
self.cluster.buckets)
self.log.info("2. Loading bucket into DGM")
dgm_gen = doc_generator(
self.key, self.num_items, self.num_items+1)
dgm_task = self.task.async_load_gen_docs(
self.cluster, self.cluster.buckets[0], dgm_gen, "create", 0,
persist_to=self.persist_to,
replicate_to=self.replicate_to,
durability=self.durability_level,
timeout_secs=self.sdk_timeout,
batch_size=10,
process_concurrency=4,
active_resident_threshold=self.active_resident_threshold)
self.task_manager.get_task_result(dgm_task)
self.log.info("3. Touch intial self.num_items docs which are "
"residing on disk due to DGM")
client = SDKClient([self.cluster.master],
self.cluster.buckets[0])
while load_gen.has_next():
key, _ = load_gen.next()
result = client.crud("touch", key,
durability=self.durability_level,
timeout=self.sdk_timeout)
if result["status"] is not True:
self.log_failure("Touch on %s failed: %s" % (key, result))
client.close()
self.validate_test_failure()
def _corrupt_max_cas(self, mcd, key):
# set the CAS to -2 and then mutate to increment to -1 and
# then it should stop there
mcd.setWithMetaInvalid(key, json.dumps({'value': 'value2'}),
0, 0, 0, -2)
# print 'max cas pt1', mcd.getMeta(key)[4]
mcd.set(key, 0, 0, json.dumps({'value': 'value3'}))
# print 'max cas pt2', mcd.getMeta(key)[4]
mcd.set(key, 0, 0, json.dumps({'value': 'value4'}))
# print 'max cas pt3', mcd.getMeta(key)[4]
    # MB-17517: Verify that we can recover if max CAS somehow becomes -1
def corrupt_cas_is_healed_on_rebalance_out_in(self):
self.log.info('Start corrupt_cas_is_healed_on_rebalance_out_in')
KEY_NAME = 'key1'
client = SDKClient([self.cluster.master], 'default')
# set a key
client.memcached(KEY_NAME).set(KEY_NAME, 0, 0,
json.dumps({'value': 'value1'}))
# figure out which node it is on
mc_active = client.memcached(KEY_NAME)
mc_replica = client.memcached(KEY_NAME, replica_index=0)
# set the CAS to -2 and then mutate to increment to -1 and
# then it should stop there
self._corrupt_max_cas(mc_active, KEY_NAME)
# CAS should be 0 now, do some gets and sets to verify that
# nothing bad happens
resp = mc_active.get(KEY_NAME)
self.log.info('get for {0} is {1}'.format(KEY_NAME, resp))
# remove that node
self.log.info('Remove the node with -1 max cas')
rebalance = self.cluster.async_rebalance(self.servers[-1:],
[], [self.cluster.master])
rebalance.result()
replica_CAS = mc_replica.getMeta(KEY_NAME)[4]
# add the node back
self.log.info('Add the node back, the max_cas should be healed')
rebalance = self.cluster.async_rebalance(self.servers[-1:],
[self.cluster.master], [])
rebalance.result()
# verify the CAS is good
client = SDKClient([self.cluster.master], 'default')
mc_active = client.memcached(KEY_NAME)
active_CAS = mc_active.getMeta(KEY_NAME)[4]
self.assertTrue(replica_CAS == active_CAS,
'cas mismatch active {0} replica {1}'
.format(active_CAS, replica_CAS))
# One node only needed for this test
def corrupt_cas_is_healed_on_reboot(self):
self.log.info('Start corrupt_cas_is_healed_on_reboot')
KEY_NAME = 'key1'
client = SDKClient([self.cluster.master], 'default')
# set a key
client.memcached(KEY_NAME).set(KEY_NAME, 0, 0,
json.dumps({'value': 'value1'}))
# figure out which node it is on
mc_active = client.memcached(KEY_NAME)
# set the CAS to -2 and then mutate to increment to -1
# and then it should stop there
self._corrupt_max_cas(mc_active, KEY_NAME)
# print 'max cas k2', mc_active.getMeta('k2')[4]
# CAS should be 0 now, do some gets and sets to verify
# that nothing bad happens
# self._restart_memcache('default')
remote = RemoteMachineShellConnection(self.cluster.master)
remote.stop_server()
self.sleep(30, "Wait for server to stop")
remote.start_server()
self.sleep(30, "Wait for server to start")
client = SDKClient([self.cluster.master], 'default')
mc_active = client.memcached(KEY_NAME)
maxCas = mc_active.getMeta(KEY_NAME)[4]
self.assertTrue(maxCas == 0,
'max cas after reboot is {0} != 0'.format(maxCas))
"""
    MB-21448 bug test.
    Description: REPLACE_WITH_CAS on a key that has recently been deleted and
    then requested sometimes returns "key exists" with a different CAS instead
    of a "key not exists" error. This test only requires one node.
"""
def key_not_exists_test(self):
client = SDKClient([self.cluster.master], self.bucket)
load_gen = doc_generator(self.key, 0, 1,
doc_size=256)
key, val = load_gen.next()
for _ in range(1500):
result = client.crud("create", key, val,
durability=self.durability_level,
timeout=self.sdk_timeout)
if result["status"] is False:
self.log_failure("Create failed: %s" % result)
create_cas = result["cas"]
# Delete and verify get fails
result = client.crud("delete", key,
durability=self.durability_level,
timeout=self.sdk_timeout)
if result["status"] is False:
self.log_failure("Delete failed: %s" % result)
elif result["cas"] <= create_cas:
self.log_failure("Delete returned invalid cas: %s" % result)
result = client.crud("read", key,
timeout=self.sdk_timeout)
if result["status"] is True:
self.log_failure("Read succeeded after delete: %s" % result)
elif SDKException.DocumentNotFoundException \
not in str(result["error"]):
self.log_failure("Invalid exception during read "
"for non-exists key: %s" % result)
            # A CAS mismatch must not stall the test for ~10 seconds, and the
            # error raised must be the expected DocumentNotFoundException
result = client.crud("replace", key, val, exp=60,
timeout=self.sdk_timeout,
cas=create_cas)
if result["status"] is True:
self.log_failure("Replace succeeded after delete: %s" % result)
if SDKException.DocumentNotFoundException \
not in str(result["error"]):
self.log_failure("Invalid exception during read "
"for non-exists key: %s" % result)
self.validate_test_failure()
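# Editor's note: illustrative helper, not part of the original suite and not
# wired into any test. It condenses the optimistic-locking contract exercised
# above, assuming a client object with the same crud() signature as SDKClient.
def _cas_contract_sketch(client, key, value):
    old_cas = client.crud("read", key)["cas"]
    # A mutation carrying the current CAS must succeed and return a new CAS
    first = client.crud("replace", key, value, cas=old_cas)
    assert first["status"] is True and first["cas"] != old_cas
    # Re-using the now-stale CAS must be rejected by the server
    stale = client.crud("replace", key, value, cas=old_cas)
    assert stale["status"] is False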
| 47.089835
| 79
| 0.501682
|
dae1336182cf226734ea19029bb2bebccff7e0bb
| 723
|
py
|
Python
|
app/core/models.py
|
tsatsujnr139/icd-disease-classication-managment-api
|
40802f199f204195f47dfdaaf598794b8f79d6a5
|
[
"MIT"
] | null | null | null |
app/core/models.py
|
tsatsujnr139/icd-disease-classication-managment-api
|
40802f199f204195f47dfdaaf598794b8f79d6a5
|
[
"MIT"
] | null | null | null |
app/core/models.py
|
tsatsujnr139/icd-disease-classication-managment-api
|
40802f199f204195f47dfdaaf598794b8f79d6a5
|
[
"MIT"
] | null | null | null |
from django.db import models
# Create your models here.
class DiseaseClassification(models.Model):
"""Model definition for Disease Classification"""
classification_standard = models.CharField(
max_length=6, blank=False)
category_code = models.CharField(max_length=4, blank=False)
diagnosis_code = models.CharField(max_length=4, null=True)
full_code = models.CharField(
max_length=7, unique=True, blank=False)
abbreviated_description = models.CharField(
max_length=150, blank=False)
full_description = models.CharField(max_length=255, blank=False)
category_title = models.CharField(max_length=100, blank=False)
def __str__(self):
return self.full_code
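# Editor's note: illustrative usage, not part of the original module. The field
# values are an ICD-10-style example and are assumptions; this would run inside
# a configured Django project (shell, view or test case) with migrations applied.
def _example_classification():
    return DiseaseClassification.objects.create(
        classification_standard="ICD10",
        category_code="A00",
        diagnosis_code="0",
        full_code="A000",
        abbreviated_description="Cholera due to Vibrio cholerae 01, biovar cholerae",
        full_description="Cholera due to Vibrio cholerae 01, biovar cholerae",
        category_title="Cholera",
    )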
| 34.428571
| 68
| 0.73444
|
11926e2823fc04f9b454ede93931fcb5f4742c45
| 7,049
|
py
|
Python
|
uhd_restpy/testplatform/sessions/ixnetwork/topology/bgplscommunitieslist_fdb216f1d4195f82ad738e19cb2b5d32.py
|
Vibaswan/ixnetwork_restpy
|
239fedc7050890746cbabd71ea1e91c68d9e5cad
|
[
"MIT"
] | null | null | null |
uhd_restpy/testplatform/sessions/ixnetwork/topology/bgplscommunitieslist_fdb216f1d4195f82ad738e19cb2b5d32.py
|
Vibaswan/ixnetwork_restpy
|
239fedc7050890746cbabd71ea1e91c68d9e5cad
|
[
"MIT"
] | null | null | null |
uhd_restpy/testplatform/sessions/ixnetwork/topology/bgplscommunitieslist_fdb216f1d4195f82ad738e19cb2b5d32.py
|
Vibaswan/ixnetwork_restpy
|
239fedc7050890746cbabd71ea1e91c68d9e5cad
|
[
"MIT"
] | null | null | null |
# MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from uhd_restpy.base import Base
from uhd_restpy.files import Files
class BgpLsCommunitiesList(Base):
"""Bgp Non VPN RR Communities
The BgpLsCommunitiesList class encapsulates a list of bgpLsCommunitiesList resources that are managed by the system.
A list of resources can be retrieved from the server using the BgpLsCommunitiesList.find() method.
"""
__slots__ = ()
_SDM_NAME = 'bgpLsCommunitiesList'
_SDM_ATT_MAP = {
'AsNumber': 'asNumber',
'Count': 'count',
'DescriptiveName': 'descriptiveName',
'LastTwoOctets': 'lastTwoOctets',
'Name': 'name',
'Type': 'type',
}
def __init__(self, parent):
super(BgpLsCommunitiesList, self).__init__(parent)
@property
def AsNumber(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): AS #
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AsNumber']))
@property
def Count(self):
"""
Returns
-------
- number: Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
"""
return self._get_attribute(self._SDM_ATT_MAP['Count'])
@property
def DescriptiveName(self):
"""
Returns
-------
- str: Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
"""
return self._get_attribute(self._SDM_ATT_MAP['DescriptiveName'])
@property
def LastTwoOctets(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Last Two Octets
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['LastTwoOctets']))
@property
def Name(self):
"""
Returns
-------
- str: Name of NGPF element, guaranteed to be unique in Scenario
"""
return self._get_attribute(self._SDM_ATT_MAP['Name'])
@Name.setter
def Name(self, value):
self._set_attribute(self._SDM_ATT_MAP['Name'], value)
@property
def Type(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Type
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Type']))
def update(self, Name=None):
"""Updates bgpLsCommunitiesList resource on the server.
This method has some named parameters with a type: obj (Multivalue).
The Multivalue class has documentation that details the possible values for those named parameters.
Args
----
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def find(self, Count=None, DescriptiveName=None, Name=None):
"""Finds and retrieves bgpLsCommunitiesList resources from the server.
All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve bgpLsCommunitiesList resources from the server.
To retrieve an exact match ensure the parameter value starts with ^ and ends with $
By default the find method takes no parameters and will retrieve all bgpLsCommunitiesList resources from the server.
Args
----
- Count (number): Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
- DescriptiveName (str): Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
Returns
-------
- self: This instance with matching bgpLsCommunitiesList resources retrieved from the server available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
def read(self, href):
"""Retrieves a single instance of bgpLsCommunitiesList data from the server.
Args
----
- href (str): An href to the instance to be retrieved
Returns
-------
- self: This instance with the bgpLsCommunitiesList resources from the server available through an iterator or index
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
def get_device_ids(self, PortNames=None, AsNumber=None, LastTwoOctets=None, Type=None):
"""Base class infrastructure that gets a list of bgpLsCommunitiesList device ids encapsulated by this object.
Use the optional regex parameters in the method to refine the list of device ids encapsulated by this object.
Args
----
- PortNames (str): optional regex of port names
- AsNumber (str): optional regex of asNumber
- LastTwoOctets (str): optional regex of lastTwoOctets
- Type (str): optional regex of type
Returns
-------
- list(int): A list of device ids that meets the regex criteria provided in the method parameters
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._get_ngpf_device_ids(locals())
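# Editor's note: illustrative sketch, not part of the generated module and never
# invoked by it. How a BgpLsCommunitiesList handle is reached depends on the
# surrounding NGPF topology, so `communities` is assumed to already be an
# instance of the class above; the name used in find() is a made-up example.
def _example_usage(communities):
    # Exact-match find on the NGPF name (regex anchored with ^ and $)
    matched = communities.find(Name="^BGP-LS Communities 1$")
    matched.update(Name="BGP-LS Communities renamed")
    # Device ids whose AS number multivalue matches the given regex
    return matched.get_device_ids(AsNumber="65001")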
| 38.309783
| 174
| 0.668038
|
c9bb08aee75a2e1390621636d15124e0ef19c2cb
| 889
|
py
|
Python
|
cfp/resolver_factories/parameter_store_resolver_factory.py
|
cariad/cfp
|
43336c45cf56abb7e037fa2f139ea86e679e77b0
|
[
"MIT"
] | null | null | null |
cfp/resolver_factories/parameter_store_resolver_factory.py
|
cariad/cfp
|
43336c45cf56abb7e037fa2f139ea86e679e77b0
|
[
"MIT"
] | 6
|
2021-11-08T08:42:10.000Z
|
2021-12-16T14:17:58.000Z
|
cfp/resolver_factories/parameter_store_resolver_factory.py
|
cariad/cfp
|
43336c45cf56abb7e037fa2f139ea86e679e77b0
|
[
"MIT"
] | null | null | null |
from typing import Dict, Optional
from cfp.resolver_factories.resolver_factory import ResolverFactory
from cfp.resolvers import ParameterStoreResolver
from cfp.sources import AnySource, FromParameterStore
from cfp.types import RegionName
class ParameterStoreResolverFactory(
ResolverFactory[
FromParameterStore,
ParameterStoreResolver,
]
):
def __init__(self) -> None:
self._resolvers: Dict[Optional[RegionName], ParameterStoreResolver] = {}
@staticmethod
def can_resolve(source: AnySource) -> bool:
return isinstance(source, FromParameterStore)
def make(self, source: FromParameterStore) -> ParameterStoreResolver:
if existing := self._resolvers.get(source.region, None):
return existing
resolver = ParameterStoreResolver()
self._resolvers[source.region] = resolver
return resolver
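# Editor's note: illustrative check, not part of the original module and never
# called by it. It relies only on what is visible above: make() keeps one
# ParameterStoreResolver per region, so sources sharing a region share a
# resolver. SimpleNamespace stands in for a FromParameterStore source here,
# since only the `region` attribute is read by this factory.
def _example_caching() -> None:
    from types import SimpleNamespace

    factory = ParameterStoreResolverFactory()
    eu_a = SimpleNamespace(region="eu-west-1")
    eu_b = SimpleNamespace(region="eu-west-1")
    assert factory.make(eu_a) is factory.make(eu_b)  # cached per region
    assert factory.make(SimpleNamespace(region=None)) is not factory.make(eu_a)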
| 30.655172
| 80
| 0.733408
|
9612bf8fe5aca8cb8253335dd3e26324fc0d1d16
| 418
|
py
|
Python
|
utils/message.py
|
sotblad/Curium-Discord-Tips
|
4b1932a6c3471301e209158d976fdb8dd7ce98a7
|
[
"MIT"
] | null | null | null |
utils/message.py
|
sotblad/Curium-Discord-Tips
|
4b1932a6c3471301e209158d976fdb8dd7ce98a7
|
[
"MIT"
] | null | null | null |
utils/message.py
|
sotblad/Curium-Discord-Tips
|
4b1932a6c3471301e209158d976fdb8dd7ce98a7
|
[
"MIT"
] | 1
|
2018-10-26T18:54:28.000Z
|
2018-10-26T18:54:28.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
depositMsg_J = (u"I've DMed the Curium address!")
depositMsg_E = ("I PMed you your address! Make sure to double check that it is from me!")
depositDM_J = (u"To check the balance, please use the balance command.")
depositDM_E = ("Remember to use cru.balance to check your balance and not an explorer. The address balance and your actual balance are not always the same!")
| 59.714286
| 157
| 0.73445
|
db2e454e5ea6b1b848c414fc19c0f6085674892f
| 1,646
|
py
|
Python
|
env/lib/python3.8/site-packages/numpy/distutils/__config__.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 1,738
|
2017-09-21T10:59:12.000Z
|
2022-03-31T21:05:46.000Z
|
env/lib/python3.8/site-packages/numpy/distutils/__config__.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 427
|
2017-09-29T22:54:36.000Z
|
2022-02-15T19:26:50.000Z
|
env/lib/python3.8/site-packages/numpy/distutils/__config__.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 671
|
2017-09-21T08:04:01.000Z
|
2022-03-29T14:30:07.000Z
|
# This file is generated by numpy's setup.py
# It contains system_info results at the time of building this package.
__all__ = ["get_info","show"]
import os
import sys
extra_dll_dir = os.path.join(os.path.dirname(__file__), '.libs')
if sys.platform == 'win32' and os.path.isdir(extra_dll_dir):
if sys.version_info >= (3, 8):
os.add_dll_directory(extra_dll_dir)
else:
os.environ.setdefault('PATH', '')
os.environ['PATH'] += os.pathsep + extra_dll_dir
blas_mkl_info={}
blis_info={}
openblas_info={'libraries': ['openblas', 'openblas'], 'library_dirs': ['/usr/local/lib'], 'language': 'c', 'define_macros': [('HAVE_CBLAS', None)]}
blas_opt_info={'libraries': ['openblas', 'openblas'], 'library_dirs': ['/usr/local/lib'], 'language': 'c', 'define_macros': [('HAVE_CBLAS', None)]}
lapack_mkl_info={}
openblas_lapack_info={'libraries': ['openblas', 'openblas'], 'library_dirs': ['/usr/local/lib'], 'language': 'c', 'define_macros': [('HAVE_CBLAS', None)]}
lapack_opt_info={'libraries': ['openblas', 'openblas'], 'library_dirs': ['/usr/local/lib'], 'language': 'c', 'define_macros': [('HAVE_CBLAS', None)]}
def get_info(name):
g = globals()
return g.get(name, g.get(name + "_info", {}))
def show():
for name,info_dict in globals().items():
if name[0] == "_" or type(info_dict) is not type({}): continue
print(name + ":")
if not info_dict:
print(" NOT AVAILABLE")
for k,v in info_dict.items():
v = str(v)
if k == "sources" and len(v) > 200:
v = v[:60] + " ...\n... " + v[-60:]
print(" %s = %s" % (k,v))
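# Editor's note: illustrative usage, not part of the generated file; it simply
# exercises the two helpers defined above when the module is run directly.
if __name__ == "__main__":
    show()                        # prints every *_info dict declared above
    print(get_info("blas_opt"))   # falls back to blas_opt_info; {} if absent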
| 40.146341
| 154
| 0.606318
|
039d7aca10b93406fd3c17df13db340b43d98bd5
| 116
|
py
|
Python
|
src/docs/ec2/gunicorn.conf.py
|
icebreakerone/open-energy-python-infrastructure
|
1aa1e09c75e66c78bf3cfc9e57114901a8ab9172
|
[
"MIT"
] | 1
|
2021-07-07T18:13:00.000Z
|
2021-07-07T18:13:00.000Z
|
src/docs/ec2/gunicorn.conf.py
|
icebreakerone/open-energy-python-infrastructure
|
1aa1e09c75e66c78bf3cfc9e57114901a8ab9172
|
[
"MIT"
] | 6
|
2021-07-26T10:53:44.000Z
|
2022-02-24T12:39:50.000Z
|
src/docs/ec2/gunicorn.conf.py
|
icebreakerone/open-energy-python-infrastructure
|
1aa1e09c75e66c78bf3cfc9e57114901a8ab9172
|
[
"MIT"
] | 1
|
2021-06-22T19:22:45.000Z
|
2021-06-22T19:22:45.000Z
|
import multiprocessing
bind = 'unix:/run/gunicorn.sock'
workers = multiprocessing.cpu_count() * 2 + 1
timeout = 30
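# Editor's note: illustrative, not part of the original config. The worker
# count follows the common (2 x CPU cores) + 1 heuristic, e.g. a 4-core EC2
# instance gets 4 * 2 + 1 = 9 workers. Assuming the WSGI app is importable as
# "app:application", gunicorn picks this file up with:
#   gunicorn -c gunicorn.conf.py app:application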
| 19.333333
| 45
| 0.741379
|
5e49b2a6b90f3c2c8c14b8894e33c37ed4bcdcc0
| 4,191
|
py
|
Python
|
indico/modules/events/editing/notifications.py
|
jgrigera/indico
|
b5538f2755bc38a02313d079bac831ee3dfb44ab
|
[
"MIT"
] | 1
|
2018-11-12T21:29:26.000Z
|
2018-11-12T21:29:26.000Z
|
indico/modules/events/editing/notifications.py
|
jgrigera/indico
|
b5538f2755bc38a02313d079bac831ee3dfb44ab
|
[
"MIT"
] | 9
|
2020-09-08T09:25:57.000Z
|
2022-01-13T02:59:05.000Z
|
indico/modules/events/editing/notifications.py
|
jgrigera/indico
|
b5538f2755bc38a02313d079bac831ee3dfb44ab
|
[
"MIT"
] | 3
|
2020-07-20T09:09:44.000Z
|
2020-10-19T00:29:49.000Z
|
# This file is part of Indico.
# Copyright (C) 2002 - 2020 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from indico.core.notifications import make_email, send_email
from indico.modules.events.editing.schemas import EditingConfirmationAction
from indico.web.flask.templating import get_template_module
def _get_commenting_users(revision, check_internal_access=False):
return {
c.user
for c in revision.comments
if not check_internal_access or revision.editable.can_use_internal_comments(c.user)
}
def notify_comment(comment):
"""Notify about a new comments on a revision."""
revision = comment.revision
editor = revision.editable.editor
author = comment.user
if comment.internal:
# internal comments notify the editor and anyone who commented + can see internal comments
recipients = _get_commenting_users(revision, check_internal_access=True) | {editor}
elif author == editor:
# editor comments notify the submitter and anyone else who commented
recipients = _get_commenting_users(revision) | {revision.submitter}
elif revision.editable.can_perform_submitter_actions(author):
# submitter comments notify the editor and anyone else who commented
recipients = _get_commenting_users(revision) | {editor}
else:
# comments from someone else (managers) notify everyone
recipients = _get_commenting_users(revision) | {editor, revision.submitter}
recipients.discard(None) # in case there's no editor assigned
recipients.discard(author) # never bother people about their own comments
for recipient in recipients:
tpl = get_template_module('events/editing/emails/comment_notification.txt',
author_name=author.first_name,
timeline_url=revision.editable.external_timeline_url,
recipient_name=recipient.first_name)
send_email(make_email(recipient.email, template=tpl))
def notify_editor_judgment(revision, editor):
"""Notify the submitter about a judgment made by an editor."""
submitter = revision.submitter
tpl = get_template_module('events/editing/emails/editor_judgment_notification.txt',
editor_name=editor.first_name,
timeline_url=revision.editable.external_timeline_url,
recipient_name=submitter.first_name)
send_email(make_email(submitter.email, template=tpl))
def notify_submitter_upload(revision):
"""Notify the editor about the submitter uploading a new revision."""
submitter = revision.submitter
editor = revision.editable.editor
if not editor:
return
tpl = get_template_module('events/editing/emails/submitter_upload_notification.txt',
submitter_name=submitter.first_name,
timeline_url=revision.editable.external_timeline_url,
recipient_name=editor.first_name)
send_email(make_email(editor.email, template=tpl))
def notify_submitter_confirmation(revision, submitter, action):
"""Notify the editor(s) about submitter accepting/rejecting revision changes."""
editable = revision.editable
current_editor = editable.editor
prev_revision_editor = editable.revisions[-2].editor
recipients = {current_editor, prev_revision_editor}
recipients.discard(None)
if action == EditingConfirmationAction.accept:
template_path = 'events/editing/emails/submitter_confirmation_notification.txt'
else:
template_path = 'events/editing/emails/submitter_rejection_notification.txt'
for recipient in recipients:
tpl = get_template_module(template_path,
submitter_name=submitter.first_name,
timeline_url=revision.editable.external_timeline_url,
recipient_name=recipient.first_name)
send_email(make_email(recipient.email, template=tpl))
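# Editor's note: illustrative summary, not part of the original module. Each
# notify_* helper above resolves its recipients, renders the matching template
# once per recipient, and sends it via send_email(make_email(...)). For
# notify_comment the recipient rules (before dropping None and the comment
# author) reduce to:
#   internal comment          -> internal-access commenters | {editor}
#   comment by the editor     -> commenters | {submitter}
#   comment by the submitter  -> commenters | {editor}
#   anyone else (managers)    -> commenters | {editor, submitter}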
| 47.089888
| 98
| 0.699356
|
6b4358228de46cd361adef1d2797aff012486563
| 4,202
|
py
|
Python
|
0425_Word_Squares.py
|
imguozr/LC-Solutions
|
5e5e7098d2310c972314c9c9895aafd048047fe6
|
[
"WTFPL"
] | null | null | null |
0425_Word_Squares.py
|
imguozr/LC-Solutions
|
5e5e7098d2310c972314c9c9895aafd048047fe6
|
[
"WTFPL"
] | null | null | null |
0425_Word_Squares.py
|
imguozr/LC-Solutions
|
5e5e7098d2310c972314c9c9895aafd048047fe6
|
[
"WTFPL"
] | null | null | null |
from collections import defaultdict
from typing import List
class Solution1:
"""
Backtrack w/ dict
"""
def __init__(self):
self.words = []
self.word_len = 0
self.prefix_dict = {}
def wordSquares(self, words: List[str]) -> List[List[str]]:
self.words = words
self.word_len = len(words[0])
self.prefix_dict = self.build_prefix(self.words)
res = []
for word in self.words:
square = [word]
self.backtrack(1, square, res)
return res
def backtrack(self, step, square, res):
if step == self.word_len:
res.append(square[:])
return
prefix = ''.join([word[step] for word in square])
for word in self.prefix_dict[prefix]:
square.append(word)
self.backtrack(step + 1, square, res)
square.pop()
def build_prefix(self, words):
prefix_dict = defaultdict(set)
for word in words:
for prefix in (word[:i] for i in range(1, len(word))):
prefix_dict[prefix].add(word)
return prefix_dict
class Solution2:
"""
Backtrack w/ Trie.
"""
class TrieNode:
def __init__(self):
self.children = defaultdict(Solution2.TrieNode)
self.is_word = False
class Trie:
def __init__(self):
"""
Initialize your data structure here.
"""
self.root = Solution2.TrieNode()
def insert(self, word: str) -> None:
"""
Inserts a word into the trie.
"""
curr = self.root
for ch in word:
curr = curr.children[ch]
curr.is_word = True
def search(self, word: str) -> bool:
"""
Returns if the word is in the trie.
"""
curr = self.root
for ch in word:
curr = curr.children.get(ch)
if curr is None:
return False
return curr.is_word
def starts_with(self, prefix: str) -> bool:
"""
Returns if there is any word in the trie that starts with the given prefix.
"""
curr = self.root
for ch in prefix:
curr = curr.children.get(ch)
if curr is None:
return False
return True
def words_start_with_prefix(self, prefix: str) -> List[str]:
"""
            Return the words in the trie that start with the given prefix.
"""
if not self.starts_with(prefix):
return []
if self.search(prefix):
return [prefix]
curr = self.root
for ch in prefix:
curr = curr.children.get(ch)
return self._get_word(prefix, curr)
def _get_word(self, prefix, node):
if not node:
return []
word_list = []
if node.is_word:
word_list.append(prefix)
for key in node.children.keys():
word_list += self._get_word(prefix + key, node.children.get(key))
return word_list
def __init__(self):
self.words = []
self.word_len = 0
self.trie = Solution2.Trie()
def wordSquares(self, words: List[str]) -> List[List[str]]:
self.words = words
self.word_len = len(words[0])
self.trie = self.build_trie(self.words)
res = []
for word in self.words:
square = [word]
self.backtrack(1, square, res)
return res
def backtrack(self, step, square, res):
if step == self.word_len:
res.append(square[:])
return
prefix = ''.join([word[step] for word in square])
word_list = self.trie.words_start_with_prefix(prefix)
for word in word_list:
square.append(word)
self.backtrack(step + 1, square, res)
square.pop()
def build_trie(self, words):
tree = Solution2.Trie()
for word in words:
tree.insert(word)
return tree
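# Editor's note: illustrative check, not part of the original solutions. The
# input is the canonical LeetCode 425 example; both implementations should
# yield the same two word squares (possibly in a different order).
if __name__ == "__main__":
    words = ["area", "lead", "wall", "lady", "ball"]
    expected = [
        ["ball", "area", "lead", "lady"],
        ["wall", "area", "lead", "lady"],
    ]
    for solver in (Solution1(), Solution2()):
        assert sorted(solver.wordSquares(words)) == expected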
| 27.464052
| 87
| 0.508567
|
e9dff233b7b8d3d562a45c318e5c079744ef9da7
| 41,846
|
py
|
Python
|
modin/pandas/groupby.py
|
naren-ponder/modin
|
4ec7f6347903f9133c65ebc5b6e0e15553b98577
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
modin/pandas/groupby.py
|
naren-ponder/modin
|
4ec7f6347903f9133c65ebc5b6e0e15553b98577
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
modin/pandas/groupby.py
|
naren-ponder/modin
|
4ec7f6347903f9133c65ebc5b6e0e15553b98577
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""Implement GroupBy public API as pandas does."""
import numpy as np
import pandas
import pandas.core.groupby
from pandas.core.dtypes.common import is_list_like, is_numeric_dtype
from pandas.core.apply import reconstruct_func
from pandas._libs.lib import no_default
import pandas.core.common as com
from types import BuiltinFunctionType
from collections.abc import Iterable
from modin.error_message import ErrorMessage
from modin.utils import (
_inherit_docstrings,
try_cast_to_pandas,
wrap_udf_function,
hashable,
wrap_into_list,
)
from modin.core.storage_formats.base.query_compiler import BaseQueryCompiler
from modin.core.dataframe.algebra.default2pandas.groupby import GroupBy
from modin.config import IsExperimental
from modin.logging import LoggerMetaClass, metaclass_resolver
from .series import Series
from .utils import is_label
@_inherit_docstrings(pandas.core.groupby.DataFrameGroupBy)
class DataFrameGroupBy(object, metaclass=LoggerMetaClass):
def __init__(
self,
df,
by,
axis,
level,
as_index,
sort,
group_keys,
squeeze,
idx_name,
drop,
**kwargs,
):
self._axis = axis
self._idx_name = idx_name
self._df = df
self._query_compiler = self._df._query_compiler
self._columns = self._query_compiler.columns
self._by = by
self._drop = drop
if (
level is None
and is_list_like(by)
or isinstance(by, type(self._query_compiler))
):
# This tells us whether or not there are multiple columns/rows in the groupby
self._is_multi_by = (
isinstance(by, type(self._query_compiler)) and len(by.columns) > 1
) or (
not isinstance(by, type(self._query_compiler))
and axis == 0
and all(
(hashable(obj) and obj in self._query_compiler.columns)
or isinstance(obj, type(self._query_compiler))
or is_list_like(obj)
for obj in self._by
)
)
else:
self._is_multi_by = False
self._level = level
self._kwargs = {
"level": level,
"sort": sort,
"as_index": as_index,
"group_keys": group_keys,
}
self._squeeze = squeeze
self._kwargs.update(kwargs)
def __getattr__(self, key):
"""
Alter regular attribute access, looks up the name in the columns.
Parameters
----------
key : str
Attribute name.
Returns
-------
The value of the attribute.
"""
try:
return object.__getattribute__(self, key)
except AttributeError as e:
if key in self._columns:
return self.__getitem__(key)
raise e
@property
def ngroups(self):
return len(self)
def skew(self, *args, **kwargs):
return self._wrap_aggregation(
type(self._query_compiler).groupby_skew,
agg_args=args,
agg_kwargs=kwargs,
numeric_only=True,
)
def ffill(self, limit=None):
return self._default_to_pandas(lambda df: df.ffill(limit=limit))
def sem(self, ddof=1):
return self._default_to_pandas(lambda df: df.sem(ddof=ddof))
def mean(self, numeric_only=None):
return self._check_index(
self._wrap_aggregation(
type(self._query_compiler).groupby_mean,
numeric_only=numeric_only,
)
)
def any(self, skipna=True):
return self._wrap_aggregation(
type(self._query_compiler).groupby_any,
numeric_only=False,
agg_kwargs=dict(skipna=skipna),
)
@property
def plot(self): # pragma: no cover
return self._default_to_pandas(lambda df: df.plot)
def ohlc(self):
return self._default_to_pandas(lambda df: df.ohlc())
def __bytes__(self):
"""
Convert DataFrameGroupBy object into a python2-style byte string.
Returns
-------
bytearray
Byte array representation of `self`.
Notes
-----
Deprecated and removed in pandas and will be likely removed in Modin.
"""
return self._default_to_pandas(lambda df: df.__bytes__())
@property
def tshift(self):
return self._default_to_pandas(lambda df: df.tshift)
_groups_cache = no_default
# TODO: since python 3.9:
# @cached_property
@property
def groups(self):
if self._groups_cache is not no_default:
return self._groups_cache
self._groups_cache = self._compute_index_grouped(numerical=False)
return self._groups_cache
def min(self, numeric_only=False, min_count=-1):
return self._wrap_aggregation(
type(self._query_compiler).groupby_min,
numeric_only=numeric_only,
agg_kwargs=dict(min_count=min_count),
)
def idxmax(self):
return self._default_to_pandas(lambda df: df.idxmax())
@property
def ndim(self):
"""
Return 2.
Returns
-------
int
Returns 2.
Notes
-----
Deprecated and removed in pandas and will be likely removed in Modin.
"""
return 2 # ndim is always 2 for DataFrames
def shift(self, periods=1, freq=None, axis=0, fill_value=None):
def _shift(data, periods, freq, axis, fill_value, is_set_nan_rows=True):
from .dataframe import DataFrame
result = data.shift(periods, freq, axis, fill_value)
if (
is_set_nan_rows
and isinstance(self._by, BaseQueryCompiler)
and (
# Check using `issubset` is effective only in case of MultiIndex
set(self._by.columns).issubset(list(data.columns))
if isinstance(self._by.columns, pandas.MultiIndex)
else len(
self._by.columns.unique()
.sort_values()
.difference(data.columns.unique().sort_values())
)
== 0
)
and DataFrame(query_compiler=self._by.isna()).any(axis=None)
):
mask_nan_rows = data[self._by.columns].isna().any(axis=1)
# drop NaN groups
result = result.loc[~mask_nan_rows]
return result
if freq is None and axis == 1 and self._axis == 0:
result = _shift(self._df, periods, freq, axis, fill_value)
elif (
freq is not None
and axis == 0
and self._axis == 0
and isinstance(self._by, BaseQueryCompiler)
):
result = _shift(
self._df, periods, freq, axis, fill_value, is_set_nan_rows=False
)
new_idx_lvl_arrays = np.concatenate(
[self._df[self._by.columns].values.T, [list(result.index)]]
)
result.index = pandas.MultiIndex.from_arrays(
new_idx_lvl_arrays,
names=[col_name for col_name in self._by.columns]
+ [result._query_compiler.get_index_name()],
)
result = result.dropna(subset=self._by.columns).sort_index()
else:
result = self._check_index_name(
self._wrap_aggregation(
type(self._query_compiler).groupby_shift,
numeric_only=False,
agg_kwargs=dict(
periods=periods, freq=freq, axis=axis, fill_value=fill_value
),
)
)
return result
def nth(self, n, dropna=None):
return self._default_to_pandas(lambda df: df.nth(n, dropna=dropna))
def cumsum(self, axis=0, *args, **kwargs):
return self._check_index_name(
self._wrap_aggregation(
type(self._query_compiler).groupby_cumsum,
agg_args=args,
agg_kwargs=dict(axis=axis, **kwargs),
numeric_only=True,
)
)
_indices_cache = no_default
# TODO: since python 3.9:
# @cached_property
@property
def indices(self):
if self._indices_cache is not no_default:
return self._indices_cache
self._indices_cache = self._compute_index_grouped(numerical=True)
return self._indices_cache
def pct_change(self, periods=1, fill_method="ffill", limit=None, freq=None, axis=0):
return self._default_to_pandas(
lambda df: df.pct_change(
periods=periods,
fill_method=fill_method,
limit=limit,
freq=freq,
axis=axis,
)
)
def filter(self, func, dropna=True, *args, **kwargs):
return self._default_to_pandas(
lambda df: df.filter(func, dropna=dropna, *args, **kwargs)
)
def cummax(self, axis=0, **kwargs):
return self._check_index_name(
self._wrap_aggregation(
type(self._query_compiler).groupby_cummax,
agg_kwargs=dict(axis=axis, **kwargs),
numeric_only=False,
)
)
def apply(self, func, *args, **kwargs):
if not isinstance(func, BuiltinFunctionType):
func = wrap_udf_function(func)
return self._check_index(
self._wrap_aggregation(
qc_method=type(self._query_compiler).groupby_agg,
numeric_only=False,
agg_func=func,
agg_args=args,
agg_kwargs=kwargs,
how="group_wise",
)
)
@property
def dtypes(self):
if self._axis == 1:
raise ValueError("Cannot call dtypes on groupby with axis=1")
return self._check_index(
self._wrap_aggregation(
type(self._query_compiler).groupby_dtypes,
numeric_only=False,
)
)
def first(self, **kwargs):
return self._default_to_pandas(lambda df: df.first(**kwargs))
def backfill(self, limit=None):
return self.bfill(limit)
_internal_by_cache = no_default
# TODO: since python 3.9:
# @cached_property
@property
def _internal_by(self):
"""
Get only those components of 'by' that are column labels of the source frame.
Returns
-------
tuple of labels
"""
if self._internal_by_cache is not no_default:
return self._internal_by_cache
internal_by = tuple()
if self._drop:
if is_list_like(self._by):
internal_by = tuple(by for by in self._by if isinstance(by, str))
else:
ErrorMessage.catch_bugs_and_request_email(
failure_condition=not isinstance(self._by, BaseQueryCompiler),
extra_log=f"When 'drop' is True, 'by' must be either list-like or a QueryCompiler, met: {type(self._by)}.",
)
internal_by = tuple(self._by.columns)
self._internal_by_cache = internal_by
return internal_by
def __getitem__(self, key):
"""
Implement indexing operation on a DataFrameGroupBy object.
Parameters
----------
key : list or str
Names of columns to use as subset of original object.
Returns
-------
DataFrameGroupBy or SeriesGroupBy
Result of indexing operation.
Raises
------
NotImplementedError
Column lookups on GroupBy with arbitrary Series in by is not yet supported.
"""
# These parameters are common for building the resulted Series or DataFrame groupby object
kwargs = {
**self._kwargs.copy(),
"by": self._by,
"axis": self._axis,
"idx_name": self._idx_name,
"squeeze": self._squeeze,
}
# The rules of type deduction for the resulted object is the following:
# 1. If `key` is a list-like or `as_index is False`, then the resulted object is a DataFrameGroupBy
# 2. Otherwise, the resulted object is SeriesGroupBy
# 3. Result type does not depend on the `by` origin
# Examples:
# - drop: any, as_index: any, __getitem__(key: list_like) -> DataFrameGroupBy
# - drop: any, as_index: False, __getitem__(key: any) -> DataFrameGroupBy
# - drop: any, as_index: True, __getitem__(key: label) -> SeriesGroupBy
if is_list_like(key):
make_dataframe = True
else:
if self._as_index:
make_dataframe = False
else:
make_dataframe = True
key = [key]
if make_dataframe:
internal_by = frozenset(self._internal_by)
if len(internal_by.intersection(key)) != 0:
ErrorMessage.missmatch_with_pandas(
operation="GroupBy.__getitem__",
message=(
"intersection of the selection and 'by' columns is not yet supported, "
+ "to achieve the desired result rewrite the original code from:\n"
+ "df.groupby('by_column')['by_column']\n"
+ "to the:\n"
+ "df.groupby(df['by_column'].copy())['by_column']"
),
)
cols_to_grab = internal_by.union(key)
key = [col for col in self._df.columns if col in cols_to_grab]
return DataFrameGroupBy(
self._df[key],
drop=self._drop,
**kwargs,
)
if (
self._is_multi_by
and isinstance(self._by, list)
and not all(hashable(o) and o in self._df for o in self._by)
):
raise NotImplementedError(
"Column lookups on GroupBy with arbitrary Series in by"
+ " is not yet supported."
)
return SeriesGroupBy(
self._df[key],
drop=False,
**kwargs,
)
def cummin(self, axis=0, **kwargs):
return self._check_index_name(
self._wrap_aggregation(
type(self._query_compiler).groupby_cummin,
agg_kwargs=dict(axis=axis, **kwargs),
numeric_only=False,
)
)
def bfill(self, limit=None):
return self._default_to_pandas(lambda df: df.bfill(limit=limit))
def idxmin(self):
return self._default_to_pandas(lambda df: df.idxmin())
def prod(self, numeric_only=None, min_count=0):
return self._wrap_aggregation(
type(self._query_compiler).groupby_prod,
agg_kwargs=dict(min_count=min_count),
numeric_only=numeric_only,
)
def std(self, ddof=1):
return self._wrap_aggregation(
type(self._query_compiler).groupby_std,
agg_kwargs=dict(ddof=ddof),
numeric_only=True,
)
def aggregate(self, func=None, *args, **kwargs):
if self._axis != 0:
# This is not implemented in pandas,
# so we throw a different message
raise NotImplementedError("axis other than 0 is not supported")
if (
callable(func)
and isinstance(func, BuiltinFunctionType)
and func.__name__ in dir(self)
):
func = func.__name__
relabeling_required = False
if isinstance(func, dict) or func is None:
def try_get_str_func(fn):
if not isinstance(fn, str) and isinstance(fn, Iterable):
return [try_get_str_func(f) for f in fn]
return fn.__name__ if callable(fn) and fn.__name__ in dir(self) else fn
relabeling_required, func_dict, new_columns, order = reconstruct_func(
func, **kwargs
)
func_dict = {col: try_get_str_func(fn) for col, fn in func_dict.items()}
if (
relabeling_required
and not self._as_index
and any(col in func_dict for col in self._internal_by)
):
ErrorMessage.missmatch_with_pandas(
operation="GroupBy.aggregate(**dictionary_renaming_aggregation)",
message=(
"intersection of the columns to aggregate and 'by' is not yet supported when 'as_index=False', "
+ "columns with group names of the intersection will not be presented in the result. "
+ "To achieve the desired result rewrite the original code from:\n"
+ "df.groupby('by_column', as_index=False).agg(agg_func=('by_column', agg_func))\n"
+ "to the:\n"
+ "df.groupby('by_column').agg(agg_func=('by_column', agg_func)).reset_index()"
),
)
if any(i not in self._df.columns for i in func_dict.keys()):
from pandas.core.base import SpecificationError
raise SpecificationError("nested renamer is not supported")
if func is None:
kwargs = {}
func = func_dict
elif is_list_like(func):
return self._default_to_pandas(
lambda df, *args, **kwargs: df.aggregate(func, *args, **kwargs),
*args,
**kwargs,
)
elif callable(func):
return self._check_index(
self._wrap_aggregation(
qc_method=type(self._query_compiler).groupby_agg,
numeric_only=False,
agg_func=func,
agg_args=args,
agg_kwargs=kwargs,
how="axis_wise",
)
)
elif isinstance(func, str):
# Using "getattr" here masks possible AttributeError which we throw
# in __getattr__, so we should call __getattr__ directly instead.
agg_func = self.__getattr__(func)
if callable(agg_func):
return agg_func(*args, **kwargs)
result = self._wrap_aggregation(
qc_method=type(self._query_compiler).groupby_agg,
numeric_only=False,
agg_func=func,
agg_args=args,
agg_kwargs=kwargs,
how="axis_wise",
)
if relabeling_required:
if not self._as_index:
nby_cols = len(result.columns) - len(new_columns)
order = np.concatenate([np.arange(nby_cols), order + nby_cols])
by_cols = result.columns[:nby_cols]
new_columns = pandas.Index(new_columns)
if by_cols.nlevels != new_columns.nlevels:
by_cols = by_cols.remove_unused_levels()
empty_levels = [
i
for i, level in enumerate(by_cols.levels)
if len(level) == 1 and level[0] == ""
]
by_cols = by_cols.droplevel(empty_levels)
new_columns = by_cols.append(new_columns)
result = result.iloc[:, order]
result.columns = new_columns
return result
agg = aggregate
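    # Editor's note (illustrative, not part of Modin): the relabeling branch in
    # aggregate() handles pandas-style "named aggregation", e.g.
    #     df.groupby("a", as_index=False).agg(total=("b", "sum"))
    # where reconstruct_func() splits the kwargs into the per-column function
    # dict and the output-column order that is re-applied at the end.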
def last(self, **kwargs):
return self._default_to_pandas(lambda df: df.last(**kwargs))
def mad(self, **kwargs):
return self._default_to_pandas(lambda df: df.mad(**kwargs))
def rank(self, **kwargs):
result = self._wrap_aggregation(
type(self._query_compiler).groupby_rank,
agg_kwargs=kwargs,
numeric_only=False,
)
# pandas does not name the index on rank
result._query_compiler.set_index_name(None)
return result
@property
def corrwith(self):
return self._default_to_pandas(lambda df: df.corrwith)
def pad(self, limit=None):
return self._default_to_pandas(lambda df: df.pad(limit=limit))
def max(self, numeric_only=False, min_count=-1):
return self._wrap_aggregation(
type(self._query_compiler).groupby_max,
numeric_only=numeric_only,
agg_kwargs=dict(min_count=min_count),
)
def var(self, ddof=1):
return self._wrap_aggregation(
type(self._query_compiler).groupby_var,
agg_kwargs=dict(ddof=ddof),
numeric_only=True,
)
def get_group(self, name, obj=None):
return self._default_to_pandas(lambda df: df.get_group(name, obj=obj))
def __len__(self):
return len(self.indices)
def all(self, skipna=True):
return self._wrap_aggregation(
type(self._query_compiler).groupby_all,
numeric_only=False,
agg_kwargs=dict(skipna=skipna),
)
def size(self):
if self._axis == 1:
return DataFrameGroupBy(
self._df.T.iloc[:, [0]],
self._by,
0,
drop=self._drop,
idx_name=self._idx_name,
squeeze=self._squeeze,
**self._kwargs,
).size()
work_object = type(self)(
self._df,
self._by,
self._axis,
drop=False,
idx_name=None,
squeeze=self._squeeze,
**self._kwargs,
)
result = work_object._wrap_aggregation(
type(work_object._query_compiler).groupby_size,
numeric_only=False,
)
if not isinstance(result, Series):
result = result.squeeze(axis=1)
if not self._kwargs.get("as_index") and not isinstance(result, Series):
result = result.rename(columns={0: "size"})
result = (
result.rename(columns={"__reduced__": "index"})
if "__reduced__" in result.columns
else result
)
elif isinstance(self._df, Series):
result.name = self._df.name
else:
result.name = None
return result.fillna(0)
def sum(self, numeric_only=None, min_count=0):
return self._wrap_aggregation(
type(self._query_compiler).groupby_sum,
agg_kwargs=dict(min_count=min_count),
numeric_only=numeric_only,
)
def describe(self, **kwargs):
return self._default_to_pandas(lambda df: df.describe(**kwargs))
def boxplot(
self,
grouped,
subplots=True,
column=None,
fontsize=None,
rot=0,
grid=True,
ax=None,
figsize=None,
layout=None,
**kwargs,
):
return self._default_to_pandas(
lambda df: df.boxplot(
grouped,
subplots=subplots,
column=column,
fontsize=fontsize,
rot=rot,
grid=grid,
ax=ax,
figsize=figsize,
layout=layout,
**kwargs,
)
)
def ngroup(self, ascending=True):
return self._default_to_pandas(lambda df: df.ngroup(ascending))
def nunique(self, dropna=True):
return self._check_index(
self._wrap_aggregation(
type(self._query_compiler).groupby_nunique,
numeric_only=False,
agg_kwargs=dict(dropna=dropna),
)
)
def resample(self, rule, *args, **kwargs):
return self._default_to_pandas(lambda df: df.resample(rule, *args, **kwargs))
def median(self, numeric_only=None):
return self._check_index(
self._wrap_aggregation(
type(self._query_compiler).groupby_median,
numeric_only=numeric_only,
)
)
def head(self, n=5):
return self._default_to_pandas(lambda df: df.head(n))
def cumprod(self, axis=0, *args, **kwargs):
return self._check_index_name(
self._wrap_aggregation(
type(self._query_compiler).groupby_cumprod,
agg_args=args,
agg_kwargs=dict(axis=axis, **kwargs),
numeric_only=True,
)
)
def __iter__(self):
return self._iter.__iter__()
def cov(self):
return self._default_to_pandas(lambda df: df.cov())
def transform(self, func, *args, **kwargs):
return self._check_index_name(
self._wrap_aggregation(
qc_method=type(self._query_compiler).groupby_agg,
numeric_only=False,
agg_func=func,
agg_args=args,
agg_kwargs=kwargs,
how="transform",
)
)
def corr(self, **kwargs):
return self._default_to_pandas(lambda df: df.corr(**kwargs))
def fillna(self, *args, **kwargs):
new_groupby_kwargs = self._kwargs.copy()
new_groupby_kwargs["as_index"] = True
work_object = type(self)(
df=self._df,
by=self._by,
axis=self._axis,
idx_name=self._idx_name,
drop=self._drop,
squeeze=self._squeeze,
**new_groupby_kwargs,
)
return work_object._check_index_name(
work_object._wrap_aggregation(
type(self._query_compiler).groupby_fillna,
numeric_only=False,
agg_args=args,
agg_kwargs=kwargs,
)
)
def count(self):
result = self._wrap_aggregation(
type(self._query_compiler).groupby_count,
numeric_only=False,
)
        # pandas does this in the case of a Series
if isinstance(result, Series):
result = result.fillna(0)
return result
def pipe(self, func, *args, **kwargs):
return com.pipe(self, func, *args, **kwargs)
def cumcount(self, ascending=True):
result = self._default_to_pandas(lambda df: df.cumcount(ascending=ascending))
# pandas does not name the index on cumcount
result._query_compiler.set_index_name(None)
return result
def tail(self, n=5):
return self._default_to_pandas(lambda df: df.tail(n))
# expanding and rolling are unique cases and need to likely be handled
# separately. They do not appear to be commonly used.
def expanding(self, *args, **kwargs):
return self._default_to_pandas(lambda df: df.expanding(*args, **kwargs))
def rolling(self, *args, **kwargs):
return self._default_to_pandas(lambda df: df.rolling(*args, **kwargs))
def hist(self):
return self._default_to_pandas(lambda df: df.hist())
def quantile(self, q=0.5, interpolation="linear"):
if is_list_like(q):
return self._default_to_pandas(
lambda df: df.quantile(q=q, interpolation=interpolation)
)
return self._check_index(
self._wrap_aggregation(
type(self._query_compiler).groupby_quantile,
numeric_only=False,
agg_kwargs=dict(q=q, interpolation=interpolation),
)
)
def diff(self):
return self._default_to_pandas(lambda df: df.diff())
def take(self, **kwargs):
return self._default_to_pandas(lambda df: df.take(**kwargs))
@property
def _index(self):
"""
Get index value.
Returns
-------
pandas.Index
Index value.
"""
return self._query_compiler.index
@property
def _sort(self):
"""
Get sort parameter value.
Returns
-------
bool
Value of sort parameter used to create DataFrameGroupBy object.
"""
return self._kwargs.get("sort")
@property
def _as_index(self):
"""
Get as_index parameter value.
Returns
-------
bool
Value of as_index parameter used to create DataFrameGroupBy object.
"""
return self._kwargs.get("as_index")
@property
def _iter(self):
"""
Construct a tuple of (group_id, DataFrame) tuples to allow iteration over groups.
Returns
-------
generator
Generator expression of GroupBy object broken down into tuples for iteration.
"""
from .dataframe import DataFrame
indices = self.indices
group_ids = indices.keys()
if self._axis == 0:
return (
(
k,
DataFrame(
query_compiler=self._query_compiler.getitem_row_array(
indices[k]
)
),
)
for k in (sorted(group_ids) if self._sort else group_ids)
)
else:
return (
(
k,
DataFrame(
query_compiler=self._query_compiler.getitem_column_array(
indices[k], numeric=True
)
),
)
for k in (sorted(group_ids) if self._sort else group_ids)
)
def _compute_index_grouped(self, numerical=False):
"""
Construct an index of group IDs.
Parameters
----------
numerical : bool, default: False
Whether the group indices should be positional (True) or label-based (False).
Returns
-------
dict
A dict of {group name -> group indices} values.
See Also
--------
pandas.core.groupby.GroupBy.groups
"""
# We end up using pure pandas to compute group indices, so raising a warning
ErrorMessage.default_to_pandas("Group indices computation")
# Splitting level-by and column-by since we serialize them in different ways
by = None
level = []
if self._level is not None:
level = self._level
if not isinstance(level, list):
level = [level]
elif isinstance(self._by, list):
by = []
for o in self._by:
if hashable(o) and o in self._query_compiler.get_index_names(
self._axis
):
level.append(o)
else:
by.append(o)
else:
by = self._by
is_multi_by = self._is_multi_by or (by is not None and len(level) > 0)
# `dropna` param is the only one that matters for the group indices result
dropna = self._kwargs.get("dropna", True)
if hasattr(self._by, "columns") and is_multi_by:
by = list(self._by.columns)
if is_multi_by:
# Because we are doing a collect (to_pandas) here and then groupby, we
# end up using pandas implementation. Add the warning so the user is
# aware.
ErrorMessage.catch_bugs_and_request_email(self._axis == 1)
if isinstance(by, list) and all(
is_label(self._df, o, self._axis) for o in by
):
pandas_df = self._df._query_compiler.getitem_column_array(
by
).to_pandas()
else:
by = try_cast_to_pandas(by, squeeze=True)
pandas_df = self._df._to_pandas()
by = wrap_into_list(by, level)
groupby_obj = pandas_df.groupby(by=by, dropna=dropna)
return groupby_obj.indices if numerical else groupby_obj.groups
else:
if isinstance(self._by, type(self._query_compiler)):
by = self._by.to_pandas().squeeze().values
elif self._by is None:
index = self._query_compiler.get_axis(self._axis)
levels_to_drop = [
i
for i, name in enumerate(index.names)
if name not in level and i not in level
]
by = index.droplevel(levels_to_drop)
if isinstance(by, pandas.MultiIndex):
by = by.reorder_levels(level)
else:
by = self._by
axis_labels = self._query_compiler.get_axis(self._axis)
if numerical:
# Since we want positional indices of the groups, we want to group
# on a `RangeIndex`, not on the actual index labels
axis_labels = pandas.RangeIndex(len(axis_labels))
# `pandas.Index.groupby` doesn't take any parameters except `by`.
# Have to convert an Index to a Series to be able to process `dropna=False`:
if dropna:
return axis_labels.groupby(by)
else:
groupby_obj = axis_labels.to_series().groupby(by, dropna=dropna)
return groupby_obj.indices if numerical else groupby_obj.groups
def _wrap_aggregation(
self,
qc_method,
numeric_only=None,
agg_args=None,
agg_kwargs=None,
**kwargs,
):
"""
Perform common metadata transformations and apply groupby functions.
Parameters
----------
qc_method : callable
The query compiler method to call.
numeric_only : {None, True, False}, default: None
Specifies whether to aggregate non-numeric columns:
- True: include only numeric columns (including categories that hold a numeric dtype)
- False: include all columns
- None: infer the parameter, ``False`` if there are no numeric types in the frame,
``True`` otherwise.
agg_args : list-like, optional
Positional arguments to pass to the aggregation function.
agg_kwargs : dict-like, optional
Keyword arguments to pass to the aggregation function.
**kwargs : dict
Keyword arguments to pass to the specified query compiler's method.
Returns
-------
DataFrame or Series
Returns the same type as `self._df`.
"""
agg_args = tuple() if agg_args is None else agg_args
agg_kwargs = dict() if agg_kwargs is None else agg_kwargs
if numeric_only is None:
# pandas behavior: if `numeric_only` wasn't explicitly specified then
# the parameter is considered to be `False` if there are no numeric types
# in the frame and `True` otherwise.
numeric_only = any(
is_numeric_dtype(dtype) for dtype in self._query_compiler.dtypes
)
if numeric_only and self.ndim == 2:
by_cols = self._internal_by
mask_cols = [
col
for col, dtype in self._query_compiler.dtypes.items()
if (
is_numeric_dtype(dtype)
or (
isinstance(dtype, pandas.CategoricalDtype)
and is_numeric_dtype(dtype.categories.dtype)
)
or col in by_cols
)
]
groupby_qc = self._query_compiler.getitem_column_array(mask_cols)
else:
groupby_qc = self._query_compiler
result = type(self._df)(
query_compiler=qc_method(
groupby_qc,
by=self._by,
axis=self._axis,
groupby_kwargs=self._kwargs,
agg_args=agg_args,
agg_kwargs=agg_kwargs,
drop=self._drop,
**kwargs,
)
)
if self._squeeze:
return result.squeeze()
return result
def _check_index(self, result):
"""
Check whether the result of a groupby aggregation needs its index to be reset.
Parameters
----------
result : DataFrame
Group by aggregation result.
Returns
-------
DataFrame
"""
if self._by is None and not self._as_index:
# This is a workaround to align behavior with pandas. In this case pandas
# resets index, but Modin doesn't do that. More details are in https://github.com/modin-project/modin/issues/3716.
result.reset_index(drop=True, inplace=True)
return result
def _check_index_name(self, result):
"""
Check whether the result of a groupby aggregation needs its index name to be reset.
Parameters
----------
result : DataFrame
Group by aggregation result.
Returns
-------
DataFrame
"""
if self._by is not None:
# pandas does not name the index for this case
result._query_compiler.set_index_name(None)
return result
def _default_to_pandas(self, f, *args, **kwargs):
"""
Execute function `f` in default-to-pandas way.
Parameters
----------
f : callable
The function to apply to each group.
*args : list
Extra positional arguments to pass to `f`.
**kwargs : dict
Extra keyword arguments to pass to `f`.
Returns
-------
modin.pandas.DataFrame
A new Modin DataFrame with the result of the pandas function.
"""
if (
isinstance(self._by, type(self._query_compiler))
and len(self._by.columns) == 1
):
by = self._by.columns[0] if self._drop else self._by.to_pandas().squeeze()
# converting QC 'by' to a list of column labels only if this 'by' comes from self (if drop is True)
elif self._drop and isinstance(self._by, type(self._query_compiler)):
by = list(self._by.columns)
else:
by = self._by
by = try_cast_to_pandas(by, squeeze=True)
# Since 'by' may be a 2D query compiler holding columns to group by,
# to_pandas will also produce a pandas DataFrame containing them.
# So splitting 2D 'by' into a list of 1D Series using 'GroupBy.validate_by':
by = GroupBy.validate_by(by)
def groupby_on_multiple_columns(df, *args, **kwargs):
return f(
df.groupby(
by=by, axis=self._axis, squeeze=self._squeeze, **self._kwargs
),
*args,
**kwargs,
)
return self._df._default_to_pandas(groupby_on_multiple_columns, *args, **kwargs)
@_inherit_docstrings(pandas.core.groupby.SeriesGroupBy)
class SeriesGroupBy(metaclass_resolver(DataFrameGroupBy)):
@property
def ndim(self):
"""
Return 1.
Returns
-------
int
Returns 1.
Notes
-----
Deprecated and removed in pandas and will likely be removed in Modin.
"""
return 1 # ndim is always 1 for Series
@property
def _iter(self):
"""
Construct a tuple of (group_id, Series) tuples to allow iteration over groups.
Returns
-------
generator
Generator expression of GroupBy object broken down into tuples for iteration.
"""
indices = self.indices
group_ids = indices.keys()
if self._axis == 0:
return (
(
k,
Series(
query_compiler=self._query_compiler.getitem_row_array(
indices[k]
)
),
)
for k in (sorted(group_ids) if self._sort else group_ids)
)
else:
return (
(
k,
Series(
query_compiler=self._query_compiler.getitem_column_array(
indices[k], numeric=True
)
),
)
for k in (sorted(group_ids) if self._sort else group_ids)
)
if IsExperimental.get():
from modin.experimental.cloud.meta_magic import make_wrapped_class
make_wrapped_class(DataFrameGroupBy, "make_dataframe_groupby_wrapper")
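# --------------------------------------------------------------------------
# Hedged illustration (not part of the original module): how the
# ``numeric_only=None`` inference documented in ``_wrap_aggregation`` above
# resolves for a small frame, shown with plain pandas only.
if __name__ == "__main__":
    import pandas as _pd
    from pandas.api.types import is_numeric_dtype as _is_numeric

    _df = _pd.DataFrame({"key": ["a", "a", "b"], "val": [1, 2, 3]})
    # At least one column is numeric, so ``numeric_only`` is inferred as True
    # and non-numeric columns are dropped before the aggregation.
    print(any(_is_numeric(dtype) for dtype in _df.dtypes))  # True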
| 33.692432
| 127
| 0.553004
|
7011a150df8cd002f9cbab11a523b6f243c86957
| 2,310
|
py
|
Python
|
example/views.py
|
Fulla/django-nestedgroupedlists
|
4709ecc94c9bc26724d9d3981f63aff6f5c8b66f
|
[
"MIT"
] | null | null | null |
example/views.py
|
Fulla/django-nestedgroupedlists
|
4709ecc94c9bc26724d9d3981f63aff6f5c8b66f
|
[
"MIT"
] | null | null | null |
example/views.py
|
Fulla/django-nestedgroupedlists
|
4709ecc94c9bc26724d9d3981f63aff6f5c8b66f
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from django.shortcuts import render
from nestgrouplists.views import *
from example.serializers import CitizenSerializer, CitizenExtSerializer
from example.models import Citizen, CitizenExt
from rest_framework import generics
from rest_framework.response import Response
from rest_framework.decorators import api_view
# Create your views here.
# Method using nested-grouped-listing with annotations
@api_view(['GET'])
def ListGrouped(request):
aggFields = [
{'function': 'Count', 'field': 'name'},
{'function': 'Sum', 'field': 'age'}
]
# SIMPLE GROUPING - Params
## 1: field to group by
## 2: list of aggregations, where each element in the list is in the format {'function':'Func', 'field': 'Field'}
# Func is one of the following: 'Sum', 'Avg', 'Max', 'Min', 'Count', 'StdDev', 'Variance'
# Field is the field of your model over which the function is applied
## 3: model over which the request is made
## 4: ModelSerializer for the model
listgrouped = simplegrouping('city', aggFields, Citizen, CitizenSerializer)
return Response(listgrouped)
# Method using nested-grouped-listing with annotations
@api_view(['GET'])
def NestListGrouped(request):
aggFields = [
{'function': 'Count', 'field': 'name'},
{'function': 'Sum', 'field': 'age'},
{'function': 'Avg', 'field': 'age'}
]
# NESTED MULTIPLE GROUPING - Params
## 1: fields to group by
## 2: list of aggregations, where each element in the list is in the format {'function':'Func', 'field': 'Field'}
# Func is one of the following: 'Sum', 'Avg', 'Max', 'Min', 'Count', 'StdDev', 'Variance'
# Field is the field of your model over which the function is applied
## 3: model over which the request is made
## 4: ModelSerializer for the model
listgrouped = nestedgrouping(['country','city'], aggFields, CitizenExt, CitizenExtSerializer)
return Response(listgrouped)
# Class using standard listing
class CitizenView(generics.ListCreateAPIView):
queryset = Citizen.objects.all()
serializer_class = CitizenSerializer
# Class using standard listing
class CitizenExtView(generics.ListCreateAPIView):
queryset = CitizenExt.objects.all()
serializer_class = CitizenExtSerializer
| 39.152542
| 117
| 0.692641
|
4d7d9337e093f700fb56fc170cdf164175c6b185
| 18,186
|
py
|
Python
|
articles/BAS4-pws/custauth/custauth.py
|
FrancescoRizzi/AWSomesauce
|
33b80b45a8509fa8f7031ae6be928a9fdd8425fa
|
[
"MIT"
] | 1
|
2019-04-09T20:50:21.000Z
|
2019-04-09T20:50:21.000Z
|
articles/BAS4-pws/custauth/custauth.py
|
FrancescoRizzi/AWSomesauce
|
33b80b45a8509fa8f7031ae6be928a9fdd8425fa
|
[
"MIT"
] | 1
|
2021-05-19T09:09:04.000Z
|
2021-05-19T09:09:04.000Z
|
articles/BAS4-pws/custauth/custauth.py
|
FrancescoRizzi/AWSomesauce
|
33b80b45a8509fa8f7031ae6be928a9fdd8425fa
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import os
import json
import StringIO
from contextlib import closing
import re
import time
import pprint
import boto3
from boto3.session import Session
import botocore
import jwt
from cryptography.x509 import load_pem_x509_certificate
from cryptography.hazmat.backends import default_backend
# Simplest form of logging using the standard logging module:
# ============================================================
import logging
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# Top-Level Handler:
# ============================================================
def lambda_handler(event, context):
logger.info("CustAuth Triggered.")
authToken = event.get('authorizationToken', '')
methodArn = event.get('methodArn', '')
authHeader = event.get('Authorization', '')
logger.info("Authorization Token : '{0!s}'.".format(authToken))
logger.info("Method ARN : '{0!s}'.".format(methodArn))
logger.info("Authorization Header: '{0!s}'.".format(authHeader))
# Check Configuration before wasting time
# ========================================================
# AUTH_APP_ID: required
auth_app_id = os.environ.get('AUTH_APP_ID', None)
if not auth_app_id:
logger.error("Missing Required 'AUTH_APP_ID' Environmental Variable.")
raise ValueError("Missing/blank 'AUTH_APP_ID'")
logger.info("Auth App ID : '{0!s}'.".format(auth_app_id))
# AUTH_TENANT_ID: required
auth_tenant_id = os.environ.get('AUTH_TENANT_ID', None)
if not auth_tenant_id:
logger.error("Missing Required 'AUTH_TENANT_ID' Environmental Variable.")
raise ValueError("Missing/blank 'AUTH_TENANT_ID'")
logger.info("Auth Tenant ID : '{0!s}'.".format(auth_tenant_id))
# CERTS_BUCKET: required
certs_bucket = os.environ.get('CERTS_BUCKET', None)
if not certs_bucket:
logger.error("Missing Required 'CERTS_BUCKET' Environmental Variable.")
raise ValueError("Missing/blank 'CERTS_BUCKET'")
logger.info("Certificates Bucket : '{0!s}'.".format(certs_bucket))
# ========================================================
# Client credentials expected in the authorizationToken, in the form:
# 'Bearer <id_token>'
# Missing authorizationToken:
# response 401 - Unauthorized (although we don't send back a 'WWW-Authenticate' header as we should)
if not authToken:
logger.warn("Missing Authorization Token: will trigger 401-Unauthorized response.")
raise Exception('Unauthorized')
validator = TokenValidator()
validToken = validator.ValidateToken(authToken, auth_app_id, auth_tenant_id, certs_bucket)
logger.info("Is the Authorization Token valid? {0!s}".format(validToken))
# authorizationToken invalid (format or contents):
# respond with Policy DENYING access, which will trigger API Gateway to respond with
# response 403 - Forbidden
# authorizationToken valid (format and contents):
# respond with Policy ALLOWING access, which will trigger API Gateway to
# proceed with the backend integration configured on the method.
principalId = auth_app_id
arnParts = event['methodArn'].split(':')
apiGatewayArnTmp = arnParts[5].split('/')
awsAccountId = arnParts[4]
policy = AuthPolicy(principalId, awsAccountId)
policy.restApiId = apiGatewayArnTmp[0]
policy.region = arnParts[3]
policy.stage = apiGatewayArnTmp[1]
policyDesc = ''
if validToken:
policy.allowAllMethods()
policyDesc = 'ALLOW'
else:
policy.denyAllMethods()
policyDesc = 'DENY'
authResponse = policy.build()
# Optional: context
# The response can also include a 'context' key-value pairs mapping,
# which will be rendered available to the configured backend
# (if the policy is such that the request handling continues)
# as $context.authorizer.<key>
# This mapping is part of the cached response.
#
# context = {
# 'key': 'value', # $context.authorizer.key -> value
# 'number' : 1,
# 'bool' : True
# }
# authResponse['context'] = context
#
# INVALID formats:
# context['arr'] = ['foo']
# context['obj'] = {'foo':'bar'}
logger.info("CustAuth completed: returning policy to {0!s} access.".format(policyDesc))
return authResponse
# TokenValidator
# ============================================================
class TokenValidator(object):
PEMSTART = "-----BEGIN CERTIFICATE-----\n"
PEMEND = "\n-----END CERTIFICATE-----\n"
def __init__(self):
self._session = None
self._client = None
def ValidateToken(self, auth_header, auth_app_id, auth_tenant_id, certs_bucket):
# auth_header expected to be in the form:
# 'Bearer <id_token>'
(pre, encoded_token) = auth_header.split(' ', 2)
if (not pre) or (pre.upper() != "BEARER"):
logger.warn("Authorization Token did not match expected 'Bearer <id_token>' format.")
return False
expected_issuer = 'https://sts.windows.net/{0!s}/'.format(auth_tenant_id)
unverified_headers = jwt.get_unverified_header(encoded_token)
#unverified_token = jwt.decode(encoded_token, algorithms=['RS256'], audience=auth_app_id, issuer=expected_issuer, options={'verify_signature': False})
#x5t = unverified_token.get('x5t', None)
#kid = unverified_token.get('kid', None)
kid = unverified_headers.get('kid', None)
logger.info("Token 'kid': '{0!s}'.".format(kid))
if not kid:
logger.warn("Could not extract 'kid' property from token.")
return False
cert_pem = self.GetSigningCertificate(certs_bucket, kid)
if cert_pem:
logger.info("Retrieved Signing Certificate.")
#if isinstance(cert_pem, unicode):
# logger.info("Signing Certificate is unicode. Will attempt STRICT conversion.")
# cert_pem = cert_pem.encode('ascii', 'strict')
# logger.info("Signing Certificate unicode encoded to ASCII.")
cert = load_pem_x509_certificate(cert_pem, default_backend())
logger.info("Loaded Signing Certificate.")
public_key = cert.public_key()
logger.info("Extracted Public Key from Signing Certificate.")
decoded = jwt.decode(encoded_token, public_key, algorithms=['RS256'], audience=auth_app_id, issuer=expected_issuer)
# NOTE: the JWT decode method verifies
# - general format of the encoded token
# - signature, using the given public key
# - aud claim (Audience) vs audience value
# - exp claim (Expiration) vs current datetime (UTC)
# - nbf claim (Not Before) vs current datetime (UTC)
# - iss claim (Issuer) vs issuer value
if decoded:
logger.info("Token Decoded and Validated Successfully.")
return True
else:
logger.warn("Failed to Decode Token when verifying signature.")
return False
else:
logger.warn("Could not retrieve signing certificate matching token's 'kid' property ('{0!s}').".format(kid))
return False
def GetSigningCertificate(self, certs_bucket, kid):
self.EnsureClient()
discovery_record_str = None
with closing(StringIO.StringIO()) as dest:
self._client.download_fileobj(
Bucket=certs_bucket,
Key=kid,
Fileobj=dest)
discovery_record_str = dest.getvalue()
if not discovery_record_str:
logger.warn("Could not retrieve Discovery Record from Bucket.")
return None
logger.info("Retrieved Discovery Record Payload from Bucket.")
# discovery_record_str is the payload extracted from
# the bucket, presumed to be the JSON-formatted string
# of the signing certificate discovery record. eg:
# {
# "x5t": "...",
# "use": "...",
# "e": "...",
# "kty": "...",
# "n": "...",
# "x5c": [
# "..."
# ],
# "issuer": "...",
# "kid": "..."
# }
# What we need to extract as 'certificate' is
# the first value in the "x5c" property list
discovery_record = json.loads(discovery_record_str)
logger.info("Parsed Discovery Record JSON.")
x5c = discovery_record.get('x5c', None)
if not x5c:
logger.warn("Could not find 'x5c' property from Discovery Record.")
return None
logger.info("Discovery Record x5c found.")
raw_cert = ""
if isinstance(x5c, list):
raw_cert = x5c[0]
elif isinstance(x5c, basestring):
raw_cert = x5c
else:
logger.warn("Unexpected data type for x5c value from Discovery Record (expected string or list).")
return None
logger.info("Raw Cert:|{0!s}|".format(raw_cert))
if isinstance(raw_cert, unicode):
logger.info("Raw Certificate is unicode. Attempting STRICT conversion to ASCII.")
raw_cert = raw_cert.encode('ascii', 'strict')
logger.info("Raw Certificate encoded to ASCII.")
logger.info("Formatting Raw Certificate according to PEM 64-characters lines.")
raw_cert = self.InsertNewLines(raw_cert)
logger.info("Raw Certificate lines length normalized to PEM.")
pem_cert = self.PEMSTART + raw_cert + self.PEMEND
logger.info("After wrapping Raw certificate in PEM Markers:")
logger.info(pem_cert)
#tmp = "is NOT"
#if isinstance(raw_cert, unicode):
# tmp = "is"
#logger.info("Before Wrapping in PEM delimiters, the raw_cert data type {0!s} unicode.".format(tmp))
#
#pem_cert = self.PEMSTART + raw_cert + self.PEMEND
#logger.info("PEM Cert:|{0!s}|".format(pem_cert))
#
#tmp = "is NOT"
#if isinstance(pem_cert, unicode):
# tmp = "is"
#logger.info("After Wrapping in PEM delimiters, the pem_cert data type {0!s} unicode.".format(tmp))
#
#if isinstance(pem_cert, unicode):
# logger.info("Signing Certificate is unicode. Will attempt STRICT conversion.")
# pem_cert = pem_cert.encode('ascii', 'strict')
# logger.info("Signing Certificate unicode encoded to ASCII.")
#
#logger.info("Splitting according to PEM format (64 characters per line).")
#pem_cert = self.InsertNewLines(pem_cert)
#logger.info("After splitting in 64-character long lines:")
#logger.info(pem_cert)
return pem_cert
def InsertNewLines(self, s, every=64):
lines = []
for i in xrange(0, len(s), every):
lines.append(s[i:i+every])
return '\n'.join(lines)
def EnsureClient(self):
self.EnsureSession()
if not self._client:
self._client = self._session.client('s3')
def EnsureSession(self):
if not self._session:
self._session = boto3.Session()
# HttpVerbs
# ============================================================
class HttpVerb:
GET = "GET"
POST = "POST"
PUT = "PUT"
PATCH = "PATCH"
HEAD = "HEAD"
DELETE = "DELETE"
OPTIONS = "OPTIONS"
ALL = "*"
# AuthPolicy
# ============================================================
class AuthPolicy(object):
awsAccountId = ""
"""The AWS account id the policy will be generated for. This is used to create the method ARNs."""
principalId = ""
"""The principal used for the policy, this should be a unique identifier for the end user."""
version = "2012-10-17"
"""The policy version used for the evaluation. This should always be '2012-10-17'"""
pathRegex = "^[/.a-zA-Z0-9-\*]+$"
"""The regular expression used to validate resource paths for the policy"""
"""these are the internal lists of allowed and denied methods. These are lists
of objects and each object has 2 properties: A resource ARN and a nullable
conditions statement.
the build method processes these lists and generates the appropriate
statements for the final policy"""
allowMethods = []
denyMethods = []
restApiId = "*"
"""The API Gateway API id. By default this is set to '*'"""
region = "*"
"""The region where the API is deployed. By default this is set to '*'"""
stage = "*"
"""The name of the stage used in the policy. By default this is set to '*'"""
def __init__(self, principal, awsAccountId):
self.awsAccountId = awsAccountId
self.principalId = principal
self.allowMethods = []
self.denyMethods = []
def _addMethod(self, effect, verb, resource, conditions):
"""Adds a method to the internal lists of allowed or denied methods. Each object in
the internal list contains a resource ARN and a condition statement. The condition
statement can be null."""
if verb != "*" and not hasattr(HttpVerb, verb):
raise NameError("Invalid HTTP verb " + verb + ". Allowed verbs in HttpVerb class")
resourcePattern = re.compile(self.pathRegex)
if not resourcePattern.match(resource):
raise NameError("Invalid resource path: " + resource + ". Path should match " + self.pathRegex)
if resource[:1] == "/":
resource = resource[1:]
resourceArn = ("arn:aws:execute-api:" +
self.region + ":" +
self.awsAccountId + ":" +
self.restApiId + "/" +
self.stage + "/" +
verb + "/" +
resource)
if effect.lower() == "allow":
self.allowMethods.append({
'resourceArn' : resourceArn,
'conditions' : conditions
})
elif effect.lower() == "deny":
self.denyMethods.append({
'resourceArn' : resourceArn,
'conditions' : conditions
})
def _getEmptyStatement(self, effect):
"""Returns an empty statement object prepopulated with the correct action and the
desired effect."""
statement = {
'Action': 'execute-api:Invoke',
'Effect': effect[:1].upper() + effect[1:].lower(),
'Resource': []
}
return statement
def _getStatementForEffect(self, effect, methods):
"""This function loops over an array of objects containing a resourceArn and
conditions statement and generates the array of statements for the policy."""
statements = []
if len(methods) > 0:
statement = self._getEmptyStatement(effect)
for curMethod in methods:
if curMethod['conditions'] is None or len(curMethod['conditions']) == 0:
statement['Resource'].append(curMethod['resourceArn'])
else:
conditionalStatement = self._getEmptyStatement(effect)
conditionalStatement['Resource'].append(curMethod['resourceArn'])
conditionalStatement['Condition'] = curMethod['conditions']
statements.append(conditionalStatement)
statements.append(statement)
return statements
def allowAllMethods(self):
"""Adds a '*' allow to the policy to authorize access to all methods of an API"""
self._addMethod("Allow", HttpVerb.ALL, "*", [])
def denyAllMethods(self):
"""Adds a '*' allow to the policy to deny access to all methods of an API"""
self._addMethod("Deny", HttpVerb.ALL, "*", [])
def allowMethod(self, verb, resource):
"""Adds an API Gateway method (Http verb + Resource path) to the list of allowed
methods for the policy"""
self._addMethod("Allow", verb, resource, [])
def denyMethod(self, verb, resource):
"""Adds an API Gateway method (Http verb + Resource path) to the list of denied
methods for the policy"""
self._addMethod("Deny", verb, resource, [])
def allowMethodWithConditions(self, verb, resource, conditions):
"""Adds an API Gateway method (Http verb + Resource path) to the list of allowed
methods and includes a condition for the policy statement. More on AWS policy
conditions here: http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements.html#Condition"""
self._addMethod("Allow", verb, resource, conditions)
def denyMethodWithConditions(self, verb, resource, conditions):
"""Adds an API Gateway method (Http verb + Resource path) to the list of denied
methods and includes a condition for the policy statement. More on AWS policy
conditions here: http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements.html#Condition"""
self._addMethod("Deny", verb, resource, conditions)
def build(self):
"""Generates the policy document based on the internal lists of allowed and denied
conditions. This will generate a policy with two main statements for the effect:
one statement for Allow and one statement for Deny.
Methods that includes conditions will have their own statement in the policy."""
if ((self.allowMethods is None or len(self.allowMethods) == 0) and
(self.denyMethods is None or len(self.denyMethods) == 0)):
raise NameError("No statements defined for the policy")
policy = {
'principalId' : self.principalId,
'policyDocument' : {
'Version' : self.version,
'Statement' : []
}
}
policy['policyDocument']['Statement'].extend(self._getStatementForEffect("Allow", self.allowMethods))
policy['policyDocument']['Statement'].extend(self._getStatementForEffect("Deny", self.denyMethods))
return policy
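# --------------------------------------------------------------------------
# Hedged usage sketch (not part of the original Lambda): shows how the
# methodArn parsing in lambda_handler decomposes a sample ARN, and how an
# allow-all policy is built with AuthPolicy. All identifiers below are
# made-up placeholder values.
if __name__ == "__main__":
    sampleArn = "arn:aws:execute-api:us-east-1:123456789012:a1b2c3d4e5/dev/GET/pets"
    arnParts = sampleArn.split(':')
    apiGatewayArnTmp = arnParts[5].split('/')
    print "account id : " + arnParts[4]        # 123456789012
    print "region : " + arnParts[3]            # us-east-1
    print "rest api id: " + apiGatewayArnTmp[0]  # a1b2c3d4e5
    print "stage : " + apiGatewayArnTmp[1]       # dev
    demoPolicy = AuthPolicy("example-principal", arnParts[4])
    demoPolicy.restApiId = apiGatewayArnTmp[0]
    demoPolicy.region = arnParts[3]
    demoPolicy.stage = apiGatewayArnTmp[1]
    demoPolicy.allowAllMethods()
    print json.dumps(demoPolicy.build(), indent=2)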
| 39.025751
| 158
| 0.609535
|
bad540b09cd49bea620c5aa225b29a4acec8aab5
| 2,212
|
py
|
Python
|
src/classes.py
|
Ciremun/nam-nyam-foods
|
63e190326287b01b0c99db23e2fbb2df52012497
|
[
"MIT"
] | null | null | null |
src/classes.py
|
Ciremun/nam-nyam-foods
|
63e190326287b01b0c99db23e2fbb2df52012497
|
[
"MIT"
] | 7
|
2021-02-06T19:44:43.000Z
|
2021-02-16T20:27:48.000Z
|
src/classes.py
|
Ciremun/nam-nyam-foods
|
63e190326287b01b0c99db23e2fbb2df52012497
|
[
"MIT"
] | null | null | null |
from typing import NamedTuple, Optional
from enum import Enum
import datetime
from flask import flash, redirect, make_response, abort, jsonify, request
import src.config as cfg
class FoodItem:
def __init__(self, title, weight, calories, price, link, image_link, ID=None):
self.title = title
self.weight = weight
self.calories = calories
self.price = price
self.link = link
self.image_link = image_link
self.ID = ID
class ShortFoodItem:
def __init__(self, title, price, link, amount=None, ID=None):
self.title = title
self.price = price
self.link = link
self.ID = ID
self.amount = amount
class Session(NamedTuple):
SID: str
username: str
displayname: str
usertype: str
date: datetime.datetime
user_id: int
account_share_id: int
class Cookie(NamedTuple):
key: str
value: str
class ResponseType(Enum):
HTTP = 1
JSON = 2
class ResponseTypeError(Exception):
pass
class FormHandler:
def __init__(self, redirect_url=None, flash_type=None, response_type=ResponseType.HTTP):
self.redirect_url = redirect_url
self.flash_type = flash_type
self.response_type = response_type
def make_response(self, **data):
if self.response_type == ResponseType.JSON:
response_data = jsonify(**data)
elif self.response_type == ResponseType.HTTP:
if data.get('message'):
flash(data['message'], self.flash_type)
response_data = redirect(self.redirect_url)
else:
raise ResponseTypeError(
f'Unknown ResponseType: {self.response_type}')
response = make_response(response_data)
if data.get('cookie'):
response.set_cookie(
data['cookie'].key, data['cookie'].value, max_age=2620000, secure=cfg.https)
return response
def get_form(self, request: request) -> Optional[dict]:
data = request.form
if not data:
data = request.get_json()
if not data:
abort(400)
self.response_type = ResponseType.JSON
return data
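# --------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): demonstrates the
# ResponseType switch in FormHandler.make_response inside a throwaway Flask
# request context. The app and payload below are illustrative assumptions.
if __name__ == "__main__":
    from flask import Flask

    demo_app = Flask(__name__)
    with demo_app.test_request_context('/'):
        handler = FormHandler(response_type=ResponseType.JSON)
        resp = handler.make_response(message='ok')
        print(resp.get_json())  # {'message': 'ok'}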
| 25.425287
| 92
| 0.626582
|
b3ff2cad265ac0ae1ca8e8efd35c66444a0fbda0
| 121
|
py
|
Python
|
beranda/admin.py
|
almaaqila/tugas-tengah-semester-pbp
|
7b7bc72ea19cedd61c77c26ed503efd9eb42bc0b
|
[
"MIT"
] | null | null | null |
beranda/admin.py
|
almaaqila/tugas-tengah-semester-pbp
|
7b7bc72ea19cedd61c77c26ed503efd9eb42bc0b
|
[
"MIT"
] | null | null | null |
beranda/admin.py
|
almaaqila/tugas-tengah-semester-pbp
|
7b7bc72ea19cedd61c77c26ed503efd9eb42bc0b
|
[
"MIT"
] | 5
|
2021-10-14T15:22:52.000Z
|
2021-12-29T12:22:43.000Z
|
from django.contrib import admin
# Register your models here.
from .models import Feedback
admin.site.register(Feedback)
| 24.2
| 32
| 0.818182
|
18d2270442d3a64e8fa0085e7b3a4cf7a4778c8d
| 3,357
|
py
|
Python
|
catalog-be/src/main/resources/scripts/import/tosca/importOnapTypes.py
|
onapdemo/sdc
|
3f1fee2ca76332b48e6f36662b32f2b5096c25e7
|
[
"Apache-2.0"
] | null | null | null |
catalog-be/src/main/resources/scripts/import/tosca/importOnapTypes.py
|
onapdemo/sdc
|
3f1fee2ca76332b48e6f36662b32f2b5096c25e7
|
[
"Apache-2.0"
] | null | null | null |
catalog-be/src/main/resources/scripts/import/tosca/importOnapTypes.py
|
onapdemo/sdc
|
3f1fee2ca76332b48e6f36662b32f2b5096c25e7
|
[
"Apache-2.0"
] | null | null | null |
import pycurl
import sys, getopt
from StringIO import StringIO
import json
import copy
from importCommon import *
from importNormativeTypes import *
import importCommon
################################################################################################################################################
# #
# Import all ONAP types #
# #
# activation : #
# python importOnapTypes.py [-i <be host> | --ip=<be host>] [-p <be port> | --port=<be port> ] [-u <admin user> | --user=<admin user> ] [-v <true|false> | --updateversion=<true|false> ] #
# #
# shortest activation (be host = localhost, be port = 8080, user = jh0003, updateversion = true): #
# python importOnapTypes.py #
# #
################################################################################################################################################
def importOnapTypes(beHost, bePort, adminUser, fileDir, updateversion):
onapTypes = [ "extImageFile",
"extLocalStorage",
"extZteCP",
"extZteVDU",
"extZteVL",
"NSD",
"VDU",
"vduCompute",
"vduCpd",
"vduVirtualStorage",
"vnfVirtualLinkDesc"
]
responseCodes = [200, 201]
if(updateversion == 'false'):
responseCodes = [200, 201, 409]
results = []
for onapType in onapTypes:
result = createNormativeType(beHost, bePort, adminUser, fileDir, onapType, updateversion)
results.append(result)
if ( result[1] == None or result[1] not in responseCodes) :
print "Failed creating heat type " + onapType + ". " + str(result[1])
return results
def main(argv):
print 'Number of arguments:', len(sys.argv), 'arguments.'
beHost = 'localhost'
bePort = '8080'
adminUser = 'jh0003'
updateversion = 'true'
try:
opts, args = getopt.getopt(argv,"i:p:u:v:h:",["ip=","port=","user=","updateversion="])
except getopt.GetoptError:
usage()
errorAndExit(2, 'Invalid input')
for opt, arg in opts:
#print opt, arg
if opt == '-h':
usage()
sys.exit(3)
elif opt in ("-i", "--ip"):
beHost = arg
elif opt in ("-p", "--port"):
bePort = arg
elif opt in ("-u", "--user"):
adminUser = arg
elif opt in ("-v", "--updateversion"):
if (arg.lower() == "false" or arg.lower() == "no"):
updateversion = 'false'
print 'be host =',beHost,', be port =', bePort,', user =', adminUser
if ( beHost == None ):
usage()
sys.exit(3)
results = importOnapTypes(beHost, bePort, adminUser, "../../../import/tosca/onap-types/", updateversion)
print "-----------------------------"
for result in results:
print "{0:20} | {1:6}".format(result[0], result[1])
print "-----------------------------"
responseCodes = [200, 201]
if(updateversion == 'false'):
responseCodes = [200, 201, 409]
failedNormatives = filter(lambda x: x[1] == None or x[1] not in responseCodes, results)
if (len(failedNormatives) > 0):
errorAndExit(1, None)
else:
errorAndExit(0, None)
if __name__ == "__main__":
main(sys.argv[1:])
| 30.798165
| 144
| 0.481382
|
a12bd926bee26027be7012db39c8445907b41a86
| 1,340
|
py
|
Python
|
middlewares/current_user.py
|
DNL-inc/bit
|
b6f35e95b2b40a3eec308a2c7179a73eadad3556
|
[
"MIT"
] | 1
|
2020-11-04T16:15:52.000Z
|
2020-11-04T16:15:52.000Z
|
middlewares/current_user.py
|
DNL-inc/bit
|
b6f35e95b2b40a3eec308a2c7179a73eadad3556
|
[
"MIT"
] | null | null | null |
middlewares/current_user.py
|
DNL-inc/bit
|
b6f35e95b2b40a3eec308a2c7179a73eadad3556
|
[
"MIT"
] | null | null | null |
from aiogram.dispatcher.middlewares import BaseMiddleware
from aiogram.dispatcher.handler import CancelHandler, current_handler
from aiogram import types
from models import User
class CurrentUserMiddleware(BaseMiddleware):
async def on_process_message(self, msg: types.Message, data: dict):
from middlewares import _
handler = current_handler.get()
if handler and getattr(handler, 'get_current_user', False):
user = await User().select_user_by_tele_id(msg.from_user.id)
if user:
data['user'] = user
else:
await msg.answer(_("""
It looks like you are not in the database yet.
To fix this, press /start
"""))
raise CancelHandler()
async def on_process_callback_query(self, callback: types.CallbackQuery, data: dict):
handler = current_handler.get()
if handler and getattr(handler, 'get_current_user', False):
from middlewares import _
user = await User().select_user_by_tele_id(callback.from_user.id)
if user:
data['user'] = user
else:
await callback.answer(_("""
It looks like you are not in the database yet.
To fix this, press /start
"""))
raise CancelHandler()
| 39.411765
| 89
| 0.625373
|
d22259d360fcf4f80d83fd851fabd036ea6063f1
| 27,523
|
py
|
Python
|
indra/assemblers/cx/assembler.py
|
djinnome/indra
|
382b7f236e0b1422c96a268ef873530b5e92d48f
|
[
"BSD-2-Clause"
] | null | null | null |
indra/assemblers/cx/assembler.py
|
djinnome/indra
|
382b7f236e0b1422c96a268ef873530b5e92d48f
|
[
"BSD-2-Clause"
] | null | null | null |
indra/assemblers/cx/assembler.py
|
djinnome/indra
|
382b7f236e0b1422c96a268ef873530b5e92d48f
|
[
"BSD-2-Clause"
] | null | null | null |
import re
import json
import time
import logging
import itertools
from ndex2.nice_cx_network import NiceCXNetwork
from collections import OrderedDict
from indra.statements import *
from indra.databases import context_client, ndex_client, get_identifiers_url, \
url_prefixes
logger = logging.getLogger(__name__)
class NiceCxAssembler(object):
"""Assembles a Nice CX network from a set of INDRA Statements.
Parameters
----------
stmts : Optional[list[indra.statements.Statement]]
A list of INDRA Statements to be assembled.
network_name : Optional[str]
The name of the network to be assembled. Default: indra_assembled
Attributes
----------
network : ndex2.nice_cx_network.NiceCXNetwork
A Nice CX network object that is assembled from Statements.
"""
def __init__(self, stmts=None, network_name=None):
self.statements = stmts if stmts else []
self.network = NiceCXNetwork()
self.network.set_network_attribute('name',
(network_name if network_name
else 'indra_assembled'))
self.node_keys = {}
def make_model(self, self_loops=False, network_attributes=None):
"""Return a Nice CX network object after running assembly.
Parameters
----------
self_loops : Optional[bool]
If False, self-loops are excluded from the network. Default: False
network_attributes : Optional[dict]
A dictionary containing attributes to be added to the
assembled network.
Returns
-------
ndex2.nice_cx_network.NiceCXNetwork
The assembled Nice CX network.
"""
for stmt in self.statements:
agents = stmt.agent_list()
not_none_agents = [a for a in agents if a is not None]
if len(not_none_agents) < 2:
continue
for a1, a2 in itertools.combinations(not_none_agents, 2):
a1_id = self.add_node(a1)
a2_id = self.add_node(a2)
if not self_loops and a1_id == a2_id:
continue
edge_id = self.add_edge(a1_id, a2_id, stmt)
prefixes = {k: v for k, v in url_prefixes.items()}
prefixes['pubmed'] = 'https://identifiers.org/pubmed/'
self.network.set_network_attribute('@context', json.dumps(prefixes))
if network_attributes:
for k, v in network_attributes.items():
self.network.set_network_attribute(k, v, 'string')
return self.network
def add_node(self, agent):
"""Add an Agent to the network as a node."""
agent_key = self.get_agent_key(agent)
# If the node already exists
if agent_key in self.node_keys:
return self.node_keys[agent_key]
# If the node doesn't exist yet
db_ns, db_id = agent.get_grounding()
# TODO: handle more represents name spaces
if db_ns == 'HGNC':
represents = 'hgnc.symbol:%s' % agent.name
else:
represents = None
node_id = self.network.create_node(agent.name,
node_represents=represents)
self.node_keys[agent_key] = node_id
# Add db_refs as aliases
db_refs_list = ['%s:%s' % (db_name, db_id)
for db_name, db_id in agent.db_refs.items()
if db_name in url_prefixes]
if db_refs_list:
self.network.add_node_attribute(property_of=node_id,
name='aliases',
values=db_refs_list,
type='list_of_string')
# Add the type of the node, inferred from grounding
if db_ns:
mapped_type = db_ns_type_mappings.get(db_ns)
if mapped_type:
self.network.add_node_attribute(property_of=node_id,
name='type',
values=mapped_type,
type='string')
return node_id
def add_edge(self, a1_id, a2_id, stmt):
"""Add a Statement to the network as an edge."""
stmt_type = stmt.__class__.__name__
edge_id = self.network.create_edge(a1_id, a2_id, stmt_type)
evs = []
for ev in stmt.evidence:
# We skip evidences with no PMID
if not ev.pmid:
continue
# We take a maximum 200 character snippet of the evidence text
if not ev.text:
ev_txt = 'Evidence text not available.'
elif len(ev.text) > 200:
ev_txt = ev.text[:200] + '...'
else:
ev_txt = ev.text
# Construct a clickable PMID link with the source and evidence text
ev_str = ('<a target="_blank" '
'href="http://identifiers.org/pubmed/%s">'
'pubmed:%s</a> (%s) %s') % (ev.pmid, ev.pmid,
ev.source_api, ev_txt)
evs.append((ev_str, 0 if ev.text is None else 1))
# Reorder to have ones with text first
evs = sorted(evs, key=lambda x: x[1], reverse=True)
# Cap at 10 pieces of evidence
evs = [e[0] for e in evs[:10]]
self.network.set_edge_attribute(edge_id, 'citation', evs,
type='list_of_string')
return edge_id
def print_model(self):
"""Return the CX string of the assembled model."""
return self.network.to_cx()
@staticmethod
def get_agent_key(agent):
return agent.name
db_ns_type_mappings = {'HGNC': 'gene',
'UP': 'protein',
'FPLX': 'proteinfamily',
'CHEBI': 'chemical',
'GO': 'biological_process'}
class CxAssembler(object):
"""This class assembles a CX network from a set of INDRA Statements.
The CX format is an aspect oriented data mode for networks.
The format is defined at http://www.home.ndexbio.org/data-model/.
The CX format is the standard for NDEx and is compatible with
CytoScape via the CyNDEx plugin.
Parameters
----------
stmts : Optional[list[indra.statements.Statement]]
A list of INDRA Statements to be assembled.
network_name : Optional[str]
The name of the network to be assembled. Default: indra_assembled
Attributes
----------
statements : list[indra.statements.Statement]
A list of INDRA Statements to be assembled.
network_name : str
The name of the network to be assembled.
cx : dict
The structure of the CX network that is assembled.
"""
def __init__(self, stmts=None, network_name=None):
if stmts is None:
self.statements = []
else:
self.statements = stmts
if network_name is None:
self.network_name = 'indra_assembled'
else:
self.network_name = network_name
self.cx = {'nodes': [], 'edges': [],
'nodeAttributes': [], 'edgeAttributes': [],
'citations': [], 'edgeCitations': [],
'supports': [], 'edgeSupports': [],
'networkAttributes': []}
self._existing_nodes = {}
self._existing_edges = {}
self._id_counter = 0
def add_statements(self, stmts):
"""Add INDRA Statements to the assembler's list of statements.
Parameters
----------
stmts : list[indra.statements.Statement]
A list of :py:class:`indra.statements.Statement`
to be added to the statement list of the assembler.
"""
for stmt in stmts:
self.statements.append(stmt)
def make_model(self, add_indra_json=True):
"""Assemble the CX network from the collected INDRA Statements.
This method assembles a CX network from the set of INDRA Statements.
The assembled network is set as the assembler's cx argument.
Parameters
----------
add_indra_json : Optional[bool]
If True, the INDRA Statement JSON annotation is added to each
edge in the network. Default: True
Returns
-------
cx_str : str
The json serialized CX model.
"""
self.add_indra_json = add_indra_json
for stmt in self.statements:
if isinstance(stmt, Modification):
self._add_modification(stmt)
if isinstance(stmt, SelfModification):
self._add_self_modification(stmt)
elif isinstance(stmt, RegulateActivity) or \
isinstance(stmt, RegulateAmount):
self._add_regulation(stmt)
elif isinstance(stmt, Complex):
self._add_complex(stmt)
elif isinstance(stmt, Gef):
self._add_gef(stmt)
elif isinstance(stmt, Gap):
self._add_gap(stmt)
elif isinstance(stmt, Influence):
self._add_influence(stmt)
network_description = ''
self.cx['networkAttributes'].append({'n': 'name',
'v': self.network_name})
self.cx['networkAttributes'].append({'n': 'description',
'v': network_description})
cx_str = self.print_cx()
return cx_str
def print_cx(self, pretty=True):
"""Return the assembled CX network as a json string.
Parameters
----------
pretty : bool
If True, the CX string is formatted with indentation (for human
viewing) otherwise no indentation is used.
Returns
-------
json_str : str
A json formatted string representation of the CX network.
"""
def _get_aspect_metadata(aspect):
count = len(self.cx.get(aspect)) if self.cx.get(aspect) else 0
if not count:
return None
data = {'name': aspect,
'idCounter': self._id_counter,
'consistencyGroup': 1,
'elementCount': count}
return data
full_cx = OrderedDict()
full_cx['numberVerification'] = [{'longNumber': 281474976710655}]
aspects = ['nodes', 'edges', 'supports', 'citations', 'edgeAttributes',
'edgeCitations', 'edgeSupports', 'networkAttributes',
'nodeAttributes', 'cartesianLayout']
full_cx['metaData'] = []
for aspect in aspects:
metadata = _get_aspect_metadata(aspect)
if metadata:
full_cx['metaData'].append(metadata)
for k, v in self.cx.items():
full_cx[k] = v
full_cx['status'] = [{'error': '', 'success': True}]
full_cx = [{k: v} for k, v in full_cx.items()]
if pretty:
json_str = json.dumps(full_cx, indent=2)
else:
json_str = json.dumps(full_cx)
return json_str
def save_model(self, file_name='model.cx'):
"""Save the assembled CX network in a file.
Parameters
----------
file_name : Optional[str]
The name of the file to save the CX network to. Default: model.cx
"""
with open(file_name, 'wt') as fh:
cx_str = self.print_cx()
fh.write(cx_str)
def upload_model(self, ndex_cred=None, private=True, style='default'):
"""Creates a new NDEx network of the assembled CX model.
To upload the assembled CX model to NDEx, you need to have
a registered account on NDEx (http://ndexbio.org/) and have
the `ndex` python package installed. The uploaded network
is private by default.
Parameters
----------
ndex_cred : Optional[dict]
A dictionary with the following entries:
'user': NDEx user name
'password': NDEx password
private : Optional[bool]
Whether or not the created network will be private on NDEX.
style : Optional[str]
This optional parameter can either be (1)
The UUID of an existing NDEx network whose style should be applied
to the new network. (2) Unspecified or 'default' to use
the default INDRA-assembled network style. (3) None to
not set a network style.
Returns
-------
network_id : str
The UUID of the NDEx network that was created by uploading
the assembled CX model.
"""
cx_str = self.print_cx()
if not ndex_cred:
username, password = ndex_client.get_default_ndex_cred({})
ndex_cred = {'user': username,
'password': password}
network_id = ndex_client.create_network(cx_str, ndex_cred, private)
if network_id and style:
template_id = None if style == 'default' else style
nretries = 3
for retry_idx in range(nretries):
time.sleep(3)
try:
ndex_client.set_style(network_id, ndex_cred, template_id)
break
except Exception:
msg = 'Style setting failed, '
if retry_idx + 1 < nretries:
logger.info(msg + 'retrying %d more times' %
(nretries - (retry_idx+1)))
else:
logger.info(msg + 'the network will be missing style '
'information.')
return network_id
def set_context(self, cell_type):
"""Set protein expression data and mutational status as node attribute
This method uses :py:mod:`indra.databases.context_client` to get
protein expression levels and mutational status for a given cell type
and set a node attribute for proteins accordingly.
Parameters
----------
cell_type : str
Cell type name for which expression levels are queried.
The cell type name follows the CCLE database conventions.
Example: LOXIMVI_SKIN, BT20_BREAST
"""
node_names = [node['n'] for node in self.cx['nodes']]
res_expr = context_client.get_protein_expression(node_names,
[cell_type])
res_mut = context_client.get_mutations(node_names,
[cell_type])
res_expr = res_expr.get(cell_type)
res_mut = res_mut.get(cell_type)
if not res_expr:
msg = 'Could not get protein expression for %s cell type.' % \
cell_type
logger.warning(msg)
if not res_mut:
msg = 'Could not get mutational status for %s cell type.' % \
cell_type
logger.warning(msg)
if not res_expr and not res_mut:
return
self.cx['networkAttributes'].append({'n': 'cellular_context',
'v': cell_type})
counter = 0
for node in self.cx['nodes']:
amount = res_expr.get(node['n'])
mut = res_mut.get(node['n'])
if amount is not None:
node_attribute = {'po': node['@id'],
'n': 'expression_amount',
'v': int(amount)}
self.cx['nodeAttributes'].append(node_attribute)
if mut is not None:
is_mutated = 1 if mut else 0
node_attribute = {'po': node['@id'],
'n': 'is_mutated',
'v': is_mutated}
self.cx['nodeAttributes'].append(node_attribute)
if mut is not None or amount is not None:
counter += 1
logger.info('Set context for %d nodes.' % counter)
def _get_new_id(self):
ret = self._id_counter
self._id_counter += 1
return ret
def _add_modification(self, stmt):
if stmt.enz is None:
return
enz_id = self._add_node(stmt.enz)
sub_id = self._add_node(stmt.sub)
stmt_type = stmt.__class__.__name__
self._add_edge(enz_id, sub_id, stmt_type, stmt)
def _add_self_modification(self, stmt):
enz_id = self._add_node(stmt.enz)
stmt_type = stmt.__class__.__name__
self._add_edge(enz_id, enz_id, stmt_type, stmt)
def _add_complex(self, stmt):
# Here we do some bookkeeping to handle the special case where
# a member appears twice in a complex e.g.
# Complex(CDK12(), RECQL4(), RECQL4(), Ku())
# and we don't want to have duplicate edges.
added_edges = set()
for m1, m2 in itertools.combinations(stmt.members, 2):
m1_id = self._add_node(m1)
m2_id = self._add_node(m2)
if (m1_id, m2_id) not in added_edges:
self._add_edge(m1_id, m2_id, 'Complex', stmt)
added_edges.add((m1_id, m2_id))
def _add_regulation(self, stmt):
if stmt.subj is None:
return
subj_id = self._add_node(stmt.subj)
obj_id = self._add_node(stmt.obj)
stmt_type = stmt.__class__.__name__
self._add_edge(subj_id, obj_id, stmt_type, stmt)
def _add_influence(self, stmt):
subj_id = self._add_node(stmt.subj.concept)
obj_id = self._add_node(stmt.obj.concept)
stmt_type = stmt.__class__.__name__
self._add_edge(subj_id, obj_id, stmt_type, stmt)
def _add_gef(self, stmt):
gef_id = self._add_node(stmt.gef)
ras_id = self._add_node(stmt.ras)
stmt_type = stmt.__class__.__name__
self._add_edge(gef_id, ras_id, stmt_type, stmt)
def _add_gap(self, stmt):
gap_id = self._add_node(stmt.gap)
ras_id = self._add_node(stmt.ras)
stmt_type = stmt.__class__.__name__
self._add_edge(gap_id, ras_id, stmt_type, stmt)
def _add_node(self, agent):
node_key = agent.name
node_id = self._existing_nodes.get(node_key)
if node_id is not None:
return node_id
node_id = self._get_new_id()
self._existing_nodes[node_key] = node_id
node = {'@id': node_id,
'n': agent.name}
self.cx['nodes'].append(node)
self._add_node_metadata(node_id, agent)
return node_id
def _add_node_metadata(self, node_id, agent):
agent_type = _get_agent_type(agent)
node_attribute = {'po': node_id,
'n': 'type',
'v': agent_type}
self.cx['nodeAttributes'].append(node_attribute)
for db_name, db_ids in agent.db_refs.items():
if not db_ids:
logger.warning('Missing db_id for %s' % agent)
continue
elif isinstance(db_ids, int):
db_id = str(db_ids)
elif isinstance(db_ids, list):
db_id = db_ids[0][0]
else:
db_id = db_ids
url = get_identifiers_url(db_name, db_id)
if not url:
continue
db_name_map = {
'UP': 'UniProt', 'PUBCHEM': 'PubChem',
'IP': 'InterPro', 'NXPFA': 'NextProtFamily',
'PF': 'Pfam', 'CHEBI': 'ChEBI'}
name = db_name_map.get(db_name)
if not name:
name = db_name
node_attribute = {'po': node_id,
'n': name,
'v': url}
self.cx['nodeAttributes'].append(node_attribute)
def _add_edge(self, source, target, interaction, stmt):
edge_key = (source, target, interaction)
try:
edge_id = self._existing_edges[edge_key]
return edge_id
except KeyError:
pass
edge_id = self._get_new_id()
self._existing_nodes[edge_key] = edge_id
edge = {'@id': edge_id,
's': source,
't': target,
'i': interaction.lower()}
self.cx['edges'].append(edge)
self._add_edge_metadata(edge_id, stmt)
return edge_id
def _add_edge_metadata(self, edge_id, stmt):
# Add the string of the statement itself
indra_stmt_str = '%s' % stmt
edge_attribute = {'po': edge_id,
'n': 'INDRA statement',
'v': indra_stmt_str}
self.cx['edgeAttributes'].append(edge_attribute)
# Add INDRA JSON
if self.add_indra_json:
indra_stmt_json = json.dumps(stmt.to_json())
edge_attribute = {'po': edge_id,
'n': '__INDRA json',
'v': indra_stmt_json}
self.cx['edgeAttributes'].append(edge_attribute)
# Add the type of statement as the edge type
stmt_type, stmt_polarity = _get_stmt_type(stmt)
edge_attribute = {'po': edge_id,
'n': 'type',
'v': stmt_type}
self.cx['edgeAttributes'].append(edge_attribute)
edge_attribute = {'po': edge_id,
'n': 'polarity',
'v': stmt_polarity}
self.cx['edgeAttributes'].append(edge_attribute)
# Add the citations for the edge
pmids = [e.pmid for e in stmt.evidence if e.pmid]
edge_citations = []
pmids_added = []
for pmid in pmids:
pmid_txt = None
if re.match('[0-9]+', pmid):
pmid_txt = 'pmid:' + pmid
if pmid_txt not in pmids_added:
citation_id = self._get_new_id()
citation = {'@id': citation_id,
'dc:identifier': pmid_txt}
self.cx['citations'].append(citation)
edge_citations.append(citation_id)
pmids_added.append(pmid_txt)
if edge_citations:
edge_citation = {'citations': edge_citations,
'po': [edge_id]}
self.cx['edgeCitations'].append(edge_citation)
# Add the textual supports for the edge
texts = [_fix_evidence_text(e.text) for e in stmt.evidence if e.text]
edge_supports = []
for text in texts:
support_id = self._get_new_id()
support = {'@id': support_id,
'text': text}
self.cx['supports'].append(support)
edge_supports.append(support_id)
if edge_supports:
edge_support = {'supports': edge_supports,
'po': [edge_id]}
self.cx['edgeSupports'].append(edge_support)
belief_str = '%.2f' % stmt.belief
edge_attribute = {'po': edge_id,
'n': 'belief',
'v': belief_str}
self.cx['edgeAttributes'].append(edge_attribute)
# NOTE: supports and edgeSupports are currently
# not shown on NDEx therefore we add text evidence as a generic
# edgeAttribute
if texts:
text = texts[0]
edge_attribute = {'po': edge_id,
'n': 'text',
'v': text}
self.cx['edgeAttributes'].append(edge_attribute)
# Add the serialized JSON INDRA Statement
stmt_dict = stmt.to_json()
edge_attribute = {'po': edge_id, 'n': 'indra', 'v': stmt_dict}
self.cx['edgeAttributes'].append(edge_attribute)
# Add support type
support_type = _get_support_type(stmt)
edge_attribute = {'po': edge_id, 'n': 'supportType', 'v': support_type}
self.cx['edgeAttributes'].append(edge_attribute)
def _get_support_type(stmt):
dbs = ['bel', 'biopax', 'phosphosite', 'biogrid']
readers = ['reach', 'trips', 'sparser', 'r3']
has_db = False
has_reading = False
for ev in stmt.evidence:
if ev.source_api in dbs:
has_db = True
if ev.source_api in readers:
has_reading = True
if has_db and not has_reading:
return 'database'
elif has_db and has_reading:
return 'database and literature'
elif not has_db and has_reading:
return 'literature'
def _get_stmt_type(stmt):
if isinstance(stmt, AddModification):
edge_type = 'Modification'
edge_polarity = 'positive'
elif isinstance(stmt, RemoveModification):
edge_type = 'Modification'
edge_polarity = 'negative'
elif isinstance(stmt, SelfModification):
edge_type = 'SelfModification'
edge_polarity = 'positive'
elif isinstance(stmt, Complex):
edge_type = 'Complex'
edge_polarity = 'none'
elif isinstance(stmt, Activation):
edge_type = 'Activation'
edge_polarity = 'positive'
elif isinstance(stmt, Inhibition):
edge_type = 'Inhibition'
edge_polarity = 'negative'
elif isinstance(stmt, DecreaseAmount):
edge_type = 'DecreaseAmount'
edge_polarity = 'negative'
elif isinstance(stmt, IncreaseAmount):
edge_type = 'IncreaseAmount'
edge_polarity = 'positive'
elif isinstance(stmt, Gef):
edge_type = 'Gef'
edge_polarity = 'positive'
elif isinstance(stmt, Gap):
edge_type = 'Gap'
edge_polarity = 'negative'
elif isinstance(stmt, Influence):
edge_type = 'Influence'
if stmt.overall_polarity() == -1:
edge_polarity = 'negative'
elif stmt.overall_polarity() == 1:
edge_polarity = 'positive'
else:
edge_polarity = 'none'
else:
edge_type = stmt.__class__.__str__()
edge_polarity = 'none'
return edge_type, edge_polarity
def _get_agent_type(agent):
hgnc_id = agent.db_refs.get('HGNC')
uniprot_id = agent.db_refs.get('UP')
pfam_id = agent.db_refs.get('PF')
fa_id = agent.db_refs.get('FA')
chebi_id = agent.db_refs.get('CHEBI')
pubchem_id = agent.db_refs.get('PUBCHEM')
be_id = agent.db_refs.get('FPLX')
go_id = agent.db_refs.get('GO')
mir_id = agent.db_refs.get('MIRBASEM') or agent.db_refs.get('MIRBASE')
if hgnc_id or uniprot_id:
agent_type = 'protein'
elif pfam_id or fa_id or be_id:
agent_type = 'proteinfamily'
elif chebi_id or pubchem_id:
agent_type = 'chemical'
elif go_id:
agent_type = 'bioprocess'
elif mir_id:
agent_type = 'microrna'
else:
agent_type = 'other'
return agent_type
def _fix_evidence_text(txt):
"""Eliminate some symbols to have cleaner supporting text."""
txt = re.sub('[ ]?\( xref \)', '', txt)
# This is to make [ xref ] become [] to match the two readers
txt = re.sub('\[ xref \]', '[]', txt)
txt = re.sub('[\(]?XREF_BIBR[\)]?[,]?', '', txt)
txt = re.sub('[\(]?XREF_FIG[\)]?[,]?', '', txt)
txt = re.sub('[\(]?XREF_SUPPLEMENT[\)]?[,]?', '', txt)
txt = txt.strip()
return txt
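# --------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): assembles a CX
# string from one made-up Statement. The agent names, PMID and evidence
# text are illustrative placeholders; running this requires indra and ndex2.
if __name__ == "__main__":
    map2k1 = Agent('MAP2K1')
    mapk1 = Agent('MAPK1')
    ev = Evidence(source_api='reach', pmid='12345',
                  text='MAP2K1 phosphorylates MAPK1.')
    stmt = Phosphorylation(map2k1, mapk1, evidence=[ev])
    ca = CxAssembler([stmt], network_name='demo_network')
    cx_str = ca.make_model()
    print(cx_str[:200])  # beginning of the serialized CX JSON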
| 37.910468
| 79
| 0.553537
|
a06559dd154f744aed276eb796a5ae105aebbf9f
| 4,227
|
py
|
Python
|
BEGAN/model.py
|
IvoryCandy/generative-adversarial-networks
|
4010a20b22ecb016da164b37d6f915788e8f09f5
|
[
"Apache-2.0"
] | 1
|
2018-06-05T02:26:48.000Z
|
2018-06-05T02:26:48.000Z
|
BEGAN/model.py
|
IvoryCandy/generative-adversarial-networks
|
4010a20b22ecb016da164b37d6f915788e8f09f5
|
[
"Apache-2.0"
] | null | null | null |
BEGAN/model.py
|
IvoryCandy/generative-adversarial-networks
|
4010a20b22ecb016da164b37d6f915788e8f09f5
|
[
"Apache-2.0"
] | null | null | null |
import torch.nn as nn
def conv_block(in_dim, out_dim):
return nn.Sequential(nn.Conv2d(in_dim, in_dim, kernel_size=3, stride=1, padding=1),
nn.ELU(True),
nn.Conv2d(in_dim, in_dim, kernel_size=3, stride=1, padding=1),
nn.ELU(True),
nn.Conv2d(in_dim, out_dim, kernel_size=1, stride=1, padding=0),
nn.AvgPool2d(kernel_size=2, stride=2))
def de_conv_block(in_dim, out_dim):
return nn.Sequential(nn.Conv2d(in_dim, out_dim, kernel_size=3, stride=1, padding=1),
nn.ELU(True),
nn.Conv2d(out_dim, out_dim, kernel_size=3, stride=1, padding=1),
nn.ELU(True),
nn.Upsample(scale_factor=2))
def normal_init(m, mean, std):
if isinstance(m, nn.ConvTranspose2d) or isinstance(m, nn.Conv2d):
m.weight.data.normal_(mean, std)
m.bias.data.zero_()
class Generator(nn.Module):
def __init__(self, conv_dim, image_size, z_dim):
super(Generator, self).__init__()
# 1
self.decode = nn.ConvTranspose2d(z_dim, conv_dim, kernel_size=image_size // 16, stride=1, padding=0)
# 8
self.de_conv6 = de_conv_block(conv_dim, conv_dim)
# 16
self.de_conv5 = de_conv_block(conv_dim, conv_dim)
# 32
self.de_conv4 = de_conv_block(conv_dim, conv_dim)
# 64
self.de_conv3 = de_conv_block(conv_dim, conv_dim)
# 128
# self.de_conv2 = de_conv_block(conv_dim, conv_dim)
# 256
self.de_conv1 = nn.Sequential(nn.Conv2d(in_channels=conv_dim, out_channels=conv_dim, kernel_size=3, stride=1, padding=1),
nn.ELU(True),
nn.Conv2d(in_channels=conv_dim, out_channels=conv_dim, kernel_size=3, stride=1, padding=1),
nn.ELU(True),
nn.Conv2d(in_channels=conv_dim, out_channels=3, kernel_size=3, stride=1, padding=1),
nn.Tanh())
# weight_init
def weight_init(self, mean, std):
for m in self._modules:
normal_init(self._modules[m], mean, std)
def forward(self, x):
x = self.decode(x)
x = self.de_conv6(x)
x = self.de_conv5(x)
x = self.de_conv4(x)
x = self.de_conv3(x)
# x = self.de_conv2(x)
x = self.de_conv1(x)
return x
class Discriminator(nn.Module):
def __init__(self, conv_dim, image_size, z_dim):
super(Discriminator, self).__init__()
# 256
self.conv1 = nn.Sequential(nn.Conv2d(in_channels=3, out_channels=conv_dim, kernel_size=3, stride=1, padding=1), nn.ELU(True))
# 256
self.conv2 = conv_block(conv_dim, conv_dim)
# 128
self.conv3 = conv_block(conv_dim, conv_dim * 2)
# 64
self.conv4 = conv_block(conv_dim * 2, conv_dim * 3)
# 32
self.conv5 = conv_block(conv_dim * 3, conv_dim * 4)
# 16
# self.conv6 = conv_block(conv_dim*4, conv_dim*4)
# 8
self.encode = nn.Conv2d(conv_dim * 4, z_dim, kernel_size=image_size // 16, stride=1, padding=0)
# 1
# weight_init
def weight_init(self, mean, std):
for m in self._modules:
normal_init(self._modules[m], mean, std)
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
x = self.conv4(x)
x = self.conv5(x)
# x = self.conv6(x)
x = self.encode(x)
return x
class D(nn.Module):
def __init__(self, d_conv_dim, g_conv_dim, image_size, z_dim):
super(D, self).__init__()
enc = Discriminator(d_conv_dim, image_size, z_dim)
dec = Generator(g_conv_dim, image_size, z_dim)
self.discriminator = enc
self.generator = dec
# weight_init
def weight_init(self, mean, std):
self.discriminator.weight_init(mean, std)
self.generator.weight_init(mean, std)
def forward(self, x):
h = self.discriminator(x)
out = self.generator(h)
return out
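if __name__ == '__main__':
    # Hedged smoke test (editor addition): the sizes below (64x64 images,
    # conv_dim=64, z_dim=64) are illustrative assumptions, not values taken
    # from the original training script.
    import torch
    g = Generator(conv_dim=64, image_size=64, z_dim=64)
    d = Discriminator(conv_dim=64, image_size=64, z_dim=64)
    z = torch.randn(2, 64, 1, 1)
    fake = g(z)       # expected shape: (2, 3, 64, 64)
    code = d(fake)    # expected shape: (2, 64, 1, 1)
    print(fake.shape, code.shape)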
| 34.933884
| 133
| 0.567779
|
030176680e5c3b78fa600295354339686b921cae
| 4,139
|
py
|
Python
|
emailreports.py
|
RaviTejaKomma/Automate-Boring-Stuff-Python
|
e5d8df1b060f20e50691f824ecabc3a30dc845c7
|
[
"MIT"
] | null | null | null |
emailreports.py
|
RaviTejaKomma/Automate-Boring-Stuff-Python
|
e5d8df1b060f20e50691f824ecabc3a30dc845c7
|
[
"MIT"
] | null | null | null |
emailreports.py
|
RaviTejaKomma/Automate-Boring-Stuff-Python
|
e5d8df1b060f20e50691f824ecabc3a30dc845c7
|
[
"MIT"
] | null | null | null |
""" Assignment6 """
'''
This exercise builds on assignment 5 and explores sending mail programmatically using smtplib
Assuming that you have imported the data into the database in previous assignment,
write a click script (collegereport.py) which will take a college acronym (say gvp)
and sends out a class report to a specified email (use your friends email).
The report should contain 3 parts:
The list of college students and their scores
The college summary (count of students, min, max, avg) and
The global summary for the whole class (for comparison).
Use smtplib and send email from Gmail.
You can take the gmail credentials as environment variables (click supports reading arguments from env variables).
'''
import click
import smtplib
import MySQLdb
from MySQLdb import Error
import getpass
def generate_report(collegeacronym):
try:
conn = MySQLdb.connect(host="localhost",user="root",passwd="raviprince57",db="statistics")
cur1 = conn.cursor()
cur2 = conn.cursor()
query = '''SELECT STUDENTS.NAME,MARKS.TRANFORM,MARKS.FROM_CUSTOM_BASE26,MARKS.GET_PIG_LATIN,MARKS.TOP_CHARS,MARKS.TOTAL
FROM STUDENTS INNER JOIN MARKS ON
TRIM(TRAILING '_mock' FROM TRIM( LEADING 'ol2016_%s_' FROM MARKS.STUDENT)) = LOWER(STUDENTS.DBNAMES)''' % collegeacronym
cur1.execute(query)
college_report = list(cur1.fetchall())
college_report = [[report[0], str(report[1]), str(report[2]), str(report[3]), str(report[4])] for report in college_report]
query = query = "SELECT CAST(COUNT(TOTAL) AS CHAR(8)), CAST(AVG(TOTAL) AS CHAR(8)),CAST(MAX(TOTAL) AS CHAR(8))," \
"CAST(MIN(TOTAL) AS CHAR(8)) " \
"FROM MARKS WHERE STUDENT LIKE '%"+collegeacronym+"%'"
cur1.execute(query)
college_summary = list(cur1.fetchall()[0])
query = "SELECT COUNT(COLLEGE),COLLEGE FROM STUDENTS GROUP BY COLLEGE"
cur1.execute(query)
students_count = {tup[1]: tup[0] for tup in cur1.fetchall()} ## collegename : count
query = "SELECT STUDENTS.COLLEGE,AVG(MARKS.TOTAL),MAX(MARKS.TOTAL),MIN(MARKS.TOTAL) FROM MARKS , STUDENTS " \
"WHERE MARKS.STUDENT LIKE CONCAT(CONCAT('%',STUDENTS.COLLEGE),'%') GROUP BY STUDENTS.COLLEGE"
cur1.execute(query)
result = cur1.fetchall()
global_summary = []
for report in result:
count = students_count[report[0]]
global_summary.append([report[0], str(count), str(report[1]), str(report[2]), str(report[3])])
conn.close()
except Error as e:
print e
return (college_report,college_summary,global_summary)
@click.command()
@click.argument("collegeacronym",nargs=1)
@click.argument("emailidstosendreports",nargs=-1)
def emailreports(collegeacronym,emailidstosendreports):
college_report,college_summary,global_summary = generate_report(collegeacronym)
global_summary = [" ".join(report) for report in global_summary]
college_report = [" ".join(report) for report in college_report]
"sending the generated reports as email"
gmail_user = "ravieee929374s@gmail.com"
gmail_password = getpass.getpass("Enter the password : ")
sent_from = gmail_user
to = emailidstosendreports
subject = "%s student's performance report" % collegeacronym
body = "%s college students results\n\n" % collegeacronym +\
"\n".join(college_report)+\
"\n\n%s summary\n\n" % collegeacronym + \
" ".join(college_summary)+\
"\n\nAll colleges summary\n\n" +\
"\n".join(global_summary)
email_text = """\
From: %s
To: %s
Subject: %s
%s
""" % (sent_from, ", ".join(to), subject, body)
try:
server = smtplib.SMTP_SSL('smtp.gmail.com', 465)
server.ehlo()
server.login(gmail_user, gmail_password)
server.sendmail(sent_from, to, email_text)
server.close()
print "--Reports sent successfully!--"
except:
print 'Something went wrong...'
pass
if __name__=='__main__':
emailreports()
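# Hedged sketch (editor addition): the assignment text above asks for the Gmail
# credentials to be read from environment variables via click.  The option and
# environment-variable names below (GMAIL_USER / GMAIL_PASSWORD) are illustrative
# assumptions; the original emailreports() command prompts with getpass instead.
@click.command()
@click.option("--gmail-user", envvar="GMAIL_USER")
@click.option("--gmail-password", envvar="GMAIL_PASSWORD")
def credentials_from_env(gmail_user, gmail_password):
    click.echo("Using Gmail account: %s" % gmail_user)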
| 37.972477
| 131
| 0.663928
|
2427cbe9eea4392090f4507d5172967fbba5342a
| 4,838
|
py
|
Python
|
lte/gateway/python/magma/mobilityd/uplink_gw.py
|
nitinneet/test23
|
c44df1a3290195cd3fc59d3483ef640ca8aaeb1e
|
[
"BSD-3-Clause"
] | 1
|
2021-08-08T15:49:05.000Z
|
2021-08-08T15:49:05.000Z
|
lte/gateway/python/magma/mobilityd/uplink_gw.py
|
nitinneet/test23
|
c44df1a3290195cd3fc59d3483ef640ca8aaeb1e
|
[
"BSD-3-Clause"
] | 151
|
2020-09-03T20:44:13.000Z
|
2022-03-31T20:28:52.000Z
|
lte/gateway/python/magma/mobilityd/uplink_gw.py
|
nitinneet/test23
|
c44df1a3290195cd3fc59d3483ef640ca8aaeb1e
|
[
"BSD-3-Clause"
] | 2
|
2021-05-27T18:15:16.000Z
|
2021-05-27T18:41:39.000Z
|
"""
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import ipaddress
import logging
from typing import List, MutableMapping, Optional
import netifaces
from lte.protos.mobilityd_pb2 import GWInfo, IPAddress
NO_VLAN = "NO_VLAN"
def _get_vlan_key(vlan) -> str:
# Validate vlan id is valid VLAN
vlan_id_parsed = 0
try:
if vlan:
vlan_id_parsed = int(vlan)
except ValueError:
logging.debug("invalid vlan id: %s", vlan)
if vlan_id_parsed == 0:
return NO_VLAN
if vlan_id_parsed < 0 or vlan_id_parsed > 4095:
raise InvalidVlanId("invalid vlan: " + str(vlan))
return str(vlan)
# TODO: move helper class to separate directory.
class UplinkGatewayInfo:
def __init__(self, gw_info_map: MutableMapping[str, GWInfo]):
"""
This maintains up-to-date information about the upstream GW.
The GW table is keyed by vlan-id.
Args:
gw_info_map: map to store GW info.
"""
self._backing_map = gw_info_map
def get_gw_ip(self, vlan_id: Optional[str] = "") -> Optional[str]:
"""
Retrieve gw IP address
Args:
vlan_id: vlan id of the GW.
"""
vlan_key = _get_vlan_key(vlan_id)
if vlan_key in self._backing_map:
gw_info = self._backing_map.get(vlan_key)
ip = ipaddress.ip_address(gw_info.ip.address)
return str(ip)
def read_default_gw(self):
gws = netifaces.gateways()
logging.info("Using GW info: %s", gws)
if gws is not None:
default_gw = gws['default']
gw_ip_addr = None
if default_gw is not None:
gw_ip_addr = default_gw.get(netifaces.AF_INET, None)
if gw_ip_addr is not None:
self.update_ip(gw_ip_addr[0])
def update_ip(self, ip: Optional[str], vlan_id=None):
"""
Update IP address of the GW in mobilityD GW table.
Args:
ip: gw ip address
vlan_id: vlan of the GW, None in case of no vlan used.
"""
try:
ip_addr = ipaddress.ip_address(ip)
except ValueError:
logging.debug("could not parse GW IP: %s", ip)
return
gw_ip = IPAddress(version=IPAddress.IPV4,
address=ip_addr.packed)
# keep mac address same if its same GW IP
vlan_key = _get_vlan_key(vlan_id)
if vlan_key in self._backing_map:
gw_info = self._backing_map[vlan_key]
if gw_info and gw_info.ip == gw_ip:
logging.debug("GW update: no change %s", ip)
return
updated_info = GWInfo(ip=gw_ip, mac="", vlan=vlan_key)
self._backing_map[vlan_key] = updated_info
logging.info("GW update: GW IP[%s]: %s" % (vlan_key, ip))
def get_gw_mac(self, vlan_id: Optional[str] = None) -> Optional[str]:
"""
Retrieve Mac address of default gw.
Args:
vlan_id: vlan of the gw, None if GW is not in a vlan.
"""
vlan_key = _get_vlan_key(vlan_id)
if vlan_key in self._backing_map:
return self._backing_map.get(vlan_key).mac
else:
return None
def update_mac(self, ip: Optional[str], mac: Optional[str], vlan_id=None):
"""
Update mac address of GW in mobilityD GW table
Args:
ip: gw ip address.
vlan_id: Vlan of the gw.
mac: mac address of the GW.
"""
try:
ip_addr = ipaddress.ip_address(ip)
except ValueError:
logging.debug("could not parse GW IP: %s", ip)
return
vlan_key = _get_vlan_key(vlan_id)
# TODO: enhance check for MAC address sanity.
if mac is None or ':' not in mac:
logging.error("Incorrect mac format: %s for IP %s (vlan_key %s)",
mac, ip, vlan_id)
return
gw_ip = IPAddress(version=IPAddress.IPV4,
address=ip_addr.packed)
updated_info = GWInfo(ip=gw_ip, mac=mac, vlan=vlan_key)
self._backing_map[vlan_key] = updated_info
logging.info("GW update: GW IP[%s]: %s : mac %s" % (vlan_key, ip, mac))
def get_all_router_ips(self) -> List[GWInfo]:
return list(self._backing_map.values())
class InvalidVlanId(Exception):
pass
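if __name__ == '__main__':
    # Hedged usage sketch (editor addition): exercises UplinkGatewayInfo with a
    # plain dict as the backing map; the IP, MAC and VLAN values are made up.
    gw_table = UplinkGatewayInfo({})
    gw_table.update_ip("10.0.2.1")
    gw_table.update_mac("10.0.2.1", "02:42:ac:11:00:02", vlan_id="100")
    print(gw_table.get_gw_ip())        # "10.0.2.1" (stored under NO_VLAN)
    print(gw_table.get_gw_mac("100"))  # "02:42:ac:11:00:02"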
| 33.136986
| 79
| 0.603969
|
e67714cd02fe965621861960d6d34a83e5c6274f
| 2,004
|
py
|
Python
|
Python Programs/imdbscrapper.py
|
Chibi-Shem/Hacktoberfest2020-Expert
|
324843464aec039e130e85a16e74b76d310f1497
|
[
"MIT"
] | 77
|
2020-10-01T10:06:59.000Z
|
2021-11-08T08:57:18.000Z
|
Python Programs/imdbscrapper.py
|
Chibi-Shem/Hacktoberfest2020-Expert
|
324843464aec039e130e85a16e74b76d310f1497
|
[
"MIT"
] | 46
|
2020-09-27T04:55:36.000Z
|
2021-05-14T18:49:06.000Z
|
Python Programs/imdbscrapper.py
|
Chibi-Shem/Hacktoberfest2020-Expert
|
324843464aec039e130e85a16e74b76d310f1497
|
[
"MIT"
] | 327
|
2020-09-26T17:06:03.000Z
|
2021-10-09T06:04:39.000Z
|
from bs4 import BeautifulSoup
import urllib.request as req
from tabulate import tabulate
def getResponse(url):
response = req.urlopen(url)
data = response.read()
soup = BeautifulSoup(data, "lxml")
#print(soup.prettify("utf-8"))
return soup
def selectChoice():
'''options = {
1: ('top'),
2: ('moviemeter'),
3: ('top-english-movies'),
4: ('toptv'),
5: ('tvmeter'),
6: ('bottom'),
7: ('boxoffice')
}
'''
options_map = {
1: ('Top movies' , 'top'),
2: ('Most Popular Movies' , 'moviemeter'),
3: ('Top English Movies' , 'top-english-movies'),
4: ('Top TV Shows' , 'toptv'),
5: ('Most Popular TV Shows' , 'tvmeter'),
6: ('Low Rated Movies', 'bottom'),
7: ('Top Box Office collection', 'boxoffice')
}
for i,option in enumerate(options_map,1):
print("{}) {}".format(i,options_map[option][0]))
choice = int(input('\nChoice please..\n'))
while(choice<1 or choice>len(options_map)):
print('Wrong choice, enter again..')
choice = int(input('\nChoice please..\n'))
return options_map[choice][1]
def getData(base_url, option):
complete_url = base_url + option
soup = getResponse(complete_url)
card_list = soup.find_all('span',{'class':'media-body media-vertical-align'}) #material card list
result = []
count = 1
for card in card_list:
try:
name = card.find('h4').text.replace("\n"," ").lstrip("0123456789.- ") #removes order indexes for movies 1,2,3,4,...
except:
pass
try:
rating = card.find('p').text.strip()
except:
pass
result.append([count,name,rating])
count += 1
print(tabulate(result, headers=["Index", "Name", "Ratings"], tablefmt="grid"))
def main():
base_url = "http://m.imdb.com/chart/"
choice = selectChoice()
#print(choice)
getData(base_url, choice)
if __name__ == '__main__':
main()
'''
#table formats
- "plain"
- "simple"
- "grid"
- "fancy_grid"
- "pipe"
- "orgtbl"
- "jira"
- "presto"
- "psql"
- "rst"
- "mediawiki"
- "moinmoin"
- "youtrack"
- "html"
- "latex"
- "latex_raw"
- "latex_booktabs"
- "textile"
'''
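if __name__ == '__main__':
    # Hedged example (editor addition): a minimal tabulate call using one of the
    # formats listed above; the row is made-up sample data.
    sample = [[1, "The Shawshank Redemption", "9.3"]]
    print(tabulate(sample, headers=["Index", "Name", "Ratings"], tablefmt="fancy_grid"))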
| 21.782609
| 119
| 0.637226
|
8f3e1d9098981dfccab30ad1e64e79c3e8a8960f
| 2,307
|
py
|
Python
|
python/tests/ParserTest.py
|
MindongLab/libhokchew
|
7e5b2e11377ec57bf9a5bb8bb66be6d99237640c
|
[
"MIT"
] | 1
|
2020-07-23T14:18:25.000Z
|
2020-07-23T14:18:25.000Z
|
python/tests/ParserTest.py
|
MindongLab/libhokchew
|
7e5b2e11377ec57bf9a5bb8bb66be6d99237640c
|
[
"MIT"
] | 6
|
2020-04-25T05:20:04.000Z
|
2021-08-30T09:08:50.000Z
|
python/tests/ParserTest.py
|
MindongLab/libhokchew
|
7e5b2e11377ec57bf9a5bb8bb66be6d99237640c
|
[
"MIT"
] | null | null | null |
import unittest
from python.pyhokchew.models.FoochowRomanized import FoochowRomanizedSyllable
from python.pyhokchew.parser import parse_ciklin
from python.pyhokchew.utils import normalise, denormalise
from python.pyhokchew.convert import foochow_romanized_to_ciklin, ciklin_to_foochow_romanized
class ParserTestCase(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_foochow_romanized(self):
"""測試基本的 Foochow Romanized 解析."""
fr = FoochowRomanizedSyllable.from_string('sĭng')
self.assertEqual(fr.get_initial(),'s')
self.assertEqual(fr.get_final_without_tone(),'ing')
self.assertEqual(fr.get_tone(), 1)
def test_foochow_romanized_capital(self):
"""Ensures that we can parse Foochow Romanized syllables with capital letters."""
fr = FoochowRomanizedSyllable.from_string('Sĭng')
self.assertEqual(fr.get_initial(),'s')
self.assertEqual(fr.get_final_without_tone(),'ing')
self.assertEqual(fr.get_tone(), 1)
fr = FoochowRomanizedSyllable.from_string('À̤')
self.assertEqual(fr.get_initial(),'')
self.assertEqual(fr.get_final_without_tone(),'a̤')
self.assertEqual(fr.get_tone(), 5)
def test_interop(self):
"""Ensures round-trip compatibility between FoochowRomanizedSyllable and CikLinSyllable."""
cases = ['góng', 'hióng', 'cê', 'gă', 'gì', 'dĭ', 'sék', 'báik', 'gōng', 'biêng']
for c in cases:
parsed = FoochowRomanizedSyllable.from_string(c)
converted = ciklin_to_foochow_romanized(foochow_romanized_to_ciklin(parsed))
self.assertEqual(normalise(c), converted.get_string())
def test_foochow_romanized_ingbing_omitting(self):
"""確保 FoochowRomanizedSyllable 可以解析省略陰平調號的音節."""
errored = False
try:
fr = FoochowRomanizedSyllable.from_string('sing')
except:
errored = True
self.assertTrue(errored)
fr = FoochowRomanizedSyllable.from_string('sing', allow_omit_ingbing = True)
self.assertEqual(fr.get_initial(),'s')
self.assertEqual(fr.get_final_without_tone(),'ing')
self.assertEqual(fr.get_tone(), 1)
def test_ciklin(self):
# self.assertEqual
pass
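if __name__ == '__main__':
    # Hedged demo (editor addition): the same parsing calls the tests above
    # exercise, runnable ad hoc; 'sĭng' is taken from test_foochow_romanized.
    fr = FoochowRomanizedSyllable.from_string('sĭng')
    print(fr.get_initial(), fr.get_final_without_tone(), fr.get_tone())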
| 39.775862
| 99
| 0.680971
|
c81d51632faa3b452462cc6cdeeaca9f81d484b7
| 1,461
|
py
|
Python
|
template/{{cookiecutter.directory_name}}/test_ai.py
|
dat-boris/playtest-deck-mechanics
|
6fbf7a068b2d8303f5ca0ed31862025fc9fa958e
|
[
"MIT"
] | 1
|
2020-03-23T13:59:30.000Z
|
2020-03-23T13:59:30.000Z
|
template/{{cookiecutter.directory_name}}/test_ai.py
|
dat-boris/playtest-deck-mechanics
|
6fbf7a068b2d8303f5ca0ed31862025fc9fa958e
|
[
"MIT"
] | null | null | null |
template/{{cookiecutter.directory_name}}/test_ai.py
|
dat-boris/playtest-deck-mechanics
|
6fbf7a068b2d8303f5ca0ed31862025fc9fa958e
|
[
"MIT"
] | null | null | null |
"""A test file to validate that training with AI will work
"""
import os
import pytest
from playtest.agents import KerasDQNAgent, train_agents
from playtest.env import GameWrapperEnvironment, EnvironmentInteration
from .game import Game
from .constants import Param
AGENT_FILENAME = "example_agent_{{cookiecutter.directory_name}}.h5f"
@pytest.fixture
def env() -> GameWrapperEnvironment:
env = GameWrapperEnvironment(Game(Param(number_of_players=4)))
return env
@pytest.mark.xfail
def test_training(env: GameWrapperEnvironment):
agents = [KerasDQNAgent(env) for _ in range(env.n_agents)]
try:
os.remove(AGENT_FILENAME)
except OSError:
pass
train_agents(env, agents, save_filenames=[AGENT_FILENAME], nb_steps=10)
assert os.path.exists(AGENT_FILENAME)
new_agent = KerasDQNAgent(env)
new_agent.load_weights(AGENT_FILENAME)
assert new_agent
@pytest.mark.xfail
def test_playing(env):
if not os.path.exists(AGENT_FILENAME):
agents = [KerasDQNAgent(env) for _ in range(env.n_agents)]
# create agent file
train_agents(env, agents, save_filenames=[AGENT_FILENAME], nb_steps=10)
else:
agents = [
KerasDQNAgent(env, weight_file=AGENT_FILENAME) for _ in range(env.n_agents)
]
# Let's play 4 rounds of game!
game = EnvironmentInteration(env, agents, rounds=4)
game.play()
state = game.env.game.s
assert True, "Game exists"
| 26.089286
| 87
| 0.718001
|
3f113d014312dfc64f30025d344911d01dcbbf04
| 758
|
py
|
Python
|
ccal/make_match_panel_annotations.py
|
alex-wenzel/ccal
|
74dfc604d93e6ce9e12f34a828b601618df51faa
|
[
"MIT"
] | null | null | null |
ccal/make_match_panel_annotations.py
|
alex-wenzel/ccal
|
74dfc604d93e6ce9e12f34a828b601618df51faa
|
[
"MIT"
] | null | null | null |
ccal/make_match_panel_annotations.py
|
alex-wenzel/ccal
|
74dfc604d93e6ce9e12f34a828b601618df51faa
|
[
"MIT"
] | null | null | null |
from pandas import DataFrame
def make_match_panel_annotations(score_moe_p_value_fdr):
annotations = DataFrame(index=score_moe_p_value_fdr.index)
if score_moe_p_value_fdr["0.95 MoE"].isna().all():
annotations["Score"] = score_moe_p_value_fdr["Score"].apply("{:.2f}".format)
else:
annotations["Score(\u0394)"] = score_moe_p_value_fdr[
["Score", "0.95 MoE"]
].apply(lambda score_moe: "{:.2f}({:.2f})".format(*score_moe), axis=1)
if not score_moe_p_value_fdr["P-Value"].isna().all():
function = "{:.2e}".format
annotations["P-Value"] = score_moe_p_value_fdr["P-Value"].apply(function)
annotations["FDR"] = score_moe_p_value_fdr["FDR"].apply(function)
return annotations
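if __name__ == '__main__':
    # Hedged usage sketch (editor addition): a tiny score table with made-up
    # values, just to show the expected column names and the resulting strings.
    demo = DataFrame(
        {
            "Score": [0.51, -0.23],
            "0.95 MoE": [0.10, 0.05],
            "P-Value": [0.012, 0.34],
            "FDR": [0.02, 0.40],
        },
        index=["feature_a", "feature_b"],
    )
    print(make_match_panel_annotations(demo))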
| 28.074074
| 84
| 0.659631
|
70631f3adf7f1d7f68b58d3ece8584a87dc9873f
| 891
|
py
|
Python
|
webdriver_wharf/cli.py
|
mshriver/webdriver-wharf
|
d02091805dea2830a0fcd1044ba51339cafa453a
|
[
"MIT"
] | 4
|
2017-12-21T10:30:24.000Z
|
2021-11-27T13:58:55.000Z
|
webdriver_wharf/cli.py
|
mshriver/webdriver-wharf
|
d02091805dea2830a0fcd1044ba51339cafa453a
|
[
"MIT"
] | 13
|
2017-05-18T14:58:45.000Z
|
2019-07-02T20:52:59.000Z
|
webdriver_wharf/cli.py
|
mshriver/webdriver-wharf
|
d02091805dea2830a0fcd1044ba51339cafa453a
|
[
"MIT"
] | 4
|
2017-05-18T15:46:21.000Z
|
2020-03-17T14:21:56.000Z
|
import logging
import os
import signal
import waitress
from webdriver_wharf import app, logging_init
logger = logging.getLogger(__name__)
loglevel = getattr(
logging, os.environ.get("WEBDRIVER_WHARF_LOG_LEVEL", "info").upper(), "INFO"
)
listen_host = os.environ.get("WEBDRIVER_WHARF_LISTEN_HOST", "0.0.0.0")
listen_port = int(os.environ.get("WEBDRIVER_WHARF_LISTEN_PORT", 4899))
def handle_hup(signum, stackframe):
app.pull_latest_image.trigger()
signal.signal(signal.SIGHUP, handle_hup)
def main():
# TODO: Centralized config would be nice, bring in argparse or something that already
# handles envvars. Also expose interactions.destroy_all somehow, so wharf can clean
# up after itself when asked
logging_init(loglevel)
app.application.try_trigger_before_first_request_functions()
waitress.serve(app.application, host=listen_host, port=listen_port)
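# Note (editor addition): handle_hup above is registered for SIGHUP, so sending
# SIGHUP to a running wharf process (e.g. `kill -HUP <pid>`) triggers
# app.pull_latest_image.trigger() without restarting the server.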
| 29.7
| 89
| 0.774411
|
56ba01ad344ad1dd746064583819e9142327b833
| 189
|
py
|
Python
|
bindings/python/inekf/__init__.py
|
mayataka/invariant-ekf
|
775d9ab5ac7599fe2fd983b8a907c241c7d3a8e0
|
[
"BSD-3-Clause"
] | 1
|
2022-03-28T12:38:09.000Z
|
2022-03-28T12:38:09.000Z
|
bindings/python/inekf/__init__.py
|
mayataka/inekf
|
775d9ab5ac7599fe2fd983b8a907c241c7d3a8e0
|
[
"BSD-3-Clause"
] | null | null | null |
bindings/python/inekf/__init__.py
|
mayataka/inekf
|
775d9ab5ac7599fe2fd983b8a907c241c7d3a8e0
|
[
"BSD-3-Clause"
] | null | null | null |
from .robot_model import *
from .schmitt_trigger import *
from .contact_estimator import *
from .state_estimator_settings import *
from .state_estimator import *
from .noise_params import *
| 31.5
| 39
| 0.814815
|
80472adb206809f41cf80d23b6c301aca041b7c4
| 6,345
|
py
|
Python
|
server/db/__init__.py
|
V2Associate/pilotassist
|
2a6ecef54a811d9a7a00b7808a76d53344a9ef65
|
[
"MIT"
] | null | null | null |
server/db/__init__.py
|
V2Associate/pilotassist
|
2a6ecef54a811d9a7a00b7808a76d53344a9ef65
|
[
"MIT"
] | null | null | null |
server/db/__init__.py
|
V2Associate/pilotassist
|
2a6ecef54a811d9a7a00b7808a76d53344a9ef65
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
import pymysql
import time
from server.common import Roster, Trip
from server.db.db_connection import DBConnection
from server.common import get_date_as_timestamp
HOSTNAME = "localhost"
# TODO: Need to create a mysql user
USERNAME = "root"
PASSWORD = ""
DB_NAME = "pilotassist"
CHARSET = "utf8mb4"
connection = DBConnection(
database=DB_NAME, username=USERNAME, password=PASSWORD, server=HOSTNAME)
# TODO does changing this to class will make things better
ALL_DB_PARAMS = {
"TABLE_ROUTE": "route",
"TABLE_MEMBER": "member",
"TABLE_TRIP_DETAILS": "trip_details",
"COL_ROUTE_NAME": "route_name",
"COL_ROUTE_SOURCE": "source",
"COL_ROUTE_DESTINATION": "destination",
"COL_DEPARTURE_TIME": "departure_time",
"COL_ARRIVAL_TIME": "arrival_time",
"COL_FLEW_DATE": "flew_date",
"COL_AIRCRAFT_MAKE": "aircraft_make",
"COL_ACTUAL_DEPARTURE_TIME": "actual_departure_time",
"COL_ACTUAL_ARRIVAL_TIME": "actual_arrival_time",
"COL_INSTRUMENT_HRS": "instrument_hrs",
"COL_NIGHT_HRS": "night_hrs",
"COL_MEMBER_ID": "member_id",
"COL_ROUTE_ID": "route_id",
"COL_MEMBER_NAME": "name",
"COL_ID": "id",
}
# select aircraft_make, flew_date, actual_departure_time, actual_arrival_time, instrument_hrs, night_hrs, route_name, source, destination, departure_time, arrival_time, name from trip_details INNER JOIN route ON (trip_details.route_id = route.id) INNER JOIN member on trip_details.member_id = member.id where member_id=1;
QUERY_GET_ROSTER_FOR_DATE = "select {COL_AIRCRAFT_MAKE}, {COL_FLEW_DATE}, {COL_ACTUAL_DEPARTURE_TIME}, {COL_ACTUAL_ARRIVAL_TIME}, {COL_INSTRUMENT_HRS}, {COL_NIGHT_HRS}, {COL_ROUTE_NAME}, {COL_ROUTE_SOURCE}, {COL_ROUTE_DESTINATION}, {COL_DEPARTURE_TIME}, {COL_ARRIVAL_TIME}, {COL_MEMBER_NAME} from {TABLE_TRIP_DETAILS} INNER JOIN {TABLE_ROUTE} ON ({TABLE_TRIP_DETAILS}.{COL_ROUTE_ID} = {TABLE_ROUTE}.{COL_ID}) INNER JOIN {TABLE_MEMBER} on {TABLE_TRIP_DETAILS}.{COL_MEMBER_ID} = {TABLE_MEMBER}.{COL_ID} where {COL_MEMBER_ID}={MEMBER_ID} and {COL_FLEW_DATE} between {DEPARTURE_START_TIME} and {DEPARTURE_END_TIME}"
QUERY_DELETE_TRIP_FROM_ROSTER = "delete from {TABLE_TRIP_DETAILS} where {COL_MEMBER_ID}={MEMBER_ID} and {COL_FLEW_DATE}={FLEW_DATE} and {COL_ROUTE_ID} in (select {COL_ID} from {TABLE_ROUTE} where {COL_ROUTE_NAME}='{ROUTE_NAME}')"
QUERY_ADD_TRIP_TO_ROSTER = "insert into {TABLE_TRIP_DETAILS}({COL_FLEW_DATE}, {COL_AIRCRAFT_MAKE}, {COL_ACTUAL_DEPARTURE_TIME}, {COL_ACTUAL_ARRIVAL_TIME}, {COL_MEMBER_ID}, {COL_ROUTE_ID}) SELECT {FLEW_DATE}, '{AIRCRAFT_MAKE}', {ACTUAL_DEPARTURE_TIME}, {ACTUAL_ARRIVAL_TIME}, {MEMBER_ID}, {COL_ID} from {TABLE_ROUTE} where {COL_ROUTE_NAME}='{ROUTE_NAME}';"
SECONDS_IN_A_DAY = 24 * 60 * 60
class DB:
def __init__(self):
pass
"""
Gets the roster for the passed-in times. Times should be Unix timestamps.
If no start_time is given, it defaults to 24 hours before now;
if no end_time is given, it defaults to start_time plus 24 hours.
"""
def get_roster_details(self, member_id, start_time=None, end_time=None):
start_time = int(
start_time) if start_time else self.get_current_time() - SECONDS_IN_A_DAY
end_time = int(end_time) if end_time else start_time + SECONDS_IN_A_DAY
arguments = ALL_DB_PARAMS
arguments.update(
{"DEPARTURE_START_TIME": start_time, "DEPARTURE_END_TIME": end_time, "MEMBER_ID": member_id})
print "query ", QUERY_GET_ROSTER_FOR_DATE.format(**arguments)
trips = connection.query_db(
QUERY_GET_ROSTER_FOR_DATE.format(**arguments))
print "Trips is", trips
roster = Roster()
roster.add_trips(trips)
return roster
def delete_trip_from_roster(self, member_id, date, flight_number):
arguments = ALL_DB_PARAMS
arguments.update(
{"FLEW_DATE": date, "ROUTE_NAME": flight_number, "MEMBER_ID": member_id})
print "query", QUERY_DELETE_TRIP_FROM_ROSTER.format(**arguments)
rows_delted = connection.execute(
QUERY_DELETE_TRIP_FROM_ROSTER.format(**arguments))
print "Number of rows deleted ", rows_delted
return rows_delted
def add_trip_to_roster(self, trip):
# TODO: AI-777 will be flying from BLR to HYD, then HYD to DEL. SO flight_number is not unique
# Need to fix this
arguments = ALL_DB_PARAMS
date_as_timestamp = get_date_as_timestamp(trip['departureTime'])
arguments.update(
{"FLEW_DATE": date_as_timestamp, "AIRCRAFT_MAKE": trip['flightNumber'], "ACTUAL_DEPARTURE_TIME": trip['departureTime'],
"ACTUAL_ARRIVAL_TIME": trip['arrivalTime'], "MEMBER_ID": 1, "ROUTE_NAME": trip['flightNumber']}
)
print "query", QUERY_ADD_TRIP_TO_ROSTER.format(**arguments)
rows_added = connection.execute(
QUERY_ADD_TRIP_TO_ROSTER.format(**arguments))
print "Number of rows added ", rows_added
return rows_added
def get_current_time(self):
return int(time.time())
def to_trip(self, trip):
print trip
return Trip(flight_number=trip[ALL_DB_PARAMS["COL_ROUTE_NAME"]], departure=trip[ALL_DB_PARAMS["COL_ROUTE_SOURCE"]],
arrival=trip[ALL_DB_PARAMS["COL_ROUTE_DESTINATION"]
], departure_time=trip[ALL_DB_PARAMS["COL_DEPARTURE_TIME"]],
arrival_time=trip[ALL_DB_PARAMS["COL_ARRIVAL_TIME"]])
# db = DB()
# db.get_roster_details()
# update trip_details set actual_departure_time=1519056000,actual_arrival_time=1519056000 where actual_departure_time=1600 and actual_arrival_time=1700
# select aircraft_make, flew_date, actual_departure_time, actual_arrival_time, instrument_hrs, night_hrs, route_name, source, destination, departure_time, arrival_time, name from trip_details INNER JOIN route ON (trip_details.route_id = route.id) INNER JOIN member on trip_details.member_id = member.id where member_id=1;
# select aircraft_make, flew_date, actual_departure_time, actual_arrival_time, instrument_hrs, night_hrs, route_name, source, destination, departure_time, arrival_time, name from trip_details INNER JOIN route ON(trip_details.route_id=route.id) INNER JOIN member on trip_details.member_id = member.id where member_id = 1 and actual_departure_time between 1518998400 and 1519084740
| 54.230769
| 612
| 0.733018
|
a8420ad36c7180b3c496e1ccad3926ab53371d01
| 9,411
|
py
|
Python
|
tests/unit/common/services/identity/test_identity.py
|
jogeo/rally-openstack
|
83437e7c5925d5d647cd28f1821b6d51687b0123
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/common/services/identity/test_identity.py
|
jogeo/rally-openstack
|
83437e7c5925d5d647cd28f1821b6d51687b0123
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/common/services/identity/test_identity.py
|
jogeo/rally-openstack
|
83437e7c5925d5d647cd28f1821b6d51687b0123
|
[
"Apache-2.0"
] | 1
|
2021-08-10T03:11:51.000Z
|
2021-08-10T03:11:51.000Z
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
import ddt
from rally_openstack.common.services.identity import identity
from tests.unit import test
@ddt.ddt
class IdentityTestCase(test.TestCase):
def setUp(self):
super(IdentityTestCase, self).setUp()
self.clients = mock.MagicMock()
def get_service_with_fake_impl(self):
path = "rally_openstack.common.services.identity.identity"
with mock.patch("%s.Identity.discover_impl" % path) as mock_discover:
mock_discover.return_value = mock.MagicMock(), None
service = identity.Identity(self.clients)
return service
def test_create_project(self):
service = self.get_service_with_fake_impl()
project_name = "name"
domain_name = "domain"
service.create_project(project_name, domain_name=domain_name)
service._impl.create_project.assert_called_once_with(
project_name, domain_name=domain_name)
def test_update_project(self):
service = self.get_service_with_fake_impl()
project_id = "id"
project_name = "name"
description = "descr"
enabled = False
service.update_project(project_id=project_id, name=project_name,
description=description, enabled=enabled)
service._impl.update_project.assert_called_once_with(
project_id, name=project_name, description=description,
enabled=enabled)
def test_delete_project(self):
service = self.get_service_with_fake_impl()
project = "id"
service.delete_project(project)
service._impl.delete_project.assert_called_once_with(project)
def test_list_projects(self):
service = self.get_service_with_fake_impl()
service.list_projects()
service._impl.list_projects.assert_called_once_with()
def test_get_project(self):
service = self.get_service_with_fake_impl()
project = "id"
service.get_project(project)
service._impl.get_project.assert_called_once_with(project)
def test_create_user(self):
service = self.get_service_with_fake_impl()
username = "username"
password = "password"
project_id = "project_id"
domain_name = "domain_name"
service.create_user(username=username, password=password,
project_id=project_id, domain_name=domain_name)
service._impl.create_user.assert_called_once_with(
username=username, password=password, project_id=project_id,
domain_name=domain_name, default_role="member")
def test_create_users(self):
service = self.get_service_with_fake_impl()
project_id = "project_id"
n = 3
user_create_args = {}
service.create_users(project_id, number_of_users=n,
user_create_args=user_create_args)
service._impl.create_users.assert_called_once_with(
project_id, number_of_users=n, user_create_args=user_create_args)
def test_delete_user(self):
service = self.get_service_with_fake_impl()
user_id = "fake_id"
service.delete_user(user_id)
service._impl.delete_user.assert_called_once_with(user_id)
def test_list_users(self):
service = self.get_service_with_fake_impl()
service.list_users()
service._impl.list_users.assert_called_once_with()
def test_update_user(self):
service = self.get_service_with_fake_impl()
user_id = "id"
user_name = "name"
email = "mail"
password = "pass"
enabled = False
service.update_user(user_id, name=user_name, password=password,
email=email, enabled=enabled)
service._impl.update_user.assert_called_once_with(
user_id, name=user_name, password=password, email=email,
enabled=enabled)
def test_get_user(self):
service = self.get_service_with_fake_impl()
user = "id"
service.get_user(user)
service._impl.get_user.assert_called_once_with(user)
def test_create_service(self):
service = self.get_service_with_fake_impl()
service_name = "name"
service_type = "service_type"
description = "descr"
service.create_service(service_name, service_type=service_type,
description=description)
service._impl.create_service.assert_called_once_with(
name=service_name, service_type=service_type,
description=description)
def test_delete_service(self):
service = self.get_service_with_fake_impl()
service_id = "id"
service.delete_service(service_id)
service._impl.delete_service.assert_called_once_with(service_id)
def test_list_services(self):
service = self.get_service_with_fake_impl()
service.list_services()
service._impl.list_services.assert_called_once_with()
def test_get_service(self):
service = self.get_service_with_fake_impl()
service_id = "id"
service.get_service(service_id)
service._impl.get_service.assert_called_once_with(service_id)
def test_get_service_by_name(self):
service = self.get_service_with_fake_impl()
service_name = "name"
service.get_service_by_name(service_name)
service._impl.get_service_by_name.assert_called_once_with(service_name)
def test_create_role(self):
service = self.get_service_with_fake_impl()
name = "name"
service.create_role(name)
service._impl.create_role.assert_called_once_with(
name=name, domain_name=None)
def test_add_role(self):
service = self.get_service_with_fake_impl()
role_id = "id"
user_id = "user_id"
project_id = "project_id"
service.add_role(role_id, user_id=user_id, project_id=project_id)
service._impl.add_role.assert_called_once_with(role_id=role_id,
user_id=user_id,
project_id=project_id)
def test_delete_role(self):
service = self.get_service_with_fake_impl()
role = "id"
service.delete_role(role)
service._impl.delete_role.assert_called_once_with(role)
def test_revoke_role(self):
service = self.get_service_with_fake_impl()
role_id = "id"
user_id = "user_id"
project_id = "project_id"
service.revoke_role(role_id, user_id=user_id, project_id=project_id)
service._impl.revoke_role.assert_called_once_with(
role_id=role_id, user_id=user_id, project_id=project_id)
@ddt.data((None, None, None), ("user_id", "project_id", "domain"))
def test_list_roles(self, params):
user, project, domain = params
service = self.get_service_with_fake_impl()
service.list_roles(user_id=user, project_id=project,
domain_name=domain)
service._impl.list_roles.assert_called_once_with(user_id=user,
project_id=project,
domain_name=domain)
def test_get_role(self):
service = self.get_service_with_fake_impl()
role = "id"
service.get_role(role)
service._impl.get_role.assert_called_once_with(role)
def test_create_ec2credentials(self):
service = self.get_service_with_fake_impl()
user_id = "id"
project_id = "project-id"
service.create_ec2credentials(user_id=user_id, project_id=project_id)
service._impl.create_ec2credentials.assert_called_once_with(
user_id=user_id, project_id=project_id)
def test_list_ec2credentials(self):
service = self.get_service_with_fake_impl()
user_id = "id"
service.list_ec2credentials(user_id=user_id)
service._impl.list_ec2credentials.assert_called_once_with(user_id)
def test_delete_ec2credential(self):
service = self.get_service_with_fake_impl()
user_id = "id"
access = "access"
service.delete_ec2credential(user_id=user_id, access=access)
service._impl.delete_ec2credential.assert_called_once_with(
user_id=user_id, access=access)
def test_fetch_token(self):
service = self.get_service_with_fake_impl()
service.fetch_token()
service._impl.fetch_token.assert_called_once_with()
def test_validate_token(self):
service = self.get_service_with_fake_impl()
token = "id"
service.validate_token(token)
service._impl.validate_token.assert_called_once_with(token)
| 36.335907
| 79
| 0.668686
|
d9361f79f3b1950d13b89c4cb116e9c2d6f73ebf
| 548
|
py
|
Python
|
matplotlib/piechart001.py
|
bismog/leetcode
|
13b8a77045f96e7c59ddfe287481f6aaa68e564d
|
[
"MIT"
] | null | null | null |
matplotlib/piechart001.py
|
bismog/leetcode
|
13b8a77045f96e7c59ddfe287481f6aaa68e564d
|
[
"MIT"
] | null | null | null |
matplotlib/piechart001.py
|
bismog/leetcode
|
13b8a77045f96e7c59ddfe287481f6aaa68e564d
|
[
"MIT"
] | 1
|
2018-08-17T07:07:15.000Z
|
2018-08-17T07:07:15.000Z
|
#!/usr/bin/env python
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
plt.style.use('classic')
fig = plt.figure()
days = [1,2,3,4,5]
sleeping =[7,8,6,11,7]
eating = [2,3,4,3,2]
working =[7,8,7,2,2]
playing = [8,5,7,8,13]
slices = [7,2,2,13]
activities = ['sleeping','eating','working','playing']
cols = ['c','m','r','b']
plt.pie(slices,
labels=activities,
colors=cols,
startangle=90,
shadow= True,
explode=(0,0.1,0,0),
autopct='%1.1f%%')
plt.title('Pie Plot')
fig.savefig('/home/tmp/piechart001.png')
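# Note (editor addition): explode=(0,0.1,0,0) offsets the second wedge ('eating')
# slightly from the centre, and autopct='%1.1f%%' labels each wedge with its
# percentage to one decimal place.  With the 'Agg' backend used above the figure
# is only written to disk, not shown on screen.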
| 17.125
| 54
| 0.631387
|
21c6111119a57a43d62f430f6f65b1c733f222e9
| 352
|
py
|
Python
|
netbox_bgp/migrations/0005_netbox_bgp.py
|
liquid-metal/netbox-bgp
|
cdca172527370bc05603962384618feebfd09b8e
|
[
"Apache-2.0"
] | 79
|
2021-04-07T14:14:54.000Z
|
2022-03-22T10:42:15.000Z
|
netbox_bgp/migrations/0005_netbox_bgp.py
|
liquid-metal/netbox-bgp
|
cdca172527370bc05603962384618feebfd09b8e
|
[
"Apache-2.0"
] | 65
|
2021-04-14T09:29:00.000Z
|
2022-03-05T23:18:58.000Z
|
netbox_bgp/migrations/0005_netbox_bgp.py
|
liquid-metal/netbox-bgp
|
cdca172527370bc05603962384618feebfd09b8e
|
[
"Apache-2.0"
] | 17
|
2021-04-14T09:29:23.000Z
|
2022-03-16T18:58:28.000Z
|
# Generated by Django 3.1.3 on 2021-04-07 10:56
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('netbox_bgp', '0004_netbox_bgp'),
]
operations = [
migrations.AlterModelOptions(
name='asn',
options={'verbose_name_plural': 'AS Numbers'},
),
]
| 19.555556
| 58
| 0.599432
|
ca0ba06bad8da81146129ee3cf81ee5ef4b07748
| 7,832
|
py
|
Python
|
contrib/bitrpc/bitrpc.py
|
x805/ONEA
|
e5579bb689dfd58e78fafccac4739254db56099d
|
[
"MIT"
] | null | null | null |
contrib/bitrpc/bitrpc.py
|
x805/ONEA
|
e5579bb689dfd58e78fafccac4739254db56099d
|
[
"MIT"
] | null | null | null |
contrib/bitrpc/bitrpc.py
|
x805/ONEA
|
e5579bb689dfd58e78fafccac4739254db56099d
|
[
"MIT"
] | null | null | null |
from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:21514")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:21514")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a ONEA address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a ONEA address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.getwork(data)
except:
print access.getwork()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
| 24.098462
| 79
| 0.668029
|
614b3ac2172b3e45aa8d235138439b43ab947064
| 9,437
|
py
|
Python
|
tests/interfaces_t/test_siteadapter.py
|
maxfischer2781/tardis
|
a83ba0a02d2f153a8ab95b84ec78bc6ababa57a5
|
[
"MIT"
] | 11
|
2019-06-06T14:44:56.000Z
|
2021-12-17T19:46:18.000Z
|
tests/interfaces_t/test_siteadapter.py
|
stefan-k/tardis
|
d26af13b56ab55cdbddd27ec3332cc74093d487a
|
[
"MIT"
] | 165
|
2019-04-26T09:31:19.000Z
|
2022-03-09T16:45:45.000Z
|
tests/interfaces_t/test_siteadapter.py
|
stefan-k/tardis
|
d26af13b56ab55cdbddd27ec3332cc74093d487a
|
[
"MIT"
] | 12
|
2019-06-06T14:06:15.000Z
|
2021-12-21T12:31:03.000Z
|
from tardis.interfaces.siteadapter import SiteAdapter
from tardis.utilities.attributedict import AttributeDict
from ..utilities.utilities import run_async
from cobald.utility.primitives import infinity as inf
from unittest import TestCase
from unittest.mock import patch
from pydantic.error_wrappers import ValidationError
import logging
class TestSiteAdapter(TestCase):
mock_config_patcher = None
@classmethod
def setUpClass(cls):
cls.mock_config_patcher = patch("tardis.interfaces.siteadapter.Configuration")
cls.mock_config = cls.mock_config_patcher.start()
@classmethod
def tearDownClass(cls):
cls.mock_config_patcher.stop()
@patch.multiple(SiteAdapter, __abstractmethods__=set())
def setUp(self) -> None:
self.config = self.mock_config.return_value
self.config.Sites = [
AttributeDict(name="TestSite", adapter="TestSite", quota=1)
]
self.config.TestSite = AttributeDict(
MachineTypes=["TestMachineType"],
MachineMetaData=AttributeDict(
TestMachineType=AttributeDict(Cores=128, Memory=512, Disk=100)
),
MachineTypeConfiguration=AttributeDict(
TestMachineType=AttributeDict(test_id="abc123")
),
)
self.site_adapter = SiteAdapter()
self.site_adapter._site_name = "TestSite"
self.site_adapter._machine_type = "TestMachineType"
def test_configuration(self):
self.assertEqual(self.site_adapter.configuration, self.config.TestSite)
def test_deploy_resource(self):
with self.assertRaises(NotImplementedError):
run_async(self.site_adapter.deploy_resource, dict())
def test_drone_environment(self):
self.site_adapter._machine_type = "TestMachineType"
self.assertEqual(
AttributeDict(Cores=128, Memory=524288, Disk=104857600, Uuid="test-123"),
self.site_adapter.drone_environment(
drone_uuid="test-123",
meta_data_translation_mapping=AttributeDict(
Cores=1,
Memory=1024,
Disk=1024 * 1024,
),
),
)
with self.assertLogs(
logger="cobald.runtime.tardis.utilities.utils", level=logging.CRITICAL
), self.assertRaises(KeyError):
self.site_adapter.drone_environment(
drone_uuid="test-123",
meta_data_translation_mapping=AttributeDict(
Memory=1024,
Disk=1024 * 1024,
),
)
def test_drone_heartbeat_interval(self):
self.assertEqual(self.site_adapter.drone_heartbeat_interval, 60)
# lru_cache needs to be cleared before manipulating site configuration
# noinspection PyUnresolvedReferences
SiteAdapter.site_configuration.fget.cache_clear()
self.config.Sites[0]["drone_heartbeat_interval"] = 10
self.assertEqual(self.site_adapter.drone_heartbeat_interval, 10)
# noinspection PyUnresolvedReferences
SiteAdapter.site_configuration.fget.cache_clear()
self.config.Sites[0]["drone_heartbeat_interval"] = -1
with self.assertRaises(ValidationError):
# noinspection PyStatementEffect
self.site_adapter.drone_heartbeat_interval
def test_drone_minimum_lifetime(self):
self.assertEqual(self.site_adapter.drone_minimum_lifetime, None)
# lru_cache needs to be cleared before manipulating site configuration
# noinspection PyUnresolvedReferences
SiteAdapter.site_configuration.fget.cache_clear()
self.config.Sites[0]["drone_minimum_lifetime"] = 10
self.assertEqual(self.site_adapter.drone_minimum_lifetime, 10)
# noinspection PyUnresolvedReferences
SiteAdapter.site_configuration.fget.cache_clear()
self.config.Sites[0]["drone_minimum_lifetime"] = -1
with self.assertRaises(ValidationError):
# noinspection PyStatementEffect
self.site_adapter.drone_minimum_lifetime
def test_drone_uuid(self):
self.assertEqual(
"testsite-test123", self.site_adapter.drone_uuid(uuid="test123")
)
def test_handle_exception(self):
with self.assertRaises(NotImplementedError):
self.site_adapter.handle_exceptions()
def test_handle_response_matching(self):
test_response = {"test": 123}
test_key_translator = {"new_test": "test"}
test_translator_functions = {"test": str}
self.assertEqual(
self.site_adapter.handle_response(
test_response, test_key_translator, test_translator_functions
),
AttributeDict(new_test="123"),
)
self.assertEqual(
self.site_adapter.handle_response(
test_response,
test_key_translator,
test_translator_functions,
additional="test123",
),
AttributeDict(new_test="123", additional="test123"),
)
def test_handle_response_non_matching(self):
test_response = {"other_test": 123}
test_key_translator = {"new_test": "test"}
test_translator_functions = {"test": str}
self.assertEqual(
self.site_adapter.handle_response(
test_response, test_key_translator, test_translator_functions
),
AttributeDict(),
)
self.assertEqual(
self.site_adapter.handle_response(
test_response,
test_key_translator,
test_translator_functions,
additional="test123",
),
AttributeDict(additional="test123"),
)
def test_machine_meta_data(self):
self.assertEqual(
self.site_adapter.machine_meta_data,
AttributeDict(Cores=128, Memory=512, Disk=100),
)
# noinspection PyUnresolvedReferences
del self.site_adapter._machine_type
with self.assertRaises(AttributeError):
# noinspection PyStatementEffect
self.site_adapter.machine_meta_data
def test_machine_type(self):
self.assertEqual(self.site_adapter.machine_type, "TestMachineType")
# noinspection PyUnresolvedReferences
del self.site_adapter._machine_type
with self.assertRaises(AttributeError):
# noinspection PyStatementEffect
self.site_adapter.machine_type
def test_machine_type_configuration(self):
self.assertEqual(
self.site_adapter.machine_type_configuration,
AttributeDict(test_id="abc123"),
)
# noinspection PyUnresolvedReferences
del self.site_adapter._machine_type
with self.assertRaises(AttributeError):
# noinspection PyStatementEffect
self.site_adapter.machine_type_configuration
def test_resource_status(self):
with self.assertRaises(NotImplementedError):
run_async(self.site_adapter.resource_status, dict())
def test_site_configuration(self):
self.assertEqual(
self.site_adapter.site_configuration,
AttributeDict(
name="TestSite",
adapter="TestSite",
quota=1,
drone_minimum_lifetime=None,
drone_heartbeat_interval=60,
),
)
# noinspection PyUnresolvedReferences
SiteAdapter.site_configuration.fget.cache_clear()
del self.config.Sites[0]["quota"]
self.assertEqual(
self.site_adapter.site_configuration,
AttributeDict(
name="TestSite",
adapter="TestSite",
quota=inf,
drone_minimum_lifetime=None,
drone_heartbeat_interval=60,
),
)
# noinspection PyUnresolvedReferences
SiteAdapter.site_configuration.fget.cache_clear()
self.config.Sites[0]["extra"] = "Should fail!"
with self.assertRaises(ValidationError):
self.assertEqual(
self.site_adapter.site_configuration,
AttributeDict(
name="TestSite",
adapter="TestSite",
quota=inf,
drone_minimum_lifetime=None,
drone_heartbeat_interval=60,
),
)
# noinspection PyUnresolvedReferences
SiteAdapter.site_configuration.fget.cache_clear()
self.config.Sites[0]["quota"] = 0
with self.assertRaises(ValidationError):
# noinspection PyStatementEffect
self.site_adapter.site_configuration
def test_site_name(self):
self.assertEqual(self.site_adapter.site_name, "TestSite")
del self.site_adapter._site_name
with self.assertRaises(AttributeError):
# noinspection PyStatementEffect
self.site_adapter.site_name
def test_stop_resource(self):
with self.assertRaises(NotImplementedError):
run_async(self.site_adapter.stop_resource, dict())
def test_terminate_resource(self):
with self.assertRaises(NotImplementedError):
run_async(self.site_adapter.terminate_resource, dict())
| 34.192029
| 86
| 0.63855
|
79e7290a09e4e592fb6909da99035e6ce4fa1c84
| 62,441
|
py
|
Python
|
scripts/run_conga.py
|
jeremycfd/conga
|
204bfd14cab3c4c07fd9b95d072b1b7b79c3d239
|
[
"MIT"
] | null | null | null |
scripts/run_conga.py
|
jeremycfd/conga
|
204bfd14cab3c4c07fd9b95d072b1b7b79c3d239
|
[
"MIT"
] | null | null | null |
scripts/run_conga.py
|
jeremycfd/conga
|
204bfd14cab3c4c07fd9b95d072b1b7b79c3d239
|
[
"MIT"
] | null | null | null |
######################## MAX LINE LENGTH OF ABOUT 120 ##################################################################
import argparse
parser = argparse.ArgumentParser(description="Run the CoNGA clonotype neighbor-graph analysis pipeline")
#type is str by default
parser.add_argument('--gex_data', help='Input file with the single-cell gene expression data')
parser.add_argument('--gex_data_type', choices=['h5ad', '10x_mtx', '10x_h5'],
help="""Format of the GEX input file. Options are '10x_mtx' for a 10x directory with .mtx and associated files; '10x_h5' for a 10x HDF5 formatted file; and 'h5ad' for a scanpy formatted hdf5 file""")
parser.add_argument('--clones_file', help='tsv-formatted clonotype file generated by setup_10x_for_conga.py (for example)')
parser.add_argument('--kpca_file', help='Pass filename if using a non-standard location (ie not clones_file[:-4]+\'_AB.dist_50_kpcs\')')
parser.add_argument('--organism', choices=['mouse', 'human', 'mouse_gd', 'human_gd', 'human_ig'])
parser.add_argument('--nbr_fracs', type=float, nargs='*', default=[0.01,0.1], help='Size of neighborhoods to use in building K nearest neighbor graphs, expressed as a fraction of the total dataset size in clonotypes')
parser.add_argument('--outfile_prefix', required=True, help='string that will be prepended to all output files and images')
parser.add_argument('--restart', help='Name of a scanpy h5ad file to restart from; skips preprocessing, clustering, UMAP, etc. Could be the *_final.h5ad file generated at the end of a previous conga run.')
parser.add_argument('--checkpoint', action='store_true', help='Save a scanpy h5ad checkpoint file after preprocessing')
parser.add_argument('--rerun_kpca', action='store_true')
parser.add_argument('--no_kpca', action='store_true')
parser.add_argument('--use_exact_tcrdist_nbrs', action='store_true', help='The default is to use the nbrs defined by euclidean distances in the tcrdist kernel pc space. This flag will force a re-computation of all the tcrdist distances')
parser.add_argument('--use_tcrdist_umap', action='store_true')
parser.add_argument('--use_tcrdist_clusters', action='store_true')
parser.add_argument('--kpca_kernel', help='only used if rerun_kpca is True; if not provided will use classic kernel')
parser.add_argument('--kpca_gaussian_kernel_sdev', default=100.0, type=float,
help='only used if rerun_kpca and kpca_kernel==\'gaussian\'')
parser.add_argument('--kpca_default_kernel_Dmax', type=float,
help='only used if rerun_kpca and kpca_kernel==None')
parser.add_argument('--exclude_gex_clusters', type=int, nargs='*')
parser.add_argument('--exclude_mait_and_inkt_cells', action='store_true')
parser.add_argument('--subset_to_CD4', action='store_true')
parser.add_argument('--subset_to_CD8', action='store_true')
parser.add_argument('--min_cluster_size', type=int, default=5)
parser.add_argument('--min_cluster_size_for_tcr_clumping_logos', type=int, default=3)
parser.add_argument('--min_cluster_size_for_batch_bias_logos', type=int, default=5)
parser.add_argument('--min_cluster_size_fraction', type=float, default=0.001)
parser.add_argument('--clustering_method', choices=['louvain','leiden'])
parser.add_argument('--clustering_resolution', type=float, default = 1.0)
parser.add_argument('--bad_barcodes_file')
parser.add_argument('--make_unfiltered_logos', action='store_true')
#parser.add_argument('--make_avggood_logos', action='store_true') # see old versions on github
parser.add_argument('--make_avgfull_logos', action='store_true')
parser.add_argument('--make_clone_plots', action='store_true')
parser.add_argument('--write_proj_info', action='store_true')
parser.add_argument('--filter_ribo_norm_low_cells', action='store_true')
# the main modes of operation
parser.add_argument('--all', action='store_true', help='Run all reasonable analyses')
parser.add_argument('--graph_vs_graph', action='store_true')
parser.add_argument('--graph_vs_tcr_features', action='store_true')
parser.add_argument('--graph_vs_gex_features', action='store_true')
# some extra analyses
parser.add_argument('--cluster_vs_cluster', action='store_true')
parser.add_argument('--tcr_clumping', action='store_true')
parser.add_argument('--intra_cluster_tcr_clumping', action='store_true')
parser.add_argument('--find_batch_biases', action='store_true')
parser.add_argument('--calc_clone_pmhc_pvals', action='store_true')
parser.add_argument('--find_pmhc_nbrhood_overlaps', action='store_true') # only if pmhc info is present
parser.add_argument('--find_distance_correlations', action='store_true')
parser.add_argument('--find_gex_cluster_degs', action='store_true')
parser.add_argument('--find_hotspot_features', action='store_true')
parser.add_argument('--plot_cluster_gene_compositions', action='store_true')
parser.add_argument('--make_tcrdist_trees', action='store_true')
parser.add_argument('--make_hotspot_nbrhood_logos', action='store_true')
parser.add_argument('--analyze_CD4_CD8', action='store_true')
parser.add_argument('--analyze_proteins', action='store_true')
parser.add_argument('--analyze_special_genes', action='store_true')
# configure things
parser.add_argument('--skip_gex_header', action='store_true')
parser.add_argument('--average_clone_gex', action='store_true')
parser.add_argument('--include_protein_features', action='store_true')
parser.add_argument('--skip_gex_header_raw', action='store_true')
parser.add_argument('--skip_gex_header_nbrZ', action='store_true')
parser.add_argument('--verbose_nbrs', action='store_true')
parser.add_argument('--analyze_junctions', action='store_true')
parser.add_argument('--skip_tcr_scores_in_gex_header', action='store_true')
parser.add_argument('--tenx_agbt', action='store_true')
parser.add_argument('--include_alphadist_in_tcr_feature_logos', action='store_true')
parser.add_argument('--show_pmhc_info_in_logos', action='store_true')
parser.add_argument('--gex_header_tcr_score_names', type=str, nargs='*')
parser.add_argument('--batch_keys', type=str, nargs='*')
parser.add_argument('--exclude_batch_keys_for_biases', type=str, nargs='*')
parser.add_argument('--radii_for_tcr_clumping', type=int, nargs='*')
parser.add_argument('--num_random_samples_for_tcr_clumping', type=int)
parser.add_argument('--gex_nbrhood_tcr_score_names', type=str, nargs='*')
parser.add_argument('--shuffle_tcr_kpcs', action='store_true') # shuffle the TCR kpcs to test for FDR
parser.add_argument('--shuffle_gex_nbrs', action='store_true') # for debugging
parser.add_argument('--exclude_vgene_strings', type=str, nargs='*')
parser.add_argument('--suffix_for_non_gene_features', type=str)
parser.add_argument('--max_genes_per_cell', type=int)
parser.add_argument('--qc_plots', action='store_true')
args = parser.parse_args()
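# example invocation (the file names below are placeholders, not files shipped with the repo):
#   python run_conga.py --gex_data my_gex.h5ad --gex_data_type h5ad \
#       --clones_file my_clones.tsv --organism human --outfile_prefix myrun --all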
# do the imports now since they are so freakin slow
import sys
import os
from collections import Counter
from os.path import exists
import time
sys.path.append( os.path.dirname( os.path.dirname( os.path.abspath(__file__) ) ) ) # in order to import conga package
import matplotlib
matplotlib.use('Agg') # for remote calcs
import matplotlib.pyplot as plt
import conga
import scanpy as sc
import scanpy.neighbors
from sklearn.metrics import pairwise_distances
import numpy as np
import pandas as pd
start_time = time.time()
if args.gex_nbrhood_tcr_score_names is None:
args.gex_nbrhood_tcr_score_names = list(conga.tcr_scoring.all_tcr_scorenames) # copy, since 'N_ins' may be appended below
if args.all:
all_modes = """graph_vs_graph
graph_vs_gex_features
graph_vs_tcr_features
cluster_vs_cluster
find_hotspot_features
find_gex_cluster_degs
tcr_clumping
make_tcrdist_trees""".split()
for mode in all_modes:
print(f'--all implies --{mode} ==> Running {mode} analysis.')
setattr(args, mode, True)
if args.no_kpca:
print('--no_kpca implies --use_exact_tcrdist_nbrs and --use_tcrdist_umap --use_tcrdist_clusters')
print('setting those flags now')
args.use_exact_tcrdist_nbrs = True
args.use_tcrdist_umap = True
args.use_tcrdist_clusters = True
## check consistency of args
if args.find_pmhc_nbrhood_overlaps or args.calc_clone_pmhc_pvals:
# we need pmhc info for these analyses; right now that's restricted to the 10x AGBT dataset format
assert args.tenx_agbt
if args.batch_keys:
assert args.gex_data_type == 'h5ad' # need the info already in the obs dict
if args.restart: # these are incompatible with restarting
assert not (args.calc_clone_pmhc_pvals or
args.bad_barcodes_file or
args.filter_ribo_norm_low_cells or
args.exclude_vgene_strings or
#args.shuffle_tcr_kpcs or
args.rerun_kpca )
logfile = args.outfile_prefix+'_log.txt'
outlog = open(logfile, 'w')
outlog.write('sys.argv: {}\n'.format(' '.join(sys.argv)))
sc.logging.print_versions() # goes to stdout
hostname = os.popen('hostname').readlines()[0][:-1]
outlog.write('hostname: {}\n'.format(hostname))
if args.restart is None:
allow_missing_kpca_file = args.use_exact_tcrdist_nbrs and args.use_tcrdist_umap and args.use_tcrdist_clusters
assert exists(args.gex_data)
assert exists(args.clones_file)
## load the dataset
if args.rerun_kpca:
if args.kpca_file is None:
args.kpca_file = args.outfile_prefix+'_rerun_tcrdist_kpca.txt'
else:
print('WARNING:: overwriting', args.kpca_file, 'since --rerun_kpca is True')
conga.preprocess.make_tcrdist_kernel_pcs_file_from_clones_file(
args.clones_file,
args.organism,
kernel=args.kpca_kernel,
outfile=args.kpca_file,
gaussian_kernel_sdev=args.kpca_gaussian_kernel_sdev,
force_Dmax=args.kpca_default_kernel_Dmax,
)
adata = conga.preprocess.read_dataset(
args.gex_data, args.gex_data_type, args.clones_file, kpca_file=args.kpca_file, # default is None
allow_missing_kpca_file=allow_missing_kpca_file, gex_only=False,
suffix_for_non_gene_features=args.suffix_for_non_gene_features)
assert args.organism
adata.uns['organism'] = args.organism
assert 'organism' in adata.uns_keys()
if args.batch_keys:
adata.uns['batch_keys'] = args.batch_keys
for k in args.batch_keys:
assert k in adata.obs_keys()
vals = np.array(adata.obs[k]).astype(int)
#assert np.min(vals)==0
counts = Counter(vals)
expected_choices = np.max(vals)+1
observed_choices = len(counts.keys())
print(f'read batch info for key {k} with {expected_choices} possible and {observed_choices} observed choices')
# store the integer-coded batch values back into adata.obs
adata.obs[k] = vals
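# each batch-key column in adata.obs is expected to hold small non-negative integer codes
# (0, 1, 2, ...); the cast above standardizes the dtype, and the commented-out assert would
# additionally require that code 0 is present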
if args.exclude_vgene_strings:
tcrs = conga.preprocess.retrieve_tcrs_from_adata(adata)
exclude_mask = np.full((adata.shape[0],),False)
for s in args.exclude_vgene_strings:
mask = np.array([s in x[0][0] or s in x[1][0] for x in tcrs])
print('exclude_vgene_strings:', s, 'num_matches:', np.sum(mask))
exclude_mask |= mask
adata = adata[~exclude_mask].copy()
if args.exclude_mait_and_inkt_cells:
tcrs = conga.preprocess.retrieve_tcrs_from_adata(adata)
if args.organism == 'human':
mask = [ not (conga.tcr_scoring.is_human_mait_alpha_chain(x[0]) or
conga.tcr_scoring.is_human_inkt_tcr(x)) for x in tcrs ]
elif args.organism == 'mouse':
mask = [ not (conga.tcr_scoring.is_mouse_mait_alpha_chain(x[0]) or
conga.tcr_scoring.is_mouse_inkt_alpha_chain(x[0])) for x in tcrs ]
else:
print('ERROR: --exclude_mait_and_inkt_cells option is only compatible with a/b tcrs')
print('ERROR: but organism is not "human" or "mouse"')
sys.exit(1)
print('excluding {} mait/inkt cells from dataset of size {}'\
.format(adata.shape[0]-np.sum(mask), adata.shape[0]))
adata = adata[mask].copy()
if args.tenx_agbt:
conga.pmhc_scoring.shorten_pmhc_var_names(adata)
adata.uns['pmhc_var_names'] = conga.pmhc_scoring.get_tenx_agbt_pmhc_var_names(adata)
print('pmhc_var_names:', adata.uns['pmhc_var_names'])
if args.bad_barcodes_file:
bad_barcodes = frozenset([x[:-1] for x in open(args.bad_barcodes_file,'r')])
bad_bc_mask = np.array( [x in bad_barcodes for x in adata.obs_names ] )
num_bad = np.sum(bad_bc_mask)
if num_bad:
print('excluding {} bad barcodes found in {}'\
.format(num_bad, args.bad_barcodes_file))
adata = adata[~bad_bc_mask,:].copy()
else:
print('WARNING:: no matched barcodes in bad_barcodes_file: {}'.format(args.bad_barcodes_file))
assert not adata.isview
assert allow_missing_kpca_file or 'X_pca_tcr' in adata.obsm_keys() # tcr-dist kPCA info
assert 'cdr3a' in adata.obs # tcr sequence (VDJ) info (plus other obs keys)
print(adata)
outfile_prefix_for_qc_plots = args.outfile_prefix if args.qc_plots else None # only make QC plots if --qc_plots was passed
adata = conga.preprocess.filter_and_scale( adata, n_genes = args.max_genes_per_cell,
outfile_prefix_for_qc_plots = outfile_prefix_for_qc_plots )
if args.filter_ribo_norm_low_cells:
adata = conga.preprocess.filter_cells_by_ribo_norm( adata )
if args.calc_clone_pmhc_pvals: # do this before condensing to a single clone per cell
# note that we are doing this after filtering out the ribo-low cells
results_df = conga.pmhc_scoring.calc_clone_pmhc_pvals(adata)
tsvfile = args.outfile_prefix+'_clone_pvals.tsv'
print('making:', tsvfile)
results_df.to_csv(tsvfile, sep='\t', index=False)
if args.make_clone_plots:
# need to compute cluster and umaps for these plots
# these will be re-computed once we reduce to a single cell per clonotype
#
print('make_clone_plots: cluster_and_tsne_and_umap')
adata = conga.preprocess.cluster_and_tsne_and_umap( adata, skip_tcr=True )
conga.plotting.make_clone_gex_umap_plots(adata, args.outfile_prefix)
print('run reduce_to_single_cell_per_clone'); sys.stdout.flush()
adata = conga.preprocess.reduce_to_single_cell_per_clone( adata, average_clone_gex=args.average_clone_gex )
assert 'X_igex' in adata.obsm_keys()
if args.include_protein_features:
# this fills X_pca_gex_only, X_pca_gex (combo), X_pca_prot
# in the adata.obsm array
conga.preprocess.calc_X_pca_gex_including_protein_features(
adata, compare_distance_distributions=True)
if args.shuffle_tcr_kpcs:
X_pca_tcr = adata.obsm['X_pca_tcr']
assert X_pca_tcr.shape[0] == adata.shape[0]
reorder = np.random.permutation(X_pca_tcr.shape[0])
adata.obsm['X_pca_tcr'] = X_pca_tcr[reorder,:]
outlog.write('randomly permuting X_pca_tcr {}\n'.format(X_pca_tcr.shape))
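# shuffling the TCR kernel PCs breaks the true pairing between each cell's GEX profile and its
# TCR, so graph-vs-graph hits obtained under this flag estimate the null / false-discovery background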
clustering_resolution = 2.0 if (args.subset_to_CD8 or args.subset_to_CD4) else args.clustering_resolution
print('run cluster_and_tsne_and_umap'); sys.stdout.flush()
adata = conga.preprocess.cluster_and_tsne_and_umap(
adata, clustering_resolution = clustering_resolution,
clustering_method=args.clustering_method,
skip_tcr=(args.use_tcrdist_umap and args.use_tcrdist_clusters))
if args.checkpoint:
adata.write_h5ad(args.outfile_prefix+'_checkpoint.h5ad')
#############################################################################
else: ############## restarting from a previous conga run #######################
#############################################################################
assert exists(args.restart)
adata = sc.read_h5ad(args.restart)
print('recover from h5ad file:', args.restart, adata )
if 'organism' not in adata.uns_keys():
assert args.organism
adata.uns['organism'] = args.organism
if args.exclude_mait_and_inkt_cells and not args.exclude_gex_clusters:
# should move this code into a helper function in conga!
organism = adata.uns['organism']
tcrs = conga.preprocess.retrieve_tcrs_from_adata(adata)
if organism == 'human':
mask = [ not (conga.tcr_scoring.is_human_mait_alpha_chain(x[0]) or
conga.tcr_scoring.is_human_inkt_tcr(x)) for x in tcrs ]
elif organism == 'mouse':
mask = [ not (conga.tcr_scoring.is_mouse_mait_alpha_chain(x[0]) or
conga.tcr_scoring.is_mouse_inkt_alpha_chain(x[0])) for x in tcrs ]
else:
print('ERROR: --exclude_mait_and_inkt_cells option is only compatible with a/b tcrs')
print('ERROR: but organism is not "human" or "mouse"')
sys.exit(1)
print('excluding {} mait/inkt cells from dataset of size {}'\
.format(adata.shape[0]-np.sum(mask), adata.shape[0]))
adata = adata[mask].copy()
# need to redo the cluster/tsne/umap
adata = conga.preprocess.cluster_and_tsne_and_umap(
adata, clustering_method=args.clustering_method,
clustering_resolution=args.clustering_resolution,
skip_tcr=(args.use_tcrdist_umap and args.use_tcrdist_clusters))
if args.shuffle_tcr_kpcs:
# shuffle the kpcs and anything derived from them that is relevant to GvG (this is just for testing)
# NOTE: we need to add shuffling of the neighbors if we are going to recover nbr info rather
# than recomputing...
X_pca_tcr = adata.obsm['X_pca_tcr']
assert X_pca_tcr.shape[0] == adata.shape[0]
reorder = np.random.permutation(X_pca_tcr.shape[0])
adata.obsm['X_pca_tcr'] = X_pca_tcr[reorder,:]
adata.obs['clusters_tcr'] = np.array(adata.obs['clusters_tcr'])[reorder]
adata.obsm['X_tcr_2d'] = np.array(adata.obsm['X_tcr_2d'])[reorder,:]
print('shuffle_tcr_kpcs:: shuffled X_pca_tcr, clusters_tcr, and X_tcr_2d')
outlog.write('randomly permuting X_pca_tcr {}\n'.format(X_pca_tcr.shape))
if args.exclude_gex_clusters:
xl = args.exclude_gex_clusters
clusters_gex = np.array(adata.obs['clusters_gex'])
mask = (clusters_gex==xl[0])
for c in xl[1:]:
mask |= (clusters_gex==c)
print('exclude_gex_clusters: exclude {} cells in {} clusters: {}'.format(np.sum(mask), len(xl), xl))
sys.stdout.flush()
adata = adata[~mask,:].copy()
if args.exclude_mait_and_inkt_cells:
organism = adata.uns['organism']
tcrs = conga.preprocess.retrieve_tcrs_from_adata(adata)
if organism == 'human':
mask = [ not (conga.tcr_scoring.is_human_mait_alpha_chain(x[0]) or
conga.tcr_scoring.is_human_inkt_tcr(x)) for x in tcrs ]
elif organism == 'mouse':
mask = [ not (conga.tcr_scoring.is_mouse_mait_alpha_chain(x[0]) or
conga.tcr_scoring.is_mouse_inkt_alpha_chain(x[0])) for x in tcrs ]
else:
print('ERROR: --exclude_mait_and_inkt_cells option is only compatible with a/b tcrs')
print('ERROR: but organism is not "human" or "mouse"')
sys.exit(1)
print('excluding {} mait/inkt cells from dataset of size {}'\
.format(adata.shape[0]-np.sum(mask), adata.shape[0]))
adata = adata[mask].copy()
adata = conga.preprocess.cluster_and_tsne_and_umap(
adata, clustering_method=args.clustering_method,
clustering_resolution=args.clustering_resolution,
skip_tcr=(args.use_tcrdist_umap and args.use_tcrdist_clusters))
if args.checkpoint:
adata.write_h5ad(args.outfile_prefix+'_checkpoint.h5ad')
if args.subset_to_CD4 or args.subset_to_CD8:
assert not (args.subset_to_CD4 and args.subset_to_CD8)
which_subset = 'CD4' if args.subset_to_CD4 else 'CD8'
adata = conga.preprocess.subset_to_CD4_or_CD8_clusters(
adata, which_subset, use_protein_features=args.include_protein_features)
adata = conga.preprocess.cluster_and_tsne_and_umap(
adata, clustering_method=args.clustering_method,
clustering_resolution=args.clustering_resolution,
skip_tcr=(args.use_tcrdist_umap and args.use_tcrdist_clusters))
if args.use_tcrdist_umap or args.use_tcrdist_clusters:
umap_key_added = 'X_tcr_2d' if args.use_tcrdist_umap else 'X_tcrdist_2d'
cluster_key_added = 'clusters_tcr' if args.use_tcrdist_clusters else 'clusters_tcrdist'
num_nbrs = 10
conga.preprocess.calc_tcrdist_nbrs_umap_clusters_cpp(
adata, num_nbrs, args.outfile_prefix, umap_key_added=umap_key_added, cluster_key_added=cluster_key_added)
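# note: the neighbor count used for the tcrdist-based UMAP/clustering is hard-coded to 10 here,
# independent of the --nbr_fracs setting used for the conga neighbor graphs below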
################################################ DONE WITH INITIAL SETUP #########################################
# all_nbrs is dict from nbr_frac to [nbrs_gex, nbrs_tcr]
# for nndist calculations, use a smallish nbr_frac, but not too small:
num_clones = adata.shape[0]
nbr_frac_for_nndists = min( x for x in args.nbr_fracs if x*num_clones>=10 or x==max(args.nbr_fracs) )
outlog.write(f'nbr_frac_for_nndists: {nbr_frac_for_nndists}\n')
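# e.g. with 5000 clones and the default --nbr_fracs 0.01 0.1, 0.01*5000 = 50 >= 10,
# so nbr_frac_for_nndists = 0.01 (the smallest qualifying fraction)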
obsm_tag_tcr = None if args.use_exact_tcrdist_nbrs else 'X_pca_tcr'
all_nbrs, nndists_gex, nndists_tcr = conga.preprocess.calc_nbrs(
adata, args.nbr_fracs, also_calc_nndists=True, nbr_frac_for_nndists=nbr_frac_for_nndists,
obsm_tag_tcr=obsm_tag_tcr, use_exact_tcrdist_nbrs=args.use_exact_tcrdist_nbrs)
#
if args.analyze_junctions:
tcrs = conga.preprocess.retrieve_tcrs_from_adata(adata)
new_tcrs = conga.tcrdist.tcr_sampler.find_alternate_alleles_for_tcrs(
adata.uns['organism'], tcrs, verbose=True)
junctions_df = conga.tcrdist.tcr_sampler.parse_tcr_junctions(
adata.uns['organism'], new_tcrs)
num_inserts = (np.array(junctions_df.a_insert) +
np.array(junctions_df.vd_insert) +
np.array(junctions_df.dj_insert) +
np.array(junctions_df.vj_insert))
adata.obs['N_ins'] = num_inserts
args.gex_nbrhood_tcr_score_names.append('N_ins')
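# N_ins is the per-clonotype sum of the junction insertion counts reported by parse_tcr_junctions;
# appending it here means the downstream graph-vs-tcr-features analysis will also test it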
if args.shuffle_gex_nbrs:
reorder = np.random.permutation(num_clones)
print('shuffling gex nbrs: num_shuffle_fixed_points=', np.sum(reorder==np.arange(num_clones)))
reorder_list = list(reorder)
# reorder maps from the old index to the permuted index, ie new_i = reorder[old_i]
for nbr_frac in args.nbr_fracs:
old_nbrs = all_nbrs[nbr_frac][0]
new_nbrs = []
for new_ii in range(num_clones): # the new index
old_ii = reorder_list.index(new_ii)
new_nbrs.append( [ reorder[x] for x in old_nbrs[old_ii]])
all_nbrs[nbr_frac] = [np.array(new_nbrs), all_nbrs[nbr_frac][1]]
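# the same permutation is applied to both the node order and the neighbor indices, so the GEX
# neighbor graph keeps its structure but is assigned to randomly chosen clones (a null control);
# equivalently, old_ii could be looked up via the inverse permutation np.argsort(reorder)
# instead of the O(num_clones) reorder_list.index call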
# stash these in obs array, they are used in a few places...
adata.obs['nndists_gex'] = nndists_gex
adata.obs['nndists_tcr'] = nndists_tcr
conga.preprocess.setup_tcr_cluster_names(adata) #stores in adata.uns
if args.verbose_nbrs:
for nbr_frac in args.nbr_fracs:
for tag, nbrs in [ ['gex', all_nbrs[nbr_frac][0]], ['tcr', all_nbrs[nbr_frac][1]]]:
outfile = '{}_{}_nbrs_{:.3f}.txt'.format(args.outfile_prefix, tag, nbr_frac)
np.savetxt(outfile, nbrs, fmt='%d')
print('wrote nbrs to file:', outfile)
if args.tcr_clumping:
num_random_samples = 50000 if args.num_random_samples_for_tcr_clumping is None \
else args.num_random_samples_for_tcr_clumping
radii = [24, 48, 72, 96] if args.radii_for_tcr_clumping is None else args.radii_for_tcr_clumping
pvalue_threshold = 0.05 # could use 1.0 maybe?
results = conga.tcr_clumping.assess_tcr_clumping(
adata, args.outfile_prefix, radii=radii, num_random_samples=num_random_samples,
pvalue_threshold = pvalue_threshold,
also_find_clumps_within_gex_clusters=args.intra_cluster_tcr_clumping)
if results.shape[0]:
# add clusters info for results tsvfile
clusters_gex = np.array(adata.obs['clusters_gex'])
clusters_tcr = np.array(adata.obs['clusters_tcr'])
results['clusters_gex'] = [ clusters_gex[x] for x in results.clone_index]
results['clusters_tcr'] = [ clusters_tcr[x] for x in results.clone_index]
tsvfile = args.outfile_prefix+'_tcr_clumping.tsv'
results.to_csv(tsvfile, sep='\t', index=False)
nbrs_gex, nbrs_tcr = all_nbrs[ max(args.nbr_fracs) ]
#min_cluster_size = max( args.min_cluster_size, int( 0.5 + args.min_cluster_size_fraction * num_clones) )
conga.plotting.make_tcr_clumping_plots(
adata, results, nbrs_gex, nbrs_tcr, args.min_cluster_size_for_tcr_clumping_logos,
pvalue_threshold, args.outfile_prefix)
num_clones = adata.shape[0]
tcr_clumping_pvalues = np.full((num_clones,), num_clones).astype(float)
for l in results.itertuples():
tcr_clumping_pvalues[l.clone_index] = min(tcr_clumping_pvalues[l.clone_index],
l.pvalue_adj)
adata.obs['tcr_clumping_pvalues'] = tcr_clumping_pvalues # stash in adata.obs
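# the per-clone clumping p-value defaults to num_clones (an 'always insignificant' sentinel)
# and is replaced by the smallest adjusted p-value observed for that clone, if any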
if args.graph_vs_graph: ############################################################################################
# make these numpy arrays because there seems to be a problem with np.nonzero on pandas series...
clusters_gex = np.array(adata.obs['clusters_gex'])
clusters_tcr = np.array(adata.obs['clusters_tcr'])
# run the graph vs graph analysis
results_df = conga.correlations.run_graph_vs_graph(adata, all_nbrs, verbose=args.verbose_nbrs)
if results_df.shape[0]:
# add in some extra info that may be useful before writing to tsv file
indices = results_df['clone_index']
results_df['gex_cluster'] = list(clusters_gex[indices])
results_df['tcr_cluster'] = list(clusters_tcr[indices])
for tag in 'va ja cdr3a vb jb cdr3b'.split():
results_df[tag] = list(adata.obs[tag][indices])
tsvfile = args.outfile_prefix+'_graph_vs_graph_hits.tsv'
results_df.to_csv(tsvfile, sep='\t', index=False)
# the conga scores
conga_scores = np.array(adata.obs['conga_scores'])
good_mask = (conga_scores <= 1.0)
adata.obs['good_score_mask'] = good_mask
bic_counts = Counter( (x,y) for x,y,m in zip(clusters_gex, clusters_tcr, good_mask) if m )
# take the LARGER of the two min_cluster_size thresholds
min_cluster_size = max( args.min_cluster_size, int( 0.5 + args.min_cluster_size_fraction * num_clones) )
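# e.g. with 20000 clones and the defaults (--min_cluster_size 5, --min_cluster_size_fraction 0.001):
# int(0.5 + 0.001*20000) = 20, so min_cluster_size = max(5, 20) = 20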
num_good_biclusters = sum( 1 for x,y in bic_counts.items() if y>=min_cluster_size )
outlog.write(f'num_gvg_hit_clonotypes: {np.sum(good_mask)} num_gvg_hit_biclusters: {num_good_biclusters}\n')
print('num_good_biclusters:', num_good_biclusters)
# for the logo plots, use the largest nbr_frac
nbrs_gex, nbrs_tcr = all_nbrs[ max(args.nbr_fracs) ]
if num_good_biclusters:
# calc tcr sequence features of good cluster pairs
good_bicluster_tcr_scores = conga.correlations.calc_good_cluster_tcr_features(
adata, good_mask, clusters_gex, clusters_tcr, args.gex_nbrhood_tcr_score_names, min_count=min_cluster_size)
# run rank_genes on most common bics
rank_genes_uns_tag = 'rank_genes_good_biclusters'
conga.correlations.run_rank_genes_on_good_biclusters(
adata, good_mask, clusters_gex, clusters_tcr, min_count=min_cluster_size, key_added= rank_genes_uns_tag)
if args.skip_tcr_scores_in_gex_header:
gex_header_tcr_score_names = []
elif args.gex_header_tcr_score_names is None:
if '_ig' in adata.uns['organism']:
gex_header_tcr_score_names = ['af2', 'cdr3len', 'volume', 'nndists_tcr']
else:
gex_header_tcr_score_names = ['imhc', 'cdr3len', 'cd8', 'nndists_tcr']
else:
gex_header_tcr_score_names = args.gex_header_tcr_score_names
conga.plotting.make_logo_plots(
adata, nbrs_gex, nbrs_tcr, min_cluster_size, args.outfile_prefix+'_bicluster_logos.png',
good_bicluster_tcr_scores=good_bicluster_tcr_scores,
make_gex_header = not args.skip_gex_header,
make_gex_header_raw = not args.skip_gex_header_raw,
make_gex_header_nbrZ = not args.skip_gex_header_nbrZ,
include_alphadist_in_tcr_feature_logos=args.include_alphadist_in_tcr_feature_logos,
rank_genes_uns_tag = rank_genes_uns_tag,
show_pmhc_info_in_logos = args.show_pmhc_info_in_logos,
gex_header_tcr_score_names = gex_header_tcr_score_names )
batch_bias_results = None
if args.find_batch_biases:
pval_threshold = 0.05 # kind of arbitrary
nbrhood_results, hotspot_results = conga.correlations.find_batch_biases(
adata, all_nbrs, pval_threshold=pval_threshold, exclude_batch_keys=args.exclude_batch_keys_for_biases)
if nbrhood_results.shape[0]:
tsvfile = args.outfile_prefix+'_nbrhood_batch_biases.tsv'
nbrhood_results.to_csv(tsvfile, sep='\t', index=False)
nbrs_gex, nbrs_tcr = all_nbrs[ max(args.nbr_fracs) ]
#min_cluster_size = max( args.min_cluster_size, int( 0.5 + args.min_cluster_size_fraction * num_clones) )
conga.plotting.make_batch_bias_plots(
adata, nbrhood_results, nbrs_gex, nbrs_tcr, args.min_cluster_size_for_batch_bias_logos,
pval_threshold, args.outfile_prefix)
if hotspot_results.shape[0]:
tsvfile = args.outfile_prefix+'_batch_hotspots.tsv'
hotspot_results.to_csv(tsvfile, sep='\t', index=False)
batch_bias_results = (nbrhood_results, hotspot_results)
if args.graph_vs_gex_features: #######################################################################################
clusters_gex = np.array(adata.obs['clusters_gex'])
clusters_tcr = np.array(adata.obs['clusters_tcr'])
## first use the TCRdist kPCA nbr graph:
pval_threshold = 1.
results = []
for nbr_frac in args.nbr_fracs:
nbrs_gex, nbrs_tcr = all_nbrs[nbr_frac]
results.append( conga.correlations.tcr_nbrhood_rank_genes_fast( adata, nbrs_tcr, pval_threshold))
results[-1]['nbr_frac'] = nbr_frac
tsvfile = args.outfile_prefix+'_tcr_nbr_graph_vs_gex_features.tsv'
print('making:', tsvfile)
results_df = pd.concat(results, ignore_index=True)
results_df.to_csv(tsvfile, index=False, sep='\t')
tcr_nbrhood_genes_results = results_df
combo_results = []
if results_df.shape[0]:
combo_results.append( results_df)
# now make a TCR cluster graph and use the nbrhoods in there
# make some fake nbrs-- note that only one clone per cluster has a nonempty nbrhood
fake_nbrs_tcr = conga.correlations.setup_fake_nbrs_from_clusters_for_graph_vs_features_analysis(clusters_tcr)
pval_threshold = 1.
results_df = conga.correlations.tcr_nbrhood_rank_genes_fast(
adata, fake_nbrs_tcr, pval_threshold, prefix_tag='clust')
if results_df.shape[0]:
results_df['clone_index'] = -1
tsvfile = args.outfile_prefix+'_tcr_cluster_graph_vs_gex_features.tsv'
print('making:', tsvfile)
results_df.to_csv(tsvfile, index=False, sep='\t')
results_df['nbr_frac'] = 0.0
tcr_cluster_genes_results = results_df
combo_results.append(results_df)
else:
tcr_cluster_genes_results = None
if combo_results:
results_df = pd.concat(combo_results, ignore_index=True)
pngfile = args.outfile_prefix+'_tcr_nbr_graph_vs_gex_features.png'
print('making:', pngfile)
conga.plotting.plot_ranked_strings_on_cells(
adata, results_df, 'X_tcr_2d', 'clone_index', 'mwu_pvalue_adj', 1.0, 'feature', pngfile)
pngfile = args.outfile_prefix+'_tcr_nbr_graph_vs_gex_features_panels.png'
print('making:', pngfile)
conga.plotting.make_feature_panel_plots(adata, 'tcr', all_nbrs, results_df, pngfile)
# show the genes in a clustermap
clustermap_pvalue_threshold = 0.05
gene_pvalues = {}
for l in results_df.itertuples():
if l.mwu_pvalue_adj <= clustermap_pvalue_threshold:
gene_pvalues[l.feature] = min(l.mwu_pvalue_adj, gene_pvalues.get(l.feature, 1.0))
genes = list(gene_pvalues.keys())
if len(genes)>1 and 'X_pca_tcr' in adata.obsm_keys(): # TMP HACK
gene_labels = ['{:9.1e} {}'.format(gene_pvalues[x], x) for x in genes]
pngfile = '{}_all_tcr_graph_genes_clustermap.png'.format(args.outfile_prefix)
nbr_frac = max(args.nbr_fracs)
gex_nbrs, tcr_nbrs = all_nbrs[nbr_frac]
conga.plotting.plot_interesting_features_vs_clustermap(
adata, genes, pngfile, 'tcr', nbrs=tcr_nbrs, compute_nbr_averages=True, feature_labels=gene_labels)
## now make another fake nbr graph defined by TCR gene segment usage
tcrs = conga.preprocess.retrieve_tcrs_from_adata(adata)
for iab,ab in enumerate('AB'):
for iseg,seg in enumerate('VJ'):
genes = [ x[iab][iseg] for x in tcrs ]
genes = np.array([ x[:x.index('*')] for x in genes ])
# make some fake nbrs
fake_nbrs_tcr = []
clone_display_names = []
seen = set()
for g in genes:
if g in seen:
fake_nbrs_tcr.append([])
clone_display_names.append('')
else:
seen.add(g)
# this will include self but dont think thats a problem
fake_nbrs_tcr.append(np.nonzero( genes==g )[0] )
clone_display_names.append(g)
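# only the first clone carrying each V/J gene gets a nonempty fake 'neighborhood' (all clones
# sharing that gene); the rest get empty lists, so each gene segment is tested exactly once below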
pval_threshold = 1.
results_df = conga.correlations.tcr_nbrhood_rank_genes_fast(
adata, fake_nbrs_tcr, pval_threshold, prefix_tag=seg+ab, clone_display_names=clone_display_names )
if results_df.shape[0]:
results_df['clone_index'] = -1
tsvfile = args.outfile_prefix+'_tcr_gene_segments_vs_gex_features.tsv'
print('making:', tsvfile)
results_df.to_csv(tsvfile, index=False, sep='\t')
results_df['nbr_frac'] = 0.0
pngfile = args.outfile_prefix+'_tcr_gene_segments_vs_gex_features_panels.png'
print('making:', pngfile)
use_nbr_frac = max(args.nbr_fracs)
conga.plotting.make_feature_panel_plots(adata, 'tcr', all_nbrs, results_df, pngfile,
use_nbr_frac=use_nbr_frac)
if args.graph_vs_tcr_features: #######################################################################################
clusters_gex = np.array(adata.obs['clusters_gex'])
clusters_tcr = np.array(adata.obs['clusters_tcr'])
pval_threshold = 1.
results = []
tcr_score_names = list(args.gex_nbrhood_tcr_score_names)
if True: #args.include_vj_genes_as_tcr_features: # (used to be an option)
min_gene_count = 5
tcrs = conga.preprocess.retrieve_tcrs_from_adata(adata)
organism_genes = conga.tcrdist.all_genes.all_genes[adata.uns['organism']]
counts = Counter( [ organism_genes[x[i_ab][j_vj]].count_rep
for x in tcrs for i_ab in range(2) for j_vj in range(2)] )
count_reps = [x for x,y in counts.most_common() if y>=min_gene_count ]
tcr_score_names += count_reps
for nbr_frac in args.nbr_fracs:
nbrs_gex, nbrs_tcr = all_nbrs[nbr_frac]
results.append( conga.correlations.gex_nbrhood_rank_tcr_scores(
adata, nbrs_gex, tcr_score_names, pval_threshold ))
results[-1]['nbr_frac'] = nbr_frac
results_df = pd.concat(results, ignore_index=True)
tsvfile = args.outfile_prefix+'_gex_nbr_graph_vs_tcr_features.tsv'
print('making:', tsvfile)
results_df.to_csv(tsvfile, index=False, sep='\t')
gex_nbrhood_scores_results = results_df
combo_results = []
if results_df.shape[0]:
combo_results.append(results_df)
# make some fake nbrs
fake_nbrs_gex = conga.correlations.setup_fake_nbrs_from_clusters_for_graph_vs_features_analysis(clusters_gex)
pval_threshold = 1.
results_df = conga.correlations.gex_nbrhood_rank_tcr_scores(
adata, fake_nbrs_gex, tcr_score_names, pval_threshold, prefix_tag = 'clust' )
if results_df.shape[0]:
results_df['clone_index'] = -1 # the clone_index values are not meaningful
tsvfile = args.outfile_prefix+'_gex_cluster_graph_vs_tcr_features.tsv'
print('making:', tsvfile)
results_df.to_csv(tsvfile, index=False, sep='\t')
results_df['nbr_frac'] = 0.0
gex_cluster_scores_results = results_df
combo_results.append(results_df)
else:
gex_cluster_scores_results = None
if combo_results:
pngfile = args.outfile_prefix+'_gex_nbr_graph_vs_tcr_features.png'
print('making:', pngfile)
results_df = pd.concat(combo_results, ignore_index=True)
conga.plotting.plot_ranked_strings_on_cells(
adata, results_df, 'X_gex_2d', 'clone_index', 'mwu_pvalue_adj', 1.0, 'feature', pngfile,
direction_column='ttest_stat')
pngfile = args.outfile_prefix+'_gex_nbr_graph_vs_tcr_features_panels.png'
print('making:', pngfile)
conga.plotting.make_feature_panel_plots(adata, 'gex', all_nbrs, results_df, pngfile)
if args.graph_vs_graph and args.graph_vs_tcr_features and args.graph_vs_gex_features: ################################
pngfile = args.outfile_prefix+'_summary.png'
print('making:', pngfile)
if tcr_cluster_genes_results is not None:
tcr_genes_results = pd.concat( [tcr_nbrhood_genes_results, tcr_cluster_genes_results ], ignore_index=True )
else:
tcr_genes_results = tcr_nbrhood_genes_results
if gex_cluster_scores_results is not None:
gex_scores_results = pd.concat( [gex_nbrhood_scores_results, gex_cluster_scores_results], ignore_index=True )
else:
gex_scores_results = gex_nbrhood_scores_results
# default pval thresholds are .05
conga.plotting.make_summary_figure(adata, tcr_genes_results, gex_scores_results, pngfile )
## some extra analyses
if args.make_tcrdist_trees: # make tcrdist trees for each of the gex clusters, and for conga hits with score < 10
#
width = 800
height = 1000
xpad = 25
organism = adata.uns['organism']
#precomputed = False
#read the raw tcrdist distances (could instead use the kpca euclidean dists)
#distfile = args.clones_file
clusters_gex = np.array(adata.obs['clusters_gex'])
num_clusters = np.max(clusters_gex)+1
tcrs = conga.preprocess.retrieve_tcrs_from_adata(adata)
num_clones = adata.shape[0]
if 'conga_scores' in adata.obs_keys():
conga_scores = np.maximum( 1e-100, np.array(adata.obs['conga_scores']) ) # no zeros!
scores = np.sqrt( np.maximum( 0.0, -1*np.log10( 100*conga_scores/num_clones)))
else:
scores = np.zeros((adata.shape[0],))
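# note on the transform above: score = sqrt(max(0, -log10(100*conga_score/num_clones))), so e.g.
# with 10000 clones a conga score of 1e-6 gives 100*1e-6/10000 = 1e-8 and a color score of
# sqrt(8), roughly 2.8, near the top of the [0, 3] color range set below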
tcrdist = conga.tcrdist.tcr_distances.TcrDistCalculator(organism)
x_offset = 0
all_cmds = []
#color_score_range = [-1*np.log(10), -1*np.log(1e-5)]
color_score_range = [0, 3.0]
print('color_score_range:', color_score_range)
for clust in range(num_clusters):
cmask = (clusters_gex==clust)
csize = np.sum(cmask)
#cinds = np.nonzero(cmask)[0]
ctcrs = [x for x,y in zip( tcrs, cmask) if y]
cscores = [x for x,y in zip(scores, cmask) if y]
print('computing tcrdist distances:', clust, csize)
if csize>1000 and conga.util.tcrdist_cpp_available():
cdists = conga.preprocess.calc_tcrdist_matrix_cpp(ctcrs, adata.uns['organism'])
else:
cdists = np.array([ tcrdist(x,y) for x in ctcrs for y in ctcrs]).reshape(csize,csize)
cmds = conga.tcrdist.make_tcr_trees.make_tcr_tree_svg_commands(
ctcrs, organism, [x_offset,0], [width,height], cdists, max_tcrs_for_trees=400, tcrdist_calculator=tcrdist,
color_scores=cscores, color_score_range = color_score_range, title='GEX cluster {}'.format(clust))
x_offset += width + xpad
all_cmds.extend(cmds)
svgfile = args.outfile_prefix+'_gex_cluster_tcrdist_trees.svg'
print('making:', svgfile[:-3]+'png')
conga.svg_basic.create_file(all_cmds, x_offset-xpad, height, svgfile, create_png=True )
if 'conga_scores' in adata.obs_keys(): # also make a tree of tcrs with conga score < threshold (10?)
threshold = 10.
# recalibrate the scores
scores = np.sqrt( np.maximum( 0.0, -1*np.log10( conga_scores/threshold)))
color_score_range = [0, 3.0] #max(3.0, np.max(scores))]
cmask = (conga_scores<=threshold)
csize = np.sum(cmask)
if csize >= threshold and csize >= 2:
ctcrs = [x for x,y in zip( tcrs, cmask) if y]
cscores = [x for x,y in zip(scores, cmask) if y]
if csize>1000 and conga.util.tcrdist_cpp_available():
cdists = conga.preprocess.calc_tcrdist_matrix_cpp(ctcrs, adata.uns['organism'])
else:
print('computing tcrdist distances for conga hits:', csize)
cdists = np.array([ tcrdist(x,y) for x in ctcrs for y in ctcrs]).reshape(csize,csize)
cmds = conga.tcrdist.make_tcr_trees.make_tcr_tree_svg_commands(
ctcrs, organism, [0,0], [width,height], cdists, max_tcrs_for_trees=400, tcrdist_calculator=tcrdist,
color_scores=cscores, color_score_range = color_score_range,
title='conga_score_threshold {:.1f}'.format(threshold))
svgfile = args.outfile_prefix+'_conga_score_lt_{:.1f}_tcrdist_tree.svg'.format(threshold)
print('making:', svgfile[:-3]+'png')
conga.svg_basic.create_file(cmds, width, height, svgfile, create_png=True )
if args.cluster_vs_cluster:
tcrs = conga.preprocess.retrieve_tcrs_from_adata(adata)
clusters_gex = np.array(adata.obs['clusters_gex'])
clusters_tcr = np.array(adata.obs['clusters_tcr'])
barcodes = list(adata.obs_names)
barcode2tcr = dict(zip(barcodes,tcrs))
conga.correlations.compute_cluster_interactions( clusters_gex, clusters_tcr, barcodes, barcode2tcr, outlog )
if args.plot_cluster_gene_compositions:
pngfile = args.outfile_prefix+'_cluster_gene_compositions.png'
conga.plotting.plot_cluster_gene_compositions(adata, pngfile)
if args.find_gex_cluster_degs: # look at differentially expressed genes in gex clusters
obs_tag = 'genex_clusters'
adata.obs[obs_tag] = [ str(x) for x in adata.obs['clusters_gex']]#.astype('category')
key_added = 'degs_for_gex_clusters'
rank_method = 'wilcoxon'
all_clusters = sorted(set(adata.obs[obs_tag]))
sc.tl.rank_genes_groups(adata, groupby=obs_tag, method=rank_method, groups=all_clusters, reference='rest',
key_added=key_added)
n_genes = 25
sc.pl.rank_genes_groups(adata, n_genes=n_genes, sharey=False, show=False, key=key_added)
pngfile = args.outfile_prefix+'_gex_cluster_degs.png'
plt.savefig(pngfile, bbox_inches="tight")
print('made:', pngfile)
new_rank_genes_genes, var_group_positions, var_group_labels = [],[],[]
allow_gene_repeats = False
min_rank_genes_log2fold_change = 1.0
max_rank_genes_pval_adj=0.05
n_genes_for_plotting = 5
for group in all_clusters:
my_genes = []
for igene,gene in enumerate( adata.uns[key_added]['names'][group] ):
log2fold = adata.uns[key_added]['logfoldchanges'][group][igene]
pval_adj = adata.uns[key_added]['pvals_adj'][group][igene]
#print('rank_gene:',group, igene, gene, log2fold, pval_adj)
if len(my_genes) >= n_genes_for_plotting:
continue
if gene in new_rank_genes_genes and not allow_gene_repeats:
continue # no repeats
elif gene.startswith('MT-'):
continue
elif gene[:3] in ['RPL','RPS'] and gene[3].isdigit():
continue
elif abs(log2fold) < min_rank_genes_log2fold_change:
continue
elif pval_adj > max_rank_genes_pval_adj:
continue
print('log2fold: {:.2f} pval_adj: {:9.1e} score: {:.1f} {} {}'\
.format( log2fold, pval_adj, adata.uns[key_added]['scores'][group][igene],
gene, group ) )
my_genes.append( gene )
if my_genes:
var_group_positions.append( ( len(new_rank_genes_genes),
len(new_rank_genes_genes)+len(my_genes)-1 ) )
var_group_labels.append( group )
new_rank_genes_genes.extend( my_genes )
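# in summary, up to n_genes_for_plotting (5) genes are kept per cluster, skipping repeats,
# mitochondrial (MT-) and ribosomal (RPL*/RPS*) genes, and anything with |log2fold| < 1.0
# or adjusted p-value > 0.05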
if new_rank_genes_genes:
sc.pl.stacked_violin( adata, var_names = new_rank_genes_genes, groupby=obs_tag,
figsize=(10,n_genes_for_plotting*10),
use_raw = True,
stripplot=True, show=False, swap_axes=True,
var_group_positions = var_group_positions,
var_group_labels = var_group_labels,
var_group_rotation = 1.0 )
pngfile = args.outfile_prefix+'_gex_cluster_degs_violin.png'
plt.savefig(pngfile, bbox_inches="tight")
print('made:',pngfile)
sc.pl.dotplot(adata, var_names=new_rank_genes_genes, groupby=obs_tag, show=False,
var_group_labels=var_group_labels,
var_group_positions=var_group_positions)
pngfile = args.outfile_prefix+'_gex_cluster_degs_dotplot.png'
plt.savefig(pngfile, bbox_inches="tight")
print('made:', pngfile)
# this plot_scatter seems to have moved in scanpy; need to update
#sc.pl._tools.plot_scatter( adata, 'gex_2d', ncols = 6, color = new_rank_genes_genes, show=False,
# use_raw = True, s=40)
#pngfile = args.outfile_prefix+'_gex_cluster_degs_tsne.png'
#plt.savefig(pngfile, bbox_inches="tight")
#print('made:', pngfile)
if adata.uns['organism'] == 'human_ig':
# list of B cell marker genes from "Human germinal centres engage memory and naive B cells after influenza vaccination" Turner...Ellebedy, Nature 2020: https://doi.org/10.1038/s41586-020-2711-0
# note that they say activated B cells are distinguished by *lack* of CR2
genes_lines = """GC-Bs BCL6, RGS13, MEF2B, STMN1, ELL3, SERPINA9
PBs XBP1, IRF4, SEC11C, FKBP11, JCHAIN, PRDM1
naive TCL1A, IL4R, CCR7, IGHM, IGHD
act-Bs TBX21, FCRL5, ITGAX, NKG7, ZEB2, CR2
rest TNFRSF13B, CD27, CD24
misc IGHA1 IGHA2 IGHG1 IGHG2 IGHG3 IGHG4 IGHE""".replace(',',' ').split('\n')
genes, var_group_positions, var_group_labels = [], [], []
for line in genes_lines:
my_genes = [ x for x in line.split()[1:] if x in adata.raw.var_names]
print(len(my_genes), line.split())
if my_genes:
var_group_positions.append( (len(genes), len(genes)+len(my_genes)-1) )
var_group_labels.append( line.split()[0])
genes.extend(my_genes)
sc.pl.dotplot(adata, var_names=genes, groupby=obs_tag, show=False, var_group_labels=var_group_labels,
var_group_positions=var_group_positions)
pngfile = args.outfile_prefix+'_gex_cluster_bcell_genes_dotplot.png'
plt.savefig(pngfile, bbox_inches="tight")
print('made:', pngfile)
# show some of our marker genes
organism = adata.uns['organism']
genes = conga.plotting.default_logo_genes[organism] + conga.plotting.default_gex_header_genes[organism]
genes = sorted(set(x for x in genes if x in adata.raw.var_names))
sc.pl.dotplot(adata, var_names=genes, groupby=obs_tag, show=False)
pngfile = args.outfile_prefix+'_gex_cluster_marker_genes_dotplot.png'
plt.savefig(pngfile, bbox_inches="tight")
print('made:', pngfile)
if args.find_hotspot_features:
# My hacky and probably buggy first implementation of the HotSpot method:
#
# "Identifying Informative Gene Modules Across Modalities of Single Cell Genomics"
# David DeTomaso, Nir Yosef
# https://www.biorxiv.org/content/10.1101/2020.02.06.937805v1
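# in brief, as used here: each feature is scored for autocorrelation over the neighbor graph of
# the other modality (genes over the TCR neighbor graph, TCR features over the GEX neighbor graph),
# so significant features are those whose values cluster on that graph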
#all_bicluster_pvals = {}
all_hotspot_nbrhood_results = []
for nbr_frac in args.nbr_fracs:
nbrs_gex, nbrs_tcr = all_nbrs[nbr_frac]
print('find_hotspot_nbrhoods for nbr_frac', nbr_frac)
nbrhood_results = conga.correlations.find_hotspot_nbrhoods(
adata, nbrs_gex, nbrs_tcr, pval_threshold=1.0, also_use_cluster_graphs=False)
# the feature_type column is already set in nbrhood_results to {tcr/gex}_nbrs_vs_graph
if nbrhood_results.shape[0]: #make some simple plots
nbrhood_results['nbr_frac'] = nbr_frac
all_hotspot_nbrhood_results.append( nbrhood_results )
# nrows, ncols = 4, 6
# plt.figure(figsize=(ncols*4, nrows*4))
# for ii,(xy_tag, feature_nbr_tag, graph_tag) in enumerate([(x,y,z) for x in ['gex','tcr']
# for y in ['gex','tcr','combo','max']
# for z in ['graph','clust','combo']]):
# mask = np.full((nbrhood_results.shape[0],), False)
# for ftag in ['gex','tcr'] if feature_nbr_tag in ['combo','max'] else [feature_nbr_tag]:
# for gtag in ['graph','clust'] if graph_tag=='combo' else [graph_tag]:
# feature_type = '{}_nbrs_vs_{}'.format(ftag, gtag)
# mask |= nbrhood_results.feature_type==feature_type
# df = nbrhood_results[mask]
# if df.shape[0]==0:
# print('no hits:', feature_nbr_tag, graph_tag)
# continue
# if feature_nbr_tag == 'max':
# all_pvals = {}
# for tag in ['gex','tcr']:
# pvals = np.full((adata.shape[0],),1000.0)
# for l in df.itertuples():
# if l.feature_type.startswith(tag):
# pvals[l.clone_index] = min(l.pvalue_adj, pvals[l.clone_index])
# all_pvals[tag] = pvals
# pvals = np.maximum(all_pvals['gex'], all_pvals['tcr'])
# else:
# pvals = np.full((adata.shape[0],),1000.0)
# for l in df.itertuples():
# pvals[l.clone_index] = min(l.pvalue_adj, pvals[l.clone_index])
# colors = np.sqrt( np.maximum(0.0, -1*np.log10(pvals)))
# plt.subplot(nrows, ncols, ii+1)
# reorder = np.argsort(colors)
# xy = adata.obsm['X_{}_2d'.format(xy_tag)] # same umap as feature nbr-type
# vmax = np.sqrt(-1*np.log10(1e-5))
# plt.scatter( xy[reorder,0], xy[reorder,1], c=colors[reorder], vmin=0, vmax=vmax)
# plt.xticks([],[])
# plt.yticks([],[])
# plt.xlabel('{} UMAP1'.format(xy_tag))
# plt.title('{}_nbrs_vs_{} nbrfrac= {:.3f}'.format(feature_nbr_tag, graph_tag, nbr_frac))
# if feature_nbr_tag == 'max' and graph_tag == 'graph' and xy_tag=='gex':
# all_bicluster_pvals[nbr_frac] = pvals
# pngfile = '{}_hotspot_nbrhoods_{:.3f}_nbrs.png'.format(args.outfile_prefix, nbr_frac)
# print('making:', pngfile)
# plt.tight_layout()
# plt.savefig(pngfile)
# try making some logo plots. Here we are just using the graph-graph hotspot pvals, max'ed per clone over gex/tcr
# min_cluster_size = max( args.min_cluster_size, int(np.round(args.min_cluster_size_fraction * num_clones)))
# min_pvals = np.array([num_clones]*num_clones)
# for nbr_frac, pvals in all_bicluster_pvals.items():
# min_pvals = np.minimum(min_pvals, pvals)
# pngfile = '{}_hotspot_nbrhood_biclusters.png'.format(args.outfile_prefix)
# conga.plotting.make_cluster_logo_plots_figure(adata, min_pvals, 1.0, nbrs_gex, nbrs_tcr,
# min_cluster_size, pngfile)
print('find_hotspot_genes for nbr_frac', nbr_frac)
gex_results = conga.correlations.find_hotspot_genes(adata, nbrs_tcr, pval_threshold=0.05)
#gex_results['feature_type'] = 'gex'
print('find_hotspot_tcr_features for nbr_frac', nbr_frac)
tcr_results = conga.correlations.find_hotspot_tcr_features(adata, nbrs_gex, pval_threshold=0.05)
#tcr_results['feature_type'] = 'tcr'
combo_results = pd.concat([gex_results, tcr_results])
if combo_results.shape[0]:
tsvfile = '{}_hotspot_features_{:.3f}_nbrs.tsv'.format(args.outfile_prefix, nbr_frac)
combo_results.to_csv(tsvfile, sep='\t', index=False)
for tag, results in [ ['gex', gex_results],
['tcr', tcr_results],
['combo', combo_results] ]:
if results.shape[0]<1:
continue
for plot_tag, plot_nbrs in [['gex',nbrs_gex], ['tcr',nbrs_tcr]]:
if tag == plot_tag:
continue
# 2D UMAPs colored by nbr-averaged feature values
pngfile = '{}_hotspot_{}_features_{:.3f}_nbrs_{}_umap.png'\
.format(args.outfile_prefix, tag, nbr_frac, plot_tag)
print('making:', pngfile)
conga.plotting.plot_hotspot_umap(adata, plot_tag, results, pngfile, nbrs=plot_nbrs,
compute_nbr_averages=True)
if results.shape[0]<2:
continue # clustermap not interesting...
if 'X_pca_'+plot_tag not in adata.obsm_keys():
print(f'skipping clustermap vs {plot_tag} since no X_pca_{plot_tag} in adata.obsm_keys!')
continue
if adata.shape[0] > 30000: ######################### TEMPORARY HACKING ############################
print('skipping hotspot clustermaps because adata is too big:', adata.shape)
continue
## clustermap of features versus cells
features = list(results.feature)
feature_labels = ['{:9.1e} {} {}'.format(x,y,z)
for x,y,z in zip(results.pvalue_adj, results.feature_type, results.feature)]
min_pval = 1e-299 # dont want log10 of 0.0
feature_scores = [np.sqrt(-1*np.log10(max(min_pval, x.pvalue_adj))) for x in results.itertuples()]
if False: # skip the redundant one
pngfile = '{}_{:.3f}_nbrs_{}_hotspot_features_vs_{}_clustermap.png'\
.format(args.outfile_prefix, nbr_frac, tag, plot_tag)
conga.plotting.plot_interesting_features_vs_clustermap(
adata, features, pngfile, plot_tag, nbrs=plot_nbrs, compute_nbr_averages=True,
feature_labels=feature_labels, feature_types = list(results.feature_type),
feature_scores = feature_scores )
# now a more compact version where we filter out redundant features
pngfile = '{}_{:.3f}_nbrs_{}_hotspot_features_vs_{}_clustermap_lessredundant.png'\
.format(args.outfile_prefix, nbr_frac, tag, plot_tag)
redundancy_threshold = 0.9 # duplicate if linear correlation > 0.9
if len(features)>60:
max_redundant_features = 0 # ie anything 1 or higher ==> no duplicates
elif len(features)>30:
max_redundant_features = 1 # at most 1 duplicate
else:
max_redundant_features = 2 # at most 2 duplicates
conga.plotting.plot_interesting_features_vs_clustermap(
adata, features, pngfile, plot_tag, nbrs=plot_nbrs, compute_nbr_averages=True,
feature_labels=feature_labels, feature_types = list(results.feature_type),
max_redundant_features=max_redundant_features, redundancy_threshold=redundancy_threshold,
feature_scores=feature_scores)
# make a plot summarizing the hotspot nbrhood pvals and also save them to a file
if all_hotspot_nbrhood_results:
nbrhood_results = pd.concat(all_hotspot_nbrhood_results)
tcrs = conga.preprocess.retrieve_tcrs_from_adata(adata)
outfile = '{}_hotspot_nbrhoods.tsv'.format(args.outfile_prefix)
for iab, ivj in [ (x,y) for x in range(2) for y in range(3) ]:
key = [ 'va ja cdr3a'.split(), 'vb jb cdr3b'.split()][iab][ivj]
nbrhood_results[key] = [tcrs[x.clone_index][iab][ivj]
for x in nbrhood_results.itertuples()]
print('making:', outfile)
nbrhood_results.to_csv(outfile, sep='\t', index=False)
num_clones = adata.shape[0]
nbrhood_pvals = { 'gex':np.full((num_clones,), num_clones).astype(float),
'tcr':np.full((num_clones,), num_clones).astype(float) }
for l in nbrhood_results.itertuples():
assert l.feature_type[3:] == '_nbrs_vs_graph'
tag = l.feature_type[:3]
nbrhood_pvals[tag][l.clone_index] = min(l.pvalue_adj, nbrhood_pvals[tag][l.clone_index])
plt.figure(figsize=(12,6))
for icol, tag in enumerate(['gex','tcr']):
plt.subplot(1,2,icol+1)
colors = np.sqrt( np.maximum(0.0, -1*np.log10(np.maximum(1e-100, nbrhood_pvals[tag])))) # no log10 of 0.0
print('colors:', tag, np.max(colors), list(colors[:100]))
reorder = np.argsort(colors)
xy = adata.obsm['X_{}_2d'.format(tag)] # same umap as feature nbr-type
vmax = np.sqrt(-1*np.log10(1e-5))
plt.scatter( xy[reorder,0], xy[reorder,1], c=colors[reorder], vmin=0, vmax=vmax)
plt.xticks([],[])
plt.yticks([],[])
plt.xlabel('{} UMAP1'.format(tag))
plt.ylabel('{} UMAP2'.format(tag))
plt.title('{} hotspot nbrhood pvalues'.format(tag))
plt.tight_layout()
pngfile = '{}_hotspot_nbrhoods.png'.format(args.outfile_prefix)
print('making:', pngfile)
plt.savefig(pngfile)
if args.make_hotspot_nbrhood_logos:
nbrs_gex, nbrs_tcr = all_nbrs[ max(args.nbr_fracs) ]
min_cluster_size = max( args.min_cluster_size, int( 0.5 + args.min_cluster_size_fraction * num_clones) )
conga.plotting.make_hotspot_nbrhood_logo_figures(adata, nbrs_gex, nbrs_tcr, nbrhood_results,
min_cluster_size, args.outfile_prefix,
pvalue_threshold=1.0)
if args.analyze_CD4_CD8:
min_nbrs = 10
for nbr_frac in sorted(all_nbrs.keys()):
if nbr_frac * adata.shape[0] > min_nbrs:
nbr_frac_for_plotting = nbr_frac
break
else:
nbr_frac_for_plotting = max(all_nbrs.keys())
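# picks the smallest nbr_frac whose neighborhoods contain more than min_nbrs (10) clones;
# e.g. with 2000 clones, 0.01*2000 = 20 > 10, so the 0.01 neighborhoods would be used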
conga.plotting.analyze_CD4_CD8(adata, all_nbrs[nbr_frac_for_plotting][0], args.outfile_prefix)
if args.analyze_proteins:
conga.plotting.analyze_proteins(adata, args.outfile_prefix)
if args.analyze_special_genes:
conga.plotting.analyze_special_genes(adata, args.outfile_prefix)
## make summary plots of top clones and their batch distributions
if 'batch_keys' in adata.uns_keys():
conga_scores, tcr_clumping_pvalues = None, None
if args.graph_vs_graph:
conga_scores = adata.obs['conga_scores']
if args.tcr_clumping:
tcr_clumping_pvalues = adata.obs['tcr_clumping_pvalues']
conga.plotting.make_clone_batch_clustermaps(
adata, args.outfile_prefix, adata.uns['batch_keys'],
conga_scores = conga_scores, tcr_clumping_pvalues = tcr_clumping_pvalues,
batch_bias_results = batch_bias_results )
# just out of curiosity:
conga.correlations.check_nbr_graphs_indegree_bias(all_nbrs)
if args.find_distance_correlations:
clusters_gex = np.array(adata.obs['clusters_gex'])
clusters_tcr = np.array(adata.obs['clusters_tcr'])
pvalues, rvalues = conga.correlations.compute_distance_correlations(adata)
results = []
for ii, (pval, rval) in enumerate(zip(pvalues, rvalues)):
if pval<1:
results.append( dict( clone_index=ii, pvalue_adj=pval, rvalue=rval, gex_cluster=clusters_gex[ii],
tcr_cluster=clusters_tcr[ii]))
if results:
results_df = pd.DataFrame(results)
outfile = args.outfile_prefix+'_distance_correlations.tsv'
results_df.to_csv(outfile, sep='\t', index=False)
if args.find_pmhc_nbrhood_overlaps:
agroups, bgroups = conga.preprocess.setup_tcr_groups(adata)
pmhc_nbrhood_overlap_results = []
for nbr_frac in args.nbr_fracs:
nbrs_gex, nbrs_tcr = all_nbrs[nbr_frac]
for tag, nbrs in [['gex', nbrs_gex], ['tcr', nbrs_tcr]]:
results_df = conga.pmhc_scoring.compute_pmhc_versus_nbrs(adata, nbrs, agroups, bgroups )
results_df['nbr_tag'] = tag
results_df['nbr_frac'] = nbr_frac
pmhc_nbrhood_overlap_results.append( results_df )
tsvfile = args.outfile_prefix+'_pmhc_versus_nbrs.tsv'
print('making:', tsvfile)
pd.concat(pmhc_nbrhood_overlap_results).to_csv(tsvfile, index=False, sep='\t')
if args.write_proj_info:
outfile = args.outfile_prefix+'_2d_proj_info.txt'
conga.preprocess.write_proj_info( adata, outfile )
adata.write_h5ad(args.outfile_prefix+'_final.h5ad')
adata.obs.to_csv(args.outfile_prefix+'_final_obs.tsv', sep='\t')
outlog.write('run_conga took {:.3f} minutes\n'.format((time.time()- start_time)/60))
outlog.close()
print('DONE')
| 49.166142 | 237 | 0.662433 |