hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
bede1a634039ded6d50b19ffe133714751da4bcc | 3,023 | py | Python | baseline/model.py | minus31/vision_competition | adf652a16711946d5976ed3cd9b2470bd98e27f5 | [
"MIT"
] | null | null | null | baseline/model.py | minus31/vision_competition | adf652a16711946d5976ed3cd9b2470bd98e27f5 | [
"MIT"
] | null | null | null | baseline/model.py | minus31/vision_competition | adf652a16711946d5976ed3cd9b2470bd98e27f5 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import torchvision.models as models
from efficientnet_pytorch import EfficientNet
class Baseline(nn.Module):
    """Small fully-convolutional classifier built from scratch.

    Five stride-2 Conv/ReLU/BatchNorm stages halve the spatial size each
    time, and a final 4x4 valid convolution maps the features to
    ``out_size`` channels; the trailing 1x1 spatial dims are squeezed off
    in ``forward``.
    """
    def __init__(self, hidden_size, out_size):
        super().__init__()
        layers = []
        in_channels = 3
        # Five identical downsampling stages (stride 2, padding 1).
        for _ in range(5):
            layers.append(nn.Conv2d(in_channels, hidden_size,
                                    kernel_size=3, stride=2, padding=1))
            layers.append(nn.ReLU(inplace=True))
            layers.append(nn.BatchNorm2d(hidden_size))
            in_channels = hidden_size
        # Head: 4x4 valid conv collapses the remaining spatial extent.
        layers.append(nn.Conv2d(hidden_size, out_size, kernel_size=4, stride=1))
        self.net = nn.Sequential(*layers)

    def forward(self, image):
        features = self.net(image)
        # (N, out_size, 1, 1) -> (N, out_size)
        return features.squeeze(-1).squeeze(-1)
class Resnet(nn.Module):
    """Frozen ImageNet-pretrained ResNet-18 with a trainable 1x1 conv head.

    BUG FIX: this class previously loaded ``densenet161`` (whose feature
    extractor outputs 2208 channels at 7x7), which is incompatible with the
    ``Conv2d(512, ...)`` head below and would crash at forward time.
    Restored ``resnet18`` (see the original commented-out line), whose
    trimmed stack ends with the adaptive avgpool emitting (N, 512, 1, 1).
    """
    def __init__(self, out_size):
        super().__init__()
        model = models.resnet18(pretrained=True)
        # Freeze the backbone; only the appended 1x1 conv head is trained.
        for param in model.parameters():
            param.requires_grad = False
        # Drop the final fc layer; the stack now ends with avgpool -> (N, 512, 1, 1).
        model = list(model.children())[:-1]
        model.append(nn.Conv2d(512, out_size, 1))
        self.net = nn.Sequential(*model)
    def forward(self, image):
        # (N, out_size, 1, 1) -> (N, out_size)
        return self.net(image).squeeze(-1).squeeze(-1)
class DenseNet(nn.Module):
    """Frozen ImageNet-pretrained DenseNet-161 with a trainable conv head.

    The backbone's classifier is dropped, leaving the 2208-channel feature
    extractor; a 7x7 valid convolution (assumes a 7x7 feature map, i.e.
    224x224 input — confirm against callers) maps it to ``out_size`` logits.
    """
    def __init__(self, out_size):
        super().__init__()
        backbone = models.densenet161(pretrained=True)
        # Freeze every backbone weight; only the new head is trained.
        for weight in backbone.parameters():
            weight.requires_grad = False
        # children()[:-1] keeps the `features` module, drops the classifier.
        modules = list(backbone.children())[:-1]
        modules.append(nn.Conv2d(2208, out_size, 7))
        self.net = nn.Sequential(*modules)
    def forward(self, image):
        logits = self.net(image)
        # (N, out_size, 1, 1) -> (N, out_size)
        return logits.squeeze(-1).squeeze(-1)
from efficientnet_pytorch import EfficientNet
# class Efficientnet(nn.Module):
# def __init__(self, out_size):
# super().__init__()
# self.model = EfficientNet.from_pretrained('efficientnet-b7')
# for param in self.model.parameters():
# param.requires_grad = False
# def forward(self, image):
# f = self.model.extract_features(image)
# conv = nn.Conv2d(2560, 350, 7).cuda()
# f = conv(f)
# return f.squeeze(-1).squeeze(-1)
#############################################
# no update extractor and add FCN at the last
class Efficientnet(nn.Module):
    """EfficientNet-B7 fine-tuned end-to-end with a fresh classifier head.

    Unlike the commented-out variant above, the extractor is NOT frozen.
    BUG FIX: the replacement head previously hard-coded 350 output units,
    silently ignoring the ``out_size`` constructor parameter; it now honors
    ``out_size`` (callers that passed 350 are unaffected).
    """
    def __init__(self, out_size):
        super().__init__()
        self.model = EfficientNet.from_pretrained('efficientnet-b7')
        # Replace the stock head; B7's final feature dimension is 2560.
        self.model._fc = nn.Linear(2560, out_size)
    def forward(self, image):
        return self.model(image)
8d2aca29f447e745138a851cc81ebe6e4b6e60c4 | 580 | py | Python | apps/vscode/snippets/csharp_snippets.py | joshwcomeau/knausj_talon | b7ba05834699eb7337f6a6be7e5ba18a4ce6ffba | [
"Unlicense"
] | 1 | 2021-02-24T19:55:37.000Z | 2021-02-24T19:55:37.000Z | apps/vscode/snippets/csharp_snippets.py | joshwcomeau/knausj_talon | b7ba05834699eb7337f6a6be7e5ba18a4ce6ffba | [
"Unlicense"
] | null | null | null | apps/vscode/snippets/csharp_snippets.py | joshwcomeau/knausj_talon | b7ba05834699eb7337f6a6be7e5ba18a4ce6ffba | [
"Unlicense"
] | null | null | null | from talon import Context, actions, ui, Module, app
ctx = Context()
# Activate this context in any VS Code window variant while C# command
# mode is active. FIX: removed a duplicated "app: Code" matcher line.
ctx.matches = r'''
app: Code
app: Code - OSS
app: Visual Studio Code
app: Code.exe
mode: user.csharp
mode: command
and code.language: csharp
'''

#short name -> ide clip name
# Maps the spoken phrase to the VS Code snippet prefix to insert;
# commented-out entries are candidates that are not currently enabled.
ctx.lists["user.snippets"] = {
    #"funky": "def",
    #"for": "for",
    "for each": "foreach",
    "while": "while",
    "class": "class",
    #"class funky": "def(class method)",
    #"class static funky": "def(class static method)",
    "if": "if",
    "else": "else",
    "try except": "try",
    "try finally": "tryf"
}
e9e21b6815ba507f6ffd2ba6ae2ec5b51beb1b09 | 23,698 | py | Python | code/ch08/ch08.py | ergosfera1974/python-machine-learning-book-2nd-edition | ee85b8fe8551e16a9ac24ed294a37bfe5ff31bf2 | [
"MIT"
] | 6,947 | 2017-08-19T17:18:55.000Z | 2022-03-29T05:58:23.000Z | code/ch08/ch08.py | ergosfera1974/python-machine-learning-book-2nd-edition | ee85b8fe8551e16a9ac24ed294a37bfe5ff31bf2 | [
"MIT"
] | 94 | 2017-08-19T17:27:39.000Z | 2021-02-05T02:17:47.000Z | code/ch08/ch08.py | ergosfera1974/python-machine-learning-book-2nd-edition | ee85b8fe8551e16a9ac24ed294a37bfe5ff31bf2 | [
"MIT"
] | 2,771 | 2017-08-20T04:09:00.000Z | 2022-03-31T01:31:22.000Z | # coding: utf-8
import os
import sys
import tarfile
import time
import pyprind
import pandas as pd
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
import re
from nltk.stem.porter import PorterStemmer
import nltk
from nltk.corpus import stopwords
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LogisticRegression
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import cross_val_score
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.decomposition import LatentDirichletAllocation
from distutils.version import LooseVersion as Version
from sklearn import __version__ as sklearn_version
# Added version check for recent scikit-learn 0.18 checks
# *Python Machine Learning 2nd Edition* by [Sebastian Raschka](https://sebastianraschka.com), Packt Publishing Ltd. 2017
#
# Code Repository: https://github.com/rasbt/python-machine-learning-book-2nd-edition
#
# Code License: [MIT License](https://github.com/rasbt/python-machine-learning-book-2nd-edition/blob/master/LICENSE.txt)
# # Python Machine Learning - Code Examples
# # Chapter 8 - Applying Machine Learning To Sentiment Analysis
# Note that the optional watermark extension is a small IPython notebook plugin that I developed to make the code reproducible. You can just skip the following line(s).
# *The use of `watermark` is optional. You can install this IPython extension via "`pip install watermark`". For more information, please see: https://github.com/rasbt/watermark.*
# ### Overview
# - [Preparing the IMDb movie review data for text processing](#Preparing-the-IMDb-movie-review-data-for-text-processing)
# - [Obtaining the IMDb movie review dataset](#Obtaining-the-IMDb-movie-review-dataset)
# - [Preprocessing the movie dataset into more convenient format](#Preprocessing-the-movie-dataset-into-more-convenient-format)
# - [Introducing the bag-of-words model](#Introducing-the-bag-of-words-model)
# - [Transforming words into feature vectors](#Transforming-words-into-feature-vectors)
# - [Assessing word relevancy via term frequency-inverse document frequency](#Assessing-word-relevancy-via-term-frequency-inverse-document-frequency)
# - [Cleaning text data](#Cleaning-text-data)
# - [Processing documents into tokens](#Processing-documents-into-tokens)
# - [Training a logistic regression model for document classification](#Training-a-logistic-regression-model-for-document-classification)
# - [Working with bigger data – online algorithms and out-of-core learning](#Working-with-bigger-data-–-online-algorithms-and-out-of-core-learning)
# - [Topic modeling](#Topic-modeling)
# - [Decomposing text documents with Latent Dirichlet Allocation](#Decomposing-text-documents-with-Latent-Dirichlet-Allocation)
# - [Latent Dirichlet Allocation with scikit-learn](#Latent-Dirichlet-Allocation-with-scikit-learn)
# - [Summary](#Summary)
# # Preparing the IMDb movie review data for text processing
# ## Obtaining the IMDb movie review dataset
# The IMDB movie review set can be downloaded from [http://ai.stanford.edu/~amaas/data/sentiment/](http://ai.stanford.edu/~amaas/data/sentiment/).
# After downloading the dataset, decompress the files.
#
# A) If you are working with Linux or MacOS X, open a new terminal windowm `cd` into the download directory and execute
#
# `tar -zxf aclImdb_v1.tar.gz`
#
# B) If you are working with Windows, download an archiver such as [7Zip](http://www.7-zip.org) to extract the files from the download archive.
# **Optional code to download and unzip the dataset via Python:**
# Remote archive of the IMDb review dataset and the local download target.
source = 'http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz'
target = 'aclImdb_v1.tar.gz'
def reporthook(count, block_size, total_size):
    """urlretrieve progress callback: prints percent, MB, speed, elapsed.

    count: number of blocks transferred so far (0 on the initial call).
    block_size: size of one transfer block in bytes.
    total_size: total download size in bytes.
    """
    global start_time
    if count == 0:
        # First callback: remember when the transfer started.
        start_time = time.time()
        return
    # BUG FIX: clamp the elapsed time away from zero — a callback arriving
    # within the clock's resolution previously raised ZeroDivisionError
    # when computing the speed.
    duration = max(time.time() - start_time, 1e-6)
    progress_size = int(count * block_size)
    speed = progress_size / (1024.**2 * duration)
    percent = count * block_size * 100. / total_size
    sys.stdout.write("\r%d%% | %d MB | %.2f MB/s | %d sec elapsed" %
                     (percent, progress_size / (1024.**2), speed, duration))
    sys.stdout.flush()
# Download only if neither the extracted folder nor the tarball exists;
# pick the urllib API matching the running Python major version.
if not os.path.isdir('aclImdb') and not os.path.isfile('aclImdb_v1.tar.gz'):
    if (sys.version_info < (3, 0)):
        import urllib
        urllib.urlretrieve(source, target, reporthook)
    else:
        import urllib.request
        urllib.request.urlretrieve(source, target, reporthook)
# Extract once. NOTE(review): extractall() trusts member paths inside the
# archive; acceptable for this well-known dataset, unsafe for untrusted tars.
if not os.path.isdir('aclImdb'):
    with tarfile.open(target, 'r:gz') as tar:
        tar.extractall()
# ## Preprocessing the movie dataset into more convenient format
# change the `basepath` to the directory of the
# unzipped movie dataset
# Read all 50,000 review files into one DataFrame: label 1 = positive,
# 0 = negative, walking aclImdb/{test,train}/{pos,neg}/.
basepath = 'aclImdb'
labels = {'pos': 1, 'neg': 0}
pbar = pyprind.ProgBar(50000)
df = pd.DataFrame()
for s in ('test', 'train'):
    for l in ('pos', 'neg'):
        path = os.path.join(basepath, s, l)
        for file in os.listdir(path):
            with open(os.path.join(path, file),
                      'r', encoding='utf-8') as infile:
                txt = infile.read()
            # NOTE(review): DataFrame.append was removed in pandas 2.0;
            # on modern pandas collect rows in a list and pd.concat once.
            df = df.append([[txt, labels[l]]],
                           ignore_index=True)
            pbar.update()
df.columns = ['review', 'sentiment']
# Shuffling the DataFrame: rows arrive grouped by class, so shuffle with a
# fixed seed for reproducibility before saving.
np.random.seed(0)
df = df.reindex(np.random.permutation(df.index))
# Optional: Saving the assembled data as CSV file:
df.to_csv('movie_data.csv', index=False, encoding='utf-8')
df = pd.read_csv('movie_data.csv', encoding='utf-8')
df.head(3)
# ### Note
#
# If you have problems with creating the `movie_data.csv` file in the previous chapter, you can find a download a zip archive at
# https://github.com/rasbt/python-machine-learning-book-2nd-edition/tree/master/code/ch08/
# # Introducing the bag-of-words model
# ...
# ## Transforming documents into feature vectors
# By calling the fit_transform method on CountVectorizer, we just constructed the vocabulary of the bag-of-words model and transformed the following three sentences into sparse feature vectors:
# 1. The sun is shining
# 2. The weather is sweet
# 3. The sun is shining, the weather is sweet, and one and one is two
#
# Build the bag-of-words vocabulary and transform the three example
# sentences into sparse raw-term-frequency vectors.
count = CountVectorizer()
docs = np.array([
        'The sun is shining',
        'The weather is sweet',
        'The sun is shining, the weather is sweet, and one and one is two'])
bag = count.fit_transform(docs)
# Now let us print the contents of the vocabulary to get a better understanding of the underlying concepts:
print(count.vocabulary_)
# As we can see from executing the preceding command, the vocabulary is stored in a Python dictionary, which maps the unique words that are mapped to integer indices. Next let us print the feature vectors that we just created:
# Each index position in the feature vectors shown here corresponds to the integer values that are stored as dictionary items in the CountVectorizer vocabulary. For example, the rst feature at index position 0 resembles the count of the word and, which only occurs in the last document, and the word is at index position 1 (the 2nd feature in the document vectors) occurs in all three sentences. Those values in the feature vectors are also called the raw term frequencies: *tf (t,d)*—the number of times a term t occurs in a document *d*.
print(bag.toarray())
# ## Assessing word relevancy via term frequency-inverse document frequency
np.set_printoptions(precision=2)
# When we are analyzing text data, we often encounter words that occur across multiple documents from both classes. Those frequently occurring words typically don't contain useful or discriminatory information. In this subsection, we will learn about a useful technique called term frequency-inverse document frequency (tf-idf) that can be used to downweight those frequently occurring words in the feature vectors. The tf-idf can be de ned as the product of the term frequency and the inverse document frequency:
#
# $$\text{tf-idf}(t,d)=\text{tf (t,d)}\times \text{idf}(t,d)$$
#
# Here the tf(t, d) is the term frequency that we introduced in the previous section,
# and the inverse document frequency *idf(t, d)* can be calculated as:
#
# $$\text{idf}(t,d) = \text{log}\frac{n_d}{1+\text{df}(d, t)},$$
#
# where $n_d$ is the total number of documents, and *df(d, t)* is the number of documents *d* that contain the term *t*. Note that adding the constant 1 to the denominator is optional and serves the purpose of assigning a non-zero value to terms that occur in all training samples; the log is used to ensure that low document frequencies are not given too much weight.
#
# Scikit-learn implements yet another transformer, the `TfidfTransformer`, that takes the raw term frequencies from `CountVectorizer` as input and transforms them into tf-idfs:
# Re-weight the raw counts with tf-idf: terms frequent across all
# documents get down-weighted; each row is L2-normalized to unit length.
tfidf = TfidfTransformer(use_idf=True,
                         norm='l2',
                         smooth_idf=True)
print(tfidf.fit_transform(count.fit_transform(docs))
      .toarray())
# As we saw in the previous subsection, the word is had the largest term frequency in the 3rd document, being the most frequently occurring word. However, after transforming the same feature vector into tf-idfs, we see that the word is is
# now associated with a relatively small tf-idf (0.45) in document 3 since it is
# also contained in documents 1 and 2 and thus is unlikely to contain any useful, discriminatory information.
#
# However, if we'd manually calculated the tf-idfs of the individual terms in our feature vectors, we'd have noticed that the `TfidfTransformer` calculates the tf-idfs slightly differently compared to the standard textbook equations that we de ned earlier. The equations for the idf and tf-idf that were implemented in scikit-learn are:
# $$\text{idf} (t,d) = log\frac{1 + n_d}{1 + \text{df}(d, t)}$$
#
# The tf-idf equation that was implemented in scikit-learn is as follows:
#
# $$\text{tf-idf}(t,d) = \text{tf}(t,d) \times (\text{idf}(t,d)+1)$$
#
# While it is also more typical to normalize the raw term frequencies before calculating the tf-idfs, the `TfidfTransformer` normalizes the tf-idfs directly.
#
# By default (`norm='l2'`), scikit-learn's TfidfTransformer applies the L2-normalization, which returns a vector of length 1 by dividing an un-normalized feature vector *v* by its L2-norm:
#
# $$v_{\text{norm}} = \frac{v}{||v||_2} = \frac{v}{\sqrt{v_{1}^{2} + v_{2}^{2} + \dots + v_{n}^{2}}} = \frac{v}{\big (\sum_{i=1}^{n} v_{i}^{2}\big)^\frac{1}{2}}$$
#
# To make sure that we understand how TfidfTransformer works, let us walk
# through an example and calculate the tf-idf of the word is in the 3rd document.
#
# The word is has a term frequency of 3 (tf = 3) in document 3, and the document frequency of this term is 3 since the term is occurs in all three documents (df = 3). Thus, we can calculate the idf as follows:
#
# $$\text{idf}("is", d3) = log \frac{1+3}{1+3} = 0$$
#
# Now in order to calculate the tf-idf, we simply need to add 1 to the inverse document frequency and multiply it by the term frequency:
#
# $$\text{tf-idf}("is",d3)= 3 \times (0+1) = 3$$
# Manual walk-through of scikit-learn's tf-idf for the word "is" in doc 3:
# idf = log((1 + n_docs) / (1 + df)) = log(4/4) = 0; tf-idf = tf * (idf + 1).
tf_is = 3
n_docs = 3
idf_is = np.log((n_docs+1) / (3+1))
tfidf_is = tf_is * (idf_is + 1)
print('tf-idf of term "is" = %.2f' % tfidf_is)
# If we repeated these calculations for all terms in the 3rd document, we'd obtain the following tf-idf vectors: [3.39, 3.0, 3.39, 1.29, 1.29, 1.29, 2.0 , 1.69, 1.29]. However, we notice that the values in this feature vector are different from the values that we obtained from the TfidfTransformer that we used previously. The nal step that we are missing in this tf-idf calculation is the L2-normalization, which can be applied as follows:
# $$\text{tfi-df}_{norm} = \frac{[3.39, 3.0, 3.39, 1.29, 1.29, 1.29, 2.0 , 1.69, 1.29]}{\sqrt{[3.39^2, 3.0^2, 3.39^2, 1.29^2, 1.29^2, 1.29^2, 2.0^2 , 1.69^2, 1.29^2]}}$$
#
# $$=[0.5, 0.45, 0.5, 0.19, 0.19, 0.19, 0.3, 0.25, 0.19]$$
#
# $$\Rightarrow \text{tfi-df}_{norm}("is", d3) = 0.45$$
# As we can see, the results match the results returned by scikit-learn's `TfidfTransformer` (below). Since we now understand how tf-idfs are calculated, let us proceed to the next sections and apply those concepts to the movie review dataset.
# Recompute without normalization, then apply the L2 norm by hand to show
# it reproduces TfidfTransformer's default norm='l2' output.
tfidf = TfidfTransformer(use_idf=True, norm=None, smooth_idf=True)
raw_tfidf = tfidf.fit_transform(count.fit_transform(docs)).toarray()[-1]
raw_tfidf
l2_tfidf = raw_tfidf / np.sqrt(np.sum(raw_tfidf**2))
l2_tfidf
# ## Cleaning text data
df.loc[0, 'review'][-50:]
def preprocessor(text):
    """Strip HTML tags, keep emoticons, lowercase, and normalize whitespace.

    FIX: regex patterns are now raw strings; the previous plain strings
    ('[\\W]+', '\\)') trigger invalid-escape-sequence warnings on modern
    Python (SyntaxWarning since 3.12). Behavior is unchanged.
    """
    # Remove HTML markup such as <br /> tags.
    text = re.sub(r'<[^>]*>', '', text)
    # Capture emoticons like :), ;-(, =D before punctuation is stripped.
    emoticons = re.findall(r'(?::|;|=)(?:-)?(?:\)|\(|D|P)',
                           text)
    # Drop all non-word characters, lowercase, and re-append the emoticons
    # (with the '-' "nose" removed) so sentiment cues survive cleaning.
    text = (re.sub(r'[\W]+', ' ', text.lower()) +
            ' '.join(emoticons).replace('-', ''))
    return text
preprocessor(df.loc[0, 'review'][-50:])
preprocessor("</a>This :) is :( a test :-)!")
# Clean every review in place using the preprocessor defined above.
df['review'] = df['review'].apply(preprocessor)
# ## Processing documents into tokens
porter = PorterStemmer()  # NLTK Porter stemmer shared by tokenizer_porter
def tokenizer(text):
    """Split raw text into whitespace-delimited tokens."""
    tokens = text.split()
    return tokens
def tokenizer_porter(text):
    """Tokenize on whitespace and reduce each token to its Porter stem."""
    stems = []
    for word in text.split():
        stems.append(porter.stem(word))
    return stems
tokenizer('runners like running and thus they run')
tokenizer_porter('runners like running and thus they run')
# Fetch NLTK's English stop-word list once, then demo filtering it out of
# the stemmed tokens.
nltk.download('stopwords')
stop = stopwords.words('english')
[w for w in tokenizer_porter('a runner likes running and runs a lot')[-10:]
if w not in stop]
# # Training a logistic regression model for document classification
# Strip HTML and punctuation to speed up the GridSearch later:
# Split: first 25,000 rows for training, the rest for testing.
# NOTE(review): df.loc[:25000] is inclusive, so row 25000 appears in both
# splits (one-sample overlap) — confirm this matches the intended split.
X_train = df.loc[:25000, 'review'].values
y_train = df.loc[:25000, 'sentiment'].values
X_test = df.loc[25000:, 'review'].values
y_test = df.loc[25000:, 'sentiment'].values
# TF-IDF on the already-cleaned text; lowercasing is left to the tokenizers.
tfidf = TfidfVectorizer(strip_accents=None,
                        lowercase=False,
                        preprocessor=None)
# Two sub-grids: idf-weighted (defaults) and raw term frequencies
# (use_idf=False, norm=None); both sweep stop words, tokenizer, penalty, C.
param_grid = [{'vect__ngram_range': [(1, 1)],
               'vect__stop_words': [stop, None],
               'vect__tokenizer': [tokenizer, tokenizer_porter],
               'clf__penalty': ['l1', 'l2'],
               'clf__C': [1.0, 10.0, 100.0]},
              {'vect__ngram_range': [(1, 1)],
               'vect__stop_words': [stop, None],
               'vect__tokenizer': [tokenizer, tokenizer_porter],
               'vect__use_idf':[False],
               'vect__norm':[None],
               'clf__penalty': ['l1', 'l2'],
               'clf__C': [1.0, 10.0, 100.0]},
              ]
lr_tfidf = Pipeline([('vect', tfidf),
                     ('clf', LogisticRegression(random_state=0))])
# 5-fold CV over the whole grid; n_jobs=-1 uses all available cores.
gs_lr_tfidf = GridSearchCV(lr_tfidf, param_grid,
                           scoring='accuracy',
                           cv=5,
                           verbose=1,
                           n_jobs=-1)
# **Important Note about `n_jobs`**
#
# Please note that it is highly recommended to use `n_jobs=-1` (instead of `n_jobs=1`) in the previous code example to utilize all available cores on your machine and speed up the grid search. However, some Windows users reported issues when running the previous code with the `n_jobs=-1` setting related to pickling the tokenizer and tokenizer_porter functions for multiprocessing on Windows. Another workaround would be to replace those two functions, `[tokenizer, tokenizer_porter]`, with `[str.split]`. However, note that the replacement by the simple `str.split` would not support stemming.
# **Important Note about the running time**
#
# Executing the following code cell **may take up to 30-60 min** depending on your machine, since based on the parameter grid we defined, there are 2*2*2*3*5 + 2*2*2*3*5 = 240 models to fit.
#
# If you do not wish to wait so long, you could reduce the size of the dataset by decreasing the number of training samples, for example, as follows:
#
# X_train = df.loc[:2500, 'review'].values
# y_train = df.loc[:2500, 'sentiment'].values
#
# However, note that decreasing the training set size to such a small number will likely result in poorly performing models. Alternatively, you can delete parameters from the grid above to reduce the number of models to fit -- for example, by using the following:
#
# param_grid = [{'vect__ngram_range': [(1, 1)],
# 'vect__stop_words': [stop, None],
# 'vect__tokenizer': [tokenizer],
# 'clf__penalty': ['l1', 'l2'],
# 'clf__C': [1.0, 10.0]},
# ]
## @Readers: PLEASE IGNORE THIS CELL
##
## This cell is meant to generate more
## "logging" output when this notebook is run
## on the Travis Continuous Integration
## platform to test the code as well as
## speeding up the run using a smaller
## dataset for debugging
# On Travis CI, shrink train/test drastically so the grid search finishes.
if 'TRAVIS' in os.environ:
    gs_lr_tfidf.verbose=2
    X_train = df.loc[:250, 'review'].values
    y_train = df.loc[:250, 'sentiment'].values
    X_test = df.loc[25000:25250, 'review'].values
    y_test = df.loc[25000:25250, 'sentiment'].values
# Fit all grid candidates (30-60 min on the full dataset), then report the
# best configuration, its mean CV accuracy, and held-out test accuracy.
gs_lr_tfidf.fit(X_train, y_train)
print('Best parameter set: %s ' % gs_lr_tfidf.best_params_)
print('CV Accuracy: %.3f' % gs_lr_tfidf.best_score_)
clf = gs_lr_tfidf.best_estimator_
print('Test Accuracy: %.3f' % clf.score(X_test, y_test))
# #### Start comment:
#
# Please note that `gs_lr_tfidf.best_score_` is the average k-fold cross-validation score. I.e., if we have a `GridSearchCV` object with 5-fold cross-validation (like the one above), the `best_score_` attribute returns the average score over the 5-folds of the best model. To illustrate this with an example:
# Toy example: show that GridSearchCV.best_score_ equals the mean of the
# per-fold scores returned by cross_val_score on the same folds.
np.random.seed(0)
np.set_printoptions(precision=6)
y = [np.random.randint(3) for i in range(25)]
X = (y + np.random.randn(25)).reshape(-1, 1)
# NOTE(review): newer scikit-learn raises if random_state is set while
# shuffle=False; harmless on the 0.18-era versions this script targets.
cv5_idx = list(StratifiedKFold(n_splits=5, shuffle=False, random_state=0).split(X, y))
cross_val_score(LogisticRegression(random_state=123), X, y, cv=cv5_idx)
# By executing the code above, we created a simple data set of random integers that shall represent our class labels. Next, we fed the indices of 5 cross-validation folds (`cv3_idx`) to the `cross_val_score` scorer, which returned 5 accuracy scores -- these are the 5 accuracy values for the 5 test folds.
#
# Next, let us use the `GridSearchCV` object and feed it the same 5 cross-validation sets (via the pre-generated `cv3_idx` indices):
# Empty param grid {}: fits a single model per fold on the same fixed folds.
gs = GridSearchCV(LogisticRegression(), {}, cv=cv5_idx, verbose=3).fit(X, y)
# As we can see, the scores for the 5 folds are exactly the same as the ones from `cross_val_score` earlier.
# Now, the best_score_ attribute of the `GridSearchCV` object, which becomes available after `fit`ting, returns the average accuracy score of the best model:
gs.best_score_
# As we can see, the result above is consistent with the average score computed the `cross_val_score`.
cross_val_score(LogisticRegression(), X, y, cv=cv5_idx).mean()
# #### End comment.
#
# # Working with bigger data - online algorithms and out-of-core learning
def tokenizer(text):
    """Clean a raw review and return stop-word-filtered tokens.

    Out-of-core variant that combines HTML stripping, emoticon rescue, and
    stop-word removal (uses the module-level `stop` list) in one pass.
    FIX: regex patterns are now raw strings; the previous plain strings
    trigger invalid-escape-sequence warnings on modern Python. Behavior is
    unchanged. NOTE: this redefines the earlier `tokenizer`, as intended by
    the original notebook.
    """
    # Strip HTML markup.
    text = re.sub(r'<[^>]*>', '', text)
    # Collect emoticons before punctuation removal; note they are taken
    # from the lowercased text here, unlike the earlier preprocessor.
    emoticons = re.findall(r'(?::|;|=)(?:-)?(?:\)|\(|D|P)', text.lower())
    text = re.sub(r'[\W]+', ' ', text.lower()) + ' '.join(emoticons).replace('-', '')
    tokenized = [w for w in text.split() if w not in stop]
    return tokenized
def stream_docs(path):
    """Lazily yield (review_text, label) pairs from a movie_data-style CSV."""
    with open(path, 'r', encoding='utf-8') as infile:
        next(infile)  # discard the header row
        for row in infile:
            # Each row ends with ',<label>\n': slice off those three
            # characters for the text, and read the digit before '\n'.
            yield row[:-3], int(row[-2])
next(stream_docs(path='movie_data.csv'))
def get_minibatch(doc_stream, size):
    """Pull up to `size` documents from doc_stream.

    Returns (docs, labels) lists of length `size`, or (None, None) once the
    stream runs dry mid-batch — any partial batch is discarded, which is the
    sentinel the training loop relies on to stop.
    """
    docs, labels = [], []
    for _ in range(size):
        try:
            text, label = next(doc_stream)
        except StopIteration:
            return None, None
        docs.append(text)
        labels.append(label)
    return docs, labels
# Stateless hashing vectorizer: no fitted vocabulary is needed, so it works
# for out-of-core learning; 2**21 buckets keep hash collisions rare.
vect = HashingVectorizer(decode_error='ignore',
                         n_features=2**21,
                         preprocessor=None,
                         tokenizer=tokenizer)
# Logistic-regression SGD; the parameter was renamed n_iter -> max_iter in
# scikit-learn 0.18. NOTE(review): loss='log' became 'log_loss' in 1.1+.
if Version(sklearn_version) < '0.18':
    clf = SGDClassifier(loss='log', random_state=1, n_iter=1)
else:
    clf = SGDClassifier(loss='log', random_state=1, max_iter=1)
doc_stream = stream_docs(path='movie_data.csv')
# **Note**
#
# - You can replace `Perceptron(n_iter, ...)` by `Perceptron(max_iter, ...)` in scikit-learn >= 0.19. The `n_iter` parameter is used here deriberately, because some people still use scikit-learn 0.18.
#
# Out-of-core training: 45 minibatches of 1,000 documents (45,000 reviews),
# leaving the final 5,000 streamed rows for the test batch below.
pbar = pyprind.ProgBar(45)
classes = np.array([0, 1])
for _ in range(45):
    X_train, y_train = get_minibatch(doc_stream, size=1000)
    # get_minibatch returns (None, None) when the stream is exhausted.
    if not X_train:
        break
    X_train = vect.transform(X_train)
    clf.partial_fit(X_train, y_train, classes=classes)
    pbar.update()
X_test, y_test = get_minibatch(doc_stream, size=5000)
X_test = vect.transform(X_test)
print('Accuracy: %.3f' % clf.score(X_test, y_test))
# Finally fold the held-out batch into the model as well.
clf = clf.partial_fit(X_test, y_test)
# ## Topic modeling
# ### Decomposing text documents with Latent Dirichlet Allocation
# ### Latent Dirichlet Allocation with scikit-learn
# Reload the assembled reviews for the topic-modeling section.
df = pd.read_csv('movie_data.csv', encoding='utf-8')
df.head(3)
## @Readers: PLEASE IGNORE THIS CELL
##
## This cell is meant to create a smaller dataset if
## the notebook is run on the Travis Continuous Integration
## platform to test the code on a smaller dataset
## to prevent timeout errors and just serves a debugging tool
## for this notebook
if 'TRAVIS' in os.environ:
    df.loc[:500].to_csv('movie_data.csv')
    df = pd.read_csv('movie_data.csv', nrows=500)
    print('SMALL DATA SUBSET CREATED FOR TESTING')
# Ignore very frequent words (max_df=.1) and cap the vocabulary at 5,000.
count = CountVectorizer(stop_words='english',
                        max_df=.1,
                        max_features=5000)
X = count.fit_transform(df['review'].values)
# Fit a 10-topic LDA in batch mode.
# NOTE(review): n_topics was renamed n_components in scikit-learn 0.19 and
# removed in 0.21; get_feature_names below became get_feature_names_out.
lda = LatentDirichletAllocation(n_topics=10,
                                random_state=123,
                                learning_method='batch')
X_topics = lda.fit_transform(X)
lda.components_.shape
# Print the 5 highest-weighted words per topic (argsort is ascending, so
# take the last n_top_words entries in reverse).
n_top_words = 5
feature_names = count.get_feature_names()
for topic_idx, topic in enumerate(lda.components_):
    print("Topic %d:" % (topic_idx + 1))
    print(" ".join([feature_names[i]
                    for i in topic.argsort()\
                        [:-n_top_words - 1:-1]]))
# Based on reading the 5 most important words for each topic, we may guess that the LDA identified the following topics:
#
# 1. Generally bad movies (not really a topic category)
# 2. Movies about families
# 3. War movies
# 4. Art movies
# 5. Crime movies
# 6. Horror movies
# 7. Comedies
# 8. Movies somehow related to TV shows
# 9. Movies based on books
# 10. Action movies
# To confirm that the categories make sense based on the reviews, let's plot 5 movies from the horror movie category (category 6 at index position 5):
# Topic 6 (index 5) looked like "horror": show the first 300 characters of
# the 3 reviews most strongly associated with it, as a sanity check.
horror = X_topics[:, 5].argsort()[::-1]
for iter_idx, movie_idx in enumerate(horror[:3]):
    print('\nHorror movie #%d:' % (iter_idx + 1))
    print(df['review'][movie_idx][:300], '...')
# Using the preceeding code example, we printed the first 300 characters from the top 3 horror movies and indeed, we can see that the reviews -- even though we don't know which exact movie they belong to -- sound like reviews of horror movies, indeed. (However, one might argue that movie #2 could also belong to topic category 1.)
# # Summary
# ...
# ---
#
# Readers may ignore the next cell.
| 32.596974 | 595 | 0.688286 |
4cf3c34f078754cc6aac9593b80f440f97c80085 | 5,036 | py | Python | src/lib/models/FPN_RFB_RON.py | TeleMidia/audio_reconstruction | 6fca53f6267081c7f9cdf04a1ff0faebda00f3a4 | [
"Apache-2.0"
] | 2 | 2021-10-16T01:39:19.000Z | 2022-01-06T15:53:10.000Z | src/lib/models/FPN_RFB_RON.py | TeleMidia/audio_reconstruction | 6fca53f6267081c7f9cdf04a1ff0faebda00f3a4 | [
"Apache-2.0"
] | null | null | null | src/lib/models/FPN_RFB_RON.py | TeleMidia/audio_reconstruction | 6fca53f6267081c7f9cdf04a1ff0faebda00f3a4 | [
"Apache-2.0"
] | 1 | 2021-11-25T16:59:16.000Z | 2021-11-25T16:59:16.000Z | import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras.layers import Input, Conv2D, BatchNormalization, ReLU, Conv2DTranspose, MaxPool2D, UpSampling2D
from tensorflow.keras.activations import relu
class FPN_RFB_RON(Model):
    """FPN-style encoder/decoder producing a single-channel output map.

    Encoder: five Conv+BN+ReLU stages, each followed by 2x max pooling
    (224 -> 112 -> 56 -> 28 -> 14 -> 7 for a 224x224 input — the inline
    size comments below assume this). Decoder: 1x1 lateral convs plus
    transposed-conv upsampling with additive skip connections; the four
    pyramid levels are upsampled back to input resolution, concatenated,
    and reduced to one channel by the final (linear) convolution.
    """
    def __init__(self):
        super(FPN_RFB_RON, self).__init__()
        # Fixed seed so weight initialization is repeatable across runs.
        initializer = tf.keras.initializers.GlorotNormal(seed=0)
        self.max_pool = MaxPool2D(pool_size=(2, 2), strides=2, padding='same')
        # Heads that bring each pyramid level back to the input resolution.
        self.upsample_2x = Conv2DTranspose(32, 3, activation='relu', strides=2, padding='same', kernel_initializer=initializer)
        self.upsample_4x = Conv2DTranspose(32, 3, activation='relu', strides=4, padding='same', kernel_initializer=initializer)
        self.upsample_8x = Conv2DTranspose(32, 3, activation='relu', strides=8, padding='same', kernel_initializer=initializer)
        self.upsample_16x = Conv2DTranspose(32, 3, activation='relu', strides=16, padding='same', kernel_initializer=initializer)
        # 2x upsampling steps used while walking up the decoder pyramid.
        self.up_1 = Conv2DTranspose(128, 3, activation='relu', strides=2, padding='same', kernel_initializer=initializer)
        self.up_2 = Conv2DTranspose(128, 3, activation='relu', strides=2, padding='same', kernel_initializer=initializer)
        self.up_3 = Conv2DTranspose(128, 3, activation='relu', strides=2, padding='same', kernel_initializer=initializer)
        self.up_4 = Conv2DTranspose(128, 3, activation='relu', strides=2, padding='same', kernel_initializer=initializer)
        # Encoder stages with widening channel counts (32 -> 512).
        self.conv_1 = self.Conv2dBatchLayer(32,3)
        self.conv_2 = self.Conv2dBatchLayer(64,3)
        self.conv_3 = self.Conv2dBatchLayer(128,3)
        self.conv_4 = self.Conv2dBatchLayer(256,3)
        self.conv_5 = self.Conv2dBatchLayer(512,3)
        # 1x1 lateral convs projecting encoder features to 128 channels.
        self.side_conv_1 = Conv2D(128, 1, activation='relu', strides=1, padding='same', kernel_initializer=initializer)
        self.side_conv_2 = Conv2D(128, 1, activation='relu', strides=1, padding='same', kernel_initializer=initializer)
        self.side_conv_3 = Conv2D(128, 1, activation='relu', strides=1, padding='same', kernel_initializer=initializer)
        self.side_conv_4 = Conv2D(128, 1, activation='relu', strides=1, padding='same', kernel_initializer=initializer)
        # Double conv blocks compressing each pyramid level to 32 channels.
        self.side_conv_1_1 = self.Conv2dBatchLayer_2x(32, 3)
        self.side_conv_2_1 = self.Conv2dBatchLayer_2x(32, 3)
        self.side_conv_3_1 = self.Conv2dBatchLayer_2x(32, 3)
        self.side_conv_4_1 = self.Conv2dBatchLayer_2x(32, 3)
        # Fuse the concatenated pyramid and emit a 1-channel linear output.
        self.conv_7 = self.Conv2dBatchLayer(32,3)
        self.last_conv = Conv2D(1, 3, strides=1, padding='same', kernel_initializer=initializer)
    def Conv2dBatchLayer(self, filters, kernel_size):
        """Return a Conv2D -> BatchNorm -> ReLU sequential block."""
        initializer = tf.keras.initializers.GlorotNormal(seed=0)
        result = tf.keras.Sequential()
        result.add(Conv2D(filters, kernel_size, strides=1, padding='same', kernel_initializer=initializer))
        result.add(BatchNormalization())
        result.add(ReLU())
        return result
    def Conv2dBatchLayer_2x(self, filters, kernel_size):
        """Return two stacked Conv2D -> BatchNorm -> ReLU blocks."""
        initializer = tf.keras.initializers.GlorotNormal(seed=0)
        result = tf.keras.Sequential()
        result.add(Conv2D(filters, kernel_size, strides=1, padding='same', kernel_initializer=initializer))
        result.add(BatchNormalization())
        result.add(ReLU())
        result.add(Conv2D(filters, kernel_size, strides=1, padding='same', kernel_initializer=initializer))
        result.add(BatchNormalization())
        result.add(ReLU())
        return result
    def call(self, input):
        """Forward pass; the size comments assume a 224x224 input."""
        #224 -> 112
        conv1 = self.conv_1(input)
        down1 = self.max_pool(conv1)
        #112 -> 56
        conv2 = self.conv_2(down1)
        down2 = self.max_pool(conv2)
        #56 -> 28
        conv3 = self.conv_3(down2)
        down3 = self.max_pool(conv3)
        #28 -> 14
        conv4 = self.conv_4(down3)
        down4 = self.max_pool(conv4)
        #14 -> 7
        conv5 = self.conv_5(down4)
        down5 = self.max_pool(conv5)
        # Decoder: lateral 1x1 conv, add the upsampled coarser level, 2x up.
        #7 -> 14
        side1 = self.side_conv_1(down5)
        up1 = self.up_1(side1)
        #14 -> 28
        side2 = self.side_conv_2(down4)
        add1 = tf.keras.layers.Add()([side2, up1])
        up2 = self.up_2(add1)
        #28 -> 56
        side3 = self.side_conv_3(down3)
        add2 = tf.keras.layers.Add()([side3, up2])
        up3 = self.up_3(add2)
        #56 -> 112
        side4 = self.side_conv_4(down2)
        add3 = tf.keras.layers.Add()([side4, up3])
        up4 = self.up_4(add3)
        # Compress every pyramid level to 32 channels before fusing.
        side_1_1 = self.side_conv_1_1(up1)
        side_2_1 = self.side_conv_2_1(up2)
        side_3_1 = self.side_conv_3_1(up3)
        side_4_1 = self.side_conv_4_1(up4)
        # Bring all levels to the input resolution and concatenate them.
        up_f_2x = self.upsample_2x(side_4_1)
        up_f_4x = self.upsample_4x(side_3_1)
        up_f_8x = self.upsample_8x(side_2_1)
        up_f_16x = self.upsample_16x(side_1_1)
        concat = tf.keras.layers.concatenate([up_f_16x, up_f_8x, up_f_4x, up_f_2x], axis=3)
        conv7 = self.conv_7(concat)
        return self.last_conv(conv7) | 43.042735 | 129 | 0.664019 |
44b23e77c5dd2f110443fdcc65a4849cb64b6680 | 14,644 | py | Python | lib/workflow.py | bcarr15/bioblend-scripts | b53fc0b88e77dc6864ae6e0ddae23f46327922e8 | [
"MIT"
] | null | null | null | lib/workflow.py | bcarr15/bioblend-scripts | b53fc0b88e77dc6864ae6e0ddae23f46327922e8 | [
"MIT"
] | null | null | null | lib/workflow.py | bcarr15/bioblend-scripts | b53fc0b88e77dc6864ae6e0ddae23f46327922e8 | [
"MIT"
] | null | null | null | import os
import sys
import yaml
import json
from pprint import pprint
from planemo.runnable import for_path
from planemo.galaxy.workflows import install_shed_repos
import lib
import common
from bioblend.galaxy import GalaxyInstance
from common import connect
# Output directories (created in the current working directory by run()) for
# invocation records and per-job metrics.
INVOCATIONS_DIR = "invocations"
METRICS_DIR = "metrics"
class Keys:
    """String constants for the keys used in workflow configuration YAML files."""
    NAME = 'name'
    RUNS = 'runs'
    INPUTS = 'inputs'
    REFERENCE_DATA = 'reference_data'
    WORKFLOW_ID = 'workflow_id'
    DATASET_ID = 'dataset_id'
    HISTORY_BASE_NAME = 'output_history_base_name'
    HISTORY_NAME = 'history_name'
def find_workflow_id(gi, name_or_id):
    """Resolve a workflow name or ID to a workflow ID on the Galaxy server.

    Tries *name_or_id* as an ID first, then as the name of a published
    workflow.

    :param gi: a connected GalaxyInstance
    :param name_or_id: workflow ID or workflow name
    :return: the workflow ID, or None if nothing matches.
    """
    # The bare `except:` clauses here previously also swallowed
    # SystemExit/KeyboardInterrupt; `except Exception` keeps the intended
    # "lookup failed, try the next strategy" behavior only.
    try:
        return gi.workflows.show_workflow(name_or_id)['id']
    except Exception:
        pass
    try:
        matches = gi.workflows.get_workflows(name=name_or_id, published=True)
        if matches:
            return matches[0]['id']
    except Exception:
        pass
    return None
def find_dataset_id(gi, name_or_id):
    """Resolve a dataset name or ID to a dataset ID on the Galaxy server.

    Tries *name_or_id* as an ID first, then as a dataset name.

    :param gi: a connected GalaxyInstance
    :param name_or_id: dataset ID or dataset name
    :return: the dataset ID, or None if nothing matches.
    """
    # `except Exception` replaces the old bare `except:`, which also caught
    # SystemExit/KeyboardInterrupt.
    try:
        return gi.datasets.show_dataset(name_or_id)['id']
    except Exception:
        pass
    try:
        matches = gi.datasets.get_datasets(name=name_or_id)  # , deleted=True, purged=True)
        if matches:
            return matches[0]['id']
    except Exception:
        print('Caught an exception')
        print(sys.exc_info())
    return None
def parse_workflow(workflow_path: str):
    """Load a workflow configuration from a YAML file.

    :param workflow_path: path to the YAML workflow configuration.
    :return: the parsed document (a list of workflow definitions), or None
        when the file does not exist.  NOTE(review): a YAML syntax error
        currently terminates the whole process via sys.exit(1).
    """
    if not os.path.exists(workflow_path):
        print(f'ERROR: could not find workflow file {workflow_path}')
        return None
    with open(workflow_path, 'r') as stream:
        try:
            config = yaml.safe_load(stream)
            # print(f"Loaded {name}")
        except yaml.YAMLError as exc:
            print('Error encountered parsing the YAML input file')
            print(exc)
            #TODO Don't do this...
            sys.exit(1)
    return config
def list(args: list):
    """Print the ID and name of every published workflow on the server.

    NOTE: the function name intentionally matches the CLI command and
    shadows the builtin `list` in this module.
    """
    workflows = connect().workflows.get_workflows(published=True)
    if not workflows:
        print('No workflows found')
        return
    print(f'Found {len(workflows)} workflows')
    for workflow in workflows:
        print(f"{workflow['id']}\t{workflow['name']}")
def delete(args: list):
    """Delete the workflow whose ID is args[0] and print the server response."""
    if not args:
        print('ERROR: no workflow ID given.')
        return
    print(connect().workflows.delete_workflow(args[0]))
def upload(args: list):
    """Import a local workflow file into Galaxy, publish it, and install
    the tool-shed repositories it depends on."""
    if not args:
        print('ERROR: no workflow file given')
        return
    path = args[0]
    if not os.path.exists(path):
        print(f'ERROR: file not found: {path}')
        return
    gi = connect()
    print("Importing the workflow")
    pprint(gi.workflows.import_workflow_from_local_path(path, publish=True))
    print("Installing tools")
    pprint(install_shed_repos(for_path(path), gi, False))
def download(args: list):
    """Export the workflow args[0] as JSON, to the file args[1] if given,
    otherwise to stdout."""
    if not args:
        print('ERROR: no workflow ID given')
        return
    serialized = json.dumps(connect().workflows.export_workflow_dict(args[0]), indent=4)
    if len(args) == 2:
        with open(args[1], 'w') as handle:
            handle.write(serialized)
        print(f'Wrote {args[1]}')
    else:
        print(serialized)
def show(args: list):
    """Pretty-print the server's description of the workflow args[0]."""
    if not args:
        print('ERROR: no workflow ID given')
        return
    pprint(connect().workflows.show_workflow(args[0]))
def find(args: list):
    """Search the server for workflows named args[0] and pretty-print them."""
    if not args:
        print("ERROR: no workflow name given")
        return
    pprint(connect().workflows.get_workflows(name=args[0]))
def run(args: list):
    """
    Runs a single workflow defined by *args[0]*
    :param args: a list that contains a single element, the path to a workflow
    configuration file.
    :return: True if the workflows completed successfully. False otherwise.
    """
    if len(args) == 0:
        print('ERROR: no workflow configuration specified')
        return
    workflow_path = args[0]
    if not os.path.exists(workflow_path):
        print(f'ERROR: can not find workflow configuration {workflow_path}')
        return
    # Ensure the output directories exist (created in the current directory).
    if os.path.exists(INVOCATIONS_DIR):
        if not os.path.isdir(INVOCATIONS_DIR):
            print('ERROR: Can not save invocation status, directory name in use.')
            sys.exit(1)
    else:
        os.mkdir(INVOCATIONS_DIR)
    if os.path.exists(METRICS_DIR):
        if not os.path.isdir(METRICS_DIR):
            print('ERROR: Can not save metrics, directory name in use.')
            #sys.exit(1)
            return False
    else:
        os.mkdir(METRICS_DIR)
    gi = connect()
    workflows = parse_workflow(workflow_path)
    print(f"Found {len(workflows)} workflow definitions")
    for workflow in workflows:
        wf_name = workflow[Keys.WORKFLOW_ID]
        wfid = find_workflow_id(gi, wf_name)
        if wfid is None:
            print(f"Unable to load the workflow ID for {workflow[Keys.WORKFLOW_ID]}")
            return False
        else:
            print(f"Found workflow id {wfid}")
        inputs = {}
        history_base_name = wfid
        if Keys.HISTORY_BASE_NAME in workflow:
            history_base_name = workflow[Keys.HISTORY_BASE_NAME]
        # Reference data is shared by every run of this workflow.
        if Keys.REFERENCE_DATA in workflow:
            for spec in workflow[Keys.REFERENCE_DATA]:
                input = gi.workflows.get_workflow_inputs(wfid, spec[Keys.NAME])
                if input is None or len(input) == 0:
                    print(f'ERROR: Invalid input specification for {spec[Keys.NAME]}')
                    return False
                dsid = find_dataset_id(gi, spec[Keys.DATASET_ID])
                print(f"Reference input dataset {dsid}")
                inputs[input[0]] = {'id': dsid, 'src': 'hda'}
        count = 0
        # NOTE(review): this loop variable shadows the enclosing function `run`.
        for run in workflow[Keys.RUNS]:
            count += 1
            if Keys.HISTORY_NAME in run:
                output_history_name = f"{history_base_name} {run[Keys.HISTORY_NAME]}"
            else:
                output_history_name = f"{history_base_name} run {count}"
            for spec in run[Keys.INPUTS]:
                input = gi.workflows.get_workflow_inputs(wfid, spec[Keys.NAME])
                if input is None or len(input) == 0:
                    print(f'ERROR: Invalid input specification for {spec[Keys.NAME]}')
                    return False
                dsid = find_dataset_id(gi, spec[Keys.DATASET_ID])
                print(f"Input dataset ID: {dsid}")
                inputs[input[0]] = {'id': dsid, 'src': 'hda'}
            print(f"Running workflow {wfid}")
            new_history_name = output_history_name
            if len(args) > 1:
                new_history_name = f"{args[1]} {output_history_name}"
            invocation = gi.workflows.invoke_workflow(wfid, inputs=inputs, history_name=new_history_name)
            id = invocation['id']
            # Record the raw invocation so it can be inspected later.
            output_path = os.path.join(INVOCATIONS_DIR, id + '.json')
            with open(output_path, 'w') as f:
                json.dump(invocation, f, indent=4)
            print(f"Wrote invocation data to {output_path}")
            # Block (up to 24h, polling every 10s) until the invocation is done.
            invocations = gi.invocations.wait_for_invocation(id, 86400, 10, False)
            print("Waiting for jobs")
            if len(args) > 1:
                # NOTE(review): `parts` is a single whitespace-separated word,
                # so parts[0..2] are its first three *characters*; this looks
                # like it intended `parts = args[1].split()` -- confirm before
                # relying on the recorded run/cloud/job_conf values.
                for parts in args[1].split():
                    invocations['run'] = parts[0]
                    invocations['cloud'] = parts[1]
                    invocations['job_conf'] = parts[2]
            wait_for_jobs(gi, invocations)
    print("Benchmarking run complete")
    return True
def test(args: list):
# gi = connect()
# print(f"Searching for workflow {args[0]}")
# flows = gi.workflows.get_workflows(name=args[0], published=True)
# pprint(flows)
print(__name__)
def publish(args: list):
    """Mark the workflow whose ID is args[0] as published."""
    if len(args) != 1:
        print("USAGE: publish ID")
        return
    updated = connect().workflows.update_workflow(args[0], published=True)
    print(f"Published: {updated['published']}")
def rename(args: list):
    """Rename the workflow args[0] to the new name args[1]."""
    if len(args) != 2:
        print("USAGE: rename ID 'new workflow name'")
        return
    updated = connect().workflows.update_workflow(args[0], name=args[1])
    print(f"Renamed workflow to {updated['name']}")
def translate(args: list):
    """Print the configuration with workflow/dataset IDs replaced by names.

    Loads the YAML configuration *args[0]*, looks up each workflow and
    dataset ID on the server, substitutes the human-readable names, and
    dumps the resulting YAML to stdout.
    """
    if len(args) == 0:
        print('ERROR: no workflow configuration specified')
        return
    workflow_path = args[0]
    if not os.path.exists(workflow_path):
        print(f'ERROR: can not find workflow configuration {workflow_path}')
        return
    gi = connect()
    # wf_index,ds_index = create_rev_index(gi)
    workflows = parse_workflow(args[0])
    for workflow in workflows:
        wfid = workflow[Keys.WORKFLOW_ID]
        wfinfo = gi.workflows.show_workflow(wfid)
        if wfinfo is None or 'name' not in wfinfo:
            print(f"Warning: unable to translate workflow ID {wfid}")
        else:
            workflow[Keys.WORKFLOW_ID] = wfinfo['name']
        # if workflow[Keys.WORKFLOW_ID] in wf_index:
        #     workflow[Keys.WORKFLOW_ID] = wf_index[workflow[Keys.WORKFLOW_ID]]
        # else:
        #     print(f"Warning: no workflow id for {workflow[Keys.WORKFLOW_ID]}")
        # Translate reference datasets, then per-run input datasets.
        if Keys.REFERENCE_DATA in workflow:
            for ref in workflow[Keys.REFERENCE_DATA]:
                dsid = ref[Keys.DATASET_ID]
                dataset = gi.datasets.show_dataset(dsid)
                if dataset is None:
                    print(f"Warning: could not translate dataset ID {dsid}")
                else:
                    ref[Keys.DATASET_ID] = dataset['name']
        for run in workflow[Keys.RUNS]:
            for input in run[Keys.INPUTS]:
                dsid = input[Keys.DATASET_ID]
                dataset = gi.datasets.show_dataset(dsid)
                if dataset is None:
                    print(f"Warning: could not translate dataset ID {dsid}")
                else:
                    input[Keys.DATASET_ID] = dataset['name']
    print(yaml.dump(workflows))
def validate(args: list):
    """Check that a workflow configuration can be executed on this server.

    Verifies that the workflow and every referenced dataset exist and that
    every input name maps to an actual workflow input. Prints a report per
    workflow and returns True when no problems were found.
    """
    if len(args) == 0:
        print('ERROR: no workflow configuration specified')
        return
    workflow_path = args[0]
    if not os.path.exists(workflow_path):
        print(f'ERROR: can not find workflow configuration {workflow_path}')
        return
    print(f"Validating workflow on {lib.GALAXY_SERVER}")
    workflows = parse_workflow(workflow_path)
    gi = connect()
    total_errors = 0
    for workflow in workflows:
        wfid = workflow[Keys.WORKFLOW_ID]
        try:
            wfid = find_workflow_id(gi, wfid)
        except:
            wfid = None
        if wfid is None:
            print(f"The workflow '{workflow[Keys.WORKFLOW_ID]}' does not exist on this server.")
            return
        else:
            print(f"Workflow: {workflow[Keys.WORKFLOW_ID]} -> {wfid}")
        inputs = {}
        errors = 0
        history_base_name = wfid
        if Keys.HISTORY_BASE_NAME in workflow:
            history_base_name = workflow[Keys.HISTORY_BASE_NAME]
        # Validate the shared reference datasets first.
        if Keys.REFERENCE_DATA in workflow:
            for spec in workflow[Keys.REFERENCE_DATA]:
                input = gi.workflows.get_workflow_inputs(wfid, spec[Keys.NAME])
                if input is None or len(input) == 0:
                    print(f'ERROR: Invalid input specification for {spec[Keys.NAME]}')
                    errors += 1
                    #sys.exit(1)
                else:
                    dsid = find_dataset_id(gi, spec[Keys.DATASET_ID])
                    if dsid is None:
                        print(f"ERROR: Reference dataset not found {spec[Keys.DATASET_ID]}")
                        errors += 1
                    else:
                        print(f"Reference input dataset {spec[Keys.DATASET_ID]} -> {dsid}")
                        inputs[input[0]] = {'id': dsid, 'src': 'hda'}
        # Then validate the inputs of every configured run.
        count = 0
        for run in workflow[Keys.RUNS]:
            count += 1
            for spec in run[Keys.INPUTS]:
                input = gi.workflows.get_workflow_inputs(wfid, spec[Keys.NAME])
                if input is None or len(input) == 0:
                    print(f'ERROR: Invalid input specification for {spec[Keys.NAME]}')
                    errors += 1
                else:
                    dsid = find_dataset_id(gi, spec[Keys.DATASET_ID])
                    if dsid is None:
                        print(f"ERROR: Dataset not found {spec[Keys.DATASET_ID]}")
                        errors += 1
                    else:
                        print(f"Input dataset: {spec[Keys.DATASET_ID]} -> {dsid}")
                        inputs[input[0]] = {'id': dsid, 'src': 'hda'}
        if errors == 0:
            print("This workflow configuration is valid and can be executed on this server.")
        else:
            print("---------------------------------")
            print("WARNING")
            print("The above problems need to be corrected before this workflow configuration can be used.")
            print("---------------------------------")
        total_errors += errors
    return total_errors == 0
def wait_for_jobs(gi: GalaxyInstance, invocations: dict):
    """Block until every job of an invocation completes and save its metrics.

    :param gi: the *GalaxyInstance* running the jobs
    :param invocations: invocation description; must provide 'workflow_id',
        'history_id', 'run', 'cloud', 'job_conf' and 'steps'
    :return: None; one JSON metrics file per job is written to METRICS_DIR
    """
    wfid = invocations['workflow_id']
    hid = invocations['history_id']
    run = invocations['run']
    cloud = invocations['cloud']
    conf = invocations['job_conf']
    for step in invocations['steps']:
        job_id = step['job_id']
        if job_id is not None:
            print(f"Waiting for job {job_id} on {lib.GALAXY_SERVER}")
            try:
                # TODO: should retry if anything throws an exception.
                # Wait up to 24h, polling every 10 seconds.
                status = gi.jobs.wait_for_job(job_id, 86400, 10, False)
                data = gi.jobs.show_job(job_id, full_details=True)
                metrics = {
                    'run': run,
                    'cloud': cloud,
                    'job_conf': conf,
                    'workflow_id': wfid,
                    'history_id': hid,
                    'metrics': data,
                    'status': status,
                    'server': lib.GALAXY_SERVER
                }
                output_path = os.path.join(METRICS_DIR, f"{job_id}.json")
                with open(output_path, "w") as f:
                    json.dump(metrics, f, indent=4)
                print(f"Wrote metrics to {output_path}")
            except Exception as e:
                print(f"ERROR: {e}")
| 34.701422 | 108 | 0.573409 |
7468bb0179c71da8d67d3ee366d97bdda226af45 | 4,098 | py | Python | app.py | olesyat/hotdogs | 965268e4cd57f1e3d2813905bad9a57230d4a0c9 | [
"MIT"
] | null | null | null | app.py | olesyat/hotdogs | 965268e4cd57f1e3d2813905bad9a57230d4a0c9 | [
"MIT"
] | null | null | null | app.py | olesyat/hotdogs | 965268e4cd57f1e3d2813905bad9a57230d4a0c9 | [
"MIT"
] | null | null | null | from flask import Flask, render_template, redirect, request
import sqlite3
import mysql.connector
def get_connection():
    """Open a new connection to the local SQLite database file HOTDOGS.db."""
    return sqlite3.connect("HOTDOGS.db")
app = Flask(__name__)
@app.route('/')
def read():
    """List all hotdogs, or show a placeholder page when there are none."""
    connection = get_connection()
    cursor = connection.cursor()
    doggies = []  # default so a query failure cannot leave this unbound
    try:
        cursor.execute("select * from DOGGIES")
        doggies = cursor.fetchall()
    except sqlite3.Error as error:
        # sqlite3 raises sqlite3.Error; the previous mysql.connector.Error
        # clause could never catch errors from this sqlite3 connection.
        print(error)
    connection.commit()
    connection.close()
    if doggies:
        return render_template('index.html', doggies=doggies, l=len(doggies))
    else:
        return render_template("no_doggies.html")
@app.route('/create', methods=['POST', "GET"])
def create_hotdog():
    """Create a hotdog from the submitted form, or render the creation form."""
    if request.method == 'POST':
        name = request.form['name']
        meat = request.form['meat']
        mustard = request.form['mustard']
        ketchup = request.form['ketchup']
        connection = get_connection()
        cursor = connection.cursor()
        try:
            # Parameterized query: the previous f-string version was open to
            # SQL injection through every form field.
            cursor.execute(
                "insert into DOGGIES (name, meat, mustard, ketchup) values (?, ?, ?, ?)",
                (name, meat, mustard, ketchup))
        except sqlite3.Error as error:
            # sqlite3 errors were previously never caught (mysql.connector.Error).
            print(error)
        connection.commit()
        connection.close()
        return read()
    else:
        return render_template('create.html')
@app.route('/update', methods=['POST', 'GET'])
def update_hotdog():
    """Render the edit form for the hotdog selected on the index page."""
    if request.method == 'POST':
        old = request.form['old']
        connection = get_connection()
        cursor = connection.cursor()
        try:
            # Parameterized query: the previous f-string version was open to
            # SQL injection through the submitted ID.
            cursor.execute("SELECT * FROM DOGGIES WHERE ID = ?", (old,))
            dog = cursor.fetchone()
            buns = ['simple', 'gluten-free', 'with-poppy-seeds', 'wholemeal']
            meats = ['beef', 'veal', 'pork', 'vegan']
            mustards = ['Yes', 'No']
            ketchups = ['Yes', 'No']
            # Drop the dog's current choice from each option list so the
            # template can render it separately as the pre-selected value.
            # NOTE(review): an unknown ID leaves `dog` as None and raises here.
            for i, options in enumerate([buns, meats, mustards, ketchups]):
                options.remove(dog[i + 1])
            # "hidden"/"checkbox" pairs control which input the template shows.
            mustard_prime = "hidden"
            mustard_secondary = "checkbox"
            if dog[3] == 'Yes':
                mustard_prime, mustard_secondary = mustard_secondary, mustard_prime
            ketchup_prime = "hidden"
            ketchup_secondary = "checkbox"
            if dog[4] == 'Yes':
                ketchup_prime, ketchup_secondary = ketchup_secondary, ketchup_prime
        except sqlite3.Error as error:
            # sqlite3 errors were previously never caught (mysql.connector.Error).
            print(error)
        connection.commit()
        connection.close()
        return render_template("update.html", dog=dog, buns=buns, meats=meats, mustards=mustards, mustard_prime=mustard_prime, mustard_secondary=mustard_secondary, ketchups=ketchups, ketchup_prime=ketchup_prime, ketchup_secondary=ketchup_secondary)
    else:
        return "oops"
# Flask upper-cases method names internally, so 'POST' is equivalent to the
# previous 'PoST' spelling.
@app.route('/updated', methods=['POST'])
def updated():
    """Apply the edited hotdog attributes submitted from the update form."""
    if request.method == 'POST':
        newname = request.form['name']
        newmeat = request.form['meat']
        newmustard = request.form['mustard']
        newketchup = request.form['ketchup']
        dog_id = request.form['id']
        connection = get_connection()
        cursor = connection.cursor()
        try:
            # Parameterized query: the previous f-string version was open to
            # SQL injection through every form field.
            cursor.execute(
                "UPDATE DOGGIES SET name = ?, meat = ?, mustard = ?, ketchup = ? WHERE ID = ?",
                (newname, newmeat, newmustard, newketchup, dog_id))
        except sqlite3.Error as error:
            # sqlite3 errors were previously never caught (mysql.connector.Error).
            print(error)
        connection.commit()
        connection.close()
        return read()
@app.route('/delete', methods=['POST'])
def delete_hotdog():
    """Delete the hotdog whose ID was submitted from the index page."""
    if request.method == 'POST':
        delete = request.form['trash']
        connection = get_connection()
        cursor = connection.cursor()
        try:
            # Parameterized query: the previous f-string version was open to
            # SQL injection through the submitted ID.
            cursor.execute("DELETE FROM DOGGIES WHERE ID = ?", (delete,))
        except sqlite3.Error as error:
            # sqlite3 errors were previously never caught (mysql.connector.Error).
            print(error)
        connection.commit()
        connection.close()
        return read()
    else:
        return "oops"
if __name__ == '__main__':
    # Run the Flask development server (not suitable for production).
    app.run()
| 33.317073 | 248 | 0.582479 |
527213937fb6a54ace61f9072b8e54afe227418e | 588 | py | Python | lesson_06/Classwork_01.py | rotorypower/lessons | bd8f0d54159023e677608f2104e38d82de388fe4 | [
"BSD-3-Clause"
] | null | null | null | lesson_06/Classwork_01.py | rotorypower/lessons | bd8f0d54159023e677608f2104e38d82de388fe4 | [
"BSD-3-Clause"
] | null | null | null | lesson_06/Classwork_01.py | rotorypower/lessons | bd8f0d54159023e677608f2104e38d82de388fe4 | [
"BSD-3-Clause"
] | null | null | null | """Дан словарь, где в качестве ключей английские слова,
а значений - их перевод на русский язык. Написать две функции,
одна переводит слово с английского на русский,
где слово - это входной параметр, вторая функция - с русского на английский."""
def translate_en_to_ru(d, word):
    """Return the Russian translation of the English *word*; raises KeyError
    when the word is not in dictionary *d*."""
    return d[word]
def translate_ru_to_en(d, word):
    """Return the English key whose Russian value equals *word*, else None."""
    return next((en for en, ru in d.items() if ru == word), None)
if __name__ == "__main__":
dictionary = {"sun": "Солнце", "moon": "Луна"}
print(translate_en_to_ru(dictionary, "sun"))
print(translate_ru_to_en(dictionary, "Луна")) | 28 | 79 | 0.685374 |
9670329cd8ca18f5169d08606f9b8f6edaa373c8 | 2,685 | py | Python | saas/backend/api/application/views.py | nannan00/bk-iam-saas | 217600fa6e5fd466fff9c33c20c4dbd7c69f77d9 | [
"MIT"
] | null | null | null | saas/backend/api/application/views.py | nannan00/bk-iam-saas | 217600fa6e5fd466fff9c33c20c4dbd7c69f77d9 | [
"MIT"
] | null | null | null | saas/backend/api/application/views.py | nannan00/bk-iam-saas | 217600fa6e5fd466fff9c33c20c4dbd7c69f77d9 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-权限中心(BlueKing-IAM) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from urllib.parse import urlencode
from django.conf import settings
from drf_yasg.utils import swagger_auto_schema
from rest_framework import status
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.viewsets import views
from backend.api.authentication import ESBAuthentication
from backend.api.mixins import ExceptionHandlerMixin
from backend.biz.open import ApplicationPolicyListCache
from backend.common.swagger import ResponseSwaggerAutoSchema
from backend.trans.open_application import AccessSystemApplicationTrans
from .serializers import AccessSystemApplicationSLZ, AccessSystemApplicationUrlSLZ
class ApplicationView(ExceptionHandlerMixin, views.APIView):
    """Permission application endpoint for access systems.

    Validates an application payload from an access system, converts it to a
    policy list, caches it, and returns a redirect URL to the SaaS
    custom-permission application page.
    """

    authentication_classes = [ESBAuthentication]
    permission_classes = [IsAuthenticated]

    access_system_application_biz = AccessSystemApplicationTrans()
    application_policy_list_cache = ApplicationPolicyListCache()

    @swagger_auto_schema(
        operation_description="接入系统权限申请",
        request_body=AccessSystemApplicationSLZ(label="接入系统申请数据"),
        auto_schema=ResponseSwaggerAutoSchema,
        responses={status.HTTP_200_OK: AccessSystemApplicationUrlSLZ(label="重定向URL")},
        tags=["open"],
    )
    def post(self, request):
        # Validate the incoming application payload.
        serializer = AccessSystemApplicationSLZ(data=request.data)
        serializer.is_valid(raise_exception=True)
        data = serializer.validated_data
        system_id = data["system"]
        # Convert the applied permissions to a PolicyBeanList (with checks).
        policy_list = self.access_system_application_biz.to_policy_list(data)
        # Cache the policy list so the frontend can retrieve it by cache_id.
        cache_id = self.application_policy_list_cache.set(policy_list)
        # Build the redirect URL to the custom-permission application page.
        url = f"{settings.APP_URL}/apply-custom-perm"
        params = {"system_id": system_id, "cache_id": cache_id}
        url = url + "?" + urlencode(params)
        return Response({"url": url})
| 40.074627 | 115 | 0.766853 |
5c9a59564d9c28581679d503828a81df1093ef75 | 1,946 | py | Python | examples/echo_server/topology.py | GianGian/SDN-Slicing-in-ComNetsEmu | 4fded82ee61f948fc5ebb11a668fbcbd526afbb0 | [
"MIT"
] | null | null | null | examples/echo_server/topology.py | GianGian/SDN-Slicing-in-ComNetsEmu | 4fded82ee61f948fc5ebb11a668fbcbd526afbb0 | [
"MIT"
] | null | null | null | examples/echo_server/topology.py | GianGian/SDN-Slicing-in-ComNetsEmu | 4fded82ee61f948fc5ebb11a668fbcbd526afbb0 | [
"MIT"
] | 1 | 2020-04-20T10:42:03.000Z | 2020-04-20T10:42:03.000Z | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
import os
from comnetsemu.cli import CLI, spawnXtermDocker
from comnetsemu.net import Containernet, VNFManager
from mininet.link import TCLink
from mininet.log import info, setLogLevel
from mininet.node import Controller
if __name__ == "__main__":
# Only used for auto-testing.
AUTOTEST_MODE = os.environ.get("COMNETSEMU_AUTOTEST_MODE", 0)
setLogLevel("info")
net = Containernet(controller=Controller, link=TCLink, xterms=False)
mgr = VNFManager(net)
info("*** Add controller\n")
net.addController("c0")
info("*** Creating hosts\n")
h1 = net.addDockerHost(
"h1", dimage="dev_test", ip="10.0.0.1", docker_args={"hostname": "h1"},
)
h2 = net.addDockerHost(
"h2", dimage="dev_test", ip="10.0.0.2", docker_args={"hostname": "h2"},
)
h3 = net.addDockerHost(
"h3", dimage="dev_test", ip="10.0.0.3", docker_args={"hostname": "h3"},
)
info("*** Adding switch and links\n")
switch1 = net.addSwitch("s1")
switch2 = net.addSwitch("s2")
net.addLink(switch1, h1, bw=10, delay="10ms")
net.addLink(switch1, switch2, bw=10, delay="10ms")
net.addLink(switch2, h2, bw=10, delay="10ms")
net.addLink(switch2, h3, bw=20, delay="1ms")
info("\n*** Starting network\n")
net.start()
srv1 = mgr.addContainer(
"srv1", "h1", "echo_server", "python /home/server.py", docker_args={},
)
srv2 = mgr.addContainer("srv2", "h2", "dev_test", "bash", docker_args={})
srv3 = mgr.addContainer(
"srv3", "h3", "echo_server", "python /home/server.py", docker_args={},
)
if not AUTOTEST_MODE:
# Cannot spawn xterm for srv1 since BASH is not installed in the image:
# echo_server.
spawnXtermDocker("srv2")
CLI(net)
mgr.removeContainer("srv1")
mgr.removeContainer("srv2")
mgr.removeContainer("srv3")
net.stop()
mgr.stop()
| 29.484848 | 79 | 0.631038 |
4871bec789f655eea5b7a7dd58ba886ff517928f | 302 | py | Python | collectionmodeladmin/templatetags/collectionmodeladmin_tags.py | BabisK/wagtail-collectionmodeladmin | 9e1b6e0fecaafbdb473e5f9048ce9390926c9726 | [
"BSD-3-Clause"
] | null | null | null | collectionmodeladmin/templatetags/collectionmodeladmin_tags.py | BabisK/wagtail-collectionmodeladmin | 9e1b6e0fecaafbdb473e5f9048ce9390926c9726 | [
"BSD-3-Clause"
] | null | null | null | collectionmodeladmin/templatetags/collectionmodeladmin_tags.py | BabisK/wagtail-collectionmodeladmin | 9e1b6e0fecaafbdb473e5f9048ce9390926c9726 | [
"BSD-3-Clause"
] | null | null | null | from django.template import Library
register = Library()
@register.filter(name='get_app_name')
def get_app_name(d):
    """Template filter: return the Django app label of a model instance or class."""
    return d._meta.app_label
@register.filter(name='get_model_name')
def get_model_name(d):
    """Template filter: return the lowercase model name of a model instance or class."""
    return d._meta.model_name
@register.simple_tag
def setvar(val=None):
    """Template tag: return *val* unchanged so it can be bound with `as`."""
    return val
f3ccc33df11ed47776989bbe09c3c3cfeb419fb4 | 2,204 | py | Python | algorithm_mic_sdk/ws/speech_recognition_chinese.py | panyunsuo/AlgorithmicMicroserviceSDK | e7094a0a2a55b7bc7f65765bfb3918fc1fa305cc | [
"MIT"
] | null | null | null | algorithm_mic_sdk/ws/speech_recognition_chinese.py | panyunsuo/AlgorithmicMicroserviceSDK | e7094a0a2a55b7bc7f65765bfb3918fc1fa305cc | [
"MIT"
] | null | null | null | algorithm_mic_sdk/ws/speech_recognition_chinese.py | panyunsuo/AlgorithmicMicroserviceSDK | e7094a0a2a55b7bc7f65765bfb3918fc1fa305cc | [
"MIT"
] | null | null | null | import base64
import json
import time
from ..auth import AuthInfo
from ..ws_base import WSAlgoBase
def build_data_stream_source(data_stream_iterator):
    """Build a WebSocket on_open callback that streams audio chunks.

    Each chunk yielded by *data_stream_iterator* is sent as a JSON message
    with state 'running' (raw bytes chunks are base64-encoded first, other
    chunks are sent as-is), followed by a final {'state': 'end'} message.
    """
    def on_open(ws):
        for chunk in data_stream_iterator:
            encoded = base64.b64encode(chunk).decode() if isinstance(chunk, bytes) else chunk
            message = {
                'state': 'running',
                'part': encoded,
                'server_send_time': time.time()
            }
            ws.send(json.dumps(message))
        ws.send(json.dumps({'state': 'end'}))
    return on_open
class SpeechRecognitionChinese(WSAlgoBase):
    """WebSocket client for the Chinese speech-recognition algorithm service."""
    __algo_name__ = 'speech_recognition_chinese'

    def __init__(self, auth_info: AuthInfo, audio_format='PCM', data_stream_iterator=None,
                 recognition_result_callback_func=None, minimum_segment_frame=None, minimum_valid_frame=None,
                 maximum_audio_segment=None, log_record=True, **kwargs):
        """
        Speech recognition algorithm (Chinese).
        Docs: https://www.yuque.com/fenfendeyouzhiqingnian/algorithm/kbmfrx
        @param auth_info: personal authorization/configuration parameters
        @param audio_format: audio format
        @param data_stream_iterator: iterable yielding audio chunks; each chunk
            may be raw bytes or base64-encoded text
        @param recognition_result_callback_func: callback receiving (ws, data),
            where ws is the current WebSocket handle and data is the server result
        @param minimum_valid_frame: minimum number of recognizable valid frames
        @param minimum_segment_frame: minimum number of silent frames used to split segments
        @param maximum_audio_segment: maximum segment length; longer segments
            are split automatically
        @param log_record: whether to keep server-side logs
        @param kwargs: extra options forwarded to the service
            (e.g. lm_weight, the language-model weight)
        """
        super().__init__(auth_info)
        self.request['audio_format'] = audio_format
        self.request['minimum_segment_frame'] = minimum_segment_frame
        self.request['minimum_valid_frame'] = minimum_valid_frame
        self.request['maximum_audio_segment'] = maximum_audio_segment
        self.request['log_record'] = log_record
        self.request['state'] = 'ready'
        self.request.update(kwargs)
        self.set_on_message(recognition_result_callback_func)
        self.set_on_open(build_data_stream_source(data_stream_iterator))
| 37.355932 | 109 | 0.666062 |
546124bbee899348e7519ce50f28017a698ba963 | 7,027 | py | Python | python/paddle/fluid/tests/unittests/test_buffer_shared_memory_reuse_pass.py | Ray2020BD/Paddle | 994087188816575d456c2f9c2a6c90aad83b4e71 | [
"Apache-2.0"
] | 2 | 2020-12-09T16:09:59.000Z | 2020-12-09T16:10:02.000Z | python/paddle/fluid/tests/unittests/test_buffer_shared_memory_reuse_pass.py | Ray2020BD/Paddle | 994087188816575d456c2f9c2a6c90aad83b4e71 | [
"Apache-2.0"
] | null | null | null | python/paddle/fluid/tests/unittests/test_buffer_shared_memory_reuse_pass.py | Ray2020BD/Paddle | 994087188816575d456c2f9c2a6c90aad83b4e71 | [
"Apache-2.0"
] | 1 | 2021-03-23T00:59:48.000Z | 2021-03-23T00:59:48.000Z | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.fluid as fluid
from paddle.fluid.framework import Parameter
import numpy as np
from simple_nets import simple_fc_net
import random
import unittest
import os
# Shared fixture: one batch of fake MNIST-style data fed to every program.
batch_size = 32

feed_dict = {
    'image': np.random.random([batch_size, 784]).astype('float32'),
    # randint's high bound is exclusive, so low=0, high=10 reproduces the old
    # random_integers(low=0, high=9) range; random_integers was deprecated in
    # NumPy 1.11 and has been removed from recent NumPy releases.
    'label': np.random.randint(
        low=0, high=10, size=[batch_size, 1]).astype('int64')
}
class InplaceTestBase(unittest.TestCase):
    """Checks that the buffer-sharing inplace / memory-optimize build passes
    do not change computation results: every (memory_optimize,
    enable_inplace) combination must fetch the same variable values as the
    plain, unoptimized program.
    """
    def initParameter(self):
        """Overridden by subclasses to pick the device and fusion options."""
        self.use_cuda = True
        self.fuse_all_optimizer_ops = False
    def setUp(self):
        paddle.enable_static()
        self.initParameter()
        if self.use_cuda and fluid.core.is_compiled_with_cuda():
            self.device_count = fluid.core.get_cuda_device_count()
        else:
            self.device_count = 4
        # The batch must split evenly across devices for data parallelism.
        assert batch_size % self.device_count == 0
    def build_program_and_scope(self):
        """Build a seeded fc-net training program with its own scope/executor."""
        self.place = fluid.CUDAPlace(0) if self.use_cuda else fluid.CPUPlace()
        paddle.seed(1)
        paddle.framework.random._manual_program_seed(1)
        startup_program = fluid.Program()
        main_program = fluid.Program()
        scope = fluid.Scope()
        with fluid.program_guard(main_program, startup_program):
            with fluid.unique_name.guard():
                loss = simple_fc_net()
                adam = fluid.optimizer.Adam(learning_rate=1e-3)
                adam.minimize(loss)
                with fluid.scope_guard(scope):
                    exe = fluid.Executor(
                        fluid.CUDAPlace(0)
                        if self.use_cuda else fluid.CPUPlace())
                    exe.run(startup_program)
        return main_program, scope, exe, loss
    def is_invalid_test(self):
        """True when CUDA is requested but this build has no CUDA support."""
        return self.use_cuda and not fluid.core.is_compiled_with_cuda()
    def get_all_vars(self, program):
        """Names of all non-persistable, fully-shaped variables in *program*."""
        all_vars = program.global_block().vars
        all_vars_name = []
        for name, var in all_vars.items():
            if 0 not in var.shape and not var.persistable:
                all_vars_name.append(name)
        return all_vars_name
    def check_single_card_fetch_var(self):
        """Compare fetches between the plain program and each compiled variant."""
        if self.is_invalid_test():
            return
        prog1, scope1, exe, loss1 = self.build_program_and_scope()
        scopes = []
        compiled_programs = []
        for memory_optimize in [False, True]:
            for enable_inplace in [False, True]:
                prog, scope, _, loss = self.build_program_and_scope()
                scopes.append(scope)
                build_strategy = fluid.BuildStrategy()
                build_strategy.memory_optimize = memory_optimize
                build_strategy.enable_inplace = enable_inplace
                build_strategy.fuse_all_optimizer_ops = self.fuse_all_optimizer_ops
                compiled_prog = fluid.CompiledProgram(prog).with_data_parallel(
                    loss_name=loss.name,
                    build_strategy=build_strategy,
                    places=self.place)
                compiled_programs.append(compiled_prog)
        all_vars_name = self.get_all_vars(prog1)
        repeated_var_names = all_vars_name * 2
        random.shuffle(repeated_var_names)  # add some random
        for fetch_var in repeated_var_names:
            for _ in range(4):
                with fluid.scope_guard(scope1):
                    fetch_val1, = exe.run(prog1,
                                          feed=feed_dict,
                                          fetch_list=[fetch_var])
                for scope, compiled_prog in zip(scopes, compiled_programs):
                    with fluid.scope_guard(scope):
                        fetch_val2, = exe.run(compiled_prog,
                                              feed=feed_dict,
                                              fetch_list=[fetch_var])
                    self.assertTrue(np.array_equal(fetch_val1, fetch_val2))
    def check_multi_card_fetch_var(self):
        """Compare fetches across compiled variants on multiple devices."""
        if self.is_invalid_test():
            return
        prog1, scope1, exe, loss1 = self.build_program_and_scope()
        scopes = []
        compiled_programs = []
        if self.use_cuda:
            places = fluid.cuda_places()
        else:
            places = fluid.cpu_places(self.device_count)
        for memory_optimize in [False, True]:
            for enable_inplace in [False, True]:
                prog, scope, _, loss = self.build_program_and_scope()
                scopes.append(scope)
                build_strategy = fluid.BuildStrategy()
                build_strategy.memory_optimize = memory_optimize
                build_strategy.enable_inplace = enable_inplace
                build_strategy.fuse_all_optimizer_ops = self.fuse_all_optimizer_ops
                compiled_program = fluid.CompiledProgram(
                    prog).with_data_parallel(
                        loss_name=loss.name,
                        build_strategy=build_strategy,
                        places=places)
                compiled_programs.append(compiled_program)
        repeated_var_names = self.get_all_vars(prog1) * 2
        random.shuffle(repeated_var_names)  # add some random
        for fetch_var in repeated_var_names:
            for _ in range(4):
                fetch_vals = []
                for scope, compiled_prog in zip(scopes, compiled_programs):
                    with fluid.scope_guard(scope):
                        fetch_val, = exe.run(compiled_prog,
                                             feed=feed_dict,
                                             fetch_list=[fetch_var])
                        fetch_vals.append(fetch_val)
                for item in fetch_vals:
                    self.assertTrue(np.array_equal(fetch_vals[0], item))
class CUDAInplaceTest(InplaceTestBase):
    """Runs the inplace/memory-optimize consistency checks on CUDA devices."""
    def initParameter(self):
        self.use_cuda = True
        self.fuse_all_optimizer_ops = False
    def test_multi_card_fetch_var(self):
        self.check_multi_card_fetch_var()
    def test_single_card_fetch_var(self):
        self.check_single_card_fetch_var()
class CPUInplaceTest(InplaceTestBase):
    """Runs the inplace/memory-optimize consistency checks on CPU devices."""
    def initParameter(self):
        self.use_cuda = False
        self.fuse_all_optimizer_ops = False
    def test_multi_card_fetch_var(self):
        self.check_multi_card_fetch_var()
    def test_single_card_fetch_var(self):
        self.check_single_card_fetch_var()
# Standard unittest entry point when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
| 36.790576 | 83 | 0.608652 |
36be95c85ac9e3a964d98555ddb93bbe3d6c3cc6 | 734 | py | Python | tests/utils.py | immunochomik/notmany | 7b75c35cac2c3d0e465402099bcaa469924c7ca2 | [
"MIT"
] | null | null | null | tests/utils.py | immunochomik/notmany | 7b75c35cac2c3d0e465402099bcaa469924c7ca2 | [
"MIT"
] | null | null | null | tests/utils.py | immunochomik/notmany | 7b75c35cac2c3d0e465402099bcaa469924c7ca2 | [
"MIT"
] | null | null | null | import contextlib
import tempfile
from datetime import datetime, timedelta
import os
import shutil
from uuid import uuid4
from notmany.store.base import FORMAT
def dt(date):
    """Parse *date* (a string) into a datetime using the shared notmany
    timestamp FORMAT imported above."""
    return datetime.strptime(date, FORMAT)
@contextlib.contextmanager
def temporary_directory(sub_name=None):
    """Context manager yielding a directory under the system temp dir.

    Args:
        sub_name: optional directory name; a random uuid4 is used when omitted.

    The directory (and everything created inside it) is removed on exit,
    even when the body raises.
    """
    sub_name = sub_name or str(uuid4())
    tmp_dir = os.path.join(tempfile.gettempdir(), sub_name)
    # makedirs with exist_ok avoids the exists()/mkdir() race of the old
    # implementation and also supports nested sub_name paths.
    os.makedirs(tmp_dir, exist_ok=True)
    try:
        yield tmp_dir
    finally:
        shutil.rmtree(tmp_dir)
def file_content(path, fname=None):
    """Return the text content of *path*, joined with *fname* when given."""
    target = os.path.join(path, fname) if fname else path
    with open(target, 'r') as handle:
        return handle.read()
def seconds(num):
    """Shorthand: return a ``timedelta`` spanning *num* seconds."""
    return timedelta(seconds=num)
| 18.35 | 59 | 0.692098 |
e4de733e5c0ae2e4678e9cda8d9bbc096dd360a5 | 594 | py | Python | server/grpc/pyserver.py | Panthereum/DigitalBeing | 7fda011f34dd62c03d1072035ae0ad2a129281a7 | [
"MIT"
] | 53 | 2021-07-20T04:01:57.000Z | 2022-03-13T17:31:08.000Z | server/grpc/pyserver.py | Panthereum/DigitalBeing | 7fda011f34dd62c03d1072035ae0ad2a129281a7 | [
"MIT"
] | 58 | 2021-08-20T02:22:16.000Z | 2021-12-13T10:38:58.000Z | server/grpc/pyserver.py | Panthereum/DigitalBeing | 7fda011f34dd62c03d1072035ae0ad2a129281a7 | [
"MIT"
] | 13 | 2021-08-23T20:16:14.000Z | 2022-01-31T23:59:21.000Z | import logging
import time
# import the original example.py
from handler import DigitalBeing as DB
# Module-level logger: everything at DEBUG and above goes to grpc_server.log
# in the working directory.
logger = logging.getLogger('server_logger')
logger.setLevel(logging.DEBUG)
# create file handler which logs even debug messages
fh = logging.FileHandler('grpc_server.log')
fh.setLevel(logging.DEBUG)
logger.addHandler(fh)
# create a class to define the server functions, derived from
# example_pb2_grpc.AgentServicer
# NOTE(review): the comment above looks stale -- the class no longer derives
# from a gRPC servicer, it only wraps the DigitalBeing handler. Confirm.
class Service():
    def __init__(self):
        # Constructing DigitalBeing presumably starts the agent's own
        # processing -- TODO confirm against handler.py.
        self.digital_being = DB()
# Instantiate the service, then keep the main thread alive forever; the
# handler does its work in the background. Day-long sleeps keep wakeups cheap.
Service()
while True:
    time.sleep(86400)
#except KeyboardInterrupt:
    #server.stop(0)
6b9ca5a84a2b459f045547f6ffced3af8e3631fb | 5,430 | py | Python | MobileRevelator/python/fs_nqvault.py | ohunecker/MR | b0c93436c7964d87a0b8154f8b7662b1731124b9 | [
"MIT"
] | 98 | 2019-02-03T22:50:24.000Z | 2022-03-17T12:50:56.000Z | MobileRevelator/python/fs_nqvault.py | cewatkins/MR | 5ba553fd0eb4c1d80842074a553119486f005822 | [
"MIT"
] | 10 | 2019-03-14T20:12:10.000Z | 2020-05-23T10:37:54.000Z | MobileRevelator/python/fs_nqvault.py | cewatkins/MR | 5ba553fd0eb4c1d80842074a553119486f005822 | [
"MIT"
] | 30 | 2019-02-03T22:50:27.000Z | 2022-03-30T12:37:30.000Z | #Pluginname="NQ Vault (Android,FS)"
#Category="Extraction"
#Type=FS
import struct
import os
import tempfile
from Library.java import JavaFunc
from binascii import hexlify, unhexlify
def findnqvault():
    """Locate the NQ Vault contacts database in the mounted filesystem.

    Returns a dict with key "Data" pointing at the database path when one of
    the known locations exists, otherwise an empty dict.
    """
    ctx.gui_setMainLabel("Seeking for NQ Vault database")
    known_locations = (
        "/data/com.netqin.ps/databases/contactsDB",
        "com.netqin.ps/db/contactsDB",
    )
    found = {}
    for candidate in known_locations:
        if ctx.fs_isFile(candidate) == True:
            found["Data"] = candidate
    return found
def findfiles():
    """Collect candidate NQ Vault artefacts from the image.

    Returns a dict mapping derived keys to full paths:
      * encrypted media blobs: files under ".image/", ".audio/" or ".video/"
        whose name contains ".bin", keyed by the basename without ".bin";
      * vault databases: files whose basename is exactly "322w465ay423xy11",
        keyed by that name plus a running counter to avoid collisions.
    """
    files={}
    allfiles=ctx.fs_getselected()
    m=0
    # fall back to the whole file list when nothing is selected in the GUI
    if len(allfiles)==0:
        allfiles=ctx.fs_filelist()
    filecount=len(allfiles)  # NOTE(review): unused -- confirm it can be dropped
    for file in allfiles:
        if (".image/" in file or ".audio/" in file or ".video/" in file):
            if ".bin" in file:
                item=file[file.rfind("/")+1:file.rfind(".bin")]
                files[item]=file
        # match the magic DB name by exact basename length so paths that
        # merely contain the string are not picked up
        if ("322w465ay423xy11" in file):
            st=file[file.rfind("/")+1:]
            if len("322w465ay423xy11")==len(st):
                files[st+str(m)]=file
                m+=1
    return files
def hashcode(passcode):
    """Reproduce Java's String.hashCode() for *passcode*, kept unsigned 32-bit.

    NQ Vault stores the PIN as this hash, so matching Java's algorithm
    (h = 31*h + ord(c), truncated to 32 bits) lets us verify/brute-force it.
    """
    result = 0
    for ch in passcode:
        result = (31 * result + ord(ch)) & 0xFFFFFFFF
    return result
def bruteforcepin(password):
    """Brute-force the numeric PIN whose Java-style hash equals *password*.

    Args:
        password: the stored hash value (any type; compared as a string).

    Returns:
        The recovered pin as a string, or "" when no pin up to 8 digits
        matches.
    """
    pwd = str(password)
    # Upper bound is 10**8 so the last valid 8-digit pin (99999999) is tried;
    # the previous bound of 99999999 silently skipped it (off-by-one).
    for i in range(0, 10 ** 8):
        curcode = hashcode(str(i))
        if ((i % 10000) == 0):
            # progress feedback every 10k attempts
            ctx.gui_setMainLabel("NQVault: Trying pin " + str(i))
        if str(curcode) == pwd:
            return str(i)
    return ""
def getpin(db):
    """Brute-force every stored password hash in the vault database.

    Returns a dict mapping the password row index to the recovered pin;
    rows whose pin could not be recovered are omitted.
    """
    cursor = ctx.sqlite_run_cmd(db, "SELECT password FROM private_password;")
    recovered = {}
    row_count = ctx.sqlite_get_data_size(cursor)[0]
    for row in range(row_count):
        hashed = ctx.sqlite_get_data(cursor, row, 0)
        pin = bruteforcepin(hashed)
        if pin:
            recovered[row] = pin
    ctx.sqlite_cmd_close(cursor)
    return recovered
def verifypin(db, pin):
    """Check a user-supplied *pin* against every stored password hash.

    Returns {0: pin} when the pin's Java-style hash matches any stored row,
    otherwise an empty dict.
    """
    cursor = ctx.sqlite_run_cmd(db, "SELECT password FROM private_password;")
    matches = {}
    # hashcode() is pure, so compute the expected hash once up front
    expected = str(hashcode(pin))
    for row in range(ctx.sqlite_get_data_size(cursor)[0]):
        stored = ctx.sqlite_get_data(cursor, row, 0)
        if str(stored) == expected:
            matches[0] = pin
    ctx.sqlite_cmd_close(cursor)
    return matches
def decryptfile(key, infilename):
    """Decrypt an NQ Vault file in place.

    NQ Vault only XORs the first 0x80 bytes of the file with a single key
    byte; the remainder of the file is stored in the clear.

    Args:
        key: single-byte XOR key (0-255).
        infilename: path to the file, rewritten in place.
    """
    with open(infilename, 'r+b') as f:
        header = f.read(0x80)
        f.seek(0)
        # XOR only the bytes actually read, so files shorter than 0x80 bytes
        # no longer raise IndexError; also write the header back in one call
        # instead of one write() per byte.
        f.write(bytes(b ^ key for b in header))
def getfilenamesfromdb(databasename):
    """Map obfuscated on-disk names back to original file names.

    Reads the hideimagevideo table and returns a dict keyed by the basename
    (without extension) of file_path_new, with file_name_from as the value.
    Returns an empty dict when the database cannot be opened.
    """
    mapping = {}
    db = ctx.sqlite_open(databasename, False)
    if db == -1:
        return mapping
    cursor = ctx.sqlite_run_cmd(
        db, "SELECT file_name_from, file_path_new FROM hideimagevideo;")
    if cursor is not None:
        for row in range(ctx.sqlite_get_data_size(cursor)[0]):
            new_path = ctx.sqlite_get_data(cursor, row, 1)
            if new_path is not None:
                key = new_path[new_path.rfind("/") + 1:new_path.rfind(".")]
                mapping[key] = ctx.sqlite_get_data(cursor, row, 0)
        ctx.sqlite_cmd_close(cursor)
    ctx.sqlite_close(db)
    return mapping
def main():
    """Plugin entry point: recover the NQ Vault key and decrypt all files.

    Flow: collect encrypted files and name mappings, find the vault DB,
    obtain the XOR key (verify a user pin, brute-force, or derive from the
    stored hash), ask for an output directory, then extract+decrypt every
    file. Returns a status/error string.
    """
    nametable={}
    encfiles=findfiles()
    # merge the obfuscated-name -> original-name mapping from every vault DB
    for t in encfiles:
        if "322w465ay423xy11" in t:
            nt=getfilenamesfromdb(encfiles[t])
            for h in nt:
                nametable[h]=nt[h]
    error=""
    files=findnqvault()
    ctx.gui_setMainProgressBar(0)
    if (len(files)==0 and len(encfiles)==0):
        error="Couldn't find NQ Vault"
        return error
    db=-1
    pins={}
    keys=[]
    if (len(files)!=0):
        db=ctx.sqlite_open(files["Data"],False)
        if db!=-1:
            # 1) let the user type the pin and verify it against the DB
            checkpin=ctx.gui_askText("Please enter pin:")
            if (checkpin!=""):
                pins=verifypin(db,checkpin)
                if (len(pins)==0):
                    ctx.gui_setMainMessage(1,"NQVault: PIN "+checkpin+" is wrong, trying to bruteforce.","Info");
                else:
                    # the XOR key is the low byte of the pin's Java hash
                    keys.append(hashcode(checkpin)&0xFF)
            if (len(pins)==0):
                # 2) optionally brute-force all pins stored in the DB
                if(ctx.gui_askYesNo("Shall we bruteforce the pin ?")):
                    ctx.gui_setMainLabel("NQVault: Please wait, recovering pins..");
                    pins=getpin(db)
                    pincodes=""
                    for x in range(0,len(pins)):
                        pincodes+=pins[x]+";"
                        keys.append(hashcode(pins[x])&0xFF)
                    ctx.gui_setMainMessage(0,"NQVault: PINs recovered="+pincodes,"Info");
                else:
                    # 3) no pin needed for decryption: use the stored hash's
                    # low byte directly (first row only, hence the break)
                    conn=ctx.sqlite_run_cmd(db,"SELECT password FROM private_password;")
                    for i in range(ctx.sqlite_get_data_size(conn)[0]):
                        encodedpassword=ctx.sqlite_get_data(conn,i,0)
                        keys.append(int(encodedpassword)&0xFF)
                        break
            if len(keys)==0:
                error="NQVault: Couldn't recover pins"
                ctx.sqlite_close(db)
                return error
    extracttodir=ctx.gui_askSaveDir("Please select directory to decrypt the files to")
    if (extracttodir==""):
        error="Can't generate report without directory"
        ctx.setMainLabel("Status: Idle.")
        return error
    ctx.gui_setMainLabel("Status: Extracting files to: "+extracttodir)
    # Fallback when no DB/key was found: derive the key from the first byte
    # of an encrypted image (the plaintext header byte is assumed 0xFF).
    for f in encfiles:
        if (len(keys)==0) and (".image" in encfiles[f]):
            outfile=encfiles[f]
            outfile=outfile[outfile.rfind("/")+1:]
            ctx.fs_file_extract(encfiles[f],extracttodir+"/"+outfile)
            with open(extracttodir+"/"+outfile,'rb') as t:
                chr=ord(t.read(1))
                keys.append((chr^0xFF))
    if nametable!=None:
        for f in encfiles:
            if f in nametable:
                # restore under the original (pre-hiding) file name
                infile=encfiles[f]
                outfile=nametable[f]
                ctx.fs_file_extract(infile,extracttodir+"/"+outfile)
                decryptfile(keys[0],extracttodir+"/"+outfile)
            elif "322w465ay423xy11" not in f:
                # unknown mapping: keep the on-disk name (skip the DB itself)
                infile=encfiles[f]
                outfile=encfiles[f]
                outfile=outfile[outfile.rfind("/")+1:]
                ctx.fs_file_extract(infile,extracttodir+"/"+outfile)
                decryptfile(keys[0],extracttodir+"/"+outfile)
    ctx.gui_setMainLabel("Status: Idle.")
    ctx.gui_setMainProgressBar(0)
    return "Finished running plugin."
6d7d7123e339a9523e4fa08e5b23ade6729d1582 | 1,040 | py | Python | merlin/models/utils/nvt_utils.py | bschifferer/models-1 | b6042dbd1b98150cc50fd7d2cb6c07033f42fd35 | [
"Apache-2.0"
] | 45 | 2022-02-01T20:27:18.000Z | 2022-03-29T10:06:53.000Z | merlin/models/utils/nvt_utils.py | bschifferer/models-1 | b6042dbd1b98150cc50fd7d2cb6c07033f42fd35 | [
"Apache-2.0"
] | 142 | 2022-02-01T22:19:04.000Z | 2022-03-31T23:13:08.000Z | merlin/models/utils/nvt_utils.py | bschifferer/models-1 | b6042dbd1b98150cc50fd7d2cb6c07033f42fd35 | [
"Apache-2.0"
] | 12 | 2022-02-01T19:54:28.000Z | 2022-03-23T17:53:29.000Z | import logging
def require_nvt():
    """Ensure nvtabular is importable; warn if a GPU backend lacks RAPIDS.

    Raises:
        ImportError: when nvtabular is not installed.
    """
    try:
        import nvtabular as nvt  # noqa
    except ImportError as err:
        # Pass a single message string: the previous code handed ImportError
        # two positional args, which str()s as a confusing tuple.
        raise ImportError(
            "nvtabular is required for this feature. "
            "Please install it with `pip install nvtabular`."
        ) from err

    backend = None
    try:
        import tensorflow as tf

        backend = tf
        if tf.test.is_gpu_available():
            _check_nvt_gpu()
    except ImportError:
        pass

    if not backend:
        try:
            import torch

            backend = torch
            if torch.cuda.is_available():
                _check_nvt_gpu()
        except ImportError:
            pass
def _check_nvt_gpu():
try:
import cudf # noqa
except ImportError:
logging.warning(
"A GPU was detected but rapids is not installed.",
"NVTabular will not be able to use GPU.",
"Look at the documentation for more information at rapids.ai",
)
| 22.12766 | 74 | 0.518269 |
c32020963db7e585e8acd5bcf02594f18bf1af76 | 26,872 | py | Python | cvpysdk/subclients/virtualserver/vmwaresubclient.py | jack1806/cvpysdk | 6aa0beb426a95de877cd531602234515723ccc94 | [
"Apache-2.0"
] | 1 | 2021-02-27T05:31:38.000Z | 2021-02-27T05:31:38.000Z | cvpysdk/subclients/virtualserver/vmwaresubclient.py | jack1806/cvpysdk | 6aa0beb426a95de877cd531602234515723ccc94 | [
"Apache-2.0"
] | null | null | null | cvpysdk/subclients/virtualserver/vmwaresubclient.py | jack1806/cvpysdk | 6aa0beb426a95de877cd531602234515723ccc94 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# --------------------------------------------------------------------------
# Copyright Commvault Systems, Inc.
# See LICENSE.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""File for operating on a Virtual Server VMWare Subclient.
VMWareVirtualServerSubclient is the only class defined in this file.
VMWareVirtualServerSubclient: Derived class from VirtualServerSubClient Base
class,representing a VMware Subclient,
and to perform operations on that Subclient
VMWareVirtualServerSubclient:
__init__(
backupset_object,
subclient_name,
subclient_id) -- initialize object of vmware subclient class,
associated with the VirtualServer subclient
full_vm_restore_in_place() -- restores the VM specified by the user to
the same location
full_vm_restore_out_of_place() -- restores the VM specified to the provided
VMware psuedoclient vcenter via
vcenter_client
"""
from ..vssubclient import VirtualServerSubclient
from ...exception import SDKException
from past.builtins import basestring
class VMWareVirtualServerSubclient(VirtualServerSubclient):
"""Derived class from VirtualServerSubclient Base class.
This represents a VMWare virtual server subclient,
and can perform restore operations on only that subclient.
"""
    def __init__(self, backupset_object, subclient_name, subclient_id=None):
        """Initialize the Instance object for the given Virtual Server instance.

            Args:
                backupset_object    --  instance of the backupset class

                subclient_name      --  name of the subclient

                subclient_id        --  id of the subclient (default: None)
        """
        super(VMWareVirtualServerSubclient, self).__init__(
            backupset_object, subclient_name, subclient_id)
        # only vmdk files are treated as virtual disks for VMware
        self.diskExtension = [".vmdk"]

        # user-facing disk-provisioning names mapped to API enum values
        self._disk_option = {
            'Original': 0,
            'Thick Lazy Zero': 1,
            'Thin': 2,
            'Thick Eager Zero': 3
        }

        # user-facing transport-mode names mapped to API enum values.
        # NOTE(review): 'NBD' -> 5 and 'NBD SSL' -> 4 look swapped relative
        # to their names -- confirm against the server-side enum.
        self._transport_mode = {
            'Auto': 0,
            'SAN': 1,
            'Hot Add': 2,
            'NBD': 5,
            'NBD SSL': 4
        }
    def full_vm_restore_in_place(
            self,
            vm_to_restore=None,
            overwrite=True,
            power_on=True,
            copy_precedence=0,
            disk_option='Original',
            transport_mode='Auto',
            proxy_client=None,
            indexing_v2=False):
        """Restores the FULL Virtual machine specified in the input list
        to the location same as the actual location of the VM in VCenter.

            Args:
                vm_to_restore    (list)       -- provide the VM name to restore
                                                 default: None

                overwrite        (bool)       -- overwrite the existing VM
                                                 default: True

                power_on         (bool)       -- power on the restored VM
                                                 default: True

                copy_precedence  (int)        -- copy precedence value
                                                 default: 0

                disk_option      (basestring) -- disk provisioning for the restored vm
                                                 Options: 'Original', 'Thick Lazy Zero',
                                                 'Thin', 'Thick Eager Zero'
                                                 default: Original

                transport_mode   (basestring) -- transport mode to be used for the restore.
                                                 Options: 'Auto', 'SAN', 'Hot Add',
                                                 'NBD', 'NBD SSL'
                                                 default: Auto

                proxy_client     (basestring) -- proxy client to be used for restore
                                                 default: proxy added in subclient

                indexing_v2      (boolean)    -- true if restore has to be performed from
                                                 child vm for v2

            Returns:
                object - instance of the Job class for this restore job
                         (a list of Job objects when indexing_v2 is True)

            Raises:
                SDKException:
                    if inputs are not of correct type as per definition

                    if failed to initialize job

                    if response is empty

                    if response is not success
        """
        restore_option = {}

        # check input parameters are correct
        if vm_to_restore and not isinstance(vm_to_restore, basestring):
            raise SDKException('Subclient', '101')

        # translate user-facing names to API enum values (raises KeyError on
        # unknown names)
        disk_option_value = self._disk_option[disk_option]
        transport_mode_value = self._transport_mode[transport_mode]

        if copy_precedence:
            restore_option['copy_precedence_applicable'] = True

        if proxy_client is not None:
            restore_option['client'] = proxy_client

        # pristine copy: the v2 path below restarts from it for each child VM
        restore_option_copy = restore_option.copy()

        if indexing_v2:
            vm_to_restore = self._set_vm_to_restore(vm_to_restore)
            job_objects = []
            # v2 (indexing) clients: one restore job per child VM pseudo-client
            for eachvm in vm_to_restore:
                restore_option = {}
                restore_option = restore_option_copy.copy()

                # set attr for all the option in restore xml from user inputs
                self._set_restore_inputs(
                    restore_option,
                    vm_to_restore=[eachvm],
                    in_place=True,
                    esx_server_name="",
                    volume_level_restore=1,
                    unconditional_overwrite=overwrite,
                    power_on=power_on,
                    disk_option=disk_option_value,
                    transport_mode=transport_mode_value,
                    copy_precedence=copy_precedence
                )

                request_json = self._prepare_fullvm_restore_json(restore_option)
                # re-point the request associations at the child VM's own
                # client/backupset/subclient entities
                _vmclient_obj = self._commcell_object.clients.get(eachvm)
                _vmagent_obj = _vmclient_obj.agents.get(self._agent_object._agent_name)
                _vminstance_obj = _vmagent_obj.instances.get('VMInstance')
                _vmbackupset_obj = _vminstance_obj.backupsets.get(
                    self._backupset_object._backupset_name)
                _vmsub_obj = _vmbackupset_obj.subclients.get('default')
                request_json['taskInfo']['associations'][0]['clientName'] = eachvm
                request_json['taskInfo']['associations'][0]['clientId'] = _vmsub_obj._subClientEntity[
                    'clientId']
                request_json['taskInfo']['associations'][0]['instanceName'] = 'VMInstance'
                request_json['taskInfo']['associations'][0]['backupsetId'] = _vmsub_obj._subClientEntity[
                    'backupsetId']
                request_json['taskInfo']['associations'][0]['instanceId'] = _vmsub_obj._subClientEntity[
                    'instanceId']
                request_json['taskInfo']['associations'][0]['subclientGUID'] = _vmsub_obj._subClientEntity[
                    'subclientGUID']
                request_json['taskInfo']['associations'][0]['subclientName'] = 'default'
                request_json['taskInfo']['associations'][0]['subclientId'] = _vmsub_obj._subClientEntity[
                    'subclientId']
                job_objects.append(self._process_restore_response(request_json))
            return job_objects
        else:
            # set attr for all the option in restore xml from user inputs
            self._set_restore_inputs(
                restore_option,
                vm_to_restore=self._set_vm_to_restore(vm_to_restore),
                in_place=True,
                esx_server_name="",
                volume_level_restore=1,
                unconditional_overwrite=overwrite,
                power_on=power_on,
                disk_option=disk_option_value,
                transport_mode=transport_mode_value,
                copy_precedence=copy_precedence
            )

            request_json = self._prepare_fullvm_restore_json(restore_option)
            return self._process_restore_response(request_json)
    def full_vm_restore_out_of_place(
            self,
            vm_to_restore=None,
            restored_vm_name=None,
            vcenter_client=None,
            esx_host=None,
            datastore=None,
            overwrite=True,
            power_on=True,
            copy_precedence=0,
            disk_option='Original',
            transport_mode='Auto',
            proxy_client=None,
            source_ip=None,
            destination_ip=None,
            network=None,
            indexing_v2=False
    ):
        """Restores the FULL Virtual machine specified in the input list
        to the provided vcenter client along with the ESX and the datastores.
        If the provided client name is none then it restores the Full Virtual
        Machine to the source client and corresponding ESX and datastore.

            Args:
                vm_to_restore     (str)        -- VM that is to be restored

                restored_vm_name  (str)        -- new name of vm. If nothing is passed,
                                                  'delete' is appended to the original vm name

                vcenter_client    (basestring) -- name of the vcenter client where the VM
                                                  should be restored.

                esx_host          (basestring) -- destination esx host. Restores to the
                                                  source VM esx if this value is not specified

                datastore         (basestring) -- datastore where the restored VM should be
                                                  located. Restores to the source VM datastore
                                                  if this value is not specified

                overwrite         (bool)       -- overwrite the existing VM
                                                  default: True

                power_on          (bool)       -- power on the restored VM
                                                  default: True

                copy_precedence   (int)        -- copy precedence value
                                                  default: 0

                disk_option       (basestring) -- disk provisioning for the restored vm
                                                  Options: 'Original', 'Thick Lazy Zero',
                                                  'Thin', 'Thick Eager Zero'
                                                  default: 'Original'

                transport_mode    (basestring) -- transport mode to be used for the restore.
                                                  Options: 'Auto', 'SAN', 'Hot Add',
                                                  'NBD', 'NBD SSL'
                                                  default: Auto

                proxy_client      (basestring) -- destination proxy client

                source_ip         (basestring) -- IP of the source VM

                destination_ip    (basestring) -- IP of the destination VM

                network           (basestring) -- Network of the detination vm

                indexing_v2       (boolean)    -- true if restore has to be performed from
                                                  child vm for v2

            Returns:
                object - instance of the Job class for this restore job
                         (a list of Job objects when indexing_v2 is True)

            Raises:
                SDKException:
                    if inputs are not of correct type as per definition

                    if failed to initialize job

                    if response is empty

                    if response is not success
        """
        restore_option = {}

        # check mandatory input parameters are correct
        if vm_to_restore and not isinstance(vm_to_restore, basestring):
            raise SDKException('Subclient', '101')

        if copy_precedence:
            restore_option['copy_precedence_applicable'] = True

        # populating proxy client. It assumes the proxy controller added in instance
        # properties if not specified
        if proxy_client is not None:
            restore_option['client'] = proxy_client

        if restored_vm_name:
            if not(isinstance(vm_to_restore, basestring) or
                   isinstance(restored_vm_name, basestring)):
                raise SDKException('Subclient', '101')
            restore_option['restore_new_name'] = restored_vm_name

        if vm_to_restore:
            vm_to_restore = [vm_to_restore]

        # pristine copy: the v2 path below restarts from it for each child VM
        restore_option_copy = restore_option.copy()

        if indexing_v2:
            vm_to_restore = self._set_vm_to_restore(vm_to_restore)
            job_objects = []
            # v2 (indexing) clients: one restore job per child VM pseudo-client
            for eachvm in vm_to_restore:
                restore_option = {}
                restore_option = restore_option_copy.copy()
                self._set_restore_inputs(
                    restore_option,
                    in_place=False,
                    vcenter_client=vcenter_client,
                    datastore=datastore,
                    esx_host=esx_host,
                    esx_server='',
                    unconditional_overwrite=overwrite,
                    power_on=power_on,
                    vm_to_restore=[eachvm],
                    disk_option=self._disk_option[disk_option],
                    transport_mode=self._transport_mode[transport_mode],
                    copy_precedence=copy_precedence,
                    volume_level_restore=1,
                    source_item=[],
                    source_ip=source_ip,
                    destination_ip=destination_ip,
                    network=network
                )
                request_json = self._prepare_fullvm_restore_json(restore_option)
                # re-point the request associations at the child VM's own
                # client/backupset/subclient entities
                _vmclient_obj = self._commcell_object.clients.get(eachvm)
                _vmagent_obj = _vmclient_obj.agents.get(self._agent_object._agent_name)
                _vminstance_obj = _vmagent_obj.instances.get('VMInstance')
                _vmbackupset_obj = _vminstance_obj.backupsets.get(
                    self._backupset_object._backupset_name)
                _vmsub_obj = _vmbackupset_obj.subclients.get('default')
                request_json['taskInfo']['associations'][0]['clientName'] = eachvm
                request_json['taskInfo']['associations'][0]['clientId'] = _vmsub_obj._subClientEntity['clientId']
                request_json['taskInfo']['associations'][0]['instanceName'] = 'VMInstance'
                request_json['taskInfo']['associations'][0]['backupsetId'] = _vmsub_obj._subClientEntity['backupsetId']
                request_json['taskInfo']['associations'][0]['instanceId'] = _vmsub_obj._subClientEntity['instanceId']
                request_json['taskInfo']['associations'][0]['subclientGUID'] = _vmsub_obj._subClientEntity['subclientGUID']
                request_json['taskInfo']['associations'][0]['subclientName'] = 'default'
                request_json['taskInfo']['associations'][0]['subclientId'] = _vmsub_obj._subClientEntity['subclientId']
                job_objects.append(self._process_restore_response(request_json))
            return job_objects
        else:
            self._set_restore_inputs(
                restore_option,
                in_place=False,
                vcenter_client=vcenter_client,
                datastore=datastore,
                esx_host=esx_host,
                esx_server='',
                unconditional_overwrite=overwrite,
                power_on=power_on,
                vm_to_restore=self._set_vm_to_restore(vm_to_restore),
                disk_option=self._disk_option[disk_option],
                transport_mode=self._transport_mode[transport_mode],
                copy_precedence=copy_precedence,
                volume_level_restore=1,
                source_item=[],
                source_ip=source_ip,
                destination_ip=destination_ip,
                network=network
            )
            request_json = self._prepare_fullvm_restore_json(restore_option)
            return self._process_restore_response(request_json)
    def disk_restore(self,
                     vm_name,
                     destination_path,
                     disk_name=None,
                     proxy_client=None,
                     copy_precedence=0,
                     convert_to=None):
        """Restores the disk specified in the input paths list to the same location

            Args:
                vm_name          (basestring) -- Name of the VM added in subclient content
                                                 whose disk is selected for restore

                destination_path (basestring) -- Staging (destination) path to restore
                                                 the disk.

                disk_name        (list)       -- name of the disk which has to be restored
                                                 (only vmdk files permitted - enter full
                                                 name of the disk)
                                                 default: None (restores every vmdk disk)

                proxy_client     (basestring) -- Destination proxy client to be used
                                                 default: None

                copy_precedence  (int)        -- SP copy precedence from which browse has
                                                 to be performed

                convert_to       (basestring) -- disk format for the restored disk
                                                 (applicable only when the vmdk disk is
                                                 selected for restore). Allowed values are
                                                 "VHDX" or "VHD"
                                                 default: None

            Returns:
                object - instance of the Job class for this restore job

            Raises:
                SDKException:
                    if inputs are not passed in proper expected format

                    if response is empty

                    if response is not success
        """
        vm_names, vm_ids = self._get_vm_ids_and_names_dict_from_browse()
        _disk_restore_option = {}

        disk_extn = '.vmdk'
        if not disk_name:
            disk_name = []
        else:
            disk_extn = self._get_disk_extension(disk_name)

        # check if inputs are correct
        if not (isinstance(vm_name, basestring) and
                isinstance(destination_path, basestring) and
                isinstance(disk_name, list) and
                disk_extn == '.vmdk'):
            raise SDKException('Subclient', '101')

        if convert_to is not None:
            convert_to = convert_to.lower()
            if convert_to not in ['vhdx', 'vhd']:
                raise SDKException('Subclient', '101')

        if copy_precedence:
            _disk_restore_option['copy_precedence_applicable'] = True

        # fetching all disks from the vm
        disk_list, disk_info_dict = self.disk_level_browse(
            "\\" + vm_ids[vm_name])

        if not disk_name:   # if disk names are not provided, restore all vmdk disks
            for each_disk_path in disk_list:
                disk_name.append(each_disk_path.split('\\')[-1])

        else:   # else, check if the given VM has a disk with the list of disks in disk_name.
            for each_disk in disk_name:
                each_disk_path = "\\" + str(vm_name) + "\\" + each_disk
                if each_disk_path not in disk_list:
                    raise SDKException('Subclient', '111')

        # if conversion option is given
        if convert_to is not None:
            dest_disk_dict = {
                'VHD_DYNAMIC': 13,
                'VHDX_DYNAMIC': 21
            }
            vol_restore, dest_disk = self._get_conversion_disk_Type('vmdk', convert_to)
            _disk_restore_option["destination_disktype"] = dest_disk_dict[dest_disk]
            # volume_level_restore 4 == disk restore with conversion
            _disk_restore_option["volume_level_restore"] = 4
        else:
            # volume_level_restore 3 == plain disk restore
            _disk_restore_option["volume_level_restore"] = 3

        _disk_restore_option["destination_vendor"] = \
            self._backupset_object._instance_object._vendor_id

        # fall back to the instance co-ordinator when no proxy was given
        if proxy_client is not None:
            _disk_restore_option['client'] = proxy_client
        else:
            _disk_restore_option['client'] = self._backupset_object._instance_object.co_ordinator

        # set Source item List
        src_item_list = []
        for each_disk in disk_name:
            src_item_list.append("\\" + vm_ids[vm_name] + "\\" + each_disk.split("\\")[-1])

        _disk_restore_option['paths'] = src_item_list

        self._set_restore_inputs(
            _disk_restore_option,
            in_place=False,
            copy_precedence=copy_precedence,
            destination_path=destination_path,
            paths=src_item_list
        )

        request_json = self._prepare_disk_restore_json(_disk_restore_option)
        return self._process_restore_response(request_json)
    def full_vm_conversion_azurerm(
            self,
            azure_client,
            vm_to_restore=None,
            resource_group=None,
            storage_account=True,
            overwrite=True,
            power_on=True,
            proxy_client=None,
            instance_size=None,
            public_ip=True,
            restore_as_managed=False,
            copy_precedence=0,
            restore_option=None):
        """This converts the VMware VM to AzureRM.

            Args:
                azure_client       (basestring) -- name of the AzureRM client where the
                                                   VM should be restored.

                vm_to_restore      (list)       -- provide the VM names to restore

                resource_group     (basestring) -- destination Resource group in the AzureRM

                storage_account    (basestring) -- storage account where the restored VM
                                                   should be located in AzureRM
                                                   NOTE(review): default is True (a bool)
                                                   although a name is expected -- confirm.

                overwrite          (bool)       -- overwrite the existing VM
                                                   default: True

                power_on           (bool)       -- power on the restored VM
                                                   default: True

                proxy_client       (basestring) -- destination proxy client

                instance_size      (basestring) -- Instance Size of restored VM

                public_ip          (bool)       -- If True, creates the Public IP of
                                                   restored VM

                restore_as_managed (bool)       -- If True, restore as Managed VM in Azure

                copy_precedence    (int)        -- copy precedence value
                                                   default: 0

                restore_option     (dict)       -- pre-populated restore options, extended
                                                   in place (default: None)

            Returns:
                object - instance of the Job class for this restore job

            Raises:
                SDKException:
                    if inputs are not of correct type as per definition

                    if failed to initialize job

                    if response is empty

                    if response is not success
        """
        if restore_option is None:
            restore_option = {}

        # check mandatory input parameters are correct
        if not (isinstance(azure_client, basestring)):
            raise SDKException('Subclient', '101')

        subclient = self._set_vm_conversion_defaults(azure_client, restore_option)
        instance = subclient._backupset_object._instance_object
        # default to the destination instance's first access node
        if proxy_client is None:
            proxy_client = instance.server_host_name[0]

        self._set_restore_inputs(
            restore_option,
            in_place=False,
            vcenter_client=azure_client,
            datastore=storage_account,
            esx_host=resource_group,
            unconditional_overwrite=overwrite,
            client_name=proxy_client,
            power_on=power_on,
            vm_to_restore=self._set_vm_to_restore(vm_to_restore),
            copy_precedence=copy_precedence,
            createPublicIP=public_ip,
            restoreAsManagedVM=restore_as_managed,
            instanceSize=instance_size,
            volume_level_restore=1,
            destination_instance=instance.instance_name,
            backupset_client_name=instance._agent_object._client_object.client_name
        )

        request_json = self._prepare_fullvm_restore_json(restore_option)
        return self._process_restore_response(request_json)
| 44.052459 | 124 | 0.500037 |
cce936e48cd1ac83b848d242a5b412e47d6452f8 | 2,365 | py | Python | voctocore/lib/tcpmulticonnection.py | 0xflotus/voctomix | 3156f3546890e6ae8d379df17e5cc718eee14b15 | [
"MIT"
] | 521 | 2015-01-07T21:43:30.000Z | 2022-03-17T22:07:13.000Z | voctocore/lib/tcpmulticonnection.py | 0xflotus/voctomix | 3156f3546890e6ae8d379df17e5cc718eee14b15 | [
"MIT"
] | 241 | 2015-05-27T10:11:09.000Z | 2022-02-11T03:29:20.000Z | voctocore/lib/tcpmulticonnection.py | 0xflotus/voctomix | 3156f3546890e6ae8d379df17e5cc718eee14b15 | [
"MIT"
] | 111 | 2015-08-13T20:06:52.000Z | 2022-03-11T09:48:46.000Z | import logging
import socket
import sys
from queue import Queue
from abc import ABCMeta, abstractmethod
from gi.repository import GObject
class TCPMultiConnection(object, metaclass=ABCMeta):
    """Accepts any number of TCP clients on a single listening socket.

    Each accepted connection gets its own outgoing Queue; subclasses decide
    what to do with a new connection by implementing on_accepted().
    """

    def __init__(self, port):
        if not hasattr(self, 'log'):
            self.log = logging.getLogger('TCPMultiConnection')

        self._port = None

        try:
            self.boundSocket = None
            self.currentConnections = {}

            self.log.debug('Binding to Source-Socket on [::]:%u', port)
            self.boundSocket = socket.socket(socket.AF_INET6)
            listener = self.boundSocket
            listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            # dual-stack: accept IPv4 clients on the IPv6 socket as well
            listener.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY,
                                False)
            listener.bind(('::', port))
            listener.listen(1)
            self._port = port

            self.log.debug('Setting GObject io-watch on Socket')
            GObject.io_add_watch(listener, GObject.IO_IN, self.on_connect)
        except OSError:
            self.log.error("Can not open listening port %d because it is already in use. Is another instance of voctocore running already?" % port)
            sys.exit(-1)

    def port(self):
        bound = self._port if self._port else 0
        return "%s:%d" % (socket.gethostname(), bound)

    def num_connections(self):
        return len(self.currentConnections)

    def is_input(self):
        return False

    def on_connect(self, sock, *args):
        connection, address = sock.accept()
        connection.setblocking(False)

        self.log.info("Incoming Connection from [%s]:%u (fd=%u)",
                      address[0], address[1], connection.fileno())

        self.currentConnections[connection] = Queue()
        self.log.info('Now %u Receiver(s) connected',
                      len(self.currentConnections))

        self.on_accepted(connection, address)
        # keep the GObject io-watch installed
        return True

    def close_connection(self, conn):
        if conn in self.currentConnections:
            conn.close()
            self.currentConnections.pop(conn)
            self.log.info('Now %u Receiver connected',
                          len(self.currentConnections))

    @abstractmethod
    def on_accepted(self, conn, addr):
        raise NotImplementedError(
            "child classes of TCPMultiConnection must implement on_accepted()"
        )
| 32.39726 | 147 | 0.612262 |
3c5d70e08ebe2600901ea97dd1e5635afcab25c4 | 3,279 | py | Python | contrib/zmq/zmq_sub3.4.py | rocheston/rosecoin | 5ea5390fc250bae8fdea9667eae760bbf6225f8c | [
"MIT"
] | 4 | 2020-12-15T02:44:30.000Z | 2022-03-24T13:09:20.000Z | contrib/zmq/zmq_sub3.4.py | Penny-Admixture/rosecoin | 5ea5390fc250bae8fdea9667eae760bbf6225f8c | [
"MIT"
] | null | null | null | contrib/zmq/zmq_sub3.4.py | Penny-Admixture/rosecoin | 5ea5390fc250bae8fdea9667eae760bbf6225f8c | [
"MIT"
] | 1 | 2021-05-02T23:07:21.000Z | 2021-05-02T23:07:21.000Z | #!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
ZMQ example using python3's asyncio
RoseCoin should be started with the command line arguments:
rosecoind -testnet -daemon \
-zmqpubrawtx=tcp://127.0.0.1:28332 \
-zmqpubrawblock=tcp://127.0.0.1:28332 \
-zmqpubhashtx=tcp://127.0.0.1:28332 \
-zmqpubhashblock=tcp://127.0.0.1:28332
We use the asyncio library here. `self.handle()` installs itself as a
future at the end of the function. Since it never returns with the event
loop having an empty stack of futures, this creates an infinite loop. An
alternative is to wrap the contents of `handle` inside `while True`.
The `@asyncio.coroutine` decorator and the `yield from` syntax found here
was introduced in python 3.4 and has been deprecated in favor of the `async`
and `await` keywords respectively.
A blocking example using python 2.7 can be obtained from the git history:
https://github.com/bitcoin/bitcoin/blob/37a7fe9e440b83e2364d5498931253937abe9294/contrib/zmq/zmq_sub.py
"""
import binascii
import asyncio
import zmq
import zmq.asyncio
import signal
import struct
import sys
# Bail out on interpreters older than 3.4: the asyncio / zmq.asyncio APIs
# used below do not exist there. Tuple comparison replaces the old
# `major >= 3 and minor >= 4` test, which wrongly rejected any X.0-X.3
# release with X > 3 (the minor check was independent of the major).
if sys.version_info < (3, 4):
    print("This example only works with Python 3.4 and greater")
    sys.exit(1)

port = 28332
class ZMQHandler():
    """Subscribes to all four bitcoind ZMQ topics and prints notifications."""

    def __init__(self):
        # NOTE(review): zmq.asyncio.install() and @asyncio.coroutine are
        # deprecated in newer zmq/python versions -- confirm target versions.
        self.loop = zmq.asyncio.install()
        self.zmqContext = zmq.asyncio.Context()

        self.zmqSubSocket = self.zmqContext.socket(zmq.SUB)
        self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "hashblock")
        self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "hashtx")
        self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "rawblock")
        self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "rawtx")
        self.zmqSubSocket.connect("tcp://127.0.0.1:%i" % port)

    @asyncio.coroutine
    def handle(self):
        """Receive one multipart message, print it, then reschedule itself."""
        msg = yield from self.zmqSubSocket.recv_multipart()
        topic = msg[0]
        body = msg[1]
        sequence = "Unknown"
        # the optional trailing frame is a little-endian uint32 sequence number
        if len(msg[-1]) == 4:
            msgSequence = struct.unpack('<I', msg[-1])[-1]
            sequence = str(msgSequence)
        if topic == b"hashblock":
            print('- HASH BLOCK ('+sequence+') -')
            print(binascii.hexlify(body))
        elif topic == b"hashtx":
            print('- HASH TX  ('+sequence+') -')
            print(binascii.hexlify(body))
        elif topic == b"rawblock":
            print('- RAW BLOCK HEADER ('+sequence+') -')
            # only the 80-byte header of the raw block is printed
            print(binascii.hexlify(body[:80]))
        elif topic == b"rawtx":
            print('- RAW TX ('+sequence+') -')
            print(binascii.hexlify(body))
        # schedule ourselves to receive the next message
        asyncio.ensure_future(self.handle())

    def start(self):
        """Install the SIGINT handler and run the event loop forever."""
        self.loop.add_signal_handler(signal.SIGINT, self.stop)
        self.loop.create_task(self.handle())
        self.loop.run_forever()

    def stop(self):
        """Stop the loop and tear down the ZMQ context."""
        self.loop.stop()
        self.zmqContext.destroy()
# Instantiate the handler and block forever processing ZMQ notifications.
daemon = ZMQHandler()
daemon.start()
| 36.433333 | 107 | 0.648978 |
4b354fc42a634ba6c09c1a3d6a8b062296c19685 | 295 | py | Python | record/templatetags/record_extras.py | souper-streetwise/charity_data | 6d91d4b3d635dad868a5eabab94bbb20eea3d082 | [
"MIT"
] | 3 | 2020-02-05T06:42:15.000Z | 2020-02-17T14:39:58.000Z | record/templatetags/record_extras.py | souper-streetwise/charity_data | 6d91d4b3d635dad868a5eabab94bbb20eea3d082 | [
"MIT"
] | 8 | 2020-02-03T19:25:18.000Z | 2021-03-10T22:30:58.000Z | record/templatetags/record_extras.py | souper-streetwise/charity_data | 6d91d4b3d635dad868a5eabab94bbb20eea3d082 | [
"MIT"
] | null | null | null | from django import template
# Module-level tag registry Django looks for in a templatetags module.
register = template.Library()
@register.inclusion_tag("record/bulk_entry.html", takes_context=True)
def bulk_entry(context):
    """Render the bulk-entry form with the caller's full template context."""
    return context
@register.inclusion_tag("record/clicker_entry.html", takes_context=True)
def clicker_entry(context):
    """Render the clicker-entry widget with the caller's full template context."""
    return context
| 24.583333 | 72 | 0.79661 |
f57af5cdf19bb2746aa142a74a365237100994f8 | 6,571 | py | Python | ownphotos/settings.py | robertcontois/librephotos | 42f5ebc4f286e3da0bcadfc05004622b46ec3f29 | [
"MIT"
] | null | null | null | ownphotos/settings.py | robertcontois/librephotos | 42f5ebc4f286e3da0bcadfc05004622b46ec3f29 | [
"MIT"
] | null | null | null | ownphotos/settings.py | robertcontois/librephotos | 42f5ebc4f286e3da0bcadfc05004622b46ec3f29 | [
"MIT"
] | null | null | null | """
Django settings for ownphotos project.
Generated by 'django-admin startproject' using Django 1.11.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
import datetime
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ['SECRET_KEY']
RQ_API_TOKEN = os.environ['SECRET_KEY']
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = (os.environ.get('DEBUG', '').lower() == 'true')
ALLOWED_HOSTS = [
'192.168.1.100', 'localhost', 'ownphotos-api.local','backend',
os.environ.get('BACKEND_HOST'), 'ownphotos.local'
]
AUTH_USER_MODEL = 'api.User'
SIMPLE_JWT = {
'ACCESS_TOKEN_LIFETIME': datetime.timedelta(minutes=5),
# 'ACCESS_TOKEN_LIFETIME': datetime.timedelta(minutes=60),
'REFRESH_TOKEN_LIFETIME': datetime.timedelta(days=7),
}
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.postgres',
    'api',
    'nextcloud',
    'rest_framework',
    'corsheaders',
    'django_extensions',
    "django_rq",
    'constance',
    'constance.backends.database',
]
# django-constance: runtime-editable settings stored in the database and
# cached through the default cache backend.
CONSTANCE_BACKEND = 'constance.backends.database.DatabaseBackend'
CONSTANCE_DATABASE_CACHE_BACKEND = 'default'
CONSTANCE_CONFIG = {
    'ALLOW_REGISTRATION': (False, 'Publicly allow user registration', bool)
}
INTERNAL_IPS = ('127.0.0.1', 'localhost', '192.168.1.100')
# Request headers the browser is allowed to send on cross-origin requests.
CORS_ALLOW_HEADERS = (
    'cache-control',
    'accept',
    'accept-encoding',
    'allow-credentials',
    'withcredentials',
    'authorization',
    'content-type',
    'dnt',
    'origin',
    'user-agent',
    'x-csrftoken',
    'x-requested-with',
)
# Frontend origins allowed to call this API.
CORS_ORIGIN_WHITELIST = (
    'http://localhost:3000',
    'http://192.168.1.100:3000'
)
REST_FRAMEWORK = {
    'DEFAULT_PERMISSION_CLASSES': (
        'rest_framework.permissions.IsAuthenticated', ),
    'DEFAULT_AUTHENTICATION_CLASSES': (
        'rest_framework_simplejwt.authentication.JWTAuthentication',
        'rest_framework.authentication.SessionAuthentication',
        'rest_framework.authentication.BasicAuthentication',
    ),
    'DEFAULT_FILTER_BACKENDS':
    ('django_filters.rest_framework.DjangoFilterBackend', ),
    'DEFAULT_PAGINATION_CLASS':
    'rest_framework.pagination.LimitOffsetPagination',
    # Effectively "no pagination" for typically-sized libraries.
    'PAGE_SIZE':
    20000,
}
REST_FRAMEWORK_EXTENSIONS = {
    'DEFAULT_OBJECT_CACHE_KEY_FUNC':
    'rest_framework_extensions.utils.default_object_cache_key_func',
    'DEFAULT_LIST_CACHE_KEY_FUNC':
    'rest_framework_extensions.utils.default_list_cache_key_func',
}
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    # CORS middleware must come before CommonMiddleware.
    'corsheaders.middleware.CorsMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'api.middleware.FingerPrintMiddleware',
]
ROOT_URLCONF = 'ownphotos.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'ownphotos.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
# All connection parameters come from the environment (container setup).
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.' + os.environ['DB_BACKEND'],
        'NAME': os.environ['DB_NAME'],
        'USER': os.environ['DB_USER'],
        'PASSWORD': os.environ['DB_PASS'],
        'HOST': os.environ['DB_HOST'],
        'PORT': os.environ['DB_PORT'],
    },
}
# Redis connection: prefer a unix socket when REDIS_PATH is set,
# otherwise fall back to host/port from the environment.
if 'REDIS_PATH' in os.environ:
    redis_path = 'unix://' + os.environ['REDIS_PATH']
    redis_path += '?db=' + os.environ.get('REDIS_DB', '0')
else:
    redis_path = "redis://" + os.environ['REDIS_HOST']
    redis_path += ":" + os.environ["REDIS_PORT"] + "/1"
CACHES = {
    "default": {
        "BACKEND": "django_redis.cache.RedisCache",
        "LOCATION": redis_path,
        # Cache entries live for one day.
        "TIMEOUT": 60 * 60 * 24,
        "OPTIONS": {
            "CLIENT_CLASS": "django_redis.client.DefaultClient",
        }
    }
}
# django-rq job queue reuses the default Redis cache connection.
RQ_QUEUES = {
    'default': {
        'USE_REDIS_CACHE': 'default',
        'DEFAULT_TIMEOUT': 360,
        'DB': 0
    }
}
RQ = {
    'DEFAULT_RESULT_TTL': 60,
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME':
        'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME':
        'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME':
        'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME':
        'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = os.environ['TIME_ZONE']
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
# Media live in a protected directory; access is mediated by the API.
MEDIA_ROOT = os.path.join(BASE_DIR, 'protected_media')
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
# Mount point for the user's photo library.
DATA_ROOT = '/data'
# Generated thumbnail sizes in pixels (width, height).
THUMBNAIL_SIZE_TINY = (30, 30)
THUMBNAIL_SIZE_SMALL = (100, 100)
THUMBNAIL_SIZE_MEDIUM = (500, 500)
THUMBNAIL_SIZE = (500, 500)
THUMBNAIL_SIZE_BIG = (2048, 2048)
FULLPHOTO_SIZE = (1000, 1000)
CORS_ORIGIN_ALLOW_ALL = False
CORS_ALLOW_CREDENTIALS = True
# Companion service used for semantic image similarity search.
IMAGE_SIMILARITY_SERVER = 'http://localhost:8002'
| 26.820408 | 83 | 0.681327 |
2a707051bebad818283dfad295b6fcc2d749c457 | 4,495 | py | Python | data-processing/converter_in.py | Ali-RT/Transit-Logic | ca812184d31b884b26173a506649c4b2828f0502 | [
"Apache-2.0"
] | null | null | null | data-processing/converter_in.py | Ali-RT/Transit-Logic | ca812184d31b884b26173a506649c4b2828f0502 | [
"Apache-2.0"
] | null | null | null | data-processing/converter_in.py | Ali-RT/Transit-Logic | ca812184d31b884b26173a506649c4b2828f0502 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
"""Kafka deserializer
"""
__author__ = 'Ali Rahim-Taleqani'
__copyright__ = 'Copyright 2020, The Insight Data Engineering'
__credits__ = [""]
__version__ = '0.2'
__maintainer__ = 'Ali Rahim-Taleqani'
__email__ = 'ali.rahim.taleani@gmail.com'
__status__ = 'Development'
import asyncio
import json
from uuid import uuid4
from confluent_kafka.admin import AdminClient, NewTopic
from confluent_kafka.avro import AvroConsumer, CachedSchemaRegistryClient
from confluent_kafka import Producer
import logging.config
import argparse
# Configure logging from the adjacent ini file; keep pre-existing loggers.
logging.config.fileConfig('logging.ini', disable_existing_loggers=False)
logger = logging.getLogger(__name__)
def topic_exists(client, topic_name):
    """Return True if ``topic_name`` already exists on the cluster.

    Args:
        client: Kafka admin client used to query cluster metadata.
        topic_name (str): name of the topic to look for.
    """
    metadata = client.list_topics(timeout=2)
    existing = {meta.topic for meta in metadata.topics.values()}
    return topic_name in existing
def create_topic(client, topic_name):
    """Create ``topic_name`` on the cluster and log the outcome.

    Args:
        client: Kafka admin client.
        topic_name (str): name of the topic to create.
    """
    new_topic = NewTopic(
        topic=topic_name,
        num_partitions=5,
        replication_factor=1,
        config={
            "cleanup.policy": "delete",
            "delete.retention.ms": "2000",
            "file.delete.delay.ms": "2000",
        },
    )
    # create_topics() returns a dict mapping topic name -> future; the
    # future raises inside result() if creation failed.
    for topic, future in client.create_topics([new_topic]).items():
        try:
            future.result()
            logger.info("topic created")
        except Exception as e:
            logger.error(f"failed to create topic {topic_name}: {e}")
async def converter(CONSUME_TOPIC, PRODUCE_TOPIC, BROKER_URL, SCHEMA_REGISTRY_URL):
    """Relay Avro-decoded records from CONSUME_TOPIC to PRODUCE_TOPIC as JSON.

    Consumes with an AvroConsumer (the schema registry performs decoding)
    and republishes each record as a JSON string keyed by a fresh UUID.
    Runs forever; yields to the event loop between polls.
    """
    schema_registry = CachedSchemaRegistryClient({"url": SCHEMA_REGISTRY_URL})
    c = AvroConsumer(
        {
            "bootstrap.servers": BROKER_URL,
            "client.id": "project-insight",
            "group.id": "convertor-in-consumer",
            "auto.offset.reset": "earliest",
        },
        schema_registry=schema_registry,
    )
    c.subscribe([CONSUME_TOPIC])
    p = Producer({"bootstrap.servers": BROKER_URL})
    while True:
        message = c.poll(1.0)
        if message is None:
            logger.info("no message received by consumer")
        elif message.error() is not None:
            logger.error(f"error from consumer {message.error()}")
        else:
            try:
                # BUG FIX: was a leftover debug print(); route through the
                # module logger at debug level instead of stdout.
                logger.debug("received message: %s", message.value())
                p.produce(topic=PRODUCE_TOPIC, key=str(uuid4()), value=json.dumps(message.value()))
            except KeyError as e:
                logger.error(f"Failed to unpack message {e}")
        # Yield control so other tasks on the loop can run.
        await asyncio.sleep(0.01)
async def consume_produce(con_topic, pro_topic, broker_url, schema_url):
    """Spawn the converter pipeline as a task and wait for it to finish."""
    task = asyncio.create_task(converter(con_topic, pro_topic, broker_url, schema_url))
    await task
def main(args):
    """Wire up topics for ``args.operator`` and run the converter loop.

    Creates the converter output topic if it does not yet exist, then runs
    the async consume/produce pipeline until interrupted with Ctrl-C.
    """
    SCHEMA_REGISTRY_URL = args.schema_registry
    BROKER_URL = args.bootstrap_servers
    OPERATOR = args.operator
    # Topic naming convention: com.insight.project.<operator>.<stage>
    CONSUME_TOPIC = f"com.insight.project.{OPERATOR}.producer"
    PRODUCE_TOPIC = f"com.insight.project.{OPERATOR}.converter"
    client = AdminClient({"bootstrap.servers": BROKER_URL})
    exists = topic_exists(client, PRODUCE_TOPIC)
    logger.info(f"Topic {PRODUCE_TOPIC} exists: {exists}")
    if not exists:
        create_topic(client, PRODUCE_TOPIC)
    try:
        asyncio.run(consume_produce(CONSUME_TOPIC, PRODUCE_TOPIC, BROKER_URL, SCHEMA_REGISTRY_URL))
    except KeyboardInterrupt:
        # BUG FIX: previously logged a copy-pasted "Failed to unpack
        # message" error here; Ctrl-C is a normal shutdown, not an error.
        logger.info("shutting down")
if __name__ == '__main__':
    # CLI entry point: broker address, operator name and schema registry
    # location are all required flags.
    parser = argparse.ArgumentParser(description="Converter In")
    parser.add_argument('-b', dest="bootstrap_servers", required=True,
                        help="Bootstrap broker(s) (host[:port])")
    parser.add_argument('-o', dest="operator", required=True,
                        help="Operator name")
    parser.add_argument('-s', dest="schema_registry", required=True,
                        help="Schema Registry (http(s)://host[:port]")
    main(parser.parse_args())
| 32.107143 | 99 | 0.639377 |
3c738e3b5ff1849fdf669ee71bcf176fc0d62929 | 2,641 | py | Python | NUMPY-SKILLS/code.py | Manpreet1377/ga-learner-dst-repo | b712886472c8dc88e266494adb5c24f0a9987014 | [
"MIT"
] | null | null | null | NUMPY-SKILLS/code.py | Manpreet1377/ga-learner-dst-repo | b712886472c8dc88e266494adb5c24f0a9987014 | [
"MIT"
] | null | null | null | NUMPY-SKILLS/code.py | Manpreet1377/ga-learner-dst-repo | b712886472c8dc88e266494adb5c24f0a9987014 | [
"MIT"
] | null | null | null | # --------------
# Importing header files
import numpy as np
import warnings
warnings.filterwarnings('ignore')
#New record
new_record=[[50, 9, 4, 1, 0, 0, 40, 0]]
#Reading file
data = np.genfromtxt(path, delimiter=",", skip_header=1)
#Code starts here
print("STEP 1: LOAD DATA TO NUMPY ARRAY AND ADDING A NEW RECORD TO IT")
census = np.concatenate((data,new_record), axis = 0)
print(" The shape of concatenated data is {}".format(np.shape(census)))
print("STEP 2: ANALYSIS OF AGE DISTRIBUTION")
age = census[:,0]
print(" The array of ages: {}".format(age))
max_age = age.max()
min_age = age.min()
age_mean = age.mean()
age_std = np.std(age)
print(" Maximum age is {} \n Minimum Age is {} \n The mean of ages is {} \n The standard deviation of ages is{}".format(max_age, min_age, age_mean, age_std))
print("STEP 3:CHECKING COUNTRY'S RACE DISTRIBUTION")
#RACE COLUMN ARRAYS
race = census[:,2]
race_0 = race[race==0]
race_1 = race[race==1]
race_2 = race[race==2]
race_3 = race[race==3]
race_4 = race[race==4]
#LENGTH OF RACE ARRAYS
len_0 = len(race_0)
len_1 = len(race_1)
len_2 = len(race_2)
len_3 = len(race_3)
len_4 = len(race_4)
#MINORITY RACE
mini = (len_0, len_1, len_2, len_3, len_4)
minority_race = mini.index(min(mini))
print(" The minority race is {}".format(minority_race))
print("STEP 4: CHECKING THE GOVT. POLICY - CITIZENS ABOVE 60 SHOULD NOT WORK MORE THAN 25 HRS PER WEEK")
senior_citizens = census[census[:,0] > 60]
working_hours_sum = senior_citizens.sum(axis=0)[6]
print(" The sum of working hours is {}".format(working_hours_sum))
senior_citizens_len = len(senior_citizens)
print(" The number of senior citizens are {}".format(senior_citizens_len))
avg_working_hours = working_hours_sum / senior_citizens_len
print(" The average working hours of citizens above age of 60 is {}".format(avg_working_hours))
if avg_working_hours <= 25:
print(" THE GOVT. POLICY IS BEING FOLLOWED")
else:
print(" THE GOVT. POLICY IS NOT BEING FOLLOWED")
print("STEP 5: CHECKING WHETHER HIGHLY EDUCATED PEOPLE HAVE BETTER PAY IN GENERAL")
high = census[census[:,1] > 10]
low = census[census[:,1] <= 10]
sum_high = high.sum(axis=0)[7]
len_high = len(high)
avg_pay_high = sum_high / len_high
sum_low = low.sum(axis=0)[7]
len_low = len(low)
avg_pay_low = sum_low / len_low
print(" The average pay of highly educated citizens is {} \n The pay of less educated people is {}".format(avg_pay_high, avg_pay_low ))
if avg_pay_high > avg_pay_low:
print(" HIGHER EDUCATED HAVE BETTER PAY IN GENERAL")
else:
print(" THE LESS EDUCATED HAVE BETTER PAY TOO")
| 34.75 | 158 | 0.698978 |
8aa101ca2f5205e7605335e41ff73b2c8ae80709 | 4,462 | py | Python | tests/components/homematicip_cloud/test_alarm_control_panel.py | rwbr/home-assistant | 58e3e2105a6e142fffc39f3a68c46e1d3c861910 | [
"Apache-2.0"
] | 1 | 2020-02-15T09:18:14.000Z | 2020-02-15T09:18:14.000Z | tests/components/homematicip_cloud/test_alarm_control_panel.py | rwbr/home-assistant | 58e3e2105a6e142fffc39f3a68c46e1d3c861910 | [
"Apache-2.0"
] | null | null | null | tests/components/homematicip_cloud/test_alarm_control_panel.py | rwbr/home-assistant | 58e3e2105a6e142fffc39f3a68c46e1d3c861910 | [
"Apache-2.0"
] | null | null | null | """Tests for HomematicIP Cloud alarm control panel."""
from homeassistant.components.alarm_control_panel import (
DOMAIN as ALARM_CONTROL_PANEL_DOMAIN,
)
from homeassistant.components.homematicip_cloud import DOMAIN as HMIPC_DOMAIN
from homeassistant.const import (
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_DISARMED,
STATE_ALARM_TRIGGERED,
)
from homeassistant.setup import async_setup_component
from .helper import get_and_check_entity_basics
async def _async_manipulate_security_zones(
    hass, home, internal_active=False, external_active=False, alarm_triggered=False
):
    """Set new values on hmip security zones.

    Mutates the mocked home's raw JSON plus the zone group objects, then
    replays the update through the home so the alarm entity re-evaluates
    its state. ``hass.async_block_till_done()`` waits for the resulting
    state machine updates to settle.
    """
    # NOTE(review): the local name ``json`` shadows the stdlib module name;
    # harmless here but worth renaming if this helper grows.
    json = home._rawJSONData  # pylint: disable=protected-access
    json["functionalHomes"]["SECURITY_AND_ALARM"]["alarmActive"] = alarm_triggered
    external_zone_id = json["functionalHomes"]["SECURITY_AND_ALARM"]["securityZones"][
        "EXTERNAL"
    ]
    internal_zone_id = json["functionalHomes"]["SECURITY_AND_ALARM"]["securityZones"][
        "INTERNAL"
    ]
    external_zone = home.search_group_by_id(external_zone_id)
    external_zone.active = external_active
    internal_zone = home.search_group_by_id(internal_zone_id)
    internal_zone.active = internal_active
    # Re-load the mutated JSON and fire the update event exactly as the
    # real HomematicIP cloud connection would.
    home.from_json(json)
    home._get_functionalHomes(json)
    home._load_functionalChannels()
    home.fire_update_event(json)
    await hass.async_block_till_done()
async def test_manually_configured_platform(hass):
    """Ensure manual YAML platform config does not create an access point."""
    config = {ALARM_CONTROL_PANEL_DOMAIN: {"platform": HMIPC_DOMAIN}}
    setup_ok = await async_setup_component(hass, ALARM_CONTROL_PANEL_DOMAIN, config)
    assert setup_ok is True
    assert not hass.data.get(HMIPC_DOMAIN)
async def test_hmip_alarm_control_panel(hass, default_mock_hap_factory):
    """Test HomematicipAlarmControlPanel state transitions.

    Walks the panel through arm-away, arm-home, disarm and triggered
    states, asserting both the service call forwarded to the mocked HmIP
    home and the resulting Home Assistant entity state.
    """
    entity_id = "alarm_control_panel.hmip_alarm_control_panel"
    entity_name = "HmIP Alarm Control Panel"
    device_model = None
    mock_hap = await default_mock_hap_factory.async_get_mock_hap(
        test_groups=["EXTERNAL", "INTERNAL"]
    )
    ha_state, hmip_device = get_and_check_entity_basics(
        hass, mock_hap, entity_id, entity_name, device_model
    )
    assert ha_state.state == "disarmed"
    assert not hmip_device
    home = mock_hap.home
    # BUG FIX: state constants are plain strings, so equality (==) is the
    # correct comparison; the previous ``is`` checks only passed thanks to
    # CPython string interning.
    # Arm away -> both zones (internal, external) must be activated.
    await hass.services.async_call(
        "alarm_control_panel", "alarm_arm_away", {"entity_id": entity_id}, blocking=True
    )
    assert home.mock_calls[-1][0] == "set_security_zones_activation"
    assert home.mock_calls[-1][1] == (True, True)
    await _async_manipulate_security_zones(
        hass, home, internal_active=True, external_active=True
    )
    assert hass.states.get(entity_id).state == STATE_ALARM_ARMED_AWAY
    # Arm home -> only the external zone is activated.
    await hass.services.async_call(
        "alarm_control_panel", "alarm_arm_home", {"entity_id": entity_id}, blocking=True
    )
    assert home.mock_calls[-1][0] == "set_security_zones_activation"
    assert home.mock_calls[-1][1] == (False, True)
    await _async_manipulate_security_zones(hass, home, external_active=True)
    assert hass.states.get(entity_id).state == STATE_ALARM_ARMED_HOME
    # Disarm -> both zones deactivated.
    await hass.services.async_call(
        "alarm_control_panel", "alarm_disarm", {"entity_id": entity_id}, blocking=True
    )
    assert home.mock_calls[-1][0] == "set_security_zones_activation"
    assert home.mock_calls[-1][1] == (False, False)
    await _async_manipulate_security_zones(hass, home)
    assert hass.states.get(entity_id).state == STATE_ALARM_DISARMED
    # Arm away again, then raise the alarm flag -> triggered.
    await hass.services.async_call(
        "alarm_control_panel", "alarm_arm_away", {"entity_id": entity_id}, blocking=True
    )
    assert home.mock_calls[-1][0] == "set_security_zones_activation"
    assert home.mock_calls[-1][1] == (True, True)
    await _async_manipulate_security_zones(
        hass, home, internal_active=True, external_active=True, alarm_triggered=True
    )
    assert hass.states.get(entity_id).state == STATE_ALARM_TRIGGERED
    # Alarm stays triggered while the external zone remains active.
    await hass.services.async_call(
        "alarm_control_panel", "alarm_arm_home", {"entity_id": entity_id}, blocking=True
    )
    assert home.mock_calls[-1][0] == "set_security_zones_activation"
    assert home.mock_calls[-1][1] == (False, True)
    await _async_manipulate_security_zones(
        hass, home, external_active=True, alarm_triggered=True
    )
    assert hass.states.get(entity_id).state == STATE_ALARM_TRIGGERED
e12c0a69d43979013e741fc81714ee98bc556536 | 1,166 | py | Python | configs/celeba.py | yeshwanthv5/PruneFL | ad1f7f33b0605d1d79abfbe42ef287fcc613a943 | [
"MIT"
] | 6 | 2021-07-01T05:35:08.000Z | 2022-03-04T18:53:31.000Z | configs/celeba.py | yeshwanthv5/PruneFL | ad1f7f33b0605d1d79abfbe42ef287fcc613a943 | [
"MIT"
] | null | null | null | configs/celeba.py | yeshwanthv5/PruneFL | ad1f7f33b0605d1d79abfbe42ef287fcc613a943 | [
"MIT"
] | 1 | 2021-06-21T14:24:47.000Z | 2021-06-21T14:24:47.000Z | EXP_NAME = "CelebA"
IMG_DIM = (218, 178)
NUM_FEATURES = 218 * 178
NUM_CLASSES = 2
NUM_TRAIN_DATA = 177457
NUM_TEST_DATA = 22831
NUM_USERS = 9343
NUM_CLIENTS = 10
NUM_LOCAL_UPDATES = 5
CLIENT_BATCH_SIZE = 20
INIT_LR = 0.001
# Conv4
DENSE_TIME = 3.724286518478766
SPARSE_ALL_TIME = 2.66478774077259
SPARSE1_TIME = 1.598415535595268
COEFFICIENTS_SINGLE = [0.0003404870760969184, 6.0777404652307096e-05, 1.7590354819798735e-05, 3.7744218545217262e-06,
0.]
SPARSE_TIME = SPARSE1_TIME - sum(COEFFICIENTS_SINGLE)
COMP_COEFFICIENTS = [c * NUM_LOCAL_UPDATES for c in COEFFICIENTS_SINGLE]
# 1MBps = 4e-6 * 2
COMM_COEFFICIENT = 5.561621025626998e-06
TIME_CONSTANT = SPARSE_TIME * NUM_LOCAL_UPDATES
MAX_ROUND = 3001
# Adaptive pruning config
ADJ_INTERVAL = 50
EVAL_DISP_INTERVAL = 10
IP_MAX_ROUNDS = 1000
IP_ADJ_INTERVAL = ADJ_INTERVAL
IP_DATA_BATCH = 10
IP_THR = 0.1
MAX_INC_DIFF = None
MAX_DEC_DIFF = 0.3
ADJ_THR_FACTOR = 1.5
ADJ_THR_ACC = ADJ_THR_FACTOR / NUM_CLASSES
ADJ_HALF_LIFE = 10000
# Iterative pruning config
NUM_ITERATIVE_PRUNING = 20
# Online algorithm config
MAX_NUM_UPLOAD = 5
| 22.423077 | 118 | 0.740995 |
b2aa814c50b27635c411c4d9e5e064b577c89245 | 4,996 | py | Python | Sample_Run/path_attn_Q/Config.py | yashchandak/GNN | 818d1aa25bd50a65bff3577758306d2e6c591100 | [
"MIT"
] | 1 | 2018-07-13T05:50:48.000Z | 2018-07-13T05:50:48.000Z | Sample_Run/path_attn_Q/Config.py | yashchandak/GNN | 818d1aa25bd50a65bff3577758306d2e6c591100 | [
"MIT"
] | null | null | null | Sample_Run/path_attn_Q/Config.py | yashchandak/GNN | 818d1aa25bd50a65bff3577758306d2e6c591100 | [
"MIT"
] | null | null | null | import tensorflow as tf
import sys, os, shutil
class Config(object):
def __init__(self, args):
self.codebase_root_path = args.path
sys.path.insert(0, self.codebase_root_path)
#### Directory paths ####
# Folder name and project name is the same
self.project_name = args.project
self.dataset_name = args.dataset
self.train_percent = args.percent
self.train_fold = args.folds
self.logs_d = '/Logs/'
self.ckpt_d = '/Checkpoints/'
self.embed_d = '/Embeddings/'
self.result_d = '/Results/'
# Retrain
self.retrain = args.retrain
# Debug with small dataset
self.debug = args.debug
# Batch size
self.batch_size = args.batch_size
# maximum depth for trajecory from NOI
self.max_depth = args.max_depth
# Number of steps to run trainer
self.max_outer_epochs = args.max_outer
self.max_inner_epochs = args.max_inner
self.boot_epochs = args.boot_epochs
self.boot_reset = args.boot_reset
# Validation frequence
self.val_epochs_freq = args.val_freq #1
# Model save frequency
self.save_epochs_after = args.save_after #0
# earlystopping hyperparametrs
self.patience = args.pat # look as this many epochs regardless
self.patience_increase = args.pat_inc # wait this much longer when a new best is found
self.improvement_threshold = args.pat_improve # a relative improvement of this much is considered significant
self.metrics = ['coverage', 'average_precision', 'ranking_loss', 'micro_f1', 'macro_f1', 'micro_precision',
'macro_precision', 'micro_recall', 'macro_recall', 'p@1', 'p@3', 'p@5', 'hamming_loss',
'bae', 'cross-entropy', 'accuracy']
class Solver(object):
def __init__(self, args):
# Initial learning rate
self.learning_rate = args.lr
self.label_update_rate = args.lu
# optimizer
if args.opt == 'adam': self.opt = tf.train.AdamOptimizer
elif args.opt == 'rmsprop': self.opt = tf.train.RMSPropOptimizer
elif args.opt == 'sgd': self.opt= tf.train.GradientDescentOptimizer
else: raise ValueError('Undefined type of optmizer')
self._optimizer = self.opt(self.learning_rate)
self._curr_label_loss = True
self._L2loss = args.l2
self.wce = args.wce
self.gradients = args.gradients
class Data_sets(object):
def __init__(self, args):
self.reduced_dims = args.reduce
self.binary_label_updates = args.bin_upd
self.label_type = args.labels
class RNNArchitecture(object):
def __init__(self, args):
self._hidden_size = args.hidden
self._keep_prob_in = 1 - args.drop_in
self._keep_prob_out = 1 - args.drop_out
self.cell = args.cell
self.concat = args.concat
self.attention = args.attention
self.solver = Solver(args)
self.data_sets = Data_sets(args)
self.mRNN = RNNArchitecture(args)
self.init2()
def init2(self):
self.walks_dir = self.codebase_root_path + 'Datasets/' + self.dataset_name+'/walks/walks_80.txt'
self.label_fold_dir = self.codebase_root_path + 'Datasets/' + self.dataset_name+'/'+self.data_sets.label_type+'/'+ str(self.train_percent) + '/' + str(self.train_fold) + '/'
self.label_dir = self.codebase_root_path + 'Datasets/' + self.dataset_name+'/labels.npy'
self.features_dir = self.codebase_root_path + 'Datasets/' + self.dataset_name+'/features.npy'
self.dist_dir = self.codebase_root_path + 'Datasets/' + self.dataset_name+'/length.pkl'
#Logs and checkpoints to be stored in the code directory
self.project_prefix_path = self.codebase_root_path+ self.project_name+'/'
def check_n_create(self, path):
if not os.path.exists(path):
#if the path doesn't exists, create it
os.mkdir(path)
else:
if not self.retrain:
#path exists but if retrain in False, then replace previous folder with new folder
shutil.rmtree(path)
os.mkdir(path)
def create(self, ext_path =""):
#create directories
ext_path = './'+ext_path
self.logs_dir = ext_path + self.logs_d
self.ckpt_dir = ext_path + self.ckpt_d
#self.embed_dir= ext_path + self.embed_d
self.results_folder = ext_path+self.result_d
self.check_n_create(ext_path)
self.check_n_create(self.logs_dir)
self.check_n_create(self.ckpt_dir)
#self.check_n_create(self.embed_dir)
self.check_n_create(self.results_folder)
| 39.650794 | 181 | 0.611689 |
6de49fc447ccb81c97d36a41c8a030b2778b0b3c | 8,720 | py | Python | userbot/modules/system_stats.py | teye01/OUBnew | 5654dae797496afa0b3a70bef39c4f69e8c8a071 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 1 | 2020-06-03T19:19:25.000Z | 2020-06-03T19:19:25.000Z | userbot/modules/system_stats.py | teye01/OUBnew | 5654dae797496afa0b3a70bef39c4f69e8c8a071 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | userbot/modules/system_stats.py | teye01/OUBnew | 5654dae797496afa0b3a70bef39c4f69e8c8a071 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 1 | 2020-05-04T16:18:52.000Z | 2020-05-04T16:18:52.000Z | # Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.c (the "License");
# you may not use this file except in compliance with the License.
#
""" Userbot module for getting information about the server. """
from asyncio import create_subprocess_exec as asyncrunapp
from asyncio.subprocess import PIPE as asyncPIPE
from platform import python_version, uname
from shutil import which
from os import remove
from telethon import version
from userbot import CMD_HELP, ALIVE_NAME
from userbot.events import register
# ================= CONSTANT =================
# Display name for .alive: ALIVE_NAME config if set, else the host name.
DEFAULTUSER = str(ALIVE_NAME) if ALIVE_NAME else uname().node
# ============================================
# NOTE(review): MODULESTR appears unused within this module -- confirm no
# other module imports it before removing.
MODULESTR = [
    "0"
]
@register(outgoing=True, pattern="^.sysd$")
async def sysdetails(sysd):
    """For .sysd command, get system info using neofetch."""
    # Channels (that are not groups) are not allowed to run this command.
    if sysd.is_channel and not sysd.is_group:
        await sysd.edit("`sysd Commad isn't permitted on channels`")
        return
    # Ignore messages that do not look like a command invocation.
    first_char = sysd.text[0]
    if first_char.isalpha() or first_char in ("/", "#", "@", "!"):
        return
    try:
        fetch = await asyncrunapp(
            "neofetch",
            "--stdout",
            stdout=asyncPIPE,
            stderr=asyncPIPE,
        )
        stdout, stderr = await fetch.communicate()
        output = str(stdout.decode().strip()) + str(stderr.decode().strip())
        await sysd.edit("`" + output + "`")
    except FileNotFoundError:
        await sysd.edit("`Install neofetch first !!`")
@register(outgoing=True, pattern="^.botver$")
async def bot_ver(event):
    """For .botver command, report the bot version from git metadata."""
    # Channels (that are not groups) are not allowed to run this command.
    if event.is_channel and not event.is_group:
        await event.edit("`botver Commad isn't permitted on channels`")
        return
    # BUG FIX: the previous version first ran an unguarded duplicate of
    # this logic that passed the whole command line as a single argv
    # element to create_subprocess_exec ("git describe --all --long"),
    # which raises FileNotFoundError because no such executable exists.
    # Only the argument-list form below is correct; the duplicate has
    # been removed.
    if not event.text[0].isalpha() and event.text[0] not in ("/", "#", "@",
                                                             "!"):
        if which("git") is not None:
            ver = await asyncrunapp(
                "git",
                "describe",
                "--all",
                "--long",
                stdout=asyncPIPE,
                stderr=asyncPIPE,
            )
            stdout, stderr = await ver.communicate()
            verout = str(stdout.decode().strip()) \
                + str(stderr.decode().strip())
            rev = await asyncrunapp(
                "git",
                "rev-list",
                "--all",
                "--count",
                stdout=asyncPIPE,
                stderr=asyncPIPE,
            )
            stdout, stderr = await rev.communicate()
            revout = str(stdout.decode().strip()) \
                + str(stderr.decode().strip())
            await event.edit("`Userbot Version: "
                             f"{verout}"
                             "` \n"
                             "`Revision: "
                             f"{revout}"
                             "`")
        else:
            await event.edit(
                "Shame that you don't have git, you're running - 'v1.beta.4' anyway!"
            )
@register(outgoing=True, pattern="^.pip(?: |$)(.*)")
async def pipcheck(pip):
    """For .pip command, do a pip search and report the result."""
    # Channels (that are not groups) are not allowed to run this command.
    if pip.is_channel and not pip.is_group:
        await pip.edit("`pip Commad isn't permitted on channels`")
        return
    if not pip.text[0].isalpha() and pip.text[0] not in ("/", "#", "@", "!"):
        pipmodule = pip.pattern_match.group(1)
        if pipmodule:
            await pip.edit("`Searching . . .`")
            pipc = await asyncrunapp(
                "pip3",
                "search",
                pipmodule,
                stdout=asyncPIPE,
                stderr=asyncPIPE,
            )
            stdout, stderr = await pipc.communicate()
            pipout = str(stdout.decode().strip()) \
                + str(stderr.decode().strip())
            if pipout:
                if len(pipout) > 4096:
                    # Telegram messages cap at 4096 chars; send a file.
                    await pip.edit("`Output too large, sending as file`")
                    # BUG FIX: use a context manager so the file handle is
                    # closed even if the write raises (the old
                    # open()/write()/close() sequence leaked it on error).
                    with open("output.txt", "w+") as out_file:
                        out_file.write(pipout)
                    await pip.client.send_file(
                        pip.chat_id,
                        "output.txt",
                        reply_to=pip.id,
                    )
                    remove("output.txt")
                    return
                await pip.edit("**Query: **\n`"
                               f"pip3 search {pipmodule}"
                               "`\n**Result: **\n`"
                               f"{pipout}"
                               "`")
            else:
                await pip.edit("**Query: **\n`"
                               f"pip3 search {pipmodule}"
                               "`\n**Result: **\n`No Result Returned/False`")
        else:
            await pip.edit("`Use .help pip to see an example`")
@register(outgoing=True, pattern=r"^\.(?:live|on)\s?(.)?")
async def amireallyalive(alive):
    """For .live/.on command, check if the bot is running."""
    # Channels (that are not groups) are not allowed to run this command.
    if alive.is_channel and not alive.is_group:
        await alive.edit("`alive Commad isn't permitted on channels`")
        return
    # Assemble the status card, then post it as a single edit.
    status_text = ("running on __sql-extended__ \n"
                   "----------------------------------------\n"
                   "`Bot Version Info` \n"
                   f"`Telethon : v{version.__version__} `\n"
                   f"`Python : v{python_version()} `\n"
                   "----------------------------------------\n"
                   f"`User : `{DEFAULTUSER} \n\n"
                   "`All modules loaded with (0) errors`")
    await alive.edit(status_text)
@register(outgoing=True, pattern="^.aliveu")
async def amireallyaliveuser(username):
    """For .aliveu command, change the username in the .alive command."""
    # Channels (that are not groups) are not allowed to run this command.
    if username.is_channel and not username.is_group:
        await username.edit("`aliveu Commad isn't permitted on channels`")
        return
    global DEFAULTUSER
    message = username.text
    # A valid invocation looks like ".aliveu <name>": not the bare command,
    # and with a space right after the 7-character ".aliveu" prefix.
    if message != '.aliveu' and message[7:8] == ' ':
        newuser = message[8:]
        DEFAULTUSER = newuser
        output = 'Successfully changed user to ' + newuser + '!'
    else:
        output = '.aliveu [new user without brackets] nor can it be empty'
    await username.edit("`" f"{output}" "`")
@register(outgoing=True, pattern="^.resetalive$")
async def amireallyalivereset(ureset):
    """For .resetalive command, reset the username in the .alive command."""
    # Channels (that are not groups) are not allowed to run this command.
    if ureset.is_channel and not ureset.is_group:
        await ureset.edit("`resetalive Commad isn't permitted on channels`")
        return
    # Fall back to the configured name, or the host name if none is set.
    fallback = str(ALIVE_NAME) if ALIVE_NAME else uname().node
    global DEFAULTUSER
    DEFAULTUSER = fallback
    await ureset.edit("`" "Successfully reset user for alive!" "`")
# Register this module's commands in the global help index.  One combined
# update() call with all four entries is equivalent to the original chain
# of per-command update() calls; the help strings are unchanged.
CMD_HELP.update({
    "sysd": ".sysd\nUsage: Shows system information using neofetch.",
    "botver": ".botver\nUsage: Shows the userbot version.",
    "pip": ".pip <module(s)>\nUsage: Does a search of pip modules(s).",
    "on": ".live | .on"
          "\nUsage: Type .on or .alive to see wether your bot is working or not."
          "\n\n.aliveu <text>"
          "\nUsage: Changes the 'user' in alive to the text you want."
          "\n\n.resetalive"
          "\nUsage: Resets the user to default.",
})
| 36.333333 | 85 | 0.511239 |
48808b7a4c6f8b6cdb635a933a552799c8e9a5de | 1,364 | py | Python | rdmo/conditions/tests/test_views.py | hafu/rdmo | 06ea034f7ffa0fbc1fd360be75d3031b038907af | [
"Apache-2.0"
] | null | null | null | rdmo/conditions/tests/test_views.py | hafu/rdmo | 06ea034f7ffa0fbc1fd360be75d3031b038907af | [
"Apache-2.0"
] | null | null | null | rdmo/conditions/tests/test_views.py | hafu/rdmo | 06ea034f7ffa0fbc1fd360be75d3031b038907af | [
"Apache-2.0"
] | null | null | null | from django.test import TestCase
from test_generator.views import TestListViewMixin
from rdmo.core.testing.mixins import TestExportViewMixin, TestImportViewMixin
from rdmo.accounts.utils import set_group_permissions
class ConditionsViewTestCase(TestCase):
fixtures = (
'users.json',
'groups.json',
'accounts.json',
'conditions.json',
'domain.json',
'options.json',
)
users = (
('editor', 'editor'),
('reviewer', 'reviewer'),
('user', 'user'),
('api', 'api'),
('anonymous', None),
)
status_map = {
'list_view': {
'editor': 200, 'reviewer': 200, 'api': 200, 'user': 403, 'anonymous': 302
},
'export_view': {
'editor': 200, 'reviewer': 200, 'api': 200, 'user': 403, 'anonymous': 302
},
'import_view': {
'editor': 302, 'reviewer': 403, 'api': 302, 'user': 403, 'anonymous': 302
}
}
@classmethod
def setUpTestData(cls):
set_group_permissions()
class ConditionsTests(TestListViewMixin, TestExportViewMixin, TestImportViewMixin, ConditionsViewTestCase):
    """Concrete test class: the mixins generate list/export/import tests
    from these URL names plus the settings on ConditionsViewTestCase."""

    # Django URL names the generated tests resolve and request.
    url_names = {
        'list_view': 'conditions',
        'export_view': 'conditions_export',
        'import_view': 'conditions_import'
    }

    # XML file uploaded by the generated import-view test.
    import_file = 'testing/xml/conditions.xml'
| 25.259259 | 107 | 0.587977 |
66d412e154c0204b99dfb5f00808af6e009a0ea5 | 675 | py | Python | django_for_professionals/project_3_bookstore/manage.py | rednafi/django-unchained | 0f71c8d056699496d4af3ab049f9b2f9d057486b | [
"MIT"
] | 1 | 2020-07-25T18:06:56.000Z | 2020-07-25T18:06:56.000Z | django_for_professionals/project_3_bookstore/manage.py | rednafi/django-unchained | 0f71c8d056699496d4af3ab049f9b2f9d057486b | [
"MIT"
] | null | null | null | django_for_professionals/project_3_bookstore/manage.py | rednafi/django-unchained | 0f71c8d056699496d4af3ab049f9b2f9d057486b | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Entry point: point Django at this project's settings and hand the
    command line over to Django's management utility."""
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "project_3_bookstore.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError as import_err:
        # Re-raise with a friendlier hint, chained to the original error.
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from import_err
    execute_from_command_line(sys.argv)


if __name__ == "__main__":
    main()
| 29.347826 | 83 | 0.684444 |
d8c8c282ee6d0de5b6ae3ce90750a5791695f960 | 602 | py | Python | conanfile.py | czoido/libA | f73b203fea64ea8d0773dc21f01d55bd68612eaf | [
"MIT"
] | null | null | null | conanfile.py | czoido/libA | f73b203fea64ea8d0773dc21f01d55bd68612eaf | [
"MIT"
] | null | null | null | conanfile.py | czoido/libA | f73b203fea64ea8d0773dc21f01d55bd68612eaf | [
"MIT"
] | null | null | null | from conans import ConanFile, CMake
class LibA(ConanFile):
    """Conan recipe for libA: builds, installs and packages the library
    with CMake, and exposes it to consumers via cpp_info."""

    name = "libA"
    version = "0.0"
    # Binary compatibility axes for this package.
    settings = "os", "arch", "compiler", "build_type"
    options = {"shared": [True, False]}
    default_options = {"shared": False}
    generators = "cmake"
    # Capture the recipe's own git URL and revision automatically at export.
    scm = {"type": "git",
           "url": "auto",
           "revision": "auto"}

    def build(self):
        """Configure and build with CMake, then run the install step
        (packaging relies on the project's CMake install rules)."""
        cmake = CMake(self)
        cmake.configure()
        cmake.build()
        cmake.install()

    def package(self):
        """Ship the license file alongside the installed binaries."""
        self.copy("LICENSE", dst="licenses")

    def package_info(self):
        """Tell consumers which library to link against."""
        self.cpp_info.libs = ["libA",]
| 21.5 | 53 | 0.548173 |
f8991d1038a5cc4dd67c9578dadd31ce1d94ce17 | 4,010 | py | Python | venv/Lib/site-packages/webargs/fields.py | nwrocketman64/JKWWOODWORKS-codebase | 81b49a6cddf3ded9166925cc7d4b5717a7ee45f0 | [
"MIT"
] | null | null | null | venv/Lib/site-packages/webargs/fields.py | nwrocketman64/JKWWOODWORKS-codebase | 81b49a6cddf3ded9166925cc7d4b5717a7ee45f0 | [
"MIT"
] | null | null | null | venv/Lib/site-packages/webargs/fields.py | nwrocketman64/JKWWOODWORKS-codebase | 81b49a6cddf3ded9166925cc7d4b5717a7ee45f0 | [
"MIT"
] | null | null | null | """Field classes.
Includes all fields from `marshmallow.fields` in addition to a custom
`Nested` field and `DelimitedList`.
All fields can optionally take a special `location` keyword argument, which
tells webargs where to parse the request argument from.
.. code-block:: python
args = {
"active": fields.Bool(location="query"),
"content_type": fields.Str(data_key="Content-Type", location="headers"),
}
"""
import typing
import marshmallow as ma
# Expose all fields from marshmallow.fields.
from marshmallow.fields import * # noqa: F40
__all__ = ["DelimitedList"] + ma.fields.__all__
class Nested(ma.fields.Nested):  # type: ignore[no-redef]
    """Same as `marshmallow.fields.Nested`, except can be passed a dictionary as
    the first argument, which will be converted to a `marshmallow.Schema`.

    .. note::

        The schema class here will always be `marshmallow.Schema`, regardless
        of whether a custom schema class is set on the parser. Pass an explicit schema
        class if necessary.
    """

    def __init__(self, nested, *args, **kwargs):
        # Promote a plain {field_name: Field} mapping to a Schema class
        # before delegating to marshmallow's Nested.
        if isinstance(nested, dict):
            nested = ma.Schema.from_dict(nested)
        super().__init__(nested, *args, **kwargs)
class DelimitedFieldMixin:
    """
    This is a mixin class for subclasses of ma.fields.List and ma.fields.Tuple
    which split on a pre-specified delimiter. By default, the delimiter will be ","

    Because we want the MRO to reach this class before the List or Tuple class,
    it must be listed first in the superclasses

    For example, a DelimitedList-like type can be defined like so:

    >>> class MyDelimitedList(DelimitedFieldMixin, ma.fields.List):
    >>>     pass
    """

    delimiter: str = ","
    # delimited fields set is_multiple=False for webargs.core.is_multiple
    is_multiple: bool = False

    def _serialize(self, value, attr, obj, **kwargs):
        # serializing will start with parent-class serialization, so that we correctly
        # output lists of non-primitive types, e.g. DelimitedList(DateTime)
        return self.delimiter.join(
            format(each) for each in super()._serialize(value, attr, obj, **kwargs)
        )

    def _deserialize(self, value, attr, data, **kwargs):
        # attempting to deserialize from a non-string source is an error
        if not isinstance(value, (str, bytes)):
            raise self.make_error("invalid")
        # split the raw string here; List/Tuple then deserializes each piece
        return super()._deserialize(value.split(self.delimiter), attr, data, **kwargs)
class DelimitedList(DelimitedFieldMixin, ma.fields.List):
    """A field which is similar to a List, but takes its input as a delimited
    string (e.g. "foo,bar,baz").

    Like List, it can be given a nested field type which it will use to
    de/serialize each element of the list.

    :param Field cls_or_instance: A field class or instance.
    :param str delimiter: Delimiter between values.
    """

    default_error_messages = {"invalid": "Not a valid delimited list."}

    def __init__(
        self,
        cls_or_instance: typing.Union[ma.fields.Field, type],
        *,
        delimiter: typing.Optional[str] = None,
        **kwargs
    ):
        # Falsy delimiter (None or "") falls back to the class default ",".
        self.delimiter = delimiter or self.delimiter
        super().__init__(cls_or_instance, **kwargs)
class DelimitedTuple(DelimitedFieldMixin, ma.fields.Tuple):
    """A field which is similar to a Tuple, but takes its input as a delimited
    string (e.g. "foo,bar,baz").

    Like Tuple, it can be given a tuple of nested field types which it will use to
    de/serialize each element of the tuple.

    :param Iterable[Field] tuple_fields: An iterable of field classes or instances.
    :param str delimiter: Delimiter between values.
    """

    default_error_messages = {"invalid": "Not a valid delimited tuple."}

    def __init__(
        self, tuple_fields, *, delimiter: typing.Optional[str] = None, **kwargs
    ):
        # Falsy delimiter (None or "") falls back to the class default ",".
        self.delimiter = delimiter or self.delimiter
        super().__init__(tuple_fields, **kwargs)
| 34.273504 | 86 | 0.681047 |
0c73247b97a402d8834d79cbfed139ffb5109296 | 5,350 | py | Python | dayu_widgets3/toast.py | muyr/dayu_widgets3 | a319cc719d84c031829893c45b8f20e87cbbabc8 | [
"MIT"
] | 5 | 2020-10-16T03:46:47.000Z | 2022-03-21T07:10:37.000Z | dayu_widgets3/toast.py | muyr/dayu_widgets3 | a319cc719d84c031829893c45b8f20e87cbbabc8 | [
"MIT"
] | null | null | null | dayu_widgets3/toast.py | muyr/dayu_widgets3 | a319cc719d84c031829893c45b8f20e87cbbabc8 | [
"MIT"
] | 1 | 2022-02-16T14:18:43.000Z | 2022-02-16T14:18:43.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###################################################################
# Author: Mu yanru
# Date : 2019.2
# Email : muyanru345@163.com
###################################################################
"""
MToast
"""
from dayu_widgets3 import dayu_theme
from dayu_widgets3.avatar import MAvatar
from dayu_widgets3.label import MLabel
from dayu_widgets3.loading import MLoading
from dayu_widgets3.qt import QWidget, Signal, Qt, QSize, MPixmap, QHBoxLayout, QVBoxLayout, \
QTimer, QPropertyAnimation, QEasingCurve, QAbstractAnimation, QPoint
class MToast(QWidget):
    """
    MToast

    A phone-style transient message.  It centers itself over ``parent``,
    fades in, waits ``duration`` seconds, then fades out and closes
    itself (the widget carries Qt.WA_DeleteOnClose).
    """
    InfoType = 'info'
    SuccessType = 'success'
    WarningType = 'warning'
    ErrorType = 'error'
    LoadingType = 'loading'

    # Class-wide defaults; adjust through MToast.config().
    default_config = {
        'duration': 2,
    }

    # Emitted when the auto-close timer fires.
    sig_closed = Signal()

    def __init__(self, text, duration=None, dayu_type=None, parent=None):
        super(MToast, self).__init__(parent)
        # Bug fix: WA_TranslucentBackground and WA_DeleteOnClose are
        # Qt.WidgetAttribute values, not window flags.  OR-ing them into
        # setWindowFlags() produced an arbitrary flag combination; widget
        # attributes must be enabled with setAttribute() instead.
        self.setWindowFlags(Qt.FramelessWindowHint | Qt.Dialog)
        self.setAttribute(Qt.WA_TranslucentBackground)
        self.setAttribute(Qt.WA_DeleteOnClose)
        self.setAttribute(Qt.WA_StyledBackground)

        _icon_lay = QHBoxLayout()
        _icon_lay.addStretch()
        if dayu_type == MToast.LoadingType:
            # Loading toasts show a spinner instead of a static icon.
            _icon_lay.addWidget(MLoading(size=dayu_theme.huge, color=dayu_theme.text_color_inverse))
        else:
            _icon_label = MAvatar()
            _icon_label.set_dayu_size(60)
            _icon_label.set_dayu_image(MPixmap('{}_line.svg'.format(dayu_type or MToast.InfoType),
                                               dayu_theme.text_color_inverse))
            _icon_lay.addWidget(_icon_label)
        _icon_lay.addStretch()

        _content_label = MLabel()
        _content_label.setText(text)
        _content_label.setAlignment(Qt.AlignCenter)

        _main_lay = QVBoxLayout()
        _main_lay.setContentsMargins(0, 0, 0, 0)
        _main_lay.addStretch()
        _main_lay.addLayout(_icon_lay)
        _main_lay.addSpacing(10)
        _main_lay.addWidget(_content_label)
        _main_lay.addStretch()
        self.setLayout(_main_lay)
        self.setFixedSize(QSize(120, 120))

        # One-shot timer closes the toast after ``duration`` seconds; a
        # second timer starts the fade-out 300 ms earlier so the opacity
        # animation finishes right as the widget closes.
        _close_timer = QTimer(self)
        _close_timer.setSingleShot(True)
        _close_timer.timeout.connect(self.close)
        _close_timer.timeout.connect(self.sig_closed)
        _close_timer.setInterval((duration or self.default_config.get('duration')) * 1000)

        _ani_timer = QTimer(self)
        _ani_timer.timeout.connect(self._fade_out)
        _ani_timer.setInterval((duration or self.default_config.get('duration')) * 1000 - 300)

        _close_timer.start()
        _ani_timer.start()

        # Shared animation: forward = fade in (0.0 -> 0.9), backward = fade out.
        self._opacity_ani = QPropertyAnimation()
        self._opacity_ani.setTargetObject(self)
        self._opacity_ani.setDuration(300)
        self._opacity_ani.setEasingCurve(QEasingCurve.OutCubic)
        self._opacity_ani.setPropertyName('windowOpacity')
        self._opacity_ani.setStartValue(0.0)
        self._opacity_ani.setEndValue(0.9)

        self._get_center_position(parent)
        self._fade_in()

    def _fade_out(self):
        """Play the opacity animation backwards (0.9 -> 0.0)."""
        self._opacity_ani.setDirection(QAbstractAnimation.Backward)
        self._opacity_ani.start()

    def _fade_in(self):
        """Play the opacity animation forwards (0.0 -> 0.9)."""
        # Renamed from the original private method "_fade_int" (typo).
        self._opacity_ani.start()

    def _get_center_position(self, parent):
        """Position the toast centered over *parent* in global coordinates."""
        parent_geo = parent.geometry()
        pos = parent_geo.topLeft() \
            if parent.parent() is None else parent.mapToGlobal(parent_geo.topLeft())
        # Dead code removed: the original scanned parent.children() for other
        # visible MToasts into an ``offset`` that was never used.
        # Integer division: QPoint requires int coordinates; the original
        # float division breaks under strict Python 3 bindings.
        target_x = pos.x() + parent_geo.width() // 2 - self.width() // 2
        target_y = pos.y() + parent_geo.height() // 2 - self.height() // 2
        self.setProperty('pos', QPoint(target_x, target_y))

    @classmethod
    def info(cls, text, parent, duration=None):
        """Show a normal toast message"""
        inst = cls(text, duration=duration, dayu_type=MToast.InfoType, parent=parent)
        inst.show()
        return inst

    @classmethod
    def success(cls, text, parent, duration=None):
        """Show a success toast message"""
        inst = cls(text, duration=duration, dayu_type=MToast.SuccessType, parent=parent)
        inst.show()
        return inst

    @classmethod
    def warning(cls, text, parent, duration=None):
        """Show a warning toast message"""
        inst = cls(text, duration=duration, dayu_type=MToast.WarningType, parent=parent)
        inst.show()
        return inst

    @classmethod
    def error(cls, text, parent, duration=None):
        """Show an error toast message"""
        inst = cls(text, duration=duration, dayu_type=MToast.ErrorType, parent=parent)
        inst.show()
        return inst

    @classmethod
    def loading(cls, text, parent):
        """Show a toast message with loading animation"""
        inst = cls(text, dayu_type=MToast.LoadingType, parent=parent)
        inst.show()
        return inst

    @classmethod
    def config(cls, duration):
        """
        Config the global MToast duration setting.
        :param duration: int (unit is second)
        :return: None
        """
        if duration is not None:
            cls.default_config['duration'] = duration
| 34.294872 | 100 | 0.630654 |
1a3830f06fd11d43b5c683b0f9a95332a16a4801 | 12,156 | py | Python | vaxapp/migrations/0016_auto__chg_field_alert_text.py | ewheeler/vaxtrack | 57191d5ae5626c944d765071f9a1d1fddfaad068 | [
"BSD-3-Clause"
] | 1 | 2015-03-28T17:00:54.000Z | 2015-03-28T17:00:54.000Z | vaxapp/migrations/0016_auto__chg_field_alert_text.py | ewheeler/vaxtrack | 57191d5ae5626c944d765071f9a1d1fddfaad068 | [
"BSD-3-Clause"
] | null | null | null | vaxapp/migrations/0016_auto__chg_field_alert_text.py | ewheeler/vaxtrack | 57191d5ae5626c944d765071f9a1d1fddfaad068 | [
"BSD-3-Clause"
] | null | null | null | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Auto-generated South schema migration: narrows ``Alert.text`` from a
    160-char free-text field to a 2-char choice code."""

    def forwards(self, orm):
        """Apply: shrink Alert.text to max_length=2."""
        # Changing field 'Alert.text'
        db.alter_column('vaxapp_alert', 'text', self.gf('django.db.models.fields.CharField')(max_length=2, null=True))

    def backwards(self, orm):
        """Revert: restore Alert.text to max_length=160."""
        # Changing field 'Alert.text'
        db.alter_column('vaxapp_alert', 'text', self.gf('django.db.models.fields.CharField')(max_length=160, null=True))

    # Frozen ORM snapshot South uses to build the ``orm`` argument above.
    # Auto-generated; do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'vaxapp.alert': {
            'Meta': {'object_name': 'Alert'},
            'analyzed': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'countrystock': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['vaxapp.CountryStock']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'reference_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'risk': ('django.db.models.fields.CharField', [], {'default': "'U'", 'max_length': '2', 'null': 'True', 'blank': 'True'}),
            'status': ('django.db.models.fields.CharField', [], {'default': "'U'", 'max_length': '2', 'null': 'True', 'blank': 'True'}),
            'text': ('django.db.models.fields.CharField', [], {'default': "'U'", 'max_length': '2', 'null': 'True', 'blank': 'True'})
        },
        'vaxapp.country': {
            'Meta': {'object_name': 'Country'},
            'iso2_code': ('django.db.models.fields.CharField', [], {'max_length': '2', 'primary_key': 'True'}),
            'iso3_code': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '160', 'null': 'True', 'blank': 'True'}),
            'name_fr': ('django.db.models.fields.CharField', [], {'max_length': '160', 'null': 'True', 'blank': 'True'}),
            'numerical_code': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'printable_name': ('django.db.models.fields.CharField', [], {'max_length': '80'})
        },
        'vaxapp.countrystock': {
            'Meta': {'object_name': 'CountryStock'},
            'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['vaxapp.Country']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'vaccine': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['vaxapp.Vaccine']"})
        },
        'vaxapp.countrystockstats': {
            'Meta': {'object_name': 'CountryStockStats'},
            'actual_cons_rate': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'actual_cons_rate'", 'null': 'True', 'to': "orm['vaxapp.Dicty']"}),
            'analyzed': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'annual_demand': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'annual_demand'", 'null': 'True', 'to': "orm['vaxapp.Dicty']"}),
            'consumed_in_year': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'consumed_in_year'", 'null': 'True', 'to': "orm['vaxapp.Dicty']"}),
            'countrystock': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['vaxapp.CountryStock']"}),
            'days_of_stock': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'demand_for_period': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'doses_delivered_this_year': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'doses_on_orders': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'est_daily_cons': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'nine_by_year': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'nine_by_year'", 'null': 'True', 'to': "orm['vaxapp.Dicty']"}),
            'percent_coverage': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'reference_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'three_by_year': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'three_by_year'", 'null': 'True', 'to': "orm['vaxapp.Dicty']"})
        },
        'vaxapp.dicty': {
            'Meta': {'object_name': 'Dicty'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '160'})
        },
        'vaxapp.document': {
            'Meta': {'object_name': 'Document'},
            'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.utcnow'}),
            'date_exception': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'date_process_end': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'date_process_start': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'date_queued': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'date_stored': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'date_uploaded': ('django.db.models.fields.DateTimeField', [], {}),
            'exception': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'local_document': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'remote_document': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'status': ('django.db.models.fields.CharField', [], {'default': "'U'", 'max_length': '1'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36'})
        },
        'vaxapp.keyval': {
            'Meta': {'object_name': 'KeyVal'},
            'dicty': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['vaxapp.Dicty']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '160'}),
            'val': ('django.db.models.fields.CharField', [], {'max_length': '160', 'null': 'True', 'blank': 'True'})
        },
        'vaxapp.userprofile': {
            'Meta': {'object_name': 'UserProfile'},
            'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['vaxapp.Country']", 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'vaxapp.vaccine': {
            'Meta': {'object_name': 'Vaccine'},
            'abbr_en': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
            'abbr_en_alt': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
            'abbr_fr': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
            'abbr_fr_alt': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
            'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['vaxapp.VaccineGroup']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '160', 'null': 'True', 'blank': 'True'}),
            'slug': ('django.db.models.fields.CharField', [], {'max_length': '160', 'null': 'True', 'blank': 'True'})
        },
        'vaxapp.vaccinegroup': {
            'Meta': {'object_name': 'VaccineGroup'},
            'abbr_en': ('django.db.models.fields.CharField', [], {'max_length': '160', 'null': 'True', 'blank': 'True'}),
            'abbr_fr': ('django.db.models.fields.CharField', [], {'max_length': '160', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        }
    }

    complete_apps = ['vaxapp']
| 77.426752 | 185 | 0.556022 |
2a32f4f4e03bef6cb405cb53ddfcd9a188cafd63 | 2,614 | py | Python | syntropy_sdk/models/settings_types.py | SyntropyNet/syntropy-python-sdk | 27b7756b136f83886fd2a6e342fa4d4073779ff7 | [
"MIT"
] | 1 | 2020-12-17T17:30:12.000Z | 2020-12-17T17:30:12.000Z | syntropy_sdk/models/settings_types.py | SyntropyNet/syntropy-python-sdk | 27b7756b136f83886fd2a6e342fa4d4073779ff7 | [
"MIT"
] | null | null | null | syntropy_sdk/models/settings_types.py | SyntropyNet/syntropy-python-sdk | 27b7756b136f83886fd2a6e342fa4d4073779ff7 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
syntropy-controller
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 0.1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class SettingsTypes(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.

    Enum-like model: the only legal values are the class constants below.
    """

    """
    allowed enum values
    """
    INTEGER = "INTEGER"
    SERIALIZED_STRINGS = "SERIALIZED_STRINGS"

    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Empty: this enum-style model declares no serializable properties.
    swagger_types = {}
    attribute_map = {}

    def __init__(self):  # noqa: E501
        """SettingsTypes - a model defined in Swagger"""  # noqa: E501
        self.discriminator = None

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively convert nested models, lists of models and dicts of
        # models into plain Python values.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(
                    map(lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value)
                )
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(
                    map(
                        lambda item: (item[0], item[1].to_dict())
                        if hasattr(item[1], "to_dict")
                        else item,
                        value.items(),
                    )
                )
            else:
                result[attr] = value
        # Merge dict-subclass state when the generated model inherits dict.
        if issubclass(SettingsTypes, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, SettingsTypes):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 28.413043 | 119 | 0.546289 |
4a97ef5eb6cc52ab14a9b684c78284219e1b1898 | 2,370 | py | Python | airflow/api_connexion/schemas/pool_schema.py | ChaseKnowlden/airflow | 6b71eac1997a7c0db3b8e3aed6b4e65d01871440 | [
"Apache-2.0"
] | 15,947 | 2019-01-05T13:51:02.000Z | 2022-03-31T23:33:16.000Z | airflow/api_connexion/schemas/pool_schema.py | ChaseKnowlden/airflow | 6b71eac1997a7c0db3b8e3aed6b4e65d01871440 | [
"Apache-2.0"
] | 14,603 | 2019-01-05T09:43:19.000Z | 2022-03-31T23:11:59.000Z | airflow/api_connexion/schemas/pool_schema.py | ChaseKnowlden/airflow | 6b71eac1997a7c0db3b8e3aed6b4e65d01871440 | [
"Apache-2.0"
] | 8,429 | 2019-01-05T19:45:47.000Z | 2022-03-31T22:13:01.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import List, NamedTuple
from marshmallow import Schema, fields
from marshmallow_sqlalchemy import SQLAlchemySchema, auto_field
from airflow.models.pool import Pool
class PoolSchema(SQLAlchemySchema):
    """Pool schema"""

    class Meta:
        """Meta"""

        model = Pool

    # Serialized as "name" but backed by the model's "pool" column.
    name = auto_field("pool")
    slots = auto_field()
    # Derived, read-only counters computed from the Pool model's methods.
    occupied_slots = fields.Method("get_occupied_slots", dump_only=True)
    running_slots = fields.Method("get_running_slots", dump_only=True)
    queued_slots = fields.Method("get_queued_slots", dump_only=True)
    open_slots = fields.Method("get_open_slots", dump_only=True)

    @staticmethod
    def get_occupied_slots(obj: Pool) -> int:
        """Returns the occupied slots of the pool."""
        return obj.occupied_slots()

    @staticmethod
    def get_running_slots(obj: Pool) -> int:
        """Returns the running slots of the pool."""
        return obj.running_slots()

    @staticmethod
    def get_queued_slots(obj: Pool) -> int:
        """Returns the queued slots of the pool."""
        return obj.queued_slots()

    @staticmethod
    def get_open_slots(obj: Pool) -> float:
        """Returns the open slots of the pool."""
        return obj.open_slots()
class PoolCollection(NamedTuple):
    """List of Pools with metadata"""

    # pools: the Pool rows being returned.
    # total_entries: count reported alongside the list (presumably the total
    # matching rows when results are paginated — confirm against callers).
    pools: List[Pool]
    total_entries: int
"""Pool Collection schema"""
pools = fields.List(fields.Nested(PoolSchema))
total_entries = fields.Int()
pool_collection_schema = PoolCollectionSchema()
pool_schema = PoolSchema()
| 30.384615 | 72 | 0.715612 |
a21d9c1865e64f938c1d68099e9b9f8367cbd778 | 1,194 | py | Python | drf_elasticsearch_dsl/management/commands/update_index.py | ajbeach2/drf-elasticsearch-dsl | 2b55a295ece0438d6dd0ac92e291d0a69deb66ca | [
"MIT"
] | 3 | 2017-12-27T01:10:53.000Z | 2019-04-12T02:11:41.000Z | drf_elasticsearch_dsl/management/commands/update_index.py | ajbeach2/drf-elasticsearch-dsl | 2b55a295ece0438d6dd0ac92e291d0a69deb66ca | [
"MIT"
] | 5 | 2020-06-05T17:49:44.000Z | 2021-06-10T20:35:59.000Z | drf_elasticsearch_dsl/management/commands/update_index.py | ajbeach2/drf-elasticsearch-dsl | 2b55a295ece0438d6dd0ac92e291d0a69deb66ca | [
"MIT"
] | null | null | null | import multiprocessing
from django import db
from drf_elasticsearch_dsl.connection_handler import connection_handler
from django.core.management.base import BaseCommand
DEFAULT_BATCH_SIZE = 500
DEFAULT_WORKERS = 4
def worker(args):
    """Pool worker: bulk-index one slice ``[start, end)`` of ``model``'s rows.

    ``args`` is a single ``(document, model, start, end)`` tuple because
    ``pool.map`` passes exactly one argument per task.
    """
    document, model, start, end = args
    # Drop DB connections inherited from the parent process so this worker
    # opens its own (connections must not be shared across forks).
    db.connections.close_all()
    row = model.objects.all()[start:end]
    document.bulk_index(row)
    # Clear Django's stored query log to keep worker memory bounded.
    db.reset_queries()
class Command(BaseCommand):
    """Rebuild the Elasticsearch index for every registered document.

    Each model's queryset is split into batches of ``batch_size`` rows and
    the batches are indexed in parallel by a multiprocessing pool.
    """

    help = "Freshens the index for the given app(s)."

    def handle(self, *args, **options):
        # Fix: workers was hardcoded to 4 with the real lookup left
        # commented out; use the option when present, else the default.
        # ``or`` also covers options that are present but None.
        self.workers = options.get('workers') or DEFAULT_WORKERS
        self.batch_size = options.get('batch_size') or DEFAULT_BATCH_SIZE
        pool = multiprocessing.Pool(self.workers)
        queue = []
        for label, document in connection_handler.documents.items():
            model = document.get_model()
            total = model.objects.all().count()
            print('Indexing {0} records of {1}'.format(total, label))
            # One task per [start, end) slice of the queryset.
            for start in range(0, total, self.batch_size):
                end = min(start + self.batch_size, total)
                queue.append((document, model, start, end))
        pool.map(worker, queue)
        # Release worker processes instead of leaking them until GC.
        pool.close()
        pool.join()
| 31.421053 | 71 | 0.657454 |
ce0f933d57a36e8b6d980f5b604d255517547bc4 | 333 | py | Python | aws-pentesting-with-python/kms/correct/kms_client.py | qodirovshohijahon/python-scripting | 08f8492e137df37e7f9b41e9ee546f5783501c1a | [
"CNRI-Python"
] | null | null | null | aws-pentesting-with-python/kms/correct/kms_client.py | qodirovshohijahon/python-scripting | 08f8492e137df37e7f9b41e9ee546f5783501c1a | [
"CNRI-Python"
] | null | null | null | aws-pentesting-with-python/kms/correct/kms_client.py | qodirovshohijahon/python-scripting | 08f8492e137df37e7f9b41e9ee546f5783501c1a | [
"CNRI-Python"
] | null | null | null | import os
import boto3
def kmsClient():
    """Build a boto3 KMS client from AWS credentials in the environment.

    NOTE(review): assumes AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY and
    AWS_REGION are all set; a missing variable raises KeyError here.
    """
    client = boto3.client(
        "kms",
        aws_access_key_id=os.environ["AWS_ACCESS_KEY_ID"],
        aws_secret_access_key=os.environ["AWS_SECRET_ACCESS_KEY"],
        region_name=os.environ["AWS_REGION"],
    )
    return client
8c8b411a4809be8e8b899e318bdfd05a3188a4b6 | 1,651 | py | Python | components/job-orchestration/job_orchestration/scheduler/scheduler_data.py | kirkrodrigues/clp | bb81eec43da218a5fa3f3a367e0a24c144bdf0c8 | [
"Apache-2.0"
] | 28 | 2021-07-18T02:21:14.000Z | 2021-09-30T22:46:24.000Z | components/job-orchestration/job_orchestration/scheduler/scheduler_data.py | kirkrodrigues/clp | bb81eec43da218a5fa3f3a367e0a24c144bdf0c8 | [
"Apache-2.0"
] | 15 | 2021-10-12T03:55:07.000Z | 2022-03-24T09:04:35.000Z | components/job-orchestration/job_orchestration/scheduler/scheduler_data.py | kirkrodrigues/clp | bb81eec43da218a5fa3f3a367e0a24c144bdf0c8 | [
"Apache-2.0"
] | 11 | 2021-10-06T11:35:47.000Z | 2022-03-20T11:40:49.000Z | import datetime
import json
import typing
from typing import Dict
import msgpack
import zstandard
from celery.result import AsyncResult
from pydantic import BaseModel, validator
class TaskUpdate(BaseModel):
    """Progress report for a single scheduled task."""

    job_id: int
    task_id: int
    status: str

    @validator('status')
    def valid_status(cls, field):
        """Accept only the statuses the scheduler knows how to handle."""
        supported_status = ['COMPRESSING', 'COMPLETED', 'FAILED']
        if field in supported_status:
            return field
        raise ValueError(f'must be one of the following {"|".join(supported_status)}')
class TaskCompletionUpdate(TaskUpdate):
    """Task update sent on success; carries compression size statistics."""
    total_uncompressed_size: int
    total_compressed_size: int
class TaskFailureUpdate(TaskUpdate):
    """Task update sent on failure; carries the error description."""
    error_message: str
class Task(BaseModel):
    """A single schedulable unit of compression work."""
    task_id: int
    task_status: str
    priority: int = 1
    # msgpack-encoded, zstd-compressed JSON payload of the paths to compress
    clp_paths_to_compress: bytes
    task_start_time: datetime.datetime = None
    # Celery handle for the dispatched task, if any
    instance: AsyncResult = None

    class Config:
        # AsyncResult is not a pydantic-native type
        arbitrary_types_allowed = True

    def get_clp_paths_to_compress_json(self, dctx: zstandard.ZstdDecompressor = None):
        """Decompress and decode the paths payload, returning a JSON string."""
        if dctx is None:
            dctx = zstandard.ZstdDecompressor()
        return json.dumps(msgpack.unpackb(dctx.decompress(self.clp_paths_to_compress)))
class Job(BaseModel):
    """A compression job: one CLP configuration plus its constituent tasks."""
    job_id: int
    job_status: str
    job_start_time: datetime.datetime
    # msgpack-encoded, zstd-compressed JSON of the CLP configuration
    clp_config: bytes
    num_tasks: typing.Optional[int]
    num_tasks_completed: int
    # tasks keyed by task_id; pydantic copies this default per instance
    tasks: Dict[int, Task] = {}

    def get_clp_config_json(self, dctx: zstandard.ZstdDecompressor = None):
        """Decompress and decode the CLP config, returning a JSON string.

        Tests explicitly for None (instead of truthiness) so the guard matches
        the identical helper on Task and honours any caller-supplied dctx.
        """
        if dctx is None:
            dctx = zstandard.ZstdDecompressor()
        return json.dumps(msgpack.unpackb(dctx.decompress(self.clp_config)))
| 25.796875 | 90 | 0.710478 |
40fd5d1eef21c92e7871de5294755f81c75b76a6 | 679 | py | Python | tutorials/W3D3_NetworkCausality/solutions/W3D3_Tutorial4_Solution_d89caba5.py | himahuja/course-content | f3e17aedf722c818708b83b213a267682a238194 | [
"CC-BY-4.0"
] | null | null | null | tutorials/W3D3_NetworkCausality/solutions/W3D3_Tutorial4_Solution_d89caba5.py | himahuja/course-content | f3e17aedf722c818708b83b213a267682a238194 | [
"CC-BY-4.0"
] | null | null | null | tutorials/W3D3_NetworkCausality/solutions/W3D3_Tutorial4_Solution_d89caba5.py | himahuja/course-content | f3e17aedf722c818708b83b213a267682a238194 | [
"CC-BY-4.0"
] | null | null | null | def fit_second_stage(T_hat,Y):
"""
Estimates a scalar causal effect from 2-stage least squares regression using
an instrument.
Args:
T_hat (np.ndarray): the output of the first stage regression
Y (np.ndarray): our observed response (n, 1)
Returns:
beta (float): the estimated causal effect
"""
# Initialize linear regression model
stage2 = LinearRegression(fit_intercept=True)
# Fit model to data
stage2.fit(T_hat, Y)
return stage2.coef_
# Uncomment below to test your function
# (T, Z, Y and fit_first_stage are defined in earlier notebook cells)
T_hat = fit_first_stage(T,Z)
beta = fit_second_stage(T_hat,Y)
print("Estimated causal effect is: {:.3f}".format(beta[0,0]))
17b5abe5c85726e6286e4d5591bf7967b0e78944 | 3,479 | py | Python | db/migrations/0001_initial.py | housepig7/ops | ed1dc6f6e160e2a4a414c1eeeee78ded02597013 | [
"Apache-2.0"
] | null | null | null | db/migrations/0001_initial.py | housepig7/ops | ed1dc6f6e160e2a4a414c1eeeee78ded02597013 | [
"Apache-2.0"
] | null | null | null | db/migrations/0001_initial.py | housepig7/ops | ed1dc6f6e160e2a4a414c1eeeee78ded02597013 | [
"Apache-2.0"
] | null | null | null | # Generated by Django 2.1.7 on 2019-03-11 01:13
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates db_mysql and db_user tables,
    then wires db_mysql's foreign keys as separate AddField operations.
    """

    initial = True

    dependencies = [
        ('auth', '0009_alter_user_last_name_max_length'),
        ('asset', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='db_mysql',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('hostname', models.CharField(max_length=64, unique=True, verbose_name='数据库名字')),
                ('ip', models.GenericIPAddressField(blank=True, null=True, verbose_name='IP')),
                ('port', models.IntegerField(blank=True, default='3306', null=True, verbose_name='端口')),
                ('model', models.CharField(blank=True, max_length=128, null=True, verbose_name='数据库型号')),
                ('is_active', models.BooleanField(default=True, verbose_name='是否启用')),
                ('ps', models.CharField(blank=True, max_length=1024, null=True, verbose_name='备注')),
                ('ctime', models.DateTimeField(auto_now_add=True, null=True, verbose_name='创建时间')),
                ('utime', models.DateTimeField(auto_now=True, null=True, verbose_name='更新时间')),
                ('data_center', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='asset.data_centers', verbose_name='数据中心')),
            ],
            options={
                'verbose_name': '数据库管理',
                'verbose_name_plural': '数据库管理',
                'db_table': 'db_mysql',
                'permissions': {('task_db_mysql', '执行数据库资产'), ('read_db_mysql', '只读数据库资产')},
            },
        ),
        migrations.CreateModel(
            name='db_user',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=128, unique=True, verbose_name='名称')),
                ('username', models.CharField(blank=True, default='root', max_length=64, null=True, verbose_name='登陆用户')),
                ('password', models.CharField(blank=True, max_length=255, null=True, verbose_name='登陆密码')),
                ('ps', models.CharField(blank=True, max_length=1024, null=True, verbose_name='备注')),
                ('ctime', models.DateTimeField(auto_now_add=True, null=True, verbose_name='创建时间')),
                ('utime', models.DateTimeField(auto_now=True, null=True, verbose_name='更新时间')),
                ('product_line', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='auth.Group', verbose_name='产品线')),
            ],
            options={
                'verbose_name': '数据库登陆用户',
                'verbose_name_plural': '数据库登陆用户',
                'db_table': 'db_user',
                'permissions': {('read_db_user', '只读系统登陆用户')},
            },
        ),
        # Foreign keys on db_mysql are added after both models exist.
        migrations.AddField(
            model_name='db_mysql',
            name='db_user',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='db.db_user', verbose_name='数据库登陆用户'),
        ),
        migrations.AddField(
            model_name='db_mysql',
            name='product_line',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='auth.Group', verbose_name='产品线'),
        ),
    ]
| 51.161765 | 166 | 0.592986 |
e3cf06aea46c71154f220b43eb60c06a52a0c6e6 | 5,632 | py | Python | tests/test_file.py | sebastianmika/dollar-ref | 9b067c8a32ac86a5c90f6c2377725cd52b039a29 | [
"MIT"
] | 11 | 2017-11-20T05:37:07.000Z | 2020-07-24T04:19:31.000Z | tests/test_file.py | sebastianmika/dollar-ref | 9b067c8a32ac86a5c90f6c2377725cd52b039a29 | [
"MIT"
] | 8 | 2017-11-18T15:13:24.000Z | 2021-06-01T13:24:29.000Z | tests/test_file.py | sebastianmika/dollar-ref | 9b067c8a32ac86a5c90f6c2377725cd52b039a29 | [
"MIT"
] | 8 | 2017-11-18T14:57:36.000Z | 2020-03-30T09:11:22.000Z | import json
import yaml
from dollar_ref import resolve
def test_basic(tmpdir):
    """A '$ref' pointing into an external JSON file is resolved in place."""
    target = tmpdir.mkdir('root_dir').join('root.json')
    target.write(json.dumps({'some': 'in_file_data'}))

    document = {
        'inline': 'data',
        'file_ref': {'$ref': str(target) + '#/some'},
    }

    assert resolve(document) == {'inline': 'data', 'file_ref': 'in_file_data'}
def test_external_only(tmpdir):
    """With external_only=True file refs resolve but internal refs survive."""
    target = tmpdir.mkdir('root_dir').join('root.json')
    target.write(json.dumps({'some': 'in_file_data'}))

    document = {
        'inline': 'data',
        'internal_ref': {'$ref': '#/inline'},
        'file_ref': {'$ref': str(target) + '#/some'},
    }

    resolved = resolve(document, external_only=True)

    assert resolved == {
        'inline': 'data',
        'file_ref': 'in_file_data',
        'internal_ref': {'$ref': '#/inline'},
    }
def test_yaml(tmpdir):
    """YAML files are loaded and their own internal refs resolved too."""
    target = tmpdir.mkdir('root_dir').join('root.yaml')
    target.write(
        "---\n"
        "hello: yaml\n"
        "thing:\n"
        "  '$ref': '#/hello'\n"
    )

    document = {
        'inline': 'data',
        'file_ref': {'$ref': str(target) + '#/thing'},
    }

    assert resolve(document) == {'inline': 'data', 'file_ref': 'yaml'}
def test_not_dict(tmpdir):
    """A ref to a whole document that is not a dict yields the raw value."""
    target = tmpdir.mkdir('root_dir').join('root.json')
    target.write(json.dumps('some stuff here'))

    document = {
        'inline': 'data',
        'file_ref': {'$ref': str(target)},
    }

    assert resolve(document) == {'inline': 'data', 'file_ref': 'some stuff here'}
def test_cwd(tmpdir):
    """Relative file refs are resolved against the supplied cwd."""
    base = tmpdir.mkdir('root_dir')
    base.join('root.json').write(json.dumps({'some': 'stuff'}))

    document = {
        'inline': 'data',
        'file_ref': {'$ref': 'root.json#/some'},
    }

    resolved = resolve(document, cwd=str(base))

    assert resolved == {'inline': 'data', 'file_ref': 'stuff'}
def test_nested(tmpdir):
    """A file ref may point at a value that itself refs a second file,
    which in turn contains an internal ref -- all must resolve transitively."""
    root_dir = tmpdir.mkdir('root_dir')
    root_doc = root_dir.join('root.json')
    root_data = {
        'some': 'stuff',
        'some_ref': {
            '$ref': 'child_dir/child.json#/some_key/child_key'
        }
    }
    root_doc.write(json.dumps(root_data))

    child_doc = root_dir.mkdir('child_dir').join('child.json')
    child_data = {
        'some': 'useless thing',
        'now': 'useful data',
        'some_key': {
            'another': 'useless thing',
            'child_key': {
                '$ref': '#/now'
            }
        }
    }
    child_doc.write(json.dumps(child_data))

    data = {
        'file_ref': {
            '$ref': f'{str(root_doc)}#/some_ref'
        }
    }

    resolved = resolve(data)

    assert resolved == {
        'file_ref': 'useful data'
    }
def test_nested_yaml(tmpdir):
    """Same transitive-resolution scenario as test_nested, but via YAML files."""
    root_dir = tmpdir.mkdir('root_dir')
    root_doc = root_dir.join('root.yaml')
    root_data = {
        'some': 'stuff',
        'some_ref': {
            '$ref': 'child_dir/child.yaml#/some_key/child_key'
        }
    }
    root_doc.write(yaml.dump(root_data, default_flow_style=False))

    child_doc = root_dir.mkdir('child_dir').join('child.yaml')
    child_data = {
        'some': 'useless thing',
        'now': 'useful data',
        'some_key': {
            'another': 'useless thing',
            'child_key': {
                '$ref': '#/now'
            }
        }
    }
    child_doc.write(yaml.dump(child_data, default_flow_style=False))

    data = {
        'file_ref': {
            '$ref': f'{str(root_doc)}#/some_ref'
        }
    }

    resolved = resolve(data)

    assert resolved == {
        'file_ref': 'useful data'
    }
def test_complicated(tmpdir):
    """Refs nested inside lists and spanning sibling directories
    ('../child2_dir/...') are resolved recursively."""
    root_dir = tmpdir.mkdir('root_dir')
    root_doc = root_dir.join('root_doc.json')
    root_data = {
        'this': ['is', 'useless'],
        'some_key': {
            'actual': 'stuff',
            'things': [1, {'$ref': 'child1_dir/child1_doc.json#/for_root'}, 3]
        }
    }
    root_doc.write(json.dumps(root_data))

    child1_doc = root_dir.mkdir('child1_dir').join('child1_doc.json')
    child1_data = {
        'again': 'useless',
        'for_root': {
            'childish': 'things',
            'weird': {
                '$ref': '../child2_dir/child2_doc.json#/for_child1'
            }
        }
    }
    child1_doc.write(json.dumps(child1_data))

    child2_doc = root_dir.mkdir('child2_dir').join('child2_doc.json')
    child2_data = {
        'still': 'useless',
        'for_child1': {
            'grand': 'child?',
        }
    }
    child2_doc.write(json.dumps(child2_data))

    data = {
        'some': 'data',
        'file': {
            '$ref': f'{str(root_doc)}#/some_key'
        }
    }

    resolved = resolve(data)

    assert resolved == {
        'some': 'data',
        'file': {
            'actual': 'stuff',
            'things': [
                1,
                {
                    'childish': 'things',
                    'weird': {
                        'grand': 'child?'
                    }
                },
                3
            ]
        }
    }
a07241156757f6190126196d2c44acdd9a1500f1 | 687 | py | Python | mattermost_giphy/settings.py | hoplaventure/mattermost-integration-giphy | e495f942a77609c6381f743847fc8b7638c2733f | [
"Apache-2.0"
] | 1 | 2018-11-19T21:13:58.000Z | 2018-11-19T21:13:58.000Z | mattermost_giphy/settings.py | hoplaventure/mattermost-integration-giphy | e495f942a77609c6381f743847fc8b7638c2733f | [
"Apache-2.0"
] | null | null | null | mattermost_giphy/settings.py | hoplaventure/mattermost-integration-giphy | e495f942a77609c6381f743847fc8b7638c2733f | [
"Apache-2.0"
] | 3 | 2018-12-01T15:00:25.000Z | 2021-12-22T07:47:30.000Z | # -*- coding: utf-8 -*-
import os
# username the bot posts as
# username the bot posts as
USERNAME = os.environ.get('USERNAME', 'giphy')
# display picture the bot posts with
ICON_URL = os.environ.get('ICON_URL', 'https://avatars0.githubusercontent.com/u/3588525?v=3&s=200')
# the maximum parental rating of gifs posted
RATING = os.environ.get('RATING', 'pg')
# scheme to be used for the gif url returned to Mattermost
SCHEME = os.environ.get('SCHEME', 'https')
# this is the public beta key from the Giphy API
GIPHY_API_KEY = os.environ.get('GIPHY_API_KEY', 'dc6zaTOxFJmzC')
# the Mattermost token generated when you created your outgoing webhook
MATTERMOST_GIPHY_TOKEN = os.environ.get('MATTERMOST_GIPHY_TOKEN', None)
23b6468b6b41e661947ce3f6179d46d612905fbf | 2,389 | py | Python | src/optical_flow.py | avaneesh93/frame-prediction | c7f45fed3651bd8fc7e2db1a88eefb8a8057f117 | [
"MIT"
] | 1 | 2020-07-08T10:20:22.000Z | 2020-07-08T10:20:22.000Z | src/optical_flow.py | avaneesh93/frame-prediction | c7f45fed3651bd8fc7e2db1a88eefb8a8057f117 | [
"MIT"
] | null | null | null | src/optical_flow.py | avaneesh93/frame-prediction | c7f45fed3651bd8fc7e2db1a88eefb8a8057f117 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 2 13:54:01 2018
@author: roshanprakash
"""
import numpy as np
import cv2 as cv
def compute_optical_flow(x):
    """ Computes a multi-layered dense optical flow output from a sequence of images first. \
        Next, the average optical flow from this dense flow output will be computed. \
        Finally, transforms the average flow output to a dimension identical to that of each input image.

        param x : a numpy array of a sequence of images, shape --> (N, H, W, 3);
                  N must be >= 2 so at least one frame pair exists
        returns : a numpy array of shape (H, W, 1) corresponding to the average
                  optical flow, scaled into the range [1, 2]
    """
    try:
        N, H, W, C = x.shape
    except (AttributeError, ValueError) as err:
        # AttributeError: x is not an array; ValueError: wrong rank to unpack.
        # (A bare `except:` here previously masked unrelated failures.)
        raise ValueError('Check the inputs! Something went wrong there.') from err

    # initializations
    hsv = np.zeros_like(x[0])
    dense_optical_flow = []

    # first compute a dense multi-layered optical flow from the sequence of image frames
    for idx in range(1, x.shape[0]):  # pair-wise optical flow between successive images
        # the inputs are grayscale images stored with 3 identical channels,
        # so a single channel carries all the information
        prev_img = x[idx - 1, :][:, :, 0]
        next_img = x[idx, :][:, :, 0]
        flow = cv.calcOpticalFlowFarneback(
            prev_img, next_img, None, 0.5, 3, 15, 3, 5, 1.2, 0
        )  # has shape (H, W, 2)
        dense_optical_flow.append(flow)

    # next, compute the average flow over all frame pairs
    average_flow = np.mean(np.array(dense_optical_flow), axis=0, keepdims=False)
    # next, map from cartesian coordinates to polar coordinates
    magnitude, angle = cv.cartToPolar(average_flow[..., 0], average_flow[..., 1])
    # next, map from polar coordinates to HSV coordinates
    hsv[..., 0] = angle * 180 / (np.pi / 2)  # angle ---> hue component
    hsv[..., 1] = 255  # all pixels are given full saturation
    hsv[..., 2] = cv.normalize(magnitude, None, 0, 255, cv.NORM_MINMAX)  # magnitude ---> value
    # next, map from HSV to RGB space
    rgb = cv.cvtColor(hsv, cv.COLOR_HSV2RGB)
    # finally, map from RGB to grayscale, keeping a trailing channel axis
    optical_flow = cv.cvtColor(rgb, cv.COLOR_RGB2GRAY).reshape((H, W, 1))
    return optical_flow/255.0 + 1
| 39.816667 | 126 | 0.63918 |
fc0bf1d74989bb9ffe8331aede12ab7fd98b6006 | 38,697 | py | Python | tests/nlu/test_evaluation.py | fintzd/rasa | 6359be5509c7d87cd29c2ab5149bc45e843fea85 | [
"Apache-2.0"
] | 1 | 2022-03-03T16:11:02.000Z | 2022-03-03T16:11:02.000Z | tests/nlu/test_evaluation.py | fintzd/rasa | 6359be5509c7d87cd29c2ab5149bc45e843fea85 | [
"Apache-2.0"
] | 250 | 2020-08-14T13:41:26.000Z | 2022-03-28T12:10:13.000Z | tests/nlu/test_evaluation.py | fintzd/rasa | 6359be5509c7d87cd29c2ab5149bc45e843fea85 | [
"Apache-2.0"
] | 1 | 2021-12-03T13:04:55.000Z | 2021-12-03T13:04:55.000Z | import json
import os
import sys
import textwrap
from pathlib import Path
from typing import Text, List, Dict, Any, Set
from rasa.core.agent import Agent
from rasa.core.channels import UserMessage
import pytest
from _pytest.monkeypatch import MonkeyPatch
from unittest.mock import Mock
from rasa.nlu.extractors.crf_entity_extractor import CRFEntityExtractor
from rasa.nlu.extractors.mitie_entity_extractor import MitieEntityExtractor
from rasa.nlu.extractors.spacy_entity_extractor import SpacyEntityExtractor
from tests.conftest import AsyncMock
import rasa.nlu.test
import rasa.shared.nlu.training_data.loading
import rasa.shared.utils.io
import rasa.utils.io
import rasa.model
from rasa.nlu.test import (
is_token_within_entity,
do_entities_overlap,
merge_labels,
remove_empty_intent_examples,
remove_empty_response_examples,
_get_active_entity_extractors,
drop_intents_below_freq,
cross_validate,
run_evaluation,
substitute_labels,
IntentEvaluationResult,
EntityEvaluationResult,
ResponseSelectionEvaluationResult,
evaluate_intents,
evaluate_entities,
evaluate_response_selections,
NO_ENTITY,
collect_successful_entity_predictions,
collect_incorrect_entity_predictions,
merge_confidences,
_get_entity_confidences,
get_eval_data,
does_token_cross_borders,
align_entity_predictions,
determine_intersection,
determine_token_labels,
_remove_entities_of_extractors,
)
from rasa.nlu.tokenizers.tokenizer import Token
from rasa.shared.constants import DEFAULT_NLU_FALLBACK_INTENT_NAME
from rasa.shared.importers.importer import TrainingDataImporter
from rasa.shared.nlu.constants import (
NO_ENTITY_TAG,
INTENT,
INTENT_RANKING_KEY,
INTENT_NAME_KEY,
PREDICTED_CONFIDENCE_KEY,
ENTITIES,
)
from rasa.shared.nlu.constants import (
ENTITY_ATTRIBUTE_TYPE,
ENTITY_ATTRIBUTE_VALUE,
EXTRACTOR,
)
from rasa.shared.nlu.training_data.message import Message
from rasa.shared.nlu.training_data.training_data import TrainingData
from rasa.model_testing import compare_nlu_models
from rasa.utils.tensorflow.constants import EPOCHS
# https://github.com/pytest-dev/pytest-asyncio/issues/68
# this event_loop is used by pytest-asyncio, and redefining it
# is currently the only way of changing the scope of this fixture
from tests.nlu.utilities import write_file_config
# Shared fixtures for the tokenization / entity-alignment tests below.

# Chinese Example
# "对面食过敏" -> To be allergic to wheat-based food
CH_wrong_segmentation = [
    Token("对面", 0),
    Token("食", 2),
    Token("过敏", 3),  # opposite, food, allergy
]
CH_correct_segmentation = [
    Token("对", 0),
    Token("面食", 1),
    Token("过敏", 3),  # towards, wheat-based food, allergy
]
CH_wrong_entity = {"start": 0, "end": 2, "value": "对面", "entity": "direction"}
CH_correct_entity = {"start": 1, "end": 3, "value": "面食", "entity": "food_type"}

# EN example
# "Hey Robot, I would like to eat pizza near Alexanderplatz tonight"
EN_indices = [0, 4, 9, 11, 13, 19, 24, 27, 31, 37, 42, 57]
EN_tokens = [
    "Hey",
    "Robot",
    ",",
    "I",
    "would",
    "like",
    "to",
    "eat",
    "pizza",
    "near",
    "Alexanderplatz",
    "tonight",
]
# rebind the raw strings as Token objects carrying their start offsets
EN_tokens = [Token(t, i) for t, i in zip(EN_tokens, EN_indices)]

# gold entity annotations for the EN example
EN_targets = [
    {"start": 31, "end": 36, "value": "pizza", "entity": "food"},
    {"start": 37, "end": 56, "value": "near Alexanderplatz", "entity": "location"},
    {"start": 57, "end": 64, "value": "tonight", "entity": "datetime"},
]

# predictions from two hypothetical extractors, deliberately imperfect
EN_predicted = [
    {
        "start": 4,
        "end": 9,
        "value": "Robot",
        "entity": "person",
        "extractor": "EntityExtractorA",
    },
    {
        "start": 31,
        "end": 36,
        "value": "pizza",
        "entity": "food",
        "extractor": "EntityExtractorA",
    },
    {
        "start": 42,
        "end": 56,
        "value": "Alexanderplatz",
        "entity": "location",
        "extractor": "EntityExtractorA",
    },
    {
        "start": 42,
        "end": 64,
        "value": "Alexanderplatz tonight",
        "entity": "movie",
        "extractor": "EntityExtractorB",
    },
]

EN_entity_result = EntityEvaluationResult(
    EN_targets, EN_predicted, EN_tokens, " ".join([t.text for t in EN_tokens])
)

EN_entity_result_no_tokens = EntityEvaluationResult(EN_targets, EN_predicted, [], "")
def test_token_entity_intersection():
    """Overlap length between a token and an entity span."""
    # token fully inside the entity -> the whole token length
    overlap = determine_intersection(CH_correct_segmentation[1], CH_correct_entity)
    assert overlap == len(CH_correct_segmentation[1].text)

    # token completely outside the entity -> zero
    overlap = determine_intersection(CH_correct_segmentation[2], CH_correct_entity)
    assert overlap == 0

    # token straddling the entity border -> partial overlap
    overlap = determine_intersection(CH_correct_segmentation[1], CH_wrong_entity)
    assert overlap == 1
def test_token_entity_boundaries():
    """Tokens are classified as within, outside, or crossing an entity span."""
    # smaller and included
    assert is_token_within_entity(CH_wrong_segmentation[1], CH_correct_entity)
    assert not does_token_cross_borders(CH_wrong_segmentation[1], CH_correct_entity)

    # exact match
    assert is_token_within_entity(CH_correct_segmentation[1], CH_correct_entity)
    assert not does_token_cross_borders(CH_correct_segmentation[1], CH_correct_entity)

    # completely outside
    assert not is_token_within_entity(CH_correct_segmentation[0], CH_correct_entity)
    assert not does_token_cross_borders(CH_correct_segmentation[0], CH_correct_entity)

    # border crossing
    assert not is_token_within_entity(CH_wrong_segmentation[0], CH_correct_entity)
    assert does_token_cross_borders(CH_wrong_segmentation[0], CH_correct_entity)
def test_entity_overlap():
    """Overlap is detected for the CH entity pair but not for EN targets."""
    overlapping = [CH_correct_entity, CH_wrong_entity]
    assert do_entities_overlap(overlapping)
    assert not do_entities_overlap(EN_targets)
def test_determine_token_labels_throws_error():
    """Overlapping entities with a non-overlap-capable extractor must raise."""
    with pytest.raises(ValueError):
        determine_token_labels(
            CH_correct_segmentation[0],
            [CH_correct_entity, CH_wrong_entity],
            {CRFEntityExtractor.__name__},
        )
def test_determine_token_labels_no_extractors():
    """Overlapping entities without any extractor set must raise."""
    with pytest.raises(ValueError):
        determine_token_labels(
            CH_correct_segmentation[0], [CH_correct_entity, CH_wrong_entity], None
        )
def test_determine_token_labels_no_extractors_no_overlap():
    """Non-overlapping entities need no extractor; unmatched token gets NO_ENTITY_TAG."""
    label = determine_token_labels(CH_correct_segmentation[0], EN_targets, None)
    assert label == NO_ENTITY_TAG
def test_determine_token_labels_with_extractors():
    """Extractors that support overlap allow labeling despite overlapping entities."""
    label = determine_token_labels(
        CH_correct_segmentation[0],
        [CH_correct_entity, CH_wrong_entity],
        {SpacyEntityExtractor.__name__, MitieEntityExtractor.__name__,},
    )
    assert label == "direction"
@pytest.mark.parametrize(
    "token, entities, extractors, expected_confidence",
    [
        # extractor without confidence support -> 0.0
        (
            Token("pizza", 4),
            [
                {
                    "start": 4,
                    "end": 9,
                    "value": "pizza",
                    "entity": "food",
                    "extractor": "EntityExtractorA",
                }
            ],
            ["EntityExtractorA"],
            0.0,
        ),
        # no entities at all -> 0.0
        (Token("pizza", 4), [], ["EntityExtractorA"], 0.0),
        # CRF extractor exposes confidence_entity -> passed through
        (
            Token("pizza", 4),
            [
                {
                    "start": 4,
                    "end": 9,
                    "value": "pizza",
                    "entity": "food",
                    "confidence_entity": 0.87,
                    "extractor": "CRFEntityExtractor",
                }
            ],
            ["CRFEntityExtractor"],
            0.87,
        ),
        # DIET also exposes confidence_entity -> passed through
        (
            Token("pizza", 4),
            [
                {
                    "start": 4,
                    "end": 9,
                    "value": "pizza",
                    "entity": "food",
                    "confidence_entity": 0.87,
                    "extractor": "DIETClassifier",
                }
            ],
            ["DIETClassifier"],
            0.87,
        ),
    ],
)
def test_get_entity_confidences(
    token: Token,
    entities: List[Dict[Text, Any]],
    extractors: List[Text],
    expected_confidence: float,
):
    """Entity confidence is taken from confidence_entity for supporting extractors."""
    confidence = _get_entity_confidences(token, entities, extractors)

    assert confidence == expected_confidence
def test_label_merging():
    """Labels merge across messages; an extractor name selects its own labels."""
    import numpy as np

    predictions = [
        {
            "target_labels": ["O", "O"],
            "extractor_labels": {"EntityExtractorA": ["O", "O"]},
        },
        {
            "target_labels": ["LOC", "O", "O"],
            "extractor_labels": {"EntityExtractorA": ["O", "O", "O"]},
        },
    ]

    merged_targets = merge_labels(predictions)
    merged_extractor = merge_labels(predictions, "EntityExtractorA")

    assert np.all(merged_targets == ["O", "O", "LOC", "O", "O"])
    assert np.all(merged_extractor == ["O", "O", "O", "O", "O"])
def test_confidence_merging():
    """Per-extractor confidences from aligned messages concatenate in order."""
    import numpy as np

    first = {
        "target_labels": ["O", "O"],
        "extractor_labels": {"EntityExtractorA": ["O", "O"]},
        "confidences": {"EntityExtractorA": [0.0, 0.0]},
    }
    second = {
        "target_labels": ["LOC", "O", "O"],
        "extractor_labels": {"EntityExtractorA": ["O", "O", "O"]},
        "confidences": {"EntityExtractorA": [0.98, 0.0, 0.0]},
    }

    merged = merge_confidences([first, second], "EntityExtractorA")
    expected = [0.0, 0.0, 0.98, 0.0, 0.0]

    assert np.all(merged == expected)
def test_drop_intents_below_freq():
    """Intents under the frequency cutoff are dropped; lookup tables survive."""
    td = rasa.shared.nlu.training_data.loading.load_data(
        "data/examples/rasa/demo-rasa.json"
    )
    # include some lookup tables and make sure new td has them
    td = td.merge(TrainingData(lookup_tables=[{"lookup_table": "lookup_entry"}]))

    clean_td = drop_intents_below_freq(td, 0)
    assert clean_td.intents == {
        "affirm",
        "goodbye",
        "greet",
        "restaurant_search",
        "chitchat",
    }

    clean_td = drop_intents_below_freq(td, 10)
    assert clean_td.intents == {"affirm", "restaurant_search"}
    assert clean_td.lookup_tables == td.lookup_tables
@pytest.mark.timeout(
    300, func_only=True
)  # these can take a longer time than the default timeout
async def test_run_evaluation(mood_agent: Agent, nlu_as_json_path: Text):
    """End-to-end NLU evaluation produces intent results with confidences."""
    result = await run_evaluation(
        nlu_as_json_path,
        mood_agent.processor,
        errors=False,
        successes=False,
        disable_plotting=True,
    )

    assert result.get("intent_evaluation")
    # every response-selection prediction must carry a confidence value
    assert all(
        prediction["confidence"] is not None
        for prediction in result["response_selection_evaluation"]["predictions"]
    )
@pytest.mark.timeout(
    300, func_only=True
)  # these can take a longer time than the default timeout
async def test_run_evaluation_with_regex_message(mood_agent: Agent, tmp_path: Path):
    """Evaluation must not crash on trigger-style '/intent{entities}' examples."""
    training_data = textwrap.dedent(
        """
        version: '2.0'
        nlu:
        - intent: goodbye
          examples: |
            - Bye
            - /goodbye{"location": "29432"}
        """
    )
    data_path = tmp_path / "test.yml"
    rasa.shared.utils.io.write_text_file(training_data, data_path)

    # Does not raise
    await run_evaluation(
        str(data_path),
        mood_agent.processor,
        errors=False,
        successes=False,
        disable_plotting=True,
    )
async def test_eval_data(
    tmp_path: Path, project: Text, trained_rasa_model: Text,
):
    """get_eval_data yields one result per example for each evaluation type."""
    config_path = os.path.join(project, "config.yml")
    data_importer = TrainingDataImporter.load_nlu_importer_from_config(
        config_path,
        training_data_paths=[
            "data/examples/rasa/demo-rasa.yml",
            "data/examples/rasa/demo-rasa-responses.yml",
        ],
    )

    processor = Agent.load(trained_rasa_model).processor

    data = data_importer.get_nlu_data()
    (intent_results, response_selection_results, entity_results) = await get_eval_data(
        processor, data
    )

    # 46 examples in the merged demo data -> 46 results of each kind
    assert len(intent_results) == 46
    assert len(response_selection_results) == 46
    assert len(entity_results) == 46
@pytest.mark.timeout(
    240, func_only=True
)  # these can take a longer time than the default timeout
async def test_run_cv_evaluation():
    """Cross-validation yields per-fold metrics for intents and entities."""
    td = rasa.shared.nlu.training_data.loading.load_data(
        "data/test/demo-rasa-more-ents-and-multiplied.yml"
    )

    nlu_config = {
        "language": "en",
        "pipeline": [
            {"name": "WhitespaceTokenizer"},
            {"name": "CountVectorsFeaturizer"},
            {"name": "DIETClassifier", EPOCHS: 2},
        ],
    }

    n_folds = 2
    intent_results, entity_results, response_selection_results = await cross_validate(
        td,
        n_folds,
        nlu_config,
        successes=False,
        errors=False,
        disable_plotting=True,
        report_as_dict=True,
    )

    # one metric entry per fold, for train and test alike
    assert len(intent_results.train["Accuracy"]) == n_folds
    assert len(intent_results.train["Precision"]) == n_folds
    assert len(intent_results.train["F1-score"]) == n_folds
    assert len(intent_results.test["Accuracy"]) == n_folds
    assert len(intent_results.test["Precision"]) == n_folds
    assert len(intent_results.test["F1-score"]) == n_folds
    assert all(key in intent_results.evaluation for key in ["errors", "report"])
    assert any(
        isinstance(intent_report, dict)
        and intent_report.get("confused_with") is not None
        for intent_report in intent_results.evaluation["report"].values()
    )
    for extractor_evaluation in entity_results.evaluation.values():
        assert all(key in extractor_evaluation for key in ["errors", "report"])
@pytest.mark.timeout(
    180, func_only=True
)  # these can take a longer time than the default timeout
async def test_run_cv_evaluation_no_entities():
    """Cross-validation on entity-free data leaves entity results empty."""
    td = rasa.shared.nlu.training_data.loading.load_data(
        "data/test/demo-rasa-no-ents.yml"
    )

    nlu_config = {
        "language": "en",
        "pipeline": [
            {"name": "WhitespaceTokenizer"},
            {"name": "CountVectorsFeaturizer"},
            {"name": "DIETClassifier", EPOCHS: 25},
        ],
    }

    n_folds = 2
    intent_results, entity_results, response_selection_results = await cross_validate(
        td,
        n_folds,
        nlu_config,
        successes=False,
        errors=False,
        disable_plotting=True,
        report_as_dict=True,
    )

    assert len(intent_results.train["Accuracy"]) == n_folds
    assert len(intent_results.train["Precision"]) == n_folds
    assert len(intent_results.train["F1-score"]) == n_folds
    assert len(intent_results.test["Accuracy"]) == n_folds
    assert len(intent_results.test["Precision"]) == n_folds
    assert len(intent_results.test["F1-score"]) == n_folds
    assert all(key in intent_results.evaluation for key in ["errors", "report"])
    assert any(
        isinstance(intent_report, dict)
        and intent_report.get("confused_with") is not None
        for intent_report in intent_results.evaluation["report"].values()
    )

    # no entities in the data -> no entity metrics of any kind
    assert len(entity_results.train) == 0
    assert len(entity_results.test) == 0
    assert len(entity_results.evaluation) == 0
@pytest.mark.timeout(
    280, func_only=True
)  # these can take a longer time than the default timeout
async def test_run_cv_evaluation_with_response_selector():
    """Cross-validation covers intents, entities and response selection together."""
    training_data_obj = rasa.shared.nlu.training_data.loading.load_data(
        "data/test/demo-rasa-more-ents-and-multiplied.yml"
    )
    training_data_responses_obj = rasa.shared.nlu.training_data.loading.load_data(
        "data/examples/rasa/demo-rasa-responses.yml"
    )
    training_data_obj = training_data_obj.merge(training_data_responses_obj)

    nlu_config = {
        "language": "en",
        "pipeline": [
            {"name": "WhitespaceTokenizer"},
            {"name": "CountVectorsFeaturizer"},
            {"name": "DIETClassifier", EPOCHS: 25},
            {"name": "ResponseSelector", EPOCHS: 2},
        ],
    }

    n_folds = 2
    intent_results, entity_results, response_selection_results = await cross_validate(
        training_data_obj,
        n_folds,
        nlu_config,
        successes=False,
        errors=False,
        disable_plotting=True,
        report_as_dict=True,
    )

    assert len(intent_results.train["Accuracy"]) == n_folds
    assert len(intent_results.train["Precision"]) == n_folds
    assert len(intent_results.train["F1-score"]) == n_folds
    assert len(intent_results.test["Accuracy"]) == n_folds
    assert len(intent_results.test["Precision"]) == n_folds
    assert len(intent_results.test["F1-score"]) == n_folds
    assert all(key in intent_results.evaluation for key in ["errors", "report"])
    assert any(
        isinstance(intent_report, dict)
        and intent_report.get("confused_with") is not None
        for intent_report in intent_results.evaluation["report"].values()
    )

    assert len(response_selection_results.train["Accuracy"]) == n_folds
    assert len(response_selection_results.train["Precision"]) == n_folds
    assert len(response_selection_results.train["F1-score"]) == n_folds
    assert len(response_selection_results.test["Accuracy"]) == n_folds
    assert len(response_selection_results.test["Precision"]) == n_folds
    assert len(response_selection_results.test["F1-score"]) == n_folds
    assert all(
        key in response_selection_results.evaluation for key in ["errors", "report"]
    )
    # every response-selection prediction carries a non-zero confidence
    assert all(
        prediction["confidence"] is not None and prediction["confidence"] != 0.0
        for prediction in response_selection_results.evaluation["predictions"]
    )
    assert any(
        isinstance(intent_report, dict)
        and intent_report.get("confused_with") is not None
        for intent_report in response_selection_results.evaluation["report"].values()
    )

    diet_name = "DIETClassifier"
    assert len(entity_results.train[diet_name]["Accuracy"]) == n_folds
    assert len(entity_results.train[diet_name]["Precision"]) == n_folds
    assert len(entity_results.train[diet_name]["F1-score"]) == n_folds
    assert len(entity_results.test[diet_name]["Accuracy"]) == n_folds
    assert len(entity_results.test[diet_name]["Precision"]) == n_folds
    assert len(entity_results.test[diet_name]["F1-score"]) == n_folds
    for extractor_evaluation in entity_results.evaluation.values():
        assert all(key in extractor_evaluation for key in ["errors", "report"])
def test_intent_evaluation_report(tmp_path: Path):
    """Intent evaluation writes a JSON report plus success and plot artifacts."""
    eval_dir = tmp_path / "evaluation"
    eval_dir.mkdir()
    report_folder = str(eval_dir / "reports")
    report_filename = os.path.join(report_folder, "intent_report.json")
    rasa.shared.utils.io.create_directory(report_folder)

    # One misclassified empty-target example and one correct prediction.
    intent_results = [
        IntentEvaluationResult("", "restaurant_search", "I am hungry", 0.12345),
        IntentEvaluationResult("greet", "greet", "hello", 0.98765),
    ]

    result = evaluate_intents(
        intent_results,
        report_folder,
        successes=True,
        errors=True,
        disable_plotting=False,
    )

    report = json.loads(rasa.shared.utils.io.read_file(report_filename))

    expected_greet_report = {
        "precision": 1.0,
        "recall": 1.0,
        "f1-score": 1.0,
        "support": 1,
        "confused_with": {},
    }
    expected_prediction = {
        "text": "hello",
        "intent": "greet",
        "predicted": "greet",
        "confidence": 0.98765,
    }

    assert len(report.keys()) == 4
    assert report["greet"] == expected_greet_report
    assert result["predictions"][0] == expected_prediction

    # Plots and successes were requested; the errors file must be absent.
    assert os.path.exists(os.path.join(report_folder, "intent_confusion_matrix.png"))
    assert os.path.exists(os.path.join(report_folder, "intent_histogram.png"))
    assert not os.path.exists(os.path.join(report_folder, "intent_errors.json"))
    assert os.path.exists(os.path.join(report_folder, "intent_successes.json"))
def test_intent_evaluation_report_large(tmp_path: Path):
    """Report aggregation is correct for a larger, partly confused label set."""
    eval_dir = tmp_path / "evaluation"
    eval_dir.mkdir()
    report_folder = eval_dir / "reports"
    report_filename = report_folder / "intent_report.json"
    rasa.shared.utils.io.create_directory(str(report_folder))

    def correct(label: Text) -> IntentEvaluationResult:
        # Target and prediction agree.
        return IntentEvaluationResult(label, label, "", 1.0)

    def incorrect(label: Text, _label: Text) -> IntentEvaluationResult:
        # Target `label` was (wrongly) predicted as `_label`.
        return IntentEvaluationResult(label, _label, "", 1.0)

    intent_results = (
        [correct("A")] * 10
        + [correct("B")] * 7
        + [incorrect("B", "C")] * 3
        + [correct("C")] * 3
        + [incorrect("C", "D")]
        + [incorrect("C", "E")]
        + [correct("D")] * 29
        + [incorrect("D", "B")] * 3
        + [incorrect("E", "C")] * 5
        + [incorrect("E", "")] * 5
    )

    evaluate_intents(
        intent_results,
        str(report_folder),
        successes=False,
        errors=False,
        disable_plotting=True,
    )

    report = json.loads(rasa.shared.utils.io.read_file(str(report_filename)))

    expected_a_report = {
        "precision": 1.0,
        "recall": 1.0,
        "f1-score": 1.0,
        "support": 10,
        "confused_with": {},
    }
    # "E" is never predicted correctly and is confused with "C" and "".
    expected_e_report = {
        "precision": 0.0,
        "recall": 0.0,
        "f1-score": 0.0,
        "support": 10,
        "confused_with": {"C": 5, "": 5},
    }

    assert len(report.keys()) == 8
    assert report["A"] == expected_a_report
    assert report["E"] == expected_e_report
    assert report["C"]["confused_with"] == {"D": 1, "E": 1}
def test_response_evaluation_report(tmp_path: Path):
    """Response selection evaluation writes a report, plots and successes."""
    eval_dir = tmp_path / "evaluation"
    eval_dir.mkdir()
    report_folder = str(eval_dir / "reports")
    report_filename = os.path.join(report_folder, "response_selection_report.json")
    rasa.shared.utils.io.create_directory(report_folder)

    response_results = [
        ResponseSelectionEvaluationResult(
            "chitchat/ask_weather",
            "chitchat/ask_weather",
            "What's the weather",
            0.65432,
        ),
        ResponseSelectionEvaluationResult(
            "chitchat/ask_name", "chitchat/ask_name", "What's your name?", 0.98765
        ),
    ]

    result = evaluate_response_selections(
        response_results,
        report_folder,
        successes=True,
        errors=True,
        disable_plotting=False,
    )

    report = json.loads(rasa.shared.utils.io.read_file(report_filename))

    expected_name_report = {
        "precision": 1.0,
        "recall": 1.0,
        "f1-score": 1.0,
        "support": 1,
        "confused_with": {},
    }
    expected_prediction = {
        "text": "What's your name?",
        "intent_response_key_target": "chitchat/ask_name",
        "intent_response_key_prediction": "chitchat/ask_name",
        "confidence": 0.98765,
    }

    assert len(report.keys()) == 5
    assert report["chitchat/ask_name"] == expected_name_report
    assert result["predictions"][1] == expected_prediction

    # Plots and successes were requested; the errors file must be absent.
    for artifact in (
        "response_selection_confusion_matrix.png",
        "response_selection_histogram.png",
        "response_selection_successes.json",
    ):
        assert os.path.exists(os.path.join(report_folder, artifact))
    assert not os.path.exists(
        os.path.join(report_folder, "response_selection_errors.json")
    )
@pytest.mark.parametrize(
    "entity_results, expected_extractors",
    [
        # No results -> no extractors.
        ([], set()),
        ([EN_entity_result], {"EntityExtractorA", "EntityExtractorB"}),
        # Duplicated results must not yield duplicate extractor entries.
        (
            [EN_entity_result, EN_entity_result],
            {"EntityExtractorA", "EntityExtractorB"},
        ),
    ],
)
def test_get_active_entity_extractors(
    entity_results: List[EntityEvaluationResult], expected_extractors: Set[Text]
):
    """The active extractor set is derived from the given entity results."""
    assert _get_active_entity_extractors(entity_results) == expected_extractors
def test_entity_evaluation_report(tmp_path: Path):
    """Per-extractor entity reports, errors, successes and plots are written."""
    eval_dir = tmp_path / "evaluation"
    eval_dir.mkdir()
    report_folder = str(eval_dir / "reports")
    report_filename_a = os.path.join(report_folder, "EntityExtractorA_report.json")
    report_filename_b = os.path.join(report_folder, "EntityExtractorB_report.json")
    rasa.shared.utils.io.create_directory(report_folder)

    extractors = _get_active_entity_extractors([EN_entity_result])
    result = evaluate_entities(
        [EN_entity_result],
        extractors,
        report_folder,
        errors=True,
        successes=True,
        disable_plotting=False,
    )

    report_a = json.loads(rasa.shared.utils.io.read_file(report_filename_a))
    report_b = json.loads(rasa.shared.utils.io.read_file(report_filename_b))

    assert len(report_a) == 6
    assert report_a["datetime"]["support"] == 1.0
    assert report_b["macro avg"]["recall"] == 0.0
    assert report_a["macro avg"]["recall"] == 0.5
    assert result["EntityExtractorA"]["accuracy"] == 0.75

    # Confusion matrix, errors and successes exist; entity evaluation
    # does not produce a histogram.
    assert os.path.exists(
        os.path.join(report_folder, "EntityExtractorA_confusion_matrix.png")
    )
    assert os.path.exists(os.path.join(report_folder, "EntityExtractorA_errors.json"))
    assert os.path.exists(
        os.path.join(report_folder, "EntityExtractorA_successes.json")
    )
    assert not os.path.exists(
        os.path.join(report_folder, "EntityExtractorA_histogram.png")
    )
def test_empty_intent_removal():
    """Examples without an intent target are dropped from the results."""
    filtered = remove_empty_intent_examples(
        [
            # Empty target: must be removed.
            IntentEvaluationResult("", "restaurant_search", "I am hungry", 0.12345),
            IntentEvaluationResult("greet", "greet", "hello", 0.98765),
        ]
    )

    assert len(filtered) == 1
    remaining = filtered[0]
    assert remaining.intent_target == "greet"
    assert remaining.intent_prediction == "greet"
    assert remaining.confidence == 0.98765
    assert remaining.message == "hello"
def test_empty_response_removal():
    """Targetless examples are dropped; missing predictions get defaults."""
    response_results = [
        # No target at all: must be removed entirely.
        ResponseSelectionEvaluationResult(None, None, "What's the weather", 0.65432),
        ResponseSelectionEvaluationResult(
            "chitchat/ask_name", "chitchat/ask_name", "What's your name?", 0.98765
        ),
        # This happens if response selection test data is present but no response
        # selector is part of the model
        ResponseSelectionEvaluationResult(
            "chitchat/ask_name", None, "What's your name?", None
        ),
    ]

    filtered = remove_empty_response_examples(response_results)

    assert len(filtered) == 2
    first, second = filtered

    assert first.intent_response_key_target == "chitchat/ask_name"
    assert first.intent_response_key_prediction == "chitchat/ask_name"
    assert first.confidence == 0.98765
    assert first.message == "What's your name?"

    # Missing prediction/confidence are normalized to "" and 0.0.
    assert second.intent_response_key_target == "chitchat/ask_name"
    assert second.intent_response_key_prediction == ""
    assert second.confidence == 0.0
    assert second.message == "What's your name?"
def test_evaluate_entities_cv_empty_tokens():
    """Aligning a token-less result yields empty label/confidence lists."""
    extractors = ["EntityExtractorA", "EntityExtractorB"]
    aligned = align_entity_predictions(EN_entity_result_no_tokens, extractors)

    expected = {
        "target_labels": [],
        "extractor_labels": {name: [] for name in extractors},
        "confidences": {name: [] for name in extractors},
    }
    assert aligned == expected, "Wrong entity prediction alignment"
def test_evaluate_entities_cv():
    """Token-level alignment of gold labels against both extractors' output."""
    extractors = ["EntityExtractorA", "EntityExtractorB"]
    aligned = align_entity_predictions(EN_entity_result, extractors)

    # Neither extractor reports confidences, so every token gets 0.0.
    no_confidences = [0.0] * 12
    expected = {
        # 8 plain tokens followed by food / location / location / datetime.
        "target_labels": ["O"] * 8 + ["food", "location", "location", "datetime"],
        "extractor_labels": {
            # A hallucinates "person", finds "food", and only one "location".
            "EntityExtractorA": ["O", "person"]
            + ["O"] * 6
            + ["food", "O", "location", "O"],
            # B only labels the last two tokens, both wrongly as "movie".
            "EntityExtractorB": ["O"] * 10 + ["movie", "movie"],
        },
        "confidences": {
            "EntityExtractorA": no_confidences,
            "EntityExtractorB": no_confidences,
        },
    }
    assert aligned == expected, "Wrong entity prediction alignment"
def test_label_replacement():
    """`substitute_labels` replaces only the matching label."""
    assert substitute_labels(["O", "location"], "O", "no_entity") == [
        "no_entity",
        "location",
    ]
async def test_nlu_comparison(
    tmp_path: Path, monkeypatch: MonkeyPatch, nlu_as_json_path: Text
):
    """`compare_nlu_models` lays out per-run / per-exclusion result folders."""
    config = {
        "language": "en",
        "pipeline": [
            {"name": "WhitespaceTokenizer"},
            {"name": "KeywordIntentClassifier"},
            {"name": "RegexEntityExtractor"},
        ],
    }
    # the configs need to be at a different path, otherwise the results are
    # combined on the same dictionary key and cannot be plotted properly
    configs = [write_file_config(config).name, write_file_config(config).name]

    # Stub out the expensive evaluation internals.
    monkeypatch.setattr(
        sys.modules["rasa.nlu.test"],
        "get_eval_data",
        AsyncMock(return_value=(1, None, (None,),)),
    )
    monkeypatch.setattr(
        sys.modules["rasa.nlu.test"],
        "evaluate_intents",
        Mock(return_value={"f1_score": 1}),
    )

    output = str(tmp_path)
    test_data_importer = TrainingDataImporter.load_from_dict(
        training_data_paths=[nlu_as_json_path]
    )
    test_data = test_data_importer.get_nlu_data()

    await compare_nlu_models(
        configs, test_data, output, runs=2, exclusion_percentages=[50, 80]
    )

    assert set(os.listdir(output)) == {
        "run_1",
        "run_2",
        "results.json",
        "nlu_model_comparison_graph.pdf",
    }

    run_1_path = os.path.join(output, "run_1")
    assert set(os.listdir(run_1_path)) == {"50%_exclusion", "80%_exclusion", "test.yml"}

    # Each exclusion folder contains the training data plus one report and
    # one packed model per config.
    exclude_50_path = os.path.join(run_1_path, "50%_exclusion")
    modelnames = [os.path.splitext(os.path.basename(config))[0] for config in configs]
    expected_outputs = (
        {"train"}
        | {f"{name}_report" for name in modelnames}
        | {f"{name}.tar.gz" for name in modelnames}
    )
    assert set(os.listdir(exclude_50_path)) == expected_outputs
@pytest.mark.parametrize(
    "entity_results,targets,predictions,successes,errors",
    [
        (
            [
                # First message: the single entity is predicted correctly.
                EntityEvaluationResult(
                    entity_targets=[
                        {
                            "start": 17,
                            "end": 24,
                            "value": "Italian",
                            "entity": "cuisine",
                        }
                    ],
                    entity_predictions=[
                        {
                            "start": 17,
                            "end": 24,
                            "value": "Italian",
                            "entity": "cuisine",
                        }
                    ],
                    tokens=[
                        "I",
                        "want",
                        "to",
                        "book",
                        "an",
                        "Italian",
                        "restaurant",
                        ".",
                    ],
                    message="I want to book an Italian restaurant.",
                ),
                # Second message: both entities are missed entirely.
                EntityEvaluationResult(
                    entity_targets=[
                        {
                            "start": 8,
                            "end": 15,
                            "value": "Mexican",
                            "entity": "cuisine",
                        },
                        {
                            "start": 31,
                            "end": 32,
                            "value": "4",
                            "entity": "number_people",
                        },
                    ],
                    entity_predictions=[],
                    tokens=[
                        "Book",
                        "an",
                        "Mexican",
                        "restaurant",
                        "for",
                        "4",
                        "people",
                        ".",
                    ],
                    message="Book an Mexican restaurant for 4 people.",
                ),
            ],
            # Token-level gold labels for both 8-token messages.
            [NO_ENTITY] * 5
            + ["cuisine"]
            + [NO_ENTITY] * 4
            + ["cuisine"]
            + [NO_ENTITY] * 2
            + ["number_people"]
            + [NO_ENTITY] * 2,
            # Token-level predictions: only "Italian" was found.
            [NO_ENTITY] * 5 + ["cuisine"] + [NO_ENTITY] * 10,
            # Expected successes: the fully-correct first message.
            [
                {
                    "text": "I want to book an Italian restaurant.",
                    "entities": [
                        {
                            "start": 17,
                            "end": 24,
                            "value": "Italian",
                            "entity": "cuisine",
                        }
                    ],
                    "predicted_entities": [
                        {
                            "start": 17,
                            "end": 24,
                            "value": "Italian",
                            "entity": "cuisine",
                        }
                    ],
                }
            ],
            # Expected errors: the second message with no predictions.
            [
                {
                    "text": "Book an Mexican restaurant for 4 people.",
                    "entities": [
                        {
                            "start": 8,
                            "end": 15,
                            "value": "Mexican",
                            "entity": "cuisine",
                        },
                        {
                            "start": 31,
                            "end": 32,
                            "value": "4",
                            "entity": "number_people",
                        },
                    ],
                    "predicted_entities": [],
                }
            ],
        )
    ],
)
def test_collect_entity_predictions(
    entity_results: List[EntityEvaluationResult],
    targets: List[Text],
    predictions: List[Text],
    successes: List[Dict[Text, Any]],
    errors: List[Dict[Text, Any]],
):
    """Correct and incorrect entity predictions are collected separately."""
    actual_successes = collect_successful_entity_predictions(
        entity_results, targets, predictions
    )
    assert len(successes) == len(actual_successes)
    assert successes == actual_successes

    actual_errors = collect_incorrect_entity_predictions(
        entity_results, targets, predictions
    )
    assert len(errors) == len(actual_errors)
    assert errors == actual_errors
class ConstantProcessor:
    """Test double for a message processor: always returns a fixed prediction."""

    def __init__(self, prediction_to_return: Dict[Text, Any]) -> None:
        # Payload handed back verbatim by every `parse_message` call.
        self.prediction = prediction_to_return

    async def parse_message(
        self, message: UserMessage, only_output_properties: bool = True,
    ) -> Dict[Text, Any]:
        """Ignore the incoming message and return the canned prediction."""
        return self.prediction
async def test_replacing_fallback_intent():
    """NLU-fallback predictions are replaced by the next-best ranked intent."""
    expected_intent = "greet"
    expected_confidence = 0.345
    fallback_prediction = {
        INTENT: {
            INTENT_NAME_KEY: DEFAULT_NLU_FALLBACK_INTENT_NAME,
            PREDICTED_CONFIDENCE_KEY: 1,
        },
        INTENT_RANKING_KEY: [
            {
                INTENT_NAME_KEY: DEFAULT_NLU_FALLBACK_INTENT_NAME,
                PREDICTED_CONFIDENCE_KEY: 1,
            },
            # Highest-ranked real intent: this is what should be reported.
            {
                INTENT_NAME_KEY: expected_intent,
                PREDICTED_CONFIDENCE_KEY: expected_confidence,
            },
            {INTENT_NAME_KEY: "some", PREDICTED_CONFIDENCE_KEY: 0.1},
        ],
    }

    processor = ConstantProcessor(fallback_prediction)
    training_data = TrainingData(
        [Message.build("hi", "greet"), Message.build("bye", "bye")]
    )

    intent_evaluations, _, _ = await get_eval_data(processor, training_data)

    for evaluation in intent_evaluations:
        assert evaluation.intent_prediction == expected_intent
        assert evaluation.confidence == expected_confidence
async def test_remove_entities_of_extractors():
    """Entities from the listed extractors are stripped; others are kept."""
    extractor = "TestExtractor"
    extractor_2 = "DIET"
    extractor_3 = "YetAnotherExtractor"

    # shouldn't crash when there are no annotations
    _remove_entities_of_extractors({}, [extractor])

    # add some entities
    result_dict = {
        ENTITIES: [
            {
                ENTITY_ATTRIBUTE_TYPE: "time",
                ENTITY_ATTRIBUTE_VALUE: "12:00",
                EXTRACTOR: extractor,
            },
            {
                ENTITY_ATTRIBUTE_TYPE: "location",
                ENTITY_ATTRIBUTE_VALUE: "Berlin - Alexanderplatz",
                EXTRACTOR: extractor_3,
            },
            {
                ENTITY_ATTRIBUTE_TYPE: "name",
                ENTITY_ATTRIBUTE_VALUE: "Joe",
                EXTRACTOR: extractor_2,
            },
        ]
    }

    _remove_entities_of_extractors(result_dict, [extractor, extractor_3])

    # Only the entity of the extractor that was NOT removed survives.
    remaining = result_dict[ENTITIES]
    assert len(remaining) == 1
    assert remaining[0][EXTRACTOR] == extractor_2
| 30.932854 | 88 | 0.581156 |
e8bbc8e665fab8a4ceae6ab368c8383d39c6a48e | 8,396 | py | Python | airflow/gcp/example_dags/example_bigtable.py | penghou620/airflow | 7b6045a479edca06732fd766ef8ceabf41c7e82a | [
"Apache-2.0"
] | 1 | 2019-10-02T13:33:48.000Z | 2019-10-02T13:33:48.000Z | airflow/gcp/example_dags/example_bigtable.py | penghou620/airflow | 7b6045a479edca06732fd766ef8ceabf41c7e82a | [
"Apache-2.0"
] | null | null | null | airflow/gcp/example_dags/example_bigtable.py | penghou620/airflow | 7b6045a479edca06732fd766ef8ceabf41c7e82a | [
"Apache-2.0"
] | 1 | 2019-11-26T21:53:20.000Z | 2019-11-26T21:53:20.000Z | # -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# noinspection LongLine
"""
Example Airflow DAG that creates and performs following operations on Cloud Bigtable:
- creates an Instance
- creates a Table
- updates Cluster
- waits for Table replication completeness
- deletes the Table
- deletes the Instance
This DAG relies on the following environment variables:
* GCP_PROJECT_ID - Google Cloud Platform project
* CBT_INSTANCE_ID - desired ID of a Cloud Bigtable instance
* CBT_INSTANCE_DISPLAY_NAME - desired human-readable display name of the Instance
* CBT_INSTANCE_TYPE - type of the Instance, e.g. 1 for DEVELOPMENT
See https://googleapis.github.io/google-cloud-python/latest/bigtable/instance.html#google.cloud.bigtable.instance.Instance # noqa E501 # pylint: disable=line-too-long
* CBT_INSTANCE_LABELS - labels to add for the Instance
* CBT_CLUSTER_ID - desired ID of the main Cluster created for the Instance
* CBT_CLUSTER_ZONE - zone in which main Cluster will be created. e.g. europe-west1-b
See available zones: https://cloud.google.com/bigtable/docs/locations
* CBT_CLUSTER_NODES - initial amount of nodes of the Cluster
* CBT_CLUSTER_NODES_UPDATED - amount of nodes for BigtableClusterUpdateOperator
* CBT_CLUSTER_STORAGE_TYPE - storage for the Cluster, e.g. 1 for SSD
See https://googleapis.github.io/google-cloud-python/latest/bigtable/instance.html#google.cloud.bigtable.instance.Instance.cluster # noqa E501 # pylint: disable=line-too-long
* CBT_TABLE_ID - desired ID of the Table
* CBT_POKE_INTERVAL - number of seconds between every attempt of Sensor check
"""
import json
from os import getenv
import airflow
from airflow import models
from airflow.gcp.operators.bigtable import \
BigtableInstanceCreateOperator, \
BigtableInstanceDeleteOperator, \
BigtableClusterUpdateOperator, \
BigtableTableCreateOperator, \
BigtableTableDeleteOperator
from airflow.gcp.sensors.bigtable import BigtableTableWaitForReplicationSensor
# [START howto_operator_gcp_bigtable_args]
GCP_PROJECT_ID = getenv('GCP_PROJECT_ID', 'example-project')
CBT_INSTANCE_ID = getenv('CBT_INSTANCE_ID', 'some-instance-id')
CBT_INSTANCE_DISPLAY_NAME = getenv('CBT_INSTANCE_DISPLAY_NAME', 'Human-readable name')
CBT_INSTANCE_TYPE = getenv('CBT_INSTANCE_TYPE', '2')
CBT_INSTANCE_LABELS = getenv('CBT_INSTANCE_LABELS', '{}')
CBT_CLUSTER_ID = getenv('CBT_CLUSTER_ID', 'some-cluster-id')
CBT_CLUSTER_ZONE = getenv('CBT_CLUSTER_ZONE', 'europe-west1-b')
CBT_CLUSTER_NODES = getenv('CBT_CLUSTER_NODES', '3')
CBT_CLUSTER_NODES_UPDATED = getenv('CBT_CLUSTER_NODES_UPDATED', '5')
CBT_CLUSTER_STORAGE_TYPE = getenv('CBT_CLUSTER_STORAGE_TYPE', '2')
CBT_TABLE_ID = getenv('CBT_TABLE_ID', 'some-table-id')
CBT_POKE_INTERVAL = getenv('CBT_POKE_INTERVAL', '60')
# [END howto_operator_gcp_bigtable_args]
default_args = {
'start_date': airflow.utils.dates.days_ago(1)
}
with models.DAG(
'example_gcp_bigtable_operators',
default_args=default_args,
schedule_interval=None # Override to match your needs
) as dag:
# [START howto_operator_gcp_bigtable_instance_create]
create_instance_task = BigtableInstanceCreateOperator(
project_id=GCP_PROJECT_ID,
instance_id=CBT_INSTANCE_ID,
main_cluster_id=CBT_CLUSTER_ID,
main_cluster_zone=CBT_CLUSTER_ZONE,
instance_display_name=CBT_INSTANCE_DISPLAY_NAME,
instance_type=int(CBT_INSTANCE_TYPE),
instance_labels=json.loads(CBT_INSTANCE_LABELS),
cluster_nodes=int(CBT_CLUSTER_NODES),
cluster_storage_type=int(CBT_CLUSTER_STORAGE_TYPE),
task_id='create_instance_task',
)
create_instance_task2 = BigtableInstanceCreateOperator(
instance_id=CBT_INSTANCE_ID,
main_cluster_id=CBT_CLUSTER_ID,
main_cluster_zone=CBT_CLUSTER_ZONE,
instance_display_name=CBT_INSTANCE_DISPLAY_NAME,
instance_type=int(CBT_INSTANCE_TYPE),
instance_labels=json.loads(CBT_INSTANCE_LABELS),
cluster_nodes=int(CBT_CLUSTER_NODES),
cluster_storage_type=int(CBT_CLUSTER_STORAGE_TYPE),
task_id='create_instance_task2',
)
create_instance_task >> create_instance_task2
# [END howto_operator_gcp_bigtable_instance_create]
# [START howto_operator_gcp_bigtable_cluster_update]
cluster_update_task = BigtableClusterUpdateOperator(
project_id=GCP_PROJECT_ID,
instance_id=CBT_INSTANCE_ID,
cluster_id=CBT_CLUSTER_ID,
nodes=int(CBT_CLUSTER_NODES_UPDATED),
task_id='update_cluster_task',
)
cluster_update_task2 = BigtableClusterUpdateOperator(
instance_id=CBT_INSTANCE_ID,
cluster_id=CBT_CLUSTER_ID,
nodes=int(CBT_CLUSTER_NODES_UPDATED),
task_id='update_cluster_task2',
)
cluster_update_task >> cluster_update_task2
# [END howto_operator_gcp_bigtable_cluster_update]
# [START howto_operator_gcp_bigtable_instance_delete]
delete_instance_task = BigtableInstanceDeleteOperator(
project_id=GCP_PROJECT_ID,
instance_id=CBT_INSTANCE_ID,
task_id='delete_instance_task',
)
delete_instance_task2 = BigtableInstanceDeleteOperator(
instance_id=CBT_INSTANCE_ID,
task_id='delete_instance_task2',
)
# [END howto_operator_gcp_bigtable_instance_delete]
# [START howto_operator_gcp_bigtable_table_create]
create_table_task = BigtableTableCreateOperator(
project_id=GCP_PROJECT_ID,
instance_id=CBT_INSTANCE_ID,
table_id=CBT_TABLE_ID,
task_id='create_table',
)
create_table_task2 = BigtableTableCreateOperator(
instance_id=CBT_INSTANCE_ID,
table_id=CBT_TABLE_ID,
task_id='create_table_task2',
)
create_table_task >> create_table_task2
# [END howto_operator_gcp_bigtable_table_create]
# [START howto_operator_gcp_bigtable_table_wait_for_replication]
wait_for_table_replication_task = BigtableTableWaitForReplicationSensor(
project_id=GCP_PROJECT_ID,
instance_id=CBT_INSTANCE_ID,
table_id=CBT_TABLE_ID,
poke_interval=int(CBT_POKE_INTERVAL),
timeout=180,
task_id='wait_for_table_replication_task',
)
wait_for_table_replication_task2 = BigtableTableWaitForReplicationSensor(
instance_id=CBT_INSTANCE_ID,
table_id=CBT_TABLE_ID,
poke_interval=int(CBT_POKE_INTERVAL),
timeout=180,
task_id='wait_for_table_replication_task2',
)
# [END howto_operator_gcp_bigtable_table_wait_for_replication]
# [START howto_operator_gcp_bigtable_table_delete]
delete_table_task = BigtableTableDeleteOperator(
project_id=GCP_PROJECT_ID,
instance_id=CBT_INSTANCE_ID,
table_id=CBT_TABLE_ID,
task_id='delete_table_task',
)
delete_table_task2 = BigtableTableDeleteOperator(
instance_id=CBT_INSTANCE_ID,
table_id=CBT_TABLE_ID,
task_id='delete_table_task2',
)
# [END howto_operator_gcp_bigtable_table_delete]
wait_for_table_replication_task >> delete_table_task
wait_for_table_replication_task2 >> delete_table_task
wait_for_table_replication_task >> delete_table_task2
wait_for_table_replication_task2 >> delete_table_task2
create_instance_task \
>> create_table_task \
>> cluster_update_task \
>> delete_table_task
create_instance_task2 \
>> create_table_task2 \
>> cluster_update_task2 \
>> delete_table_task2
# Only delete instances after all tables are deleted
[delete_table_task, delete_table_task2] >> \
delete_instance_task >> delete_instance_task2
| 40.560386 | 179 | 0.761672 |
80603874856fe859addece1371fe85427515b91e | 343 | py | Python | core/workaround/reproducibility.py | zhangzhengde0225/SwinTrack | 526be17f8ef266cb924c6939bd8dda23e9b73249 | [
"MIT"
] | 143 | 2021-12-03T02:33:36.000Z | 2022-03-29T00:01:48.000Z | core/workaround/reproducibility.py | zhangzhengde0225/SwinTrack | 526be17f8ef266cb924c6939bd8dda23e9b73249 | [
"MIT"
] | 33 | 2021-12-03T10:32:05.000Z | 2022-03-31T02:13:55.000Z | core/workaround/reproducibility.py | zhangzhengde0225/SwinTrack | 526be17f8ef266cb924c6939bd8dda23e9b73249 | [
"MIT"
] | 24 | 2021-12-04T06:46:42.000Z | 2022-03-30T07:57:47.000Z | def seed_all_rng(seed=0):
import random
random.seed(seed)
import numpy as np
np.random.seed(random.randint(0, 255))
import torch
torch.manual_seed(random.randint(0, 255))
def enable_deterministic_computation():
import torch
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
| 24.5 | 45 | 0.720117 |
6e146515f3b0bd318657c54e74a4810d362c046a | 7,956 | py | Python | src/manager/om/script/gspylib/inspection/common/CheckResult.py | wotchin/openGauss-server | ebd92e92b0cfd76b121d98e4c57a22d334573159 | [
"MulanPSL-1.0"
] | 1 | 2020-06-30T15:00:50.000Z | 2020-06-30T15:00:50.000Z | src/manager/om/script/gspylib/inspection/common/CheckResult.py | wotchin/openGauss-server | ebd92e92b0cfd76b121d98e4c57a22d334573159 | [
"MulanPSL-1.0"
] | null | null | null | src/manager/om/script/gspylib/inspection/common/CheckResult.py | wotchin/openGauss-server | ebd92e92b0cfd76b121d98e4c57a22d334573159 | [
"MulanPSL-1.0"
] | null | null | null | # -*- coding:utf-8 -*-
#############################################################################
# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
#
# openGauss is licensed under Mulan PSL v2.
# You can use this software according to the terms
# and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
#
# http://license.coscl.org.cn/MulanPSL2
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the Mulan PSL v2 for more details.
#############################################################################
import os
import sys
import json
import time
import pwd
from gspylib.inspection.common import SharedFuncs
from gspylib.common.Common import DefaultValue
from gspylib.inspection.common.Log import LoggerFactory
class ResultStatus(object):
OK = "OK"
NA = "NA"
WARNING = "WARNING"
NG = "NG"
ERROR = "ERROR"
class LocalItemResult(object):
'''
the check result running on one host
'''
def __init__(self, name, host):
self.name = name
self.host = host
self.raw = ""
self.rst = ResultStatus.NA
self.val = ""
self.checkID = None
self.user = None
def output(self, outPath):
u"""
[HOST] {host}
[NAM] {name}
[RST] {rst}
[VAL]
{val}
[RAW]
{raw}
"""
val = self.val if self.val else ""
raw = self.raw if self.raw else ""
try:
content = self.output.__doc__.format(name=self.name, rst=self.rst,
host=self.host, val=val,
raw=raw)
except Exception:
content = self.output.__doc__.encode('utf-8').format(
name=self.name, rst=self.rst, host=self.host, val=val,
raw=raw).decode('utf-8', 'ignore')
fileName = "%s_%s_%s.out" % (self.name, self.host, self.checkID)
# output the result to local path
SharedFuncs.writeFile(fileName, content, outPath,
DefaultValue.KEY_FILE_MODE, self.user)
class ItemResult(object):
def __init__(self, name):
self.name = name
self._items = []
self.rst = ResultStatus.NA
self.standard = ""
self.suggestion = ""
self.category = 'other'
self.analysis = ""
def __iter__(self):
return iter(self._items)
def __getitem__(self, idx):
return self._items[idx]
def append(self, val):
self._items.append(val)
def formatOutput(self, detail=False):
result = u"{name:.<25}...............{rst:.>6}".format(name=self.name,
rst=self.rst)
result += u"\r\n%s\r\n" % self.analysis
return result
def getLocalItems(self):
return self._items
@staticmethod
def parse(output):
itemResult = None
localItemResult = None
host = None
idx = 0
for line in output.splitlines():
idx += 1
if (idx == len(
output.splitlines()) and localItemResult is not None):
itemResult.append(localItemResult)
current = line.strip()
if (not current):
continue
if (current.startswith('[HOST]')):
host = current.split()[1].strip()
if (current.startswith('[NAM]')):
name = current.split()[1].strip()
if (itemResult is None):
itemResult = ItemResult(name)
if (localItemResult is not None):
itemResult.append(localItemResult)
localItemResult = LocalItemResult(current.split()[1].strip(),
host)
if (current.startswith('[RST]')):
localItemResult.rst = current.split()[1].strip()
if (current.startswith('[VAL]')):
localItemResult.val = ItemResult.__parseMultiLine(
output.splitlines()[idx:])
if (current.startswith('[RAW]')):
localItemResult.raw = ItemResult.__parseMultiLine(
output.splitlines()[idx:])
return itemResult
@staticmethod
def __parseMultiLine(lines):
vals = []
starter = ('[HOST]', '[NAM]', '[RST]', '[VAL]', '[RAW]')
for line in lines:
current = line.strip()
if (current.startswith(starter)):
break
else:
vals.append(current)
return "\n".join(vals)
class CheckResult(object):
def __init__(self):
self._items = []
def __iter__(self):
return iter(self._items)
def __getitem__(self, idx):
return self._items[idx]
def append(self, val):
self._items.append(val)
def outputStatistic(self):
ok = 0
warning = 0
ng = 0
error = 0
for i in self._items:
if (i.rst == ResultStatus.ERROR):
error += 1
elif (i.rst == ResultStatus.NG):
ng += 1
elif (i.rst == ResultStatus.WARNING):
warning += 1
else:
ok += 1
okMsg = " Success:%s " % ok if ok > 0 else ""
warningMsg = " Warning:%s " % warning if warning > 0 else ""
ngMsg = " NG:%s " % ng if ng > 0 else ""
errorMsg = " Error:%s " % error if error > 0 else ""
result = ""
result += "Failed." if (ng + error) > 0 else "Success."
result += "\tAll check items run completed. Total:%s %s %s %s %s" % (
ok + warning + ng + error, okMsg, warningMsg, ngMsg, errorMsg)
return result
def outputRaw(self):
u"""
{date} [NAM] {name}
{date} [STD] {standard}
{date} [RST] {rst}
{val}
{date} [RAW]
{raw}
"""
result = ""
for i in self._items:
for j in i._items:
t = time.localtime(time.time())
dateString = time.strftime("%Y-%m-%d %H:%M:%S", t)
rst = j.rst
if (j.rst == ResultStatus.NA):
rst = "NONE"
elif (
j.rst == ResultStatus.WARNING
or j.rst == ResultStatus.ERROR):
rst = "NG"
result += self.outputRaw.__doc__.format(date=dateString,
name=j.name,
standard=i.standard,
rst=rst,
val=j.val, raw=j.raw)
result += "\r\n"
return result
def outputResult(self):
result = ""
for i in self._items:
result += i.formatOutput()
result += "\r\n"
result += self.outputStatistic()
return result
def outputJson(self):
resultDic = {}
for itemResult in self._items:
resultDic['name'] = itemResult.name
resultDic['category'] = itemResult.category
resultDic['std'] = itemResult.standard.decode('utf-8', 'ignore')
resultDic['rst'] = itemResult.rst
resultDic['analysis'] = itemResult.analysis
resultDic['suggestion'] = itemResult.suggestion
localList = []
for localitem in itemResult:
local = {}
local['host'] = localitem.host
local['rstd'] = localitem.val
local['raw'] = localitem.raw
localList.append(local)
resultDic['hosts'] = localList
return json.dumps(resultDic, indent=2)
| 32.341463 | 78 | 0.497235 |
407ac3983179ffbf0412a941bdb08e5d8b6176a0 | 399 | py | Python | firstNetwork.py | ggavriil/pyltes | 1c913d00deef5c08a8548548395b3ab2b5ceb0a1 | [
"MIT"
] | null | null | null | firstNetwork.py | ggavriil/pyltes | 1c913d00deef5c08a8548548395b3ab2b5ceb0a1 | [
"MIT"
] | null | null | null | firstNetwork.py | ggavriil/pyltes | 1c913d00deef5c08a8548548395b3ab2b5ceb0a1 | [
"MIT"
] | null | null | null | from pyltes.network import CellularNetwork
network = CellularNetwork()
network.Generator.createHexagonalBSdeployment(1666)
network.Generator.insertUErandomly(100)
network.connectUsersToTheBestBS()
network.Printer.drawHistogramOfUEThroughput("thrHistogram")
network.Printer.drawNetwork(fillMethod="SINR", filename="sinrMap")
network.Printer.drawNetwork(fillMethod="Sectors", filename="secorsMap")
| 33.25 | 71 | 0.847118 |
3a50ee6fd9b0afc8503355ac764d6c8bfa16899e | 8,395 | py | Python | whitetube/views.py | AmanGiri007/youtube | b58009581378bf74cabfd791691dee65c9516685 | [
"MIT"
] | null | null | null | whitetube/views.py | AmanGiri007/youtube | b58009581378bf74cabfd791691dee65c9516685 | [
"MIT"
] | null | null | null | whitetube/views.py | AmanGiri007/youtube | b58009581378bf74cabfd791691dee65c9516685 | [
"MIT"
] | null | null | null | from django.shortcuts import render,reverse
from django.http import HttpResponse,HttpResponseRedirect
from django.views.generic import View
from .models import Channel,Video,Comment
from .forms import ChannelForm,CommentForm,LoginForm,RegisterForm,NewVideoForm
from django.contrib.auth import logout,login,authenticate
import os
from wsgiref.util import FileWrapper
from django.core.files.storage import FileSystemStorage
import random,string
from django.contrib.auth.models import User
# Create your views here.
class HomeView(View):
def get(self,request):
most_recent_videos= Video.objects.order_by('-datetime')[:8]
most_recent_channels= Channel.objects.filter()
channel=False
if request.user.is_authenticated:
try:
channel=Channel.objects.filter(user__username=request.user).get()
except Channel.DoesNotExist:
channel=False
content={
'most_recent_videos':most_recent_videos,
'most_recent_channels':most_recent_channels,
'channel':channel,
'menu_active_item':'home'
}
return render(request,'whitetube/home.html',content)
class ChannelView(View):
    """A single channel's page: its videos, newest first. Login required."""

    def get(self, request, user):
        if not request.user.is_authenticated:
            return HttpResponseRedirect('/')
        channel = Channel.objects.filter(user__username=user).get()
        videos = Video.objects.filter(user__username=user).order_by('-datetime')
        return render(request, 'whitetube/channel.html', {
            'videos': videos,
            'channel': channel,
        })
class CreateChannelView(View):
    """Create the (single) channel belonging to the logged-in user."""

    def get(self, request):
        """Render the channel-creation form, or redirect when one already exists."""
        if not request.user.is_authenticated:
            return HttpResponseRedirect('login/')
        try:
            if Channel.objects.filter(user__username=request.user).get().channel_name != '':
                # User already owns a named channel -- nothing to create.
                return HttpResponseRedirect('/')
        except Channel.DoesNotExist:
            pass
        # No channel yet (or only an unnamed one): show the creation form.
        # BUG FIX: the original fell off the end of this method (returning
        # None, a server error) when a channel row existed with an empty
        # name; the form is now rendered on every non-redirect path.
        content = {
            'form': ChannelForm(),
            'channel': False,
        }
        return render(request, 'whitetube/createchannel.html', content)

    def post(self, request):
        """Persist a new channel with zero subscribers, then go home."""
        form = ChannelForm(request.POST)
        if form.is_valid():
            new_channel = Channel(
                channel_name=form.cleaned_data['channel_name'],
                user=request.user,
                subscribers=0,
            )
            new_channel.save()
            return HttpResponseRedirect('/')
        # Fallback for invalid form data (original placeholder text kept verbatim).
        return HttpResponse('This is Resgister view. POST request.')
class LoginView(View):
    """Username/password login form and handler."""

    def get(self, request):
        if request.user.is_authenticated:
            return HttpResponseRedirect('/')
        return render(request, 'whitetube/login.html', {'form': LoginForm()})

    def post(self, request):
        form = LoginForm(request.POST)
        if form.is_valid():
            user = authenticate(
                username=form.cleaned_data['username'],
                password=form.cleaned_data['password'],
            )
            if user is None:
                # Bad credentials: bounce back to the login form.
                return HttpResponseRedirect('login/')
            login(request, user)
            return HttpResponseRedirect('/')
        return HttpResponse('This is Login view.POST request.')
class LogoutView(View):
    """Terminate the current session and return to the landing page."""

    def get(self, request):
        # Flush the session via django.contrib.auth.logout, then go home.
        logout(request)
        return HttpResponseRedirect('/')
class VideoFileView(View):
    """Serve a stored video file as an HTTP attachment."""

    def get(self, request, file_name):
        """Return the raw bytes of ``file_name`` from the static videos directory."""
        base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
        # SECURITY: file_name arrives from the URL; strip any directory
        # components so a crafted name ("../../settings.py") cannot escape
        # the videos directory.
        safe_name = os.path.basename(file_name)
        video_path = os.path.join(
            base_dir, 'whitetube', 'static', 'whitetube', 'videos', safe_name)
        wrapper = FileWrapper(open(video_path, 'rb'))
        # BUG FIX: the original sent the invalid MIME string 'video.mp4';
        # the registered media type is 'video/mp4'.
        response = HttpResponse(wrapper, content_type='video/mp4')
        response['Content-Disposition'] = 'attachment;filename={}'.format(safe_name)
        return response
class VideoView(View):
    """Watch page for one video: the video itself plus comments and a comment form."""

    def get(self, request, id):
        video = Video.objects.get(id=id)
        # TODO(review): the host is hard-coded; prefer request.build_absolute_uri
        # or reverse() so the download link survives deployment off localhost.
        video.path = 'http://localhost:8000/get_video/' + video.path
        content = {'video': video}
        if request.user.is_authenticated:
            content['form'] = CommentForm()
            content['comments'] = (
                Comment.objects.filter(video__id=id).order_by('-datetime')[:5])
            try:
                # Single lookup (the original ran the same .get() query twice).
                channel = Channel.objects.filter(user__username=request.user).get()
                if channel.channel_name != '':
                    content['channel'] = channel
            except Channel.DoesNotExist:
                # Viewer has no channel of their own; render without one.
                pass
        return render(request, 'whitetube/video.html', content)
class CommentView(View):
    """Persist a comment posted from the video watch page."""

    def post(self, request):
        # Bind the submitted HTML form to the Django form class.
        form = CommentForm(request.POST)
        if not form.is_valid():
            return HttpResponse('This is Register view. POST Request.')
        video_id = request.POST['video']
        comment = Comment(
            text=form.cleaned_data['text'],
            user=request.user,
            video=Video.objects.get(id=video_id),
        )
        comment.save()
        # Back to the video the comment belongs to.
        return HttpResponseRedirect('/video/{}'.format(str(video_id)))
class RegisterView(View):
    """Account-registration form and handler."""

    def get(self, request):
        if request.user.is_authenticated:
            # Already signed in -- nothing to register.
            print('already logged in. Redirecting.')
            print(request.user)
            return HttpResponseRedirect('/')
        return render(request, 'whitetube/register.html', {'form': RegisterForm()})

    def post(self, request):
        form = RegisterForm(request.POST)
        if not form.is_valid():
            return HttpResponse('This is Register view. POST Request.')
        print(form.cleaned_data['username'])
        account = User(
            username=form.cleaned_data['username'],
            email=form.cleaned_data['email'],
        )
        # set_password hashes the credential before it is stored.
        account.set_password(form.cleaned_data['password'])
        account.save()
        return HttpResponseRedirect('login/')
class NewVideo(View):
    """Upload form for a new video on the logged-in user's channel."""

    template_name = 'whitetube/new_video.html'

    def get(self, request):
        """Show the upload form; requires a logged-in user with a named channel."""
        if not request.user.is_authenticated:
            return HttpResponseRedirect('register')
        try:
            channel = Channel.objects.filter(user__username=request.user).get()
        except Channel.DoesNotExist:
            return HttpResponseRedirect('/')
        # BUG FIX: the original returned None (a server error) when a channel
        # row existed but had an empty name; send those users home as well.
        if channel.channel_name == "":
            return HttpResponseRedirect('/')
        return render(request, self.template_name,
                      {'form': NewVideoForm(), 'channel': channel})

    def post(self, request):
        """Store the uploaded file under a randomized name and create a Video row."""
        form = NewVideoForm(request.POST, request.FILES)
        if not form.is_valid():
            return HttpResponse('Your form is not valid. Go back and try again.')
        upload = form.cleaned_data['file']
        # Prefix with 10 random characters so repeated uploads of the same
        # filename do not collide on disk.
        prefix = ''.join(random.choices(string.ascii_uppercase + string.digits, k=10))
        path = prefix + upload.name
        storage = FileSystemStorage(
            location=os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
        saved_name = storage.save("whitetube/static/whitetube/videos/" + path, upload)
        print(storage)
        print(saved_name)
        print(storage.url(saved_name))
        video = Video(
            title=form.cleaned_data['title'],
            description=form.cleaned_data['description'],
            user=request.user,
            path=path,
        )
        video.save()
        return HttpResponseRedirect('/video/{}'.format(video.id))
5e33a819a29b34100b9949e221376a9c06da7c08 | 547 | py | Python | Algorithms/Medium/718. Maximum Length of Repeated Subarray/answer.py | KenWoo/Algorithm | 4012a2f0a099a502df1e5df2e39faa75fe6463e8 | [
"Apache-2.0"
] | null | null | null | Algorithms/Medium/718. Maximum Length of Repeated Subarray/answer.py | KenWoo/Algorithm | 4012a2f0a099a502df1e5df2e39faa75fe6463e8 | [
"Apache-2.0"
] | null | null | null | Algorithms/Medium/718. Maximum Length of Repeated Subarray/answer.py | KenWoo/Algorithm | 4012a2f0a099a502df1e5df2e39faa75fe6463e8 | [
"Apache-2.0"
] | null | null | null | from typing import List
class Solution:
def findLength(self, A: List[int], B: List[int]) -> int:
memo = [[0] * (len(B) + 1) for _ in range(len(A) + 1)]
res = 0
for i in range(len(A)-1, -1, -1):
for j in range(len(B)-1, -1, -1):
if A[i] == B[j]:
memo[i][j] = memo[i+1][j+1] + 1
res = max(res, memo[i][j])
return res
if __name__ == "__main__":
s = Solution()
result = s.findLength([1, 2, 3, 2, 1], [3, 2, 1, 4, 7])
print(result)
| 27.35 | 62 | 0.447898 |
a1fd375d327d8086d8d4442c56400a114e4450b4 | 4,960 | py | Python | RI/flask_server/tapi_server/models/tapi_photonic_media_media_channel_pool_capability_pac.py | arthurMll/TAPI | e1171bb139c6791a953af09cfc2bc7ad928da73d | [
"Apache-2.0"
] | 57 | 2018-04-09T08:56:18.000Z | 2022-03-23T08:31:06.000Z | RI/flask_server/tapi_server/models/tapi_photonic_media_media_channel_pool_capability_pac.py | arthurMll/TAPI | e1171bb139c6791a953af09cfc2bc7ad928da73d | [
"Apache-2.0"
] | 143 | 2016-06-08T04:09:54.000Z | 2018-02-23T10:45:59.000Z | RI/flask_server/tapi_server/models/tapi_photonic_media_media_channel_pool_capability_pac.py | arthurMll/TAPI | e1171bb139c6791a953af09cfc2bc7ad928da73d | [
"Apache-2.0"
] | 64 | 2018-03-07T07:55:17.000Z | 2022-03-28T07:14:28.000Z | # coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from tapi_server.models.base_model_ import Model
from tapi_server.models.tapi_photonic_media_spectrum_band import TapiPhotonicMediaSpectrumBand # noqa: F401,E501
from tapi_server import util
class TapiPhotonicMediaMediaChannelPoolCapabilityPac(Model):
"""NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
Do not edit the class manually.
"""
def __init__(self, available_spectrum=None, supportable_spectrum=None, occupied_spectrum=None): # noqa: E501
"""TapiPhotonicMediaMediaChannelPoolCapabilityPac - a model defined in OpenAPI
:param available_spectrum: The available_spectrum of this TapiPhotonicMediaMediaChannelPoolCapabilityPac. # noqa: E501
:type available_spectrum: List[TapiPhotonicMediaSpectrumBand]
:param supportable_spectrum: The supportable_spectrum of this TapiPhotonicMediaMediaChannelPoolCapabilityPac. # noqa: E501
:type supportable_spectrum: List[TapiPhotonicMediaSpectrumBand]
:param occupied_spectrum: The occupied_spectrum of this TapiPhotonicMediaMediaChannelPoolCapabilityPac. # noqa: E501
:type occupied_spectrum: List[TapiPhotonicMediaSpectrumBand]
"""
self.openapi_types = {
'available_spectrum': List[TapiPhotonicMediaSpectrumBand],
'supportable_spectrum': List[TapiPhotonicMediaSpectrumBand],
'occupied_spectrum': List[TapiPhotonicMediaSpectrumBand]
}
self.attribute_map = {
'available_spectrum': 'available-spectrum',
'supportable_spectrum': 'supportable-spectrum',
'occupied_spectrum': 'occupied-spectrum'
}
self._available_spectrum = available_spectrum
self._supportable_spectrum = supportable_spectrum
self._occupied_spectrum = occupied_spectrum
@classmethod
def from_dict(cls, dikt) -> 'TapiPhotonicMediaMediaChannelPoolCapabilityPac':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The tapi.photonic.media.MediaChannelPoolCapabilityPac of this TapiPhotonicMediaMediaChannelPoolCapabilityPac. # noqa: E501
:rtype: TapiPhotonicMediaMediaChannelPoolCapabilityPac
"""
return util.deserialize_model(dikt, cls)
@property
def available_spectrum(self):
"""Gets the available_spectrum of this TapiPhotonicMediaMediaChannelPoolCapabilityPac.
none # noqa: E501
:return: The available_spectrum of this TapiPhotonicMediaMediaChannelPoolCapabilityPac.
:rtype: List[TapiPhotonicMediaSpectrumBand]
"""
return self._available_spectrum
@available_spectrum.setter
def available_spectrum(self, available_spectrum):
"""Sets the available_spectrum of this TapiPhotonicMediaMediaChannelPoolCapabilityPac.
none # noqa: E501
:param available_spectrum: The available_spectrum of this TapiPhotonicMediaMediaChannelPoolCapabilityPac.
:type available_spectrum: List[TapiPhotonicMediaSpectrumBand]
"""
self._available_spectrum = available_spectrum
@property
def supportable_spectrum(self):
"""Gets the supportable_spectrum of this TapiPhotonicMediaMediaChannelPoolCapabilityPac.
none # noqa: E501
:return: The supportable_spectrum of this TapiPhotonicMediaMediaChannelPoolCapabilityPac.
:rtype: List[TapiPhotonicMediaSpectrumBand]
"""
return self._supportable_spectrum
@supportable_spectrum.setter
def supportable_spectrum(self, supportable_spectrum):
"""Sets the supportable_spectrum of this TapiPhotonicMediaMediaChannelPoolCapabilityPac.
none # noqa: E501
:param supportable_spectrum: The supportable_spectrum of this TapiPhotonicMediaMediaChannelPoolCapabilityPac.
:type supportable_spectrum: List[TapiPhotonicMediaSpectrumBand]
"""
self._supportable_spectrum = supportable_spectrum
@property
def occupied_spectrum(self):
"""Gets the occupied_spectrum of this TapiPhotonicMediaMediaChannelPoolCapabilityPac.
none # noqa: E501
:return: The occupied_spectrum of this TapiPhotonicMediaMediaChannelPoolCapabilityPac.
:rtype: List[TapiPhotonicMediaSpectrumBand]
"""
return self._occupied_spectrum
@occupied_spectrum.setter
def occupied_spectrum(self, occupied_spectrum):
"""Sets the occupied_spectrum of this TapiPhotonicMediaMediaChannelPoolCapabilityPac.
none # noqa: E501
:param occupied_spectrum: The occupied_spectrum of this TapiPhotonicMediaMediaChannelPoolCapabilityPac.
:type occupied_spectrum: List[TapiPhotonicMediaSpectrumBand]
"""
self._occupied_spectrum = occupied_spectrum
| 40 | 140 | 0.740927 |
aa1a2dbd58b35f22a3e7ceb75c536ed9c763d43b | 9,286 | py | Python | qiskit_metal/_gui/widgets/edit_component/table_model_options.py | wdczdj/qiskit-metal | c77805f66da60021ef8d10d668715c1dc2ebcd1d | [
"Apache-2.0"
] | null | null | null | qiskit_metal/_gui/widgets/edit_component/table_model_options.py | wdczdj/qiskit-metal | c77805f66da60021ef8d10d668715c1dc2ebcd1d | [
"Apache-2.0"
] | null | null | null | qiskit_metal/_gui/widgets/edit_component/table_model_options.py | wdczdj/qiskit-metal | c77805f66da60021ef8d10d668715c1dc2ebcd1d | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Handles editing a QComponent."""
import ast
import inspect
from inspect import getfile, signature
from pathlib import Path
from typing import TYPE_CHECKING, Union
from PySide2 import QtCore, QtGui, QtWidgets
from PySide2.QtCore import QAbstractTableModel, QModelIndex, Qt
from PySide2.QtGui import QFont
__all__ = ['parse_param_from_str']
if TYPE_CHECKING:
from .component_widget import ComponentWidget
from ....designs import QDesign
class QTableModel_Options(QAbstractTableModel):
"""Table model for the options of a given component.
This class inherits from the `QAbstractTableModel` class
MVC class
See https://doc.qt.io/qt-5/qabstracttablemodel.html
"""
# __timer_interval = 500 # ms
def __init__(self,
gui: 'MetalGUI',
parent: 'ComponentWidget' = None,
view=None):
"""
Args:
gui (MetalGUI): The GUI
parent (ComponentWidget): The parent ComponentWidget. Defaults to None.
view (object): The view. Defaults to None.
"""
super().__init__(parent=parent)
self.logger = gui.logger
self.gui = gui
self._row_count = -1
self._view = view
# self._create_timer()
self.columns = ['Name', 'Value', 'Parsed value']
@property
def design(self) -> 'QDesign':
"""Returns the QDesign."""
return self.gui.design
@property
def component(self):
"""Returns the component."""
return self.parent().component
def refresh(self):
"""Force refresh.
Completly rebuild the model.
"""
self.modelReset.emit()
def rowCount(self, parent: QModelIndex = None):
"""Returns the number of rows.
Args:
parent (QModelIndex): Unused. Defaults to None.
Returns:
int: The number of rows
"""
if self.component is None:
if self._view:
self._view.show_placeholder_text()
return 0
if self._view:
self._view.hide_placeholder_text()
return len(self.component.options)
def columnCount(self, parent: QModelIndex = None):
"""Returns the number of columns.
Args:
parent (QModelIndex): Unused. Defaults to None.
Returns:
int: The number of columns
"""
return 3
def headerData(self, section, orientation, role=Qt.DisplayRole):
"""Set the headers to be displayed.
Args:
section (int): Section number
orientation (Qt orientation): Section orientation
role (Qt display role): Display role. Defaults to DisplayRole.
Returns:
str: The header data, or None if not found
"""
if self.component is None:
return None
if role == Qt.DisplayRole:
if orientation == Qt.Horizontal:
if section < self.columnCount():
return self.columns[section]
elif role == Qt.FontRole:
if section == 0:
font = QFont()
font.setBold(True)
return font
def flags(self, index: QModelIndex):
"""Set the item flags at the given index. Seems like we're implementing
this function just to see how it's done, as we manually adjust each
treeView to have NoEditTriggers.
Args:
index (QModelIndex): The index
Returns:
Qt flags: Flags from Qt
"""
# https://doc.qt.io/qt-5/qt.html#ItemFlag-enum
if not index.isValid():
return Qt.ItemIsEnabled
# Returns the item flags for the given index.
# The base class implementation returns a combination of flags that enables
# the item (ItemIsEnabled) and allows it to be selected (ItemIsSelectable).
flags = QAbstractTableModel.flags(self, index)
if index.column() == 1:
flags |= Qt.ItemIsEditable
return Qt.ItemFlags(flags)
# https://doc.qt.io/qt-5/qt.html#ItemDataRole-enum
def data(self, index: QModelIndex, role=Qt.DisplayRole):
"""Depending on the index and role given, return data. If not returning
data, return None (PySide equivalent of QT's "invalid QVariant").
Returns:
str: Data matching theindex and role.
"""
if not index.isValid():
return
# if not 0 <= index.row() < self.rowCount():
# return None
if self.component is None:
return
# The key data to be rendered in the form of text. (QString)
if role == Qt.DisplayRole:
row = index.row()
column = index.column()
data = self.component.options
# There's probably a better way to access the data here
if column == 0:
data = list(data.keys())
elif column in [1, 2]:
data = list(data.values())
data = data[row]
if column == 2:
if isinstance(data, dict):
return 'This is a dictionary.'
else:
return str(self.design.parse_value(data))
else:
return str(data)
# The data in a form suitable for editing in an editor. (QString)
elif role == Qt.EditRole:
return self.data(index, QtCore.Qt.DisplayRole)
# The font used for items rendered with the default delegate. (QFont)
elif role == Qt.FontRole:
if index.column() == 0:
font = QtGui.QFont()
font.setBold(True)
return font
def setData(self,
index: QtCore.QModelIndex,
value,
role=QtCore.Qt.EditRole) -> bool:
"""Sets the role data for the item at index to value. The dataChanged()
signal should be emitted if the data was successfully set.
Arguments:
index (QtCore.QModelIndex): The index
value (str): The value
role (QtCore.Qt.EditRole): The edit role
Returns:
bool: Returns true if successful; otherwise returns false.
"""
# TODO: handle nested dictionaries
# See v0.1: get_nested_dict_item, pop_nested_dict_item
# TODO: ability to add dictionary such as to add pins
if not index.isValid():
return False
elif role == QtCore.Qt.EditRole:
if index.column() == 1:
self._value = value # QString
value = str(value)
data = self.component.options # type: dict
key, old_val = list(data.items())[index.row()]
# When we do nothing
if isinstance(old_val, dict):
self.logger.error(
'You selected a dicitonary this'
'cannot be edited directly edit its items.')
return False
if old_val == value:
return False
# When we do something to change the value
# try:
# TODO: should retry and if error then reset the value
if 1:
self.logger.info(
f'Component options: Old value={old_val}; New value={value};'
)
if isinstance(old_val, str):
data[key] = str(value)
else:
processed_value, used_ast = parse_param_from_str(value)
self.logger.info(
f' Used paring: Old value type={type(old_val)}; '
f'New value type={type(processed_value)}; New value={processed_value};'
f'; Used ast={used_ast}')
data[key] = processed_value
self.component.rebuild()
self.gui.refresh()
# except and finally restore the value
return True
# elif role == Qt.CheckStateRole:
return False
def parse_param_from_str(text):
"""Attempt to parse a value from a string using ast.
Args:
text (str): String to parse
Return:
tuple: value, used_ast
Raises:
Exception: An error occurred
"""
text = str(text).strip()
value = text
used_ast = False
try: # crude way to handle list and values
value = ast.literal_eval(text)
used_ast = True
except Exception as exception:
pass
# print(exception)
return value, used_ast
| 31.265993 | 100 | 0.56009 |
62a45ec747b3bc31bc5fe433962507ae0585a101 | 16,490 | py | Python | tools/TF2CaffeModel/itl/layers/inputs.py | chenaili6/FeatherCNN | 52cd8c8749ed584461a88b1f04749bb35a48f9a6 | [
"Apache-2.0"
] | 4 | 2018-05-14T09:00:33.000Z | 2021-05-14T08:11:54.000Z | tools/TF2CaffeModel/itl/layers/inputs.py | nihui/FeatherCNN | 2805f371bd8f33ef742cc9523979f29295d926fb | [
"Apache-2.0"
] | null | null | null | tools/TF2CaffeModel/itl/layers/inputs.py | nihui/FeatherCNN | 2805f371bd8f33ef742cc9523979f29295d926fb | [
"Apache-2.0"
] | null | null | null | #! /usr/bin/python
# -*- coding: utf-8 -*-
import tensorflow as tf
from itl.layers.core import Layer
from itl.layers.core import LayersConfig
from itl import logging
__all__ = [
'InputLayer',
'OneHotInputLayer',
'Word2vecEmbeddingInputlayer',
'EmbeddingInputlayer',
'AverageEmbeddingInputlayer',
]
class InputLayer(Layer):
    """
    The :class:`InputLayer` class is the starting layer of a neural network.

    Parameters
    ----------
    inputs : placeholder or tensor
        The input of a network.
    name : str
        A unique layer name.

    """

    def __init__(self, inputs, name='input'):
        super(InputLayer, self).__init__(prev_layer=inputs, name=name)

        logging.info("InputLayer %s: %s" % (self.name, inputs.get_shape()))

        self.outputs = inputs
        self._add_layers(self.outputs)
        # Flag read by the caffe-export machinery -- presumably marks this
        # layer as needing a model rebuild; confirm against the Layer base.
        self.need_update_caffe_model = True

    def to_caffe_prototxt(self, recursive=True):
        """Register this layer's input blob names and shapes on the caffe model.

        Uses the DEPRECATED ``input``/``input_dim`` fields of NetParameter.
        4-D TensorFlow NHWC shapes are re-ordered to caffe's NCHW; unknown
        (None) dimensions default to 1.
        """
        # change the name of NetParameter to self.name
        self.get_caffe_model().name = self.name

        if self.outputs is None:
            print('error: no outputs for this InputLayer')
            exit(-1)

        # CONSISTENCY FIX: the original single-tensor branch read the shape
        # from self.inputs while the list branch read it from the output
        # tensors; since self.outputs is assigned from inputs the result is
        # identical, but both branches are now unified on self.outputs.
        outputs = self.outputs if isinstance(self.outputs, list) else [self.outputs]
        for ipt in outputs:
            # append NetParameter.input
            self.get_caffe_model().input.append(ipt.name)
            # append NetParameter.input_dim (NCHW for 4-D shapes)
            shape = [(int(dim) if dim is not None else 1) for dim in ipt.shape.as_list()]
            if len(shape) == 4:
                shape = [shape[0], shape[3], shape[1], shape[2]]
            self.get_caffe_model().input_dim.extend(shape)
class OneHotInputLayer(Layer):
    """Starting layer that one-hot encodes integer indices via ``tf.one_hot``.

    Parameters
    ----------
    inputs : placeholder or tensor
        The input of a network.
    depth : None or int
        Size of the one-hot dimension; a rank-N input yields a rank-N+1
        output, with the new axis created at ``axis`` (default: appended last).
    on_value : None or number
        Value placed at "on" positions (defaults to 1).
    off_value : None or number
        Value placed at "off" positions (defaults to 0).
    axis : None or int
        The axis at which the new dimension is inserted.
    dtype : None or TensorFlow dtype
        The data type, None means tf.float32.
    name : str
        A unique layer name.

    Examples
    ---------
    >>> import tensorflow as tf
    >>> import itl as tl
    >>> x = tf.placeholder(tf.int32, shape=[None])
    >>> net = tl.layers.OneHotInputLayer(x, depth=8, name='one_hot_encoding')
    (?, 8)

    """

    def __init__(self, inputs=None, depth=None, on_value=None, off_value=None, axis=None, dtype=None, name='input'):
        super(OneHotInputLayer, self).__init__(prev_layer=inputs, name=name)

        logging.info("OneHotInputLayer %s: %s" % (self.name, inputs.get_shape()))

        if depth is None:
            # Without a depth, the width of the encoding is unknown.
            raise RuntimeError(self.__class__.__name__ + ": depth == None the number of output units is undefined")

        encoded = tf.one_hot(inputs, depth, on_value=on_value, off_value=off_value, axis=axis, dtype=dtype)
        self.outputs = encoded

        self._add_layers(self.outputs)
class Word2vecEmbeddingInputlayer(Layer):
    """Word2vec input layer: a trainable embedding matrix with an NCE loss head.

    Words enter as integer indices; the output is the embedded word vector.

    Parameters
    ----------
    inputs : placeholder or tensor
        Integer word indices, 2D tensor : [batch_size, num_steps(num_words)].
    train_labels : placeholder
        Integer word labels consumed by the NCE loss.
    vocabulary_size : int
        The size of vocabulary, number of words.
    embedding_size : int
        The number of embedding dimensions.
    num_sampled : int
        The number of negative examples for NCE loss.
    nce_loss_args : dictionary
        The arguments for tf.nn.nce_loss().
    E_init : initializer
        The initializer for the embedding matrix.
    E_init_args : dictionary
        The arguments for the embedding initializer.
    nce_W_init : initializer
        The initializer for the nce decoder weight matrix.
    nce_W_init_args : dictionary
        The arguments for the nce decoder weight matrix initializer.
    nce_b_init : initializer
        The initializer for the nce decoder bias vector.
    nce_b_init_args : dictionary
        The arguments for the nce decoder bias vector initializer.
    name : str
        A unique layer name.

    Attributes
    ----------
    nce_cost : Tensor
        The NCE loss.
    outputs : Tensor
        The embedding layer outputs.
    normalized_embeddings : Tensor
        L2-normalized embedding matrix.

    Examples
    --------
    >>> batch_size = 8
    >>> train_inputs = tf.placeholder(tf.int32, shape=(batch_size))
    >>> train_labels = tf.placeholder(tf.int32, shape=(batch_size, 1))
    >>> net = tl.layers.Word2vecEmbeddingInputlayer(inputs=train_inputs,
    ...     train_labels=train_labels, vocabulary_size=1000, embedding_size=200,
    ...     num_sampled=64, name='word2vec')
    (8, 200)
    >>> cost = net.nce_cost
    >>> train_op = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost, var_list=net.all_params)

    References
    ----------
    `tensorflow/examples/tutorials/word2vec/word2vec_basic.py <https://github.com/tensorflow/tensorflow/blob/r0.7/tensorflow/examples/tutorials/word2vec/word2vec_basic.py>`__

    """

    def __init__(
            self,
            inputs,
            train_labels=None,
            vocabulary_size=80000,
            embedding_size=200,
            num_sampled=64,
            nce_loss_args=None,
            E_init=tf.random_uniform_initializer(minval=-1.0, maxval=1.0),
            E_init_args=None,
            nce_W_init=tf.truncated_normal_initializer(stddev=0.03),
            nce_W_init_args=None,
            nce_b_init=tf.constant_initializer(value=0.0),
            nce_b_init_args=None,
            name='word2vec',
    ):
        super(Word2vecEmbeddingInputlayer, self).__init__(
            prev_layer=inputs, nce_loss_args=nce_loss_args, E_init_args=E_init_args, nce_W_init_args=nce_W_init_args,
            nce_b_init_args=nce_b_init_args, name=name
        )

        logging.info("Word2vecEmbeddingInputlayer %s: (%d, %d)" % (self.name, vocabulary_size, embedding_size))

        with tf.variable_scope(name):
            # Embedding lookup: slicing the matrix by word id is far cheaper
            # than a one-hot matmul; each row is one word's representation.
            embeddings = tf.get_variable(
                name='embeddings', shape=(vocabulary_size, embedding_size), initializer=E_init,
                dtype=LayersConfig.tf_dtype, **self.E_init_args
            )
            embedded_words = tf.nn.embedding_lookup(embeddings, self.inputs)

            # Decoder variables for negative sampling (NCE).
            nce_weights = tf.get_variable(
                name='nce_weights', shape=(vocabulary_size, embedding_size), initializer=nce_W_init,
                dtype=LayersConfig.tf_dtype, **self.nce_W_init_args
            )
            nce_biases = tf.get_variable(
                name='nce_biases', shape=(vocabulary_size), initializer=nce_b_init, dtype=LayersConfig.tf_dtype,
                **self.nce_b_init_args
            )

            # Mean NCE loss over the batch; tf.nn.nce_loss draws a fresh
            # sample of negative labels each time the loss is evaluated.
            self.nce_cost = tf.reduce_mean(
                tf.nn.nce_loss(
                    weights=nce_weights, biases=nce_biases, inputs=embedded_words, labels=train_labels,
                    num_sampled=num_sampled, num_classes=vocabulary_size, **self.nce_loss_args
                )
            )

            self.outputs = embedded_words
            self.normalized_embeddings = tf.nn.l2_normalize(embeddings, 1)

        self._add_layers(self.outputs)
        self._add_params([embeddings, nce_weights, nce_biases])
class EmbeddingInputlayer(Layer):
    """Look-up-table input layer for word embeddings.

    Word content is accessed by integer index; the output is the embedded
    word vector. To *train* an embedding matrix, use
    :class:`Word2vecEmbeddingInputlayer`; a pre-trained matrix can simply
    be assigned into this layer's parameters.

    Parameters
    ----------
    inputs : placeholder
        Integer word indices, 2D tensor : (batch_size, num_steps(num_words)).
    vocabulary_size : int
        The size of vocabulary, number of words.
    embedding_size : int
        The number of embedding dimensions.
    E_init : initializer
        The initializer for the embedding matrix.
    E_init_args : dictionary
        The arguments for the embedding matrix initializer.
    name : str
        A unique layer name.

    Attributes
    ----------
    outputs : tensor
        3D tensor shaped (batch_size, num_steps(num_words), embedding_size).

    Examples
    --------
    >>> batch_size = 8
    >>> x = tf.placeholder(tf.int32, shape=(batch_size, ))
    >>> net = tl.layers.EmbeddingInputlayer(inputs=x, vocabulary_size=1000, embedding_size=50, name='embed')
    (8, 50)

    """

    def __init__(
            self,
            inputs,
            vocabulary_size=80000,
            embedding_size=200,
            E_init=tf.random_uniform_initializer(-0.1, 0.1),
            E_init_args=None,
            name='embedding',
    ):
        super(EmbeddingInputlayer, self).__init__(prev_layer=inputs, E_init_args=E_init_args, name=name)

        logging.info("EmbeddingInputlayer %s: (%d, %d)" % (self.name, vocabulary_size, embedding_size))

        with tf.variable_scope(name):
            lookup_table = tf.get_variable(
                name='embeddings', shape=(vocabulary_size, embedding_size), initializer=E_init,
                dtype=LayersConfig.tf_dtype, **self.E_init_args
            )
            self.outputs = tf.nn.embedding_lookup(lookup_table, self.inputs)

        self._add_layers(self.outputs)
        self._add_params(lookup_table)
class AverageEmbeddingInputlayer(Layer):
    """Input layer that averages the embeddings of its (padded) word inputs.

    Often used as the input layer of models such as DAN [1] and FastText [2].

    Parameters
    ----------
    inputs : placeholder or tensor
        Integer word indices, 2D tensor: (batch_size, num_steps(num_words)).
    vocabulary_size : int
        The size of vocabulary.
    embedding_size : int
        The dimension of the embedding vectors.
    pad_value : int
        The scalar padding value used in inputs, 0 as default.
    embeddings_initializer : initializer
        The initializer of the embedding matrix.
    embeddings_kwargs : None or dictionary
        The arguments to get embedding matrix variable.
    name : str
        A unique layer name.

    References
    ----------
    - [1] Iyyer, M., Manjunatha, V., Boyd-Graber, J., & Daum'e III, H. (2015). Deep Unordered Composition Rivals Syntactic Methods for Text Classification. In Association for Computational Linguistics.
    - [2] Joulin, A., Grave, E., Bojanowski, P., & Mikolov, T. (2016). `Bag of Tricks for Efficient Text Classification. <http://arxiv.org/abs/1607.01759>`__

    Examples
    ---------
    >>> batch_size = 8
    >>> length = 5
    >>> x = tf.placeholder(tf.int32, shape=(batch_size, length))
    >>> net = tl.layers.AverageEmbeddingInputlayer(x, vocabulary_size=1000, embedding_size=50, name='avg')
    (8, 50)

    """

    def __init__(
            self,
            inputs,
            vocabulary_size,
            embedding_size,
            pad_value=0,
            embeddings_initializer=tf.random_uniform_initializer(-0.1, 0.1),
            embeddings_kwargs=None,
            name='average_embedding',
    ):
        super(AverageEmbeddingInputlayer,
              self).__init__(prev_layer=inputs, embeddings_kwargs=embeddings_kwargs, name=name)

        logging.info("AverageEmbeddingInputlayer %s: (%d, %d)" % (self.name, vocabulary_size, embedding_size))

        if inputs.get_shape().ndims != 2:
            raise ValueError('inputs must be of size batch_size * batch_sentence_length')

        with tf.variable_scope(name):
            self.embeddings = tf.get_variable(
                name='embeddings', shape=(vocabulary_size, embedding_size), initializer=embeddings_initializer,
                dtype=LayersConfig.tf_dtype, **self.embeddings_kwargs
            )

            word_embeddings = tf.nn.embedding_lookup(
                self.embeddings,
                self.inputs,
                name='word_embeddings',
            )
            # Zero out the vectors at padding positions so they contribute
            # nothing to the per-sentence sum.
            masks = tf.not_equal(self.inputs, pad_value, name='masks')
            word_embeddings *= tf.cast(
                tf.expand_dims(masks, axis=-1),
                dtype=LayersConfig.tf_dtype,
            )
            sum_word_embeddings = tf.reduce_sum(word_embeddings, axis=1)

            # Divide by the count of real (non-padding) words per sentence.
            sentence_lengths = tf.count_nonzero(
                masks,
                axis=1,
                keepdims=True,
                dtype=LayersConfig.tf_dtype,
                name='sentence_lengths',
            )
            sentence_embeddings = tf.divide(
                sum_word_embeddings,
                sentence_lengths + 1e-8,  # epsilon guards against division by zero
                name='sentence_embeddings'
            )

        self.outputs = sentence_embeddings
        self._add_layers(self.outputs)
        self._add_params(self.embeddings)
if __name__ == "__main__":
    # Ad-hoc smoke test: wrap a placeholder in two layers and dump the
    # resulting network as a Caffe prototxt / model description.
    x = tf.placeholder(tf.float32, [None, 384], name='x')
    network = InputLayer(x, name='input_layer')
    network = Layer(network, name='some op')
    network.dump_all_layers_to_caffe_prototxt()
    print(str(network.get_caffe_model()))
| 37.307692 | 201 | 0.633535 |
d9d672869d661513e0679f3497246b67eb71d2eb | 7,208 | py | Python | extract_gear/index.py | kamerons/dde-extract-gear | 44464ae470bd5de6279d32e3587b469ce006ea42 | [
"Apache-2.0"
] | null | null | null | extract_gear/index.py | kamerons/dde-extract-gear | 44464ae470bd5de6279d32e3587b469ce006ea42 | [
"Apache-2.0"
] | null | null | null | extract_gear/index.py | kamerons/dde-extract-gear | 44464ae470bd5de6279d32e3587b469ce006ea42 | [
"Apache-2.0"
] | null | null | null | import os
from datetime import datetime
from extract_gear.cli import Cli
from folder.folder import Folder
# Creates an index file of the form:
# [
# {
# 'file_name': 'name.png',
# 'type': 'electric',
# 'num': 80
# },
# {}...
# ]
class Index:
    """Interactive CLI tool that builds a JSON index describing stat-crop images.

    The resulting file is a list of entries of the form::

        {"file_name": "name.png", "type": "electric", "num": 80}

    Work happens in two stages: collection (label every image in
    ``Folder.STAT_CROP_FOLDER``) and correction (review and fix labels).
    Special commands are honoured at every prompt: ``reshow`` re-displays the
    current image, ``correct`` rewinds one entry, ``break`` saves progress
    and exits.
    """

    NONE = "none"
    STAT_OPTIONS = ['base', 'fire', 'electric', 'poison', 'hero_hp', 'hero_dmg', 'hero_rate', 'hero_speed',
                    'offense', 'defense', 'tower_hp', 'tower_dmg', 'tower_rate', 'tower_range', NONE]
    STAT_TYPE_KEY = "type"
    FILE_NAME_KEY = "file_name"
    STAT_VALUE_KEY = "num"

    def __init__(self, args, api_builtin, api_curses, api_cv2, api_json, api_time):
        # The api_* objects are injected wrappers around the real libraries,
        # so the tool can be exercised without real I/O.
        self.api_builtin = api_builtin
        self.api_cv2 = api_cv2
        self.api_curses = api_curses
        self.api_json = api_json
        self.api_time = api_time
        self.file = args.file  # optional previously saved index to resume from
        self.img = []
        self.data_index = []
        self.stage = 0  # 0 = collection, 1 = correction
        self.cli = None
        self.idx = 0

    def run(self):
        """Entry point: run manual index creation inside a curses wrapper."""
        self.api_builtin.begin_message("manual index creation")
        self.api_curses.wrapper(self.create_index_manual)

    def create_index_manual(self, stdscr):
        """Drive the two-stage (collect, then correct) labelling workflow."""
        self.api_curses.start_color()
        self.api_curses.use_default_colors()
        for i in range(0, self.api_curses.get_COLORS()):
            self.api_curses.init_pair(i + 1, i, -1)
        self.cli = Cli(stdscr, Index.STAT_OPTIONS + ['reshow', 'correct', 'break'], api_curses=self.api_curses)
        if self.file:
            self.set_state_for_resume()
        if self.stage == 0:
            self.cli.print("Beginning index creation.\n", Cli.GREEN)
            self.collect_loop(stdscr)
            self.write_file("collection-complete")
            self.stage = 1
        self.cli.print("Congratulations, index creation is complete!\n", Cli.GREEN)
        self.cli.print("Beginning index correction. You will now have a chance to correct the data\n", Cli.GREEN)
        self.api_time.sleep(3)
        self.correct_loop(stdscr)
        self.write_file("correction-complete")
        self.cli.print("Congratulations, index correction is complete, this prompt will now exit\n", Cli.GREEN)

    def collect_loop(self, stdscr):
        """Stage 0: show each crop in turn and record its label."""
        files = sorted(os.listdir(Folder.STAT_CROP_FOLDER))
        while self.idx < len(files):
            file_name = files[self.idx]
            self.img = self.api_cv2.imread(Folder.STAT_CROP_FOLDER + file_name)
            data = self.collect_data_item()
            if data == 'correct':
                # Rewind one entry and drop its previously recorded label.
                self.idx = max(self.idx - 1, 0)
                self.data_index = self.data_index[:len(self.data_index) - 1]
                continue
            data[Index.FILE_NAME_KEY] = file_name
            self.data_index.append(data)
            self.idx += 1
            if self.idx % 100 == 0:
                self.cli.print("\nComplete %d of %d. Auto-saving work.\n" % (self.idx, len(files)), Cli.BLUE)
                self.write_file("autosave-")
            elif self.idx % 10 == 0:
                self.cli.print("\nComplete %d of %d\n" % (self.idx, len(files)), Cli.BLUE)
        self.idx = 0  # reset so the correction stage starts at the beginning

    def collect_data_item(self):
        """Prompt for one image's label; returns a dict, or 'correct' to rewind."""
        data = {}
        self.api_cv2.show_img(self.img)
        stat_type = self.get_stat_type()
        if stat_type == 'correct':
            return stat_type
        if stat_type != Index.NONE:
            stat_value = self.get_stat_value()
            if stat_value == 'correct':
                return stat_value
            else:
                data[Index.STAT_VALUE_KEY] = stat_value
        data[Index.STAT_TYPE_KEY] = stat_type
        return data

    def get_stat_type(self):
        """Prompt until a valid stat type (or 'correct') is entered."""
        stat_type = ""
        while not stat_type in Index.STAT_OPTIONS:
            stat_type = self.cli.input("Enter the type of the stat: Valid options %s\n>" % str(Index.STAT_OPTIONS))
            if self.process_special_cmd(stat_type):
                continue
            # most common typo is an extra character at the end
            # (bug fix: was `len(stat_type - 1)`, a TypeError at runtime)
            elif stat_type[0:len(stat_type) - 1] in Index.STAT_OPTIONS:
                stat_type = stat_type[0:len(stat_type) - 1]
            elif stat_type == 'correct':
                return 'correct'
            elif not stat_type in Index.STAT_OPTIONS:
                self.cli.print("Invalid stat type entered!\n", Cli.RED)
        return stat_type

    def get_stat_value(self):
        """Prompt until a non-negative integer (or 'correct') is entered."""
        integer_value = ""
        while not integer_value.isnumeric():
            integer_value = self.cli.input("Enter the number associated with the stat: \n>")
            if self.process_special_cmd(integer_value):
                continue
            elif integer_value == 'correct':
                return 'correct'
            if not integer_value.isnumeric():
                self.cli.print("Please enter a valid integer\n", Cli.RED)
        return int(integer_value)

    def correct_loop(self, stdscr):
        """Stage 1: replay every labelled image and let the user fix entries."""
        while self.idx < len(self.data_index):
            correct = " "
            went_back = False  # set when the user rewinds with 'correct'
            data = self.data_index[self.idx]
            self.img = self.api_cv2.imread(Folder.STAT_CROP_FOLDER + data[Index.FILE_NAME_KEY])
            self.print_stat_data(data)
            self.api_cv2.show_img(self.img)
            while correct != "":
                correct = self.cli.input("Correct the data?\n>")
                if self.process_special_cmd(correct):
                    self.print_stat_data(data)
                    continue
                elif correct == "correct":
                    self.idx = max(self.idx - 1, 0)
                    went_back = True
                    break
                elif correct != "":
                    tmp = self.collect_data_item()
                    if tmp == 'correct':
                        self.idx = max(self.idx - 1, 0)
                        went_back = True
                        break
                    else:
                        # Re-labelled: keep the original file name and persist
                        # the corrected entry (bug fix: the original never
                        # wrote the correction back into data_index).
                        tmp[Index.FILE_NAME_KEY] = data[Index.FILE_NAME_KEY]
                        self.data_index[self.idx] = tmp
                        break
            # Bug fix: the original condition `correct != 'correct' or
            # tmp != 'correct'` raised NameError when `tmp` was unbound and
            # cancelled the rewind after a nested 'correct'.
            if not went_back:
                self.idx += 1
                if self.idx % 50 == 0:
                    self.cli.print("\nComplete %d of %d\n" % (self.idx, len(self.data_index)), Cli.BLUE)

    def process_special_cmd(self, input):
        """Handle 'reshow' and 'break'; return True if the input was consumed."""
        if input == 'reshow':
            self.api_cv2.show_img(self.img)
            return True
        elif input == 'break':
            self.write_file("manual-")
            self.save_progress({'stage': self.stage, 'idx': len(self.data_index)})
            return True
        else:
            return False

    def set_state_for_resume(self):
        """Load a previously saved index and position the tool to continue."""
        with self.api_builtin.open(self.file, "r") as fp:
            self.data_index = self.api_json.load(fp)
        total = len(os.listdir(Folder.STAT_CROP_FOLDER))
        if len(self.data_index) == total:
            # Everything is already labelled: jump straight to correction.
            self.idx = 0
            self.stage = 1
        else:
            self.idx = len(self.data_index)
            self.cli.print("Resuming collection from %d of %d\n" % (self.idx, total))

    def print_stat_data(self, data):
        """Echo an index entry, colour-coded by stat family."""
        stat_type = data[Index.STAT_TYPE_KEY]
        print_str = str(data) + "\n"
        if stat_type in ['electric', 'hero_speed', 'tower_range']:
            self.cli.print(print_str, Cli.BLUE)
        elif stat_type in ['fire', 'hero_hp', 'tower_hp']:
            self.cli.print(print_str, Cli.RED)
        elif stat_type in ['poison', 'hero_rate', 'tower_rate', 'defense']:
            self.cli.print(print_str, Cli.GREEN)
        elif stat_type in ['base', 'hero_dmg', 'tower_dmg', 'offense']:
            self.cli.print(print_str, Cli.BROWN)
        else:
            self.cli.print(print_str)

    def write_file(self, prefix=""):
        """Dump the current index to a timestamped JSON file."""
        with self.api_builtin.open(Folder.STAT_SAVE_FOLDER + prefix + Index.get_time() + '-index.json', 'w') as fp:
            self.api_json.dump(self.data_index, fp)

    def save_progress(self, data):
        """Persist resume metadata and terminate the program."""
        self.cli.print("saving progress and exiting")
        with self.api_builtin.open(Folder.PROGRESS_FILE, 'w') as fp:
            self.api_json.dump(data, fp)
        exit()

    @staticmethod
    def get_time():
        """Return the current local time formatted for use in file names."""
        return datetime.now().strftime("%d-%m-%Y_%H-%M-%S")
| 32.035556 | 111 | 0.638596 |
46653aa97034f223c9c990b0df13c5b9b0f6203d | 897 | pyw | Python | pybreak.pyw | loks0n/pybreak | 73f79c84ada224a8760449779c4e107eebee12b3 | [
"MIT"
] | 2 | 2020-04-26T09:16:40.000Z | 2021-04-19T16:17:02.000Z | pybreak.pyw | loks0n/pybreak | 73f79c84ada224a8760449779c4e107eebee12b3 | [
"MIT"
] | null | null | null | pybreak.pyw | loks0n/pybreak | 73f79c84ada224a8760449779c4e107eebee12b3 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import time
import threading
import ctypes
WORK_DURATION = 600
LONG_BREAK_EVERY = 4
TITLE = 'pybreak'
MAIN_MENU_TEXT = 'Press OK to end session.'
SHORT_BREAK_TEXT = 'Go take a short break! Press OK when finished.'
LONG_BREAK_TEXT = 'Go take a long break! Press OK when finished.'
MAIN_MENU_ICON = 0x00000
BREAK_ICON = 0x40000
MessageBox = ctypes.windll.user32.MessageBoxW
def main():
    """Start the background break-reminder worker, then block on the menu.

    The worker thread is a daemon, so dismissing the main-menu popup ends
    the whole program.
    """
    worker = threading.Thread(target=loop, daemon=True)
    worker.start()
    menu_popup()
def loop():
    """Background worker: after every WORK_DURATION seconds, show a break popup.

    Every LONG_BREAK_EVERY-th break is a long break; all others are short.
    Runs forever; intended to be started as a daemon thread by main().
    """
    intervals = 0  # completed work intervals (the original used an undefined `i`)
    while True:
        time.sleep(WORK_DURATION)
        intervals += 1
        if intervals % LONG_BREAK_EVERY == 0:
            break_popup(LONG_BREAK_TEXT)
        else:
            break_popup(SHORT_BREAK_TEXT)
def menu_popup():
    # Blocks until the user dismisses the dialog; per MAIN_MENU_TEXT,
    # dismissing it ends the session.
    MessageBox(None, MAIN_MENU_TEXT, TITLE, MAIN_MENU_ICON)
def break_popup(text):
    # Blocks until the user acknowledges the break with OK.
    MessageBox(None, text, TITLE, BREAK_ICON)
if __name__=='__main__':
main()
| 21.878049 | 67 | 0.695652 |
330bf6fd5670c105af4211a68d4a73493a715229 | 610 | py | Python | src/saicinpainting/training/modules/squeeze_excitation.py | achen353/lama | 57438f957d5178d1180040fa349423e84af58e11 | [
"Apache-2.0"
] | null | null | null | src/saicinpainting/training/modules/squeeze_excitation.py | achen353/lama | 57438f957d5178d1180040fa349423e84af58e11 | [
"Apache-2.0"
] | null | null | null | src/saicinpainting/training/modules/squeeze_excitation.py | achen353/lama | 57438f957d5178d1180040fa349423e84af58e11 | [
"Apache-2.0"
] | null | null | null | import torch.nn as nn
class SELayer(nn.Module):
    """Squeeze-and-Excitation channel-attention layer.

    Globally average-pools each channel ("squeeze"), feeds the pooled vector
    through a bottleneck MLP with a sigmoid gate ("excitation"), and rescales
    the input feature map channel-wise by the resulting weights.
    """

    def __init__(self, channel, reduction=16):
        super(SELayer, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        bottleneck = channel // reduction
        gate_layers = [
            nn.Linear(channel, bottleneck, bias=False),
            nn.ReLU(inplace=True),
            nn.Linear(bottleneck, channel, bias=False),
            nn.Sigmoid(),
        ]
        self.fc = nn.Sequential(*gate_layers)

    def forward(self, x):
        batch, channels = x.size(0), x.size(1)
        squeezed = self.avg_pool(x).view(batch, channels)
        gates = self.fc(squeezed).view(batch, channels, 1, 1)
        return x * gates.expand_as(x)
| 29.047619 | 65 | 0.547541 |
44e7aaacfe90515ef18a92a0ededeaaa1c88cc55 | 56 | py | Python | chest/run_test.py | nikicc/anaconda-recipes | 9c611a5854bf41bbc5e7ed9853dc71c0851a62ef | [
"BSD-3-Clause"
] | 130 | 2015-07-28T03:41:21.000Z | 2022-03-16T03:07:41.000Z | chest/run_test.py | nikicc/anaconda-recipes | 9c611a5854bf41bbc5e7ed9853dc71c0851a62ef | [
"BSD-3-Clause"
] | 119 | 2015-08-01T00:54:06.000Z | 2021-01-05T13:00:46.000Z | chest/run_test.py | nikicc/anaconda-recipes | 9c611a5854bf41bbc5e7ed9853dc71c0851a62ef | [
"BSD-3-Clause"
] | 72 | 2015-07-29T02:35:56.000Z | 2022-02-26T14:31:15.000Z | from chest import Chest
c = Chest()
c['x'] = [1, 2, 3]
| 11.2 | 23 | 0.553571 |
dd19b766822aa3235247aae663df78c756adace9 | 247 | py | Python | examples/template_video.py | VoIlAlex/cv2studio | 26ea110c141813b05fe1269a3e26493febe63e4d | [
"MIT"
] | 2 | 2019-10-31T06:17:48.000Z | 2020-12-13T20:24:44.000Z | examples/template_video.py | VoIlAlex/cv2studio | 26ea110c141813b05fe1269a3e26493febe63e4d | [
"MIT"
] | null | null | null | examples/template_video.py | VoIlAlex/cv2studio | 26ea110c141813b05fe1269a3e26493febe63e4d | [
"MIT"
] | null | null | null | import cv2studio
import cv2studio.components as components
import cv2
class VideoApp(cv2studio.App):
    """Minimal cv2studio app that opens the file at `path` in VIDEO mode."""

    def __init__(self, path):
        cv2studio.App.__init__(self, path, cv2studio.VIDEO)
app = VideoApp('../res/forest.mp4')
app.main_loop()
| 19 | 59 | 0.736842 |
f11f2c7c9db04e9d6d9dfc336c2daa1bd516c623 | 4,376 | py | Python | tests/datadriven/test_asserts.py | dkucsc/mavas | db3f7bd35944b7d0f510c62760eaa071bf53d7d8 | [
"Apache-2.0"
] | 83 | 2015-01-05T22:21:11.000Z | 2017-02-20T01:25:28.000Z | tests/datadriven/test_asserts.py | dkucsc/mavas | db3f7bd35944b7d0f510c62760eaa071bf53d7d8 | [
"Apache-2.0"
] | 1,508 | 2015-01-02T14:06:12.000Z | 2017-03-08T19:49:18.000Z | tests/datadriven/test_asserts.py | dkucsc/mavas | db3f7bd35944b7d0f510c62760eaa071bf53d7d8 | [
"Apache-2.0"
] | 99 | 2015-01-14T20:48:56.000Z | 2017-03-08T18:35:06.000Z | """
Tests the datadriven assert methods
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import tests.datadriven as datadriven
class TestAsserts(unittest.TestCase):
def setUp(self):
self.testCase = datadriven.TestCase()
def testAssertEqual(self):
self.testCase.assertEqual(0, 0)
with self.testCase.assertRaises(AssertionError):
self.testCase.assertEqual(0, 1)
def testAssertNotEqual(self):
self.testCase.assertNotEqual(0, 1)
with self.testCase.assertRaises(AssertionError):
self.testCase.assertNotEqual(0, 0)
def testAssertTrue(self):
self.testCase.assertTrue(True)
with self.testCase.assertRaises(AssertionError):
self.testCase.assertTrue(False)
def testAssertFalse(self):
self.testCase.assertFalse(False)
with self.testCase.assertRaises(AssertionError):
self.testCase.assertFalse(True)
def testAssertIs(self):
a = object()
b = object()
self.testCase.assertIs(a, a)
with self.testCase.assertRaises(AssertionError):
self.testCase.assertIs(a, b)
def testAssertIsNot(self):
a = object()
b = object()
self.testCase.assertIsNot(a, b)
with self.testCase.assertRaises(AssertionError):
self.testCase.assertIsNot(a, a)
def testAssertIsNone(self):
self.testCase.assertIsNone(None)
with self.testCase.assertRaises(AssertionError):
self.testCase.assertIsNone(1)
def testAssertIsNotNone(self):
self.testCase.assertIsNotNone(1)
with self.testCase.assertRaises(AssertionError):
self.testCase.assertIsNotNone(None)
def testAssertIn(self):
self.testCase.assertIn(0, [0])
with self.testCase.assertRaises(AssertionError):
self.testCase.assertIn(0, [])
def testAssertNotIn(self):
self.testCase.assertNotIn(0, [])
with self.testCase.assertRaises(AssertionError):
self.testCase.assertNotIn(0, [0])
def testAssertIsInstance(self):
self.testCase.assertIsInstance(0, int)
with self.testCase.assertRaises(AssertionError):
self.testCase.assertIsInstance(0, str)
def testAssertIsNotInstance(self):
self.testCase.assertNotIsInstance(0, str)
with self.testCase.assertRaises(AssertionError):
self.testCase.assertNotIsInstance(0, int)
def testAssertGreater(self):
self.testCase.assertGreater(1, 0)
with self.testCase.assertRaises(AssertionError):
self.testCase.assertGreater(0, 0)
with self.testCase.assertRaises(AssertionError):
self.testCase.assertGreater(0, 1)
def testAssertGreaterEqual(self):
self.testCase.assertGreaterEqual(0, 0)
self.testCase.assertGreaterEqual(1, 0)
with self.testCase.assertRaises(AssertionError):
self.testCase.assertGreaterEqual(0, 1)
def testAssertLess(self):
self.testCase.assertLess(0, 1)
with self.testCase.assertRaises(AssertionError):
self.testCase.assertLess(1, 0)
with self.testCase.assertRaises(AssertionError):
self.testCase.assertLess(0, 0)
def testAssertLessEqual(self):
self.testCase.assertLessEqual(0, 0)
self.testCase.assertLessEqual(0, 1)
with self.testCase.assertRaises(AssertionError):
self.testCase.assertLessEqual(1, 0)
def testAssertRaises(self):
def func(*args, **kwargs):
raise AssertionError()
self.testCase.assertRaises(AssertionError, func, 1, a=2)
def testAssertAlmostEqual(self):
self.testCase.assertAlmostEqual(0.12345678, 0.12345679)
with self.testCase.assertRaises(AssertionError):
self.testCase.assertAlmostEqual(0.123456, 0.1234568)
with self.testCase.assertRaises(AssertionError):
self.testCase.assertAlmostEqual(0.12345678, 0.12345679, 10)
def testAssertNotAlmostEqual(self):
self.testCase.assertNotAlmostEqual(0.123456, 0.1234568)
self.testCase.assertNotAlmostEqual(0.12345678, 0.12345679, 10)
with self.testCase.assertRaises(AssertionError):
self.testCase.assertNotAlmostEqual(0.12345678, 0.12345679)
| 35.290323 | 71 | 0.679159 |
21fa21af18edb516450ea607ceb34bf9f57b0b7e | 4,957 | py | Python | src/alldecays/data_handling/combined_data_set.py | LLR-ILD/alldecays | 08e51e99385ae7ca96edefddafd715e1d8cac3d3 | [
"Apache-2.0"
] | null | null | null | src/alldecays/data_handling/combined_data_set.py | LLR-ILD/alldecays | 08e51e99385ae7ca96edefddafd715e1d8cac3d3 | [
"Apache-2.0"
] | null | null | null | src/alldecays/data_handling/combined_data_set.py | LLR-ILD/alldecays | 08e51e99385ae7ca96edefddafd715e1d8cac3d3 | [
"Apache-2.0"
] | null | null | null | """Combination of data sets class"""
import numpy as np
from alldecays.exceptions import DataSetError
from .abstract_data_set import AbstractDataSet
class CombinedDataSet(AbstractDataSet):
    """Convenience wrapper around multiple `DataSet` objects.

    Presents several `DataSet` instances as one data set: channels are
    exposed under a ``"<prefix>:<channel>"`` naming scheme and the shared
    physics parameters (branching ratios, fit start values, signal scaler)
    are kept in sync across all wrapped sets.

    Example:
    >>> import alldecays
    >>> isinstance(ds1, alldecays.DataSet) and isinstance(ds2, alldecays.DataSet)
    True
    >>> combined = alldecays.CombinedDataSet(decay_names, {"ds1": ds1})
    >>> combined.add_data_sets({"ds2": ds2})
    """

    def __init__(
        self,
        decay_names,
        data_sets=None,
        data_brs=None,
        fit_start_brs=None,
        signal_scaler=1.0,
    ):
        self._decay_names = decay_names
        self._data_brs = self._set_brs(data_brs)
        if fit_start_brs is None:
            # Default the fit starting point to the data BRs (copied, so that
            # later mutation of one array does not affect the other).
            self._fit_start_brs = np.array(self._data_brs)
        else:
            self._fit_start_brs = self._set_brs(fit_start_brs)
        self._signal_scaler = signal_scaler
        self._data_sets = data_sets if data_sets is not None else {}
        for ds in self._data_sets.values():
            self._validate_data_set(ds)

    def _validate_data_set(self, ds):
        """Validate that a dataset fits to this CombinedDataSet."""
        assert self._decay_names == ds._decay_names
        # Note: You get an AssertionError when you initiate the CombinedDataSet
        # with the default values, but try to add a DataSet with non-default values.
        assert (self._data_brs == ds._data_brs).all()
        assert (self.fit_start_brs == ds.fit_start_brs).all()
        assert self.signal_scaler == ds.signal_scaler

    @property
    def _channels(self):
        """All channels of all wrapped sets, keyed as "<prefix>:<name>"."""
        channels = {}
        for prefix in self._data_sets:
            for name, channel in self._data_sets[prefix].get_channels().items():
                n = f"{prefix}:{name}"
                if n in channels:
                    raise DataSetError(f"Multiple channels with same name: {n}.")
                channels[n] = channel
        return channels

    def get_channels(self):
        """Return a dict of all channels."""
        return self._channels

    @property
    def decay_names(self):
        return self._decay_names

    @decay_names.setter
    def decay_names(self, new_names):
        if len(self.decay_names) != len(new_names):
            # Consistency fix: raise the module's DataSetError (as the other
            # validation paths here do) instead of a bare Exception.
            raise DataSetError(f"{self.decay_names=}, {new_names=}.")
        for ds in self._data_sets.values():
            ds.decay_names = new_names
        self._decay_names = new_names

    def _set_brs(self, brs=None):
        """Return validated branching ratios; uniform if `brs` is None."""
        n_decays = len(self.decay_names)
        if brs is None:
            return np.ones(n_decays) / n_decays
        if len(brs) != n_decays:
            raise DataSetError(f"{brs=}, {n_decays=}.")
        return brs

    @property
    def data_brs(self):
        return self._data_brs

    @data_brs.setter
    def data_brs(self, new_brs):
        # Propagate to every wrapped DataSet so they stay consistent.
        for ds in self._data_sets.values():
            ds.data_brs = new_brs
        self._data_brs = new_brs

    @property
    def fit_start_brs(self):
        return self._fit_start_brs

    @fit_start_brs.setter
    def fit_start_brs(self, new_brs):
        for ds in self._data_sets.values():
            ds.fit_start_brs = new_brs
        self._fit_start_brs = new_brs

    @property
    def luminosity_ifb(self):
        """Per-DataSet luminosities, keyed by prefix (read-only on the combination)."""
        return {k: ds.luminosity_ifb for k, ds in self._data_sets.items()}

    @luminosity_ifb.setter
    def luminosity_ifb(self, new_value):
        # Setting a single luminosity on the combination is ambiguous;
        # callers must set it on each wrapped DataSet individually.
        # (The dead private helper `__raise_lumi_error` was removed.)
        raise NotImplementedError(
            "`luminosity_ifb` is not implemented for class "
            f"`{self.__class__.__name__}`. Set this on each DataSet directly."
        )

    @property
    def signal_scaler(self):
        return self._signal_scaler

    @signal_scaler.setter
    def signal_scaler(self, new_value):
        for ds in self._data_sets.values():
            ds.signal_scaler = new_value
        self._signal_scaler = new_value

    def add_data_sets(self, data_set_dict):
        """Combine the specified DataSet(s) into this CombinedDataSet."""
        for prefix, ds in data_set_dict.items():
            self._validate_data_set(ds)
            if prefix in self._data_sets:
                raise DataSetError(f"A DataSet with {prefix=} already exists.")
            self._data_sets[prefix] = ds

    def __repr__(self):
        n_channels = len(self.get_channels())
        n_data_sets = len(self._data_sets)
        text = f"{self.__class__.__name__} with {n_channels} channels.\n"
        text += f" {n_data_sets} DataSet objects: {list(self._data_sets)}.\n"
        if self.signal_scaler != 1:
            text += f" The signal strength is rescaled by {self.signal_scaler}.\n"
        text += f" Considered signal decays: {self.decay_names}.\n"
        return text
5a6f0c0c9dc0eac30a90708b82fb41b9eba201ef | 1,084 | py | Python | data/train/python/5a6f0c0c9dc0eac30a90708b82fb41b9eba201efurls.py | harshp8l/deep-learning-lang-detection | 2a54293181c1c2b1a2b840ddee4d4d80177efb33 | [
"MIT"
] | 84 | 2017-10-25T15:49:21.000Z | 2021-11-28T21:25:54.000Z | data/train/python/5a6f0c0c9dc0eac30a90708b82fb41b9eba201efurls.py | vassalos/deep-learning-lang-detection | cbb00b3e81bed3a64553f9c6aa6138b2511e544e | [
"MIT"
] | 5 | 2018-03-29T11:50:46.000Z | 2021-04-26T13:33:18.000Z | data/train/python/5a6f0c0c9dc0eac30a90708b82fb41b9eba201efurls.py | vassalos/deep-learning-lang-detection | cbb00b3e81bed3a64553f9c6aa6138b2511e544e | [
"MIT"
] | 24 | 2017-11-22T08:31:00.000Z | 2022-03-27T01:22:31.000Z | from django.conf.urls import patterns, url, include
from servicerating import api
from tastypie.api import Api
# Setting the API base name and registering the API resources using
# Tastypies API function
api_resources = Api(api_name='v1/servicerating')
api_resources.register(api.ContactResource())
api_resources.register(api.ConversationResource())
api_resources.register(api.ResponseResource())
api_resources.register(api.UserAccountResource())
api_resources.register(api.ExtraResource())
api_resources.register(api.ServiceRatingResource())
api_resources.prepend_urls()
urlpatterns = patterns('',
# Setting the urlpatterns to hook into the api urls
url(r'^api/', include(api_resources.urls)),
# Admin servicerating dashboard hookup
url(r'^admin/servicerating/dashboard/',
'servicerating.views.dashboard'),
url(r'^admin/servicerating/report/',
'servicerating.views.report_responses')
)
| 38.714286 | 74 | 0.666974 |
2929981262da0d5dc113783e3f02907f81395cdb | 3,077 | py | Python | tests/integ/sagemaker/jumpstart/conftest.py | HappyAmazonian/sagemaker-python-sdk | bb7563f450113a3ba18a8e24cf6092f4325bb321 | [
"Apache-2.0"
] | 1 | 2021-12-10T16:18:29.000Z | 2021-12-10T16:18:29.000Z | tests/integ/sagemaker/jumpstart/conftest.py | HappyAmazonian/sagemaker-python-sdk | bb7563f450113a3ba18a8e24cf6092f4325bb321 | [
"Apache-2.0"
] | 20 | 2021-09-17T20:50:11.000Z | 2021-12-09T00:29:02.000Z | tests/integ/sagemaker/jumpstart/conftest.py | HappyAmazonian/sagemaker-python-sdk | bb7563f450113a3ba18a8e24cf6092f4325bb321 | [
"Apache-2.0"
] | null | null | null | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
import os
import boto3
import pytest
from botocore.config import Config
from tests.integ.sagemaker.jumpstart.constants import (
ENV_VAR_JUMPSTART_SDK_TEST_SUITE_ID,
JUMPSTART_TAG,
)
from tests.integ.sagemaker.jumpstart.utils import (
get_test_artifact_bucket,
get_test_suite_id,
)
from sagemaker.jumpstart.constants import JUMPSTART_DEFAULT_REGION_NAME
def _setup():
print("Setting up...")
os.environ.update({ENV_VAR_JUMPSTART_SDK_TEST_SUITE_ID: get_test_suite_id()})
def _teardown():
print("Tearing down...")
test_cache_bucket = get_test_artifact_bucket()
test_suite_id = os.environ[ENV_VAR_JUMPSTART_SDK_TEST_SUITE_ID]
sagemaker_client = boto3.client(
"sagemaker",
config=Config(retries={"max_attempts": 10, "mode": "standard"}),
region_name=JUMPSTART_DEFAULT_REGION_NAME,
)
search_endpoints_result = sagemaker_client.search(
Resource="Endpoint",
SearchExpression={
"Filters": [
{"Name": f"Tags.{JUMPSTART_TAG}", "Operator": "Equals", "Value": test_suite_id}
]
},
)
endpoint_names = [
endpoint_info["Endpoint"]["EndpointName"]
for endpoint_info in search_endpoints_result["Results"]
]
endpoint_config_names = [
endpoint_info["Endpoint"]["EndpointConfigName"]
for endpoint_info in search_endpoints_result["Results"]
]
model_names = [
sagemaker_client.describe_endpoint_config(EndpointConfigName=endpoint_config_name)[
"ProductionVariants"
][0]["ModelName"]
for endpoint_config_name in endpoint_config_names
]
# delete test-suite-tagged endpoints
for endpoint_name in endpoint_names:
sagemaker_client.delete_endpoint(EndpointName=endpoint_name)
# delete endpoint configs for test-suite-tagged endpoints
for endpoint_config_name in endpoint_config_names:
sagemaker_client.delete_endpoint_config(EndpointConfigName=endpoint_config_name)
# delete models for test-suite-tagged endpoints
for model_name in model_names:
sagemaker_client.delete_model(ModelName=model_name)
# delete test artifact/cache s3 folder
s3_resource = boto3.resource("s3")
bucket = s3_resource.Bucket(test_cache_bucket)
bucket.objects.filter(Prefix=test_suite_id + "/").delete()
@pytest.fixture(scope="session", autouse=True)
def setup(request):
_setup()
request.addfinalizer(_teardown)
| 31.397959 | 95 | 0.724407 |
ada1e207bd9fea056085a297d6b73f207e268508 | 685 | py | Python | Shared/networkingSchemas/misc/auth.py | LukasSchmid97/elevatorbot | 77c45d8945c735c8dce9bc75563086bce265dc18 | [
"MIT"
] | null | null | null | Shared/networkingSchemas/misc/auth.py | LukasSchmid97/elevatorbot | 77c45d8945c735c8dce9bc75563086bce265dc18 | [
"MIT"
] | 89 | 2021-08-12T15:23:05.000Z | 2022-01-11T12:33:21.000Z | Shared/networkingSchemas/misc/auth.py | LukasSchmid97/elevatorbot | 77c45d8945c735c8dce9bc75563086bce265dc18 | [
"MIT"
] | 1 | 2021-10-20T20:07:22.000Z | 2021-10-20T20:07:22.000Z | from Shared.networkingSchemas.base import CustomBaseModel
class BackendUserModel(CustomBaseModel):
user_name: str
allowed_scopes: list[str]
has_write_permission: bool
has_read_permission: bool
disabled: bool
class Config:
orm_mode = True
class Token(CustomBaseModel):
access_token: str
token_type: str
class BungieRegistrationInput(CustomBaseModel):
code: str
state: str
class BungieTokenInput(CustomBaseModel):
access_token: str
token_type: str
expires_in: int
refresh_token: str
refresh_expires_in: int
membership_id: str
state: str
class BungieTokenOutput(CustomBaseModel):
bungie_name: str
| 18.513514 | 57 | 0.734307 |
956d59dca3192fc92e16b9f9d119f3a8e326ace5 | 213 | py | Python | defIntToStr.py | medifle/python_6.00.1x | d40629f83e09b02cd4fc4e79e790d51d9b0ebf63 | [
"MIT"
] | 4 | 2015-10-27T15:42:33.000Z | 2018-03-08T07:16:26.000Z | defIntToStr.py | medifle/python_6.00.1x | d40629f83e09b02cd4fc4e79e790d51d9b0ebf63 | [
"MIT"
] | null | null | null | defIntToStr.py | medifle/python_6.00.1x | d40629f83e09b02cd4fc4e79e790d51d9b0ebf63 | [
"MIT"
] | null | null | null | # convert integer into string
def intToStr(i):
digits = '0123456789'
if i==0:
return '0'
result = ''
while i > 0:
result = digits[i % 10] + result
i /= 10
return result | 19.363636 | 40 | 0.525822 |
db90bfb9d00e2e5c818e9ffba1778ef4380e5f71 | 2,968 | py | Python | run_model.py | Antoine-BL/chess-ai.py | c68ca76063c14b1b8b91d338c8cead9f411521ca | [
"MIT"
] | 2 | 2019-08-21T15:52:29.000Z | 2021-09-11T23:07:17.000Z | run_model.py | Antoine-BL/chess-ai.py | c68ca76063c14b1b8b91d338c8cead9f411521ca | [
"MIT"
] | 5 | 2020-09-25T23:15:31.000Z | 2022-02-10T00:07:33.000Z | run_model.py | Antoine-BL/EuroTruck-ai.py | c68ca76063c14b1b8b91d338c8cead9f411521ca | [
"MIT"
] | null | null | null | from __future__ import print_function
import math
import os
import cv2
import time
from tensorflow.python.keras.models import load_model
import trainer
from trainer.model import model
import pyvjoy
import numpy as np
from mss import mss
from tensorflow.python.keras import Sequential
from pyvjoy import VJoyDevice
from threading_ext.KeyboardTracker import KeyboardTracker
UINT8_MAXVALUE = 32768
MODEL_NAME = 'models/last_try.h5'
def main():
keyboard_tracker_thread = KeyboardTracker()
keyboard_tracker_thread.pause()
keyboard_tracker_thread.start()
print('Loading model...')
m = init_model()
holding_down = False
paused = True
j = pyvjoy.VJoyDevice(1)
with mss() as sct:
while True:
if keyboard_tracker_thread.check_for_kill():
print('killing all threads')
keyboard_tracker_thread.kill()
break
if not holding_down and keyboard_tracker_thread.check_for_pause():
if paused:
msg = 'unpausing'
else:
msg = 'pausing'
paused = not paused
print(msg + ' threads')
holding_down = True
elif holding_down \
and not keyboard_tracker_thread.check_for_rewind() \
and not keyboard_tracker_thread.check_for_pause():
holding_down = False
if not paused:
monitor = {"top": 40, "left": 0, "width": 1024, "height": 728}
pos_x = 108
pos_y = 128
dim_y = 252
dim_x = 126
screen = np.asarray(sct.grab(monitor))
screen = cv2.cvtColor(screen, cv2.COLOR_BGRA2GRAY)
screen = cv2.resize(screen, (480, 270))
screen = screen[pos_y:pos_y + dim_y][pos_x:pos_x + dim_x]
cv2.imshow('window', screen)
if cv2.waitKey(25) & 0xFF == ord('q'):
cv2.destroyAllWindows()
break
screen = screen / 255 - 0.5
screen = np.array([np.resize(screen, (66, 200, 1))])
prediction = m.predict(screen)
print(prediction)
act_on_prediction(prediction, j)
else:
j.data.wAxisX = 16383
j.update()
time.sleep(0.1)
def init_model() -> Sequential:
    """Build the steering model and load trained weights from MODEL_NAME."""
    model_path = os.path.realpath(MODEL_NAME)
    m = trainer.model.model()
    m.load_weights(model_path)
    return m
def act_on_prediction(prediction, vjd: VJoyDevice) -> None:
    """Translate the model's steering prediction into a vJoy X-axis update."""
    adjusted_val = prediction[0][0] * UINT8_MAXVALUE
    adjusted_val = int(round(adjusted_val, 0))
    print(adjusted_val)
    # NOTE(review): adjusted_val is computed and printed but NOT used -- the
    # axis is always set to centre (UINT8_MAXVALUE // 2). This looks like
    # debugging leftovers; confirm whether wAxisX should be adjusted_val.
    vjd.data.wAxisX = UINT8_MAXVALUE // 2
    vjd.update()
def __std_image(img):
    """Standardise an image array to zero mean and unit variance."""
    return (img - np.mean(img)) / np.std(img)
if __name__ == "__main__":
main()
| 26.5 | 78 | 0.574798 |
db511acfa657b4591b921a090565c923a7b31ecb | 473 | py | Python | napari/plugins/_tests/fixtures/napari_bad_plugin2.py | yinawang28/napari | 6ea95a9fa2f9150a4dbb5ec1286b8ff2020c3957 | [
"BSD-3-Clause"
] | null | null | null | napari/plugins/_tests/fixtures/napari_bad_plugin2.py | yinawang28/napari | 6ea95a9fa2f9150a4dbb5ec1286b8ff2020c3957 | [
"BSD-3-Clause"
] | null | null | null | napari/plugins/_tests/fixtures/napari_bad_plugin2.py | yinawang28/napari | 6ea95a9fa2f9150a4dbb5ec1286b8ff2020c3957 | [
"BSD-3-Clause"
] | null | null | null | """
This plugin registers fine, and passes hook_specification validation.
But errors when the actual reader function is called. This is used for testing
the read_data_with_plugins loop.
"""
import pluggy
napari_hook_implementation = pluggy.HookimplMarker("napari")
def reader_function(path):
    """Deliberately failing reader: always raises IOError for the given path."""
    message = f"Plugin failed to read path: {path}"
    raise IOError(message)
@napari_hook_implementation
def napari_get_reader(path):
    """Return the (failing) reader for paths ending in 'ext', else None."""
    if not path.endswith('ext'):
        return None
    return reader_function
| 23.65 | 79 | 0.773784 |
37bed5d4b5cfbebc6a58987cd46d699709931564 | 43,007 | py | Python | cellpack/mgl_tools/DejaVu/Tests/test_Geom.py | mesoscope/cellpack | ec6b736fc706c1fae16392befa814b5337a3a692 | [
"MIT"
] | null | null | null | cellpack/mgl_tools/DejaVu/Tests/test_Geom.py | mesoscope/cellpack | ec6b736fc706c1fae16392befa814b5337a3a692 | [
"MIT"
] | 21 | 2021-10-02T00:07:05.000Z | 2022-03-30T00:02:10.000Z | cellpack/mgl_tools/DejaVu/Tests/test_Geom.py | mesoscope/cellpack | ec6b736fc706c1fae16392befa814b5337a3a692 | [
"MIT"
] | null | null | null | ## Automatically adapted for numpy.oldnumeric Jul 23, 2007 by
#
#
# $Id: test_Geom.py,v 1.29 2009/09/02 21:15:07 vareille Exp $
#
#
import unittest
import sys, os, math, Tkinter
import numpy
from opengltk.OpenGL import GL
from geomutils.geomalgorithms import TriangleNormals
import numpy.oldnumeric as Numeric, types
from DejaVu.Geom import Geom
from DejaVu.IndexedGeom import IndexedGeom
# import Materials, viewerConst, datamodel, Clip
from DejaVu.colorTool import OneColor
from DejaVu.Transformable import Transformable
from DejaVu.Displayable import Displayable
from DejaVu.viewerFns import checkKeywords
from DejaVu.Camera import Camera
from DejaVu.Viewer import Viewer
from DejaVu import viewerConst
from DejaVu.ViewerGUI import ViewerGUI
class Geom__init__Tests(unittest.TestCase):
    """Constructor keyword coverage for Geom.

    Geom.__init__ handles only these keywords itself; every other
    keyword is forwarded to the Set method:
        protected -- 0/1, when set the geometry cannot be deleted
        listed    -- 0/1, when set the geometry appears in the object list
        vertices
        shape
    """

    def test_geom_not_protected(self):
        """default for protected False, (default listed is True)"""
        obj = Geom()
        self.assertTrue(isinstance(obj, Geom))

    def test_geom_protected(self):
        """protected True, (default listed is True)"""
        obj = Geom(protected=True)
        self.assertTrue(isinstance(obj, Geom))

    def test_geom_not_listed(self):
        """listed False, (default protected is False)"""
        obj = Geom(listed=False)
        self.assertTrue(isinstance(obj, Geom))

    def test_geom_listed(self):
        """listed False, protected is True"""
        obj = Geom(listed=False, protected=True)
        self.assertTrue(isinstance(obj, Geom))

    def test_geom_vertices(self):
        """vertices"""
        obj = Geom(vertices=((0, 0, 0),))
        self.assertTrue(isinstance(obj, Geom))

    def test_geom_shape(self):
        """shape"""
        obj = Geom(shape=(0, 0))
        self.assertTrue(isinstance(obj, Geom))

    def test_geom_vertices_shape(self):
        """vertices and shape together"""
        obj = Geom(vertices=((0, 0, 0), (0, 0, 1)), shape=((0, 0),))
        self.assertTrue(isinstance(obj, Geom))
class Geom_Set_Tests(unittest.TestCase):
    """Keyword coverage for Geom.Set().

    keywords = ['protected', # 0/1 when set geometry cannot be deleted
            'listed',    # 0/1 when set geometry appears in object list
            'tagModified', # use False to avoid toggling _modified
            'vertices', 'vreshape', 'shape',
            'texture',       # not done
            'textureCoords', # not done
            'vnormals', 'materials', 'polyFace', 'matBind', 'propName',
            'matName', 'matInd', 'rawMaterialB', 'rawMaterialF',
            'matMask', 'transient', 'name',
            #'antialiased',
            'lineWidth', 'pointWidth', 'lighting', 'visible', 'outline',
            'stippleLines', 'stipplePolygons', 'culling', 'pickable',
            'pickableVertices', 'scissor', 'scissorX', 'scissorY',
            'scissorW', 'scissorH', 'scissorAspectRatio', 'opacity',
            'depthMask', 'blendFunctions', 'instanceMatrices',
            'inheritMaterial', 'inheritXform', 'inheritPointWidth',
            'inheritLineWidth', 'inheritStippleLines',
            'inheritStipplePolygons', 'inheritFrontPolyMode',
            'inheritBackPolyMode', 'inheritShading', 'inheritCulling',
            'transparent',        # is also set when materials are defined
            'immediateRendering', # set to 1 to avoid using dpyList
            'frontPolyMode', 'backPolyMode', 'shading',
            'rotation', 'translation', 'scale', 'pivot',
            ]
    """

    def setUp(self):
        # a fresh, un-parented Geom for every test
        self.geom = Geom(name="baseTest")

    def tearDown(self):
        """
        clean-up
        """
        try:
            del self.geom
        except:
            pass

    # protected
    def test_geom_protected_valid(self):
        """valid input for protected"""
        val = False
        self.geom.Set(protected=val)
        self.assertEqual(val, self.geom.getState()["protected"])

    def test_geom_protected_invalid(self):
        """invalid input for protected"""
        self.geom.Set(protected="hai")
        self.assertNotEqual(self.geom.protected, "hai")

    # listed
    # def test_geom_cannot_set_listed(self):
    #     """NB:CANNOT Set listed even with valid input 'False'
    #     """
    #     self.geom.Set(listed=False)
    #     newstate=self.geom.getState()['listed']
    #     self.assertEqual(newstate, True)

    def test_geom_listed_invalid(self):
        """invalid input for listed"""
        # FIX: dropped unused local that captured Set()'s return value
        self.geom.Set(listed="hai")
        self.assertNotEqual(self.geom.listed, "hai")

    # tagModified
    def test_geom_tagModified_valid(self):
        """valid input for tagModified"""
        val = True
        self.geom.Set(tagModified=val)
        self.assertEqual(val, self.geom._modified)

    def test_geom_tagModified_invalid(self):
        """invalid input for tagModified"""
        self.assertRaises(AssertionError, self.geom.Set, tagModified="hai")

    # vertices
    def test_geom_vertices(self):
        """valid input for vertices"""
        val = (
            (0, 0, 0),
            (1, 0, 0),
        )
        self.geom.Set(vertices=val)
        flat = Numeric.array([0.0, 0.0, 0.0, 1.0, 0.0, 0.0])
        self.assertEqual(
            True, numpy.alltrue(flat == self.geom.vertexSet.vertices.array.ravel())
        )

    def test_geom_vertices_invalid(self):
        """invalid input for vertices"""
        self.assertRaises(ValueError, self.geom.Set, vertices="hai")

    ##vreshape
    # def test_geom_vreshape(self):
    #     """valid input for vreshape
    #     """
    #     val = ((0,0,0,1,0,0),)
    #     reshape=True
    #     self.geom.Set(vertices=val, reshape=reshape)
    #     flat = Numeric.array([0.,0.,0.,1.,0.,0.])
    #     self.assertEqual(flat, self.geom.vertexSet.vertices.array.ravel())
    #
    # def test_geom_vreshape_invalid(self):
    #     """CANNOT FIND invalid input for vreshape
    #     """
    #     self.geom.Set(vertices=((0,0,0),(1,0,0),), vreshape='False')
    #     flat = Numeric.array([0.,0.,0.,1.,0.,0.])
    #     self.assertEqual(flat, self.geom.vertexSet.vertices.array.ravel())

    # shape
    def test_geom_shape(self):
        """valid input for shape"""
        val = ((0, 0, 0, 1, 0, 0),)
        shape = (3, 2)
        self.geom.Set(vertices=val, shape=shape)
        flat = Numeric.array([0.0, 0.0, 0.0, 1.0, 0.0, 0.0])
        self.assertEqual(
            True, numpy.alltrue(flat == self.geom.vertexSet.vertices.array.ravel())
        )

    # def test_geom_shape_invalid(self):
    #     """CANNOT FIND invalid input for shape
    #     """
    #     verts =((0,0,0),(1,0,0))
    #     self.geom.Set(vertices=verts, shape=5)
    #     flat = Numeric.array([0.,0.,0.,1.,0.,0.])
    #     self.assertRaises(AttributeError, self.geom.Set,shape=5)

    # texture
    # textureCoords
    # vnormals

    # materials
    # FIX: this method previously shared the name test_geom_materials with
    # the invalid-input test below, so it was silently shadowed and never
    # ran; it also misspelled 'materials'.  Disabled with the file's
    # 'xtest_' convention because geom.materials is not expected to compare
    # equal to the raw tuple -- the assertion needs review before enabling.
    def xtest_geom_materials(self):
        """valid input for materials"""
        val = ((1, 0, 0),)
        self.geom.Set(materials=val)
        self.assertEqual(val, self.geom.materials)

    def test_geom_materials_invalid(self):
        """invalid input for materials"""
        # FIX: renamed from test_geom_materials (duplicate name)
        self.assertRaises(ValueError, self.geom.Set, materials="hai")

    # polyFace
    # matBind
    # propName
    # matName
    # matInd
    # rawMaterial
    # matMask
    # transient

    # name
    def test_geom_name(self):
        """valid input for name"""
        self.geom.Set(name="test")
        newstate = self.geom.getState()["name"]
        self.assertEqual(newstate, "test")

    def xtest_geom_name_invalid(self):
        """invalid input for name
        ###### it seems that we have always been accepting this invalid data ####
        """
        self.geom.Set(name=" ")
        self.assertNotEqual(self.geom.name, " ")

    ##antialiased
    # def test_geom_antialiased(self):
    #     """valid input for antialiased
    #     """
    #     self.geom.Set(antialiased=True)
    #     newstate=self.geom.getState()['antialiased']
    #     self.assertEqual(newstate,True)
    #
    # def test_geom_antialiased_invalid(self):
    #     """invalid input for antialiased
    #     """
    #     self.assertRaises(ValueError, self.geom.Set,antialiased='hai')

    # lineWidth
    def test_linewidth_invalid_input(self):
        """invalid input for lineWidth"""
        self.assertRaises(AssertionError, self.geom.Set, lineWidth=-10)

    def test_linewidth(self):
        """valid input for lineWidth"""
        self.geom.Set(lineWidth=1)
        newstate = self.geom.getState()["lineWidth"]
        self.assertEqual(newstate, 1)

    def test_linewidth_bad_input(self):
        """bad input for lineWidth"""
        self.assertRaises(AssertionError, self.geom.Set, lineWidth="hai")

    # pointWidth
    def test_pointwidth(self):
        """valid input for pointWidth"""
        self.geom.Set(pointWidth=16)
        newstate = self.geom.getState()["pointWidth"]
        self.assertEqual(newstate, 16)

    def test_pointwidth_invalid(self):
        """invalid input for pointWidth"""
        self.assertRaises(AssertionError, self.geom.Set, pointWidth=-1.0)

    def test_pointwidth_bad_input(self):
        """bad input for pointWidth"""
        self.assertRaises(AssertionError, self.geom.Set, pointWidth="hai")

    def test_pointwidth_backPolyMode_point(self):
        """valid input for pointWidth with GL_POINT back polygon mode"""
        # FIX: renamed from test_pointwidth (duplicate name that shadowed
        # the simpler pointWidth test above)
        self.geom.Set(pointWidth=16)
        self.geom.Set(outline=False, backPolyMode=GL.GL_POINT)
        newstate = self.geom.getState()["pointWidth"]
        self.assertEqual(newstate, 16)

    # lighting
    def test_lighting(self):
        """valid input for lighting"""
        self.geom.Set(lighting=True)
        newstate = self.geom.getState()["lighting"]
        self.assertEqual(newstate, True)

    def test_geom_lighting_invalid(self):
        """invalid input for lighting"""
        self.assertRaises(ValueError, self.geom.Set, lighting="hai")

    # FIX: removed a duplicate test_geom_visible_invalid here that was a
    # copy of test_geom_lighting_invalid (it tested lighting, not visible)
    # and was shadowed by the real test_geom_visible_invalid below anyway.

    # visible
    def test_geom_visible(self):
        """valid input for visible"""
        self.geom.Set(visible=False)
        newstate = self.geom.getState()["visible"]
        self.assertEqual(newstate, False)
        # alternatively
        self.assertEqual(self.geom.visible, False)

    def test_geom_visible_invalid(self):
        """invalid input for visible"""
        self.assertRaises(ValueError, self.geom.Set, visible=[2, 3, 4])

    # outline
    def test_outline(self):
        """valid input for outline"""
        self.geom.Set(outline=True)
        newstate = self.geom.getState()["outline"]
        self.assertEqual(newstate, (True, True))

    def test_geom_outline_invalid(self):
        """invalid input for outline"""
        self.assertRaises(ValueError, self.geom.Set, outline="hai")

    # stippleLines
    def test_geom_stippleLines(self):
        """valid input for stippleLines"""
        self.geom.Set(stippleLines=True)
        newstate = self.geom.getState()["stippleLines"]
        self.assertEqual(newstate, True)

    def test_geom_stippleLines_invalid(self):
        """invalid input for stippleLines"""
        self.assertRaises(ValueError, self.geom.Set, stippleLines="hai")

    # stipplePolygons
    def test_geom_stipplePolygons(self):
        """valid input for stipplePolygons"""
        self.geom.Set(stipplePolygons=True)
        newstate = self.geom.getState()["stipplePolygons"]
        self.assertEqual(newstate, True)

    def test_geom_stipplePolygons_invalid(self):
        """invalid input for stipplePolygons"""
        self.assertRaises(ValueError, self.geom.Set, stipplePolygons="hai")

    # cull
    def test_geom_culling_none(self):
        """valid input for culling, none"""
        self.geom.Set(culling=GL.GL_NONE)
        newstate = self.geom.getState()["culling"]
        self.assertEqual(newstate, "none")

    def test_geom_culling_front(self):
        """valid input for culling, front"""
        self.geom.Set(culling=GL.GL_FRONT)
        newstate = self.geom.getState()["culling"]
        self.assertEqual(newstate, "front")

    def test_geom_culling_back(self):
        """valid input for culling, back"""
        self.geom.Set(culling=GL.GL_BACK)
        newstate = self.geom.getState()["culling"]
        self.assertEqual(newstate, "back")

    def test_geom_culling_front_back(self):
        """valid input for culling, front_and_back"""
        self.geom.Set(culling=GL.GL_FRONT_AND_BACK)
        newstate = self.geom.getState()["culling"]
        self.assertEqual(newstate, "front_and_back")

    def test_geom_culling_invalid(self):
        """invalid input for culling"""
        self.geom.culling = viewerConst.INHERIT
        self.assertRaises(AssertionError, self.geom.Set, culling="hai")

    # pickable
    def test_geom_pickable(self):
        """valid input for pickable"""
        self.geom.Set(pickable=0)
        newstate = self.geom.getState()["pickable"]
        self.assertEqual(newstate, 0)

    def test_geom_pickable_invalid(self):
        """invalid input for pickable"""
        self.assertRaises(ValueError, self.geom.Set, pickable="hai")

    # pickableVertices
    def test_geom_pickableVertices(self):
        """valid input for pickableVertices"""
        self.geom.Set(pickableVertices=True)
        newstate = self.geom.getState()["pickableVertices"]
        self.assertEqual(newstate, True)

    def test_geom_pickableVertices_invalid(self):
        """invalid input for pickableVertices"""
        self.assertRaises(ValueError, self.geom.Set, pickableVertices="hai")

    # scissor
    def test_geom_scissor_invalid(self):
        """invalid input for scissor on/off not working,a box is displayed"""
        self.assertRaises(ValueError, self.geom.Set, scissor="hai")

    # scissorX
    def test_geom_scissorX(self):
        """valid input for scissorX"""
        self.geom.Set(scissorX=1)
        newstate = self.geom.getState()["scissorX"]
        self.assertEqual(newstate, 1)

    def test_geom_scissorX_invalid(self):
        """invalid input for scissorX"""
        self.assertRaises(ValueError, self.geom.Set, scissorX="hai")

    # scissorY
    def test_geom_scissorY(self):
        """valid input for scissorY"""
        self.geom.Set(scissorY=1)
        newstate = self.geom.getState()["scissorY"]
        self.assertEqual(newstate, 1)

    def test_geom_scissorY_invalid(self):
        """invalid input for scissorY"""
        # FIX: docstring previously said "valid input"
        self.assertRaises(ValueError, self.geom.Set, scissorY="hai")

    # scissorW
    def test_geom_scissorW(self):
        """valid input for scissorW"""
        self.geom.Set(scissorW=300)
        newstate = self.geom.getState()["scissorW"]
        self.assertEqual(newstate, 300)

    def test_geom_scissorW_invalid(self):
        """invalid input for scissorW"""
        self.assertRaises(ValueError, self.geom.Set, scissorW="hai")

    # scissorH
    def test_geom_scissorH(self):
        """valid input for scissorH"""
        self.geom.Set(scissorH=300)
        newstate = self.geom.getState()["scissorH"]
        self.assertEqual(newstate, 300)

    def test_geom_scissorH_invalid(self):
        """invalid input for scissorH"""
        self.assertRaises(ValueError, self.geom.Set, scissorH="hai")

    # scissorAspectRatio
    def test_geom_scissorAspectRatio(self):
        """valid input for scissorAspectRatio"""
        self.geom.Set(scissorAspectRatio=2.0)
        newstate = self.geom.getState()["scissorAspectRatio"]
        self.assertEqual(newstate, 2.0)

    def test_geom_scissorAspectRatio_invalid(self):
        """invalid input for scissorAspectRatio"""
        self.assertRaises(ValueError, self.geom.Set, scissorAspectRatio="hai")

    # opacity
    # def test_geom_opacity(self):
    #     """valid input for scissorAspectRatio
    #     NOT AVAILABLE in STATE
    #     """
    #     self.geom.Set(opacity = [.5, .5])
    #     ###THIS IS BURIED SOMEWHERE IN geom.materials[pf]
    #     ###where pf is something like GL.GL_FRONT
    #     print "\nopacity test not completed..."
    #     ###self.assertEqual(self.geom.opacity,2.0)
    #
    # def test_geom_opacity_invalid(self):
    #     """invalid input for opacity DOES NOT RAISE ERROR
    #     """
    #     val = [.5,.5]
    #     self.geom.Set(opacity=val)
    #     print "\ninvalid opacity test not completed..."
    #     ##self.assertEqual(self.geom.opacity, val)
    #     #self.assertRaises(ValueError, self.geom.Set, opacity=[2,3,4])

    # depthMask
    def test_geom_depthmask(self):
        """valid input for depthmask"""
        self.geom.Set(depthMask=1)
        newstate = self.geom.getState()["depthMask"]
        self.assertEqual(newstate, 1)

    def test_geom_depthmask_invalid(self):
        """invalid input for depthmask"""
        self.assertRaises(ValueError, self.geom.Set, depthMask="hai")

    # blendFunctions
    def test_geom_blend_func_zero(self):
        """valid input for blend func zero"""
        val1 = GL.GL_ZERO
        val2 = GL.GL_ZERO
        self.geom.Set(blendFunctions=(val1, val2))
        newstate = self.geom.getState()["blendFunctions"]
        self.assertEqual(newstate, ("GL_ZERO", "GL_ZERO"))

    def test_geom_blend_func_invalid(self):
        """invalid input for blend func"""
        self.assertRaises(AssertionError, self.geom.Set, blendFunctions="hai")

    def test_geom_blend_func_one(self):
        """valid input for blend func one"""
        val1 = GL.GL_ONE
        val2 = GL.GL_ONE
        self.geom.Set(blendFunctions=(val1, val2))
        newstate = self.geom.getState()["blendFunctions"]
        self.assertEqual(newstate, ("GL_ONE", "GL_ONE"))

    def test_geom_blend_func_color(self):
        """valid input for blend func color"""
        val1 = GL.GL_DST_COLOR
        val2 = GL.GL_SRC_COLOR
        self.geom.Set(blendFunctions=(val1, val2))
        newstate = self.geom.getState()["blendFunctions"]
        self.assertEqual(newstate, ("GL_DST_COLOR", "GL_SRC_COLOR"))

    def test_geom_blend_func_one_minus_color(self):
        """valid input for blend func one minus color"""
        val1 = GL.GL_ONE_MINUS_DST_COLOR
        val2 = GL.GL_ONE_MINUS_SRC_COLOR
        self.geom.Set(blendFunctions=(val1, val2))
        newstate = self.geom.getState()["blendFunctions"]
        # FIX: expected name was "GL.GL_ONE_MINUS_SRC_COLOR"; getState
        # reports bare GL_* names, as every sibling assertion expects
        self.assertEqual(
            newstate, ("GL_ONE_MINUS_DST_COLOR", "GL_ONE_MINUS_SRC_COLOR")
        )

    def test_geom_blend_func_src_alpha(self):
        """valid input for blend func src_alpha"""
        val1 = GL.GL_SRC_ALPHA
        val2 = GL.GL_SRC_ALPHA
        self.geom.Set(blendFunctions=(val1, val2))
        newstate = self.geom.getState()["blendFunctions"]
        self.assertEqual(newstate, ("GL_SRC_ALPHA", "GL_SRC_ALPHA"))

    def test_geom_blend_func_dst_alpha(self):
        """valid input for blend func dst_alpha"""
        val1 = GL.GL_DST_ALPHA
        val2 = GL.GL_DST_ALPHA
        self.geom.Set(blendFunctions=(val1, val2))
        newstate = self.geom.getState()["blendFunctions"]
        self.assertEqual(newstate, ("GL_DST_ALPHA", "GL_DST_ALPHA"))

    def test_geom_blend_func_dst_one_minus_alpha(self):
        """valid input for blend func dst_one_minus_alpha"""
        val1 = GL.GL_ONE_MINUS_DST_ALPHA
        val2 = GL.GL_ONE_MINUS_DST_ALPHA
        self.geom.Set(blendFunctions=(val1, val2))
        newstate = self.geom.getState()["blendFunctions"]
        self.assertEqual(newstate, ("GL_ONE_MINUS_DST_ALPHA", "GL_ONE_MINUS_DST_ALPHA"))

    def test_geom_blend_func_src_one_minus_alpha(self):
        """valid input for blend func src_one_minus_alpha"""
        val1 = GL.GL_ONE_MINUS_SRC_ALPHA
        val2 = GL.GL_ONE_MINUS_SRC_ALPHA
        self.geom.Set(blendFunctions=(val1, val2))
        newstate = self.geom.getState()["blendFunctions"]
        self.assertEqual(newstate, ("GL_ONE_MINUS_SRC_ALPHA", "GL_ONE_MINUS_SRC_ALPHA"))

    # instanceMatrices

    # inheritMaterial
    def xtest_geom_inheritMaterial_invalid(self):
        """invalid input for inheritMaterial
        ###### it seems that we have always been accepting this invalid data ####
        """
        self.assertRaises(AssertionError, self.geom.Set, inheritMaterial="hai")

    # inheritXform
    def test_geom_inheritXform_invalid(self):
        """invalid input for inheritXform"""
        self.assertRaises(AssertionError, self.geom.Set, inheritXform="hai")

    # inheritPointWidth
    def test_geom_inheritPointWidth_invalid(self):
        """invalid input for inheritPointWidth"""
        self.assertRaises(AssertionError, self.geom.Set, inheritPointWidth="hai")

    # inheritLineWidth
    def test_geom_inheritLineWidth_invalid(self):
        """invalid input for inheritLineWidth"""
        self.assertRaises(AssertionError, self.geom.Set, inheritLineWidth="hai")

    # inheritStippleLines
    def test_geom_inheritStippleLines_invalid(self):
        """invalid input for inheritStippleLines"""
        self.assertRaises(AssertionError, self.geom.Set, inheritStippleLines="hai")

    # inheritStipplePolygons
    def test_geom_inheritStipplePolygons_invalid(self):
        """invalid input for inheritStipplePolygons"""
        self.assertRaises(AssertionError, self.geom.Set, inheritStipplePolygons="hai")

    # inheritBackPolyMode
    def test_geom_inheritBackPolyMode_invalid(self):
        """invalid input for inheritBackPolyMode"""
        self.assertRaises(AssertionError, self.geom.Set, inheritBackPolyMode="hai")

    # inheritShading
    def test_geom_inheritShading_invalid(self):
        """invalid input for inheritShading"""
        self.assertRaises(AssertionError, self.geom.Set, inheritShading="hai")

    # inheritFrontPolyMode
    def test_geom_inheritFrontPolyMode_invalid(self):
        """invalid input for inheritFrontPolyMode"""
        self.assertRaises(AssertionError, self.geom.Set, inheritFrontPolyMode="hai")

    # inheritCulling
    def test_geom_inheritCulling_invalid(self):
        """invalid input for inheritCulling"""
        self.assertRaises(AssertionError, self.geom.Set, inheritCulling="hai")

    def test_geom_culling_inherit(self):
        """valid input for culling, inherit"""
        self.geom.culling = viewerConst.INHERIT
        self.geom.Set(culling=self.geom.culling)
        newstate = self.geom.getState()["culling"]
        self.assertEqual(newstate, "inherit")

    # transparent
    def test_geom_transparent(self):
        """valid input for transparent"""
        self.geom.Set(transparent=True)
        newstate = self.geom.getState()["transparent"]
        self.assertEqual(newstate, True)

    def test_geom_transparent_invalid(self):
        """invalid input for transparent"""
        self.assertRaises(AssertionError, self.geom.Set, transparent="hai")

    # immediateRendering
    def test_geom_immediateRendering(self):
        """valid input for immediateRendering"""
        self.geom.Set(immediateRendering=True)
        newstate = self.geom.getState()["immediateRendering"]
        self.assertEqual(newstate, True)

    def test_geom_immediateRendering_invalid(self):
        """invalid input for immediateRendering"""
        self.assertRaises(AssertionError, self.geom.Set, immediateRendering="hai")

    # frontPolyMode
    def test_geom_front_polymode(self):
        """valid input for front_polymode"""
        self.geom.Set(frontPolyMode="fill")
        newstate = self.geom.getState()["frontPolyMode"]
        self.assertEqual(newstate, "fill")

    def test_geom_front_polymode_invalid(self):
        """invalid input for front_polymode"""
        self.assertRaises(KeyError, self.geom.Set, frontPolyMode="hai")

    # backPolyMode
    def test_geom_back_polymode(self):
        """valid input for back_polymode"""
        self.geom.Set(backPolyMode="line")
        newstate = self.geom.getState()["backPolyMode"]
        self.assertEqual(newstate, "line")

    def test_geom_back_polymode_invalid(self):
        """invalid input for back_polymode"""
        self.assertRaises(KeyError, self.geom.Set, backPolyMode="hai")

    # shading
    def test_geom_shading_mode(self):
        """valid input for shading mode"""
        self.geom.Set(shading="smooth")
        newstate = self.geom.getState()["shading"]
        self.assertEqual(newstate, "smooth")

    def test_geom_shading_mode_invalid(self):
        """invalid input for shading mode"""
        # FIX: docstring previously said "valid input"
        self.assertRaises(KeyError, self.geom.Set, shading="hai")

    # rotation
    def test_rotation(self):
        """valid input for rotation"""
        old_state = self.geom.getState()["rotation"]
        from mglutil.math.rotax import rotax
        import math

        # 90-degree rotation about the z axis
        matRot = rotax((0, 0, 0), (0, 0, 1), math.pi / 2.0)
        self.geom.Set(rotation=matRot)
        new_state = self.geom.getState()["rotation"]
        # hard to compare list with [4][4]array; just check it changed
        self.assertEqual(old_state != new_state, True)

    def test_rotation_invalid(self):
        """invalid input for rotation"""
        self.assertRaises(ValueError, self.geom.Set, rotation="hai")

    def test_rotation_invalid_array_shape(self):
        """invalid input, bad array shape for rotation"""
        self.assertRaises(ValueError, self.geom.Set, rotation=[1, 1])

    # translation
    def test_translation(self):
        """valid input for translation"""
        self.geom.Set(translation=numpy.ones(3, "f"))
        self.assertEqual(
            numpy.alltrue(self.geom.getState()["translation"] == numpy.ones(3)), True
        )

    def test_translation_invalid(self):
        """invalid input for translation"""
        self.assertRaises(ValueError, self.geom.Set, translation="hai")

    def test_translation_invalid_array_shape(self):
        """invalid input, bad array shape for translation"""
        self.assertRaises(ValueError, self.geom.Set, translation=[1, 1])

    # scale
    def test_scale(self):
        """valid input for scale"""
        self.geom.Set(scale=Numeric.ones(3) * 2)
        self.assertEqual(
            numpy.alltrue(self.geom.getState()["scale"] == Numeric.ones(3) * 2), True
        )

    def test_scale_invalid(self):
        """invalid input for scale"""
        self.assertRaises(ValueError, self.geom.Set, scale="hai")

    def test_scale_invalid_array_shape(self):
        """invalid input, bad array shape for scale"""
        self.assertRaises(ValueError, self.geom.Set, scale=[1, 1])

    # pivot
    def test_pivot(self):
        """valid input for pivot"""
        self.geom.Set(pivot=numpy.ones(3) * 0.5)
        self.assertEqual(
            True, numpy.alltrue(self.geom.getState()["pivot"] == numpy.ones(3) * 0.5)
        )

    def test_pivot_invalid(self):
        """invalid input for pivot"""
        self.assertRaises(ValueError, self.geom.Set, pivot="hai")

    def test_pivot_invalid_array_shape(self):
        """invalid input, bad array shape for pivot"""
        # FIX: renamed from test_pivot_invalid (duplicate name that
        # shadowed the 'hai' test above); name now matches the
        # translation/scale naming pattern
        self.assertRaises(ValueError, self.geom.Set, pivot=[1, 1])
###class Geom_Method_Tests(Geom_Test):
# """tests for methods of Geom class
# getState
# getGeomMaterialCode
# getGeomClipPlanesCode
# delete <-abstract method
# getVertices
# getVNormals
# setViewer
# getDepthMask
# isTransparent
# GetFrontPolyMode
# GetShading
# MaterialBindingMode
# AddMaterial
# SetMaterial
# GetNormals
# Add
# SetForChildren
# setTransparency
# updateParentsForImmediateRendering
# _Hide
# _Remove
# BoundingBox
# DisplayFunction
# Draw
# RedoDisplayList
# AddClipPlane
# RemoveClipPlane
# LastParentBeforeRoot
# ApplyParentsTransform
# TransformCoords
# AllVisibleObjects
# AllObjects
# ObjSubTreeBB
# ComputeBB
# _DrawBox
# DrawTreeBoundingBox
# DrawBoundingBox
# RenderMode
# asIndexedPolygons
# sortPoly
# sortPoly_cb
# getFaces
# getFNormals
# _FixedLengthFaces
# _PrimitiveType
# """
class Geom_Viewer_Tests(unittest.TestCase):
    """tests for geom.Set in viewer

    Each test locates the Menubutton labelled
    ('Current', 'geom', 'properties') inside the GUI's inheritF frame,
    invokes one of its menu entries (which toggles the corresponding
    property on the current geometry), then checks geom.getState().
    """

    def setUp(self):
        # Create a real Viewer (opens a Tk window) and register one geom
        # as the current object so the GUI menu acts on it.
        self.vi = Viewer(verbose=0)
        self.geom = Geom(name="baseTest")
        self.vi.AddObject(self.geom)
        self.vi.currentObject = self.geom

    def tearDown(self):
        """
        clean-up
        """
        # Exit may fail if the viewer was already torn down; best effort.
        try:
            self.vi.Exit()
        except:
            pass

    def test_geom_protected(self):
        """valid input for protected 0/1"""
        self.geom.Set(protected=True)
        # Find the properties Menubutton; invoking 'protected' toggles the
        # flag back off, so the asserted state is False.
        for c in self.vi.GUI.inheritF.children.values():
            if c.__class__ == Tkinter.Menubutton and c.configure("text")[-1] == (
                "Current",
                "geom",
                "properties",
            ):
                self.inheritF_menu = c.menu
                protected_index = self.inheritF_menu.index("protected")
                self.inheritF_menu.invoke(protected_index)
                newstate = self.geom.getState()["protected"]
                self.assertEqual(newstate, False)

    # def test_geom_lighting(self):
    #     """valid input for lighting
    #     """
    #     self.geom.Set(lighting=False)
    #     for c in self.vi.GUI.inheritF.children.values():
    #         if c.__class__ == Tkinter.Menubutton \
    #         and c.configure('text')[-1] == ('Current', 'geom', 'properties'):
    #             self.inheritF_menu = c.menu
    #     lighting_index = self.inheritF_menu.index('lighting')
    #     self.geom.viewer.SetCurrentObject(self.geom)
    #     self.inheritF_menu.invoke(lighting_index)
    #     newstate=self.geom.getState()['lighting']
    #     self.assertEqual(newstate,True)

    def test_geom_visible(self):
        """valid input for visible"""
        # Start hidden, then toggle 'visible' through the GUI menu.
        self.geom.Set(visible=0)
        newstate = self.geom.getState()["visible"]
        self.assertEqual(newstate, 0)
        for c in self.vi.GUI.inheritF.children.values():
            if c.__class__ == Tkinter.Menubutton and c.configure("text")[-1] == (
                "Current",
                "geom",
                "properties",
            ):
                self.inheritF_menu = c.menu
                break
        visible_index = self.inheritF_menu.index("visible")
        self.inheritF_menu.invoke(visible_index)
        newstate = self.geom.getState()["visible"]
        self.assertEqual(newstate, 1)

    def test_geom_outline_front_polymode(self):
        """valid input for outline"""
        self.vi.currentObject.frontPolyMode = "outlines"
        self.geom.Set(outline=1.0)
        # NOTE(review): 'mode' is assigned but never used below -- looks
        # like leftover code; confirm before removing.
        mode = viewerConst.INHERIT
        self.geom.Set(outline=True, frontPolyMode=GL.GL_FILL)
        newstate = self.geom.getState()["outline"]
        self.assertEqual(newstate, (True, True))

    def test_geom_outline_back_polymode(self):
        """valid input for outline"""
        self.vi.currentObject.backPolyMode = "outlines"
        self.geom.Set(outline=1.0)
        # NOTE(review): 'mode' unused here too; see front_polymode test.
        mode = viewerConst.INHERIT
        self.geom.Set(outline=True, backPolyMode=GL.GL_FILL)
        newstate = self.geom.getState()["outline"]
        self.assertEqual(newstate, (True, True))

    def test_geom_scissor(self):
        """valid input for scissor on/off not working,a box is displayed"""
        # scissor starts True; invoking the menu entry is expected to
        # leave the state True per the assertion below.
        self.geom.Set(scissor=True)
        for c in self.vi.GUI.inheritF.children.values():
            if c.__class__ == Tkinter.Menubutton and c.configure("text")[-1] == (
                "Current",
                "geom",
                "properties",
            ):
                self.inheritF_menu = c.menu
                scissor_index = self.inheritF_menu.index("scissor")
                self.inheritF_menu.invoke(scissor_index)
                newstate = self.geom.getState()["scissor"]
                self.assertEqual(newstate, True)

    def test_geom_inheritMaterial(self):
        """valid input for inheritMaterial"""
        for c in self.vi.GUI.inheritF.children.values():
            if c.__class__ == Tkinter.Menubutton and c.configure("text")[-1] == (
                "Current",
                "geom",
                "properties",
            ):
                self.inheritF_menu = c.menu
                inheritMaterial_index = self.inheritF_menu.index("inheritMaterial")
                self.inheritF_menu.invoke(inheritMaterial_index)
                newstate = self.geom.getState()["inheritMaterial"]
                self.assertEqual(newstate, True)

    def test_geom_inheritMaterial_red(self):
        """valid input for inheritMaterial set to red ,after toggle on
        inherits from parent
        """
        self.geom.Set(materials=((1, 0, 0),))
        for c in self.vi.GUI.inheritF.children.values():
            if c.__class__ == Tkinter.Menubutton and c.configure("text")[-1] == (
                "Current",
                "geom",
                "properties",
            ):
                self.inheritF_menu = c.menu
                inheritMaterial_index = self.inheritF_menu.index("inheritMaterial")
                self.inheritF_menu.invoke(inheritMaterial_index)
                newstate = self.geom.getState()["inheritMaterial"]
                # geom.materials is not expected to equal the raw tuple
                # once inheritance is toggled on.
                self.assertEqual(self.geom.materials == (1, 0, 0), False)

    def test_geom_inheritXform(self):
        """valid input for inheritXform"""
        for c in self.vi.GUI.inheritF.children.values():
            if c.__class__ == Tkinter.Menubutton and c.configure("text")[-1] == (
                "Current",
                "geom",
                "properties",
            ):
                self.inheritF_menu = c.menu
                inheritXform_index = self.inheritF_menu.index("inheritXform")
                self.inheritF_menu.invoke(inheritXform_index)
                newstate = self.geom.getState()["inheritXform"]
                self.assertEqual(newstate, True)

    # def test_geom_inheritPointWidth(self):
    #     """valid input for inheritPointWidth
    #     """
    #     for c in self.vi.GUI.inheritF.children.values():
    #         if c.__class__ == Tkinter.Menubutton \
    #         and c.configure('text')[-1] == ('Current', 'geom', 'properties'):
    #             self.inheritF_menu = c.menu
    #     inheritPointWidth_index = self.inheritF_menu.index('inheritPointWidth')
    #     self.inheritF_menu.invoke(inheritPointWidth_index)
    #     newstate=self.geom.getState()['inheritPointWidth']
    #     self.assertEqual(newstate,True)
    #
    #
    # def test_geom_inheritLineWidth(self):
    #     """valid input for inheritLineWidth
    #     """
    #     for c in self.vi.GUI.inheritF.children.values():
    #         if c.__class__ == Tkinter.Menubutton \
    #         and c.configure('text')[-1] == ('Current', 'geom', 'properties'):
    #             self.inheritF_menu = c.menu
    #     inheritLineWidth_index = self.inheritF_menu.index('inheritLineWidth')
    #     self.inheritF_menu.invoke(inheritLineWidth_index)
    #     newstate=self.geom.getState()['inheritLineWidth']
    #     self.assertEqual(newstate,True)
    #
    #
    # def test_geom_inheritStippleLines(self):
    #     """valid input for inheritStippleLines
    #     """
    #     for c in self.vi.GUI.inheritF.children.values():
    #         if c.__class__ == Tkinter.Menubutton \
    #         and c.configure('text')[-1] == ('Current', 'geom', 'properties'):
    #             self.inheritF_menu = c.menu
    #     inheritStippleLines_index = self.inheritF_menu.index('inheritStippleLines')
    #     self.inheritF_menu.invoke(inheritStippleLines_index)
    #     newstate=self.geom.getState()['inheritStippleLines']
    #     self.assertEqual(newstate,True)

    # def test_geom_inheritStipplePolygons(self):
    #     """valid input for inheritStipplePolygons
    #     """
    #     for c in self.vi.GUI.inheritF.children.values():
    #         if c.__class__ == Tkinter.Menubutton \
    #         and c.configure('text')[-1] == ('Current', 'geom', 'properties'):
    #             self.inheritF_menu = c.menu
    #     inheritStipplePolygons_index = self.inheritF_menu.index('inheritStipplePolygons')
    #     self.inheritF_menu.invoke(inheritStipplePolygons_index)
    #     newstate=self.geom.getState()['inheritStipplePolygons']
    #     self.assertEqual(newstate,True)

    # def test_geom_inheritFrontPolyMode(self):
    #     """valid input for inheritFrontPolyMode
    #     """
    #     self.geom.Set(inheritFrontPolyMode = True)
    #     for c in self.vi.GUI.inheritF.children.values():
    #         if c.__class__==Tkinter.Menubutton:
    #             self.inheritF_menu = c.menu
    #     inheritFrontPolyMode_index = self.inheritF_menu.index('inheritFrontPolyMode')
    #     self.inheritF_menu.invoke(inheritFrontPolyMode_index)
    #     newstate=self.geom.getState()['inheritFrontPolyMode']
    #     self.assertEqual(newstate,True)

    # def test_geom_inheritBacktPolyMode(self):
    #     """valid input for inheritBackPolyMode
    #     """
    #     for c in self.vi.GUI.inheritF.children.values():
    #         if c.__class__==Tkinter.Menubutton:
    #             self.inheritF_menu = c.menu
    #     inheritBackPolyMode_index = self.inheritF_menu.index('inheritBackPolyMode')
    #     self.inheritF_menu.invoke(inheritBackPolyMode_index)
    #     newstate=self.geom.getState()['inheritBackPolyMode']
    #     self.assertEqual(newstate,True)

    # def test_geom_inheritShading(self):
    #     """valid input for inheritShading
    #     """
    #     for c in self.vi.GUI.inheritF.children.values():
    #         if c.__class__==Tkinter.Menubutton:
    #             self.inheritF_menu = c.menu
    #     inheritShading_index = self.inheritF_menu.index('inheritShading')
    #     self.inheritF_menu.invoke(inheritShading_index)
    #     newstate=self.geom.getState()['inheritShading']
    #     self.assertEqual(newstate,True)

    # def test_geom_inheritCulling(self):
    #     """valid input for inheritCulling
    #     """
    #     self.geom.inheritCulling=True
    #     for c in self.vi.GUI.inheritF.children.values():
    #         if c.__class__==Tkinter.Menubutton:
    #             self.inheritF_menu = c.menu
    #     inheritCulling_index = self.inheritF_menu.index('inheritCulling')
    #     self.inheritF_menu.invoke(inheritCulling_index)
    #     newstate=self.geom.getState()['inheritCulling']
    #     self.assertEqual(newstate,True)
class IndexedGeom_Set_Tests(unittest.TestCase):
    """Tests for Set()/attribute handling on the IndexedGeom class.

    keywords = Geom.keywords + [
        'type',
        'faces',
        'fnormals',
        'freshape',
        ]
    """
    def setUp(self):
        # A fresh geometry per test keeps the cases independent of each other.
        self.geom = IndexedGeom(name="indexed_baseTest")

    def tearDown(self):
        """
        clean-up
        """
        try:
            del self.geom
        except:
            pass

    # type
    def test_IndexedGeom_types_invalid(self):
        """invalid input for types"""
        # NOTE(review): this asserts that *calling* the geom object raises
        # AttributeError; self.geom.Set looks like the intended callee -- confirm.
        self.assertRaises(AttributeError, self.geom, primitiveType="hai")

    # GL_TRIANGLES, GL_QUADS... are the same as type?????
    def test_IndexedGeom_types_GL_TRIANGLES(self):
        """valid input GL_TRIANGLES"""
        self.geom.primitiveType = GL.GL_TRIANGLES
        # 4 is the numeric value of the GL_TRIANGLES OpenGL enum.
        self.assertEqual(self.geom.primitiveType, 4)

    def test_IndexedGeom_types_GL_QUADS(self):
        """valid input GL_QUADS"""
        self.geom.primitiveType = GL.GL_QUADS
        # 7 is the numeric value of the GL_QUADS OpenGL enum.
        self.assertEqual(self.geom.primitiveType, 7)

    def test_IndexedGeom_types_GL_POLYGONS(self):
        """valid input GL_POLYGONS"""
        self.geom.primitiveType = GL.GL_POLYGON
        # 9 is the numeric value of the GL_POLYGON OpenGL enum.
        self.assertEqual(self.geom.primitiveType, 9)

    # faces
    def test_IndexedGeom_faces(self):
        """test faces"""
        self.geom.Set(vertices=[[0, 0, 0], [1, 0, 0]], faces=((0, 1),))
        self.assertEqual(len(self.geom.faceSet.faces.array), 1)

    def test_IndexedGeom_faces_invalid_not_sequence(self):
        """invalid input for faces, faces should be list of lists of integers"""
        self.assertRaises(TypeError, self.geom.Set, faces=20)

    def test_IndexedGeom_faces_invalid_indices(self):
        """invalid input for faces, -20 not a good index"""
        self.assertRaises(ValueError, self.geom.Set, faces=[[-20, 3, 4]])

    def test_IndexedGeom_faces_set(self):
        """valid input for faces"""
        self.geom.Set(
            vertices=[[0, 0, 0], [1, 0, 0]],
            faces=(
                (0, 1),
                (1, 0),
            ),
        )
        self.assertEqual(len(self.geom.faceSet.faces.array), 2)

    # fnormals
    def test_IndexedGeom_fnormals(self):
        """test fnormals"""
        self.geom.Set(vertices=[[0, 0, 0], [1, 0, 0]], faces=((0, 1),))
        # No normals were supplied, so the property is expected to be unset.
        self.assertEqual(self.geom.faceSet.normals.GetProperty(), None)

    def test_IndexedGeom_fnormals_invalid(self):
        """invalid input fnormals"""
        self.geom.Set(vertices=[[0, 0, 0], [1, 0, 0]], faces=((0, 1),))
        self.assertRaises(ValueError, self.geom.Set, fnormals="hai")

    # normals
    def test_IndexedGeom_normals(self):
        """valid input for normals"""
        self.geom.faceSet.normals.SetValues([(0, 1), (1, 0)])
        self.assertEqual(len(self.geom.faceSet.normals), 2)
# freshape !!!!!!NOT DONE!!!!!!!
###class IndexedGeom_Method_Tests(Geom_Test):
### """tests for methods of IndexedGeom class
# methods:
# getFaces
# getFNormals
# _FixedLengthFaces
# _PrimitiveType
# Add
# Set
# ComputeVertexNormals
# ComputeFaceNormals
# VertexNormalFunction
# FaceNormalFunction
# sortPoly
# DisplayFunction
# Draw
# RedoDisplayList
# removeDuplicatedVertices
if __name__ == "__main__":
    # Only run the suites listed below: unittest.main treats the extra argv
    # entries after the program name as test names to select.
    test_cases = [
        "Geom__init__Tests",
        "Geom_Set_Tests",
        "Geom_Viewer_Tests",
        "IndexedGeom_Set_Tests",
    ]
    unittest.main(
        argv=(
            [
                __name__,
            ]
            + test_cases
        )
    )
    # remove the -v flag to make output cleaner
    # unittest.main( argv=([__name__ ,'-v'] + test_cases) )
    # unittest.main()
| 35.05053 | 90 | 0.619155 |
d6e7976e3d67997398c00b7d8d76b3795a395c1d | 269 | py | Python | lifx_api.py | maltese-poodle/lifx_agent | 7e9f9b9f3281f441a4725daba448275bc8ce5e97 | [
"MIT"
] | null | null | null | lifx_api.py | maltese-poodle/lifx_agent | 7e9f9b9f3281f441a4725daba448275bc8ce5e97 | [
"MIT"
] | null | null | null | lifx_api.py | maltese-poodle/lifx_agent | 7e9f9b9f3281f441a4725daba448275bc8ce5e97 | [
"MIT"
] | null | null | null | #!/usr/bin/python
import os
import requests
def get_auth_header():
    """Build the Authorization header for LIFX HTTP API requests.

    The API token is read from the ``LIFX_API_KEY`` environment variable.

    Returns:
        dict: ``{"Authorization": "Bearer <token>"}``

    Raises:
        RuntimeError: if ``LIFX_API_KEY`` is not set.  The previous
            implementation silently produced the invalid header
            ``"Bearer None"`` in that case, which only fails later with an
            opaque 401 from the API.
    """
    token = os.getenv('LIFX_API_KEY')
    if token is None:
        raise RuntimeError("LIFX_API_KEY environment variable is not set")
    return {"Authorization": f"Bearer {token}"}
def get_devices():
    """Fetch all lights visible to the account from the LIFX HTTP API.

    Returns the raw ``requests.Response`` object (callers must call
    ``.json()`` themselves if they want the parsed payload).
    """
    # NOTE(review): no timeout is passed to requests.get, so this call can
    # hang indefinitely on network problems -- consider adding timeout=...
    return requests.get('https://api.lifx.com/v1/lights/all', headers=get_auth_header())
# Import-time side effect: executing this module performs the API call above.
print(get_devices())
| 22.416667 | 88 | 0.717472 |
11a08e4fbfe30c3dea0cdb05b66dceffa03aff93 | 44,072 | py | Python | jax/experimental/maps.py | agudallago/jax | 3df58b9729e3991b64cecbd25c48f101ae92e99b | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | jax/experimental/maps.py | agudallago/jax | 3df58b9729e3991b64cecbd25c48f101ae92e99b | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | jax/experimental/maps.py | agudallago/jax | 3df58b9729e3991b64cecbd25c48f101ae92e99b | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
import contextlib
import numpy as np
import itertools as it
from collections import OrderedDict
from typing import (Callable, Iterable, Tuple, Optional, Dict, Any, Set,
NamedTuple, Union, Sequence)
from warnings import warn
from functools import wraps, partial
from .. import numpy as jnp
from .. import core
from .. import linear_util as lu
from ..api import _check_callable, _check_arg
from ..tree_util import (tree_flatten, tree_unflatten, all_leaves,
_replace_nones, tree_map, tree_leaves)
from ..api_util import (flatten_fun_nokwargs, flatten_axes, _ensure_index_tuple,
donation_vector)
from ..interpreters import partial_eval as pe
from ..interpreters import pxla
from ..interpreters import xla
from ..interpreters import batching
from ..lib import xla_bridge as xb
from ..lib import xla_client as xc
from .._src.util import safe_map, safe_zip, HashableFunction, as_hashable_function, unzip2
from .._src.lax.parallel import _axis_index_translation_rule
map, unsafe_map = safe_map, map
zip = safe_zip
xops = xc.ops
EXPERIMENTAL_SPMD_LOWERING = False
class FrozenDict:
  """A hashable, read-only mapping wrapper.

  Only a whitelisted set of non-mutating dict methods is exposed, so
  instances can safely be used as (hashable) primitive parameters.
  """

  # Read-only dict methods that are forwarded to the underlying dict.
  allowed_methods = {'items', 'values', 'keys', 'get'}

  def __init__(self, *args, **kwargs):
    self.contents = dict(*args, **kwargs)

  def __getattr__(self, name):
    # Forward only the whitelisted read-only methods; everything else
    # (update, pop, ...) is reported as missing.
    if name in self.allowed_methods:
      return getattr(self.contents, name)
    raise AttributeError(name)

  def __iter__(self):
    return iter(self.contents)

  def __len__(self):
    return len(self.contents)

  def __getitem__(self, name):
    return self.contents[name]

  def __eq__(self, other):
    return isinstance(other, FrozenDict) and self.contents == other.contents

  def __hash__(self):
    return hash(tuple(self.contents.items()))

  def __repr__(self):
    return f"FrozenDict({self.contents})"
# Multi-dimensional generalized map
AxisName = core.AxisName
ResourceAxisName = AxisName # Different name just for documentation purposes
Mesh = pxla.Mesh
# TODO: Support sequential mapping
class ResourceEnv:
  """Immutable description of the resources (the physical device mesh)
  available to an xmap invocation."""
  __slots__ = ('physical_mesh',)
  physical_mesh: "Mesh"

  def __init__(self, physical_mesh: "Mesh"):
    # Bypass our own __setattr__, which is disabled to enforce immutability.
    super().__setattr__('physical_mesh', physical_mesh)

  @property
  def physical_resource_axes(self) -> "Set[ResourceAxisName]":
    return set(self.physical_mesh.axis_names)

  @property
  def resource_axes(self) -> "Set[ResourceAxisName]":
    # Only physical (mesh) resources exist at the moment.
    return self.physical_resource_axes

  @property
  def shape(self):
    return OrderedDict(self.physical_mesh.shape)

  def __setattr__(self, name, value):
    raise RuntimeError("ResourceEnv is immutable!")

  def __delattr__(self, name):
    # BUG FIX: the previous signature was __delattr__(self), so any
    # `del env.attr` raised a TypeError (wrong argument count) instead of
    # this intended RuntimeError.
    raise RuntimeError("ResourceEnv is immutable!")

  def __eq__(self, other):
    return (type(other) is ResourceEnv and
            self.physical_mesh == other.physical_mesh)

  def __hash__(self):
    return hash(self.physical_mesh)

  def __repr__(self):
    return f"ResourceEnv({self.physical_mesh!r})"
# Global resource environment, manipulated by the `mesh` context manager
# below.  Stored in a threading.local; it starts out as a trivial (empty)
# mesh.  NOTE(review): only the importing thread gets the `env` attribute
# set here -- other threads reading thread_resources.env before entering
# `mesh` would hit an AttributeError; confirm whether that is intended.
thread_resources = threading.local()
thread_resources.env = ResourceEnv(Mesh(np.empty((), dtype=object), ()))
@contextlib.contextmanager
def mesh(devices: np.ndarray, axis_names: Sequence[ResourceAxisName]):
  """Declare the hardware resources available in scope of this manager.

  All ``axis_names`` become valid resource names inside the managed block
  and can be used e.g. in the ``axis_resources`` argument of
  :py:func:`xmap`.

  Args:
    devices: A NumPy ndarray containing JAX device objects (as obtained
      e.g. from :py:func:`jax.devices`).
    axis_names: Resource axis names assigned to the dimensions of
      ``devices``; its length should match the rank of ``devices``.

  Example::

    devices = np.array(jax.devices())[:4].reshape((2, 2))
    with mesh(devices, ('x', 'y')):  # declare a 2D mesh with axes 'x' and 'y'
      distributed_out = xmap(
        jnp.vdot,
        in_axes=({0: 'left'}, {1: 'right'}),
        out_axes=['left', 'right', ...],
        axis_resources={'left': 'x', 'right': 'y'})(x, x.T)
  """
  prev_env = thread_resources.env
  thread_resources.env = ResourceEnv(Mesh(devices, axis_names))
  try:
    yield
  finally:
    # Restore the previous environment even if the body raised.
    thread_resources.env = prev_env
_next_resource_id = 0
class _UniqueResourceName:
def __init__(self, uid, tag=None):
self.uid = uid
self.tag = tag
def __eq__(self, other):
return type(other) is _UniqueResourceName and self.uid == other.uid
def __hash__(self):
return hash(self.uid)
def __repr__(self):
return f"<UniqueResource {self.tag} {self.uid}>"
def fresh_resource_name(tag=None):
  """Return a new, globally unique resource axis name.

  Uniqueness comes from a module-level counter; `tag` is only attached for
  debugging purposes.
  """
  global _next_resource_id
  name = _UniqueResourceName(_next_resource_id, tag)
  _next_resource_id += 1
  return name
# This is really a Dict[AxisName, int], but we don't define a
# pytree instance for it, so that it is treated as a leaf.
class AxisNamePos(FrozenDict):
  # Human-readable form of the user's original in_axes/out_axes entry,
  # used verbatim in error messages.
  user_repr: str
  # Rank the corresponding argument is asserted to have, or None when
  # unconstrained (only set by the AxisNamePosWithRank subclass).
  expected_rank: Optional[int] = None

  def __init__(self, *args, user_repr, **kwargs):
    super().__init__(*args, **kwargs)
    self.user_repr = user_repr
class AxisNamePosWithRank(AxisNamePos):
  # Produced for list/tuple axis specs *without* a trailing ellipsis: such
  # specs additionally assert the exact positional rank of the argument.
  def __init__(self, *args, expected_rank, **kwargs):
    super().__init__(*args, **kwargs)
    self.expected_rank = expected_rank
# str(...) == 'Ellipsis' which is really annoying
class DotDotDotRepr:
  """Stand-in for Ellipsis whose repr is '...'.

  Needed because str(Ellipsis) == 'Ellipsis', which is really annoying in
  user-facing error messages.
  """

  def __repr__(self):
    return '...'
def _parse_entry(arg_name, entry):
  """Parse one user-supplied in_axes/out_axes entry into an AxisNamePos.

  Args:
    arg_name: "in_axes" or "out_axes" -- used only in error messages.
    entry: either a dict mapping positional dimensions (ints) to axis
      names, or a list/tuple of axis names (None marks an unmapped
      positional dim), optionally terminated with the ellipsis object to
      disable the rank assertion.

  Returns:
    An AxisNamePos (or AxisNamePosWithRank when the entry asserts a rank)
    mapping each axis name to its positional dimension.

  Raises:
    TypeError: if the entry has an unsupported form.
    ValueError: if an axis name is repeated or a negative axis is used.
  """
  # Dictionaries mapping axis names to positional axes
  if isinstance(entry, dict) and all(isinstance(v, int) for v in entry.keys()):
    result = AxisNamePos(((name, axis) for axis, name in entry.items()),
                         user_repr=str(entry))
    num_mapped_dims = len(entry)
  # Non-empty lists or tuples that optionally terminate with an ellipsis
  elif isinstance(entry, (tuple, list)):
    if entry and entry[-1] == ...:
      constr = AxisNamePos
      entry = entry[:-1]
      # BUG FIX: `entry` may be a tuple, and tuple + list raises TypeError,
      # so tuple specs ending in ... used to crash here.  Convert to a list
      # before concatenating.
      user_repr = str(list(entry) + [DotDotDotRepr()])
    else:
      constr = partial(AxisNamePosWithRank, expected_rank=len(entry))
      user_repr = str(entry)
    result = constr(((name, axis) for axis, name in enumerate(entry)
                     if name is not None),
                    user_repr=user_repr)
    num_mapped_dims = sum(name is not None for name in entry)
  else:
    raise TypeError(f"""\
Value mapping specification in xmap {arg_name} pytree can be either:
- lists of axis names (possibly ending with the ellipsis object: ...)
- dictionaries that map axis names to positional axes (integers)
but got: {entry}""")
  # A duplicated axis name collapses into a single dict key, so a length
  # mismatch means the user repeated a name.
  if len(result) != num_mapped_dims:
    raise ValueError(f"Named axes should be unique within each {arg_name} argument "
                     f"specification, but one of them is: {entry}")
  for axis in result.values():
    if axis < 0:
      raise ValueError(f"xmap doesn't support negative axes in {arg_name}")
  return result
def _is_axes_leaf(entry):
  """Return True if `entry` is a single axes specification (a dict of leaf
  values, or a list/tuple of leaf values), rather than a pytree of them."""
  if isinstance(entry, dict):
    return all_leaves(entry.values())
  # NOTE: `None`s are not considered leaves by `all_leaves`, so unmapped
  # positions have to be filtered out first.
  if isinstance(entry, (tuple, list)):
    return all_leaves(v for v in entry if v is not None)
  return False
def _prepare_axes(axes, arg_name):
  # Flatten the user-specified axes pytree (treating dicts/lists of axis
  # names as leaves) and parse every leaf into an AxisNamePos.  Returns the
  # reconstructed pytree together with the flat list of parsed entries.
  entries, treedef = tree_flatten(axes, is_leaf=_is_axes_leaf)
  # NOTE: `map` is safe_map (aliased at module top), so `entries` is a list.
  entries = map(partial(_parse_entry, arg_name), entries)
  return tree_unflatten(treedef, entries), entries
# TODO: Some syntactic sugar to make the API more usable in a single-axis case?
# TODO: Are the resource axes scoped lexically or dynamically? Dynamically for now!
def xmap(fun: Callable,
         in_axes,
         out_axes,
         *,
         axis_sizes: Dict[AxisName, int] = {},
         axis_resources: Dict[AxisName, Union[ResourceAxisName, Tuple[ResourceAxisName, ...]]] = {},
         donate_argnums: Union[int, Sequence[int]] = (),
         backend: Optional[str] = None):
  """Assign a positional signature to a program that uses named array axes.

  .. warning::
    This is an experimental feature and the details can change at
    any time. Use at your own risk!

  .. warning::
    This docstring is aspirational. Not all features of the named axis
    programming model have been implemented just yet.

  The usual programming model of JAX (or really NumPy) associates each array
  with two pieces of metadata describing its type: the element type (``dtype``)
  and the ``shape``. :py:func:`xmap` extends this model by adding support for
  *named axes*.  In particular, each array used in a function wrapped by
  :py:func:`xmap` can additionally have a non-empty ``named_shape`` attribute,
  which can be used to query the set of named axes (introduced by
  :py:func:`xmap`) appearing in that value along with their shapes.
  Furthermore, in most places where positional axis indices are allowed (for
  example the `axes` arguments in :py:func:`sum`), bound axis names are also
  accepted. The :py:func:`einsum` language is extended inside :py:func:`xmap`
  to additionally allow contractions that involve named axes.  Broadcasting of
  named axes happens *by name*, i.e. all axes with equal names are expected to
  have equal shapes in all arguments of a broadcasting operation, while the
  result has a (set) union of all named axes.  The positional semantics of the
  program remain unchanged, and broadcasting still implicitly right-aligns
  positional axes for unification. For an extended description of the
  :py:func:`xmap` programming model, please refer to ... (a link to a
  non-existent detailed tutorial!).

  Note that since all top-level JAX expressions are interpreted in the NumPy
  programming model, :py:func:`xmap` can also be seen as an adapter that
  converts a function that uses named axes (including in arguments and returned
  values) into one that takes and returns values that only have positional
  axes.

  The default lowering strategy of :py:func:`xmap` converts all named axes into
  positional axes, working similarly to multiple applications of
  :py:func:`vmap`. However, this behavior can be further customized by the
  ``axis_resources`` argument.  When specified, each axis introduced by
  :py:func:`xmap` can be assigned to one or more *resource axes*. Those include
  the axes of the hardware mesh, as defined by the :py:func:`mesh` context
  manager. Each value that has a named axis in its ``named_shape`` will be
  partitioned over all mesh axes that axis is assigned to. Hence,
  :py:func:`xmap` can be seen as an alternative to :py:func:`pmap` that also
  exposes a way to automatically partition the computation over multiple
  devices.

  .. warning::
    While it is possible to assign multiple axis names to a single resource axis,
    care has to be taken to ensure that none of those named axes co-occur in a
    ``named_shape`` of any value in the named program. At the moment this is
    **completely unchecked** and will result in **undefined behavior**. Final
    release of :py:func:`xmap` will enforce this invariant, but it is work
    in progress.

    Note that you do not have to worry about any of this for as long as no
    resource axis is repeated in ``axis_resources.values()``.

  Note that any assignment of ``axis_resources`` doesn't ever change the
  results of the computation, but only how it is carried out (e.g. how many
  devices are used).  This makes it easy to try out various ways of
  partitioning a single program in many distributed scenarios (both small- and
  large-scale), to maximize the performance.  As such, :py:func:`xmap` can be
  seen as a way to seamlessly interpolate between :py:func:`vmap` and
  :py:func:`pmap`-style execution.

  Args:
    fun: Function that uses named axes. Its arguments and return
      value should be arrays, scalars, or (nested) standard Python containers
      (tuple/list/dict) thereof (in general: valid pytrees).
    in_axes: A Python object with the same container (pytree) structure as the
      signature of arguments to ``fun``, but with a positional-to-named axis
      mapping in place of every array argument. The valid positional-to-named
      mappings are: (1) a ``Dict[int, AxisName]`` specifying that a positional
      dimensions given by dictionary keys are to be converted to named axes
      of given names (2) a list of axis names that ends with the Ellipsis object
      (``...``) in which case a number of leading positional axes of the argument
      will be converted into named axes inside the function. Note that ``in_axes``
      can also be a prefix of the argument container structure, in which case the
      mapping is repeated for all arrays in the collapsed subtree.
    out_axes: A Python object with the same container (pytree) structure as the
      returns of ``fun``, but with a positional-to-named axis mapping in place
      of every returned array. The valid positional-to-named mappings are the same
      as in ``in_axes``. Note that ``out_axes`` can also be a prefix of the return
      container structure, in which case the mapping is repeated for all arrays
      in the collapsed subtree.
    axis_sizes: A dict mapping axis names to their sizes. All axes defined by xmap
      have to appear either in ``in_axes`` or ``axis_sizes``. Sizes of axes
      that appear in ``in_axes`` are inferred from arguments whenever possible.
    axis_resources: A dictionary mapping the axes introduced in this
      :py:func:`xmap` to one or more resource axes. Any array that has in its
      shape an axis with some resources assigned will be partitioned over the
      resources associated with the respective resource axes.
    backend: This is an experimental feature and the API is likely to change.
      Optional, a string representing the XLA backend. 'cpu', 'gpu', or 'tpu'.

  Returns:
    A version of ``fun`` that takes in arrays with positional axes in place of
    named axes bound in this :py:func:`xmap` call, and results with all named
    axes converted to positional axes. If ``axis_resources`` is specified,
    ``fun`` can additionally execute in parallel on multiple devices.

  For example, :py:func:`xmap` makes it very easy to convert a function that
  computes the vector inner product (such as :py:func:`jax.numpy.vdot`) into
  one that computes a matrix multiplication:

  >>> import jax.numpy as jnp
  >>> x = jnp.arange(10).reshape((2, 5))
  >>> xmap(jnp.vdot,
  ...      in_axes=({0: 'left'}, {1: 'right'}),
  ...      out_axes=['left', 'right', ...])(x, x.T)
  [[ 30,  80],
   [ 80, 255]]

  Note that the contraction in the program is performed over the positional axes,
  while named axes are just a convenient way to achieve batching. While this
  might seem like a silly example at first, it might turn out to be useful in
  practice, since in conjunction with ``axis_resources`` this makes it possible
  to implement a distributed matrix-multiplication in just a few lines of code:

  >>> devices = np.array(jax.devices())[:4].reshape((2, 2))
  >>> with mesh(devices, ('x', 'y')):  # declare a 2D mesh with axes 'x' and 'y'
  ...   distributed_out = xmap(
  ...     jnp.vdot,
  ...     in_axes=({0: 'left'}, {1: 'right'}),
  ...     out_axes=['left', 'right', ...],
  ...     axis_resources={'left': 'x', 'right': 'y'})(x, x.T)

  Still, the above examples are quite simple. After all, the xmapped
  computation was a simple NumPy function that didn't use the axis names at all!
  So, let's explore a slightly larger example which is linear regression::

    def regression_loss(x, y, w, b):
      # Contract over in_features. Batch and out_features are present in
      # both inputs and output, so they don't need to be mentioned
      y_pred = jnp.einsum('{in_features},{in_features}->{}', x, w) + b
      return jnp.mean((y - y_pred) ** 2, axis='batch')

    xmap(regression_loss,
         in_axes=(['batch', 'in_features', ...],
                  ['batch', 'out_features', ...],
                  ['in_features', 'out_features', ...],
                  ['out_features', ...]),
         out_axes={})  # Loss is reduced over all axes, including batch!

  .. note::
    When using ``axis_resources`` along with a mesh that is controlled by
    multiple JAX hosts, keep in mind that in any given process :py:func:`xmap`
    only expects the data slice that corresponds to its local devices to be
    specified. This is in line with the current multi-host :py:func:`pmap`
    programming model.
  """
  warn("xmap is an experimental feature and probably has bugs!")
  _check_callable(fun)

  # To be a tree prefix of the positional args tuple, in_axes can never be a
  # list: if in_axes is not a leaf, it must be a tuple of trees. However,
  # in cases like these users expect tuples and lists to be treated
  # essentially interchangeably, so we canonicalize lists to tuples here
  # rather than raising an error. https://github.com/google/jax/issues/2367
  if isinstance(in_axes, list) and not _is_axes_leaf(in_axes):
    in_axes = tuple(in_axes)
  if isinstance(out_axes, list) and not _is_axes_leaf(out_axes):
    out_axes = tuple(out_axes)

  if in_axes == ():  # Allow empty argument lists
    in_axes, in_axes_entries = (), []
  else:
    in_axes, in_axes_entries = _prepare_axes(in_axes, "in_axes")
  if out_axes == ():
    raise ValueError("xmapped functions cannot have no return values")
  else:
    out_axes, out_axes_entries = _prepare_axes(out_axes, "out_axes")

  axis_sizes_names = set(axis_sizes.keys())
  in_axes_names = set(it.chain(*(spec.keys() for spec in in_axes_entries)))
  defined_names = axis_sizes_names | in_axes_names
  out_axes_names = set(it.chain(*(spec.keys() for spec in out_axes_entries)))

  # Normalize axis_resources so that every defined axis maps to a (possibly
  # empty) tuple of resource axes; FrozenDict makes it hashable for caching.
  normalized_axis_resources: Dict[AxisName, Tuple[ResourceAxisName, ...]] = \
      {axis: (resources if isinstance(resources, tuple) else (resources,))
       for axis, resources in axis_resources.items()}
  for axis in defined_names:
    normalized_axis_resources.setdefault(axis, ())
  frozen_axis_resources = FrozenDict(normalized_axis_resources)
  necessary_resources = set(it.chain(*frozen_axis_resources.values()))

  axes_with_resources = set(frozen_axis_resources.keys())
  if axes_with_resources > defined_names:
    raise ValueError(f"All axes that were assigned resources have to appear in "
                     f"in_axes or axis_sizes, but the following are missing: "
                     f"{axes_with_resources - defined_names}")
  if out_axes_names > defined_names:
    raise ValueError(f"All axis names appearing in out_axes must also appear in "
                     f"in_axes or axis_sizes, but the following are missing: "
                     f"{out_axes_names - defined_names}")

  for axis, resources in frozen_axis_resources.items():
    if len(set(resources)) != len(resources):
      raise ValueError(f"Resource assignment of a single axis must be a tuple of "
                       f"distinct resources, but specified {resources} for axis {axis}")

  donate_argnums = _ensure_index_tuple(donate_argnums)

  # A little performance optimization to avoid iterating over all args unnecessarily
  has_input_rank_assertions = any(spec.expected_rank is not None for spec in in_axes_entries)
  has_output_rank_assertions = any(spec.expected_rank is not None for spec in out_axes_entries)

  @wraps(fun)
  def fun_mapped(*args):
    # Putting this outside of fun_mapped would make resources lexically scoped
    resource_env = thread_resources.env
    available_resources = set(resource_env.shape.keys())

    if necessary_resources > available_resources:
      raise ValueError(f"In-scope resources are insufficient to execute the "
                       f"xmapped function. The missing resources are: "
                       f"{necessary_resources - available_resources}")

    args_flat, in_tree = tree_flatten(args)
    for arg in args_flat: _check_arg(arg)
    fun_flat, out_tree = flatten_fun_nokwargs(lu.wrap_init(fun), in_tree)
    if donate_argnums:
      donated_invars = donation_vector(donate_argnums, args, ())
    else:
      donated_invars = (False,) * len(args_flat)

    # TODO: Check that:
    #         - two axes mapped to the same resource never coincide (even inside f)
    in_axes_flat = flatten_axes("xmap in_axes", in_tree, in_axes)
    # out_axes can only be flattened once the output pytree is known, hence a
    # (hashable) thunk instead of an eager value.
    out_axes_thunk = HashableFunction(
      lambda: tuple(flatten_axes("xmap out_axes", out_tree(), out_axes)),
      closure=out_axes)

    frozen_axis_sizes = FrozenDict(_get_axis_sizes(args_flat, in_axes_flat, axis_sizes))
    missing_sizes = defined_names - set(frozen_axis_sizes.keys())
    if missing_sizes:
      raise ValueError(f"Failed to infer size of axes: {', '.join(unsafe_map(str, missing_sizes))}. "
                       f"You've probably passed in empty containers in place of arguments that had "
                       f"those axes in their in_axes. Provide the sizes of missing axes explicitly "
                       f"via axis_sizes to fix this error.")

    if has_input_rank_assertions:
      for arg, spec in zip(args_flat, in_axes_flat):
        if spec.expected_rank is not None and spec.expected_rank != arg.ndim:
          raise ValueError(f"xmap argument has an in_axes specification of {spec.user_repr}, "
                           f"which asserts that it should be of rank {spec.expected_rank}, "
                           f"but the argument has rank {arg.ndim} (and shape {arg.shape})")

    out_flat = xmap_p.bind(
      fun_flat, *args_flat,
      name=getattr(fun, '__name__', '<unnamed function>'),
      in_axes=tuple(in_axes_flat),
      out_axes_thunk=out_axes_thunk,
      donated_invars=donated_invars,
      axis_sizes=frozen_axis_sizes,
      axis_resources=frozen_axis_resources,
      resource_env=resource_env,
      backend=backend)

    if has_output_rank_assertions:
      for out, spec in zip(out_flat, out_axes_thunk()):
        if spec.expected_rank is not None and spec.expected_rank != out.ndim:
          raise ValueError(f"xmap output has an out_axes specification of {spec.user_repr}, "
                           f"which asserts that it should be of rank {spec.expected_rank}, "
                           f"but the output has rank {out.ndim} (and shape {out.shape})")
    return tree_unflatten(out_tree(), out_flat)

  return fun_mapped
def xmap_impl(fun: lu.WrappedFun, *args, name, in_axes, out_axes_thunk, donated_invars,
              axis_sizes, axis_resources, resource_env, backend):
  # Eval rule for the xmap primitive: compile the mapped function (cached on
  # the arguments' shaped avals) and execute it immediately.
  in_avals = [core.raise_to_shaped(core.get_aval(arg)) for arg in args]
  return make_xmap_callable(fun, name, in_axes, out_axes_thunk, donated_invars, axis_sizes,
                            axis_resources, resource_env, backend, *in_avals)(*args)
@lu.cache
def make_xmap_callable(fun: lu.WrappedFun,
                       name,
                       in_axes, out_axes_thunk, donated_invars,
                       axis_sizes, axis_resources, resource_env, backend,
                       *in_avals):
  # Lower an xmap call into either a mesh-partitioned executable (when any
  # physical mesh resources are used) or a plain XLA computation.
  plan = EvaluationPlan.from_axis_resources(axis_resources, resource_env, axis_sizes)

  # TODO: Making axis substitution final style would allow us to avoid
  # tracing to jaxpr here
  # Trace with the named axes removed from the avals (they are bound inside).
  mapped_in_avals = [_delete_aval_axes(aval, in_axes)
                     for aval, in_axes in zip(in_avals, in_axes)]
  with core.extend_axis_env_nd(axis_sizes.items()):
    jaxpr, _, consts = pe.trace_to_jaxpr_final(fun, mapped_in_avals)
  out_axes = out_axes_thunk()
  # Rename user axis names to their resource/vectorized replacements.
  jaxpr = subst_jaxpr_axis_names(jaxpr, plan.axis_subst)

  f = lu.wrap_init(core.jaxpr_as_fun(core.ClosedJaxpr(jaxpr, consts)))
  f = hide_mapped_axes(f, tuple(in_axes), tuple(out_axes))
  f = plan.vectorize(f, in_axes, out_axes)

  used_resources = _jaxpr_resources(jaxpr, resource_env) | set(it.chain(*axis_resources.values()))
  used_mesh_axes = used_resources & resource_env.physical_resource_axes
  if used_mesh_axes:
    # sorted(..., key=str) gives a deterministic submesh axis order.
    submesh = resource_env.physical_mesh[sorted(used_mesh_axes, key=str)]
    mesh_in_axes, mesh_out_axes = plan.to_mesh_axes(in_axes, out_axes)
    return pxla.mesh_tiled_callable(f,
                                    name,
                                    backend,
                                    submesh,
                                    mesh_in_axes,
                                    mesh_out_axes,
                                    donated_invars,
                                    EXPERIMENTAL_SPMD_LOWERING,
                                    *in_avals)
  else:
    # No mesh resources in use: fall back to an ordinary jit-style lowering.
    return xla._xla_callable(f, None, backend, name, donated_invars,
                             *((a, None) for a in in_avals))
class EvaluationPlan(NamedTuple):
  """Encapsulates preprocessing common to top-level xmap invocations and its translation rule."""
  resource_env: ResourceEnv
  axis_sizes: Dict[AxisName, int]
  physical_axis_resources: Dict[AxisName, Tuple[ResourceAxisName, ...]]
  # For every named axis: its assigned resource axes, followed by one fresh
  # name for the residual part of the axis that gets vectorized locally.
  axis_subst: Dict[AxisName, Tuple[ResourceAxisName, ...]]

  @classmethod
  def from_axis_resources(cls,
                          axis_resources: Dict[AxisName, Tuple[ResourceAxisName, ...]],
                          resource_env: ResourceEnv,
                          axis_sizes: Dict[AxisName, int]):
    # TODO: Support sequential resources
    physical_axis_resources = axis_resources  # NB: We only support physical resources at the moment
    # Append one fresh "vectorized" axis name per user axis (see axis_subst).
    axis_subst = {name: axes + (fresh_resource_name(name),) for name, axes in axis_resources.items()}
    return cls(resource_env, axis_sizes, physical_axis_resources, axis_subst)

  def vectorize(self, f: lu.WrappedFun, in_axes, out_axes):
    # Wrap `f` in one vtile per named axis, covering the residual tile of
    # each axis that is not partitioned over physical resources.
    resource_shape = self.resource_env.shape
    for naxis, raxes in self.axis_subst.items():
      # Split the substitution into physical axes and the final vmapped axis.
      paxes, vaxis = raxes[:-1], raxes[-1]
      map_in_axes = tuple(unsafe_map(lambda spec: spec.get(naxis, None), in_axes))
      map_out_axes = tuple(unsafe_map(lambda spec: spec.get(naxis, None), out_axes))
      paxes_size = int(np.prod([resource_shape[paxis] for paxis in paxes], dtype=np.int64))
      if self.axis_sizes[naxis] % paxes_size != 0:
        raise ValueError(f"Size of axis {naxis} ({self.axis_sizes[naxis]}) is not divisible "
                         f"by the total number of resources assigned to this axis ({paxes}, "
                         f"{paxes_size} in total)")
      tile_size = self.axis_sizes[naxis] // paxes_size
      f = pxla.vtile(f, map_in_axes, map_out_axes, tile_size=tile_size, axis_name=vaxis)
    return f

  def to_mesh_axes(self, in_axes, out_axes):
    """
    Convert in/out_axes parameters ranging over logical dimensions to
    in/out_axes that range over the mesh dimensions.
    """
    def to_mesh(axes):
      return OrderedDict((physical_axis, pos_axis)
                         for logical_axis, pos_axis in axes.items()
                         for physical_axis in self.physical_axis_resources[logical_axis])
    return (tuple(unsafe_map(to_mesh, in_axes)),
            tuple(unsafe_map(to_mesh, out_axes)))
# -------- xmap primitive and its transforms --------
# xmap has a different set of parameters than pmap, so we make it its own primitive type
class XMapPrimitive(core.Primitive):
  """The primitive bound by `xmap`.

  It is its own primitive type (rather than reusing pmap's) because xmap
  has a different set of parameters than pmap.
  """
  multiple_results = True
  map_primitive = True  # Not really, but it gives us a few good behaviors

  def __init__(self):
    super().__init__('xmap')
    self.def_impl(xmap_impl)
    self.def_custom_bind(self.bind)

  def bind(self, fun, *args, **params):
    assert len(params['in_axes']) == len(args)
    return core.call_bind(self, fun, *args, **params)  # type: ignore

  def process(self, trace, fun, tracers, params):
    # Delegates to the per-trace process_xmap hook installed below.
    return trace.process_xmap(self, fun, tracers, params)

  def post_process(self, trace, out_tracers, params):
    raise NotImplementedError
xmap_p = XMapPrimitive()

# Evaluating an xmap works exactly like evaluating a call.
core.EvalTrace.process_xmap = core.EvalTrace.process_call  # type: ignore

def _process_xmap_default(self, call_primitive, f, tracers, params):
  # Fallback installed on the Trace base class: traces without explicit xmap
  # support fail loudly instead of mishandling the primitive.
  raise NotImplementedError(f"{type(self)} must override process_xmap to handle xmap")
core.Trace.process_xmap = _process_xmap_default  # type: ignore
# This is DynamicJaxprTrace.process_map with some very minor modifications
def _dynamic_jaxpr_process_xmap(self, primitive, f, tracers, params):
  # This is DynamicJaxprTrace.process_map with some very minor modifications.
  from jax.interpreters.partial_eval import (
    trace_to_subjaxpr_dynamic, DynamicJaxprTracer, source_info_util,
    convert_constvars_jaxpr, call_param_updaters, new_jaxpr_eqn)
  assert primitive is xmap_p
  in_avals = [t.aval for t in tracers]
  axis_sizes = params['axis_sizes']
  # Trace the body with the named axes stripped from the input avals.
  mapped_in_avals = [_delete_aval_axes(a, a_in_axes)
                     for a, a_in_axes in zip(in_avals, params['in_axes'])]
  with core.extend_axis_env_nd(params['axis_sizes'].items()):
    jaxpr, mapped_out_avals, consts = trace_to_subjaxpr_dynamic(
        f, self.main, mapped_in_avals)
  out_axes = params['out_axes_thunk']()
  # Re-insert the named axes into the output avals.
  out_avals = [_insert_aval_axes(a, a_out_axes, axis_sizes)
               for a, a_out_axes in zip(mapped_out_avals, out_axes)]
  source_info = source_info_util.current()
  out_tracers = [DynamicJaxprTracer(self, a, source_info) for a in out_avals]
  invars = map(self.getvar, tracers)
  constvars = map(self.getvar, map(self.instantiate_const, consts))
  outvars = map(self.makevar, out_tracers)
  # Constants become extra leading call inputs; None marks them as unmapped.
  new_in_axes = (None,) * len(consts) + params['in_axes']
  new_params = dict(params, in_axes=new_in_axes, out_axes=out_axes,
                    call_jaxpr=convert_constvars_jaxpr(jaxpr))
  # out_axes is now known concretely, so the thunk is no longer needed.
  del new_params['out_axes_thunk']
  update_params = call_param_updaters.get(primitive)
  if update_params:
    new_params = update_params(new_params, [True] * len(tracers))
  eqn = new_jaxpr_eqn([*constvars, *invars], outvars, primitive,
                      new_params, source_info)
  self.frame.eqns.append(eqn)
  return out_tracers
pe.DynamicJaxprTrace.process_xmap = _dynamic_jaxpr_process_xmap  # type: ignore
def _batch_trace_process_xmap(self, primitive, f: lu.WrappedFun, tracers, params):
  # vmap-of-xmap rule: thread the batched dimension through the xmap call by
  # shifting the positional dims in the in/out axis specs.
  not_mapped = batching.not_mapped
  vals, dims = unzip2((t.val, t.batch_dim) for t in tracers)
  assert primitive is xmap_p
  if all(dim is not_mapped for dim in dims):
    # Nothing is batched: bind through unchanged.
    return primitive.bind(f, *vals, **params)
  else:
    # All batched args must agree on the batch size.
    assert len({x.shape[d] for x, d in zip(vals, dims) if d is not not_mapped}) == 1
    def fmap_dims(axes, f):
      return AxisNamePos(((name, f(axis)) for name, axis in axes.items()),
                         user_repr=axes.user_repr)
    # Shift named-axis positions that come after the inserted batch dim.
    new_in_axes = tuple(
      fmap_dims(in_axes, lambda a: a + (d is not not_mapped and d <= a))
      for d, in_axes in zip(dims, params['in_axes']))
    # Batch dim position as seen *inside* the xmap (named dims removed).
    mapped_dims_in = tuple(
      d if d is not_mapped else d - sum(a < d for a in in_axis.values())
      for d, in_axis in zip(dims, params['in_axes']))
    f, mapped_dims_out = batching.batch_subtrace(f, self.main, mapped_dims_in)
    out_axes_thunk = params['out_axes_thunk']
    # NOTE: This assumes that the choice of the dimensions over which outputs
    #       are batched is entirely dependent on the function and not e.g. on
    #       the data or its shapes.
    @as_hashable_function(closure=out_axes_thunk)
    def new_out_axes_thunk():
      return tuple(
        fmap_dims(out_axes, lambda a: a + (d is not not_mapped and d <= a))
        for out_axes, d in zip(out_axes_thunk(), mapped_dims_out()))
    new_params = dict(params, in_axes=new_in_axes, out_axes_thunk=new_out_axes_thunk)
    vals_out = primitive.bind(f, *vals, **new_params)
    # Translate batch dims back to the outer (named dims re-inserted) view.
    dims_out = tuple(d if d is not_mapped else d + sum(a < d for a in out_axes.values())
                     for d, out_axes in zip(mapped_dims_out(), out_axes_thunk()))
    return [batching.BatchTracer(self, v, d) for v, d in zip(vals_out, dims_out)]
batching.BatchTrace.process_xmap = _batch_trace_process_xmap  # type: ignore
# -------- nested xmap handling --------

def _xmap_translation_rule(*args, **kwargs):
  """Dispatch xmap lowering to the SPMD or per-replica implementation.

  The flag is read at lowering time, so toggling EXPERIMENTAL_SPMD_LOWERING
  between traces selects a different rule.
  """
  if EXPERIMENTAL_SPMD_LOWERING:
    return _xmap_translation_rule_spmd(*args, **kwargs)
  else:
    return _xmap_translation_rule_replica(*args, **kwargs)
xla.call_translations[xmap_p] = _xmap_translation_rule
def _xmap_translation_rule_replica(c, axis_env,
                                   in_nodes, name_stack, *,
                                   call_jaxpr, name,
                                   in_axes, out_axes, donated_invars,
                                   axis_sizes, axis_resources, resource_env, backend):
  """Per-replica XLA lowering of xmap.

  Each replica slices out its own tile of every input, runs the vectorized
  body inline, and reassembles full outputs with a cross-replica sum.
  """
  plan = EvaluationPlan.from_axis_resources(axis_resources, resource_env, axis_sizes)
  local_mesh = resource_env.physical_mesh.local_mesh
  local_mesh_shape = local_mesh.shape
  mesh_in_axes, mesh_out_axes = plan.to_mesh_axes(in_axes, out_axes)
  # Per-device (tiled) abstract values for tracing the vectorized body.
  local_avals = [pxla.tile_aval_nd(
                    local_mesh_shape, aval_mesh_in_axes,
                    _insert_aval_axes(v.aval, aval_in_axes, axis_sizes))
                 for v, aval_in_axes, aval_mesh_in_axes
                 in zip(call_jaxpr.invars, in_axes, mesh_in_axes)]
  # We have to substitute before tracing, because we want the vectorized
  # axes to be used in the jaxpr.
  resource_call_jaxpr = subst_jaxpr_axis_names(call_jaxpr, plan.axis_subst)
  f = lu.wrap_init(core.jaxpr_as_fun(core.ClosedJaxpr(resource_call_jaxpr, ())))
  f = hide_mapped_axes(f, tuple(in_axes), tuple(out_axes))
  f = plan.vectorize(f, in_axes, out_axes)
  # NOTE: We don't extend the resource env with the mesh shape, because those
  #       resources are already in scope! It's the outermost xmap that introduces
  #       them!
  vectorized_jaxpr, _, consts = pe.trace_to_jaxpr_final(f, local_avals)
  assert not consts
  # Slice out this replica's tile of each (non-unit) input.
  tiled_ins = (
    _xla_tile(c, axis_env, in_node, arg_in_axes, local_mesh_shape)
    if v.aval is not core.abstract_unit else in_node
    for v, in_node, arg_in_axes in zip(call_jaxpr.invars, in_nodes, mesh_in_axes))
  # NOTE: We don't extend the resource env with the mesh shape, because those
  #       resources are already in scope! It's the outermost xmap that introduces
  #       them!
  # We in-line here rather than generating a Call HLO as in the xla_call
  # translation rule just because the extra tuple stuff is a pain.
  tiled_outs = xla.jaxpr_subcomp(
      c, vectorized_jaxpr, backend, axis_env, (),
      xla.extend_name_stack(name_stack, xla.wrap_name(name, 'xmap')), *tiled_ins)
  # Reassemble full outputs from the per-replica tiles.
  outs = [_xla_untile(c, axis_env, tiled_out, ans_out_axes, local_mesh_shape, backend)
          if v.aval is not core.abstract_unit else tiled_out
          for v, tiled_out, ans_out_axes
          in zip(call_jaxpr.outvars, tiled_outs, mesh_out_axes)]
  return xops.Tuple(c, outs)
def _xla_tile_base_indices(c, axis_env, tile_shape, axes, axis_sizes):
  """Compute this replica's start offsets for its tile of a full array.

  For every positional dimension, the axis indices of all named axes mapped
  to it are combined into a single linear tile index (mixed-radix: later
  entries of ``axes`` vary fastest because iteration is reversed), which is
  then scaled by the tile extent to yield a start offset.
  """
  zero = xb.constant(c, np.zeros((), dtype=np.int32))
  linear_idxs = [zero] * len(tile_shape)
  strides = [1] * len(tile_shape)
  for name, axis in reversed(axes.items()):
    axis_index = _axis_index_translation_rule(
        c, axis_name=name, axis_env=axis_env, platform=None)
    stride_c = xb.constant(c, np.array(strides[axis], np.int32))
    if linear_idxs[axis] is zero and strides[axis] == 1:
      # First contribution to this dim: avoid emitting a useless Add/Mul.
      linear_idxs[axis] = axis_index
    else:
      linear_idxs[axis] = xops.Add(linear_idxs[axis], xops.Mul(axis_index, stride_c))
    strides[axis] *= axis_sizes[name]
  # Start offset = linear tile index * tile extent (zero stays a constant).
  return [zero if linear_idx is zero else
          xops.Mul(linear_idx, xb.constant(c, np.array(tile_dim_size, np.int32)))
          for linear_idx, tile_dim_size in zip(linear_idxs, tile_shape)]
def _xla_tile(c, axis_env, x, in_axes, axis_sizes):
  """Slice this replica's tile out of the full array ``x``.

  Every dimension mapped by a named axis is shrunk by that axis' size
  (sizes must divide evenly) and DynamicSlice extracts the tile starting
  at the replica-specific base indices.
  """
  if not in_axes:
    return x
  shape = list(c.get_shape(x).dimensions())
  tile_shape = list(shape)
  for name, axis in in_axes.items():
    axis_size = axis_sizes[name]
    assert tile_shape[axis] % axis_size == 0
    tile_shape[axis] //= axis_size
  base_idxs = _xla_tile_base_indices(c, axis_env, tile_shape, in_axes, axis_sizes)
  return xops.DynamicSlice(x, base_idxs, tile_shape)
# TODO(b/110096942): more efficient gather
# TODO(b/110096942): more efficient gather
def _xla_untile(c, axis_env, x, out_axes, axis_sizes, backend):
  """Reassemble a full array from per-replica tiles.

  Each replica pads its tile into a zero-filled full-size array at its own
  base offset, then a CrossReplicaSum over the mapped axes' replica groups
  combines the disjoint tiles into the complete result.
  """
  xla_shape = c.get_shape(x)
  x_dtype = xla_shape.numpy_dtype()
  # TODO(mattjj): remove this logic when AllReduce PRED supported on CPU / GPU
  convert_bool = (np.issubdtype(x_dtype, np.bool_)
                  and xb.get_backend(backend).platform in ('cpu', 'gpu'))
  if convert_bool:
    x = xops.ConvertElementType(x, xb.dtype_to_etype(np.float32))
  tile_shape = list(xla_shape.dimensions())
  shape = list(tile_shape)
  # Grow each mapped dim back to the full (untiled) extent.
  for name, axis in out_axes.items():
    shape[axis] *= axis_sizes[name]
  base_idxs = _xla_tile_base_indices(c, axis_env, tile_shape, out_axes, axis_sizes)
  padded = xops.Broadcast(xb.constant(c, np.array(0, x_dtype)), shape)
  padded = xops.DynamicUpdateSlice(padded, x, base_idxs)
  replica_groups_protos = xc.make_replica_groups(
    xla.axis_groups(axis_env, tuple(out_axes.keys())))
  out = xops.CrossReplicaSum(padded, replica_groups_protos)
  # TODO(mattjj): remove this logic when AllReduce PRED supported on CPU / GPU
  if convert_bool:
    nonzero = xops.Ne(out, xb.constant(c, np.array(0, dtype=np.float32)))
    out = xops.ConvertElementType(nonzero, xb.dtype_to_etype(np.bool_))
  return out
def _xmap_translation_rule_spmd(c, axis_env,
                                in_nodes, name_stack, *,
                                call_jaxpr, name,
                                in_axes, out_axes, donated_invars,
                                axis_sizes, axis_resources, resource_env, backend):
  """XLA translation rule for xmap under the experimental SPMD lowering.

  Not implemented yet. Raises NotImplementedError with an actionable
  message (the original raised a bare NotImplementedError with no
  explanation, which is unhelpful to users hitting this path).
  """
  # TODO(apaszke): This is quite difficult to implement given the current lowering
  #                in mesh_tiled_callable. There, we vmap the mapped axes, but we
  #                have no idea which positional axes they end up being in this
  #                translation rule!
  raise NotImplementedError(
      "Nested xmap lowering is not supported when the experimental SPMD "
      "lowering is enabled. Disable EXPERIMENTAL_SPMD_LOWERING to fall back "
      "to the replica-based lowering.")
# -------- helper functions --------
def _delete_aval_axes(aval, axes: AxisNamePos):
  """Return ``aval`` with the positional dims named in ``axes`` removed."""
  assert isinstance(aval, core.ShapedArray)
  remaining = list(aval.shape)
  # Drop the highest positions first so earlier deletions don't shift the
  # indices of the dims still to be removed.
  for pos in sorted(axes.values(), reverse=True):
    del remaining[pos]
  return aval.update(shape=tuple(remaining))
def _insert_aval_axes(aval, axes: AxisNamePos, axis_sizes):
  """Return ``aval`` with a dim of size ``axis_sizes[name]`` inserted at
  each position named in ``axes``."""
  assert isinstance(aval, core.ShapedArray)
  new_shape = list(aval.shape)
  # Insert at the lowest position first: each later insertion then refers
  # to the already-grown shape, matching the user-specified positions.
  for axis_name, pos in sorted(axes.items(), key=lambda item: item[1]):
    new_shape.insert(pos, axis_sizes[axis_name])
  return aval.update(shape=tuple(new_shape))
def _get_axis_sizes(args_flat: Iterable[Any],
                    in_axes_flat: Iterable[AxisNamePos],
                    axis_sizes: Dict[AxisName, int]):
  """Infer the size of every named axis from the argument shapes.

  Starts from the user-provided ``axis_sizes`` (left unmodified; a copy is
  returned) and records ``arg.shape[dim]`` for every named axis. Raises
  ValueError on a size conflict or when an in_axes entry points past the
  argument's rank.
  """
  sizes = dict(axis_sizes)
  for arg, arg_axes in zip(args_flat, in_axes_flat):
    for axis_name, dim in arg_axes.items():
      if axis_name in sizes and sizes[axis_name] != arg.shape[dim]:
        raise ValueError(f"The size of axis {axis_name} was previously inferred to be "
                         f"{sizes[axis_name]}, but found an argument of shape {arg.shape} "
                         f"with in_axes specification {arg_axes.user_repr}. Shape mismatch "
                         f"occurs in dimension {dim}: {arg.shape[dim]} != {sizes[axis_name]}")
      else:
        try:
          sizes[axis_name] = arg.shape[dim]
        except IndexError:
          # TODO(apaszke): Handle negative indices. Check for overlap too!
          raise ValueError(f"One of xmap arguments has an in_axes specification of "
                           f"{arg_axes.user_repr}, which implies that it has at least "
                           f"{max(arg_axes.values()) + 1} dimensions, but the argument "
                           f"has rank {arg.ndim}")
  return sizes
def lookup_exactly_one_of(d: AxisNamePos, names: Set[AxisName]) -> Optional[int]:
  """Return the value of ``d`` keyed by the single member of ``names``
  present in it.

  Returns None when no member of ``names`` is a key of ``d``; raises
  ValueError when more than one is.
  """
  found = [d[name] for name in names if name in d]
  if len(found) > 1:
    raise ValueError("An input was mapped to the same resource twice")
  return found[0] if found else None
@lu.transformation
def hide_mapped_axes(flat_in_axes, flat_out_axes, *flat_args):
  """Linear-util transformation that hides mapped positional axes.

  Squeezes the mapped positional dims out of every argument before calling
  the wrapped function (those dims are expected to have extent 1 at this
  point) and reinserts size-1 dims at the mapped positions of every output.
  """
  def _squeeze_mapped_axes(arg, axes: AxisNamePos):
    # Squeeze from the highest dim down so positions stay valid.
    for dim in sorted(axes.values(), reverse=True):
      arg = arg.squeeze(dim)
    return arg

  def _unsqueeze_mapped_axes(out, axes: AxisNamePos):
    try:
      return jnp.expand_dims(out, tuple(axes.values()))
    except ValueError as e:
      # Improve the axis out of bounds errors
      # TODO(apaszke): Handle negative indices. Check for overlap too!
      if e.args[0].startswith('axis') and 'out of bounds' in e.args[0]:
        raise ValueError(f"One of xmap outputs has an out_axes specification of "
                         f"{axes.user_repr}, which requires the result of the xmapped "
                         f"function to have at least {max(axes.values()) - len(axes) + 1} "
                         f"positional dimensions, but it only has {out.ndim}")
      raise

  squeezed_args = map(_squeeze_mapped_axes, flat_args, flat_in_axes)
  # Generator-style lu.transformation protocol: first yield hands the
  # squeezed args to the wrapped function, second yields the final outputs.
  flat_outputs = yield squeezed_args, {}
  yield map(_unsqueeze_mapped_axes, flat_outputs, flat_out_axes)
def _jaxpr_resources(jaxpr, resource_env) -> Set[ResourceAxisName]:
  """Recursively collect all resource axis names used by xmaps in ``jaxpr``.

  Raises RuntimeError if any nested xmap was traced under a different
  resource environment (changing the mesh inside xmap is forbidden).
  """
  used_resources = set()
  for eqn in jaxpr.eqns:
    if eqn.primitive is xmap_p:
      if eqn.params['resource_env'] != resource_env:
        raise RuntimeError("Changing the resource environment (e.g. hardware mesh "
                           "spec) is not allowed inside xmap.")
      used_resources |= set(it.chain(*eqn.params['axis_resources'].values()))
    # Recurse into any sub-jaxprs carried in this equation's params.
    updates = core.traverse_jaxpr_params(
        partial(_jaxpr_resources, resource_env=resource_env), eqn.params)
    for update in updates:
      used_resources |= update
  return used_resources
def subst_jaxpr_axis_names(jaxpr, axis_subst: Dict[AxisName, Tuple[AxisName]]):
  """Return a copy of ``jaxpr`` with ``axis_subst`` applied to every equation."""
  substituted_eqns = [subst_eqn_axis_names(e, axis_subst) for e in jaxpr.eqns]
  return core.Jaxpr(jaxpr.constvars, jaxpr.invars, jaxpr.outvars, substituted_eqns)
def subst_eqn_axis_names(eqn, axis_subst: Dict[AxisName, Tuple[AxisName]]):
  """Apply an axis-name substitution to a single equation.

  Substitution recurses into sub-jaxprs, but names rebound by a nested
  xmap (its ``axis_sizes``) or by a call/map primitive (``axis_name``)
  shadow the outer substitution and are dropped from it before recursing.
  """
  # TODO: Support custom_vjp, custom_jvp
  if eqn.primitive is xmap_p:
    # Axes introduced by the nested xmap shadow outer names of the same name.
    shadowed_axes = set(eqn.params['axis_sizes']) & set(axis_subst)
    if shadowed_axes:
      shadowed_subst = dict(axis_subst)
      for saxis in shadowed_axes:
        del shadowed_subst[saxis]
    else:
      shadowed_subst = axis_subst
    new_call_jaxpr = subst_jaxpr_axis_names(eqn.params['call_jaxpr'], shadowed_subst)
    return eqn._replace(params=dict(eqn.params, call_jaxpr=new_call_jaxpr))
  if isinstance(eqn.primitive, (core.CallPrimitive, core.MapPrimitive)):
    bound_name = eqn.params.get('axis_name', None)
    if bound_name in axis_subst:  # Check for shadowing
      sub_subst = dict(axis_subst)
      del sub_subst[bound_name]
    else:
      sub_subst = axis_subst
    new_call_jaxpr = subst_jaxpr_axis_names(eqn.params['call_jaxpr'], sub_subst)
    return eqn._replace(params=dict(eqn.params, call_jaxpr=new_call_jaxpr))
  # First-order primitive: delegate to core, mapping unknown names to
  # themselves (as singleton tuples).
  new_params = core.subst_axis_names(eqn.primitive, eqn.params,
                                     lambda name: axis_subst.get(name, (name,)))
  return eqn if new_params is eqn.params else eqn._replace(params=new_params)
# -------- soft_pmap --------
def soft_pmap(fun: Callable, axis_name: Optional[AxisName] = None, in_axes=0
              ) -> Callable:
  """Emulate pmap via xmap over a one-dimensional 'devices' mesh.

  Maps ``fun`` over axis 0 of its arguments (leaves of ``in_axes`` must be
  0 or None) with the mapped axis resourced onto all local devices.
  Experimental.
  """
  warn("soft_pmap is an experimental feature and probably has bugs!")
  _check_callable(fun)
  axis_name = core._TempAxisName(fun) if axis_name is None else axis_name

  if any(axis != 0 for axis in tree_leaves(in_axes)):
    raise ValueError(f"soft_pmap in_axes leaves must be 0 or None, got {in_axes}")
  # Turn each leaf into an xmap in_axes dict: {0: axis_name} for mapped
  # arguments, {} for None leaves (the proxy marks the removed Nones).
  proxy = object()
  in_axes = _replace_nones(proxy, in_axes)
  in_axes = tree_map(lambda i: {i: axis_name} if i is not proxy else {}, in_axes)

  @wraps(fun)
  def f_pmapped(*args, **kwargs):
    mesh_devices = np.array(xb.local_devices())
    with mesh(mesh_devices, ['devices']):
      return xmap(fun, in_axes=in_axes, out_axes={0: axis_name},
                  axis_resources={axis_name: 'devices'})(*args, **kwargs)
  return f_pmapped
| 45.38826 | 101 | 0.693751 |
65199beaa454d28f2747e63b49af429a3af191f2 | 1,874 | py | Python | extract_data.py | pcg15/bme590hrm | ac8d4865d870d0aebdbe32396b740dae5babee7b | [
"MIT"
] | null | null | null | extract_data.py | pcg15/bme590hrm | ac8d4865d870d0aebdbe32396b740dae5babee7b | [
"MIT"
] | null | null | null | extract_data.py | pcg15/bme590hrm | ac8d4865d870d0aebdbe32396b740dae5babee7b | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
import logging
logging.basicConfig(filename='hrmonitorlog.txt', format='%(levelname)s \
%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p', level=logging.DEBUG)
def extract_voltage_data(filename):
    """Extract the voltage channel from an ECG file and mean-center it.

    :param filename: the name of a file located in the /test_data folder \
entered as a string
    :returns norm_voltage: voltage samples with their mean subtracted
    """
    # Local import -- presumably to avoid a circular import at module load
    # time; confirm against import_data's own imports.
    from import_data import import_data
    df = import_data(filename)
    values = df.values
    # Column 0 holds time stamps (see extract_time_data), column 1 voltage.
    voltage = values[:, 1]
    norm_voltage = voltage - np.mean(voltage)
    logging.info("extract_voltage_data: norm_voltage found")
    logging.debug("norm_voltage="+str(norm_voltage))
    return norm_voltage
def extract_time_data(filename):
    """Extract the time column from an ECG file.

    :param filename: the name of a file located in the /test_data folder \
entered as a string
    :returns time: array of time values from the ECG input
    """
    # Local import mirrors extract_voltage_data.
    from import_data import import_data
    df = import_data(filename)
    values = df.values
    # Column 0 holds the time stamps.
    time = values[:, 0]
    logging.info("extract_time_data: time data found")
    logging.debug("time="+str(time))
    return time
def extract_template_data(template):
    """Load the ECG template CSV and mean-center its voltage column.

    NOTE(review): the ``template`` argument is immediately overwritten by
    reading the hard-coded path ``test_data/template.csv`` below, so the
    parameter is effectively ignored -- confirm with callers whether the
    passed-in value was meant to be used.

    :param template: template data (currently unused, see note above)
    :returns norm_template: mean-centered template voltage values
    """
    template = pd.read_csv("test_data/template.csv", header=None)
    temp_values = template.values
    # Column 1 holds the template voltage samples.
    temp_vol = temp_values[:, 1]
    norm_template = temp_vol - np.mean(temp_vol)
    logging.info("extract_template_data: norm_template found")
    logging.debug("norm_template="+str(norm_template))
    return norm_template
4b4dc3d62b0145573cfbdfd362e33d2baf29a1e3 | 1,729 | py | Python | tests/test_assets.py | simongarisch/pytrade | 6245c0a47017a880299fa7704a49580f394fa87b | [
"MIT"
] | 2 | 2020-10-19T02:44:57.000Z | 2021-11-08T10:45:25.000Z | tests/test_assets.py | simongarisch/pytrade | 6245c0a47017a880299fa7704a49580f394fa87b | [
"MIT"
] | 1 | 2020-12-24T02:59:58.000Z | 2020-12-24T02:59:58.000Z | tests/test_assets.py | simongarisch/pytrade | 6245c0a47017a880299fa7704a49580f394fa87b | [
"MIT"
] | null | null | null | import pytest
from pxtrade.settings import get_default_currency_code
from pxtrade.assets import reset, Asset, Stock
from pxtrade.observable import Observable
def test_is_observable():
    """ All assets must be observable. """
    # Registers "AAPL" in the global Asset registry as a side effect.
    stock = Stock("AAPL", currency_code="USD")
    assert isinstance(stock, Observable)
def test_default_ccy_usd():
    """ Check that assets have a default currency. """
    # NOTE(review): also creates "AAPL"; relies on the registry being clean
    # between tests (presumably via a fixture) -- confirm.
    stock = Stock("AAPL")
    default_code = get_default_currency_code()
    assert stock.currency_code == default_code
def test_is_unique():
    """ Each asset must have a unique ticker. """
    stock = Stock("AAPL")
    # Creating a second asset with the same ticker must be rejected.
    with pytest.raises(ValueError):
        Stock("AAPL")
    assert stock.code == "AAPL"
def test_get_asset_for_code():
    """The registry lookup returns the very instance that was registered."""
    reset()  # clear the global Asset registry before relying on it
    stock1 = Stock("GOOG")
    stock2 = Asset.get_asset_for_code("GOOG")
    assert stock1 is stock2
def test_get_instances():
    """get_instances returns every registered asset."""
    # NOTE(review): asserting exactly 2 instances assumes the registry is
    # empty when this test starts (no reset() call here) -- verify a
    # fixture performs the reset, otherwise this is order-dependent.
    stock1 = Stock("AAA")
    stock2 = Stock("BBB")
    assets = Asset.get_instances()
    assert len(assets) == 2
    assert stock1 in assets
    assert stock2 in assets
def test_local_value():
    """local_value is None until a price is set, then equals the price."""
    stock = Stock("AAPL")
    assert stock.local_value is None
    stock.price = 300
    assert stock.price == 300
    assert stock.local_value == 300
def test_price_setter():
    """Price can be set at construction, reset to None, and rejects strings."""
    stock = Stock(
        code="AAPL",
        currency_code="USD",
        price=121,
    )
    assert stock.code == "AAPL"
    assert stock.currency_code == "USD"
    assert stock.price == 121
    assert stock.local_value == 121
    stock.price = None  # so we cannot value
    assert stock.local_value is None
    stock.price = 120
    assert stock.price == 120
    assert stock.local_value == 120
    # Non-numeric prices must be rejected.
    with pytest.raises(TypeError):
        stock.price = "123"
ecc06a0067b3ef5339b5365953052c1785dd20c4 | 104 | py | Python | kafka_postgres/web_health_monitor/exceptions.py | eykop/Web-Monitoring-Kafka-PostgreSQL | d80ae6b1998c7c00af23cb805843162b9e6135b8 | [
"MIT"
] | null | null | null | kafka_postgres/web_health_monitor/exceptions.py | eykop/Web-Monitoring-Kafka-PostgreSQL | d80ae6b1998c7c00af23cb805843162b9e6135b8 | [
"MIT"
] | null | null | null | kafka_postgres/web_health_monitor/exceptions.py | eykop/Web-Monitoring-Kafka-PostgreSQL | d80ae6b1998c7c00af23cb805843162b9e6135b8 | [
"MIT"
] | 1 | 2021-06-03T11:53:43.000Z | 2021-06-03T11:53:43.000Z | """Module that defines exceptions for wbe monitoring"""
class WebMonitorException(Exception):
    """Base exception raised by the web health monitor."""
b21bc420f024ce95d1a6590632025353f6a2c7d2 | 4,596 | py | Python | parkourdex/settings.py | moralrecordings/parkourdex | be0badf084b583588dbe672e53948a24ea82de95 | [
"BSD-3-Clause"
] | null | null | null | parkourdex/settings.py | moralrecordings/parkourdex | be0badf084b583588dbe672e53948a24ea82de95 | [
"BSD-3-Clause"
] | 8 | 2021-04-08T18:31:49.000Z | 2022-03-11T23:31:32.000Z | parkourdex/settings.py | moralrecordings/parkourdex | be0badf084b583588dbe672e53948a24ea82de95 | [
"BSD-3-Clause"
] | null | null | null | """
Django settings for parkourdex project.
"""
from confy import env, database, cache
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname( os.path.dirname( os.path.abspath( __file__ ) ) )

# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
SECRET_KEY = env( 'SECRET_KEY' )
# NOTE(review): confy's env() presumably returns the raw environment value;
# confirm DEBUG is parsed to a bool and not a non-empty (always truthy) string.
DEBUG = env( 'DEBUG' )

if not DEBUG:
    ALLOWED_HOSTS = env( 'ALLOWED_HOSTS', '' ).split( ',' )
else:
    # Development convenience: accept any host.
    ALLOWED_HOSTS = ['*']

DEFAULT_FROM_EMAIL = env( 'DEFAULT_FROM_EMAIL' )

# Application definition
INSTALLED_APPS = [
    'parkourdex',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.gis',
    'locations',
    'frontend',
    'corsheaders',
    'simple_history',
    'rest_framework',
    'rest_framework_gis',
    'django_extensions',
    'django_registration',
]

MIDDLEWARE = [
    # CorsMiddleware is placed first so CORS headers are attached to every
    # response before CommonMiddleware runs.
    'corsheaders.middleware.CorsMiddleware',
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'simple_history.middleware.HistoryRequestMiddleware',
]

ROOT_URLCONF = 'parkourdex.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'parkourdex.wsgi.application'

# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
# Connection settings are read from the environment by confy.
DATABASES = {
    'default': database.config()
}
CACHES = {'default': cache.config()}

# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTHENTICATION_BACKENDS = ('parkourdex.auth.Backend',)
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'verbose': {
            'format': '{levelname} {asctime} {module} {process:d} {thread:d} {message}',
            'style': '{',
        },
        'simple': {
            'format': '{levelname} {message}',
            'style': '{',
        },
    },
    'handlers': {
        'console': {
            'level': 'INFO',
            'class': 'logging.StreamHandler',
            'formatter': 'verbose'
        },
    },
    'loggers': {
        'django': {
            'handlers': ['console'],
            'propagate': True,
        },
        'parkourdex': {
            'handlers': ['console'],
            'level': 'INFO'
        },
    }
}

# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-au'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# CORS: mirror ALLOWED_HOSTS unless we accept any host, and add the usual
# local development origins when DEBUG is on.
CORS_ORIGIN_WHITELIST = []
if '*' not in ALLOWED_HOSTS:
    CORS_ORIGIN_WHITELIST = [host for host in ALLOWED_HOSTS]
if DEBUG:
    CORS_ORIGIN_WHITELIST += [
        'http://localhost:8000',
        'http://localhost:8080',
        'http://127.0.0.1:8000',
        'http://127.0.0.1:8080',
    ]
CORS_ALLOW_CREDENTIALS = True

# django_registration expiry
ACCOUNT_ACTIVATION_DAYS = 7

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join( BASE_DIR, 'staticroot' )
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join( BASE_DIR, 'media' )

REST_FRAMEWORK = {
    'DEFAULT_PERMISSION_CLASSES': (
        'rest_framework.permissions.IsAdminUser',
    ),
    'DEFAULT_AUTHENTICATION_CLASSES': (
        'rest_framework.authentication.SessionAuthentication',
    ),
}
| 24.978261 | 91 | 0.635988 |
081567500f74584145d9d480074948699e05c6bb | 89,775 | py | Python | nilearn/datasets/neurovault.py | ctw/nilearn | 932eee9c69cd8fbf40ee6af5cee77f8f93b25da3 | [
"BSD-2-Clause"
] | 827 | 2015-01-30T23:11:42.000Z | 2022-03-29T21:21:05.000Z | nilearn/datasets/neurovault.py | ctw/nilearn | 932eee9c69cd8fbf40ee6af5cee77f8f93b25da3 | [
"BSD-2-Clause"
] | 2,845 | 2015-01-04T22:14:41.000Z | 2022-03-31T20:28:09.000Z | nilearn/datasets/neurovault.py | ctw/nilearn | 932eee9c69cd8fbf40ee6af5cee77f8f93b25da3 | [
"BSD-2-Clause"
] | 484 | 2015-02-03T10:58:19.000Z | 2022-03-29T21:57:16.000Z | """
Download statistical maps available on Neurovault (http://neurovault.org).
"""
# Author: Jerome Dockes
# License: simplified BSD
import os
import warnings
import traceback
from copy import copy, deepcopy
import shutil
import re
import json
from glob import glob
from tempfile import mkdtemp
import requests
from urllib.parse import urljoin, urlencode
from collections.abc import Container
import numpy as np
from sklearn.utils import Bunch
from sklearn.feature_extraction import DictVectorizer
from .utils import _fetch_file, _get_dataset_dir, _get_dataset_descr
from ..image import resample_img
import uuid
# Neurovault / Neurosynth API endpoints.
_NEUROVAULT_BASE_URL = 'http://neurovault.org/api/'
_NEUROVAULT_COLLECTIONS_URL = urljoin(_NEUROVAULT_BASE_URL, 'collections/')
_NEUROVAULT_IMAGES_URL = urljoin(_NEUROVAULT_BASE_URL, 'images/')
_NEUROSYNTH_FETCH_WORDS_URL = 'http://neurosynth.org/api/decode/'

# Filters the Neurovault server can apply itself; everything else must be
# filtered client-side.
_COL_FILTERS_AVAILABLE_ON_SERVER = ('DOI', 'name', 'owner', 'id')
_IM_FILTERS_AVAILABLE_ON_SERVER = tuple()

_DEFAULT_BATCH_SIZE = 100
_DEFAULT_MAX_IMAGES = 100

# 3 mm isotropic MNI-style affine used as the common resampling target.
STD_AFFINE = np.array([[3., 0., 0., -90.],
                       [0., 3., 0., -126.],
                       [0., 0., 3., -72.],
                       [0., 0., 0., 1.]])

# if _MAX_CONSECUTIVE_FAILS downloads fail in a row, we consider there is a
# problem(e.g. no internet connection, or the Neurovault server is down), and
# we abort the fetching.
_MAX_CONSECUTIVE_FAILS = 100

# if _MAX_FAILS_IN_COLLECTION images fail to be downloaded from the same
# collection, we consider this collection is garbage and we move on to the
# next collection.
_MAX_FAILS_IN_COLLECTION = 30

# Internal verbosity levels (higher = more output).
_DEBUG = 3
_INFO = 2
_WARNING = 1
_ERROR = 0
def _requests_session():
    """Return the process-wide requests.Session, creating it on first use."""
    # The session is memoized on the function object itself.
    session = getattr(_requests_session, "session", None)
    if session is None:
        session = requests.Session()
        _requests_session.session = session
    return session
# Helpers for filtering images and collections.
# Helpers for filtering images and collections.
class _SpecialValue(object):
    """Base class for special values used to filter terms.

    Derived classes should override ``__eq__`` in order to create
    objects that can be used for comparisons to particular sets of
    values in filters.

    """
    def __eq__(self, other):
        raise NotImplementedError('Use a derived class for _SpecialValue')

    # NOTE(review): __req__ and __rne__ are not part of the Python data
    # model (there are no reflected hooks for == / !=; Python uses the
    # other operand's __eq__/__ne__ instead). They only take effect if
    # called explicitly.
    def __req__(self, other):
        return self.__eq__(other)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __rne__(self, other):
        return self.__ne__(other)

    def __repr__(self):
        # Subclasses that take a constructor argument expose it as
        # ``repr_arg_`` so it shows up in the repr.
        if hasattr(self, 'repr_arg_'):
            return '{0}({1!r})'.format(self.__class__.__name__, self.repr_arg_)
        return '{0}()'.format(self.__class__.__name__)
class IsNull(_SpecialValue):
    """Filter value that matches anything falsy.

    ``IsNull() == value`` holds exactly when ``bool(value)`` is ``False``
    (e.g. ``0``, ``''``, ``None``, empty containers) and fails otherwise.

    See Also
    --------
    nilearn.datasets.neurovault.NotNull

    Examples
    --------
    >>> from nilearn.datasets.neurovault import IsNull
    >>> IsNull() == ''
    True
    >>> IsNull() == 'a'
    False

    """
    def __eq__(self, other):
        return not other
class NotNull(_SpecialValue):
    """Filter value that matches anything truthy.

    ``NotNull() == value`` holds exactly when ``bool(value)`` is ``True``.

    See Also
    --------
    nilearn.datasets.neurovault.IsNull

    Examples
    --------
    >>> from nilearn.datasets.neurovault import NotNull
    >>> NotNull() == 'a'
    True
    >>> NotNull() == ''
    False

    """
    def __eq__(self, other):
        truthy = bool(other)
        return truthy
class NotEqual(_SpecialValue):
    """Filter value that matches anything different from ``negated``.

    ``NotEqual(obj) == value`` holds exactly when ``obj == value`` is
    ``False``.

    Parameters
    ----------
    negated : object
        Values equal to this object are rejected by the filter.

    See Also
    --------
    nilearn.datasets.neurovault.IsNull,
    nilearn.datasets.neurovault.NotNull

    Examples
    --------
    >>> from nilearn.datasets.neurovault import NotEqual
    >>> NotEqual(0) == '0'
    True
    >>> NotEqual(0) == 0
    False

    """
    def __init__(self, negated):
        self.negated_ = negated
        # Exposed so _SpecialValue.__repr__ can show the argument.
        self.repr_arg_ = negated

    def __eq__(self, other):
        return not (self.negated_ == other)
class _OrderComp(_SpecialValue):
    """Base class for filters defined by an order comparison to a bound.

    Candidates are first cast to ``type(bound)``; values that cannot be
    cast never match. Subclasses implement ``_eq_impl``.
    """
    def __init__(self, bound):
        self.bound_ = bound
        self._cast = type(bound)
        # Exposed so _SpecialValue.__repr__ can show the argument.
        self.repr_arg_ = bound

    def __eq__(self, other):
        try:
            converted = self._cast(other)
            return self._eq_impl(converted)
        except (TypeError, ValueError):
            return False
class GreaterOrEqual(_OrderComp):
    """Filter value matching any ``value`` with ``bound <= value``.

    Parameters
    ----------
    bound : object
        The object to which a candidate should be superior or equal in
        order to pass through the filter.

    Examples
    --------
    >>> from nilearn.datasets.neurovault import GreaterOrEqual
    >>> GreaterOrEqual(0.) == 0
    True
    >>> GreaterOrEqual(0.) == -.1
    False

    """
    def _eq_impl(self, other):
        return self.bound_ <= other
class GreaterThan(_OrderComp):
    """Filter value matching any ``value`` with ``bound < value``.

    Parameters
    ----------
    bound : object
        The object to which a candidate should be strictly superior in
        order to pass through the filter.

    Examples
    --------
    >>> from nilearn.datasets.neurovault import GreaterThan
    >>> GreaterThan(0.) == 1.
    True
    >>> GreaterThan(0.) == 0.
    False

    """
    def _eq_impl(self, other):
        return self.bound_ < other
class LessOrEqual(_OrderComp):
    """Filter value matching any ``value`` with ``value <= bound``.

    Parameters
    ----------
    bound : object
        The object to which a candidate should be inferior or equal in
        order to pass through the filter.

    Examples
    --------
    >>> from nilearn.datasets.neurovault import LessOrEqual
    >>> LessOrEqual(0.) == 0.
    True
    >>> LessOrEqual(0.) == 1.
    False

    """
    def _eq_impl(self, other):
        return other <= self.bound_
class LessThan(_OrderComp):
    """Filter value matching any ``value`` with ``value < bound``.

    Parameters
    ----------
    bound : object
        The object to which a candidate should be strictly inferior in
        order to pass through the filter.

    Examples
    --------
    >>> from nilearn.datasets.neurovault import LessThan
    >>> LessThan(0.) == -1.
    True
    >>> LessThan(0.) == 0.
    False

    """
    def _eq_impl(self, other):
        return other < self.bound_
class IsIn(_SpecialValue):
    """Filter value matching anything contained in ``accepted``.

    ``IsIn(*accepted) == value`` holds exactly when
    ``value in accepted`` is ``True``.

    Parameters
    ----------
    accepted : container
        A value will pass through the filter if it is present in
        `accepted`.

    Examples
    --------
    >>> from nilearn.datasets.neurovault import IsIn
    >>> IsIn('a', 'e', 'i', 'o', 'u', 'y') == 'a'
    True
    >>> IsIn('a', 'e', 'i', 'o', 'u', 'y') == 'b'
    False

    """
    def __init__(self, *accepted):
        self.accepted_ = accepted

    def __eq__(self, other):
        return other in self.accepted_

    def __repr__(self):
        # repr of the accepted tuple directly follows the class name.
        return '{0}{1!r}'.format(self.__class__.__name__, self.accepted_)
class NotIn(_SpecialValue):
    """Special value used to filter terms.

    Built with ``NotIn(*rejected)``; compares equal to exactly those
    values that are *not* members of `rejected`.

    Parameters
    ----------
    rejected : container
        A value passes through the filter if it is absent from
        `rejected`.

    See Also
    --------
    nilearn.datasets.neurovault.IsIn,
    nilearn.datasets.neurovault.Contains,
    nilearn.datasets.neurovault.Pattern.

    Examples
    --------
    >>> from nilearn.datasets.neurovault import NotIn
    >>> consonants = NotIn('a', 'e', 'i', 'o', 'u', 'y')
    >>> 'b' == consonants
    True
    >>> consonants == 'a'
    False

    """
    def __init__(self, *rejected):
        self.rejected_ = rejected

    def __eq__(self, other):
        return other not in self.rejected_

    def __repr__(self):
        return '{0}{1!r}'.format(
            type(self).__name__, self.rejected_)
class Contains(_SpecialValue):
    """Special value used to filter terms.

    Built with ``Contains(*must_be_contained)``; compares equal to
    exactly those containers that hold every item in
    `must_be_contained`.

    Parameters
    ----------
    must_be_contained : container
        A value will pass through the filter if it contains all the
        items in must_be_contained.

    See Also
    --------
    nilearn.datasets.neurovault.NotContains,
    nilearn.datasets.neurovault.IsIn,
    nilearn.datasets.neurovault.Pattern.

    Examples
    --------
    >>> from nilearn.datasets.neurovault import Contains
    >>> contains = Contains('house', 'face')
    >>> 'face vs house' == contains
    True
    >>> 'smiling face vs frowning face' == contains
    False

    """
    def __init__(self, *must_be_contained):
        self.must_be_contained_ = must_be_contained

    def __eq__(self, other):
        # Non-containers can never hold the required items.
        if not isinstance(other, Container):
            return False
        return all(item in other for item in self.must_be_contained_)

    def __repr__(self):
        return '{0}{1!r}'.format(
            type(self).__name__, self.must_be_contained_)
class NotContains(_SpecialValue):
    """Special value used to filter terms.

    Built with ``NotContains(*must_not_be_contained)``; compares equal
    to exactly those containers that hold none of the items in
    `must_not_be_contained`.

    Parameters
    ----------
    must_not_be_contained : container
        A value will pass through the filter if it does not contain
        any of the items in must_not_be_contained.

    See Also
    --------
    nilearn.datasets.neurovault.Contains,
    nilearn.datasets.neurovault.NotIn,
    nilearn.datasets.neurovault.Pattern.

    Examples
    --------
    >>> from nilearn.datasets.neurovault import NotContains
    >>> no_garbage = NotContains('bad', 'test')
    >>> no_garbage == 'test image'
    False
    >>> no_garbage == 'good image'
    True

    """
    def __init__(self, *must_not_be_contained):
        self.must_not_be_contained_ = must_not_be_contained

    def __eq__(self, other):
        # Non-containers are rejected outright (same as the behavior
        # of the membership tests below being impossible).
        if not isinstance(other, Container):
            return False
        return not any(
            item in other for item in self.must_not_be_contained_)

    def __repr__(self):
        return '{0}{1!r}'.format(
            type(self).__name__, self.must_not_be_contained_)
class Pattern(_SpecialValue):
    """Special value used to filter terms.

    Built with ``Pattern(pattern[, flags])``; compares equal to exactly
    those strings for which ``re.match(pattern, value, flags)``
    succeeds.

    Parameters
    ----------
    pattern : str
        The pattern to try to match to candidates.
    flags : int, optional (default=0)
        Value for ``re.match`` `flags` parameter,
        e.g. ``re.IGNORECASE``. The default (0), is the default value
        used by ``re.match``.

    See Also
    --------
    nilearn.datasets.neurovault.IsIn,
    nilearn.datasets.neurovault.Contains.

    Documentation for standard library ``re`` module.

    Examples
    --------
    >>> from nilearn.datasets.neurovault import Pattern
    >>> poker = Pattern(r'[0-9akqj]{5}$')
    >>> 'ak05q' == poker
    True
    >>> 'ak05e' == poker
    False

    """
    def __init__(self, pattern, flags=0):
        # Keep the raw pattern string rather than a compiled regex:
        # compiled patterns cannot be deep-copied.
        self.pattern_ = pattern
        self.flags_ = flags

    def __eq__(self, other):
        return (isinstance(other, str) and
                re.match(self.pattern_, other, self.flags_) is not None)

    def __repr__(self):
        return '{0}(pattern={1!r}, flags={2})'.format(
            type(self).__name__, self.pattern_, self.flags_)
def _empty_filter(arg):
"""Place holder for a filter which always returns True.
This is the default ``image_filter`` and ``collection_filter``
argument for ``fetch_neurovault``.
"""
return True
class ResultFilter(object):
    """Easily create callable (local) filters for ``fetch_neurovault``.

    Constructed from a mapping of key-value pairs (optional) and a
    callable filter (also optional), instances of this class are meant
    to be used as ``image_filter`` or ``collection_filter`` parameters
    for ``fetch_neurovault``.

    Such filters can be combined using their methods ``AND``, ``OR``,
    ``XOR``, and ``NOT``, with the usual semantics.

    Key-value pairs can be added by treating a ``ResultFilter`` as a
    dictionary: after evaluating ``res_filter[key] = value``, only
    metadata such that ``metadata[key] == value`` can pass through the
    filter.

    Parameters
    ----------
    query_terms : dict, optional
        A ``metadata`` dictionary will be blocked by the filter if it
        does not respect ``metadata[key] == value`` for all
        ``key``, ``value`` pairs in `query_terms`. If ``None``, the
        empty dictionary is used.
    callable_filter : callable, optional
        A ``metadata`` dictionary will be blocked by the filter if
        `callable_filter` does not return ``True`` for ``metadata``.
        Default=empty_filter

    As an alternative to the `query_terms` dictionary parameter,
    key, value pairs can be passed as keyword arguments.

    Attributes
    ----------
    query_terms_ : dict
        In order to pass through the filter, metadata must verify
        ``metadata[key] == value`` for each ``key``, ``value`` pair in
        `query_terms_`.
    callable_filters_ : list of callables
        In addition to ``(key, value)`` pairs, we can use this
        attribute to specify more elaborate requirements. Called with
        a dict representing metadata for an image or collection, each
        element of this list returns ``True`` if the metadata should
        pass through the filter and ``False`` otherwise.

    A dict of metadata will only pass through the filter if it
    satisfies all the `query_terms` AND all the elements of
    `callable_filters_`.

    See Also
    --------
    nilearn.datasets.neurovault.IsNull,
    nilearn.datasets.neurovault.NotNull,
    nilearn.datasets.neurovault.NotEqual,
    nilearn.datasets.neurovault.GreaterOrEqual,
    nilearn.datasets.neurovault.GreaterThan,
    nilearn.datasets.neurovault.LessOrEqual,
    nilearn.datasets.neurovault.LessThan,
    nilearn.datasets.neurovault.IsIn,
    nilearn.datasets.neurovault.NotIn,
    nilearn.datasets.neurovault.Contains,
    nilearn.datasets.neurovault.NotContains,
    nilearn.datasets.neurovault.Pattern.

    Examples
    --------
    >>> from nilearn.datasets.neurovault import ResultFilter
    >>> filt = ResultFilter(a=0).AND(ResultFilter(b=1).OR(ResultFilter(b=2)))
    >>> filt({'a': 0, 'b': 1})
    True
    >>> filt({'a': 0, 'b': 0})
    False

    """
    def __init__(self, query_terms=None,
                 callable_filter=_empty_filter, **kwargs):
        if query_terms is None:
            query_terms = {}
        # Keyword arguments are merged into (and can override) the
        # `query_terms` mapping.
        query_terms = dict(query_terms, **kwargs)
        self.query_terms_ = query_terms
        self.callable_filters_ = [callable_filter]

    def __call__(self, candidate):
        """Return True if candidate satisfies the requirements.

        Parameters
        ----------
        candidate : dict
            A dictionary representing metadata for a file or a
            collection, to be filtered.

        Returns
        -------
        bool
            ``True`` if `candidate` passes through the filter and ``False``
            otherwise.
        """
        for key, value in self.query_terms_.items():
            # `value` is on the left-hand side so that special values
            # (objects with a custom __eq__, e.g. IsIn, Pattern) drive
            # the comparison.
            if not (value == candidate.get(key)):
                return False
        for callable_filter in self.callable_filters_:
            if not callable_filter(candidate):
                return False
        return True

    def OR(self, other_filter):
        """Return a new filter passing candidates that pass either filter."""
        # Deep copies freeze the current state of both operands, so
        # later mutations of `self`/`other_filter` do not affect the
        # combined filter.
        filt1, filt2 = deepcopy(self), deepcopy(other_filter)
        new_filter = ResultFilter(
            callable_filter=lambda r: filt1(r) or filt2(r))
        return new_filter

    def AND(self, other_filter):
        """Return a new filter passing candidates that pass both filters."""
        filt1, filt2 = deepcopy(self), deepcopy(other_filter)
        new_filter = ResultFilter(
            callable_filter=lambda r: filt1(r) and filt2(r))
        return new_filter

    def XOR(self, other_filter):
        """Return a new filter passing candidates that pass exactly one."""
        filt1, filt2 = deepcopy(self), deepcopy(other_filter)
        new_filter = ResultFilter(
            callable_filter=lambda r: filt1(r) != filt2(r))
        return new_filter

    def NOT(self):
        """Return a new filter passing exactly what this one blocks."""
        filt = deepcopy(self)
        new_filter = ResultFilter(
            callable_filter=lambda r: not filt(r))
        return new_filter

    def __getitem__(self, item):
        """Get item from query_terms_"""
        return self.query_terms_[item]

    def __setitem__(self, item, value):
        """Set item in query_terms_"""
        self.query_terms_[item] = value

    def __delitem__(self, item):
        """Remove item from query_terms_"""
        if item in self.query_terms_:
            del self.query_terms_[item]

    def add_filter(self, callable_filter):
        """Add a function to the callable_filters_.

        After a call add_filter(additional_filt), in addition to all
        the previous requirements, a candidate must also verify
        additional_filt(candidate) in order to pass through the
        filter.
        """
        self.callable_filters_.append(callable_filter)

    def __str__(self):
        return self.__class__.__name__
# Utilities for composing queries and interacting with
# neurovault and neurosynth
class _TemporaryDirectory(object):
"""Context manager that provides a temporary directory
A temporary directory is created on __enter__
and removed on __exit__ .
Attributes
----------
temp_dir_ : str or None
location of temporary directory or None if not created.
"""
def __init__(self):
self.temp_dir_ = None
def __enter__(self):
self.temp_dir_ = mkdtemp()
return self.temp_dir_
def __exit__(self, *args):
if self.temp_dir_ is None:
return
shutil.rmtree(self.temp_dir_)
self.temp_dir_ = None
def _print_if(message, level, threshold_level,
with_traceback=False):
"""Print a message if its importance is above a threshold.
Parameters
----------
message : str
the message to print if `level` is strictly above
`threshold_level`.
level : int
importance of the message.
threshold_level : int
the message is printed if `level` is strictly above
`threshold_level`.
with_traceback : bool, optional
if `message` is printed, also print the last traceback.
Default=False.
"""
if level > threshold_level:
return
print(message)
if with_traceback:
traceback.print_exc()
def _append_filters_to_query(query, filters):
"""Encode dict or sequence of key-value pairs into a URL query string
Parameters
----------
query : str
URL to which the filters should be appended
filters : dict or sequence of pairs
Filters to append to the URL.
Returns
-------
str
The query with filters appended to it.
Notes
-----
If one of the `filters` keys is 'id', we get the url that points
directly to that id,
e.g. 'http://neurovault.org/api/collections/40', and the other
filters are ignored.
"""
if not filters:
return query
if 'id' in filters:
return urljoin(query, str(filters['id']))
new_query = urljoin(
query, '?{0}'.format(urlencode(filters)))
return new_query
def _get_batch(query, prefix_msg='', timeout=10., verbose=3):
    """Given an URL, get the HTTP response and transform it to python dict.

    The URL is used to send an HTTP GET request and the response is
    transformed into a dictionary.

    Parameters
    ----------
    query : str
        The URL from which to get data.
    prefix_msg : str, optional
        Prefix for all log messages.
        Default=''.
    timeout : float, optional
        Timeout in seconds. Default=10.
    verbose : int, optional
        An integer in [0, 1, 2, 3] to control the verbosity level.
        Default=3.

    Returns
    -------
    batch : dict
        Python dict representing the response's content.

    Raises
    ------
    requests.RequestException
        If there was a problem opening the URL.
    ValueError
        If the response could not be decoded, was not json, or did not contain
        either 'id' (single result), or 'results' and 'count' (actual batch).
    """
    session = _requests_session()
    # Prepare the GET request explicitly through the session; the
    # Keep-Alive header asks to reuse the connection across the many
    # consecutive batch requests issued while scrolling.
    req = requests.Request(
        method="GET", url=query, headers={"Connection": "Keep-Alive"})
    prepped = session.prepare_request(req)
    _print_if('{0}getting new batch: {1}'.format(
        prefix_msg, query), _DEBUG, verbose)
    try:
        resp = session.send(prepped, timeout=timeout)
        resp.raise_for_status()
        batch = resp.json()
    except Exception:
        # Log with traceback, then propagate: the caller decides
        # whether a failed batch is fatal.
        _print_if('Could not get batch from {0}'.format(query),
                  _ERROR, verbose, with_traceback=True)
        raise
    if 'id' in batch:
        # Single-result response (queried by id): normalize it to the
        # same {'count', 'results'} shape as a real batch.
        batch = {'count': 1, 'results': [batch]}
    for key in ['results', 'count']:
        if batch.get(key) is None:
            msg = ('Could not find required key "{0}" '
                   'in batch retrieved from {1}'.format(key, query))
            _print_if(msg, _ERROR, verbose)
            raise ValueError(msg)
    return batch
def _scroll_server_results(url, local_filter=_empty_filter,
                           query_terms=None, max_results=None,
                           batch_size=None, prefix_msg='', verbose=3):
    """Download list of metadata from Neurovault.

    Parameters
    ----------
    url : str
        The base url (without the filters) from which to get data.
    local_filter : callable, optional
        Used to filter the results based on their metadata:
        must return True if the result is to be kept and False otherwise.
        Is called with the dict containing the metadata as sole argument.
        Default=_empty_filter.
    query_terms : dict, sequence of pairs or None, optional
        Key-value pairs to add to the base url in order to form query.
        If ``None``, nothing is added to the url.
    max_results : int or None, optional
        Maximum number of results to fetch; if ``None``, all available data
        that matches the query is fetched.
    batch_size : int or None, optional
        Neurovault returns the metadata for hits corresponding to a query
        in batches. batch_size is used to choose the (maximum) number of
        elements in a batch. If None, ``_DEFAULT_BATCH_SIZE`` is used.
    prefix_msg : str, optional
        Prefix for all log messages. Default=''.
    verbose : int, optional (default=3)
        An integer in [0, 1, 2, 3] to control the verbosity level.

    Yields
    ------
    result : dict
        A result in the retrieved batch.
    None
        Once for each batch that could not be downloaded or decoded,
        to indicate a failure.
    """
    query = _append_filters_to_query(url, query_terms)
    if batch_size is None:
        batch_size = _DEFAULT_BATCH_SIZE
    # Append pagination parameters. The doubled braces '{{0}}' leave a
    # literal '{0}' placeholder for the offset, filled in below on
    # each iteration.
    query = '{0}{1}limit={2}&offset={{0}}'.format(
        query, ('&' if '?' in query else '?'), batch_size)
    downloaded = 0
    n_available = None
    while(max_results is None or downloaded < max_results):
        new_query = query.format(downloaded)
        try:
            batch = _get_batch(new_query, prefix_msg, verbose=verbose)
        except Exception:
            # Signal the failure to the caller. NOTE(review): the
            # offset is not advanced here, so the same batch is
            # retried on the next iteration; a permanently failing
            # batch yields None repeatedly until the caller stops.
            yield None
            batch = None
        if batch is not None:
            batch_size = len(batch['results'])
            downloaded += batch_size
            _print_if('{0}batch size: {1}'.format(prefix_msg, batch_size),
                      _DEBUG, verbose)
            if n_available is None:
                # First successful batch: clamp max_results to the
                # total number of hits reported by the server.
                n_available = batch['count']
                max_results = (n_available if max_results is None
                               else min(max_results, n_available))
            for result in batch['results']:
                if local_filter(result):
                    yield result
def _yield_from_url_list(url_list, verbose=3):
    """Get metadata coming from an explicit list of URLs.

    This is different from ``_scroll_server_results``, which is used
    to get all the metadata that matches certain filters.

    Parameters
    ----------
    url_list : Container of str
        URLs from which to get data
    verbose : int, optional
        An integer in [0, 1, 2, 3] to control the verbosity level.
        Default=3.

    Yields
    ------
    content : dict
        The metadata from one URL.
    None
        Once for each URL that resulted in an error, to signify failure.
    """
    for target_url in url_list:
        try:
            response = _get_batch(target_url, verbose=verbose)
        except Exception:
            # Report the failure for this URL and move on to the next.
            yield None
            continue
        yield response['results'][0]
def _simple_download(url, target_file, temp_dir, verbose=3):
    """Download a file to a chosen location on disk.

    Thin wrapper around ``utils._fetch_file`` which allows specifying
    the target file name.

    Parameters
    ----------
    url : str
        URL of the file to download.
    target_file : str
        Location of the downloaded file on filesystem.
    temp_dir : str
        Location of sandbox directory used by ``_fetch_file``.
    verbose : int, optional
        An integer in [0, 1, 2, 3] to control the verbosity level.
        Default=3.

    Returns
    -------
    target_file : str
        The location in which the file was downloaded.

    Raises
    ------
    RequestException, ValueError
        If an error occurred when downloading the file.

    See Also
    --------
    nilearn.datasets._utils._fetch_file
    """
    _print_if('Downloading file: {0}'.format(url), _DEBUG, verbose)
    try:
        fetched_path = _fetch_file(url, temp_dir, resume=False,
                                   overwrite=True, verbose=0)
    except Exception:
        _print_if('Problem downloading file from {0}'.format(url),
                  _ERROR, verbose)
        raise
    # Move from the sandbox directory to the requested location.
    shutil.move(fetched_path, target_file)
    _print_if(
        'Download succeeded, downloaded to: {0}'.format(target_file),
        _DEBUG, verbose)
    return target_file
def neurosynth_words_vectorized(word_files, verbose=3, **kwargs):
    """Load Neurosynth data from disk into an (n images, voc size) matrix.

    Neurosynth data is saved on disk as ``{word: weight}``
    dictionaries for each image, this function reads it and returns a
    vocabulary list and a term weight matrix.

    Parameters
    ----------
    word_files : Container
        The paths to the files from which to read word weights (each
        is supposed to contain the Neurosynth response for a
        particular image).
    verbose : int, optional
        An integer in [0, 1, 2, 3] to control the verbosity level.
        Default=3.

    Keyword arguments are passed on to
    ``sklearn.feature_extraction.DictVectorizer``.

    Returns
    -------
    frequencies : numpy.ndarray
        An (n images, vocabulary size) array. Each row corresponds to
        an image, and each column corresponds to a word. The words are
        in the same order as in returned value `vocabulary`, so that
        `frequencies[i, j]` corresponds to the weight of
        `vocabulary[j]` for image ``i``. This matrix is computed by
        an ``sklearn.feature_extraction.DictVectorizer`` instance.
    vocabulary : list of str
        A list of all the words encountered in the word files.

    See Also
    --------
    sklearn.feature_extraction.DictVectorizer
    """
    _print_if('Computing word features.', _INFO, verbose)
    words = []
    voc_empty = True
    for file_name in word_files:
        try:
            with open(file_name, 'rb') as word_file:
                info = json.loads(word_file.read().decode('utf-8'))
                words.append(info['data']['values'])
                if info['data']['values'] != {}:
                    voc_empty = False
        except Exception:
            # A broken or missing file contributes an empty row rather
            # than aborting the whole vectorization.
            _print_if(
                'Could not load words from file {0}; error: {1}'.format(
                    file_name, traceback.format_exc()),
                _ERROR, verbose)
            words.append({})
    if voc_empty:
        # Every file failed or was empty: nothing to vectorize.
        warnings.warn('No word weight could be loaded, '
                      'vectorizing Neurosynth words failed.')
        return None, None
    vectorizer = DictVectorizer(**kwargs)
    frequencies = vectorizer.fit_transform(words).toarray()
    vocabulary = np.asarray(vectorizer.feature_names_)
    _print_if('Computing word features done; vocabulary size: {0}'.format(
        vocabulary.size), _INFO, verbose)
    return frequencies, vocabulary
def _remove_none_strings(metadata):
"""Replace strings representing a null value with ``None``.
Some collections and images in Neurovault, for some fields, use the
string "None", "None / Other", or "null", instead of having ``null``
in the json file; we replace these strings with ``None`` so that
they are consistent with the rest and for correct behaviour when we
want to select or filter out null values.
Parameters
----------
metadata : dict
Metadata to transform
Returns
-------
metadata : dict
Original metadata in which strings representing null values
have been replaced by ``None``.
"""
metadata = metadata.copy()
for key, value in metadata.items():
if (isinstance(value, str) and
re.match(r'($|n/?a$|none|null)', value, re.IGNORECASE)):
metadata[key] = None
return metadata
def _write_metadata(metadata, file_name):
"""Save metadata to disk.
Absolute paths are not written; they are recomputed using the
relative paths when data is loaded again, so that if the
Neurovault directory has been moved paths are still valid.
Parameters
----------
metadata : dict
Dictionary representing metadata for a file or a
collection. Any key containing 'absolute' is ignored.
file_name : str
Path to the file in which to write the data.
"""
metadata = dict([(k, v) for k, v in metadata.items() if
'absolute' not in k])
with open(file_name, 'wb') as metadata_file:
metadata_file.write(json.dumps(metadata).encode('utf-8'))
def _add_absolute_paths(root_dir, metadata, force=True):
"""Add absolute paths to a dictionary containing relative paths.
Parameters
----------
root_dir : str
The root of the data directory, to prepend to relative paths
in order to form absolute paths.
metadata : dict
Dictionary containing metadata for a file or a collection. Any
key containing 'relative' is understood to be mapped to a
relative path and the corresponding absolute path is added to
the dictionary.
force : bool, optional
If ``True``, if an absolute path is already present in the
metadata, it is replaced with the recomputed value. If
``False``, already specified absolute paths have priority.
Default=True.
Returns
-------
metadata : dict
The metadata enriched with absolute paths.
"""
absolute_paths = {}
for name, value in metadata.items():
match = re.match(r'(.*)relative_path(.*)', name)
if match is not None:
abs_name = '{0}absolute_path{1}'.format(*match.groups())
absolute_paths[abs_name] = os.path.join(root_dir, value)
if not absolute_paths:
return metadata
new_metadata = metadata.copy()
set_func = new_metadata.__setitem__ if force else new_metadata.setdefault
for name, value in absolute_paths.items():
set_func(name, value)
return new_metadata
def _json_from_file(file_name):
"""Load a json file encoded with UTF-8."""
with open(file_name, 'rb') as dumped:
loaded = json.loads(dumped.read().decode('utf-8'))
return loaded
def _json_add_collection_dir(file_name, force=True):
    """Load a collection metadata file and record its directory.

    The parent directory of `file_name` is stored under
    'absolute_path' and its basename under 'relative_path'; with
    ``force=False`` existing values are kept.
    """
    loaded = _json_from_file(file_name)
    setter = loaded.__setitem__ if force else loaded.setdefault
    dir_path = os.path.dirname(file_name)
    setter('absolute_path', dir_path)
    setter('relative_path', os.path.basename(dir_path))
    return loaded
def _json_add_im_files_paths(file_name, force=True):
    """Load an image metadata file and add image and words file paths.

    Relative paths to the image file and (if present on disk) its
    Neurosynth words file are recorded, then the matching absolute
    paths are derived with ``_add_absolute_paths``.
    """
    loaded = _json_from_file(file_name)
    setter = loaded.__setitem__ if force else loaded.setdefault
    dir_path = os.path.dirname(file_name)
    dir_relative_path = os.path.basename(dir_path)
    image_file_name = 'image_{0}.nii.gz'.format(loaded['id'])
    words_file_name = 'neurosynth_words_for_image_{0}.json'.format(
        loaded['id'])
    setter('relative_path', os.path.join(dir_relative_path, image_file_name))
    # Only record the words file when it actually exists locally.
    if os.path.isfile(os.path.join(dir_path, words_file_name)):
        setter('ns_words_relative_path',
               os.path.join(dir_relative_path, words_file_name))
    return _add_absolute_paths(
        os.path.dirname(dir_path), loaded, force=force)
def _download_collection(collection, download_params):
    """Create a local directory and store metadata for a collection.

    Parameters
    ----------
    collection : dict
        Collection metadata, or ``None``.
    download_params : dict
        General information about download session, containing e.g. the
        data directory (see `_read_download_params` and
        `_prepare_download_params` for details)

    Returns
    -------
    collection : dict
        Collection metadata, with local path added to it (or ``None``
        if ``None`` was given).
    """
    if collection is None:
        return None
    collection = _remove_none_strings(collection)
    dir_name = 'collection_{0}'.format(collection['id'])
    dir_path = os.path.join(download_params['nv_data_dir'], dir_name)
    collection['relative_path'] = dir_name
    collection['absolute_path'] = dir_path
    if not os.path.isdir(dir_path):
        os.makedirs(dir_path)
    _write_metadata(
        collection, os.path.join(dir_path, 'collection_metadata.json'))
    return collection
def _fetch_collection_for_image(image_info, download_params):
    """Find the collection metadata for an image.

    If necessary, the collection metadata is downloaded and its
    directory is created.

    Parameters
    ----------
    image_info : dict
        Image metadata.
    download_params : dict
        General information about download session, containing e.g. the
        data directory (see `_read_download_params` and
        `_prepare_download_params` for details)

    Returns
    -------
    collection : dict
        The collection metadata.
    """
    collection_id = image_info['collection_id']
    collection_relative_path = 'collection_{0}'.format(collection_id)
    collection_absolute_path = os.path.join(
        download_params['nv_data_dir'], collection_relative_path)
    if not os.path.isdir(collection_absolute_path):
        # Not on disk yet: fetch the collection metadata from the
        # server and create its local directory.
        col_batch = _get_batch(urljoin(
            _NEUROVAULT_COLLECTIONS_URL, str(collection_id)),
            verbose=download_params['verbose'])
        collection = _download_collection(
            col_batch['results'][0], download_params)
    else:
        # Already downloaded: load the cached metadata and re-attach
        # the directory paths.
        collection = _json_add_collection_dir(os.path.join(
            collection_absolute_path, 'collection_metadata.json'))
    return collection
def _download_image_nii_file(image_info, collection, download_params):
    """Download an image (.nii.gz) file from Neurovault.

    Parameters
    ----------
    image_info : dict
        Image metadata.
    collection : dict
        Corresponding collection metadata.
    download_params : dict
        General information about download session, containing e.g. the
        data directory (see `_read_download_params` and
        `_prepare_download_params` for details)

    Returns
    -------
    image_info : dict
        Image metadata with local paths added to it.
    collection : dict
        Corresponding collection metadata with local paths added to it.
    """
    image_info = image_info.copy()
    image_id = image_info['id']
    image_url = image_info['file']
    image_file_name = 'image_{0}.nii.gz'.format(image_id)
    image_relative_path = os.path.join(
        collection['relative_path'], image_file_name)
    image_absolute_path = os.path.join(
        collection['absolute_path'], image_file_name)
    # Paths for the resampled copy are recorded in the metadata even
    # when resampling is disabled for this session.
    resampled_image_file_name = 'image_{0}_resampled.nii.gz'.format(image_id)
    resampled_image_absolute_path = os.path.join(
        collection['absolute_path'], resampled_image_file_name)
    resampled_image_relative_path = os.path.join(
        collection['relative_path'], resampled_image_file_name)
    image_info['absolute_path'] = image_absolute_path
    image_info['relative_path'] = image_relative_path
    image_info['resampled_absolute_path'] = resampled_image_absolute_path
    image_info['resampled_relative_path'] = resampled_image_relative_path
    if download_params['resample']:
        # Download into a uniquely-named temporary file, resample it
        # to STD_AFFINE (module-level constant, not shown here), keep
        # only the resampled image and discard the raw download.
        struuid = str(uuid.uuid1())
        tmp_file = 'tmp_{0}.nii.gz'.format(struuid)
        tmp_path = os.path.join(
            collection['absolute_path'], tmp_file)
        _simple_download(
            image_url, tmp_path,
            download_params['temp_dir'], verbose=download_params['verbose'])
        # Resample here
        print('Resampling...')
        im_resampled = resample_img(
            img=tmp_path,
            target_affine=STD_AFFINE,
            interpolation=download_params['interpolation'],
        )
        im_resampled.to_filename(resampled_image_absolute_path)
        # Remove temporary file
        os.remove(tmp_path)
    else:
        # No resampling: download straight to the final location.
        _simple_download(
            image_url, image_absolute_path,
            download_params['temp_dir'], verbose=download_params['verbose'])
    return image_info, collection
def _check_has_words(file_name):
    """Return whether `file_name` holds a usable Neurosynth response.

    The file is expected to contain json with a non-empty
    ``['data']['values']`` mapping. If the file is missing, malformed,
    or holds no word weights, it is removed (when present) and
    ``False`` is returned.

    Parameters
    ----------
    file_name : str
        Path to a downloaded Neurosynth words file.

    Returns
    -------
    bool
        ``True`` if the file contains at least one word weight.
    """
    if not os.path.isfile(file_name):
        return False
    info = _remove_none_strings(_json_from_file(file_name))
    # An `assert` was previously used here for flow control; asserts
    # are stripped under `python -O`, which silently skipped the
    # validation. Use an explicit check instead. AttributeError /
    # TypeError cover 'data' or 'values' being None or not a mapping,
    # as before.
    try:
        if len(info['data']['values']):
            return True
    except (AttributeError, TypeError):
        pass
    # Empty or malformed response: delete it so a later session can
    # retry the download.
    os.remove(file_name)
    return False
def _download_image_terms(image_info, collection, download_params):
    """Download Neurosynth words for an image.

    Parameters
    ----------
    image_info : dict
        Image metadata.
    collection : dict
        Corresponding collection metadata.
    download_params : dict
        General information about download session, containing e.g. the
        data directory (see `_read_download_params` and
        `_prepare_download_params` for details)

    Returns
    -------
    image_info : dict
        Image metadata with neurosynth words file path added to it.
    collection : dict
        Corresponding collection metadata.
    """
    if not download_params['fetch_neurosynth_words']:
        return image_info, collection
    ns_words_file_name = 'neurosynth_words_for_image_{0}.json'.format(
        image_info['id'])
    image_info = image_info.copy()
    image_info['ns_words_relative_path'] = os.path.join(
        collection['relative_path'], ns_words_file_name)
    image_info['ns_words_absolute_path'] = os.path.join(
        collection['absolute_path'], ns_words_file_name)
    if os.path.isfile(image_info['ns_words_absolute_path']):
        # Already downloaded during a previous session.
        return image_info, collection
    query = urljoin(_NEUROSYNTH_FETCH_WORDS_URL,
                    '?neurovault={0}'.format(image_info['id']))
    try:
        _simple_download(query, image_info['ns_words_absolute_path'],
                         download_params['temp_dir'],
                         verbose=download_params['verbose'])
        # Explicit check instead of `assert`, which would be stripped
        # (and the validation silently skipped) under `python -O`.
        if not _check_has_words(image_info['ns_words_absolute_path']):
            raise ValueError('Downloaded words file contains no word '
                             'weights.')
    except Exception:
        message = 'Could not fetch words for image {0}'.format(
            image_info['id'])
        if not download_params.get('allow_neurosynth_failure', True):
            raise RuntimeError(message)
        # Best-effort mode: log the failure and continue.
        _print_if(
            message, _ERROR, download_params['verbose'], with_traceback=True)
    return image_info, collection
def _download_image(image_info, download_params):
    """Download a Neurovault image.

    If necessary, create the corresponding collection's directory and
    download the collection's metadata.

    Parameters
    ----------
    image_info : dict
        Image metadata.
    download_params : dict
        General information about download session, containing e.g. the
        data directory (see `_read_download_params` and
        `_prepare_download_params` for details)

    Returns
    -------
    image_info : dict
        Image metadata with local paths added to it.
    """
    if image_info is None:
        return None
    image_info = _remove_none_strings(image_info)
    collection = _fetch_collection_for_image(
        image_info, download_params)
    image_info, collection = _download_image_nii_file(
        image_info, collection, download_params)
    image_info, collection = _download_image_terms(
        image_info, collection, download_params)
    # Persist the enriched metadata next to the image file so that
    # local scrolls can find it without contacting the server again.
    metadata_file_path = os.path.join(
        collection['absolute_path'], 'image_{0}_metadata.json'.format(
            image_info['id']))
    _write_metadata(image_info, metadata_file_path)
    return image_info
def _update_image(image_info, download_params):
    """Update local metadata for an image.

    If required and necessary, download the Neurosynth tags.

    Parameters
    ----------
    image_info : dict
        Image metadata.
    download_params : dict
        General information about download session, containing e.g. the
        data directory (see `_read_download_params` and
        `_prepare_download_params` for details)

    Returns
    -------
    image_info : dict
        Image metadata.
    """
    if not download_params['write_ok']:
        # Nothing can be written to disk; leave the metadata untouched.
        return image_info
    try:
        collection = _fetch_collection_for_image(
            image_info, download_params)
        image_info, collection = _download_image_terms(
            image_info, collection, download_params)
        metadata_file_path = os.path.join(
            os.path.dirname(image_info['absolute_path']),
            'image_{0}_metadata.json'.format(image_info['id']))
        _write_metadata(image_info, metadata_file_path)
    except OSError:
        # Best-effort update: a read-only data directory is not fatal.
        warnings.warn(
            "could not update metadata for image {}, "
            "most likely because you do not have write "
            "permissions to its metadata file".format(image_info["id"]))
    return image_info
def _update(image_info, collection, download_params):
"Update local metadata for an image and its collection."""
image_info = _update_image(image_info, download_params)
return image_info, collection
def _scroll_local(download_params):
    """Iterate over local neurovault data.

    Parameters
    ----------
    download_params : dict
        General information about download session, containing e.g. the
        data directory (see `_read_download_params` and
        `_prepare_download_params for details`)

    Yields
    ------
    image : dict
        Metadata for an image.

    collection : dict
        Metadata for the corresponding collection.

    """
    _print_if('Reading local neurovault data.', _DEBUG,
              download_params['verbose'])
    # Each collection lives in its own subdirectory of the data dir,
    # marked by a collection_metadata.json file.
    collections = glob(
        os.path.join(
            download_params['nv_data_dir'], '*', 'collection_metadata.json'))
    good_collections = (col for col in
                        (_json_add_collection_dir(col) for col in collections)
                        if download_params['local_collection_filter'](col))
    for collection in good_collections:
        images = glob(os.path.join(
            collection['absolute_path'], 'image_*_metadata.json'))
        good_images = (img for img in
                       (_json_add_im_files_paths(img) for img in images)
                       if download_params['local_image_filter'](img))
        for image in good_images:
            # Refresh metadata (e.g. Neurosynth terms) before yielding.
            image, collection = _update(image, collection, download_params)
            if not download_params['resample']:
                # Yield only if the image file actually exists on disk;
                # metadata without a file is silently skipped.
                if os.path.isfile(image['absolute_path']):
                    download_params['visited_images'].add(image['id'])
                    download_params['visited_collections'].add(collection['id'])
                    yield image, collection
                else:
                    pass
            else:
                if os.path.isfile(image['resampled_absolute_path']):
                    download_params['visited_images'].add(image['id'])
                    download_params['visited_collections'].add(collection['id'])
                    yield image, collection
                else:
                    # The resampled version is missing: build it from the
                    # original file and cache it before yielding.
                    im_resampled = resample_img(
                        img=image['absolute_path'],
                        target_affine=STD_AFFINE,
                        interpolation=download_params['interpolation'],
                    )
                    im_resampled.to_filename(image['resampled_absolute_path'])
                    download_params['visited_images'].add(image['id'])
                    download_params['visited_collections'].add(collection['id'])
                    yield image, collection
def _scroll_collection(collection, download_params):
    """Iterate over the content of a collection on Neurovault server.

    Images that are found and match filter criteria are downloaded.

    Parameters
    ----------
    collection : dict
        Metadata for the collection

    download_params : dict
        General information about download session, containing e.g. the
        data directory (see `_read_download_params` and
        `_prepare_download_params for details`)

    Yields
    ------
    image : dict
        Metadata for an image.

    Notes
    -----
    ``image`` can be ``None`` to signify a failed download.

    """
    if collection is None:
        # The collection itself failed to download upstream: signal a
        # single failure and stop.
        yield None
        return
    n_im_in_collection = 0
    fails_in_collection = 0
    query = urljoin(_NEUROVAULT_COLLECTIONS_URL,
                    '{0}/images/'.format(collection['id']))
    images = _scroll_server_results(
        query, query_terms=download_params['image_terms'],
        local_filter=download_params['image_filter'],
        prefix_msg='Scroll images from collection {0}: '.format(
            collection['id']), batch_size=download_params['batch_size'],
        verbose=download_params['verbose'])
    for image in images:
        if image is None:
            # NOTE(review): no `continue` here, so after yielding None the
            # loop still calls _download_image(None, ...) below (which
            # returns None, see _download_image) and yields a second None
            # while incrementing the image counter — confirm this
            # double-yield is intended.
            yield None
        try:
            image = _download_image(image, download_params)
            # A successful download resets the consecutive-failure counter.
            fails_in_collection = 0
            n_im_in_collection += 1
            yield image
        except Exception:
            fails_in_collection += 1
            _print_if(
                '_scroll_collection: bad image: {0}'.format(image),
                _ERROR, download_params['verbose'], with_traceback=True)
            yield None
        # Abort the whole collection after too many consecutive failures.
        if fails_in_collection == download_params['max_fails_in_collection']:
            _print_if('Too many bad images in collection {0}: '
                      '{1} bad images.'.format(
                          collection['id'], fails_in_collection),
                      _ERROR, download_params['verbose'])
            return
    _print_if(
        'On neurovault.org: '
        '{0} image{1} matched query in collection {2}'.format(
            (n_im_in_collection if n_im_in_collection else 'no'),
            ('s' if n_im_in_collection > 1 else ''), collection['id']),
        _INFO, download_params['verbose'])
def _scroll_filtered(download_params):
    """Iterate over Neurovault data that matches specified filters.

    Images and collections which match the filters provided in the
    download parameters are fetched from the server.

    Parameters
    ----------
    download_params : dict
        General information about download session, containing e.g. the
        data directory (see `_read_download_params` and
        `_prepare_download_params for details`)

    Yields
    ------
    image : dict
        Metadata for an image.

    collection : dict
        Metadata for the corresponding collection.

    Notes
    -----
    ``image``, ``collection`` can be ``None``, ``None`` to signify a
    failed download.

    """
    _print_if('Reading server neurovault data.',
              _DEBUG, download_params['verbose'])
    # Exclude anything already yielded by the local-disk pass so the same
    # image or collection is not fetched twice.
    download_params['collection_filter'] = ResultFilter(
        {'id': NotIn(*download_params['visited_collections'])}).AND(
            download_params['collection_filter'])
    download_params['image_filter'] = ResultFilter(
        {'id': NotIn(*download_params['visited_images'])}).AND(
            download_params['image_filter'])
    collections = _scroll_server_results(
        _NEUROVAULT_COLLECTIONS_URL,
        query_terms=download_params['collection_terms'],
        local_filter=download_params['collection_filter'],
        prefix_msg='Scroll collections: ',
        batch_size=download_params['batch_size'],
        verbose=download_params['verbose'])
    for collection in collections:
        collection = _download_collection(collection, download_params)
        # Walk every matching image inside the collection.
        collection_content = _scroll_collection(collection, download_params)
        for image in collection_content:
            yield image, collection
def _scroll_collection_ids(download_params):
    """Download an explicit list of collections from Neurovault.

    The collections listed in ``download_params['wanted_collection_ids']``,
    and all the images they contain, are downloaded.

    Parameters
    ----------
    download_params : dict
        General information about download session, containing e.g. the
        data directory (see `_read_download_params` and
        `_prepare_download_params for details`)

    Yields
    ------
    image : dict
        Metadata for an image.

    collection : dict
        Metadata for the corresponding collection.

    Notes
    -----
    ``image``, ``collection`` can be ``None``, ``None`` to signify a
    failed download.

    """
    wanted_ids = download_params['wanted_collection_ids']
    urls = [urljoin(_NEUROVAULT_COLLECTIONS_URL, str(col_id))
            for col_id in wanted_ids]
    if urls:
        # Only announce a server round-trip when there is something to fetch.
        _print_if('Reading server neurovault data.',
                  _DEBUG, download_params['verbose'])
    for col_meta in _yield_from_url_list(
            urls, verbose=download_params['verbose']):
        col_meta = _download_collection(col_meta, download_params)
        # Emit every image inside the collection, paired with its metadata.
        for img_meta in _scroll_collection(col_meta, download_params):
            yield img_meta, col_meta
def _scroll_image_ids(download_params):
    """Download a specific list of images from Neurovault.

    The images listed in the download parameters, and the metadata for
    the collections they belong to, are downloaded.

    Parameters
    ----------
    download_params : dict
        General information about download session, containing e.g. the
        data directory (see `_read_download_params` and
        `_prepare_download_params for details`)

    Yields
    ------
    image : dict
        Metadata for an image.

    collection : dict
        Metadata for the corresponding collection.

    Notes
    -----
    ``image``, ``collection`` can be ``None``, ``None`` to signify a
    failed download.

    """
    image_urls = [urljoin(_NEUROVAULT_IMAGES_URL, str(im_id)) for
                  im_id in download_params['wanted_image_ids']]
    images = _yield_from_url_list(
        image_urls, verbose=download_params['verbose'])
    for image in images:
        try:
            image = _download_image(image, download_params)
            # The collection metadata was written next to the downloaded
            # image file; load it back from disk.
            collection = _json_add_collection_dir(os.path.join(
                os.path.dirname(image['absolute_path']),
                'collection_metadata.json'))
        except Exception:
            # Broad catch is deliberate: a failed download is reported as a
            # (None, None) pair instead of aborting the whole scroll.
            image, collection = None, None
        yield image, collection
def _scroll_explicit(download_params):
    """Download specific lists of collections and images from Neurovault.

    Parameters
    ----------
    download_params : dict
        General information about download session, containing e.g. the
        data directory (see `_read_download_params` and
        `_prepare_download_params for details`)

    Yields
    ------
    image : dict
        Metadata for an image.

    collection : dict
        Metadata for the corresponding collection.

    Notes
    -----
    ``image``, ``collection`` can be ``None``, ``None`` to signify a
    failed download.

    """
    # Drop collections that were already seen during the local-disk pass.
    download_params['wanted_collection_ids'] = set(
        download_params['wanted_collection_ids']).difference(
            download_params['visited_collections'])
    for image, collection in _scroll_collection_ids(download_params):
        if image is not None:
            download_params['visited_images'].add(image['id'])
        yield image, collection
    # Individually-requested images already obtained above (as part of a
    # wanted collection) or found on disk are not fetched a second time.
    download_params['wanted_image_ids'] = set(
        download_params['wanted_image_ids']).difference(
            download_params['visited_images'])
    for image, collection in _scroll_image_ids(download_params):
        yield image, collection
def _print_progress(found, download_params, level=_INFO):
    """Report how many images have been fetched so far."""
    # Pluralize "image" only when more than one has been fetched.
    plural = 's' if found > 1 else ''
    message = 'Already fetched {0} image{1}'.format(found, plural)
    _print_if(message, level, download_params['verbose'])
def _scroll(download_params):
    """Iterate over Neurovault data.

    Relevant images and collections are loaded from local disk, then
    from neurovault.org

    Parameters
    ----------
    download_params : dict
        General information about download session, containing e.g. the
        data directory (see `_read_download_params` and
        `_prepare_download_params for details`)

    Yields
    ------
    image : dict
        Metadata for an image.

    collection : dict
        Metadata for the corresponding collection.

    Notes
    -----
    Stops if:
        - All available images have been fetched.
        - Or a max number of images has been specified by user and
          reached.
        - Or too many downloads have failed in a row.

    """
    # Map scroll mode to the generator that serves data from the server.
    scroll_modes = {'filtered': _scroll_filtered, 'explicit': _scroll_explicit}
    if download_params['max_images'] == 0:
        return
    found = 0
    # Unless overwriting, serve whatever is already on local disk first.
    if download_params['download_mode'] != 'overwrite':
        for image, collection in _scroll_local(download_params):
            found = len(download_params['visited_images'])
            _print_progress(found, download_params, _DEBUG)
            yield image, collection
            if found == download_params['max_images']:
                break
        _print_if('{0} image{1} found on local disk.'.format(
            ('No' if not found else found), ('s' if found > 1 else '')),
            _INFO, download_params['verbose'])
    if download_params['download_mode'] == 'offline':
        return
    if found == download_params['max_images']:
        return
    # Continue with the server, skipping anything already visited.
    server_data = scroll_modes[download_params['scroll_mode']](download_params)
    n_consecutive_fails = 0
    for image, collection in server_data:
        if image is None or collection is None:
            n_consecutive_fails += 1
        else:
            n_consecutive_fails = 0
            found += 1
            _print_progress(found, download_params)
            yield image, collection
        # Give up after too many failed downloads in a row.
        if n_consecutive_fails >= download_params['max_consecutive_fails']:
            warnings.warn('Neurovault download stopped early: '
                          'too many downloads failed in a row ({0})'.format(
                              n_consecutive_fails))
            return
        if found == download_params['max_images']:
            return
# Utilities for providing defaults and transforming input and output
def _split_terms(terms, available_on_server):
"""Isolate term filters that can be applied by server."""
terms_ = dict(terms)
server_terms = dict([(k, terms_.pop(k)) for k in
available_on_server if k in terms_ and
(isinstance(terms_[k], str) or
isinstance(terms_[k], int))])
return terms_, server_terms
def _move_unknown_terms_to_local_filter(terms, local_filter,
                                        available_on_server):
    """Move filters handled by the server inside URL.

    Some filters are available on the server and can be inserted into
    the URL query. The rest will have to be applied on metadata
    locally.

    """
    # Terms the server cannot evaluate are folded into the client-side
    # (local) filter; the rest are returned for use in the query URL.
    local_terms, server_terms = _split_terms(terms, available_on_server)
    local_filter = ResultFilter(query_terms=local_terms).AND(local_filter)
    return server_terms, local_filter
def basic_collection_terms():
    """Return the default collection filter terms.

    The returned terms exclude empty collections, i.e. keep only
    collections whose 'number_of_images' field is not null.
    """
    terms = {'number_of_images': NotNull()}
    return terms
def basic_image_terms():
    """Return the default image filter terms.

    The terms select unthresholded F, T and Z maps in MNI space. More
    precisely, an image is excluded if one of the following is true:

        - It is not in MNI space.
        - It is thresholded.
        - Its map type is one of "ROI/mask", "anatomical", or
          "parcellation".
        - Its image type is "atlas"

    """
    excluded_map_types = ('ROI/mask', 'anatomical', 'parcellation')
    return {
        'not_mni': False,
        'is_thresholded': False,
        'map_type': NotIn(*excluded_map_types),
        'image_type': NotEqual('atlas'),
    }
def _move_col_id(im_terms, col_terms):
"""Reposition 'collection_id' term.
If the collection id was specified in image filters, move it to
the collection filters for efficiency.
This makes specifying the collection id as a keyword argument for
``fetch_neurovault`` efficient.
"""
if 'collection_id' not in im_terms:
return im_terms, col_terms
im_terms = copy(im_terms)
col_terms = copy(col_terms)
if 'id' not in col_terms:
col_terms['id'] = im_terms.pop('collection_id')
elif col_terms['id'] == im_terms['collection_id']:
col_terms['id'] = im_terms.pop('collection_id')
else:
warnings.warn('You specified contradictory collection ids, '
'one in the image filters and one in the '
'collection filters')
return im_terms, col_terms
def _read_download_params(
        data_dir, download_mode='download_new', collection_terms=None,
        collection_filter=_empty_filter, image_terms=None,
        image_filter=_empty_filter, wanted_collection_ids=None,
        wanted_image_ids=None, max_images=None,
        max_consecutive_fails=_MAX_CONSECUTIVE_FAILS,
        max_fails_in_collection=_MAX_FAILS_IN_COLLECTION,
        resample=False, interpolation='linear',
        batch_size=None, verbose=3, fetch_neurosynth_words=False,
        vectorize_words=True):
    """Assemble the dictionary of session-wide download settings.

    Raises a ValueError if ``download_mode`` is not one of 'overwrite',
    'download_new' or 'offline' (case-insensitive).
    """
    download_mode = download_mode.lower()
    if download_mode not in ('overwrite', 'download_new', 'offline'):
        raise ValueError(
            'supported download modes are overwrite,'
            ' download_new, offline; got {0}'.format(download_mode))
    # A negative cap on the number of images means "no limit".
    if max_images is not None and max_images < 0:
        max_images = None
    return {
        'verbose': verbose,
        'download_mode': download_mode,
        'nv_data_dir': data_dir,
        # Terms dicts are copied so later mutation cannot leak to callers.
        'collection_terms': dict(collection_terms or {}),
        'collection_filter': collection_filter,
        'image_terms': dict(image_terms or {}),
        'image_filter': image_filter,
        'visited_images': set(),
        'visited_collections': set(),
        'max_images': max_images,
        'max_consecutive_fails': max_consecutive_fails,
        'max_fails_in_collection': max_fails_in_collection,
        'batch_size': batch_size,
        'resample': resample,
        'interpolation': interpolation,
        'wanted_image_ids': wanted_image_ids,
        'wanted_collection_ids': wanted_collection_ids,
        'fetch_neurosynth_words': fetch_neurosynth_words,
        # Downloads are only attempted when the data directory is writable.
        'write_ok': os.access(data_dir, os.W_OK),
        'vectorize_words': vectorize_words,
    }
def _prepare_explicit_ids_download_params(download_params):
    """Prepare the download parameters if explicit ids are specified."""
    # Missing id lists default to empty.
    for key in ('wanted_image_ids', 'wanted_collection_ids'):
        if download_params.get(key) is None:
            download_params[key] = []
    # Explicit ids bypass the filter machinery entirely and lift any
    # image-count cap.
    download_params.update(
        max_images=None,
        scroll_mode='explicit',
        image_terms={},
        image_filter=_empty_filter,
        collection_terms={},
        collection_filter=_empty_filter,
        local_collection_filter=_empty_filter,
    )
    # Locally, keep images requested by id or belonging to a wanted
    # collection.
    wanted_image_filter = ResultFilter(
        {'id': IsIn(*download_params['wanted_image_ids'])})
    download_params['local_image_filter'] = wanted_image_filter.OR(
        ResultFilter(
            collection_id=IsIn(
                *download_params['wanted_collection_ids'])))
    return download_params
def _prepare_filtered_download_params(download_params):
    """Prepare the download parameters if filters are used."""
    # A 'collection_id' image term is more efficiently applied to
    # collections, so relocate it first.
    im_terms, col_terms = _move_col_id(
        download_params['image_terms'], download_params['collection_terms'])
    # Split each terms dict into a server-side part (kept in *_terms) and
    # a client-side part (folded into the corresponding filter).
    col_terms, col_filter = _move_unknown_terms_to_local_filter(
        col_terms, download_params['collection_filter'],
        _COL_FILTERS_AVAILABLE_ON_SERVER)
    im_terms, im_filter = _move_unknown_terms_to_local_filter(
        im_terms, download_params['image_filter'],
        _IM_FILTERS_AVAILABLE_ON_SERVER)
    download_params['image_terms'] = im_terms
    download_params['image_filter'] = im_filter
    download_params['collection_terms'] = col_terms
    download_params['collection_filter'] = col_filter
    # Local filters re-check the server-side terms too, so cached data on
    # disk is filtered the same way as fresh server data.
    download_params['local_collection_filter'] = ResultFilter(
        **col_terms).AND(col_filter)
    download_params['local_image_filter'] = ResultFilter(
        **im_terms).AND(im_filter)
    download_params['scroll_mode'] = 'filtered'
    return download_params
def _prepare_download_params(download_params):
    """Adjust the download parameters.

    Information for the downloaders is added. The result depends on
    whether we are downloading a set of collections and images
    explicitly specified by the user (by id), or we are downloading
    all the collections and images that match certain filters.
    """
    explicit_ids = (
        download_params['wanted_collection_ids'] is not None
        or download_params['wanted_image_ids'] is not None)
    if explicit_ids:
        return _prepare_explicit_ids_download_params(download_params)
    return _prepare_filtered_download_params(download_params)
def _result_list_to_bunch(result_list, download_params):
    """Transform a list of results into a Bunch.

    If necessary, a vocabulary list and a matrix of vectorized tags are
    added.
    """
    if not result_list:
        images_meta, collections_meta = [], []
    else:
        # result_list is a sequence of (image_meta, collection_meta) pairs.
        images_meta, collections_meta = (
            list(part) for part in zip(*result_list))
    # Point at the resampled files when resampling was requested.
    path_key = ('resampled_absolute_path' if download_params['resample']
                else 'absolute_path')
    images = [meta.get(path_key) for meta in images_meta]
    result = Bunch(images=images, images_meta=images_meta,
                   collections_meta=collections_meta,
                   description=_get_dataset_descr('neurovault'))
    if (download_params['fetch_neurosynth_words']
            and download_params['vectorize_words']):
        word_paths = [meta.get('ns_words_absolute_path')
                      for meta in images_meta]
        (result['word_frequencies'],
         result['vocabulary']) = neurosynth_words_vectorized(
             word_paths, verbose=download_params['verbose'])
    return result
# High-level functions that provide access to neurovault and neurosynth.
# _fetch_neurovault_implementation does the work, and two interfaces
# are available:
# fetch_neurovault, to filter results based on metadata
# fetch_neurovault_ids, to ask for specific images or collections
def _fetch_neurovault_implementation(
        max_images=_DEFAULT_MAX_IMAGES, collection_terms=basic_collection_terms(),
        collection_filter=_empty_filter, image_terms=basic_image_terms(),
        image_filter=_empty_filter, collection_ids=None, image_ids=None,
        mode='download_new', data_dir=None, fetch_neurosynth_words=False, resample=False,
        interpolation='continuous', vectorize_words=True, verbose=3, **kwarg_image_filters):
    """Download data from neurovault.org and neurosynth.org.

    Shared implementation behind `fetch_neurovault` and
    `fetch_neurovault_ids`.

    Note: the `collection_terms` and `image_terms` defaults are dicts
    created once at definition time; this is safe here because they are
    copied (``dict(...)``) before being stored in the download
    parameters, see `_read_download_params`.
    """
    # Extra keyword arguments are additional image filter terms; merging
    # them into a fresh dict leaves the caller's dict untouched.
    image_terms = dict(image_terms, **kwarg_image_filters)
    neurovault_data_dir = _get_dataset_dir('neurovault', data_dir)
    # Without write access to the data directory, silently degrade to
    # offline mode (with a warning) instead of failing mid-download.
    if mode != 'offline' and not os.access(neurovault_data_dir, os.W_OK):
        warnings.warn("You don't have write access to neurovault dir: {0}; "
                      "fetch_neurovault is working offline.".format(
                          neurovault_data_dir))
        mode = 'offline'
    download_params = _read_download_params(
        neurovault_data_dir, download_mode=mode,
        collection_terms=collection_terms,
        collection_filter=collection_filter, image_terms=image_terms,
        image_filter=image_filter, wanted_collection_ids=collection_ids,
        wanted_image_ids=image_ids, max_images=max_images, resample=resample,
        interpolation=interpolation, verbose=verbose,
        fetch_neurosynth_words=fetch_neurosynth_words,
        vectorize_words=vectorize_words)
    download_params = _prepare_download_params(download_params)
    # Scratch space for the download session — presumably cleaned up when
    # the context exits; confirm against _TemporaryDirectory.
    with _TemporaryDirectory() as temp_dir:
        download_params['temp_dir'] = temp_dir
        # Drain the generator while the temp dir is still alive.
        scroller = list(_scroll(download_params))
    return _result_list_to_bunch(scroller, download_params)
def fetch_neurovault(
        max_images=_DEFAULT_MAX_IMAGES,
        collection_terms=basic_collection_terms(),
        collection_filter=_empty_filter,
        image_terms=basic_image_terms(),
        image_filter=_empty_filter,
        mode='download_new', data_dir=None,
        fetch_neurosynth_words=False, resample=False, vectorize_words=True,
        verbose=3, **kwarg_image_filters):
    """Download data from neurovault.org that match certain criteria.

    Any downloaded data is saved on the local disk and subsequent
    calls to this function will first look for the data locally before
    querying the server for more if necessary.

    We explore the metadata for Neurovault collections and images,
    keeping those that match a certain set of criteria, until we have
    skimmed through the whole database or until an (optional) maximum
    number of images to fetch has been reached.

    For more information, see :footcite:`Gorgolewski2015neurovault`,
    and :footcite:`yarkoni2011large`.

    Parameters
    ----------
    max_images : int, optional
        Maximum number of images to fetch. Default=100.

    collection_terms : dict, optional
        Key, value pairs used to filter collection
        metadata. Collections for which
        ``collection_metadata['key'] == value`` is not ``True`` for
        every key, value pair will be discarded.
        See documentation for ``basic_collection_terms`` for a
        description of the default selection criteria.
        Default=basic_collection_terms().

    collection_filter : Callable, optional
        Collections for which `collection_filter(collection_metadata)`
        is ``False`` will be discarded.
        Default=empty_filter.

    image_terms : dict, optional
        Key, value pairs used to filter image metadata. Images for
        which ``image_metadata['key'] == value`` is not ``True`` for
        every key, value pair will be discarded.
        See documentation for ``basic_image_terms`` for a
        description of the default selection criteria.
        Default=basic_image_terms().

    image_filter : Callable, optional
        Images for which `image_filter(image_metadata)` is ``False``
        will be discarded. Default=empty_filter.

    mode : {'download_new', 'overwrite', 'offline'}
        When to fetch an image from the server rather than the local
        disk.

        - 'download_new' (the default) means download only files that
          are not already on disk (regardless of modify date).
        - 'overwrite' means ignore files on disk and overwrite them.
        - 'offline' means load only data from disk; don't query server.

    data_dir : str, optional
        The directory we want to use for nilearn data. A subdirectory
        named "neurovault" will contain neurovault data.

    fetch_neurosynth_words : bool, optional
        whether to collect words from Neurosynth. Default=False.

    vectorize_words : bool, optional
        If neurosynth words are downloaded, create a matrix of word
        counts and add it to the result. Also add to the result a
        vocabulary list. See ``sklearn.CountVectorizer`` for more info.
        Default=True.

    resample : bool, optional (default=False)
        Resamples downloaded images to a 3x3x3 grid before saving them, to save disk space.

    interpolation : str, optional
        Can be 'continuous', 'linear', or 'nearest'. Indicates the resample
        method. Default='continuous'. Argument passed to nilearn.image.resample_img.

    verbose : int, optional
        An integer in [0, 1, 2, 3] to control the verbosity level.
        Default=3.

    kwarg_image_filters
        Keyword arguments are understood to be filter terms for
        images, so for example ``map_type='Z map'`` means only
        download Z-maps; ``collection_id=35`` means download images
        from collection 35 only.

    Returns
    -------
    Bunch
        A dict-like object which exposes its items as attributes. It contains:

            - 'images', the paths to downloaded files.
            - 'images_meta', the metadata for the images in a list of
              dictionaries.
            - 'collections_meta', the metadata for the
              collections.
            - 'description', a short description of the Neurovault dataset.

        If `fetch_neurosynth_words` and `vectorize_words` were set, it
        also contains:

            - 'vocabulary', a list of words
            - 'word_frequencies', the weight of the words returned by
              neurosynth.org for each image, such that the weight of word
              `vocabulary[j]` for the image found in `images[i]` is
              `word_frequencies[i, j]`

    See Also
    --------
    nilearn.datasets.fetch_neurovault_ids
        Fetch collections and images from Neurovault by explicitly specifying
        their ids.

    Notes
    -----
    Images and collections from disk are fetched before remote data.

    Some helpers are provided in the ``neurovault`` module to express
    filtering criteria more concisely:

        ``ResultFilter``, ``IsNull``, ``NotNull``, ``NotEqual``,
        ``GreaterOrEqual``, ``GreaterThan``, ``LessOrEqual``,
        ``LessThan``, ``IsIn``, ``NotIn``, ``Contains``,
        ``NotContains``, ``Pattern``.

    If you pass a single value to match against the collection id
    (whether as the 'id' field of the collection metadata or as the
    'collection_id' field of the image metadata), the server is
    directly queried for that collection, so
    ``fetch_neurovault(collection_id=40)`` is as efficient as
    ``fetch_neurovault(collection_ids=[40])`` (but in the former
    version the other filters will still be applied). This is not true
    for the image ids. If you pass a single value to match against any
    of the fields listed in ``_COL_FILTERS_AVAILABLE_ON_SERVER``,
    i.e., 'DOI', 'name', and 'owner', these filters can be
    applied by the server, limiting the amount of metadata we have to
    download: filtering on those fields makes the fetching faster
    because the filtering takes place on the server side.

    In `download_new` mode, if a file exists on disk, it is not
    downloaded again, even if the version on the server is newer. Use
    `overwrite` mode to force a new download (you can filter on the
    field ``modify_date`` to re-download the files that are newer on
    the server - see Examples section).

    Tries to yield `max_images` images; stops early if we have fetched
    all the images matching the filters or if too many images fail to
    be downloaded in a row.

    References
    ----------
    .. footbibliography::

    Examples
    --------
    To download **all** the collections and images from Neurovault::

        fetch_neurovault(max_images=None, collection_terms={}, image_terms={})

    To further limit the default selection to collections which
    specify a DOI (which reference a published paper, as they may be
    more likely to contain good images)::

        fetch_neurovault(
            max_images=None,
            collection_terms=dict(basic_collection_terms(), DOI=NotNull()))

    To update all the images (matching the default filters)::

        fetch_neurovault(
            max_images=None, mode='overwrite',
            modify_date=GreaterThan(newest))

    """
    if max_images == _DEFAULT_MAX_IMAGES:
        _print_if(
            'fetch_neurovault: using default value of {0} for max_images. '
            'Set max_images to another value or None '
            'if you want more images.'.format(_DEFAULT_MAX_IMAGES),
            _INFO, verbose)
    # Users may get confused if they write their image_filter function
    # and the default filters contained in image_terms still apply, so we
    # issue a warning.
    if image_filter != _empty_filter and image_terms == basic_image_terms():
        warnings.warn(
            "You specified a value for `image_filter` but the "
            "default filters in `image_terms` still apply. "
            "If you want to disable them, pass `image_terms={}`")
    if (collection_filter != _empty_filter
            and collection_terms == basic_collection_terms()):
        warnings.warn(
            "You specified a value for `collection_filter` but the "
            "default filters in `collection_terms` still apply. "
            "If you want to disable them, pass `collection_terms={}`")
    # Delegate to the shared implementation (no explicit id lists here).
    return _fetch_neurovault_implementation(
        max_images=max_images, collection_terms=collection_terms,
        collection_filter=collection_filter, image_terms=image_terms,
        image_filter=image_filter, mode=mode,
        data_dir=data_dir,
        fetch_neurosynth_words=fetch_neurosynth_words, resample=resample,
        vectorize_words=vectorize_words, verbose=verbose,
        **kwarg_image_filters)
def fetch_neurovault_ids(
        collection_ids=(), image_ids=(), mode='download_new', data_dir=None,
        fetch_neurosynth_words=False, resample=False, vectorize_words=True, verbose=3):
    """Download specific images and collections from neurovault.org.

    Any downloaded data is saved on the local disk and subsequent
    calls to this function will first look for the data locally before
    querying the server for more if necessary.

    This is the fast way to get the data from the server if we already
    know which images or collections we want.

    For more information, see :footcite:`Gorgolewski2015neurovault`,
    and :footcite:`yarkoni2011large`.

    Parameters
    ----------
    collection_ids : Container, optional
        The ids of whole collections to be downloaded.
        Default=().

    image_ids : Container, optional
        The ids of particular images to be downloaded. The metadata for the
        corresponding collections is also downloaded.
        Default=().

    mode : {'download_new', 'overwrite', 'offline'}, optional
        When to fetch an image from the server rather than the local
        disk. Default='download_new'.

        - 'download_new' (the default) means download only files that
          are not already on disk (regardless of modify date).
        - 'overwrite' means ignore files on disk and overwrite them.
        - 'offline' means load only data from disk; don't query server.

    data_dir : str, optional
        The directory we want to use for nilearn data. A subdirectory
        named "neurovault" will contain neurovault data.

    fetch_neurosynth_words : bool, optional
        Whether to collect words from Neurosynth. Default=False.

    resample : bool, optional (default=False)
        Resamples downloaded images to a 3x3x3 grid before saving them, to save disk space.

    vectorize_words : bool, optional
        If neurosynth words are downloaded, create a matrix of word
        counts and add it to the result. Also add to the result a
        vocabulary list. See ``sklearn.CountVectorizer`` for more info.
        Default=True.

    verbose : int, optional
        An integer in [0, 1, 2, 3] to control the verbosity level.
        Default=3.

    Returns
    -------
    Bunch
        A dict-like object which exposes its items as attributes. It contains:

            - 'images', the paths to downloaded files.
            - 'images_meta', the metadata for the images in a list of
              dictionaries.
            - 'collections_meta', the metadata for the
              collections.
            - 'description', a short description of the Neurovault dataset.

        If `fetch_neurosynth_words` and `vectorize_words` were set, it
        also contains:

            - 'vocabulary', a list of words
            - 'word_frequencies', the weight of the words returned by
              neurosynth.org for each image, such that the weight of word
              `vocabulary[j]` for the image found in `images[i]` is
              `word_frequencies[i, j]`

    See Also
    --------
    nilearn.datasets.fetch_neurovault
        Fetch data from Neurovault, but use filters on metadata to select
        images and collections rather than giving explicit lists of ids.

    Notes
    -----
    Images and collections from disk are fetched before remote data.

    In `download_new` mode, if a file exists on disk, it is not
    downloaded again, even if the version on the server is newer. Use
    `overwrite` mode to force a new download.

    Stops early if too many images fail to be downloaded in a row.

    References
    ----------
    .. footbibliography::

    """
    # Delegate to the shared implementation with explicit id lists; the
    # presence of `collection_ids`/`image_ids` switches it to 'explicit'
    # scroll mode (see _prepare_download_params).
    return _fetch_neurovault_implementation(
        mode=mode,
        collection_ids=collection_ids, image_ids=image_ids,
        data_dir=data_dir,
        fetch_neurosynth_words=fetch_neurosynth_words, resample=resample,
        vectorize_words=vectorize_words, verbose=verbose)
def fetch_neurovault_motor_task(data_dir=None, verbose=1):
    """Fetch left vs right button press group contrast map from NeuroVault.

    Parameters
    ----------
    data_dir : string, optional
        Path of the data directory. Used to force data storage in a specified
        location.

    verbose : int, optional
        Verbosity level (0 means no message). Default=1.

    Returns
    -------
    data : Bunch
        A dict-like object which exposes its items as attributes. It contains:

            - 'images', the paths to downloaded files.
            - 'images_meta', the metadata for the images in a list of
              dictionaries.
            - 'collections_meta', the metadata for the
              collections.
            - 'description', a short description of the Neurovault dataset.

    Notes
    ------
    The 'left vs right button press' contrast is used:
    https://neurovault.org/images/10426/

    See Also
    ---------
    nilearn.datasets.fetch_neurovault_ids
    nilearn.datasets.fetch_neurovault
    nilearn.datasets.fetch_neurovault_auditory_computation_task

    """
    # A thin convenience wrapper: image 10426 is the group-level
    # 'left vs right button press' contrast map.
    return fetch_neurovault_ids(image_ids=[10426], data_dir=data_dir,
                                verbose=verbose)
def fetch_neurovault_auditory_computation_task(data_dir=None, verbose=1):
    """Fetch a NeuroVault contrast map showing the effect of mental
    subtraction upon auditory instructions.

    Parameters
    ----------
    data_dir : string, optional
        Path of the data directory. Used to force data storage in a specified
        location.
    verbose : int, optional
        Verbosity level (0 means no message). Default=1.

    Returns
    -------
    data : Bunch
        A dict-like object which exposes its items as attributes. It contains:
        - 'images', the paths to downloaded files.
        - 'images_meta', the metadata for the images in a list of
          dictionaries.
        - 'collections_meta', the metadata for the collections.
        - 'description', a short description of the Neurovault dataset.

    Notes
    -----
    The 'auditory_calculation_vs_baseline' contrast is used:
    https://neurovault.org/images/32980/

    See Also
    --------
    nilearn.datasets.fetch_neurovault_ids
    nilearn.datasets.fetch_neurovault
    nilearn.datasets.fetch_neurovault_motor_task
    """
    # Image 32980 is the 'auditory_calculation_vs_baseline' contrast map.
    return fetch_neurovault_ids(image_ids=[32980], data_dir=data_dir,
                                verbose=verbose)
| 33.324053 | 91 | 0.659504 |
fdfdd6e40687167a447bc765a830761657f92198 | 14,410 | py | Python | model_zoo/official/nlp/bert/src/bert_for_finetune.py | huxian123/mindspore | ec5ba10c82bbd6eccafe32d3a1149add90105bc8 | [
"Apache-2.0"
] | 1 | 2021-06-02T02:46:20.000Z | 2021-06-02T02:46:20.000Z | model_zoo/official/nlp/bert/src/bert_for_finetune.py | nudt-eddie/mindspore | 55372b41fdfae6d2b88d7078971e06d537f6c558 | [
"Apache-2.0"
] | null | null | null | model_zoo/official/nlp/bert/src/bert_for_finetune.py | nudt-eddie/mindspore | 55372b41fdfae6d2b88d7078971e06d537f6c558 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
'''
Bert for finetune script.
'''
import mindspore.nn as nn
from mindspore.ops import operations as P
from mindspore.ops import functional as F
from mindspore.ops import composite as C
from mindspore.common.tensor import Tensor
from mindspore.common.parameter import Parameter
from mindspore.common import dtype as mstype
from mindspore.nn.wrap.grad_reducer import DistributedGradReducer
from mindspore.context import ParallelMode
from mindspore.communication.management import get_group_size
from mindspore import context
from .bert_for_pre_training import clip_grad
from .finetune_eval_model import BertCLSModel, BertNERModel, BertSquadModel
from .utils import CrossEntropyCalculation
# Gradient-clipping configuration shared by the wrapper cells below; both
# values are handed to clip_grad (see bert_for_pre_training.clip_grad).
GRADIENT_CLIP_TYPE = 1
GRADIENT_CLIP_VALUE = 1.0
# Multitype graph op that divides each gradient by the loss scale.
grad_scale = C.MultitypeFuncGraph("grad_scale")
reciprocal = P.Reciprocal()
@grad_scale.register("Tensor", "Tensor")
def tensor_grad_scale(scale, grad):
    # Undo loss scaling: grad / scale, expressed as grad * (1 / scale).
    return grad * reciprocal(scale)
# Multitype graph op used on the GPU path to detect inf/nan per gradient.
_grad_overflow = C.MultitypeFuncGraph("_grad_overflow")
grad_overflow = P.FloatStatus()
@_grad_overflow.register("Tensor")
def _tensor_grad_overflow(grad):
    # FloatStatus yields a nonzero value when grad contains inf or nan.
    return grad_overflow(grad)
class BertFinetuneCell(nn.Cell):
    """
    Training wrapper cell for finetuning tasks that take exactly four input
    tensors (ids, mask, token types, labels).

    Adds loss scaling, backend-specific overflow detection, gradient
    clipping and optional distributed gradient reduction around the
    wrapped loss network.
    """
    def __init__(self, network, optimizer, scale_update_cell=None):
        super(BertFinetuneCell, self).__init__(auto_prefix=False)
        self.network = network
        self.network.set_grad()
        self.weights = optimizer.parameters
        self.optimizer = optimizer
        self.grad = C.GradOperation(get_by_list=True,
                                    sens_param=True)
        self.reducer_flag = False
        self.allreduce = P.AllReduce()
        self.parallel_mode = context.get_auto_parallel_context("parallel_mode")
        if self.parallel_mode in [ParallelMode.DATA_PARALLEL, ParallelMode.HYBRID_PARALLEL]:
            self.reducer_flag = True
        self.grad_reducer = None
        if self.reducer_flag:
            # Reduce gradients across devices in (data/hybrid) parallel runs.
            mean = context.get_auto_parallel_context("gradients_mean")
            degree = get_group_size()
            self.grad_reducer = DistributedGradReducer(optimizer.parameters, mean, degree)
        self.is_distributed = (self.parallel_mode != ParallelMode.STAND_ALONE)
        self.cast = P.Cast()
        self.gpu_target = False
        # Overflow detection differs per backend: GPU sums per-gradient
        # FloatStatus values; other targets use the NPU float-status registers.
        if context.get_context("device_target") == "GPU":
            self.gpu_target = True
            self.float_status = P.FloatStatus()
            self.addn = P.AddN()
            self.reshape = P.Reshape()
        else:
            self.alloc_status = P.NPUAllocFloatStatus()
            self.get_status = P.NPUGetFloatStatus()
            self.clear_before_grad = P.NPUClearFloatStatus()
        self.reduce_sum = P.ReduceSum(keep_dims=False)
        self.depend_parameter_use = P.ControlDepend(depend_mode=1)
        self.base = Tensor(1, mstype.float32)
        self.less_equal = P.LessEqual()
        self.hyper_map = C.HyperMap()
        self.loss_scale = None
        self.loss_scaling_manager = scale_update_cell
        if scale_update_cell:
            self.loss_scale = Parameter(Tensor(scale_update_cell.get_loss_scale(), dtype=mstype.float32),
                                        name="loss_scale")
    def construct(self,
                  input_ids,
                  input_mask,
                  token_type_id,
                  label_ids,
                  sens=None):
        """Bert Finetune"""
        weights = self.weights
        init = False
        loss = self.network(input_ids,
                            input_mask,
                            token_type_id,
                            label_ids)
        # Use the externally supplied sensitivity if given, otherwise the
        # managed dynamic loss scale.
        if sens is None:
            scaling_sens = self.loss_scale
        else:
            scaling_sens = sens
        if not self.gpu_target:
            # Clear the NPU float-status registers before the backward pass;
            # the control-depend edges pin the clear between loss and grads.
            init = self.alloc_status()
            clear_before_grad = self.clear_before_grad(init)
            F.control_depend(loss, init)
            self.depend_parameter_use(clear_before_grad, scaling_sens)
        grads = self.grad(self.network, weights)(input_ids,
                                                 input_mask,
                                                 token_type_id,
                                                 label_ids,
                                                 self.cast(scaling_sens,
                                                           mstype.float32))
        # Unscale, then clip every gradient.
        grads = self.hyper_map(F.partial(grad_scale, scaling_sens), grads)
        grads = self.hyper_map(F.partial(clip_grad, GRADIENT_CLIP_TYPE, GRADIENT_CLIP_VALUE), grads)
        if self.reducer_flag:
            grads = self.grad_reducer(grads)
        if not self.gpu_target:
            flag = self.get_status(init)
            flag_sum = self.reduce_sum(init, (0,))
            F.control_depend(grads, flag)
            F.control_depend(flag, flag_sum)
        else:
            # GPU path: fold per-gradient overflow flags into one scalar.
            flag_sum = self.hyper_map(F.partial(_grad_overflow), grads)
            flag_sum = self.addn(flag_sum)
            flag_sum = self.reshape(flag_sum, (()))
        if self.is_distributed:
            # Any device overflowing must make every device skip the update.
            flag_reduce = self.allreduce(flag_sum)
            cond = self.less_equal(self.base, flag_reduce)
        else:
            cond = self.less_equal(self.base, flag_sum)
        overflow = cond
        if sens is None:
            # Let the dynamic loss-scale manager react to the overflow flag.
            overflow = self.loss_scaling_manager(self.loss_scale, cond)
        if overflow:
            succ = False
        else:
            succ = self.optimizer(grads)
        ret = (loss, cond)
        return F.depend(ret, succ)
class BertSquadCell(nn.Cell):
    """
    Training wrapper cell for SQuAD finetuning (seven input tensors).

    Adds loss scaling, NPU overflow detection, gradient clipping and
    optional distributed gradient reduction around the wrapped network.
    """
    def __init__(self, network, optimizer, scale_update_cell=None):
        super(BertSquadCell, self).__init__(auto_prefix=False)
        self.network = network
        self.network.set_grad()
        self.weights = optimizer.parameters
        self.optimizer = optimizer
        self.grad = C.GradOperation(get_by_list=True, sens_param=True)
        self.reducer_flag = False
        self.allreduce = P.AllReduce()
        self.parallel_mode = context.get_auto_parallel_context("parallel_mode")
        if self.parallel_mode in [ParallelMode.DATA_PARALLEL, ParallelMode.HYBRID_PARALLEL]:
            self.reducer_flag = True
        self.grad_reducer = None
        if self.reducer_flag:
            # Reduce gradients across devices in (data/hybrid) parallel runs.
            mean = context.get_auto_parallel_context("gradients_mean")
            degree = get_group_size()
            self.grad_reducer = DistributedGradReducer(optimizer.parameters, mean, degree)
        self.is_distributed = (self.parallel_mode != ParallelMode.STAND_ALONE)
        self.cast = P.Cast()
        # NOTE(review): unlike BertFinetuneCell this cell has no GPU branch;
        # it always uses the NPU float-status ops -- confirm intended targets.
        self.alloc_status = P.NPUAllocFloatStatus()
        self.get_status = P.NPUGetFloatStatus()
        self.clear_before_grad = P.NPUClearFloatStatus()
        self.reduce_sum = P.ReduceSum(keep_dims=False)
        self.depend_parameter_use = P.ControlDepend(depend_mode=1)
        self.base = Tensor(1, mstype.float32)
        self.less_equal = P.LessEqual()
        self.hyper_map = C.HyperMap()
        self.loss_scale = None
        self.loss_scaling_manager = scale_update_cell
        if scale_update_cell:
            self.loss_scale = Parameter(Tensor(scale_update_cell.get_loss_scale(), dtype=mstype.float32),
                                        name="loss_scale")
    def construct(self,
                  input_ids,
                  input_mask,
                  token_type_id,
                  start_position,
                  end_position,
                  unique_id,
                  is_impossible,
                  sens=None):
        """BertSquad"""
        weights = self.weights
        init = self.alloc_status()
        loss = self.network(input_ids,
                            input_mask,
                            token_type_id,
                            start_position,
                            end_position,
                            unique_id,
                            is_impossible)
        if sens is None:
            scaling_sens = self.loss_scale
        else:
            scaling_sens = sens
        grads = self.grad(self.network, weights)(input_ids,
                                                 input_mask,
                                                 token_type_id,
                                                 start_position,
                                                 end_position,
                                                 unique_id,
                                                 is_impossible,
                                                 self.cast(scaling_sens,
                                                           mstype.float32))
        # Clear the float-status registers; control-depend edges order the
        # clear relative to the loss and the scaling sensitivity use.
        clear_before_grad = self.clear_before_grad(init)
        F.control_depend(loss, init)
        self.depend_parameter_use(clear_before_grad, scaling_sens)
        # Unscale, then clip every gradient.
        grads = self.hyper_map(F.partial(grad_scale, scaling_sens), grads)
        grads = self.hyper_map(F.partial(clip_grad, GRADIENT_CLIP_TYPE, GRADIENT_CLIP_VALUE), grads)
        if self.reducer_flag:
            grads = self.grad_reducer(grads)
        flag = self.get_status(init)
        flag_sum = self.reduce_sum(init, (0,))
        if self.is_distributed:
            # Any device overflowing must make every device skip the update.
            flag_reduce = self.allreduce(flag_sum)
            cond = self.less_equal(self.base, flag_reduce)
        else:
            cond = self.less_equal(self.base, flag_sum)
        F.control_depend(grads, flag)
        F.control_depend(flag, flag_sum)
        overflow = cond
        if sens is None:
            # Let the dynamic loss-scale manager react to the overflow flag.
            overflow = self.loss_scaling_manager(self.loss_scale, cond)
        if overflow:
            succ = False
        else:
            succ = self.optimizer(grads)
        ret = (loss, cond)
        return F.depend(ret, succ)
class BertCLS(nn.Cell):
    """
    Train interface for classification finetuning task.

    Returns the cross-entropy loss, except for the "spearman_correlation"
    assessment method in evaluation mode, where raw logits are returned.
    """
    def __init__(self, config, is_training, num_labels=2, dropout_prob=0.0, use_one_hot_embeddings=False,
                 assessment_method=""):
        super(BertCLS, self).__init__()
        self.bert = BertCLSModel(config, is_training, num_labels, dropout_prob, use_one_hot_embeddings,
                                 assessment_method)
        self.loss = CrossEntropyCalculation(is_training)
        self.num_labels = num_labels
        self.assessment_method = assessment_method
        self.is_training = is_training
    def construct(self, input_ids, input_mask, token_type_id, label_ids):
        logits = self.bert(input_ids, input_mask, token_type_id)
        if self.assessment_method == "spearman_correlation":
            # Spearman evaluation needs raw scores, not a scalar loss.
            if self.is_training:
                loss = self.loss(logits, label_ids)
            else:
                loss = logits
        else:
            loss = self.loss(logits, label_ids, self.num_labels)
        return loss
class BertNER(nn.Cell):
    """
    Train interface for sequence labeling finetuning task.

    Uses a CRF loss when ``use_crf`` is set (requiring ``tag_to_index``),
    otherwise plain cross entropy over ``num_labels`` classes.
    """
    def __init__(self, config, is_training, num_labels=11, use_crf=False, tag_to_index=None, dropout_prob=0.0,
                 use_one_hot_embeddings=False):
        super(BertNER, self).__init__()
        self.bert = BertNERModel(config, is_training, num_labels, use_crf, dropout_prob, use_one_hot_embeddings)
        if use_crf:
            if not tag_to_index:
                raise Exception("The dict for tag-index mapping should be provided for CRF.")
            # Imported lazily so non-CRF runs do not require the CRF module.
            from src.CRF import CRF
            self.loss = CRF(tag_to_index, config.batch_size, config.seq_length, is_training)
        else:
            self.loss = CrossEntropyCalculation(is_training)
        self.num_labels = num_labels
        self.use_crf = use_crf
    def construct(self, input_ids, input_mask, token_type_id, label_ids):
        logits = self.bert(input_ids, input_mask, token_type_id)
        if self.use_crf:
            # CRF computes its own sequence-level loss from the logits.
            loss = self.loss(logits, label_ids)
        else:
            loss = self.loss(logits, label_ids, self.num_labels)
        return loss
class BertSquad(nn.Cell):
    '''
    Train interface for SQuAD finetuning task.

    In training mode returns the averaged start/end cross-entropy loss;
    in evaluation mode returns (unique_id, start_logits, end_logits).
    '''
    def __init__(self, config, is_training, num_labels=2, dropout_prob=0.0, use_one_hot_embeddings=False):
        super(BertSquad, self).__init__()
        self.bert = BertSquadModel(config, is_training, num_labels, dropout_prob, use_one_hot_embeddings)
        self.loss = CrossEntropyCalculation(is_training)
        self.num_labels = num_labels
        self.seq_length = config.seq_length
        self.is_training = is_training
        # NOTE(review): total_num/start_num/end_num/sum/equal/argmax are
        # declared but unused in this construct -- possibly leftovers from
        # an accuracy-tracking variant; confirm before removing.
        self.total_num = Parameter(Tensor([0], mstype.float32), name='total_num')
        self.start_num = Parameter(Tensor([0], mstype.float32), name='start_num')
        self.end_num = Parameter(Tensor([0], mstype.float32), name='end_num')
        self.sum = P.ReduceSum()
        self.equal = P.Equal()
        self.argmax = P.ArgMaxWithValue(axis=1)
        self.squeeze = P.Squeeze(axis=-1)
    def construct(self, input_ids, input_mask, token_type_id, start_position, end_position, unique_id, is_impossible):
        """interface for SQuAD finetuning task"""
        logits = self.bert(input_ids, input_mask, token_type_id)
        if self.is_training:
            # Slice out the start/end score planes and average their losses.
            unstacked_logits_0 = self.squeeze(logits[:, :, 0:1])
            unstacked_logits_1 = self.squeeze(logits[:, :, 1:2])
            start_loss = self.loss(unstacked_logits_0, start_position, self.seq_length)
            end_loss = self.loss(unstacked_logits_1, end_position, self.seq_length)
            total_loss = (start_loss + end_loss) / 2.0
        else:
            # Evaluation: hand back raw logits keyed by unique_id.
            start_logits = self.squeeze(logits[:, :, 0:1])
            end_logits = self.squeeze(logits[:, :, 1:2])
            total_loss = (unique_id, start_logits, end_logits)
        return total_loss
| 43.534743 | 118 | 0.610548 |
19af0a1c151ba14dc4294b5b3685360069ccf70a | 5,002 | py | Python | resilient-circuits/tests/test_decorators.py | ibmresilient/resilient-python-api | 85e0ff684a88f744645c0ace414f51d769bcc3c2 | [
"MIT"
] | 28 | 2017-12-22T00:26:59.000Z | 2022-01-22T14:51:33.000Z | resilient-circuits/tests/test_decorators.py | ibmresilient/resilient-python-api | 85e0ff684a88f744645c0ace414f51d769bcc3c2 | [
"MIT"
] | 18 | 2018-03-06T19:04:20.000Z | 2022-03-21T15:06:30.000Z | resilient-circuits/tests/test_decorators.py | ibmresilient/resilient-python-api | 85e0ff684a88f744645c0ace414f51d769bcc3c2 | [
"MIT"
] | 28 | 2018-05-01T17:53:22.000Z | 2022-03-28T09:56:59.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# (c) Copyright IBM Corp. 2010, 2021. All Rights Reserved.
import pytest
from resilient_lib import IntegrationError
from resilient_circuits import constants, ResilientComponent, inbound_app, app_function
from tests import helpers, mock_constants, MockInboundAppComponent, AppFunctionMockComponent
# Module-level hooks consumed by the pytest-resilient-circuits plugin:
# which mock Resilient server class and which config data the fixtures use.
resilient_mock = mock_constants.RESILIENT_MOCK
config_data = mock_constants.CONFIG_DATA
class TestInboundAppDecorator:
    """Tests for the @inbound_app decorator on ResilientComponent methods."""
    def test_basic_decoration(self):
        # The decorator must mark the method as a circuits inbound handler
        # bound to the configured message-destination channel.
        assert MockInboundAppComponent.inbound_app_mock.handler is True
        assert MockInboundAppComponent.inbound_app_mock.inbound_handler is True
        assert MockInboundAppComponent.inbound_app_mock.names == (mock_constants.MOCK_INBOUND_Q_NAME,)
        assert MockInboundAppComponent.inbound_app_mock.priority == 0
        assert MockInboundAppComponent.inbound_app_mock.channel == "{0}.{1}".format(constants.INBOUND_MSG_DEST_PREFIX, mock_constants.MOCK_INBOUND_Q_NAME)
        assert MockInboundAppComponent.inbound_app_mock.override is False
        assert MockInboundAppComponent.inbound_app_mock.event is True
    def test_inbound_app_mock_runs(self, circuits_app):
        # End-to-end: register the component and fire a 'create' message.
        MockInboundAppComponent(opts=mock_constants.MOCK_OPTS).register(circuits_app.app.component_loader)
        event_args = helpers.call_inbound_app(circuits_app, mock_constants.MOCK_INBOUND_Q_NAME_CREATE)
        assert event_args[1] == [u"Mock incident created with unicode զ է ը թ"]
    def test_inbound_app_mock_handles_Exception(self, circuits_app):
        # Handler errors must surface as IntegrationError to the caller.
        MockInboundAppComponent(opts=mock_constants.MOCK_OPTS).register(circuits_app.app.component_loader)
        with pytest.raises(IntegrationError, match=r"mock error message with unicode"):
            helpers.call_inbound_app(circuits_app, mock_constants.MOCK_INBOUND_Q_NAME_EX)
    def test_too_many_q_names(self):
        # The decorator accepts exactly one destination api name.
        with pytest.raises(ValueError, match=r"Usage: @inbound_app\(<inbound_destination_api_name>\)"):
            class MockInboundAppComponent2(ResilientComponent):
                @inbound_app("mock_q_2", "mock_q_3")
                def inbound_app_mock_2(self, message, *args, **kwargs):
                    return
class TestAppFunctionDecorator:
    """Tests for the @app_function decorator on ResilientComponent methods."""
    def test_basic_decoration(self):
        # The decorator must mark the method as a circuits function handler
        # bound to the 'functions.<api_name>' channel.
        assert AppFunctionMockComponent._app_function_mock_one.handler is True
        assert AppFunctionMockComponent._app_function_mock_one.function is True
        assert AppFunctionMockComponent._app_function_mock_one.names == (mock_constants.MOCK_APP_FN_NAME_ONE,)
        assert AppFunctionMockComponent._app_function_mock_one.priority == 0
        assert AppFunctionMockComponent._app_function_mock_one.channel == u"functions." + mock_constants.MOCK_APP_FN_NAME_ONE
        assert AppFunctionMockComponent._app_function_mock_one.override is False
        assert AppFunctionMockComponent._app_function_mock_one.event is True
    def test_runs(self, circuits_app):
        # Smoke test: the decorated function executes without raising.
        AppFunctionMockComponent(opts=mock_constants.MOCK_OPTS).register(circuits_app.app.component_loader)
        helpers.call_app_function(mock_constants.MOCK_APP_FN_NAME_ONE, {"input_one": "abc"}, circuits_app)
    def test_handles_FunctionResult(self, circuits_app):
        # A returned FunctionResult must expose version/success/reason,
        # echo the inputs, and carry the payload under 'content'.
        mock_fn_inputs = {"input_one": u"abc", "input_two": u"unicode ઠ ડ ઢ ણ ત થ દ ધ ન પ ફ input"}
        AppFunctionMockComponent(opts=mock_constants.MOCK_OPTS).register(circuits_app.app.component_loader)
        mock_results = helpers.call_app_function(mock_constants.MOCK_APP_FN_NAME_ONE, mock_fn_inputs, circuits_app)
        assert mock_results["version"] == 2.0
        assert mock_results["success"] is True
        assert mock_results["reason"] is None
        assert mock_results["inputs"]["input_one"] == mock_fn_inputs["input_one"]
        assert mock_results["inputs"]["input_two"] == mock_fn_inputs["input_two"]
        assert mock_results["content"]["malware"] is True
    def test_handles_StatusMessage(self, circuits_app):
        # StatusMessages yielded by the function are observable separately.
        AppFunctionMockComponent(opts=mock_constants.MOCK_OPTS).register(circuits_app.app.component_loader)
        mock_status_message = helpers.call_app_function(mock_constants.MOCK_APP_FN_NAME_ONE, {"input_one": "abc"}, circuits_app, status_message_only=True)
        assert mock_status_message.text == u"Mock զ է ը թ ժ ի լ StatusMessage 1"
    def test_handles_Exception(self, circuits_app):
        # Function errors must surface as IntegrationError to the caller.
        AppFunctionMockComponent(opts=mock_constants.MOCK_OPTS).register(circuits_app.app.component_loader)
        with pytest.raises(IntegrationError, match=r"mock error message with unicode"):
            helpers.call_app_function(mock_constants.MOCK_APP_FN_NAME_EX, {"input_one": "abc"}, circuits_app)
    def test_too_many_function_names(self):
        # The decorator accepts exactly one api name.
        with pytest.raises(ValueError, match=r"Usage: @app_function\(api_name\)"):
            class AppFunctionMockComponent2(ResilientComponent):
                @app_function("mock_function_2", "mock_function_3")
                def mock_function_2(self, fn_inputs, **kwargs):
                    return
| 58.162791 | 154 | 0.762495 |
96803f9f7c7311398b7912308e32bd367f0f3d3c | 2,961 | py | Python | covid/disease_objects/course.py | c200chromebook/Covid_Model | 860221a222ffab6a12d3894b58c7170af6e6b8bb | [
"MIT"
] | 2 | 2020-03-15T22:32:06.000Z | 2020-03-18T11:05:56.000Z | covid/disease_objects/course.py | c200chromebook/Covid_Model | 860221a222ffab6a12d3894b58c7170af6e6b8bb | [
"MIT"
] | null | null | null | covid/disease_objects/course.py | c200chromebook/Covid_Model | 860221a222ffab6a12d3894b58c7170af6e6b8bb | [
"MIT"
] | null | null | null | from collections import defaultdict
from covid.disease_objects.state import State
from covid.util.util import parse_tuple_list
class CourseState:
    """Mutable per-course infection ledger.

    Tracks how many cases sit on each day of a disease course and advances
    them one day at a time via :meth:`roll`.
    """

    def __init__(self, course):
        self.course = course
        # One entry per day of the course: the State occupied on that day.
        self.day_state = []
        for state, days in course.disease_course:
            self.day_state.extend([state] * days)
        # Case count per day of the course, all initially empty.
        self.day_cases = [0.0] * course.maxlength

    @property
    def infected(self):
        """Total cases currently anywhere in the course."""
        return sum(self.day_cases)

    def infect(self, n):
        """Seed ``n`` new cases on day zero of the course."""
        self.day_cases[0] += n

    def roll(self, r0, growth_mult=1.0):
        """Advance every case one day; return (contacts, deaths, recoveries)."""
        # New infectious contacts generated today, weighted by each day's
        # share of the course's total infectiousness.
        contacts = 0.0
        for frac, lives in zip(self.course.infection_fraction_vec, self.day_cases):
            contacts += frac * lives * r0 * growth_mult
        if self.course.prob_death:
            deaths = self.day_cases[self.course.death_on - 1] * self.course.prob_death
        else:
            deaths = 0.0
        if deaths:
            self.day_cases[self.course.death_on - 1] -= deaths
        # Shift everyone forward one day; cases falling off the end recover.
        self.day_cases.insert(0, 0.0)
        recoveries = self.day_cases.pop()
        return contacts, deaths, recoveries

    def report(self):
        """Return a mapping of State -> total cases currently in that state."""
        totals = defaultdict(float)
        for state, cases in zip(self.day_state, self.day_cases):
            totals[state] += cases
        return totals

    def scale(self, factor):
        """Multiply every day's case count by ``factor``."""
        self.day_cases = [factor * c for c in self.day_cases]
class Course:
    """One possible course of the disease, parsed from a config section.

    A course is an ordered list of (State, days) stages, an optional death
    event, and the per-day infectiousness profile used by CourseState.roll.
    """
    def __init__(self, cfg, course_name, states):
        def add_state(st):
            # Reuse an already-registered State, or create and register one.
            if st in states:
                return states[st]
            else:
                new_state = State(cfg, st)
                states[st] = new_state
                return new_state
        # COURSE is a list of (state_name, days) pairs -- presumably parsed
        # by parse_tuple_list from a configparser string; verify its format.
        course_temp = parse_tuple_list(cfg[course_name]['COURSE'])
        self.prob_death = cfg[course_name].getfloat('PROB_DEATH') if 'PROB_DEATH' in cfg[course_name] else None
        # DEATH_ON is only meaningful when there is a death probability.
        self.death_on = cfg[course_name].getint('DEATH_ON') if self.prob_death else None
        self.disease_course = [(add_state(state_name), int(days)) for state_name, days in course_temp]
        self.probability = cfg['COURSES'].getfloat(course_name)
        total_state_days = defaultdict(int)
        for state, days in self.disease_course:
            total_state_days[state] += days
        daily_infection_fraction = {}
        for state in total_state_days:
            # Spread each state's share of infections evenly over its days.
            daily_infection_fraction[state] = state.percent_of_infections / float(total_state_days[state])
        unadjusted_infection_frac_vec = [daily_infection_fraction[state]
                                         for state, days in self.disease_course
                                         for _ in range(days)]
        sum_inf_frac = sum(unadjusted_infection_frac_vec)
        assert sum_inf_frac  # Could relax this constraint.
        # Normalise so the per-day fractions sum to one over the course.
        self.infection_fraction_vec = [frac/sum_inf_frac for frac in unadjusted_infection_frac_vec]
    @property
    def maxlength(self):
        # Total duration of the course in days.
        return sum([days for state, days in self.disease_course])
    def init_state(self):
        """Create a fresh, empty CourseState tracker for this course."""
        return CourseState(self)
b7538554c20c79a8c84cbd38ec0ae941fbd35e65 | 1,592 | py | Python | oxygine/system_data_demo/build.py | savegame/oxygine-framework | bf49a097a86a2b99690a4fdc97efa73f2dfb70e4 | [
"MIT"
] | 803 | 2015-01-03T13:11:43.000Z | 2022-03-15T17:38:58.000Z | oxygine/system_data_demo/build.py | savegame/oxygine-framework | bf49a097a86a2b99690a4fdc97efa73f2dfb70e4 | [
"MIT"
] | 100 | 2015-01-07T17:07:56.000Z | 2021-12-21T20:09:20.000Z | oxygine/system_data_demo/build.py | savegame/oxygine-framework | bf49a097a86a2b99690a4fdc97efa73f2dfb70e4 | [
"MIT"
] | 265 | 2015-01-03T13:11:43.000Z | 2022-03-01T04:37:15.000Z | import os
import shutil
import zipfile
# Start from a pristine copy of the source assets: drop any previous build
# output and clone the originals into data/.
shutil.rmtree("data", True)
shutil.copytree("original", "data")
# Pack resources with oxygine's resource builder (Windows-style relative
# path -- presumably run from the demo folder; confirm when porting).
os.system("..\\..\\tools\\oxyresbuild.py -x sys_demo\\res.xml --src_data data --dest_data data\\sys_demo --nopng")
# Source animations are no longer needed once the atlas is built.
shutil.rmtree("data/sys_demo/anims/")
def recursive_zip(zipf, directory, folder=""):
    """Recursively add every file under *directory* to the open ZipFile *zipf*.

    Archive entry names are rooted at *folder* and always use forward-slash
    separators, as the zip format requires. (The original built names as
    ``folder + os.sep + item``, which produced backslash separators on
    Windows and a spurious leading separator for top-level entries.)

    Args:
        zipf: an open, writable ``zipfile.ZipFile``.
        directory: filesystem directory to walk.
        folder: archive-name prefix for entries at this level ("" = root).
    """
    for item in os.listdir(directory):
        path = os.path.join(directory, item)
        # '/' is the mandated zip separator; no leading slash at the root.
        arcname = folder + "/" + item if folder else item
        if os.path.isfile(path):
            zipf.write(path, arcname)
        elif os.path.isdir(path):
            recursive_zip(zipf, path, arcname)
# NOTE: cStringIO is Python 2 only -- this build script predates Python 3
# (io.BytesIO is the modern equivalent).
import cStringIO
data = cStringIO.StringIO()
# Zip the prepared data/ tree into an in-memory buffer for embedding below.
with zipfile.ZipFile(data, "w", zipfile.ZIP_DEFLATED) as zp:
    recursive_zip(zp, "data")
# C header/source templates for embedding a binary blob as a byte array.
fmtH = """
#pragma once
#ifndef %(MODULE)s_DATA
#define %(MODULE)s_DATA
extern unsigned int %(module)s_size;
extern const unsigned char %(module)s_data[];
#endif
"""
fmtCPP = """
#include "%(HEADER)s"
unsigned int %(module)s_size = %(SIZE)d;
const unsigned char %(module)s_data[] = {%(DATA)s};
"""
def gen(module, data):
    """Render the (header, source) pair embedding *data* as a C byte array."""
    byte_list = ",".join("0x{:02x}".format(ord(ch)) for ch in data)
    fields = {
        "module": module,
        "MODULE": module.upper(),
        "SIZE": len(data),
        "DATA": byte_list,
        "HEADER": module + "_data.h",
    }
    return (fmtH % fields, fmtCPP % fields)
# Embed the zipped data as a C array and write the generated header/source,
# plus a standalone copy of the archive for inspection.
rs = gen("system", data.getvalue())
with open("demo_data.h", "w") as fh:
    fh.write(rs[0])
with open("demo_data.cpp", "w") as fh:
    fh.write(rs[1])
with open("demo.zip", "wb") as zp:
    zp.write(data.getvalue())
3362202e23bfcb5b731d60ae56b6d9975bf6a20e | 745 | py | Python | posts/urls.py | KingVulkan/Awards | 00d537f32029e9a21c26dcf95bca7e7a7854a4dc | [
"Unlicense"
] | null | null | null | posts/urls.py | KingVulkan/Awards | 00d537f32029e9a21c26dcf95bca7e7a7854a4dc | [
"Unlicense"
] | null | null | null | posts/urls.py | KingVulkan/Awards | 00d537f32029e9a21c26dcf95bca7e7a7854a4dc | [
"Unlicense"
] | null | null | null | from django.conf.urls import url
from . import views
from django.conf import settings
from django.conf.urls.static import static
urlpatterns=[
url(r'^$', views.home, name='home'),
url(r'^user/(?P<username>\w+)', views.profile, name='profile'),
url(r'^upload/$', views.project, name='upload_project'),
url(r'^review/(?P<project_id>\d+)',views.project_review ,name='project_review'),
url(r'^accounts/edit', views.edit_profile, name='edit_profile'),
url(r'^search/', views.search_project, name='search'),
url(r'^api/profile/$', views.ProfileList.as_view()),
url(r'^api/project/$', views.ProjectList.as_view())
]
if settings.DEBUG:
urlpatterns+= static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)
| 37.25 | 84 | 0.697987 |
93a5e3aab0f8cef8cd787ce29ba92bc939d8d391 | 25,844 | py | Python | qcodes/tests/instrument_mocks.py | RasmusBC59/Qcodes | 467ca557745278e6ff467965c57c77c2aa6d699b | [
"MIT"
] | 1 | 2021-04-07T08:53:05.000Z | 2021-04-07T08:53:05.000Z | qcodes/tests/instrument_mocks.py | M1racleShih/Qcodes | c03029a6968e16379155aadc8b083a02e01876a6 | [
"MIT"
] | 166 | 2020-12-25T07:09:57.000Z | 2022-03-30T00:16:23.000Z | qcodes/tests/instrument_mocks.py | nicholgroup/Qcodes | 6b9701bf469421fcf2ced58f67c01f69eba9d1f4 | [
"MIT"
] | 4 | 2017-12-11T12:13:41.000Z | 2018-08-01T13:13:04.000Z | from functools import partial
import logging
from typing import Any, Sequence, Dict, Optional
import numpy as np
from qcodes.instrument.base import Instrument, InstrumentBase
from qcodes.utils.validators import Numbers, Arrays, Strings, ComplexNumbers
from qcodes.instrument.parameter import MultiParameter, Parameter, \
ArrayParameter, ParameterWithSetpoints
from qcodes.instrument.channel import InstrumentChannel, ChannelList
import random
log = logging.getLogger(__name__)
class MockParabola(Instrument):
    """
    Dummy instrument exposing settable x/y/z parameters plus derived
    parabola measurements.

    Intended for testing: simpler than the mock model in that it emulates
    no communication. The three axes allow 3D sweeps, and the (optionally
    noisy) parabola gives numerical optimizers something to minimise.
    """

    def __init__(self, name, **kw):
        super().__init__(name, **kw)
        # Three settable axes so 3D sweeps can be exercised.
        for axis in ('x', 'y', 'z'):
            self.add_parameter(axis, unit='a.u.',
                               parameter_class=Parameter,
                               vals=Numbers(), initial_value=0,
                               get_cmd=None, set_cmd=None)
        self.add_parameter('noise', unit='a.u.',
                           label='white noise amplitude',
                           parameter_class=Parameter,
                           vals=Numbers(), initial_value=0,
                           get_cmd=None, set_cmd=None)
        self.add_parameter('parabola', unit='a.u.',
                           get_cmd=self._measure_parabola)
        self.add_parameter('skewed_parabola', unit='a.u.',
                           get_cmd=self._measure_skewed_parabola)

    def _measure_parabola(self):
        """x^2 + y^2 + z^2 plus optional white noise."""
        radius_sq = self.x.get() ** 2 + self.y.get() ** 2 + self.z.get() ** 2
        return radius_sq + self.noise.get() * np.random.rand(1)

    def _measure_skewed_parabola(self):
        """Parabola with a (1 + |y - x|) skew factor to correlate the axes."""
        radius_sq = self.x.get() ** 2 + self.y.get() ** 2 + self.z.get() ** 2
        skew = 1 + abs(self.y.get() - self.x.get())
        return radius_sq * skew + self.noise.get() * np.random.rand(1)
class MockMetaParabola(InstrumentBase):
    """
    Test for a meta instrument, has a tunable gain knob

    Unlike a MockParabola, a MockMetaParabola does not have a connection, or
    access to ask_raw/write_raw, i.e. it would not be connected to a real instrument.
    It is also not tracked in the global _all_instruments list, but is still
    snapshottable in a station.
    """
    def __init__(self, name, mock_parabola_inst, **kw):
        """
        Create a new MockMetaParabola, connected to an existing MockParabola instance.
        """
        super().__init__(name, **kw)
        self.mock_parabola_inst = mock_parabola_inst
        # Instrument parameters
        # Re-expose the wrapped instrument's axes as this instrument's own
        # parameters (shared objects, not copies).
        for parname in ['x', 'y', 'z']:
            self.parameters[parname] = getattr(mock_parabola_inst, parname)
        self.add_parameter('gain', parameter_class=Parameter,
                           initial_value=1,
                           get_cmd=None, set_cmd=None)
        self.add_parameter('parabola', unit='a.u.',
                           get_cmd=self._get_parabola)
        self.add_parameter('skewed_parabola', unit='a.u.',
                           get_cmd=self._get_skew_parabola)
    def _get_parabola(self):
        # Wrapped reading scaled by this meta instrument's gain.
        val = self.mock_parabola_inst.parabola.get()
        return val*self.gain.get()
    def _get_skew_parabola(self):
        # Wrapped skewed reading scaled by this meta instrument's gain.
        val = self.mock_parabola_inst.skewed_parabola.get()
        return val*self.gain.get()
class DummyInstrument(Instrument):
    """Dummy instrument exposing one settable voltage parameter per gate."""

    def __init__(self, name: str = 'dummy',
                 gates: Sequence[str] = ('dac1', 'dac2', 'dac3'), **kwargs):
        """
        Create a dummy instrument that can be used for testing.

        Args:
            name: name for the instrument
            gates: names used to create one settable parameter each
        """
        super().__init__(name, **kwargs)
        # One software-backed voltage parameter per requested gate name.
        for gate in gates:
            self.add_parameter(gate,
                               parameter_class=Parameter,
                               initial_value=0,
                               label=f'Gate {gate}',
                               unit="V",
                               vals=Numbers(-800, 400),
                               get_cmd=None, set_cmd=None)
class DmmExponentialParameter(Parameter):
    """Readout parameter modelling a noisy exponential decay of dac.ch1."""
    def __init__(self, name, **kwargs):
        super().__init__(name, **kwargs)
        # Prime the generator so get_raw can call send() straight away.
        self._ed = self._exponential_decay(5, 0.2)
        next(self._ed)
    def get_raw(self):
        """
        This method is automatically wrapped to
        provide a ``get`` method on the parameter instance.
        """
        # Reads ch1 of the setter instrument this dmm was constructed with.
        dac = self.root_instrument._setter_instr
        val = self._ed.send(dac.ch1())
        # Step past the bare `yield` so the generator alternates
        # receive/produce correctly for the next send().
        next(self._ed)
        return val
    @staticmethod
    def _exponential_decay(a: float, b: float):
        """
        Yields a*exp(-b*x) where x is put in
        """
        x = 0
        while True:
            x = yield
            # 2% multiplicative-amplitude gaussian noise on top of the decay.
            yield a * np.exp(-b * x) + 0.02 * a * np.random.randn()
class DmmGaussParameter(Parameter):
    """Readout parameter modelling a noisy 2D gaussian of dac.ch1/ch2."""
    def __init__(self, name, **kwargs):
        super().__init__(name, **kwargs)
        # Gaussian centre (x0, y0), width and additive noise level.
        self.x0 = 0.1
        self.y0 = 0.2
        self.sigma = 0.25
        self.noise: float = 0.0005
        # Prime the generator so get_raw can call send() straight away.
        self._gauss = self._gauss_model()
        next(self._gauss)
    def get_raw(self):
        """
        This method is automatically wrapped to
        provide a ``get`` method on the parameter instance.
        """
        dac = self.root_instrument._setter_instr
        val = self._gauss.send((dac.ch1.get(), dac.ch2.get()))
        # Step past the bare `yield` so the generator alternates
        # receive/produce correctly for the next send().
        next(self._gauss)
        return val
    def _gauss_model(self):
        """
        Returns a generator sampling a gaussian. The gaussian is
        normalised such that its maximal value is simply 1
        """
        # NOTE(review): the extra *np.exp(2*self.sigma**2) factor means the
        # peak is exp(2*sigma^2), not exactly 1 -- docstring may be stale.
        while True:
            (x, y) = yield
            model = np.exp(-((self.x0-x)**2+(self.y0-y)**2)/2/self.sigma**2)*np.exp(2*self.sigma**2)
            noise = np.random.randn()*self.noise
            yield model + noise
class DummyInstrumentWithMeasurement(Instrument):
    """Dummy instrument whose v1/v2 readings derive from a DummyInstrument's dacs."""
    def __init__(
            self,
            name: str,
            setter_instr: DummyInstrument,
            **kwargs):
        super().__init__(name=name, **kwargs)
        # Instrument whose dac channels drive the simulated measurements
        # (read by DmmExponentialParameter / DmmGaussParameter.get_raw).
        self._setter_instr = setter_instr
        self.add_parameter('v1',
                           parameter_class=DmmExponentialParameter,
                           initial_value=0,
                           label='Gate v1',
                           unit="V",
                           vals=Numbers(-800, 400),
                           get_cmd=None, set_cmd=None)
        self.add_parameter('v2',
                           parameter_class=DmmGaussParameter,
                           initial_value=0,
                           label='Gate v2',
                           unit="V",
                           vals=Numbers(-800, 400),
                           get_cmd=None, set_cmd=None)
class DummyChannel(InstrumentChannel):
    """
    A single dummy channel implementation

    Bundles a wide assortment of parameter flavours (scalar, multi, array,
    setpoint-based, text, complex) so dataset and snapshot machinery can be
    exercised from one channel.
    """
    def __init__(self, parent, name, channel):
        super().__init__(parent, name)
        self._channel = channel
        # Add the various channel parameters
        self.add_parameter('temperature',
                           parameter_class=Parameter,
                           initial_value=0,
                           label=f"Temperature_{channel}",
                           unit='K',
                           vals=Numbers(0, 300),
                           get_cmd=None, set_cmd=None)
        # Multi/array parameters of various shapes for dataset tests.
        self.add_parameter(name='dummy_multi_parameter',
                           parameter_class=MultiSetPointParam)
        self.add_parameter(name='dummy_scalar_multi_parameter',
                           parameter_class=MultiScalarParam)
        self.add_parameter(name='dummy_2d_multi_parameter',
                           parameter_class=Multi2DSetPointParam)
        self.add_parameter(name='dummy_2d_multi_parameter_2',
                           parameter_class=Multi2DSetPointParam2Sizes)
        self.add_parameter(name='dummy_array_parameter',
                           parameter_class=ArraySetPointParam)
        self.add_parameter(name='dummy_complex_array_parameter',
                           parameter_class=ComplexArraySetPointParam)
        # First sweep axis: start/stop/number-of-points knobs.
        self.add_parameter('dummy_start',
                           initial_value=0,
                           unit='some unit',
                           label='f start',
                           vals=Numbers(0, 1e3),
                           get_cmd=None,
                           set_cmd=None)
        self.add_parameter('dummy_stop',
                           unit='some unit',
                           label='f stop',
                           vals=Numbers(1, 1e3),
                           get_cmd=None,
                           set_cmd=None)
        self.add_parameter('dummy_n_points',
                           unit='',
                           vals=Numbers(1, 1e3),
                           get_cmd=None,
                           set_cmd=None)
        # Second, independent sweep axis for 2D setpoint parameters.
        self.add_parameter('dummy_start_2',
                           initial_value=0,
                           unit='some unit',
                           label='f start',
                           vals=Numbers(0, 1e3),
                           get_cmd=None,
                           set_cmd=None)
        self.add_parameter('dummy_stop_2',
                           unit='some unit',
                           label='f stop',
                           vals=Numbers(1, 1e3),
                           get_cmd=None,
                           set_cmd=None)
        self.add_parameter('dummy_n_points_2',
                           unit='',
                           vals=Numbers(1, 1e3),
                           get_cmd=None,
                           set_cmd=None)
        # Setpoint arrays generated on the fly from the start/stop/n knobs.
        self.add_parameter('dummy_sp_axis',
                           unit='some unit',
                           label='Dummy sp axis',
                           parameter_class=GeneratedSetPoints,
                           startparam=self.dummy_start,
                           stopparam=self.dummy_stop,
                           numpointsparam=self.dummy_n_points,
                           vals=Arrays(shape=(self.dummy_n_points,)))
        self.add_parameter('dummy_sp_axis_2',
                           unit='some unit',
                           label='Dummy sp axis',
                           parameter_class=GeneratedSetPoints,
                           startparam=self.dummy_start_2,
                           stopparam=self.dummy_stop_2,
                           numpointsparam=self.dummy_n_points_2,
                           vals=Arrays(shape=(self.dummy_n_points_2,)))
        # ParameterWithSetpoints variants in 1D, 2D and complex flavours.
        self.add_parameter(name='dummy_parameter_with_setpoints',
                           label='Dummy Parameter with Setpoints',
                           unit='some other unit',
                           setpoints=(self.dummy_sp_axis,),
                           vals=Arrays(shape=(self.dummy_n_points,)),
                           parameter_class=DummyParameterWithSetpoints1D)
        self.add_parameter(name='dummy_parameter_with_setpoints_2d',
                           label='Dummy Parameter with Setpoints',
                           unit='some other unit',
                           setpoints=(self.dummy_sp_axis,self.dummy_sp_axis_2),
                           vals=Arrays(shape=(self.dummy_n_points,self.dummy_n_points_2)),
                           parameter_class=DummyParameterWithSetpoints2D)
        self.add_parameter(name='dummy_text',
                           label='Dummy text',
                           unit='text unit',
                           initial_value='thisisastring',
                           set_cmd=None,
                           vals=Strings())
        self.add_parameter(name='dummy_complex',
                           label='Dummy complex',
                           unit='complex unit',
                           initial_value=1+1j,
                           set_cmd=None,
                           vals=ComplexNumbers())
        self.add_parameter(name='dummy_parameter_with_setpoints_complex',
                           label='Dummy Parameter with Setpoints complex',
                           unit='some other unit',
                           setpoints=(self.dummy_sp_axis,),
                           vals=Arrays(shape=(self.dummy_n_points,),
                                       valid_types=(np.complexfloating,)),
                           parameter_class=DummyParameterWithSetpointsComplex)
        # Simple bound function so instrument Functions can be tested too.
        self.add_function(name='log_my_name',
                          call_cmd=partial(log.debug, f'{name}'))
class DummyChannelInstrument(Instrument):
    """Dummy instrument exposing six DummyChannel submodules (A-F).

    Every channel is registered twice: individually under its letter and
    collectively through the "channels" ChannelList submodule.
    """

    def __init__(self, name, **kwargs):
        super().__init__(name, **kwargs)
        channel_list = ChannelList(self, "TempSensors", DummyChannel,
                                   snapshotable=False)
        for letter in "ABCDEF":
            chan = DummyChannel(self, f'Chan{letter}', letter)
            channel_list.append(chan)
            self.add_submodule(letter, chan)
        self.add_submodule("channels", channel_list)
class MultiGetter(MultiParameter):
    """Test helper: a MultiParameter with fixed, preset return values.

    Instantiate with kwargs::

        MultiGetter(name1=return_val1, name2=return_val2)

    Each keyword becomes one component name with a constant return value;
    values can be any array-like object, e.g.::

        MultiGetter(one=1, onetwo=(1, 2))
    """

    def __init__(self, **kwargs):
        # Sort by key so component order is deterministic.
        ordered = sorted(kwargs.items())
        names = tuple(key for key, _ in ordered)
        self._return = tuple(value for _, value in ordered)
        super().__init__(name='multigetter', names=names,
                         shapes=tuple(np.shape(value)
                                      for value in self._return))

    def get_raw(self):
        # Constant values supplied at construction time.
        return self._return
class MultiSetPointParam(MultiParameter):
    """MultiParameter whose only purpose is to verify that units, setpoints
    and so on are copied correctly to the individual arrays in the datarray.
    """

    def __init__(self, instrument=None, name='multi_setpoint_param'):
        sp_values = tuple(np.linspace(5, 9, 5))
        sp_name = ('multi_setpoint_param_this_setpoint',)
        sp_label = ('this setpoint',)
        sp_unit = ('this setpointunit',)
        super().__init__(
            name,
            ('multi_setpoint_param_this', 'multi_setpoint_param_that'),
            ((5,), (5,)),
            instrument=instrument,
            labels=('this label', 'that label'),
            units=('this unit', 'that unit'),
            setpoints=((sp_values,), (sp_values,)),
            setpoint_labels=(sp_label, sp_label),
            setpoint_names=(sp_name, sp_name),
            setpoint_units=(sp_unit, sp_unit))

    def get_raw(self):
        # Fixed data matching the declared (5,) shapes.
        return np.zeros(5), np.ones(5)
class Multi2DSetPointParam(MultiParameter):
    """MultiParameter whose only purpose is to verify that units, setpoints
    and so on are copied correctly to the individual arrays in the datarray.
    """

    def __init__(self, instrument=None, name='multi_2d_setpoint_param'):
        outer = tuple(np.linspace(5, 9, 5))
        inner = tuple(np.linspace(9, 11, 3))
        sps = setpoint_generator(outer, inner)
        sp_names = ('multi_2d_setpoint_param_this_setpoint',
                    'multi_2d_setpoint_param_that_setpoint')
        sp_labels = ('this setpoint', 'that setpoint')
        sp_units = ('this setpointunit', 'that setpointunit')
        super().__init__(
            name,
            ('this', 'that'),
            ((5, 3), (5, 3)),
            instrument=instrument,
            labels=('this label', 'that label'),
            units=('this unit', 'that unit'),
            setpoints=(sps, sps),
            setpoint_labels=(sp_labels, sp_labels),
            setpoint_names=(sp_names, sp_names),
            setpoint_units=(sp_units, sp_units))

    def get_raw(self):
        # Fixed data matching the declared (5, 3) shapes.
        return np.zeros((5, 3)), np.ones((5, 3))
class Multi2DSetPointParam2Sizes(MultiParameter):
    """MultiParameter for testing whose individual components have
    different shapes ((5, 3) and (2, 7)).
    """

    def __init__(self, instrument=None, name='multi_2d_setpoint_param'):
        sps_a = setpoint_generator(tuple(np.linspace(5, 9, 5)),
                                   tuple(np.linspace(9, 11, 3)))
        sps_b = setpoint_generator(tuple(np.linspace(5, 9, 2)),
                                   tuple(np.linspace(9, 11, 7)))
        sp_units = ('this setpointunit', 'that setpointunit')
        super().__init__(
            name,
            ('this_5_3', 'this_2_7'),
            ((5, 3), (2, 7)),
            instrument=instrument,
            labels=('this label', 'that label'),
            units=('this unit', 'that unit'),
            setpoints=(sps_a, sps_b),
            setpoint_labels=(('this setpoint 1', 'that setpoint 1'),
                             ('this setpoint 2', 'that setpoint 2')),
            setpoint_names=(('multi_2d_setpoint_param_this_setpoint_1',
                             'multi_2d_setpoint_param_that_setpoint_1'),
                            ('multi_2d_setpoint_param_this_setpoint_2',
                             'multi_2d_setpoint_param_that_setpoint_2')),
            setpoint_units=(sp_units, sp_units))

    def get_raw(self):
        # One component per declared shape.
        return np.zeros((5, 3)), np.ones((2, 7))
class MultiScalarParam(MultiParameter):
    """MultiParameter whose components are scalars, i.e. it behaves like a
    plain Parameter with no setpoints.
    """

    def __init__(self, instrument=None, name='multiscalarparameter'):
        super().__init__(
            name,
            ('thisparam', 'thatparam'),
            ((), ()),
            instrument=instrument,
            labels=('thisparam label', 'thatparam label'),
            units=('thisparam unit', 'thatparam unit'),
            setpoints=((), ()))

    def get_raw(self):
        # Two constant scalar values.
        return 0, 1
class ArraySetPointParam(ArrayParameter):
    """ArrayParameter whose only purpose is to verify that units, setpoints
    and so on are copied correctly to the individual arrays in the datarray.
    """

    def __init__(self, instrument=None, name='array_setpoint_param'):
        sp_values = tuple(np.linspace(5, 9, 5))
        super().__init__(
            name,
            (5,),
            instrument,
            label='this label',
            unit='this unit',
            setpoints=(sp_values,),
            setpoint_labels=('this setpoint',),
            setpoint_names=('array_setpoint_param_this_setpoint',),
            setpoint_units=('this setpointunit',))

    def get_raw(self):
        # Constant array of twos with the declared (5,) shape.
        return np.ones(5) + 1
class ComplexArraySetPointParam(ArrayParameter):
    """ArrayParameter that returns complex-valued data."""

    def __init__(self, instrument=None, name='testparameter'):
        sp_values = tuple(np.linspace(5, 9, 5))
        super().__init__(
            name,
            (5,),
            instrument,
            label='this label',
            unit='this unit',
            setpoints=(sp_values,),
            setpoint_labels=('this setpoint',),
            setpoint_names=('this_setpoint',),
            setpoint_units=('this setpointunit',))

    def get_raw(self):
        # Complex ramp 0, 1-1j, 2-2j, ... with the declared (5,) shape.
        return np.arange(5) - 1j * np.arange(5)
class GeneratedSetPoints(Parameter):
    """A parameter generating a linearly spaced setpoint array from start,
    stop and number-of-points parameters.
    """

    def __init__(self, startparam, stopparam, numpointsparam, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Store the source parameters themselves (not their values) so each
        # get_raw() call reflects their current settings.
        self._startparam = startparam
        self._stopparam = stopparam
        self._numpointsparam = numpointsparam

    def get_raw(self):
        start, stop = self._startparam(), self._stopparam()
        return np.linspace(start, stop, self._numpointsparam())
class DummyParameterWithSetpoints1D(ParameterWithSetpoints):
    """Dummy parameter returning random data whose shape follows the
    instrument's `dummy_n_points` parameter.
    """

    def get_raw(self):
        return np.random.rand(self.instrument.dummy_n_points())
class DummyParameterWithSetpoints2D(ParameterWithSetpoints):
    """Dummy parameter returning random 2D data whose shape follows the
    instrument's `dummy_n_points` and `dummy_n_points_2` parameters.
    """

    def get_raw(self):
        shape = (self.instrument.dummy_n_points(),
                 self.instrument.dummy_n_points_2())
        return np.random.rand(*shape)
class DummyParameterWithSetpointsComplex(ParameterWithSetpoints):
    """Dummy parameter returning complex random data whose shape follows
    the instrument's `dummy_n_points` parameter.
    """

    def get_raw(self):
        n = self.instrument.dummy_n_points()
        return np.random.rand(n) + 1j * np.random.rand(n)
def setpoint_generator(*sp_bases):
    """Generate setpoints in the nested format that ArrayParameter
    (and MultiParameter) expects.

    The first base is passed through unchanged; every later base is tiled
    so its leading axes match the lengths of all preceding bases while its
    own values repeat along the trailing axis.

    Args:
        *sp_bases: one 1D setpoint base per dimension.

    Returns:
        Tuple with one setpoint entry per dimension.
    """
    result = [sp_bases[0]] if sp_bases else []
    for dim, base in enumerate(sp_bases[1:], start=1):
        reps = [len(prev) for prev in sp_bases[:dim]] + [1]
        result.append(np.tile(base, reps))
    return tuple(result)
class SnapShotTestInstrument(Instrument):
    """
    A highly specialized dummy instrument for testing the snapshot. Used by
    test_snapshot.py

    Args:
        name: name for the instrument
        params: parameter names. The instrument will have these as parameters
        params_to_skip: parameters to skip updating in the snapshot. Must be
            a subset of params
    """

    def __init__(self, name: str, params: Sequence[str] = ('v1', 'v2', 'v3'),
                 params_to_skip: Sequence[str] = ('v2',)):
        # BUGFIX: the default used to be the *string* 'v2'; set('v2') is
        # {'v', '2'}, which is not a subset of params, so constructing the
        # instrument with default arguments raised ValueError.
        super().__init__(name)

        if not set(params_to_skip).issubset(params):
            raise ValueError('Invalid input; params_to_skip must be a subset '
                             'of params')

        self._params_to_skip = params_to_skip
        self._params = params

        # dict to keep track of how many times 'get' has been called on each
        # parameter. Useful for testing params_to_skip_update in the snapshot
        self._get_calls = {p: 0 for p in params}

        for p_name in params:
            self.add_parameter(p_name, label=f'{name} Label', unit='V',
                               set_cmd=None,
                               get_cmd=partial(self._getter, p_name))

    def _getter(self, name: str):
        """Return the cached value of ``name`` and count the call."""
        val = self.parameters[name].cache.get(get_if_invalid=False)
        self._get_calls[name] += 1
        return val

    def snapshot_base(self, update: Optional[bool] = True,
                      params_to_skip_update: Optional[Sequence[str]] = None
                      ) -> Dict[Any, Any]:
        """Snapshot, defaulting the skip-list to the one given at init."""
        if params_to_skip_update is None:
            params_to_skip_update = self._params_to_skip
        snap = super().snapshot_base(
            update=update, params_to_skip_update=params_to_skip_update)
        return snap
| 36.867332 | 113 | 0.553513 |
ef351182014148959df6e1eaa296ddde4cd8f7e4 | 13,376 | py | Python | simba/sklearn_plot_scripts/plot_sklearn_results_old.py | justinshenk/simba | a58ccd0ceeda201c1452d186033ce6b25fbab564 | [
"MIT"
] | 172 | 2019-12-18T22:19:42.000Z | 2022-03-29T01:58:25.000Z | simba/sklearn_plot_scripts/plot_sklearn_results_old.py | justinshenk/simba | a58ccd0ceeda201c1452d186033ce6b25fbab564 | [
"MIT"
] | 165 | 2020-01-10T19:05:16.000Z | 2022-03-31T16:08:36.000Z | simba/sklearn_plot_scripts/plot_sklearn_results_old.py | justinshenk/simba | a58ccd0ceeda201c1452d186033ce6b25fbab564 | [
"MIT"
] | 80 | 2019-12-20T00:01:43.000Z | 2022-03-29T16:20:10.000Z | import numpy as np
import cv2
import os
import pandas as pd
from scipy import ndimage
from configparser import ConfigParser, MissingSectionHeaderError, NoSectionError
import glob
from simba.drop_bp_cords import getBpNames
from pylab import *
from simba.rw_dfs import *
import random
def plotsklearnresult(configini,videoSetting, frameSetting):
    """Overlay classifier results on the project's videos.

    Reads the machine_results files of the SimBA project described by
    ``configini``, draws body-part markers, animal hulls, per-behavior
    event timers and the current ensemble predictions onto every frame,
    and saves the result as an annotated .mp4 and/or individual .png
    frames under project_folder/frames/output/sklearn_results.

    Args:
        configini: path to the project_config.ini of a SimBA project.
        videoSetting: 1 to write one annotated video per input file.
        frameSetting: 1 to additionally write each annotated frame as png.
    """
    config = ConfigParser()
    configFile = str(configini)
    try:
        config.read(configFile)
    except MissingSectionHeaderError:
        print('ERROR: Not a valid project_config file. Please check the project_config.ini path.')
    # Read project layout and analysis settings from the ini file.
    csv_dir = config.get('General settings', 'csv_path')
    csv_dir_in = os.path.join(csv_dir, "machine_results")
    animalsNo = config.getint('General settings', 'animal_no')
    projectPath = config.get('General settings', 'project_path')
    frames_dir_out = config.get('Frame settings', 'frames_dir_out')
    frames_dir_out = os.path.join(frames_dir_out, 'sklearn_results')
    wfileType = config.get('General settings', 'workflow_file_type')
    poseConfSetting = config.get('create ensemble settings', 'pose_estimation_body_parts')
    if not os.path.exists(frames_dir_out):
        os.makedirs(frames_dir_out)
    counters_no = config.getint('SML settings', 'No_targets')
    vidInfPath = os.path.join(projectPath, 'logs', 'video_info.csv')
    # Multi-animal mode is active when the project lists animal IDs.
    mulltiAnimalIDList= config.get('Multi animal IDs', 'id_list')
    mulltiAnimalIDList = mulltiAnimalIDList.split(",")
    if mulltiAnimalIDList[0] != '':
        mulltiAnimalStatus = True
    if mulltiAnimalIDList[0] == '':
        mulltiAnimalStatus = False
    print(mulltiAnimalStatus)
    vidinfDf = pd.read_csv(vidInfPath)
    target_names, colorList_animal_1, colorList_animal_2, loopy = [], [], [], 0
    Xcols, Ycols, Pcols = getBpNames(configini)
    # Build a BGR color per body part for animal 1 ('hot' colormap) and,
    # if present, animal 2 ('winter' colormap).
    cmap = cm.get_cmap('hot', len(Xcols) + 1)
    for i in range(cmap.N):
        rgb = list((cmap(i)[:3]))
        rgb = [i * 255 for i in rgb]
        rgb.reverse()
        colorList_animal_1.append(rgb)
    if animalsNo >= 2:
        cmap = cm.get_cmap('winter', len(Xcols) + 1)
        for i in range(cmap.N):
            rgb = list((cmap(i)[:3]))
            rgb = [i * 255 for i in rgb]
            rgb.reverse()
            colorList_animal_2.append(rgb)
    filesFound = glob.glob(csv_dir_in + '/*.' + wfileType)
    print('Processing ' + str(len(filesFound)) + ' videos ...')
    ########### GET MODEL NAMES ###########
    for i in range(counters_no):
        currentModelNames = 'target_name_' + str(i + 1)
        currentModelNames = config.get('SML settings', currentModelNames)
        target_names.append(currentModelNames)
    # One distinct overlay color per classifier target.
    cmap = cm.get_cmap('Set1', counters_no + 3)
    colors = []
    for i in range(cmap.N):
        rgb = list((cmap(i)[:3]))
        rgb = [i * 255 for i in rgb]
        rgb.reverse()
        colors.append(rgb)
    ########### FIND PREDICTION COLUMNS ###########
    for currentVideo in filesFound:
        # Per-video event counters/timers, one slot per classifier.
        target_counters, target_timers = ([0] * counters_no, [0] * counters_no)
        loopy += 1
        CurrentVideoName = os.path.basename(currentVideo)
        if frameSetting == 1:
            videoFrameDir = os.path.join(frames_dir_out, CurrentVideoName.replace('.' + wfileType, ''))
            if not os.path.exists(videoFrameDir):
                os.makedirs(videoFrameDir)
        CurrentVideoRow = vidinfDf.loc[vidinfDf['Video'] == str(CurrentVideoName.replace('.' + wfileType, ''))]
        try:
            fps = int(CurrentVideoRow['fps'])
        except TypeError:
            print('Error: make sure all the videos that are going to be analyzed are represented in the project_folder/logs/video_info.csv file')
        # Load the machine results and normalize them to plain integers.
        currentDf = read_df(currentVideo, wfileType)
        currentDf = currentDf.fillna(0)
        currentDf = currentDf.astype(int)
        currentDf = currentDf.loc[:, ~currentDf.columns.str.contains('^Unnamed')]
        currentDf = currentDf.reset_index()
        # Interleave X/Y body-part column names and drop tail-end points.
        animalBpHeaderList, animalBpHeaderListY, animalBpHeaderListX = ([], [], [])
        animal1_BPsX, animal1_BPsY = (currentDf[Xcols], currentDf[Ycols])
        for i in range(len(animal1_BPsX.columns)):
            animalBpHeaderListX.append(animal1_BPsX.columns[i])
            animalBpHeaderListY.append(animal1_BPsY.columns[i])
            animalBpHeaderList.append(animal1_BPsX.columns[i])
            animalBpHeaderList.append(animal1_BPsY.columns[i])
        animalBpHeaderListX, animalBpHeaderListY, animalBpHeaderList = ([x for x in animalBpHeaderListX if "Tail_end" not in x], [x for x in animalBpHeaderListY if "Tail_end" not in x], [x for x in animalBpHeaderList if "Tail_end" not in x])
        # Split columns per animal: by configured ID substring for
        # user-defined pose configs, otherwise by '_1_'/'_2_' markers.
        if (animalsNo == 2) and (poseConfSetting == 'user_defined'):
            animal_1_BpHeaderList = [s for s in animalBpHeaderList if mulltiAnimalIDList[0] in s]
            animal_2_BpHeaderList = [s for s in animalBpHeaderList if mulltiAnimalIDList[1] in s]
        elif (animalsNo == 2) and (poseConfSetting != 'user_defined'):
            animal_1_BpHeaderList = [s for s in animalBpHeaderList if '_1_' in s]
            animal_2_BpHeaderList = [s for s in animalBpHeaderList if '_2_' in s]
        else:
            animal_1_BpHeaderList = animalBpHeaderList.copy()
        # Locate the source video (mp4 preferred, then avi).
        if os.path.exists(os.path.join(projectPath,'videos', CurrentVideoName.replace('.' +wfileType, '.mp4'))):
            videoPathName = os.path.join(projectPath,'videos', CurrentVideoName.replace('.' + wfileType, '.mp4'))
        elif os.path.exists(os.path.join(projectPath,'videos', CurrentVideoName.replace('.' + wfileType, '.avi'))):
            videoPathName = os.path.join(projectPath,'videos', CurrentVideoName.replace('.' + wfileType, '.avi'))
        else:
            print('Cannot locate video ' + str(CurrentVideoName.replace('.' + wfileType, '')) + ' in mp4 or avi format')
            break
        cap = cv2.VideoCapture(videoPathName)
        width, height, frames = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)), int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        outputFileName = os.path.join(frames_dir_out, CurrentVideoName)
        # Landscape videos are later rotated 90 degrees, so the writer is
        # always opened with portrait (height >= width) dimensions.
        if height < width:
            videoHeight, videoWidth = width, height
        if height >= width:
            videoHeight, videoWidth = height, width
        writer = cv2.VideoWriter(outputFileName.replace('.' + wfileType, '.mp4'), fourcc, fps, (videoWidth, videoHeight))
        # Scale marker radius, font and line spacing to the video size.
        mySpaceScale, myRadius, myResolution, myFontScale = 60, 12, 1500, 1.5
        maxResDimension = max(width, height)
        circleScale, fontScale, spacingScale = int(myRadius / (myResolution / maxResDimension)), float(myFontScale / (myResolution / maxResDimension)), int(mySpaceScale / (myResolution / maxResDimension))
        currRow = 0
        a = np.deg2rad(90)
        while (cap.isOpened()):
            ret, frame = cap.read()
            IDlabelLoc, rotationFlag = [], False
            if ret == True:
                # Draw the convex hull around each animal's body parts.
                if (animalsNo == 1) and (poseConfSetting != 'user_defined'):
                    currAnimal1 = currentDf.loc[currentDf.index[currRow], animalBpHeaderList]
                    currAnimal1 = np.array(currAnimal1).astype(int)
                    currAnimal1 = np.reshape(currAnimal1, (-1, 2))
                    M1polyglon_array_hull = cv2.convexHull((currAnimal1.astype(int)))
                    cv2.drawContours(frame, [M1polyglon_array_hull.astype(int)], 0, (255, 255, 255), 2)
                if (animalsNo == 2) and (poseConfSetting != 'user_defined'):
                    currAnimal1, currAnimal2 = (currentDf.loc[currentDf.index[currRow], animal_1_BpHeaderList],currentDf.loc[currentDf.index[currRow], animal_2_BpHeaderList])
                    currAnimal1, currAnimal2 = (np.array(currAnimal1).astype(int), np.array(currAnimal2).astype(int))
                    currAnimal1, currAnimal2 = (np.reshape(currAnimal1, (-1, 2)), np.reshape(currAnimal2, (-1, 2)))
                    M1polyglon_array_hull, M2polyglon_array_hull = (cv2.convexHull((currAnimal1.astype(int))), cv2.convexHull((currAnimal2.astype(int))))
                    cv2.drawContours(frame, [M1polyglon_array_hull.astype(int)], 0, (255, 255, 255), 2)
                    cv2.drawContours(frame, [M2polyglon_array_hull.astype(int)], 0, (255, 255, 255), 2)
                # Draw one colored circle per body part; remember the
                # 'Center' coordinates as anchor points for ID labels.
                for cords in range(len(animalBpHeaderListX)):
                    currXval = animal1_BPsX.loc[animal1_BPsX.index[currRow], animalBpHeaderListX[cords]]
                    currYval = animal1_BPsY.loc[animal1_BPsY.index[currRow], animalBpHeaderListY[cords]]
                    if animalBpHeaderListX[cords] in animal_1_BpHeaderList:
                        color = colorList_animal_1[cords]
                    elif animalBpHeaderListX[cords] in animal_2_BpHeaderList:
                        color = colorList_animal_2[cords]
                    cv2.circle(frame, (int(currXval), int(currYval)), circleScale, color, -1, lineType=cv2.LINE_AA)
                    if (mulltiAnimalStatus == True) and ('Center' in animalBpHeaderListX[cords]) and (animalBpHeaderListX[cords] in animal_1_BpHeaderList):
                        IDlabelLoc.append([currXval, currYval])
                    if (mulltiAnimalStatus == True) and ('Center' in animalBpHeaderListX[cords]) and (animalBpHeaderListX[cords] in animal_2_BpHeaderList):
                        IDlabelLoc.append([currXval, currYval])
                # Fall back to the first body part of each animal if no
                # 'Center' body part provided label anchors.
                if (not IDlabelLoc) and (mulltiAnimalStatus == True):
                    animal1_x, animal1_y = currentDf.at[currRow, animal_1_BpHeaderList[0]], currentDf.at[currRow, animal_1_BpHeaderList[1]]
                    animal2_x, animal2_y = currentDf.at[currRow, animal_2_BpHeaderList[0]], currentDf.at[currRow, animal_2_BpHeaderList[1]]
                    IDlabelLoc.append([animal1_x, animal1_y])
                    IDlabelLoc.append([animal2_x, animal2_y])
                # Rotate landscape frames to portrait; label coordinates
                # are rotated with the same 90-degree transform below.
                if height < width:
                    frame = ndimage.rotate(frame, 90)
                    rotationFlag = True
                if (mulltiAnimalStatus == True):
                    if rotationFlag == False:
                        cv2.putText(frame, str(mulltiAnimalIDList[0]), (IDlabelLoc[0][0], IDlabelLoc[0][1]), cv2.FONT_HERSHEY_COMPLEX, fontScale, (0, 255, 0), 4)
                        cv2.putText(frame, str(mulltiAnimalIDList[1]), (IDlabelLoc[1][0], IDlabelLoc[1][1]), cv2.FONT_HERSHEY_COMPLEX,fontScale, (0, 255, 0), 4)
                    if rotationFlag == True:
                        newX1, newY1 = abs(int(IDlabelLoc[0][0]*cos(a) + IDlabelLoc[0][1]*sin(a))), int(frame.shape[0] - int(((-IDlabelLoc[0][1])*cos(a) + IDlabelLoc[0][0]*sin(a))))
                        newX2, newY2 = abs(int(IDlabelLoc[1][0] * cos(a) + IDlabelLoc[1][1] * sin(a))), int(frame.shape[0] - int(((-IDlabelLoc[1][1]) * cos(a) + IDlabelLoc[1][0] * sin(a))))
                        cv2.putText(frame, str(mulltiAnimalIDList[0]), (newX1, newY1), cv2.FONT_HERSHEY_COMPLEX, fontScale, (0,0,255), 4)
                        cv2.putText(frame, str(mulltiAnimalIDList[1]), (newX2, newY2), cv2.FONT_HERSHEY_COMPLEX, fontScale, (255,0,0), 4)
                # draw event timers
                for b in range(counters_no):
                    target_timers[b] = (1 / fps) * target_counters[b]
                    target_timers[b] = round(target_timers[b], 2)
                cv2.putText(frame, str('Timers'), (10, ((height - height) + spacingScale)), cv2.FONT_HERSHEY_COMPLEX, fontScale, (0, 255, 0), 4)
                addSpacer = 2
                for k in range(counters_no):
                    cv2.putText(frame, (str(target_names[k]) + ' ' + str(target_timers[k]) + str('s')), (10, (height - height) + spacingScale * addSpacer), cv2.FONT_HERSHEY_SIMPLEX, fontScale, (255, 0, 0), 4)
                    addSpacer += 1
                cv2.putText(frame, str('ensemble prediction'), (10, (height - height) + spacingScale * addSpacer), cv2.FONT_HERSHEY_SIMPLEX, fontScale, (0, 255, 0), 4)
                addSpacer += 1
                # Print each behavior currently predicted and advance its
                # frame counter (used for the on-screen timers).
                for p in range(counters_no):
                    TargetVal = int(currentDf.loc[currRow, [target_names[p]]])
                    if TargetVal == 1:
                        cv2.putText(frame, str(target_names[p]), (10, (height - height) + spacingScale * addSpacer), cv2.FONT_HERSHEY_TRIPLEX, int(fontScale*1.8), colors[p], 4)
                        target_counters[p] += 1
                        addSpacer += 1
                if videoSetting == 1:
                    writer.write(frame)
                if frameSetting == 1:
                    frameName = os.path.join(videoFrameDir, str(currRow) + '.png')
                    cv2.imwrite(frameName, frame)
                if (videoSetting == 0) and (frameSetting == 0):
                    print('Error: Please choose video and/or frames.')
                    break
                currRow+=1
                print('Frame ' + str(currRow) + '/' + str(frames) + '. Video ' + str(loopy) + '/' + str(len(filesFound)))
            if frame is None:
                print('Video ' + str(os.path.basename(CurrentVideoName.replace('.' + wfileType, '.mp4'))) + ' saved.')
                cap.release()
                break
| 63.09434 | 242 | 0.606459 |
943a2febb11c31034fbd1b36e9903b0ec9618deb | 828 | py | Python | src/iBeatles/fitting/fitting_functions.py | ornlneutronimaging/iBeatles | 0a6ca1e18780cf08ad97b6cedede5a23f52bc953 | [
"MIT"
] | 3 | 2017-04-27T06:58:05.000Z | 2020-01-21T07:12:30.000Z | src/iBeatles/fitting/fitting_functions.py | ornlneutronimaging/iBeatles | 0a6ca1e18780cf08ad97b6cedede5a23f52bc953 | [
"MIT"
] | 99 | 2019-05-09T14:05:56.000Z | 2022-03-30T19:13:31.000Z | src/iBeatles/fitting/fitting_functions.py | ornlneutronimaging/iBeatles | 0a6ca1e18780cf08ad97b6cedede5a23f52bc953 | [
"MIT"
] | null | null | null | import math
import numpy as np
from scipy.special import erfc
def basic_fit(t, d_spacing, alpha, sigma, a1, a2):
    """Bragg-edge fit function (basic variant).

    Args:
        t: wavelength/time axis (scalar or ndarray).
        d_spacing: lattice spacing; the edge sits at t0 = 2 * d_spacing.
        alpha: edge decay width.
        sigma: edge (Gaussian) broadening width.
        a1: constant offset.
        a2: edge amplitude.

    Returns:
        Model evaluated at ``t``.

    NOTE(review): the exponential here uses ``+(t - t0)/alpha`` whereas
    advanced_fit uses ``-(t - t0)/alpha`` — confirm which sign is the
    intended edge profile.
    """
    t0 = 2 * d_spacing
    reduced = (t - t0) / (sigma * math.sqrt(2))
    edge = erfc(-reduced)
    decay = np.exp((t - t0) / alpha + (sigma * sigma) / (2 * alpha * alpha))
    tail = erfc(-reduced + sigma / alpha)
    return a1 + a2 * (edge - decay * tail)
def advanced_fit(t, d_spacing, alpha, sigma, a1, a2, a5, a6):
    """Bragg-edge fit with linear terms around the edge.

    Args:
        t: wavelength/time axis (scalar or ndarray).
        d_spacing: lattice spacing; the edge sits at t0 = 2 * d_spacing.
        alpha: edge decay width.
        sigma: edge (Gaussian) broadening width.
        a1: constant offset.
        a2, a5: slope coefficients of the linear terms below/above the edge.
        a6: pivot of the linear terms.

    Returns:
        Model evaluated at ``t``.
    """
    t0 = 2 * d_spacing
    reduced = (t - t0) / (sigma * math.sqrt(2))
    linear = a2 * (t - a6)
    linear_step = ((a5 - a2) / 2) * (t - a6)
    edge = erfc(-reduced)
    decay = np.exp(-((t - t0) / alpha) + ((sigma * sigma) / (2 * alpha * alpha)))
    tail = erfc(-reduced + sigma / alpha)
    return a1 + linear + linear_step * (edge - decay * tail)
| 29.571429 | 81 | 0.532609 |
780708de0e3bbf65713227b1fc3c753e64b65f88 | 7,753 | py | Python | mnist-collection/dcgan.py | krishnaw14/nnabla-examples | cddbb7ddf036628ae722273b13574d5ee5e9b87a | [
"Apache-2.0"
] | 1 | 2020-08-03T12:49:25.000Z | 2020-08-03T12:49:25.000Z | mnist-collection/dcgan.py | takuseno/nnabla-examples | 070d25078ad3d5458744dbfd390cdd926e20e573 | [
"Apache-2.0"
] | null | null | null | mnist-collection/dcgan.py | takuseno/nnabla-examples | 070d25078ad3d5458744dbfd390cdd926e20e573 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2017 Sony Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from six.moves import range
import numpy as np
import nnabla as nn
import nnabla.logger as logger
import nnabla.functions as F
import nnabla.parametric_functions as PF
import nnabla.solvers as S
import nnabla.utils.save as save
from args import get_args
from mnist_data import data_iterator_mnist
import os
def generator(z, maxh=256, test=False, output_hidden=False):
    """
    Building generator network which takes (B, Z, 1, 1) inputs and generates
    (B, 1, 28, 28) outputs (tanh, i.e. values in [-1, 1]).

    Args:
        z: latent input variable of shape (B, Z, 1, 1).
        maxh: feature maps of the widest (first) layer; later layers use
            maxh/2, maxh/4 and maxh/8 maps.
        test: if True, batch normalization runs with fixed statistics.
        output_hidden: if True, also return the hidden activations.
    """

    def bn(h):
        # Batch normalization (batch statistics while training).
        return PF.batch_normalization(h, batch_stat=not test)

    def upsample2(h, c):
        # Twice upsampling with deconvolution.
        return PF.deconvolution(h, c, kernel=(4, 4), pad=(1, 1),
                                stride=(2, 2), with_bias=False)

    assert maxh / 4 > 0
    with nn.parameter_scope("gen"):
        # (Z, 1, 1) --> (256, 4, 4)
        with nn.parameter_scope("deconv1"):
            h1 = F.elu(bn(PF.deconvolution(z, maxh, (4, 4), with_bias=False)))
        # (256, 4, 4) --> (128, 8, 8)
        with nn.parameter_scope("deconv2"):
            h2 = F.elu(bn(upsample2(h1, maxh / 2)))
        # (128, 8, 8) --> (64, 16, 16)
        with nn.parameter_scope("deconv3"):
            h3 = F.elu(bn(upsample2(h2, maxh / 4)))
        # (64, 16, 16) --> (32, 28, 28)
        with nn.parameter_scope("deconv4"):
            # Deconvolution with kernel=4, pad=3 and stride=2 behaves like
            # the inverse of the 28x28 -> 16x16 convolution, i.e. it maps
            # a 16 x 16 map to 28 x 28.
            h4 = F.elu(bn(PF.deconvolution(
                h3, maxh / 8, (4, 4), pad=(3, 3), stride=(2, 2),
                with_bias=False)))
        # (32, 28, 28) --> (1, 28, 28)
        with nn.parameter_scope("conv5"):
            image = F.tanh(PF.convolution(h4, 1, (3, 3), pad=(1, 1)))
    if output_hidden:
        return image, [h1, h2, h3, h4]
    return image
def discriminator(x, maxh=256, test=False, output_hidden=False):
    """
    Building discriminator network which maps a (B, 1, 28, 28) input to
    a (B, 1) logit.

    Args:
        x: input image variable of shape (B, 1, 28, 28).
        maxh: feature maps of the widest (last conv) layer; earlier layers
            use maxh/8, maxh/4 and maxh/2 maps.
        test: if True, batch normalization runs with fixed statistics.
        output_hidden: if True, also return the hidden activations.
    """

    def bn(h):
        # Batch normalization (batch statistics while training).
        return PF.batch_normalization(h, batch_stat=not test)

    def downsample2(h, c):
        return PF.convolution(h, c, (3, 3), pad=(1, 1), stride=(2, 2),
                              with_bias=False)

    assert maxh / 8 > 0
    with nn.parameter_scope("dis"):
        # (1, 28, 28) --> (32, 16, 16)
        with nn.parameter_scope("conv1"):
            h1 = F.elu(bn(PF.convolution(
                x, maxh / 8, (3, 3), pad=(3, 3), stride=(2, 2),
                with_bias=False)))
        # (32, 16, 16) --> (64, 8, 8)
        with nn.parameter_scope("conv2"):
            h2 = F.elu(bn(downsample2(h1, maxh / 4)))
        # (64, 8, 8) --> (128, 4, 4)
        with nn.parameter_scope("conv3"):
            h3 = F.elu(bn(downsample2(h2, maxh / 2)))
        # (128, 4, 4) --> (256, 4, 4)
        with nn.parameter_scope("conv4"):
            h4 = bn(PF.convolution(h3, maxh, (3, 3),
                                   pad=(1, 1), with_bias=False))
        # (256, 4, 4) --> (1,)
        with nn.parameter_scope("fc1"):
            logit = PF.affine(h4, 1)
    if output_hidden:
        return logit, [h1, h2, h3, h4]
    return logit
def train(args):
    """
    Main script.

    Builds the DCGAN training graph (generator plus discriminator with
    sigmoid cross-entropy GAN losses), sets up one Adam solver per
    sub-network and the nnabla monitors, then alternates generator and
    discriminator updates for ``args.max_iter`` iterations, periodically
    saving parameter snapshots under ``args.model_save_path``.
    """
    # Get context.
    from nnabla.ext_utils import get_extension_context
    logger.info("Running in %s" % args.context)
    ctx = get_extension_context(
        args.context, device_id=args.device_id, type_config=args.type_config)
    nn.set_default_context(ctx)
    # Create CNN network for both training and testing.
    # TRAIN
    # Fake path
    z = nn.Variable([args.batch_size, 100, 1, 1])
    fake = generator(z)
    fake.persistent = True # Not to clear at backward
    pred_fake = discriminator(fake)
    # Generator loss: make the discriminator predict "real" (label 1)
    # on generated images.
    loss_gen = F.mean(F.sigmoid_cross_entropy(
        pred_fake, F.constant(1, pred_fake.shape)))
    # Unlinked copy of `fake` so discriminator gradients do not
    # propagate back into the generator.
    fake_dis = fake.get_unlinked_variable(need_grad=True)
    fake_dis.need_grad = True # TODO: Workaround until v1.0.2
    pred_fake_dis = discriminator(fake_dis)
    # Discriminator loss on fakes: predict "fake" (label 0).
    loss_dis = F.mean(F.sigmoid_cross_entropy(
        pred_fake_dis, F.constant(0, pred_fake_dis.shape)))
    # Real path
    x = nn.Variable([args.batch_size, 1, 28, 28])
    pred_real = discriminator(x)
    # Discriminator loss on real images: predict "real" (label 1).
    loss_dis += F.mean(F.sigmoid_cross_entropy(pred_real,
                                               F.constant(1, pred_real.shape)))
    # Create Solver.
    # Separate Adam solvers, each owning only its sub-network's
    # parameters (selected via the "gen"/"dis" parameter scopes).
    solver_gen = S.Adam(args.learning_rate, beta1=0.5)
    solver_dis = S.Adam(args.learning_rate, beta1=0.5)
    with nn.parameter_scope("gen"):
        solver_gen.set_parameters(nn.get_parameters())
    with nn.parameter_scope("dis"):
        solver_dis.set_parameters(nn.get_parameters())
    # Create monitor.
    import nnabla.monitor as M
    monitor = M.Monitor(args.monitor_path)
    monitor_loss_gen = M.MonitorSeries("Generator loss", monitor, interval=10)
    monitor_loss_dis = M.MonitorSeries(
        "Discriminator loss", monitor, interval=10)
    monitor_time = M.MonitorTimeElapsed("Time", monitor, interval=100)
    monitor_fake = M.MonitorImageTile(
        "Fake images", monitor, normalize_method=lambda x: (x + 1) / 2.)
    data = data_iterator_mnist(args.batch_size, True)
    # Training loop.
    for i in range(args.max_iter):
        # Periodic parameter snapshots, one file per sub-network.
        if i % args.model_save_interval == 0:
            with nn.parameter_scope("gen"):
                nn.save_parameters(os.path.join(
                    args.model_save_path, "generator_param_%06d.h5" % i))
            with nn.parameter_scope("dis"):
                nn.save_parameters(os.path.join(
                    args.model_save_path, "discriminator_param_%06d.h5" % i))
        # Training forward
        image, _ = data.next()
        x.d = image / 255. - 0.5 # [0, 255] to [-1, 1]
        z.d = np.random.randn(*z.shape)
        # Generator update.
        solver_gen.zero_grad()
        loss_gen.forward(clear_no_need_grad=True)
        loss_gen.backward(clear_buffer=True)
        solver_gen.weight_decay(args.weight_decay)
        solver_gen.update()
        monitor_fake.add(i, fake)
        monitor_loss_gen.add(i, loss_gen.d.copy())
        # Discriminator update.
        solver_dis.zero_grad()
        loss_dis.forward(clear_no_need_grad=True)
        loss_dis.backward(clear_buffer=True)
        solver_dis.weight_decay(args.weight_decay)
        solver_dis.update()
        monitor_loss_dis.add(i, loss_dis.d.copy())
        monitor_time.add(i)
    # Final snapshot after the last iteration.
    with nn.parameter_scope("gen"):
        nn.save_parameters(os.path.join(
            args.model_save_path, "generator_param_%06d.h5" % i))
    with nn.parameter_scope("dis"):
        nn.save_parameters(os.path.join(
            args.model_save_path, "discriminator_param_%06d.h5" % i))
if __name__ == '__main__':
    # Monitor output and model checkpoints share the same directory.
    path = 'tmp.monitor.dcgan'
    train(get_args(monitor_path=path,
                   model_save_path=path,
                   max_iter=20000,
                   learning_rate=0.0002,
                   batch_size=64,
                   weight_decay=0.0001))
8bf374be507cfef048677ab826491844b028b56c | 5,399 | py | Python | deep_preprocessing/DATA.py | cc-ai/floods-gans | 787dc2a3c08483c68a687b4355c0f0f6f2711ab9 | [
"Apache-2.0"
] | 5 | 2019-05-07T15:14:58.000Z | 2020-11-23T00:21:50.000Z | deep_preprocessing/DATA.py | cc-ai/floods | 787dc2a3c08483c68a687b4355c0f0f6f2711ab9 | [
"Apache-2.0"
] | 13 | 2019-04-25T01:06:20.000Z | 2022-03-11T23:51:04.000Z | deep_preprocessing/DATA.py | cc-ai/floods | 787dc2a3c08483c68a687b4355c0f0f6f2711ab9 | [
"Apache-2.0"
] | 4 | 2019-04-24T18:06:10.000Z | 2020-07-15T18:02:56.000Z | import tensorflow as tf
import numpy as np
import random, os
from .CONVNET import *
# Configure
# Global experiment configuration. The checkpoint paths depend on earlier
# entries, so they are filled in after the literal.
FLAGS = {
    'num_gpu': '1',
    'num_exp': 577,
    'num_epoch': 134,
    'method': 'Unsupervised on MIT-Adobe-5K',
    'mode_use_debug': False,
    # one of: var_scale, rand_uniform, rand_normal, truncated_normal
    'netG_init_method': 'var_scale',
    'netG_init_weight': 1e-3,
    'netG_base_learning_rate': 1e-5,
    'format_log_step': '%.3f',
    'root_path': os.path.dirname(__file__),
}
_epoch_tag = FLAGS['format_log_step'] % FLAGS['num_epoch']
FLAGS['load_model_path'] = FLAGS['root_path'] + '/model/' + '%s.ckpt' % _epoch_tag
FLAGS['load_model_path_new'] = FLAGS['root_path'] + '/model/' + '%s-new.ckpt' % _epoch_tag
FLAGS['data_output_ext'] = '.png'
FLAGS['data_input_dtype'] = np.uint8
FLAGS['data_compute_dtype'] = np.float32
FLAGS['data_image_size'] = 512
FLAGS['data_image_channel'] = 3
FLAGS['process_random_seed'] = 2
FLAGS['folder_input'] = '../folder_input/'
FLAGS['folder_output'] = '../folder_output/'

# Seed Python's RNG for reproducible preprocessing.
random.seed(FLAGS['process_random_seed'])
class DataFlowMat(object):
    # Bundle of one int32 placeholder per batch entry, presumably crop
    # rectangle (`rect`) and rotation (`rot`) metadata — confirm at call
    # site. NOTE(review): placeholder creation order affects TF's
    # auto-generated node names, so the two comprehensions stay as-is.
    def __init__(self, b):
        self.rect = [tf.placeholder(tf.int32) for _ in range(b)]
        self.rot = [tf.placeholder(tf.int32) for _ in range(b)]
class DataFlow(object):
    # Input pipeline endpoints for a single-image batch.
    def __init__(self, b=1):
        # Raw uint8 image placeholder, NHWC, fixed 512x512x3 (per FLAGS).
        self.input1_src = tf.placeholder(tf.as_dtype(FLAGS['data_input_dtype']), shape=[b, FLAGS['data_image_size'], FLAGS['data_image_size'], FLAGS['data_image_channel']])
        # Same image cast to the compute dtype and scaled to [0, 1].
        self.input1 = tf.cast(self.input1_src, FLAGS['data_compute_dtype']) / self.input1_src.dtype.max
        self.mat1 = DataFlowMat(b)
def flatten_list(xs):
    """Recursively flatten nested lists/tuples into one flat list.

    A non-list, non-tuple argument is wrapped in a one-element list.
    """
    if not isinstance(xs, (list, tuple)):
        return [xs]
    flat = []
    for item in xs:
        flat.extend(flatten_list(item))
    return flat
class NetInfo(object):
    """Static architecture description for the generator network.

    For a name starting with ``"netG"`` this builds three layer lists out of
    the CONVNET layer factories and stores them in ``self.CONV_NETS``:
    a strided encoder (net_1), a global-feature head (net_2) and a decoder
    with upsampling and concat skip connections (net_3). Any other name is
    rejected.
    """
    def __init__(self, name):
        self.CONV_NETS = []
        seed = FLAGS['process_random_seed']
        ich = FLAGS['data_image_channel']
        if name[:4] == "netG":
            init_w = FLAGS['netG_init_weight']
            # Pick the weight initializer shared by every conv layer below.
            if FLAGS['netG_init_method'] == "var_scale":
                initializer = tf.contrib.layers.variance_scaling_initializer(init_w, seed=seed)
            elif FLAGS['netG_init_method'] == "rand_uniform":
                initializer = tf.random_uniform_initializer(-init_w*np.sqrt(3), init_w*np.sqrt(3), seed=seed)
            elif FLAGS['netG_init_method'] == "rand_normal":
                initializer = tf.random_normal_initializer(mean=0., stddev=init_w, seed=seed)
            elif FLAGS['netG_init_method'] == "truncated_normal":
                initializer = tf.truncated_normal_initializer(mean=0., stddev=init_w, seed=seed)
            else:
                # Fail fast: previously an unknown method left `initializer`
                # unbound and crashed later with a confusing NameError.
                raise ValueError("unsupported netG_init_method: %s" % FLAGS['netG_init_method'])
            nonlinearity = selu_layer() #prelu_layer()
            norm = bn_layer(True, True)
            act = [nonlinearity, norm]
            # net_1: encoder — four stride-2 convs (512 -> 32 spatial).
            net_1 = dict(net_name='%s_1' % name, trainable=True)
            net_1['input_index'] = 0
            net_1['layers'] = flatten_list([
                conv_layer( 3, 1, 16, "SYMMETRIC", initializer), act,
                conv_layer( 5, 2, 32, "SYMMETRIC", initializer), act,
                conv_layer( 5, 2, 64, "SYMMETRIC", initializer), act,
                conv_layer( 5, 2, 128, "SYMMETRIC", initializer), act,
                conv_layer( 5, 2, 128, "SYMMETRIC", initializer), act,
            ])
            self.CONV_NETS.append(net_1)
            # net_2: global-feature head; input_index 15 presumably refers to
            # the encoder's last activation — confirm against the CONVNET
            # graph builder.
            net_2 = dict(net_name='%s_2' % name, trainable=True)
            net_2['input_index'] = 15
            net_2['layers'] = flatten_list([
                conv_layer( 5, 2, 128, "SYMMETRIC", initializer), act,
                conv_layer( 5, 2, 128, "SYMMETRIC", initializer), act,
                conv_layer( 8, 1, 128, None, initializer), nonlinearity,
                conv_layer( 1, 1, 128, None, initializer)
            ])
            self.CONV_NETS.append(net_2)
            # net_3: decoder — nearest-neighbor upsampling with concat skips
            # back to earlier layer indices; final conv emits `ich` channels
            # and res_layer adds the residual to the input image.
            net_3 = dict(net_name='%s_3' % name, trainable=True)
            net_3['input_index'] = 15
            net_3['layers'] = flatten_list([
                conv_layer( 3, 1, 128, "SYMMETRIC", initializer), global_concat_layer(24),
                conv_layer( 1, 1, 128, "SYMMETRIC", initializer), act,
                conv_layer( 3, 1, 128, "SYMMETRIC", initializer), resize_layer(2, tf.image.ResizeMethod.NEAREST_NEIGHBOR), concat_layer(10), act,
                conv_layer( 3, 1, 128, "SYMMETRIC", initializer), resize_layer(2, tf.image.ResizeMethod.NEAREST_NEIGHBOR), concat_layer( 7), act,
                conv_layer( 3, 1, 64, "SYMMETRIC", initializer), resize_layer(2, tf.image.ResizeMethod.NEAREST_NEIGHBOR), concat_layer( 4), act,
                conv_layer( 3, 1, 32, "SYMMETRIC", initializer), resize_layer(2, tf.image.ResizeMethod.NEAREST_NEIGHBOR), concat_layer( 1), act,
                conv_layer( 3, 1, 16, "SYMMETRIC", initializer), act,
                conv_layer( 3, 1, ich, "SYMMETRIC", initializer), res_layer(0, [0, 1, 2])
                #, clip_layer()
            ])
            self.CONV_NETS.append(net_3)
        else:
            # NOTE: `assert` is stripped under `python -O`; kept as-is because
            # callers may rely on AssertionError here.
            assert False, 'net name error'
        self.architecture_log = []
        self.weights = []
        self.parameter_names = []
        self.name = name
        self.variable_scope_name = name + '_var_scope'
| 49.081818 | 172 | 0.601963 |
fe496b76441bdf4586c12ac6fd10b85345584712 | 6,269 | py | Python | basicsr/data/vimeo90k_dataset.py | yuangan/Simple-SR | 630d2f9441b116620af88ff882eca4673dedc047 | [
"MIT"
] | null | null | null | basicsr/data/vimeo90k_dataset.py | yuangan/Simple-SR | 630d2f9441b116620af88ff882eca4673dedc047 | [
"MIT"
] | null | null | null | basicsr/data/vimeo90k_dataset.py | yuangan/Simple-SR | 630d2f9441b116620af88ff882eca4673dedc047 | [
"MIT"
] | null | null | null | import mmcv
import numpy as np
import random
import torch
from pathlib import Path
from torch.utils import data as data
from basicsr.data.transforms import augment, paired_random_crop, totensor
from basicsr.utils import FileClient, get_root_logger
def normalize_list(img_results):
    """Map each element from the [0, 1] range to [-1, 1], element-wise."""
    return [(item - 0.5) * 2 for item in img_results]
class Vimeo90KDataset(data.Dataset):
    """Vimeo90K dataset for training.

    The keys are generated from a meta info txt file
    (basicsr/data/meta_info/meta_info_Vimeo90K_train_GT.txt). Each line
    contains: 1. clip name; 2. frame number; 3. image shape, separated by a
    white space, e.g.::

        00001/0001 7 (256,448,3)
        00001/0002 7 (256,448,3)

    Key examples: "00001/0001"
    GT (gt): Ground-Truth;
    LQ (lq): Low-Quality, e.g., low-resolution/blurry/noisy/compressed frames.

    The neighboring frame list for different num_frame::

        num_frame | frame list
                1 | 4
                3 | 3,4,5
                5 | 2,3,4,5,6
                7 | 1,2,3,4,5,6,7

    Args:
        opt (dict): Config for train dataset. It contains the following keys:
            dataroot_gt (str): Data root path for gt.
            dataroot_lq (str): Data root path for lq.
            meta_info_file (str): Path for meta information file.
            io_backend (dict): IO backend type and other kwarg.
            num_frame (int): Window size for input frames.
            gt_size (int): Cropped patched size for gt patches.
            random_reverse (bool): Random reverse input frames.
            use_flip (bool): Use horizontal flips.
            use_rot (bool): Use rotation (use vertical flip and transposing h
                and w for implementation).
            scale (bool): Scale, which will be added automatically.
    """
    def __init__(self, opt):
        super(Vimeo90KDataset, self).__init__()
        self.opt = opt
        self.gt_root, self.lq_root = Path(opt['dataroot_gt']), Path(
            opt['dataroot_lq'])
        # Keys are the clip/sequence ids (first token of each meta-info line).
        with open(opt['meta_info_file'], 'r') as fin:
            self.keys = [line.split(' ')[0] for line in fin]
        # file client (io backend) — created lazily in __getitem__ so each
        # dataloader worker gets its own client.
        self.file_client = None
        self.io_backend_opt = opt['io_backend']
        self.is_lmdb = False
        if self.io_backend_opt['type'] == 'lmdb':
            self.is_lmdb = True
            self.io_backend_opt['db_paths'] = [self.lq_root, self.gt_root]
            self.io_backend_opt['client_keys'] = ['lq', 'gt']
        # indices of input images, centered in the 7-frame septuplet
        self.neighbor_list = [
            i + (9 - opt['num_frame']) // 2 for i in range(opt['num_frame'])
        ]
        # temporal augmentation configs
        # NOTE(review): hard-coded off here, ignoring any opt['random_reverse'].
        self.random_reverse = False
        logger = get_root_logger()
        logger.info(f'Random reverse is {self.random_reverse}.')
    def __getitem__(self, index):
        # Lazy per-worker file client creation (pops 'type' on first use).
        if self.file_client is None:
            self.file_client = FileClient(
                self.io_backend_opt.pop('type'), **self.io_backend_opt)
        # random reverse (dead code while self.random_reverse is False)
        if self.random_reverse and random.random() < 0.5:
            self.neighbor_list.reverse()
        # shuffle neighbor list to expand train dataset
        # NOTE(review): this mutates self.neighbor_list in place; the 4th
        # shuffled entry below becomes the GT frame.
        random.shuffle(self.neighbor_list)
        scale = self.opt['scale']
        gt_size = self.opt['gt_size']
        key = self.keys[index]
        clip, seq = key.split('/')  # key example: 00001/0001
        # get the GT frame (im4.png)
        # if self.is_lmdb:
        #     img_gt_path = f'{key}/im4'
        # else:
        #     img_gt_path = self.gt_root / clip / seq / 'im4.png'
        # img_bytes = self.file_client.get(img_gt_path, 'gt')
        # img_gt = mmcv.imfrombytes(img_bytes).astype(np.float32) / 255.
        # get the neighboring LQ frames
        img_lqs = []
        count = 0
        for neighbor in self.neighbor_list:
            count += 1
            # The 4th frame in the (shuffled) list doubles as the GT frame.
            if count == 4:
                img_gt_path = self.gt_root / clip / seq / f'im{neighbor}.png'
                img_bytes = self.file_client.get(img_gt_path, 'gt')
                img_gt = mmcv.imfrombytes(img_bytes).astype(np.float32) / 255.
            if self.is_lmdb:
                img_lq_path = f'{clip}/{seq}/im{neighbor}'
            else:
                img_lq_path = self.lq_root / clip / seq / f'im{neighbor}.png'
            img_bytes = self.file_client.get(img_lq_path, 'lq')
            img_lq = mmcv.imfrombytes(img_bytes).astype(np.float32) / 255.
            img_lqs.append(img_lq)
        # randomly crop (disabled)
        # img_gt, img_lqs = paired_random_crop(img_gt, img_lqs, gt_size, scale,
        #                                      img_gt_path)
        # augmentation - flip, rotate (GT is appended so it is augmented
        # consistently with the LQ frames)
        img_lqs.append(img_gt)
        img_results = augment(img_lqs, self.opt['use_flip'],
                              self.opt['use_rot'])
        img_results = totensor(img_results)
        # normalize image to [-1, 1]
        img_results = normalize_list(img_results)
        img_lqs = torch.stack(img_results[0:-1], dim=0)
        img_gt = img_results[-1]
        # img_lqs: (t, c, h, w)
        # img_gt: (c, h, w)
        # key: str
        ### get 18
        # ztm = np.load(path_flow,allow_pickle=True)
        # result_7 = []
        # for test in ztm:
        #     test = np.transpose(test, [2,1,0])
        #     width = test.shape[1]
        #     height = test.shape[2]
        #     ndarray=np.pad(test,((0,0),(1,1),(1,1)),'constant', constant_values=0)
        #     result=[]
        #     for i in range(0,3):
        #         for j in range(0,3):
        #             result.append(ndarray[:,i:i+448,j:j+448])
        #     result = np.array(result).reshape(18,448,448)
        #     #result = np.repeat(result,8,axis=0)
        #     result_7.append(np.array(result))
        # save_path = path_flow.replace('flow.npy','flow_7.npy')
        # np.save(save_path,np.array(result_7))
        ### get18
        #return np.array(result_7)
        return {'lq': img_lqs, 'gt': img_gt, 'key': key}
    def __len__(self):
        return len(self.keys)
| 36.236994 | 85 | 0.555431 |
0112277549548a4c9aa5f11d0ce4ee46af403330 | 3,560 | py | Python | bindings/python/ensmallen/datasets/string/burkholderiaspccge1002.py | AnacletoLAB/ensmallen_graph | b2c1b18fb1e5801712852bcc239f239e03076f09 | [
"MIT"
] | 5 | 2021-02-17T00:44:45.000Z | 2021-08-09T16:41:47.000Z | bindings/python/ensmallen/datasets/string/burkholderiaspccge1002.py | AnacletoLAB/ensmallen_graph | b2c1b18fb1e5801712852bcc239f239e03076f09 | [
"MIT"
] | 18 | 2021-01-07T16:47:39.000Z | 2021-08-12T21:51:32.000Z | bindings/python/ensmallen/datasets/string/burkholderiaspccge1002.py | AnacletoLAB/ensmallen | b2c1b18fb1e5801712852bcc239f239e03076f09 | [
"MIT"
] | 3 | 2021-01-14T02:20:59.000Z | 2021-08-04T19:09:52.000Z | """
This file offers the methods to automatically retrieve the graph Burkholderia sp. CCGE1002.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def BurkholderiaSpCcge1002(
    directed: bool = False,
    preprocess: bool = True,
    load_nodes: bool = True,
    verbose: int = 2,
    cache: bool = True,
    cache_path: str = "graphs/string",
    version: str = "links.v11.5",
    **additional_graph_kwargs: Dict
) -> Graph:
    """Return a new instance of the Burkholderia sp. CCGE1002 graph.

    The graph is automatically retrieved from the STRING repository.

    Parameters
    -------------------
    directed: bool = False
        Whether to load the graph as directed or undirected. By default false.
    preprocess: bool = True
        Whether to preprocess the graph to be loaded in optimal time and memory.
    load_nodes: bool = True
        Whether to load the nodes vocabulary or treat the nodes simply as a
        numeric range.
    verbose: int = 2
        Whether to show loading bars during the retrieval and building of the
        graph.
    cache: bool = True
        Whether to use cache, i.e. download files only once and preprocess
        them only once.
    cache_path: str = "graphs"
        Where to store the downloaded graphs.
    version: str = "links.v11.5"
        The version of the graph to retrieve. Available versions are:
        homology.v11.0, homology.v11.5, physical.links.v11.0,
        physical.links.v11.5, links.v11.0, links.v11.5.
    additional_graph_kwargs: Dict
        Additional graph kwargs.

    Returns
    -----------------------
    Instance of Burkholderia sp. CCGE1002 graph.

    References
    ---------------------
    If you use this data, please cite STRING v11: Szklarczyk et al.,
    "STRING v11: protein-protein association networks with increased
    coverage...", Nucleic Acids Research 47(D1):D607-D613, 2019,
    Oxford University Press.
    """
    # Build the retriever object first, then call it to materialize the graph.
    retriever = AutomaticallyRetrievedGraph(
        graph_name="BurkholderiaSpCcge1002",
        repository="string",
        version=version,
        directed=directed,
        preprocess=preprocess,
        load_nodes=load_nodes,
        verbose=verbose,
        cache=cache,
        cache_path=cache_path,
        additional_graph_kwargs=additional_graph_kwargs
    )
    return retriever()
| 32.962963 | 223 | 0.676966 |
be6e33a93259028fd313a8b58b6096a1b1311577 | 257 | py | Python | src/asm/layoutpage/layouts.py | ctheune/assembly-cms | 20e000373fc30d9a14cb5dc882499b5eed1d86ee | [
"ZPL-2.1"
] | null | null | null | src/asm/layoutpage/layouts.py | ctheune/assembly-cms | 20e000373fc30d9a14cb5dc882499b5eed1d86ee | [
"ZPL-2.1"
] | null | null | null | src/asm/layoutpage/layouts.py | ctheune/assembly-cms | 20e000373fc30d9a14cb5dc882499b5eed1d86ee | [
"ZPL-2.1"
] | null | null | null | import asm.layoutpage.interfaces
import asm.layoutpage.layoutpage
import grok
class DefaultLayout(asm.layoutpage.layoutpage.Layout, grok.GlobalUtility):
grok.provides(asm.layoutpage.interfaces.ILayoutPage)
grok.name('default')
layout = '${}'
| 23.363636 | 74 | 0.774319 |
96b12e5adbcb6a48a88ebeb44b88f5063e13a4d8 | 3,553 | py | Python | downloads/views.py | cedadev/download-stats | 3d18b08ce239e82e53c5a9bd4dd77b35a1f040bc | [
"BSD-3-Clause"
] | null | null | null | downloads/views.py | cedadev/download-stats | 3d18b08ce239e82e53c5a9bd4dd77b35a1f040bc | [
"BSD-3-Clause"
] | 6 | 2019-08-29T10:35:09.000Z | 2021-04-07T12:24:37.000Z | downloads/views.py | cedadev/access-stats | 3d18b08ce239e82e53c5a9bd4dd77b35a1f040bc | [
"BSD-3-Clause"
] | 1 | 2018-11-01T16:31:16.000Z | 2018-11-01T16:31:16.000Z | from django.shortcuts import render
from django.http import HttpResponse, JsonResponse, HttpResponseNotFound
from django.views.generic import TemplateView
from django.conf import settings
from downloads.forms import FilterForm
from common.json_maker_factory import JsonMakerFactory
from common.file_response_factory import FileResponseFactory
default_404_response = HttpResponseNotFound("<h1>404 - Not found</h1>")
class IndexView(TemplateView):
    """Landing page showing the download-stats filter form."""
    template_name = "downloads/index.html"

    def get(self, request):
        # Re-populate the form from the query string when filters were submitted.
        form = FilterForm(request.GET) if request.GET else FilterForm()
        context = {"form": form, "public": settings.PUBLIC_SITE}
        return render(request, self.template_name, context)
class JsonView(TemplateView):
    """Serve analysis results as pretty-printed JSON."""

    def get(self, request, analysis_method):
        form = FilterForm(request.GET)
        if not (self.valid_analysis_method(analysis_method) and form.is_valid()):
            return default_404_response
        maker = JsonMakerFactory().get(form.cleaned_data, analysis_method)
        return JsonResponse(maker.json(), json_dumps_params={"indent": 2})

    def valid_analysis_method(self, analysis_method):
        # The public site hides the aggregate-users and trace endpoints.
        allowed = {"methods", "timeline", "dataset", "user"}
        if not settings.PUBLIC_SITE:
            allowed |= {"users", "trace"}
        return analysis_method in allowed
class TxtView(TemplateView):
    """Serve trace logs as a plain-text download (private site only)."""

    def generate_text_file(self, filters, analysis_method):
        # One log entry per line, each line newline-terminated.
        json_data = JsonMakerFactory().get(filters, analysis_method).json()
        return "".join(f"{log}\n" for log in json_data["logs"])

    def get(self, request, analysis_method):
        form = FilterForm(request.GET)
        if not (self.valid_analysis_method(analysis_method) and form.is_valid()):
            return default_404_response
        return HttpResponse(self.generate_text_file(form.cleaned_data, analysis_method), content_type="text/plain")

    def valid_analysis_method(self, analysis_method):
        # Text export exists only for "trace", and never on the public site.
        if not settings.PUBLIC_SITE and analysis_method == "trace":
            return True
        return False
class CsvView(TemplateView):
    """Serve analysis results as a downloadable CSV file."""

    def get(self, request, analysis_method):
        form = FilterForm(request.GET)
        if self.valid_analysis_method(analysis_method) and form.is_valid():
            return FileResponseFactory().get(form.cleaned_data, analysis_method).make_csv()
        return default_404_response

    def valid_analysis_method(self, analysis_method):
        # "users" export is private-site only.
        allowed = {"methods", "timeline", "dataset"}
        if not settings.PUBLIC_SITE:
            allowed.add("users")
        return analysis_method in allowed
class XlsxView(TemplateView):
    """Serve analysis results as a downloadable XLSX spreadsheet."""

    def get(self, request, analysis_method):
        form = FilterForm(request.GET)
        if self.valid_analysis_method(analysis_method) and form.is_valid():
            return FileResponseFactory().get(form.cleaned_data, analysis_method).make_xlsx()
        return default_404_response

    def valid_analysis_method(self, analysis_method):
        # Same rule as CsvView: "users" export is private-site only.
        allowed = {"methods", "timeline", "dataset"}
        if not settings.PUBLIC_SITE:
            allowed.add("users")
        return analysis_method in allowed
| 38.204301 | 127 | 0.667042 |
2f062e88c1a50959d1ea4ba042340560ed997523 | 14,224 | py | Python | venv/lib/python3.8/site-packages/telegram/utils/helpers.py | anthonyricci123/python-telegram-bot-heroku | 96d75c1eca1f14fe68c81bf0b4cf30a34677dcde | [
"MIT"
] | 2 | 2021-09-17T10:55:14.000Z | 2021-09-17T10:55:38.000Z | telegram/amazon_bot/lib/python3.7/site-packages/telegram/utils/helpers.py | neerajp99/twitter_update_bot | 116c95f477af354e6016cbc4477e1d3bdd2ff484 | [
"MIT"
] | null | null | null | telegram/amazon_bot/lib/python3.7/site-packages/telegram/utils/helpers.py | neerajp99/twitter_update_bot | 116c95f477af354e6016cbc4477e1d3bdd2ff484 | [
"MIT"
] | 1 | 2021-05-31T18:23:02.000Z | 2021-05-31T18:23:02.000Z | #!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2020
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""This module contains helper functions."""
import datetime as dtm # dtm = "DateTime Module"
import time
from collections import defaultdict
from numbers import Number
try:
import ujson as json
except ImportError:
import json
from html import escape
import re
import signal
# From https://stackoverflow.com/questions/2549939/get-signal-names-from-numbers-in-python
_signames = {v: k
for k, v in reversed(sorted(vars(signal).items()))
if k.startswith('SIG') and not k.startswith('SIG_')}
def get_signal_name(signum):
"""Returns the signal name of the given signal number."""
return _signames[signum]
def escape_markdown(text, version=1, entity_type=None):
    """
    Helper function to escape telegram markup symbols.

    Args:
        text (:obj:`str`): The text.
        version (:obj:`int` | :obj:`str`): Use to specify the version of telegrams Markdown.
            Either ``1`` or ``2``. Defaults to ``1``.
        entity_type (:obj:`str`, optional): For the entity types ``PRE``, ``CODE`` and the link
            part of ``TEXT_LINKS``, only certain characters need to be escaped in ``MarkdownV2``.
            See the official API documentation for details. Only valid in combination with
            ``version=2``, will be ignored else.

    Returns:
        :obj:`str`: The escaped text.

    Raises:
        ValueError: If ``version`` is neither ``1`` nor ``2``.
    """
    # Raw strings: the old non-raw literals ('\*', '\[', ...) relied on
    # invalid escape sequences (DeprecationWarning) surviving unchanged.
    if int(version) == 1:
        escape_chars = r'_*`['
    elif int(version) == 2:
        if entity_type in ('pre', 'code'):
            # Inside pre/code entities only backslash and backtick are special.
            escape_chars = r'\`'
        elif entity_type == 'text_link':
            # Inside the URL part of a text link only backslash and ')' are special.
            escape_chars = r'\)'
        else:
            # Full MarkdownV2 special set, now including the backslash itself,
            # which the previous default set forgot to escape.
            escape_chars = r'\_*[]()~`>#+-=|{}.!'
    else:
        raise ValueError('Markdown version must be either 1 or 2!')

    # re.escape keeps the character class well-formed regardless of contents.
    return re.sub('([{}])'.format(re.escape(escape_chars)), r'\\\1', text)
# -------- date/time related helpers --------
# TODO: add generic specification of UTC for naive datetimes to docs
def _datetime_to_float_timestamp(dt_obj):
"""Converts a datetime object to a float timestamp (with sub-second precision).
If the datetime object is timezone-naive, it is assumed to be in UTC."""
if dt_obj.tzinfo is None:
dt_obj = dt_obj.replace(tzinfo=dtm.timezone.utc)
return dt_obj.timestamp()
def to_float_timestamp(t, reference_timestamp=None):
    """
    Converts a given time object to a float POSIX timestamp.

    Used to convert different time specifications to a common format. The time object
    can be relative (i.e. indicate a time increment, or a time of day) or absolute.
    Any objects from the :class:`datetime` module that are timezone-naive will be assumed
    to be in UTC.

    Args:
        t (int | float | datetime.timedelta | datetime.datetime | datetime.time):
            Time value to convert. The semantics of this parameter will depend on its type:

            * :obj:`int` or :obj:`float` will be interpreted as "seconds from
              ``reference_timestamp``"
            * :obj:`datetime.timedelta` will be interpreted as
              "time increment from ``reference_timestamp``"
            * :obj:`datetime.datetime` will be interpreted as an absolute date/time value
            * :obj:`datetime.time` will be interpreted as a specific time of day

        reference_timestamp (float, optional): POSIX timestamp that indicates the absolute time
            from which relative calculations are to be performed (e.g. when ``t`` is given as an
            :obj:`int`, indicating "seconds from ``reference_timestamp``"). Defaults to now (the
            time at which this function is called).

            If ``t`` is given as an absolute representation of date & time (i.e. a
            ``datetime.datetime`` object), ``reference_timestamp`` is not relevant and so its
            value should be ``None``. If this is not the case, a ``ValueError`` will be raised.

    Returns:
        float: ``reference_timestamp + t`` for a time increment (:obj:`int`, :obj:`float`
        or :obj:`datetime.timedelta`); the equivalent POSIX timestamp for an absolute
        :obj:`datetime.datetime`; the nearest future occurrence of that time of day for a
        :obj:`datetime.time`.

    Raises:
        TypeError: if ``t``'s type is not one of those described above. Note that, unlike
            :func:`to_timestamp`, ``None`` is *not* passed through — it raises ``TypeError``
            as well.
        ValueError: if ``t`` is an absolute ``datetime.datetime`` while
            ``reference_timestamp`` is not ``None``.
    """
    if reference_timestamp is None:
        reference_timestamp = time.time()
    elif isinstance(t, dtm.datetime):
        raise ValueError('t is an (absolute) datetime while reference_timestamp is not None')
    # NOTE: bools are instances of Number, so True/False behave like 1/0 here.
    if isinstance(t, dtm.timedelta):
        return reference_timestamp + t.total_seconds()
    elif isinstance(t, Number):
        return reference_timestamp + t
    elif isinstance(t, dtm.time):
        # Resolve "time of day" against the reference moment, in t's timezone
        # if it has one, else in UTC.
        if t.tzinfo is not None:
            reference_dt = dtm.datetime.fromtimestamp(reference_timestamp, tz=t.tzinfo)
        else:
            reference_dt = dtm.datetime.utcfromtimestamp(reference_timestamp)  # assume UTC
        reference_date = reference_dt.date()
        reference_time = reference_dt.timetz()
        if reference_time > t:  # if the time of day has passed today, use tomorrow
            reference_date += dtm.timedelta(days=1)
        return _datetime_to_float_timestamp(dtm.datetime.combine(reference_date, t))
    elif isinstance(t, dtm.datetime):
        return _datetime_to_float_timestamp(t)
    raise TypeError('Unable to convert {} object to timestamp'.format(type(t).__name__))
def to_timestamp(dt_obj, reference_timestamp=None):
    """
    Wrapper over :func:`to_float_timestamp` which returns an integer (the float value truncated
    down to the nearest integer).

    ``None`` is passed through unchanged. See the documentation for
    :func:`to_float_timestamp` for more details.
    """
    if dt_obj is None:
        return None
    return int(to_float_timestamp(dt_obj, reference_timestamp))
def from_timestamp(unixtime, tzinfo=dtm.timezone.utc):
    """
    Converts an (integer) unix timestamp to a timezone aware datetime object.

    ``None`` s are left alone (i.e. ``from_timestamp(None)`` is ``None``).

    Args:
        unixtime (int): integer POSIX timestamp
        tzinfo (:obj:`datetime.tzinfo`, optional): The timezone the timestamp is to be
            converted to. Defaults to UTC. If ``None``, a *naive* datetime in UTC is
            returned instead.

    Returns:
        :obj:`datetime.datetime`: the converted datetime, or ``None`` if ``unixtime``
        is ``None``.
    """
    if unixtime is None:
        return None
    if tzinfo is None:
        return dtm.datetime.utcfromtimestamp(unixtime)
    return dtm.datetime.fromtimestamp(unixtime, tz=tzinfo)
# -------- end --------
def mention_html(user_id, name):
    """
    Args:
        user_id (:obj:`int`) The user's id which you want to mention.
        name (:obj:`str`) The name the mention is showing.

    Returns:
        :obj:`str`: The inline mention for the user as html, or :obj:`None` when
        ``user_id`` is not an :obj:`int`.
    """
    if not isinstance(user_id, int):
        return None
    return '<a href="tg://user?id={}">{}</a>'.format(user_id, escape(name))
def mention_markdown(user_id, name, version=1):
    """
    Args:
        user_id (:obj:`int`) The user's id which you want to mention.
        name (:obj:`str`) The name the mention is showing.
        version (:obj:`int` | :obj:`str`): Use to specify the version of telegrams Markdown.
            Either ``1`` or ``2``. Defaults to ``1``

    Returns:
        :obj:`str`: The inline mention for the user as markdown, or :obj:`None` when
        ``user_id`` is not an :obj:`int`.
    """
    if not isinstance(user_id, int):
        return None
    return '[{}](tg://user?id={})'.format(escape_markdown(name, version=version), user_id)
def effective_message_type(entity):
    """
    Extracts the type of message as a string identifier from a :class:`telegram.Message` or a
    :class:`telegram.Update`.

    Args:
        entity (:obj:`Update` | :obj:`Message`) The ``update`` or ``message`` to extract from

    Returns:
        str: One of ``Message.MESSAGE_TYPES``, or ``None`` if no type matches.

    Raises:
        TypeError: if ``entity`` is neither a ``Message`` nor an ``Update``.
    """
    # Imported here to avoid cyclic imports at module load time.
    from telegram import Message
    from telegram import Update

    if isinstance(entity, Message):
        message = entity
    elif isinstance(entity, Update):
        message = entity.effective_message
    else:
        raise TypeError("entity is not Message or Update (got: {})".format(type(entity)))

    # First message type whose attribute is set (truthy) wins.
    return next(
        (kind for kind in Message.MESSAGE_TYPES if getattr(message, kind, None)),
        None,
    )
def create_deep_linked_url(bot_username, payload=None, group=False):
    """
    Creates a deep-linked URL for this ``bot_username`` with the specified ``payload``.
    See https://core.telegram.org/bots#deep-linking to learn more.

    The ``payload`` may consist of the following characters: ``A-Z, a-z, 0-9, _, -``

    Note:
        Works well in conjunction with
        ``CommandHandler("start", callback, filters = Filters.regex('payload'))``

    Examples:
        ``create_deep_linked_url(bot.get_me().username, "some-params")``

    Args:
        bot_username (:obj:`str`): The username to link to
        payload (:obj:`str`, optional): Parameters to encode in the created URL
        group (:obj:`bool`, optional): If `True` the user is prompted to select a group to
            add the bot to. If `False`, opens a one-on-one conversation with the bot.
            Defaults to `False`.

    Returns:
        :obj:`str`: An URL to start the bot with specific parameters

    Raises:
        ValueError: for a missing/too-short username, a payload over 64 characters,
            or a payload containing disallowed characters.
    """
    if bot_username is None or len(bot_username) <= 3:
        raise ValueError("You must provide a valid bot_username.")

    base_url = 'https://t.me/{}'.format(bot_username)
    if not payload:
        return base_url

    if len(payload) > 64:
        raise ValueError("The deep-linking payload must not exceed 64 characters.")

    if not re.match(r'^[A-Za-z0-9_-]+$', payload):
        raise ValueError("Only the following characters are allowed for deep-linked "
                         "URLs: A-Z, a-z, 0-9, _ and -")

    query_key = 'startgroup' if group else 'start'
    return '{0}?{1}={2}'.format(base_url, query_key, payload)
def encode_conversations_to_json(conversations):
    """Helper method to encode a conversations dict (that uses tuples as keys) in a
    JSON-serializable way. Use :attr:`_decode_conversations_from_json` to decode.

    Args:
        conversations (:obj:`dict`): The conversations dict to transform to JSON.

    Returns:
        :obj:`str`: The JSON-serialized conversations dict
    """
    # Tuple keys are not valid JSON keys, so each key is itself JSON-encoded.
    return json.dumps({
        handler: {json.dumps(key): state for key, state in states.items()}
        for handler, states in conversations.items()
    })
def decode_conversations_from_json(json_string):
    """Helper method to decode a conversations dict (that uses tuples as keys) from a
    JSON-string created with :attr:`_encode_conversations_to_json`.

    Args:
        json_string (:obj:`str`): The conversations dict as JSON string.

    Returns:
        :obj:`dict`: The conversations dict after decoding
    """
    decoded = json.loads(json_string)
    # Reverse the encoding: each key is a JSON-encoded list, restored as a tuple.
    return {
        handler: {tuple(json.loads(key)): state for key, state in states.items()}
        for handler, states in decoded.items()
    }
def decode_user_chat_data_from_json(data):
    """Helper method to decode chat or user data (that uses ints as keys) from a
    JSON-string.

    Args:
        data (:obj:`str`): The user/chat_data dict as JSON string.

    Returns:
        :obj:`dict`: The user/chat_data defaultdict after decoding
    """
    decoded = defaultdict(dict)
    for user_id_str, user_data in json.loads(data).items():
        user_id = int(user_id_str)
        decoded[user_id] = {}
        for key, value in user_data.items():
            # Inner keys become ints where possible; non-numeric keys stay strings.
            try:
                key = int(key)
            except ValueError:
                pass
            decoded[user_id][key] = value
    return decoded
class DefaultValue:
    """Wrapper for immutable default arguments that allows to check whether the default
    value was set explicitly.

    Usage::

        DefaultOne = DefaultValue(1)

        def f(arg=DefaultOne):
            if arg is DefaultOne:
                # the caller did not override the default
                arg = arg.value
            ...

    Instances are truthy exactly when the wrapped value is truthy, i.e.
    ``bool(DefaultValue(x)) == bool(x)``.

    Attributes:
        value (:obj:`obj`): The value of the default argument

    Args:
        value (:obj:`obj`): The value of the default argument
    """

    def __init__(self, value=None):
        self.value = value

    def __bool__(self):
        return bool(self.value)
# Shared sentinel meaning "argument not supplied"; compare with ``is DEFAULT_NONE``.
DEFAULT_NONE = DefaultValue(None)
""":class:`DefaultValue`: Default `None`"""
91957d7bd1a6e77d79bfe81ffffb07f965dd235d | 727 | py | Python | Asignaciones/Recursividad, Listas y arboles/Quicksort.py | TEC-2014092195/IC1802-introduccion-a-la-programacion | 20391cc2e301993cacb27178f2deab403016ed84 | [
"MIT"
] | null | null | null | Asignaciones/Recursividad, Listas y arboles/Quicksort.py | TEC-2014092195/IC1802-introduccion-a-la-programacion | 20391cc2e301993cacb27178f2deab403016ed84 | [
"MIT"
] | null | null | null | Asignaciones/Recursividad, Listas y arboles/Quicksort.py | TEC-2014092195/IC1802-introduccion-a-la-programacion | 20391cc2e301993cacb27178f2deab403016ed84 | [
"MIT"
] | null | null | null | def quicksort(lista):
if lista == []:
return []
else:
pivote = lista[0]
menor, igual, mayor = ordenar1(lista[1:],[],[pivote],[])
return quicksort(menor) + igual + quicksort(mayor)
def ordenar1(lista, menor, igual, mayor):
    """Three-way partition of ``lista`` against the pivot ``igual[0]``.

    Returns a tuple ``(menor, igual, mayor)``: elements smaller than the pivot
    extend ``menor``, equal ones extend ``igual`` and greater ones extend
    ``mayor``. The argument lists are never mutated — new lists are built.
    """
    pivote = igual[0]
    for elemento in lista:
        if elemento < pivote:
            menor = menor + [elemento]
        elif elemento > pivote:
            mayor = mayor + [elemento]
        else:
            igual = igual + [elemento]
    return (menor, igual, mayor)
# Demo: sort a sample list and print the result.
lista=[6,-1,56,3,3,32,2,3]
print(quicksort(lista))
| 26.925926 | 72 | 0.530949 |
6b8d8d8663b017bf4ad149b1a8d24d6fd15d02ca | 4,476 | py | Python | weconnect/elements/window_heating_status.py | tillsteinbach/WeConnect-python | 8e8eade47eab9479a65d714de5009c5be975a59f | [
"MIT"
] | 22 | 2021-08-06T21:01:11.000Z | 2022-03-21T14:54:25.000Z | weconnect/elements/window_heating_status.py | tillsteinbach/WeConnect-python | 8e8eade47eab9479a65d714de5009c5be975a59f | [
"MIT"
] | 30 | 2021-06-20T21:11:26.000Z | 2022-03-22T21:11:04.000Z | weconnect/elements/window_heating_status.py | tillsteinbach/WeConnect-python | 8e8eade47eab9479a65d714de5009c5be975a59f | [
"MIT"
] | 8 | 2021-08-06T21:01:29.000Z | 2022-02-02T21:32:14.000Z | from enum import Enum
import logging
from weconnect.addressable import AddressableAttribute, AddressableDict, AddressableObject
from weconnect.elements.generic_status import GenericStatus
LOG = logging.getLogger("weconnect")
class WindowHeatingStatus(GenericStatus):
    def __init__(
        self,
        vehicle,
        parent,
        statusId,
        fromDict=None,
        fixAPI=True,
    ):
        # `windows` must exist before the superclass __init__ runs: the base
        # constructor presumably consumes `fromDict`, which calls update() and
        # therefore touches self.windows — keep this ordering (TODO confirm
        # against GenericStatus.__init__).
        self.windows = AddressableDict(localAddress='windows', parent=self)
        super().__init__(vehicle=vehicle, parent=parent, statusId=statusId, fromDict=fromDict, fixAPI=fixAPI)
def update(self, fromDict, ignoreAttributes=None):
ignoreAttributes = ignoreAttributes or []
LOG.debug('Update window heating status from dict')
if 'value' in fromDict:
if 'windowHeatingStatus' in fromDict['value'] and fromDict['value']['windowHeatingStatus'] is not None:
for windowDict in fromDict['value']['windowHeatingStatus']:
if 'windowLocation' in windowDict:
if windowDict['windowLocation'] in self.windows:
self.windows[windowDict['windowLocation']].update(fromDict=windowDict)
else:
self.windows[windowDict['windowLocation']] = WindowHeatingStatus.Window(
fromDict=windowDict, parent=self.windows)
for windowName in [windowName for windowName in self.windows.keys()
if windowName not in [window['windowLocation']
for window in fromDict['value']['windowHeatingStatus'] if 'windowLocation' in window]]:
del self.windows[windowName]
else:
self.windows.clear()
self.windows.enabled = False
else:
self.windows.clear()
self.windows.enabled = False
super().update(fromDict=fromDict, ignoreAttributes=(ignoreAttributes + ['windowHeatingStatus']))
def __str__(self):
string = super().__str__()
string += f'\n\tWindows: {len(self.windows)} items'
for window in self.windows.values():
string += f'\n\t\t{window}'
return string
class Window(AddressableObject):
def __init__(
self,
parent,
fromDict=None,
):
super().__init__(localAddress=None, parent=parent)
self.windowHeatingState = AddressableAttribute(
localAddress='windowHeatingState', parent=self, value=None,
valueType=WindowHeatingStatus.Window.WindowHeatingState)
if fromDict is not None:
self.update(fromDict)
def update(self, fromDict):
LOG.debug('Update window from dict')
if 'windowLocation' in fromDict:
self.id = fromDict['windowLocation']
self.localAddress = self.id
else:
LOG.error('Window is missing windowLocation attribute')
if 'windowHeatingState' in fromDict and fromDict['windowHeatingState']:
try:
self.windowHeatingState.setValueWithCarTime(WindowHeatingStatus.Window.WindowHeatingState(
fromDict['windowHeatingState']), lastUpdateFromCar=None, fromServer=True)
except ValueError:
self.windowHeatingState.setValueWithCarTime(WindowHeatingStatus.Window.WindowHeatingState.UNKNOWN,
lastUpdateFromCar=None, fromServer=True)
LOG.warning('An unsupported windowHeatingState: %s was provided,'
' please report this as a bug', fromDict['windowHeatingState'])
else:
self.windowHeatingState.enabled = False
for key, value in {key: value for key, value in fromDict.items()
if key not in ['windowLocation', 'windowHeatingState']}.items():
LOG.warning('%s: Unknown attribute %s with value %s', self.getGlobalAddress(), key, value)
def __str__(self):
return f'{self.id}: {self.windowHeatingState.value.value}' # pylint: disable=no-member
class WindowHeatingState(Enum,):
ON = 'on'
OFF = 'off'
INVALID = 'invalid'
UNKNOWN = 'unknown open state'
| 44.316832 | 122 | 0.590259 |
8043721e8eb06f2c86934cb13698155f007e9c21 | 6,628 | py | Python | website/models.py | NewWorldInnovations/fme | a1558d7957ad26759c485c617f4fc25864da1925 | [
"MIT"
] | null | null | null | website/models.py | NewWorldInnovations/fme | a1558d7957ad26759c485c617f4fc25864da1925 | [
"MIT"
] | 5 | 2021-03-18T20:32:49.000Z | 2022-03-11T23:21:21.000Z | website/models.py | NewWorldInnovations/fme-backend | a1558d7957ad26759c485c617f4fc25864da1925 | [
"MIT"
] | null | null | null | from django.db import models
from django.urls import reverse
from django.db import models
from django.conf import settings
from django.utils import timezone
from django.utils.html import format_html
from django.contrib.auth.models import User
import datetime,os
from decimal import Decimal
from rest_framework.reverse import reverse as api_reverse
#django host lib for url with subdomain for reverse
# Create your models here.
#Path for User Images
def GetPath(intance, filename):
    """Build the storage path for an uploaded user file.

    Called by Django's ``upload_to``; the result is
    ``<settings.USER_URL>/<instance id>/<filename>``.
    """
    user_directory = str(intance.id)
    return os.path.join(settings.USER_URL, user_directory, str(filename))
class DetailedProfile(models.Model):
    """Optional extended profile data attached to a Django auth ``User``."""
    # Owning user; profile rows are deleted together with the user.
    usrid = models.ForeignKey(User, null=True,verbose_name='User',on_delete=models.CASCADE)
    # Payment/contact e-mail; presumably for Interac transfers (verbose name
    # says 'Interact Email Address') — TODO confirm with product.
    int_eml = models.EmailField(max_length=50, null=True, default='default@fme.com',verbose_name='Interact Email Address')
    address = models.TextField(null=True,blank=True,verbose_name='Address')
    province = models.CharField(max_length=100,null=True,blank=True,verbose_name='Province')
    postalcode = models.CharField(max_length=20,null=True,blank=True,verbose_name='Postalcode')
    cp = models.CharField(max_length=100,null=True,blank=True,verbose_name='Cellphone')
    landline = models.CharField(max_length=100,null=True,blank=True,verbose_name='Landline')
    # Stored under USER_URL/<id>/<filename> (see GetPath above).
    usr_img = models.ImageField(upload_to=GetPath, blank=True, null=True,verbose_name='Profile Image')
    #Public/Private (if private, the user is hidden from public listings and people search)
    CHOICES_PRIVACY = (
        ('pub', 'Public'),
        ('pri', 'Private'),
    )
    # NOTE(review): max_length=3000 is far larger than the 3-char choice keys.
    privacy = models.CharField(max_length=3000, default='pub', choices=CHOICES_PRIVACY, verbose_name='Privacy')
class UserFriends(models.Model):
    """Directed relationship edge between two users (``usrid`` -> ``friend``)."""
    usrid = models.ForeignKey(User,verbose_name='User',on_delete=models.CASCADE,related_name="userfriends_usrid_set")
    friend = models.ForeignKey(User,verbose_name='Friend',on_delete=models.CASCADE, related_name="userfriends_friend_set")
    #Friend/Family/Acquaintance
    CHOICES_FTYPE = (
        ('fri', 'Friend'),
        ('fam', 'Family'),
        ('acq', 'Acquaintances'),
    )
    # How the owner classifies this contact.
    ftype = models.CharField(max_length=100, default='fri', choices=CHOICES_FTYPE, verbose_name='Friend Type')
    date_added = models.DateTimeField(("Friend Since"), default=timezone.now)
class Events(models.Model):
    """A user-created event with a funding target, status and visibility."""
    usrid = models.ForeignKey(User,verbose_name='User ID',on_delete=models.CASCADE)
    title = models.CharField(max_length=300,verbose_name='Title')
    desc = models.CharField(max_length=3000,null=True,blank=True,verbose_name='Description')
    edate = models.DateTimeField(("Date"), default=timezone.now)
    target_fund = models.DecimalField(max_digits=20, decimal_places=2, default=Decimal('0.00'),verbose_name='Target')
    #On Going/Passed/Incoming
    CHOICES_ESTATUS = (
        ('ong', 'On Going'),
        ('pas', 'Passed'),
        ('inc', 'Incoming'),
    )
    # BUGFIX: default was 'pub', which is not one of CHOICES_ESTATUS and
    # fails choice validation; 'ong' (On Going) is the intended default.
    status = models.CharField(max_length=100, default='ong', choices=CHOICES_ESTATUS, verbose_name='Status')
    #Only Friends/Public
    CHOICES_VISIBLE = (
        ('all', 'All'),
        ('fri', 'Only Friends'),
        ('fam', 'Family'),
        ('acq', 'Acquaintances')
    )
    # BUGFIX: default was 'public', which is not one of CHOICES_VISIBLE;
    # 'all' is the valid "visible to everyone" value.
    visibleto = models.CharField(max_length=100, default='all', choices=CHOICES_VISIBLE, verbose_name='Visible To')
    date_created = models.DateTimeField(("Date Created"), default=timezone.now)

    def __str__(self):
        """Represent the event by its title (admin dropdowns, shell, etc.)."""
        return self.title
    # NOTE(review): short_description on a CharField has no effect; the admin
    # only reads it from callables.  Kept for backward compatibility.
    title.short_description = "Event Title"

    @property
    def owner(self):
        """The user who owns this event (used by API permission checks)."""
        return self.usrid

    #to get url in api request
    def get_api_url(self, request=None):
        """Absolute API URL of this event's retrieve/update/delete endpoint."""
        return api_reverse("api-world:events-rud", kwargs={'pk':self.pk},request=request)
class Wishlist(models.Model):
    """An item wished for at an event, with its price, allotted funds and
    an optional product link."""
    eid = models.ForeignKey(Events,verbose_name='Event', related_name='wishlist',on_delete=models.CASCADE)
    name = models.CharField(max_length=300,verbose_name='Wish')
    desc = models.CharField(max_length=3000,null=True,blank=True,verbose_name='Description')
    message = models.CharField(max_length=3000,null=True,blank=True,verbose_name='Message')
    # Amount of money currently assigned toward this wish.
    alotted = models.DecimalField(max_digits=20, decimal_places=2, default=Decimal('0.00'),verbose_name='Alotted')
    prod_link = models.URLField(max_length=200,null=True,blank=True,verbose_name='Url')
    price = models.DecimalField(max_digits=20, decimal_places=2, default=Decimal('1.00'),verbose_name='Price')
    date_created = models.DateTimeField(("Date Created"), default=timezone.now)

    def __str__(self):
        return self.name

    @property
    def owner(self):
        # Ownership is delegated to the parent event's user.
        return self.eid.usrid
class EventInvitees(models.Model):
    """An invitation of a user to an event, with the invitee's RSVP status."""
    eid = models.ForeignKey(Events,verbose_name='Event',on_delete=models.CASCADE)
    usrid = models.ForeignKey(User,verbose_name='User ID',on_delete=models.CASCADE)
    #Coming/Maybe/Next time
    CHOICES_EPRIVACY = (
        ('cm', 'Coming'),
        ('mb', 'Maybe'),
        ('nt', 'Next Time')
    )
    status = models.CharField(max_length=100, default='cm', choices=CHOICES_EPRIVACY, verbose_name='Status')
    date_invited = models.DateTimeField(("Date Invited"), default=timezone.now)
# class UserWallPosts(models.Model):
# class WallThread(models.Model):
# class EventThread(models.Model):
# class WishThread(models.Model):
# class Notifications(models.Model):
# usrid = models.ForeignKey(User,verbose_name='User ID',on_delete=models.CASCADE)
# msg = models.CharField(max_length=100,null=True,blank=True,verbose_name='Province')
# #Event Incoming/Fund raised/Target reach or near/Friend stuff(added,invite, invite confirmation)/ads
# # CHOICES_EPRIVACY = (
# # ('p', 'Peding'),
# # ('d', 'Done'),
# # )
# # types = models.CharField(max_length=100, default='p', choices=CHOICES_SNOTIF, verbose_name='Type')
# #pending/done
# CHOICES_SNOTIF = (
# ('p', 'Peding'),
# ('d', 'Done'),
# )
# status = models.CharField(max_length=100, default='p', choices=CHOICES_SNOTIF, verbose_name='Status')
# date_performed = models.DateTimeField(("Date perfomed"), default=timezone.now)
# #Admin Table
# def fullname(self):
# return self.fnme + ' ' + self.mnme + ' ' + self.lnme
# fullname.short_description = "Full name"
# #defining objects on foreign key rely on dropdown ADMIN
# def __str__(self):
# return self.fnme + ' ' + self.mnme + ' ' + self.lnme
# def usr(self):
# return self.usrn
# usr.short_description = "Username"
# def eml(self):
# return self.emladdress
# eml.short_description = "Email Address"
# def date_registered(self):
# return self.regdate
# date_registered.short_description = "Date Registered"
# def action_buttons(self):
# return format_html(
# '<a class="btn" href="/admin/profiles/profile/{}/change/">Change</a> | '+
# '<a class="btn" href="/admin/profiles/profile/{}/delete/">Delete</a>',
# self.mid, self.mid, self.mid)
# action_buttons.short_description = "Actions"
| 36.822222 | 119 | 0.734007 |
19b4640b290e1c4bb1ced71642e2a903e254fc47 | 4,244 | py | Python | src/oci/circuit_breaker/circuit_breaker.py | ezequielramos/oci-python-sdk | cc4235cf217beaf9feed75760e9ce82610222762 | [
"Apache-2.0",
"BSD-3-Clause"
] | 249 | 2017-09-11T22:06:05.000Z | 2022-03-04T17:09:29.000Z | src/oci/circuit_breaker/circuit_breaker.py | ezequielramos/oci-python-sdk | cc4235cf217beaf9feed75760e9ce82610222762 | [
"Apache-2.0",
"BSD-3-Clause"
] | 228 | 2017-09-11T23:07:26.000Z | 2022-03-23T10:58:50.000Z | src/oci/circuit_breaker/circuit_breaker.py | ezequielramos/oci-python-sdk | cc4235cf217beaf9feed75760e9ce82610222762 | [
"Apache-2.0",
"BSD-3-Clause"
] | 224 | 2017-09-27T07:32:43.000Z | 2022-03-25T16:55:42.000Z | # coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
# Contains classes for defining and building circuit breaker strategies.
import logging
from oci.exceptions import TransientServiceError
logger = logging.getLogger(__name__)
# HTTP statuses (and, where non-empty, the specific service error codes)
# counted as transient failures toward the circuit breaker threshold.
# An empty code list means *any* error with that status counts.
DEFAULT_CIRCUIT_BREAKER_FAILURE_STATUSES_AND_CODES = {
    409: ['IncorrectState'],
    429: [],
    500: [],
    502: [],
    503: [],
    504: []
}
class CircuitBreakerStrategy(object):
    """Configuration for a client-side circuit breaker.

    Accepted keyword arguments (all optional):

    * ``failure_threshold`` (int, default 10): number of subsequent
      failures that flips the breaker from CLOSED to OPEN.
    * ``recovery_timeout`` (int, default 30): seconds the breaker waits in
      the OPEN state before moving to HALF-OPEN.
    * ``failure_statuses_and_codes`` (dict mapping int status to a list of
      error-code strings): which responses count as failures.  An empty
      list means every error with that status counts, regardless of code.
      Defaults to ``DEFAULT_CIRCUIT_BREAKER_FAILURE_STATUSES_AND_CODES``.
    * ``name`` (str, default None): breakers are normally unique per
      client; give several clients a strategy carrying the same explicit
      name to deliberately share one breaker, in which case failures from
      any of them add to the shared threshold.
    """

    def __init__(self, **kwargs):
        """Create a strategy, filling in documented defaults for any
        option not supplied by the caller."""
        option_defaults = {
            'failure_threshold': 10,
            'recovery_timeout': 30,
            'failure_statuses_and_codes': DEFAULT_CIRCUIT_BREAKER_FAILURE_STATUSES_AND_CODES,
            'name': None,
        }
        for option, fallback in option_defaults.items():
            setattr(self, option, kwargs.get(option, fallback))
        # The exception type the breaker treats as a tripping failure.
        self.expected_exception = TransientServiceError

    def is_transient_error(self, status_code, service_code):
        """Return True if this (status, code) pair counts as a transient failure."""
        logger.debug('Is transient error status code:{} error code:{}'.format(status_code, service_code))
        if status_code not in self.failure_statuses_and_codes:
            logger.debug(
                'status code:{} not in failure_statuses_and_codes:{}'.format(status_code, self.failure_statuses_and_codes))
            return False
        codes_for_status = self.failure_statuses_and_codes[status_code]
        # An empty code list means every error with this status is transient.
        return not codes_for_status or service_code in codes_for_status
class NoCircuitBreakerStrategy(object):
    """Marker strategy: the client should not use any circuit breaker."""

    def __init__(self):
        """No-op constructor; this strategy carries no state."""
| 46.637363 | 245 | 0.671772 |
4ddd4a01ff8e08b9a99afa503da348dc2fa840d8 | 641 | py | Python | src/provider/views.py | mahidul-islam/shopmanagementsystem | 22679aa420e7408cd44cf1aa6b556bb94a30398a | [
"MIT"
] | null | null | null | src/provider/views.py | mahidul-islam/shopmanagementsystem | 22679aa420e7408cd44cf1aa6b556bb94a30398a | [
"MIT"
] | null | null | null | src/provider/views.py | mahidul-islam/shopmanagementsystem | 22679aa420e7408cd44cf1aa6b556bb94a30398a | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
from django.views import generic
from django.shortcuts import get_object_or_404, redirect
from django.contrib import messages
from django.contrib.auth.mixins import LoginRequiredMixin
from . import models
class ShowProvider(LoginRequiredMixin, generic.TemplateView):
template_name = "provider/show_one_provider.html"
http_method_names = ['get']
def get(self, request, *args, **kwargs):
slug = self.kwargs.get('slug')
provider = get_object_or_404(models.Provider, slug=slug)
kwargs['show_provider'] = provider
return super().get(request, *args, **kwargs)
| 35.611111 | 64 | 0.75039 |
e6af9078defc1a6c4d12346b363a32a0af5e3d75 | 8,430 | py | Python | test/test_matchengine.py | TinEye/tineyeservices_python | d30098b734b703f4ff933dfbcf1f0f8321c8d0d7 | [
"MIT"
] | 13 | 2016-05-06T05:23:27.000Z | 2022-03-17T01:27:07.000Z | test/test_matchengine.py | seanwallawalla-forks/tineyeservices_python | d30098b734b703f4ff933dfbcf1f0f8321c8d0d7 | [
"MIT"
] | 1 | 2016-05-27T11:45:54.000Z | 2018-11-08T21:58:38.000Z | test/test_matchengine.py | seanwallawalla-forks/tineyeservices_python | d30098b734b703f4ff933dfbcf1f0f8321c8d0d7 | [
"MIT"
] | 8 | 2017-09-22T03:36:28.000Z | 2022-03-17T01:29:18.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2018 TinEye. All rights reserved worldwide.
import os
import sys
import unittest
from tineyeservices import MatchEngineRequest
from tineyeservices import Image
from tineyeservices.exception import TinEyeServiceError, TinEyeServiceWarning
imagepath = os.path.abspath("test/images")
sys.path.append('../')
class TestMatchEngine(unittest.TestCase):
    """Integration tests for MatchEngineRequest.

    These run against a live MatchEngine instance (staging URL below) and
    require network access; each test starts and ends with an empty
    collection, enforced by setUp/tearDown.
    """

    def setUp(self):
        # Connect to the staging server and empty the collection so every
        # test starts from a known state.
        self.request = MatchEngineRequest(api_url='http://staging02.tc:5001/rest/')
        r = self.request.list(limit=1000)
        if len(r['result']) > 0:
            r = self.request.delete(r['result'])

    def tearDown(self):
        # Remove anything the test added.
        r = self.request.list(limit=1000)
        if len(r['result']) > 0:
            r = self.request.delete(r['result'])

    def test_add(self):
        """Images can be added by upload and by URL, and then listed."""
        # Image upload
        images = [Image(filepath='%s/banana.jpg' % imagepath, collection_filepath='folder/banana.jpg'),
                  Image(filepath='%s/banana_flip.jpg' % imagepath, collection_filepath='banana_flip.jpg')]
        r = self.request.add_image(images)
        self.assertEqual(r['status'], 'ok')
        self.assertEqual(r['method'], 'add')
        self.assertEqual(r['error'], [])
        self.assertEqual(r['result'], [])
        r = self.request.list()
        self.assertEqual(r['result'], ['folder/banana.jpg', 'banana_flip.jpg'])
        # URL
        images = [Image(url='https://tineye.com/images/meloncat.jpg')]
        r = self.request.add_url(images)
        self.assertEqual(r['status'], 'ok')
        self.assertEqual(r['method'], 'add')
        self.assertEqual(r['error'], [])
        self.assertEqual(r['result'], [])
        r = self.request.list()
        self.assertEqual(r['result'], ['folder/banana.jpg', 'banana_flip.jpg', 'meloncat.jpg'])

    def test_delete(self):
        """Deleting removes an image; deleting a missing image warns."""
        images = [Image(filepath='%s/banana.jpg' % imagepath, collection_filepath='folder/banana.jpg'),
                  Image(filepath='%s/banana_flip.jpg' % imagepath, collection_filepath='banana_flip.jpg')]
        r = self.request.add_image(images)
        self.assertEqual(r['status'], 'ok')
        r = self.request.delete(['folder/banana.jpg'])
        self.assertEqual(r['status'], 'ok')
        self.assertEqual(r['method'], 'delete')
        self.assertEqual(r['error'], [])
        self.assertEqual(r['result'], [])
        r = self.request.list()
        self.assertEqual(r['result'], ['banana_flip.jpg'])
        # Try deleting a file not in the collection: the valid file is still
        # removed and the missing one is reported as a warning.
        try:
            self.request.delete(['banana_flip.jpg', 'folder/banana.jpg'])
        except TinEyeServiceWarning as e:
            self.assertEqual(e.args[0], ['folder/banana.jpg: Failed to remove from index.'])
        r = self.request.list()
        self.assertEqual(r['result'], [])

    def test_search(self):
        """Search works by uploaded image, by URL and by collection filepath."""
        # Image upload
        images = [Image(filepath='%s/banana.jpg' % imagepath, collection_filepath='banana.jpg')]
        r = self.request.add_image(images)
        self.assertEqual(r['status'], 'ok')
        image = Image('%s/banana.png' % imagepath)
        r = self.request.search_image(image)
        self.assertEqual(r['status'], 'ok')
        self.assertEqual(r['method'], 'search')
        self.assertEqual(r['error'], [])
        self.assertEqual(len(r['result']), 1)
        # URL
        images = [Image(url='https://tineye.com/images/meloncat.jpg')]
        r = self.request.add_url(images)
        self.assertEqual(r['status'], 'ok')
        r = self.request.search_url('https://tineye.com/images/meloncat.jpg')
        self.assertEqual(r['status'], 'ok')
        self.assertEqual(r['method'], 'search')
        self.assertEqual(r['error'], [])
        if len(r['result']) > 0:
            self.assertTrue(r['result'][0]['score'] > 90)
        self.assertEqual(len(r['result']), 1)
        # Test bad URL: the server reports the download failure as an error.
        try:
            r = self.request.search_url('https://tineye.com/404')
        except TinEyeServiceError as e:
            self.assertEqual(e.args[0], ['https://tineye.com/404: Failed to download file.'])
        # Filepath
        r = self.request.search_filepath('meloncat.jpg')
        self.assertEqual(r['status'], 'ok')
        self.assertEqual(r['method'], 'search')
        self.assertEqual(r['error'], [])
        if len(r['result']) > 0:
            self.assertTrue(float(r['result'][0]['score']) > 90)
        self.assertEqual(len(r['result']), 1)
        # Test min score
        image = Image('%s/banana.png' % imagepath)
        r = self.request.search_image(image, min_score=70)
        self.assertEqual(r['status'], 'ok')
        self.assertEqual(r['method'], 'search')
        self.assertEqual(r['error'], [])
        self.assertEqual(len(r['result']), 1)
        # Test offset and limit
        image = Image('%s/banana.png' % imagepath)
        r = self.request.search_image(image, offset=0, limit=1)
        self.assertEqual(r['status'], 'ok')
        self.assertEqual(r['method'], 'search')
        self.assertEqual(r['error'], [])
        self.assertEqual(len(r['result']), 1)

    def test_compare(self):
        """Compare honors the horizontal-flip flag and min_score threshold."""
        # Image upload; the two fixtures are horizontal mirrors of each other.
        image_1 = Image(filepath='%s/banana.jpg' % imagepath)
        image_2 = Image(filepath='%s/banana_flip.jpg' % imagepath)
        r = self.request.compare_image(image_1, image_2, check_horizontal_flip=False)
        self.assertEqual(r['status'], 'ok')
        self.assertEqual(r['method'], 'compare')
        self.assertEqual(r['error'], [])
        self.assertEqual(len(r['result']), 0)
        # With flip enabled
        r = self.request.compare_image(image_1, image_2, check_horizontal_flip=True)
        self.assertEqual(r['status'], 'ok')
        self.assertEqual(r['method'], 'compare')
        self.assertEqual(r['error'], [])
        self.assertEqual(len(r['result']), 1)
        # min_score=100: the flipped match scores below 100, so no result is
        # expected even with flip checking on.
        r = self.request.compare_image(image_1, image_2, min_score=100, check_horizontal_flip=True)
        self.assertEqual(r['status'], 'ok')
        self.assertEqual(r['method'], 'compare')
        self.assertEqual(r['error'], [])
        self.assertEqual(len(r['result']), 0)
        # URL: identical images compare with a perfect score.
        r = self.request.compare_url(
            'https://tineye.com/images/meloncat.jpg',
            'https://tineye.com/images/meloncat.jpg',
            min_score=100)
        self.assertEqual(r['status'], 'ok')
        self.assertEqual(r['method'], 'compare')
        self.assertEqual(r['error'], [])
        self.assertEqual(len(r['result']), 1)

    def test_count(self):
        """count() tracks additions and deletions."""
        # No images
        r = self.request.count()
        self.assertEqual(r['status'], 'ok')
        self.assertEqual(r['method'], 'count')
        self.assertEqual(r['error'], [])
        self.assertEqual(r['result'][0], 0)
        images = [Image(filepath='%s/banana.png' % imagepath, collection_filepath='banana.png'),
                  Image(filepath='%s/banana_flip.jpg' % imagepath)]
        r = self.request.add_image(images)
        self.assertEqual(r['status'], 'ok')
        # Added two images
        r = self.request.count()
        self.assertEqual(r['status'], 'ok')
        self.assertEqual(r['method'], 'count')
        self.assertEqual(r['error'], [])
        self.assertEqual(r['result'][0], 2)
        r = self.request.delete(['banana.png'])
        self.assertEqual(r['status'], 'ok')
        # Deleted one image
        r = self.request.count()
        self.assertEqual(r['status'], 'ok')
        self.assertEqual(r['method'], 'count')
        self.assertEqual(r['error'], [])
        self.assertEqual(r['result'][0], 1)

    def test_list(self):
        """list() reflects the collection's contents."""
        # No images
        r = self.request.list()
        self.assertEqual(r['status'], 'ok')
        self.assertEqual(r['method'], 'list')
        self.assertEqual(r['error'], [])
        self.assertEqual(r['result'], [])
        # Added one image
        images = [Image(filepath='%s/banana.png' % imagepath, collection_filepath='banana.png')]
        r = self.request.add_image(images)
        self.assertEqual(r['status'], 'ok')
        r = self.request.list()
        self.assertEqual(r['status'], 'ok')
        self.assertEqual(r['method'], 'list')
        self.assertEqual(r['error'], [])
        self.assertEqual(r['result'], ['banana.png'])

    def test_ping(self):
        """The server answers a ping."""
        # Ping!
        r = self.request.ping()
        self.assertEqual(r['status'], 'ok')
        self.assertEqual(r['method'], 'ping')
| 37.802691 | 106 | 0.586833 |
f5aadfb3a9c8a816eed066d43fe94ab07045fc2b | 331 | py | Python | scholarmetrics/__init__.py | Michael-E-Rose/scholarmetrics | 16d6f3b149016ca9ce0da4ba3fa46069d67394ff | [
"MIT"
] | 4 | 2015-05-02T17:51:20.000Z | 2022-01-20T20:55:09.000Z | scholarmetrics/__init__.py | Michael-E-Rose/scholarmetrics | 16d6f3b149016ca9ce0da4ba3fa46069d67394ff | [
"MIT"
] | 8 | 2015-05-02T17:52:28.000Z | 2017-04-04T20:25:50.000Z | scholarmetrics/__init__.py | Michael-E-Rose/scholarmetrics | 16d6f3b149016ca9ce0da4ba3fa46069d67394ff | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
__author__ = """Michael E. Rose"""
__email__ = 'Michael.Ernst.Rose@gmail.com'
__all__ = ['euclidean', 'gindex', 'hindex']
from pbr.version import VersionInfo
_v = VersionInfo('scholarmetrics').semantic_version()
__version__ = _v.release_string()
version_info = _v.version_tuple()
from .metrics import *
| 23.642857 | 53 | 0.719033 |
58c48899c39c2bdd21c9ecc48ba1ae34d163b3fa | 121 | py | Python | users/swagger/__init__.py | marianojabdala/microservices | 4a020cb9d703f20baa40446d525363d159d008ac | [
"Apache-2.0"
] | 1 | 2018-04-20T20:12:31.000Z | 2018-04-20T20:12:31.000Z | users/swagger/__init__.py | marianojabdala/microservices | 4a020cb9d703f20baa40446d525363d159d008ac | [
"Apache-2.0"
] | 17 | 2021-03-19T22:51:22.000Z | 2021-08-30T20:22:33.000Z | users/swagger/__init__.py | marianojabdala/microservices | 4a020cb9d703f20baa40446d525363d159d008ac | [
"Apache-2.0"
] | 2 | 2018-04-20T20:12:36.000Z | 2018-10-07T15:37:46.000Z | # -*- coding: utf-8 -*-
"""Expose the User schema for the swagger documentation."""
from .schemas.user import UserSchema
| 30.25 | 59 | 0.710744 |
2ea2694d7e574738860939ba096089dc8e93c0a1 | 5,579 | py | Python | util_uploader.py | um-flint/nix_bsd_mac_inventory | 0791e0d43b6dc01cb44721be1356ee81de04b0e1 | [
"MIT"
] | null | null | null | util_uploader.py | um-flint/nix_bsd_mac_inventory | 0791e0d43b6dc01cb44721be1356ee81de04b0e1 | [
"MIT"
] | null | null | null | util_uploader.py | um-flint/nix_bsd_mac_inventory | 0791e0d43b6dc01cb44721be1356ee81de04b0e1 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import base64
import requests
try:
requests.packages.urllib3.disable_warnings()
except AttributeError:
pass
# To upload, or not to upload — that is the question.  Set to True to log
# actions without sending any requests.  NOTE(review): when DRY_RUN is True
# the Rest methods below return None, so callers must tolerate missing results.
DRY_RUN = False
class Rest:
    """Thin Python 2 HTTP client for the Device42 REST API.

    Wraps `requests` with Basic auth, form-encoded bodies and TLS
    verification disabled, and exposes one method per API endpoint.
    All write methods are no-ops when the module-level DRY_RUN is True
    (and then return None).
    """

    def __init__(self, base_url, username, secret, debug):
        self.base_url = base_url
        self.username = username
        self.password = secret
        # When truthy, request/response payloads are printed to stdout.
        self.debug = debug
        # Basic auth header built once; Python 2 b64encode accepts str.
        self.headers = {
            'Authorization': 'Basic ' + base64.b64encode(self.username + ':' + self.password),
            'Content-Type': 'application/x-www-form-urlencoded'
        }

    def uploader(self, data, url, method=None):
        """POST (or PUT when method == 'put') *data* to *url*.

        Returns (parsed JSON body, HTTP status code).  The status code is
        always printed; payload and body only when debug is set.
        """
        payload = data
        if method == 'put':
            r = requests.put(url, data=payload, headers=self.headers, verify=False)
        else:
            r = requests.post(url, data=payload, headers=self.headers, verify=False)
        msg = unicode(payload)
        if self.debug:
            print msg
        scode = r.status_code
        msg = 'Status code: %s' % str(scode)
        print msg
        msg = str(r.text)
        if self.debug:
            print msg
        return r.json(), scode

    def fetcher(self, url):
        """GET *url*; return parsed JSON on 200, otherwise the status code."""
        r = requests.get(url, headers=self.headers, verify=False)
        status_code = r.status_code
        if status_code == 200:
            if self.debug:
                msg = '%d\t%s' % (status_code, str(r.text))
                print msg
            return r.json()
        else:
            return status_code

    def deleter(self, url):
        """DELETE *url*; return parsed JSON on 200, otherwise the status code."""
        r = requests.delete(url, headers=self.headers, verify=False)
        status_code = r.status_code
        if status_code == 200:
            if self.debug:
                msg = '%d\t%s' % (status_code, str(r.text))
                print msg
            return r.json()
        else:
            return status_code

    def put_device(self, data):
        """Update a device record (matched by MAC on the server side)."""
        if not DRY_RUN:
            url = self.base_url + '/api/1.0/device/'
            msg = '\r\nUpdating device by mac %s ' % url
            if self.debug:
                print msg
            method = 'put'
            result, scode = self.uploader(data, url, method)
            return result, scode

    def post_device(self, data):
        """Create/update a device record."""
        if not DRY_RUN:
            url = self.base_url + '/api/device/'
            msg = '\r\nPosting data to %s ' % url
            if self.debug:
                print msg
            result, scode = self.uploader(data, url)
            return result, scode

    def post_multinodes(self, data):
        """Post multi-node (cluster) data."""
        if not DRY_RUN:
            url = self.base_url + '/api/1.0/multinodes/'
            msg = '\r\nPosting multidata to %s ' % url
            if self.debug:
                print msg
            result, scode = self.uploader(data, url)
            return result, scode

    def post_ip(self, data):
        """Post an IP address record (response is discarded)."""
        if not DRY_RUN:
            url = self.base_url + '/api/ip/'
            msg = '\r\nPosting IP data to %s ' % url
            if self.debug:
                print msg
            self.uploader(data, url)

    def post_mac(self, data):
        """Post a MAC address record (response is discarded)."""
        if not DRY_RUN:
            url = self.base_url + '/api/1.0/macs/'
            msg = '\r\nPosting MAC data to %s ' % url
            if self.debug:
                print msg
            self.uploader(data, url)

    def post_parts(self, data, category):
        """Post hardware parts data; *category* is only used for logging."""
        if not DRY_RUN:
            url = self.base_url + '/api/1.0/parts/'
            msg = '\r\nPosting %s parts to %s ' % (category, url)
            if self.debug:
                print msg
            self.uploader(data, url)

    def post_software(self, data):
        """Post installed-software details (response is discarded)."""
        if not DRY_RUN:
            url = self.base_url + '/api/1.0/software_details/'
            msg = '\r\nPosting software data to %s ' % (url)
            if self.debug:
                print msg
            self.uploader(data, url)

    def get_device_by_name(self, name):
        """Return the list of IPs attached to device *name*, or None."""
        if not DRY_RUN:
            url = self.base_url + '/api/1.0/devices/name/%s/?include_cols=ip_addresses' % name
            msg = '\r\nFetching IP addresses for device: %s ' % name
            if self.debug:
                print msg
            response = self.fetcher(url)
            # fetcher returns an int status code on failure; only a dict
            # carries the payload we want.
            if isinstance(response, dict) and 'ip_addresses' in response:
                fetched_ips = [x['ip'] for x in response['ip_addresses'] if 'ip' in x]
                return fetched_ips

    def delete_ip(self, ip):
        """Delete every IP record matching *ip* (lookup, then per-id delete)."""
        if not DRY_RUN:
            msg = '\r\nDeleting IP addresses: %s ' % ip
            if self.debug:
                print msg
            url = self.base_url + '/api/1.0/ips/?ip=%s' % ip
            response = self.fetcher(url)
            # NOTE(review): assumes the lookup succeeded; an int status code
            # here (non-200) would raise a TypeError on subscripting.
            ip_ids = [x['id'] for x in response['ips']]
            for ip_id in ip_ids:
                url = self.base_url + '/api/1.0/ips/%s' % ip_id
                self.deleter(url)

    def get_device_by_mac(self, mac):
        """Return the first device id owning *mac*, or None if not found."""
        if not DRY_RUN:
            url = self.base_url + '/api/1.0/macs/?mac=%s' % mac
            msg = '\r\nFind device by mac: %s ' % mac
            if self.debug:
                print msg
            response = self.fetcher(url)
            if isinstance(response, dict) and 'macaddresses' in response:
                dev_id = [x['device']['device_id'] for x in response['macaddresses'] if 'device' in x]
                if dev_id:
                    # NOTE(review): the bare except is overbroad; dev_id[0]
                    # cannot raise here since dev_id is non-empty.
                    try:
                        return dev_id[0]
                    except:
                        pass
| 34.438272 | 103 | 0.500807 |
3967c69c600390e5988077b5938ed3969782ae5f | 12,248 | py | Python | cumulus/management/commands/syncstatic.py | sunscrapers/django-cumulus | e89bbba4bed729465c1e05993b8bae9461f7612b | [
"BSD-3-Clause"
] | null | null | null | cumulus/management/commands/syncstatic.py | sunscrapers/django-cumulus | e89bbba4bed729465c1e05993b8bae9461f7612b | [
"BSD-3-Clause"
] | null | null | null | cumulus/management/commands/syncstatic.py | sunscrapers/django-cumulus | e89bbba4bed729465c1e05993b8bae9461f7612b | [
"BSD-3-Clause"
] | null | null | null | import datetime
import fnmatch
import mimetypes
import optparse
import os
import pyrax
import re
import swiftclient
from django.conf import settings
from django.core.management import call_command
from django.core.management.base import CommandError, NoArgsCommand
from cumulus.settings import CUMULUS
from cumulus.storage import sync_headers, get_gzipped_contents
class Command(NoArgsCommand):
help = "Synchronizes static media to cloud files."
option_list = NoArgsCommand.option_list + (
optparse.make_option("-i", "--include", action="append", default=[],
dest="includes", metavar="PATTERN",
help="Include file or directories matching this glob-style "
"pattern. Use multiple times to include more."),
optparse.make_option("-e", "--exclude", action="append", default=[],
dest="excludes", metavar="PATTERN",
help="Exclude files or directories matching this glob-style "
"pattern. Use multiple times to exclude more."),
optparse.make_option("-w", "--wipe",
action="store_true", dest="wipe", default=False,
help="Wipes out entire contents of container first."),
optparse.make_option("-t", "--test-run",
action="store_true", dest="test_run", default=False,
help="Performs a test run of the sync."),
optparse.make_option("-q", "--quiet",
action="store_true", dest="test_run", default=False,
help="Do not display any output."),
optparse.make_option("-c", "--container",
dest="container", help="Override STATIC_CONTAINER."),
)
def set_options(self, options):
"""
Sets instance variables based on an options dict
"""
# COMMAND LINE OPTIONS
self.wipe = options.get("wipe")
self.test_run = options.get("test_run")
self.quiet = options.get("test_run")
self.container_name = options.get("container")
self.verbosity = int(options.get("verbosity"))
if self.test_run:
self.verbosity = 2
cli_includes = options.get("includes")
cli_excludes = options.get("excludes")
# CUMULUS CONNECTION AND SETTINGS FROM SETTINGS.PY
if not self.container_name:
self.container_name = CUMULUS["STATIC_CONTAINER"]
settings_includes = CUMULUS["INCLUDE_LIST"]
settings_excludes = CUMULUS["EXCLUDE_LIST"]
# PATH SETTINGS
self.static_root = os.path.abspath(settings.STATIC_ROOT)
self.static_url = settings.STATIC_URL
if not self.static_root.endswith("/"):
self.static_root = self.static_root + "/"
if self.static_url.startswith("/"):
self.static_url = self.static_url[1:]
# SYNCSTATIC VARS
# combine includes and excludes from the cli and django settings file
self.includes = list(set(cli_includes + settings_includes))
self.excludes = list(set(cli_excludes + settings_excludes))
# transform glob patterns to regular expressions
self.local_filenames = []
self.create_count = 0
self.upload_count = 0
self.update_count = 0
self.skip_count = 0
self.delete_count = 0
def connect_container(self):
    """
    Connects to a container using the swiftclient api.

    The container will be created and/or made public using the
    pyrax api if not already so.
    """
    self.conn = swiftclient.Connection(authurl=CUMULUS["AUTH_URL"],
                                       user=CUMULUS["USERNAME"],
                                       key=CUMULUS["API_KEY"],
                                       snet=CUMULUS["SERVICENET"],
                                       auth_version=CUMULUS["AUTH_VERSION"],
                                       tenant_name=CUMULUS["AUTH_TENANT_NAME"])
    try:
        self.conn.head_container(self.container_name)
    except swiftclient.client.ClientException as exception:
        if exception.msg == "Container HEAD failed":
            # container does not exist yet: create it
            call_command("container_create", self.container_name)
        else:
            raise
    if CUMULUS["USE_PYRAX"]:
        # BUG FIX: the identity_type setting was applied twice in the
        # original (two identical if-blocks); once is sufficient.
        if CUMULUS["PYRAX_IDENTITY_TYPE"]:
            pyrax.set_setting("identity_type", CUMULUS["PYRAX_IDENTITY_TYPE"])
        public = not CUMULUS["SERVICENET"]
        pyrax.set_credentials(CUMULUS["USERNAME"], CUMULUS["API_KEY"])
        connection = pyrax.connect_to_cloudfiles(region=CUMULUS["REGION"],
                                                 public=public)
        container = connection.get_container(self.container_name)
        if not container.cdn_enabled:
            container.make_public(ttl=CUMULUS["TTL"])
    else:
        # no pyrax: make the container world-readable via a swift ACL header
        headers = {"X-Container-Read": ".r:*"}
        self.conn.post_container(self.container_name, headers=headers)
    self.container = self.conn.get_container(self.container_name)
def handle_noargs(self, *args, **options):
    """Entry point: configure, connect, then sync STATIC_ROOT to the cloud."""
    self.set_options(options)
    self.connect_container()

    # optionally start from a clean slate
    if self.wipe:
        self.wipe_container()

    # collect matching local files and their container-relative names
    abspaths = self.match_local(self.static_root, self.includes, self.excludes)
    relpaths = []
    for abspath in abspaths:
        rel = abspath.split(self.static_root)[1]
        if rel.startswith("/"):
            rel = rel[1:]
        relpaths.append(rel)

    if not relpaths:
        raise CommandError("The STATIC_ROOT directory is empty "
                           "or all files have been ignored.")

    for abspath in abspaths:
        if not os.path.isfile(abspath):
            raise CommandError("Unsupported filetype: {0}.".format(abspath))

    # match cloud objects, then sync both directions
    cloud_objs = self.match_cloud(self.includes, self.excludes)
    self.upload_files(abspaths, relpaths)
    self.delete_extra_files(relpaths, cloud_objs)
    if not self.quiet or self.verbosity > 1:
        self.print_tally()
def match_cloud(self, includes, excludes):
    """
    Returns the names of cloud objects matching an include pattern
    and not matching any exclude pattern.
    """
    include_re = r"|".join(fnmatch.translate(pat) for pat in includes)
    # "$." can never match, so an empty exclude list excludes nothing
    exclude_re = r"|".join(fnmatch.translate(pat) for pat in excludes) or r"$."
    names = [obj["name"] for obj in self.container[1]]
    return [name for name in names
            if re.match(include_re, name) and not re.match(exclude_re, name)]
def match_local(self, prefix, includes, excludes):
    """
    Filters os.walk() with include and exclude patterns.
    See: http://stackoverflow.com/a/5141829/93559

    Returns absolute paths under ``prefix`` whose path relative to
    ``prefix`` matches an include pattern and whose absolute path does
    not match an exclude pattern.  Directories matching an exclude
    pattern are pruned from the walk entirely.
    """
    includes_pattern = r"|".join([fnmatch.translate(x) for x in includes])
    # "$." can never match, so an empty exclude list excludes nothing
    excludes_pattern = r"|".join([fnmatch.translate(x) for x in excludes]) or r"$."
    matches = []
    for root, dirs, files in os.walk(prefix, topdown=True):
        # prune excluded dirs in-place so os.walk skips their subtrees
        dirs[:] = [d for d in dirs
                   if not re.match(excludes_pattern,
                                   os.path.join(root, d).split(root)[1])]
        # BUG FIX: the original applied os.path.join(root, f) to the same
        # name three times; that was harmless only because joining with an
        # already-absolute path discards the prefix.  Join exactly once.
        for filename in files:
            path = os.path.join(root, filename)
            if re.match(excludes_pattern, path):
                continue
            if re.match(includes_pattern, path.split(prefix)[1]):
                matches.append(path)
    return matches
def upload_files(self, abspaths, relpaths):
    """
    Determines files to be uploaded and call ``upload_file`` on each.

    For every relative path a HEAD request decides whether the object
    already exists in the container; missing objects are uploaded
    immediately, existing ones only when the local file is not older
    than the cloud copy.
    """
    for relpath in relpaths:
        # find the absolute path corresponding to this relative path
        abspath = [p for p in abspaths if p.endswith(relpath)][0]
        try:
            head = self.conn.head_object(self.container_name, relpath)
        except swiftclient.client.ClientException as exception:
            # any failure other than "object missing" is fatal
            if exception.msg != "Object HEAD failed":
                raise exception
            # object does not exist in the container yet: upload it
            self.upload_file(abspath, relpath)
            continue
        # parse the cloud object's Last-Modified header into a datetime;
        # falls back to None when the header is absent/empty
        cloud_datetime = (head["last-modified"] and
                          datetime.datetime.strptime(
                              head["last-modified"],
                              "%a, %d %b %Y %H:%M:%S %Z")
                          or None)
        # local mtime converted to UTC so both timestamps are comparable
        local_datetime = datetime.datetime.utcfromtimestamp(os.stat(abspath).st_mtime)
        if cloud_datetime and local_datetime < cloud_datetime:
            # local copy is older than the cloud copy: nothing to do
            self.skip_count += 1
            if not self.quiet:
                print("Skipped {0}: not modified.".format(relpath))
            continue
        self.upload_file(abspath, relpath)
def upload_file(self, abspath, cloud_filename):
    """
    Uploads a file to the container.

    Optionally gzips the contents first (for MIME types listed in
    CUMULUS["GZIP_CONTENT_TYPES"]) and tags the object with a matching
    Content-Encoding header.  In test-run mode nothing is transferred;
    only the counter and output are updated.
    """
    if not self.test_run:
        headers = None
        # BUG FIX: the file handle was never closed; "with" guarantees it
        with open(abspath, "rb") as file_obj:
            contents = file_obj
            size = os.stat(abspath).st_size
            mime_type, encoding = mimetypes.guess_type(abspath)
            if mime_type in CUMULUS.get("GZIP_CONTENT_TYPES", []):
                headers = {'Content-Encoding': 'gzip'}
                contents = get_gzipped_contents(contents)
                size = contents.size
            self.conn.put_object(container=self.container_name,
                                 obj=cloud_filename,
                                 contents=contents,
                                 content_length=size,
                                 etag=None,
                                 content_type=mime_type,
                                 headers=headers)
        # TODO syncheaders
        #sync_headers(cloud_obj)
    # BUG FIX: this incremented create_count, so upload_count stayed at
    # zero forever and print_tally's update_count (upload - create) went
    # negative.  Count the upload itself here.
    self.upload_count += 1
    if not self.quiet or self.verbosity > 1:
        print("Uploaded: {0}".format(cloud_filename))
def delete_extra_files(self, relpaths, cloud_objs):
    """
    Deletes any objects from the container that do not exist locally.
    """
    local_names = set(relpaths)
    for name in cloud_objs:
        if name in local_names:
            continue
        # in test-run mode, report the deletion without performing it
        if not self.test_run:
            self.delete_cloud_obj(name)
        self.delete_count += 1
        if not self.quiet or self.verbosity > 1:
            print("Deleted: {0}".format(name))
def delete_cloud_obj(self, cloud_obj):
    """Remove a single object, identified by name, from the container."""
    self.conn.delete_object(container=self.container_name, obj=cloud_obj)
def wipe_container(self):
    """
    Completely wipes out the contents of the container.

    In test-run mode only reports how many objects would be removed.
    """
    num_objects = len(self.container[1])
    if self.test_run:
        print("Wipe would delete {0} objects.".format(num_objects))
        return
    if not self.quiet or self.verbosity > 1:
        print("Deleting {0} objects...".format(num_objects))
    for cloud_obj in self.container[1]:
        self.conn.delete_object(self.container_name, cloud_obj["name"])
def print_tally(self):
    """
    Prints the final tally to stdout.
    """
    # updates are uploads of objects that already existed
    self.update_count = self.upload_count - self.create_count
    if self.test_run:
        print("Test run complete with the following results:")
    counts = (self.skip_count, self.create_count,
              self.update_count, self.delete_count)
    print("Skipped {0}. Created {1}. Updated {2}. Deleted {3}.".format(*counts))
| 42.825175 | 90 | 0.565562 |
402dee23e8dc66e8b61530eafcbee401d5944b22 | 1,309 | py | Python | fractal/mandelbrot.py | Gagaro/pyfractal | 50bfad41e45cf268074c9c184c85da6d25909c35 | [
"MIT"
] | null | null | null | fractal/mandelbrot.py | Gagaro/pyfractal | 50bfad41e45cf268074c9c184c85da6d25909c35 | [
"MIT"
] | null | null | null | fractal/mandelbrot.py | Gagaro/pyfractal | 50bfad41e45cf268074c9c184c85da6d25909c35 | [
"MIT"
] | null | null | null | import logging
try:
    import cmandelbrot
    CMANDELBROT = True
except ImportError:
    # fall back to the pure-Python implementation below
    CMANDELBROT = False
    logging.warning("cmandelbrot not present, using python implementation.")

# Default maximum number of iterations before a point is considered
# to belong to the set.
ITERATIONS = 300


class Mandelbrot(object):
    """Maps pixel coordinates of a width x height image onto a window of
    the complex plane and computes Mandelbrot escape iteration counts."""

    def __init__(self, width, height, min_r=-2.0, max_r=1.0, min_i=-1.5,
                 iterations=ITERATIONS):
        """
        :param width: image width in pixels (must be > 1)
        :param height: image height in pixels (must be > 1)
        :param min_r, max_r: real-axis bounds of the viewing window
        :param min_i: lower imaginary-axis bound; the upper bound is
            derived so the window keeps the image's aspect ratio
        :param iterations: escape-iteration cap
        """
        self.min_r = min_r
        self.max_r = max_r
        self.min_i = min_i
        # derive the top of the imaginary range from the aspect ratio
        self.max_i = min_i + (max_r - min_r) * height / width
        self.width = width
        self.height = height
        self.iterations = iterations
        # NOTE(review): these names are swapped relative to what they
        # hold -- pre_height is the per-pixel step along the *real*
        # (horizontal) axis and pre_width the step along the *imaginary*
        # (vertical) axis.  Kept as-is for backward compatibility.
        # Raises ZeroDivisionError when width or height is 1.
        self.pre_height = (self.max_r - self.min_r) / (self.width - 1)
        self.pre_width = (self.max_i - self.min_i) / (self.height - 1)

    def get_c(self, x, y):
        """Return the complex-plane point (real, imaginary) for pixel (x, y)."""
        real = self.min_r + x * self.pre_height
        imaginary = self.min_i + y * self.pre_width
        return (real, imaginary)

    def get(self, x, y):
        """Return the escape iteration count for pixel (x, y)."""
        return self.mandelbrot(*(self.get_c(x, y)))

    def mandelbrot(self, real, imaginary):
        """
        Iterate z = z**2 + c for c = real + imaginary*1j and return the
        first iteration at which |z| exceeds 2, or ``self.iterations``
        if the point never escapes (i.e. is considered inside the set).
        """
        if CMANDELBROT:
            # delegate to the C extension when available
            return cmandelbrot.mandelbrot(real, imaginary, self.iterations)
        c = complex(real, imaginary)
        z = c
        # BUG FIX: xrange is Python-2-only (NameError on Python 3);
        # range behaves identically here and the counts are small.
        for i in range(self.iterations):
            if abs(z) > 2:
                return i
            z = z * z + c
        return self.iterations
| 29.75 | 76 | 0.593583 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.