| column | dtype | range / notes |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 209 |
| max_stars_repo_name | string | length 5 to 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 4 to 209 |
| max_issues_repo_name | string | length 5 to 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 4 to 209 |
| max_forks_repo_name | string | length 5 to 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |
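The records below follow this schema. For orientation, here is a minimal loading sketch, assuming the table comes from a Hugging Face `datasets`-style source; the dataset identifier used is a placeholder, not the actual source of this dump.

```python
# Minimal loading sketch; "org/code-dataset" is a placeholder identifier.
from datasets import load_dataset

ds = load_dataset("org/code-dataset", split="train", streaming=True)
row = next(iter(ds))
print(row["max_stars_repo_name"], row["size"], row["alphanum_fraction"])
```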
hexsha: 711ec939f77114f558ae32de6989f06e57d48c3a | size: 1,223 | ext: py | lang: Python
repo: vanillaSlice/best-new-music-digest | path: best_new_music_digest/email.py | head: feaa01cf477d3fef944ad2b4047b4f2b9cd44d1c | licenses: ["MIT"]
stars: 1 (2021-01-16T17:48:05.000Z to 2021-01-16T17:48:05.000Z) | issues: 2 (2021-01-21T12:53:47.000Z to 2021-10-20T06:30:38.000Z) | forks: null
content:
# pylint: disable=broad-except
"""
Emails.
"""
from datetime import datetime
from sendgrid import SendGridAPIClient
from sendgrid.helpers.mail import Mail
from best_new_music_digest import settings
def send_email(digest, dad_joke=None, albums_playlist_url=None, tracks_playlist_url=None):
"""
Sends out digest email.
"""
if settings.ALWAYS_EMAIL:
print("Always email is enabled")
should_send = any(d["items"] or d["errors"] for d in digest) or settings.ALWAYS_EMAIL
if not should_send:
print("No items or errors to email about")
return
message = Mail(
from_email=(settings.SENDER_EMAIL, settings.SENDER_NAME),
to_emails=settings.RECIPIENT_EMAIL,
)
message.template_id = settings.SENDGRID_TEMPLATE_ID
message.dynamic_template_data = {
"date": datetime.utcnow().strftime("%d/%m/%Y"),
"dad_joke": dad_joke,
"digest": digest,
"albums_playlist_url": albums_playlist_url,
"tracks_playlist_url": tracks_playlist_url,
}
try:
SendGridAPIClient(settings.SENDGRID_API_KEY).send(message)
except Exception as exception:
print("Failed to send email")
print(exception)
avg_line_length: 24.959184 | max_line_length: 90 | alphanum_fraction: 0.686018
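A minimal usage sketch for the sample above. It assumes the package and its SendGrid-related settings are configured; the digest data shown is hypothetical, and only the "items" and "errors" keys are actually inspected by `send_email`.

```python
# Usage sketch (hypothetical data): "items"/"errors" drive the send decision,
# the rest of each dict is passed straight to the SendGrid template.
from best_new_music_digest.email import send_email

digest = [
    {"items": ["Artist - Album"], "errors": []},
    {"items": [], "errors": ["scraper timed out"]},
]
send_email(digest, dad_joke="Why did the album cross the road?")
```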
hexsha: 97513dfea26c9288a06e091593ec2f86912a9ff0 | size: 303 | ext: py | lang: Python
repo: toastisme/cctbx_project | path: iotbx/pdb/nucleic_acid_codes.py | head: d1a25147b5958822b6923fb55260749ccf9350ff | licenses: ["BSD-3-Clause-LBNL"]
stars: 2 (2018-02-01T14:25:48.000Z to 2021-09-15T16:36:29.000Z) | issues: 2 (2018-06-14T17:04:17.000Z to 2019-06-24T20:54:12.000Z) | forks: 1 (2022-02-08T10:11:07.000Z to 2022-02-08T10:11:07.000Z)
content:
from __future__ import absolute_import, division, print_function
rna_one_letter_code_dict = {
'ADE':'A',
'CYT':'C',
'URI':'U',
'GUA':'G',
'A':'A',
'C':'C',
'U':'U',
'G':'G',}
dna_one_letter_code_dict = {
'ADE':'A',
'CYT':'C',
'THY':'T',
'GUA':'G',
'A':'A',
'C':'C',
'T':'T',
'G':'G',}
avg_line_length: 13.772727 | max_line_length: 64 | alphanum_fraction: 0.50495
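A short usage sketch for the lookup tables above, written as if appended to the module so the dictionaries are already in scope:

```python
# Translate three-letter residue names to one-letter codes using the maps above.
rna_residues = ["ADE", "URI", "GUA"]
print("".join(rna_one_letter_code_dict[r] for r in rna_residues))  # AUG

dna_residues = ["GUA", "CYT", "THY", "A"]
print("".join(dna_one_letter_code_dict[r] for r in dna_residues))  # GCTA
```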
hexsha: d0029b373720e5025b49232c97c5e79a8120486a | size: 448 | ext: py | lang: Python
repo: roverdotcom/django-geo | path: django_geo/admin.py | head: f832a7f434fa4e2baa54a859596468a283ce7f43 | licenses: ["MIT"]
stars: null | issues: 1 (2018-05-14T20:04:03.000Z to 2018-05-17T22:11:09.000Z) | forks: null
content:
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from __future__ import unicode_literals
from django.contrib import admin
from .models import ZipCode
class ZipCodeAdmin(admin.ModelAdmin):
list_display = ('zip_code', 'latitude', 'longitude', 'state', 'city')
list_filter = ('state',)
search_fields = ('zip_code', 'state', 'city')
admin.site.register(ZipCode, ZipCodeAdmin)
avg_line_length: 28 | max_line_length: 73 | alphanum_fraction: 0.767857
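For reference, the same registration can be written with Django's decorator form; this is an equivalent sketch, not part of the original file:

```python
from django.contrib import admin
from .models import ZipCode

@admin.register(ZipCode)
class ZipCodeAdmin(admin.ModelAdmin):
    list_display = ('zip_code', 'latitude', 'longitude', 'state', 'city')
    list_filter = ('state',)
    search_fields = ('zip_code', 'state', 'city')
```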
hexsha: c2a75185eca108ec3066c1510e4e8604764fa009 | size: 41,300 | ext: py | lang: Python
repo: star10919/drf | path: venv/Lib/site-packages/sklearn/datasets/_base.py | head: 77c005794087484d72ffc0d76612a6ac9845821e | licenses: ["BSD-3-Clause"]
stars: 7 (2021-01-30T17:42:00.000Z to 2022-01-09T08:08:48.000Z) | issues: 25 (2020-11-16T15:36:41.000Z to 2021-06-01T05:15:31.000Z) | forks: 2 (2021-05-23T16:46:31.000Z to 2021-05-26T23:51:09.000Z)
content:
"""
Base IO code for all datasets
"""
# Copyright (c) 2007 David Cournapeau <cournape@gmail.com>
# 2010 Fabian Pedregosa <fabian.pedregosa@inria.fr>
# 2010 Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
import csv
import hashlib
import os
import shutil
from collections import namedtuple
from os import environ, listdir, makedirs
from os.path import dirname, expanduser, isdir, join, splitext
from ..utils import Bunch
from ..utils import check_random_state
from ..utils import check_pandas_support
from ..utils.validation import _deprecate_positional_args
import numpy as np
from urllib.request import urlretrieve
RemoteFileMetadata = namedtuple('RemoteFileMetadata',
['filename', 'url', 'checksum'])
def get_data_home(data_home=None) -> str:
"""Return the path of the scikit-learn data dir.
This folder is used by some large dataset loaders to avoid downloading the
data several times.
By default the data dir is set to a folder named 'scikit_learn_data' in the
user home folder.
Alternatively, it can be set by the 'SCIKIT_LEARN_DATA' environment
variable or programmatically by giving an explicit folder path. The '~'
symbol is expanded to the user home folder.
If the folder does not already exist, it is automatically created.
Parameters
----------
data_home : str, default=None
        The path to scikit-learn data directory. If `None`, the default path
        is `~/scikit_learn_data`.
"""
if data_home is None:
data_home = environ.get('SCIKIT_LEARN_DATA',
join('~', 'scikit_learn_data'))
data_home = expanduser(data_home)
makedirs(data_home, exist_ok=True)
return data_home
def clear_data_home(data_home=None):
"""Delete all the content of the data home cache.
Parameters
----------
data_home : str, default=None
        The path to scikit-learn data directory. If `None`, the default path
        is `~/scikit_learn_data`.
"""
data_home = get_data_home(data_home)
shutil.rmtree(data_home)
def _convert_data_dataframe(caller_name, data, target,
feature_names, target_names, sparse_data=False):
pd = check_pandas_support('{} with as_frame=True'.format(caller_name))
if not sparse_data:
data_df = pd.DataFrame(data, columns=feature_names)
else:
data_df = pd.DataFrame.sparse.from_spmatrix(
data, columns=feature_names
)
target_df = pd.DataFrame(target, columns=target_names)
combined_df = pd.concat([data_df, target_df], axis=1)
X = combined_df[feature_names]
y = combined_df[target_names]
if y.shape[1] == 1:
y = y.iloc[:, 0]
return combined_df, X, y
@_deprecate_positional_args
def load_files(container_path, *, description=None, categories=None,
load_content=True, shuffle=True, encoding=None,
decode_error='strict', random_state=0):
"""Load text files with categories as subfolder names.
    Individual samples are assumed to be files stored in a two-level folder
    structure such as the following:
container_folder/
category_1_folder/
file_1.txt
file_2.txt
...
file_42.txt
category_2_folder/
file_43.txt
file_44.txt
...
The folder names are used as supervised signal label names. The individual
file names are not important.
This function does not try to extract features into a numpy array or scipy
sparse matrix. In addition, if load_content is false it does not try to
load the files in memory.
To use text files in a scikit-learn classification or clustering algorithm,
    you will need to use the :mod:`~sklearn.feature_extraction.text` module to
build a feature extraction transformer that suits your problem.
If you set load_content=True, you should also specify the encoding of the
text using the 'encoding' parameter. For many modern text files, 'utf-8'
will be the correct encoding. If you leave encoding equal to None, then the
content will be made of bytes instead of Unicode, and you will not be able
to use most functions in :mod:`~sklearn.feature_extraction.text`.
Similar feature extractors should be built for other kind of unstructured
data input such as images, audio, video, ...
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
container_path : str or unicode
Path to the main folder holding one subfolder per category
description : str or unicode, default=None
A paragraph describing the characteristic of the dataset: its source,
reference, etc.
categories : list of str, default=None
If None (default), load all the categories. If not None, list of
category names to load (other categories ignored).
load_content : bool, default=True
Whether to load or not the content of the different files. If true a
'data' attribute containing the text information is present in the data
structure returned. If not, a filenames attribute gives the path to the
files.
shuffle : bool, default=True
Whether or not to shuffle the data: might be important for models that
make the assumption that the samples are independent and identically
distributed (i.i.d.), such as stochastic gradient descent.
encoding : str, default=None
If None, do not try to decode the content of the files (e.g. for images
or other non-text content). If not None, encoding to use to decode text
files to Unicode if load_content is True.
decode_error : {'strict', 'ignore', 'replace'}, default='strict'
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. Passed as keyword
argument 'errors' to bytes.decode.
random_state : int, RandomState instance or None, default=0
Determines random number generation for dataset shuffling. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
data : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
data : list of str
Only present when `load_content=True`.
The raw text data to learn.
target : ndarray
The target labels (integer index).
target_names : list
The names of target classes.
DESCR : str
The full description of the dataset.
filenames: ndarray
The filenames holding the dataset.
"""
target = []
target_names = []
filenames = []
folders = [f for f in sorted(listdir(container_path))
if isdir(join(container_path, f))]
if categories is not None:
folders = [f for f in folders if f in categories]
for label, folder in enumerate(folders):
target_names.append(folder)
folder_path = join(container_path, folder)
documents = [join(folder_path, d)
for d in sorted(listdir(folder_path))]
target.extend(len(documents) * [label])
filenames.extend(documents)
# convert to array for fancy indexing
filenames = np.array(filenames)
target = np.array(target)
if shuffle:
random_state = check_random_state(random_state)
indices = np.arange(filenames.shape[0])
random_state.shuffle(indices)
filenames = filenames[indices]
target = target[indices]
if load_content:
data = []
for filename in filenames:
with open(filename, 'rb') as f:
data.append(f.read())
if encoding is not None:
data = [d.decode(encoding, decode_error) for d in data]
return Bunch(data=data,
filenames=filenames,
target_names=target_names,
target=target,
DESCR=description)
return Bunch(filenames=filenames,
target_names=target_names,
target=target,
DESCR=description)
def load_data(module_path, data_file_name):
"""Loads data from module_path/data/data_file_name.
Parameters
----------
module_path : string
The module path.
data_file_name : string
Name of csv file to be loaded from
module_path/data/data_file_name. For example 'wine_data.csv'.
Returns
-------
data : Numpy array
A 2D array with each row representing one sample and each column
representing the features of a given sample.
target : Numpy array
        A 1D array holding target variables for all the samples in `data`.
        For example target[0] is the target variable for data[0].
target_names : Numpy array
A 1D array containing the names of the classifications. For example
target_names[0] is the name of the target[0] class.
"""
with open(join(module_path, 'data', data_file_name)) as csv_file:
data_file = csv.reader(csv_file)
temp = next(data_file)
n_samples = int(temp[0])
n_features = int(temp[1])
target_names = np.array(temp[2:])
data = np.empty((n_samples, n_features))
target = np.empty((n_samples,), dtype=int)
for i, ir in enumerate(data_file):
data[i] = np.asarray(ir[:-1], dtype=np.float64)
target[i] = np.asarray(ir[-1], dtype=int)
return data, target, target_names
@_deprecate_positional_args
def load_wine(*, return_X_y=False, as_frame=False):
"""Load and return the wine dataset (classification).
.. versionadded:: 0.18
The wine dataset is a classic and very easy multi-class classification
dataset.
================= ==============
Classes 3
Samples per class [59,71,48]
Samples total 178
Dimensionality 13
Features real, positive
================= ==============
Read more in the :ref:`User Guide <wine_dataset>`.
Parameters
----------
return_X_y : bool, default=False
If True, returns ``(data, target)`` instead of a Bunch object.
See below for more information about the `data` and `target` object.
as_frame : bool, default=False
If True, the data is a pandas DataFrame including columns with
appropriate dtypes (numeric). The target is
a pandas DataFrame or Series depending on the number of target columns.
If `return_X_y` is True, then (`data`, `target`) will be pandas
DataFrames or Series as described below.
.. versionadded:: 0.23
Returns
-------
data : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
data : {ndarray, dataframe} of shape (178, 13)
The data matrix. If `as_frame=True`, `data` will be a pandas
DataFrame.
target: {ndarray, Series} of shape (178,)
The classification target. If `as_frame=True`, `target` will be
a pandas Series.
feature_names: list
The names of the dataset columns.
target_names: list
The names of target classes.
frame: DataFrame of shape (178, 14)
Only present when `as_frame=True`. DataFrame with `data` and
`target`.
.. versionadded:: 0.23
DESCR: str
The full description of the dataset.
(data, target) : tuple if ``return_X_y`` is True
The copy of UCI ML Wine Data Set dataset is downloaded and modified to fit
standard format from:
https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data
Examples
--------
Let's say you are interested in the samples 10, 80, and 140, and want to
know their class name.
>>> from sklearn.datasets import load_wine
>>> data = load_wine()
>>> data.target[[10, 80, 140]]
array([0, 1, 2])
>>> list(data.target_names)
['class_0', 'class_1', 'class_2']
"""
module_path = dirname(__file__)
data, target, target_names = load_data(module_path, 'wine_data.csv')
with open(join(module_path, 'descr', 'wine_data.rst')) as rst_file:
fdescr = rst_file.read()
feature_names = ['alcohol',
'malic_acid',
'ash',
'alcalinity_of_ash',
'magnesium',
'total_phenols',
'flavanoids',
'nonflavanoid_phenols',
'proanthocyanins',
'color_intensity',
'hue',
'od280/od315_of_diluted_wines',
'proline']
frame = None
target_columns = ['target', ]
if as_frame:
frame, data, target = _convert_data_dataframe("load_wine",
data,
target,
feature_names,
target_columns)
if return_X_y:
return data, target
return Bunch(data=data,
target=target,
frame=frame,
target_names=target_names,
DESCR=fdescr,
feature_names=feature_names)
@_deprecate_positional_args
def load_iris(*, return_X_y=False, as_frame=False):
"""Load and return the iris dataset (classification).
The iris dataset is a classic and very easy multi-class classification
dataset.
================= ==============
Classes 3
Samples per class 50
Samples total 150
Dimensionality 4
Features real, positive
================= ==============
Read more in the :ref:`User Guide <iris_dataset>`.
Parameters
----------
return_X_y : bool, default=False
If True, returns ``(data, target)`` instead of a Bunch object. See
below for more information about the `data` and `target` object.
.. versionadded:: 0.18
as_frame : bool, default=False
If True, the data is a pandas DataFrame including columns with
appropriate dtypes (numeric). The target is
a pandas DataFrame or Series depending on the number of target columns.
If `return_X_y` is True, then (`data`, `target`) will be pandas
DataFrames or Series as described below.
.. versionadded:: 0.23
Returns
-------
data : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
data : {ndarray, dataframe} of shape (150, 4)
The data matrix. If `as_frame=True`, `data` will be a pandas
DataFrame.
target: {ndarray, Series} of shape (150,)
The classification target. If `as_frame=True`, `target` will be
a pandas Series.
feature_names: list
The names of the dataset columns.
target_names: list
The names of target classes.
frame: DataFrame of shape (150, 5)
Only present when `as_frame=True`. DataFrame with `data` and
`target`.
.. versionadded:: 0.23
DESCR: str
The full description of the dataset.
filename: str
The path to the location of the data.
.. versionadded:: 0.20
(data, target) : tuple if ``return_X_y`` is True
.. versionadded:: 0.18
Notes
-----
.. versionchanged:: 0.20
Fixed two wrong data points according to Fisher's paper.
The new version is the same as in R, but not as in the UCI
Machine Learning Repository.
Examples
--------
Let's say you are interested in the samples 10, 25, and 50, and want to
know their class name.
>>> from sklearn.datasets import load_iris
>>> data = load_iris()
>>> data.target[[10, 25, 50]]
array([0, 0, 1])
>>> list(data.target_names)
['setosa', 'versicolor', 'virginica']
"""
module_path = dirname(__file__)
data, target, target_names = load_data(module_path, 'iris.csv')
iris_csv_filename = join(module_path, 'data', 'iris.csv')
with open(join(module_path, 'descr', 'iris.rst')) as rst_file:
fdescr = rst_file.read()
feature_names = ['sepal length (cm)', 'sepal width (cm)',
'petal length (cm)', 'petal width (cm)']
frame = None
target_columns = ['target', ]
if as_frame:
frame, data, target = _convert_data_dataframe("load_iris",
data,
target,
feature_names,
target_columns)
if return_X_y:
return data, target
return Bunch(data=data,
target=target,
frame=frame,
target_names=target_names,
DESCR=fdescr,
feature_names=feature_names,
filename=iris_csv_filename)
@_deprecate_positional_args
def load_breast_cancer(*, return_X_y=False, as_frame=False):
"""Load and return the breast cancer wisconsin dataset (classification).
The breast cancer dataset is a classic and very easy binary classification
dataset.
================= ==============
Classes 2
Samples per class 212(M),357(B)
Samples total 569
Dimensionality 30
Features real, positive
================= ==============
Read more in the :ref:`User Guide <breast_cancer_dataset>`.
Parameters
----------
return_X_y : bool, default=False
If True, returns ``(data, target)`` instead of a Bunch object.
See below for more information about the `data` and `target` object.
.. versionadded:: 0.18
as_frame : bool, default=False
If True, the data is a pandas DataFrame including columns with
appropriate dtypes (numeric). The target is
a pandas DataFrame or Series depending on the number of target columns.
If `return_X_y` is True, then (`data`, `target`) will be pandas
DataFrames or Series as described below.
.. versionadded:: 0.23
Returns
-------
data : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
data : {ndarray, dataframe} of shape (569, 30)
The data matrix. If `as_frame=True`, `data` will be a pandas
DataFrame.
target: {ndarray, Series} of shape (569,)
The classification target. If `as_frame=True`, `target` will be
a pandas Series.
feature_names: list
The names of the dataset columns.
target_names: list
The names of target classes.
frame: DataFrame of shape (569, 31)
Only present when `as_frame=True`. DataFrame with `data` and
`target`.
.. versionadded:: 0.23
DESCR: str
The full description of the dataset.
filename: str
The path to the location of the data.
.. versionadded:: 0.20
(data, target) : tuple if ``return_X_y`` is True
.. versionadded:: 0.18
The copy of UCI ML Breast Cancer Wisconsin (Diagnostic) dataset is
downloaded from:
https://goo.gl/U2Uwz2
Examples
--------
Let's say you are interested in the samples 10, 50, and 85, and want to
know their class name.
>>> from sklearn.datasets import load_breast_cancer
>>> data = load_breast_cancer()
>>> data.target[[10, 50, 85]]
array([0, 1, 0])
>>> list(data.target_names)
['malignant', 'benign']
"""
module_path = dirname(__file__)
data, target, target_names = load_data(module_path, 'breast_cancer.csv')
csv_filename = join(module_path, 'data', 'breast_cancer.csv')
with open(join(module_path, 'descr', 'breast_cancer.rst')) as rst_file:
fdescr = rst_file.read()
feature_names = np.array(['mean radius', 'mean texture',
'mean perimeter', 'mean area',
'mean smoothness', 'mean compactness',
'mean concavity', 'mean concave points',
'mean symmetry', 'mean fractal dimension',
'radius error', 'texture error',
'perimeter error', 'area error',
'smoothness error', 'compactness error',
'concavity error', 'concave points error',
'symmetry error', 'fractal dimension error',
'worst radius', 'worst texture',
'worst perimeter', 'worst area',
'worst smoothness', 'worst compactness',
'worst concavity', 'worst concave points',
'worst symmetry', 'worst fractal dimension'])
frame = None
target_columns = ['target', ]
if as_frame:
frame, data, target = _convert_data_dataframe("load_breast_cancer",
data,
target,
feature_names,
target_columns)
if return_X_y:
return data, target
return Bunch(data=data,
target=target,
frame=frame,
target_names=target_names,
DESCR=fdescr,
feature_names=feature_names,
filename=csv_filename)
@_deprecate_positional_args
def load_digits(*, n_class=10, return_X_y=False, as_frame=False):
"""Load and return the digits dataset (classification).
Each datapoint is a 8x8 image of a digit.
================= ==============
Classes 10
Samples per class ~180
Samples total 1797
Dimensionality 64
Features integers 0-16
================= ==============
Read more in the :ref:`User Guide <digits_dataset>`.
Parameters
----------
n_class : int, default=10
The number of classes to return. Between 0 and 10.
return_X_y : bool, default=False
If True, returns ``(data, target)`` instead of a Bunch object.
See below for more information about the `data` and `target` object.
.. versionadded:: 0.18
as_frame : bool, default=False
If True, the data is a pandas DataFrame including columns with
appropriate dtypes (numeric). The target is
a pandas DataFrame or Series depending on the number of target columns.
If `return_X_y` is True, then (`data`, `target`) will be pandas
DataFrames or Series as described below.
.. versionadded:: 0.23
Returns
-------
data : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
data : {ndarray, dataframe} of shape (1797, 64)
The flattened data matrix. If `as_frame=True`, `data` will be
a pandas DataFrame.
target: {ndarray, Series} of shape (1797,)
The classification target. If `as_frame=True`, `target` will be
a pandas Series.
feature_names: list
The names of the dataset columns.
target_names: list
The names of target classes.
.. versionadded:: 0.20
frame: DataFrame of shape (1797, 65)
Only present when `as_frame=True`. DataFrame with `data` and
`target`.
.. versionadded:: 0.23
images: {ndarray} of shape (1797, 8, 8)
The raw image data.
DESCR: str
The full description of the dataset.
(data, target) : tuple if ``return_X_y`` is True
.. versionadded:: 0.18
This is a copy of the test set of the UCI ML hand-written digits datasets
https://archive.ics.uci.edu/ml/datasets/Optical+Recognition+of+Handwritten+Digits
Examples
--------
To load the data and visualize the images::
>>> from sklearn.datasets import load_digits
>>> digits = load_digits()
>>> print(digits.data.shape)
(1797, 64)
>>> import matplotlib.pyplot as plt #doctest: +SKIP
>>> plt.gray() #doctest: +SKIP
>>> plt.matshow(digits.images[0]) #doctest: +SKIP
>>> plt.show() #doctest: +SKIP
"""
module_path = dirname(__file__)
data = np.loadtxt(join(module_path, 'data', 'digits.csv.gz'),
delimiter=',')
with open(join(module_path, 'descr', 'digits.rst')) as f:
descr = f.read()
target = data[:, -1].astype(int, copy=False)
flat_data = data[:, :-1]
images = flat_data.view()
images.shape = (-1, 8, 8)
if n_class < 10:
idx = target < n_class
flat_data, target = flat_data[idx], target[idx]
images = images[idx]
feature_names = ['pixel_{}_{}'.format(row_idx, col_idx)
for row_idx in range(8)
for col_idx in range(8)]
frame = None
target_columns = ['target', ]
if as_frame:
frame, flat_data, target = _convert_data_dataframe("load_digits",
flat_data,
target,
feature_names,
target_columns)
if return_X_y:
return flat_data, target
return Bunch(data=flat_data,
target=target,
frame=frame,
feature_names=feature_names,
target_names=np.arange(10),
images=images,
DESCR=descr)
@_deprecate_positional_args
def load_diabetes(*, return_X_y=False, as_frame=False):
"""Load and return the diabetes dataset (regression).
============== ==================
Samples total 442
Dimensionality 10
Features real, -.2 < x < .2
Targets integer 25 - 346
============== ==================
.. note::
The meaning of each feature (i.e. `feature_names`) might be unclear
(especially for `ltg`) as the documentation of the original dataset is
not explicit. We provide information that seems correct in regard with
the scientific literature in this field of research.
Read more in the :ref:`User Guide <diabetes_dataset>`.
Parameters
----------
return_X_y : bool, default=False.
If True, returns ``(data, target)`` instead of a Bunch object.
See below for more information about the `data` and `target` object.
.. versionadded:: 0.18
as_frame : bool, default=False
If True, the data is a pandas DataFrame including columns with
appropriate dtypes (numeric). The target is
a pandas DataFrame or Series depending on the number of target columns.
If `return_X_y` is True, then (`data`, `target`) will be pandas
DataFrames or Series as described below.
.. versionadded:: 0.23
Returns
-------
data : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
data : {ndarray, dataframe} of shape (442, 10)
The data matrix. If `as_frame=True`, `data` will be a pandas
DataFrame.
target: {ndarray, Series} of shape (442,)
The regression target. If `as_frame=True`, `target` will be
a pandas Series.
feature_names: list
The names of the dataset columns.
frame: DataFrame of shape (442, 11)
Only present when `as_frame=True`. DataFrame with `data` and
`target`.
.. versionadded:: 0.23
DESCR: str
The full description of the dataset.
data_filename: str
The path to the location of the data.
target_filename: str
The path to the location of the target.
(data, target) : tuple if ``return_X_y`` is True
.. versionadded:: 0.18
"""
module_path = dirname(__file__)
base_dir = join(module_path, 'data')
data_filename = join(base_dir, 'diabetes_data.csv.gz')
data = np.loadtxt(data_filename)
target_filename = join(base_dir, 'diabetes_target.csv.gz')
target = np.loadtxt(target_filename)
with open(join(module_path, 'descr', 'diabetes.rst')) as rst_file:
fdescr = rst_file.read()
feature_names = ['age', 'sex', 'bmi', 'bp',
's1', 's2', 's3', 's4', 's5', 's6']
frame = None
target_columns = ['target', ]
if as_frame:
frame, data, target = _convert_data_dataframe("load_diabetes",
data,
target,
feature_names,
target_columns)
if return_X_y:
return data, target
return Bunch(data=data,
target=target,
frame=frame,
DESCR=fdescr,
feature_names=feature_names,
data_filename=data_filename,
target_filename=target_filename)
@_deprecate_positional_args
def load_linnerud(*, return_X_y=False, as_frame=False):
"""Load and return the physical excercise linnerud dataset.
This dataset is suitable for multi-ouput regression tasks.
============== ============================
Samples total 20
Dimensionality 3 (for both data and target)
Features integer
Targets integer
============== ============================
Read more in the :ref:`User Guide <linnerrud_dataset>`.
Parameters
----------
return_X_y : bool, default=False
If True, returns ``(data, target)`` instead of a Bunch object.
See below for more information about the `data` and `target` object.
.. versionadded:: 0.18
as_frame : bool, default=False
If True, the data is a pandas DataFrame including columns with
appropriate dtypes (numeric, string or categorical). The target is
a pandas DataFrame or Series depending on the number of target columns.
If `return_X_y` is True, then (`data`, `target`) will be pandas
DataFrames or Series as described below.
.. versionadded:: 0.23
Returns
-------
data : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
data : {ndarray, dataframe} of shape (20, 3)
The data matrix. If `as_frame=True`, `data` will be a pandas
DataFrame.
target: {ndarray, dataframe} of shape (20, 3)
The regression targets. If `as_frame=True`, `target` will be
a pandas DataFrame.
feature_names: list
The names of the dataset columns.
target_names: list
The names of the target columns.
frame: DataFrame of shape (20, 6)
Only present when `as_frame=True`. DataFrame with `data` and
`target`.
.. versionadded:: 0.23
DESCR: str
The full description of the dataset.
data_filename: str
The path to the location of the data.
target_filename: str
The path to the location of the target.
.. versionadded:: 0.20
(data, target) : tuple if ``return_X_y`` is True
.. versionadded:: 0.18
"""
base_dir = join(dirname(__file__), 'data/')
data_filename = join(base_dir, 'linnerud_exercise.csv')
target_filename = join(base_dir, 'linnerud_physiological.csv')
# Read data
data_exercise = np.loadtxt(data_filename, skiprows=1)
data_physiological = np.loadtxt(target_filename, skiprows=1)
# Read header
with open(data_filename) as f:
header_exercise = f.readline().split()
with open(target_filename) as f:
header_physiological = f.readline().split()
with open(dirname(__file__) + '/descr/linnerud.rst') as f:
descr = f.read()
frame = None
if as_frame:
(frame,
data_exercise,
data_physiological) = _convert_data_dataframe("load_linnerud",
data_exercise,
data_physiological,
header_exercise,
header_physiological)
if return_X_y:
return data_exercise, data_physiological
return Bunch(data=data_exercise,
feature_names=header_exercise,
target=data_physiological,
target_names=header_physiological,
frame=frame,
DESCR=descr,
data_filename=data_filename,
target_filename=target_filename)
@_deprecate_positional_args
def load_boston(*, return_X_y=False):
"""Load and return the boston house-prices dataset (regression).
============== ==============
Samples total 506
Dimensionality 13
Features real, positive
Targets real 5. - 50.
============== ==============
Read more in the :ref:`User Guide <boston_dataset>`.
Parameters
----------
return_X_y : bool, default=False
If True, returns ``(data, target)`` instead of a Bunch object.
See below for more information about the `data` and `target` object.
.. versionadded:: 0.18
Returns
-------
data : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
data : ndarray of shape (506, 13)
The data matrix.
target : ndarray of shape (506, )
The regression target.
filename : str
The physical location of boston csv dataset.
.. versionadded:: 0.20
DESCR : str
The full description of the dataset.
feature_names : ndarray
The names of features
(data, target) : tuple if ``return_X_y`` is True
.. versionadded:: 0.18
Notes
-----
.. versionchanged:: 0.20
Fixed a wrong data point at [445, 0].
Examples
--------
>>> from sklearn.datasets import load_boston
>>> X, y = load_boston(return_X_y=True)
>>> print(X.shape)
(506, 13)
"""
module_path = dirname(__file__)
fdescr_name = join(module_path, 'descr', 'boston_house_prices.rst')
with open(fdescr_name) as f:
descr_text = f.read()
data_file_name = join(module_path, 'data', 'boston_house_prices.csv')
with open(data_file_name) as f:
data_file = csv.reader(f)
temp = next(data_file)
n_samples = int(temp[0])
n_features = int(temp[1])
data = np.empty((n_samples, n_features))
target = np.empty((n_samples,))
temp = next(data_file) # names of features
feature_names = np.array(temp)
for i, d in enumerate(data_file):
data[i] = np.asarray(d[:-1], dtype=np.float64)
target[i] = np.asarray(d[-1], dtype=np.float64)
if return_X_y:
return data, target
return Bunch(data=data,
target=target,
# last column is target value
feature_names=feature_names[:-1],
DESCR=descr_text,
filename=data_file_name)
def load_sample_images():
"""Load sample images for image manipulation.
Loads both, ``china`` and ``flower``.
Read more in the :ref:`User Guide <sample_images>`.
Returns
-------
data : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
images : list of ndarray of shape (427, 640, 3)
            The two sample images.
filenames : list
The filenames for the images.
DESCR : str
The full description of the dataset.
Examples
--------
To load the data and visualize the images:
>>> from sklearn.datasets import load_sample_images
>>> dataset = load_sample_images() #doctest: +SKIP
>>> len(dataset.images) #doctest: +SKIP
2
>>> first_img_data = dataset.images[0] #doctest: +SKIP
>>> first_img_data.shape #doctest: +SKIP
(427, 640, 3)
>>> first_img_data.dtype #doctest: +SKIP
dtype('uint8')
"""
# import PIL only when needed
from ..externals._pilutil import imread
module_path = join(dirname(__file__), "images")
with open(join(module_path, 'README.txt')) as f:
descr = f.read()
filenames = [join(module_path, filename)
for filename in sorted(os.listdir(module_path))
if filename.endswith(".jpg")]
# Load image data for each image in the source folder.
images = [imread(filename) for filename in filenames]
return Bunch(images=images,
filenames=filenames,
DESCR=descr)
def load_sample_image(image_name):
"""Load the numpy array of a single sample image
Read more in the :ref:`User Guide <sample_images>`.
Parameters
----------
image_name : {`china.jpg`, `flower.jpg`}
The name of the sample image loaded
Returns
-------
img : 3D array
The image as a numpy array: height x width x color
Examples
--------
>>> from sklearn.datasets import load_sample_image
>>> china = load_sample_image('china.jpg') # doctest: +SKIP
>>> china.dtype # doctest: +SKIP
dtype('uint8')
>>> china.shape # doctest: +SKIP
(427, 640, 3)
>>> flower = load_sample_image('flower.jpg') # doctest: +SKIP
>>> flower.dtype # doctest: +SKIP
dtype('uint8')
>>> flower.shape # doctest: +SKIP
(427, 640, 3)
"""
images = load_sample_images()
index = None
for i, filename in enumerate(images.filenames):
if filename.endswith(image_name):
index = i
break
if index is None:
raise AttributeError("Cannot find sample image: %s" % image_name)
return images.images[index]
def _pkl_filepath(*args, **kwargs):
"""Return filename for Python 3 pickles
args[-1] is expected to be the ".pkl" filename. For compatibility with
older scikit-learn versions, a suffix is inserted before the extension.
_pkl_filepath('/path/to/folder', 'filename.pkl') returns
'/path/to/folder/filename_py3.pkl'
"""
py3_suffix = kwargs.get("py3_suffix", "_py3")
basename, ext = splitext(args[-1])
basename += py3_suffix
new_args = args[:-1] + (basename + ext,)
return join(*new_args)
def _sha256(path):
"""Calculate the sha256 hash of the file at path."""
sha256hash = hashlib.sha256()
chunk_size = 8192
with open(path, "rb") as f:
while True:
buffer = f.read(chunk_size)
if not buffer:
break
sha256hash.update(buffer)
return sha256hash.hexdigest()
def _fetch_remote(remote, dirname=None):
"""Helper function to download a remote dataset into path
Fetch a dataset pointed by remote's url, save into path using remote's
filename and ensure its integrity based on the SHA256 Checksum of the
downloaded file.
Parameters
----------
remote : RemoteFileMetadata
Named tuple containing remote dataset meta information: url, filename
and checksum
dirname : string
Directory to save the file to.
Returns
-------
file_path: string
Full path of the created file.
"""
file_path = (remote.filename if dirname is None
else join(dirname, remote.filename))
urlretrieve(remote.url, file_path)
checksum = _sha256(file_path)
if remote.checksum != checksum:
raise IOError("{} has an SHA256 checksum ({}) "
"differing from expected ({}), "
"file may be corrupted.".format(file_path, checksum,
remote.checksum))
return file_path
avg_line_length: 34.359401 | max_line_length: 85 | alphanum_fraction: 0.578523
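A small usage sketch for `load_files` from the sample above; the directory path and layout are hypothetical and must exist for the call to succeed.

```python
# Expects a layout like /tmp/reviews/<category>/<file>.txt, where each
# subfolder name becomes a class label.
from sklearn.datasets import load_files

bunch = load_files("/tmp/reviews", encoding="utf-8", decode_error="replace")
print(bunch.target_names)           # one entry per subfolder
print(len(bunch.data), bunch.target[:5])
```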
hexsha: e1e58615ee4d6023ecaa9b21ca669b84f6d26279 | size: 387 | ext: py | lang: Python
repo: mrdulin/python-codelab | path: src/unittest/02/survey.py | head: 3d960a14a96b3a673b7dc2277d202069b1f8e778 | licenses: ["MIT"]
stars: null | issues: null | forks: 3 (2020-02-19T08:02:04.000Z to 2021-06-08T13:27:51.000Z)
content:
class AnonymousSurvey():
def __init__(self, question):
self.question = question
self.responses = []
def show_question(self):
print(self.question)
def store_response(self, response):
self.responses.append(response)
def show_results(self):
print('Survey results:')
for res in self.responses:
print('- ' + res)
avg_line_length: 24.1875 | max_line_length: 39 | alphanum_fraction: 0.607235
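Given the sample sits under src/unittest/, a minimal test sketch for it could look like the following; the test module itself is hypothetical and assumes survey.py above is importable.

```python
import unittest
from survey import AnonymousSurvey

class TestAnonymousSurvey(unittest.TestCase):
    def test_store_single_response(self):
        survey = AnonymousSurvey("What language did you first learn?")
        survey.store_response("Python")
        self.assertIn("Python", survey.responses)

if __name__ == "__main__":
    unittest.main()
```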
hexsha: 5b38fc13c035f7e4111b7500aceaf1d13cea58b0 | size: 4,848 | ext: py | lang: Python
repo: fegoa89/pypisd | path: pypisd/main.py | head: 1fefbf77fd2535c415a1342ffa4a0b10c87d657b | licenses: ["MIT"]
stars: null | issues: null | forks: null
content:
import sys
import os
import re
import csv
import toml
import argparse
import requests
import pathlib
from subprocess import Popen, PIPE, STDOUT
import concurrent.futures
from bs4 import BeautifulSoup
def cli():
parser = argparse.ArgumentParser()
parser.add_argument(
"-i",
"--input_file",
help="File use to read libraries from instead of the environment",
)
parser.add_argument(
"-o",
"--output_file",
help="File where the source distribution links will be saved, default 'pypi_sd_links.csv'",
)
args = parser.parse_args()
if args.input_file:
# Get libraries from file
lib_list = fetch_libraries_from_file(args.input_file)
else:
# Get libraries from environment
lib_list = fetch_libraries_from_environment()
# Fetch source distribution download link for each library & version
source_distribution_list = fetch_and_extract_details_for_library_list(lib_list)
# Write source distribution list to CSV
write_library_info_to_csv(source_distribution_list, args.output_file)
def fetch_libraries_from_environment() -> list[list[str]]:
lib_list_bytes = get_pip_list_stdout()
return extract_lib_list_from_bytes_output(lib_list_bytes)
def fetch_libraries_from_file(file_path: str) -> list[list[str]]:
    if not os.path.isfile(file_path):
        print(f"Input file {file_path} does not exist")
sys.exit(1)
file_suffix = pathlib.Path(file_path).suffix
if file_suffix == ".toml":
return fetch_lib_list_from_toml_file(file_path)
else:
return fetch_lib_list_from_standard_file(file_path)
def fetch_lib_list_from_toml_file(file_path: str) -> list[list[str]]:
data = toml.load(file_path)
dependencies = data["tool"]["poetry"]["dependencies"]
return [
[key, re.sub(r"[(\^\s*)|(\~\s*)]", "", val)]
for key, val in dependencies.items()
]
def fetch_lib_list_from_standard_file(file_path: str) -> list[list[str]]:
with open(file_path) as f:
lines = f.readlines()
return [
re.split("[<|>|~=|==|!=|<=|>=|===|, \!?:]+", line.strip()) for line in lines
]
def fetch_and_extract_details_for_library_list(lib_list: list) -> list[list[str]]:
source_distribution_list = list()
with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:
futures = []
for library in lib_list:
version = library[1] if len(library) == 2 else None
futures.append(
executor.submit(
get_source_distribution_link_for_library,
library=library[0],
version=version,
)
)
for future in concurrent.futures.as_completed(futures):
source_distribution_list.append(future.result())
return source_distribution_list
def get_pip_list_stdout() -> bytes:
pip_freeze_process = Popen(["pip", "list"], stdout=PIPE, stderr=STDOUT)
output, error = pip_freeze_process.communicate()
if error:
print(f"Error while getting list of libraries from environment {error}")
sys.exit(1)
return output
def extract_lib_list_from_bytes_output(pip_stdout: bytes) -> list:
lib_list = list()
for output_line in pip_stdout.splitlines()[2:]:
line = output_line.decode("utf-8").split()
if len(line) == 2:
lib_list.append(line)
return lib_list
def get_source_distribution_link_for_library(library: str, version: str) -> list:
if version:
url = f"https://pypi.org/project/{library}/{version}/#files"
else:
url = f"https://pypi.org/project/{library}/#files"
page = requests.get(url)
soup = BeautifulSoup(page.text, "html.parser")
library_license = soup.find("strong", text="License:")
library_license = (
library_license.next_sibling.strip() if library_license else "Not found"
)
get_download_link_div = soup.find("div", {"class": "card file__card"})
source_download_link = (
get_download_link_div.find("a")["href"]
if get_download_link_div
else f"Can not find download link for {library}, version {version}"
)
return [
library,
version if version else "using latest version",
library_license,
source_download_link,
]
def write_library_info_to_csv(sd_list: list[list[str]], file_name: str):
file_name = file_name if file_name else "pypi_sd_links.csv"
with open(file_name, "w", encoding="UTF8", newline="") as f:
writer = csv.writer(f)
# write the header
writer.writerow(
["library_name", "version", "license", "source_distribution_link"]
)
# write multiple rows
writer.writerows(sd_list)
print(f"Results available in {file_name}")
avg_line_length: 32.10596 | max_line_length: 99 | alphanum_fraction: 0.656147
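A small offline sketch of the requirements-file parsing path only (no network calls), assuming the functions above are in scope and a requirements.txt file exists alongside:

```python
# A line such as "requests>=2.31.0" is split into ["requests", "2.31.0"];
# a bare "toml" line stays as ["toml"].
lib_list = fetch_lib_list_from_standard_file("requirements.txt")
print(lib_list)
```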
hexsha: 0c4553cf4f13caa3177d80fc3fa1ede7061c60a9 | size: 1,172 | ext: py | lang: Python
repo: thomassonobe/more-data | path: moredata/parser/__init__.py | head: b3d4a8e32f385a69749c8139915e3638fcced37b | licenses: ["BSD-3-Clause"]
stars: null | issues: null | forks: null
content:
import json
import csv
from h3 import h3
from ..utils import read_json_from_file, load_json
from shapely.geometry import asPoint
from numpy import array
def _add_geo_location(doc):
doc["geo_location"] = asPoint(array([doc["longitude"], doc["latitude"]])).wkt
return doc
def _add_code_point(doc):
doc["code_h3"] = h3.geo_to_h3(doc["latitude"], doc["longitude"], 8)
return doc
def parse_document(data, **kwargs):
array_point_field = kwargs.get("array_point_field")
geo_location = kwargs.get("geo_location")
code_h3 = kwargs.get("code_h3")
for doc in read_json_from_file(data):
if geo_location:
            if array_point_field is not None:
doc[array_point_field] = [
_add_geo_location(points) for points in doc[array_point_field]
]
else:
doc = _add_geo_location(doc)
if code_h3:
            if array_point_field is not None:
doc[array_point_field] = [
_add_code_point(points) for points in doc[array_point_field]
]
else:
doc = _add_code_point(doc)
yield doc
avg_line_length: 28.585366 | max_line_length: 82 | alphanum_fraction: 0.614334
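A hypothetical usage sketch for `parse_document` above, assuming a points.json file whose documents carry "latitude" and "longitude" fields:

```python
# Enrich each document with a WKT point and an H3 cell, then consume the generator.
for doc in parse_document("points.json", geo_location=True, code_h3=True):
    print(doc["geo_location"], doc["code_h3"])
```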
hexsha: 4acc2ff137c1fe5fa06cfd0748f65746ddb51daa | size: 13,176 | ext: py | lang: Python
repo: ShiroDevC/portfolio | path: python/IndividualizedSpatialModels/ProgramFinal/ccobra_models/spatial_model_prem3_understood.py | head: f31ce2bc4a5a2725d4de40a22997dca2b361d367 | licenses: ["MIT"]
stars: null | issues: null | forks: null
content:
'''
Module for an individualized spatial model to be used in the CCOBRA Framework.
Each individualized model uses one or more individualizations implemented in the
modified spatial model.
This Module uses the 'premise three misinterpreted' individualization.
@author: Christian Breu <breuch@web.de>
'''
from copy import deepcopy
import ccobra
from spatial_reasoner import main_module_param
class SpatialModelParam(ccobra.CCobraModel):
"""
Model to test the individualization, which is responsible for the third premise.
If the individualization is active, the third premise will be inverted. The
individualization can be activated in the adapt function. The individualization
can be activated and deactivated dynamically. The rating and prior values need
to be over a specified threshold, to activate the individualization.
"""
# list for the previously given answers by the model
previous_model_ans = []
# Variable for the parameter assignment for the spatial model
parameter_assignment = [[False, False, False, False, False, False],
[False, False, False, False, False]]
# rating value for the individualization
ind_rating = 0
# prior of the individualization
ind_rating_prior = 0
def __init__(self, name='SpatialModelPrem3Understood'):
""" Initializes the Model by calling the parent-class constructor
and passing information about the name as well as supported domains
and response-types.
Each individualized model(except the categories model) can solve problems of
the "verify" and "single-choice" type.
Parameters
----------
name : str
Name of the model. Will be used as an identifier throughout the
evaluation phase. Should be unique.
"""
super(SpatialModelParam, self).__init__(
name, ["spatial-relational"], ["verify", "single-choice"])
# initializes the spatial model and the parameter settings
def start_participant(self, **kwargs):
""" Model initialization method. Used to setup the initial state of its
datastructures, memory, etc.
Resets the individualization's rating, activation and the stored problem answers.
**Attention**: Should reset the internal state of the model.
"""
# reset the stored answers of the spatial model for each participant
self.previous_model_ans = []
# reset the parameters for the next participant
self.parameter_assignment = [[False, False, False, False, False, False],
[False, False, False, False, False]]
# reset the rating of the individualization.
self.ind_rating = 0
def predict(self, item, parameters=None, **kwargs):
""" Generates a prediction based on a given task item.
Can make a prediction with verify and single-choice tasks.
The prediction will be made with the spatial model with individualizations.
The spatial model takes a list of activation parameters. With these parameters,
the spatial model will perform the task and return an appropriate answer.
Depending on the task type, different answer types are returned(see below)
Parameters
----------
item : ccobra.data.Item
Task item container. Holds information about the task, domain,
response type and response choices.
parameters : list
The list with the activation values for all the individualizations
in the model. (e.g., [[False, False, False, False, False, False],
[False, False, False, False, False]]).
Returns
-------
list(str)
Single-choice relational response in list representation (e.g.,
['A', 'left', 'B']) or a verification relational response in Boolean
representation.
"""
if parameters is None: # no parameters were given, use the current general assignment.
parameters = self.parameter_assignment
# initialize the spatial model
spatial_model = main_module_param.MainModule()
rel_prob = deepcopy(item.task) # get the problem premises
# convert the premises to the form [A, relation, B] to be used by the spatial model
for rel_prem in rel_prob:
relation = rel_prem[0]
rel_prem[0] = rel_prem[1]
rel_prem[1] = relation
# checks for the response type to choose an appropriate function from the
# spatial model and a correct conversion of the question premise format.
if item.response_type == "single-choice":
# for single choice problems, the format of the question premises
# is different.
rel_questions = deepcopy(item.choices)
for rel_pre in rel_questions:
rel_pr = rel_pre[0] # unwrap the list of the actual premise
relation = rel_pr[0]
rel_pr[0] = rel_pr[1]
rel_pr[1] = relation
rel_pre = rel_pr
rel_prob.append(rel_pr)
            # calls an appropriate function from the spatial model that will return
# the given answer with the correct format for the evaluation in the framework.
answer = spatial_model.interpret_spatial2exp_parameters(rel_prob, deepcopy(parameters))
# store the answer given by the model for later use in the adapt function.
self.previous_model_ans.append(answer)
return answer
# for all verification problems the standard function will be called from the
# spatial model. The format of the question premises is different, that's
# why the conversion is different.
rel_questions = deepcopy(item.choices[0]) # get the question premise
for rel_pre in rel_questions:
relation = rel_pre[0]
rel_pre[0] = rel_pre[1]
rel_pre[1] = relation
rel_prob.append(rel_pre)
answer = spatial_model.interpret_spatial_parameters(rel_prob, deepcopy(parameters))
# store the answer given by the model for later use in the adapt function.
self.previous_model_ans.append(answer)
return answer
def adapt(self, item, target, **kwargs):
"""This function will rate the effect of the individualizations used in the
model on the current task. The individualizations will only be rated, if the
answer given by the model and the participant is different. The model will
make the previous prediction again with the activated individualization. If
the answer is now the same as the participant answer, the rating will be increased.
If the answer is still different, the rating will be decreased.
        After all individualizations are rated, they will be activated or deactivated
depending on their rating and their prior value, which is computed in the
pretrain function. For the activation of an individualization, a threshold
        has to be passed (e.g. rating + x * prior >= threshold, where x is a multiplier,
to give the prior a certain weight)
For each individualization and each experiment, the threshold, x and the
gains of the rating(positive and negative) need to be optimized, to use
the individualization only, and only if the model benefits from it.
configurations for the experiments:
-premiseorder: (0.5 * self.ind_rating_prior) >= 1, gains +0.1/-0.1
-figural: no configuration, since the individualization has no activation at all
-verification: (10 * self.ind_rating_prior) >= 0.8, gain +0.2/-0.1
Parameters
----------
item : ccobra.data.Item
Task information container. Holds the task text, response type,
response choices, etc.
target : str
True response given by the human reasoner.
"""
# first of all, check if the answer was correct with regard to the participant answer
if target != self.previous_model_ans[-1]:
# compute the answer that the model would've given with the individualisation
ans_ind = self.predict(item, [[False, False, False, False, True, False],
[False, False, False, False, False]])
# check if the answer would be correct now:
if ans_ind == target:
# now add something to the rating of the individualisation
self.ind_rating += 0.1 # 0.1
#print("increase rating", self.ind_rating)
else:
                # decrease the rating of the individualization
if self.ind_rating >= 0.1:
#print("decrease rating", self.ind_rating)
self.ind_rating -= 0.1
if self.ind_rating + (0.5 * self.ind_rating_prior) >= 1:
#print("individualization activated")
self.parameter_assignment[0][4] = True
else:
#print("deactivate individualization")
self.parameter_assignment[0][4] = False
def pre_train(self, dataset):
"""
In the pre_train function, a prior value for all individualizations will
be computed. The prior value represents, how often the corresponding
individualization was able to correct the answer of the original model.
        This function works similarly to the adapt function, but the rating will
be a counter for the corrected answers by the individualization.
The pre_train function uses a given set of problems and answers for
several participants. The prior will be computed using all these data,
        so this is a general pre-tuning for the model.
Parameters
----------
dataset : list(list(dict(str, object)))
Training data for the model. List of participants which each
contain lists of tasks represented as dictionaries with the
corresponding task information (e.g., the item container and
given response).
"""
for participant_data in dataset:
            part_data = deepcopy(participant_data) # refresh working copy of problems etc.
# reset the parameter assignment for each participant
self.parameter_assignment = [[False, False, False, False, False, False],
[False, False, False, False, False]]
self.previous_model_ans = []
for problem_data in part_data:
prob_item = problem_data['item'] # the data of the problem
prob_ans = problem_data['response'] # the response from the participant
                self.predict(prob_item) # predict the answer with the current parameters
# call adapt_prior to check if the individualization
# can change the result to the better
self.adapt_prior(prob_item, prob_ans)
self.ind_rating_prior += self.ind_rating
self.ind_rating = 0 # reset the rating counter
# compute the actual prior
self.ind_rating_prior /= (len(dataset) * len(part_data))
print("rating after pretrain ", self.ind_rating_prior)
def adapt_prior(self, item, target):
"""
This adapt function only counts the times when the individualization
        led to a better result than the baseline. In the pre_train function this
function will be used. Changes the self.ind_rating, to be used in the
pre_train function.
All priors are normed like this, because they represent a percentual value.
In the adapt function of the model, this prior value will be used to decide
whether to activate an individualization or not.
In this function, no individualization will be activated or deactivated.
Parameters
----------
item : ccobra.data.Item
Task information container. Holds the task text, response type,
response choices, etc.
target : str
True response given by the human reasoner.
"""
# first of all, check if the answer was correct with regard to the participant answer
if target != self.previous_model_ans[-1]:
# compute the answer that the model would've given with the individualisation
ans_ind = self.predict(item, [[False, False, False, False, True, False],
[False, False, False, False, False]])
# check if the answer would be correct now:
if ans_ind == target:
# now add something to the rating of the individualisation
self.ind_rating += 1
avg_line_length: 50.872587 | max_line_length: 100 | alphanum_fraction: 0.631755
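To make the activation rule described in adapt() concrete, here is a small self-contained sketch of that thresholding logic; the function name, the trial sequence, and the sample prior are illustrative, while the real model works on CCOBRA data structures as shown above.

```python
def should_activate(rating, prior, weight=0.5, threshold=1.0):
    # Mirrors the check `self.ind_rating + (0.5 * self.ind_rating_prior) >= 1`.
    return rating + weight * prior >= threshold

rating = 0.0
# True/False: did the individualization correct the model's answer on that trial?
for corrected in [True, True, False, True]:
    rating = rating + 0.1 if corrected else max(rating - 0.1, 0.0)

print(round(rating, 1))                    # 0.2
print(should_activate(rating, prior=2.0))  # 0.2 + 1.0 >= 1.0 -> True
```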
hexsha: 3a3caffa97f0ee75275c85a6d2d15c7613e0be2e | size: 11,357 | ext: py | lang: Python
repo: fcr/featuremapper | path: featuremapper/analysis/pinwheels.py | head: b999110dce9bbbdf4b6dbd2d13bfca1596064c6a | licenses: ["BSD-3-Clause"]
stars: 2 (2018-03-29T18:52:58.000Z to 2019-05-07T17:36:35.000Z) | issues: 7 (2016-11-15T13:02:41.000Z to 2019-10-21T19:59:31.000Z) | forks: 5 (2015-09-06T18:11:55.000Z to 2018-12-19T10:48:52.000Z)
content:
import sys
import cmath
import math
import numpy as np
from matplotlib import pyplot as plt
import param
from holoviews.core.options import Store, Options
from holoviews import Points, Overlay
from holoviews.element import Contours
from holoviews.operation import Operation
__author__ = "Jean-Luc Stevens"
class WarningCounter(object):
"""
A simple class to count 'divide by zero' and 'invalid value'
exceptions to allow a suitable warning message to be generated.
"""
def __init__(self):
self.div_by_zeros = 0
self.invalid_values = 0
def __call__(self, errtype, flag):
if errtype == "divide by zero":
self.div_by_zeros += 1
elif errtype == "invalid value":
self.invalid_values += 1
def warn(self):
total_events = self.div_by_zeros + self.invalid_values
if total_events == 0: return
info = (total_events, self.div_by_zeros, self.invalid_values)
        self.div_by_zeros = 0
self.invalid_values = 0
message = ("Warning: There were %d invalid intersection events:"
"\n\tNumpy 'divide by zero' events: %d"
"\n\tNumpy 'invalid value' events: %d\n")
sys.stderr.write(message % info)
class PinwheelAnalysis(Operation):
"""
Given a Matrix or HoloMap of a cyclic feature preference, compute
the position of all pinwheel singularities in the map. Optionally
includes the contours for the real and imaginary components of the
preference map used to determine the pinwheel locations.
Returns the original Matrix input overlayed with a Points object
containing the computed pinwheel locations and (optionally)
Contours overlays including the real and imaginary contour lines
respectively.
"""
output_type = Overlay
# TODO: Optional computation of pinwheel polarities.
include_contours = param.Boolean(default=True, doc="""
Whether or not to include the computed contours for the real and
imaginary components of the map.""")
    silence_warnings = param.Boolean(default=True, doc="""
Whether or not to show warnings about invalid intersection
events when locating pinwheels.""")
label = param.String(None, allow_None=True, precedence=-1, constant=True,
doc="""Label suffixes are fixed as there are too many labels to specify.""")
def _process(self, view, key=None):
cyclic_matrix = None
inputs = view.values() if isinstance(view, Overlay) else [view]
for input_element in inputs:
if input_element.vdims[0].cyclic:
cyclic_matrix = input_element
bounds = cyclic_matrix.bounds
cyclic_range = input_element.vdims[0].range
label = cyclic_matrix.label
break
else:
raise Exception("Pinwheel analysis requires a Matrix over a cyclic quantity")
if None in cyclic_range:
raise Exception("Pinwheel analysis requires a Matrix with defined cyclic range")
cyclic_data = cyclic_matrix.data / (cyclic_range[1] - cyclic_range[0])
polar_map = self.polar_preference(cyclic_data)
try:
contour_info = self.polarmap_contours(polar_map, bounds)
except Exception as e:
self.warning("Contour identification failed:\n%s" % str(e))
contour_info = None
pinwheels = None
if contour_info is not None:
(re_contours, im_contours, intersections) = contour_info
pinwheels = self.identify_pinwheels(*(re_contours, im_contours, intersections),
silence_warnings=self.p.silence_warnings)
else:
re_contours, im_contours = [], []
if not pinwheels:
pinwheels = np.zeros((0, 2))
pinwheels = Points(np.array(pinwheels), label=label, group='Pinwheels')
if self.p.include_contours:
re_lines = Contours(re_contours, label=label, group='Real')
im_lines = Contours(im_contours, label=label, group='Imaginary')
overlay = cyclic_matrix * re_lines * im_lines * pinwheels
else:
overlay = (cyclic_matrix * pinwheels)
return overlay.relabel(group='PinwheelAnalysis', label=label)
def polar_preference(self, pref):
"""
        Converts the hue representation to a polar representation.
        The hue representation is expected to hold values in the range 0-1.0.
"""
polarfn = lambda x: cmath.rect(1.0, x * 2 * math.pi)
polar_vecfn = np.vectorize(polarfn)
return polar_vecfn(pref)
def normalize_polar_channel(self, polar_channel):
"""
        This function normalizes an OR map (polar_channel) taking into
account the region of interest (ROI). The ROI is specified by
values set to 99. Note that this functionality is implemented to
reproduce the experimental approach and has not been tested (not
required for Topographica simulations)
"""
def grad(r):
(r_x, r_y) = np.gradient(r)
            (r_xx, r_xy) = np.gradient(r_x)
            (r_yx, r_yy) = np.gradient(r_y)
return r_xx ** 2 + r_yy ** 2 + 2 * r_xy ** 2
# Set ROI to 0 to ignore values of -99.
roi = np.ones(polar_channel.shape)
# In Matlab: roi(find(z==-99))=0
        roi[polar_channel == -99] = 0
fst_grad = grad(roi)
snd_grad = grad(fst_grad)
# Find non-zero elements in second grad and sets to unity
snd_grad[snd_grad != 0] = 1
# These elements now mask out ROI region (set to zero)
roi[snd_grad == 1] = 0
# Find the unmasked coordinates
ind = (polar_channel != 99)
# The complex abs of unmasked
        normalisation = np.mean(np.abs(polar_channel[ind]))
# Only normalize with unmasked
return polar_channel / normalisation
def polarmap_contours(self, polarmap, bounds):
"""
Identifies the real and imaginary contours in a polar map.
Returns the real and imaginary contours as 2D vertex arrays
together with the pairs of contours known to intersect. The
coordinate system used is specified by the supplied bounds.
Contour plotting requires origin='upper' for consistency with
image coordinate system.
"""
l,b,r,t = bounds.lbrt()
# Convert to polar and normalise
normalized_polar = self.normalize_polar_channel(polarmap)
figure_handle = plt.figure()
# Real component
re_contours_plot = plt.contour(normalized_polar.real, 0, origin='upper',
extent=[l,r,b,t])
re_path_collections = re_contours_plot.collections[0]
re_contour_paths = re_path_collections.get_paths()
# Imaginary component
im_contours_plot = plt.contour(normalized_polar.imag, 0, origin='upper',
extent=[l,r,b,t])
im_path_collections = im_contours_plot.collections[0]
im_contour_paths = im_path_collections.get_paths()
plt.close(figure_handle)
intersections = [(re_ind, im_ind)
for (re_ind, re_path) in enumerate(re_contour_paths)
for (im_ind, im_path) in enumerate(im_contour_paths)
if im_path.intersects_path(re_path)]
# Contour vertices 0.5 pixel inset. Eg. (0,0)-(48,48)=>(0.5, 0.5)-(47.5, 47.5)
# Returned values will not therefore reach limits of 0.0 and 1.0
re_contours = [self.remove_path_duplicates(re_path.vertices) for re_path in re_contour_paths]
im_contours = [self.remove_path_duplicates(im_path.vertices) for im_path in im_contour_paths]
return (re_contours, im_contours, intersections)
def remove_path_duplicates(self, vertices):
"Removes successive duplicates along a path of vertices."
zero_diff_bools = np.all(np.diff(vertices, axis=0) == 0, axis=1)
duplicate_indices, = np.nonzero(zero_diff_bools)
return np.delete(vertices, duplicate_indices, axis=0)
def find_intersections(self, contour1, contour2):
"""
Vectorized code to find intersections between contours. All
successive duplicate vertices along the input contours must be
removed to help avoid division-by-zero errors.
        There are cases where no intersection exists (e.g. parallel lines)
where division by zero and invalid value exceptions occur. These
exceptions should be caught as warnings: these edge cases are
unavoidable with this algorithm and do not indicate that the
output is erroneous.
"""
# Elementwise min selection
amin = lambda x1, x2: np.where(x1 < x2, x1, x2)
# Elementwise max selection
amax = lambda x1, x2: np.where(x1 > x2, x1, x2)
# dstacks, checks True depthwise
aall = lambda abools: np.dstack(abools).all(axis=2)
# Uses delta (using np.diff) to find successive slopes along path
slope = lambda line: (lambda d: d[:, 1] / d[:, 0])(np.diff(line, axis=0))
# Meshgrids between both paths (x and y). One element sliced off end/beginning
x11, x21 = np.meshgrid(contour1[:-1, 0], contour2[:-1, 0])
x12, x22 = np.meshgrid(contour1[1:, 0], contour2[1:, 0])
y11, y21 = np.meshgrid(contour1[:-1, 1], contour2[:-1, 1])
y12, y22 = np.meshgrid(contour1[1:, 1], contour2[1:, 1])
# Meshgrid of all slopes for both paths
m1, m2 = np.meshgrid(slope(contour1), slope(contour2))
m2inv = 1 / m2 # m1inv was not used.
yi = (m1 * (x21 - x11 - m2inv * y21) + y11) / (1 - m1 * m2inv)
xi = (yi - y21) * m2inv + x21 # (xi, yi) is intersection candidate
# Bounding box type conditions for intersection candidates
xconds = (amin(x11, x12) < xi, xi <= amax(x11, x12),
amin(x21, x22) < xi, xi <= amax(x21, x22) )
yconds = (amin(y11, y12) < yi, yi <= amax(y11, y12),
amin(y21, y22) < yi, yi <= amax(y21, y22) )
return xi[aall(xconds)], yi[aall(yconds)]
def identify_pinwheels(self, re_contours, im_contours, intersections,
silence_warnings=True):
"""
Locates the pinwheels from the intersection of the real and
        imaginary contours of the polar OR map.
"""
warning_counter = WarningCounter()
pinwheels = []
np.seterrcall(warning_counter)
for (re_ind, im_ind) in intersections:
re_contour = re_contours[re_ind]
im_contour = im_contours[im_ind]
np.seterr(divide='call', invalid='call')
x, y = self.find_intersections(re_contour, im_contour)
np.seterr(divide='raise', invalid='raise')
pinwheels += zip(x, y)
if not silence_warnings:
warning_counter.warn()
return pinwheels
options = Store.options(backend='matplotlib')
options.Points.Pinwheels = Options('style', color= '#f0f0f0', marker= 'o', edgecolors= 'k')
options.Contours.Imaginary = Options('style', color= 'k', linewidth=1.5)
options.Contours.Real = Options('style', color= 'w', linewidth=1.5)
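# Illustrative, self-contained check (not part of the original module): two toy
# contours that cross at (0.5, 0.5) exercise the vectorized segment-intersection
# helper. `PinwheelAnalysis.instance()` follows the usual param
# ParameterizedFunction idiom; treat that call style as an assumption.
if __name__ == '__main__':
    toy_a = np.array([[0.0, 0.0], [1.0, 1.0]])
    toy_b = np.array([[0.0, 1.0], [1.0, 0.0]])
    xi, yi = PinwheelAnalysis.instance().find_intersections(toy_a, toy_b)
    print("intersection:", xi, yi)  # expected: two arrays each containing 0.5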
| 40.27305
| 101
| 0.630888
|
45d7dcaa1aef4f6d87741360cd8e4efcfb7bc1aa
| 4,709
|
py
|
Python
|
catboost/python-package/catboost/monoforest.py
|
jochenater/catboost
|
de2786fbc633b0d6ea6a23b3862496c6151b95c2
|
[
"Apache-2.0"
] | 6,989
|
2017-07-18T06:23:18.000Z
|
2022-03-31T15:58:36.000Z
|
catboost/python-package/catboost/monoforest.py
|
birichie/catboost
|
de75c6af12cf490700e76c22072fbdc15b35d679
|
[
"Apache-2.0"
] | 1,978
|
2017-07-18T09:17:58.000Z
|
2022-03-31T14:28:43.000Z
|
catboost/python-package/catboost/monoforest.py
|
birichie/catboost
|
de75c6af12cf490700e76c22072fbdc15b35d679
|
[
"Apache-2.0"
] | 1,228
|
2017-07-18T09:03:13.000Z
|
2022-03-29T05:57:40.000Z
|
import math
from . import _catboost
from .core import CatBoost, CatBoostError
from .utils import _import_matplotlib
FeatureExplanation = _catboost.FeatureExplanation
def _check_model(model):
if not isinstance(model, CatBoost):
raise CatBoostError("Model should be CatBoost")
def to_polynom(model):
_check_model(model)
return _catboost.to_polynom(model._object)
def to_polynom_string(model):
_check_model(model)
return _catboost.to_polynom_string(model._object)
def explain_features(model):
_check_model(model)
return _catboost.explain_features(model._object)
def calc_features_strength(model):
explanations = explain_features(model)
features_strength = [expl.calc_strength() for expl in explanations]
return features_strength
def plot_pdp(arg, size_per_plot=(5, 5), plots_per_row=None):
with _import_matplotlib() as _plt:
plt = _plt
if isinstance(arg, CatBoost):
arg = explain_features(arg)
if isinstance(arg, _catboost.FeatureExplanation):
arg = [arg]
assert len(arg) > 0
assert isinstance(arg, list)
for element in arg:
assert isinstance(element, _catboost.FeatureExplanation)
figs = []
for feature_explanation in arg:
dimension = feature_explanation.dimension()
if not plots_per_row:
plots_per_row = min(5, dimension)
rows = int(math.ceil(dimension / plots_per_row))
fig, axes = plt.subplots(rows, plots_per_row)
fig.suptitle("Feature #{}".format(feature_explanation.feature))
if rows == 1:
axes = [axes]
if plots_per_row == 1:
axes = [[row_axes] for row_axes in axes]
fig.set_size_inches(size_per_plot[0] * plots_per_row, size_per_plot[1] * rows)
for dim in range(dimension):
ax = axes[dim // plots_per_row][dim % plots_per_row]
ax.set_title("Dimension={}".format(dim))
ax.set_xlabel("feature value")
ax.set_ylabel("model value")
borders, values = feature_explanation.calc_pdp(dim)
xs = []
ys = []
if feature_explanation.type == "Float":
if len(borders) == 0:
xs.append(-0.1)
xs.append(0.1)
ys.append(feature_explanation.expected_bias[dim])
ys.append(feature_explanation.expected_bias[dim])
ax.plot(xs, ys)
else:
offset = max(0.1, (borders[0] + borders[-1]) / 2)
xs.append(borders[0] - offset)
ys.append(feature_explanation.expected_bias[dim])
for border, value in zip(borders, values):
xs.append(border)
ys.append(ys[-1])
xs.append(border)
ys.append(value)
xs.append(borders[-1] + offset)
ys.append(ys[-1])
ax.plot(xs, ys)
else:
xs = ['bias'] + list(map(str, borders))
ys = feature_explanation.expected_bias[dim] + values
ax.bar(xs, ys)
figs.append(fig)
return figs
def plot_features_strength(model, height_per_feature=0.5, width_per_plot=5, plots_per_row=None):
with _import_matplotlib() as _plt:
plt = _plt
strengths = calc_features_strength(model)
dimension = len(strengths[0])
features = len(strengths)
if not plots_per_row:
plots_per_row = min(5, dimension)
rows = int(math.ceil(dimension / plots_per_row))
fig, axes = plt.subplots(rows, plots_per_row)
if rows == 1:
axes = [axes]
if plots_per_row == 1:
axes = [[row_axes] for row_axes in axes]
fig.suptitle("Features Strength")
fig.set_size_inches(width_per_plot * plots_per_row, height_per_feature * features * rows)
for dim in range(dimension):
        # Use a per-dimension copy so the outer `strengths` list is not
        # clobbered between iterations of the dimension loop.
        dim_strengths = [(s[dim], i) for i, s in enumerate(strengths)]
        # dim_strengths = list(reversed(sorted(dim_strengths)))
        dim_strengths = list(sorted(dim_strengths))
        labels = ["Feature #{}".format(f) for _, f in dim_strengths]
        dim_strengths = [s for s, _ in dim_strengths]
        ax = axes[dim // plots_per_row][dim % plots_per_row]
        colors = [(1, 0, 0) if s > 0 else (0, 0, 1) for s in dim_strengths]
        ax.set_title("Dimension={}".format(dim))
        ax.barh(range(len(dim_strengths)), dim_strengths, align='center', color=colors)
        ax.set_yticks(range(len(dim_strengths)))
ax.set_yticklabels(labels)
# ax.invert_yaxis() # labels read top-to-bottom
ax.set_xlabel('Prediction value change')
return fig
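# Illustrative usage sketch (not part of the original module); assumes a fitted
# CatBoost model, e.g. a small CatBoostRegressor:
#
#     from catboost import CatBoostRegressor
#     model = CatBoostRegressor(iterations=10, verbose=False).fit(X, y)
#     print(to_polynom_string(model))       # symbolic polynom form of the forest
#     figs = plot_pdp(model)                # one figure per explained feature
#     fig = plot_features_strength(model)   # signed per-feature strength bars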
| 35.141791
| 96
| 0.606498
|
021197105c68c266681d3e275ea490207944f64d
| 3,063
|
py
|
Python
|
cvxpy/atoms/affine/sum.py
|
dougalsutherland/cvxpy
|
34349b5e41c124a6a1e32426e68af95b5044498c
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2019-04-15T14:01:03.000Z
|
2019-04-15T14:01:03.000Z
|
cvxpy/atoms/affine/sum.py
|
dougalsutherland/cvxpy
|
34349b5e41c124a6a1e32426e68af95b5044498c
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
cvxpy/atoms/affine/sum.py
|
dougalsutherland/cvxpy
|
34349b5e41c124a6a1e32426e68af95b5044498c
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
"""
Copyright 2013 Steven Diamond
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from cvxpy.atoms.affine.affine_atom import AffAtom
from cvxpy.atoms.axis_atom import AxisAtom
import cvxpy.lin_ops.lin_utils as lu
import cvxpy.interface as intf
import numpy as np
from functools import wraps
class Sum(AxisAtom, AffAtom):
"""Sum the entries of an expression.
Parameters
----------
expr : Expression
The expression to sum the entries of.
axis : int
The axis along which to sum.
    keepdims : bool
        Whether to keep the summed dimensions (each with size one) in the result.
"""
def __init__(self, expr, axis=None, keepdims=False):
super(Sum, self).__init__(expr, axis=axis, keepdims=keepdims)
def numeric(self, values):
"""Sums the entries of value.
"""
if intf.is_sparse(values[0]):
result = np.sum(values[0], axis=self.axis)
if not self.keepdims and self.axis is not None:
result = result.A.flatten()
else:
result = np.sum(values[0], axis=self.axis, keepdims=self.keepdims)
return result
@staticmethod
def graph_implementation(arg_objs, shape, data=None):
"""Sum the linear expression's entries.
Parameters
----------
arg_objs : list
LinExpr for each argument.
shape : tuple
The shape of the resulting expression.
data :
Additional data required by the atom.
Returns
-------
tuple
(LinOp for objective, list of constraints)
"""
axis = data[0]
keepdims = data[1]
if axis is None:
obj = lu.sum_entries(arg_objs[0], shape=shape)
elif axis == 1:
if keepdims:
const_shape = (arg_objs[0].shape[1], 1)
else:
const_shape = (arg_objs[0].shape[1],)
ones = lu.create_const(np.ones(const_shape), const_shape)
obj = lu.rmul_expr(arg_objs[0], ones, shape)
else: # axis == 0
if keepdims:
const_shape = (1, arg_objs[0].shape[0])
else:
const_shape = (arg_objs[0].shape[0],)
ones = lu.create_const(np.ones(const_shape), const_shape)
obj = lu.mul_expr(ones, arg_objs[0], shape)
return (obj, [])
@wraps(Sum)
def sum(expr, axis=None, keepdims=False):
"""Wrapper for Sum class.
"""
if isinstance(expr, list):
return __builtins__['sum'](expr)
else:
return Sum(expr, axis, keepdims)
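# Illustrative usage sketch (not part of the original module), exercised through
# the public cvxpy namespace:
#
#     import cvxpy
#     x = cvxpy.Variable((3, 2))
#     total = cvxpy.sum(x)                            # scalar sum of all entries
#     col_sums = cvxpy.sum(x, axis=0)                 # shape (2,)
#     row_sums = cvxpy.sum(x, axis=1, keepdims=True)  # shape (3, 1)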
| 30.63
| 78
| 0.611166
|
e08e673ef380ac0202ad464ea9f497a05b63cb31
| 4,845
|
py
|
Python
|
hw_06/linear_regression_sgd.py
|
coinflip112/ml_101
|
9e56ffdb99ac241ed396e25d7f7818a58ee5c4de
|
[
"MIT"
] | null | null | null |
hw_06/linear_regression_sgd.py
|
coinflip112/ml_101
|
9e56ffdb99ac241ed396e25d7f7818a58ee5c4de
|
[
"MIT"
] | null | null | null |
hw_06/linear_regression_sgd.py
|
coinflip112/ml_101
|
9e56ffdb99ac241ed396e25d7f7818a58ee5c4de
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import argparse
import sys
import matplotlib.pyplot as plt
import numpy as np
import sklearn.datasets
import sklearn.linear_model
import sklearn.metrics
import sklearn.model_selection
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--batch_size", default=10, type=int, help="Batch size")
parser.add_argument("--examples", default=100, type=int, help="Number of examples")
parser.add_argument(
"--iterations", default=50, type=int, help="Number of iterations over the data"
)
parser.add_argument("--folds", default=10, type=int, help="Number of folds")
parser.add_argument(
"--learning_rate", default=0.01, type=float, help="Learning rate"
)
parser.add_argument(
"--plot", default=False, action="store_true", help="Plot progress"
)
parser.add_argument("--seed", default=42, type=int, help="Random seed")
args = parser.parse_args()
# Set random seed
np.random.seed(args.seed)
    # Generate an artificial regression dataset
data, target = sklearn.datasets.make_regression(
n_samples=args.examples, random_state=args.seed
)
# TODO: Append a constant feature with value 1 to the end of every input data
data = np.column_stack([data, np.ones(shape=(args.examples))])
rmses = []
# TODO: Using `split` method of `sklearn.model_selection.KFold(args.folds)`,
# generate the required number of folds. The folds are returned as
# a generator of (train_data_indices, test_data_indices) pairs.
folds = sklearn.model_selection.KFold(n_splits=args.folds)
for train_indices, test_indices in folds.split(X=data):
# TODO: Generate train_data, train_target, test_data, test_target using the fold indices
train_data, test_data = data[train_indices], data[test_indices]
train_target, test_target = target[train_indices], target[test_indices]
# Generate initial linear regression weights
weights = np.random.uniform(size=train_data.shape[1])
rmses.append([])
for iteration in range(args.iterations):
permutation = np.random.permutation(train_data.shape[0])
permuted_x_train, permuted_y_train = (
train_data[permutation],
train_target[permutation],
)
batch_count = int(train_data.shape[0] / args.batch_size)
for batch_x, batch_y in zip(
np.split(permuted_x_train, batch_count),
np.split(permuted_y_train, batch_count),
):
grads = []
for x_i, t_i in zip(batch_x, batch_y):
grad_i = (np.dot(x_i, weights) - t_i) * x_i
grads.append(grad_i)
grads = np.array(grads)
weights = weights - args.learning_rate * np.mean(grads, axis=0)
# TODO: Process the data in the order of `permutation`.
# For every `args.batch_size`, average their gradient, and update the weights.
# A gradient for example (x_i, t_i) is `(x_i^T weights - t_i) * x_i`,
# and the SGD update is `weights = weights - learning_rate * gradient`.
# You can assume that `args.batch_size` exactly divides `train_data.shape[0]`.
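            # Equivalent vectorized form of the per-example gradient loop above
            # (an illustrative sketch, not used by this script):
            #     grad = batch_x.T @ (batch_x @ weights - batch_y) / len(batch_y)
            #     weights = weights - args.learning_rate * grad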
# We evaluate RMSE on train and test
rmses[-1].append(
{
"train": np.sqrt(
sklearn.metrics.mean_squared_error(
train_target, train_data @ weights
)
),
"test": np.sqrt(
sklearn.metrics.mean_squared_error(
test_target, test_data @ weights
)
),
}
)
lr = sklearn.linear_model.LinearRegression()
lr.fit(train_data, train_target)
y_pred = lr.predict(test_data)
explicit_rmse = np.sqrt(sklearn.metrics.mean_squared_error(y_pred, test_target))
# TODO: Compute into `explicit_rmse` test data RMSE when
# fitting `sklearn.linear_model.LinearRegression` on train_data.
print(
"Test RMSE on fold {}: SGD {:.2f}, explicit {:.2f}".format(
len(rmses), rmses[-1][-1]["test"], explicit_rmse
)
)
if args.plot:
for i in range(len(rmses)):
plt.plot(
range(args.iterations),
[x["train"] for x in rmses[i]],
label="train-{}".format(i),
)
plt.plot(
range(args.iterations),
[x["test"] for x in rmses[i]],
label="test-{}".format(i),
)
plt.legend(loc="lower right")
plt.show()
| 38.149606
| 96
| 0.586997
|
5f24abbe9e57fd1d66a74759fa00ed135875596e
| 4,685
|
py
|
Python
|
tests/nodeos_chainbase_allocation_test.py
|
Remmeauth/core
|
578996ce408ca0adbe6a6b895177199017ee907b
|
[
"MIT"
] | 140
|
2019-07-01T13:13:06.000Z
|
2021-11-23T12:50:39.000Z
|
tests/nodeos_chainbase_allocation_test.py
|
Remmeauth/core
|
578996ce408ca0adbe6a6b895177199017ee907b
|
[
"MIT"
] | 26
|
2019-07-12T09:07:36.000Z
|
2020-05-20T20:33:06.000Z
|
tests/nodeos_chainbase_allocation_test.py
|
Remmeauth/core
|
578996ce408ca0adbe6a6b895177199017ee907b
|
[
"MIT"
] | 7
|
2019-08-30T08:29:46.000Z
|
2020-07-30T15:35:10.000Z
|
#!/usr/bin/env python3
from testUtils import Utils, Account
from Cluster import Cluster
from TestHelper import TestHelper
from WalletMgr import WalletMgr
from Node import Node
import signal
import json
import time
import os
import filecmp
###############################################################
# nodeos_chainbase_allocation_test
#
# Test snapshot creation and restarting from snapshot
#
###############################################################
# Parse command line arguments
args = TestHelper.parse_args({"-v","--clean-run","--dump-error-details","--leave-running","--keep-logs"})
Utils.Debug = args.v
killAll=args.clean_run
dumpErrorDetails=args.dump_error_details
dontKill=args.leave_running
killEosInstances=not dontKill
killWallet=not dontKill
keepLogs=args.keep_logs
walletMgr=WalletMgr(True)
cluster=Cluster(walletd=True)
cluster.setWalletMgr(walletMgr)
testSuccessful = False
try:
TestHelper.printSystemInfo("BEGIN")
cluster.killall(allInstances=killAll)
cluster.cleanup()
# The following is the list of chainbase objects that need to be verified:
# - account_object (bootstrap)
# - code_object (bootstrap)
# - generated_transaction_object
# - global_property_object
# - key_value_object (bootstrap)
# - protocol_state_object (bootstrap)
# - permission_object (bootstrap)
# The bootstrap process has created account_object and code_object (by uploading the bios contract),
# key_value_object (token creation), protocol_state_object (preactivation feature), and permission_object
    # (handled automatically by the generated rem account)
assert cluster.launch(
pnodes=1,
prodCount=1,
totalProducers=1,
totalNodes=2,
useBiosBootFile=False,
loadSystemContract=False,
specificExtraNodeosArgs={
1:"--read-mode irreversible --plugin eosio::producer_api_plugin"})
producerNodeId = 0
irrNodeId = 1
producerNode = cluster.getNode(producerNodeId)
irrNode = cluster.getNode(irrNodeId)
# Create delayed transaction to create "generated_transaction_object"
cmd = "create account -j rem sample EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV\
EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV --delay-sec 600 -p rem"
trans = producerNode.processCleosCmd(cmd, cmd, silentErrors=False)
assert trans
# Schedule a new producer to trigger new producer schedule for "global_property_object"
newProducerAcc = Account("newprod")
newProducerAcc.ownerPublicKey = "EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV"
newProducerAcc.activePublicKey = "EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV"
producerNode.createAccount(newProducerAcc, cluster.eosioAccount)
setProdsStr = '{"schedule": ['
setProdsStr += '{"producer_name":' + newProducerAcc.name + ',"block_signing_key":' + newProducerAcc.activePublicKey + '}'
setProdsStr += ']}'
cmd="push action -j rem setprods '{}' -p rem".format(setProdsStr)
trans = producerNode.processCleosCmd(cmd, cmd, silentErrors=False)
assert trans
setProdsBlockNum = int(trans["processed"]["block_num"])
    # Wait until the block where setprods is executed becomes irreversible so the new producer schedule gets promoted
def isSetProdsBlockNumIrr():
return producerNode.getIrreversibleBlockNum() >= setProdsBlockNum
Utils.waitForBool(isSetProdsBlockNumIrr, timeout=30, sleepTime=0.1)
# Once it is irreversible, immediately pause the producer so the promoted producer schedule is not cleared
producerNode.processCurlCmd("producer", "pause", "")
producerNode.kill(signal.SIGTERM)
# Create the snapshot and rename it to avoid name conflict later on
res = irrNode.createSnapshot()
beforeShutdownSnapshotPath = res["snapshot_name"]
snapshotPathWithoutExt, snapshotExt = os.path.splitext(beforeShutdownSnapshotPath)
os.rename(beforeShutdownSnapshotPath, snapshotPathWithoutExt + "_before_shutdown" + snapshotExt)
# Restart irr node and ensure the snapshot is still identical
irrNode.kill(signal.SIGTERM)
isRelaunchSuccess = irrNode.relaunch(irrNodeId, "", timeout=5, cachePopen=True)
assert isRelaunchSuccess, "Fail to relaunch"
res = irrNode.createSnapshot()
afterShutdownSnapshotPath = res["snapshot_name"]
assert filecmp.cmp(beforeShutdownSnapshotPath, afterShutdownSnapshotPath), "snapshot is not identical"
testSuccessful = True
finally:
TestHelper.shutdown(cluster, walletMgr, testSuccessful, killEosInstances, killWallet, keepLogs, killAll, dumpErrorDetails)
exitCode = 0 if testSuccessful else 1
exit(exitCode)
| 40.042735
| 126
| 0.740021
|
799ade49c144ac5aa0523477678e891766a1a380
| 4,496
|
py
|
Python
|
azure-mgmt-compute/azure/mgmt/compute/v2018_06_01/operations/virtual_machine_sizes_operations.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 1
|
2021-09-07T18:36:04.000Z
|
2021-09-07T18:36:04.000Z
|
azure-mgmt-compute/azure/mgmt/compute/v2018_06_01/operations/virtual_machine_sizes_operations.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 2
|
2019-10-02T23:37:38.000Z
|
2020-10-02T01:17:31.000Z
|
azure-mgmt-compute/azure/mgmt/compute/v2018_06_01/operations/virtual_machine_sizes_operations.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 1
|
2019-06-17T22:18:23.000Z
|
2019-06-17T22:18:23.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from .. import models
class VirtualMachineSizesOperations(object):
"""VirtualMachineSizesOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client Api Version. Constant value: "2018-06-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2018-06-01"
self.config = config
def list(
self, location, custom_headers=None, raw=False, **operation_config):
"""This API is deprecated. Use [Resources
Skus](https://docs.microsoft.com/en-us/rest/api/compute/resourceskus/list).
:param location: The location upon which virtual-machine-sizes is
queried.
:type location: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of VirtualMachineSize
:rtype:
~azure.mgmt.compute.v2018_06_01.models.VirtualMachineSizePaged[~azure.mgmt.compute.v2018_06_01.models.VirtualMachineSize]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list.metadata['url']
path_format_arguments = {
'location': self._serialize.url("location", location, 'str', pattern=r'^[-\w\._]+$'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.VirtualMachineSizePaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.VirtualMachineSizePaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/vmSizes'}
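# Illustrative usage sketch (not part of the generated code); this operations
# class is normally reached through a ComputeManagementClient rather than being
# constructed directly, so treat the wiring below as an assumption:
#
#     from azure.mgmt.compute import ComputeManagementClient
#     compute_client = ComputeManagementClient(credentials, subscription_id)
#     for size in compute_client.virtual_machine_sizes.list('westus2'):
#         print(size.name, size.number_of_cores, size.memory_in_mb)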
| 41.62963
| 144
| 0.636121
|
fadf4bda0342b2659bf95ce65ff827da298ea257
| 808
|
py
|
Python
|
dataloader/__init__.py
|
entn-at/AGAIN-VC
|
dbf94bf55882f897c312c7760cd892c51c93c9ab
|
[
"MIT"
] | 78
|
2020-10-24T02:55:59.000Z
|
2022-03-08T03:09:13.000Z
|
dataloader/__init__.py
|
entn-at/AGAIN-VC
|
dbf94bf55882f897c312c7760cd892c51c93c9ab
|
[
"MIT"
] | 15
|
2020-11-03T18:34:15.000Z
|
2022-03-26T19:47:59.000Z
|
dataloader/__init__.py
|
entn-at/AGAIN-VC
|
dbf94bf55882f897c312c7760cd892c51c93c9ab
|
[
"MIT"
] | 16
|
2020-11-09T21:17:53.000Z
|
2022-03-17T04:07:26.000Z
|
from torch.utils.data import DataLoader
import logging
import importlib
logger = logging.getLogger(__name__)
def get_dataset(dset, dataset_config, njobs, metadata=None):
# dataset = 'Dataset'
# l = locals()
# exec(f'from .{name} import Dataset', globals(), l)
# exec(f'ret = {dataset}(config, args, dset, metadata)', globals(), l)
# ret = l['ret']
# return ret
Dataset = importlib.import_module(f'.{dataset_config.dataset_name}', package=__package__).Dataset
return Dataset(dset,
dataset_config.indexes_path,
dataset_config.feat,
dataset_config.feat_path,
dataset_config.seglen,
njobs,
metadata)
def get_dataloader(dset, dataloader_config, dataset):
ret = DataLoader(dataset, **dataloader_config[dset])
return ret
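# Illustrative usage sketch (not part of the original module); the config
# attributes and the 'train' key are assumptions inferred from the signatures
# above:
#
#     dataset = get_dataset('train', config.dataset, njobs=4)
#     loader = get_dataloader('train', config.dataloader, dataset)
#     for batch in loader:
#         ...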
| 29.925926
| 101
| 0.680693
|
1ec098314b7460cacb7dbe6d4867e77378ab4a04
| 363
|
py
|
Python
|
exercicios/Mundo1/Pintando_paredes.py
|
soualeques/Curso-em-Video-Python
|
ea651c687ce7daf810fc235ab2fcd0fb6c218a2c
|
[
"MIT"
] | null | null | null |
exercicios/Mundo1/Pintando_paredes.py
|
soualeques/Curso-em-Video-Python
|
ea651c687ce7daf810fc235ab2fcd0fb6c218a2c
|
[
"MIT"
] | null | null | null |
exercicios/Mundo1/Pintando_paredes.py
|
soualeques/Curso-em-Video-Python
|
ea651c687ce7daf810fc235ab2fcd0fb6c218a2c
|
[
"MIT"
] | null | null | null |
# Write a program that reads the height and width of a wall to be painted, knowing that each litre of paint covers 2 m²
alt = float(input("Digite a altura em metros da parede a ser pintada: "))
larg = float(input("Digite a largura em metros da parede a ser pintada: "))
área = larg * alt
tinta = área / 2
print("sera necesario {}L de tinta para pintar a parede".format(tinta))
| 60.5
| 106
| 0.732782
|
45d3bcec38b72a6d7740164f34515579516273b1
| 328
|
py
|
Python
|
day09/tests.py
|
Moremar/advent_of_code_2018
|
db57f2c6858dc4a1b70888ddb45c9bdbeccab9bb
|
[
"MIT"
] | null | null | null |
day09/tests.py
|
Moremar/advent_of_code_2018
|
db57f2c6858dc4a1b70888ddb45c9bdbeccab9bb
|
[
"MIT"
] | null | null | null |
day09/tests.py
|
Moremar/advent_of_code_2018
|
db57f2c6858dc4a1b70888ddb45c9bdbeccab9bb
|
[
"MIT"
] | null | null | null |
import unittest
import script1
import script2
test_file = "sample.txt"
class Tests(unittest.TestCase):
def test_part_1(self):
self.assertEqual(script1.compute(test_file), 32)
def test_part_2(self):
self.assertEqual(script2.compute(test_file), 22563)
if __name__ == '__main__':
unittest.main()
| 17.263158
| 59
| 0.710366
|
a06e480c5d9a5547bd7119728195abfeec3dd851
| 7,374
|
py
|
Python
|
draft/truefx/truefx_api.py
|
movermeyer/pandas_datareaders_unofficial
|
458dcf473d070cd7686d53d4a9b479cbe0ab9218
|
[
"BSD-3-Clause"
] | 18
|
2015-02-05T01:42:51.000Z
|
2020-12-27T19:24:25.000Z
|
draft/truefx/truefx_api.py
|
movermeyer/pandas_datareaders_unofficial
|
458dcf473d070cd7686d53d4a9b479cbe0ab9218
|
[
"BSD-3-Clause"
] | 1
|
2015-01-12T11:08:02.000Z
|
2015-01-13T09:14:47.000Z
|
draft/truefx/truefx_api.py
|
femtotrader/pandas_datareaders
|
458dcf473d070cd7686d53d4a9b479cbe0ab9218
|
[
"BSD-3-Clause"
] | 13
|
2015-09-10T19:39:51.000Z
|
2022-01-06T17:08:35.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
API to get FX prices from TrueFX
http://www.truefx.com/
http://www.truefx.com/?page=download&description=api
http://www.truefx.com/dev/data/TrueFX_MarketDataWebAPI_DeveloperGuide.pdf
"""
import logging
logger = logging.getLogger(__name__)
import click
import os
import requests
import requests_cache
import datetime
import pandas as pd
#pd.set_option('max_rows', 10)
pd.set_option('expand_frame_repr', False)
pd.set_option('max_columns', 8)
from datetime import timedelta
from pandas.io.common import urlencode
import pandas.compat as compat
SYMBOLS_NOT_AUTH = ['EUR/USD', 'USD/JPY', 'GBP/USD', 'EUR/GBP', 'USD/CHF', \
'EUR/JPY', 'EUR/CHF', 'USD/CAD', 'AUD/USD', 'GBP/JPY']
SYMBOLS_ALL = ['EUR/USD', 'USD/JPY', 'GBP/USD', 'EUR/GBP', 'USD/CHF', 'AUD/NZD', \
'CAD/CHF', 'CHF/JPY', 'EUR/AUD', 'EUR/CAD', 'EUR/JPY', 'EUR/CHF', 'USD/CAD', \
'AUD/USD', 'GBP/JPY', 'AUD/CAD', 'AUD/CHF', 'AUD/JPY', 'EUR/NOK', 'EUR/NZD', \
'GBP/CAD', 'GBP/CHF', 'NZD/JPY', 'NZD/USD', 'USD/NOK', 'USD/SEK']
def _send_request(session, params):
base_url = "http://webrates.truefx.com/rates"
endpoint = "/connect.html"
url = base_url + endpoint
s_url = url+'?'+urlencode(params)
logging.debug("Request to '%s' with '%s' using '%s'" % (url, params, s_url))
response = session.get(url, params=params)
return(response)
def _connect(session, username, password, lst_symbols, qualifier, \
api_format, snapshot):
s = 'y' if snapshot else 'n'
params = {
'u': username,
'p': password,
'q': qualifier,
'c': ','.join(lst_symbols),
'f': api_format,
's': s
}
response = _send_request(session, params)
if response.status_code != 200:
raise(Exception("Can't connect"))
session_data = response.text
session_data = session_data.strip()
return(session_data)
def _disconnect(session, session_data):
params = {
'di': session_data,
}
response = _send_request(session, params)
return(response)
def _query_auth_send(session, session_data):
params = {
'id': session_data,
}
response = _send_request(session, params)
return(response)
def _parse_data(data):
data_io = compat.StringIO(data)
df = pd.read_csv(data_io, header=None, \
names=['Symbol', 'Date', 'Bid', 'Bid_point', \
'Ask', 'Ask_point', 'High', 'Low', 'Open'])
df['Date'] = pd.to_datetime(df['Date'], unit='ms')
df = df.set_index('Symbol')
return(df)
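# Illustrative payload for _parse_data (quote values are hypothetical; the column
# order matches the names passed to read_csv above):
#     "EUR/USD,1462233166327,1.14,53,1.14,55,1.14603,1.13999,1.14453"
# parses to a one-row frame indexed by 'Symbol', with 'Date' decoded from the
# millisecond timestamp.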
def _query_not_auth(session, lst_symbols, api_format, snapshot):
s = 'y' if snapshot else 'n'
params = {
'c': ','.join(lst_symbols),
'f': api_format,
's': s
}
response = _send_request(session, params)
if response.status_code != 200:
raise(Exception("Can't connect"))
return(response)
def _is_registered(username, password):
return(not (username=='' and password==''))
def _init_session(session=None):
if session is None:
return(requests.Session())
else:
return(session)
def _query(symbols='', qualifier='default', api_format='csv', snapshot=True, \
username='', password='', force_unregistered=False, flag_parse_data=True, session=None):
(username, password) = _init_credentials(username, password)
session = _init_session(session)
is_registered = _is_registered(username, password)
if isinstance(symbols, compat.string_types):
symbols = symbols.upper()
symbols = symbols.split(',')
else:
symbols = list(map(lambda s: s.upper(), symbols))
if symbols == ['']:
if not is_registered:
symbols = SYMBOLS_NOT_AUTH
else:
symbols = SYMBOLS_ALL
if not is_registered or force_unregistered:
response = _query_not_auth(session, symbols, api_format, snapshot)
data = response.text
else:
session_data = _connect(session, username, password, symbols, qualifier, \
api_format, snapshot)
error_msg = 'not authorized'
if error_msg in session_data:
raise(Exception(error_msg))
response = _query_auth_send(session, session_data)
data = response.text
response = _disconnect(session, session_data)
if flag_parse_data:
df = _parse_data(data)
return(df)
else:
return(data)
def read(symbols, username, password, force_unregistered, session):
qualifier = 'default'
api_format = 'csv'
snapshot = True
flag_parse_data = True
data = _query(symbols, qualifier, api_format, snapshot, username, password, \
force_unregistered, flag_parse_data, session)
return data
def _init_credentials(username='', password=''):
if username=='':
username = os.getenv('TRUEFX_USERNAME')
if password=='':
password = os.getenv('TRUEFX_PASSWORD')
return(username, password)
def _get_session(expire_after, cache_name='cache'):
"""
Returns a `requests.Session` or a `requests_cache.CachedSession`
Parameters
----------
expire_after : `str`
cache expiration delay
'-1' : no cache
'0' : no expiration
'00:15:00.0' : expiration delay
    cache_name : `str`
        Name of the cache file
"""
if expire_after=='-1':
expire_after = None
logger.debug("expire_after==0 no cache")
session = requests.Session()
else:
if expire_after=='0':
expire_after = 0
logger.debug("Installing cache '%s.sqlite' without expiration" % cache_name)
else:
expire_after = pd.to_timedelta(expire_after, unit='s')
logger.debug("Installing cache '%s.sqlite' with expire_after=%s (d days hh:mm:ss)" % (cache_name, expire_after))
session = requests_cache.CachedSession(\
cache_name=cache_name, expire_after=expire_after)
return session
@click.command()
@click.option('--symbols', default='', help="Symbols list (separated with ','")
@click.option('--username', default='', help="TrueFX username")
@click.option('--password', default='', help="TrueFX password")
@click.option('--force-unregistered/--no-force-unregistered', default=False, \
help=u"Force unregistered")
@click.option('--expire_after', default='00:15:00.0', \
help=u"Cache expiration (-1: no cache, 0: no expiration, 00:15:00.0: expiration delay)")
def main(symbols, username, password, force_unregistered, expire_after):
logging.basicConfig(level=logging.DEBUG)
print("""TrueFX - Python API call
========================
""")
session = _get_session(expire_after)
(username, password) = _init_credentials(username, password)
is_registered = _is_registered(username, password)
if not is_registered or force_unregistered:
print("""You should register to TrueFX at
http://www.truefx.com/
and pass username and password using CLI flag
--username your_username
--password your_password
or setting environment variables using:
export TRUEFX_USERNAME="your_username"
export TRUEFX_PASSWORD="your_password"
""")
data = read(symbols, username=username, password=password, force_unregistered=force_unregistered, session=session)
print(data)
if __name__ == "__main__":
main()
| 29.733871
| 124
| 0.644833
|
4b73d21d5a2080bf486cac24935cf83477810267
| 5,009
|
py
|
Python
|
adaptor/tests/test_adaptor.py
|
LeoComandini/adaptor-py
|
b9157b18718fed11115d6f3f59b2067411193306
|
[
"MIT"
] | 1
|
2021-08-30T11:18:02.000Z
|
2021-08-30T11:18:02.000Z
|
adaptor/tests/test_adaptor.py
|
LeoComandini/adaptor-py
|
b9157b18718fed11115d6f3f59b2067411193306
|
[
"MIT"
] | null | null | null |
adaptor/tests/test_adaptor.py
|
LeoComandini/adaptor-py
|
b9157b18718fed11115d6f3f59b2067411193306
|
[
"MIT"
] | null | null | null |
import os
import unittest
from adaptor.adaptor import *
from adaptor.ecdsa import *
from adaptor.schnorr import *
def rand32():
return int_from_bytes(os.urandom(32))
class TestsAdaptor(unittest.TestCase):
def test_adaptor_ecdsa(self):
x = 10
y = 14
message_hash = b'\xaa'*32
Y = y * G
X = x * G
a = ecdsa_adaptor_encrypt(x, Y, message_hash)
self.assertTrue(ecdsa_adaptor_verify(X, Y, message_hash, a))
sig = ecdsa_adaptor_decrypt(a, y)
y_recovered = ecdsa_adaptor_recover(Y, a, sig)
self.assertEqual(y, y_recovered)
def test_adaptor_schnorr(self):
x = 10
y = 14
message_hash = b'\xaa'*32
Y = y * G
X = x * G
a = schnorr_adaptor_encrypt(x, y, message_hash)
self.assertTrue(schnorr_adaptor_verify(X, Y, message_hash, a))
sig = schnorr_adaptor_decrypt(a, y)
y_recovered = schnorr_adaptor_recover(a, sig)
self.assertEqual(y, y_recovered)
def test_dlc_bet_ecdsa(self):
# Source: https://github.com/discreetlogcontracts/dlcspecs/blob/master/Introduction.md
# Alice and Bob want to bet on an outcome of some event.
# For the sake of simplicity, the event has a binary outcome, either
# Alice or Bob wins.
# Alice and Bob have access to an Oracle.
# The Oracle will publish its pubkey P and the nonce R that it will use
# for the event.
# Alice's keys
a = rand32()
A = a * G
# Bob's keys
b = rand32()
B = b * G
# Oracle's keys
p = rand32()
P = p * G
# Oracle's event nonces
k = rand32()
R = k * G
# Alice and Bob create a "Funding Transaction" sending funds to a 2of2
# output multi(2, A, B) without signing it.
# We need to know the txid of the tx even if it's not signed, thus all
# tx inputs must be segwit.
# Each gambler constructs a tx spending such output and sending the
# funds to his/her desired destination, such tx are called Contract
# Execution Transactions (CET).
# CETs require some signature hashes to be signed, CET sending funds to
# Alice requires m_ta, CET sending funds to Bob requires m_tb.
# They associate m_ea and m_eb to the outcomes of the event.
# If Alice wins, Oracle will schnorr sign m_ea.
# If Bob wins, Oracle will schnorr sign m_eb.
# Why schnorr? We need linearity.
m_ta = b'\x1a' * 32 # signature hash to be signed if Alice wins
m_tb = b'\x1b' * 32 # signature hash to be signed if Bob wins
m_ea = b'\xea' * 32 # message that Oracle will sign if Alice wins
m_eb = b'\xeb' * 32 # message that Oracle will sign if Bob wins
# Alice and Bob compute the signature points for the two messages.
S_a = R + schnorr_challenge(P, R, m_ea) * P
S_b = R + schnorr_challenge(P, R, m_eb) * P
# What is the signature point?
# s, R = schnorr_sign(p, m)
# S = s * G # signature point
# Note that the signature points can be computed without secrets.
# If Alice wins, Oracle will sign m_ea, and thus reveal the discrete
# logarithm of S_a.
# Alice produces an adaptor signature for message m_tb, encrypted with
# signature point S_b.
adaptor_sig_a = ecdsa_adaptor_encrypt(a, S_b, m_tb)
# Bob produces an adaptor signature for message m_ta, encrypted with
# signature point S_a.
adaptor_sig_b = ecdsa_adaptor_encrypt(b, S_a, m_ta)
# They both exchange the adaptor signatures.
# Alice verifies Bob's adaptor signature:
self.assertTrue(ecdsa_adaptor_verify(B, S_a, m_ta, adaptor_sig_b))
# Bob verifies Alice's adaptor signature:
self.assertTrue(ecdsa_adaptor_verify(A, S_b, m_tb, adaptor_sig_a))
# After verification succeeds, each party signs the Funding
# Transaction, which can then be broadcast.
# Alice and Bob wait for the event to happen.
# If the Oracle becomes unavailable, Alice and Bob can cooperate to
# spend, ignoring the event result.
# Now suppose WLOG that Alice wins.
# Oracle signs m_ea, using nonce (k, R) and publishes the signature.
sig = schnorr_sign(p, m_ea, k=k)
# Alice sees the signature and extracts the decryption key s_a for the
# adaptor signature produced by Bob.
s_a, R_recovered = sig
self.assertEqual(R_recovered, R)
# Alice decrypts Bob's adaptor signature and extract a valid signature
# from Bob for message m_ta.
sig_b = ecdsa_adaptor_decrypt(adaptor_sig_b, s_a)
self.assertTrue(ecdsa_verify(B, m_ta, sig_b))
        # Finally Alice signs message m_ta and is now able to spend the 2of2
# output.
sig_a = ecdsa_sign(a, m_ta)
self.assertTrue(ecdsa_verify(A, m_ta, sig_a))
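# Not part of the original file: mirrors the other test modules in this dump so
# the adaptor tests can also be run directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()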
| 41.741667
| 94
| 0.632262
|
67d68ee1688ac8435826dd8515a39dd2adb213bb
| 2,089
|
py
|
Python
|
certbot-dns-dnsimple/setup.py
|
mausinixterra/letsencrypt
|
2d4f36cc9fedafa91b9877ec2ec1f9f83da4ecd3
|
[
"Apache-2.0"
] | 2
|
2018-10-03T15:33:57.000Z
|
2020-06-09T20:28:29.000Z
|
certbot-dns-dnsimple/setup.py
|
mausinixterra/letsencrypt
|
2d4f36cc9fedafa91b9877ec2ec1f9f83da4ecd3
|
[
"Apache-2.0"
] | null | null | null |
certbot-dns-dnsimple/setup.py
|
mausinixterra/letsencrypt
|
2d4f36cc9fedafa91b9877ec2ec1f9f83da4ecd3
|
[
"Apache-2.0"
] | 3
|
2018-02-09T21:05:38.000Z
|
2019-06-10T00:39:00.000Z
|
import sys
from setuptools import setup
from setuptools import find_packages
version = '0.20.0.dev0'
# Please update tox.ini when modifying dependency version requirements
install_requires = [
'acme=={0}'.format(version),
'certbot=={0}'.format(version),
'dns-lexicon',
'mock',
# For pkg_resources. >=1.0 so pip resolves it to a version cryptography
# will tolerate; see #2599:
'setuptools>=1.0',
'zope.interface',
]
docs_extras = [
'Sphinx>=1.0', # autodoc_member_order = 'bysource', autodoc_default_flags
'sphinx_rtd_theme',
]
setup(
name='certbot-dns-dnsimple',
version=version,
description="DNSimple DNS Authenticator plugin for Certbot",
url='https://github.com/certbot/certbot',
author="Certbot Project",
author_email='client-dev@letsencrypt.org',
license='Apache License 2.0',
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Plugins',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Security',
'Topic :: System :: Installation/Setup',
'Topic :: System :: Networking',
'Topic :: System :: Systems Administration',
'Topic :: Utilities',
],
packages=find_packages(),
include_package_data=True,
install_requires=install_requires,
extras_require={
'docs': docs_extras,
},
entry_points={
'certbot.plugins': [
'dns-dnsimple = certbot_dns_dnsimple.dns_dnsimple:Authenticator',
],
},
test_suite='certbot_dns_dnsimple',
)
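# Illustrative usage sketch (not part of the original setup.py); the exact
# certbot flags are an assumption based on the 'dns-dnsimple' entry point
# registered above:
#
#     pip install certbot-dns-dnsimple
#     certbot certonly --authenticator dns-dnsimple \
#         --dns-dnsimple-credentials ~/.secrets/dnsimple.ini -d example.com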
| 30.275362
| 78
| 0.624222
|
46bbbcc46a2cbb459fc8e6e53592378049997cf8
| 24,092
|
py
|
Python
|
leanerp/helpdesk/tests/test_get_email.py
|
seLain/Leaf
|
f02e15576071429a29f76a06328d024b58a2d69e
|
[
"Apache-2.0"
] | null | null | null |
leanerp/helpdesk/tests/test_get_email.py
|
seLain/Leaf
|
f02e15576071429a29f76a06328d024b58a2d69e
|
[
"Apache-2.0"
] | 6
|
2018-02-20T13:59:07.000Z
|
2018-03-06T17:35:41.000Z
|
leanerp/helpdesk/tests/test_get_email.py
|
seLain/Leaf
|
f02e15576071429a29f76a06328d024b58a2d69e
|
[
"Apache-2.0"
] | 1
|
2018-03-06T17:28:07.000Z
|
2018-03-06T17:28:07.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from helpdesk.models import Queue, Ticket, TicketCC, FollowUp, Attachment
from django.test import TestCase
from django.core.management import call_command
from django.utils import six
from django.shortcuts import get_object_or_404
from django.contrib.auth.models import User
from django.contrib.auth.hashers import make_password
import itertools
from shutil import rmtree
import sys
from tempfile import mkdtemp
try: # python 3
from urllib.parse import urlparse
except ImportError: # python 2
from urlparse import urlparse
try:
# Python >= 3.3
from unittest import mock
except ImportError:
# Python < 3.3
import mock
# class A addresses can't have first octet of 0
unrouted_socks_server = "0.0.0.1"
unrouted_email_server = "0.0.0.1"
# the last user port, reserved by IANA
unused_port = "49151"
class GetEmailCommonTests(TestCase):
# tests correct syntax for command line option
def test_get_email_quiet_option(self):
"""Test quiet option is properly propagated"""
with mock.patch('helpdesk.management.commands.get_email.process_email') as mocked_processemail:
call_command('get_email', quiet=True)
mocked_processemail.assert_called_with(quiet=True)
call_command('get_email')
mocked_processemail.assert_called_with(quiet=False)
class GetEmailParametricTemplate(object):
"""TestCase that checks basic email functionality across methods and socks configs."""
def setUp(self):
self.temp_logdir = mkdtemp()
kwargs = {
"title": 'Basic Queue',
"slug": 'QQ',
"allow_public_submission": True,
"allow_email_submission": True,
"email_box_type": self.method,
"logging_dir": self.temp_logdir,
"logging_type": 'none'
}
if self.method == 'local':
kwargs["email_box_local_dir"] = '/var/lib/mail/helpdesk/'
else:
kwargs["email_box_host"] = unrouted_email_server
kwargs["email_box_port"] = unused_port
if self.socks:
kwargs["socks_proxy_type"] = self.socks
kwargs["socks_proxy_host"] = unrouted_socks_server
kwargs["socks_proxy_port"] = unused_port
self.queue_public = Queue.objects.create(**kwargs)
def tearDown(self):
rmtree(self.temp_logdir)
def test_read_plain_email(self):
"""Tests reading plain text emails from a queue and creating tickets.
For each email source supported, we mock the backend to provide
authentically formatted responses containing our test data."""
# example email text from Django docs: https://docs.djangoproject.com/en/1.10/ref/unicode/
test_email_from = "Arnbjörg Ráðormsdóttir <arnbjorg@example.com>"
test_email_subject = "My visit to Sør-Trøndelag"
test_email_body = "Unicode helpdesk comment with an s-hat (ŝ) via email."
test_email = "To: helpdesk@example.com\nFrom: " + test_email_from + "\nSubject: " + test_email_subject + "\n\n" + test_email_body
test_mail_len = len(test_email)
if self.socks:
from socks import ProxyConnectionError
with self.assertRaisesRegexp(ProxyConnectionError, '%s:%s' % (unrouted_socks_server, unused_port)):
call_command('get_email')
else:
# Test local email reading
if self.method == 'local':
with mock.patch('helpdesk.management.commands.get_email.listdir') as mocked_listdir, \
mock.patch('helpdesk.management.commands.get_email.isfile') as mocked_isfile, \
mock.patch('builtins.open' if six.PY3 else '__builtin__.open', mock.mock_open(read_data=test_email)):
mocked_isfile.return_value = True
mocked_listdir.return_value = ['filename1', 'filename2']
call_command('get_email')
mocked_listdir.assert_called_with('/var/lib/mail/helpdesk/')
mocked_isfile.assert_any_call('/var/lib/mail/helpdesk/filename1')
mocked_isfile.assert_any_call('/var/lib/mail/helpdesk/filename2')
elif self.method == 'pop3':
# mock poplib.POP3's list and retr methods to provide responses as per RFC 1939
pop3_emails = {
'1': ("+OK", test_email.split('\n')),
'2': ("+OK", test_email.split('\n')),
}
pop3_mail_list = ("+OK 2 messages", ("1 %d" % test_mail_len, "2 %d" % test_mail_len))
mocked_poplib_server = mock.Mock()
mocked_poplib_server.list = mock.Mock(return_value=pop3_mail_list)
mocked_poplib_server.retr = mock.Mock(side_effect=lambda x: pop3_emails[x])
with mock.patch('helpdesk.management.commands.get_email.poplib', autospec=True) as mocked_poplib:
mocked_poplib.POP3 = mock.Mock(return_value=mocked_poplib_server)
call_command('get_email')
elif self.method == 'imap':
# mock imaplib.IMAP4's search and fetch methods with responses from RFC 3501
imap_emails = {
"1": ("OK", (("1", test_email),)),
"2": ("OK", (("2", test_email),)),
}
imap_mail_list = ("OK", ("1 2",))
mocked_imaplib_server = mock.Mock()
mocked_imaplib_server.search = mock.Mock(return_value=imap_mail_list)
# we ignore the second arg as the data item/mime-part is constant (RFC822)
mocked_imaplib_server.fetch = mock.Mock(side_effect=lambda x, _: imap_emails[x])
with mock.patch('helpdesk.management.commands.get_email.imaplib', autospec=True) as mocked_imaplib:
mocked_imaplib.IMAP4 = mock.Mock(return_value=mocked_imaplib_server)
call_command('get_email')
ticket1 = get_object_or_404(Ticket, pk=1)
self.assertEqual(ticket1.ticket_for_url, "QQ-%s" % ticket1.id)
self.assertEqual(ticket1.title, test_email_subject)
self.assertEqual(ticket1.description, test_email_body)
ticket2 = get_object_or_404(Ticket, pk=2)
self.assertEqual(ticket2.ticket_for_url, "QQ-%s" % ticket2.id)
self.assertEqual(ticket2.title, test_email_subject)
self.assertEqual(ticket2.description, test_email_body)
def test_read_email_with_template_tag(self):
"""Tests reading plain text emails from a queue and creating tickets,
except this time the email body contains a Django template tag.
For each email source supported, we mock the backend to provide
authentically formatted responses containing our test data."""
# example email text from Django docs: https://docs.djangoproject.com/en/1.10/ref/unicode/
test_email_from = "Arnbjörg Ráðormsdóttir <arnbjorg@example.com>"
test_email_subject = "My visit to Sør-Trøndelag"
test_email_body = "Reporting some issue with the template tag: {% if helpdesk %}."
test_email = "To: helpdesk@example.com\nFrom: " + test_email_from + "\nSubject: " + test_email_subject + "\n\n" + test_email_body
test_mail_len = len(test_email)
if self.socks:
from socks import ProxyConnectionError
with self.assertRaisesRegexp(ProxyConnectionError, '%s:%s' % (unrouted_socks_server, unused_port)):
call_command('get_email')
else:
# Test local email reading
if self.method == 'local':
with mock.patch('helpdesk.management.commands.get_email.listdir') as mocked_listdir, \
mock.patch('helpdesk.management.commands.get_email.isfile') as mocked_isfile, \
mock.patch('builtins.open' if six.PY3 else '__builtin__.open', mock.mock_open(read_data=test_email)):
mocked_isfile.return_value = True
mocked_listdir.return_value = ['filename1', 'filename2']
call_command('get_email')
mocked_listdir.assert_called_with('/var/lib/mail/helpdesk/')
mocked_isfile.assert_any_call('/var/lib/mail/helpdesk/filename1')
mocked_isfile.assert_any_call('/var/lib/mail/helpdesk/filename2')
elif self.method == 'pop3':
# mock poplib.POP3's list and retr methods to provide responses as per RFC 1939
pop3_emails = {
'1': ("+OK", test_email.split('\n')),
'2': ("+OK", test_email.split('\n')),
}
pop3_mail_list = ("+OK 2 messages", ("1 %d" % test_mail_len, "2 %d" % test_mail_len))
mocked_poplib_server = mock.Mock()
mocked_poplib_server.list = mock.Mock(return_value=pop3_mail_list)
mocked_poplib_server.retr = mock.Mock(side_effect=lambda x: pop3_emails[x])
with mock.patch('helpdesk.management.commands.get_email.poplib', autospec=True) as mocked_poplib:
mocked_poplib.POP3 = mock.Mock(return_value=mocked_poplib_server)
call_command('get_email')
elif self.method == 'imap':
# mock imaplib.IMAP4's search and fetch methods with responses from RFC 3501
imap_emails = {
"1": ("OK", (("1", test_email),)),
"2": ("OK", (("2", test_email),)),
}
imap_mail_list = ("OK", ("1 2",))
mocked_imaplib_server = mock.Mock()
mocked_imaplib_server.search = mock.Mock(return_value=imap_mail_list)
# we ignore the second arg as the data item/mime-part is constant (RFC822)
mocked_imaplib_server.fetch = mock.Mock(side_effect=lambda x, _: imap_emails[x])
with mock.patch('helpdesk.management.commands.get_email.imaplib', autospec=True) as mocked_imaplib:
mocked_imaplib.IMAP4 = mock.Mock(return_value=mocked_imaplib_server)
call_command('get_email')
ticket1 = get_object_or_404(Ticket, pk=1)
self.assertEqual(ticket1.ticket_for_url, "QQ-%s" % ticket1.id)
self.assertEqual(ticket1.title, test_email_subject)
self.assertEqual(ticket1.description, test_email_body)
ticket2 = get_object_or_404(Ticket, pk=2)
self.assertEqual(ticket2.ticket_for_url, "QQ-%s" % ticket2.id)
self.assertEqual(ticket2.title, test_email_subject)
self.assertEqual(ticket2.description, test_email_body)
def test_read_html_multipart_email(self):
"""Tests reading multipart MIME (HTML body and plain text alternative)
emails from a queue and creating tickets.
For each email source supported, we mock the backend to provide
authentically formatted responses containing our test data."""
# example email text from Python docs: https://docs.python.org/3/library/email-examples.html
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
me = "my@example.com"
you = "your@example.com"
# NOTE: CC'd emails need to be alphabetical and tested as such!
# implementation uses sets, so only way to ensure tickets created
# in right order is to change set to list and sort it
cc_one = "nobody@example.com"
cc_two = "other@example.com"
cc = cc_one + ", " + cc_two
subject = "Link"
# Create message container - the correct MIME type is multipart/alternative.
msg = MIMEMultipart('alternative')
msg['Subject'] = subject
msg['From'] = me
msg['To'] = you
msg['Cc'] = cc
# Create the body of the message (a plain-text and an HTML version).
text = "Hi!\nHow are you?\nHere is the link you wanted:\nhttps://www.python.org"
html = """\
<html>
<head></head>
<body>
<p>Hi!<br>
How are you?<br>
Here is the <a href="https://www.python.org">link</a> you wanted.
</p>
</body>
</html>
"""
# Record the MIME types of both parts - text/plain and text/html.
part1 = MIMEText(text, 'plain')
part2 = MIMEText(html, 'html')
# Attach parts into message container.
# According to RFC 2046, the last part of a multipart message, in this case
# the HTML message, is best and preferred.
msg.attach(part1)
msg.attach(part2)
test_mail_len = len(msg)
if self.socks:
from socks import ProxyConnectionError
with self.assertRaisesRegexp(ProxyConnectionError, '%s:%s' % (unrouted_socks_server, unused_port)):
call_command('get_email')
else:
# Test local email reading
if self.method == 'local':
with mock.patch('helpdesk.management.commands.get_email.listdir') as mocked_listdir, \
mock.patch('helpdesk.management.commands.get_email.isfile') as mocked_isfile, \
mock.patch('builtins.open' if six.PY3 else '__builtin__.open', mock.mock_open(read_data=msg.as_string())):
mocked_isfile.return_value = True
mocked_listdir.return_value = ['filename1', 'filename2']
call_command('get_email')
mocked_listdir.assert_called_with('/var/lib/mail/helpdesk/')
mocked_isfile.assert_any_call('/var/lib/mail/helpdesk/filename1')
mocked_isfile.assert_any_call('/var/lib/mail/helpdesk/filename2')
elif self.method == 'pop3':
# mock poplib.POP3's list and retr methods to provide responses as per RFC 1939
pop3_emails = {
'1': ("+OK", msg.as_string().split('\n')),
'2': ("+OK", msg.as_string().split('\n')),
}
pop3_mail_list = ("+OK 2 messages", ("1 %d" % test_mail_len, "2 %d" % test_mail_len))
mocked_poplib_server = mock.Mock()
mocked_poplib_server.list = mock.Mock(return_value=pop3_mail_list)
mocked_poplib_server.retr = mock.Mock(side_effect=lambda x: pop3_emails[x])
with mock.patch('helpdesk.management.commands.get_email.poplib', autospec=True) as mocked_poplib:
mocked_poplib.POP3 = mock.Mock(return_value=mocked_poplib_server)
call_command('get_email')
elif self.method == 'imap':
# mock imaplib.IMAP4's search and fetch methods with responses from RFC 3501
imap_emails = {
"1": ("OK", (("1", msg.as_string()),)),
"2": ("OK", (("2", msg.as_string()),)),
}
imap_mail_list = ("OK", ("1 2",))
mocked_imaplib_server = mock.Mock()
mocked_imaplib_server.search = mock.Mock(return_value=imap_mail_list)
# we ignore the second arg as the data item/mime-part is constant (RFC822)
mocked_imaplib_server.fetch = mock.Mock(side_effect=lambda x, _: imap_emails[x])
with mock.patch('helpdesk.management.commands.get_email.imaplib', autospec=True) as mocked_imaplib:
mocked_imaplib.IMAP4 = mock.Mock(return_value=mocked_imaplib_server)
call_command('get_email')
ticket1 = get_object_or_404(Ticket, pk=1)
self.assertEqual(ticket1.ticket_for_url, "QQ-%s" % ticket1.id)
self.assertEqual(ticket1.title, subject)
# plain text should become description
self.assertEqual(ticket1.description, text)
# HTML MIME part should be attached to follow up
followup1 = get_object_or_404(FollowUp, pk=1)
self.assertEqual(followup1.ticket.id, 1)
attach1 = get_object_or_404(Attachment, pk=1)
self.assertEqual(attach1.followup.id, 1)
self.assertEqual(attach1.filename, 'email_html_body.html')
cc1 = get_object_or_404(TicketCC, pk=1)
self.assertEqual(cc1.email, cc_one)
cc2 = get_object_or_404(TicketCC, pk=2)
self.assertEqual(cc2.email, cc_two)
self.assertEqual(len(TicketCC.objects.filter(ticket=1)), 2)
ticket2 = get_object_or_404(Ticket, pk=2)
self.assertEqual(ticket2.ticket_for_url, "QQ-%s" % ticket2.id)
self.assertEqual(ticket2.title, subject)
# plain text should become description
self.assertEqual(ticket2.description, text)
# HTML MIME part should be attached to follow up
followup2 = get_object_or_404(FollowUp, pk=2)
self.assertEqual(followup2.ticket.id, 2)
attach2 = get_object_or_404(Attachment, pk=2)
self.assertEqual(attach2.followup.id, 2)
self.assertEqual(attach2.filename, 'email_html_body.html')
class GetEmailCCHandling(TestCase):
"""TestCase that checks CC handling in email. Needs its own test harness."""
def setUp(self):
self.temp_logdir = mkdtemp()
kwargs = {
"title": 'CC Queue',
"slug": 'CC',
"allow_public_submission": True,
"allow_email_submission": True,
"email_address": 'queue@example.com',
"email_box_type": 'local',
"email_box_local_dir": '/var/lib/mail/helpdesk/',
"logging_dir": self.temp_logdir,
"logging_type": 'none'
}
self.queue_public = Queue.objects.create(**kwargs)
user1_kwargs = {
'username': 'staff',
'email': 'staff@example.com',
'password': make_password('Test1234'),
'is_staff': True,
'is_superuser': False,
'is_active': True
}
self.staff_user = User.objects.create(**user1_kwargs)
user2_kwargs = {
'username': 'assigned',
'email': 'assigned@example.com',
'password': make_password('Test1234'),
'is_staff': True,
'is_superuser': False,
'is_active': True
}
self.assigned_user = User.objects.create(**user2_kwargs)
user3_kwargs = {
'username': 'observer',
'email': 'observer@example.com',
'password': make_password('Test1234'),
'is_staff': True,
'is_superuser': False,
'is_active': True
}
self.observer_user = User.objects.create(**user3_kwargs)
ticket_kwargs = {
'title': 'Original Ticket',
'queue': self.queue_public,
'submitter_email': 'submitter@example.com',
'assigned_to': self.assigned_user,
'status': 1
}
self.original_ticket = Ticket.objects.create(**ticket_kwargs)
cc_kwargs = {
'ticket': self.original_ticket,
'user': self.staff_user,
'can_view': True,
'can_update': True
}
self.original_cc = TicketCC.objects.create(**cc_kwargs)
def tearDown(self):
rmtree(self.temp_logdir)
def test_read_email_cc(self):
"""Tests reading plain text emails from a queue and adding to a ticket,
particularly to test appropriate handling of CC'd emails."""
# first, check that test ticket exists
ticket1 = get_object_or_404(Ticket, pk=1)
self.assertEqual(ticket1.ticket_for_url, "CC-1")
self.assertEqual(ticket1.title, "Original Ticket")
# only the staff_user is CC'd for now
self.assertEqual(len(TicketCC.objects.filter(ticket=1)), 1)
ccstaff = get_object_or_404(TicketCC, pk=1)
self.assertEqual(ccstaff.user, User.objects.get(username='staff'))
self.assertEqual(ticket1.assigned_to, User.objects.get(username='assigned'))
# example email text from Django docs: https://docs.djangoproject.com/en/1.10/ref/unicode/
test_email_from = "submitter@example.com"
# NOTE: CC emails are in alphabetical order and must be tested as such!
# implementation uses sets, so only way to ensure tickets created
# in right order is to change set to list and sort it
test_email_cc_one = "Alice Ráðormsdóttir <alice@example.com>"
test_email_cc_two = "nobody@example.com"
test_email_cc_three = "other@example.com"
test_email_cc_four = "someone@example.com"
ticket_user_emails = "assigned@example.com, staff@example.com, submitter@example.com, observer@example.com, queue@example.com"
test_email_subject = "[CC-1] My visit to Sør-Trøndelag"
test_email_body = "Unicode helpdesk comment with an s-hat (ŝ) via email."
test_email = "To: queue@example.com\nCc: " + test_email_cc_one + ", " + test_email_cc_one + ", " + test_email_cc_two + ", " + test_email_cc_three + "\nCC: " + test_email_cc_one + ", " + test_email_cc_three + ", " + test_email_cc_four + ", " + ticket_user_emails + "\nFrom: " + test_email_from + "\nSubject: " + test_email_subject + "\n\n" + test_email_body
test_mail_len = len(test_email)
with mock.patch('helpdesk.management.commands.get_email.listdir') as mocked_listdir, \
mock.patch('helpdesk.management.commands.get_email.isfile') as mocked_isfile, \
mock.patch('builtins.open' if six.PY3 else '__builtin__.open', mock.mock_open(read_data=test_email)):
mocked_isfile.return_value = True
mocked_listdir.return_value = ['filename1']
call_command('get_email')
mocked_listdir.assert_called_with('/var/lib/mail/helpdesk/')
mocked_isfile.assert_any_call('/var/lib/mail/helpdesk/filename1')
# ensure these 4 CCs (test_email_cc one thru four) are the only ones
# created and added to the existing staff_user that was CC'd,
        # and the observer user that gets CC'd on new email,
# and that submitter and assignee are not added as CC either
# (in other words, even though everyone was CC'd to this email,
# we should come out with only 6 CCs after filtering)
self.assertEqual(len(TicketCC.objects.filter(ticket=1)), 6)
        # next we make sure no duplicates were added, and that neither the
        # staff user nor the submitter was re-added as an email TicketCC
cc0 = get_object_or_404(TicketCC, pk=2)
self.assertEqual(cc0.user, User.objects.get(username='observer'))
cc1 = get_object_or_404(TicketCC, pk=3)
self.assertEqual(cc1.email, test_email_cc_one)
cc2 = get_object_or_404(TicketCC, pk=4)
self.assertEqual(cc2.email, test_email_cc_two)
cc3 = get_object_or_404(TicketCC, pk=5)
self.assertEqual(cc3.email, test_email_cc_three)
cc4 = get_object_or_404(TicketCC, pk=6)
self.assertEqual(cc4.email, test_email_cc_four)
# build matrix of test cases
case_methods = [c[0] for c in Queue._meta.get_field('email_box_type').choices]
case_socks = [False] + [c[0] for c in Queue._meta.get_field('socks_proxy_type').choices]
case_matrix = list(itertools.product(case_methods, case_socks))
# Populate TestCases from the matrix of parameters
thismodule = sys.modules[__name__]
for method, socks in case_matrix:
if method == "local" and socks:
continue
socks_str = "Nosocks"
if socks:
socks_str = socks.capitalize()
test_name = str(
"TestGetEmail%s%s" % (method.capitalize(), socks_str))
cl = type(test_name, (GetEmailParametricTemplate, TestCase), {"method": method, "socks": socks})
setattr(thismodule, test_name, cl)
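# Hedged illustration, not part of django-helpdesk itself: the POP3 mocking
# pattern used by the parametric tests above, reduced to its essentials.
# The tests fake the (status, payload) tuples that the get_email command
# expects from poplib's list() and retr() calls (per RFC 1939), so a plain
# mock.Mock with canned return values is enough. The message text is made up.
def _example_pop3_server_mock():
    fake_email = "To: helpdesk@example.com\nSubject: demo\n\ndemo body"
    server = mock.Mock()
    # one message in the maildrop, reported with its octet count
    server.list = mock.Mock(
        return_value=("+OK 1 messages", ("1 %d" % len(fake_email),)))
    # retr() hands the message back split into lines
    server.retr = mock.Mock(return_value=("+OK", fake_email.split('\n')))
    return server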
| 47.054688
| 364
| 0.620538
|
f8d7050e863c1b7756d6e8ce317fc81c05ed12dd
| 915
|
py
|
Python
|
utils/minSubArrayLen.py
|
wisesky/LeetCode-Practice
|
65549f72c565d9f11641c86d6cef9c7988805817
|
[
"MIT"
] | null | null | null |
utils/minSubArrayLen.py
|
wisesky/LeetCode-Practice
|
65549f72c565d9f11641c86d6cef9c7988805817
|
[
"MIT"
] | null | null | null |
utils/minSubArrayLen.py
|
wisesky/LeetCode-Practice
|
65549f72c565d9f11641c86d6cef9c7988805817
|
[
"MIT"
] | null | null | null |
""" 题目描述
给定一个含有 n 个正整数的数组和一个正整数 s ,找出该数组中满足其和≥ s的长度最小的连续子数组。如果不存在符合条件的连续子数组,返回 0。
示例:
输入: s = 7, nums = [2,3,1,2,4,3]
输出: 2
解释: 子数组 [4,3] 是该条件下的长度最小的连续子数组。
"""
class Solution:
def minSubArrayLen(self, s, nums):
pre = 0
su = 0
le = 0
for i,num in enumerate(nums):
# print(su)
if num >= s:
return 1
su += num
if i == 0 or su < s:
continue
for j in range(pre, i):
resu = su - nums[j]
if resu >= s:
su = resu
else:
break
pre = j
le = i - pre + 1 if le == 0 else min(i-pre+1, le)
return le
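# Hedged alternative sketch, not part of the original solution above: the
# standard O(n) sliding-window approach to the same problem, added here for
# comparison. The function name is an illustrative addition.
def min_sub_array_len_sliding_window(s, nums):
    left = 0
    window_sum = 0
    best = 0
    for right, num in enumerate(nums):
        window_sum += num
        # shrink from the left while the window still meets the target sum
        while window_sum >= s:
            length = right - left + 1
            best = length if best == 0 else min(best, length)
            window_sum -= nums[left]
            left += 1
    return best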
if __name__ == "__main__":
s = 7
nums = [2, 3, 1, 2, 4, 3, 7]
nums = [1,1]
so = Solution()
# so.minSubArrayLen(s, nums)
print(so.minSubArrayLen(s, nums))
| 22.875
| 72
| 0.437158
|
de3b59c604e89af117a582b1e1d355dc24c9efc0
| 8,060
|
py
|
Python
|
event_three.py
|
Malcolm1998/CS412T1C2
|
5a79bc27452c162dcd0ac94fa32b5fb717345bb5
|
[
"MIT"
] | null | null | null |
event_three.py
|
Malcolm1998/CS412T1C2
|
5a79bc27452c162dcd0ac94fa32b5fb717345bb5
|
[
"MIT"
] | null | null | null |
event_three.py
|
Malcolm1998/CS412T1C2
|
5a79bc27452c162dcd0ac94fa32b5fb717345bb5
|
[
"MIT"
] | null | null | null |
import signal
import rospy
import smach
import smach_ros
import math
import time
import cv2
from math import tanh
from geometry_msgs.msg import Twist
from sensor_msgs.msg import Image
import numpy as np
from sensor_msgs.msg import LaserScan
from sensor_msgs.msg import Joy
from kobuki_msgs.msg import Sound
import detect_shape
import event_two
global shutdown_requested
global number_of_checks
class OdomFollow(smach.State):
def __init__(self, callbacks):
smach.State.__init__(self, outcomes=['done3', 'rotate_left'])
self.callbacks = callbacks
self.twist = Twist()
self.cmd_vel_pub = rospy.Publisher('cmd_vel_mux/input/teleop', Twist, queue_size=1)
self.prev_error = None
self.Kp = 1.0 / 50.0
self.Ki = 1.0 / 50.0
self.Kd = 1.0 / 50.0
self.speed = 0.8
def execute(self, userdata):
global shutdown_requested
global number_of_checks
if number_of_checks == 0:
distance = 0.6
else:
distance = 0.3
while self.callbacks.pose is None:
time.sleep(1)
sp = self.callbacks.pose
ep = sp
while math.sqrt((sp.x - ep.x) ** 2 + (sp.y - ep.y) ** 2) < distance:
if shutdown_requested:
return 'done3'
h = self.callbacks.h
w = self.callbacks.w
search_top = 3 * h / 4
search_bot = h
bottom_white_mask = self.callbacks.white_mask.copy()
bottom_white_mask[0:search_top, 0:w] = 0
bottom_white_mask[search_bot:h, 0:w] = 0
M = cv2.moments(bottom_white_mask)
if M['m00'] > 0:
cx = int(M['m10'] / M['m00'])
cy = int(M['m01'] / M['m00'])
# BEGIN CONTROL
if self.prev_error is None:
error = cx - self.callbacks.w / 2
rotation = -(self.Kp * float(error))
self.prev_error = error
else:
error = cx - self.callbacks.w / 2
rotation = -(self.Kp * float(error) + self.Kd * (error - self.prev_error))
self.prev_error = error
self.twist.linear.x = self.speed
self.twist.angular.z = rotation
self.cmd_vel_pub.publish(self.twist)
# END CONTROL
ep = self.callbacks.pose
self.twist.linear.x = 0
self.twist.angular.z = 0
self.cmd_vel_pub.publish(self.twist)
return 'rotate_left'
class RotateLeft(smach.State):
def __init__(self, callbacks):
smach.State.__init__(self, outcomes=['done3', 'check'])
self.callbacks = callbacks
self.twist = Twist()
self.cmd_vel_pub = rospy.Publisher('cmd_vel_mux/input/teleop', Twist, queue_size=1)
def execute(self, userdata):
global shutdown_requested
while not shutdown_requested:
target_heading = (self.callbacks.heading + 90) % 360
turning = True
previous_difference = None
while turning:
if shutdown_requested:
return 'done3'
difference = minimum_angle_between_headings(target_heading, self.callbacks.heading)
#print(difference)
if previous_difference is None:
self.twist.angular.z = 0.4
self.cmd_vel_pub.publish(self.twist)
else:
if difference < 1:
turning = False
self.twist.angular.z = 0
self.cmd_vel_pub.publish(self.twist)
else:
self.twist.angular.z = 0.4
self.cmd_vel_pub.publish(self.twist)
if previous_difference != difference:
previous_difference = difference
return 'check'
return 'done3'
class Check(smach.State):
def __init__(self, callbacks):
smach.State.__init__(self, outcomes=['done3', 'rotate_right'])
self.callbacks = callbacks
self.twist = Twist()
self.cmd_vel_pub = rospy.Publisher('cmd_vel_mux/input/teleop', Twist, queue_size=1)
self.sound_pub = rospy.Publisher('/mobile_base/commands/sound', Sound, queue_size=1)
def execute(self, userdata):
global shutdown_requested
global number_of_checks
while not shutdown_requested:
number_of_checks += 1
h = self.callbacks.h
w = self.callbacks.w
symbol_red_mask = self.callbacks.symbol_red_mask.copy()
symbol_red_mask[0:h / 2, 0:w] = 0
shapes = detect_shape.detect_shape(symbol_red_mask)[0]
if len(shapes) > 0:
current_shape = shapes[0]
print(current_shape)
if current_shape.value == event_two.previous_shape:
self.sound_pub.publish(1)
if current_shape.value == 3:
if event_two.previous_shape in [-1, 5]:
self.sound_pub.publish(1)
time.sleep(1)
return "rotate_right"
return 'done3'
class RotateRight(smach.State):
def __init__(self, callbacks):
smach.State.__init__(self, outcomes=['done3', 'success3', 'odom_follow'])
self.callbacks = callbacks
self.twist = Twist()
self.cmd_vel_pub = rospy.Publisher('cmd_vel_mux/input/teleop', Twist, queue_size=1)
def execute(self, userdata):
global shutdown_requested
global number_of_checks
while not shutdown_requested:
target_heading = self.callbacks.heading - 90
if target_heading < 0:
target_heading = target_heading + 360
turning = True
previous_difference = None
while turning:
if shutdown_requested:
return 'done3'
difference = minimum_angle_between_headings(target_heading, self.callbacks.heading)
if previous_difference is None:
self.twist.angular.z = -0.4
self.cmd_vel_pub.publish(self.twist)
else:
if difference < 0.5:
turning = False
self.twist.angular.z = 0
self.cmd_vel_pub.publish(self.twist)
else:
self.twist.angular.z = -0.4
self.cmd_vel_pub.publish(self.twist)
if previous_difference != difference:
previous_difference = difference
if number_of_checks >= 3:
return 'success3'
else:
return 'odom_follow'
return 'done3'
def minimum_angle_between_headings(a, b):
heading_difference = a - b
if heading_difference < 0:
heading_difference += 360
if heading_difference > 180:
heading_difference = b - a
if heading_difference < 0:
heading_difference += 360
return heading_difference
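# Hedged sanity check, not part of the original competition node: a few
# hand-picked headings showing that the helper above always returns the
# smaller arc between two compass headings. Values are illustrative only.
def _demo_minimum_angle_between_headings():
    assert minimum_angle_between_headings(350, 10) == 20
    assert minimum_angle_between_headings(10, 350) == 20
    assert minimum_angle_between_headings(90, 45) == 45
    assert minimum_angle_between_headings(180, 0) == 180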
def get_state_machine(callbacks):
global number_of_checks
number_of_checks = 0
sm_event_3 = smach.StateMachine(outcomes=['DONE3', 'SUCCESS3'])
with sm_event_3:
smach.StateMachine.add('ODOM_FOLLOW', OdomFollow(callbacks),
transitions={'done3': 'DONE3', 'rotate_left': 'ROTATE_LEFT'})
smach.StateMachine.add('ROTATE_LEFT', RotateLeft(callbacks),
transitions={'done3': 'DONE3', 'check': 'CHECK'})
smach.StateMachine.add('CHECK', Check(callbacks),
transitions={'done3': 'DONE3', 'rotate_right': 'ROTATE_RIGHT'})
smach.StateMachine.add('ROTATE_RIGHT', RotateRight(callbacks),
transitions={'done3': 'DONE3', 'success3': 'SUCCESS3', 'odom_follow': 'ODOM_FOLLOW'})
return sm_event_3
| 34.444444
| 116
| 0.562779
|
82b42c28b9a4f60f1f0cb9cfd7c51c57dab7ffb9
| 16,977
|
py
|
Python
|
collector.py
|
couchbase/healthchecker
|
a43acce50fe273772205270f41a6f2a13ea6a7b5
|
[
"Apache-2.0"
] | 2
|
2016-01-14T18:25:14.000Z
|
2018-10-24T10:16:58.000Z
|
collector.py
|
couchbase/healthchecker
|
a43acce50fe273772205270f41a6f2a13ea6a7b5
|
[
"Apache-2.0"
] | null | null | null |
collector.py
|
couchbase/healthchecker
|
a43acce50fe273772205270f41a6f2a13ea6a7b5
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
import traceback
import copy
import logging
import simplejson as json
import listservers
import buckets
import info
import util_cli as util
import cb_bin_client
import stats_buffer
class StatsCollector:
def __init__(self, log):
self.log = log
def seg(self, k, v):
# Parse ('some_stat_x_y', 'v') into (('some_stat', x, y), v)
ka = k.split('_')
k = '_'.join(ka[0:-1])
kstart, kend = [int(x) for x in ka[-1].split(',')]
return ((k, kstart, kend), int(v))
def write_file(self, filename, info):
f = open(filename, 'w')
print >> f, util.pretty_print(info)
f.close()
def retrieve_node_stats(self, nodeInfo, nodeStats):
nodeStats['portDirect'] = nodeInfo['ports']['direct']
nodeStats['portProxy'] = nodeInfo['ports']['proxy']
nodeStats['clusterMembership'] = nodeInfo['clusterMembership']
nodeStats['os'] = nodeInfo['os']
nodeStats['uptime'] = nodeInfo['uptime']
nodeStats['version'] = nodeInfo['version']
nodeStats['num_processor'] = 1 #TODO: read from cbcollectinfo
#memory
nodeStats['memory'] = {}
nodeStats['memory']['allocated'] = nodeInfo['mcdMemoryAllocated']
nodeStats['memory']['reserved'] = nodeInfo['mcdMemoryReserved']
nodeStats['memory']['free'] = nodeInfo['memoryFree']
nodeStats['memory']['quota'] = nodeInfo['memoryQuota']
nodeStats['memory']['total'] = nodeInfo['memoryTotal']
#availableStorage
nodeStats['availableStorage'] = copy.deepcopy(nodeInfo['availableStorage'])
#storageInfo
nodeStats['StorageInfo'] = {}
if nodeInfo['storageTotals'] and nodeInfo['storage']:
#print nodeInfo
hdd = nodeInfo['storageTotals']['hdd']
if hdd:
nodeStats['StorageInfo']['hdd'] = {}
nodeStats['StorageInfo']['hdd']['free'] = hdd['free']
nodeStats['StorageInfo']['hdd']['quotaTotal'] = hdd['quotaTotal']
nodeStats['StorageInfo']['hdd']['total'] = hdd['total']
nodeStats['StorageInfo']['hdd']['used'] = hdd['used']
nodeStats['StorageInfo']['hdd']['usedByData'] = hdd['usedByData']
if nodeInfo['storage']['hdd']:
nodeStats['StorageInfo']['type'] = 'hdd'
nodeStats['StorageInfo']['storage'] = copy.deepcopy(nodeInfo['storage']['hdd'])
                elif nodeInfo['storage']['ssd']:
nodeStats['StorageInfo']['type'] = 'ssd'
nodeStats['StorageInfo']['storage'] = copy.deepcopy(nodeInfo['storage']['ssd'])
else:
nodeStats['StorageInfo']['type'] = None
nodeStats['StorageInfo']['storage'] = {}
ram = nodeInfo['storageTotals']['ram']
if ram:
nodeStats['StorageInfo']['ram'] = {}
nodeStats['StorageInfo']['ram']['quotaTotal'] = ram['quotaTotal']
nodeStats['StorageInfo']['ram']['total'] = ram['total']
nodeStats['StorageInfo']['ram']['used'] = ram['used']
nodeStats['StorageInfo']['ram']['usedByData'] = ram['usedByData']
if ram.has_key('quotaUsed'):
nodeStats['StorageInfo']['ram']['quotaUsed'] = ram['quotaUsed']
else:
nodeStats['StorageInfo']['ram']['quotaUsed'] = 0
#system stats
nodeStats['systemStats'] = {}
nodeStats['systemStats']['cpu_utilization_rate'] = nodeInfo['systemStats']['cpu_utilization_rate']
nodeStats['systemStats']['swap_total'] = nodeInfo['systemStats']['swap_total']
nodeStats['systemStats']['swap_used'] = nodeInfo['systemStats']['swap_used']
curr_items = 0
curr_items_tot = 0
vb_rep_curr_items = 0
if nodeInfo['interestingStats']:
if nodeInfo['interestingStats'].has_key('curr_items'):
curr_items = nodeInfo['interestingStats']['curr_items']
else:
curr_items = 0
if nodeInfo['interestingStats'].has_key('curr_items_tot'):
curr_items_tot = nodeInfo['interestingStats']['curr_items_tot']
else:
curr_items_tot = 0
if nodeInfo['interestingStats'].has_key('vb_replica_curr_items'):
vb_rep_curr_items = nodeInfo['interestingStats']['vb_replica_curr_items']
else:
vb_rep_curr_items = 0
nodeStats['systemStats']['currentItems'] = curr_items
nodeStats['systemStats']['currentItemsTotal'] = curr_items_tot
nodeStats['systemStats']['replicaCurrentItems'] = vb_rep_curr_items
def get_hostlist(self, server, port, user, password, opts):
try:
opts.append(("-o", "return"))
nodes = listservers.ListServers().runCmd('host-list', server, port, user, password, False, opts)
for node in nodes:
(node_server, node_port) = util.hostport(node['hostname'])
node_stats = {"host" : node_server,
"port" : node_port,
"status" : node['status'],
"master" : server}
stats_buffer.nodes[node['hostname']] = node_stats
if node['status'] == 'healthy':
node_info = info.Info().runCmd('get-server-info', node_server, node_port, user, password, False, opts)
self.retrieve_node_stats(node_info, node_stats)
else:
self.log.error("Unhealthy node: %s:%s" %(node_server, node['status']))
return nodes
except Exception, err:
traceback.print_exc()
sys.exit(1)
def get_bucketlist(self, server, port, user, password, bucketname, opts):
try:
bucketlist = buckets.Buckets().runCmd('bucket-get', server, port, user, password, False, opts)
for bucket in bucketlist:
bucket_name = bucket['name']
if bucketname == 'all' or bucket_name == bucketname:
bucketinfo = {}
bucketinfo['name'] = bucket_name
bucketinfo['bucketType'] = bucket['bucketType']
bucketinfo['authType'] = bucket['authType']
bucketinfo['saslPassword'] = bucket['saslPassword']
bucketinfo['numReplica'] = bucket['replicaNumber']
bucketinfo['ramQuota'] = bucket['quota']['ram']
bucketinfo['master'] = server
if bucket.has_key('vBucketServerMap'):
bucketinfo['vBucketServerMap'] = bucket['vBucketServerMap']
else:
if bucket['bucketType'] != "memcached":
self.log.error("vBucketServerMap doesn't exist from bucket info")
self.log.error(bucket)
bucketinfo['numDdoc'], bucketinfo['numView'] = \
self.number_bucketddocs(server, port, user, password, bucket_name, \
bucket['bucketType'], opts)
stats_buffer.bucket_info[bucket_name] = bucketinfo
return bucketlist
except Exception, err:
traceback.print_exc()
sys.exit(1)
def number_bucketddocs(self, server, port, user, password, bucketname, buckettype, opts):
if buckettype == 'memcached':
return (0, 0)
else:
try:
opts_tmp = opts
opts_tmp.append(('-b', bucketname))
docs = buckets.Buckets().runCmd('bucket-ddocs', server, port, user, password, False, opts_tmp)
total_ddocs = 0
total_view = 0
if docs:
for row in docs["rows"]:
if row["doc"]["meta"]["id"].find("_design/dev_") >= 0:
continue
total_ddocs += 1
total_view += len(row["doc"]["json"]["views"])
if total_ddocs:
total_view /= total_ddocs
return (total_ddocs, total_view)
except Exception, err:
traceback.print_exc()
sys.exit(1)
def process_histogram_data(self, histogram_data):
vals = sorted([self.seg(*kv) for kv in histogram_data.items()])
dd = {}
totals = {}
for s in vals:
if s[0][2] > util.BIG_VALUE:
                # Ignore the upper bound if it is extremely big
avg = s[0][1]
else:
avg = (s[0][1] + s[0][2]) / 2
k = s[0][0]
l = dd.get(k, [])
l.append((avg, s[1]))
dd[k] = l
totals[k] = totals.get(k, 0) + s[1]
return (dd, totals)
def get_mc_stats_per_node(self, mc, stats):
cmd_list = ["timings", "tap", "checkpoint", "memory", ""]
try:
for cmd in cmd_list:
if mc:
node_stats = mc.stats(cmd)
else:
node_stats = stats
if node_stats:
if cmd == "timings":
dd = {}
if mc:
dd, totals = self.process_histogram_data(node_stats)
else:
for key in stats.iterkeys():
if key.find("timing_") >= 0 or key.find("timging_") >= 0:
node_stats = stats[key]
dd, totals = self.process_histogram_data(node_stats)
break
if dd:
for k in sorted(dd):
ccount = 0
for lbl,v in dd[k]:
ccount += v * lbl
stats[k] = ccount / totals[k]
stats["timing_"] = node_stats
else:
for key, val in node_stats.items():
stats[key] = val
except Exception, err:
traceback.print_exc()
def get_mc_stats(self, server, bucketlist, nodes, bucketname):
for bucket in bucketlist:
bucket_name = bucket['name']
if bucketname == 'all' or bucket_name == bucketname:
self.log.info("bucket: %s" % bucket_name)
stats_buffer.node_stats[bucket_name] = {}
if stats_buffer.bucket_info[bucket_name]["bucketType"] == 'memcached':
self.log.info("Skip memcached bucket: %s" % bucket_name)
continue
for node in nodes:
(node_server, node_port) = util.hostport(node['hostname'])
self.log.info(" node: %s %s" % (node_server, node['ports']['direct']))
if node['status'] == 'healthy':
try:
stats = {}
mc = cb_bin_client.MemcachedClient(node_server, node['ports']['direct'])
if bucket["name"] != "Default":
mc.sasl_auth_plain(bucket_name.encode("utf8"), bucket["saslPassword"].encode("utf8"))
self.get_mc_stats_per_node(mc, stats)
stats_buffer.node_stats[bucket_name][node['hostname']] = stats
except Exception, err:
#stats_buffer.nodes[node['hostname']]['status'] = 'down'
traceback.print_exc()
def get_mc_stats_fromfile(self, bucketname, collected_buckets, collected_nodes):
for bucket_name in collected_buckets.iterkeys():
if bucketname == 'all' or bucket_name == bucketname:
#stats_buffer.node_stats[bucket_name] = {}
if stats_buffer.bucket_info[bucket_name]["bucketType"] == 'memcached':
self.log.info("Skip memcached bucket: %s" % bucket_name)
continue
for node in collected_nodes.iterkeys():
(node_server, node_port) = util.hostport(node)
if collected_nodes[node]['status'] == 'healthy':
try:
self.get_mc_stats_per_node(None, stats_buffer.node_stats[bucket_name][node])
except Exception, err:
traceback.print_exc()
sys.exit(1)
def get_ns_stats(self, bucketlist, server, port, user, password, bucketname, scale, opts):
stats_buffer.stats[scale] = copy.deepcopy(stats_buffer.counters)
for bucket in bucketlist:
bucket_name = bucket['name']
if bucketname == 'all' or bucket_name == bucketname:
if stats_buffer.bucket_info[bucket_name]["bucketType"] == 'memcached':
continue
stats_buffer.buckets[bucket_name] = copy.deepcopy(stats_buffer.stats)
cmd = 'bucket-node-stats'
for scale, stat_set in stats_buffer.buckets[bucket_name].iteritems():
for stat in stat_set.iterkeys():
try :
sys.stderr.write('.')
self.log.debug("retrieve: %s" % stat)
c = buckets.BucketNodeStats(bucket_name, stat, scale)
json = c.runCmd('bucket-node-stats', server, port, user, password, False, opts)
for node, vals in json["nodeStats"].iteritems():
for i, val in enumerate(vals):
if isinstance(val, basestring):
vals[i] = 0
stats_buffer.buckets[bucket_name][scale][stat] = json
except Exception, err:
self.log.debug("%s doesn't exist from ns stats" % stat)
stats_buffer.buckets[bucket_name][scale][stat] = None
pass
sys.stderr.write('\n')
def collect_data(self, bucketname, cluster, user, password, inputfile, statsfile, scale_val, opts, output_dir):
scale_set = []
if scale_val == 'all':
scale_set = ['minute', 'hour', 'day', 'week', 'month', 'year']
else:
scale_set = [scale_val]
if not inputfile:
server, port = util.hostport(cluster)
#get node list info
nodes = self.get_hostlist(server, port, user, password, opts)
self.log.debug(util.pretty_print(stats_buffer.nodes))
#get bucket list
bucketlist = self.get_bucketlist(server, port, user, password, bucketname, opts)
self.log.debug(util.pretty_print(stats_buffer.bucket_info))
self.log.debug(util.pretty_print(stats_buffer.buckets_summary))
#get stats from ep-engine
self.get_mc_stats(server, bucketlist, nodes, bucketname)
self.log.debug(util.pretty_print(stats_buffer.node_stats))
collected_data = {}
for scale in scale_set:
#get stats from ns-server
self.get_ns_stats(bucketlist, server, port, user, password, bucketname, scale, opts)
self.log.debug(util.pretty_print(stats_buffer.buckets))
collected_data["scale"] = scale_val
collected_data["nodes"] = stats_buffer.nodes
collected_data["bucket_info"] = stats_buffer.bucket_info
collected_data["buckets_summary"] = stats_buffer.buckets_summary
collected_data["node_stats"] = stats_buffer.node_stats
collected_data["buckets"] = stats_buffer.buckets
self.write_file(os.path.join(output_dir, statsfile), collected_data)
else:
json_data=open(inputfile)
collected_data = json.load(json_data)
json_data.close()
stats_buffer.nodes = collected_data["nodes"]
stats_buffer.bucket_info = collected_data["bucket_info"]
stats_buffer.buckets_summary = collected_data["buckets_summary"]
stats_buffer.node_stats = collected_data["node_stats"]
self.get_mc_stats_fromfile(bucketname,
collected_data["buckets"],
collected_data["nodes"])
stats_buffer.buckets = collected_data["buckets"]
scale_val = collected_data["scale"]
return scale_val
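# Hedged illustration, not part of the original collector: how seg() and
# process_histogram_data() above turn memcached timing keys of the form
# "<stat>_<low>,<high>" into per-stat weighted buckets. The sample dict and
# the helper name are assumptions added for demonstration only.
def _demo_histogram_parsing():
    collector = StatsCollector(logging.getLogger(__name__))
    sample = {"get_cmd_0,10": "4", "get_cmd_10,20": "2", "set_cmd_0,5": "3"}
    dd, totals = collector.process_histogram_data(sample)
    # dd maps each stat name to a list of (bucket midpoint, sample count) pairs,
    # totals maps each stat name to its total sample count
    return dd, totals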
| 47.289694
| 122
| 0.523473
|
9551c1a2ca9c32f337034987d9fb0203d8b2b7cf
| 5,060
|
py
|
Python
|
tfplus/nn/cnn.py
|
renmengye/tfplus
|
9b1f71d33f24af510c701c7d6f6bb816720ed701
|
[
"MIT"
] | 2
|
2016-10-11T11:01:07.000Z
|
2016-10-11T11:02:32.000Z
|
tfplus/nn/cnn.py
|
renmengye/tfplus
|
9b1f71d33f24af510c701c7d6f6bb816720ed701
|
[
"MIT"
] | null | null | null |
tfplus/nn/cnn.py
|
renmengye/tfplus
|
9b1f71d33f24af510c701c7d6f6bb816720ed701
|
[
"MIT"
] | 2
|
2016-08-03T19:20:48.000Z
|
2019-06-24T07:36:22.000Z
|
from __future__ import division
from graph_builder import GraphBuilder
import numpy as np
import tensorflow as tf
from ops import Conv2D, MaxPool
from batch_norm import BatchNorm
class CNN(GraphBuilder):
def __init__(self, f, ch, pool, act, use_bn, wd=None, use_stride=False,
scope='cnn', trainable=True, initialization='msra'):
"""Add CNN. N = number of layers.
Args:
f: filter size, list of N int
ch: number of channels, list of (N + 1) int
pool: pooling ratio, list of N int
act: activation function, list of N function
use_bn: whether to use batch normalization, list of N bool
wd: weight decay
"""
self.filter_size = f
self.channels = ch
self.pool = pool
self.act = act
self.use_bn = use_bn
self.wd = wd
self.scope = scope
self.trainable = trainable
self.nlayers = len(f)
self.w = [None] * self.nlayers
self.b = [None] * self.nlayers
self.batch_norm = []
self.num_copies = 0
if initialization == 'msra':
            # MSRA init: std = sqrt(2 / fan); parentheses keep the whole fan product in the denominator
            self.compute_std = lambda s: np.sqrt(2 / (s[0] * s[1] * s[3]))
elif initialization is None:
self.compute_std = lambda s: 0.01
else:
self.compute_std = initialization
super(CNN, self).__init__()
self.log.info('CNN: {}'.format(scope))
self.log.info('Channels: {}'.format(ch))
self.log.info('Activation: {}'.format(act))
self.log.info('Pool: {}'.format(pool))
self.log.info('BN: {}'.format(use_bn))
pass
def init_var(self):
"""Initialize variables."""
f = self.filter_size
ch = self.channels
wd = self.wd
trainable = self.trainable
with tf.variable_scope(self.scope):
for ii in xrange(self.nlayers):
with tf.variable_scope('layer_{}'.format(ii)):
self.w[ii] = self.declare_var(
[f[ii], f[ii], ch[ii], ch[ii + 1]],
name='w', wd=wd,
trainable=trainable,
stddev=self.compute_std(
[f[ii], f[ii], ch[ii], ch[ii + 1]])
)
self.b[ii] = self.declare_var(
[ch[ii + 1]], name='b',
trainable=trainable,
stddev=0
)
self.log.info('Filter: {}, Trainable: {}'.format(
[f[ii], f[ii], ch[ii], ch[ii + 1]], trainable))
pass
pass
pass
pass
def get_layer(self, n):
"""Get a layer."""
return self.hidden_layers[n]
def build(self, inp):
"""Run CNN on an input.
Args:
input: input image, [B, H, W, D]
phase_train: phase train, bool
"""
self.lazy_init_var()
x = inp['input']
phase_train = inp['phase_train']
h = [None] * self.nlayers
self.batch_norm.append([None] * self.nlayers)
with tf.variable_scope(self.scope):
for ii in xrange(self.nlayers):
with tf.variable_scope('layer_{}'.format(ii)):
out_ch = self.channels[ii + 1]
if ii == 0:
prev_inp = x
else:
prev_inp = h[ii - 1]
h[ii] = Conv2D(self.w[ii])(prev_inp) + self.b[ii]
if self.use_bn[ii]:
self.batch_norm[self.num_copies][
ii] = BatchNorm(out_ch)
h[ii] = self.batch_norm[self.num_copies][ii](
{'input': h[ii], 'phase_train': phase_train})
if self.act[ii] is not None:
h[ii] = self.act[ii](h[ii])
if self.pool[ii] > 1:
h[ii] = MaxPool(self.pool[ii])(h[ii])
pass
# h[ii] = tf.Print(h[ii], [ii, tf.reduce_mean(h[ii])])
pass
pass
self.num_copies += 1
self.hidden_layers = h
return h[-1]
def get_save_var_dict(self):
results = {}
for ii in xrange(self.nlayers):
prefix = 'layer_{}/'.format(ii)
results[prefix + 'w'] = self.w[ii]
results[prefix + 'b'] = self.b[ii]
for cc in xrange(self.num_copies):
for ii in xrange(self.nlayers):
prefix = 'layer_{}/'.format(ii)
if len(self.batch_norm) == 1:
bn_name = 'bn'
else:
bn_name = 'bn_{}'.format(cc)
bn = self.batch_norm[cc][ii]
if bn is not None:
self.add_prefix_to(
prefix + bn_name, bn.get_save_var_dict(), results)
return results
pass
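# Hedged usage sketch, not part of the original repo: constructing a small
# three-layer CNN with the parallel argument lists described in the class
# docstring. All shapes and hyper-parameters below are illustrative assumptions.
def build_example_cnn():
    return CNN(
        f=[3, 3, 3],                        # 3x3 filters in every layer
        ch=[3, 16, 32, 64],                 # input channels plus one entry per layer
        pool=[2, 2, 1],                     # 2x2 max-pool after the first two layers
        act=[tf.nn.relu, tf.nn.relu, None],
        use_bn=[True, True, False],
        wd=5e-4,
        scope='example_cnn')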
| 33.733333
| 75
| 0.470553
|
ce0a5fd236b8d2380b5c2266229e61e99e49953f
| 781
|
py
|
Python
|
common/slewlimiter.py
|
frc5024/RocketShip
|
db3aed3d169292663036a9f5fd047dc11d92ca6f
|
[
"MIT"
] | 3
|
2019-04-29T02:47:30.000Z
|
2019-04-30T16:35:15.000Z
|
common/slewlimiter.py
|
frc5024/RocketShip
|
db3aed3d169292663036a9f5fd047dc11d92ca6f
|
[
"MIT"
] | null | null | null |
common/slewlimiter.py
|
frc5024/RocketShip
|
db3aed3d169292663036a9f5fd047dc11d92ca6f
|
[
"MIT"
] | null | null | null |
class SlewLimiter:
def __init__(self, limit):
self.limit = limit
self.output = 0.0
def Feed(self, value):
error = value - self.output
if error > self.limit:
error = self.limit
elif error < (self.limit * -1):
error = self.limit * -1
self.output += error
return self.output
class MultipleSlewLimiter:
def __init__(self, limit):
self.limit = limit
self.output = 0.0
def Feed(self, value):
error = value - self.output
        if error > value * self.limit:
error = self.limit
elif error < (value * self.limit * -1):
error = self.limit * -1
self.output += error
return self.output
MultipleSlewLimiter
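# Hedged usage sketch, not part of the original module: feeding a step input
# through SlewLimiter caps each update at `limit`, so the output ramps toward
# the target instead of jumping. The numbers below are illustrative only.
if __name__ == "__main__":
    limiter = SlewLimiter(0.2)
    for _ in range(6):
        print(limiter.Feed(1.0))  # 0.2, 0.4, 0.6, 0.8, 1.0, 1.0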
| 27.892857
| 47
| 0.541613
|
727fdb5c07ab90d6ac5e956a24cbec200adb8772
| 1,912
|
py
|
Python
|
gitmostwanted/tasks/repo_stars.py
|
JS555/YayanRuhianResearch_
|
313df2ff308b2c7444bcd64094dd64ff69a8911d
|
[
"MIT"
] | null | null | null |
gitmostwanted/tasks/repo_stars.py
|
JS555/YayanRuhianResearch_
|
313df2ff308b2c7444bcd64094dd64ff69a8911d
|
[
"MIT"
] | null | null | null |
gitmostwanted/tasks/repo_stars.py
|
JS555/YayanRuhianResearch_
|
313df2ff308b2c7444bcd64094dd64ff69a8911d
|
[
"MIT"
] | null | null | null |
from datetime import datetime, timedelta
from gitmostwanted.app import app, db, celery
from gitmostwanted.lib.bigquery.job import Job
from gitmostwanted.models.repo import Repo, RepoStars
from gitmostwanted.services import bigquery
from time import sleep
def results_of(j: Job): # @todo #0:15m copy-paste code in multiple tasks
while not j.complete:
app.logger.debug('The job is not complete, waiting...')
sleep(10)
return j.results
@celery.task()
def stars_mature(num_days):
service = bigquery.instance(app)
jobs = []
repos = Repo.query\
.filter(Repo.mature.is_(True))\
.filter(Repo.status == 'new')\
.order_by(Repo.checked_at.asc())\
.limit(40) # we are at the free plan
for repo in repos:
query = query_stars_by_repo(
repo_id=repo.id, date_from=datetime.now() + timedelta(days=num_days * -1),
date_to=datetime.now()
)
job = Job(service, query, batch=True)
job.execute()
jobs.append((job, repo))
for job in jobs:
for row in results_of(job[0]):
db.session.add(RepoStars(repo_id=job[1].id, stars=row[0], year=row[1], day=row[2]))
job[1].status = 'unknown'
db.session.commit()
# @todo #192:1h move BQ queries to a separate place
def query_stars_by_repo(repo_id: int, date_from: datetime, date_to: datetime):
query = """
SELECT
COUNT(1) AS stars, YEAR(created_at) AS y, DAYOFYEAR(created_at) AS doy,
MONTH(created_at) as mon
FROM
TABLE_DATE_RANGE([githubarchive:day.], TIMESTAMP('{date_from}'), TIMESTAMP('{date_to}'))
WHERE
repo.id = {id} AND type IN ('WatchEvent', 'ForkEvent')
GROUP BY y, mon, doy
"""
return query.format(
id=repo_id, date_from=date_from.strftime('%Y-%m-%d'), date_to=date_to.strftime('%Y-%m-%d')
)
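# Hedged illustration, not part of the original task module: rendering the
# statement above for a hypothetical repository id over the last 30 days, e.g.
# when debugging quota usage on the free plan. The id is an assumption.
def example_stars_query():
    return query_stars_by_repo(
        repo_id=123456,
        date_from=datetime.now() - timedelta(days=30),
        date_to=datetime.now(),
    )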
| 30.83871
| 100
| 0.626046
|
62e947c0a594776834d00b0cc9f86070821186bd
| 15,188
|
py
|
Python
|
plugins/youtube_dl_button.py
|
Naysabots/Youtube-Downloader-Bot
|
643312dcd577625c257f72f1cc27da94d78d0b20
|
[
"MIT"
] | null | null | null |
plugins/youtube_dl_button.py
|
Naysabots/Youtube-Downloader-Bot
|
643312dcd577625c257f72f1cc27da94d78d0b20
|
[
"MIT"
] | null | null | null |
plugins/youtube_dl_button.py
|
Naysabots/Youtube-Downloader-Bot
|
643312dcd577625c257f72f1cc27da94d78d0b20
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# (C) Shrimadhav U K
import logging
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
logging.getLogger("pyrogram").setLevel(logging.WARNING)
import asyncio
import json
import os
import shutil
import time
if bool(os.environ.get("WEBHOOK", False)):
from sample_config import Config
else:
from sample_config import Config
from datetime import datetime
from hachoir.parser import createParser
from hachoir.metadata import extractMetadata
from pyrogram.types import InputMediaPhoto
from translation import Translation
from helper_funcs.help_Nekmo_ffmpeg import generate_screen_shots
from helper_funcs.display_progress import progress_for_pyrogram, humanbytes
from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton
from PIL import Image
async def youtube_dl_call_back(bot, update):
cb_data = update.data
tg_send_type, youtube_dl_format, youtube_dl_ext = cb_data.split("|")
thumb_image_path = Config.DOWNLOAD_LOCATION + \
"/" + str(update.from_user.id) + ".jpg"
save_ytdl_json_path = Config.DOWNLOAD_LOCATION + \
"/" + str(update.from_user.id) + ".json"
try:
with open(save_ytdl_json_path, "r", encoding="utf8") as f:
response_json = json.load(f)
except (FileNotFoundError) as e:
await bot.delete_messages(
chat_id=update.message.chat.id,
message_ids=update.message.message_id,
revoke=True
)
return False
youtube_dl_url = update.message.reply_to_message.text
custom_file_name = str(response_json.get("title")) + \
"_" + youtube_dl_format + "." + youtube_dl_ext
youtube_dl_username = None
youtube_dl_password = None
if "|" in youtube_dl_url:
url_parts = youtube_dl_url.split("|")
if len(url_parts) == 2:
youtube_dl_url = url_parts[0]
custom_file_name = url_parts[1]
elif len(url_parts) == 4:
youtube_dl_url = url_parts[0]
custom_file_name = url_parts[1]
youtube_dl_username = url_parts[2]
youtube_dl_password = url_parts[3]
else:
for entity in update.message.reply_to_message.entities:
if entity.type == "text_link":
youtube_dl_url = entity.url
elif entity.type == "url":
o = entity.offset
l = entity.length
youtube_dl_url = youtube_dl_url[o:o + l]
if youtube_dl_url is not None:
youtube_dl_url = youtube_dl_url.strip()
if custom_file_name is not None:
custom_file_name = custom_file_name.strip()
if youtube_dl_username is not None:
youtube_dl_username = youtube_dl_username.strip()
if youtube_dl_password is not None:
youtube_dl_password = youtube_dl_password.strip()
else:
for entity in update.message.reply_to_message.entities:
if entity.type == "text_link":
youtube_dl_url = entity.url
elif entity.type == "url":
o = entity.offset
l = entity.length
youtube_dl_url = youtube_dl_url[o:o + l]
await bot.edit_message_text(
text=Translation.DOWNLOAD_START,
chat_id=update.message.chat.id,
message_id=update.message.message_id
)
description = Translation.CUSTOM_CAPTION_UL_FILE
if "fulltitle" in response_json:
description = response_json["fulltitle"][0:1021]
tmp_directory_for_each_user = Config.DOWNLOAD_LOCATION + "/" + str(update.from_user.id)
if not os.path.isdir(tmp_directory_for_each_user):
os.makedirs(tmp_directory_for_each_user)
download_directory = tmp_directory_for_each_user + "/" + custom_file_name
command_to_exec = []
if tg_send_type == "audio":
command_to_exec = [
"yt-dlp",
"-c",
"--max-filesize", str(Config.TG_MAX_FILE_SIZE),
"--prefer-ffmpeg",
"--extract-audio",
"--audio-format", youtube_dl_ext,
"--audio-quality", youtube_dl_format,
youtube_dl_url,
"-o", download_directory
]
else:
minus_f_format = youtube_dl_format
if "youtu" in youtube_dl_url:
minus_f_format = youtube_dl_format + "+bestaudio"
command_to_exec = [
"yt-dlp",
"-c",
"--max-filesize", str(Config.TG_MAX_FILE_SIZE),
"--embed-subs",
"-f", minus_f_format,
"--hls-prefer-ffmpeg", youtube_dl_url,
"-o", download_directory
]
if Config.HTTP_PROXY != "":
command_to_exec.append("--proxy")
command_to_exec.append(Config.HTTP_PROXY)
if youtube_dl_username is not None:
command_to_exec.append("--username")
command_to_exec.append(youtube_dl_username)
if youtube_dl_password is not None:
command_to_exec.append("--password")
command_to_exec.append(youtube_dl_password)
command_to_exec.append("--no-warnings")
if "hotstar" in youtube_dl_url:
command_to_exec.append("--geo-bypass-country")
command_to_exec.append("IN")
start = datetime.now()
process = await asyncio.create_subprocess_exec(
*command_to_exec,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
)
stdout, stderr = await process.communicate()
e_response = stderr.decode().strip()
t_response = stdout.decode().strip()
# logger.info(e_response)
# logger.info(t_response)
ad_string_to_replace = "please report this issue on https://yt-dl.org/bug . Make sure you are using the latest version; see https://yt-dl.org/update on how to update. Be sure to call youtube-dl with the --verbose flag and include its complete output."
if e_response and ad_string_to_replace in e_response:
error_message = e_response.replace(ad_string_to_replace, "")
await bot.edit_message_text(
chat_id=update.message.chat.id,
message_id=update.message.message_id,
text=error_message
)
return False
if t_response:
try:
os.remove(save_ytdl_json_path)
except:
pass
end_one = datetime.now()
time_taken_for_download = (end_one -start).seconds
file_size = Config.TG_MAX_FILE_SIZE + 1
try:
file_size = os.stat(download_directory).st_size
except FileNotFoundError as exc:
try:
download_directory = os.path.splitext(download_directory)[0] + "." + "mkv"
file_size = os.stat(download_directory).st_size
except Exception as e:
await bot.edit_message_text(
chat_id=update.message.chat.id,
text="Some errors occured while downloading video!",
message_id=update.message.message_id
)
logger.info("FnF error - " + str(e))
return
if file_size > Config.TG_MAX_FILE_SIZE:
await bot.edit_message_text(
chat_id=update.message.chat.id,
text=Translation.RCHD_TG_API_LIMIT.format(time_taken_for_download, humanbytes(file_size)),
message_id=update.message.message_id
)
else:
if Config.SCREENSHOTS:
is_w_f = False
images = await generate_screen_shots(
download_directory,
tmp_directory_for_each_user,
is_w_f,
Config.DEF_WATER_MARK_FILE,
300,
9
)
try:
await bot.edit_message_text(text=Translation.UPLOAD_START, chat_id=update.message.chat.id, message_id=update.message.message_id)
except:
pass
# get the correct width, height, and duration for videos greater than 10MB
width = 0
height = 0
duration = 0
if tg_send_type != "file":
metadata = extractMetadata(createParser(download_directory))
if metadata is not None:
if metadata.has("duration"):
duration = metadata.get('duration').seconds
if os.path.exists(thumb_image_path) or Config.DEF_THUMB_NAIL_VID_S:
width = 0
height = 0
if os.path.exists(thumb_image_path):
metadata = extractMetadata(createParser(thumb_image_path))
else:
metadata = Config.DEF_THUMB_NAIL_VID_S
if metadata.has("width"):
width = metadata.get("width")
if metadata.has("height"):
height = metadata.get("height")
if tg_send_type == "vm":
height = width
Image.open(thumb_image_path).convert(
"RGB").save(thumb_image_path)
img = Image.open(thumb_image_path)
if tg_send_type == "file":
img.resize((320, height))
else:
img.resize((90, height))
img.save(thumb_image_path, "JPEG")
else:
thumb_image_path = None
start_time = time.time()
if tg_send_type == "audio":
await update.message.reply_to_message.reply_chat_action("upload_audio")
await bot.send_audio(
chat_id=update.message.chat.id,
audio=download_directory,
caption=description,
parse_mode="HTML",
duration=duration,
# performer=response_json["uploader"],
# title=response_json["title"],
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton('⚙ Join Updates Channel ⚙', url='https://telegram.me/BX_Botz')]]),
thumb=thumb_image_path,
progress=progress_for_pyrogram,
progress_args=(
Translation.UPLOAD_START,
update.message,
start_time
)
)
elif tg_send_type == "file":
await update.message.reply_to_message.reply_chat_action("upload_document")
await bot.send_document(
chat_id=update.message.chat.id,
document=download_directory,
thumb=thumb_image_path,
caption=description,
parse_mode="HTML",
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton('⚙ Join Updates Channel ⚙', url='https://telegram.me/BX_Botz')]]),
reply_to_message_id=update.message.reply_to_message.message_id,
progress=progress_for_pyrogram,
progress_args=(
Translation.UPLOAD_START,
update.message,
start_time
)
)
elif tg_send_type == "vm":
await update.message.reply_to_message.reply_chat_action("upload_video_note")
await bot.send_video_note(
chat_id=update.message.chat.id,
video_note=download_directory,
duration=duration,
length=width,
thumb=thumb_image_path,
reply_to_message_id=update.message.reply_to_message.message_id,
progress=progress_for_pyrogram,
progress_args=(
Translation.UPLOAD_START,
update.message,
start_time
)
)
elif tg_send_type == "video":
await update.message.reply_to_message.reply_chat_action("upload_video")
await bot.send_video(
chat_id=update.message.chat.id,
video=download_directory,
caption=description,
parse_mode="HTML",
duration=duration,
width=width,
height=height,
supports_streaming=True,
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton('⚙ Join Updates Channel ⚙', url='https://telegram.me/BX_Botz')]]),
thumb=thumb_image_path,
reply_to_message_id=update.message.reply_to_message.message_id,
progress=progress_for_pyrogram,
progress_args=(
Translation.UPLOAD_START,
update.message,
start_time
)
)
else:
logger.info("Did this happen? :\\")
end_two = datetime.now()
time_taken_for_upload = (end_two - end_one).seconds
media_album_p = []
if Config.SCREENSHOTS:
if images is not None:
i = 0
caption = ""
if is_w_f:
caption = ""
for image in images:
if os.path.exists(image):
if i == 0:
media_album_p.append(
InputMediaPhoto(
media=image,
caption=caption,
parse_mode="html"
)
)
else:
media_album_p.append(
InputMediaPhoto(
media=image
)
)
i = i + 1
await bot.send_media_group(
chat_id=update.message.chat.id,
disable_notification=True,
reply_to_message_id=update.message.message_id,
media=media_album_p
)
try:
shutil.rmtree(tmp_directory_for_each_user)
except:
pass
try:
os.remove(thumb_image_path)
except:
pass
await bot.edit_message_text(
text=Translation.AFTER_SUCCESSFUL_UPLOAD_MSG_WITH_TS.format(time_taken_for_download, time_taken_for_upload),
chat_id=update.message.chat.id,
message_id=update.message.message_id,
disable_web_page_preview=True
)
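# Hedged illustration, not part of the original plugin: the audio-extraction
# command list assembled in youtube_dl_call_back above, spelled out for a
# hypothetical URL, format and output path. All values are assumptions;
# Config.TG_MAX_FILE_SIZE comes from the project's config module.
def _example_audio_command(url="https://example.com/watch?v=demo",
                           out_path="/tmp/demo.mp3"):
    return [
        "yt-dlp",
        "-c",
        "--max-filesize", str(Config.TG_MAX_FILE_SIZE),
        "--prefer-ffmpeg",
        "--extract-audio",
        "--audio-format", "mp3",
        "--audio-quality", "320K",
        url,
        "-o", out_path,
    ]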
| 41.610959
| 257
| 0.546023
|
946f477990f0af63c951062bf0630ea0a826ecfb
| 9,918
|
py
|
Python
|
src/yafowil/widget/dynatree/tests.py
|
conestack/yafowil.widget.dynatree
|
d9242c109030dd812b79208d221479c571806d20
|
[
"BSD-2-Clause"
] | 1
|
2019-07-09T12:56:45.000Z
|
2019-07-09T12:56:45.000Z
|
src/yafowil/widget/dynatree/tests.py
|
conestack/yafowil.widget.dynatree
|
d9242c109030dd812b79208d221479c571806d20
|
[
"BSD-2-Clause"
] | 1
|
2015-01-28T16:11:22.000Z
|
2015-01-28T16:11:22.000Z
|
src/yafowil/widget/dynatree/tests.py
|
conestack/yafowil.widget.dynatree
|
d9242c109030dd812b79208d221479c571806d20
|
[
"BSD-2-Clause"
] | 1
|
2019-03-11T09:22:42.000Z
|
2019-03-11T09:22:42.000Z
|
from node.utils import UNSET
from odict import odict
from yafowil.base import factory
from yafowil.compat import IS_PY2
from yafowil.tests import fxml
from yafowil.tests import YafowilTestCase
from yafowil.utils import tag
from yafowil.widget.dynatree.widget import build_inline_dynatree
import unittest
import yafowil.loader # noqa
if not IS_PY2:
from importlib import reload
class TestDynatreeWidget(YafowilTestCase):
def setUp(self):
super(TestDynatreeWidget, self).setUp()
from yafowil.widget.dynatree import widget
reload(widget)
@property
def mock_tree(self):
# A test tree
tree = odict()
animals = odict()
tree['animal'] = ('Animals', animals)
mammal = odict()
mammal['horse'] = ('Horse', None)
mammal['ape'] = ('Ape', None)
mammal['elephant'] = ('Elephant', None)
animals['mammal'] = ('Mammals', mammal)
bird = odict()
bird['turkey'] = ('Turkey', None)
bird['swan'] = ('Swan', None)
bird['hummingbird'] = ('Hummingbird', None)
bird['duck'] = ('Duck', None)
animals['bird'] = ('Birds', bird)
return tree
def test_inline_tree_renderer(self):
html = build_inline_dynatree(
self.mock_tree,
'animal',
tag,
ulid='dynatree-source'
)
self.check_output("""
<ul class="hiddenStructure" id="dynatree-source">
<li class="selected" id="animal">Animals<ul>
<li id="mammal">Mammals<ul>
<li id="horse">Horse
</li><li id="ape">Ape
</li><li id="elephant">Elephant
</li></ul>
</li><li id="bird">Birds<ul>
<li id="turkey">Turkey
</li><li id="swan">Swan
</li><li id="hummingbird">Hummingbird
</li><li id="duck">Duck
</li></ul>
</li></ul>
</li></ul>
""", fxml(html))
def test_plain_widget_source_is_string(self):
# Render plain widget, source is string
widget = factory(
'dynatree',
name='root',
props={
'source': 'http://www.foo.bar/baz.json'
})
self.check_output("""
<div class="yafowil-widget-dynatree">
<input id="input-root" name="root" type="hidden"/>
<div class="dynatree-source hiddenStructure">http://www.foo.bar/baz.json</div>
<div class="dynatree-params hiddenStructure">selectMode,1|minExpandLevel,1|rootVisible,False|autoCollapse,False|checkbox,True|imagePath,skin-bootstrap|type,remote</div>
<div class="yafowil-widget-dynatree-tree"/>
</div>
""", fxml(widget()))
def test_plain_widget_source_is_tree(self):
# Render plain widget, source is tree
widget = factory(
'dynatree',
name='root',
props={
'source': self.mock_tree
})
self.check_output("""
<div class="yafowil-widget-dynatree">
<input id="input-root" name="root" type="hidden"/>
<ul class="hiddenStructure" id="dynatree-source-root">
<li id="animal">Animals<ul>
<li id="mammal">Mammals<ul>
<li id="horse">Horse
</li><li id="ape">Ape
</li><li id="elephant">Elephant
</li></ul>
</li><li id="bird">Birds<ul>
<li id="turkey">Turkey
</li><li id="swan">Swan
</li><li id="hummingbird">Hummingbird
</li><li id="duck">Duck
</li></ul>
</li></ul>
</li></ul>
<div class="dynatree-params hiddenStructure">selectMode,1|minExpandLevel,1|rootVisible,False|autoCollapse,False|checkbox,True|imagePath,skin-bootstrap|type,local|initId,dynatree-source-root</div>
<div class="yafowil-widget-dynatree-tree"/>
</div>
""", fxml(widget()))
def test_plain_widget_source_is_callable(self):
# Render plain widget, source is callable
def tree_callable(widget, data):
return self.mock_tree
widget = factory(
'dynatree',
name='root',
props={
'source': tree_callable
})
self.check_output("""
<div class="yafowil-widget-dynatree">
<input id="input-root" name="root" type="hidden"/>
<ul class="hiddenStructure" id="dynatree-source-root">
<li id="animal">Animals<ul>
<li id="mammal">Mammals<ul>
<li id="horse">Horse
</li><li id="ape">Ape
</li><li id="elephant">Elephant
</li></ul>
</li><li id="bird">Birds<ul>
<li id="turkey">Turkey
</li><li id="swan">Swan
</li><li id="hummingbird">Hummingbird
</li><li id="duck">Duck
</li></ul>
</li></ul>
</li></ul>
<div class="dynatree-params hiddenStructure">selectMode,1|minExpandLevel,1|rootVisible,False|autoCollapse,False|checkbox,True|imagePath,skin-bootstrap|type,local|initId,dynatree-source-root</div>
<div class="yafowil-widget-dynatree-tree"/>
</div>
""", fxml(widget()))
def test_plain_widget_source_is_invalid(self):
# Try to render plain widget, source is invalid
widget = factory(
'dynatree',
name='root',
value='ape',
props={
'source': object()
})
err = self.expect_error(
ValueError,
widget
)
self.assertEqual(str(err), 'resulting source must be [o]dict or string')
def test_plain_widget_source_is_tree_preset_values_single_select(self):
# Render plain widget, source is tree, preselect ape, single select
widget = factory(
'dynatree',
name='root',
value='ape',
props={
'source': self.mock_tree
})
self.check_output("""
<div class="yafowil-widget-dynatree">
<input id="input-root" name="root" type="hidden" value="ape"/>
<ul class="hiddenStructure" id="dynatree-source-root">
<li id="animal">Animals<ul>
<li id="mammal">Mammals<ul>
<li id="horse">Horse
</li><li class="selected" id="ape">Ape
</li><li id="elephant">Elephant
</li></ul>
</li><li id="bird">Birds<ul>
<li id="turkey">Turkey
</li><li id="swan">Swan
</li><li id="hummingbird">Hummingbird
</li><li id="duck">Duck
</li></ul>
</li></ul>
</li></ul>
<div class="dynatree-params hiddenStructure">selectMode,1|minExpandLevel,1|rootVisible,False|autoCollapse,False|checkbox,True|imagePath,skin-bootstrap|type,local|initId,dynatree-source-root</div>
<div class="yafowil-widget-dynatree-tree"/>
</div>
""", fxml(widget()))
def test_plain_widget_source_is_tree_preset_values_multi_select(self):
# Render plain widget, source is tree, preselect ape and swan,
# multi select
widget = factory(
'dynatree',
name='root',
value=['ape', 'swan'],
props={
'source': self.mock_tree,
'selectMode': 1
})
self.check_output("""
<div class="yafowil-widget-dynatree">
<input id="input-root" name="root" type="hidden" value="ape|swan"/>
<ul class="hiddenStructure" id="dynatree-source-root">
<li id="animal">Animals<ul>
<li id="mammal">Mammals<ul>
<li id="horse">Horse
</li><li class="selected" id="ape">Ape
</li><li id="elephant">Elephant
</li></ul>
</li><li id="bird">Birds<ul>
<li id="turkey">Turkey
</li><li class="selected" id="swan">Swan
</li><li id="hummingbird">Hummingbird
</li><li id="duck">Duck
</li></ul>
</li></ul>
</li></ul>
<div class="dynatree-params hiddenStructure">selectMode,1|minExpandLevel,1|rootVisible,False|autoCollapse,False|checkbox,True|imagePath,skin-bootstrap|type,local|initId,dynatree-source-root</div>
<div class="yafowil-widget-dynatree-tree"/>
</div>
""", fxml(widget()))
def test_extract_from_select_mode_1(self):
# Extract from selectMode=1 - means single selection
widget = factory(
'dynatree',
name='root',
props={
'source': self.mock_tree,
'selectMode': 1
})
data = widget.extract({'root': 'somevalue|'})
self.assertEqual(
[data.name, data.value, data.extracted, data.errors],
['root', UNSET, 'somevalue', []]
)
def test_extract_from_select_mode_2(self):
# Extract from selectMode=2 - means multi selection
widget = factory(
'dynatree',
name='root',
props={
'source': self.mock_tree,
'selectMode': 2
})
data = widget.extract({'root': 'somevalue|'})
self.assertEqual(
[data.name, data.value, data.extracted, data.errors],
['root', UNSET, ['somevalue'], []]
)
data = widget.extract({'root': 'somevalue|othervalue'})
self.assertEqual(
[data.name, data.value, data.extracted, data.errors],
['root', UNSET, ['somevalue', 'othervalue'], []]
)
def test_extract_empty(self):
widget = factory(
'dynatree',
name='root',
props={
'source': self.mock_tree,
'selectMode': 2
})
data = widget.extract({})
self.assertEqual(
[data.name, data.value, data.extracted, data.errors],
['root', UNSET, UNSET, []]
)
if __name__ == '__main__':
unittest.main()
| 34.557491
| 205
| 0.549708
|
4cae13c72a34f58c6725bd87fbe0c1a4b2118a68
| 418
|
py
|
Python
|
deproxy/request.py
|
malconis/deproxy
|
5e5366ef16ee77419ade1ca47547a75ba9125ec5
|
[
"MIT"
] | 1
|
2015-10-12T07:56:41.000Z
|
2015-10-12T07:56:41.000Z
|
deproxy/request.py
|
malconis/deproxy
|
5e5366ef16ee77419ade1ca47547a75ba9125ec5
|
[
"MIT"
] | null | null | null |
deproxy/request.py
|
malconis/deproxy
|
5e5366ef16ee77419ade1ca47547a75ba9125ec5
|
[
"MIT"
] | null | null | null |
class Request:
"""A simple HTTP Request, with method, path, headers, and body."""
def __init__(self, method, path, headers, body):
self.method = method
self.path = path
self.headers = dict(headers)
self.body = body
def __repr__(self):
return ('Request(method=%r, path=%r, headers=%r, body=%r)' %
(self.method, self.path, self.headers, self.body))
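# Minimal usage sketch (not part of deproxy): construct a Request and inspect
# its fields. The method, path, header and body values below are arbitrary
# examples.
if __name__ == '__main__':
    req = Request('GET', '/status', {'Accept': 'application/json'}, '')
    print(req.method, req.path)  # GET /status
    print(req)                   # Request(method='GET', path='/status', ...)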
| 29.857143
| 70
| 0.588517
|
26d4f2c9a3517f6cfcc0bec7155d57519f59fa8a
| 3,054
|
py
|
Python
|
tests/demoproject/demo/sample/views.py
|
roman-karpovich/etools-validator
|
e5050a675d506f3d18a4703e8b73425d93919f0c
|
[
"Apache-2.0"
] | null | null | null |
tests/demoproject/demo/sample/views.py
|
roman-karpovich/etools-validator
|
e5050a675d506f3d18a4703e8b73425d93919f0c
|
[
"Apache-2.0"
] | null | null | null |
tests/demoproject/demo/sample/views.py
|
roman-karpovich/etools-validator
|
e5050a675d506f3d18a4703e8b73425d93919f0c
|
[
"Apache-2.0"
] | null | null | null |
from rest_framework import status
from rest_framework.generics import CreateAPIView, UpdateAPIView
from rest_framework.response import Response
from rest_framework.serializers import ValidationError
from etools_validator.mixins import ValidatorViewMixin
from .models import DemoModel
from .serializers import DemoChildModelSerializer, DemoModelSerializer, SpecialModelSerializer
from .validations import DemoModelValidation
class DemoCreateView(ValidatorViewMixin, CreateAPIView):
serializer_class = DemoModelSerializer
SERIALIZER_MAP = {
"children": DemoChildModelSerializer
}
def create(self, request, *args, **kwargs):
related_fields = ['children']
serializer = self.my_create(request, related_fields, **kwargs)
instance = serializer.instance
validator = DemoModelValidation(instance, user=request.user)
if not validator.is_valid:
raise ValidationError({'errors': validator.errors})
headers = self.get_success_headers(serializer.data)
data = DemoModelSerializer(
instance,
context=self.get_serializer_context()
).data
return Response(
data,
status=status.HTTP_201_CREATED,
headers=headers
)
class DemoUpdateView(ValidatorViewMixin, UpdateAPIView):
queryset = DemoModel.objects.all()
serializer_class = DemoModelSerializer
SERIALIZER_MAP = {
"children": DemoChildModelSerializer,
"special": SpecialModelSerializer,
}
def update(self, request, *args, **kwargs):
related_fields = ['children', 'special']
instance, old_instance, serializer = self.my_update(
request,
related_fields,
**kwargs
)
validator = DemoModelValidation(
instance,
old=old_instance,
user=request.user
)
if not validator.is_valid:
raise ValidationError(validator.errors)
return Response(
DemoModelSerializer(
instance,
context=self.get_serializer_context()
).data
)
class DemoUpdateNonSerializedView(ValidatorViewMixin, UpdateAPIView):
queryset = DemoModel.objects.all()
serializer_class = DemoModelSerializer
def update(self, request, *args, **kwargs):
related_fields = []
related_non_serialized_fields = ['others']
instance, old_instance, serializer = self.my_update(
request,
related_fields,
related_non_serialized_fields=related_non_serialized_fields,
**kwargs
)
validator = DemoModelValidation(
instance,
old=old_instance,
user=request.user
)
if not validator.is_valid:
raise ValidationError(validator.errors)
return Response(
DemoModelSerializer(
instance,
context=self.get_serializer_context()
).data
)
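# Hypothetical URL wiring for the demo views above (this project's real
# urls.py is not shown here); included only to illustrate how these endpoints
# would typically be exposed. The import path, route patterns and names are
# assumptions made for this sketch.
#
#     from django.urls import path
#
#     from demo.sample import views
#
#     urlpatterns = [
#         path('demo/', views.DemoCreateView.as_view(), name='demo-create'),
#         path('demo/<int:pk>/', views.DemoUpdateView.as_view(), name='demo-update'),
#         path('demo/<int:pk>/plain/', views.DemoUpdateNonSerializedView.as_view(),
#              name='demo-update-non-serialized'),
#     ]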
| 29.085714
| 94
| 0.646365
|
f168d843e436217f942a6654299c95766ebeed7c
| 56,256
|
py
|
Python
|
nova/api/openstack/compute/servers.py
|
teresa-ho/stx-nova
|
1f82323439da2449edbbaed2fe1c8414a550c86f
|
[
"Apache-2.0"
] | null | null | null |
nova/api/openstack/compute/servers.py
|
teresa-ho/stx-nova
|
1f82323439da2449edbbaed2fe1c8414a550c86f
|
[
"Apache-2.0"
] | null | null | null |
nova/api/openstack/compute/servers.py
|
teresa-ho/stx-nova
|
1f82323439da2449edbbaed2fe1c8414a550c86f
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2010 OpenStack Foundation
# Copyright 2011 Piston Cloud Computing, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2013-2017 Wind River Systems, Inc.
#
import copy
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_utils import strutils
from oslo_utils import timeutils
from oslo_utils import uuidutils
import six
import webob
from webob import exc
from nova.api.openstack import api_version_request
from nova.api.openstack import common
from nova.api.openstack.compute import availability_zone
from nova.api.openstack.compute import block_device_mapping
from nova.api.openstack.compute import block_device_mapping_v1
from nova.api.openstack.compute import config_drive
from nova.api.openstack.compute import helpers
from nova.api.openstack.compute import keypairs
from nova.api.openstack.compute import multiple_create
from nova.api.openstack.compute import scheduler_hints
from nova.api.openstack.compute.schemas import servers as schema_servers
from nova.api.openstack.compute import security_groups
from nova.api.openstack.compute import user_data
from nova.api.openstack.compute.views import servers as views_servers
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api import validation
from nova import compute
from nova.compute import flavors
from nova.compute import utils as compute_utils
import nova.conf
from nova import context as nova_context
from nova import exception
from nova.i18n import _
from nova.image import glance
from nova import objects
from nova.objects import service as service_obj
from nova.pci import devspec
from nova.pci import utils as pci_utils
from nova.policies import servers as server_policies
from nova import utils
TAG_SEARCH_FILTERS = ('tags', 'tags-any', 'not-tags', 'not-tags-any')
DEVICE_TAGGING_MIN_COMPUTE_VERSION = 14
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
class ServersController(wsgi.Controller):
"""The Server API base controller class for the OpenStack API."""
_view_builder_class = views_servers.ViewBuilder
schema_server_create = schema_servers.base_create
schema_server_update = schema_servers.base_update
schema_server_rebuild = schema_servers.base_rebuild
schema_server_create_v20 = schema_servers.base_create_v20
schema_server_update_v20 = schema_servers.base_update_v20
schema_server_rebuild_v20 = schema_servers.base_rebuild_v20
schema_server_create_v219 = schema_servers.base_create_v219
schema_server_update_v219 = schema_servers.base_update_v219
schema_server_rebuild_v219 = schema_servers.base_rebuild_v219
schema_server_create_v232 = schema_servers.base_create_v232
schema_server_create_v237 = schema_servers.base_create_v237
schema_server_create_v242 = schema_servers.base_create_v242
schema_server_create_v252 = schema_servers.base_create_v252
# NOTE(alex_xu): Please do not add more items into this list. This list
# should be removed in the future.
schema_func_list = [
availability_zone.get_server_create_schema,
block_device_mapping.get_server_create_schema,
block_device_mapping_v1.get_server_create_schema,
config_drive.get_server_create_schema,
keypairs.get_server_create_schema,
multiple_create.get_server_create_schema,
scheduler_hints.get_server_create_schema,
security_groups.get_server_create_schema,
user_data.get_server_create_schema,
]
# NOTE(alex_xu): Please do not add more items into this list. This list
# should be removed in the future.
server_create_func_list = [
availability_zone.server_create,
block_device_mapping.server_create,
block_device_mapping_v1.server_create,
config_drive.server_create,
keypairs.server_create,
multiple_create.server_create,
scheduler_hints.server_create,
security_groups.server_create,
user_data.server_create,
]
@staticmethod
def _add_location(robj):
# Just in case...
if 'server' not in robj.obj:
return robj
link = [l for l in robj.obj['server']['links'] if l['rel'] == 'self']
if link:
robj['Location'] = utils.utf8(link[0]['href'])
# Convenience return
return robj
def __init__(self, **kwargs):
# TODO(alex_xu): Remove this line when 'extension_info' won't be passed
# in when creating controller.
kwargs.pop('extension_info', None)
super(ServersController, self).__init__(**kwargs)
self.compute_api = compute.API()
        # TODO(alex_xu): The final goal is to merge all of the extended
        # json-schemas into the server's main json-schema.
self._create_schema(self.schema_server_create_v252, '2.52')
self._create_schema(self.schema_server_create_v242, '2.42')
self._create_schema(self.schema_server_create_v237, '2.37')
self._create_schema(self.schema_server_create_v232, '2.32')
self._create_schema(self.schema_server_create_v219, '2.19')
self._create_schema(self.schema_server_create, '2.1')
self._create_schema(self.schema_server_create_v20, '2.0')
@extensions.expected_errors((400, 403))
@validation.query_schema(schema_servers.query_params_v226, '2.26')
@validation.query_schema(schema_servers.query_params_v21, '2.1', '2.25')
def index(self, req):
"""Returns a list of server names and ids for a given user."""
context = req.environ['nova.context']
context.can(server_policies.SERVERS % 'index')
try:
servers = self._get_servers(req, is_detail=False)
except exception.Invalid as err:
raise exc.HTTPBadRequest(explanation=err.format_message())
return servers
@extensions.expected_errors((400, 403))
@validation.query_schema(schema_servers.query_params_v226, '2.26')
@validation.query_schema(schema_servers.query_params_v21, '2.1', '2.25')
def detail(self, req):
"""Returns a list of server details for a given user."""
context = req.environ['nova.context']
context.can(server_policies.SERVERS % 'detail')
try:
servers = self._get_servers(req, is_detail=True)
except exception.Invalid as err:
raise exc.HTTPBadRequest(explanation=err.format_message())
return servers
def _get_servers(self, req, is_detail):
"""Returns a list of servers, based on any search options specified."""
search_opts = {}
search_opts.update(req.GET)
context = req.environ['nova.context']
remove_invalid_options(context, search_opts,
self._get_server_search_options(req))
for search_opt in search_opts:
if (search_opt in
schema_servers.JOINED_TABLE_QUERY_PARAMS_SERVERS.keys() or
search_opt.startswith('_')):
msg = _("Invalid filter field: %s.") % search_opt
raise exc.HTTPBadRequest(explanation=msg)
# Verify search by 'status' contains a valid status.
# Convert it to filter by vm_state or task_state for compute_api.
# For non-admin user, vm_state and task_state are filtered through
# remove_invalid_options function, based on value of status field.
# Set value to vm_state and task_state to make search simple.
search_opts.pop('status', None)
if 'status' in req.GET.keys():
statuses = req.GET.getall('status')
states = common.task_and_vm_state_from_status(statuses)
vm_state, task_state = states
if not vm_state and not task_state:
if api_version_request.is_supported(req, min_version='2.38'):
msg = _('Invalid status value')
raise exc.HTTPBadRequest(explanation=msg)
return {'servers': []}
search_opts['vm_state'] = vm_state
# When we search by vm state, task state will return 'default'.
# So we don't need task_state search_opt.
if 'default' not in task_state:
search_opts['task_state'] = task_state
if 'changes-since' in search_opts:
search_opts['changes-since'] = timeutils.parse_isotime(
search_opts['changes-since'])
# By default, compute's get_all() will return deleted instances.
# If an admin hasn't specified a 'deleted' search option, we need
# to filter out deleted instances by setting the filter ourselves.
# ... Unless 'changes-since' is specified, because 'changes-since'
# should return recently deleted instances according to the API spec.
if 'deleted' not in search_opts:
if 'changes-since' not in search_opts:
# No 'changes-since', so we only want non-deleted servers
search_opts['deleted'] = False
else:
# Convert deleted filter value to a valid boolean.
# Return non-deleted servers if an invalid value
# is passed with deleted filter.
search_opts['deleted'] = strutils.bool_from_string(
search_opts['deleted'], default=False)
if search_opts.get("vm_state") == ['deleted']:
if context.is_admin:
search_opts['deleted'] = True
else:
msg = _("Only administrators may list deleted instances")
raise exc.HTTPForbidden(explanation=msg)
if api_version_request.is_supported(req, min_version='2.26'):
for tag_filter in TAG_SEARCH_FILTERS:
if tag_filter in search_opts:
search_opts[tag_filter] = search_opts[
tag_filter].split(',')
# If tenant_id is passed as a search parameter this should
# imply that all_tenants is also enabled unless explicitly
# disabled. Note that the tenant_id parameter is filtered out
# by remove_invalid_options above unless the requestor is an
# admin.
        # TODO(gmann): The 'all_tenants' flag should not be required when
        # searching with 'tenant_id'. Ref bug #1185290. A microversion bump
        # would be needed to achieve the behavior described above by
        # uncommenting the code below.
# if 'tenant_id' in search_opts and 'all_tenants' not in search_opts:
# We do not need to add the all_tenants flag if the tenant
# id associated with the token is the tenant id
# specified. This is done so a request that does not need
# the all_tenants flag does not fail because of lack of
# policy permission for compute:get_all_tenants when it
# doesn't actually need it.
# if context.project_id != search_opts.get('tenant_id'):
# search_opts['all_tenants'] = 1
all_tenants = common.is_all_tenants(search_opts)
# use the boolean from here on out so remove the entry from search_opts
# if it's present
search_opts.pop('all_tenants', None)
elevated = None
if all_tenants:
if is_detail:
context.can(server_policies.SERVERS % 'detail:get_all_tenants')
else:
context.can(server_policies.SERVERS % 'index:get_all_tenants')
elevated = context.elevated()
else:
# As explained in lp:#1185290, if `all_tenants` is not passed
# we must ignore the `tenant_id` search option. As explained
# in a above code comment, any change to this behavior would
# require a microversion bump.
search_opts.pop('tenant_id', None)
if context.project_id:
search_opts['project_id'] = context.project_id
else:
search_opts['user_id'] = context.user_id
limit, marker = common.get_limit_and_marker(req)
sort_keys, sort_dirs = common.get_sort_params(req.params)
sort_keys, sort_dirs = remove_invalid_sort_keys(
context, sort_keys, sort_dirs,
schema_servers.SERVER_LIST_IGNORE_SORT_KEY, ('host', 'node'))
expected_attrs = []
if is_detail:
expected_attrs.append('services')
if api_version_request.is_supported(req, '2.26'):
expected_attrs.append("tags")
# merge our expected attrs with what the view builder needs for
# showing details
expected_attrs = self._view_builder.get_show_expected_attrs(
expected_attrs)
try:
instance_list = self.compute_api.get_all(elevated or context,
search_opts=search_opts, limit=limit, marker=marker,
expected_attrs=expected_attrs,
sort_keys=sort_keys, sort_dirs=sort_dirs)
except exception.MarkerNotFound:
msg = _('marker [%s] not found') % marker
raise exc.HTTPBadRequest(explanation=msg)
except exception.FlavorNotFound:
LOG.debug("Flavor '%s' could not be found ",
search_opts['flavor'])
instance_list = objects.InstanceList()
if is_detail:
instance_list._context = context
instance_list.fill_faults()
response = self._view_builder.detail(req, instance_list)
else:
response = self._view_builder.index(req, instance_list)
req.cache_db_instances(instance_list)
return response
def _get_server(self, context, req, instance_uuid, is_detail=False):
"""Utility function for looking up an instance by uuid.
:param context: request context for auth
:param req: HTTP request. The instance is cached in this request.
:param instance_uuid: UUID of the server instance to get
:param is_detail: True if you plan on showing the details of the
instance in the response, False otherwise.
"""
expected_attrs = ['flavor', 'numa_topology']
if is_detail:
if api_version_request.is_supported(req, '2.26'):
expected_attrs.append("tags")
expected_attrs = self._view_builder.get_show_expected_attrs(
expected_attrs)
instance = common.get_instance(self.compute_api, context,
instance_uuid,
expected_attrs=expected_attrs)
req.cache_db_instance(instance)
return instance
# WRS: extension
def _validate_vif_pci_address(self, vif_model, pci_address):
domain, bus, slot, func = pci_utils.get_pci_address_fields(pci_address)
if domain != '0000':
msg = _("Only domain 0000 is supported")
raise exc.HTTPBadRequest(explanation=msg)
if bus == '00' and slot in ('00', '01'):
msg = _("Slots 0,1 are reserved for PCI bus 0")
raise exc.HTTPBadRequest(explanation=msg)
if bus != '00' and slot == '00':
msg = _("Slots 0 is reserved for any PCI bus")
raise exc.HTTPBadRequest(explanation=msg)
if func != '0':
msg = _("Only function 0 is supported")
raise exc.HTTPBadRequest(explanation=msg)
# WRS: Max bus is 8, possibly kvm/qemu limitation.
if int(bus, 16) > 8:
msg = _("PCI bus maximum value is 8")
raise exc.HTTPBadRequest(explanation=msg)
if int(slot, 16) > devspec.MAX_SLOT:
msg = _("PCI slot maximum value is %s") % devspec.MAX_SLOT
raise exc.HTTPBadRequest(explanation=msg)
@staticmethod
def _validate_network_id(net_id, network_uuids):
"""Validates that a requested network id.
This method performs two checks:
1. That the network id is in the proper uuid format.
2. That the network is not a duplicate when using nova-network.
:param net_id: The network id to validate.
:param network_uuids: A running list of requested network IDs that have
passed validation already.
:raises: webob.exc.HTTPBadRequest if validation fails
"""
if not uuidutils.is_uuid_like(net_id):
# NOTE(mriedem): Neutron would allow a network id with a br- prefix
# back in Folsom so continue to honor that.
# TODO(mriedem): Need to figure out if this is still a valid case.
br_uuid = net_id.split('-', 1)[-1]
if not uuidutils.is_uuid_like(br_uuid):
msg = _("Bad networks format: network uuid is "
"not in proper format (%s)") % net_id
raise exc.HTTPBadRequest(explanation=msg)
# duplicate networks are allowed only for neutron v2.0
if net_id in network_uuids and not utils.is_neutron():
expl = _("Duplicate networks (%s) are not allowed") % net_id
raise exc.HTTPBadRequest(explanation=expl)
def _get_requested_networks(self, requested_networks,
supports_device_tagging=False):
"""Create a list of requested networks from the networks attribute."""
# Starting in the 2.37 microversion, requested_networks is either a
# list or a string enum with value 'auto' or 'none'. The auto/none
# values are verified via jsonschema so we don't check them again here.
if isinstance(requested_networks, six.string_types):
return objects.NetworkRequestList(
objects=[objects.NetworkRequest(
network_id=requested_networks)])
networks = []
network_uuids = []
vif_pci_addresses = []
for network in requested_networks:
request = objects.NetworkRequest()
try:
# fixed IP address is optional
# if the fixed IP address is not provided then
# it will use one of the available IP address from the network
request.address = network.get('fixed_ip', None)
request.port_id = network.get('port', None)
request.tag = network.get('tag', None)
if request.tag and not supports_device_tagging:
msg = _('Network interface tags are not yet supported.')
raise exc.HTTPBadRequest(explanation=msg)
if request.port_id:
request.network_id = None
if not utils.is_neutron():
# port parameter is only for neutron v2.0
msg = _("Unknown argument: port")
raise exc.HTTPBadRequest(explanation=msg)
if request.address is not None:
msg = _("Specified Fixed IP '%(addr)s' cannot be used "
"with port '%(port)s': the two cannot be "
"specified together.") % {
"addr": request.address,
"port": request.port_id}
raise exc.HTTPBadRequest(explanation=msg)
else:
request.network_id = network['uuid']
self._validate_network_id(
request.network_id, network_uuids)
# WRS: vif_model and vif_pci_address are optional
if utils.is_neutron():
request.vif_model = network.get('wrs-if:vif_model', None)
request.vif_pci_address = network.get(
'wrs-if:vif_pci_address', None)
if request.vif_pci_address is not None:
try:
pci_utils.parse_address(request.vif_pci_address)
except exception.PciDeviceWrongAddressFormat:
msg = _("Bad PCI address format")
raise exc.HTTPBadRequest(explanation=msg)
self._validate_vif_pci_address(request.vif_model,
request.vif_pci_address)
vif_pci_addresses.append(request.vif_pci_address)
# duplicate networks are allowed only for neutron v2.0
if (not utils.is_neutron() and request.network_id and
request.network_id in network_uuids):
expl = (_("Duplicate networks"
" (%s) are not allowed") %
request.network_id)
raise exc.HTTPBadRequest(explanation=expl)
network_uuids.append(request.network_id)
networks.append(request)
except KeyError as key:
expl = _('Bad network format: missing %s') % key
raise exc.HTTPBadRequest(explanation=expl)
except TypeError:
expl = _('Bad networks format')
raise exc.HTTPBadRequest(explanation=expl)
# WRS: Verify that all virtual PCI addresses for network devices
# are unique.
duplicates = [x for x in vif_pci_addresses
if vif_pci_addresses.count(x) > 1]
if duplicates:
expl = _('PCI addresses must be unique')
raise exc.HTTPBadRequest(explanation=expl)
return objects.NetworkRequestList(objects=networks)
@extensions.expected_errors(404)
def show(self, req, id):
"""Returns server details by server id."""
context = req.environ['nova.context']
context.can(server_policies.SERVERS % 'show')
instance = self._get_server(context, req, id, is_detail=True)
return self._view_builder.show(req, instance)
@wsgi.response(202)
@extensions.expected_errors((400, 403, 409))
@validation.schema(schema_server_create_v20, '2.0', '2.0')
@validation.schema(schema_server_create, '2.1', '2.18')
@validation.schema(schema_server_create_v219, '2.19', '2.31')
@validation.schema(schema_server_create_v232, '2.32', '2.36')
@validation.schema(schema_server_create_v237, '2.37', '2.41')
@validation.schema(schema_server_create_v242, '2.42', '2.51')
@validation.schema(schema_server_create_v252, '2.52')
def create(self, req, body):
"""Creates a new server for a given user."""
context = req.environ['nova.context']
server_dict = body['server']
password = self._get_server_admin_password(server_dict)
name = common.normalize_name(server_dict['name'])
description = name
# Validate Metadata before instance creation
meta = server_dict.get('metadata', {})
if meta:
common.validate_metadata(meta)
if api_version_request.is_supported(req, min_version='2.19'):
description = server_dict.get('description')
# Arguments to be passed to instance create function
create_kwargs = {}
        # TODO(alex_xu): This is for backward compatibility with the
        # stevedore extension interface. The final goal is to merge
        # all of the extended code into ServersController.
self._create_by_func_list(server_dict, create_kwargs, body)
availability_zone = create_kwargs.pop("availability_zone", None)
if api_version_request.is_supported(req, min_version='2.52'):
create_kwargs['tags'] = server_dict.get('tags')
helpers.translate_attributes(helpers.CREATE,
server_dict, create_kwargs)
target = {
'project_id': context.project_id,
'user_id': context.user_id,
'availability_zone': availability_zone}
context.can(server_policies.SERVERS % 'create', target)
# TODO(Shao He, Feng) move this policy check to os-availability-zone
# extension after refactor it.
parse_az = self.compute_api.parse_availability_zone
try:
availability_zone, host, node = parse_az(context,
availability_zone)
except exception.InvalidInput as err:
raise exc.HTTPBadRequest(explanation=six.text_type(err))
if host or node:
context.can(server_policies.SERVERS % 'create:forced_host', {})
min_compute_version = service_obj.get_minimum_version_all_cells(
nova_context.get_admin_context(), ['nova-compute'])
supports_device_tagging = (min_compute_version >=
DEVICE_TAGGING_MIN_COMPUTE_VERSION)
block_device_mapping = create_kwargs.get("block_device_mapping")
# TODO(Shao He, Feng) move this policy check to os-block-device-mapping
# extension after refactor it.
if block_device_mapping:
context.can(server_policies.SERVERS % 'create:attach_volume',
target)
for bdm in block_device_mapping:
if bdm.get('tag', None) and not supports_device_tagging:
msg = _('Block device tags are not yet supported.')
raise exc.HTTPBadRequest(explanation=msg)
image_uuid = self._image_from_req_data(server_dict, create_kwargs)
        # NOTE(cyeoh): Although the upper layer can set the value of
        # return_reservation_id in order to request that a reservation
        # id be returned to the client instead of the newly created
        # instance information, we do not want to pass this parameter
        # to the compute create call, which always returns both. We use
        # this flag after the instance create call to determine what
        # to return to the client.
return_reservation_id = create_kwargs.pop('return_reservation_id',
False)
requested_networks = server_dict.get('networks', None)
if requested_networks is not None:
requested_networks = self._get_requested_networks(
requested_networks, supports_device_tagging)
# Skip policy check for 'create:attach_network' if there is no
# network allocation request.
if requested_networks and len(requested_networks) and \
not requested_networks.no_allocate:
context.can(server_policies.SERVERS % 'create:attach_network',
target)
flavor_id = self._flavor_id_from_req_data(body)
try:
inst_type = flavors.get_flavor_by_flavor_id(
flavor_id, ctxt=context, read_deleted="no")
(instances, resv_id) = self.compute_api.create(context,
inst_type,
image_uuid,
display_name=name,
display_description=description,
availability_zone=availability_zone,
forced_host=host, forced_node=node,
metadata=server_dict.get('metadata', {}),
admin_password=password,
requested_networks=requested_networks,
check_server_group_quota=True,
**create_kwargs)
except (exception.QuotaError,
exception.PortLimitExceeded) as error:
raise exc.HTTPForbidden(
explanation=error.format_message())
except exception.ImageNotFound:
msg = _("Can not find requested image")
raise exc.HTTPBadRequest(explanation=msg)
except exception.KeypairNotFound:
msg = _("Invalid key_name provided.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.ConfigDriveInvalidValue:
msg = _("Invalid config_drive provided.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.ExternalNetworkAttachForbidden as error:
raise exc.HTTPForbidden(explanation=error.format_message())
except messaging.RemoteError as err:
msg = "%(err_type)s: %(err_msg)s" % {'err_type': err.exc_type,
'err_msg': err.value}
raise exc.HTTPBadRequest(explanation=msg)
except UnicodeDecodeError as error:
msg = "UnicodeError: %s" % error
raise exc.HTTPBadRequest(explanation=msg)
except (exception.CPUThreadPolicyConfigurationInvalid,
exception.ImageNotActive,
exception.ImageBadRequest,
exception.ImageNotAuthorized,
exception.FixedIpNotFoundForAddress,
exception.FlavorNotFound,
exception.FlavorDiskTooSmall,
exception.FlavorMemoryTooSmall,
exception.InvalidMetadata,
exception.InvalidRequest,
exception.InvalidVolume,
exception.MultiplePortsNotApplicable,
exception.InvalidFixedIpAndMaxCountRequest,
exception.InstanceUserDataMalformed,
exception.InstanceUserDataTooLarge,
exception.PortNotFound,
exception.FixedIpAlreadyInUse,
exception.SecurityGroupNotFound,
exception.PortRequiresFixedIP,
exception.NetworkRequiresSubnet,
exception.NetworkNotFound,
exception.InvalidBDM,
exception.InvalidBDMSnapshot,
exception.InvalidBDMVolume,
exception.InvalidBDMImage,
exception.InvalidBDMBootSequence,
exception.InvalidBDMLocalsLimit,
exception.InvalidBDMVolumeNotBootable,
exception.InvalidBDMEphemeralSize,
exception.InvalidBDMFormat,
exception.InvalidBDMSwapSize,
exception.AutoDiskConfigDisabledByImage,
exception.ImageCPUPinningForbidden,
exception.ImageCPUThreadPolicyForbidden,
exception.ImageNUMATopologyIncomplete,
exception.ImageNUMATopologyForbidden,
exception.ImageNUMATopologyAsymmetric,
exception.ImageNUMATopologyCPUOutOfRange,
exception.ImageNUMATopologyCPUDuplicates,
exception.ImageNUMATopologyCPUsUnassigned,
exception.ImageNUMATopologyNodesForbidden,
exception.ImageNUMATopologyNodesIncomplete,
exception.ImageNUMATopologyNodesDuplicates,
exception.ImageNUMATopologyMemoryOutOfRange,
exception.InvalidNUMANodesNumber,
exception.InstanceGroupNotFound,
exception.MemoryPageSizeInvalid,
exception.MemoryPageSizeForbidden,
exception.PciRequestAliasNotDefined,
exception.RealtimeConfigurationInvalid,
exception.RealtimeMaskNotFoundOrInvalid,
exception.SnapshotNotFound,
exception.UnableToAutoAllocateNetwork,
exception.ImageVCPUModelForbidden) as error:
raise exc.HTTPBadRequest(explanation=error.format_message())
except (exception.PortInUse,
exception.InstanceExists,
exception.NetworkAmbiguous,
exception.NoUniqueMatch) as error:
raise exc.HTTPConflict(explanation=error.format_message())
# If the caller wanted a reservation_id, return it
if return_reservation_id:
return wsgi.ResponseObject({'reservation_id': resv_id})
req.cache_db_instances(instances)
server = self._view_builder.create(req, instances[0])
if CONF.api.enable_instance_password:
server['server']['adminPass'] = password
robj = wsgi.ResponseObject(server)
return self._add_location(robj)
    # NOTE(gmann): Parameter 'req_body' is placed to handle the scheduler_hint
    # extension for V2.1. No other extension is supposed to use this, as
    # it will be removed soon.
def _create_by_func_list(self, server_dict,
create_kwargs, req_body):
for func in self.server_create_func_list:
func(server_dict, create_kwargs, req_body)
def _create_schema(self, create_schema, version):
for schema_func in self.schema_func_list:
self._create_schema_by_func(create_schema, version, schema_func)
def _create_schema_by_func(self, create_schema, version, schema_func):
schema = schema_func(version)
if (schema_func.__module__ ==
'nova.api.openstack.compute.scheduler_hints'):
            # NOTE(oomichi): The request parameter position of the
            # scheduler-hint extension is different from the other
            # extensions, so handle the difference here.
create_schema['properties'].update(schema)
else:
create_schema['properties']['server']['properties'].update(schema)
def _delete(self, context, req, instance_uuid):
instance = self._get_server(context, req, instance_uuid)
context.can(server_policies.SERVERS % 'delete',
target={'user_id': instance.user_id,
'project_id': instance.project_id})
if CONF.reclaim_instance_interval:
try:
self.compute_api.soft_delete(context, instance)
except exception.InstanceInvalidState:
# Note(yufang521247): instance which has never been active
# is not allowed to be soft_deleted. Thus we have to call
# delete() to clean up the instance.
self.compute_api.delete(context, instance)
else:
self.compute_api.delete(context, instance)
@extensions.expected_errors((400, 404))
@validation.schema(schema_server_update_v20, '2.0', '2.0')
@validation.schema(schema_server_update, '2.1', '2.18')
@validation.schema(schema_server_update_v219, '2.19')
def update(self, req, id, body):
"""Update server then pass on to version-specific controller."""
ctxt = req.environ['nova.context']
update_dict = {}
instance = self._get_server(ctxt, req, id, is_detail=True)
ctxt.can(server_policies.SERVERS % 'update',
target={'user_id': instance.user_id,
'project_id': instance.project_id})
server = body['server']
if 'name' in server:
update_dict['display_name'] = common.normalize_name(
server['name'])
if 'description' in server:
# This is allowed to be None (remove description)
update_dict['display_description'] = server['description']
helpers.translate_attributes(helpers.UPDATE, server, update_dict)
try:
instance = self.compute_api.update_instance(ctxt, instance,
update_dict)
return self._view_builder.show(req, instance,
extend_address=False)
except exception.InstanceNotFound:
msg = _("Instance could not be found")
raise exc.HTTPNotFound(explanation=msg)
    # NOTE(gmann): Returns 204 for backwards compatibility but should be 202
    # to represent an async API, as this API just accepts the request and
    # asks the hypervisor driver to complete it in async mode.
@wsgi.response(204)
@extensions.expected_errors((400, 404, 409))
@wsgi.action('confirmResize')
def _action_confirm_resize(self, req, id, body):
context = req.environ['nova.context']
context.can(server_policies.SERVERS % 'confirm_resize')
instance = self._get_server(context, req, id)
try:
self.compute_api.confirm_resize(context, instance)
except exception.InstanceUnknownCell as e:
raise exc.HTTPNotFound(explanation=e.format_message())
except exception.MigrationNotFound:
msg = _("Instance has not been resized.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'confirmResize', id)
@wsgi.response(202)
@extensions.expected_errors((400, 404, 409))
@wsgi.action('revertResize')
def _action_revert_resize(self, req, id, body):
context = req.environ['nova.context']
context.can(server_policies.SERVERS % 'revert_resize')
instance = self._get_server(context, req, id)
try:
self.compute_api.revert_resize(context, instance)
except exception.InstanceUnknownCell as e:
raise exc.HTTPNotFound(explanation=e.format_message())
except exception.MigrationNotFound:
msg = _("Instance has not been resized.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.FlavorNotFound:
msg = _("Flavor used by the instance could not be found.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'revertResize', id)
@wsgi.response(202)
@extensions.expected_errors((404, 409))
@wsgi.action('reboot')
@validation.schema(schema_servers.reboot)
def _action_reboot(self, req, id, body):
reboot_type = body['reboot']['type'].upper()
context = req.environ['nova.context']
context.can(server_policies.SERVERS % 'reboot')
instance = self._get_server(context, req, id)
try:
self.compute_api.reboot(context, instance, reboot_type)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'reboot', id)
def _resize(self, req, instance_id, flavor_id, **kwargs):
"""Begin the resize process with given instance/flavor."""
context = req.environ["nova.context"]
instance = self._get_server(context, req, instance_id)
context.can(server_policies.SERVERS % 'resize',
target={'user_id': instance.user_id,
'project_id': instance.project_id})
try:
self.compute_api.resize(context, instance, flavor_id, **kwargs)
except exception.InstanceUnknownCell as e:
raise exc.HTTPNotFound(explanation=e.format_message())
except exception.QuotaError as error:
raise exc.HTTPForbidden(
explanation=error.format_message())
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'resize', instance_id)
except exception.ImageNotAuthorized:
msg = _("You are not authorized to access the image "
"the instance was started with.")
raise exc.HTTPUnauthorized(explanation=msg)
except exception.ImageNotFound:
msg = _("Image that the instance was started "
"with could not be found.")
raise exc.HTTPBadRequest(explanation=msg)
except (exception.AutoDiskConfigDisabledByImage,
exception.CannotResizeDisk,
exception.CannotResizeToSameFlavor,
exception.FlavorNotFound,
exception.NoValidHost,
exception.PciRequestAliasNotDefined) as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
except exception.Invalid:
msg = _("Invalid instance image.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.ResizeError as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
@wsgi.response(204)
@extensions.expected_errors((404, 409))
def delete(self, req, id):
"""Destroys a server."""
try:
self._delete(req.environ['nova.context'], req, id)
except exception.InstanceNotFound:
msg = _("Instance could not be found")
raise exc.HTTPNotFound(explanation=msg)
except exception.InstanceUnknownCell as e:
raise exc.HTTPNotFound(explanation=e.format_message())
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'delete', id)
def _image_from_req_data(self, server_dict, create_kwargs):
"""Get image data from the request or raise appropriate
exceptions.
The field imageRef is mandatory when no block devices have been
defined and must be a proper uuid when present.
"""
image_href = server_dict.get('imageRef')
if not image_href and create_kwargs.get('block_device_mapping'):
return ''
elif image_href:
return image_href
else:
msg = _("Missing imageRef attribute")
raise exc.HTTPBadRequest(explanation=msg)
def _flavor_id_from_req_data(self, data):
flavor_ref = data['server']['flavorRef']
return common.get_id_from_href(flavor_ref)
@wsgi.response(202)
@extensions.expected_errors((400, 401, 403, 404, 409))
@wsgi.action('resize')
@validation.schema(schema_servers.resize)
def _action_resize(self, req, id, body):
"""Resizes a given instance to the flavor size requested."""
resize_dict = body['resize']
flavor_ref = str(resize_dict["flavorRef"])
kwargs = {}
helpers.translate_attributes(helpers.RESIZE, resize_dict, kwargs)
self._resize(req, id, flavor_ref, **kwargs)
@wsgi.response(202)
@extensions.expected_errors((400, 403, 404, 409))
@wsgi.action('rebuild')
@validation.schema(schema_server_rebuild_v20, '2.0', '2.0')
@validation.schema(schema_server_rebuild, '2.1', '2.18')
@validation.schema(schema_server_rebuild_v219, '2.19')
def _action_rebuild(self, req, id, body):
"""Rebuild an instance with the given attributes."""
rebuild_dict = body['rebuild']
image_href = rebuild_dict["imageRef"]
# validate metadata before rebuilding
meta = rebuild_dict.get('metadata', {})
if meta:
common.validate_metadata(meta)
password = self._get_server_admin_password(rebuild_dict)
context = req.environ['nova.context']
instance = self._get_server(context, req, id)
context.can(server_policies.SERVERS % 'rebuild',
target={'user_id': instance.user_id,
'project_id': instance.project_id})
attr_map = {
'name': 'display_name',
'description': 'display_description',
'metadata': 'metadata',
# WRS adding userdata to rebuild args
'userdata': 'userdata',
}
kwargs = {}
helpers.translate_attributes(helpers.REBUILD, rebuild_dict, kwargs)
for request_attribute, instance_attribute in attr_map.items():
try:
if request_attribute == 'name':
kwargs[instance_attribute] = common.normalize_name(
rebuild_dict[request_attribute])
else:
kwargs[instance_attribute] = rebuild_dict[
request_attribute]
except (KeyError, TypeError):
pass
try:
self.compute_api.rebuild(context,
instance,
image_href,
password,
**kwargs)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'rebuild', id)
except exception.InstanceNotFound:
msg = _("Instance could not be found")
raise exc.HTTPNotFound(explanation=msg)
except exception.InstanceUnknownCell as e:
raise exc.HTTPNotFound(explanation=e.format_message())
except exception.ImageNotFound:
msg = _("Cannot find image for rebuild")
raise exc.HTTPBadRequest(explanation=msg)
except exception.QuotaError as error:
raise exc.HTTPForbidden(explanation=error.format_message())
except (exception.ImageNotActive,
exception.ImageUnacceptable,
exception.FlavorDiskTooSmall,
exception.FlavorMemoryTooSmall,
exception.InvalidMetadata,
exception.AutoDiskConfigDisabledByImage) as error:
raise exc.HTTPBadRequest(explanation=error.format_message())
instance = self._get_server(context, req, id, is_detail=True)
view = self._view_builder.show(req, instance, extend_address=False)
# Add on the admin_password attribute since the view doesn't do it
# unless instance passwords are disabled
if CONF.api.enable_instance_password:
view['server']['adminPass'] = password
robj = wsgi.ResponseObject(view)
return self._add_location(robj)
@wsgi.response(202)
@extensions.expected_errors((400, 403, 404, 409))
@wsgi.action('createImage')
@common.check_snapshots_enabled
@validation.schema(schema_servers.create_image, '2.0', '2.0')
@validation.schema(schema_servers.create_image, '2.1')
def _action_create_image(self, req, id, body):
"""Snapshot a server instance."""
context = req.environ['nova.context']
context.can(server_policies.SERVERS % 'create_image')
entity = body["createImage"]
image_name = common.normalize_name(entity["name"])
metadata = entity.get('metadata', {})
# Starting from microversion 2.39 we don't check quotas on createImage
if api_version_request.is_supported(
req, max_version=
api_version_request.MAX_IMAGE_META_PROXY_API_VERSION):
common.check_img_metadata_properties_quota(context, metadata)
instance = self._get_server(context, req, id)
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
try:
if compute_utils.is_volume_backed_instance(context, instance,
bdms):
context.can(server_policies.SERVERS %
'create_image:allow_volume_backed')
image = self.compute_api.snapshot_volume_backed(
context,
instance,
image_name,
extra_properties=
metadata)
else:
image = self.compute_api.snapshot(context,
instance,
image_name,
extra_properties=metadata)
except exception.InstanceUnknownCell as e:
raise exc.HTTPNotFound(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'createImage', id)
except exception.Invalid as err:
raise exc.HTTPBadRequest(explanation=err.format_message())
except exception.OverQuota as e:
raise exc.HTTPForbidden(explanation=e.format_message())
# Starting with microversion 2.45 we return a response body containing
# the snapshot image id without the Location header.
if api_version_request.is_supported(req, '2.45'):
return {'image_id': image['id']}
# build location of newly-created image entity
image_id = str(image['id'])
image_ref = glance.generate_image_url(image_id)
resp = webob.Response(status_int=202)
resp.headers['Location'] = image_ref
return resp
def _get_server_admin_password(self, server):
"""Determine the admin password for a server on creation."""
if 'adminPass' in server:
password = server['adminPass']
else:
password = utils.generate_password()
return password
def _get_server_search_options(self, req):
"""Return server search options allowed by non-admin."""
opt_list = ('reservation_id', 'name', 'status', 'image', 'flavor',
'ip', 'changes-since', 'all_tenants')
if api_version_request.is_supported(req, min_version='2.5'):
opt_list += ('ip6',)
if api_version_request.is_supported(req, min_version='2.26'):
opt_list += TAG_SEARCH_FILTERS
return opt_list
def _get_instance(self, context, instance_uuid):
try:
attrs = ['system_metadata', 'metadata']
if not CONF.cells.enable:
# NOTE(danms): We can't target a cell database if we're
# in cellsv1 otherwise we'll short-circuit the replication.
mapping = objects.InstanceMapping.get_by_instance_uuid(
context, instance_uuid)
nova_context.set_target_cell(context, mapping.cell_mapping)
return objects.Instance.get_by_uuid(
context, instance_uuid, expected_attrs=attrs)
except (exception.InstanceNotFound,
exception.InstanceMappingNotFound) as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
@wsgi.response(202)
@extensions.expected_errors((404, 409))
@wsgi.action('os-start')
def _start_server(self, req, id, body):
"""Start an instance."""
context = req.environ['nova.context']
instance = self._get_instance(context, id)
context.can(server_policies.SERVERS % 'start', instance)
try:
self.compute_api.start(context, instance)
except (exception.InstanceNotReady, exception.InstanceIsLocked) as e:
raise webob.exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceUnknownCell as e:
raise exc.HTTPNotFound(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'start', id)
@wsgi.response(202)
@extensions.expected_errors((404, 409))
@wsgi.action('os-stop')
def _stop_server(self, req, id, body):
"""Stop an instance."""
context = req.environ['nova.context']
instance = self._get_instance(context, id)
context.can(server_policies.SERVERS % 'stop',
target={'user_id': instance.user_id,
'project_id': instance.project_id})
try:
self.compute_api.stop(context, instance)
except (exception.InstanceNotReady, exception.InstanceIsLocked) as e:
raise webob.exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceUnknownCell as e:
raise exc.HTTPNotFound(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'stop', id)
@wsgi.Controller.api_version("2.17")
@wsgi.response(202)
@extensions.expected_errors((400, 404, 409))
@wsgi.action('trigger_crash_dump')
@validation.schema(schema_servers.trigger_crash_dump)
def _action_trigger_crash_dump(self, req, id, body):
"""Trigger crash dump in an instance"""
context = req.environ['nova.context']
instance = self._get_instance(context, id)
context.can(server_policies.SERVERS % 'trigger_crash_dump',
target={'user_id': instance.user_id,
'project_id': instance.project_id})
try:
self.compute_api.trigger_crash_dump(context, instance)
except (exception.InstanceNotReady, exception.InstanceIsLocked) as e:
raise webob.exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'trigger_crash_dump', id)
except exception.TriggerCrashDumpNotSupported as e:
raise webob.exc.HTTPBadRequest(explanation=e.format_message())
def remove_invalid_options(context, search_options, allowed_search_options):
"""Remove search options that are not valid for non-admin API/context."""
if context.is_admin:
# Only remove parameters for sorting and pagination
for key in ('sort_key', 'sort_dir', 'limit', 'marker'):
search_options.pop(key, None)
return
# Otherwise, strip out all unknown options
unknown_options = [opt for opt in search_options
if opt not in allowed_search_options]
if unknown_options:
LOG.debug("Removing options '%s' from query",
", ".join(unknown_options))
for opt in unknown_options:
search_options.pop(opt, None)
def remove_invalid_sort_keys(context, sort_keys, sort_dirs,
blacklist, admin_only_fields):
key_list = copy.deepcopy(sort_keys)
for key in key_list:
        # NOTE(Kevin Zheng): We intend to remove a sort_key that is
        # in the blacklist and its corresponding sort_dir. Since
        # sort_keys and sort_dirs are not strictly required to be
        # provided in pairs in the current implementation, sort_dirs
        # could be shorter than sort_keys. In order to avoid an
        # IndexError, we only pop the sort_dir when the number of
        # sort_dirs is greater than the sort_key index.
if key in blacklist:
if len(sort_dirs) > sort_keys.index(key):
sort_dirs.pop(sort_keys.index(key))
sort_keys.pop(sort_keys.index(key))
elif key in admin_only_fields and not context.is_admin:
msg = _("Only administrators can sort servers "
"by %s") % key
raise exc.HTTPForbidden(explanation=msg)
return sort_keys, sort_dirs
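# Illustrative sketch (not part of nova): how remove_invalid_sort_keys() above
# behaves for a non-admin context. The DummyContext class and the sample sort
# keys/directions are invented for this example.
#
#     class DummyContext(object):
#         is_admin = False
#
#     keys, dirs = remove_invalid_sort_keys(
#         DummyContext(), ['display_name', 'locked_by'], ['asc', 'desc'],
#         ['locked_by'], ('host', 'node'))
#     # The blacklisted 'locked_by' key and its direction are dropped:
#     # keys == ['display_name'], dirs == ['asc']
#     # Sorting by 'host' or 'node' with a non-admin context would instead
#     # raise HTTPForbidden.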
| 45.258246
| 79
| 0.629231
|
7000d909c236c7060d4f3ed6ef3d285f670f879e
| 9,264
|
py
|
Python
|
lpot/pruning.py
|
deb-intel/lp-opt-tool
|
881bde402db387b04c2f33cc96fb817f47c4d623
|
[
"Apache-2.0"
] | null | null | null |
lpot/pruning.py
|
deb-intel/lp-opt-tool
|
881bde402db387b04c2f33cc96fb817f47c4d623
|
[
"Apache-2.0"
] | null | null | null |
lpot/pruning.py
|
deb-intel/lp-opt-tool
|
881bde402db387b04c2f33cc96fb817f47c4d623
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .adaptor import FRAMEWORKS
from .conf.config import Conf
from .policy import POLICIES
from .utils import logger
from .utils.utility import singleton
@singleton
class Pruning(object):
"""This is base class of pruning object.
Since DL use cases vary in the accuracy metrics (Top-1, MAP, ROC etc.), loss criteria
(<1% or <0.1% etc.) and pruning objectives (performance, memory footprint etc.).
Pruning class provides a flexible configuration interface via YAML for users to specify
these parameters.
Args:
conf_fname (string): The path to the YAML configuration file containing accuracy goal,
pruning objective and related dataloaders etc.
"""
def __init__(self, conf_fname):
self.conf = Conf(conf_fname)
def on_epoch_begin(self, epoch):
""" called on the begining of epochs"""
for policy in self.policies:
policy.on_epoch_begin(epoch)
def on_batch_begin(self, batch_id):
""" called on the begining of batches"""
for policy in self.policies:
policy.on_batch_begin(batch_id)
def on_batch_end(self):
""" called on the end of batches"""
for policy in self.policies:
policy.on_batch_end()
def on_epoch_end(self):
""" called on the end of epochs"""
for policy in self.policies:
policy.on_epoch_end()
stats, sparsity = self.adaptor.report_sparsity(self.model)
logger.info(stats)
logger.info(sparsity)
def __call__(self, model, q_dataloader=None, q_func=None, eval_dataloader=None,
eval_func=None):
"""The main entry point of pruning.
This interface currently only works on pytorch
and provides three usages:
a) Fully yaml configuration: User specifies all the info through yaml,
including dataloaders used in calibration and evaluation phases
and quantization tuning settings.
              For this usage, only the model parameter is mandatory.
b) Partial yaml configuration: User specifies dataloaders used in calibration
and evaluation phase by code.
              The tool provides built-in dataloaders and evaluators; the user just needs to
              provide a dataset implementing the __iter__ or __getitem__ methods and invoke
              dataloader() with that dataset as the input parameter to create an lpot
              dataloader before calling this function.
After that, User specifies fp32 "model", calibration dataset "q_dataloader"
and evaluation dataset "eval_dataloader".
The calibrated and quantized model is evaluated with "eval_dataloader"
with evaluation metrics specified in the configuration file. The evaluation tells
the tuner whether the quantized model meets the accuracy criteria. If not,
the tuner starts a new calibration and tuning flow.
              For this usage, the model, q_dataloader and eval_dataloader parameters are mandatory.
c) Partial yaml configuration: User specifies dataloaders used in calibration phase
by code.
              This usage is quite similar to b); the user just specifies a custom "eval_func"
which encapsulates the evaluation dataset by itself.
The calibrated and quantized model is evaluated with "eval_func".
The "eval_func" tells the tuner whether the quantized model meets
the accuracy criteria. If not, the Tuner starts a new calibration and tuning flow.
              For this usage, the model, q_dataloader and eval_func parameters are mandatory.
Args:
model (object): For PyTorch model, it's torch.nn.model
instance.
q_dataloader (generator): Data loader for calibration. It is iterable
and should yield a tuple (input, label) for
calibration dataset containing label,
or yield (input, _) for label-free calibration
dataset. The input could be a object, list,
tuple or dict, depending on user implementation,
as well as it can be taken as model input.
q_func (function, optional): Training function for pruning.
This function takes "model" as input parameter
and executes entire training process with self
contained training hyper-parameters. If this
parameter specified, eval_dataloader parameter
plus metric defined in yaml, or eval_func
parameter should also be specified at same time.
eval_dataloader (generator, optional): Data loader for evaluation. It is iterable
and should yield a tuple of (input, label).
The input could be a object, list, tuple or
dict, depending on user implementation,
as well as it can be taken as model input.
The label should be able to take as input of
supported metrics. If this parameter is
not None, user needs to specify pre-defined
evaluation metrics through configuration file
and should set "eval_func" paramter as None.
Tuner will combine model, eval_dataloader
and pre-defined metrics to run evaluation
process.
eval_func (function, optional): The evaluation function provided by user.
This function takes model as parameter,
and evaluation dataset and metrics should be
encapsulated in this function implementation
and outputs a higher-is-better accuracy scalar
value.
The pseudo code should be something like:
def eval_func(model):
input, label = dataloader()
output = model(input)
accuracy = metric(output, label)
return accuracy
Returns:
pruned model: best pruned model found, otherwise return None
"""
self.cfg = self.conf.usr_cfg
framework_specific_info = {'device': self.cfg.device,
'approach': self.cfg.quantization.approach,
'random_seed': self.cfg.tuning.random_seed}
framework = self.cfg.model.framework.lower()
if framework == 'tensorflow':
framework_specific_info.update(
{"inputs": self.cfg.model.inputs, "outputs": self.cfg.model.outputs})
self.adaptor = FRAMEWORKS[framework](framework_specific_info)
self.model = model
policies = {}
for policy in POLICIES:
for name in self.cfg["pruning"][policy]:
policies[name] = {"policy_name": policy,
"policy_spec": self.cfg["pruning"][policy][name]}
self.policies = []
for name, policy_spec in policies.items():
            logger.debug(policy_spec)
self.policies.append(POLICIES[policy_spec["policy_name"]](
self.model, policy_spec["policy_spec"], self.cfg, self.adaptor))
return q_func(model)
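# Illustrative usage sketch (not part of lpot): exercising the Pruning entry
# point following usage (c) from the docstring above. The YAML path, the model
# object and the train/eval callables are placeholders for this example.
#
#     prune = Pruning('./conf/prune.yaml')
#
#     def q_func(model):
#         # User-provided training loop; it should call prune.on_epoch_begin(),
#         # prune.on_batch_begin(), prune.on_batch_end() and prune.on_epoch_end()
#         # at the corresponding points so the pruning policies are applied.
#         ...
#         return model
#
#     def eval_func(model):
#         # Returns a higher-is-better accuracy scalar, as described above.
#         ...
#
#     pruned_model = prune(model, q_func=q_func, eval_func=eval_func)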
| 52.044944
| 99
| 0.542422
|
e5dc59c10802fd39cb3ac44833caf942a5c33b5a
| 1,840
|
py
|
Python
|
happiness/happiness/bokeh_utils.py
|
joshua-barber/bokeh_happiness
|
fce19a1002ebdace0f386a0e86e03bd14475bb9a
|
[
"MIT"
] | null | null | null |
happiness/happiness/bokeh_utils.py
|
joshua-barber/bokeh_happiness
|
fce19a1002ebdace0f386a0e86e03bd14475bb9a
|
[
"MIT"
] | null | null | null |
happiness/happiness/bokeh_utils.py
|
joshua-barber/bokeh_happiness
|
fce19a1002ebdace0f386a0e86e03bd14475bb9a
|
[
"MIT"
] | null | null | null |
from contextlib import closing
from bokeh.client import push_session, pull_session
from bokeh.document import Document
from bokeh.embed import autoload_server
from .viz.individuals import update_individuals_data
from .viz.team import update_team_data
from .viz.teams import update_teams_data
def get_bokeh_script(user, plot, suffix):
from .models import UserSession
document = Document()
document.add_root(plot)
document.title = suffix
with closing(push_session(document)) as session:
# Save the session id to a UserSession
UserSession.objects.create(user=user, bokeh_session_id=session.id)
# Get the script to pass into the template
script = autoload_server(None, session_id=session.id)
return script
def update_bokeh_sessions(user_sessions):
for us in user_sessions:
with closing(pull_session(session_id=us.bokeh_session_id)) as session:
if len(session.document.roots) == 0:
# In this case, the session_id was from a dead session and
# calling pull_session caused a new empty session to be
# created. So we just delete the UserSession and move on.
# It would be nice if there was a more efficient way - where I
# could just ask bokeh if session x is a session.
us.delete()
else:
# Call the appropriate update method based on the document's title
if session.document.title == 'individuals':
update_individuals_data(user=us.user, session=session)
if session.document.title == 'team':
update_team_data(user=us.user, session=session)
if session.document.title == 'teams':
update_teams_data(user=us.user, session=session)
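# Illustrative sketch (not part of this module): embedding the generated bokeh
# script in a Django view. The view function, the plot factory and the template
# name are hypothetical.
#
#     from django.shortcuts import render
#
#     def individuals_view(request):
#         plot = make_individuals_plot(request.user)  # hypothetical plot factory
#         script = get_bokeh_script(request.user, plot, suffix='individuals')
#         return render(request, 'happiness/individuals.html',
#                       {'bokeh_script': script})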
| 40
| 82
| 0.665217
|
53138eb16e2c6bdeb13527f66a1187cdc0251d55
| 19,529
|
py
|
Python
|
pydl/tests/test_layers.py
|
nash911/PyDL
|
b0b6f599184c0046f503b9ee1703dc3dfe9a89f2
|
[
"MIT"
] | null | null | null |
pydl/tests/test_layers.py
|
nash911/PyDL
|
b0b6f599184c0046f503b9ee1703dc3dfe9a89f2
|
[
"MIT"
] | null | null | null |
pydl/tests/test_layers.py
|
nash911/PyDL
|
b0b6f599184c0046f503b9ee1703dc3dfe9a89f2
|
[
"MIT"
] | null | null | null |
# ------------------------------------------------------------------------
# MIT License
#
# Copyright (c) [2021] [Avinash Ranganath]
#
# This code is part of the library PyDL <https://github.com/nash911/PyDL>
# This code is licensed under MIT license (see LICENSE.txt for details)
# ------------------------------------------------------------------------
import unittest
import numpy as np
import numpy.testing as npt
import itertools
from pydl.nn.layers import FC
from pydl import conf
class TestLayers(unittest.TestCase):
def test_score_fn(self):
def test(inp, w, true_out, bias=False):
fc = FC(inp, w.shape[-1], w, bias)
out_fc = fc.score_fn(inp)
npt.assert_almost_equal(out_fc, true_out, decimal=5)
# Manually calculated
# -------------------
X = np.array([[1, 2, 3],
[4, 5, 6]], dtype=conf.dtype)
w = np.array([[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]], dtype=conf.dtype)
bias = np.array([0.1, 0.2, 0.3, 0.4], dtype=conf.dtype)
true_out = np.array([[38, 44, 50, 56],
[83, 98, 113, 128]], dtype=conf.dtype)
test(X, w, true_out)
test(X, w, true_out + bias, bias)
# Combinatorial Test Cases
# ------------------------
batch_size = [1, 2, 3, 6, 11]
feature_size = [1, 2, 3, 6, 11]
num_neurons = [1, 2, 3, 6, 11]
scale = [1e-6, 1e-3, 1e-1, 1e-0, 2, 3, 10]
for batch, feat, neur, scl in list(itertools.product(batch_size, feature_size, num_neurons,
scale)):
X = np.random.uniform(-scl, scl, (batch, feat))
w = np.random.randn(feat, neur) * scl
bias = np.zeros(neur)
true_out = np.matmul(X, w)
test(X, w, true_out)
test(X, w, true_out + bias, bias)
def test_forward(self):
def test(inp, w, true_out, bias=False, actv_fn='Sigmoid', bchnorm=False, p=None, mask=None):
fc = FC(inp, w.shape[-1], w, bias, activation_fn=actv_fn, batchnorm=bchnorm, dropout=p)
out_fc = fc.forward(inp, mask=mask)
npt.assert_almost_equal(out_fc, true_out, decimal=5)
# Manually calculated
X = np.array([[1, 2, 3],
[4, 5, 6]], dtype=conf.dtype)
w = np.array([[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]], dtype=conf.dtype)
bias = np.array([0.1, 0.2, 0.3, 0.4], dtype=conf.dtype)
score_out = np.array([[38, 44, 50, 56],
[83, 98, 113, 128]], dtype=conf.dtype)
true_out = 1.0 / (1.0 + np.exp(-score_out))
test(X, w, true_out)
true_out = 1.0 / (1.0 + np.exp(-(score_out + bias)))
test(X, w, true_out, bias)
# Combinatorial Test Cases
# ------------------------
batch_size = [1, 2, 3, 6, 11]
feature_size = [1, 2, 3, 6, 11]
num_neurons = [1, 2, 3, 6, 11]
scale = [1e-6, 1e-3, 1e-1, 1e-0, 2]
batchnorm = [True, False]
dropout = [True, False]
for batch, feat, scl, neur, bn, dout in \
list(itertools.product(batch_size, feature_size, scale, num_neurons, batchnorm,
dropout)):
X = np.random.uniform(-scl, scl, (batch, feat))
w = np.random.randn(feat, neur) * scl
bias = np.zeros(neur)
score = np.matmul(X, w) + bias
if bn:
score = (score - np.mean(score, axis=0)) / np.sqrt(np.var(score, axis=0) + 1e-32)
if dout:
p = np.random.rand()
mask = np.array(np.random.rand(*score.shape) < p, dtype=conf.dtype)
else:
p = None
mask = None
true_out_sig = 1.0 / (1.0 + np.exp(-np.matmul(X, w)))
if dout:
true_out_sig *= mask
test(X, w, true_out_sig, bias=False, actv_fn='Sigmoid', bchnorm=False, p=p, mask=mask)
true_out_sig = 1.0 / (1.0 + np.exp(-score))
if dout:
true_out_sig *= mask
test(X, w, true_out_sig, bias, actv_fn='Sigmoid', bchnorm=bn, p=p, mask=mask)
true_out_tanh = (2.0 / (1.0 + np.exp(-2.0 * score))) - 1.0
if dout:
true_out_tanh *= mask
test(X, w, true_out_tanh, bias, actv_fn='Tanh', bchnorm=bn, p=p, mask=mask)
unnorm_prob = np.exp(score)
true_out_softmax = unnorm_prob / np.sum(unnorm_prob, axis=-1, keepdims=True)
if dout:
true_out_softmax *= mask
test(X, w, true_out_softmax, bias, actv_fn='Softmax', bchnorm=bn, p=p, mask=mask)
true_out_relu = np.maximum(0, score)
if dout:
mask /= p
true_out_relu *= mask
test(X, w, true_out_relu, bias, actv_fn='ReLU', bchnorm=bn, p=p, mask=mask)
true_out_linear = score
if dout:
true_out_linear *= mask
test(X, w, true_out_linear, bias, actv_fn='Linear', bchnorm=bn, p=p, mask=mask)
def test_gradients_manually(self):
def test(inp, w, inp_grad, true_weights_grad, true_inputs_grad, bias=False,
true_bias_grad=None):
fc = FC(inp, w.shape[-1], w, bias)
weights_grad = fc.weight_gradients(inp_grad, inputs=X)
bias_grad = fc.bias_gradients(inp_grad)
inputs_grad = fc.input_gradients(inp_grad)
npt.assert_almost_equal(weights_grad, true_weights_grad, decimal=5)
npt.assert_almost_equal(bias_grad, true_bias_grad, decimal=5)
npt.assert_almost_equal(inputs_grad, true_inputs_grad, decimal=5)
# Manually calculated - Unit input gradients
X = np.array([[1, 2, 3],
[4, 5, 6]], dtype=conf.dtype)
w = np.array([[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]], dtype=conf.dtype)
bias = np.array([0.1, 0.2, 0.3, 0.4], dtype=conf.dtype)
inp_grad = np.ones((2, 4), dtype=conf.dtype)
true_weights_grad = np.sum(X, axis=0, keepdims=True).T * np.ones(w.shape, dtype=conf.dtype)
true_inputs_grad = np.sum(w, axis=-1, keepdims=True).T * np.ones(X.shape, dtype=conf.dtype)
true_bias_grad = np.sum(inp_grad, axis=0, keepdims=False)
test(X, w, inp_grad, true_weights_grad, true_inputs_grad, bias, true_bias_grad)
# Manually calculated
X = np.array([[1, 2, 3],
[4, 5, 6]], dtype=conf.dtype)
w = np.array([[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]], dtype=conf.dtype)
bias = np.array([0.1, 0.2, 0.3, 0.4], dtype=conf.dtype)
inp_grad = np.array([[3, 3, 3, 3],
[2, 2, 2, 2]], dtype=conf.dtype)
true_weights_grad = np.array([[11, 11, 11, 11],
[16, 16, 16, 16],
[21, 21, 21, 21]], dtype=conf.dtype)
true_bias_grad = np.sum(inp_grad, axis=0, keepdims=False)
true_inputs_grad = np.array([[30, 78, 126],
[20, 52, 84]], dtype=conf.dtype)
test(X, w, inp_grad, true_weights_grad, true_inputs_grad, bias, true_bias_grad)
def test_gradients_finite_difference(self):
self.delta = 1e-5
def test(inp, w, inp_grad, bias=False):
fc = FC(inp, w.shape[-1], w, bias)
weights_grad = fc.weight_gradients(inp_grad, inputs=X)
bias_grad = fc.bias_gradients(inp_grad)
inputs_grad = fc.input_gradients(inp_grad)
# Weights finite difference gradients
weights_finite_diff = np.empty(weights_grad.shape)
for i in range(weights_grad.shape[0]):
w_delta = np.zeros(w.shape, dtype=conf.dtype)
w_delta[i] = self.delta
weights_finite_diff[i] = np.sum(((fc.score_fn(inp, w + w_delta) -
fc.score_fn(inp, w - w_delta)) /
(2 * self.delta)) * inp_grad, axis=0)
# Bias finite difference gradients
fc.bias = bias + self.delta
lhs = fc.score_fn(inp)
fc.bias = bias - self.delta
rhs = fc.score_fn(inp)
bias_finite_diff = np.sum(((lhs - rhs) / (2 * self.delta)) * inp_grad, axis=0)
fc.bias = bias
# Inputs finite difference gradients
inputs_finite_diff = np.empty(inputs_grad.shape)
for i in range(inputs_grad.shape[1]):
i_delta = np.zeros(inp.shape, dtype=conf.dtype)
i_delta[:, i] = self.delta
inputs_finite_diff[:, i] = np.sum(((fc.score_fn(inp + i_delta, w) -
fc.score_fn(inp - i_delta, w)) /
(2 * self.delta)) * inp_grad, axis=-1,
keepdims=False)
# Threshold Gradient Diff Check
npt.assert_almost_equal(weights_grad, weights_finite_diff, decimal=5)
npt.assert_almost_equal(bias_grad, bias_finite_diff, decimal=5)
npt.assert_almost_equal(inputs_grad, inputs_finite_diff, decimal=5)
# # Relative gradient error check
# max_abs_w_grads = np.maximum(np.abs(weights_grad), np.abs(weights_finite_diff))
# max_abs_w_grads[max_abs_w_grads==0] = 1
# w_grads_accuracy = np.abs(weights_grad - weights_finite_diff) / max_abs_w_grads
# npt.assert_almost_equal(np.zeros_like(w_grads_accuracy), w_grads_accuracy, decimal=5)
#
# max_abs_b_grads = np.maximum(np.abs(bias_grad), np.abs(bias_finite_diff))
# max_abs_b_grads[max_abs_b_grads==0] = 1
# b_grads_accuracy = np.abs(bias_grad - bias_finite_diff) / max_abs_b_grads
# npt.assert_almost_equal(np.zeros_like(b_grads_accuracy), b_grads_accuracy, decimal=5)
#
# max_abs_inp_grads = np.maximum(np.abs(inputs_grad), np.abs(inputs_finite_diff))
# max_abs_inp_grads[max_abs_inp_grads==0] = 1
# inp_grads_accuracy = np.abs(inputs_grad - inputs_finite_diff) / max_abs_inp_grads
# npt.assert_almost_equal(np.zeros_like(inp_grads_accuracy), inp_grads_accuracy,
# decimal=5)
# Manually calculated - Unit input gradients
X = np.array([[1, 2, 3],
[4, 5, 6]], dtype=conf.dtype)
w = np.array([[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]], dtype=conf.dtype)
bias = np.array([0.1, 0.2, 0.3, 0.4], dtype=conf.dtype)
inp_grad = np.ones((2, 4), dtype=conf.dtype)
test(X, w, inp_grad, bias)
# Manually calculated
X = np.array([[1, 2, 3],
[4, 5, 6]], dtype=conf.dtype)
w = np.array([[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]], dtype=conf.dtype)
bias = np.array([0.1, 0.2, 0.3, 0.4], dtype=conf.dtype)
inp_grad = np.array([[1, 2, 3, 4],
[-5, -6, -7, -8]], dtype=conf.dtype)
test(X, w, inp_grad, bias)
# Combinatorial Test Cases
# ------------------------
batch_size = [1, 2, 3, 6, 11]
feature_size = [1, 2, 3, 6, 11]
num_neurons = [1, 2, 3, 6, 11]
scale = [1e-4, 1e-3, 1e-1, 1e-0, 2, 3, 10]
unit_inp_grad = [True, False]
for batch, feat, neur, scl, unit in list(itertools.product(batch_size, feature_size,
num_neurons, scale,
unit_inp_grad)):
X = np.random.uniform(-scl, scl, (batch, feat))
w = np.random.randn(feat, neur) * scl
bias = np.random.rand(neur) * scl
inp_grad = np.ones((batch, neur), dtype=conf.dtype) if unit else \
np.random.uniform(-10, 10, (batch, neur))
test(X, w, inp_grad, bias)
def test_backward_gradients_finite_difference(self):
self.delta = 1e-8
def test(inp, w, inp_grad, bias=False, actv_fn='Sigmoid', batchnorm=False, p=None,
mask=None):
fc = FC(inp, w.shape[-1], w, bias, activation_fn=actv_fn, batchnorm=batchnorm,
dropout=p)
_ = fc.forward(inp, mask=mask)
inputs_grad = fc.backward(inp_grad)
weights_grad = fc.weights_grad
bias_grad = fc.bias_grad
# Weights finite difference gradients
weights_finite_diff = np.empty(weights_grad.shape)
for i in range(weights_grad.shape[0]):
for j in range(weights_grad.shape[1]):
w_delta = np.zeros(w.shape, dtype=conf.dtype)
w_delta[i, j] = self.delta
fc.weights = w + w_delta
lhs = fc.forward(inp, mask=mask)
fc.weights = w - w_delta
rhs = fc.forward(inp, mask=mask)
weights_finite_diff[i, j] = np.sum(((lhs - rhs) / (2 * self.delta)) * inp_grad)
# Replace finite-diff gradients computed across a kink (sign change between
# the perturbed outputs) with the analytical gradients to pass the assertion test
grad_kink = np.sum(np.array(np.logical_xor(lhs > 0, rhs > 0), dtype=np.int32))
if grad_kink > 0:
weights_finite_diff[i, j] = weights_grad[i, j]
fc.weights = w
# Bias finite difference gradients
bias_finite_diff = np.empty(bias_grad.shape)
for i in range(bias_grad.shape[0]):
bias_delta = np.zeros(bias.shape, dtype=conf.dtype)
bias_delta[i] = self.delta
fc.bias = bias + bias_delta
lhs = fc.forward(inp, mask=mask)
fc.bias = bias - bias_delta
rhs = fc.forward(inp, mask=mask)
bias_finite_diff[i] = np.sum(((lhs - rhs) / (2 * self.delta)) * inp_grad)
# Replace finite-diff gradients computed across a kink (sign change between
# the perturbed outputs) with the analytical gradients to pass the assertion test
grad_kink = np.sum(np.array(np.logical_xor(lhs > 0, rhs > 0), dtype=np.int32))
if grad_kink > 0:
bias_finite_diff[i] = bias_grad[i]
fc.bias = bias
# Inputs finite difference gradients
inputs_finite_diff = np.empty(inputs_grad.shape)
for i in range(inputs_grad.shape[0]):
for j in range(inputs_grad.shape[1]):
i_delta = np.zeros(inp.shape, dtype=conf.dtype)
i_delta[i, j] = self.delta
lhs = fc.forward(inp + i_delta, mask=mask)
rhs = fc.forward(inp - i_delta, mask=mask)
inputs_finite_diff[i, j] = np.sum(((lhs - rhs) / (2 * self.delta)) * inp_grad,
keepdims=False)
# Replace finite-diff gradients computed across a kink (sign change between
# the perturbed outputs) with the analytical gradients to pass the assertion test
grad_kink = np.sum(np.array(np.logical_xor(lhs > 0, rhs > 0), dtype=np.int32))
if grad_kink > 0:
inputs_finite_diff[i, j] = inputs_grad[i, j]
npt.assert_almost_equal(weights_grad, weights_finite_diff, decimal=2)
npt.assert_almost_equal(bias_grad, bias_finite_diff, decimal=2)
npt.assert_almost_equal(inputs_grad, inputs_finite_diff, decimal=2)
# Manually calculated - Unit input gradients
X = np.array([[1, 2, 3],
[4, 5, 6]], dtype=conf.dtype)
w = np.array([[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]], dtype=conf.dtype)
bias = np.array([0.1, 0.2, 0.3, 0.4], dtype=conf.dtype)
inp_grad = np.ones((2, 4), dtype=conf.dtype)
activation_fn = ['Linear', 'Sigmoid', 'Tanh', 'Softmax']
batchnorm = [True, False]
dropout = [True, False]
for actv, bn, dout in list(itertools.product(activation_fn, batchnorm, dropout)):
if dout and actv == 'Softmax':
continue
if dout:
p = np.random.rand()
mask = np.array(np.random.rand(*inp_grad.shape) < p, dtype=conf.dtype)
if actv in ['Linear', 'ReLU']:
mask /= p
else:
p = None
mask = None
test(X, w, inp_grad, bias, actv, bn, p, mask)
# Manually calculated
X = np.array([[1, 2, 3],
[4, 5, 6]], dtype=conf.dtype)
w = np.array([[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]], dtype=conf.dtype)
bias = np.array([0.1, 0.2, 0.3, 0.4], dtype=conf.dtype)
inp_grad = np.array([[5, 6, 7, 8],
[1, 2, 3, 4]], dtype=conf.dtype)
activation_fn = ['Linear', 'Sigmoid', 'Tanh', 'Softmax']
batchnorm = [True, False]
dropout = [True, False]
for actv, bn, dout in list(itertools.product(activation_fn, batchnorm, dropout)):
if dout and actv == 'Softmax':
continue
if dout:
p = np.random.rand()
mask = np.array(np.random.rand(*inp_grad.shape) < p, dtype=conf.dtype)
if actv in ['Linear', 'ReLU']:
mask /= p
else:
p = None
mask = None
test(X, w, inp_grad, bias, actv, bn, p, mask)
# Combinatorial Test Cases
# ------------------------
batch_size = [1, 2, 8, 11]
feature_size = [1, 2, 3, 11]
num_neurons = [1, 2, 3, 11]
scale = [1e-3, 1e-0, 2]
unit_inp_grad = [True, False]
activation_fn = ['Linear', 'Sigmoid', 'Tanh', 'Softmax', 'ReLU']
batchnorm = [True, False]
dropout = [True, False]
for batch, feat, neur, scl, unit, actv, bn, dout in \
list(itertools.product(batch_size, feature_size, num_neurons, scale, unit_inp_grad,
activation_fn, batchnorm, dropout)):
if dout and actv == 'Softmax':
continue
X = np.random.uniform(-scl, scl, (batch, feat))
w = np.random.randn(feat, neur) * scl
# bias = np.random.randn(neur) * scl
bias = np.zeros(neur)
inp_grad = np.ones((batch, neur), dtype=conf.dtype) if unit else \
np.random.uniform(-1, 1, (batch, neur))
if dout:
p = np.random.rand()
mask = np.array(np.random.rand(batch, neur) < p, dtype=conf.dtype)
if actv in ['Linear', 'ReLU']:
mask /= p
else:
p = None
mask = None
test(X, w, inp_grad, bias, actv, bn, p, mask)
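# Illustrative standalone sketch of the central-difference rule the tests above
# rely on: df/dx_i ~= (f(x + h*e_i) - f(x - h*e_i)) / (2*h). It is not used by
# the tests; the helper and its names are purely for illustration.
def central_difference_gradient(f, x, h=1e-5):
    grad = np.zeros_like(x, dtype=conf.dtype)
    for i in range(x.size):
        delta = np.zeros_like(x, dtype=conf.dtype)
        delta.flat[i] = h
        grad.flat[i] = (f(x + delta) - f(x - delta)) / (2.0 * h)
    return grad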
if __name__ == '__main__':
unittest.main()
| 45.416279
| 100
| 0.503252
|
68982d195965d35d03a279ef1b45ceae82a47964
| 6,039
|
py
|
Python
|
automol/pot/_fit.py
|
lpratalimaffei/autochem
|
fd51f6899de17a014b4c1c7e18cefbc3df283b5e
|
[
"Apache-2.0"
] | 2
|
2021-03-01T14:23:25.000Z
|
2021-11-28T19:17:08.000Z
|
automol/pot/_fit.py
|
lpratalimaffei/autochem
|
fd51f6899de17a014b4c1c7e18cefbc3df283b5e
|
[
"Apache-2.0"
] | 1
|
2021-02-12T21:02:22.000Z
|
2021-02-12T21:35:33.000Z
|
automol/pot/_fit.py
|
lpratalimaffei/autochem
|
fd51f6899de17a014b4c1c7e18cefbc3df283b5e
|
[
"Apache-2.0"
] | 6
|
2020-12-12T18:41:13.000Z
|
2021-11-11T20:12:14.000Z
|
"""
Handle fits to potentials
"""
import numpy
from scipy.interpolate import interp1d
def fit_1d_potential(pot_dct, min_thresh=-0.0001, max_thresh=50.0):
""" Get a physical hindered rotor potential via a series of spline fits
"""
pot = list(pot_dct.values())
# Initialize a variable for the size of the potential
lpot = len(pot)+1
pot.append(0.0)
# Print warning messages
print_pot = False
if any(val > max_thresh for val in pot):
print_pot = True
max_pot = max(pot)
print('Warning: Found pot val of {0:.2f}'.format(max_pot),
' which is larger than',
'the typical maximum for a torsional potential')
# reset any negative values for the first grid point to 0.
if pot[0] < 0.:
print('ERROR: The first potential value should be 0.')
pot[0] = 0.
if any(val < min_thresh for val in pot):
print_pot = True
min_pot = min(pot)
print('Warning: Found pot val of {0:.2f}'.format(min_pot),
' which is below',
'{0} kcal. Refit w/ positives'.format(min_thresh))
if print_pot:
print('Potential before spline:', pot)
# Build a potential list from only successful calculations
# First replace high potential values with max_thresh
# Then replace any negative potential values with cubic spline fit values
idx_success = []
pot_success = []
for idx in range(lpot):
if pot[idx] < 600. and pot[idx] > min_thresh:
idx_success.append(idx)
if pot[idx] < max_thresh:
pot_success.append(pot[idx])
else:
pot_success.append(max_thresh)
if len(pot_success) > 3:
# Build a new potential list using a spline fit of the HR potential
pot_spl = interp1d(
numpy.array(idx_success), numpy.array(pot_success), kind='cubic')
for idx in range(lpot):
pot[idx] = float(pot_spl(idx))
# Do second spline fit of only positive values if any negative values found
if any(val < min_thresh for val in pot):
print('Still found negative potential values after first spline')
print('Potential after spline:', pot)
if len(pot_success) > 3:
x_pos = numpy.array([i for i in range(lpot)
if pot[i] >= min_thresh])
y_pos = numpy.array([pot[i] for i in range(lpot)
if pot[i] >= min_thresh])
pos_pot_spl = interp1d(x_pos, y_pos, kind='cubic')
pot_pos_fit = []
for idx in range(lpot):
pot_pos_fit.append(pos_pot_spl(idx))
else:
pot_pos_fit = []
for idx in range(lpot):
pot_pos_fit.append(pot[idx])
print('Potential after spline:', pot_pos_fit)
# Perform second check to see if negative potentials have been fixed
if any(val < min_thresh for val in pot_pos_fit):
print('Still found negative potential values after second spline')
print('Replace with linear interpolation of positive values')
neg_idxs = [i for i in range(lpot) if pot_pos_fit[i] < min_thresh]
clean_pot = []
for i in range(lpot):
if i in neg_idxs:
# Find the indices for positive vals around negative value
idx_0 = i - 1
while idx_0 in neg_idxs:
idx_0 = idx_0 - 1
for j in range(i, lpot):
if pot_pos_fit[j] >= min_thresh:
idx_1 = j
break
# Get a new value for this point on the potential by
# doing a linear interp of positives
interp_val = (
pot_pos_fit[idx_0] * (1.0-((i-idx_0)/(idx_1-idx_0))) +
pot_pos_fit[idx_1] * ((i-idx_0)/(idx_1-idx_0))
)
clean_pot.append(interp_val)
else:
clean_pot.append(pot_pos_fit[i])
final_potential = clean_pot.copy()
else:
final_potential = pot_pos_fit.copy()
else:
final_potential = pot.copy()
final_potential = final_potential[:-1]
fin_dct = {}
for i, val in enumerate(final_potential):
val_fin = min(val, max_thresh)
fin_dct[(i,)] = val_fin
return fin_dct
# def spline_fitter(xarr, yarr):
# """
# """
# x_pos = numpy.array([i for i in range(lpot)
# if pot[i] >= min_thresh])
# y_pos = numpy.array([pot[i] for i in range(lpot)
# if pot[i] >= min_thresh])
# pos_pot_spl = interp1d(x_pos, y_pos, kind='cubic')
#
#
# def linear_fitter(pot):
# """ Do a one-dimensional linear fitter
# """
#
# neg_idxs = [i for i in range(lpot) if pot_pos_fit[i] < min_thresh]
# clean_pot = []
# if i in neg_idxs:
# # Find the indices for positive vals around negative value
# idx_0 = i - 1
# while idx_0 in neg_idxs:
# idx_0 = idx_0 - 1
# for j in range(i, lpot):
# if pot_pos_fit[j] >= min_thresh:
# idx_1 = j
# break
# pot = _linear_fitter(pot)
#
#
# def _linear_fit(pot, idx_0, idx_1):
# """ Linear fitter
# """
# interp_val = (
# pot[idx_0] * (1.0-((i-idx_0)/(idx_1-idx_0))) +
# pot[idx_1] * ((i-idx_0)/(idx_1-idx_0))
# )
#
#
# def _re_set_high_values(pot, max_thresh=600., min_thresh=-0.0001):
# """ Rebuild the potential
# """
#
# idx_success = []
# pot_success = []
# for idx in range(lpot):
# if pot[idx] < 600. and pot[idx] > min_thresh:
# idx_success.append(idx)
# if pot[idx] < max_thresh:
# pot_success.append(pot[idx])
# else:
# pot_success.append(max_thresh)
#
# return pot
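# Illustrative usage sketch of fit_1d_potential with made-up values: one point
# above max_thresh and one spurious negative point, both of which the fitting
# procedure above is meant to clean up.
if __name__ == '__main__':
    _example_pot = {
        (0,): 0.00, (1,): 0.45, (2,): 1.80, (3,): 4.10,
        (4,): 75.0,            # above max_thresh; capped and refit
        (5,): 3.90, (6,): 1.70,
        (7,): -0.50,           # negative; replaced by the spline fit
        (8,): 0.40,
    }
    print(fit_1d_potential(_example_pot))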
| 34.3125
| 79
| 0.545951
|
05cdb9409b9b7f779ffec46cf6c83cadf3cdc40d
| 19,712
|
py
|
Python
|
src/azure-cli/azure/cli/command_modules/storage/operations/account.py
|
heaths/azure-cli
|
baae1d17ffc4f3abfeccea17116bfd61de5770f1
|
[
"MIT"
] | null | null | null |
src/azure-cli/azure/cli/command_modules/storage/operations/account.py
|
heaths/azure-cli
|
baae1d17ffc4f3abfeccea17116bfd61de5770f1
|
[
"MIT"
] | 1
|
2021-06-02T00:40:34.000Z
|
2021-06-02T00:40:34.000Z
|
src/azure-cli/azure/cli/command_modules/storage/operations/account.py
|
heaths/azure-cli
|
baae1d17ffc4f3abfeccea17116bfd61de5770f1
|
[
"MIT"
] | 1
|
2020-07-30T13:35:39.000Z
|
2020-07-30T13:35:39.000Z
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
"""Custom operations for storage account commands"""
import os
from azure.cli.command_modules.storage._client_factory import storage_client_factory, cf_sa_for_keys
from azure.cli.core.util import get_file_json, shell_safe_json_parse
from knack.log import get_logger
logger = get_logger(__name__)
# pylint: disable=too-many-locals
def create_storage_account(cmd, resource_group_name, account_name, sku=None, location=None, kind=None,
tags=None, custom_domain=None, encryption_services=None, access_tier=None, https_only=None,
enable_files_aadds=None, bypass=None, default_action=None, assign_identity=False,
enable_large_file_share=None, enable_files_adds=None, domain_name=None,
net_bios_domain_name=None, forest_name=None, domain_guid=None, domain_sid=None,
azure_storage_sid=None):
StorageAccountCreateParameters, Kind, Sku, CustomDomain, AccessTier, Identity, Encryption, NetworkRuleSet = \
cmd.get_models('StorageAccountCreateParameters', 'Kind', 'Sku', 'CustomDomain', 'AccessTier', 'Identity',
'Encryption', 'NetworkRuleSet')
scf = storage_client_factory(cmd.cli_ctx)
logger.warning("The default kind for created storage account will change to 'StorageV2' from 'Storage' "
"in the future")
params = StorageAccountCreateParameters(sku=Sku(name=sku), kind=Kind(kind), location=location, tags=tags)
if custom_domain:
params.custom_domain = CustomDomain(name=custom_domain, use_sub_domain=None)
if encryption_services:
params.encryption = Encryption(services=encryption_services)
if access_tier:
params.access_tier = AccessTier(access_tier)
if assign_identity:
params.identity = Identity()
if https_only is not None:
params.enable_https_traffic_only = https_only
AzureFilesIdentityBasedAuthentication = cmd.get_models('AzureFilesIdentityBasedAuthentication')
if enable_files_aadds is not None:
params.azure_files_identity_based_authentication = AzureFilesIdentityBasedAuthentication(
directory_service_options='AADDS' if enable_files_aadds else 'None')
if enable_files_adds is not None:
from knack.util import CLIError
ActiveDirectoryProperties = cmd.get_models('ActiveDirectoryProperties')
if enable_files_adds: # enable AD
if not (domain_name and net_bios_domain_name and forest_name and domain_guid and domain_sid and
azure_storage_sid):
raise CLIError("To enable ActiveDirectoryDomainServicesForFile, user must specify all of: "
"--domain-name, --net-bios-domain-name, --forest-name, --domain-guid, --domain-sid and "
"--azure_storage_sid arguments in Azure Active Directory Properties Argument group.")
active_directory_properties = ActiveDirectoryProperties(domain_name=domain_name,
net_bios_domain_name=net_bios_domain_name,
forest_name=forest_name, domain_guid=domain_guid,
domain_sid=domain_sid,
azure_storage_sid=azure_storage_sid)
# TODO: Enabling AD will automatically disable AADDS. Maybe we should throw error message
params.azure_files_identity_based_authentication = AzureFilesIdentityBasedAuthentication(
directory_service_options='AD',
active_directory_properties=active_directory_properties)
else: # disable AD
if domain_name or net_bios_domain_name or forest_name or domain_guid or domain_sid or azure_storage_sid: # pylint: disable=too-many-boolean-expressions
raise CLIError("To disable ActiveDirectoryDomainServicesForFile, user can't specify any of: "
"--domain-name, --net-bios-domain-name, --forest-name, --domain-guid, --domain-sid and "
"--azure_storage_sid arguments in Azure Active Directory Properties Argument group.")
params.azure_files_identity_based_authentication = AzureFilesIdentityBasedAuthentication(
directory_service_options='None')
if enable_large_file_share:
LargeFileSharesState = cmd.get_models('LargeFileSharesState')
params.large_file_shares_state = LargeFileSharesState("Enabled")
if NetworkRuleSet and (bypass or default_action):
if bypass and not default_action:
from knack.util import CLIError
raise CLIError('incorrect usage: --default-action ACTION [--bypass SERVICE ...]')
params.network_rule_set = NetworkRuleSet(bypass=bypass, default_action=default_action, ip_rules=None,
virtual_network_rules=None)
return scf.storage_accounts.create(resource_group_name, account_name, params)
def list_storage_accounts(cmd, resource_group_name=None):
scf = storage_client_factory(cmd.cli_ctx)
if resource_group_name:
accounts = scf.storage_accounts.list_by_resource_group(resource_group_name)
else:
accounts = scf.storage_accounts.list()
return list(accounts)
def show_storage_account_connection_string(cmd, resource_group_name, account_name, protocol='https', blob_endpoint=None,
file_endpoint=None, queue_endpoint=None, table_endpoint=None, sas_token=None,
key_name='primary'):
endpoint_suffix = cmd.cli_ctx.cloud.suffixes.storage_endpoint
connection_string = 'DefaultEndpointsProtocol={};EndpointSuffix={}'.format(protocol, endpoint_suffix)
if account_name is not None:
scf = cf_sa_for_keys(cmd.cli_ctx, None)
obj = scf.list_keys(resource_group_name, account_name) # pylint: disable=no-member
try:
keys = [obj.keys[0].value, obj.keys[1].value] # pylint: disable=no-member
except AttributeError:
# Older API versions have a slightly different structure
keys = [obj.key1, obj.key2] # pylint: disable=no-member
connection_string = '{}{}{}'.format(
connection_string,
';AccountName={}'.format(account_name),
';AccountKey={}'.format(keys[0] if key_name == 'primary' else keys[1])) # pylint: disable=no-member
connection_string = '{}{}'.format(connection_string,
';BlobEndpoint={}'.format(blob_endpoint) if blob_endpoint else '')
connection_string = '{}{}'.format(connection_string,
';FileEndpoint={}'.format(file_endpoint) if file_endpoint else '')
connection_string = '{}{}'.format(connection_string,
';QueueEndpoint={}'.format(queue_endpoint) if queue_endpoint else '')
connection_string = '{}{}'.format(connection_string,
';TableEndpoint={}'.format(table_endpoint) if table_endpoint else '')
connection_string = '{}{}'.format(connection_string,
';SharedAccessSignature={}'.format(sas_token) if sas_token else '')
return {'connectionString': connection_string}
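# Illustrative result shape for the public cloud (account name and key are
# placeholders); optional endpoint and SAS arguments append further
# ';Key=value' pairs as built above:
#   {'connectionString': 'DefaultEndpointsProtocol=https;EndpointSuffix=core.windows.net;'
#                        'AccountName=mystorageaccount;AccountKey=<key>'}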
def show_storage_account_usage(cmd, location):
scf = storage_client_factory(cmd.cli_ctx)
try:
client = scf.usages
except NotImplementedError:
client = scf.usage
return next((x for x in client.list_by_location(location) if x.name.value == 'StorageAccounts'), None) # pylint: disable=no-member
def show_storage_account_usage_no_location(cmd):
scf = storage_client_factory(cmd.cli_ctx)
return next((x for x in scf.usage.list() if x.name.value == 'StorageAccounts'), None) # pylint: disable=no-member
def get_storage_account_properties(cli_ctx, account_id):
scf = storage_client_factory(cli_ctx)
from msrestazure.tools import parse_resource_id
result = parse_resource_id(account_id)
return scf.storage_accounts.get_properties(result['resource_group'], result['name'])
# pylint: disable=too-many-locals, too-many-statements, too-many-branches, too-many-boolean-expressions
def update_storage_account(cmd, instance, sku=None, tags=None, custom_domain=None, use_subdomain=None,
encryption_services=None, encryption_key_source=None, encryption_key_vault_properties=None,
access_tier=None, https_only=None, enable_files_aadds=None, assign_identity=False,
bypass=None, default_action=None, enable_large_file_share=None, enable_files_adds=None,
domain_name=None, net_bios_domain_name=None, forest_name=None, domain_guid=None,
domain_sid=None, azure_storage_sid=None):
StorageAccountUpdateParameters, Sku, CustomDomain, AccessTier, Identity, Encryption, NetworkRuleSet = \
cmd.get_models('StorageAccountUpdateParameters', 'Sku', 'CustomDomain', 'AccessTier', 'Identity',
'Encryption', 'NetworkRuleSet')
domain = instance.custom_domain
if custom_domain is not None:
domain = CustomDomain(name=custom_domain)
if use_subdomain is not None:
domain.use_sub_domain_name = use_subdomain == 'true'
encryption = instance.encryption
if not encryption and any((encryption_services, encryption_key_source, encryption_key_vault_properties)):
encryption = Encryption()
if encryption_services:
encryption.services = encryption_services
if encryption_key_source:
encryption.key_source = encryption_key_source
if encryption_key_vault_properties:
if encryption.key_source != 'Microsoft.Keyvault':
raise ValueError('Specify `--encryption-key-source=Microsoft.Keyvault` to configure key vault properties.')
encryption.key_vault_properties = encryption_key_vault_properties
params = StorageAccountUpdateParameters(
sku=Sku(name=sku) if sku is not None else instance.sku,
tags=tags if tags is not None else instance.tags,
custom_domain=domain,
encryption=encryption,
access_tier=AccessTier(access_tier) if access_tier is not None else instance.access_tier,
enable_https_traffic_only=https_only if https_only is not None else instance.enable_https_traffic_only
)
AzureFilesIdentityBasedAuthentication = cmd.get_models('AzureFilesIdentityBasedAuthentication')
from knack.util import CLIError
if enable_files_aadds is not None:
if enable_files_aadds: # enable AADDS
origin_storage_account = get_storage_account_properties(cmd.cli_ctx, instance.id)
if origin_storage_account.azure_files_identity_based_authentication and \
origin_storage_account.azure_files_identity_based_authentication.directory_service_options == 'AD':
raise CLIError("The Storage account already enabled ActiveDirectoryDomainServicesForFile, "
"please disable it by running this cmdlets with \"--enable-files-adds false\" "
"before enable AzureActiveDirectoryDomainServicesForFile.")
params.azure_files_identity_based_authentication = AzureFilesIdentityBasedAuthentication(
directory_service_options='AADDS' if enable_files_aadds else 'None')
else: # Only disable AADDS and keep others unchanged
origin_storage_account = get_storage_account_properties(cmd.cli_ctx, instance.id)
if not origin_storage_account.azure_files_identity_based_authentication or \
origin_storage_account.azure_files_identity_based_authentication.directory_service_options\
== 'AADDS':
params.azure_files_identity_based_authentication = AzureFilesIdentityBasedAuthentication(
directory_service_options='None')
else:
params.azure_files_identity_based_authentication = \
origin_storage_account.azure_files_identity_based_authentication
if enable_files_adds is not None:
ActiveDirectoryProperties = cmd.get_models('ActiveDirectoryProperties')
if enable_files_adds: # enable AD
if not(domain_name and net_bios_domain_name and forest_name and domain_guid and domain_sid and
azure_storage_sid):
raise CLIError("To enable ActiveDirectoryDomainServicesForFile, user must specify all of: "
"--domain-name, --net-bios-domain-name, --forest-name, --domain-guid, --domain-sid and "
"--azure_storage_sid arguments in Azure Active Directory Properties Argument group.")
origin_storage_account = get_storage_account_properties(cmd.cli_ctx, instance.id)
if origin_storage_account.azure_files_identity_based_authentication and \
origin_storage_account.azure_files_identity_based_authentication.directory_service_options \
== 'AADDS':
raise CLIError("The Storage account already enabled AzureActiveDirectoryDomainServicesForFile, "
"please disable it by running this cmdlets with \"--enable-files-aadds false\" "
"before enable ActiveDirectoryDomainServicesForFile.")
active_directory_properties = ActiveDirectoryProperties(domain_name=domain_name,
net_bios_domain_name=net_bios_domain_name,
forest_name=forest_name, domain_guid=domain_guid,
domain_sid=domain_sid,
azure_storage_sid=azure_storage_sid)
# TODO: Enabling AD will automatically disable AADDS. Maybe we should throw error message
params.azure_files_identity_based_authentication = AzureFilesIdentityBasedAuthentication(
directory_service_options='AD',
active_directory_properties=active_directory_properties)
else: # disable AD
if domain_name or net_bios_domain_name or forest_name or domain_guid or domain_sid or azure_storage_sid:
raise CLIError("To disable ActiveDirectoryDomainServicesForFile, user can't specify any of: "
"--domain-name, --net-bios-domain-name, --forest-name, --domain-guid, --domain-sid and "
"--azure_storage_sid arguments in Azure Active Directory Properties Argument group.")
# Only disable AD and keep others unchanged
origin_storage_account = get_storage_account_properties(cmd.cli_ctx, instance.id)
if not origin_storage_account.azure_files_identity_based_authentication or \
origin_storage_account.azure_files_identity_based_authentication.directory_service_options == 'AD':
params.azure_files_identity_based_authentication = AzureFilesIdentityBasedAuthentication(
directory_service_options='None')
else:
params.azure_files_identity_based_authentication = \
origin_storage_account.azure_files_identity_based_authentication
if assign_identity:
params.identity = Identity()
if enable_large_file_share:
LargeFileSharesState = cmd.get_models('LargeFileSharesState')
params.large_file_shares_state = LargeFileSharesState("Enabled")
if NetworkRuleSet:
acl = instance.network_rule_set
if acl:
if bypass:
acl.bypass = bypass
if default_action:
acl.default_action = default_action
elif default_action:
acl = NetworkRuleSet(bypass=bypass, virtual_network_rules=None, ip_rules=None,
default_action=default_action)
elif bypass:
raise CLIError('incorrect usage: --default-action ACTION [--bypass SERVICE ...]')
params.network_rule_set = acl
return params
def list_network_rules(client, resource_group_name, account_name):
sa = client.get_properties(resource_group_name, account_name)
rules = sa.network_rule_set
delattr(rules, 'bypass')
delattr(rules, 'default_action')
return rules
def add_network_rule(cmd, client, resource_group_name, account_name, action='Allow', subnet=None,
vnet_name=None, ip_address=None): # pylint: disable=unused-argument
sa = client.get_properties(resource_group_name, account_name)
rules = sa.network_rule_set
if subnet:
from msrestazure.tools import is_valid_resource_id
if not is_valid_resource_id(subnet):
from knack.util import CLIError
raise CLIError("Expected fully qualified resource ID: got '{}'".format(subnet))
VirtualNetworkRule = cmd.get_models('VirtualNetworkRule')
if not rules.virtual_network_rules:
rules.virtual_network_rules = []
rules.virtual_network_rules.append(VirtualNetworkRule(virtual_network_resource_id=subnet, action=action))
if ip_address:
IpRule = cmd.get_models('IPRule')
if not rules.ip_rules:
rules.ip_rules = []
rules.ip_rules.append(IpRule(ip_address_or_range=ip_address, action=action))
StorageAccountUpdateParameters = cmd.get_models('StorageAccountUpdateParameters')
params = StorageAccountUpdateParameters(network_rule_set=rules)
return client.update(resource_group_name, account_name, params)
def remove_network_rule(cmd, client, resource_group_name, account_name, ip_address=None, subnet=None,
vnet_name=None): # pylint: disable=unused-argument
sa = client.get_properties(resource_group_name, account_name)
rules = sa.network_rule_set
if subnet:
rules.virtual_network_rules = [x for x in rules.virtual_network_rules
if not x.virtual_network_resource_id.endswith(subnet)]
if ip_address:
rules.ip_rules = [x for x in rules.ip_rules if x.ip_address_or_range != ip_address]
StorageAccountUpdateParameters = cmd.get_models('StorageAccountUpdateParameters')
params = StorageAccountUpdateParameters(network_rule_set=rules)
return client.update(resource_group_name, account_name, params)
def create_management_policies(client, resource_group_name, account_name, policy=None):
if policy:
if os.path.exists(policy):
policy = get_file_json(policy)
else:
policy = shell_safe_json_parse(policy)
return client.create_or_update(resource_group_name, account_name, policy=policy)
def update_management_policies(client, resource_group_name, account_name, parameters=None):
if parameters:
parameters = parameters.policy
return client.create_or_update(resource_group_name, account_name, policy=parameters)
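# Illustrative calls (resource names are placeholders). Per the branch above,
# `policy` may be either a path to a JSON file on disk or an inline JSON string:
#   create_management_policies(client, 'myRG', 'mystorageaccount', policy='policy.json')
#   create_management_policies(client, 'myRG', 'mystorageaccount', policy='{"rules": []}')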
| 59.017964
| 164
| 0.674006
|
ae5041504f1564f14d2f8772091b64cf5ef5e8fb
| 1,844
|
py
|
Python
|
parsers/xilinx/virtex_six.py
|
bradysalz/fpga-pin-trends
|
19c489a1f14233f4f739a2b8a4a64bfca69c5807
|
[
"MIT"
] | null | null | null |
parsers/xilinx/virtex_six.py
|
bradysalz/fpga-pin-trends
|
19c489a1f14233f4f739a2b8a4a64bfca69c5807
|
[
"MIT"
] | null | null | null |
parsers/xilinx/virtex_six.py
|
bradysalz/fpga-pin-trends
|
19c489a1f14233f4f739a2b8a4a64bfca69c5807
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import csv
from pathlib import Path
from typing import Dict, List
import toml
from db.pin import Pin
def _pin_type_cleanup(ptype: str) -> str:
if 'VCC' in ptype:
return 'VCC'
if 'GND' in ptype:
return 'GND'
if 'IO' in ptype:
return 'IO'
return ptype
def parse_xilinx_virtex_six(filepath: Path, config: Dict) -> List[Pin]:
"""Parse a Xilinx Virtex-6 pinout to a Pin list.
Args:
filepath: path to the pinout file
config: dictionary from loaded configuration TOML file
Returns:
Pin list of parsed data
"""
part_name = filepath.stem[:-3].upper()
year = config['year']
node = config['node']
manufacturer = config['manufacturer']
family = config['family']
pins = []
with open(filepath, 'r') as f:
reader = csv.reader(f, delimiter='\t')
_, _, _, _, *data, _, _ = reader # removes junk rows
for row in data:
if len(row) == 0 or row[0] == '':
continue
pin_id = row[0]
if len(row) == 2:
pin_name = row[1]
else:
pin_name = row[2]
pin_type = _pin_type_cleanup(pin_name)
new_pin = Pin(
year,
node,
manufacturer,
family,
part_name,
pin_name,
pin_id,
pin_type,
)
pins.append(new_pin)
return pins
if __name__ == '__main__':
with open('data/xilinx/virtex_six/overview.toml', 'r') as f:
config = toml.load(f)
x = parse_xilinx_virtex_six(
Path('data/xilinx/virtex_six/6vlx760ff1760pkg.txt'),
config,
)
for y in x:
print(y.as_dict())
# print([y.as_dict() for y in x])
| 22.765432
| 71
| 0.527657
|
aaf35ee37c5ccca597620f9f4613cdba71f8d645
| 110,409
|
py
|
Python
|
django/db/models/sql/query.py
|
zeth/django
|
547656c85027eda85a24edcab907022ce313f772
|
[
"BSD-3-Clause",
"0BSD"
] | null | null | null |
django/db/models/sql/query.py
|
zeth/django
|
547656c85027eda85a24edcab907022ce313f772
|
[
"BSD-3-Clause",
"0BSD"
] | null | null | null |
django/db/models/sql/query.py
|
zeth/django
|
547656c85027eda85a24edcab907022ce313f772
|
[
"BSD-3-Clause",
"0BSD"
] | null | null | null |
"""
Create SQL statements for QuerySets.
The code in here encapsulates all of the SQL construction so that QuerySets
themselves do not have to (and could be backed by things other than SQL
databases). The abstraction barrier only works one way: this module has to know
all about the internals of models in order to get the information it needs.
"""
import copy
import difflib
import functools
import sys
from collections import Counter, namedtuple
from collections.abc import Iterator, Mapping
from itertools import chain, count, product
from string import ascii_uppercase
from django.core.exceptions import FieldDoesNotExist, FieldError
from django.db import DEFAULT_DB_ALIAS, NotSupportedError, connections
from django.db.models.aggregates import Count
from django.db.models.constants import LOOKUP_SEP
from django.db.models.expressions import (
BaseExpression, Col, Exists, F, OuterRef, Ref, ResolvedOuterRef,
)
from django.db.models.fields import Field
from django.db.models.fields.related_lookups import MultiColSource
from django.db.models.lookups import Lookup
from django.db.models.query_utils import (
Q, check_rel_lookup_compatibility, refs_expression,
)
from django.db.models.sql.constants import INNER, LOUTER, ORDER_DIR, SINGLE
from django.db.models.sql.datastructures import (
BaseTable, Empty, Join, MultiJoin,
)
from django.db.models.sql.where import (
AND, OR, ExtraWhere, NothingNode, WhereNode,
)
from django.utils.functional import cached_property
from django.utils.tree import Node
__all__ = ['Query', 'RawQuery']
def get_field_names_from_opts(opts):
return set(chain.from_iterable(
(f.name, f.attname) if f.concrete else (f.name,)
for f in opts.get_fields()
))
def get_children_from_q(q):
for child in q.children:
if isinstance(child, Node):
yield from get_children_from_q(child)
else:
yield child
JoinInfo = namedtuple(
'JoinInfo',
('final_field', 'targets', 'opts', 'joins', 'path', 'transform_function')
)
class RawQuery:
"""A single raw SQL query."""
def __init__(self, sql, using, params=()):
self.params = params
self.sql = sql
self.using = using
self.cursor = None
# Mirror some properties of a normal query so that
# the compiler can be used to process results.
self.low_mark, self.high_mark = 0, None # Used for offset/limit
self.extra_select = {}
self.annotation_select = {}
def chain(self, using):
return self.clone(using)
def clone(self, using):
return RawQuery(self.sql, using, params=self.params)
def get_columns(self):
if self.cursor is None:
self._execute_query()
converter = connections[self.using].introspection.identifier_converter
return [converter(column_meta[0])
for column_meta in self.cursor.description]
def __iter__(self):
# Always execute a new query for a new iterator.
# This could be optimized with a cache at the expense of RAM.
self._execute_query()
if not connections[self.using].features.can_use_chunked_reads:
# If the database can't use chunked reads we need to make sure we
# evaluate the entire query up front.
result = list(self.cursor)
else:
result = self.cursor
return iter(result)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self)
@property
def params_type(self):
if self.params is None:
return None
return dict if isinstance(self.params, Mapping) else tuple
def __str__(self):
if self.params_type is None:
return self.sql
return self.sql % self.params_type(self.params)
def _execute_query(self):
connection = connections[self.using]
# Adapt parameters to the database, as much as possible considering
# that the target type isn't known. See #17755.
params_type = self.params_type
adapter = connection.ops.adapt_unknown_value
if params_type is tuple:
params = tuple(adapter(val) for val in self.params)
elif params_type is dict:
params = {key: adapter(val) for key, val in self.params.items()}
elif params_type is None:
params = None
else:
raise RuntimeError("Unexpected params type: %s" % params_type)
self.cursor = connection.cursor()
self.cursor.execute(self.sql, params)
ExplainInfo = namedtuple('ExplainInfo', ('format', 'options'))
class Query(BaseExpression):
"""A single SQL query."""
alias_prefix = 'T'
empty_result_set_value = None
subq_aliases = frozenset([alias_prefix])
compiler = 'SQLCompiler'
base_table_class = BaseTable
join_class = Join
def __init__(self, model, alias_cols=True):
self.model = model
self.alias_refcount = {}
# alias_map is the most important data structure regarding joins.
# It's used for recording which joins exist in the query and what
# types they are. The key is the alias of the joined table (possibly
# the table name) and the value is a Join-like object (see
# sql.datastructures.Join for more information).
self.alias_map = {}
# Whether to provide alias to columns during reference resolving.
self.alias_cols = alias_cols
# Sometimes the query contains references to aliases in outer queries (as
# a result of split_exclude). Correct alias quoting needs to know these
# aliases too.
# Map external tables to whether they are aliased.
self.external_aliases = {}
self.table_map = {} # Maps table names to list of aliases.
self.default_cols = True
self.default_ordering = True
self.standard_ordering = True
self.used_aliases = set()
self.filter_is_sticky = False
self.subquery = False
# SQL-related attributes
# Select and related select clauses are expressions to use in the
# SELECT clause of the query.
# The select is used for cases where we want to set up the select
# clause to contain other than default fields (values(), subqueries...)
# Note that annotations go to annotations dictionary.
self.select = ()
self.where = WhereNode()
# The group_by attribute can have one of the following forms:
# - None: no group by at all in the query
# - A tuple of expressions: group by (at least) those expressions.
# String refs are also allowed for now.
# - True: group by all select fields of the model
# See compiler.get_group_by() for details.
self.group_by = None
self.order_by = ()
self.low_mark, self.high_mark = 0, None # Used for offset/limit
self.distinct = False
self.distinct_fields = ()
self.select_for_update = False
self.select_for_update_nowait = False
self.select_for_update_skip_locked = False
self.select_for_update_of = ()
self.select_for_no_key_update = False
self.select_related = False
# Arbitrary limit for select_related to prevent infinite recursion.
self.max_depth = 5
# Holds the selects defined by a call to values() or values_list()
# excluding annotation_select and extra_select.
self.values_select = ()
# SQL annotation-related attributes
self.annotations = {} # Maps alias -> Annotation Expression
self.annotation_select_mask = None
self._annotation_select_cache = None
# Set combination attributes
self.combinator = None
self.combinator_all = False
self.combined_queries = ()
# These are for extensions. The contents are more or less appended
# verbatim to the appropriate clause.
self.extra = {} # Maps col_alias -> (col_sql, params).
self.extra_select_mask = None
self._extra_select_cache = None
self.extra_tables = ()
self.extra_order_by = ()
# A tuple that is a set of model field names and either True, if these
# are the fields to defer, or False if these are the only fields to
# load.
self.deferred_loading = (frozenset(), True)
self._filtered_relations = {}
self.explain_info = None
@property
def output_field(self):
if len(self.select) == 1:
select = self.select[0]
return getattr(select, 'target', None) or select.field
elif len(self.annotation_select) == 1:
return next(iter(self.annotation_select.values())).output_field
@property
def has_select_fields(self):
return bool(self.select or self.annotation_select_mask or self.extra_select_mask)
@cached_property
def base_table(self):
for alias in self.alias_map:
return alias
def __str__(self):
"""
Return the query as a string of SQL with the parameter values
substituted in (use sql_with_params() to see the unsubstituted string).
Parameter values won't necessarily be quoted correctly, since that is
done by the database interface at execution time.
"""
sql, params = self.sql_with_params()
return sql % params
def sql_with_params(self):
"""
Return the query as an SQL string and the parameters that will be
substituted into the query.
"""
return self.get_compiler(DEFAULT_DB_ALIAS).as_sql()
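# Illustrative usage (model name is a placeholder): for a queryset such as
#     qs = Entry.objects.filter(pk=1)
# the underlying Query instance is exposed as qs.query, so str(qs.query) goes
# through __str__()/sql_with_params() above to render the SQL with its
# parameter values substituted in.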
def __deepcopy__(self, memo):
"""Limit the amount of work when a Query is deepcopied."""
result = self.clone()
memo[id(self)] = result
return result
def get_compiler(self, using=None, connection=None, elide_empty=True):
if using is None and connection is None:
raise ValueError("Need either using or connection")
if using:
connection = connections[using]
return connection.ops.compiler(self.compiler)(self, connection, using, elide_empty)
def get_meta(self):
"""
Return the Options instance (the model._meta) from which to start
processing. Normally, this is self.model._meta, but it can be changed
by subclasses.
"""
return self.model._meta
def clone(self):
"""
Return a copy of the current Query. A lightweight alternative to
deepcopy().
"""
obj = Empty()
obj.__class__ = self.__class__
# Copy references to everything.
obj.__dict__ = self.__dict__.copy()
# Clone attributes that can't use shallow copy.
obj.alias_refcount = self.alias_refcount.copy()
obj.alias_map = self.alias_map.copy()
obj.external_aliases = self.external_aliases.copy()
obj.table_map = self.table_map.copy()
obj.where = self.where.clone()
obj.annotations = self.annotations.copy()
if self.annotation_select_mask is not None:
obj.annotation_select_mask = self.annotation_select_mask.copy()
if self.combined_queries:
obj.combined_queries = tuple([
query.clone() for query in self.combined_queries
])
# _annotation_select_cache cannot be copied, as doing so breaks the
# (necessary) state in which both annotations and
# _annotation_select_cache point to the same underlying objects.
# It will get re-populated in the cloned queryset the next time it's
# used.
obj._annotation_select_cache = None
obj.extra = self.extra.copy()
if self.extra_select_mask is not None:
obj.extra_select_mask = self.extra_select_mask.copy()
if self._extra_select_cache is not None:
obj._extra_select_cache = self._extra_select_cache.copy()
if self.select_related is not False:
# Use deepcopy because select_related stores fields in nested
# dicts.
obj.select_related = copy.deepcopy(obj.select_related)
if 'subq_aliases' in self.__dict__:
obj.subq_aliases = self.subq_aliases.copy()
obj.used_aliases = self.used_aliases.copy()
obj._filtered_relations = self._filtered_relations.copy()
# Clear the cached_property
try:
del obj.base_table
except AttributeError:
pass
return obj
def chain(self, klass=None):
"""
Return a copy of the current Query that's ready for another operation.
The klass argument changes the type of the Query, e.g. UpdateQuery.
"""
obj = self.clone()
if klass and obj.__class__ != klass:
obj.__class__ = klass
if not obj.filter_is_sticky:
obj.used_aliases = set()
obj.filter_is_sticky = False
if hasattr(obj, '_setup_query'):
obj._setup_query()
return obj
def relabeled_clone(self, change_map):
clone = self.clone()
clone.change_aliases(change_map)
return clone
def _get_col(self, target, field, alias):
if not self.alias_cols:
alias = None
return target.get_col(alias, field)
def rewrite_cols(self, annotation, col_cnt):
# We must make sure the inner query has the referred columns in it.
# If we are aggregating over an annotation, then Django uses Ref()
# instances to note this. However, if we are annotating over a column
# of a related model, then it might be that column isn't part of the
# SELECT clause of the inner query, and we must manually make sure
# the column is selected. An example case is:
# .aggregate(Sum('author__awards'))
# Resolving this expression results in a join to author, but there
# is no guarantee the awards column of author is in the select clause
# of the query. Thus we must manually add the column to the inner
# query.
orig_exprs = annotation.get_source_expressions()
new_exprs = []
for expr in orig_exprs:
# FIXME: These conditions are fairly arbitrary. Identify a better
# method of having expressions decide which code path they should
# take.
if isinstance(expr, Ref):
# It's already a Ref to a subquery (see resolve_ref() for
# details)
new_exprs.append(expr)
elif isinstance(expr, (WhereNode, Lookup)):
# Decompose the subexpressions further. The code here is
# copied from the else clause, but this condition must appear
# before the contains_aggregate/is_summary condition below.
new_expr, col_cnt = self.rewrite_cols(expr, col_cnt)
new_exprs.append(new_expr)
else:
# Reuse aliases of expressions already selected in subquery.
for col_alias, selected_annotation in self.annotation_select.items():
if selected_annotation is expr:
new_expr = Ref(col_alias, expr)
break
else:
# An expression that is not selected in the subquery.
if isinstance(expr, Col) or (expr.contains_aggregate and not expr.is_summary):
# Reference column or another aggregate. Select it
# under a non-conflicting alias.
col_cnt += 1
col_alias = '__col%d' % col_cnt
self.annotations[col_alias] = expr
self.append_annotation_mask([col_alias])
new_expr = Ref(col_alias, expr)
else:
# Some other expression not referencing database values
# directly. Its subexpression might contain Cols.
new_expr, col_cnt = self.rewrite_cols(expr, col_cnt)
new_exprs.append(new_expr)
annotation.set_source_expressions(new_exprs)
return annotation, col_cnt
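# Illustrative case for the comment at the top of rewrite_cols() (model and
# field names are placeholders): an aggregation over a related column such as
#     Book.objects.aggregate(Sum('author__awards'))
# joins the author table, and rewrite_cols() ensures the referenced awards
# column is selected in the inner query so the outer aggregate can use it.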
def get_aggregation(self, using, added_aggregate_names):
"""
Return the dictionary with the values of the existing aggregations.
"""
if not self.annotation_select:
return {}
existing_annotations = [
annotation for alias, annotation
in self.annotations.items()
if alias not in added_aggregate_names
]
# Decide if we need to use a subquery.
#
# Existing annotations would cause incorrect results as get_aggregation()
# must produce just one result and thus must not use GROUP BY. But we
# aren't smart enough to remove the existing annotations from the
# query, so those would force us to use GROUP BY.
#
# If the query has limit or distinct, or uses set operations, then
# those operations must be done in a subquery so that the query
# aggregates on the limit and/or distinct results instead of applying
# the distinct and limit after the aggregation.
if (isinstance(self.group_by, tuple) or self.is_sliced or existing_annotations or
self.distinct or self.combinator):
from django.db.models.sql.subqueries import AggregateQuery
inner_query = self.clone()
inner_query.subquery = True
outer_query = AggregateQuery(self.model, inner_query)
inner_query.select_for_update = False
inner_query.select_related = False
inner_query.set_annotation_mask(self.annotation_select)
# Queries with distinct_fields need ordering and when a limit is
# applied we must take the slice from the ordered query. Otherwise
# no need for ordering.
inner_query.clear_ordering(force=False)
if not inner_query.distinct:
# If the inner query uses default select and it has some
# aggregate annotations, then we must make sure the inner
# query is grouped by the main model's primary key. However,
# clearing the select clause can alter results if distinct is
# used.
has_existing_aggregate_annotations = any(
annotation for annotation in existing_annotations
if getattr(annotation, 'contains_aggregate', True)
)
if inner_query.default_cols and has_existing_aggregate_annotations:
inner_query.group_by = (self.model._meta.pk.get_col(inner_query.get_initial_alias()),)
inner_query.default_cols = False
relabels = {t: 'subquery' for t in inner_query.alias_map}
relabels[None] = 'subquery'
# Remove any aggregates marked for reduction from the subquery
# and move them to the outer AggregateQuery.
col_cnt = 0
for alias, expression in list(inner_query.annotation_select.items()):
annotation_select_mask = inner_query.annotation_select_mask
if expression.is_summary:
expression, col_cnt = inner_query.rewrite_cols(expression, col_cnt)
outer_query.annotations[alias] = expression.relabeled_clone(relabels)
del inner_query.annotations[alias]
annotation_select_mask.remove(alias)
# Make sure the annotation_select won't use cached results.
inner_query.set_annotation_mask(inner_query.annotation_select_mask)
if inner_query.select == () and not inner_query.default_cols and not inner_query.annotation_select_mask:
# In case of Model.objects[0:3].count(), there would be no
# field selected in the inner query, yet we must use a subquery.
# So, make sure at least one field is selected.
inner_query.select = (self.model._meta.pk.get_col(inner_query.get_initial_alias()),)
else:
outer_query = self
self.select = ()
self.default_cols = False
self.extra = {}
empty_set_result = [
expression.empty_result_set_value
for expression in outer_query.annotation_select.values()
]
elide_empty = not any(result is NotImplemented for result in empty_set_result)
outer_query.clear_ordering(force=True)
outer_query.clear_limits()
outer_query.select_for_update = False
outer_query.select_related = False
compiler = outer_query.get_compiler(using, elide_empty=elide_empty)
result = compiler.execute_sql(SINGLE)
if result is None:
result = empty_set_result
converters = compiler.get_converters(outer_query.annotation_select.values())
result = next(compiler.apply_converters((result,), converters))
return dict(zip(outer_query.annotation_select, result))
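# Illustrative trigger for the subquery path above (model name is a
# placeholder): a sliced call such as Model.objects.all()[0:3].count() carries
# a limit, so the aggregate is computed over a wrapped inner query rather than
# directly on the sliced query.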
def get_count(self, using):
"""
Perform a COUNT() query using the current filter constraints.
"""
obj = self.clone()
obj.add_annotation(Count('*'), alias='__count', is_summary=True)
return obj.get_aggregation(using, ['__count'])['__count']
def has_filters(self):
return self.where
def exists(self, using, limit=True):
q = self.clone()
if not q.distinct:
if q.group_by is True:
q.add_fields((f.attname for f in self.model._meta.concrete_fields), False)
# Disable GROUP BY aliases to avoid orphaning references to the
# SELECT clause which is about to be cleared.
q.set_group_by(allow_aliases=False)
q.clear_select_clause()
if q.combined_queries and q.combinator == 'union':
limit_combined = connections[using].features.supports_slicing_ordering_in_compound
q.combined_queries = tuple(
combined_query.exists(using, limit=limit_combined)
for combined_query in q.combined_queries
)
q.clear_ordering(force=True)
if limit:
q.set_limits(high=1)
q.add_extra({'a': 1}, None, None, None, None, None)
q.set_extra_mask(['a'])
return q
def has_results(self, using):
q = self.exists(using)
compiler = q.get_compiler(using=using)
return compiler.has_results()
def explain(self, using, format=None, **options):
q = self.clone()
q.explain_info = ExplainInfo(format, options)
compiler = q.get_compiler(using=using)
return '\n'.join(compiler.explain_query())
def combine(self, rhs, connector):
"""
        Merge the 'rhs' query into the current one, with any 'rhs' effects
        being applied *after* (that is, "to the right of") anything in the
        current query. 'rhs' is not modified during a call to this function.
The 'connector' parameter describes how to connect filters from the
'rhs' query.
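        For illustration, a sketch of how the ORM reaches this method (Book
        is a hypothetical model):
            qs1 = Book.objects.filter(title__icontains='django')
            qs2 = Book.objects.filter(pages__gt=300)
            combined = qs1 | qs2  # roughly: qs1.query.combine(qs2.query, OR)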
"""
if self.model != rhs.model:
raise TypeError('Cannot combine queries on two different base models.')
if self.is_sliced:
raise TypeError('Cannot combine queries once a slice has been taken.')
if self.distinct != rhs.distinct:
raise TypeError('Cannot combine a unique query with a non-unique query.')
if self.distinct_fields != rhs.distinct_fields:
raise TypeError('Cannot combine queries with different distinct fields.')
# Work out how to relabel the rhs aliases, if necessary.
change_map = {}
conjunction = (connector == AND)
# Determine which existing joins can be reused. When combining the
# query with AND we must recreate all joins for m2m filters. When
# combining with OR we can reuse joins. The reason is that in AND
# case a single row can't fulfill a condition like:
# revrel__col=1 & revrel__col=2
# But, there might be two different related rows matching this
# condition. In OR case a single True is enough, so single row is
# enough, too.
#
# Note that we will be creating duplicate joins for non-m2m joins in
# the AND case. The results will be correct but this creates too many
# joins. This is something that could be fixed later on.
reuse = set() if conjunction else set(self.alias_map)
# Base table must be present in the query - this is the same
# table on both sides.
self.get_initial_alias()
joinpromoter = JoinPromoter(connector, 2, False)
joinpromoter.add_votes(
j for j in self.alias_map if self.alias_map[j].join_type == INNER)
rhs_votes = set()
# Now, add the joins from rhs query into the new query (skipping base
# table).
rhs_tables = list(rhs.alias_map)[1:]
for alias in rhs_tables:
join = rhs.alias_map[alias]
# If the left side of the join was already relabeled, use the
# updated alias.
join = join.relabeled_clone(change_map)
new_alias = self.join(join, reuse=reuse)
if join.join_type == INNER:
rhs_votes.add(new_alias)
# We can't reuse the same join again in the query. If we have two
# distinct joins for the same connection in rhs query, then the
# combined query must have two joins, too.
reuse.discard(new_alias)
if alias != new_alias:
change_map[alias] = new_alias
if not rhs.alias_refcount[alias]:
# The alias was unused in the rhs query. Unref it so that it
# will be unused in the new query, too. We have to add and
# unref the alias so that join promotion has information of
# the join type for the unused alias.
self.unref_alias(new_alias)
joinpromoter.add_votes(rhs_votes)
joinpromoter.update_join_types(self)
# Combine subqueries aliases to ensure aliases relabelling properly
# handle subqueries when combining where and select clauses.
self.subq_aliases |= rhs.subq_aliases
# Now relabel a copy of the rhs where-clause and add it to the current
# one.
w = rhs.where.clone()
w.relabel_aliases(change_map)
self.where.add(w, connector)
# Selection columns and extra extensions are those provided by 'rhs'.
if rhs.select:
self.set_select([col.relabeled_clone(change_map) for col in rhs.select])
else:
self.select = ()
if connector == OR:
# It would be nice to be able to handle this, but the queries don't
# really make sense (or return consistent value sets). Not worth
# the extra complexity when you can write a real query instead.
if self.extra and rhs.extra:
raise ValueError("When merging querysets using 'or', you cannot have extra(select=...) on both sides.")
self.extra.update(rhs.extra)
extra_select_mask = set()
if self.extra_select_mask is not None:
extra_select_mask.update(self.extra_select_mask)
if rhs.extra_select_mask is not None:
extra_select_mask.update(rhs.extra_select_mask)
if extra_select_mask:
self.set_extra_mask(extra_select_mask)
self.extra_tables += rhs.extra_tables
# Ordering uses the 'rhs' ordering, unless it has none, in which case
# the current ordering is used.
self.order_by = rhs.order_by or self.order_by
self.extra_order_by = rhs.extra_order_by or self.extra_order_by
def deferred_to_data(self, target, callback):
"""
Convert the self.deferred_loading data structure to an alternate data
structure, describing the field that *will* be loaded. This is used to
compute the columns to select from the database and also by the
QuerySet class to work out which fields are being initialized on each
model. Models that have all their fields included aren't mentioned in
the result, only those that have field restrictions in place.
The "target" parameter is the instance that is populated (in place).
The "callback" is a function that is called whenever a (model, field)
pair need to be added to "target". It accepts three parameters:
"target", and the model and list of fields being added for that model.
"""
field_names, defer = self.deferred_loading
if not field_names:
return
orig_opts = self.get_meta()
seen = {}
must_include = {orig_opts.concrete_model: {orig_opts.pk}}
for field_name in field_names:
parts = field_name.split(LOOKUP_SEP)
cur_model = self.model._meta.concrete_model
opts = orig_opts
for name in parts[:-1]:
old_model = cur_model
if name in self._filtered_relations:
name = self._filtered_relations[name].relation_name
source = opts.get_field(name)
if is_reverse_o2o(source):
cur_model = source.related_model
else:
cur_model = source.remote_field.model
opts = cur_model._meta
# Even if we're "just passing through" this model, we must add
# both the current model's pk and the related reference field
# (if it's not a reverse relation) to the things we select.
if not is_reverse_o2o(source):
must_include[old_model].add(source)
add_to_dict(must_include, cur_model, opts.pk)
field = opts.get_field(parts[-1])
is_reverse_object = field.auto_created and not field.concrete
model = field.related_model if is_reverse_object else field.model
model = model._meta.concrete_model
if model == opts.model:
model = cur_model
if not is_reverse_o2o(field):
add_to_dict(seen, model, field)
if defer:
# We need to load all fields for each model, except those that
# appear in "seen" (for all models that appear in "seen"). The only
# slight complexity here is handling fields that exist on parent
# models.
workset = {}
for model, values in seen.items():
for field in model._meta.local_fields:
if field not in values:
m = field.model._meta.concrete_model
add_to_dict(workset, m, field)
for model, values in must_include.items():
# If we haven't included a model in workset, we don't add the
# corresponding must_include fields for that model, since an
# empty set means "include all fields". That's why there's no
# "else" branch here.
if model in workset:
workset[model].update(values)
for model, values in workset.items():
callback(target, model, values)
else:
for model, values in must_include.items():
if model in seen:
seen[model].update(values)
else:
# As we've passed through this model, but not explicitly
# included any fields, we have to make sure it's mentioned
# so that only the "must include" fields are pulled in.
seen[model] = values
# Now ensure that every model in the inheritance chain is mentioned
# in the parent list. Again, it must be mentioned to ensure that
# only "must include" fields are pulled in.
for model in orig_opts.get_parent_list():
seen.setdefault(model, set())
for model, values in seen.items():
callback(target, model, values)
def table_alias(self, table_name, create=False, filtered_relation=None):
"""
Return a table alias for the given table_name and whether this is a
new alias or not.
If 'create' is true, a new alias is always created. Otherwise, the
most recently created alias for the table (if one exists) is reused.
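        A sketch of the intended behaviour ('app_book' is a hypothetical
        table name; generated alias names depend on self.alias_prefix):
            self.table_alias('app_book')               # ('app_book', True)
            self.table_alias('app_book')               # ('app_book', False)
            self.table_alias('app_book', create=True)  # e.g. ('T1', True)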
"""
alias_list = self.table_map.get(table_name)
if not create and alias_list:
alias = alias_list[0]
self.alias_refcount[alias] += 1
return alias, False
# Create a new alias for this table.
if alias_list:
alias = '%s%d' % (self.alias_prefix, len(self.alias_map) + 1)
alias_list.append(alias)
else:
# The first occurrence of a table uses the table name directly.
alias = filtered_relation.alias if filtered_relation is not None else table_name
self.table_map[table_name] = [alias]
self.alias_refcount[alias] = 1
return alias, True
def ref_alias(self, alias):
"""Increases the reference count for this alias."""
self.alias_refcount[alias] += 1
def unref_alias(self, alias, amount=1):
"""Decreases the reference count for this alias."""
self.alias_refcount[alias] -= amount
def promote_joins(self, aliases):
"""
        Promote recursively the join type of the given aliases and their
        children to an outer join. A join is only promoted if it is nullable
        or the parent join is an outer join.
The children promotion is done to avoid join chains that contain a LOUTER
b INNER c. So, if we have currently a INNER b INNER c and a->b is promoted,
then we must also promote b->c automatically, or otherwise the promotion
of a->b doesn't actually change anything in the query results.
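        A sketch of the chain handling described above (aliases are
        hypothetical; assume 'b' and 'c' are nullable joins):
            # Current chain: a INNER b INNER c
            self.promote_joins(['b'])
            # Result: a LOUTER b LOUTER c -- 'c' is promoted too, because its
            # parent join 'b' became LOUTER.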
"""
aliases = list(aliases)
while aliases:
alias = aliases.pop(0)
if self.alias_map[alias].join_type is None:
# This is the base table (first FROM entry) - this table
# isn't really joined at all in the query, so we should not
# alter its join type.
continue
# Only the first alias (skipped above) should have None join_type
assert self.alias_map[alias].join_type is not None
parent_alias = self.alias_map[alias].parent_alias
parent_louter = parent_alias and self.alias_map[parent_alias].join_type == LOUTER
already_louter = self.alias_map[alias].join_type == LOUTER
if ((self.alias_map[alias].nullable or parent_louter) and
not already_louter):
self.alias_map[alias] = self.alias_map[alias].promote()
# Join type of 'alias' changed, so re-examine all aliases that
# refer to this one.
aliases.extend(
join for join in self.alias_map
if self.alias_map[join].parent_alias == alias and join not in aliases
)
def demote_joins(self, aliases):
"""
Change join type from LOUTER to INNER for all joins in aliases.
Similarly to promote_joins(), this method must ensure no join chains
containing first an outer, then an inner join are generated. If we
are demoting b->c join in chain a LOUTER b LOUTER c then we must
demote a->b automatically, or otherwise the demotion of b->c doesn't
        actually change anything in the query results.
"""
aliases = list(aliases)
while aliases:
alias = aliases.pop(0)
if self.alias_map[alias].join_type == LOUTER:
self.alias_map[alias] = self.alias_map[alias].demote()
parent_alias = self.alias_map[alias].parent_alias
if self.alias_map[parent_alias].join_type == INNER:
aliases.append(parent_alias)
def reset_refcounts(self, to_counts):
"""
Reset reference counts for aliases so that they match the value passed
in `to_counts`.
"""
for alias, cur_refcount in self.alias_refcount.copy().items():
unref_amount = cur_refcount - to_counts.get(alias, 0)
self.unref_alias(alias, unref_amount)
def change_aliases(self, change_map):
"""
Change the aliases in change_map (which maps old-alias -> new-alias),
relabelling any references to them in select columns and the where
clause.
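        For example (aliases are hypothetical):
            self.change_aliases({'T4': 'T5'})
            # References to T4 in select, group by, where, annotations and in
            # the alias bookkeeping structures now point at T5.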
"""
assert set(change_map).isdisjoint(change_map.values())
# 1. Update references in "select" (normal columns plus aliases),
# "group by" and "where".
self.where.relabel_aliases(change_map)
if isinstance(self.group_by, tuple):
self.group_by = tuple([col.relabeled_clone(change_map) for col in self.group_by])
self.select = tuple([col.relabeled_clone(change_map) for col in self.select])
self.annotations = self.annotations and {
key: col.relabeled_clone(change_map) for key, col in self.annotations.items()
}
# 2. Rename the alias in the internal table/alias datastructures.
for old_alias, new_alias in change_map.items():
if old_alias not in self.alias_map:
continue
alias_data = self.alias_map[old_alias].relabeled_clone(change_map)
self.alias_map[new_alias] = alias_data
self.alias_refcount[new_alias] = self.alias_refcount[old_alias]
del self.alias_refcount[old_alias]
del self.alias_map[old_alias]
table_aliases = self.table_map[alias_data.table_name]
for pos, alias in enumerate(table_aliases):
if alias == old_alias:
table_aliases[pos] = new_alias
break
self.external_aliases = {
# Table is aliased or it's being changed and thus is aliased.
change_map.get(alias, alias): (aliased or alias in change_map)
for alias, aliased in self.external_aliases.items()
}
def bump_prefix(self, outer_query):
"""
Change the alias prefix to the next letter in the alphabet in a way
that the outer query's aliases and this query's aliases will not
conflict. Even tables that previously had no alias will get an alias
after this call.
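        A sketch of the effect (prefixes and aliases are illustrative):
            # Both queries currently use the 'T' prefix.
            self.bump_prefix(outer_query)
            # self.alias_prefix becomes 'U' (or the next free prefix) and the
            # existing aliases are relabelled to 'U0', 'U1', ...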
"""
def prefix_gen():
"""
Generate a sequence of characters in alphabetical order:
-> 'A', 'B', 'C', ...
When the alphabet is finished, the sequence will continue with the
Cartesian product:
-> 'AA', 'AB', 'AC', ...
"""
alphabet = ascii_uppercase
prefix = chr(ord(self.alias_prefix) + 1)
yield prefix
for n in count(1):
seq = alphabet[alphabet.index(prefix):] if prefix else alphabet
for s in product(seq, repeat=n):
yield ''.join(s)
prefix = None
if self.alias_prefix != outer_query.alias_prefix:
# No clashes between self and outer query should be possible.
return
# Explicitly avoid infinite loop. The constant divider is based on how
# much depth recursive subquery references add to the stack. This value
# might need to be adjusted when adding or removing function calls from
# the code path in charge of performing these operations.
local_recursion_limit = sys.getrecursionlimit() // 16
for pos, prefix in enumerate(prefix_gen()):
if prefix not in self.subq_aliases:
self.alias_prefix = prefix
break
if pos > local_recursion_limit:
raise RecursionError(
'Maximum recursion depth exceeded: too many subqueries.'
)
self.subq_aliases = self.subq_aliases.union([self.alias_prefix])
outer_query.subq_aliases = outer_query.subq_aliases.union(self.subq_aliases)
self.change_aliases({
alias: '%s%d' % (self.alias_prefix, pos)
for pos, alias in enumerate(self.alias_map)
})
def get_initial_alias(self):
"""
Return the first alias for this query, after increasing its reference
count.
"""
if self.alias_map:
alias = self.base_table
self.ref_alias(alias)
else:
alias = self.join(self.base_table_class(self.get_meta().db_table, None))
return alias
def count_active_tables(self):
"""
Return the number of tables in this query with a non-zero reference
count. After execution, the reference counts are zeroed, so tables
added in compiler will not be seen by this method.
"""
return len([1 for count in self.alias_refcount.values() if count])
def join(self, join, reuse=None):
"""
Return an alias for the 'join', either reusing an existing alias for
that join or creating a new one. 'join' is either a base_table_class or
join_class.
The 'reuse' parameter can be either None which means all joins are
reusable, or it can be a set containing the aliases that can be reused.
A join is always created as LOUTER if the lhs alias is LOUTER to make
sure chains like t1 LOUTER t2 INNER t3 aren't generated. All new
joins are created as LOUTER if the join is nullable.
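        A sketch of the reuse behaviour ('some_join' is a hypothetical join
        object):
            alias = self.join(some_join)              # a new alias is created
            again = self.join(some_join, reuse=None)  # reused: again == alias
            # Passing reuse=set() instead would force a second, new alias.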
"""
reuse_aliases = [
a for a, j in self.alias_map.items()
if (reuse is None or a in reuse) and j.equals(join)
]
if reuse_aliases:
if join.table_alias in reuse_aliases:
reuse_alias = join.table_alias
else:
# Reuse the most recent alias of the joined table
# (a many-to-many relation may be joined multiple times).
reuse_alias = reuse_aliases[-1]
self.ref_alias(reuse_alias)
return reuse_alias
# No reuse is possible, so we need a new alias.
alias, _ = self.table_alias(join.table_name, create=True, filtered_relation=join.filtered_relation)
if join.join_type:
if self.alias_map[join.parent_alias].join_type == LOUTER or join.nullable:
join_type = LOUTER
else:
join_type = INNER
join.join_type = join_type
join.table_alias = alias
self.alias_map[alias] = join
return alias
def join_parent_model(self, opts, model, alias, seen):
"""
Make sure the given 'model' is joined in the query. If 'model' isn't
a parent of 'opts' or if it is None this method is a no-op.
The 'alias' is the root alias for starting the join, 'seen' is a dict
of model -> alias of existing joins. It must also contain a mapping
of None -> some alias. This will be returned in the no-op case.
"""
if model in seen:
return seen[model]
chain = opts.get_base_chain(model)
if not chain:
return alias
curr_opts = opts
for int_model in chain:
if int_model in seen:
curr_opts = int_model._meta
alias = seen[int_model]
continue
            # Proxy models have elements in their base chain with no
            # parents; assign the new options object and skip to the
            # next base in that case.
if not curr_opts.parents[int_model]:
curr_opts = int_model._meta
continue
link_field = curr_opts.get_ancestor_link(int_model)
join_info = self.setup_joins([link_field.name], curr_opts, alias)
curr_opts = int_model._meta
alias = seen[int_model] = join_info.joins[-1]
return alias or seen[None]
def add_annotation(self, annotation, alias, is_summary=False, select=True):
"""Add a single annotation expression to the Query."""
annotation = annotation.resolve_expression(self, allow_joins=True, reuse=None,
summarize=is_summary)
if select:
self.append_annotation_mask([alias])
else:
self.set_annotation_mask(set(self.annotation_select).difference({alias}))
self.annotations[alias] = annotation
def resolve_expression(self, query, *args, **kwargs):
clone = self.clone()
# Subqueries need to use a different set of aliases than the outer query.
clone.bump_prefix(query)
clone.subquery = True
clone.where.resolve_expression(query, *args, **kwargs)
for key, value in clone.annotations.items():
resolved = value.resolve_expression(query, *args, **kwargs)
if hasattr(resolved, 'external_aliases'):
resolved.external_aliases.update(clone.external_aliases)
clone.annotations[key] = resolved
# Outer query's aliases are considered external.
for alias, table in query.alias_map.items():
clone.external_aliases[alias] = (
(isinstance(table, Join) and table.join_field.related_model._meta.db_table != alias) or
(isinstance(table, BaseTable) and table.table_name != table.table_alias)
)
return clone
def get_external_cols(self):
exprs = chain(self.annotations.values(), self.where.children)
return [
col for col in self._gen_cols(exprs, include_external=True)
if col.alias in self.external_aliases
]
def get_group_by_cols(self, alias=None):
if alias:
return [Ref(alias, self)]
external_cols = self.get_external_cols()
if any(col.possibly_multivalued for col in external_cols):
return [self]
return external_cols
def as_sql(self, compiler, connection):
# Some backends (e.g. Oracle) raise an error when a subquery contains
# unnecessary ORDER BY clause.
if (
self.subquery and
not connection.features.ignores_unnecessary_order_by_in_subqueries
):
self.clear_ordering(force=False)
sql, params = self.get_compiler(connection=connection).as_sql()
if self.subquery:
sql = '(%s)' % sql
return sql, params
def resolve_lookup_value(self, value, can_reuse, allow_joins):
if hasattr(value, 'resolve_expression'):
value = value.resolve_expression(
self, reuse=can_reuse, allow_joins=allow_joins,
)
elif isinstance(value, (list, tuple)):
# The items of the iterable may be expressions and therefore need
# to be resolved independently.
values = (
self.resolve_lookup_value(sub_value, can_reuse, allow_joins)
for sub_value in value
)
type_ = type(value)
if hasattr(type_, '_make'): # namedtuple
return type_(*values)
return type_(values)
return value
def solve_lookup_type(self, lookup):
"""
Solve the lookup type from the lookup (e.g.: 'foobar__id__icontains').
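        For example, assuming a hypothetical 'author' relation with a 'name'
        field:
            self.solve_lookup_type('author__name__icontains')
            # -> (['icontains'], ['author', 'name'], False)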
"""
lookup_splitted = lookup.split(LOOKUP_SEP)
if self.annotations:
expression, expression_lookups = refs_expression(lookup_splitted, self.annotations)
if expression:
return expression_lookups, (), expression
_, field, _, lookup_parts = self.names_to_path(lookup_splitted, self.get_meta())
field_parts = lookup_splitted[0:len(lookup_splitted) - len(lookup_parts)]
if len(lookup_parts) > 1 and not field_parts:
raise FieldError(
'Invalid lookup "%s" for model %s".' %
(lookup, self.get_meta().model.__name__)
)
return lookup_parts, field_parts, False
def check_query_object_type(self, value, opts, field):
"""
Check whether the object passed while querying is of the correct type.
If not, raise a ValueError specifying the wrong object.
"""
if hasattr(value, '_meta'):
if not check_rel_lookup_compatibility(value._meta.model, opts, field):
raise ValueError(
'Cannot query "%s": Must be "%s" instance.' %
(value, opts.object_name))
def check_related_objects(self, field, value, opts):
"""Check the type of object passed to query relations."""
if field.is_relation:
# Check that the field and the queryset use the same model in a
# query like .filter(author=Author.objects.all()). For example, the
# opts would be Author's (from the author field) and value.model
# would be Author.objects.all() queryset's .model (Author also).
# The field is the related field on the lhs side.
if (isinstance(value, Query) and not value.has_select_fields and
not check_rel_lookup_compatibility(value.model, opts, field)):
raise ValueError(
'Cannot use QuerySet for "%s": Use a QuerySet for "%s".' %
(value.model._meta.object_name, opts.object_name)
)
elif hasattr(value, '_meta'):
self.check_query_object_type(value, opts, field)
elif hasattr(value, '__iter__'):
for v in value:
self.check_query_object_type(v, opts, field)
def check_filterable(self, expression):
"""Raise an error if expression cannot be used in a WHERE clause."""
if (
hasattr(expression, 'resolve_expression') and
not getattr(expression, 'filterable', True)
):
raise NotSupportedError(
expression.__class__.__name__ + ' is disallowed in the filter '
'clause.'
)
if hasattr(expression, 'get_source_expressions'):
for expr in expression.get_source_expressions():
self.check_filterable(expr)
def build_lookup(self, lookups, lhs, rhs):
"""
Try to extract transforms and lookup from given lhs.
The lhs value is something that works like SQLExpression.
The rhs value is what the lookup is going to compare against.
The lookups is a list of names to extract using get_lookup()
and get_transform().
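        A sketch, assuming 'col' is a column expression for a hypothetical
        DateField named 'pub_date':
            lookup = self.build_lookup(['year', 'gt'], col, 2008)
            # Applies the 'year' transform to col, then returns a 'gt' lookup
            # comparing the extracted year against 2008.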
"""
# __exact is the default lookup if one isn't given.
*transforms, lookup_name = lookups or ['exact']
for name in transforms:
lhs = self.try_transform(lhs, name)
# First try get_lookup() so that the lookup takes precedence if the lhs
# supports both transform and lookup for the name.
lookup_class = lhs.get_lookup(lookup_name)
if not lookup_class:
if lhs.field.is_relation:
raise FieldError('Related Field got invalid lookup: {}'.format(lookup_name))
# A lookup wasn't found. Try to interpret the name as a transform
# and do an Exact lookup against it.
lhs = self.try_transform(lhs, lookup_name)
lookup_name = 'exact'
lookup_class = lhs.get_lookup(lookup_name)
if not lookup_class:
return
lookup = lookup_class(lhs, rhs)
# Interpret '__exact=None' as the sql 'is NULL'; otherwise, reject all
# uses of None as a query value unless the lookup supports it.
if lookup.rhs is None and not lookup.can_use_none_as_rhs:
if lookup_name not in ('exact', 'iexact'):
raise ValueError("Cannot use None as a query value")
return lhs.get_lookup('isnull')(lhs, True)
# For Oracle '' is equivalent to null. The check must be done at this
# stage because join promotion can't be done in the compiler. Using
# DEFAULT_DB_ALIAS isn't nice but it's the best that can be done here.
# A similar thing is done in is_nullable(), too.
if (
lookup_name == 'exact' and
lookup.rhs == '' and
connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls
):
return lhs.get_lookup('isnull')(lhs, True)
return lookup
def try_transform(self, lhs, name):
"""
Helper method for build_lookup(). Try to fetch and initialize
a transform for name parameter from lhs.
"""
transform_class = lhs.get_transform(name)
if transform_class:
return transform_class(lhs)
else:
output_field = lhs.output_field.__class__
suggested_lookups = difflib.get_close_matches(name, output_field.get_lookups())
if suggested_lookups:
suggestion = ', perhaps you meant %s?' % ' or '.join(suggested_lookups)
else:
suggestion = '.'
raise FieldError(
"Unsupported lookup '%s' for %s or join on the field not "
"permitted%s" % (name, output_field.__name__, suggestion)
)
def build_filter(self, filter_expr, branch_negated=False, current_negated=False,
can_reuse=None, allow_joins=True, split_subq=True,
check_filterable=True):
"""
Build a WhereNode for a single filter clause but don't add it
to this Query. Query.add_q() will then add this filter to the where
Node.
The 'branch_negated' tells us if the current branch contains any
negations. This will be used to determine if subqueries are needed.
The 'current_negated' is used to determine if the current filter is
negated or not and this will be used to determine if IS NULL filtering
is needed.
The difference between current_negated and branch_negated is that
branch_negated is set on first negation, but current_negated is
flipped for each negation.
        Note that add_filter will not do any negating itself; that is done
        higher up in the code by add_q().
The 'can_reuse' is a set of reusable joins for multijoins.
The method will create a filter clause that can be added to the current
query. However, if the filter isn't added to the query then the caller
is responsible for unreffing the joins used.
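        A simplified example of the usual keyword form ('name' is a
        hypothetical field):
            clause, used_joins = self.build_filter(('name__icontains', 'foo'))
            # 'clause' is a WhereNode wrapping the icontains lookup; the
            # second value lists the joins that the lookup needed.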
"""
if isinstance(filter_expr, dict):
raise FieldError("Cannot parse keyword query as dict")
if isinstance(filter_expr, Q):
return self._add_q(
filter_expr,
branch_negated=branch_negated,
current_negated=current_negated,
used_aliases=can_reuse,
allow_joins=allow_joins,
split_subq=split_subq,
check_filterable=check_filterable,
)
if hasattr(filter_expr, 'resolve_expression'):
if not getattr(filter_expr, 'conditional', False):
raise TypeError('Cannot filter against a non-conditional expression.')
condition = filter_expr.resolve_expression(self, allow_joins=allow_joins)
if not isinstance(condition, Lookup):
condition = self.build_lookup(['exact'], condition, True)
return WhereNode([condition], connector=AND), []
arg, value = filter_expr
if not arg:
raise FieldError("Cannot parse keyword query %r" % arg)
lookups, parts, reffed_expression = self.solve_lookup_type(arg)
if check_filterable:
self.check_filterable(reffed_expression)
if not allow_joins and len(parts) > 1:
raise FieldError("Joined field references are not permitted in this query")
pre_joins = self.alias_refcount.copy()
value = self.resolve_lookup_value(value, can_reuse, allow_joins)
used_joins = {k for k, v in self.alias_refcount.items() if v > pre_joins.get(k, 0)}
if check_filterable:
self.check_filterable(value)
if reffed_expression:
condition = self.build_lookup(lookups, reffed_expression, value)
return WhereNode([condition], connector=AND), []
opts = self.get_meta()
alias = self.get_initial_alias()
allow_many = not branch_negated or not split_subq
try:
join_info = self.setup_joins(
parts, opts, alias, can_reuse=can_reuse, allow_many=allow_many,
)
# Prevent iterator from being consumed by check_related_objects()
if isinstance(value, Iterator):
value = list(value)
self.check_related_objects(join_info.final_field, value, join_info.opts)
# split_exclude() needs to know which joins were generated for the
# lookup parts
self._lookup_joins = join_info.joins
except MultiJoin as e:
return self.split_exclude(filter_expr, can_reuse, e.names_with_path)
# Update used_joins before trimming since they are reused to determine
# which joins could be later promoted to INNER.
used_joins.update(join_info.joins)
targets, alias, join_list = self.trim_joins(join_info.targets, join_info.joins, join_info.path)
if can_reuse is not None:
can_reuse.update(join_list)
if join_info.final_field.is_relation:
# No support for transforms for relational fields
num_lookups = len(lookups)
if num_lookups > 1:
raise FieldError('Related Field got invalid lookup: {}'.format(lookups[0]))
if len(targets) == 1:
col = self._get_col(targets[0], join_info.final_field, alias)
else:
col = MultiColSource(alias, targets, join_info.targets, join_info.final_field)
else:
col = self._get_col(targets[0], join_info.final_field, alias)
condition = self.build_lookup(lookups, col, value)
lookup_type = condition.lookup_name
clause = WhereNode([condition], connector=AND)
require_outer = lookup_type == 'isnull' and condition.rhs is True and not current_negated
if current_negated and (lookup_type != 'isnull' or condition.rhs is False) and condition.rhs is not None:
require_outer = True
if lookup_type != 'isnull':
# The condition added here will be SQL like this:
# NOT (col IS NOT NULL), where the first NOT is added in
# upper layers of code. The reason for addition is that if col
# is null, then col != someval will result in SQL "unknown"
# which isn't the same as in Python. The Python None handling
# is wanted, and it can be gotten by
# (col IS NULL OR col != someval)
# <=>
# NOT (col IS NOT NULL AND col = someval).
if (
self.is_nullable(targets[0]) or
self.alias_map[join_list[-1]].join_type == LOUTER
):
lookup_class = targets[0].get_lookup('isnull')
col = self._get_col(targets[0], join_info.targets[0], alias)
clause.add(lookup_class(col, False), AND)
# If someval is a nullable column, someval IS NOT NULL is
# added.
if isinstance(value, Col) and self.is_nullable(value.target):
lookup_class = value.target.get_lookup('isnull')
clause.add(lookup_class(value, False), AND)
return clause, used_joins if not require_outer else ()
def add_filter(self, filter_lhs, filter_rhs):
self.add_q(Q((filter_lhs, filter_rhs)))
def add_q(self, q_object):
"""
A preprocessor for the internal _add_q(). Responsible for doing final
join promotion.
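        For example (Book fields are hypothetical; this is roughly what a
        filter() call boils down to):
            self.add_q(Q(title__icontains='django') | Q(pages__gt=300))
            # The resulting clause is ANDed into self.where.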
"""
# For join promotion this case is doing an AND for the added q_object
# and existing conditions. So, any existing inner join forces the join
# type to remain inner. Existing outer joins can however be demoted.
# (Consider case where rel_a is LOUTER and rel_a__col=1 is added - if
# rel_a doesn't produce any rows, then the whole condition must fail.
# So, demotion is OK.
existing_inner = {a for a in self.alias_map if self.alias_map[a].join_type == INNER}
clause, _ = self._add_q(q_object, self.used_aliases)
if clause:
self.where.add(clause, AND)
self.demote_joins(existing_inner)
def build_where(self, filter_expr):
return self.build_filter(filter_expr, allow_joins=False)[0]
def clear_where(self):
self.where = WhereNode()
def _add_q(self, q_object, used_aliases, branch_negated=False,
current_negated=False, allow_joins=True, split_subq=True,
check_filterable=True):
"""Add a Q-object to the current filter."""
connector = q_object.connector
current_negated = current_negated ^ q_object.negated
branch_negated = branch_negated or q_object.negated
target_clause = WhereNode(connector=connector, negated=q_object.negated)
joinpromoter = JoinPromoter(q_object.connector, len(q_object.children), current_negated)
for child in q_object.children:
child_clause, needed_inner = self.build_filter(
child, can_reuse=used_aliases, branch_negated=branch_negated,
current_negated=current_negated, allow_joins=allow_joins,
split_subq=split_subq, check_filterable=check_filterable,
)
joinpromoter.add_votes(needed_inner)
if child_clause:
target_clause.add(child_clause, connector)
needed_inner = joinpromoter.update_join_types(self)
return target_clause, needed_inner
def build_filtered_relation_q(self, q_object, reuse, branch_negated=False, current_negated=False):
"""Add a FilteredRelation object to the current filter."""
connector = q_object.connector
current_negated ^= q_object.negated
branch_negated = branch_negated or q_object.negated
target_clause = WhereNode(connector=connector, negated=q_object.negated)
for child in q_object.children:
if isinstance(child, Node):
child_clause = self.build_filtered_relation_q(
child, reuse=reuse, branch_negated=branch_negated,
current_negated=current_negated,
)
else:
child_clause, _ = self.build_filter(
child, can_reuse=reuse, branch_negated=branch_negated,
current_negated=current_negated,
allow_joins=True, split_subq=False,
)
target_clause.add(child_clause, connector)
return target_clause
def add_filtered_relation(self, filtered_relation, alias):
filtered_relation.alias = alias
lookups = dict(get_children_from_q(filtered_relation.condition))
relation_lookup_parts, relation_field_parts, _ = self.solve_lookup_type(filtered_relation.relation_name)
if relation_lookup_parts:
raise ValueError(
"FilteredRelation's relation_name cannot contain lookups "
"(got %r)." % filtered_relation.relation_name
)
for lookup in chain(lookups):
lookup_parts, lookup_field_parts, _ = self.solve_lookup_type(lookup)
shift = 2 if not lookup_parts else 1
lookup_field_path = lookup_field_parts[:-shift]
for idx, lookup_field_part in enumerate(lookup_field_path):
if len(relation_field_parts) > idx:
if relation_field_parts[idx] != lookup_field_part:
raise ValueError(
"FilteredRelation's condition doesn't support "
"relations outside the %r (got %r)."
% (filtered_relation.relation_name, lookup)
)
else:
raise ValueError(
"FilteredRelation's condition doesn't support nested "
"relations deeper than the relation_name (got %r for "
"%r)." % (lookup, filtered_relation.relation_name)
)
self._filtered_relations[filtered_relation.alias] = filtered_relation
def names_to_path(self, names, opts, allow_many=True, fail_on_missing=False):
"""
        Walk the list of names and turn them into PathInfo tuples. A single
name in 'names' can generate multiple PathInfos (m2m, for example).
'names' is the path of names to travel, 'opts' is the model Options we
start the name resolving from, 'allow_many' is as for setup_joins().
If fail_on_missing is set to True, then a name that can't be resolved
will generate a FieldError.
Return a list of PathInfo tuples. In addition return the final field
(the last used join field) and target (which is a field guaranteed to
contain the same value as the final field). Finally, return those names
that weren't found (which are likely transforms and the final lookup).
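        A sketch of a typical return value, assuming a hypothetical 'author'
        foreign key with a 'name' field:
            path, final, targets, rest = self.names_to_path(
                ['author', 'name', 'icontains'], opts)
            # path: one PathInfo for the model -> author hop
            # final/targets: the 'name' field
            # rest: ['icontains'] (the trailing non-field part)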
"""
path, names_with_path = [], []
for pos, name in enumerate(names):
cur_names_with_path = (name, [])
if name == 'pk':
name = opts.pk.name
field = None
filtered_relation = None
try:
field = opts.get_field(name)
except FieldDoesNotExist:
if name in self.annotation_select:
field = self.annotation_select[name].output_field
elif name in self._filtered_relations and pos == 0:
filtered_relation = self._filtered_relations[name]
if LOOKUP_SEP in filtered_relation.relation_name:
parts = filtered_relation.relation_name.split(LOOKUP_SEP)
filtered_relation_path, field, _, _ = self.names_to_path(
parts, opts, allow_many, fail_on_missing,
)
path.extend(filtered_relation_path[:-1])
else:
field = opts.get_field(filtered_relation.relation_name)
if field is not None:
# Fields that contain one-to-many relations with a generic
# model (like a GenericForeignKey) cannot generate reverse
# relations and therefore cannot be used for reverse querying.
if field.is_relation and not field.related_model:
raise FieldError(
"Field %r does not generate an automatic reverse "
"relation and therefore cannot be used for reverse "
"querying. If it is a GenericForeignKey, consider "
"adding a GenericRelation." % name
)
try:
model = field.model._meta.concrete_model
except AttributeError:
# QuerySet.annotate() may introduce fields that aren't
# attached to a model.
model = None
else:
# We didn't find the current field, so move position back
# one step.
pos -= 1
if pos == -1 or fail_on_missing:
available = sorted([
*get_field_names_from_opts(opts),
*self.annotation_select,
*self._filtered_relations,
])
raise FieldError("Cannot resolve keyword '%s' into field. "
"Choices are: %s" % (name, ", ".join(available)))
break
# Check if we need any joins for concrete inheritance cases (the
# field lives in parent, but we are currently in one of its
# children)
if model is not opts.model:
path_to_parent = opts.get_path_to_parent(model)
if path_to_parent:
path.extend(path_to_parent)
cur_names_with_path[1].extend(path_to_parent)
opts = path_to_parent[-1].to_opts
if hasattr(field, 'path_infos'):
if filtered_relation:
pathinfos = field.get_path_info(filtered_relation)
else:
pathinfos = field.path_infos
if not allow_many:
for inner_pos, p in enumerate(pathinfos):
if p.m2m:
cur_names_with_path[1].extend(pathinfos[0:inner_pos + 1])
names_with_path.append(cur_names_with_path)
raise MultiJoin(pos + 1, names_with_path)
last = pathinfos[-1]
path.extend(pathinfos)
final_field = last.join_field
opts = last.to_opts
targets = last.target_fields
cur_names_with_path[1].extend(pathinfos)
names_with_path.append(cur_names_with_path)
else:
# Local non-relational field.
final_field = field
targets = (field,)
if fail_on_missing and pos + 1 != len(names):
raise FieldError(
"Cannot resolve keyword %r into field. Join on '%s'"
" not permitted." % (names[pos + 1], name))
break
return path, final_field, targets, names[pos + 1:]
def setup_joins(self, names, opts, alias, can_reuse=None, allow_many=True):
"""
Compute the necessary table joins for the passage through the fields
given in 'names'. 'opts' is the Options class for the current model
(which gives the table we are starting from), 'alias' is the alias for
the table to start the joining from.
The 'can_reuse' defines the reverse foreign key joins we can reuse. It
can be None in which case all joins are reusable or a set of aliases
that can be reused. Note that non-reverse foreign keys are always
reusable when using setup_joins().
If 'allow_many' is False, then any reverse foreign key seen will
generate a MultiJoin exception.
Return the final field involved in the joins, the target field (used
for any 'where' constraint), the final 'opts' value, the joins, the
field path traveled to generate the joins, and a transform function
that takes a field and alias and is equivalent to `field.get_col(alias)`
in the simple case but wraps field transforms if they were included in
names.
The target field is the field containing the concrete value. Final
field can be something different, for example foreign key pointing to
that value. Final field is needed for example in some value
conversions (convert 'obj' in fk__id=obj to pk val using the foreign
key field for example).
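        A rough usage sketch (the 'author__name' path is hypothetical):
            join_info = self.setup_joins(['author', 'name'], opts, alias)
            # join_info.joins holds the aliases used for the author join;
            # join_info.final_field and join_info.targets describe 'name'.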
"""
joins = [alias]
# The transform can't be applied yet, as joins must be trimmed later.
# To avoid making every caller of this method look up transforms
# directly, compute transforms here and create a partial that converts
# fields to the appropriate wrapped version.
def final_transformer(field, alias):
if not self.alias_cols:
alias = None
return field.get_col(alias)
# Try resolving all the names as fields first. If there's an error,
# treat trailing names as lookups until a field can be resolved.
last_field_exception = None
for pivot in range(len(names), 0, -1):
try:
path, final_field, targets, rest = self.names_to_path(
names[:pivot], opts, allow_many, fail_on_missing=True,
)
except FieldError as exc:
if pivot == 1:
# The first item cannot be a lookup, so it's safe
# to raise the field error here.
raise
else:
last_field_exception = exc
else:
# The transforms are the remaining items that couldn't be
# resolved into fields.
transforms = names[pivot:]
break
for name in transforms:
def transform(field, alias, *, name, previous):
try:
wrapped = previous(field, alias)
return self.try_transform(wrapped, name)
except FieldError:
# FieldError is raised if the transform doesn't exist.
if isinstance(final_field, Field) and last_field_exception:
raise last_field_exception
else:
raise
final_transformer = functools.partial(transform, name=name, previous=final_transformer)
# Then, add the path to the query's joins. Note that we can't trim
# joins at this stage - we will need the information about join type
# of the trimmed joins.
for join in path:
if join.filtered_relation:
filtered_relation = join.filtered_relation.clone()
table_alias = filtered_relation.alias
else:
filtered_relation = None
table_alias = None
opts = join.to_opts
if join.direct:
nullable = self.is_nullable(join.join_field)
else:
nullable = True
connection = self.join_class(
opts.db_table, alias, table_alias, INNER, join.join_field,
nullable, filtered_relation=filtered_relation,
)
reuse = can_reuse if join.m2m else None
alias = self.join(connection, reuse=reuse)
joins.append(alias)
if filtered_relation:
filtered_relation.path = joins[:]
return JoinInfo(final_field, targets, opts, joins, path, final_transformer)
def trim_joins(self, targets, joins, path):
"""
The 'target' parameter is the final field being joined to, 'joins'
        is the full list of join aliases. The 'path' contains the PathInfos
used to create the joins.
Return the final target field and table alias and the new active
joins.
Always trim any direct join if the target column is already in the
previous table. Can't trim reverse joins as it's unknown if there's
anything on the other side of the join.
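        A sketch of the common case (an 'author' foreign key lookup against
        its primary key; names are hypothetical):
            targets, alias, joins = self.trim_joins(targets, joins, path)
            # The join to the author table is dropped, because the matching
            # author_id column already exists on the previous table.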
"""
joins = joins[:]
for pos, info in enumerate(reversed(path)):
if len(joins) == 1 or not info.direct:
break
if info.filtered_relation:
break
join_targets = {t.column for t in info.join_field.foreign_related_fields}
cur_targets = {t.column for t in targets}
if not cur_targets.issubset(join_targets):
break
targets_dict = {r[1].column: r[0] for r in info.join_field.related_fields if r[1].column in cur_targets}
targets = tuple(targets_dict[t.column] for t in targets)
self.unref_alias(joins.pop())
return targets, joins[-1], joins
@classmethod
def _gen_cols(cls, exprs, include_external=False):
for expr in exprs:
if isinstance(expr, Col):
yield expr
elif include_external and callable(getattr(expr, 'get_external_cols', None)):
yield from expr.get_external_cols()
elif hasattr(expr, 'get_source_expressions'):
yield from cls._gen_cols(
expr.get_source_expressions(),
include_external=include_external,
)
@classmethod
def _gen_col_aliases(cls, exprs):
yield from (expr.alias for expr in cls._gen_cols(exprs))
def resolve_ref(self, name, allow_joins=True, reuse=None, summarize=False):
annotation = self.annotations.get(name)
if annotation is not None:
if not allow_joins:
for alias in self._gen_col_aliases([annotation]):
if isinstance(self.alias_map[alias], Join):
raise FieldError(
'Joined field references are not permitted in '
'this query'
)
if summarize:
# Summarize currently means we are doing an aggregate() query
# which is executed as a wrapped subquery if any of the
# aggregate() elements reference an existing annotation. In
# that case we need to return a Ref to the subquery's annotation.
if name not in self.annotation_select:
raise FieldError(
"Cannot aggregate over the '%s' alias. Use annotate() "
"to promote it." % name
)
return Ref(name, self.annotation_select[name])
else:
return annotation
else:
field_list = name.split(LOOKUP_SEP)
annotation = self.annotations.get(field_list[0])
if annotation is not None:
for transform in field_list[1:]:
annotation = self.try_transform(annotation, transform)
return annotation
join_info = self.setup_joins(field_list, self.get_meta(), self.get_initial_alias(), can_reuse=reuse)
targets, final_alias, join_list = self.trim_joins(join_info.targets, join_info.joins, join_info.path)
if not allow_joins and len(join_list) > 1:
raise FieldError('Joined field references are not permitted in this query')
if len(targets) > 1:
raise FieldError("Referencing multicolumn fields with F() objects "
"isn't supported")
# Verify that the last lookup in name is a field or a transform:
# transform_function() raises FieldError if not.
transform = join_info.transform_function(targets[0], final_alias)
if reuse is not None:
reuse.update(join_list)
return transform
def split_exclude(self, filter_expr, can_reuse, names_with_path):
"""
When doing an exclude against any kind of N-to-many relation, we need
to use a subquery. This method constructs the nested query, given the
original exclude filter (filter_expr) and the portion up to the first
N-to-many relation field.
For example, if the origin filter is ~Q(child__name='foo'), filter_expr
is ('child__name', 'foo') and can_reuse is a set of joins usable for
filters in the original query.
We will turn this into equivalent of:
WHERE NOT EXISTS(
SELECT 1
FROM child
WHERE name = 'foo' AND child.parent_id = parent.id
LIMIT 1
)
"""
# Generate the inner query.
query = self.__class__(self.model)
query._filtered_relations = self._filtered_relations
filter_lhs, filter_rhs = filter_expr
if isinstance(filter_rhs, OuterRef):
filter_rhs = OuterRef(filter_rhs)
elif isinstance(filter_rhs, F):
filter_rhs = OuterRef(filter_rhs.name)
query.add_filter(filter_lhs, filter_rhs)
query.clear_ordering(force=True)
# Try to have as simple as possible subquery -> trim leading joins from
# the subquery.
trimmed_prefix, contains_louter = query.trim_start(names_with_path)
col = query.select[0]
select_field = col.target
alias = col.alias
if alias in can_reuse:
pk = select_field.model._meta.pk
# Need to add a restriction so that outer query's filters are in effect for
# the subquery, too.
query.bump_prefix(self)
lookup_class = select_field.get_lookup('exact')
# Note that the query.select[0].alias is different from alias
# due to bump_prefix above.
lookup = lookup_class(pk.get_col(query.select[0].alias),
pk.get_col(alias))
query.where.add(lookup, AND)
query.external_aliases[alias] = True
lookup_class = select_field.get_lookup('exact')
lookup = lookup_class(col, ResolvedOuterRef(trimmed_prefix))
query.where.add(lookup, AND)
condition, needed_inner = self.build_filter(Exists(query))
if contains_louter:
or_null_condition, _ = self.build_filter(
('%s__isnull' % trimmed_prefix, True),
current_negated=True, branch_negated=True, can_reuse=can_reuse)
condition.add(or_null_condition, OR)
# Note that the end result will be:
# (outercol NOT IN innerq AND outercol IS NOT NULL) OR outercol IS NULL.
# This might look crazy but due to how IN works, this seems to be
# correct. If the IS NOT NULL check is removed then outercol NOT
# IN will return UNKNOWN. If the IS NULL check is removed, then if
# outercol IS NULL we will not match the row.
return condition, needed_inner
def set_empty(self):
self.where.add(NothingNode(), AND)
for query in self.combined_queries:
query.set_empty()
def is_empty(self):
return any(isinstance(c, NothingNode) for c in self.where.children)
def set_limits(self, low=None, high=None):
"""
Adjust the limits on the rows retrieved. Use low/high to set these,
as it makes it more Pythonic to read and write. When the SQL query is
created, convert them to the appropriate offset and limit values.
Apply any limits passed in here to the existing constraints. Add low
to the current low value and clamp both to any existing high value.
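        For example, slicing an already sliced query (numbers illustrative):
            self.set_limits(5, 10)  # low_mark=5, high_mark=10
            self.set_limits(1, 3)   # high_mark=min(10, 5+3)=8, low_mark=5+1=6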
"""
if high is not None:
if self.high_mark is not None:
self.high_mark = min(self.high_mark, self.low_mark + high)
else:
self.high_mark = self.low_mark + high
if low is not None:
if self.high_mark is not None:
self.low_mark = min(self.high_mark, self.low_mark + low)
else:
self.low_mark = self.low_mark + low
if self.low_mark == self.high_mark:
self.set_empty()
def clear_limits(self):
"""Clear any existing limits."""
self.low_mark, self.high_mark = 0, None
@property
def is_sliced(self):
return self.low_mark != 0 or self.high_mark is not None
def has_limit_one(self):
return self.high_mark is not None and (self.high_mark - self.low_mark) == 1
def can_filter(self):
"""
Return True if adding filters to this instance is still possible.
Typically, this means no limits or offsets have been put on the results.
"""
return not self.is_sliced
def clear_select_clause(self):
"""Remove all fields from SELECT clause."""
self.select = ()
self.default_cols = False
self.select_related = False
self.set_extra_mask(())
self.set_annotation_mask(())
def clear_select_fields(self):
"""
Clear the list of fields to select (but not extra_select columns).
Some queryset types completely replace any existing list of select
columns.
"""
self.select = ()
self.values_select = ()
def add_select_col(self, col, name):
self.select += col,
self.values_select += name,
def set_select(self, cols):
self.default_cols = False
self.select = tuple(cols)
def add_distinct_fields(self, *field_names):
"""
Add and resolve the given fields to the query's "distinct on" clause.
"""
self.distinct_fields = field_names
self.distinct = True
def add_fields(self, field_names, allow_m2m=True):
"""
Add the given (model) fields to the select set. Add the field names in
the order specified.
"""
alias = self.get_initial_alias()
opts = self.get_meta()
try:
cols = []
for name in field_names:
# Join promotion note - we must not remove any rows here, so
# if there is no existing joins, use outer join.
join_info = self.setup_joins(name.split(LOOKUP_SEP), opts, alias, allow_many=allow_m2m)
targets, final_alias, joins = self.trim_joins(
join_info.targets,
join_info.joins,
join_info.path,
)
for target in targets:
cols.append(join_info.transform_function(target, final_alias))
if cols:
self.set_select(cols)
except MultiJoin:
raise FieldError("Invalid field name: '%s'" % name)
except FieldError:
if LOOKUP_SEP in name:
# For lookups spanning over relationships, show the error
# from the model on which the lookup failed.
raise
elif name in self.annotations:
raise FieldError(
"Cannot select the '%s' alias. Use annotate() to promote "
"it." % name
)
else:
names = sorted([
*get_field_names_from_opts(opts), *self.extra,
*self.annotation_select, *self._filtered_relations
])
raise FieldError("Cannot resolve keyword %r into field. "
"Choices are: %s" % (name, ", ".join(names)))
def add_ordering(self, *ordering):
"""
Add items from the 'ordering' sequence to the query's "order by"
clause. These items are either field names (not column names) --
possibly with a direction prefix ('-' or '?') -- or OrderBy
expressions.
If 'ordering' is empty, clear all ordering from the query.
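        For example ('-pub_date' and 'title' are hypothetical field names):
            self.add_ordering('-pub_date', 'title')  # appended to order_by
            self.add_ordering()  # no arguments: default ordering is disabled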
"""
errors = []
for item in ordering:
if isinstance(item, str):
if item == '?':
continue
if item.startswith('-'):
item = item[1:]
if item in self.annotations:
continue
if self.extra and item in self.extra:
continue
                # names_to_path() validates the lookup. A descriptive
                # FieldError will be raised if it's not.
self.names_to_path(item.split(LOOKUP_SEP), self.model._meta)
elif not hasattr(item, 'resolve_expression'):
errors.append(item)
if getattr(item, 'contains_aggregate', False):
raise FieldError(
'Using an aggregate in order_by() without also including '
'it in annotate() is not allowed: %s' % item
)
if errors:
raise FieldError('Invalid order_by arguments: %s' % errors)
if ordering:
self.order_by += ordering
else:
self.default_ordering = False
def clear_ordering(self, force=False, clear_default=True):
"""
        Remove any ordering settings if the current query allows it without
        side effects; set 'force' to True to clear the ordering regardless.
If 'clear_default' is True, there will be no ordering in the resulting
query (not even the model's default).
"""
if not force and (self.is_sliced or self.distinct_fields or self.select_for_update):
return
self.order_by = ()
self.extra_order_by = ()
if clear_default:
self.default_ordering = False
def set_group_by(self, allow_aliases=True):
"""
Expand the GROUP BY clause required by the query.
This will usually be the set of all non-aggregate fields in the
return data. If the database backend supports grouping by the
primary key, and the query would be equivalent, the optimization
will be made automatically.
"""
# Column names from JOINs to check collisions with aliases.
if allow_aliases:
column_names = set()
seen_models = set()
for join in list(self.alias_map.values())[1:]: # Skip base table.
model = join.join_field.related_model
if model not in seen_models:
column_names.update({
field.column
for field in model._meta.local_concrete_fields
})
seen_models.add(model)
group_by = list(self.select)
if self.annotation_select:
for alias, annotation in self.annotation_select.items():
if not allow_aliases or alias in column_names:
alias = None
group_by_cols = annotation.get_group_by_cols(alias=alias)
group_by.extend(group_by_cols)
self.group_by = tuple(group_by)
def add_select_related(self, fields):
"""
Set up the select_related data structure so that we only select
certain related models (as opposed to all models, when
self.select_related=True).
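        For example ('author' and 'publisher__country' are hypothetical
        relations):
            self.add_select_related(['author', 'publisher__country'])
            # select_related becomes {'author': {}, 'publisher': {'country': {}}}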
"""
if isinstance(self.select_related, bool):
field_dict = {}
else:
field_dict = self.select_related
for field in fields:
d = field_dict
for part in field.split(LOOKUP_SEP):
d = d.setdefault(part, {})
self.select_related = field_dict
def add_extra(self, select, select_params, where, params, tables, order_by):
"""
Add data to the various extra_* attributes for user-created additions
to the query.
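        For example (the extra select entry and its parameter are
        hypothetical):
            self.add_extra({'is_recent': 'pub_date > %s'}, [cutoff],
                           None, None, None, None)
            # Pairs the '%s' placeholder with 'cutoff' and stores the entry
            # in self.extra.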
"""
if select:
# We need to pair any placeholder markers in the 'select'
# dictionary with their parameters in 'select_params' so that
# subsequent updates to the select dictionary also adjust the
# parameters appropriately.
select_pairs = {}
if select_params:
param_iter = iter(select_params)
else:
param_iter = iter([])
for name, entry in select.items():
entry = str(entry)
entry_params = []
pos = entry.find("%s")
while pos != -1:
if pos == 0 or entry[pos - 1] != '%':
entry_params.append(next(param_iter))
pos = entry.find("%s", pos + 2)
select_pairs[name] = (entry, entry_params)
self.extra.update(select_pairs)
if where or params:
self.where.add(ExtraWhere(where, params), AND)
if tables:
self.extra_tables += tuple(tables)
if order_by:
self.extra_order_by = order_by
def clear_deferred_loading(self):
"""Remove any fields from the deferred loading set."""
self.deferred_loading = (frozenset(), True)
def add_deferred_loading(self, field_names):
"""
Add the given list of model field names to the set of fields to
exclude from loading from the database when automatic column selection
is done. Add the new field names to any existing field names that
are deferred (or removed from any existing field names that are marked
as the only ones for immediate loading).
"""
# Fields on related models are stored in the literal double-underscore
# format, so that we can use a set datastructure. We do the foo__bar
# splitting and handling when computing the SQL column names (as part of
# get_columns()).
existing, defer = self.deferred_loading
if defer:
# Add to existing deferred names.
self.deferred_loading = existing.union(field_names), True
else:
# Remove names from the set of any existing "immediate load" names.
if new_existing := existing.difference(field_names):
self.deferred_loading = new_existing, False
else:
self.clear_deferred_loading()
if new_only := set(field_names).difference(existing):
self.deferred_loading = new_only, True
def add_immediate_loading(self, field_names):
"""
Add the given list of model field names to the set of fields to
retrieve when the SQL is executed ("immediate loading" fields). The
field names replace any existing immediate loading field names. If
there are field names already specified for deferred loading, remove
those names from the new field_names before storing the new names
for immediate loading. (That is, immediate loading overrides any
existing immediate values, but respects existing deferrals.)
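        For example (field names are hypothetical):
            # Currently deferred: {'body'}
            self.add_immediate_loading(['title', 'body'])
            # Immediate set becomes {'title'}; 'body' stays deferred.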
"""
existing, defer = self.deferred_loading
field_names = set(field_names)
if 'pk' in field_names:
field_names.remove('pk')
field_names.add(self.get_meta().pk.name)
if defer:
# Remove any existing deferred names from the current set before
# setting the new names.
self.deferred_loading = field_names.difference(existing), False
else:
# Replace any existing "immediate load" field names.
self.deferred_loading = frozenset(field_names), False
def get_loaded_field_names(self):
"""
If any fields are marked to be deferred, return a dictionary mapping
models to a set of names in those fields that will be loaded. If a
model is not in the returned dictionary, none of its fields are
deferred.
If no fields are marked for deferral, return an empty dictionary.
"""
# We cache this because we call this function multiple times
# (compiler.fill_related_selections, query.iterator)
try:
return self._loaded_field_names_cache
except AttributeError:
collection = {}
self.deferred_to_data(collection, self.get_loaded_field_names_cb)
self._loaded_field_names_cache = collection
return collection
def get_loaded_field_names_cb(self, target, model, fields):
"""Callback used by get_deferred_field_names()."""
target[model] = {f.attname for f in fields}
def set_annotation_mask(self, names):
"""Set the mask of annotations that will be returned by the SELECT."""
if names is None:
self.annotation_select_mask = None
else:
self.annotation_select_mask = set(names)
self._annotation_select_cache = None
def append_annotation_mask(self, names):
if self.annotation_select_mask is not None:
self.set_annotation_mask(self.annotation_select_mask.union(names))
def set_extra_mask(self, names):
"""
Set the mask of extra select items that will be returned by SELECT.
Don't remove them from the Query since they might be used later.
"""
if names is None:
self.extra_select_mask = None
else:
self.extra_select_mask = set(names)
self._extra_select_cache = None
def set_values(self, fields):
self.select_related = False
self.clear_deferred_loading()
self.clear_select_fields()
if fields:
field_names = []
extra_names = []
annotation_names = []
if not self.extra and not self.annotations:
# Shortcut - if there are no extra or annotations, then
# the values() clause must be just field names.
field_names = list(fields)
else:
self.default_cols = False
for f in fields:
if f in self.extra_select:
extra_names.append(f)
elif f in self.annotation_select:
annotation_names.append(f)
else:
field_names.append(f)
self.set_extra_mask(extra_names)
self.set_annotation_mask(annotation_names)
selected = frozenset(field_names + extra_names + annotation_names)
else:
field_names = [f.attname for f in self.model._meta.concrete_fields]
selected = frozenset(field_names)
# Selected annotations must be known before setting the GROUP BY
# clause.
if self.group_by is True:
self.add_fields((f.attname for f in self.model._meta.concrete_fields), False)
# Disable GROUP BY aliases to avoid orphaning references to the
# SELECT clause which is about to be cleared.
self.set_group_by(allow_aliases=False)
self.clear_select_fields()
elif self.group_by:
# Resolve GROUP BY annotation references if they are not part of
# the selected fields anymore.
group_by = []
for expr in self.group_by:
if isinstance(expr, Ref) and expr.refs not in selected:
expr = self.annotations[expr.refs]
group_by.append(expr)
self.group_by = tuple(group_by)
self.values_select = tuple(field_names)
self.add_fields(field_names, True)
@property
def annotation_select(self):
"""
Return the dictionary of aggregate columns that are not masked and
should be used in the SELECT clause. Cache this result for performance.
"""
if self._annotation_select_cache is not None:
return self._annotation_select_cache
elif not self.annotations:
return {}
elif self.annotation_select_mask is not None:
self._annotation_select_cache = {
k: v for k, v in self.annotations.items()
if k in self.annotation_select_mask
}
return self._annotation_select_cache
else:
return self.annotations
@property
def extra_select(self):
if self._extra_select_cache is not None:
return self._extra_select_cache
if not self.extra:
return {}
elif self.extra_select_mask is not None:
self._extra_select_cache = {
k: v for k, v in self.extra.items()
if k in self.extra_select_mask
}
return self._extra_select_cache
else:
return self.extra
def trim_start(self, names_with_path):
"""
Trim joins from the start of the join path. The candidates for trim
are the PathInfos in names_with_path structure that are m2m joins.
Also set the select column so the start matches the join.
This method is meant to be used for generating the subquery joins &
cols in split_exclude().
Return a lookup usable for doing outerq.filter(lookup=self) and a
boolean indicating if the joins in the prefix contain a LEFT OUTER join.
_"""
all_paths = []
for _, paths in names_with_path:
all_paths.extend(paths)
contains_louter = False
# Trim and operate only on tables that were generated for
# the lookup part of the query. That is, avoid trimming
# joins generated for F() expressions.
lookup_tables = [
t for t in self.alias_map
if t in self._lookup_joins or t == self.base_table
]
for trimmed_paths, path in enumerate(all_paths):
if path.m2m:
break
if self.alias_map[lookup_tables[trimmed_paths + 1]].join_type == LOUTER:
contains_louter = True
alias = lookup_tables[trimmed_paths]
self.unref_alias(alias)
        # The path.join_field is a Rel, let's get the other side's field
join_field = path.join_field.field
# Build the filter prefix.
paths_in_prefix = trimmed_paths
trimmed_prefix = []
for name, path in names_with_path:
if paths_in_prefix - len(path) < 0:
break
trimmed_prefix.append(name)
paths_in_prefix -= len(path)
trimmed_prefix.append(
join_field.foreign_related_fields[0].name)
trimmed_prefix = LOOKUP_SEP.join(trimmed_prefix)
        # Let's still see if we can trim the first join from the inner query
# (that is, self). We can't do this for:
# - LEFT JOINs because we would miss those rows that have nothing on
# the outer side,
# - INNER JOINs from filtered relations because we would miss their
# filters.
first_join = self.alias_map[lookup_tables[trimmed_paths + 1]]
if first_join.join_type != LOUTER and not first_join.filtered_relation:
select_fields = [r[0] for r in join_field.related_fields]
select_alias = lookup_tables[trimmed_paths + 1]
self.unref_alias(lookup_tables[trimmed_paths])
extra_restriction = join_field.get_extra_restriction(None, lookup_tables[trimmed_paths + 1])
if extra_restriction:
self.where.add(extra_restriction, AND)
else:
# TODO: It might be possible to trim more joins from the start of the
# inner query if it happens to have a longer join chain containing the
            # values in select_fields. Let's punt this one for now.
select_fields = [r[1] for r in join_field.related_fields]
select_alias = lookup_tables[trimmed_paths]
# The found starting point is likely a join_class instead of a
# base_table_class reference. But the first entry in the query's FROM
# clause must not be a JOIN.
for table in self.alias_map:
if self.alias_refcount[table] > 0:
self.alias_map[table] = self.base_table_class(
self.alias_map[table].table_name,
table,
)
break
self.set_select([f.get_col(select_alias) for f in select_fields])
return trimmed_prefix, contains_louter
def is_nullable(self, field):
"""
Check if the given field should be treated as nullable.
Some backends treat '' as null and Django treats such fields as
nullable for those backends. In such situations field.null can be
False even if we should treat the field as nullable.
"""
# We need to use DEFAULT_DB_ALIAS here, as QuerySet does not have
# (nor should it have) knowledge of which connection is going to be
# used. The proper fix would be to defer all decisions where
# is_nullable() is needed to the compiler stage, but that is not easy
# to do currently.
return field.null or (
field.empty_strings_allowed and
connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls
)
def get_order_dir(field, default='ASC'):
"""
Return the field name and direction for an order specification. For
example, '-foo' is returned as ('foo', 'DESC').
The 'default' param is used to indicate which way no prefix (or a '+'
prefix) should sort. The '-' prefix always sorts the opposite way.
"""
dirn = ORDER_DIR[default]
if field[0] == '-':
return field[1:], dirn[1]
return field, dirn[0]
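# Hedged examples of the behaviour documented above (assuming the module's
# default ORDER_DIR mapping):
#
#   get_order_dir('-foo')          -> ('foo', 'DESC')
#   get_order_dir('foo')           -> ('foo', 'ASC')
#   get_order_dir('foo', 'DESC')   -> ('foo', 'DESC')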
def add_to_dict(data, key, value):
"""
Add "value" to the set of values for "key", whether or not "key" already
exists.
"""
if key in data:
data[key].add(value)
else:
data[key] = {value}
def is_reverse_o2o(field):
"""
Check if the given field is reverse-o2o. The field is expected to be some
sort of relation field or related object.
"""
return field.is_relation and field.one_to_one and not field.concrete
class JoinPromoter:
"""
A class to abstract away join promotion problems for complex filter
conditions.
"""
def __init__(self, connector, num_children, negated):
self.connector = connector
self.negated = negated
if self.negated:
if connector == AND:
self.effective_connector = OR
else:
self.effective_connector = AND
else:
self.effective_connector = self.connector
self.num_children = num_children
# Maps of table alias to how many times it is seen as required for
# inner and/or outer joins.
self.votes = Counter()
def __repr__(self):
return (
f'{self.__class__.__qualname__}(connector={self.connector!r}, '
f'num_children={self.num_children!r}, negated={self.negated!r})'
)
def add_votes(self, votes):
"""
Add single vote per item to self.votes. Parameter can be any
iterable.
"""
self.votes.update(votes)
def update_join_types(self, query):
"""
Change join types so that the generated query is as efficient as
possible, but still correct. So, change as many joins as possible
to INNER, but don't make OUTER joins INNER if that could remove
results from the query.
"""
to_promote = set()
to_demote = set()
# The effective_connector is used so that NOT (a AND b) is treated
# similarly to (a OR b) for join promotion.
for table, votes in self.votes.items():
# We must use outer joins in OR case when the join isn't contained
# in all of the joins. Otherwise the INNER JOIN itself could remove
# valid results. Consider the case where a model with rel_a and
# rel_b relations is queried with rel_a__col=1 | rel_b__col=2. Now,
            # if the rel_a join doesn't produce any results (for example, a
            # reverse foreign key or a null value in a direct foreign key), and
# there is a matching row in rel_b with col=2, then an INNER join
# to rel_a would remove a valid match from the query. So, we need
# to promote any existing INNER to LOUTER (it is possible this
# promotion in turn will be demoted later on).
if self.effective_connector == 'OR' and votes < self.num_children:
to_promote.add(table)
# If connector is AND and there is a filter that can match only
# when there is a joinable row, then use INNER. For example, in
# rel_a__col=1 & rel_b__col=2, if either of the rels produce NULL
# as join output, then the col=1 or col=2 can't match (as
# NULL=anything is always false).
# For the OR case, if all children voted for a join to be inner,
# then we can use INNER for the join. For example:
# (rel_a__col__icontains=Alex | rel_a__col__icontains=Russell)
# then if rel_a doesn't produce any rows, the whole condition
# can't match. Hence we can safely use INNER join.
if self.effective_connector == 'AND' or (
self.effective_connector == 'OR' and votes == self.num_children):
to_demote.add(table)
# Finally, what happens in cases where we have:
# (rel_a__col=1|rel_b__col=2) & rel_a__col__gte=0
# Now, we first generate the OR clause, and promote joins for it
# in the first if branch above. Both rel_a and rel_b are promoted
# to LOUTER joins. After that we do the AND case. The OR case
# voted no inner joins but the rel_a__col__gte=0 votes inner join
# for rel_a. We demote it back to INNER join (in AND case a single
# vote is enough). The demotion is OK, if rel_a doesn't produce
# rows, then the rel_a__col__gte=0 clause can't be true, and thus
# the whole clause must be false. So, it is safe to use INNER
# join.
# Note that in this example we could just as well have the __gte
# clause and the OR clause swapped. Or we could replace the __gte
# clause with an OR clause containing rel_a__col=1|rel_a__col=2,
# and again we could safely demote to INNER.
query.promote_joins(to_promote)
query.demote_joins(to_demote)
return to_demote
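# Hedged illustration (not part of Django): the promotion rule above compares
# each alias's vote count against num_children under the effective connector.
# The toy helper below mirrors that rule on a plain {alias: votes} dict; the
# alias names in the example are made up.
def _toy_join_decision(effective_connector, num_children, votes):
    to_promote, to_demote = set(), set()
    for alias, count in votes.items():
        if effective_connector == 'OR' and count < num_children:
            to_promote.add(alias)   # join not required by every child -> LOUTER
        if effective_connector == 'AND' or count == num_children:
            to_demote.add(alias)    # join required whenever the clause matches -> INNER
    return to_promote, to_demote
# _toy_join_decision('OR', 2, {'rel_a': 1, 'rel_b': 2}) == ({'rel_a'}, {'rel_b'})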
| 44.537717
| 119
| 0.608474
|
4de14b1035336d91a0ebd3534175563333337507
| 3,459
|
py
|
Python
|
main/settings.py
|
RomeoEncinares/Dog-Classifier
|
1d1c5c78a2f7d2ac1fd3d20e0413c9a0ac9b0053
|
[
"MIT"
] | null | null | null |
main/settings.py
|
RomeoEncinares/Dog-Classifier
|
1d1c5c78a2f7d2ac1fd3d20e0413c9a0ac9b0053
|
[
"MIT"
] | null | null | null |
main/settings.py
|
RomeoEncinares/Dog-Classifier
|
1d1c5c78a2f7d2ac1fd3d20e0413c9a0ac9b0053
|
[
"MIT"
] | null | null | null |
"""
Django settings for main project.
Generated by 'django-admin startproject' using Django 4.0.2.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/4.0/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/4.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-$kkki071jwu_wxf2d=9ccaa@x1%z(w9ml$t&m=myg*=@bhp*5-'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'apps.classifier'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'main.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates/')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'main.wsgi.application'
# Database
# https://docs.djangoproject.com/en/4.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/4.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/4.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/4.0/howto/static-files/
STATIC_URL = 'static/'
#STATIC_ROOT = os.path.join(BASE_DIR, 'static/')
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static'),
]
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# Default primary key field type
# https://docs.djangoproject.com/en/4.0/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
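# Hedged sketch (not part of the generated settings): the two SECURITY
# WARNINGs above are commonly addressed by reading the values from the
# environment instead of hard-coding them. The variable names
# DJANGO_SECRET_KEY and DJANGO_DEBUG are assumptions, not existing project
# conventions.
#
#   SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', SECRET_KEY)
#   DEBUG = os.environ.get('DJANGO_DEBUG', '0') == '1'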
| 26.007519
| 91
| 0.700781
|
072a48225027dede7d3a427fdcb16b2f4aed8702
| 3,029
|
py
|
Python
|
tweetrssfeed/tweetrssfeed.py
|
gam-phon/tweetrssfeed
|
1bb21cf9bfe59af424328a221212a26814436ba4
|
[
"BSD-3-Clause"
] | null | null | null |
tweetrssfeed/tweetrssfeed.py
|
gam-phon/tweetrssfeed
|
1bb21cf9bfe59af424328a221212a26814436ba4
|
[
"BSD-3-Clause"
] | null | null | null |
tweetrssfeed/tweetrssfeed.py
|
gam-phon/tweetrssfeed
|
1bb21cf9bfe59af424328a221212a26814436ba4
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sqlite3
import tweepy
import feedparser
import bitly_api
BITLY_ACCESS_TOKEN = "BITLY_ACCESS_TOKEN"
TWITTER_CONSUMER_KEY = "TWITTER_CONSUMER_KEY"
TWITTER_CONSUMER_SECRET = "TWITTER_CONSUMER_SECRET"
TWITTER_ACCESS_TOKEN = "TWITTER_ACCESS_TOKEN"
TWITTER_ACCESS_TOKEN_SECRET = "TWITTER_ACCESS_TOKEN_SECRET"
DATABASE = "tweets.sqlite"
def check_env(*args):
for arg in args:
if arg not in os.environ:
raise ValueError("Environment variable '{}' required".format(arg))
def bitly_connection(username):
#Check Environments
env_access_token = "{}_{}".format(username, BITLY_ACCESS_TOKEN)
check_env(env_access_token)
#Get Environments
access_token = os.getenv(env_access_token)
#Access
bitly = bitly_api.Connection(access_token=access_token)
return bitly
def twitter_connection(username):
#Check Environments
env_consumer_key = "{}_{}".format(username, TWITTER_CONSUMER_KEY)
env_consumer_secret = "{}_{}".format(username, TWITTER_CONSUMER_SECRET)
env_access_token = "{}_{}".format(username, TWITTER_ACCESS_TOKEN)
env_access_token_secret = "{}_{}".format(username, TWITTER_ACCESS_TOKEN_SECRET)
check_env(env_consumer_key, env_consumer_secret, env_access_token, env_access_token_secret)
#Get Environments
consumer_key = os.getenv(env_consumer_key)
consumer_secret = os.getenv(env_consumer_secret)
access_token = os.getenv(env_access_token)
access_token_secret = os.getenv(env_access_token_secret)
#Access
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
return api
def tweet_rss(username, url, short=False, fetch=False, all=False):
#Access
api_bitly = bitly_connection(username)
api_twitter = twitter_connection(username)
#Database Connection
conn = sqlite3.connect(DATABASE)
conn.row_factory = sqlite3.Row
c = conn.cursor()
#create the table if it doesn't exist
c.execute('CREATE TABLE IF NOT EXISTS RSSContent (key TEXT, value TEXT)')
# Fetch Feed
d = feedparser.parse(url)
#print(len(d['entries']))
for entry in d['entries']:
if not all:
#check for duplicates
c.execute('select * from RSSContent where key=? AND value=?', (username, entry['link']))
if not c.fetchall():
#Tweet feeds
if not fetch:
if short:
bitly_short = api_bitly.shorten(entry['link'])
api_twitter.update_status("%s %s" % (entry['title'][:115], bitly_short['url']))
else:
api_twitter.update_status("%s %s" % (entry['title'][:115], entry['link']))
print("Tweeted \"%s\" to %s account." % (entry['title'][:115], username))
#Update database
c.execute('insert into RSSContent values (?,?)', (username, entry['link']))
conn.commit()
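# Hedged usage sketch (placeholder account and URL): with the five
# "<username>_*" environment variables above exported for an account called
# e.g. "myaccount", a feed can be processed like this:
#
#   tweet_rss("myaccount", "https://example.com/feed.xml", fetch=True)   # record entries only
#   tweet_rss("myaccount", "https://example.com/feed.xml", short=True)   # tweet with shortened links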
| 32.923913
| 100
| 0.680753
|
bddee339ecc3e19e4bc8ad7186665b32614a5c1e
| 5,265
|
py
|
Python
|
benzinga/param_check.py
|
Benzinga/benzinga-python-client
|
62fb44e90dc3e3c8224e5dc1edf3e3426a8d0654
|
[
"MIT"
] | 5
|
2020-01-10T03:39:00.000Z
|
2021-11-15T09:55:05.000Z
|
benzinga/param_check.py
|
infovisualnetwork/benzinga-python-client
|
62fb44e90dc3e3c8224e5dc1edf3e3426a8d0654
|
[
"MIT"
] | 1
|
2019-06-13T14:13:43.000Z
|
2019-06-13T14:30:08.000Z
|
benzinga/param_check.py
|
infovisualnetwork/benzinga-python-client
|
62fb44e90dc3e3c8224e5dc1edf3e3426a8d0654
|
[
"MIT"
] | 11
|
2019-06-11T19:24:51.000Z
|
2022-01-16T23:05:47.000Z
|
from .benzinga_errors import (TokenAuthenticationError, RequestAPIEndpointError, IncorrectParameterEntry,
URLIncorrectlyFormattedError,MissingParameter)
class Param_Check:
    """Validate request-parameter dicts against the expected types for each
    Benzinga endpoint, raising IncorrectParameterEntry on a type mismatch.
    """
def __init__(self):
self.stri = "str"
self.inte = "int"
self.nonetype = "NoneType"
self.float = "float"
    def __para_type_matching(self, param_metadata, para_dict):
        # A value may always be None; otherwise its type name must match the
        # expected type recorded in param_metadata.
for param, value in para_dict.items():
if (type(value).__name__ != param_metadata[param]) and (type(value).__name__ != self.nonetype):
raise IncorrectParameterEntry("Parameter Type for %s doesn't match: Correct Type: %s. "
"You entered %s" %
(param, param_metadata[param], type(value).__name__ ))
def calendar_check(self, dict):
param_type = {
'token': self.stri,
"page": self.inte ,
"pagesize": self.inte,
"parameters[date]": self.stri,
"parameters[date_from]": self.stri,
"parameters[date_to]": self.stri,
"parameters[tickers]": self.stri,
"parameters[importance]": self.inte,
"parameters[date_sort]": self.stri,
"parameters[updated]": self.inte,
"paramaters[dividend_yield_operation]": self.stri,
"parameters[dividend_yield]": self.float,
"parameters[action]": self.stri,
"country": self.stri,
"parameters[eps_surprise_percent]": self.stri,
"parameters[revenue_surprise_percent]": self.stri
}
self.__para_type_matching(param_type, dict)
def fundamentals_check(self, dict):
param_type = {
'apikey': self.stri,
"symbols": self.stri,
"symbol": self.stri,
"isin": self.stri,
"cik": self.stri,
"asOf": self.stri,
"period": self.stri,
"reportType": self.stri,
"token": self.stri
}
self.__para_type_matching(param_type, dict)
def delayed_quote_check(self, dict):
param_type = {
'token': self.stri,
"symbols": self.stri,
"isin": self.stri,
"cik": self.stri
}
self.__para_type_matching(param_type, dict)
def logos_check(self, dict):
param_type = {
'token': self.stri,
"symbols": self.stri,
"filters": self.stri
}
self.__para_type_matching(param_type, dict)
def instruments_check(self, dict):
param_type = {
"apikey": self.stri,
"fields": self.stri,
"query": self.stri,
"to": self.stri,
"from": self.stri,
"asOf": self.stri,
"sortfield": self.stri,
"sortdir": self.stri
}
self.__para_type_matching(param_type, dict)
def security_check(self, dict):
param_type = {
"apikey": self.stri,
"symbol": self.stri,
"cusip": self.stri
}
self.__para_type_matching(param_type, dict)
def bars_check(self, dict):
param_type = {
"token": self.stri,
"symbols": self.stri,
"from": self.stri,
"to": self.stri,
"interval": self.stri
}
self.__para_type_matching(param_type, dict)
def ticker_check(self, dict):
param_type = {
"apikey": self.stri,
"symbols": self.stri
}
self.__para_type_matching(param_type, dict)
def autocomplete_check(self, dict):
param_type = {
"token": self.stri,
"query": self.stri,
"limit": self.inte,
"searchMethod": self.stri,
"exchanges": self.stri,
"types": self.stri
}
self.__para_type_matching(param_type, dict)
def batchhistory_check(self, dict):
param_type = {
"apikey": self.stri,
"symbol": self.stri
}
self.__para_type_matching(param_type, dict)
def news_check(self, dict):
param_type = {
"token": self.stri,
"pageSize": self.inte,
"page": self.inte,
"displayOutput": self.stri,
"date": self.stri,
"dateFrom": self.stri,
"dateTo": self.stri,
"lastId": self.stri,
"updatedSince": self.stri,
"publishedSince": self.stri,
"tickers": self.stri,
"channels": self.stri,
"type": self.stri,
"limit": self.inte,
"channel": self.stri
}
self.__para_type_matching(param_type, dict)
def options_check(self, dict):
param_type = {
"token": self.stri,
"page": self.inte,
"pagesize": self.inte,
"parameters[date]": self.stri,
"parameters[date_from]": self.stri,
"parameters[date_to]": self.stri,
"parameters[tickers]": self.stri,
"parameters[updated]": self.inte
}
self.__para_type_matching(param_type, dict)
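# Hedged usage sketch: each *_check() method only validates value types
# against its expected-type table. Illustrative values, not real API calls:
#
#   checker = Param_Check()
#   checker.news_check({"token": "abc", "pageSize": 20})     # passes silently
#   checker.news_check({"token": "abc", "pageSize": "20"})   # raises IncorrectParameterEntry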
| 30.610465
| 107
| 0.524976
|
b5d063b39b71768d23c50ec91eee6a60c28ec353
| 2,720
|
py
|
Python
|
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/djangoapps/discussion/django_comment_client/tests/test_models.py
|
osoco/better-ways-of-thinking-about-software
|
83e70d23c873509e22362a09a10d3510e10f6992
|
[
"MIT"
] | 3
|
2021-12-15T04:58:18.000Z
|
2022-02-06T12:15:37.000Z
|
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/djangoapps/discussion/django_comment_client/tests/test_models.py
|
osoco/better-ways-of-thinking-about-software
|
83e70d23c873509e22362a09a10d3510e10f6992
|
[
"MIT"
] | null | null | null |
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/djangoapps/discussion/django_comment_client/tests/test_models.py
|
osoco/better-ways-of-thinking-about-software
|
83e70d23c873509e22362a09a10d3510e10f6992
|
[
"MIT"
] | 1
|
2019-01-02T14:38:50.000Z
|
2019-01-02T14:38:50.000Z
|
"""
Tests for the django comment client integration models
"""
from django.test.testcases import TestCase
from opaque_keys.edx.keys import CourseKey
import openedx.core.djangoapps.django_comment_common.models as models
from xmodule.modulestore.tests.django_utils import TEST_DATA_MIXED_MODULESTORE, ModuleStoreTestCase
from xmodule.modulestore.tests.factories import ToyCourseFactory
class RoleClassTestCase(ModuleStoreTestCase):
"""
Tests for roles of the comment client service integration
"""
MODULESTORE = TEST_DATA_MIXED_MODULESTORE
def setUp(self):
super().setUp()
# For course ID, syntax edx/classname/classdate is important
        # because xmodule.course_module.id_to_location looks for a string to split
self.course_id = ToyCourseFactory.create().id
self.student_role = models.Role.objects.get_or_create(name="Student",
course_id=self.course_id)[0]
self.student_role.add_permission("delete_thread")
self.student_2_role = models.Role.objects.get_or_create(name="Student",
course_id=self.course_id)[0]
self.TA_role = models.Role.objects.get_or_create(name="Community TA",
course_id=self.course_id)[0]
self.course_id_2 = CourseKey.from_string("edX/6.002x/2012_Fall")
self.TA_role_2 = models.Role.objects.get_or_create(name="Community TA",
course_id=self.course_id_2)[0]
def test_has_permission(self):
# Whenever you add a permission to student_role,
# Roles with the same FORUM_ROLE in same class also receives the same
# permission.
# Is this desirable behavior?
assert self.student_role.has_permission('delete_thread')
assert self.student_2_role.has_permission('delete_thread')
assert not self.TA_role.has_permission('delete_thread')
def test_inherit_permission(self):
self.TA_role.inherit_permissions(self.student_role)
assert self.TA_role.has_permission('delete_thread')
# Despite being from 2 different courses, TA_role_2 can still inherit
# permissions from TA_role without error
self.TA_role_2.inherit_permissions(self.TA_role)
class PermissionClassTestCase(TestCase):
"""
Tests for permissions of the comment client service integration
"""
def setUp(self):
super().setUp()
self.permission = models.Permission.objects.get_or_create(name="test")[0]
def test_unicode(self):
assert str(self.permission) == 'test'
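# Hedged sketch (not part of the test suite): the behaviour exercised above
# boils down to the Role API used in setUp; the course_id value and the
# permission name here are placeholders.
#
#   role = models.Role.objects.get_or_create(name="Student", course_id=course_id)[0]
#   role.add_permission("vote")
#   assert role.has_permission("vote")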
| 41.212121
| 99
| 0.669853
|
48d6cd40388ee0d51def28d8391bb071fc1b9be1
| 5,107
|
py
|
Python
|
Example/callbacks.py
|
chrhansk/BayesianRNN
|
594e59737651db2eed9ab02869b4df417d631143
|
[
"MIT"
] | null | null | null |
Example/callbacks.py
|
chrhansk/BayesianRNN
|
594e59737651db2eed9ab02869b4df417d631143
|
[
"MIT"
] | null | null | null |
Example/callbacks.py
|
chrhansk/BayesianRNN
|
594e59737651db2eed9ab02869b4df417d631143
|
[
"MIT"
] | 1
|
2020-07-31T08:28:00.000Z
|
2020-07-31T08:28:00.000Z
|
import numpy as np
from keras.callbacks import Callback
from keras import backend as K
from keras import models
class ModelTest(Callback):
''' Test model at the end of every X epochs.
The model is tested using both MC dropout and the dropout
approximation. Output metrics for various losses are supported.
# Arguments
Xt: model inputs to test.
Yt: model outputs to get accuracy / error (ground truth).
T: number of samples to use in MC dropout.
test_every_X_epochs: test every test_every_X_epochs epochs.
batch_size: number of data points to put in each batch
(often larger than training batch size).
verbose: verbosity mode, 0 or 1.
loss: a string from ['binary', 'categorical', 'euclidean']
used to calculate the testing metric.
mean_y_train: mean of outputs in regression cases to add back
to model output ('euclidean' loss).
std_y_train: std of outputs in regression cases to add back
to model output ('euclidean' loss).
# References
- [Dropout: A simple way to prevent neural networks from overfitting](http://jmlr.org/papers/v15/srivastava14a.html)
- [Dropout as a Bayesian Approximation: Representing Model Uncertainty in Deep Learning](http://arxiv.org/abs/1506.02142)
'''
def __init__(self, Xt, Yt, T=10, test_every_X_epochs=1, batch_size=500, verbose=1,
loss=None, mean_y_train=None, std_y_train=None):
super(ModelTest, self).__init__()
self.Xt = Xt
self.Yt = np.array(Yt)
self.T = T
self.test_every_X_epochs = test_every_X_epochs
self.batch_size = batch_size
self.verbose = verbose
self.loss = loss
self.mean_y_train = mean_y_train
self.std_y_train = std_y_train
self._predict_stochastic = None
def predict_stochastic(self, X, batch_size=128, verbose=0):
'''Generate output predictions for the input samples
batch by batch, using stochastic forward passes. If
dropout is used at training, during prediction network
units will be dropped at random as well. This procedure
can be used for MC dropout (see [ModelTest callbacks](callbacks.md)).
# Arguments
X: the input data, as a numpy array.
batch_size: integer.
verbose: verbosity mode, 0 or 1.
# Returns
A numpy array of predictions.
# References
- [Dropout: A simple way to prevent neural networks from overfitting](http://jmlr.org/papers/v15/srivastava14a.html)
- [Dropout as a Bayesian Approximation: Representing Model Uncertainty in Deep Learning](http://arxiv.org/abs/1506.02142)
'''
X = models.standardize_X(X)
if self._predict_stochastic is None: # we only get self.model after init
self._predict_stochastic = K.function([self.model.X_test], [self.model.y_train])
return self.model._predict_loop(self._predict_stochastic, X, batch_size, verbose)[0]
def on_epoch_begin(self, epoch, logs={}):
if epoch % self.test_every_X_epochs != 0:
return
model_output = self.model.predict(self.Xt, batch_size=self.batch_size,
verbose=self.verbose)
MC_model_output = []
for _ in range(self.T):
MC_model_output += [self.predict_stochastic(self.Xt,
batch_size=self.batch_size,
verbose=self.verbose)]
MC_model_output = np.array(MC_model_output)
MC_model_output_mean = np.mean(MC_model_output, 0)
if self.loss == 'binary':
standard_acc = np.mean(self.Yt == np.round(model_output.flatten()))
MC_acc = np.mean(self.Yt == np.round(MC_model_output_mean.flatten()))
print(("Standard accuracy at epoch %05d: %0.5f" % (epoch, float(standard_acc))))
print(("MC accuracy at epoch %05d: %0.5f" % (epoch, float(MC_acc))))
elif self.loss == 'categorical':
standard_acc = np.mean(np.argmax(self.Yt, axis=-1) == np.argmax(model_output, axis=-1))
MC_acc = np.mean(np.argmax(self.Yt, axis=-1) == np.argmax(MC_model_output_mean, axis=-1))
print(("Standard accuracy at epoch %05d: %0.5f" % (epoch, float(standard_acc))))
print(("MC accuracy at epoch %05d: %0.5f" % (epoch, float(MC_acc))))
elif self.loss == 'euclidean':
model_output = model_output * self.std_y_train + self.mean_y_train
standard_err = np.mean((self.Yt - model_output)**2.0, 0)**0.5
MC_model_output_mean = MC_model_output_mean * self.std_y_train + self.mean_y_train
MC_err = np.mean((self.Yt - MC_model_output_mean)**2.0, 0)**0.5
print(("Standard error at epoch %05d: %0.5f" % (epoch, float(standard_err))))
print(("MC error at epoch %05d: %0.5f" % (epoch, float(MC_err))))
else:
            raise Exception('No loss: %s' % self.loss)
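# Hedged usage sketch: the callback is attached like any other Keras callback;
# the model, data and loss type below are placeholders.
#
#   test_cb = ModelTest(X_test, y_test, T=10, test_every_X_epochs=1,
#                       batch_size=500, loss='categorical')
#   model.fit(X_train, y_train, callbacks=[test_cb])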
| 50.068627
| 133
| 0.628941
|
f2980776389746a98319b28669546605a63c76e7
| 3,343
|
py
|
Python
|
torchbench/semantic_segmentation/camvid.py
|
xvdp/torchbench
|
cf41be6be15b97ee8d82567d815bf8dafe5a0206
|
[
"Apache-2.0"
] | null | null | null |
torchbench/semantic_segmentation/camvid.py
|
xvdp/torchbench
|
cf41be6be15b97ee8d82567d815bf8dafe5a0206
|
[
"Apache-2.0"
] | null | null | null |
torchbench/semantic_segmentation/camvid.py
|
xvdp/torchbench
|
cf41be6be15b97ee8d82567d815bf8dafe5a0206
|
[
"Apache-2.0"
] | null | null | null |
from torch.utils.data import DataLoader
from sotabenchapi.core import BenchmarkResult
from torchbench.datasets import CamVid
from torchbench.utils import default_data_to_device, send_model_to_device
from torchbench.semantic_segmentation.transforms import (
Normalize,
ToTensor,
Compose,
)
from torchbench.semantic_segmentation.utils import (
default_seg_collate_fn,
default_seg_output_transform,
evaluate_segmentation,
)
class CamVid:
dataset = CamVid
normalize = Normalize(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
)
transforms = Compose([ToTensor(), normalize])
send_data_to_device = default_data_to_device
collate_fn = default_seg_collate_fn
model_output_transform = default_seg_output_transform
task = "Semantic Segmentation"
@classmethod
def benchmark(
cls,
model,
model_description=None,
input_transform=None,
target_transform=None,
transforms=None,
model_output_transform=None,
collate_fn=None,
send_data_to_device=None,
device: str = "cuda",
data_root: str = "./.data/vision/camvid",
num_workers: int = 4,
batch_size: int = 32,
num_gpu: int = 1,
paper_model_name: str = None,
paper_arxiv_id: str = None,
paper_pwc_id: str = None,
paper_results: dict = None,
pytorch_hub_url: str = None,
force: bool = False
) -> BenchmarkResult:
config = locals()
model, device = send_model_to_device(
model, device=device, num_gpu=num_gpu
)
model.eval()
        if not (input_transform or target_transform or transforms):
transforms = cls.transforms
if not model_output_transform:
model_output_transform = cls.model_output_transform
if not send_data_to_device:
send_data_to_device = cls.send_data_to_device
if not collate_fn:
collate_fn = cls.collate_fn
test_dataset = cls.dataset(
root=data_root,
split="val",
transform=input_transform,
target_transform=target_transform,
transforms=transforms,
)
test_loader = DataLoader(
test_dataset,
batch_size=batch_size,
shuffle=False,
num_workers=num_workers,
pin_memory=True,
collate_fn=collate_fn,
)
test_loader.no_classes = 12 # Number of classes for CamVid
test_results, speed_mem_metrics, run_hash = evaluate_segmentation(
model=model,
test_loader=test_loader,
model_output_transform=model_output_transform,
send_data_to_device=send_data_to_device,
device=device,
force=force
)
print(test_results)
return BenchmarkResult(
task=cls.task,
config=config,
dataset=cls.dataset.__name__,
results=test_results,
speed_mem_metrics=speed_mem_metrics,
pytorch_hub_id=pytorch_hub_url,
model=paper_model_name,
model_description=model_description,
arxiv_id=paper_arxiv_id,
pwc_id=paper_pwc_id,
paper_results=paper_results,
)
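# Hedged usage sketch: a segmentation model is evaluated by calling the
# classmethod directly; the model object and paper metadata are placeholders.
#
#   result = CamVid.benchmark(
#       model=my_segmentation_model,
#       paper_model_name='My-Model',
#       paper_arxiv_id='0000.00000',
#       device='cuda',
#       batch_size=16,
#   )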
| 29.584071
| 74
| 0.63117
|
e981ab7d535dc27a6a08faad856499422a942877
| 1,068
|
py
|
Python
|
model-optimizer/extensions/front/tf/bucketize_ext.py
|
calvinfeng/openvino
|
11f591c16852637506b1b40d083b450e56d0c8ac
|
[
"Apache-2.0"
] | null | null | null |
model-optimizer/extensions/front/tf/bucketize_ext.py
|
calvinfeng/openvino
|
11f591c16852637506b1b40d083b450e56d0c8ac
|
[
"Apache-2.0"
] | 19
|
2021-03-26T08:11:00.000Z
|
2022-02-21T13:06:26.000Z
|
model-optimizer/extensions/front/tf/bucketize_ext.py
|
calvinfeng/openvino
|
11f591c16852637506b1b40d083b450e56d0c8ac
|
[
"Apache-2.0"
] | 1
|
2021-07-28T17:30:46.000Z
|
2021-07-28T17:30:46.000Z
|
"""
Copyright (C) 2018-2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
from extensions.ops.bucketize import Bucketize
from mo.front.extractor import FrontExtractorOp
class BucketizeFrontExtractor(FrontExtractorOp):
op = 'Bucketize'
enabled = True
@classmethod
def extract(cls, node):
        boundaries = np.array(node.pb.attr['boundaries'].list.f, dtype=float)
Bucketize.update_node_stat(node, {'boundaries': boundaries, 'with_right_bound': False, 'output_type': np.int32})
return cls.enabled
| 33.375
| 120
| 0.749064
|
65f23c8bcca0213c673a670e1ff6677c65044d97
| 40,664
|
py
|
Python
|
test/functional/feature_segwit.py
|
bugls/worldcoin
|
214767f3f2aaea7312bccc6593ad4dce10447576
|
[
"MIT"
] | 3
|
2021-11-09T00:54:55.000Z
|
2022-01-14T04:28:47.000Z
|
test/functional/feature_segwit.py
|
bugls/worldcoin
|
214767f3f2aaea7312bccc6593ad4dce10447576
|
[
"MIT"
] | 1
|
2021-10-31T18:00:16.000Z
|
2021-10-31T18:00:16.000Z
|
test/functional/feature_segwit.py
|
bugls/worldcoin
|
214767f3f2aaea7312bccc6593ad4dce10447576
|
[
"MIT"
] | 2
|
2021-03-29T14:53:31.000Z
|
2022-02-06T22:17:04.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2016-2018 The Worldcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the SegWit changeover logic."""
from decimal import Decimal
from test_framework.address import (
key_to_p2pkh,
key_to_p2sh_p2wpkh,
key_to_p2wpkh,
program_to_witness,
script_to_p2sh,
script_to_p2sh_p2wsh,
script_to_p2wsh,
)
from test_framework.blocktools import witness_script, send_to_witness
from test_framework.messages import COIN, COutPoint, CTransaction, CTxIn, CTxOut, FromHex, sha256, ToHex
from test_framework.script import CScript, OP_HASH160, OP_CHECKSIG, OP_0, hash160, OP_EQUAL, OP_DUP, OP_EQUALVERIFY, OP_1, OP_2, OP_CHECKMULTISIG, OP_TRUE, OP_DROP
from test_framework.test_framework import WorldcoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error, bytes_to_hex_str, connect_nodes, hex_str_to_bytes, sync_blocks, try_rpc
from io import BytesIO
NODE_0 = 0
NODE_2 = 2
WIT_V0 = 0
WIT_V1 = 1
def getutxo(txid):
utxo = {}
utxo["vout"] = 0
utxo["txid"] = txid
return utxo
def find_spendable_utxo(node, min_value):
for utxo in node.listunspent(query_options={'minimumAmount': min_value}):
if utxo['spendable']:
return utxo
raise AssertionError("Unspent output equal or higher than %s not found" % min_value)
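# Hedged illustration: getutxo() merely wraps a txid into the {"txid", "vout"}
# shape expected by send_to_witness(), always using output 0, and
# find_spendable_utxo() returns the first spendable listunspent entry worth at
# least min_value. Example shape (placeholder txid):
#   getutxo("aa" * 32) -> {"vout": 0, "txid": "aaaa...aa"}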
class SegWitTest(WorldcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
# This test tests SegWit both pre and post-activation, so use the normal BIP9 activation.
self.extra_args = [
[
"-rpcserialversion=0",
"-vbparams=segwit:0:999999999999",
"-addresstype=legacy",
"-deprecatedrpc=addwitnessaddress",
],
[
"-blockversion=4",
"-rpcserialversion=1",
"-vbparams=segwit:0:999999999999",
"-addresstype=legacy",
"-deprecatedrpc=addwitnessaddress",
],
[
"-blockversion=536870915",
"-vbparams=segwit:0:999999999999",
"-addresstype=legacy",
"-deprecatedrpc=addwitnessaddress",
],
]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_network(self):
super().setup_network()
connect_nodes(self.nodes[0], 2)
self.sync_all()
def success_mine(self, node, txid, sign, redeem_script=""):
send_to_witness(1, node, getutxo(txid), self.pubkey[0], False, Decimal("49.998"), sign, redeem_script)
block = node.generate(1)
assert_equal(len(node.getblock(block[0])["tx"]), 2)
sync_blocks(self.nodes)
def skip_mine(self, node, txid, sign, redeem_script=""):
send_to_witness(1, node, getutxo(txid), self.pubkey[0], False, Decimal("49.998"), sign, redeem_script)
block = node.generate(1)
assert_equal(len(node.getblock(block[0])["tx"]), 1)
sync_blocks(self.nodes)
def fail_accept(self, node, error_msg, txid, sign, redeem_script=""):
assert_raises_rpc_error(-26, error_msg, send_to_witness, use_p2wsh=1, node=node, utxo=getutxo(txid), pubkey=self.pubkey[0], encode_p2sh=False, amount=Decimal("49.998"), sign=sign, insert_redeem_script=redeem_script)
def run_test(self):
self.nodes[0].generate(161) #block 161
self.log.info("Verify sigops are counted in GBT with pre-BIP141 rules before the fork")
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)
tmpl = self.nodes[0].getblocktemplate({})
assert(tmpl['sizelimit'] == 1000000)
assert('weightlimit' not in tmpl)
assert(tmpl['sigoplimit'] == 20000)
assert(tmpl['transactions'][0]['hash'] == txid)
assert(tmpl['transactions'][0]['sigops'] == 2)
tmpl = self.nodes[0].getblocktemplate({'rules':['segwit']})
assert(tmpl['sizelimit'] == 1000000)
assert('weightlimit' not in tmpl)
assert(tmpl['sigoplimit'] == 20000)
assert(tmpl['transactions'][0]['hash'] == txid)
assert(tmpl['transactions'][0]['sigops'] == 2)
self.nodes[0].generate(1) #block 162
balance_presetup = self.nodes[0].getbalance()
self.pubkey = []
p2sh_ids = [] # p2sh_ids[NODE][VER] is an array of txids that spend to a witness version VER pkscript to an address for NODE embedded in p2sh
wit_ids = [] # wit_ids[NODE][VER] is an array of txids that spend to a witness version VER pkscript to an address for NODE via bare witness
for i in range(3):
newaddress = self.nodes[i].getnewaddress()
self.pubkey.append(self.nodes[i].getaddressinfo(newaddress)["pubkey"])
multiscript = CScript([OP_1, hex_str_to_bytes(self.pubkey[-1]), OP_1, OP_CHECKMULTISIG])
p2sh_addr = self.nodes[i].addwitnessaddress(newaddress)
bip173_addr = self.nodes[i].addwitnessaddress(newaddress, False)
p2sh_ms_addr = self.nodes[i].addmultisigaddress(1, [self.pubkey[-1]], '', 'p2sh-segwit')['address']
bip173_ms_addr = self.nodes[i].addmultisigaddress(1, [self.pubkey[-1]], '', 'bech32')['address']
assert_equal(p2sh_addr, key_to_p2sh_p2wpkh(self.pubkey[-1]))
assert_equal(bip173_addr, key_to_p2wpkh(self.pubkey[-1]))
assert_equal(p2sh_ms_addr, script_to_p2sh_p2wsh(multiscript))
assert_equal(bip173_ms_addr, script_to_p2wsh(multiscript))
p2sh_ids.append([])
wit_ids.append([])
for v in range(2):
p2sh_ids[i].append([])
wit_ids[i].append([])
for i in range(5):
for n in range(3):
for v in range(2):
wit_ids[n][v].append(send_to_witness(v, self.nodes[0], find_spendable_utxo(self.nodes[0], 50), self.pubkey[n], False, Decimal("49.999")))
p2sh_ids[n][v].append(send_to_witness(v, self.nodes[0], find_spendable_utxo(self.nodes[0], 50), self.pubkey[n], True, Decimal("49.999")))
self.nodes[0].generate(1) #block 163
sync_blocks(self.nodes)
# Make sure all nodes recognize the transactions as theirs
assert_equal(self.nodes[0].getbalance(), balance_presetup - 60*50 + 20*Decimal("49.999") + 50)
assert_equal(self.nodes[1].getbalance(), 20*Decimal("49.999"))
assert_equal(self.nodes[2].getbalance(), 20*Decimal("49.999"))
self.nodes[0].generate(260) #block 423
sync_blocks(self.nodes)
self.log.info("Verify witness txs are skipped for mining before the fork")
self.skip_mine(self.nodes[2], wit_ids[NODE_2][WIT_V0][0], True) #block 424
self.skip_mine(self.nodes[2], wit_ids[NODE_2][WIT_V1][0], True) #block 425
self.skip_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V0][0], True) #block 426
self.skip_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V1][0], True) #block 427
self.log.info("Verify unsigned p2sh witness txs without a redeem script are invalid")
self.fail_accept(self.nodes[2], "mandatory-script-verify-flag", p2sh_ids[NODE_2][WIT_V0][1], False)
self.fail_accept(self.nodes[2], "mandatory-script-verify-flag", p2sh_ids[NODE_2][WIT_V1][1], False)
self.nodes[2].generate(4) # blocks 428-431
self.log.info("Verify previous witness txs skipped for mining can now be mined")
assert_equal(len(self.nodes[2].getrawmempool()), 4)
block = self.nodes[2].generate(1) #block 432 (first block with new rules; 432 = 144 * 3)
sync_blocks(self.nodes)
assert_equal(len(self.nodes[2].getrawmempool()), 0)
segwit_tx_list = self.nodes[2].getblock(block[0])["tx"]
assert_equal(len(segwit_tx_list), 5)
self.log.info("Verify default node can't accept txs with missing witness")
# unsigned, no scriptsig
self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", wit_ids[NODE_0][WIT_V0][0], False)
self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", wit_ids[NODE_0][WIT_V1][0], False)
self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", p2sh_ids[NODE_0][WIT_V0][0], False)
self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", p2sh_ids[NODE_0][WIT_V1][0], False)
# unsigned with redeem script
self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", p2sh_ids[NODE_0][WIT_V0][0], False, witness_script(False, self.pubkey[0]))
self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", p2sh_ids[NODE_0][WIT_V1][0], False, witness_script(True, self.pubkey[0]))
self.log.info("Verify block and transaction serialization rpcs return differing serializations depending on rpc serialization flag")
assert(self.nodes[2].getblock(block[0], False) != self.nodes[0].getblock(block[0], False))
assert(self.nodes[1].getblock(block[0], False) == self.nodes[2].getblock(block[0], False))
for i in range(len(segwit_tx_list)):
tx = FromHex(CTransaction(), self.nodes[2].gettransaction(segwit_tx_list[i])["hex"])
assert(self.nodes[2].getrawtransaction(segwit_tx_list[i]) != self.nodes[0].getrawtransaction(segwit_tx_list[i]))
assert(self.nodes[1].getrawtransaction(segwit_tx_list[i], 0) == self.nodes[2].getrawtransaction(segwit_tx_list[i]))
assert(self.nodes[0].getrawtransaction(segwit_tx_list[i]) != self.nodes[2].gettransaction(segwit_tx_list[i])["hex"])
assert(self.nodes[1].getrawtransaction(segwit_tx_list[i]) == self.nodes[2].gettransaction(segwit_tx_list[i])["hex"])
assert(self.nodes[0].getrawtransaction(segwit_tx_list[i]) == bytes_to_hex_str(tx.serialize_without_witness()))
self.log.info("Verify witness txs without witness data are invalid after the fork")
self.fail_accept(self.nodes[2], 'non-mandatory-script-verify-flag (Witness program hash mismatch) (code 64)', wit_ids[NODE_2][WIT_V0][2], sign=False)
self.fail_accept(self.nodes[2], 'non-mandatory-script-verify-flag (Witness program was passed an empty witness) (code 64)', wit_ids[NODE_2][WIT_V1][2], sign=False)
self.fail_accept(self.nodes[2], 'non-mandatory-script-verify-flag (Witness program hash mismatch) (code 64)', p2sh_ids[NODE_2][WIT_V0][2], sign=False, redeem_script=witness_script(False, self.pubkey[2]))
self.fail_accept(self.nodes[2], 'non-mandatory-script-verify-flag (Witness program was passed an empty witness) (code 64)', p2sh_ids[NODE_2][WIT_V1][2], sign=False, redeem_script=witness_script(True, self.pubkey[2]))
self.log.info("Verify default node can now use witness txs")
self.success_mine(self.nodes[0], wit_ids[NODE_0][WIT_V0][0], True) #block 432
self.success_mine(self.nodes[0], wit_ids[NODE_0][WIT_V1][0], True) #block 433
self.success_mine(self.nodes[0], p2sh_ids[NODE_0][WIT_V0][0], True) #block 434
self.success_mine(self.nodes[0], p2sh_ids[NODE_0][WIT_V1][0], True) #block 435
self.log.info("Verify sigops are counted in GBT with BIP141 rules after the fork")
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)
tmpl = self.nodes[0].getblocktemplate({'rules':['segwit']})
assert(tmpl['sizelimit'] >= 3999577) # actual maximum size is lower due to minimum mandatory non-witness data
assert(tmpl['weightlimit'] == 4000000)
assert(tmpl['sigoplimit'] == 80000)
assert(tmpl['transactions'][0]['txid'] == txid)
assert(tmpl['transactions'][0]['sigops'] == 8)
self.nodes[0].generate(1) # Mine a block to clear the gbt cache
self.log.info("Non-segwit miners are able to use GBT response after activation.")
# Create a 3-tx chain: tx1 (non-segwit input, paying to a segwit output) ->
# tx2 (segwit input, paying to a non-segwit output) ->
# tx3 (non-segwit input, paying to a non-segwit output).
# tx1 is allowed to appear in the block, but no others.
txid1 = send_to_witness(1, self.nodes[0], find_spendable_utxo(self.nodes[0], 50), self.pubkey[0], False, Decimal("49.996"))
hex_tx = self.nodes[0].gettransaction(txid)['hex']
tx = FromHex(CTransaction(), hex_tx)
assert(tx.wit.is_null()) # This should not be a segwit input
assert(txid1 in self.nodes[0].getrawmempool())
# Now create tx2, which will spend from txid1.
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(int(txid1, 16), 0), b''))
tx.vout.append(CTxOut(int(49.99 * COIN), CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
tx2_hex = self.nodes[0].signrawtransactionwithwallet(ToHex(tx))['hex']
txid2 = self.nodes[0].sendrawtransaction(tx2_hex)
tx = FromHex(CTransaction(), tx2_hex)
assert(not tx.wit.is_null())
# Now create tx3, which will spend from txid2
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(int(txid2, 16), 0), b""))
tx.vout.append(CTxOut(int(49.95 * COIN), CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE]))) # Huge fee
tx.calc_sha256()
txid3 = self.nodes[0].sendrawtransaction(ToHex(tx))
assert(tx.wit.is_null())
assert(txid3 in self.nodes[0].getrawmempool())
# Now try calling getblocktemplate() without segwit support.
template = self.nodes[0].getblocktemplate()
# Check that tx1 is the only transaction of the 3 in the template.
template_txids = [ t['txid'] for t in template['transactions'] ]
assert(txid2 not in template_txids and txid3 not in template_txids)
assert(txid1 in template_txids)
# Check that running with segwit support results in all 3 being included.
template = self.nodes[0].getblocktemplate({"rules": ["segwit"]})
template_txids = [ t['txid'] for t in template['transactions'] ]
assert(txid1 in template_txids)
assert(txid2 in template_txids)
assert(txid3 in template_txids)
# Check that wtxid is properly reported in mempool entry
assert_equal(int(self.nodes[0].getmempoolentry(txid3)["wtxid"], 16), tx.calc_sha256(True))
# Mine a block to clear the gbt cache again.
self.nodes[0].generate(1)
self.log.info("Verify behaviour of importaddress, addwitnessaddress and listunspent")
# Some public keys to be used later
pubkeys = [
"0363D44AABD0F1699138239DF2F042C3282C0671CC7A76826A55C8203D90E39242", # cPiM8Ub4heR9NBYmgVzJQiUH1if44GSBGiqaeJySuL2BKxubvgwb
"02D3E626B3E616FC8662B489C123349FECBFC611E778E5BE739B257EAE4721E5BF", # cPpAdHaD6VoYbW78kveN2bsvb45Q7G5PhaPApVUGwvF8VQ9brD97
"04A47F2CBCEFFA7B9BCDA184E7D5668D3DA6F9079AD41E422FA5FD7B2D458F2538A62F5BD8EC85C2477F39650BD391EA6250207065B2A81DA8B009FC891E898F0E", # 91zqCU5B9sdWxzMt1ca3VzbtVm2YM6Hi5Rxn4UDtxEaN9C9nzXV
"02A47F2CBCEFFA7B9BCDA184E7D5668D3DA6F9079AD41E422FA5FD7B2D458F2538", # cPQFjcVRpAUBG8BA9hzr2yEzHwKoMgLkJZBBtK9vJnvGJgMjzTbd
"036722F784214129FEB9E8129D626324F3F6716555B603FFE8300BBCB882151228", # cQGtcm34xiLjB1v7bkRa4V3aAc9tS2UTuBZ1UnZGeSeNy627fN66
"0266A8396EE936BF6D99D17920DB21C6C7B1AB14C639D5CD72B300297E416FD2EC", # cTW5mR5M45vHxXkeChZdtSPozrFwFgmEvTNnanCW6wrqwaCZ1X7K
"0450A38BD7F0AC212FEBA77354A9B036A32E0F7C81FC4E0C5ADCA7C549C4505D2522458C2D9AE3CEFD684E039194B72C8A10F9CB9D4764AB26FCC2718D421D3B84", # 92h2XPssjBpsJN5CqSP7v9a7cf2kgDunBC6PDFwJHMACM1rrVBJ
]
# Import a compressed key and an uncompressed key, generate some multisig addresses
self.nodes[0].importprivkey("92e6XLo5jVAVwrQKPNTs93oQco8f8sDNBcpv73Dsrs397fQtFQn")
uncompressed_spendable_address = ["mvozP4UwyGD2mGZU4D2eMvMLPB9WkMmMQu"]
self.nodes[0].importprivkey("cNC8eQ5dg3mFAVePDX4ddmPYpPbw41r9bm2jd1nLJT77e6RrzTRR")
compressed_spendable_address = ["mmWQubrDomqpgSYekvsU7HWEVjLFHAakLe"]
assert ((self.nodes[0].getaddressinfo(uncompressed_spendable_address[0])['iscompressed'] == False))
assert ((self.nodes[0].getaddressinfo(compressed_spendable_address[0])['iscompressed'] == True))
self.nodes[0].importpubkey(pubkeys[0])
compressed_solvable_address = [key_to_p2pkh(pubkeys[0])]
self.nodes[0].importpubkey(pubkeys[1])
compressed_solvable_address.append(key_to_p2pkh(pubkeys[1]))
self.nodes[0].importpubkey(pubkeys[2])
uncompressed_solvable_address = [key_to_p2pkh(pubkeys[2])]
spendable_anytime = [] # These outputs should be seen anytime after importprivkey and addmultisigaddress
spendable_after_importaddress = [] # These outputs should be seen after importaddress
solvable_after_importaddress = [] # These outputs should be seen after importaddress but not spendable
unsolvable_after_importaddress = [] # These outputs should be unsolvable after importaddress
solvable_anytime = [] # These outputs should be solvable after importpubkey
unseen_anytime = [] # These outputs should never be seen
uncompressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [uncompressed_spendable_address[0], compressed_spendable_address[0]])['address'])
uncompressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [uncompressed_spendable_address[0], uncompressed_spendable_address[0]])['address'])
compressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], compressed_spendable_address[0]])['address'])
uncompressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], uncompressed_solvable_address[0]])['address'])
compressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], compressed_solvable_address[0]])['address'])
compressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_solvable_address[0], compressed_solvable_address[1]])['address'])
unknown_address = ["mtKKyoHabkk6e4ppT7NaM7THqPUt7AzPrT", "2NDP3jLWAFT8NDAiUa9qiE6oBt2awmMq7Dx"]
# Test multisig_without_privkey
# We have 2 public keys without private keys, use addmultisigaddress to add to wallet.
# Money sent to P2SH of multisig of this should only be seen after importaddress with the BASE58 P2SH address.
multisig_without_privkey_address = self.nodes[0].addmultisigaddress(2, [pubkeys[3], pubkeys[4]])['address']
script = CScript([OP_2, hex_str_to_bytes(pubkeys[3]), hex_str_to_bytes(pubkeys[4]), OP_2, OP_CHECKMULTISIG])
solvable_after_importaddress.append(CScript([OP_HASH160, hash160(script), OP_EQUAL]))
for i in compressed_spendable_address:
v = self.nodes[0].getaddressinfo(i)
if (v['isscript']):
[bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
# p2sh multisig with compressed keys should always be spendable
spendable_anytime.extend([p2sh])
# bare multisig can be watched and signed, but is not treated as ours
solvable_after_importaddress.extend([bare])
# P2WSH and P2SH(P2WSH) multisig with compressed keys are spendable after direct importaddress
spendable_after_importaddress.extend([p2wsh, p2sh_p2wsh])
else:
[p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# normal P2PKH and P2PK with compressed keys should always be spendable
spendable_anytime.extend([p2pkh, p2pk])
# P2SH_P2PK, P2SH_P2PKH with compressed keys are spendable after direct importaddress
spendable_after_importaddress.extend([p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh])
# P2WPKH and P2SH_P2WPKH with compressed keys should always be spendable
spendable_anytime.extend([p2wpkh, p2sh_p2wpkh])
for i in uncompressed_spendable_address:
v = self.nodes[0].getaddressinfo(i)
if (v['isscript']):
[bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
# p2sh multisig with uncompressed keys should always be spendable
spendable_anytime.extend([p2sh])
# bare multisig can be watched and signed, but is not treated as ours
solvable_after_importaddress.extend([bare])
# P2WSH and P2SH(P2WSH) multisig with uncompressed keys are never seen
unseen_anytime.extend([p2wsh, p2sh_p2wsh])
else:
[p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# normal P2PKH and P2PK with uncompressed keys should always be spendable
spendable_anytime.extend([p2pkh, p2pk])
# P2SH_P2PK and P2SH_P2PKH are spendable after direct importaddress
spendable_after_importaddress.extend([p2sh_p2pk, p2sh_p2pkh])
# Witness output types with uncompressed keys are never seen
unseen_anytime.extend([p2wpkh, p2sh_p2wpkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh])
for i in compressed_solvable_address:
v = self.nodes[0].getaddressinfo(i)
if (v['isscript']):
# Multisig without private is not seen after addmultisigaddress, but seen after importaddress
[bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
solvable_after_importaddress.extend([bare, p2sh, p2wsh, p2sh_p2wsh])
else:
[p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# normal P2PKH, P2PK, P2WPKH and P2SH_P2WPKH with compressed keys should always be seen
solvable_anytime.extend([p2pkh, p2pk, p2wpkh, p2sh_p2wpkh])
# P2SH_P2PK, P2SH_P2PKH with compressed keys are seen after direct importaddress
solvable_after_importaddress.extend([p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh])
for i in uncompressed_solvable_address:
v = self.nodes[0].getaddressinfo(i)
if (v['isscript']):
[bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
# Base uncompressed multisig without private is not seen after addmultisigaddress, but seen after importaddress
solvable_after_importaddress.extend([bare, p2sh])
# P2WSH and P2SH(P2WSH) multisig with uncompressed keys are never seen
unseen_anytime.extend([p2wsh, p2sh_p2wsh])
else:
[p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# normal P2PKH and P2PK with uncompressed keys should always be seen
solvable_anytime.extend([p2pkh, p2pk])
# P2SH_P2PK, P2SH_P2PKH with uncompressed keys are seen after direct importaddress
solvable_after_importaddress.extend([p2sh_p2pk, p2sh_p2pkh])
# Witness output types with uncompressed keys are never seen
unseen_anytime.extend([p2wpkh, p2sh_p2wpkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh])
op1 = CScript([OP_1])
op0 = CScript([OP_0])
# 2N7MGY19ti4KDMSzRfPAssP6Pxyuxoi6jLe is the P2SH(P2PKH) version of mjoE3sSrb8ByYEvgnC3Aox86u1CHnfJA4V
unsolvable_address = ["mjoE3sSrb8ByYEvgnC3Aox86u1CHnfJA4V", "2N7MGY19ti4KDMSzRfPAssP6Pxyuxoi6jLe", script_to_p2sh(op1), script_to_p2sh(op0)]
unsolvable_address_key = hex_str_to_bytes("02341AEC7587A51CDE5279E0630A531AEA2615A9F80B17E8D9376327BAEAA59E3D")
unsolvablep2pkh = CScript([OP_DUP, OP_HASH160, hash160(unsolvable_address_key), OP_EQUALVERIFY, OP_CHECKSIG])
unsolvablep2wshp2pkh = CScript([OP_0, sha256(unsolvablep2pkh)])
p2shop0 = CScript([OP_HASH160, hash160(op0), OP_EQUAL])
p2wshop1 = CScript([OP_0, sha256(op1)])
unsolvable_after_importaddress.append(unsolvablep2pkh)
unsolvable_after_importaddress.append(unsolvablep2wshp2pkh)
unsolvable_after_importaddress.append(op1) # OP_1 will be imported as script
unsolvable_after_importaddress.append(p2wshop1)
unseen_anytime.append(op0) # OP_0 will be imported as P2SH address with no script provided
unsolvable_after_importaddress.append(p2shop0)
spendable_txid = []
solvable_txid = []
spendable_txid.append(self.mine_and_test_listunspent(spendable_anytime, 2))
solvable_txid.append(self.mine_and_test_listunspent(solvable_anytime, 1))
self.mine_and_test_listunspent(spendable_after_importaddress + solvable_after_importaddress + unseen_anytime + unsolvable_after_importaddress, 0)
importlist = []
for i in compressed_spendable_address + uncompressed_spendable_address + compressed_solvable_address + uncompressed_solvable_address:
v = self.nodes[0].getaddressinfo(i)
if (v['isscript']):
bare = hex_str_to_bytes(v['hex'])
importlist.append(bytes_to_hex_str(bare))
importlist.append(bytes_to_hex_str(CScript([OP_0, sha256(bare)])))
else:
pubkey = hex_str_to_bytes(v['pubkey'])
p2pk = CScript([pubkey, OP_CHECKSIG])
p2pkh = CScript([OP_DUP, OP_HASH160, hash160(pubkey), OP_EQUALVERIFY, OP_CHECKSIG])
importlist.append(bytes_to_hex_str(p2pk))
importlist.append(bytes_to_hex_str(p2pkh))
importlist.append(bytes_to_hex_str(CScript([OP_0, hash160(pubkey)])))
importlist.append(bytes_to_hex_str(CScript([OP_0, sha256(p2pk)])))
importlist.append(bytes_to_hex_str(CScript([OP_0, sha256(p2pkh)])))
importlist.append(bytes_to_hex_str(unsolvablep2pkh))
importlist.append(bytes_to_hex_str(unsolvablep2wshp2pkh))
importlist.append(bytes_to_hex_str(op1))
importlist.append(bytes_to_hex_str(p2wshop1))
for i in importlist:
# import all generated addresses. The wallet already has the private keys for some of these, so catch JSON RPC
# exceptions and continue.
try_rpc(-4, "The wallet already contains the private key for this address or script", self.nodes[0].importaddress, i, "", False, True)
self.nodes[0].importaddress(script_to_p2sh(op0)) # import OP_0 as address only
self.nodes[0].importaddress(multisig_without_privkey_address) # Test multisig_without_privkey
spendable_txid.append(self.mine_and_test_listunspent(spendable_anytime + spendable_after_importaddress, 2))
solvable_txid.append(self.mine_and_test_listunspent(solvable_anytime + solvable_after_importaddress, 1))
self.mine_and_test_listunspent(unsolvable_after_importaddress, 1)
self.mine_and_test_listunspent(unseen_anytime, 0)
# addwitnessaddress should refuse to return a witness address if an uncompressed key is used
# note that no witness address should be returned by unsolvable addresses
for i in uncompressed_spendable_address + uncompressed_solvable_address + unknown_address + unsolvable_address:
assert_raises_rpc_error(-4, "Public key or redeemscript not known to wallet, or the key is uncompressed", self.nodes[0].addwitnessaddress, i)
# addwitnessaddress should return a witness addresses even if keys are not in the wallet
self.nodes[0].addwitnessaddress(multisig_without_privkey_address)
for i in compressed_spendable_address + compressed_solvable_address:
witaddress = self.nodes[0].addwitnessaddress(i)
# addwitnessaddress should return the same address if it is a known P2SH-witness address
assert_equal(witaddress, self.nodes[0].addwitnessaddress(witaddress))
spendable_txid.append(self.mine_and_test_listunspent(spendable_anytime + spendable_after_importaddress, 2))
solvable_txid.append(self.mine_and_test_listunspent(solvable_anytime + solvable_after_importaddress, 1))
self.mine_and_test_listunspent(unsolvable_after_importaddress, 1)
self.mine_and_test_listunspent(unseen_anytime, 0)
# Repeat some tests. This time we don't add witness scripts with importaddress
# Import a compressed key and an uncompressed key, generate some multisig addresses
self.nodes[0].importprivkey("927pw6RW8ZekycnXqBQ2JS5nPyo1yRfGNN8oq74HeddWSpafDJH")
uncompressed_spendable_address = ["mguN2vNSCEUh6rJaXoAVwY3YZwZvEmf5xi"]
self.nodes[0].importprivkey("cMcrXaaUC48ZKpcyydfFo8PxHAjpsYLhdsp6nmtB3E2ER9UUHWnw")
compressed_spendable_address = ["n1UNmpmbVUJ9ytXYXiurmGPQ3TRrXqPWKL"]
self.nodes[0].importpubkey(pubkeys[5])
compressed_solvable_address = [key_to_p2pkh(pubkeys[5])]
self.nodes[0].importpubkey(pubkeys[6])
uncompressed_solvable_address = [key_to_p2pkh(pubkeys[6])]
        spendable_anytime = [] # These outputs should always be spendable
        spendable_after_addwitnessaddress = [] # These outputs should be spendable after addwitnessaddress
        solvable_after_addwitnessaddress = [] # These outputs should be seen after addwitnessaddress but not spendable
        solvable_anytime = [] # These outputs should always be solvable
        unseen_anytime = [] # These outputs should never be seen
uncompressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [uncompressed_spendable_address[0], compressed_spendable_address[0]])['address'])
uncompressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [uncompressed_spendable_address[0], uncompressed_spendable_address[0]])['address'])
compressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], compressed_spendable_address[0]])['address'])
uncompressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_solvable_address[0], uncompressed_solvable_address[0]])['address'])
compressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], compressed_solvable_address[0]])['address'])
premature_witaddress = []
for i in compressed_spendable_address:
v = self.nodes[0].getaddressinfo(i)
if (v['isscript']):
[bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
# P2WSH and P2SH(P2WSH) multisig with compressed keys are spendable after addwitnessaddress
spendable_after_addwitnessaddress.extend([p2wsh, p2sh_p2wsh])
premature_witaddress.append(script_to_p2sh(p2wsh))
else:
[p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# P2WPKH, P2SH_P2WPKH are always spendable
spendable_anytime.extend([p2wpkh, p2sh_p2wpkh])
for i in uncompressed_spendable_address + uncompressed_solvable_address:
v = self.nodes[0].getaddressinfo(i)
if (v['isscript']):
[bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
# P2WSH and P2SH(P2WSH) multisig with uncompressed keys are never seen
unseen_anytime.extend([p2wsh, p2sh_p2wsh])
else:
[p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# P2WPKH, P2SH_P2WPKH with uncompressed keys are never seen
unseen_anytime.extend([p2wpkh, p2sh_p2wpkh])
for i in compressed_solvable_address:
v = self.nodes[0].getaddressinfo(i)
if (v['isscript']):
# P2WSH multisig without private key are seen after addwitnessaddress
[bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
solvable_after_addwitnessaddress.extend([p2wsh, p2sh_p2wsh])
premature_witaddress.append(script_to_p2sh(p2wsh))
else:
[p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# P2SH_P2PK, P2SH_P2PKH with compressed keys are always solvable
solvable_anytime.extend([p2wpkh, p2sh_p2wpkh])
self.mine_and_test_listunspent(spendable_anytime, 2)
self.mine_and_test_listunspent(solvable_anytime, 1)
self.mine_and_test_listunspent(spendable_after_addwitnessaddress + solvable_after_addwitnessaddress + unseen_anytime, 0)
# addwitnessaddress should refuse to return a witness address if an uncompressed key is used
# note that a multisig address returned by addmultisigaddress is not solvable until it is added with importaddress
# premature_witaddress are not accepted until the script is added with addwitnessaddress first
for i in uncompressed_spendable_address + uncompressed_solvable_address + premature_witaddress:
# This will raise an exception
assert_raises_rpc_error(-4, "Public key or redeemscript not known to wallet, or the key is uncompressed", self.nodes[0].addwitnessaddress, i)
# after importaddress it should pass addwitnessaddress
v = self.nodes[0].getaddressinfo(compressed_solvable_address[1])
        self.nodes[0].importaddress(v['hex'], "", False, True)
for i in compressed_spendable_address + compressed_solvable_address + premature_witaddress:
witaddress = self.nodes[0].addwitnessaddress(i)
assert_equal(witaddress, self.nodes[0].addwitnessaddress(witaddress))
spendable_txid.append(self.mine_and_test_listunspent(spendable_after_addwitnessaddress + spendable_anytime, 2))
solvable_txid.append(self.mine_and_test_listunspent(solvable_after_addwitnessaddress + solvable_anytime, 1))
self.mine_and_test_listunspent(unseen_anytime, 0)
# Check that createrawtransaction/decoderawtransaction with non-v0 Bech32 works
v1_addr = program_to_witness(1, [3,5])
v1_tx = self.nodes[0].createrawtransaction([getutxo(spendable_txid[0])],{v1_addr: 1})
v1_decoded = self.nodes[1].decoderawtransaction(v1_tx)
assert_equal(v1_decoded['vout'][0]['scriptPubKey']['addresses'][0], v1_addr)
assert_equal(v1_decoded['vout'][0]['scriptPubKey']['hex'], "51020305")
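        # "51020305" is OP_1 followed by a 2-byte push of 0x03 0x05, i.e. the version-1
        # witness program with program bytes [3, 5] created above.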
# Check that spendable outputs are really spendable
self.create_and_mine_tx_from_txids(spendable_txid)
# import all the private keys so solvable addresses become spendable
self.nodes[0].importprivkey("cPiM8Ub4heR9NBYmgVzJQiUH1if44GSBGiqaeJySuL2BKxubvgwb")
self.nodes[0].importprivkey("cPpAdHaD6VoYbW78kveN2bsvb45Q7G5PhaPApVUGwvF8VQ9brD97")
self.nodes[0].importprivkey("91zqCU5B9sdWxzMt1ca3VzbtVm2YM6Hi5Rxn4UDtxEaN9C9nzXV")
self.nodes[0].importprivkey("cPQFjcVRpAUBG8BA9hzr2yEzHwKoMgLkJZBBtK9vJnvGJgMjzTbd")
self.nodes[0].importprivkey("cQGtcm34xiLjB1v7bkRa4V3aAc9tS2UTuBZ1UnZGeSeNy627fN66")
self.nodes[0].importprivkey("cTW5mR5M45vHxXkeChZdtSPozrFwFgmEvTNnanCW6wrqwaCZ1X7K")
self.create_and_mine_tx_from_txids(solvable_txid)
# Test that importing native P2WPKH/P2WSH scripts works
for use_p2wsh in [False, True]:
if use_p2wsh:
scriptPubKey = "00203a59f3f56b713fdcf5d1a57357f02c44342cbf306ffe0c4741046837bf90561a"
transaction = "01000000000100e1f505000000002200203a59f3f56b713fdcf5d1a57357f02c44342cbf306ffe0c4741046837bf90561a00000000"
else:
scriptPubKey = "a9142f8c469c2f0084c48e11f998ffbe7efa7549f26d87"
transaction = "01000000000100e1f5050000000017a9142f8c469c2f0084c48e11f998ffbe7efa7549f26d8700000000"
self.nodes[1].importaddress(scriptPubKey, "", False)
rawtxfund = self.nodes[1].fundrawtransaction(transaction)['hex']
rawtxfund = self.nodes[1].signrawtransactionwithwallet(rawtxfund)["hex"]
txid = self.nodes[1].sendrawtransaction(rawtxfund)
assert_equal(self.nodes[1].gettransaction(txid, True)["txid"], txid)
assert_equal(self.nodes[1].listtransactions("*", 1, 0, True)[0]["txid"], txid)
# Assert it is properly saved
self.stop_node(1)
self.start_node(1)
assert_equal(self.nodes[1].gettransaction(txid, True)["txid"], txid)
assert_equal(self.nodes[1].listtransactions("*", 1, 0, True)[0]["txid"], txid)
def mine_and_test_listunspent(self, script_list, ismine):
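        """Send a transaction paying 0.1 BTC to every script in script_list, mine a block,
        and check how the wallet reports the new outputs in listunspent:
        ismine == 2 expects all outputs to be spendable, ismine == 1 expects them to be
        watch-only (seen but not spendable), and ismine == 0 expects them to be unseen.
        Returns the txid of the funding transaction."""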
utxo = find_spendable_utxo(self.nodes[0], 50)
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(int('0x'+utxo['txid'],0), utxo['vout'])))
for i in script_list:
tx.vout.append(CTxOut(10000000, i))
tx.rehash()
signresults = self.nodes[0].signrawtransactionwithwallet(bytes_to_hex_str(tx.serialize_without_witness()))['hex']
txid = self.nodes[0].sendrawtransaction(signresults, True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
watchcount = 0
spendcount = 0
for i in self.nodes[0].listunspent():
if (i['txid'] == txid):
watchcount += 1
if (i['spendable'] == True):
spendcount += 1
if (ismine == 2):
assert_equal(spendcount, len(script_list))
elif (ismine == 1):
assert_equal(watchcount, len(script_list))
assert_equal(spendcount, 0)
else:
assert_equal(watchcount, 0)
return txid
def p2sh_address_to_script(self,v):
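        """Return the [bare, p2sh, p2wsh, p2sh_p2wsh] script variants for a getaddressinfo
        result that describes a script (multisig) address."""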
bare = CScript(hex_str_to_bytes(v['hex']))
p2sh = CScript(hex_str_to_bytes(v['scriptPubKey']))
p2wsh = CScript([OP_0, sha256(bare)])
p2sh_p2wsh = CScript([OP_HASH160, hash160(p2wsh), OP_EQUAL])
return([bare, p2sh, p2wsh, p2sh_p2wsh])
def p2pkh_address_to_script(self,v):
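        """Return the ten script variants derived from the pubkey of a getaddressinfo result:
        P2WPKH, P2SH(P2WPKH), P2PK, P2PKH, P2SH(P2PK), P2SH(P2PKH),
        P2WSH(P2PK), P2WSH(P2PKH), P2SH(P2WSH(P2PK)) and P2SH(P2WSH(P2PKH))."""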
pubkey = hex_str_to_bytes(v['pubkey'])
p2wpkh = CScript([OP_0, hash160(pubkey)])
p2sh_p2wpkh = CScript([OP_HASH160, hash160(p2wpkh), OP_EQUAL])
p2pk = CScript([pubkey, OP_CHECKSIG])
p2pkh = CScript(hex_str_to_bytes(v['scriptPubKey']))
p2sh_p2pk = CScript([OP_HASH160, hash160(p2pk), OP_EQUAL])
p2sh_p2pkh = CScript([OP_HASH160, hash160(p2pkh), OP_EQUAL])
p2wsh_p2pk = CScript([OP_0, sha256(p2pk)])
p2wsh_p2pkh = CScript([OP_0, sha256(p2pkh)])
p2sh_p2wsh_p2pk = CScript([OP_HASH160, hash160(p2wsh_p2pk), OP_EQUAL])
p2sh_p2wsh_p2pkh = CScript([OP_HASH160, hash160(p2wsh_p2pkh), OP_EQUAL])
return [p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh]
def create_and_mine_tx_from_txids(self, txids, success = True):
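        """Spend every output of the given transactions in a single transaction with one
        zero-value output, sign it with the wallet, broadcast it and mine a block."""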
tx = CTransaction()
for i in txids:
txtmp = CTransaction()
txraw = self.nodes[0].getrawtransaction(i)
f = BytesIO(hex_str_to_bytes(txraw))
txtmp.deserialize(f)
for j in range(len(txtmp.vout)):
tx.vin.append(CTxIn(COutPoint(int('0x'+i,0), j)))
tx.vout.append(CTxOut(0, CScript()))
tx.rehash()
signresults = self.nodes[0].signrawtransactionwithwallet(bytes_to_hex_str(tx.serialize_without_witness()))['hex']
self.nodes[0].sendrawtransaction(signresults, True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
if __name__ == '__main__':
SegWitTest().main()
| 62.272588
| 224
| 0.688299
|
6ac3e01264c7cd59a4d56a205c71b7c01aeb2680
| 7,357
|
py
|
Python
|
src/habitat_sim2real/sims/ros/rosrobot_sim.py
|
WGW101/habitat_sim2real
|
04be5215af53122f166d6dfdfb131af3619d10a0
|
[
"MIT"
] | null | null | null |
src/habitat_sim2real/sims/ros/rosrobot_sim.py
|
WGW101/habitat_sim2real
|
04be5215af53122f166d6dfdfb131af3619d10a0
|
[
"MIT"
] | null | null | null |
src/habitat_sim2real/sims/ros/rosrobot_sim.py
|
WGW101/habitat_sim2real
|
04be5215af53122f166d6dfdfb131af3619d10a0
|
[
"MIT"
] | null | null | null |
import numpy as np
import cv2
import quaternion as quat
import habitat
from habitat.core.simulator import Simulator, RGBSensor, DepthSensor, SensorSuite
from gym import spaces
from .intf_node import HabitatInterfaceROSNode
class ROSDepthSensor(DepthSensor):
def _get_observation_space(self, *args, **kwargs):
return spaces.Box(low=0 if self.config.NORMALIZE_DEPTH else self.config.MIN_DEPTH,
high=1 if self.config.NORMALIZE_DEPTH else self.config.MAX_DEPTH,
shape=(self.config.HEIGHT, self.config.WIDTH, 1),
dtype=np.float32)
def get_observation(self, sim_obs):
out = cv2.resize(sim_obs[1], (self.config.WIDTH, self.config.HEIGHT))
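        # The raw depth image is presumably in millimeters; convert to meters
        # (hence the 0.001 factor) before clipping to the configured range.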
out = out.astype(np.float32) * 0.001
out = np.clip(out, self.config.MIN_DEPTH, self.config.MAX_DEPTH)
if self.config.NORMALIZE_DEPTH:
out = (out - self.config.MIN_DEPTH) \
/ (self.config.MAX_DEPTH - self.config.MIN_DEPTH)
return out[:, :, np.newaxis]
class ROSRGBSensor(RGBSensor):
def _get_observation_space(self, *args, **kwargs):
return spaces.Box(low=0,
high=255,
shape=(self.config.HEIGHT, self.config.WIDTH, 3),
dtype=np.uint8)
def get_observation(self, sim_obs):
out = cv2.resize(sim_obs[0], (self.config.WIDTH, self.config.HEIGHT))
return out
class AgentState:
def __init__(self, p, q):
self.position = np.array(p)
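        # ROS quaternions are ordered (x, y, z, w) while numpy-quaternion expects
        # (w, x, y, z), hence the reordering below.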
self.rotation = quat.quaternion(q[3], *q[:3])
def __repr__(self):
return "AgentState(position={}, rotation={})".format(self.position, self.rotation)
class DummyROSAgent:
def __init__(self, state):
self.state = state
class DummyROSPathfinder:
def __init__(self, intf_node):
self._intf_node = intf_node
self._bounds = None
self._topdown = None
def get_bounds(self):
if self._bounds is None:
self._bounds = self._intf_node.get_map_bounds()
return self._bounds
def get_topdown_view(self, *args, **kwargs):
if self._topdown is None:
thresh = self._intf_node.cfg.MAP_FREE_THRESH
grid = self._intf_node.get_map_grid()
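            # Occupancy grids conventionally use -1 for unknown cells, so a cell is treated
            # as navigable only if it is known and below the free-space threshold.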
self._topdown = (grid < thresh) & (grid > -1)
return self._topdown
@habitat.registry.register_simulator(name="ROS-Robot-v0")
class ROSRobot(Simulator):
def __init__(self, config):
self.habitat_config = config
self.intf_node = HabitatInterfaceROSNode(config.ROS)
self.pathfinder = DummyROSPathfinder(self.intf_node)
self.cur_camera_tilt = 0
self._sensor_suite = SensorSuite([ROSRGBSensor(config=config.RGB_SENSOR),
ROSDepthSensor(config=config.DEPTH_SENSOR)])
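        # Discrete action ids (see step()): 0 STOP, 1 MOVE_FORWARD, 2 TURN_LEFT, 3 TURN_RIGHT;
        # the v1 action space adds 4 LOOK_UP and 5 LOOK_DOWN.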
if config.ACTION_SPACE_CONFIG == "v0":
self._action_space = spaces.Discrete(4)
else: # v1 or pyrobotnoisy
self._action_space = spaces.Discrete(6)
self.has_published_goal = False
self.previous_step_collided = False
@property
def sensor_suite(self):
return self._sensor_suite
@property
def action_space(self):
return self._action_space
def reconfigure(self, config):
self.habitat_config = config
def reset(self):
self.has_published_goal = False
ag_cfg = getattr(self.habitat_config,
self.habitat_config.AGENTS[self.habitat_config.DEFAULT_AGENT_ID])
if ag_cfg.IS_SET_START_STATE:
pos = np.array(ag_cfg.START_POSITION)
rot = quat.quaternion(ag_cfg.START_ROTATION[3], *ag_cfg.START_ROTATION[:3])
state = self.get_agent_state()
if not (np.allclose(pos, state.position)
and quat.isclose(rot, state.rotation)):
self.intf_node.cancel_move_on_bump = False
self.intf_node.move_to_absolute(ag_cfg.START_POSITION, ag_cfg.START_ROTATION)
self.intf_node.cancel_move_on_bump = True
self.intf_node.set_camera_tilt(self.habitat_config.RGB_SENSOR.ORIENTATION[0])
self.intf_node.clear_collided()
self.previous_step_collided = False
raw_images = self.intf_node.get_raw_images()
return self._sensor_suite.get_observations(raw_images)
def step(self, action):
if action == 0: # STOP
pass
elif action == 1: # MOVE_FORWARD
self.intf_node.move_to_relative(self.habitat_config.FORWARD_STEP_SIZE, 0)
elif action == 2: # TURN_LEFT
self.intf_node.move_to_relative(0, np.radians(self.habitat_config.TURN_ANGLE))
elif action == 3: # TURN_RIGHT
self.intf_node.move_to_relative(0, -np.radians(self.habitat_config.TURN_ANGLE))
elif action == 4: # LOOK_UP
self.cur_camera_tilt -= self.habitat_config.TILT_ANGLE
self.cur_camera_tilt = max(-45, min(self.cur_camera_tilt, 45))
self.intf_node.set_camera_tilt(np.radians(self.cur_camera_tilt))
elif action == 5: # LOOK_DOWN
self.cur_camera_tilt += self.habitat_config.TILT_ANGLE
self.cur_camera_tilt = max(-45, min(self.cur_camera_tilt, 45))
self.intf_node.set_camera_tilt(np.radians(self.cur_camera_tilt))
has_collided = self.intf_node.has_collided()
if not self.previous_step_collided and has_collided:
self.intf_node.clear_collided()
self.previous_step_collided = True
raw_images = self.intf_node.get_raw_images()
return self._sensor_suite.get_observations(raw_images)
def get_observations_at(self, position=None, rotation=None, keep_agent_at_new_pose=False):
if position is None and rotation is None:
raw_images = self.intf_node.get_raw_images()
return self._sensor_suite.get_observations(raw_images)
else:
raise RuntimeError("Can only query observations for current pose on a real robot.")
def get_agent(self, agent_id=0):
return DummyROSAgent(self.get_agent_state())
def get_agent_state(self, agent_id=0):
p, q = self.intf_node.get_robot_pose()
return AgentState(p, q)
def geodesic_distance(self, src, destinations, episode=None):
try:
iter(destinations[0])
except TypeError:
destinations = [destinations]
# Kinda hacky... Sim has no way to know the goal when a new episode starts
# But the first call to geodesic_distance is for the distance_to_goal measure...
if not self.has_published_goal:
self.publish_episode_goal(destinations[0])
self.has_published_goal = True
return min(self.intf_node.get_distance(src, dst) for dst in destinations)
def sample_navigable_point(self):
return self.intf_node.sample_free_point()
def get_topdown_map(self):
return self.intf_node.get_map_grid()
def seed(self, seed):
self.intf_node.seed_rng(seed)
@property
def up_vector(self):
return np.array([0.0, 1.0, 0.0])
@property
def forward_vector(self):
return np.array([0.0, 0.0, -1.0])
def publish_episode_goal(self, goal_pos):
self.intf_node.publish_episode_goal(goal_pos)
| 38.317708
| 95
| 0.651896
|
7cece274eced4635192b66f3fec43f31dd269b95
| 777
|
py
|
Python
|
test/test_sphering.py
|
exepulveda/pymgt
|
c49a1de3d45b82ce8dd659693e3c41fc4c967b24
|
[
"MIT"
] | 1
|
2021-07-10T00:58:30.000Z
|
2021-07-10T00:58:30.000Z
|
test/test_sphering.py
|
exepulveda/pymgt
|
c49a1de3d45b82ce8dd659693e3c41fc4c967b24
|
[
"MIT"
] | null | null | null |
test/test_sphering.py
|
exepulveda/pymgt
|
c49a1de3d45b82ce8dd659693e3c41fc4c967b24
|
[
"MIT"
] | null | null | null |
import numpy as np
from pymgt import *
from test_utils import *
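# Sphering (whitening) rescales the data to zero mean and identity covariance.
# These tests check that the transform round-trips (inverse_transform recovers the input)
# and that its fitted state can be copied into a fresh SpheringTransform.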
def test_simple():
sample_size = 10000
ndim = 5
t = SpheringTransform(name='sphering')
x = np.random.uniform(10.0, 20.0, size=(sample_size, ndim))
y = t.fit_transform(x)
assert_reversibility(t, x)
def test_states():
sample_size = 1000
ndim = 4
t = SpheringTransform(name='sphering')
x = np.random.uniform(-20.0, -10.0, size=(sample_size, ndim))
y, x_back = assert_reversibility(t, x)
state = t.state
# build new object
t = SpheringTransform(name='sphering')
t.state = state
y2 = t.transform(x)
x_back2 = t.inverse_transform(y2)
np.testing.assert_array_almost_equal(y, y2)
np.testing.assert_array_almost_equal(x_back, x_back2)
| 21.583333
| 65
| 0.666667
|
9421dbcce2d7beabaf59b4325be569e8e680769c
| 1,695
|
py
|
Python
|
train.py
|
sotirioszikas/Image-Classifier-Udacity-2nd-Project
|
74ef6490b642770ab37cf7d534c0b83a5f7f1606
|
[
"MIT"
] | null | null | null |
train.py
|
sotirioszikas/Image-Classifier-Udacity-2nd-Project
|
74ef6490b642770ab37cf7d534c0b83a5f7f1606
|
[
"MIT"
] | null | null | null |
train.py
|
sotirioszikas/Image-Classifier-Udacity-2nd-Project
|
74ef6490b642770ab37cf7d534c0b83a5f7f1606
|
[
"MIT"
] | null | null | null |
#Imports
import torch
from torch import nn, optim, tensor
import torch.nn.functional as F
from torchvision import datasets, transforms, models
import matplotlib.pyplot as plt
import json
import numpy as np
from collections import OrderedDict
from PIL import Image
from torch.optim import lr_scheduler
import os
import copy
import argparse
import base #imports the functions in base.py in order to use them here.
ap = argparse.ArgumentParser(description='train.py')
ap.add_argument('--gpu', dest = "gpu", action = "store", default = "gpu")
ap.add_argument('--save_dir', dest = "save_dir", action = "store", default = "/home/workspace/ImageClassifier/checkpoint.pth")
ap.add_argument('--learning_rate', dest = "learning_rate", action = "store", default = 0.001)
ap.add_argument('--dropout', dest = "dropout", action = "store", default = 0.5)
ap.add_argument('--epochs', dest = "epochs", action = "store", type = int, default = 6)
ap.add_argument('--arch', dest = "arch", action = "store", default = "vgg19", type = str)
ap.add_argument('--hidden', type = int, dest = "hidden", action = "store", default = 4096)
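# Example invocation (hypothetical values; adjust paths and flags to your setup):
#   python train.py --arch vgg19 --hidden 4096 --learning_rate 0.001 --epochs 6 --gpu gpu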
inputs = ap.parse_args()
path = inputs.save_dir
lr = inputs.learning_rate
net = inputs.arch
p = inputs.dropout
hidden = inputs.hidden
device = inputs.gpu
epochs = inputs.epochs
dataloaders,image_datasets = base.load_data()
model, criterion, optimizer, scheduler = base.build_nn(p, hidden, lr, device, net)
model = base.train_model(model, criterion, optimizer, scheduler, epochs, dataloaders, image_datasets, device = 'cuda')
base.save_checkpoint(image_datasets, optimizer, p, hidden, lr, epochs, path, model)
print("----Done! :) The model has been trained and saved!----\n")
| 36.06383
| 126
| 0.733333
|
422586408d6dbf2753cd617d82bb6a0852f5793a
| 14,141
|
py
|
Python
|
b2sdk/account_info/sqlite_account_info.py
|
phistrom/b2-sdk-python
|
b103939cc19020a5f7cae7a6e26f1073b5ba2232
|
[
"MIT"
] | null | null | null |
b2sdk/account_info/sqlite_account_info.py
|
phistrom/b2-sdk-python
|
b103939cc19020a5f7cae7a6e26f1073b5ba2232
|
[
"MIT"
] | null | null | null |
b2sdk/account_info/sqlite_account_info.py
|
phistrom/b2-sdk-python
|
b103939cc19020a5f7cae7a6e26f1073b5ba2232
|
[
"MIT"
] | null | null | null |
######################################################################
#
# File: b2sdk/account_info/sqlite_account_info.py
#
# Copyright 2019 Backblaze Inc. All Rights Reserved.
#
# License https://www.backblaze.com/using_b2_code.html
#
######################################################################
import json
import logging
import os
import stat
import threading
from .exception import (CorruptAccountInfo, MissingAccountData)
from .upload_url_pool import UrlPoolAccountInfo
import sqlite3
logger = logging.getLogger(__name__)
B2_ACCOUNT_INFO_ENV_VAR = 'B2_ACCOUNT_INFO'
B2_ACCOUNT_INFO_DEFAULT_FILE = '~/.b2_account_info'
class SqliteAccountInfo(UrlPoolAccountInfo):
"""
Store account information in an `sqlite3 <https://www.sqlite.org>`_ database which is
used to manage concurrent access to the data.
The ``update_done`` table tracks the schema updates that have been
completed.
"""
def __init__(self, file_name=None, last_upgrade_to_run=None):
"""
If ``file_name`` argument is empty or ``None``, path from ``B2_ACCOUNT_INFO`` environment variable is used. If that is not available, a default of ``~/.b2_account_info`` is used.
:param str file_name: The sqlite file to use; overrides the default.
:param int last_upgrade_to_run: For testing only, override the auto-update on the db.
"""
self.thread_local = threading.local()
user_account_info_path = file_name or os.environ.get(
B2_ACCOUNT_INFO_ENV_VAR, B2_ACCOUNT_INFO_DEFAULT_FILE
)
self.filename = file_name or os.path.expanduser(user_account_info_path)
logger.debug('%s file path to use: %s', self.__class__.__name__, self.filename)
self._validate_database()
with self._get_connection() as conn:
self._create_tables(conn, last_upgrade_to_run)
super(SqliteAccountInfo, self).__init__()
def _validate_database(self, last_upgrade_to_run=None):
"""
        Make sure that the database is openable.
        Raises CorruptAccountInfo if the file exists but cannot be used as one.
"""
# If there is no file there, that's fine. It will get created when
# we connect.
if not os.path.exists(self.filename):
self._create_database(last_upgrade_to_run)
return
# If we can connect to the database, and do anything, then all is good.
try:
with self._connect() as conn:
self._create_tables(conn, last_upgrade_to_run)
return
except sqlite3.DatabaseError:
pass # fall through to next case
# If the file contains JSON with the right stuff in it, convert from
# the old representation.
try:
with open(self.filename, 'rb') as f:
data = json.loads(f.read().decode('utf-8'))
keys = [
'account_id', 'application_key', 'account_auth_token', 'api_url',
'download_url', 'minimum_part_size', 'realm'
]
if all(k in data for k in keys):
# remove the json file
os.unlink(self.filename)
# create a database
self._create_database(last_upgrade_to_run)
# add the data from the JSON file
with self._connect() as conn:
self._create_tables(conn, last_upgrade_to_run)
insert_statement = """
INSERT INTO account
(account_id, application_key, account_auth_token, api_url, download_url, minimum_part_size, realm)
values (?, ?, ?, ?, ?, ?, ?);
"""
conn.execute(insert_statement, tuple(data[k] for k in keys))
# all is happy now
return
except ValueError: # includes json.decoder.JSONDecodeError
pass
        # The file is neither a valid database nor a recognizable JSON account file: refuse to use it
raise CorruptAccountInfo(self.filename)
def _get_connection(self):
"""
Connections to sqlite cannot be shared across threads.
"""
try:
return self.thread_local.connection
except AttributeError:
self.thread_local.connection = self._connect()
return self.thread_local.connection
def _connect(self):
return sqlite3.connect(self.filename, isolation_level='EXCLUSIVE')
def _create_database(self, last_upgrade_to_run):
"""
        Make sure that the database is created and set the file permissions.
This should be done before storing any sensitive data in it.
"""
# Create the tables in the database
conn = self._connect()
try:
with conn:
self._create_tables(conn, last_upgrade_to_run)
finally:
conn.close()
# Set the file permissions
os.chmod(self.filename, stat.S_IRUSR | stat.S_IWUSR)
def _create_tables(self, conn, last_upgrade_to_run):
conn.execute(
"""
CREATE TABLE IF NOT EXISTS
update_done (
update_number INT NOT NULL
);
"""
)
conn.execute(
"""
CREATE TABLE IF NOT EXISTS
account (
account_id TEXT NOT NULL,
application_key TEXT NOT NULL,
account_auth_token TEXT NOT NULL,
api_url TEXT NOT NULL,
download_url TEXT NOT NULL,
minimum_part_size INT NOT NULL,
realm TEXT NOT NULL
);
"""
)
conn.execute(
"""
CREATE TABLE IF NOT EXISTS
bucket (
bucket_name TEXT NOT NULL,
bucket_id TEXT NOT NULL
);
"""
)
# This table is not used any more. We may use it again
# someday if we save upload URLs across invocations of
# the command-line tool.
conn.execute(
"""
CREATE TABLE IF NOT EXISTS
bucket_upload_url (
bucket_id TEXT NOT NULL,
upload_url TEXT NOT NULL,
upload_auth_token TEXT NOT NULL
);
"""
)
# By default, we run all the upgrades
last_upgrade_to_run = 2 if last_upgrade_to_run is None else last_upgrade_to_run
# Add the 'allowed' column if it hasn't been yet.
if 1 <= last_upgrade_to_run:
self._ensure_update(1, 'ALTER TABLE account ADD COLUMN allowed TEXT;')
# Add the 'account_id_or_app_key_id' column if it hasn't been yet
if 2 <= last_upgrade_to_run:
self._ensure_update(2, 'ALTER TABLE account ADD COLUMN account_id_or_app_key_id TEXT;')
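        # Further schema migrations can be appended here with the next update number;
        # _ensure_update records each number in update_done so a migration runs only once.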
def _ensure_update(self, update_number, update_command):
"""
Run the update with the given number if it hasn't been done yet.
Does the update and stores the number as a single transaction,
so they will always be in sync.
"""
with self._get_connection() as conn:
conn.execute('BEGIN')
cursor = conn.execute(
'SELECT COUNT(*) AS count FROM update_done WHERE update_number = ?;',
(update_number,)
)
update_count = cursor.fetchone()[0]
assert update_count in [0, 1]
if update_count == 0:
conn.execute(update_command)
conn.execute(
'INSERT INTO update_done (update_number) VALUES (?);', (update_number,)
)
def clear(self):
"""
Remove all info about accounts and buckets.
"""
with self._get_connection() as conn:
conn.execute('DELETE FROM account;')
conn.execute('DELETE FROM bucket;')
conn.execute('DELETE FROM bucket_upload_url;')
def _set_auth_data(
self,
account_id,
auth_token,
api_url,
download_url,
minimum_part_size,
application_key,
realm,
allowed,
application_key_id,
):
assert self.allowed_is_valid(allowed)
with self._get_connection() as conn:
conn.execute('DELETE FROM account;')
conn.execute('DELETE FROM bucket;')
conn.execute('DELETE FROM bucket_upload_url;')
insert_statement = """
INSERT INTO account
(account_id, account_id_or_app_key_id, application_key, account_auth_token, api_url, download_url, minimum_part_size, realm, allowed)
values (?, ?, ?, ?, ?, ?, ?, ?, ?);
"""
conn.execute(
insert_statement, (
account_id,
application_key_id,
application_key,
auth_token,
api_url,
download_url,
minimum_part_size,
realm,
json.dumps(allowed),
)
)
def set_auth_data_with_schema_0_for_test(
self,
account_id,
auth_token,
api_url,
download_url,
minimum_part_size,
application_key,
realm,
):
"""
Set authentication data for tests.
:param str account_id: an account ID
:param str auth_token: an authentication token
:param str api_url: an API URL
:param str download_url: a download URL
:param int minimum_part_size: a minimum part size
:param str application_key: an application key
:param str realm: a realm to authorize account in
"""
with self._get_connection() as conn:
conn.execute('DELETE FROM account;')
conn.execute('DELETE FROM bucket;')
conn.execute('DELETE FROM bucket_upload_url;')
insert_statement = """
INSERT INTO account
(account_id, application_key, account_auth_token, api_url, download_url, minimum_part_size, realm)
values (?, ?, ?, ?, ?, ?, ?);
"""
conn.execute(
insert_statement, (
account_id,
application_key,
auth_token,
api_url,
download_url,
minimum_part_size,
realm,
)
)
def get_application_key(self):
return self._get_account_info_or_raise('application_key')
def get_account_id(self):
return self._get_account_info_or_raise('account_id')
def get_application_key_id(self):
"""
Return an application key ID.
The 'account_id_or_app_key_id' column was not in the original schema, so it may be NULL.
Nota bene - this is the only place where we are not renaming account_id_or_app_key_id to application_key_id
because it requires a column change.
application_key_id == account_id_or_app_key_id
:rtype: str
"""
result = self._get_account_info_or_raise('account_id_or_app_key_id')
if result is None:
return self.get_account_id()
else:
return result
def get_api_url(self):
return self._get_account_info_or_raise('api_url')
def get_account_auth_token(self):
return self._get_account_info_or_raise('account_auth_token')
def get_download_url(self):
return self._get_account_info_or_raise('download_url')
def get_realm(self):
return self._get_account_info_or_raise('realm')
def get_minimum_part_size(self):
return self._get_account_info_or_raise('minimum_part_size')
def get_allowed(self):
"""
Return 'allowed' dictionary info.
The 'allowed' column was not in the original schema, so it may be NULL.
:rtype: dict
"""
allowed_json = self._get_account_info_or_raise('allowed')
if allowed_json is None:
return self.DEFAULT_ALLOWED
else:
return json.loads(allowed_json)
def _get_account_info_or_raise(self, column_name):
try:
with self._get_connection() as conn:
cursor = conn.execute('SELECT %s FROM account;' % (column_name,))
value = cursor.fetchone()[0]
return value
except Exception as e:
logger.exception(
'_get_account_info_or_raise encountered a problem while trying to retrieve "%s"',
column_name
)
raise MissingAccountData(str(e))
def refresh_entire_bucket_name_cache(self, name_id_iterable):
with self._get_connection() as conn:
conn.execute('DELETE FROM bucket;')
for (bucket_name, bucket_id) in name_id_iterable:
conn.execute(
'INSERT INTO bucket (bucket_name, bucket_id) VALUES (?, ?);',
(bucket_name, bucket_id)
)
def save_bucket(self, bucket):
with self._get_connection() as conn:
conn.execute('DELETE FROM bucket WHERE bucket_id = ?;', (bucket.id_,))
conn.execute(
'INSERT INTO bucket (bucket_id, bucket_name) VALUES (?, ?);',
(bucket.id_, bucket.name)
)
def remove_bucket_name(self, bucket_name):
with self._get_connection() as conn:
conn.execute('DELETE FROM bucket WHERE bucket_name = ?;', (bucket_name,))
def get_bucket_id_or_none_from_bucket_name(self, bucket_name):
try:
with self._get_connection() as conn:
cursor = conn.execute(
'SELECT bucket_id FROM bucket WHERE bucket_name = ?;', (bucket_name,)
)
return cursor.fetchone()[0]
except TypeError: # TypeError: 'NoneType' object is unsubscriptable
return None
except sqlite3.Error:
return None
| 35.8
| 186
| 0.575914
|
9749c48542e74e0055b2f949e6be734c5219c998
| 4,085
|
py
|
Python
|
ceilometer/ipmi/platform/ipmi_sensor.py
|
orbitfp7/ceilometer
|
9905da14bbdf06f95e1e056c9ca0e18087214d0f
|
[
"Apache-2.0"
] | 2
|
2015-09-07T09:15:26.000Z
|
2015-09-30T02:13:23.000Z
|
ceilometer/ipmi/platform/ipmi_sensor.py
|
orbitfp7/ceilometer
|
9905da14bbdf06f95e1e056c9ca0e18087214d0f
|
[
"Apache-2.0"
] | null | null | null |
ceilometer/ipmi/platform/ipmi_sensor.py
|
orbitfp7/ceilometer
|
9905da14bbdf06f95e1e056c9ca0e18087214d0f
|
[
"Apache-2.0"
] | 1
|
2019-09-16T02:11:41.000Z
|
2019-09-16T02:11:41.000Z
|
# Copyright 2014 Intel Corporation.
#
# Author: Zhai Edwin <edwin.zhai@intel.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""IPMI sensor to collect various sensor data of compute node"""
from ceilometer.ipmi.platform import exception as ipmiexcept
from ceilometer.ipmi.platform import ipmitool
from ceilometer.openstack.common.gettextutils import _
IPMICMD = {"sdr_dump": "sdr dump",
"sdr_info": "sdr info",
"sensor_dump": "sdr -v",
"sensor_dump_temperature": "sdr -v type Temperature",
"sensor_dump_current": "sdr -v type Current",
"sensor_dump_fan": "sdr -v type Fan",
"sensor_dump_voltage": "sdr -v type Voltage"}
# Requires translation of output into dict
DICT_TRANSLATE_TEMPLATE = {"translate": 1}
class IPMISensor(object):
"""The python implementation of IPMI sensor using ipmitool
    The class implements the IPMI sensor to get various sensor data of the
    compute node. It uses ipmitool to execute IPMI commands and parses the
    output into a dict.
"""
_inited = False
_instance = None
def __new__(cls, *args, **kwargs):
"""Singleton to avoid duplicated initialization."""
if not cls._instance:
cls._instance = super(IPMISensor, cls).__new__(cls, *args,
**kwargs)
return cls._instance
def __init__(self):
if not (self._instance and self._inited):
self.ipmi_support = False
self._inited = True
self.ipmi_support = self.check_ipmi()
@ipmitool.execute_ipmi_cmd()
def _get_sdr_info(self):
"""Get the SDR info."""
return IPMICMD['sdr_info']
@ipmitool.execute_ipmi_cmd(DICT_TRANSLATE_TEMPLATE)
def _read_sensor_all(self):
"""Get the sensor data for type."""
return IPMICMD['sensor_dump']
@ipmitool.execute_ipmi_cmd(DICT_TRANSLATE_TEMPLATE)
def _read_sensor_temperature(self):
"""Get the sensor data for Temperature."""
return IPMICMD['sensor_dump_temperature']
@ipmitool.execute_ipmi_cmd(DICT_TRANSLATE_TEMPLATE)
def _read_sensor_voltage(self):
"""Get the sensor data for Voltage."""
return IPMICMD['sensor_dump_voltage']
@ipmitool.execute_ipmi_cmd(DICT_TRANSLATE_TEMPLATE)
def _read_sensor_current(self):
"""Get the sensor data for Current."""
return IPMICMD['sensor_dump_current']
@ipmitool.execute_ipmi_cmd(DICT_TRANSLATE_TEMPLATE)
def _read_sensor_fan(self):
"""Get the sensor data for Fan."""
return IPMICMD['sensor_dump_fan']
def read_sensor_any(self, sensor_type=''):
"""Get the sensor data for type."""
if not self.ipmi_support:
return {}
mapping = {'': self._read_sensor_all,
'Temperature': self._read_sensor_temperature,
'Fan': self._read_sensor_fan,
'Voltage': self._read_sensor_voltage,
'Current': self._read_sensor_current}
try:
return mapping[sensor_type]()
except KeyError:
raise ipmiexcept.IPMIException(_('Wrong sensor type'))
def check_ipmi(self):
"""IPMI capability checking
        Detect whether the compute node is an IPMI-capable platform by running
        a simple IPMI command that fetches the SDR info.
"""
try:
self._get_sdr_info()
except ipmiexcept.IPMIException:
return False
return True
| 35.215517
| 78
| 0.651652
|
af7b853f5f85a13741b10f092862c1ce35d1a9e0
| 38,460
|
py
|
Python
|
Server/integrations/saml/SamlExternalAuthenticator.py
|
syntrydy/oxAuth
|
19c9a6cebd1377e3c347347e76b8070491d4944f
|
[
"MIT"
] | null | null | null |
Server/integrations/saml/SamlExternalAuthenticator.py
|
syntrydy/oxAuth
|
19c9a6cebd1377e3c347347e76b8070491d4944f
|
[
"MIT"
] | null | null | null |
Server/integrations/saml/SamlExternalAuthenticator.py
|
syntrydy/oxAuth
|
19c9a6cebd1377e3c347347e76b8070491d4944f
|
[
"MIT"
] | null | null | null |
# oxAuth is available under the MIT License (2008). See http://opensource.org/licenses/MIT for full text.
# Copyright (c) 2016, Gluu
#
# Author: Yuriy Movchan
#
import java
import json
from java.lang import StringBuilder
from javax.faces.context import FacesContext
from java.util import Arrays, ArrayList, HashMap, IdentityHashMap
from javax.faces.application import FacesMessage
from org.gluu.jsf2.message import FacesMessages
from org.gluu.saml import SamlConfiguration, AuthRequest, Response
from org.xdi.ldap.model import CustomAttribute
from org.xdi.model.custom.script.type.auth import PersonAuthenticationType
from org.xdi.oxauth.model.common import User
from org.xdi.oxauth.security import Identity
from org.xdi.oxauth.service import UserService, ClientService, AuthenticationService, AttributeService
from org.xdi.oxauth.service.net import HttpService
from org.xdi.service.cdi.util import CdiUtil
from org.xdi.util import StringHelper, ArrayHelper, Util
from org.gluu.jsf2.service import FacesService
class PersonAuthentication(PersonAuthenticationType):
def __init__(self, currentTimeMillis):
self.currentTimeMillis = currentTimeMillis
def init(self, configurationAttributes):
print "Asimba. Initialization"
asimba_saml_certificate_file = configurationAttributes.get("asimba_saml_certificate_file").getValue2()
saml_idp_sso_target_url = configurationAttributes.get("saml_idp_sso_target_url").getValue2()
asimba_entity_id = configurationAttributes.get("asimba_entity_id").getValue2()
saml_use_authn_context = StringHelper.toBoolean(configurationAttributes.get("saml_use_authn_context").getValue2(), True)
if saml_use_authn_context:
saml_name_identifier_format = configurationAttributes.get("saml_name_identifier_format").getValue2()
else:
saml_name_identifier_format = None
asimba_saml_certificate = self.loadCeritificate(asimba_saml_certificate_file)
if StringHelper.isEmpty(asimba_saml_certificate):
print "Asimba. Initialization. File with x509 certificate should be not empty"
return False
samlConfiguration = SamlConfiguration()
# Set the issuer of the authentication request. This would usually be the URL of the issuing web application
samlConfiguration.setIssuer(asimba_entity_id)
# Tells the IdP to return a persistent identifier for the user
samlConfiguration.setNameIdentifierFormat(saml_name_identifier_format)
# The URL at the Identity Provider where to the authentication request should be sent
samlConfiguration.setIdpSsoTargetUrl(saml_idp_sso_target_url)
        # Enable/disable RequestedAuthnContext
samlConfiguration.setUseRequestedAuthnContext(saml_use_authn_context)
# Load x509 certificate
samlConfiguration.loadCertificateFromString(asimba_saml_certificate)
self.samlConfiguration = samlConfiguration
self.generateNameId = False
if configurationAttributes.containsKey("saml_generate_name_id"):
self.generateNameId = StringHelper.toBoolean(configurationAttributes.get("saml_generate_name_id").getValue2(), False)
print "Asimba. Initialization. The property saml_generate_name_id is %s" % self.generateNameId
self.updateUser = False
if configurationAttributes.containsKey("saml_update_user"):
self.updateUser = StringHelper.toBoolean(configurationAttributes.get("saml_update_user").getValue2(), False)
print "Asimba. Initialization. The property saml_update_user is %s" % self.updateUser
self.userObjectClasses = None
if configurationAttributes.containsKey("user_object_classes"):
self.userObjectClasses = self.prepareUserObjectClasses(configurationAttributes)
self.userEnforceAttributesUniqueness = None
if configurationAttributes.containsKey("enforce_uniqueness_attr_list"):
self.userEnforceAttributesUniqueness = self.prepareUserEnforceUniquenessAttributes(configurationAttributes)
self.attributesMapping = None
if configurationAttributes.containsKey("saml_idp_attributes_mapping"):
saml_idp_attributes_mapping = configurationAttributes.get("saml_idp_attributes_mapping").getValue2()
if StringHelper.isEmpty(saml_idp_attributes_mapping):
print "Asimba. Initialization. The property saml_idp_attributes_mapping is empty"
return False
self.attributesMapping = self.prepareAttributesMapping(saml_idp_attributes_mapping)
if self.attributesMapping == None:
print "Asimba. Initialization. The attributes mapping isn't valid"
return False
self.samlExtensionModule = None
if configurationAttributes.containsKey("saml_extension_module"):
saml_extension_module_name = configurationAttributes.get("saml_extension_module").getValue2()
try:
self.samlExtensionModule = __import__(saml_extension_module_name)
saml_extension_module_init_result = self.samlExtensionModule.init(configurationAttributes)
if not saml_extension_module_init_result:
return False
except ImportError, ex:
print "Asimba. Initialization. Failed to load saml_extension_module: '%s'" % saml_extension_module_name
print "Asimba. Initialization. Unexpected error:", ex
return False
self.debugEnrollment = False
print "Asimba. Initialized successfully"
return True
def destroy(self, configurationAttributes):
print "Asimba. Destroy"
print "Asimba. Destroyed successfully"
return True
def getApiVersion(self):
return 1
def isValidAuthenticationMethod(self, usageType, configurationAttributes):
return True
def getAlternativeAuthenticationMethod(self, usageType, configurationAttributes):
return None
def authenticate(self, configurationAttributes, requestParameters, step):
identity = CdiUtil.bean(Identity)
credentials = identity.getCredentials()
userService = CdiUtil.bean(UserService)
authenticationService = CdiUtil.bean(AuthenticationService)
saml_map_user = False
saml_enroll_user = False
saml_enroll_all_user_attr = False
# Use saml_deployment_type only if there is no attributes mapping
if configurationAttributes.containsKey("saml_deployment_type"):
saml_deployment_type = StringHelper.toLowerCase(configurationAttributes.get("saml_deployment_type").getValue2())
if StringHelper.equalsIgnoreCase(saml_deployment_type, "map"):
saml_map_user = True
if StringHelper.equalsIgnoreCase(saml_deployment_type, "enroll"):
saml_enroll_user = True
if StringHelper.equalsIgnoreCase(saml_deployment_type, "enroll_all_attr"):
saml_enroll_all_user_attr = True
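        # saml_deployment_type selects one of three modes:
        #   map             - link the SAML NameID to an existing local account via oxExternalUid
        #   enroll          - auto-create a local user from the mapped SAML attributes
        #   enroll_all_attr - auto-create a local user from every attribute in the assertion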
saml_allow_basic_login = False
if configurationAttributes.containsKey("saml_allow_basic_login"):
saml_allow_basic_login = StringHelper.toBoolean(configurationAttributes.get("saml_allow_basic_login").getValue2(), False)
use_basic_auth = False
if saml_allow_basic_login:
            # Detect if the user used the basic authentication method
user_name = credentials.getUsername()
user_password = credentials.getPassword()
if StringHelper.isNotEmpty(user_name) and StringHelper.isNotEmpty(user_password):
use_basic_auth = True
if (step == 1) and saml_allow_basic_login and use_basic_auth:
print "Asimba. Authenticate for step 1. Basic authentication"
identity.setWorkingParameter("saml_count_login_steps", 1)
user_name = credentials.getUsername()
user_password = credentials.getPassword()
logged_in = False
if StringHelper.isNotEmptyString(user_name) and StringHelper.isNotEmptyString(user_password):
logged_in = authenticationService.authenticate(user_name, user_password)
if (not logged_in):
return False
return True
if (step == 1):
print "Asimba. Authenticate for step 1"
currentSamlConfiguration = self.getCurrentSamlConfiguration(self.samlConfiguration, configurationAttributes, requestParameters)
if (currentSamlConfiguration == None):
print "Asimba. Prepare for step 1. Client saml configuration is invalid"
return False
saml_response_array = requestParameters.get("SAMLResponse")
if ArrayHelper.isEmpty(saml_response_array):
print "Asimba. Authenticate for step 1. saml_response is empty"
return False
saml_response = saml_response_array[0]
print "Asimba. Authenticate for step 1. saml_response: '%s'" % saml_response
samlResponse = Response(currentSamlConfiguration)
samlResponse.loadXmlFromBase64(saml_response)
saml_validate_response = True
if configurationAttributes.containsKey("saml_validate_response"):
saml_validate_response = StringHelper.toBoolean(configurationAttributes.get("saml_validate_response").getValue2(), False)
if saml_validate_response:
if not samlResponse.isValid():
print "Asimba. Authenticate for step 1. saml_response isn't valid"
return False
if samlResponse.isAuthnFailed():
print "Asimba. Authenticate for step 1. saml_response AuthnFailed"
return False
saml_response_attributes = samlResponse.getAttributes()
print "Asimba. Authenticate for step 1. attributes: '%s'" % saml_response_attributes
if saml_map_user:
saml_user_uid = self.getSamlNameId(samlResponse)
if saml_user_uid == None:
return False
# Use mapping to local IDP user
print "Asimba. Authenticate for step 1. Attempting to find user by oxExternalUid: saml: '%s'" % saml_user_uid
                # Check if there is a user with the specified saml_user_uid
find_user_by_uid = userService.getUserByAttribute("oxExternalUid", "saml:%s" % saml_user_uid)
if find_user_by_uid == None:
print "Asimba. Authenticate for step 1. Failed to find user"
print "Asimba. Authenticate for step 1. Setting count steps to 2"
identity.setWorkingParameter("saml_count_login_steps", 2)
identity.setWorkingParameter("saml_user_uid", saml_user_uid)
return True
found_user_name = find_user_by_uid.getUserId()
print "Asimba. Authenticate for step 1. found_user_name: '%s'" % found_user_name
user_authenticated = authenticationService.authenticate(found_user_name)
if user_authenticated == False:
print "Asimba. Authenticate for step 1. Failed to authenticate user"
return False
print "Asimba. Authenticate for step 1. Setting count steps to 1"
identity.setWorkingParameter("saml_count_login_steps", 1)
post_login_result = self.samlExtensionPostLogin(configurationAttributes, find_user_by_uid)
print "Asimba. Authenticate for step 1. post_login_result: '%s'" % post_login_result
return post_login_result
elif saml_enroll_user:
# Convert SAML response to user entry
newUser = self.getMappedUser(configurationAttributes, requestParameters, saml_response_attributes)
saml_user_uid = self.getNameId(samlResponse, newUser)
if saml_user_uid == None:
return False
self.setDefaultUid(newUser, saml_user_uid)
newUser.setAttribute("oxExternalUid", "saml:%s" % saml_user_uid)
# Use auto enrollment to local IDP
print "Asimba. Authenticate for step 1. Attempting to find user by oxExternalUid: saml: '%s'" % saml_user_uid
                # Check if there is a user with the specified saml_user_uid
find_user_by_uid = userService.getUserByAttribute("oxExternalUid", "saml:%s" % saml_user_uid)
if find_user_by_uid == None:
# Auto user enrollment
print "Asimba. Authenticate for step 1. There is no user in LDAP. Adding user to local LDAP"
print "Asimba. Authenticate for step 1. Attempting to add user '%s' with next attributes: '%s'" % (saml_user_uid, newUser.getCustomAttributes())
user_unique = self.checkUserUniqueness(newUser)
if not user_unique:
print "Asimba. Authenticate for step 1. Failed to add user: '%s'. User not unique" % newUser.getUserId()
facesMessages = CdiUtil.bean(FacesMessages)
facesMessages.add(FacesMessage.SEVERITY_ERROR, "Failed to enroll. User with same key attributes exist already")
facesMessages.setKeepMessages()
return False
find_user_by_uid = userService.addUser(newUser, True)
print "Asimba. Authenticate for step 1. Added new user with UID: '%s'" % find_user_by_uid.getUserId()
else:
if self.updateUser:
print "Asimba. Authenticate for step 1. Attempting to update user '%s' with next attributes: '%s'" % (saml_user_uid, newUser.getCustomAttributes())
find_user_by_uid.setCustomAttributes(newUser.getCustomAttributes())
userService.updateUser(find_user_by_uid)
print "Asimba. Authenticate for step 1. Updated user with UID: '%s'" % saml_user_uid
found_user_name = find_user_by_uid.getUserId()
print "Asimba. Authenticate for step 1. found_user_name: '%s'" % found_user_name
user_authenticated = authenticationService.authenticate(found_user_name)
if user_authenticated == False:
print "Asimba. Authenticate for step 1. Failed to authenticate user: '%s'" % found_user_name
return False
print "Asimba. Authenticate for step 1. Setting count steps to 1"
identity.setWorkingParameter("saml_count_login_steps", 1)
post_login_result = self.samlExtensionPostLogin(configurationAttributes, find_user_by_uid)
print "Asimba. Authenticate for step 1. post_login_result: '%s'" % post_login_result
return post_login_result
elif saml_enroll_all_user_attr:
# Convert SAML response to user entry
newUser = self.getMappedAllAttributesUser(saml_response_attributes)
saml_user_uid = self.getNameId(samlResponse, newUser)
if saml_user_uid == None:
return False
self.setDefaultUid(newUser, saml_user_uid)
newUser.setAttribute("oxExternalUid", "saml:%s" % saml_user_uid)
print "Asimba. Authenticate for step 1. Attempting to find user by oxExternalUid: saml:%s" % saml_user_uid
                # Check if there is a user with the specified saml_user_uid
find_user_by_uid = userService.getUserByAttribute("oxExternalUid", "saml:%s" % saml_user_uid)
if find_user_by_uid == None:
# Auto user enrollment
print "Asimba. Authenticate for step 1. There is no user in LDAP. Adding user to local LDAP"
print "Asimba. Authenticate for step 1. Attempting to add user '%s' with next attributes: '%s'" % (saml_user_uid, newUser.getCustomAttributes())
user_unique = self.checkUserUniqueness(newUser)
if not user_unique:
print "Asimba. Authenticate for step 1. Failed to add user: '%s'. User not unique" % newUser.getUserId()
facesMessages = CdiUtil.bean(FacesMessages)
facesMessages.add(FacesMessage.SEVERITY_ERROR, "Failed to enroll. User with same key attributes exist already")
facesMessages.setKeepMessages()
return False
find_user_by_uid = userService.addUser(newUser, True)
print "Asimba. Authenticate for step 1. Added new user with UID: '%s'" % find_user_by_uid.getUserId()
else:
if self.updateUser:
print "Asimba. Authenticate for step 1. Attempting to update user '%s' with next attributes: '%s'" % (saml_user_uid, newUser.getCustomAttributes())
find_user_by_uid.setCustomAttributes(newUser.getCustomAttributes())
userService.updateUser(find_user_by_uid)
print "Asimba. Authenticate for step 1. Updated user with UID: '%s'" % saml_user_uid
found_user_name = find_user_by_uid.getUserId()
print "Asimba. Authenticate for step 1. found_user_name: '%s'" % found_user_name
user_authenticated = authenticationService.authenticate(found_user_name)
if user_authenticated == False:
print "Asimba. Authenticate for step 1. Failed to authenticate user"
return False
print "Asimba. Authenticate for step 1. Setting count steps to 1"
identity.setWorkingParameter("saml_count_login_steps", 1)
post_login_result = self.samlExtensionPostLogin(configurationAttributes, find_user_by_uid)
print "Asimba. Authenticate for step 1. post_login_result: '%s'" % post_login_result
return post_login_result
            else:
                saml_user_uid = self.getSamlNameId(samlResponse)
                if saml_user_uid == None:
                    return False
                # Check if there is a user with the specified saml_user_uid
print "Asimba. Authenticate for step 1. Attempting to find user by uid: '%s'" % saml_user_uid
find_user_by_uid = userService.getUser(saml_user_uid)
if find_user_by_uid == None:
print "Asimba. Authenticate for step 1. Failed to find user"
return False
found_user_name = find_user_by_uid.getUserId()
print "Asimba. Authenticate for step 1. found_user_name: '%s'" % found_user_name
user_authenticated = authenticationService.authenticate(found_user_name)
if user_authenticated == False:
print "Asimba. Authenticate for step 1. Failed to authenticate user"
return False
print "Asimba. Authenticate for step 1. Setting count steps to 1"
identity.setWorkingParameter("saml_count_login_steps", 1)
post_login_result = self.samlExtensionPostLogin(configurationAttributes, find_user_by_uid)
print "Asimba. Authenticate for step 1. post_login_result: '%s'" % post_login_result
return post_login_result
elif (step == 2):
print "Asimba. Authenticate for step 2"
sessionAttributes = identity.getSessionId().getSessionAttributes()
if (sessionAttributes == None) or not sessionAttributes.containsKey("saml_user_uid"):
print "Asimba. Authenticate for step 2. saml_user_uid is empty"
return False
saml_user_uid = sessionAttributes.get("saml_user_uid")
passed_step1 = StringHelper.isNotEmptyString(saml_user_uid)
if not passed_step1:
return False
user_name = credentials.getUsername()
user_password = credentials.getPassword()
logged_in = False
if StringHelper.isNotEmptyString(user_name) and StringHelper.isNotEmptyString(user_password):
logged_in = authenticationService.authenticate(user_name, user_password)
if not logged_in:
return False
            # Check if there is already a user linked to this saml_user_uid
            # Avoid mapping a SAML account to more than one IDP account
find_user_by_uid = userService.getUserByAttribute("oxExternalUid", "saml:%s" % saml_user_uid)
if find_user_by_uid == None:
                # Add saml_user_uid to the current user's external UIDs
find_user_by_uid = userService.addUserAttribute(user_name, "oxExternalUid", "saml:%s" % saml_user_uid)
if find_user_by_uid == None:
print "Asimba. Authenticate for step 2. Failed to update current user"
return False
post_login_result = self.samlExtensionPostLogin(configurationAttributes, find_user_by_uid)
print "Asimba. Authenticate for step 2. post_login_result: '%s'" % post_login_result
return post_login_result
else:
found_user_name = find_user_by_uid.getUserId()
print "Asimba. Authenticate for step 2. found_user_name: '%s'" % found_user_name
if StringHelper.equals(user_name, found_user_name):
post_login_result = self.samlExtensionPostLogin(configurationAttributes, find_user_by_uid)
print "Asimba. Authenticate for step 2. post_login_result: '%s'" % post_login_result
return post_login_result
return False
else:
return False
def prepareForStep(self, configurationAttributes, requestParameters, step):
authenticationService = CdiUtil.bean(AuthenticationService)
if (step == 1):
print "Asimba. Prepare for step 1"
httpService = CdiUtil.bean(HttpService)
facesContext = CdiUtil.bean(FacesContext)
request = facesContext.getExternalContext().getRequest()
assertionConsumerServiceUrl = httpService.constructServerUrl(request) + "/postlogin"
print "Asimba. Prepare for step 1. Prepared assertionConsumerServiceUrl: '%s'" % assertionConsumerServiceUrl
currentSamlConfiguration = self.getCurrentSamlConfiguration(self.samlConfiguration, configurationAttributes, requestParameters)
if currentSamlConfiguration == None:
print "Asimba. Prepare for step 1. Client saml configuration is invalid"
return False
# Generate an AuthRequest and send it to the identity provider
samlAuthRequest = AuthRequest(currentSamlConfiguration)
external_auth_request_uri = currentSamlConfiguration.getIdpSsoTargetUrl() + "?SAMLRequest=" + samlAuthRequest.getRequest(True, assertionConsumerServiceUrl)
print "Asimba. Prepare for step 1. external_auth_request_uri: '%s'" % external_auth_request_uri
facesService = CdiUtil.bean(FacesService)
facesService.redirectToExternalURL(external_auth_request_uri)
return True
elif (step == 2):
print "Asimba. Prepare for step 2"
return True
else:
return False
def getExtraParametersForStep(self, configurationAttributes, step):
if (step == 2):
return Arrays.asList("saml_user_uid")
return None
def getCountAuthenticationSteps(self, configurationAttributes):
identity = CdiUtil.bean(Identity)
if identity.isSetWorkingParameter("saml_count_login_steps"):
return identity.getWorkingParameter("saml_count_login_steps")
return 2
def getPageForStep(self, configurationAttributes, step):
if (step == 1):
saml_allow_basic_login = False
if configurationAttributes.containsKey("saml_allow_basic_login"):
saml_allow_basic_login = StringHelper.toBoolean(configurationAttributes.get("saml_allow_basic_login").getValue2(), False)
if saml_allow_basic_login:
return "/login.xhtml"
else:
return "/auth/saml/samllogin.xhtml"
return "/auth/saml/samlpostlogin.xhtml"
def logout(self, configurationAttributes, requestParameters):
return True
def isPassedStep1():
identity = CdiUtil.bean(Identity)
credentials = identity.getCredentials()
user_name = credentials.getUsername()
passed_step1 = StringHelper.isNotEmptyString(user_name)
return passed_step1
def loadCeritificate(self, asimba_saml_certificate_file):
asimba_saml_certificate = None
# Load certificate from file
f = open(asimba_saml_certificate_file, 'r')
try:
asimba_saml_certificate = f.read()
except:
print "Asimba. Failed to load certificate from file: '%s'" % asimba_saml_certificate_file
return None
finally:
f.close()
return asimba_saml_certificate
def getClientConfiguration(self, configurationAttributes, requestParameters):
# Get client configuration
if configurationAttributes.containsKey("saml_client_configuration_attribute"):
saml_client_configuration_attribute = configurationAttributes.get("saml_client_configuration_attribute").getValue2()
print "Asimba. GetClientConfiguration. Using client attribute: '%s'" % saml_client_configuration_attribute
if requestParameters == None:
return None
client_id = None
client_id_array = requestParameters.get("client_id")
if ArrayHelper.isNotEmpty(client_id_array) and StringHelper.isNotEmptyString(client_id_array[0]):
client_id = client_id_array[0]
if client_id == None:
identity = CdiUtil.bean(Identity)
if identity.getSessionId() != None:
client_id = identity.getSessionId().getSessionAttributes().get("client_id")
if client_id == None:
print "Asimba. GetClientConfiguration. client_id is empty"
return None
clientService = CdiUtil.bean(ClientService)
client = clientService.getClient(client_id)
if client == None:
print "Asimba. GetClientConfiguration. Failed to find client '%s' in local LDAP" % client_id
return None
saml_client_configuration = clientService.getCustomAttribute(client, saml_client_configuration_attribute)
if (saml_client_configuration == None) or StringHelper.isEmpty(saml_client_configuration.getValue()):
print "Asimba. GetClientConfiguration. Client '%s' attribute '%s' is empty" % ( client_id, saml_client_configuration_attribute )
else:
print "Asimba. GetClientConfiguration. Client '%s' attribute '%s' is '%s'" % ( client_id, saml_client_configuration_attribute, saml_client_configuration )
return saml_client_configuration
return None
def getCurrentSamlConfiguration(self, currentSamlConfiguration, configurationAttributes, requestParameters):
saml_client_configuration = self.getClientConfiguration(configurationAttributes, requestParameters)
if saml_client_configuration == None:
return currentSamlConfiguration
saml_client_configuration_value = json.loads(saml_client_configuration.getValue())
client_asimba_saml_certificate = None
client_asimba_saml_certificate_file = saml_client_configuration_value["asimba_saml_certificate_file"]
if StringHelper.isNotEmpty(client_asimba_saml_certificate_file):
client_asimba_saml_certificate = self.loadCeritificate(client_asimba_saml_certificate_file)
if StringHelper.isEmpty(client_asimba_saml_certificate):
print "Asimba. BuildClientSamlConfiguration. File with x509 certificate should be not empty. Using default configuration"
return currentSamlConfiguration
clientSamlConfiguration = currentSamlConfiguration.clone()
if client_asimba_saml_certificate != None:
clientSamlConfiguration.loadCertificateFromString(client_asimba_saml_certificate)
client_asimba_entity_id = saml_client_configuration_value["asimba_entity_id"]
clientSamlConfiguration.setIssuer(client_asimba_entity_id)
saml_use_authn_context = saml_client_configuration_value["saml_use_authn_context"]
client_use_saml_use_authn_context = StringHelper.toBoolean(saml_use_authn_context, True)
clientSamlConfiguration.setUseRequestedAuthnContext(client_use_saml_use_authn_context)
return clientSamlConfiguration
def prepareAttributesMapping(self, saml_idp_attributes_mapping):
saml_idp_attributes_mapping_json = json.loads(saml_idp_attributes_mapping)
if len(saml_idp_attributes_mapping_json) == 0:
print "Asimba. PrepareAttributesMapping. There is no attributes mapping specified in saml_idp_attributes_mapping property"
return None
attributeMapping = IdentityHashMap()
for local_attribute_name in saml_idp_attributes_mapping_json:
localAttribute = StringHelper.toLowerCase(local_attribute_name)
for idp_attribute_name in saml_idp_attributes_mapping_json[local_attribute_name]:
idpAttribute = StringHelper.toLowerCase(idp_attribute_name)
attributeMapping.put(idpAttribute, localAttribute)
return attributeMapping
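    # Illustrative sketch only (the attribute names below are hypothetical, not
    # taken from the deployed configuration): given a saml_idp_attributes_mapping
    # value such as {"uid": ["uid", "urn:oid:0.9.2342.19200300.100.1.1"], "mail": ["mail"]},
    # prepareAttributesMapping inverts it into an IDP-attribute -> local-attribute
    # map, so the returned map would hold the lower-cased entries
    # "uid" -> "uid", "urn:oid:0.9.2342.19200300.100.1.1" -> "uid" and "mail" -> "mail".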
def prepareUserObjectClasses(self, configurationAttributes):
user_object_classes = configurationAttributes.get("user_object_classes").getValue2()
user_object_classes_list_array = StringHelper.split(user_object_classes, ",")
if ArrayHelper.isEmpty(user_object_classes_list_array):
return None
return user_object_classes_list_array
def prepareUserEnforceUniquenessAttributes(self, configurationAttributes):
enforce_uniqueness_attr_list = configurationAttributes.get("enforce_uniqueness_attr_list").getValue2()
enforce_uniqueness_attr_list_array = StringHelper.split(enforce_uniqueness_attr_list, ",")
if ArrayHelper.isEmpty(enforce_uniqueness_attr_list_array):
return None
return enforce_uniqueness_attr_list_array
def prepareCurrentAttributesMapping(self, currentAttributesMapping, configurationAttributes, requestParameters):
saml_client_configuration = self.getClientConfiguration(configurationAttributes, requestParameters)
if saml_client_configuration == None:
return currentAttributesMapping
saml_client_configuration_value = json.loads(saml_client_configuration.getValue())
clientAttributesMapping = self.prepareAttributesMapping(saml_client_configuration_value["saml_idp_attributes_mapping"])
if clientAttributesMapping == None:
print "Asimba. PrepareCurrentAttributesMapping. Client attributes mapping is invalid. Using default one"
return currentAttributesMapping
return clientAttributesMapping
def samlExtensionPostLogin(self, configurationAttributes, user):
if self.samlExtensionModule == None:
return True
try:
post_login_result = self.samlExtensionModule.postLogin(configurationAttributes, user)
print "Asimba. ExtensionPostlogin result: '%s'" % post_login_result
return post_login_result
except Exception, ex:
print "Asimba. ExtensionPostlogin. Failed to execute postLogin method"
print "Asimba. ExtensionPostlogin. Unexpected error:", ex
return False
except java.lang.Throwable, ex:
print "Asimba. ExtensionPostlogin. Failed to execute postLogin method"
ex.printStackTrace()
return False
def checkUserUniqueness(self, user):
if self.userEnforceAttributesUniqueness == None:
return True
userService = CdiUtil.bean(UserService)
# Prepare user object to search by pattern
userBaseDn = userService.getDnForUser(None)
userToSearch = User()
userToSearch.setDn(userBaseDn)
for userAttributeName in self.userEnforceAttributesUniqueness:
attribute_values_list = user.getAttributeValues(userAttributeName)
if (attribute_values_list != None) and (attribute_values_list.size() > 0):
userToSearch.setAttribute(userAttributeName, attribute_values_list)
users = userService.getUsersBySample(userToSearch, 1)
if users.size() > 0:
return False
return True
def getMappedUser(self, configurationAttributes, requestParameters, saml_response_attributes):
        # Convert Saml result attribute keys to lower case
saml_response_normalized_attributes = HashMap()
for saml_response_attribute_entry in saml_response_attributes.entrySet():
saml_response_normalized_attributes.put(StringHelper.toLowerCase(saml_response_attribute_entry.getKey()), saml_response_attribute_entry.getValue())
currentAttributesMapping = self.prepareCurrentAttributesMapping(self.attributesMapping, configurationAttributes, requestParameters)
print "Asimba. Get mapped user. Using next attributes mapping '%s'" % currentAttributesMapping
newUser = User()
# Set custom object classes
if self.userObjectClasses != None:
print "Asimba. Get mapped user. User custom objectClasses to add persons: '%s'" % Util.array2ArrayList(self.userObjectClasses)
newUser.setCustomObjectClasses(self.userObjectClasses)
for attributesMappingEntry in currentAttributesMapping.entrySet():
idpAttribute = attributesMappingEntry.getKey()
localAttribute = attributesMappingEntry.getValue()
if self.debugEnrollment:
print "Asimba. Get mapped user. Trying to map '%s' into '%s'" % (idpAttribute, localAttribute)
localAttributeValue = saml_response_normalized_attributes.get(idpAttribute)
if localAttributeValue != None:
if self.debugEnrollment:
print "Asimba. Get mapped user. Setting attribute '%s' value '%s'" % (localAttribute, localAttributeValue)
newUser.setAttribute(localAttribute, localAttributeValue)
else:
if newUser.getAttribute(localAttribute) == None:
newUser.setAttribute(localAttribute, ArrayList())
return newUser
def getMappedAllAttributesUser(self, saml_response_attributes):
user = User()
# Set custom object classes
if self.userObjectClasses != None:
print "Asimba. Get mapped all attributes user. User custom objectClasses to add persons: '%s'" % Util.array2ArrayList(self.userObjectClasses)
user.setCustomObjectClasses(self.userObjectClasses)
# Prepare map to do quick mapping
attributeService = CdiUtil.bean(AttributeService)
ldapAttributes = attributeService.getAllAttributes()
samlUriToAttributesMap = HashMap()
for ldapAttribute in ldapAttributes:
saml2Uri = ldapAttribute.getSaml2Uri()
if saml2Uri == None:
saml2Uri = attributeService.getDefaultSaml2Uri(ldapAttribute.getName())
samlUriToAttributesMap.put(saml2Uri, ldapAttribute.getName())
customAttributes = ArrayList()
for key in saml_response_attributes.keySet():
ldapAttributeName = samlUriToAttributesMap.get(key)
if ldapAttributeName == None:
print "Asimba. Get mapped all attributes user. Skipping saml attribute: '%s'" % key
continue
if StringHelper.equalsIgnoreCase(ldapAttributeName, "uid"):
continue
attribute = CustomAttribute(ldapAttributeName)
attribute.setValues(saml_response_attributes.get(key))
customAttributes.add(attribute)
user.setCustomAttributes(customAttributes)
return user
def getNameId(self, samlResponse, newUser):
if self.generateNameId:
saml_user_uid = self.generateNameUid(newUser)
else:
saml_user_uid = self.getSamlNameId(samlResponse)
return saml_user_uid
def getSamlNameId(self, samlResponse):
saml_response_name_id = samlResponse.getNameId()
if StringHelper.isEmpty(saml_response_name_id):
print "Asimba. Get Saml response. saml_response_name_id is invalid"
return None
print "Asimba. Get Saml response. saml_response_name_id: '%s'" % saml_response_name_id
# Use persistent Id as saml_user_uid
return saml_response_name_id
def generateNameUid(self, user):
if self.userEnforceAttributesUniqueness == None:
print "Asimba. Build local external uid. User enforce attributes uniqueness not specified"
return None
sb = StringBuilder()
first = True
for userAttributeName in self.userEnforceAttributesUniqueness:
if not first:
sb.append("!")
first = False
attribute_values_list = user.getAttributeValues(userAttributeName)
if (attribute_values_list != None) and (attribute_values_list.size() > 0):
first_attribute_value = attribute_values_list.get(0)
sb.append(first_attribute_value)
return sb.toString()
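    # Hedged example (attribute names and values are hypothetical): with
    # userEnforceAttributesUniqueness = ["mail", "uid"] and a user whose first
    # values are "jane.doe@example.com" and "jdoe", generateNameUid returns
    # "jane.doe@example.com!jdoe", i.e. the first value of each enforced
    # attribute joined with "!".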
def setDefaultUid(self, user, saml_user_uid):
if StringHelper.isEmpty(user.getUserId()):
user.setUserId(saml_user_uid)
| 49.056122
| 171
| 0.672569
|
7c4cd509b8fc250851efee8685b875586928364a
| 345
|
py
|
Python
|
tests/cli_edge/test_cli_edge.py
|
Rdvp1514/test_junkie
|
9246a33abc9ac8d6584781dcbe95e1093507aa8f
|
[
"MIT"
] | 72
|
2018-10-25T18:32:42.000Z
|
2022-02-02T03:03:09.000Z
|
tests/cli_edge/test_cli_edge.py
|
Rdvp1514/test_junkie
|
9246a33abc9ac8d6584781dcbe95e1093507aa8f
|
[
"MIT"
] | 41
|
2018-12-13T22:30:35.000Z
|
2021-11-04T09:08:49.000Z
|
tests/cli_edge/test_cli_edge.py
|
Rdvp1514/test_junkie
|
9246a33abc9ac8d6584781dcbe95e1093507aa8f
|
[
"MIT"
] | 10
|
2019-04-05T10:51:11.000Z
|
2021-12-06T15:18:56.000Z
|
import os
from tests.cli.Cmd import Cmd
ROOT = __file__.split("{0}tests".format(os.sep))[0]
EXE = ROOT + "{0}test_junkie{0}cli{0}cli.py".format(os.sep)
TESTS = ROOT + "{0}tests{0}cli_edge".format(os.sep)
def test_interrupt():
Cmd.run(['python', EXE, 'config', 'restore', '--all'])
print(Cmd.run(['python', EXE, 'run', '-s', TESTS]))
| 26.538462
| 59
| 0.634783
|
8ce333514ef73ce6c1f2fc8ccd3a6cfe0cd34441
| 11,861
|
py
|
Python
|
vive-dump/vivedump.py
|
unhcfreg/VR4Sec
|
2547afcd064b6f765f4003b90da2cb618d12bc62
|
[
"MIT"
] | null | null | null |
vive-dump/vivedump.py
|
unhcfreg/VR4Sec
|
2547afcd064b6f765f4003b90da2cb618d12bc62
|
[
"MIT"
] | null | null | null |
vive-dump/vivedump.py
|
unhcfreg/VR4Sec
|
2547afcd064b6f765f4003b90da2cb618d12bc62
|
[
"MIT"
] | 1
|
2020-04-08T22:09:06.000Z
|
2020-04-08T22:09:06.000Z
|
# Vivedump
# Team Front row
# Dumps VR tracking information and whatever else we can find
import json
import struct
import visualizer as vis
import os
import volatility.plugins.common as common
import volatility.plugins.taskmods as taskmods
import volatility.utils as utils
import volatility.win32 as win32
import volatility.debug as debug
from volatility.renderers import TreeGrid
try:
import yara
HAS_YARA = True
except ImportError:
HAS_YARA = False
# Using a hex string rather than trying to deal with escape characters
# string = jsonid" : "chaperone_info"
YARA_JSON = {
'chap_config': 'rule chap_config { strings: $p = {6a736f6e696422203a20226368617065726f6e655f696e666f} condition: $p}',
}
# The opcode that loads the pointer into rax
#YARA_HMD = {
# 'hmd_pointer': 'rule hmd_pointer { strings: $p = {48 8b 05 15 f9 10 00} condition: $p}',
#}
YARA_HMD = {
'hmd_pointer': 'rule hmd_pointer { strings: $p = {48 8b 05 05 cf 10 00} condition: $p}',
}
# Maximum length of data to read in when looking for the Chaperone config file
max_size_of_file = 4096
# Tracked devices and the offsets to their data
hmd = ["HMD", 0x5C]
controller1 = ["Controller", 0x19C]
controller2 = ["Controller", 0x14C]
base_station1 = ["Base Station 1", 0xAC]
base_station2 = ["Base Station 2", 0xFC]
tracked_objects = [hmd , controller1, controller2, base_station1, base_station2]
def deref(address, a, length=4):
"""Derefernces the pointer in that memory location"""
return struct.unpack("<I", a.read(address, length))[0]
def hex_to_float(hex):
"""Converts a hex string to float"""
return struct.unpack('!f', str(hex).decode('hex'))[0]
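# For example, hex_to_float('3f800000') returns 1.0: '3f800000'.decode('hex')
# yields the four bytes 3f 80 00 00 (Python 2, as this Volatility plugin assumes),
# which struct.unpack('!f', ...) reads as a big-endian IEEE 754 float.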
def tracked_result_dict(enum):
"""Converts the enumerator code for EtrackingResult to string, see openVR.h for documentation"""
return {
1: 'TrackingResult_Uninitialized',
100: 'TrackingResult_Calibrating_InProgress',
101: 'TrackingResult_Calibrating_OutOfRange',
200: 'TrackingResult_Running_OK',
201: 'TrackingResult_Running_OutOfRange',
}[enum]
def parse_json(rdata):
""" Takes in the block of data where the yarascan matched,
Crops of the junk at the end and closes up the last bracket.
The config file always ends in the version number so search for that and cut it off
TODO Sloppy, should preserve version number
"""
end_file = rdata.rfind('version')
fixed_end = "{\n\"" + rdata[:end_file]
# Close out the rest of the JSON
end_file = fixed_end.rfind("]")
fixed_end = fixed_end[:end_file + 1] + "}"
# Load the memory as a JSON
parsed_json = json.loads(fixed_end, strict=False)
return parsed_json
def convert_to_matrix44(m):
return [
m[0][0], m[1][0], m[2][0], 0.0,
m[0][1], m[1][1], m[2][1], 0.0,
m[0][2], m[1][2], m[2][2], 0.0,
m[0][3], m[1][3], m[2][3], 1.0
]
class ViveDump(taskmods.DllList):
"""Extracts SteamVR information"""
meta_info = dict(
author='Peter Gromkowski',
contact='pgrom1@unh.newhaven.edu',
url='https://github.com/strat1892/vive-dump',
version='1.0',
)
visualizer = vis.Vis()
# TODO add option to scan for jsons
# add: specify number of devices
def __init__(self, config, *args, **kwargs):
taskmods.DllList.__init__(self, config, *args, **kwargs)
config.add_option("NUM-DEVICES", short_option='N', default=1,
help='Number of tracked devices to extract',
action='store', type=int)
config.add_option("FULL-SCAN", short_option='F', default=True,
help='Scan the entire dump',
action='store_false')
config.add_option("CHAP-FILE", short_option='C', default=False,
help='Provide Chaperone config file', type='str')
config.remove_option("OFFSET")
def build_obj(self, universe_number, parsed_json):
"""
        Accepts a parsed json and converts the vertices into an obj file.
        Creates a new object file for each file found.
        There may be multiple universes per file.
        TODO: Account for multiple universes: Standing and Room scale.
        If there are both a standing and a seated universe they will both be dumped into the same file.
"""
filename = 'chaperone_visualization' + str(universe_number) + '.obj'
with open(filename, 'w+') as data_file:
wall_counter = 0
for room in parsed_json['universes']:
# Parse each wall
for wall in room['collision_bounds']:
# We can assume there will be four corners to a wall
for i in range(0, 4):
# Prefix to specify a vertex
data_file.write('v ')
                        # Points are represented as x (left/right), y (vertical), z (front/back)
coords = []
for j in range(0, 3):
data_file.write(str(wall[i][j]) + ' ')
coords.append(wall[i][j])
data_file.write('\n') # Add space to group walls
self.visualizer.add_vert(coords[0], coords[1], coords[2])
wall_counter += 1
data_file.write('\n') # Space to separate vertices from faces
for face in range(0, wall_counter):
# Prefix to represent the line specifies a face
data_file.write('f ')
self.visualizer.add_edge(4 * face, 4 * face + 1)
self.visualizer.add_edge(4 * face + 1, 4 * face + 2)
self.visualizer.add_edge(4 * face + 2, 4 * face + 3)
self.visualizer.add_edge(4 * face + 3, 4 * face)
# obj file format refers to the first vertex as 1
# We can assume that all faces can be represented as quads
for i in range(1, 5):
data_file.write(str(4 * face + i) + ' ')
data_file.write('\n')
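    # Minimal sketch of the chaperone JSON shape this method expects (the numbers
    # are made up for illustration): each universe carries a list of walls and
    # each wall is four [x, y, z] corners, e.g.
    #   {"universes": [{"collision_bounds": [[[0, 0, 0], [0, 2.4, 0],
    #                                          [2, 2.4, 0], [2, 0, 0]]]}]}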
def get_coords(self, pointer, process_space):
""" Pulls information from the HMD structgit
The x coordinate is offset 0x68 from the pointer
each following point is offset 0x10
TODO find the normal vector, hunch between coordinates
Controller coordinates start at 0xB8
"""
for i in range(self._config.NUM_DEVICES):
print(tracked_objects[i][0])
matrix = []
for row_offset in [0, 0x10, 0x20]:
row = []
for col_offset in [0, 4, 8, 12]:
x_hex = deref(pointer + tracked_objects[i][1] + row_offset + col_offset, process_space)
row.append(hex_to_float('%x' % x_hex))
print("{0}, {1}, {2}, {3}".format(row[0], row[1], row[2], row[3]))
matrix.append(row)
track_result = deref(pointer + tracked_objects[i][1] + 0x48, process_space)
bool_vis_connected = deref(pointer + tracked_objects[i][1] + 0x4c, process_space)
print("ETrackingResult: " + tracked_result_dict(track_result))
print("Bool values: {0}".format(hex(bool_vis_connected)))
# Isolate the byte containing each bool value
pose_is_valid = bool_vis_connected & 0x00000f00
device_is_connected = bool_vis_connected & 0x0000000f
print('bPoseIsValid: {0}'.format(bool(pose_is_valid)))
print('bDeviceIsConnected: {0}'.format(bool(device_is_connected)))
print("\n")
self.visualizer.set_device(convert_to_matrix44(matrix), tracked_objects[i][0])
return matrix
def calculate(self):
"""Required: Use Filescan to find Chaperone config file"""
if not HAS_YARA:
debug.error('Yara must be installed for this plugin')
# Load address space
addr_space = utils.load_as(self._config)
# Get list of processes
tasks = win32.tasks.pslist(addr_space)
universe_count = 0 # Append to dumpfile in case of multiple finds
# Read the Chaperone information from the provided file
if self._config.CHAP_FILE:
print("Loading Chaperone information from file")
file1 = open(self._config.CHAP_FILE, "r+")
json_from_file = json.load(file1)
self.build_obj(1, json_from_file)
for task in tasks:
if self._config.FULL_SCAN and str(task.ImageFileName) != "vrmonitor.exe":
continue
else:
print("Scanning {0} pid: {1}".format(task.ImageFileName, task.UniqueProcessId))
vad_offset = 0
for vad, process_space in task.get_vads():
vad_offset += vad.Length
# print " ".join(hex(ord(n)) for n in pointer)
if vad.Length > 8*1024*1024*1024:
continue
# read Vad content
data = process_space.zread(vad.Start, vad.Length)
if not self._config.CHAP_FILE:
                        # Compile yara signatures
rules = yara.compile(sources=YARA_JSON)
# match yara rules
matches = rules.match(data=data)
for match in matches:
universe_count += 1
match_offset = vad.Start + match.strings[0][0]
print("Found chaperone config file at {0}".format(hex(match_offset)))
# Read the region matching the yara scan
rdata = process_space.zread(match_offset, max_size_of_file)
parsed_json = parse_json(rdata)
self.build_obj(universe_count, parsed_json)
rules = yara.compile(sources=YARA_HMD)
matches = rules.match(data=data)
for match in matches:
                        # Find the opcode that loads the pointer to RSI
match_offset = vad.Start + match.strings[0][0]
# Pointer is a DWORD so read and reconstruct
# deref_pointer_lower = deref(match_offset + 1112348, process_space)
# deref_pointer_upper = deref(match_offset + 1112352, process_space)
deref_pointer_lower = deref(match_offset + 0x10CF0C, process_space)
deref_pointer_upper = deref(match_offset + 0x10CF10, process_space)
pointer = 4294967296 * deref_pointer_upper + deref_pointer_lower
print("Pointer to HMD struct found: {0}".format(hex(pointer)))
matrix = self.get_coords(pointer, process_space)
yield matrix
def generator(self, data):
for task in data:
yield (0, [
int(task.UniqueProcessId),
str(task.CreateTime),
str(task.ImageFileName)
])
def render_text(self, outfd, data):
"""
This method formats output to the terminal.
:param outfd | <file>
data | <generator>
"""
for matrix in data:
outfd.write("{0}, {1}, {2}".format(matrix[0], matrix[1], matrix[2]))
self.visualizer.on_execute()
#def unified_output(self, data):
# return TreeGrid([
# ("Matrix 0", float ),
# ( "Matrix 2", float)
# ], self.generator(data))
# self.visualizer.on_execute()
| 38.26129
| 122
| 0.573729
|
a530e77e2f290066bac111e11b742816060808a6
| 2,021
|
py
|
Python
|
Python/Examples/Macros/JointsPlayback.py
|
halmusaibeli/RoboDK-API
|
e017aa26715bc8d0fcbbc05e57acc32f2d2d6174
|
[
"MIT"
] | null | null | null |
Python/Examples/Macros/JointsPlayback.py
|
halmusaibeli/RoboDK-API
|
e017aa26715bc8d0fcbbc05e57acc32f2d2d6174
|
[
"MIT"
] | null | null | null |
Python/Examples/Macros/JointsPlayback.py
|
halmusaibeli/RoboDK-API
|
e017aa26715bc8d0fcbbc05e57acc32f2d2d6174
|
[
"MIT"
] | null | null | null |
# This macro will move the robot along a list of joints in a CSV file
# Tip: Use the macro MonitorJoints.py to record a CSV file that can be loaded by this script
TIME_MATCH = False
MEASURE_COLLISIONS = False
from robodk.robolink import * # API to communicate with RoboDK
from robodk.robomath import * # basic matrix operations
from robodk.robofileio import *
from time import gmtime, strftime
RDK = Robolink()
# Turn off collision checking
RDK.setCollisionActive(COLLISION_OFF)
# Ask the user to select a robot arm (mechanisms are ignored)
robot = RDK.ItemUserPick('Select a robot', ITEM_TYPE_ROBOT_ARM)
if not robot.Valid():
raise Exception("Robot is not available")
# Generate a file in the same folder where we have the RDK file
folder_path = RDK.getParam('PATH_OPENSTATION')
# Ask the user to select a file
#filename = getOpenFile(path_preference=folder_path, strfile='', strtitle='Open CSV file ...', defaultextension='.csv', filetypes=[('CSV files', '*.csv'), ('All files', '*.*')])
filename = RDK.getParam('PATH_OPENSTATION') + '/joints-test.csv'
# Load the CSV file as a list of lists (each row is a list)
csvdata = LoadList(filename)
# Iterate through each row
total_collision_time = 0
max_collision_time = 0
count = 0
last_row = None
for row in csvdata:
joints = row
# Match timings
if TIME_MATCH and last_row is not None:
t_step = row[-1] - last_row[-1]
pause(t_step)
robot.setJoints(row)
    last_row = row
# Measure collision time
if MEASURE_COLLISIONS:
ncol = RDK.Collisions()
collision_time = int(RDK.Command("CollisionCalcTime"))
total_collision_time = total_collision_time + collision_time
count = count + 1
max_collision_time = max(max_collision_time, collision_time)
time_average = total_collision_time / count
msg = "Collision time (ms): Max: %i | Average: %.1f | Last: %i" % (max_collision_time, time_average, collision_time)
RDK.ShowMessage(msg, False)
print(msg)
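# Rough sketch of the CSV layout this script assumes (an assumption, not taken
# from the MonitorJoints.py docs): one row per sample with the joint values in
# degrees and, when TIME_MATCH is enabled, a timestamp as the last column so
# that row[-1] - last_row[-1] gives the pause between samples, e.g.
#   10.0,-90.0,90.0,0.0,90.0,0.0,0.125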
| 33.131148
| 177
| 0.711529
|
6c23855d1ef8eaae55e089847be6f418a2eb327b
| 95
|
py
|
Python
|
pet_booking/appointment/apps.py
|
bilbeyt/pet_booking
|
be362790be6faecd5cebb38256327b9acf399152
|
[
"MIT"
] | null | null | null |
pet_booking/appointment/apps.py
|
bilbeyt/pet_booking
|
be362790be6faecd5cebb38256327b9acf399152
|
[
"MIT"
] | null | null | null |
pet_booking/appointment/apps.py
|
bilbeyt/pet_booking
|
be362790be6faecd5cebb38256327b9acf399152
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class AppointmentConfig(AppConfig):
    name = 'appointment'
| 15.833333
| 34
| 0.768421
|
031817ba5116a9624713c415f7851d021da841ba
| 998
|
py
|
Python
|
CreateHeader.py
|
earney/WallaceSubmit
|
5a7ca3f03b4fb9a0d1e995489dd1259a386447cd
|
[
"MIT"
] | null | null | null |
CreateHeader.py
|
earney/WallaceSubmit
|
5a7ca3f03b4fb9a0d1e995489dd1259a386447cd
|
[
"MIT"
] | null | null | null |
CreateHeader.py
|
earney/WallaceSubmit
|
5a7ca3f03b4fb9a0d1e995489dd1259a386447cd
|
[
"MIT"
] | null | null | null |
import datetime
class CreateHeader:
def __init__(self, comment=u'//'):
self._comment=comment
self._name=''
self._submitDate=''
self._assignment=''
def setName(self, name):
self._name=name
def setSubmitDate(self, date=None):
self._submitDate=date
if date is None:
_d=datetime.datetime.now()
self._submitDate=_d.strftime('%Y-%m-%d %H:%M:%S')
def setAssignment(self, assignment):
self._assignment=assignment
def getHeader(self):
_str=self._comment + u'#'*60
_str+='\n'
_str+=self._comment + '\n'
_str+=self._comment + ' Assignment: %s\n' % self._assignment
_str+=self._comment + ' Name: %s\n' % self._name
_str+=self._comment + ' Submit Date: %s\n' % self._submitDate
_str+=self._comment + '\n'
_str+=self._comment + '#'*60
_str+='\n'
return _str
if __name__ == '__main__':
_ch=CreateHeader(comment=u'#')
_ch.setSubmitDate()
print(_ch.getHeader())
| 24.95
| 67
| 0.608216
|
7f6b742d8ac0babee6ace46b8daa47f1ac92651d
| 460
|
py
|
Python
|
microservices/frontend/main.py
|
dlorenc/skaffold-demo
|
839dc3addef5700a42641afc1755c00ca3430543
|
[
"Apache-2.0"
] | null | null | null |
microservices/frontend/main.py
|
dlorenc/skaffold-demo
|
839dc3addef5700a42641afc1755c00ca3430543
|
[
"Apache-2.0"
] | null | null | null |
microservices/frontend/main.py
|
dlorenc/skaffold-demo
|
839dc3addef5700a42641afc1755c00ca3430543
|
[
"Apache-2.0"
] | null | null | null |
import flask
import requests
application = flask.Flask(__name__)
@application.route('/encrypt')
def encrypt():
msg = flask.request.args.get("message")
response = requests.get(url="http://backend:8000", params={"message": msg})
return flask.render_template('encrypted.html', result=response.text)
@application.route('/')
def index():
return flask.render_template('index.html')
if __name__ == '__main__':
application.run(host='0.0.0.0')
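# Example request (assumes the default Flask port and a reachable "backend"
# service on port 8000; both are assumptions, not documented in this file):
#   curl 'http://localhost:5000/encrypt?message=hello'
# which renders encrypted.html with whatever the backend returned for "hello".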
| 24.210526
| 79
| 0.706522
|
63c81ea8cad7ce45b3fd79752d7a4315f063f7a8
| 1,121
|
py
|
Python
|
apps/tracker/models.py
|
dlzou/csua-backend
|
a2a6642017b81c2fe2bcc497ecc772e9b7dfe210
|
[
"MIT"
] | null | null | null |
apps/tracker/models.py
|
dlzou/csua-backend
|
a2a6642017b81c2fe2bcc497ecc772e9b7dfe210
|
[
"MIT"
] | null | null | null |
apps/tracker/models.py
|
dlzou/csua-backend
|
a2a6642017b81c2fe2bcc497ecc772e9b7dfe210
|
[
"MIT"
] | null | null | null |
from django.db import models
# Create your models here.
def seconds_to_time(seconds: int) -> str:
sign = "" if seconds >= 0 else "-"
mag = abs(seconds)
m, s = divmod(mag, 60)
h, m = divmod(m, 60)
return "%s%d:%02d:%02d" % (sign, h, m, s)
class User(models.Model):
username = models.CharField(
max_length=32, primary_key=True, unique=True, editable=False
)
last_ping = models.DateTimeField(auto_now=True, editable=True)
time_spent = models.IntegerField(default=0)
@property
def time(self):
return seconds_to_time(self.time_spent)
@property
def realname(self):
# TODO: make an LDAP query here and cache the result.
return self.username
class Computer(models.Model):
hostname = models.CharField(max_length=15, primary_key=True)
user = models.ForeignKey("User", on_delete=models.PROTECT, null=True)
local_timestamp = models.DateTimeField(auto_now=True)
@property
def open(self):
return self.user.time_spent >= 7200
@property
def time(self):
return seconds_to_time(self.user.time_spent)
| 26.069767
| 73
| 0.665477
|
83e9a63bee8f443d1f7d2e578592c5e71db14905
| 2,193
|
py
|
Python
|
src/oci/data_safe/models/discovery_job_collection.py
|
LaudateCorpus1/oci-python-sdk
|
b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
src/oci/data_safe/models/discovery_job_collection.py
|
LaudateCorpus1/oci-python-sdk
|
b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
src/oci/data_safe/models/discovery_job_collection.py
|
LaudateCorpus1/oci-python-sdk
|
b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
# coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class DiscoveryJobCollection(object):
"""
A collection of discovery job summary objects.
"""
def __init__(self, **kwargs):
"""
Initializes a new DiscoveryJobCollection object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param items:
The value to assign to the items property of this DiscoveryJobCollection.
:type items: list[oci.data_safe.models.DiscoveryJobSummary]
"""
self.swagger_types = {
'items': 'list[DiscoveryJobSummary]'
}
self.attribute_map = {
'items': 'items'
}
self._items = None
@property
def items(self):
"""
**[Required]** Gets the items of this DiscoveryJobCollection.
An array of discovery job summary objects.
:return: The items of this DiscoveryJobCollection.
:rtype: list[oci.data_safe.models.DiscoveryJobSummary]
"""
return self._items
@items.setter
def items(self, items):
"""
Sets the items of this DiscoveryJobCollection.
An array of discovery job summary objects.
:param items: The items of this DiscoveryJobCollection.
:type: list[oci.data_safe.models.DiscoveryJobSummary]
"""
self._items = items
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| 30.887324
| 245
| 0.664387
|
4d8287db44e6a29deea4f5c5131658055b3ae21d
| 1,359
|
py
|
Python
|
utils/data_generator/sessions_users_generator.py
|
aldoorozco/adaptive_data_pipelines
|
afdced109d5e76c95d68fa6935c2864ac926986f
|
[
"MIT"
] | 1
|
2021-04-12T06:51:21.000Z
|
2021-04-12T06:51:21.000Z
|
utils/data_generator/sessions_users_generator.py
|
aldoorozco/adaptive_data_pipelines
|
afdced109d5e76c95d68fa6935c2864ac926986f
|
[
"MIT"
] | null | null | null |
utils/data_generator/sessions_users_generator.py
|
aldoorozco/adaptive_data_pipelines
|
afdced109d5e76c95d68fa6935c2864ac926986f
|
[
"MIT"
] | null | null | null |
import json
from utils import *
from collections import Counter
def get_zipped_data(records, users):
session_ids = (n for n in range(records))
user_ids = get_normal_dist(users, users/2, records)
#cntr = Counter(sorted(user_ids))
#srted = {k: v for k, v in sorted(cntr.items(), key=lambda item: item[1])}
return zip(session_ids, user_ids)
def write_json(zipped, records, file_index):
file_name = f'sessions_users/sessions_users_{file_index:03}.json'
print(f'Creating file {file_name} with {records} records')
try:
with open(file_name, 'w+') as f:
data = get_n_records_iter(zipped, records)
written = 0
for session_id, user_id in data:
record = {'session_id': int(session_id), 'user_id': int(user_id)}
f.write(json.dumps(record) + '\n')
written += 1
if written != records:
print(f'ERROR: discrepancy between written {written} and expected {records}')
except IOError:
print('ERROR: Unable to write to file')
def main():
records, files, users = get_records_files(3)
records_per_file = int(records / files)
zipped = get_zipped_data(records, users)
for file_index in range(files):
write_json(zipped, records_per_file, file_index)
if __name__ == '__main__':
main()
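# Each output line is a small JSON record; a hypothetical example (the ids
# depend on the generated normal distribution):
#   {"session_id": 0, "user_id": 42}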
| 32.357143
| 93
| 0.642384
|
5dece45b351d0b531e8907dd524d69228a3f6a9d
| 17,151
|
py
|
Python
|
swipc-gen/hashes_gen.py
|
inspier/switch-reversing
|
577f363bfdef05c75708c9af9d1e690197fd9233
|
[
"MIT"
] | 61
|
2019-02-28T11:38:58.000Z
|
2021-11-06T23:57:55.000Z
|
swipc-gen/hashes_gen.py
|
inspier/switch-reversing
|
577f363bfdef05c75708c9af9d1e690197fd9233
|
[
"MIT"
] | 1
|
2021-01-04T23:26:09.000Z
|
2021-01-04T23:26:09.000Z
|
swipc-gen/hashes_gen.py
|
inspier/switch-reversing
|
577f363bfdef05c75708c9af9d1e690197fd9233
|
[
"MIT"
] | 6
|
2019-03-13T17:03:10.000Z
|
2022-01-15T02:03:31.000Z
|
all_hashes = {'9dbc3be4561e89d1': ['nn::ncm::IContentManager'], 'ef572d053fbe0dd0': ['nn::spl::detail::IFsInterface'], '2171a2a3f43a01e2': ['nn::account::nas::IOAuthProcedureForUserRegistration'], '6072a19a5a95b12b': ['nn::bcat::detail::ipc::IDeliveryCacheProgressService'], '6a677c7d9d17ff0a': ['nn::pl::detail::ISharedFontManager'], 'a207109123aa9ee6': ['nn::am::service::IHomeMenuFunctions'], 'aa0c2c207c1ad820': ['nn::nfp::detail::ISystem'], '9a735d669435e452': ['nn::npns::INpnsUser'], '54eb1a88a4c29c49': ['nn::fssrv::sf::IFileSystemProxy'], '3eced0a96862ad10': ['nn::audio::detail::IFinalOutputRecorderManager'], '736d36f862942a6f': ['nn::account::profile::IProfile'], '9598cbef138ebfbf': ['nn::avm::srv::IAvmService'], 'b001441e42b94817': ['nn::clkrst::IClkrstManager'], 'dde489cb9efa1a59': ['nn::ssl::sf::ISslContext'], '7e7ca6d0175ba7bf': ['nn::npns::INpnsSystem'], '3066e14681acfab6': ['nn::ldn::detail::ISystemLocalCommunicationService'], '0fe24d77cf284316': ['nn::account::nas::IOAuthProcedureForExternalNsa'], '84f7f20db4ecb151': ['nn::nifm::detail::IRequest'], 'b8ca6e5433e79cb2': ['nn::timesrv::detail::service::IStaticService'], 'b3bb4bde9ecb224d': ['nn::arp::detail::IWriter'], '705b8aca306f6ff2': ['nn::ns::detail::IApplicationManagerInterface'], '5a6508384258ea2c': ['nns::hosbinder::IHOSBinderDriver'], 'b2da156762209730': ['nn::bcat::detail::ipc::IDeliveryCacheStorageService'], '72c4bcf58cf47efe': ['nn::fssrv::sf::ISaveDataDivisionExporter'], '1f088477c9a99fc8': ['nn::ns::detail::IFactoryResetInterface'], 'd14a028c2a3a2bc9': ['nn::sasbus::IManager', 'nn::fssrv::sf::ISaveDataTransferProhibiter', 'nn::ns::detail::IGameCardStopper', 'nn::ns::detail::IRequestServerStopper', 'nn::olsc::srv::IStopperObject', 'nn::friends::detail::ipc::IDaemonSuspendSessionService', 'nn::am::service::IWindow'], 'f97608292cb9e45a': ['nn::pctl::detail::ipc::IParentalControlServiceFactory'], 'ad95cda1f17d02d4': ['nn::gpio::IPadSession'], 'e3bfdfd72ff62112': ['nn::am::service::IMovieMaker'], '973f73b5a4c23f12': ['nn::account::baas::IManagerForSystemService'], '366ccafd9a88612d': ['nn::ovln::IReceiver'], 'd370e691cf84b160': ['nn::fssrv::sf::ISaveDataChunkExporter'], '47baca73a040292e': ['nn::ns::detail::IApplicationVersionInterface'], '074dc39d711d350c': ['nn::audio::detail::IAudioRendererManagerForApplet'], '52d58b4987c8e6a2': ['nn::nim::detail::IShopServiceManager'], '01526d24fab313d1': ['nn::am::service::IWindowController'], 'c7f6882dbbe76192': ['nn::am::service::ILibraryAppletAccessor'], '6c80c86b931a5598': ['nn::fgm::sf::IRequest'], '4653755c14f961f7': ['nn::erpt::sf::IReport', 'nn::erpt::sf::IAttachment'], 'b74095cc54d5fac2': ['nn::grcsrv::IGrcService'], 'a202525b1a8a98a9': ['nn::ldn::detail::ISystemLocalCommunicationService'], '98c3a246e850be98': ['nn::bgtc::IStateControlService'], '3675d0c8d0312f92': ['nn::capsrv::sf::IScreenShotService'], '86f2c1d0ef9ce65a': ['nn::settings::ISettingsServer'], '2e4ff497ce2ed08e': ['nn::am::service::ICommonStateGetter'], '1af536efc15cfa46': ['nn::am::service::IAudioController'], '7bad1b4f5bcfd278': ['nn::nifm::detail::IGeneralService'], '69547c20efe089af': ['nn::bluetooth::IBluetoothDriver'], 'a09e4f8e00d4e789': ['nn::i2c::ISession'], '09e11edc25c5fddc': ['nn::bcat::detail::ipc::IServiceCreator'], '7de5842c233a5940': ['nn::apm::ISystemManager'], 'd07c250ec43981d7': ['nn::fssrv::sf::ISaveDataExporter'], 'dd2e74efbec1b336': ['nn::hid::IHidServer'], 'a10e408d2022ffae': ['nn::olsc::srv::IOlscServiceForSystemService'], '562a2fb12839614a': ['nn::i2c::IManager'], '2f929cedb08800d2': 
['nn::visrv::sf::IManagerRootService'], '3c3c0d8e93ea8fbd': ['nn::audio::detail::IAudioInManagerForDebugger', 'nn::audio::detail::IAudioOutManagerForDebugger', 'nn::audio::detail::IAudioRendererManagerForDebugger'], '9e41fd46d4b7a93f': ['nn::nfc::am::detail::IAmManager', 'nn::btm::IBtmSystem', 'nn::ldn::detail::ISystemServiceCreator', 'nn::fgm::sf::ISession', 'nn::ldn::detail::IMonitorServiceCreator', 'nn::nfc::detail::ISystemManager', 'nn::nfp::detail::ISystemManager', 'nn::ovln::IReceiverService'], '6f9db4402cc791f7': ['nn::capsrv::sf::IAlbumAccessorService'], 'e39cfdb37d61fa5d': ['nn::lm::ILogService', 'nn::am::service::IApplicationProxyService'], '3cf13869c273d105': ['nn::am::service::ISelfController'], '2f555efa2f8d506a': ['nn::hidbus::IHidbusSystemServer'], 'b70ca143ff309821': ['nn::ns::detail::IAsyncValue', 'nn::nim::detail::IAsyncValue'], 'd9d1a213c68756ea': ['nn::migration::savedata::IServer'], '44eafd8c5082b661': ['nn::nsd::detail::IManager'], 'cee0689a9a0b11ba': ['nn::audioctrl::detail::IAudioController'], 'f3824a893af85c91': ['nn::spl::detail::ISslInterface'], '09e103475160adc4': ['nn::bcat::detail::ipc::IDeliveryCacheFileService'], '8fa19d2fc41ca255': ['nn::pwm::IManager'], '942c96c0df18d3fd': ['nn::irsensor::IIrSensorSystemServer'], 'd297f7f4c55f2a99': ['nn::ns::detail::IDevelopInterface'], 'c2a342bd87e77823': ['nn::ntc::detail::service::IStaticService'], '76e2b39f1ee08dad': ['nn::fssrv::sf::ISaveDataTransferManagerWithDivision'], 'b947f9dbc37d5322': ['nn::grcsrv::IGameMovieTrimmer'], 'fb356508a0535b77': ['nn::am::service::ILibraryAppletSelfAccessor'], '311160d735d1918d': ['nn::ns::detail::IServiceGetterInterface'], 'e40baf330c502c08': ['nn::capsrv::sf::IAlbumControlSession'], '4c68e46daece7a9a': ['nn::olsc::srv::ITransferTaskListController'], '5eab6ed6bcdcc324': ['nn::sm::detail::IUserInterface'], '61346617995cb585': ['nn::fatalsrv::IService'], '66070958073744f1': ['nn::fssrv::sf::IFileSystemProxy'], 'f53b458f6e874c1b': ['nn::audio::detail::IAudioOutManager'], 'ffb1ed1d3467e2a7': ['nn::am::service::IGlobalStateController'], '247e674c7d7c9942': ['nn::ns::detail::IDynamicRightsInterface'], '443a5456dfc61051': ['nn::pctl::detail::ipc::IParentalControlService'], 'a8ee81247c3a9384': ['nn::am::service::IProcessWindingController'], '78ba94f758d51102': ['nn::prepo::detail::ipc::IPrepoService'], 'a86c6f7cb5a6d7e7': ['nn::spl::detail::IGeneralInterface'], 'ea5e8ecd4fa6e4bd': ['nn::settings::IFactorySettingsServer'], 'ed64283144cce438': ['nn::timesrv::detail::service::ISystemClock'], 'de50b78b3b860d4f': ['nn::ns::detail::IReadOnlyApplicationRecordInterface'], 'fb9dd96747bea4c4': ['nn::account::baas::IAsyncContextForLoginForOnlinePlay', 'nn::migration::detail::IAsyncSaveDataMigrationPolicyInfoContext'], '16ff6a82ff0ff103': ['nn::hid::IHidSystemServer'], 'f5519e62e121008f': ['nn::am::service::IDisplayController'], '0743a7edc095b7e0': ['nn::lm::ILogger'], '79a058bc9c6317dc': ['nn::socket::resolver::IResolver'], 'f217ca661a52836a': ['nn::account::baas::IGuestLoginRequest'], '49027abdb751d433': ['nn::spl::detail::ICryptoInterface'], 'e8774ee6ae68f5b0': ['nn::ssl::sf::ISslService'], 'b8df11e25c5458ae': ['nn::ovln::ISenderService'], 'abeacc4a646ad7ca': ['nn::account::IAccountServiceForAdministrator'], '07e8757f9fc2697c': ['nn::fssrv::sf::ISaveDataDivisionImporter'], '8ae9f33a76d9d281': ['nn::fssrv::sf::IFileSystemProxyForLoader'], 'aa880cb1496e9ded': ['nn::account::nas::IAuthorizationRequest'], 'e11bbfff93a9ecbe': ['nn::am::service::IApplicationCreator'], '485b4d67666e8e4a': 
['nn::audio::detail::IAudioOut'], 'faa980e4d4917414': ['nn::gpio::IManager'], '2d35498adea83db4': ['nn::sasbus::ISession'], '9bc89aed8f15cc88': ['nn::news::detail::ipc::INewsDataService'], 'd2b84173c21b2361': ['nn::fssrv::sf::IStorage'], 'a5441bae3cdffc00': ['nn::btm::IBtm'], 'f405e7cc8d9d8020': ['nn::mii::detail::IImageDatabaseService'], 'f0a18bbdf50f8eb8': ['nn::am::service::ISelfController'], '0212a598d1e8125d': ['nn::dauth::detail::IService'], '4c9bf3e216c217f4': ['nn::ncm::IContentStorage'], 'be53d4498f2ab253': ['nns::hosbinder::IHOSBinderDriver'], '899bac0f033326fa': ['nn::audio::detail::IAudioRendererManager'], '5f9a68bcde03c8ea': ['nn::fssrv::sf::IProgramRegistry'], 'ca01926746ce7096': ['nn::olsc::srv::IDaemonController'], 'cefef7d38d0df976': ['nn::migration::user::IService'], '42297b8bafbeadd2': ['nn::grcsrv::IMovieMaker'], '96a5e651c106c4cd': ['nn::apm::ISession'], '70240b01a9865c3e': ['nn::visrv::sf::IManagerDisplayService'], '13b0c7617249b16f': ['nn::audio::detail::IAudioOutManagerForApplet'], 'c238fb7bfeb26bba': ['nn::psm::IPsmServer'], '2a1ed120563f1553': ['nn::socket::sf::IClient'], '2bdab1c969211ee3': ['nn::ro::detail::IRoInterface'], '898b516ade69b3f0': ['nn::hid::IHidSystemServer'], '7e1c9c0fe4f2a35d': ['nn::settings::ISystemSettingsServer'], '3626e2bd982b86f7': ['nn::capsrv::sf::IAlbumControlService'], '5df308fef6e890fb': ['nn::fssrv::sf::IDirectory'], '1e0c42da4a8e5c5f': ['nn::fssrv::sf::IFile'], 'fda693c0c9a8af50': ['nn::visrv::sf::IManagerDisplayService'], '1b449f8c623d7144': ['nn::am::service::IDebugFunctions'], 'a5dcc30cc04403e6': ['nn::pwm::IChannelSession'], 'b38c9250276e3f12': ['nn::fssrv::sf::IFileSystemProxy'], '655f9549829d5a6a': ['nn::friends::detail::ipc::INotificationService'], '8ff813fe4df151bf': ['nn::nifm::detail::IGeneralService'], '7e29f4a30d982df9': ['nn::fssrv::sf::ISaveDataTransferManager'], '34236f7ff3e2e0e5': ['nv::gemcontrol::INvGemControl'], '5f370d9245ddcccb': ['nn::am::service::ICommonStateGetter'], 'eec92b1678d09c46': ['nn::news::detail::ipc::IServiceCreator'], '92f51b2ce1fc1fe9': ['nn::capsrv::sf::IAlbumAccessorSession'], '20114ce0f01eba55': ['nn::fssrv::sf::IDeviceOperator'], '4875e024ec9e0522': ['nn::nfc::detail::ISystem'], '0498e1c15d5f1b60': ['nn::nifm::detail::INetworkProfile'], '80df875455597a17': ['nn::am::service::IOverlayFunctions'], '362fb4c55ec8b00a': ['nn::fssrv::sf::IFileSystemProxy'], '6a3e3f99e719d99e': ['nn::am::service::IApplicationAccessor'], 'aabc6a86dcefad36': ['nn::settings::ISystemSettingsServer'], 'e8228d2b01a6f6df': ['nn::am::service::IAppletAccessor'], 'f183cedfd4bc9151': ['nn::visrv::sf::IApplicationDisplayService'], 'bf797334c54581d2': ['nn::erpt::sf::IManager'], '6082c9fcf9ae88ce': ['nn::am::service::IAppletCommonFunctions'], '26ad622dd70169b7': ['nn::account::baas::IFloatingRegistrationRequest'], 'd5a760ae52f58e6c': ['nn::fssrv::sf::ISaveDataChunkImporter'], '07f921ea3bb51294': ['nn::fssrv::sf::IDeviceOperator'], '4404b42704f07bfe': ['nn::am::service::IApplicationAccessor'], '84bfddaa69efd59f': ['nn::account::IBaasAccessTokenAccessor'], '998451bfbac45b6f': ['nn::ns::detail::IDownloadTaskInterface'], 'b175ab3039854b8d': ['nn::am::service::ISystemAppletProxy'], 'b3837c70870c5181': ['nn::fssrv::sf::IEventNotifier', 'nn::account::detail::INotifier', 'nn::fatalsrv::IPrivateService', 'nn::olsc::srv::INativeHandleHolder', 'nn::hid::IAppletResource', 'nn::bcat::detail::ipc::INotifierService', 'nn::bcat::detail::ipc::IDeliveryTaskSuspensionService', 'nn::news::detail::ipc::INewlyArrivedEventHolder', 
'nn::news::detail::ipc::IOverwriteEventHolder'], '7368439e01b81da1': ['nn::friends::detail::ipc::IServiceCreator'], 'b9bd0d1b4fbe9950': ['nn::sm::detail::IManagerInterface'], '41a5b015fd7b2974': ['nn::news::detail::ipc::INewsDatabaseService'], '18ca865f9fadfb40': ['nn::ns::detail::IProgressMonitorForDeleteUserSaveDataAll'], '28032a2a86978fee': ['nn::regulator::IRegulatorManager'], '8b3d2a6cd1c5a1ca': ['nn::am::service::ILibraryAppletSelfAccessor'], 'e6bb8599ea001b76': ['nn::olsc::srv::IRemoteStorageController'], '4418757e9e8d2871': ['nn::gpio::IManager'], 'b7dbe08acbd75365': ['nn::friends::detail::ipc::IFriendService'], '62f3d257ca284243': ['nn::uart::IPortSession'], '80364747ff4a018f': ['nn::lbl::detail::ILblController'], 'b4925f1abba2be6f': ['nn::ns::detail::ISystemUpdateControl'], '1b7066fc2a402917': ['nn::migration::user::IClient'], '1e1b4340508e40b2': ['nn::ns::detail::IAsyncResult', 'nn::nim::detail::IAsyncResult'], '03f0ea6f9335575c': ['nn::fssrv::sf::ISaveDataInfoReader', 'nn::spl::detail::IRandomInterface'], '5652230574111fa7': ['nn::am::service::ILibraryAppletProxy', 'nn::am::service::IOverlayAppletProxy'], '75f43c56308cd7b6': ['nn::nifm::detail::IScanRequest'], '9392f1ee90235c69': ['nn::uart::IManager'], 'ef36fa28640afe99': ['nn::eupld::sf::IRequest'], '438792474de8dba6': ['nn::fssrv::sf::ISaveDataChunkIterator'], 'a0be9886cc1eaa59': ['nv::gemcoredump::INvGemCoreDump'], 'b9edb1600b89ceac': ['nn::ssl::sf::ISslConnection'], '26e44709faf92ed0': ['nn::am::service::ILibraryAppletCreator'], 'c77bcca6437b27a6': ['nn::timesrv::detail::service::ISteadyClock'], '7aea8734a0f840ff': ['nn::bcat::detail::ipc::IDeliveryCacheDirectoryService'], '5b6e9df3d3b58471': ['nn::fssrv::sf::IFileSystem'], 'd74fdf70159290ac': ['nn::srepo::detail::ipc::ISrepoService'], '812f005802818484': ['nn::ns::detail::IProgressAsyncResult'], 'b3913dcf1653eb90': ['nn::audio::detail::IAudioRenderer'], '678e5715bbf73012': ['nn::account::nas::IOAuthProcedureForNintendoAccountLinkage'], '391522848c1f6043': ['nn::fssrv::sf::IFileSystemProxy'], '17749092dc6fb853': ['nn::clkrst::IClkrstSession'], '1500deb9a7b0c914': ['nn::am::service::IApplicationProxy'], '388802cf41c41bce': ['nn::account::baas::IAdministrator'], 'e78fd383329c1e63': ['nn::ns::detail::IDocumentInterface'], '5216949a0ceebb5f': ['nn::fssrv::sf::IMultiCommitManager'], 'fbbbc6514394774e': ['nn::socket::sf::IClient'], '72cd1a5e3a3c7f4d': ['nn::account::IAccountServiceForSystemService'], 'a70b98ccc3299f80': ['nn::visrv::sf::IApplicationRootService', 'nn::sasbus::IManager', 'nn::fan::detail::IManager', 'nn::mii::detail::IStaticService'], 'feafd8f56bfb6c79': ['nn::am::service::IStorageChannel'], '9b1f77d36f593b6a': ['nn::regulator::IRegulatorSession'], 'c6fa95d83771352c': ['nn::mii::detail::IDatabaseService'], 'ff182bb207d2b5a5': ['nn::erpt::sf::IContext'], 'b47f55ada5e6b225': ['nn::eupld::sf::IControl'], '3b5390a64705a065': ['nns::nvdrv::INvDrvServices'], '38596578645fafba': ['nn::dauth::detail::IAsyncResult', 'nn::account::detail::IAsyncContext', 'nn::olsc::srv::IAsyncResult', 'nn::migration::detail::IAsyncContext', 'nn::news::detail::ipc::IDownloadContext'], 'bb4a6a9db65360e5': ['nn::am::service::ILockAccessor'], '6826a1702a669c81': ['nn::bcat::detail::ipc::IBcatService'], '122ed7b9d8b764a4': ['nn::ovln::ISender'], '78ecb0e1ac578c6c': ['nn::visrv::sf::ISystemDisplayService'], 'cf1de82e5d258edb': ['nn::audio::detail::IAudioDevice'], 'be0c9d3c0945d8e8': ['nn::erpt::sf::ISession'], 'f474a636d785a723': ['nn::grcsrv::IContinuousRecorder'], 'cd5711d486a56f56': 
['nn::ns::detail::ISystemUpdateInterface'], '011d3a57dac18e69': ['nn::npns::INotificationReceiver'], '60b2072cf75e98e9': ['nn::fssrv::sf::ISaveDataImporter'], '45ac41ac041971b7': ['nn::ns::detail::IVulnerabilityManagerInterface'], 'd7a91bbf777d68e6': ['nn::news::detail::ipc::INewsService'], 'd5276e9a369a02d4': ['nn::bgtc::ITaskService'], '5cc15656aa779039': ['nn::am::service::IStorageAccessor'], 'b8f2b4fed4f9dd19': ['nn::am::service::IAppletCommonFunctions'], 'f9cae162fd25a500': ['nn::account::detail::ISessionObject'], 'e95f80443466e96f': ['nn::ns::detail::IECommerceInterface'], '78cfe7a902912164': ['nn::mmnv::IRequest'], '4958f97e2c248677': ['nn::timesrv::detail::service::ITimeZoneService'], '5fe31c4ba5458e86': ['nn::nifm::detail::IStaticService'], '9ff0846fa9d9bfbe': ['nn::am::service::IApplicationFunctions'], '51c707b04050f17d': ['nn::hid::IActiveVibrationDeviceList', 'nn::pcv::IArbitrationManager'], '1c3ea2497ad14abd': ['nn::am::service::ITransferStorageAccessor'], '11611b1034a052c8': ['nn::fan::detail::IController'], 'a1eb9b617884b06e': ['nn::account::profile::IProfileEditor'], '79f9bddeea176bff': ['nn::ncm::IContentStorage'], '1e6aaa6e9c1d263c': ['nn::hid::IHidDebugServer'], 'ed990669d25d5086': ['nn::olsc::srv::IForbiddenSaveDataIndication'], 'c88c3118b02ec25b': ['nn::pl::detail::IPlatformServiceManager'], 'a0e6c9339df87581': ['nn::nfc::am::detail::IAm'], 'de74463cee165833': ['nn::migration::user::IServer'], '34c74f65e1610cf7': ['nn::pcv::detail::IPcvService'], '4fac7e300a84232b': ['nn::capsrv::sf::IScreenShotControlService'], '8e9938e174daf0bd': ['nn::ncm::IContentMetaDatabase'], '61a91fdb411efe4a': ['nn::nim::detail::IAsyncProgressResult'], 'e89b11adefffbfd4': ['nn::avm::srv::IVersionListImporter'], 'c3ebc917c3fbe907': ['nn::tc::IManager'], '771fa4a8fb46317f': ['nn::account::baas::IManagerForApplication'], 'e870d6c9931ac281': ['nn::btm::IBtmSystemCore'], '545040c28142bc5b': ['nn::psm::IPsmSession'], '75bd73bde1f6c923': ['nv::MemoryProfiler::IMemoryProfiler'], 'f07ce1118432e8ee': ['nn::pcv::detail::IPcvService'], '0370161c5a92ca25': ['nn::ldn::detail::IMonitorService'], '26f4c471b9b5b453': ['nn::account::http::IOAuthProcedure'], '97cac48103b20231': ['nn::account::IAccountServiceForApplication'], '927a357138944f57': ['nn::ntc::detail::service::IEnsureNetworkClockAvailabilityService'], '78e1342e6626dfa1': ['nn::migration::savedata::IClient'], 'a2eeab79ddf3035a': ['nn::grcsrv::IOffscreenRecorder'], 'be3851481ba76f64': ['nn::ns::detail::IAccountProxyInterface'], '02c66976480a90b3': ['nn::visrv::sf::ISystemRootService'], '8ca323a7f3a293c5': ['nn::audio::detail::IAudioInManagerForApplet'], 'cfef8b4ffcd68bd4': ['nn::arp::detail::IReader'], '5f3424303dd51d50': ['nn::ns::detail::IReadOnlyApplicationControlDataInterface'], '99a880037575580a': ['nn::hid::IHidDebugServer'], '4eeb8c6cb84b38c4': ['nn::apm::IManager'], '3de251fa2ea20236': ['nn::ns::detail::IContentManagementInterface'], 'f9b56db27c746e3d': ['nn::arp::detail::IRegistrar']}
| 8,575.5
| 17,150
| 0.735292
|
f12f409f0d032166f0e0e1d05e4c99bf29bbd17e
| 2,022
|
py
|
Python
|
prsa/__init__.py
|
ridwanmsharif/rsa
|
0feb3ede639246314b5b8d5c4c2092a96c1918d5
|
[
"MIT"
] | 24
|
2017-01-01T16:32:46.000Z
|
2019-03-14T17:42:57.000Z
|
prsa/__init__.py
|
ridwanmsharif/rsa
|
0feb3ede639246314b5b8d5c4c2092a96c1918d5
|
[
"MIT"
] | null | null | null |
prsa/__init__.py
|
ridwanmsharif/rsa
|
0feb3ede639246314b5b8d5c4c2092a96c1918d5
|
[
"MIT"
] | 3
|
2017-01-02T08:30:31.000Z
|
2021-04-22T17:41:34.000Z
|
#!/usr/bin/env python
from rsa import *
from primesieve import *
import argparse
parser = argparse.ArgumentParser(prog='PRSA',
description='''Implementation of the algorithms
        defined by the RSA scheme in Python3. Computes ciphertext from a message and
        recovers the message from ciphertext given the secret decryption key, using
        the Extended Euclidean Algorithm, Unicode code points, modular arithmetic and
        modular exponentiation''', add_help=True)
subparsers = parser.add_subparsers()
decrypt_parser = subparsers.add_parser('decrypt', help='Decrypt ciphertext using RSA')
decrypt_parser.add_argument('ctext', metavar='C', type=int,
help='Encoded ciphertext to be decrypted')
decrypt_parser.add_argument('privatekey', metavar='d', type=int,
help='A natural number that serves as the first element of the private key')
decrypt_parser.add_argument('mod', metavar='n', type=int,
help='A natural number that serves as the second element of the private key')
encrypt_parser = subparsers.add_parser('encrypt', help='Encrypt message using RSA')
encrypt_parser.add_argument('text', metavar='M', type=str,
help='Message to be encrypted')
encrypt_parser.add_argument('publickey', metavar='e', type=int,
help='A natural number that serves as the first element of the public key')
encrypt_parser.add_argument('mod', metavar='n', type=int,
help='A natural number that serves as the second element of the private key')
generate_parser = subparsers.add_parser('generate',
help='Find a key pair given the two natural numbers')
generate_parser.add_argument('np', metavar='p', type=int,
help='An integer for setting a prime (p)')
generate_parser.add_argument('nq', metavar='q', type=int,
help='An integer for setting a prime (q)')
args = parser.parse_args()
if hasattr(args, 'ctext'):
print(rsa_decrypt(args.ctext, args.privatekey, args.mod))
elif hasattr(args, 'publickey'):
print(rsa_encrypt(args.text, args.publickey, args.mod))
else:
print(generate_key_pair(args.np, args.nq))
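# Sketch of the intended command line (the "prsa" entry point name and the
# numeric values are assumptions, not taken from the package metadata):
#   prsa generate 17 19         # derive a key pair from two numbers
#   prsa encrypt "hi" <e> <n>   # -> ciphertext C
#   prsa decrypt <C> <d> <n>    # -> original message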
| 42.125
| 86
| 0.756677
|
2326cf671541dde7b154434dc10621cdb070f847
| 1,998
|
py
|
Python
|
pages/goods_page.py
|
pavel-wh/autotesting_course
|
7d6a395fe000aa796f3ed1eeaa4387caa7dd2ecb
|
[
"MIT"
] | 1
|
2019-11-17T02:29:25.000Z
|
2019-11-17T02:29:25.000Z
|
pages/goods_page.py
|
pavel-wh/autotesting_course
|
7d6a395fe000aa796f3ed1eeaa4387caa7dd2ecb
|
[
"MIT"
] | 1
|
2021-06-01T23:53:26.000Z
|
2021-06-01T23:53:26.000Z
|
pages/goods_page.py
|
pavel-wh/autotesting_course
|
7d6a395fe000aa796f3ed1eeaa4387caa7dd2ecb
|
[
"MIT"
] | null | null | null |
import time
from .base_page import BasePage
from selenium.webdriver.common.by import By
from .locators import GoodsPageLocators
from selenium.common.exceptions import NoAlertPresentException
import math
class GoodsPage(BasePage):
def solve_quiz_and_get_code(self):
"""метод для подсчёта результата математического выражения и вывода ответа.
"""
alert = self.browser.switch_to.alert
x = alert.text.split(" ")[2]
answer = str(math.log(abs((12 * math.sin(float(x))))))
alert.send_keys(answer)
alert.accept()
try:
alert = self.browser.switch_to.alert
print(f"Your code: {alert.text}")
alert.accept()
except NoAlertPresentException:
print("No second alert presented")
def click_to_add_basket_button(self):
basket_button = self.browser.find_element(
*GoodsPageLocators.ADD_BASKET_BUTTON)
basket_button.click()
def should_be_message_about_goods_name(self):
name_goods = self.browser.find_element(
*GoodsPageLocators.NAME_GOODS)
message_name_goods = self.browser.find_element(
*GoodsPageLocators.MESSAGE_NAME_GOODS)
assert name_goods.text == message_name_goods.text, "Goods name not match"
def should_be_message_about_goods_price(self):
price_goods = self.browser.find_element(
*GoodsPageLocators.PRICE_GOODS)
message_price_goods = self.browser.find_element(
*GoodsPageLocators.MESSAGE_PRICE_GOODS)
assert price_goods.text == message_price_goods.text, "Goods price not match"
def should_not_be_success_message(self):
assert self.is_not_element_present(*GoodsPageLocators.SUCCESS_MESSAGE), \
"Success message is presented, but should not be"
def should_be_success_message_disappear(self):
assert self.is_disappeared(*GoodsPageLocators.SUCCESS_MESSAGE), \
"Success message is not disappeared"
| 39.176471
| 84
| 0.697197
|
079bdfd0a05d20676b2a4db7d5d3d7906726edf2
| 69,612
|
py
|
Python
|
salt/loader.py
|
TheLocehiliosan/salt
|
57674c3a734df42393b1bb552701f3aebb9d14ea
|
[
"Apache-2.0"
] | null | null | null |
salt/loader.py
|
TheLocehiliosan/salt
|
57674c3a734df42393b1bb552701f3aebb9d14ea
|
[
"Apache-2.0"
] | null | null | null |
salt/loader.py
|
TheLocehiliosan/salt
|
57674c3a734df42393b1bb552701f3aebb9d14ea
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
The Salt loader is the core to Salt's plugin system, the loader scans
directories for python loadable code and organizes the code into the
plugin interfaces used by Salt.
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import os
import re
import sys
import time
import logging
import inspect
import tempfile
import functools
import threading
import traceback
import types
from zipimport import zipimporter
# Import salt libs
import salt.config
import salt.defaults.exitcodes
import salt.syspaths
import salt.utils.args
import salt.utils.context
import salt.utils.data
import salt.utils.dictupdate
import salt.utils.event
import salt.utils.files
import salt.utils.lazy
import salt.utils.odict
import salt.utils.platform
import salt.utils.versions
import salt.utils.stringutils
from salt.exceptions import LoaderError
from salt.template import check_render_pipe_str
from salt.utils.decorators import Depends
# Import 3rd-party libs
from salt.ext import six
from salt.ext.six.moves import reload_module
if sys.version_info[:2] >= (3, 5):
import importlib.machinery # pylint: disable=no-name-in-module,import-error
import importlib.util # pylint: disable=no-name-in-module,import-error
USE_IMPORTLIB = True
else:
import imp
USE_IMPORTLIB = False
try:
from collections.abc import MutableMapping
except ImportError:
from collections import MutableMapping
try:
import pkg_resources
HAS_PKG_RESOURCES = True
except ImportError:
HAS_PKG_RESOURCES = False
log = logging.getLogger(__name__)
SALT_BASE_PATH = os.path.abspath(salt.syspaths.INSTALL_DIR)
LOADED_BASE_NAME = 'salt.loaded'
if USE_IMPORTLIB:
# pylint: disable=no-member
MODULE_KIND_SOURCE = 1
MODULE_KIND_COMPILED = 2
MODULE_KIND_EXTENSION = 3
MODULE_KIND_PKG_DIRECTORY = 5
SUFFIXES = []
for suffix in importlib.machinery.EXTENSION_SUFFIXES:
SUFFIXES.append((suffix, 'rb', MODULE_KIND_EXTENSION))
for suffix in importlib.machinery.SOURCE_SUFFIXES:
SUFFIXES.append((suffix, 'rb', MODULE_KIND_SOURCE))
for suffix in importlib.machinery.BYTECODE_SUFFIXES:
SUFFIXES.append((suffix, 'rb', MODULE_KIND_COMPILED))
MODULE_KIND_MAP = {
MODULE_KIND_SOURCE: importlib.machinery.SourceFileLoader,
MODULE_KIND_COMPILED: importlib.machinery.SourcelessFileLoader,
MODULE_KIND_EXTENSION: importlib.machinery.ExtensionFileLoader
}
# pylint: enable=no-member
else:
SUFFIXES = imp.get_suffixes()
PY3_PRE_EXT = \
re.compile(r'\.cpython-{0}{1}(\.opt-[1-9])?'.format(*sys.version_info[:2]))
# Because on the cloud drivers we do `from salt.cloud.libcloudfuncs import *`
# which simplifies code readability, it adds some unsupported functions into
# the driver's module scope.
# We list unsupported functions here. These will be removed from the loaded modules.
# TODO: remove the need for this cross-module code. Maybe use NotImplemented
LIBCLOUD_FUNCS_NOT_SUPPORTED = (
'parallels.avail_sizes',
'parallels.avail_locations',
'proxmox.avail_sizes',
)
# Will be set to pyximport module at runtime if cython is enabled in config.
pyximport = None
def static_loader(
opts,
ext_type,
tag,
pack=None,
int_type=None,
ext_dirs=True,
ext_type_dirs=None,
base_path=None,
filter_name=None,
):
funcs = LazyLoader(
_module_dirs(
opts,
ext_type,
tag,
int_type,
ext_dirs,
ext_type_dirs,
base_path,
),
opts,
tag=tag,
pack=pack,
)
ret = {}
funcs._load_all()
if filter_name:
funcs = FilterDictWrapper(funcs, filter_name)
for key in funcs:
ret[key] = funcs[key]
return ret
def _module_dirs(
opts,
ext_type,
tag=None,
int_type=None,
ext_dirs=True,
ext_type_dirs=None,
base_path=None,
):
if tag is None:
tag = ext_type
sys_types = os.path.join(base_path or SALT_BASE_PATH, int_type or ext_type)
ext_types = os.path.join(opts['extension_modules'], ext_type)
ext_type_types = []
if ext_dirs:
if ext_type_dirs is None:
ext_type_dirs = '{0}_dirs'.format(tag)
if ext_type_dirs in opts:
ext_type_types.extend(opts[ext_type_dirs])
if HAS_PKG_RESOURCES and ext_type_dirs:
for entry_point in pkg_resources.iter_entry_points('salt.loader', ext_type_dirs):
loaded_entry_point = entry_point.load()
for path in loaded_entry_point():
ext_type_types.append(path)
cli_module_dirs = []
# The dirs can be any module dir, or an in-tree _{ext_type} dir
for _dir in opts.get('module_dirs', []):
# Prepend to the list to match cli argument ordering
maybe_dir = os.path.join(_dir, ext_type)
if os.path.isdir(maybe_dir):
cli_module_dirs.insert(0, maybe_dir)
continue
maybe_dir = os.path.join(_dir, '_{0}'.format(ext_type))
if os.path.isdir(maybe_dir):
cli_module_dirs.insert(0, maybe_dir)
return cli_module_dirs + ext_type_types + [ext_types, sys_types]
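# A sketch of the list returned above for ext_type='modules', using
# hypothetical paths (opts['module_dirs'] entries first, then any
# '<tag>_dirs'/entry-point dirs, then extension_modules, then the built-ins):
#
#   ['/srv/custom/_modules',
#    '/srv/extra/modules',
#    '/var/cache/salt/minion/extmods/modules',
#    SALT_BASE_PATH + '/modules']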
def minion_mods(
opts,
context=None,
utils=None,
whitelist=None,
initial_load=False,
loaded_base_name=None,
notify=False,
static_modules=None,
proxy=None):
'''
Load execution modules
Returns a dictionary of execution modules appropriate for the current
system by evaluating the __virtual__() function in each module.
:param dict opts: The Salt options dictionary
:param dict context: A Salt context that should be made present inside
generated modules in __context__
:param dict utils: Utility functions which should be made available to
Salt modules in __utils__. See `utils_dirs` in
salt.config for additional information about
configuration.
:param list whitelist: A list of modules which should be whitelisted.
:param bool initial_load: Deprecated flag! Unused.
:param str loaded_base_name: A string marker for the loaded base name.
:param bool notify: Flag indicating that an event should be fired upon
completion of module loading.
.. code-block:: python
import salt.config
import salt.loader
__opts__ = salt.config.minion_config('/etc/salt/minion')
__grains__ = salt.loader.grains(__opts__)
__opts__['grains'] = __grains__
__utils__ = salt.loader.utils(__opts__)
__salt__ = salt.loader.minion_mods(__opts__, utils=__utils__)
__salt__['test.ping']()
'''
# TODO Publish documentation for module whitelisting
if not whitelist:
whitelist = opts.get('whitelist_modules', None)
ret = LazyLoader(
_module_dirs(opts, 'modules', 'module'),
opts,
tag='module',
pack={'__context__': context, '__utils__': utils, '__proxy__': proxy},
whitelist=whitelist,
loaded_base_name=loaded_base_name,
static_modules=static_modules,
)
ret.pack['__salt__'] = ret
# Load any provider overrides from the configuration file providers option
# Note: Providers can be pkg, service, user or group - not to be confused
# with cloud providers.
providers = opts.get('providers', False)
if providers and isinstance(providers, dict):
for mod in providers:
# sometimes the providers opt is not meant to override modules but
# to hold other configuration
try:
funcs = raw_mod(opts, providers[mod], ret)
except TypeError:
break
else:
if funcs:
for func in funcs:
f_key = '{0}{1}'.format(mod, func[func.rindex('.'):])
ret[f_key] = funcs[func]
if notify:
evt = salt.utils.event.get_event('minion', opts=opts, listen=False)
evt.fire_event({'complete': True}, tag='/salt/minion/minion_mod_complete')
return ret
def raw_mod(opts, name, functions, mod='modules'):
'''
Returns a single module loaded raw and bypassing the __virtual__ function
.. code-block:: python
import salt.config
import salt.loader
__opts__ = salt.config.minion_config('/etc/salt/minion')
testmod = salt.loader.raw_mod(__opts__, 'test', None)
testmod['test.ping']()
'''
loader = LazyLoader(
_module_dirs(opts, mod, 'module'),
opts,
tag='rawmodule',
virtual_enable=False,
pack={'__salt__': functions},
)
# if we don't have the module, return an empty dict
if name not in loader.file_mapping:
return {}
loader._load_module(name) # load a single module (the one passed in)
return dict(loader._dict) # return a copy of *just* the funcs for `name`
def metaproxy(opts):
'''
Return functions used in the meta proxy
'''
return LazyLoader(
_module_dirs(opts, 'metaproxy'),
opts,
tag='metaproxy'
)
def matchers(opts):
'''
Return the matcher services plugins
'''
return LazyLoader(
_module_dirs(opts, 'matchers'),
opts,
tag='matchers'
)
def engines(opts, functions, runners, utils, proxy=None):
'''
Return the master services plugins
'''
pack = {'__salt__': functions,
'__runners__': runners,
'__proxy__': proxy,
'__utils__': utils}
return LazyLoader(
_module_dirs(opts, 'engines'),
opts,
tag='engines',
pack=pack,
)
def proxy(opts, functions=None, returners=None, whitelist=None, utils=None):
'''
Returns the proxy module for this salt-proxy-minion
'''
ret = LazyLoader(
_module_dirs(opts, 'proxy'),
opts,
tag='proxy',
pack={'__salt__': functions, '__ret__': returners, '__utils__': utils},
)
ret.pack['__proxy__'] = ret
return ret
def returners(opts, functions, whitelist=None, context=None, proxy=None):
'''
Returns the returner modules
'''
return LazyLoader(
_module_dirs(opts, 'returners', 'returner'),
opts,
tag='returner',
whitelist=whitelist,
pack={'__salt__': functions, '__context__': context, '__proxy__': proxy or {}},
)
def utils(opts, whitelist=None, context=None, proxy=proxy):
'''
Returns the utility modules
'''
return LazyLoader(
_module_dirs(opts, 'utils', ext_type_dirs='utils_dirs'),
opts,
tag='utils',
whitelist=whitelist,
pack={'__context__': context, '__proxy__': proxy or {}},
)
def pillars(opts, functions, context=None):
'''
Returns the pillars modules
'''
ret = LazyLoader(_module_dirs(opts, 'pillar'),
opts,
tag='pillar',
pack={'__salt__': functions,
'__context__': context,
'__utils__': utils(opts)})
ret.pack['__ext_pillar__'] = ret
return FilterDictWrapper(ret, '.ext_pillar')
def tops(opts):
'''
Returns the tops modules
'''
if 'master_tops' not in opts:
return {}
whitelist = list(opts['master_tops'].keys())
ret = LazyLoader(
_module_dirs(opts, 'tops', 'top'),
opts,
tag='top',
whitelist=whitelist,
)
return FilterDictWrapper(ret, '.top')
def wheels(opts, whitelist=None, context=None):
'''
Returns the wheels modules
'''
if context is None:
context = {}
return LazyLoader(
_module_dirs(opts, 'wheel'),
opts,
tag='wheel',
whitelist=whitelist,
pack={'__context__': context},
)
def outputters(opts):
'''
Returns the outputters modules
:param dict opts: The Salt options dictionary
:returns: LazyLoader instance, with only outputters present in the keyspace
'''
ret = LazyLoader(
_module_dirs(opts, 'output', ext_type_dirs='outputter_dirs'),
opts,
tag='output',
)
wrapped_ret = FilterDictWrapper(ret, '.output')
# TODO: this name seems terrible... __salt__ should always be execution mods
ret.pack['__salt__'] = wrapped_ret
return wrapped_ret
def serializers(opts):
'''
Returns the serializers modules
:param dict opts: The Salt options dictionary
:returns: LazyLoader instance, with only serializers present in the keyspace
'''
return LazyLoader(
_module_dirs(opts, 'serializers'),
opts,
tag='serializers',
)
def eauth_tokens(opts):
'''
Returns the tokens modules
:param dict opts: The Salt options dictionary
:returns: LazyLoader instance, with only token backends present in the keyspace
'''
return LazyLoader(
_module_dirs(opts, 'tokens'),
opts,
tag='tokens',
)
def auth(opts, whitelist=None):
'''
Returns the auth modules
:param dict opts: The Salt options dictionary
:returns: LazyLoader
'''
return LazyLoader(
_module_dirs(opts, 'auth'),
opts,
tag='auth',
whitelist=whitelist,
pack={'__salt__': minion_mods(opts)},
)
def fileserver(opts, backends):
'''
Returns the file server modules
'''
return LazyLoader(_module_dirs(opts, 'fileserver'),
opts,
tag='fileserver',
whitelist=backends,
pack={'__utils__': utils(opts)})
def roster(opts, runner=None, utils=None, whitelist=None):
'''
Returns the roster modules
'''
return LazyLoader(
_module_dirs(opts, 'roster'),
opts,
tag='roster',
whitelist=whitelist,
pack={
'__runner__': runner,
'__utils__': utils,
},
)
def thorium(opts, functions, runners):
'''
Load the thorium runtime modules
'''
pack = {'__salt__': functions, '__runner__': runners, '__context__': {}}
ret = LazyLoader(_module_dirs(opts, 'thorium'),
opts,
tag='thorium',
pack=pack)
ret.pack['__thorium__'] = ret
return ret
def states(opts, functions, utils, serializers, whitelist=None, proxy=None):
'''
Returns the state modules
:param dict opts: The Salt options dictionary
:param dict functions: A dictionary of minion modules, with module names as
keys and funcs as values.
.. code-block:: python
import salt.config
import salt.loader
__opts__ = salt.config.minion_config('/etc/salt/minion')
statemods = salt.loader.states(__opts__, None, None)
'''
ret = LazyLoader(
_module_dirs(opts, 'states'),
opts,
tag='states',
pack={'__salt__': functions, '__proxy__': proxy or {}},
whitelist=whitelist,
)
ret.pack['__states__'] = ret
ret.pack['__utils__'] = utils
ret.pack['__serializers__'] = serializers
return ret
def beacons(opts, functions, context=None, proxy=None):
'''
Load the beacon modules
:param dict opts: The Salt options dictionary
:param dict functions: A dictionary of minion modules, with module names as
keys and funcs as values.
'''
return LazyLoader(
_module_dirs(opts, 'beacons'),
opts,
tag='beacons',
pack={'__context__': context, '__salt__': functions, '__proxy__': proxy or {}},
virtual_funcs=[],
)
def log_handlers(opts):
'''
Returns the custom logging handler modules
:param dict opts: The Salt options dictionary
'''
ret = LazyLoader(
_module_dirs(
opts,
'log_handlers',
int_type='handlers',
base_path=os.path.join(SALT_BASE_PATH, 'log'),
),
opts,
tag='log_handlers',
)
return FilterDictWrapper(ret, '.setup_handlers')
def ssh_wrapper(opts, functions=None, context=None):
'''
Returns the custom logging handler modules
'''
return LazyLoader(
_module_dirs(
opts,
'wrapper',
base_path=os.path.join(SALT_BASE_PATH, os.path.join('client', 'ssh')),
),
opts,
tag='wrapper',
pack={
'__salt__': functions,
'__grains__': opts.get('grains', {}),
'__pillar__': opts.get('pillar', {}),
'__context__': context,
},
)
def render(opts, functions, states=None, proxy=None):
'''
Returns the render modules
'''
pack = {'__salt__': functions,
'__grains__': opts.get('grains', {})}
if states:
pack['__states__'] = states
pack['__proxy__'] = proxy or {}
ret = LazyLoader(
_module_dirs(
opts,
'renderers',
'render',
ext_type_dirs='render_dirs',
),
opts,
tag='render',
pack=pack,
)
rend = FilterDictWrapper(ret, '.render')
if not check_render_pipe_str(opts['renderer'], rend, opts['renderer_blacklist'], opts['renderer_whitelist']):
err = ('The renderer {0} is unavailable, this error is often because '
'the needed software is unavailable'.format(opts['renderer']))
log.critical(err)
raise LoaderError(err)
return rend
def grain_funcs(opts, proxy=None):
'''
Returns the grain functions
.. code-block:: python
import salt.config
import salt.loader
__opts__ = salt.config.minion_config('/etc/salt/minion')
grainfuncs = salt.loader.grain_funcs(__opts__)
'''
ret = LazyLoader(
_module_dirs(
opts,
'grains',
'grain',
ext_type_dirs='grains_dirs',
),
opts,
tag='grains',
)
ret.pack['__utils__'] = utils(opts, proxy=proxy)
return ret
def _load_cached_grains(opts, cfn):
'''
Returns the grains cached in cfn, or None if the cache is too old or is
corrupted.
'''
if not os.path.isfile(cfn):
log.debug('Grains cache file does not exist.')
return None
grains_cache_age = int(time.time() - os.path.getmtime(cfn))
if grains_cache_age > opts.get('grains_cache_expiration', 300):
log.debug(
'Grains cache last modified %s seconds ago and cache '
'expiration is set to %s. Grains cache expired. '
'Refreshing.',
grains_cache_age, opts.get('grains_cache_expiration', 300)
)
return None
if opts.get('refresh_grains_cache', False):
log.debug('refresh_grains_cache requested, Refreshing.')
return None
log.debug('Retrieving grains from cache')
try:
serial = salt.payload.Serial(opts)
with salt.utils.files.fopen(cfn, 'rb') as fp_:
cached_grains = salt.utils.data.decode(serial.load(fp_), preserve_tuples=True)
if not cached_grains:
log.debug('Cached grains are empty, cache might be corrupted. Refreshing.')
return None
return cached_grains
except (IOError, OSError):
return None
def grains(opts, force_refresh=False, proxy=None):
'''
Return the functions for the dynamic grains and the values for the static
grains.
Since grains are computed early in the startup process, grains functions
do not have __salt__ or __proxy__ available. At proxy-minion startup,
this function is called with the proxymodule LazyLoader object so grains
functions can communicate with their controlled device.
.. code-block:: python
import salt.config
import salt.loader
__opts__ = salt.config.minion_config('/etc/salt/minion')
__grains__ = salt.loader.grains(__opts__)
print __grains__['id']
'''
# Need to re-import salt.config, somehow it got lost when a minion is starting
import salt.config
# if we have no grains, lets try loading from disk (TODO: move to decorator?)
cfn = os.path.join(
opts['cachedir'],
'grains.cache.p'
)
if not force_refresh and opts.get('grains_cache', False):
cached_grains = _load_cached_grains(opts, cfn)
if cached_grains:
return cached_grains
else:
log.debug('Grains refresh requested. Refreshing grains.')
if opts.get('skip_grains', False):
return {}
grains_deep_merge = opts.get('grains_deep_merge', False) is True
if 'conf_file' in opts:
pre_opts = {}
pre_opts.update(salt.config.load_config(
opts['conf_file'], 'SALT_MINION_CONFIG',
salt.config.DEFAULT_MINION_OPTS['conf_file']
))
default_include = pre_opts.get(
'default_include', opts['default_include']
)
include = pre_opts.get('include', [])
pre_opts.update(salt.config.include_config(
default_include, opts['conf_file'], verbose=False
))
pre_opts.update(salt.config.include_config(
include, opts['conf_file'], verbose=True
))
if 'grains' in pre_opts:
opts['grains'] = pre_opts['grains']
else:
opts['grains'] = {}
else:
opts['grains'] = {}
grains_data = {}
blist = opts.get('grains_blacklist', [])
funcs = grain_funcs(opts, proxy=proxy)
if force_refresh: # if we refresh, lets reload grain modules
funcs.clear()
# Run core grains
for key in funcs:
if not key.startswith('core.'):
continue
log.trace('Loading %s grain', key)
ret = funcs[key]()
if not isinstance(ret, dict):
continue
if blist:
for key in list(ret):
for block in blist:
if salt.utils.stringutils.expr_match(key, block):
del ret[key]
log.trace('Filtering %s grain', key)
if not ret:
continue
if grains_deep_merge:
salt.utils.dictupdate.update(grains_data, ret)
else:
grains_data.update(ret)
# Run the rest of the grains
for key in funcs:
if key.startswith('core.') or key == '_errors':
continue
try:
# Grains are loaded too early to take advantage of the injected
# __proxy__ variable. Instead, pass an instance of that LazyLoader
# here to grains functions that accept a 'proxy' parameter, so the
# grains can access the proxymodule for retrieving information
# from the connected device.
log.trace('Loading %s grain', key)
parameters = salt.utils.args.get_function_argspec(funcs[key]).args
kwargs = {}
if 'proxy' in parameters:
kwargs['proxy'] = proxy
if 'grains' in parameters:
kwargs['grains'] = grains_data
ret = funcs[key](**kwargs)
except Exception:
if salt.utils.platform.is_proxy():
log.info('The following CRITICAL message may not be an error; the proxy may not be completely established yet.')
log.critical(
'Failed to load grains defined in grain file %s in '
'function %s, error:\n', key, funcs[key],
exc_info=True
)
continue
if not isinstance(ret, dict):
continue
if blist:
for key in list(ret):
for block in blist:
if salt.utils.stringutils.expr_match(key, block):
del ret[key]
log.trace('Filtering %s grain', key)
if not ret:
continue
if grains_deep_merge:
salt.utils.dictupdate.update(grains_data, ret)
else:
grains_data.update(ret)
if opts.get('proxy_merge_grains_in_module', True) and proxy:
try:
proxytype = proxy.opts['proxy']['proxytype']
if proxytype + '.grains' in proxy:
if proxytype + '.initialized' in proxy and proxy[proxytype + '.initialized']():
try:
proxytype = proxy.opts['proxy']['proxytype']
ret = proxy[proxytype + '.grains']()
if grains_deep_merge:
salt.utils.dictupdate.update(grains_data, ret)
else:
grains_data.update(ret)
except Exception:
log.critical('Failed to run proxy\'s grains function!',
exc_info=True
)
except KeyError:
pass
grains_data.update(opts['grains'])
# Write cache if enabled
if opts.get('grains_cache', False):
with salt.utils.files.set_umask(0o077):
try:
if salt.utils.platform.is_windows():
# Late import
import salt.modules.cmdmod
# Make sure cache file isn't read-only
salt.modules.cmdmod._run_quiet('attrib -R "{0}"'.format(cfn))
with salt.utils.files.fopen(cfn, 'w+b') as fp_:
try:
serial = salt.payload.Serial(opts)
serial.dump(grains_data, fp_)
except TypeError as e:
log.error('Failed to serialize grains cache: %s', e)
raise # re-throw for cleanup
except Exception as e:
log.error('Unable to write to grains cache file %s: %s', cfn, e)
# Based on the original exception, the file may or may not have been
# created. If it was, we will remove it now, as the exception means
# the serialized data is not to be trusted, no matter what the
# exception is.
if os.path.isfile(cfn):
os.unlink(cfn)
if grains_deep_merge:
salt.utils.dictupdate.update(grains_data, opts['grains'])
else:
grains_data.update(opts['grains'])
return salt.utils.data.decode(grains_data, preserve_tuples=True)
# TODO: get rid of? Does anyone use this? You should use raw() instead
def call(fun, **kwargs):
'''
Directly call a function inside a loader directory
'''
args = kwargs.get('args', [])
dirs = kwargs.get('dirs', [])
funcs = LazyLoader(
[os.path.join(SALT_BASE_PATH, 'modules')] + dirs,
None,
tag='modules',
virtual_enable=False,
)
return funcs[fun](*args)
def runner(opts, utils=None, context=None, whitelist=None):
'''
Directly call a function inside a loader directory
'''
if utils is None:
utils = {}
if context is None:
context = {}
ret = LazyLoader(
_module_dirs(opts, 'runners', 'runner', ext_type_dirs='runner_dirs'),
opts,
tag='runners',
pack={'__utils__': utils, '__context__': context},
whitelist=whitelist,
)
# TODO: change from __salt__ to something else, we overload __salt__ too much
ret.pack['__salt__'] = ret
return ret
def queues(opts):
'''
Directly call a function inside a loader directory
'''
return LazyLoader(
_module_dirs(opts, 'queues', 'queue', ext_type_dirs='queue_dirs'),
opts,
tag='queues',
)
def sdb(opts, functions=None, whitelist=None, utils=None):
'''
Make a very small database call
'''
if utils is None:
utils = {}
return LazyLoader(
_module_dirs(opts, 'sdb'),
opts,
tag='sdb',
pack={
'__sdb__': functions,
'__opts__': opts,
'__utils__': utils,
'__salt__': minion_mods(opts, utils),
},
whitelist=whitelist,
)
def pkgdb(opts):
'''
Return modules for SPM's package database
.. versionadded:: 2015.8.0
'''
return LazyLoader(
_module_dirs(
opts,
'pkgdb',
base_path=os.path.join(SALT_BASE_PATH, 'spm')
),
opts,
tag='pkgdb'
)
def pkgfiles(opts):
'''
Return modules for SPM's file handling
.. versionadded:: 2015.8.0
'''
return LazyLoader(
_module_dirs(
opts,
'pkgfiles',
base_path=os.path.join(SALT_BASE_PATH, 'spm')
),
opts,
tag='pkgfiles'
)
def clouds(opts):
'''
Return the cloud functions
'''
# Let's bring __active_provider_name__, defaulting to None, to all cloud
# drivers. This will get temporarily updated/overridden with a context
# manager when needed.
functions = LazyLoader(
_module_dirs(opts,
'clouds',
'cloud',
base_path=os.path.join(SALT_BASE_PATH, 'cloud'),
int_type='clouds'),
opts,
tag='clouds',
pack={'__utils__': salt.loader.utils(opts),
'__active_provider_name__': None},
)
for funcname in LIBCLOUD_FUNCS_NOT_SUPPORTED:
log.trace(
'\'%s\' has been marked as not supported. Removing from the '
'list of supported cloud functions', funcname
)
functions.pop(funcname, None)
return functions
def netapi(opts):
'''
Return the network api functions
'''
return LazyLoader(
_module_dirs(opts, 'netapi'),
opts,
tag='netapi',
)
def executors(opts, functions=None, context=None, proxy=None):
'''
Returns the executor modules
'''
executors = LazyLoader(
_module_dirs(opts, 'executors', 'executor'),
opts,
tag='executor',
pack={'__salt__': functions, '__context__': context or {}, '__proxy__': proxy or {}},
)
executors.pack['__executors__'] = executors
return executors
def cache(opts, serial):
'''
Returns the returner modules
'''
return LazyLoader(
_module_dirs(opts, 'cache', 'cache'),
opts,
tag='cache',
pack={'__opts__': opts, '__context__': {'serial': serial}},
)
def _generate_module(name):
if name in sys.modules:
return
code = "'''Salt loaded {0} parent module'''".format(name.split('.')[-1])
# ModuleType can't accept a unicode type on PY2
module = types.ModuleType(str(name)) # future lint: disable=blacklisted-function
exec(code, module.__dict__)
sys.modules[name] = module
def _mod_type(module_path):
if module_path.startswith(SALT_BASE_PATH):
return 'int'
return 'ext'
# TODO: move somewhere else?
class FilterDictWrapper(MutableMapping):
'''
Create a dict which wraps another dict with a specific key suffix on get
This is to replace "filter_load"
'''
def __init__(self, d, suffix):
self._dict = d
self.suffix = suffix
def __setitem__(self, key, val):
self._dict[key] = val
def __delitem__(self, key):
del self._dict[key]
def __getitem__(self, key):
return self._dict[key + self.suffix]
def __len__(self):
return len(self._dict)
def __iter__(self):
for key in self._dict:
if key.endswith(self.suffix):
yield key.replace(self.suffix, '')
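# A minimal sketch of how the wrapper above behaves, mirroring the way
# render() wraps its loader with the '.render' suffix further down (the
# 'jinja' key is just an example name):
#
#   rend = FilterDictWrapper(loader, '.render')
#   rend['jinja']    # resolves loader['jinja.render']
#   list(rend)       # yields only keys ending in '.render', suffix stripped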
class LazyLoader(salt.utils.lazy.LazyDict):
'''
A pseudo-dictionary which has a set of keys which are the
name of the module and function, delimited by a dot. When
the value of the key is accessed, the function is then loaded
from disk and into memory.
.. note::
Iterating over keys will cause all modules to be loaded.
:param list module_dirs: A list of directories on disk to search for modules
:param dict opts: The salt options dictionary.
:param str tag: The tag for the type of module to load
:param func mod_type_check: A function which can be used to verify files
:param dict pack: A dictionary of function to be packed into modules as they are loaded
:param list whitelist: A list of modules to whitelist
:param bool virtual_enable: Whether or not to respect the __virtual__ function when loading modules.
:param list virtual_funcs: The names of additional functions in the module to call to verify its functionality.
If any of them does not return a true value, the module will not load.
:returns: A LazyLoader object which functions as a dictionary. Keys are 'module.function' and values
are function references themselves which are loaded on-demand.
# TODO:
- move modules_max_memory into here
- singletons (per tag)
'''
mod_dict_class = salt.utils.odict.OrderedDict
def __init__(self,
module_dirs,
opts=None,
tag='module',
loaded_base_name=None,
mod_type_check=None,
pack=None,
whitelist=None,
virtual_enable=True,
static_modules=None,
proxy=None,
virtual_funcs=None,
): # pylint: disable=W0231
'''
In pack, if any of the values are None they will be replaced with an
empty context-specific dict
'''
self.inject_globals = {}
self.pack = {} if pack is None else pack
if opts is None:
opts = {}
threadsafety = not opts.get('multiprocessing')
self.context_dict = salt.utils.context.ContextDict(threadsafe=threadsafety)
self.opts = self.__prep_mod_opts(opts)
self.module_dirs = module_dirs
self.tag = tag
self.loaded_base_name = loaded_base_name or LOADED_BASE_NAME
self.mod_type_check = mod_type_check or _mod_type
if '__context__' not in self.pack:
self.pack['__context__'] = None
for k, v in six.iteritems(self.pack):
if v is None: # if the value of a pack is None, lets make an empty dict
self.context_dict.setdefault(k, {})
self.pack[k] = salt.utils.context.NamespacedDictWrapper(self.context_dict, k)
self.whitelist = whitelist
self.virtual_enable = virtual_enable
self.initial_load = True
# names of modules that we don't have (errors, __virtual__, etc.)
self.missing_modules = {} # mapping of name -> error
self.loaded_modules = {} # mapping of module_name -> dict_of_functions
self.loaded_files = set() # TODO: just remove them from file_mapping?
self.static_modules = static_modules if static_modules else []
if virtual_funcs is None:
virtual_funcs = []
self.virtual_funcs = virtual_funcs
self.disabled = set(
self.opts.get(
'disable_{0}{1}'.format(
self.tag,
'' if self.tag[-1] == 's' else 's'
),
[]
)
)
# A map of suffix to description for imp
self.suffix_map = {}
# A list to determine precedence of extensions
# Prefer packages (directories) over modules (single files)!
self.suffix_order = ['']
for (suffix, mode, kind) in SUFFIXES:
self.suffix_map[suffix] = (suffix, mode, kind)
self.suffix_order.append(suffix)
self._lock = threading.RLock()
self._refresh_file_mapping()
super(LazyLoader, self).__init__() # late init the lazy loader
# create all of the import namespaces
_generate_module('{0}.int'.format(self.loaded_base_name))
_generate_module('{0}.int.{1}'.format(self.loaded_base_name, tag))
_generate_module('{0}.ext'.format(self.loaded_base_name))
_generate_module('{0}.ext.{1}'.format(self.loaded_base_name, tag))
def __getitem__(self, item):
'''
Override the __getitem__ in order to decorate the returned function if we need
to last-minute inject globals
'''
func = super(LazyLoader, self).__getitem__(item)
if self.inject_globals:
return global_injector_decorator(self.inject_globals)(func)
else:
return func
def __getattr__(self, mod_name):
'''
Allow for "direct" attribute access-- this allows jinja templates to
access things like `salt.test.ping()`
'''
if mod_name in ('__getstate__', '__setstate__'):
return object.__getattribute__(self, mod_name)
# if we have an attribute named that, lets return it.
try:
return object.__getattr__(self, mod_name) # pylint: disable=no-member
except AttributeError:
pass
# otherwise we assume its jinja template access
if mod_name not in self.loaded_modules and not self.loaded:
for name in self._iter_files(mod_name):
if name in self.loaded_files:
continue
# if we got what we wanted, we are done
if self._load_module(name) and mod_name in self.loaded_modules:
break
if mod_name in self.loaded_modules:
return self.loaded_modules[mod_name]
else:
raise AttributeError(mod_name)
def missing_fun_string(self, function_name):
'''
Return the error string for a missing function.
This can range from "not available" to "__virtual__ returned False".
'''
mod_name = function_name.split('.')[0]
if mod_name in self.loaded_modules:
return '\'{0}\' is not available.'.format(function_name)
else:
try:
reason = self.missing_modules[mod_name]
except KeyError:
return '\'{0}\' is not available.'.format(function_name)
else:
if reason is not None:
return '\'{0}\' __virtual__ returned False: {1}'.format(mod_name, reason)
else:
return '\'{0}\' __virtual__ returned False'.format(mod_name)
def _refresh_file_mapping(self):
'''
refresh the mapping of the FS on disk
'''
# map of suffix to description for imp
if self.opts.get('cython_enable', True) is True:
try:
global pyximport
pyximport = __import__('pyximport') # pylint: disable=import-error
pyximport.install()
# add to suffix_map so file_mapping will pick it up
self.suffix_map['.pyx'] = tuple()
except ImportError:
log.info('Cython is enabled in the options but not present '
'in the system path. Skipping Cython modules.')
# Allow for zipimport of modules
if self.opts.get('enable_zip_modules', True) is True:
self.suffix_map['.zip'] = tuple()
# allow for module dirs
if USE_IMPORTLIB:
self.suffix_map[''] = ('', '', MODULE_KIND_PKG_DIRECTORY)
else:
self.suffix_map[''] = ('', '', imp.PKG_DIRECTORY)
# create mapping of filename (without suffix) to (path, suffix)
# The files are added in order of priority, so order *must* be retained.
self.file_mapping = salt.utils.odict.OrderedDict()
opt_match = []
def _replace_pre_ext(obj):
'''
Hack so we can get the optimization level that we replaced (if
any) out of the re.sub call below. We use a list here because
it is a persistent data structure that we will be able to
access after re.sub is called.
'''
opt_match.append(obj)
return ''
for mod_dir in self.module_dirs:
try:
# Make sure we have a sorted listdir in order to have
# predictable override results
files = sorted(
x for x in os.listdir(mod_dir) if x != '__pycache__'
)
except OSError:
continue # Next mod_dir
if six.PY3:
try:
pycache_files = [
os.path.join('__pycache__', x) for x in
sorted(os.listdir(os.path.join(mod_dir, '__pycache__')))
]
except OSError:
pass
else:
files.extend(pycache_files)
for filename in files:
try:
dirname, basename = os.path.split(filename)
if basename.startswith('_'):
# skip private modules
# log messages omitted for obviousness
continue # Next filename
f_noext, ext = os.path.splitext(basename)
if six.PY3:
f_noext = PY3_PRE_EXT.sub(_replace_pre_ext, f_noext)
try:
opt_level = int(
opt_match.pop().group(1).rsplit('-', 1)[-1]
)
except (AttributeError, IndexError, ValueError):
# No regex match or no optimization level matched
opt_level = 0
try:
opt_index = self.opts['optimization_order'].index(opt_level)
except KeyError:
log.trace(
'Disallowed optimization level %d for module '
'name \'%s\', skipping. Add %d to the '
'\'optimization_order\' config option if you '
'do not want to ignore this optimization '
'level.', opt_level, f_noext, opt_level
)
continue
else:
# Optimization level not reflected in filename on PY2
opt_index = 0
# make sure it is a suffix we support
if ext not in self.suffix_map:
continue # Next filename
if f_noext in self.disabled:
log.trace(
'Skipping %s, it is disabled by configuration',
filename
)
continue # Next filename
fpath = os.path.join(mod_dir, filename)
# if its a directory, lets allow us to load that
if ext == '':
# is there something __init__?
subfiles = os.listdir(fpath)
for suffix in self.suffix_order:
if '' == suffix:
continue # Next suffix (__init__ must have a suffix)
init_file = '__init__{0}'.format(suffix)
if init_file in subfiles:
break
else:
continue # Next filename
try:
curr_ext = self.file_mapping[f_noext][1]
curr_opt_index = self.file_mapping[f_noext][2]
except KeyError:
pass
else:
if '' in (curr_ext, ext) and curr_ext != ext:
log.error(
'Module/package collision: \'%s\' and \'%s\'',
fpath,
self.file_mapping[f_noext][0]
)
if six.PY3 and ext == '.pyc' and curr_ext == '.pyc':
# Check the optimization level
if opt_index >= curr_opt_index:
# Module name match, but a higher-priority
# optimization level was already matched, skipping.
continue
elif not curr_ext or self.suffix_order.index(ext) >= self.suffix_order.index(curr_ext):
# Match found but a higher-priority match already
# exists, so skip this.
continue
if six.PY3 and not dirname and ext == '.pyc':
# On Python 3, we should only load .pyc files from the
# __pycache__ subdirectory (i.e. when dirname is not an
# empty string).
continue
# Made it this far - add it
self.file_mapping[f_noext] = (fpath, ext, opt_index)
except OSError:
continue
for smod in self.static_modules:
f_noext = smod.split('.')[-1]
self.file_mapping[f_noext] = (smod, '.o', 0)
def clear(self):
'''
Clear the dict
'''
with self._lock:
super(LazyLoader, self).clear() # clear the lazy loader
self.loaded_files = set()
self.missing_modules = {}
self.loaded_modules = {}
# if we have been loaded before, lets clear the file mapping since
# we obviously want a re-do
if hasattr(self, 'opts'):
self._refresh_file_mapping()
self.initial_load = False
def __prep_mod_opts(self, opts):
'''
Strip out of the opts any logger instance
'''
if '__grains__' not in self.pack:
self.context_dict['grains'] = opts.get('grains', {})
self.pack['__grains__'] = salt.utils.context.NamespacedDictWrapper(self.context_dict, 'grains')
if '__pillar__' not in self.pack:
self.context_dict['pillar'] = opts.get('pillar', {})
self.pack['__pillar__'] = salt.utils.context.NamespacedDictWrapper(self.context_dict, 'pillar')
mod_opts = {}
for key, val in list(opts.items()):
if key == 'logger':
continue
mod_opts[key] = val
return mod_opts
def _iter_files(self, mod_name):
'''
Iterate over all file_mapping files in order of closeness to mod_name
'''
# do we have an exact match?
if mod_name in self.file_mapping:
yield mod_name
# do we have a partial match?
for k in self.file_mapping:
if mod_name in k:
yield k
# anyone else? Bueller?
for k in self.file_mapping:
if mod_name not in k:
yield k
def _reload_submodules(self, mod):
submodules = (
getattr(mod, sname) for sname in dir(mod) if
isinstance(getattr(mod, sname), mod.__class__)
)
# reload only custom "sub"modules
for submodule in submodules:
# it is a submodule if the name is in a namespace under mod
if submodule.__name__.startswith(mod.__name__ + '.'):
reload_module(submodule)
self._reload_submodules(submodule)
def _load_module(self, name):
mod = None
fpath, suffix = self.file_mapping[name][:2]
self.loaded_files.add(name)
fpath_dirname = os.path.dirname(fpath)
try:
sys.path.append(fpath_dirname)
if suffix == '.pyx':
mod = pyximport.load_module(name, fpath, tempfile.gettempdir())
elif suffix == '.o':
top_mod = __import__(fpath, globals(), locals(), [])
comps = fpath.split('.')
if len(comps) < 2:
mod = top_mod
else:
mod = top_mod
for subname in comps[1:]:
mod = getattr(mod, subname)
elif suffix == '.zip':
mod = zipimporter(fpath).load_module(name)
else:
desc = self.suffix_map[suffix]
# if it is a directory, we don't open a file
try:
mod_namespace = '.'.join((
self.loaded_base_name,
self.mod_type_check(fpath),
self.tag,
name))
except TypeError:
mod_namespace = '{0}.{1}.{2}.{3}'.format(
self.loaded_base_name,
self.mod_type_check(fpath),
self.tag,
name)
if suffix == '':
if USE_IMPORTLIB:
# pylint: disable=no-member
# Package directory, look for __init__
loader_details = [
(importlib.machinery.SourceFileLoader, importlib.machinery.SOURCE_SUFFIXES),
(importlib.machinery.SourcelessFileLoader, importlib.machinery.BYTECODE_SUFFIXES),
(importlib.machinery.ExtensionFileLoader, importlib.machinery.EXTENSION_SUFFIXES),
]
file_finder = importlib.machinery.FileFinder(
fpath_dirname,
*loader_details
)
spec = file_finder.find_spec(mod_namespace)
if spec is None:
raise ImportError()
# TODO: Get rid of load_module in favor of
# exec_module below. load_module is deprecated, but
# loading using exec_module has been causing odd things
# with the magic dunders we pack into the loaded
# modules, most notably with salt-ssh's __opts__.
mod = spec.loader.load_module()
# mod = importlib.util.module_from_spec(spec)
# spec.loader.exec_module(mod)
# pylint: enable=no-member
sys.modules[mod_namespace] = mod
else:
mod = imp.load_module(mod_namespace, None, fpath, desc)
# reload all submodules if necessary
if not self.initial_load:
self._reload_submodules(mod)
else:
if USE_IMPORTLIB:
# pylint: disable=no-member
loader = MODULE_KIND_MAP[desc[2]](mod_namespace, fpath)
spec = importlib.util.spec_from_file_location(
mod_namespace, fpath, loader=loader
)
if spec is None:
raise ImportError()
# TODO: Get rid of load_module in favor of
# exec_module below. load_module is deprecated, but
# loading using exec_module has been causing odd things
# with the magic dunders we pack into the loaded
# modules, most notably with salt-ssh's __opts__.
mod = spec.loader.load_module()
#mod = importlib.util.module_from_spec(spec)
#spec.loader.exec_module(mod)
# pylint: enable=no-member
sys.modules[mod_namespace] = mod
else:
with salt.utils.files.fopen(fpath, desc[1]) as fn_:
mod = imp.load_module(mod_namespace, fn_, fpath, desc)
except IOError:
raise
except ImportError as exc:
if 'magic number' in six.text_type(exc):
error_msg = 'Failed to import {0} {1}. Bad magic number. If migrating from Python2 to Python3, remove all .pyc files and try again.'.format(self.tag, name)
log.warning(error_msg)
self.missing_modules[name] = error_msg
log.debug(
'Failed to import %s %s:\n',
self.tag, name, exc_info=True
)
self.missing_modules[name] = exc
return False
except Exception as error:
log.error(
'Failed to import %s %s, this is due most likely to a '
'syntax error:\n', self.tag, name, exc_info=True
)
self.missing_modules[name] = error
return False
except SystemExit as error:
try:
fn_, _, caller, _ = traceback.extract_tb(sys.exc_info()[2])[-1]
except Exception:
pass
else:
tgt_fn = os.path.join('salt', 'utils', 'process.py')
if fn_.endswith(tgt_fn) and '_handle_signals' in caller:
# Race condition, SIGTERM or SIGINT received while loader
# was in process of loading a module. Call sys.exit to
# ensure that the process is killed.
sys.exit(salt.defaults.exitcodes.EX_OK)
log.error(
'Failed to import %s %s as the module called exit()\n',
self.tag, name, exc_info=True
)
self.missing_modules[name] = error
return False
finally:
sys.path.remove(fpath_dirname)
if hasattr(mod, '__opts__'):
mod.__opts__.update(self.opts)
else:
mod.__opts__ = self.opts
# pack whatever other globals we were asked to
for p_name, p_value in six.iteritems(self.pack):
setattr(mod, p_name, p_value)
module_name = mod.__name__.rsplit('.', 1)[-1]
# Call a module's initialization method if it exists
module_init = getattr(mod, '__init__', None)
if inspect.isfunction(module_init):
try:
module_init(self.opts)
except TypeError as e:
log.error(e)
except Exception:
err_string = '__init__ failed'
log.debug(
'Error loading %s.%s: %s',
self.tag, module_name, err_string, exc_info=True
)
self.missing_modules[module_name] = err_string
self.missing_modules[name] = err_string
return False
# if virtual modules are enabled, we need to look for the
# __virtual__() function inside that module and run it.
if self.virtual_enable:
virtual_funcs_to_process = ['__virtual__'] + self.virtual_funcs
for virtual_func in virtual_funcs_to_process:
virtual_ret, module_name, virtual_err, virtual_aliases = \
self._process_virtual(mod, module_name, virtual_func)
if virtual_err is not None:
log.trace(
'Error loading %s.%s: %s',
self.tag, module_name, virtual_err
)
# if _process_virtual returned a non-True value then we are
# supposed to not process this module
if virtual_ret is not True and module_name not in self.missing_modules:
# If a module has information about why it could not be loaded, record it
self.missing_modules[module_name] = virtual_err
self.missing_modules[name] = virtual_err
return False
else:
virtual_aliases = ()
# If this is a proxy minion then MOST modules cannot work. Therefore, require that
# any module that does work with salt-proxy-minion define __proxyenabled__ as a list
# containing the names of the proxy types that the module supports.
#
# Render modules and state modules are OK though
if 'proxy' in self.opts:
if self.tag in ['grains', 'proxy']:
if not hasattr(mod, '__proxyenabled__') or \
(self.opts['proxy']['proxytype'] not in mod.__proxyenabled__ and
'*' not in mod.__proxyenabled__):
err_string = 'not a proxy_minion enabled module'
self.missing_modules[module_name] = err_string
self.missing_modules[name] = err_string
return False
if getattr(mod, '__load__', False) is not False:
log.info(
'The functions from module \'%s\' are being loaded from the '
'provided __load__ attribute', module_name
)
# If we had another module by the same virtual name, we should put any
# new functions under the existing dictionary.
mod_names = [module_name] + list(virtual_aliases)
mod_dict = dict((
(x, self.loaded_modules.get(x, self.mod_dict_class()))
for x in mod_names
))
for attr in getattr(mod, '__load__', dir(mod)):
if attr.startswith('_'):
# private functions are skipped
continue
func = getattr(mod, attr)
if not inspect.isfunction(func) and not isinstance(func, functools.partial):
# Not a function!? Skip it!!!
continue
# Let's get the function name.
# If the module has the __func_alias__ attribute, it must be a
# dictionary mapping in the form of (key -> value):
# <real-func-name> -> <desired-func-name>
#
# It defaults, of course, to the found callable attribute name
# if no alias is defined.
funcname = getattr(mod, '__func_alias__', {}).get(attr, attr)
for tgt_mod in mod_names:
try:
full_funcname = '.'.join((tgt_mod, funcname))
except TypeError:
full_funcname = '{0}.{1}'.format(tgt_mod, funcname)
# Save many references for lookups
# Careful not to overwrite existing (higher priority) functions
if full_funcname not in self._dict:
self._dict[full_funcname] = func
if funcname not in mod_dict[tgt_mod]:
setattr(mod_dict[tgt_mod], funcname, func)
mod_dict[tgt_mod][funcname] = func
self._apply_outputter(func, mod)
# enforce depends
try:
Depends.enforce_dependencies(self._dict, self.tag)
except RuntimeError as exc:
log.info(
'Depends.enforce_dependencies() failed for the following '
'reason: %s', exc
)
for tgt_mod in mod_names:
self.loaded_modules[tgt_mod] = mod_dict[tgt_mod]
return True
def _load(self, key):
'''
Load a single item if you have it
'''
# if the key doesn't have a '.' then it isn't valid for this mod dict
if not isinstance(key, six.string_types):
raise KeyError('The key must be a string.')
if '.' not in key:
raise KeyError('The key \'{0}\' should contain a \'.\''.format(key))
mod_name, _ = key.split('.', 1)
with self._lock:
# It is possible that the key is in the dictionary after
# acquiring the lock due to another thread loading it.
if mod_name in self.missing_modules or key in self._dict:
return True
# if the modulename isn't in the whitelist, don't bother
if self.whitelist and mod_name not in self.whitelist:
log.error(
'Failed to load function %s because its module (%s) is '
'not in the whitelist: %s', key, mod_name, self.whitelist
)
raise KeyError(key)
def _inner_load(mod_name):
for name in self._iter_files(mod_name):
if name in self.loaded_files:
continue
# if we got what we wanted, we are done
if self._load_module(name) and key in self._dict:
return True
return False
# try to load the module
ret = None
reloaded = False
# re-scan up to once, IOErrors or a failed load cause re-scans of the
# filesystem
while True:
try:
ret = _inner_load(mod_name)
if not reloaded and ret is not True:
self._refresh_file_mapping()
reloaded = True
continue
break
except IOError:
if not reloaded:
self._refresh_file_mapping()
reloaded = True
continue
return ret
def _load_all(self):
'''
Load all of them
'''
with self._lock:
for name in self.file_mapping:
if name in self.loaded_files or name in self.missing_modules:
continue
self._load_module(name)
self.loaded = True
def reload_modules(self):
with self._lock:
self.loaded_files = set()
self._load_all()
def _apply_outputter(self, func, mod):
'''
Apply the __outputter__ variable to the functions
'''
if hasattr(mod, '__outputter__'):
outp = mod.__outputter__
if func.__name__ in outp:
func.__outputter__ = outp[func.__name__]
def _process_virtual(self, mod, module_name, virtual_func='__virtual__'):
'''
Given a loaded module and its default name determine its virtual name
This function returns a tuple. The first value will be either True or
False and will indicate if the module should be loaded or not (i.e. if
it threw an exception while processing its __virtual__ function). The
second value is the determined virtual name, which may be the same as
the value provided.
The default name can be calculated as follows::
module_name = mod.__name__.rsplit('.', 1)[-1]
'''
# The __virtual__ function will return either a True or False value.
# If it returns a True value it can also set a module level attribute
# named __virtualname__ with the name that the module should be
# referred to as.
#
# This allows us to have things like the pkg module working on all
# platforms under the name 'pkg'. It also allows for modules like
# augeas_cfg to be referred to as 'augeas', which would otherwise have
# namespace collisions. And finally it allows modules to return False
# if they are not intended to run on the given platform or are missing
# dependencies.
virtual_aliases = getattr(mod, '__virtual_aliases__', tuple())
try:
error_reason = None
if hasattr(mod, '__virtual__') and inspect.isfunction(mod.__virtual__):
try:
start = time.time()
virtual = getattr(mod, virtual_func)()
if isinstance(virtual, tuple):
error_reason = virtual[1]
virtual = virtual[0]
if self.opts.get('virtual_timer', False):
end = time.time() - start
msg = 'Virtual function took {0} seconds for {1}'.format(
end, module_name)
log.warning(msg)
except Exception as exc:
error_reason = (
'Exception raised when processing __virtual__ function'
' for {0}. Module will not be loaded: {1}'.format(
mod.__name__, exc))
log.error(error_reason, exc_info_on_loglevel=logging.DEBUG)
virtual = None
# Get the module's virtual name
virtualname = getattr(mod, '__virtualname__', virtual)
if not virtual:
# if __virtual__() evaluates to False then the module
# wasn't meant for this platform or it's not supposed to
# load for some other reason.
# Some modules might accidentally return None and are
# improperly loaded
if virtual is None:
log.warning(
'%s.__virtual__() is wrongly returning `None`. '
'It should either return `True`, `False` or a new '
'name. If you\'re the developer of the module '
'\'%s\', please fix this.', mod.__name__, module_name
)
return (False, module_name, error_reason, virtual_aliases)
# At this point, __virtual__ did not return a
# boolean value, let's check for deprecated usage
# or module renames
if virtual is not True and module_name != virtual:
# The module is renaming itself. Updating the module name
# with the new name
log.trace('Loaded %s as virtual %s', module_name, virtual)
if not hasattr(mod, '__virtualname__'):
salt.utils.versions.warn_until(
'Hydrogen',
'The \'{0}\' module is renaming itself in its '
'__virtual__() function ({1} => {2}). Please '
'set its virtual name as the '
'\'__virtualname__\' module attribute. '
'Example: "__virtualname__ = \'{2}\'"'.format(
mod.__name__,
module_name,
virtual
)
)
if virtualname != virtual:
# The __virtualname__ attribute does not match what's
# being returned by the __virtual__() function. This
# should be considered an error.
log.error(
'The module \'%s\' is showing some bad usage. Its '
'__virtualname__ attribute is set to \'%s\' yet the '
'__virtual__() function is returning \'%s\'. These '
'values should match!',
mod.__name__, virtualname, virtual
)
module_name = virtualname
# If the __virtual__ function returns True and __virtualname__
# is set then use it
elif virtual is True and virtualname != module_name:
if virtualname is not True:
module_name = virtualname
except KeyError:
# Key errors come out of the virtual function when passing
# in incomplete grains sets; these can be safely ignored
# and logged at debug level. The traceback is still included
# to help debugging.
log.debug('KeyError when loading %s', module_name, exc_info=True)
except Exception:
# If the module throws an exception during __virtual__()
# then log the information and continue to the next.
log.error(
'Failed to read the virtual function for %s: %s',
self.tag, module_name, exc_info=True
)
return (False, module_name, error_reason, virtual_aliases)
return (True, module_name, None, virtual_aliases)
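# A minimal sketch of the module-side contract that _process_virtual() above
# checks for; the module, virtual name and HAS_SOME_LIB flag are hypothetical:
#
#   __virtualname__ = 'mymod'
#
#   def __virtual__():
#       if not HAS_SOME_LIB:
#           return (False, 'the some_lib dependency is not installed')
#       return __virtualname__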
def global_injector_decorator(inject_globals):
'''
Decorator used by the LazyLoader to inject globals into a function at
execute time.
inject_globals
Dictionary with global variables to inject
'''
def inner_decorator(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
with salt.utils.context.func_globals_inject(f, **inject_globals):
return f(*args, **kwargs)
return wrapper
return inner_decorator
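# A minimal sketch of how the decorator above is used; LazyLoader.__getitem__
# applies it whenever self.inject_globals is populated, and the injected name
# below is a hypothetical example:
#
#   wrapped = global_injector_decorator({'__an_injected_name__': 'value'})(func)
#   wrapped()   # func sees __an_injected_name__ in its globals for this call only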
| 35.552605
| 171
| 0.554229
|
2e4bd8a59468490cef451d9f3d07008815e8df9b
| 1,711
|
py
|
Python
|
test/gen_interop_data.py
|
JohnEmhoff/spavro
|
6a6c32d53e3b882964fbcbd9b4e3bfd071567be0
|
[
"Apache-2.0"
] | 32
|
2018-01-09T05:07:18.000Z
|
2022-03-13T15:50:40.000Z
|
test/gen_interop_data.py
|
JohnEmhoff/spavro
|
6a6c32d53e3b882964fbcbd9b4e3bfd071567be0
|
[
"Apache-2.0"
] | 14
|
2017-10-27T21:52:22.000Z
|
2021-08-06T15:01:46.000Z
|
test/gen_interop_data.py
|
JohnEmhoff/spavro
|
6a6c32d53e3b882964fbcbd9b4e3bfd071567be0
|
[
"Apache-2.0"
] | 12
|
2017-10-24T17:14:58.000Z
|
2021-08-03T10:22:58.000Z
|
#!/usr/bin/env python
# Modifications copyright (C) 2017 Pluralsight LLC
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from spavro import schema
from spavro import io
from spavro import datafile
DATUM = {
'intField': 12,
'longField': 15234324L,
'stringField': unicode('hey'),
'boolField': True,
'floatField': 1234.0,
'doubleField': -1234.0,
'bytesField': '12312adf',
'nullField': None,
'arrayField': [5.0, 0.0, 12.0],
'mapField': {'a': {'label': 'a'}, 'bee': {'label': 'cee'}},
'unionField': 12.0,
'enumField': 'C',
'fixedField': '1019181716151413',
'recordField': {'label': 'blah', 'children': [{'label': 'inner', 'children': []}]},
}
if __name__ == "__main__":
interop_schema = schema.parse(open(sys.argv[1], 'r').read())
writer = open(sys.argv[2], 'wb')
datum_writer = io.DatumWriter()
# NB: not using compression
dfw = datafile.DataFileWriter(writer, datum_writer, interop_schema)
dfw.append(DATUM)
dfw.close()
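# Typical invocation (file names are hypothetical): the first argument is the
# interop schema to parse, the second is the Avro container file to write:
#
#   python gen_interop_data.py interop.avsc interop_data.avro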
| 34.22
| 85
| 0.70602
|
5bdcd7d41c45539c677d23099e432bd3a0ea9274
| 1,394
|
py
|
Python
|
setup.py
|
Degelzhao/delorean
|
bb4611a2c3bd502ea4ec1f1e2b697e21fbab6dc9
|
[
"MIT"
] | 1
|
2019-02-18T13:28:07.000Z
|
2019-02-18T13:28:07.000Z
|
setup.py
|
Degelzhao/delorean
|
bb4611a2c3bd502ea4ec1f1e2b697e21fbab6dc9
|
[
"MIT"
] | null | null | null |
setup.py
|
Degelzhao/delorean
|
bb4611a2c3bd502ea4ec1f1e2b697e21fbab6dc9
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
from version import __version__
dependencies = [
'babel>=2.1.1',
'humanize>=0.5.1',
'python-dateutil>=2.4.2',
'pytz>=2015.7',
'tzlocal>=1.2']
setup(
name='Delorean',
version='.'.join(str(x) for x in __version__),
description='library for manipulating datetimes with ease and clarity',
url='https://github.com/myusuf3/delorean',
author='Mahdi Yusuf',
author_email="yusuf.mahdi@gmail.com",
packages=[
'delorean',
],
license='MIT license',
install_requires=dependencies,
test_suite='tests.test_data',
long_description=open('README.rst').read(),
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.1',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
)
| 29.041667
| 75
| 0.606887
|
552c3ebc8ca038b75c5fa10c22f1722865558121
| 2,364
|
py
|
Python
|
preprocesador/unit_tests/test_solucionador_de_correferencias.py
|
XrossFox/maquina-de-aspectos
|
e124167154cac2355278f309e2b3153e723a8c47
|
[
"MIT"
] | null | null | null |
preprocesador/unit_tests/test_solucionador_de_correferencias.py
|
XrossFox/maquina-de-aspectos
|
e124167154cac2355278f309e2b3153e723a8c47
|
[
"MIT"
] | null | null | null |
preprocesador/unit_tests/test_solucionador_de_correferencias.py
|
XrossFox/maquina-de-aspectos
|
e124167154cac2355278f309e2b3153e723a8c47
|
[
"MIT"
] | 1
|
2019-01-22T21:21:46.000Z
|
2019-01-22T21:21:46.000Z
|
import os
import sys
sys.path.append('../')
import unittest
import solucionador_de_correferencias as corref
class TestCorreferencias(unittest.TestCase):
def setUp(self):
print(os.getcwd())
self.C = corref.SolucionadorDeCorreferencias(os.getcwd()+"\\..\\corenlp\\CoreNLP")
def test_resolucion_de_correferencias(self):
"""Prueba si las correferencias son reemplazadas correctamente. Reemplaza pronombres de tema u objeto por su tem u objeto respectivamente"""
# Inputs
li = [
"My blue shirt is awesome, i really like it. It's also very expensive",
"That cat is so small, it fits in my hand",
"Radioactivity is very dangerous, it can kill you or it can make you grow a third arm. Stay away from it",
"My hands are cold, they are trembling",
"That thing is weird, is so ugly it makes my eyes sore. I´m going to burn it in the furnace",
"I'm tall. I also exercise daily.",
"My favorite food is hamburgers. My favorite color is green.",
"I like roses, I like them",
"That is very obvious, no way to miss it",
"My car is red. It's color is very lively",
]
# Expected outputs
lo = [
"my blue shirt is awesome, i really like my blue shirt. my blue shirt's also very expensive",
"that cat is so small, cat fits in my hand",
"radioactivity is very dangerous, radioactivity can kill you or radioactivity can make you grow a third arm. stay away from radioactivity",
"my hands are cold, my hands are trembling",
"that thing is weird, is so ugly thing makes my eyes sore. i´m going to burn thing in the furnace",
"i'm tall. i also exercise daily.",
"my favorite food is hamburgers. my favorite color is green.",
"i like roses, i like roses",
"that is very obvious, no way to miss it",
"my car is red. my car's color is very lively",
]
for i, o in zip(li,lo):
res = self.C.resolver_y_reemplazar(i)
self.assertEqual(res, o)
def tearDown(self):
self.C.cerrar()
if __name__ == "__main__":
unittest.main()
| 38.754098
| 151
| 0.594755
|
1d4a0d86426973c4d028fe44dfba3432ca25a194
| 5,088
|
py
|
Python
|
var/spack/repos/builtin/packages/picard/package.py
|
ilagunap/spack
|
510f869c3ae8ac2721debd29e98076212ee75852
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 1
|
2020-03-09T14:32:26.000Z
|
2020-03-09T14:32:26.000Z
|
var/spack/repos/builtin/packages/picard/package.py
|
ilagunap/spack
|
510f869c3ae8ac2721debd29e98076212ee75852
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 5
|
2021-07-26T12:12:00.000Z
|
2022-03-01T12:16:03.000Z
|
var/spack/repos/builtin/packages/picard/package.py
|
ilagunap/spack
|
510f869c3ae8ac2721debd29e98076212ee75852
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import glob
import os.path
import re
from spack import *
class Picard(Package):
"""Picard is a set of command line tools for manipulating high-throughput
sequencing (HTS) data and formats such as SAM/BAM/CRAM and VCF.
"""
homepage = "https://broadinstitute.github.io/picard/"
url = "https://github.com/broadinstitute/picard/releases/download/2.9.2/picard.jar"
_urlfmt = "https://github.com/broadinstitute/picard/releases/download/{0}/picard.jar"
_oldurlfmt = 'https://github.com/broadinstitute/picard/releases/download/{0}/picard-tools-{0}.zip'
# They started distributing a single jar file at v2.6.0, prior to
# that it was a .zip file with multiple .jar and .so files
version('2.25.7', sha256='dc0e830d3e838dee2b4f4aa1c9631fb3a4c3ec982de8dfe5145fc748104c7146', expand=False)
version('2.25.6', sha256='768709826514625381e6fa3920945360167f4e830bf72f79eb070da059676f02', expand=False)
version('2.25.5', sha256='f7fa9784b84d384abfcbd77076f5ceab7b2797dc863ac35fd41470daa3efe3a5', expand=False)
version('2.25.4', sha256='c3ac14471e48e28e24201ae5515f828ffa45db7ac3e37173251a672845b4a9b3', expand=False)
version('2.25.3', sha256='1d4dbfcb36d23fde11f173ab0de8a99835e04161efb04f246bbcbfc0bee3b493', expand=False)
version('2.25.2', sha256='aead4baf12028a5d2b9ef70e00c93e4bba8f6bece0b9a7104b89b842a8e0326f', expand=False)
version('2.25.1', sha256='6d6e622d6b465f9fc704c37bcba55b0dac5ad14d07088bbdbdfade5cd027ee1c', expand=False)
version('2.25.0', sha256='faf2434da84fe21b516d57817767920fdedfc19ebdc01d9cae8d6d3314f7e897', expand=False)
version('2.24.0', sha256='70e91039bccc6f6db60f18c41713218a8cdf45f591f02c1012c062152b27cd7b', expand=False)
version('2.20.8', sha256='aff92d618ee9e6bafc1ab4fbfa89fc557a0dbe596ae4b92fe3bf93ebf95c7105', expand=False)
version('2.19.0', sha256='f97fc3f7a73b55cceea8b6a6488efcf1b2fbf8cad61d88645704ddd45a8c5950', expand=False)
version('2.18.3', sha256='0e0fc45d9a822ee9fa562b3bb8f1525a439e4cd541429a1a4ca4646f37189b70', expand=False)
version('2.18.0', sha256='c4c64b39ab47968e4afcaf1a30223445ee5082eab31a03eee240e54c2e9e1dce', expand=False)
version('2.17.0', sha256='ffea8bf90e850919c0833e9dcc16847d40090a1ef733c583a710a3282b925998', expand=False)
version('2.16.0', sha256='01cf3c930d2b4841960497491512d327bf669c1ed2e753152e1e651a27288c2d', expand=False)
version('2.15.0', sha256='dc3ff74c704954a10796b390738078617bb0b0fef15731e9d268ed3b26c6a285', expand=False)
version('2.13.2', sha256='db7749f649e8423053fb971e6af5fb8a9a1a797cb1f20fef1100edf9f82f6f94', expand=False)
version('2.10.0', sha256='e256d5e43656b7d8be454201a7056dce543fe9cbeb30329a0d8c22d28e655775', expand=False)
version('2.9.4', sha256='0ecee9272bd289d902cc18053010f0364d1696e7518ac92419a99b2f0a1cf689', expand=False)
version('2.9.3', sha256='6cca26ce5094b25a82e1a8d646920d584c6db5f009476196dc285be6522e00ce', expand=False)
version('2.9.2', sha256='05714b9743a7685a43c94a93f5d03aa4070d3ab6e20151801301916d3e546eb7', expand=False)
version('2.9.0', sha256='9a57f6bd9086ea0f5f1a6d9d819459854cb883bb8093795c916538ed9dd5de64', expand=False)
version('2.8.3', sha256='97a4b6c8927c8cb5f3450630c9b39bf210ced8c271f198119118ce1c24b8b0f6', expand=False)
version('2.6.0', sha256='671d9e86e6bf0c28ee007aea55d07e2456ae3a57016491b50aab0fd2fd0e493b', expand=False)
version('1.140', sha256='0d27287217413db6b846284c617d502eaa578662dcb054a7017083eab9c54438')
depends_on('java@8:', type='run')
def install(self, spec, prefix):
mkdirp(prefix.bin)
# The list of files to install varies with release...
# ... but skip the spack-build-{env}out.txt files.
files = [x for x in glob.glob("*") if not re.match("^spack-", x)]
for f in files:
install(f, prefix.bin)
# Set up a helper script to call java on the jar file,
# explicitly codes the path for java and the jar file.
script_sh = join_path(os.path.dirname(__file__), "picard.sh")
script = prefix.bin.picard
install(script_sh, script)
set_executable(script)
# Munge the helper script to explicitly point to java and the
# jar file.
java = self.spec['java'].prefix.bin.java
kwargs = {'ignore_absent': False, 'backup': False, 'string': False}
filter_file('^java', java, script, **kwargs)
filter_file('picard.jar', join_path(prefix.bin, 'picard.jar'),
script, **kwargs)
def setup_run_environment(self, env):
"""The Picard docs suggest setting this as a convenience."""
env.prepend_path('PICARD', join_path(self.prefix.bin, 'picard.jar'))
def url_for_version(self, version):
if version < Version('2.6.0'):
return self._oldurlfmt.format(version)
else:
return self._urlfmt.format(version)
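# --- Editorial note (hedged, not part of the upstream package file) ---
# ``url_for_version`` switches URL schemes at the 2.6.0 boundary defined above,
# so, reading the two format strings in this file:
#   url_for_version(Version('2.5.0')) ->
#       https://github.com/broadinstitute/picard/releases/download/2.5.0/picard-tools-2.5.0.zip
#   url_for_version(Version('2.9.2')) ->
#       https://github.com/broadinstitute/picard/releases/download/2.9.2/picard.jar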
| 59.858824
| 110
| 0.748035
|
268df28c032c889c887d5f552ff17ff7c7e7f0e4
| 3,253
|
py
|
Python
|
src/document.py
|
Superar/SemOpinionS
|
9b0b0f9050bb1f7d66b91d5804f89b2eea9fbc20
|
[
"MIT"
] | 2
|
2021-09-04T02:51:34.000Z
|
2022-03-16T17:53:52.000Z
|
src/document.py
|
Superar/SemOpinionS
|
9b0b0f9050bb1f7d66b91d5804f89b2eea9fbc20
|
[
"MIT"
] | null | null | null |
src/document.py
|
Superar/SemOpinionS
|
9b0b0f9050bb1f7d66b91d5804f89b2eea9fbc20
|
[
"MIT"
] | null | null | null |
from collections import namedtuple
from pathlib import Path
import penman
import networkx as nx
from .amr import AMR
class Document(object):
"""
Class that reads a file with AMR graphs in penman notation.
Attributes:
corpus: list of tuples (id, sentence, AMR)
"""
doc_item = namedtuple('DocumentSent', ['id', 'snt', 'amr'])
def __init__(self, corpus: list, corpus_path: Path = None):
self.corpus = corpus
self.path = corpus_path
def __iter__(self):
return iter(self.corpus)
def __getitem__(self, item: str):
for doc in self:
if doc.id == item:
return doc
def __contains__(self, item):
return bool(self.__getitem__(item))
@classmethod
def read(cls, corpus_path: Path):
"""
Creates an object from a file containing ids, sentences and
AMR graphs in penman notation.
Parameters:
corpus_path (str): Path of the file to be read
Returns:
Document: An object with all read AMR graphs
"""
corpus = list()
with open(corpus_path, encoding='utf-8') as corpusfile:
corpusstr = corpusfile.read()
for penman_g in penman.loads(corpusstr):
amr = AMR.load_penman(penman_g)
corpus.append(cls.doc_item(penman_g.metadata['id'],
penman_g.metadata['snt'],
amr))
return cls(corpus, Path(corpus_path))
def merge_graphs(self, collapse_ner: bool = False, collapse_date: bool = False) -> AMR:
"""
Merges all AMR graphs in the current document into a single representation.
Parameters:
collapse_ner (bool, default False): Whether to keep all NE nodes collapsed
collapse_date (bool, default False): Whether to keep all date nodes collapsed
Returns:
AMR: A single representation of all AMR graphs in the document merged
"""
merge_graph = AMR()
for amr in self.corpus:
merge_graph = merge_graph.merge(amr.amr,
collapse_ner=collapse_ner,
collapse_date=collapse_date)
# Place multi-sentence root
new_root = merge_graph.add_concept('multi-sentence')
top = merge_graph.get_top()
root_number = 1
while top:
merge_graph.remove_edge(top, top, ':TOP')
merge_graph.add_edge(new_root, top,
':snt{}'.format(root_number),
label=':snt{}'.format(root_number))
top = merge_graph.get_top()
root_number += 1
merge_graph.add_edge(new_root, new_root, ':TOP', label=':TOP')
# Remove disconnected nodes
# This should not affect well-formed AMR graphs
largest_component = max(nx.connected_components(merge_graph.to_undirected()),
key=len)
nodes_to_remove = [n for n in merge_graph.nodes()
if n not in largest_component]
merge_graph.remove_nodes_from(nodes_to_remove)
return merge_graph
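# --- Hedged usage sketch (editorial addition, not part of the original module) ---
# Assumes a corpus file in penman notation whose metadata carries ``id`` and
# ``snt`` fields, as ``Document.read`` above expects; the path is hypothetical.
#   doc = Document.read('corpus.amr')
#   for sent_id, sentence, amr in doc:   # each item is the ``doc_item`` namedtuple
#       print(sent_id, sentence)
#   merged = doc.merge_graphs(collapse_ner=True, collapse_date=True)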
| 34.606383
| 91
| 0.576698
|
7470c44181f0f7723520bbd004ea91a27f6ec370
| 3,741
|
py
|
Python
|
evaluate.py
|
RiccardoBiondi/FemurSegmentation
|
9f1253666cce22541354bb448acc6972eb76c142
|
[
"MIT"
] | null | null | null |
evaluate.py
|
RiccardoBiondi/FemurSegmentation
|
9f1253666cce22541354bb448acc6972eb76c142
|
[
"MIT"
] | null | null | null |
evaluate.py
|
RiccardoBiondi/FemurSegmentation
|
9f1253666cce22541354bb448acc6972eb76c142
|
[
"MIT"
] | null | null | null |
#!/bin/env python
import os
import itk
import argparse
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from glob import glob
from FemurSegmentation.IOManager import ImageReader
from FemurSegmentation.IOManager import VolumeWriter
from FemurSegmentation.filters import execute_pipeline
from FemurSegmentation.filters import adjust_physical_space
from FemurSegmentation.metrics import itk_label_overlapping_measures
from FemurSegmentation.metrics import itk_hausdorff_distance
from FemurSegmentation.metrics import itk_hausdorff_distance_map
# %%
def parse_args():
description = 'Automated CT Femur Segmentation'
parser = argparse.ArgumentParser(description=description)
parser.add_argument('--source',
dest='source',
required=True,
type=str,
action='store',
help='Source Image Filename')
parser.add_argument('--target',
dest='target',
required=True,
type=str,
action='store',
help='Target Image Filename')
parser.add_argument('--output',
dest='output',
required=True,
type=str,
action='store',
help='output csv in which save the results')
parser.add_argument('--distance_map',
dest='distance_map',
required=False,
type=str,
action='store',
help='output filename for the distance map between source and target',
default=None)
args = parser.parse_args()
return args
def main(source_path, target_path, compute_distance_map=False):
ImageType = itk.Image[itk.SS, 3]
reader = ImageReader()
name = os.path.basename(source_path)
source = reader(source_path, ImageType)
target = reader(target_path, ImageType)
source = adjust_physical_space(source, target, ImageType)
measures = itk_label_overlapping_measures(source, target)
_ = measures.Update()
hd = itk_hausdorff_distance(source, target)
_ = hd.Update()
distance_map = None
if compute_distance_map:
distance_map = itk_hausdorff_distance_map(source, target)
# collect the metrics in a named dict rather than shadowing the builtin ``dict``
metrics_dict = {'Patient Name' : [name],
'Dice Coefficient' : [measures.GetDiceCoefficient()],
'Jaccard Coefficient' : [measures.GetJaccardCoefficient()],
'Volume Similarity' : [measures.GetVolumeSimilarity()],
'Hausdorff Distance' : [hd.GetHausdorffDistance()],
'Average Hausdorff Distance' : [hd.GetAverageHausdorffDistance()]}
df = pd.DataFrame.from_dict(metrics_dict)
print('Processed Image: {}'.format(name), flush=True)
print('Computed Metrics:', flush=True)
print(df)
return [df, distance_map]
if __name__ == '__main__':
args = parse_args()
print('Source Image: {}'.format(args.source), flush=True)
print('Target Image: {}'.format(args.target), flush=True)
compute_distance_map=False
if args.distance_map is not None:
compute_distance_map=True
df, distance_map = main(args.source, args.target, compute_distance_map=compute_distance_map)
print('Writing the results to {}'.format(args.output), flush=True)
df.to_csv(args.output, sep=',', index=False)
if compute_distance_map:
print('Writing the distance map to {}'.format(args.distance_map))
writer = VolumeWriter()
_ = writer(args.distance_map, distance_map)
print('[DONE]', flush=True)
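# --- Editorial note (hedged): example command line; the file names are hypothetical ---
#   python evaluate.py --source predicted_mask.nrrd --target ground_truth.nrrd \
#       --output metrics.csv --distance_map distance_map.nrrd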
| 30.169355
| 96
| 0.623897
|
2f372fcac9a0efe7d78ff3babda1beb86f5a2811
| 1,146
|
py
|
Python
|
ppo_train.py
|
deval-maker/gym-multiagent-warehouse-sort
|
0154da11e0f1ee4da6c5d21a950a6231d7a7ea68
|
[
"Apache-2.0"
] | null | null | null |
ppo_train.py
|
deval-maker/gym-multiagent-warehouse-sort
|
0154da11e0f1ee4da6c5d21a950a6231d7a7ea68
|
[
"Apache-2.0"
] | null | null | null |
ppo_train.py
|
deval-maker/gym-multiagent-warehouse-sort
|
0154da11e0f1ee4da6c5d21a950a6231d7a7ea68
|
[
"Apache-2.0"
] | null | null | null |
import gym
from gym.envs.registration import register
from stable_baselines3 import PPO
from stable_baselines3.common.env_util import make_vec_env
from stable_baselines3.ppo.policies import MlpPolicy
from datetime import datetime
now = datetime.now()
current_time = now.strftime("%d_%m_%Y_%H_%M_%S")
filename = "ppo_warehouse_sort_"+ str(current_time)
print("Model will be saved at ", filename)
class CustomPolicy(MlpPolicy):
def __init__(self, *args, **kwargs):
super(CustomPolicy, self).__init__(*args, **kwargs,
net_arch=[64, 64, dict(pi=[64, 64], vf=[64, 64])])
def _get_torch_save_params(self):
state_dicts = ["policy", "policy.optimizer", "policy.lr_scheduler"]
return state_dicts, []
register(
id='warehouse-sort-v0',
entry_point='gym_multigrid.envs:WarehouseSortEnvN1'
)
env = gym.make('warehouse-sort-v0')
model = PPO(CustomPolicy, env, verbose=1)
model.learn(total_timesteps=10000)
model.save(filename)
obs = env.reset()
while True:
action, _states = model.predict(obs)
obs, rewards, dones, info = env.step(action)
env.render()
if dones:
env.reset()
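# --- Editorial note (hedged, not part of the original script) ---
# A saved model can later be restored with the standard stable-baselines3 API,
# e.g. ``model = PPO.load(filename, env=env)``.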
| 27.285714
| 75
| 0.711169
|
2579e2e153d7ca351524c650d14601707ee99cbb
| 815
|
py
|
Python
|
util/alembic/versions/a051167419fa_add_admin_to_user_name_to_repogroup.py
|
Nayan-Das/augur
|
857f4a4e7d688fd54356aa0f546834071fbabbf2
|
[
"MIT"
] | 3
|
2019-10-31T19:07:48.000Z
|
2019-11-20T23:14:15.000Z
|
util/alembic/versions/a051167419fa_add_admin_to_user_name_to_repogroup.py
|
Nayan-Das/augur
|
857f4a4e7d688fd54356aa0f546834071fbabbf2
|
[
"MIT"
] | 3
|
2019-12-03T21:21:17.000Z
|
2019-12-05T15:26:22.000Z
|
util/alembic/versions/a051167419fa_add_admin_to_user_name_to_repogroup.py
|
Nayan-Das/augur
|
857f4a4e7d688fd54356aa0f546834071fbabbf2
|
[
"MIT"
] | 4
|
2019-11-05T20:22:12.000Z
|
2019-12-12T18:08:30.000Z
|
"""Add admin to User, name to RepoGroup
Revision ID: a051167419fa
Revises: 2eaa930b1f5a
Create Date: 2019-02-17 13:09:42.138936
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'a051167419fa'
down_revision = '2eaa930b1f5a'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('repo_group', sa.Column('name', sa.String(length=128), nullable=True))
op.add_column('user', sa.Column('administrator', sa.Boolean(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('user', 'administrator')
op.drop_column('repo_group', 'name')
# ### end Alembic commands ###
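# --- Editorial note (hedged): this revision is applied or reverted with the usual
# Alembic commands, e.g. ``alembic upgrade a051167419fa`` / ``alembic downgrade 2eaa930b1f5a``.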
| 26.290323
| 88
| 0.695706
|
32375e5c25a3cc8432f12ea88795242af9bf1298
| 12,383
|
py
|
Python
|
gamelib/tests/test_walkthrough.py
|
CTPUG/suspended_sentence
|
85956c7250509bf93e51f91cda906e93db4e72a4
|
[
"MIT"
] | 2
|
2020-01-02T10:11:49.000Z
|
2021-07-11T03:38:41.000Z
|
gamelib/tests/test_walkthrough.py
|
CTPUG/suspended_sentence
|
85956c7250509bf93e51f91cda906e93db4e72a4
|
[
"MIT"
] | null | null | null |
gamelib/tests/test_walkthrough.py
|
CTPUG/suspended_sentence
|
85956c7250509bf93e51f91cda906e93db4e72a4
|
[
"MIT"
] | 1
|
2020-08-29T17:48:12.000Z
|
2020-08-29T17:48:12.000Z
|
from pyntnclick.tests.game_logic_utils import GameLogicTestCase
import gamelib.main
class TestWalkthrough(GameLogicTestCase):
GAME_DESCRIPTION_CLASS = gamelib.main.SuspendedSentence
CURRENT_SCENE = 'cryo'
def move_to(self, target):
self.interact_thing(self.state.get_current_scene().name + '.door')
self.assert_current_scene('map')
self.interact_thing('map.to' + target)
self.assert_current_scene(target)
def test_walkthrough(self):
"""A complete game walkthrough.
This should only contain interacts and assertions."""
# TODO: Add flavour interactions, maybe?
# Partially open the door.
self.assert_game_data('door', 'shut', 'cryo.door')
self.interact_thing('cryo.door')
self.assert_game_data('door', 'ajar', 'cryo.door')
# Get the titanium leg.
self.interact_thing('cryo.unit.1')
self.assert_detail_thing('cryo.titanium_leg')
self.interact_thing('cryo.titanium_leg', detail='cryo_detail')
self.assert_detail_thing('cryo.titanium_leg', False)
self.assert_inventory_item('titanium_leg:')
self.close_detail()
# Open the door the rest of the way.
self.interact_thing('cryo.door', 'titanium_leg:')
self.assert_game_data('door', 'open', 'cryo.door')
self.assert_inventory_item('titanium_leg:')
# Go to the mess.
self.move_to('mess')
# Check that life support is broken
self.assert_game_data('life support status', 'broken')
# Get the cans.
self.assert_game_data('cans_available', 3, 'mess.cans')
self.interact_thing('mess.cans')
self.assert_inventory_item('full_can:0')
self.assert_game_data('cans_available', 2, 'mess.cans')
self.interact_thing('mess.cans')
self.assert_inventory_item('full_can:1')
self.assert_game_data('cans_available', 1, 'mess.cans')
self.interact_thing('mess.cans')
self.assert_inventory_item('full_can:2')
self.assert_scene_thing('mess.cans', False)
# Bash one of the cans.
self.assert_item_exists('dented_can:0', False)
self.interact_item('full_can:1', 'titanium_leg:')
self.assert_inventory_item('dented_can:0')
self.assert_inventory_item('full_can:1', False)
# Go to the machine room.
self.move_to('machine')
# Sharpen leg into machete.
self.interact_thing('machine.grinder', 'titanium_leg:')
self.assert_inventory_item('titanium_leg', False)
self.assert_inventory_item('machete:')
# Go to the cryo room.
self.move_to('cryo')
# Chop up some pipes.
self.assert_game_data('fixed', True, 'cryo.pipe.left')
self.interact_thing('cryo.pipe.left', 'machete:')
self.assert_game_data('fixed', False, 'cryo.pipe.left')
self.assert_inventory_item('tube_fragment:0')
self.assert_game_data('fixed', True, 'cryo.pipe.right.top')
self.interact_thing('cryo.pipe.right.top', 'machete:')
self.assert_game_data('fixed', False, 'cryo.pipe.right.top')
self.assert_inventory_item('tube_fragment:1')
self.assert_game_data('fixed', True, 'cryo.pipe.right.bottom')
self.interact_thing('cryo.pipe.right.bottom', 'machete:')
self.assert_game_data('fixed', False, 'cryo.pipe.right.bottom')
self.assert_inventory_item('tube_fragment:2')
# Go to the mess.
self.move_to('mess')
# Clear the broccoli.
self.assert_game_data('status', 'blocked', 'mess.tubes')
self.interact_thing('mess.tubes', 'machete:')
self.assert_game_data('status', 'broken', 'mess.tubes')
# Go to the bridge.
self.move_to('bridge')
# Check that the AI is online.
self.assert_game_data('ai status', 'online')
# Get the stethoscope.
self.interact_thing('bridge.stethoscope')
self.assert_inventory_item('stethoscope:')
self.assert_scene_thing('bridge.stethoscope', False)
# Get the superconductor.
self.interact_thing('bridge.massagechair_base')
self.interact_thing('bridge.superconductor', detail='chair_detail')
self.assert_inventory_item('superconductor:')
self.assert_detail_thing('bridge.superconductor', False)
self.close_detail()
# Go to the crew quarters.
self.move_to('crew_quarters')
# Get the poster.
self.interact_thing('crew.poster')
self.assert_inventory_item('escher_poster:')
self.assert_scene_thing('crew.poster', False)
# Get the fishbowl.
self.assert_game_data('has_bowl', True, 'crew.fishbowl')
self.interact_thing('crew.fishbowl')
self.assert_game_data('has_bowl', False, 'crew.fishbowl')
self.assert_inventory_item('fishbowl:')
# Crack the safe.
self.assert_game_data('is_cracked', False, 'crew.safe')
self.interact_thing('crew.safe', 'stethoscope:')
self.assert_game_data('is_cracked', True, 'crew.safe')
# Get the duct tape.
self.assert_game_data('has_tape', True, 'crew.safe')
self.interact_thing('crew.safe')
self.assert_game_data('has_tape', False, 'crew.safe')
self.assert_inventory_item('duct_tape:')
# Make the helmet.
self.interact_item('fishbowl:', 'duct_tape:')
self.assert_inventory_item('helmet:')
self.assert_inventory_item('fishbowl', False)
# Go to the engine room.
self.move_to('engine')
# Check that the engines are broken.
self.assert_game_data('engine online', False)
# Get the can opener.
self.interact_thing('engine.canopener')
self.assert_inventory_item('canopener:')
self.assert_scene_thing('engine.canopener', False)
# Open the cans.
self.interact_item('full_can:2', 'canopener:')
self.assert_inventory_item('full_can:2', False)
self.assert_inventory_item('empty_can:0')
self.interact_item('full_can:0', 'canopener:')
self.assert_inventory_item('full_can:0', False)
self.assert_inventory_item('empty_can:1')
self.interact_item('dented_can:0', 'canopener:')
self.assert_inventory_item('dented_can:0', False)
self.assert_inventory_item('empty_can:2')
# Go to the machine room.
self.move_to('machine')
# Weld pipes and cans.
self.assert_game_data('contents', [], 'machine.welder.slot')
self.interact_thing('machine.welder.slot', 'tube_fragment:0')
self.assert_inventory_item('tube_fragment:0', False)
self.assert_game_data('contents', ['tube'], 'machine.welder.slot')
self.interact_thing('machine.welder.slot', 'empty_can:1')
self.assert_inventory_item('empty_can:1', False)
self.assert_game_data(
'contents', ['tube', 'can'], 'machine.welder.slot')
self.interact_thing('machine.welder.button')
self.assert_game_data('contents', [], 'machine.welder.slot')
self.assert_inventory_item('cryo_pipes_one:')
self.assert_game_data('contents', [], 'machine.welder.slot')
self.interact_thing('machine.welder.slot', 'tube_fragment:2')
self.assert_inventory_item('tube_fragment:2', False)
self.assert_game_data('contents', ['tube'], 'machine.welder.slot')
self.interact_thing('machine.welder.slot', 'empty_can:2')
self.assert_inventory_item('empty_can:2', False)
self.assert_game_data(
'contents', ['tube', 'can'], 'machine.welder.slot')
self.interact_thing('machine.welder.button')
self.assert_game_data('contents', [], 'machine.welder.slot')
self.assert_inventory_item('cryo_pipes_one', False)
self.assert_inventory_item('cryo_pipes_two:')
self.assert_game_data('contents', [], 'machine.welder.slot')
self.interact_thing('machine.welder.slot', 'tube_fragment:1')
self.assert_inventory_item('tube_fragment:1', False)
self.assert_game_data('contents', ['tube'], 'machine.welder.slot')
self.interact_thing('machine.welder.slot', 'empty_can:0')
self.assert_inventory_item('empty_can:0', False)
self.assert_game_data(
'contents', ['tube', 'can'], 'machine.welder.slot')
self.interact_thing('machine.welder.button')
self.assert_game_data('contents', [], 'machine.welder.slot')
self.assert_inventory_item('cryo_pipes_two', False)
self.assert_inventory_item('cryo_pipes_three:')
# Go to the mess.
self.move_to('mess')
# Replace the tubes.
self.interact_thing('mess.tubes', 'cryo_pipes_three:')
self.assert_inventory_item('cryo_pipes_three', False)
self.assert_game_data('status', 'replaced', 'mess.tubes')
# Check that life support is replaced
self.assert_game_data('life support status', 'replaced')
# Tape up the tubes.
self.interact_thing('mess.tubes', 'duct_tape:')
self.assert_game_data('status', 'fixed', 'mess.tubes')
# Check that life support is fixed
self.assert_game_data('life support status', 'fixed')
# Get the detergent bottle.
self.interact_thing('mess.detergent')
self.assert_inventory_item('detergent_bottle:')
# Go to the cryo room.
self.move_to('cryo')
# Fill the detergent bottle.
self.interact_thing('cryo.pool', 'detergent_bottle:')
self.assert_inventory_item('detergent_bottle', False)
self.assert_inventory_item('full_detergent_bottle:')
# Go to the engine room.
self.move_to('engine')
# Patch the cracked pipe.
self.assert_game_data('fixed', False, 'engine.cracked_pipe')
self.interact_thing('engine.cracked_pipe', 'duct_tape:')
self.assert_game_data('fixed', True, 'engine.cracked_pipe')
# Fill the cryofluid receptacles.
self.assert_game_data('filled', False, 'engine.cryo_containers')
self.interact_thing(
'engine.cryo_container_receptacle', 'full_detergent_bottle:')
self.assert_game_data('filled', True, 'engine.cryo_containers')
self.assert_inventory_item('full_detergent_bottle', False)
# Remove the burned-out superconductor.
self.assert_game_data('present', True, 'engine.superconductor')
self.assert_game_data('working', False, 'engine.superconductor')
self.interact_thing('engine.superconductor', 'machete:')
self.assert_game_data('present', False, 'engine.superconductor')
self.assert_game_data('working', False, 'engine.superconductor')
# Tape up new superconductor.
self.interact_item('superconductor:', 'duct_tape:')
self.assert_inventory_item('superconductor', False)
self.assert_inventory_item('taped_superconductor:')
# Install superconductor.
self.interact_thing('engine.superconductor', 'taped_superconductor:')
self.assert_inventory_item('taped_superconductor', False)
self.assert_game_data('present', True, 'engine.superconductor')
self.assert_game_data('working', True, 'engine.superconductor')
# Check that we've fixed the engines.
self.assert_game_data('engine online', True)
# Go to the bridge.
self.move_to('bridge')
# Show JIM the poster.
self.interact_thing('bridge.camera', 'escher_poster:')
self.assert_game_data('ai status', 'looping')
# Get at JIM.
self.assert_game_data('ai panel', 'closed')
self.interact_thing('jim_panel', 'machete:')
self.assert_game_data('ai panel', 'open')
# Break JIM.
self.interact_thing('jim_panel', 'machete:')
self.assert_game_data('ai panel', 'broken')
# Check that we've turned off JIM.
self.assert_game_data('ai status', 'dead')
# Bring up nav console.
self.interact_thing('bridge.comp')
self.interact_thing('bridge_comp.nav_tab', detail='bridge_comp_detail')
self.assert_game_data('tab', 'nav', detail='bridge_comp_detail')
# Go somewhere interesting.
self.interact_thing(
'bridge_comp.nav_line2', detail='bridge_comp_detail')
| 40.204545
| 79
| 0.655899
|
a5bbcd542f844bba840c526e6e292dd108ab5727
| 132
|
py
|
Python
|
minjector/injection/inject_lazy.py
|
MichaelSchneeberger/minjector
|
64655483e53f49696e6ef6a3b45ca7bca8ab2dfb
|
[
"MIT"
] | null | null | null |
minjector/injection/inject_lazy.py
|
MichaelSchneeberger/minjector
|
64655483e53f49696e6ef6a3b45ca7bca8ab2dfb
|
[
"MIT"
] | null | null | null |
minjector/injection/inject_lazy.py
|
MichaelSchneeberger/minjector
|
64655483e53f49696e6ef6a3b45ca7bca8ab2dfb
|
[
"MIT"
] | null | null | null |
from minjector.injection.injectionbase import inject_base
def inject_lazy(**bindings):
return inject_base(bindings, lazy=True)
| 26.4
| 57
| 0.810606
|
8eedd5c0b995aa32cf406183bbaf8a55daa92966
| 1,897
|
py
|
Python
|
src/app/cart.py
|
fovecsernyes/pizza-ordering-system
|
581d89a0b6d2f41e082539446d7a83976ed4cfa1
|
[
"MIT"
] | 1
|
2022-02-28T22:18:19.000Z
|
2022-02-28T22:18:19.000Z
|
src/app/cart.py
|
vattila96/pizza-ordering-system
|
a8bd5529636fd1515bdf0c38680fd509e58c0af5
|
[
"MIT"
] | 70
|
2019-09-30T19:14:41.000Z
|
2019-12-12T21:24:41.000Z
|
src/app/cart.py
|
vattila96/pizza-ordering-system
|
a8bd5529636fd1515bdf0c38680fd509e58c0af5
|
[
"MIT"
] | 2
|
2019-10-07T14:04:06.000Z
|
2020-11-16T09:23:08.000Z
|
from decimal import Decimal
from django.conf import settings
from .models import Pizza
class Cart(object):
def __init__(self, request):
self.session = request.session
cart = self.session.get(settings.CART_SESSION_ID)
if not cart:
cart = self.session[settings.CART_SESSION_ID] = {}
self.cart = cart
def add(self, product, quantity=1.0, update_quantity=False):
product_id = str(product.id)
if product_id not in self.cart:
self.cart[product_id] = {'quantity': 0.0, 'price': str(product.price)}
if update_quantity:
self.cart[product_id]['quantity'] = quantity
else:
self.cart[product_id]['quantity'] += quantity
self.save()
def save(self):
self.session[settings.CART_SESSION_ID] = self.cart
self.session.modified = True
def remove(self, product):
product_id = str(product.id)
if product_id in self.cart:
del self.cart[product_id]
self.save()
def __iter__(self):
product_ids = self.cart.keys()
products = Pizza.objects.filter(id__in=product_ids)
for product in products:
self.cart[str(product.id)]['product'] = product
for item in self.cart.values():
item['price'] = Decimal(item['price'])
item['total_price'] = item['price'] * Decimal(item['quantity'])
yield item
def __len__(self):
return 1 + int(sum(item['quantity'] for item in self.cart.values()))
def get_total_price(self):
return sum(Decimal(item['price']) * Decimal(item['quantity']) for item in self.cart.values())
def valid_cart(self):
return (2 * sum(item['quantity'] for item in self.cart.values()) % 2) == 0
def clear(self):
del self.session[settings.CART_SESSION_ID]
self.session.modified = True
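# --- Hedged usage sketch (editorial addition, not part of the original module) ---
# Inside a Django view the cart wraps the request session; ``pizza`` below stands
# for a hypothetical Pizza instance.
#   cart = Cart(request)
#   cart.add(pizza, quantity=1.0)
#   total = cart.get_total_price()   # Decimal sum of price * quantity
#   ok = cart.valid_cart()           # True when the quantities add up to a whole number
#   cart.remove(pizza)
#   cart.clear()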
| 33.875
| 101
| 0.614128
|
20ed6559023ccb3e077294f1be86d1b37b28af79
| 10,289
|
py
|
Python
|
fontlink/font_lib/font_lib.py
|
danpla/fontlink
|
03184053bf4cb4a0aad4fa8a0c01a50145d4d39c
|
[
"Zlib"
] | 6
|
2015-12-06T01:41:45.000Z
|
2020-08-20T04:20:16.000Z
|
fontlink/font_lib/font_lib.py
|
danpla/fontlink
|
03184053bf4cb4a0aad4fa8a0c01a50145d4d39c
|
[
"Zlib"
] | null | null | null |
fontlink/font_lib/font_lib.py
|
danpla/fontlink
|
03184053bf4cb4a0aad4fa8a0c01a50145d4d39c
|
[
"Zlib"
] | 2
|
2020-08-20T04:20:20.000Z
|
2021-04-14T21:01:38.000Z
|
from gettext import gettext as _, ngettext
import json
import os
from gi.repository import Gtk, Gdk, Pango
from .. import config
from ..settings import settings
from .. import dialogs
from .. import utils
from .models import SetStore
from .font_list import FontList
class FontLib(Gtk.Paned):
_FILE = os.path.join(config.CONFIG_DIR, 'sets.json')
_DEFAULT_SET_NAME = _('New set')
class _ViewColumn:
TOGGLE = 0
NAME = 1
STATS = 2
def __init__(self):
super().__init__()
self._set_store = SetStore()
self._font_list = FontList()
self._create_ui()
def _create_ui(self):
grid = Gtk.Grid(orientation=Gtk.Orientation.VERTICAL)
self._set_list = Gtk.TreeView(
model=self._set_store,
headers_visible=False,
reorderable=True,
search_column=SetStore.COL_NAME,
has_tooltip=True)
self._set_list.connect('button-press-event', self._on_button_press)
self._set_list.connect('query-tooltip', self._on_query_tooltip)
selection = self._set_list.get_selection()
selection.set_mode(Gtk.SelectionMode.BROWSE)
selection.connect('changed', self._on_selection_changed)
scrolled = Gtk.ScrolledWindow(
shadow_type=Gtk.ShadowType.IN,
width_request=150,
expand=True
)
scrolled.add(self._set_list)
grid.add(scrolled)
# Columns
toggle = Gtk.CellRendererToggle()
toggle.connect('toggled', self._on_toggled)
col_toggle = Gtk.TreeViewColumn('', toggle)
col_toggle.set_cell_data_func(toggle, self._toggle_cell_data_func)
self._set_list.append_column(col_toggle)
name = Gtk.CellRendererText(
editable=True,
ellipsize=Pango.EllipsizeMode.END
)
name.connect('edited', self._on_name_edited)
col_name = Gtk.TreeViewColumn(
_('Font sets'), name, text=SetStore.COL_NAME)
col_name.set_sizing(Gtk.TreeViewColumnSizing.AUTOSIZE)
col_name.set_expand(True)
self._set_list.append_column(col_name)
stats = Gtk.CellRendererText(xalign=1.0)
col_stats = Gtk.TreeViewColumn('', stats)
col_stats.set_sizing(Gtk.TreeViewColumnSizing.AUTOSIZE)
col_stats.set_cell_data_func(stats, self._stats_cell_data_func)
self._set_list.append_column(col_stats)
# Toolbar
toolbar = Gtk.Toolbar()
toolbar.get_style_context().add_class('bottom-toolbar')
grid.add(toolbar)
btn_new = Gtk.ToolButton(
label=_('New'),
icon_name='document-new',
tooltip_text=_('Create a new set'))
btn_new.connect('clicked', self._on_new)
toolbar.add(btn_new)
btn_delete = Gtk.ToolButton(
label=_('Delete'),
icon_name='edit-delete',
tooltip_text=_('Delete the set'))
btn_delete.connect('clicked', self._on_delete)
toolbar.add(btn_delete)
self.pack1(grid, False, False)
self.pack2(self._font_list, True, False)
def _on_button_press(self, widget, event):
if not (event.type == Gdk.EventType.BUTTON_PRESS and
event.button == Gdk.BUTTON_SECONDARY):
return Gdk.EVENT_PROPAGATE
self._set_list.grab_focus()
click_info = self._set_list.get_path_at_pos(int(event.x), int(event.y))
if click_info is not None:
tree_path, column, cell_x, cell_y = click_info
self._set_list.set_cursor(tree_path, column, False)
menu = Gtk.Menu(attach_widget=widget)
mi_new = Gtk.MenuItem(
label=_('_New'),
use_underline=True,
tooltip_text=_('Create a new set')
)
mi_new.connect('activate', self._on_new)
menu.append(mi_new)
menu.append(Gtk.SeparatorMenuItem())
mi_duplicate = Gtk.MenuItem(
label=_('D_uplicate'),
use_underline=True,
tooltip_text=_('Duplicate the set')
)
mi_duplicate.connect('activate', self._on_duplicate)
menu.append(mi_duplicate)
mi_rename = Gtk.MenuItem(
label=_('_Rename…'),
use_underline=True,
tooltip_text=_('Rename the set')
)
mi_rename.connect('activate', self._on_rename)
menu.append(mi_rename)
menu.append(Gtk.SeparatorMenuItem())
mi_delete = Gtk.MenuItem(
label=_('_Delete'),
use_underline=True,
tooltip_text=_('Delete the set')
)
mi_delete.connect('activate', self._on_delete)
menu.append(mi_delete)
menu.show_all()
menu.popup(None, None, None, None, event.button, event.time)
return Gdk.EVENT_STOP
def _on_query_tooltip(self, tree_view, x, y, keyboard_tip, tooltip):
points_to_row, *context = tree_view.get_tooltip_context(
x, y, keyboard_tip)
if not points_to_row:
return False
set_store, tree_path, tree_iter = context[2:]
font_set = set_store[tree_iter][SetStore.COL_FONTSET]
num_fonts = len(font_set)
text = ngettext('{num} font', '{num} fonts', num_fonts).format(
num=num_fonts)
if num_fonts > 0:
text = '{}\n{}'.format(
text,
# Translators: Number of active fonts
ngettext('{num} active', '{num} active',
font_set.num_active).format(num=font_set.num_active))
tooltip.set_text(text)
tree_view.set_tooltip_row(tooltip, tree_path)
return True
def _toggle_cell_data_func(self, column, cell, set_store, tree_iter, data):
font_set = set_store[tree_iter][SetStore.COL_FONTSET]
if font_set.num_active == 0:
cell.props.inconsistent = False
cell.props.active = False
elif font_set.num_active == len(font_set):
cell.props.inconsistent = False
cell.props.active = True
else:
cell.props.inconsistent = True
def _stats_cell_data_func(self, column, cell, set_store, tree_iter, data):
font_set = set_store[tree_iter][SetStore.COL_FONTSET]
cell.props.text = '{}/{}'.format(font_set.num_active, len(font_set))
def _on_selection_changed(self, selection):
set_store, tree_iter = selection.get_selected()
if tree_iter is None:
return
self._font_list.font_set = set_store[tree_iter][SetStore.COL_FONTSET]
def _on_toggled(self, cell_toggle, tree_path):
font_set = self._set_store[tree_path][SetStore.COL_FONTSET]
font_set.set_state_all(font_set.num_active < len(font_set))
def _on_name_edited(self, cell_text, tree_path, new_name):
new_name = new_name.strip()
if not new_name:
return
old_name = self._set_store[tree_path][SetStore.COL_NAME]
if new_name == old_name:
return
all_names = set(row[SetStore.COL_NAME] for row in self._set_store)
all_names.discard(old_name)
new_name = utils.unique_name(new_name, all_names)
self._set_store[tree_path][SetStore.COL_NAME] = new_name
def _on_new(self, widget):
selection = self._set_list.get_selection()
set_store, tree_iter = selection.get_selected()
tree_iter = set_store.add_set(self._DEFAULT_SET_NAME, tree_iter)
tree_path = set_store.get_path(tree_iter)
column = self._set_list.get_column(self._ViewColumn.NAME)
self._set_list.set_cursor(tree_path, column, True)
def _on_duplicate(self, widget):
selection = self._set_list.get_selection()
set_store, tree_iter = selection.get_selected()
if tree_iter is None:
return
tree_iter = set_store.duplicate_set(tree_iter)
tree_path = set_store.get_path(tree_iter)
self._set_list.set_cursor(tree_path, None, False)
def _on_rename(self, widget):
selection = self._set_list.get_selection()
set_store, tree_iter = selection.get_selected()
if tree_iter is None:
return
tree_path = set_store.get_path(tree_iter)
column = self._set_list.get_column(self._ViewColumn.NAME)
self._set_list.set_cursor(tree_path, column, True)
def _on_delete(self, widget):
selection = self._set_list.get_selection()
set_store, tree_iter = selection.get_selected()
if tree_iter is None:
return
row = set_store[tree_iter]
if not dialogs.confirmation(
self.get_toplevel(),
_('Delete “{set_name}”?').format(
set_name=row[SetStore.COL_NAME]),
_('_Delete')):
return
row[SetStore.COL_FONTSET].remove_all_fonts()
set_store.remove(tree_iter)
if len(set_store) == 0:
set_store.add_set(self._DEFAULT_SET_NAME)
self._set_list.set_cursor(0)
def add_fonts(self, paths):
"""Add fonts to the currently selected set."""
font_set = self._font_list.font_set
if font_set is not None:
font_set.add_fonts(paths)
def save_state(self):
settings['splitter_position'] = self.get_position()
settings['selected_set'] = self._set_list.get_cursor()[0][0] + 1
try:
with open(self._FILE, 'w', encoding='utf-8') as f:
json.dump(
self._set_store.as_json, f, ensure_ascii=False, indent=2)
except OSError:
pass
def load_state(self):
self.set_position(
settings.get('splitter_position', self.get_position()))
try:
with open(self._FILE, 'r', encoding='utf-8') as f:
self._set_store.as_json = json.load(f)
except (KeyError, ValueError, OSError):
pass
if len(self._set_store) == 0:
self._set_store.add_set(self._DEFAULT_SET_NAME)
tree_path = max(0, settings.get('selected_set', 1) - 1)
self._set_list.set_cursor(tree_path)
self._set_list.scroll_to_cell(tree_path, None, False, 0, 0)
| 33.624183
| 79
| 0.621926
|
591113e221b16249c49958ca6214d3ee0e4d7fde
| 13,303
|
py
|
Python
|
beakerx/beakerx/plot/chart_models.py
|
altavir/beakerx
|
06fb4200d8042fc2a52e3a1ce8be8aa4b72d3743
|
[
"Apache-2.0"
] | null | null | null |
beakerx/beakerx/plot/chart_models.py
|
altavir/beakerx
|
06fb4200d8042fc2a52e3a1ce8be8aa4b72d3743
|
[
"Apache-2.0"
] | null | null | null |
beakerx/beakerx/plot/chart_models.py
|
altavir/beakerx
|
06fb4200d8042fc2a52e3a1ce8be8aa4b72d3743
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 TWO SIGMA OPEN SOURCE, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from beakerx.plot.legend import LegendPosition, LegendLayout
from beakerx.plot.plotitem import *
from beakerx.plot.plotitem_treemap import *
from beakerx.utils import *
from .tree_map_reducer import TreeMapReducer
class Chart(BaseObject):
def __init__(self, **kwargs):
super(Chart, self).__init__(**kwargs)
self.init_width = getValue(kwargs, 'initWidth', 640)
self.init_height = getValue(kwargs, 'initHeight', 480)
self.chart_title = getValue(kwargs, 'title')
self.show_legend = getValue(kwargs, 'showLegend')
self.use_tool_tip = getValue(kwargs, 'useToolTip', True)
self.legend_position = getValue(kwargs, 'legendPosition',
LegendPosition())
self.legend_layout = getValue(kwargs, 'legendLayout',
LegendLayout.VERTICAL)
self.type = "Plot"
class AbstractChart(Chart):
def __init__(self, **kwargs):
super(AbstractChart, self).__init__(**kwargs)
self.rangeAxes = getValue(kwargs, 'yAxes', [])
if len(self.rangeAxes) == 0:
self.rangeAxes.append(YAxis(**kwargs))
self.domain_axis_label = getValue(kwargs, 'xLabel')
self.y_label = getValue(kwargs, 'yLabel')
self.x_lower_margin = getValue(kwargs, 'xLowerMargin', 0.05)
self.x_upper_margin = getValue(kwargs, 'xUpperMargin', 0.05)
self.y_auto_range = getValue(kwargs, 'yAutoRange')
self.y_auto_range_includes_zero = getValue(kwargs,
'yAutoRangeIncludesZero')
self.y_lower_margin = getValue(kwargs, 'yLowerMargin')
self.y_upper_margin = getValue(kwargs, 'yUpperMargin')
self.y_lower_bound = getValue(kwargs, 'yLowerBound')
self.y_upper_bound = getValue(kwargs, 'yUpperBound')
self.log_y = getValue(kwargs, 'logY', False)
self.omit_checkboxes = getValue(kwargs, 'omitCheckboxes', False)
self.crosshair = getValue(kwargs, 'crosshair')
self.timezone = getValue(kwargs, 'timeZone')
self.auto_zoom = getValue(kwargs, 'autoZoom')
class XYChart(AbstractChart):
TOO_MANY_ROWS = "tooManyRows"
TOTAL_NUMBER_OF_POINTS = "totalNumberOfPoints"
NUMBER_OF_POINTS_TO_DISPLAY = "numberOfPointsToDisplay"
ROWS_LIMIT_ITEMS = "rowsLimitItems"
def __init__(self, **kwargs):
super(XYChart, self).__init__(**kwargs)
self.graphics_list = getValue(kwargs, 'graphics', [])
self.constant_lines = getValue(kwargs, 'constantLines', [])
self.constant_bands = getValue(kwargs, 'constantBands', [])
self.texts = getValue(kwargs, 'texts', [])
self.x_auto_range = getValue(kwargs, 'xAutoRange', True)
self.x_lower_bound = getValue(kwargs, 'xLowerBound', 0)
self.x_upper_bound = getValue(kwargs, 'xUpperBound', 0)
self.log_x = getValue(kwargs, 'logX', False)
self.x_log_base = getValue(kwargs, 'xLogBase', 10)
self.lodThreshold = getValue(kwargs, 'lodThreshold')
def add(self, item):
if isinstance(item, YAxis):
self.rangeAxes.append(item)
elif isinstance(item, Text):
self.texts.append(item)
elif isinstance(item, ConstantLine):
self.constant_lines.append(item)
elif isinstance(item, ConstantBand):
self.constant_bands.append(item)
elif isinstance(item, Graphics):
self.graphics_list.append(item)
elif isinstance(item, list):
for elem in item:
self.add(elem)
return self
def setYBound(self, lower, upper):
self.y_lower_bound = lower
self.y_upper_bound = upper
self.rangeAxes[0].setBound(lower, upper)
return self
def setXBound(self, lower, upper):
self.x_auto_range = False
self.x_lower_bound = lower
self.x_upper_bound = upper
return self
class HeatMapChart(XYChart):
ROWS_LIMIT = 10000
COLUMN_LIMIT = 100
def __init__(self, rows_limit, column_limit, **kwargs):
super(HeatMapChart, self).__init__(**kwargs)
self.rows_limit = rows_limit
self.column_limit = column_limit
@staticmethod
def total_points(listOfData):
return sum(map(lambda x: len(x), listOfData))
@staticmethod
def find_step_for_column(row):
step = 2
while (int(len(row) / step)) > HeatMapChart.COLUMN_LIMIT:
step += 1
return step
@staticmethod
def limit_column_in_row(row):
if len(row) > HeatMapChart.COLUMN_LIMIT:
step = HeatMapChart.find_step_for_column(row)
limited_row = list(map(lambda index: row[index],
filter(lambda s: s % step == 0,
[index for index in range(len(row))])))
return limited_row
else:
return row
@staticmethod
def limit_elements_in_row(listOfData):
return list(map(HeatMapChart.limit_column_in_row, listOfData))
@staticmethod
def limit_rows(listOfData):
step = HeatMapChart.find_step_for_column(listOfData)
limited_row = list(map(lambda index: listOfData[index],
filter(lambda s: s % step == 0,
[index for index in range(len(listOfData))])))
return limited_row
@staticmethod
def limit_Heatmap(listOfData):
limited_elements_in_row = HeatMapChart.limit_elements_in_row(listOfData)
total_points = HeatMapChart.total_points(limited_elements_in_row)
too_many_rows = total_points > HeatMapChart.ROWS_LIMIT
if too_many_rows:
return HeatMapChart.limit_rows(limited_elements_in_row)
return limited_elements_in_row
def transform(self):
self_copy = copy.copy(self)
self_copy.totalNumberOfPoints = self.total_points(self_copy.graphics_list)
self_copy.rowsLimitItems = self.rows_limit
too_many_points = self_copy.totalNumberOfPoints > self.rows_limit
if too_many_points:
limited_heat_map_data = self.limit_Heatmap(self_copy.graphics_list)
self_copy.graphics_list = limited_heat_map_data
self_copy.numberOfPointsToDisplay = self.total_points(self_copy.graphics_list)
self_copy.numberOfPointsToDisplay = self.total_points(self_copy.graphics_list)
self_copy.tooManyRows = too_many_points
return super(HeatMapChart, self_copy).transform()
class HistogramChart(XYChart):
ROWS_LIMIT = 1000000
ROWS_LIMIT_T0_INDEX = 10000
def __init__(self, **kwargs):
self.log = getValue(kwargs, 'log', False)
if self.log:
kwargs['logY'] = True
super(HistogramChart, self).__init__(**kwargs)
self.type = 'Histogram'
self.bin_count = getValue(kwargs, 'binCount')
self.cumulative = getValue(kwargs, 'cumulative', False)
self.normed = getValue(kwargs, 'normed', False)
self.range_min = getValue(kwargs, 'rangeMin')
self.range_max = getValue(kwargs, 'rangeMax')
self.names = getValue(kwargs, 'names')
self.displayMode = getValue(kwargs, 'displayMode')
color = getValue(kwargs, 'color')
if color is not None:
if isinstance(color, Color):
self.colors = []
self.colors.append(color)
else:
self.colors = color
@staticmethod
def limit_points(x):
if len(x) >= HistogramChart.ROWS_LIMIT:
return x[0:HistogramChart.ROWS_LIMIT_T0_INDEX]
return x
@staticmethod
def total_number(listOfData):
return max(list(map(lambda x: len(x), listOfData)))
def transform(self):
self_copy = copy.copy(self)
self_copy.totalNumberOfPoints = HistogramChart.total_number(self_copy.graphics_list)
self_copy.tooManyRows = self_copy.totalNumberOfPoints >= HistogramChart.ROWS_LIMIT
self_copy.rowsLimitItems = HistogramChart.ROWS_LIMIT
self_copy.numberOfPointsToDisplay = str(HistogramChart.ROWS_LIMIT_T0_INDEX) + " items"
self_copy.graphics_list = list(map(HistogramChart.limit_points, self_copy.graphics_list))
return super(HistogramChart, self_copy).transform()
class CategoryChart(XYChart):
def __init__(self, **kwargs):
super(CategoryChart, self).__init__(**kwargs)
self.type = 'CategoryPlot'
self.categoryNamesLabelAngle = getValue(kwargs,
'categoryNamesLabelAngle', 0.0)
self.categoryNames = getValue(kwargs, 'categoryNames', [])
self.y_upper_margin = getValue(kwargs, 'upperMargin', 0.0)
self.y_lower_bound = getValue(kwargs, 'lowerMargin', 0.0)
self.x_upper_margin = getValue(kwargs, 'upperMargin', 0.05)
self.x_lower_margin = getValue(kwargs, 'lowerMargin', 0.05)
self.category_margin = getValue(kwargs, 'categoryMargin', 0.2)
self.y_auto_range_includes_zero = getValue(kwargs,
'y_auto_range_includes_zero',
False)
self.y_auto_range = getValue(kwargs, 'y_auto_range', True)
self.orientation = getValue(kwargs, 'orientation')
class TreeMapChart(XYChart):
ROWS_LIMIT = 1000
def __init__(self, **kwargs):
super(TreeMapChart, self).__init__(**kwargs)
self.type = 'TreeMap'
self.showLegend = getValue(kwargs, 'showLegend', True)
self.title = getValue(kwargs, 'title', "")
self.colorProvider = getValue(kwargs, 'colorProvider',
RandomColorProvider())
self.toolTipBuilder = getValue(kwargs, 'toolTipBuilder')
self.mode = getValue(kwargs, 'mode', Mode.SQUARIFY).value
self.ratio = getValue(kwargs, 'ratio')
self.valueAccessor = getValue(kwargs, 'valueAccessor',
ValueAccessor.VALUE)
self.custom_styles = []
self.element_styles = {}
self.graphics_list = getValue(kwargs, 'root')
def transform(self):
tree_map = self
tree_map.process(tree_map.graphics_list)
count_nodes = tree_map.count_nodes(self.graphics_list, self.increase_by_one, 0)
too_many_rows = count_nodes > TreeMapChart.ROWS_LIMIT
if too_many_rows:
tree_map = copy.copy(self)
tree_map.totalNumberOfPoints = count_nodes
tree_map.rowsLimitItems = TreeMapChart.ROWS_LIMIT
tree_map.graphics_list = TreeMapReducer.limit_tree_map(TreeMapChart.ROWS_LIMIT, self.graphics_list)
tree_map.numberOfPointsToDisplay = str(
tree_map.count_nodes(tree_map.graphics_list, self.increase_by_one_when_leaf, 0)
) + " leaves"
tree_map.tooManyRows = too_many_rows
return super(TreeMapChart, tree_map).transform()
def process(self, node):
children = node.children
if children is not None:
for child in children:
self.process(child)
node.user_object["isLeaf"] = node.isLeaf()
if node.isLeaf():
node.color = self.colorProvider.getColor(node)
toolTipBuilder = self.toolTipBuilder
if toolTipBuilder is not None:
node.tooltip = toolTipBuilder.getToolTip(node)
@staticmethod
def increase_by_one(node, count):
return count + 1
@staticmethod
def increase_by_one_when_leaf(node, count):
if node.user_object["isLeaf"]:
count = count + 1
return count
def count_nodes(self, node, increase_fun, count):
count = increase_fun(node, count)
children = node.children
if children is not None:
for child in children:
count = self.count_nodes(child, increase_fun, count)
return count
class CombinedChart(BaseObject):
def __init__(self, **kwargs):
super(CombinedChart, self).__init__(**kwargs)
self.init_width = getValue(kwargs, 'initWidth', 640)
self.init_height = getValue(kwargs, 'initHeight', 480)
self.title = getValue(kwargs, 'title')
self.x_label = getValue(kwargs, 'xLabel', 'Linear')
self.plots = getValue(kwargs, 'plots', [])
self.weights = getValue(kwargs, 'weights', [])
self.auto_zoom = getValue(kwargs, 'autoZoom')
self.version = 'groovy'
self.type = 'CombinedPlot'
self.y_tickLabels_visible = True
self.x_tickLabels_visible = True
self.plot_type = 'Plot'
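# --- Hedged usage sketch (editorial addition, not part of the original module) ---
# The downsampling helpers on HeatMapChart are plain staticmethods, so their
# behaviour can be checked in isolation; the numbers are illustrative only.
#   row = list(range(250))                        # wider than COLUMN_LIMIT (100)
#   kept = HeatMapChart.limit_column_in_row(row)  # keeps every 3rd element here
#   assert len(kept) <= HeatMapChart.COLUMN_LIMIT + 1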
| 40.43465
| 111
| 0.642261
|
a47f0a8e6de64f9cf02d2f23dfeb307733eaedc4
| 535
|
py
|
Python
|
functions/Osfile.py
|
DhruvBajaj01/Virtual-Assistant
|
bba3c4122e1cc93f31b1e0894b6192739d51a6b4
|
[
"MIT"
] | 3
|
2022-02-21T06:59:46.000Z
|
2022-03-01T06:38:58.000Z
|
functions/Osfile.py
|
DhruvBajaj01/Virtual-Assistant
|
bba3c4122e1cc93f31b1e0894b6192739d51a6b4
|
[
"MIT"
] | null | null | null |
functions/Osfile.py
|
DhruvBajaj01/Virtual-Assistant
|
bba3c4122e1cc93f31b1e0894b6192739d51a6b4
|
[
"MIT"
] | null | null | null |
import os
import subprocess as sp
paths = {
'notepad': "C:/Program Files (x86)/Notepad++/notepad++.exe",
'discord': "C:\\Users\\Dhruv Bajaj\\AppData\\Local\\Discord\\app-1.0.9003\\Discord.exe"
}
def open_notepad():
os.startfile(paths['notepad'])
def open_discord():
os.startfile(paths['discord'])
def open_cmd():
os.system('start cmd')
def open_camera():
sp.run('start microsoft.windows.camera:', shell=True)
def open_calculator():
sp.run('start microsoft.windows.Calculator:', shell=True)
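# --- Editorial note (hedged): these helpers assume Windows (``os.startfile`` and the
# ``start`` shell command), and the ``paths`` entries above are machine-specific.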
| 17.258065
| 91
| 0.663551
|
26385ec1cadd9ffff7f1cbc157d61a1978008530
| 2,687
|
py
|
Python
|
app/common/fileutil.py
|
rahulsh1/flamescope
|
15a6914d0848503e0153d438597178970ea56a9c
|
[
"Apache-2.0"
] | null | null | null |
app/common/fileutil.py
|
rahulsh1/flamescope
|
15a6914d0848503e0153d438597178970ea56a9c
|
[
"Apache-2.0"
] | null | null | null |
app/common/fileutil.py
|
rahulsh1/flamescope
|
15a6914d0848503e0153d438597178970ea56a9c
|
[
"Apache-2.0"
] | null | null | null |
# This file is part of FlameScope, a performance analysis tool created by the
# Netflix cloud performance team. See:
#
# https://github.com/Netflix/flamescope
#
# Copyright 2018 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import magic
import gzip
import json
from json import JSONDecodeError
from os.path import abspath
from app.common.error import InvalidFileError
from app.perf.regexp import event_regexp
from app import config
invalidchars = re.compile('[^a-zA-Z0-9.,/_%+: -\\\\]')
def validpath(file_path):
if invalidchars.search(file_path):
return False
if not os.path.exists(file_path):
return False
return True
def get_file_mime(file_path):
return magic.from_file(file_path, mime=True)
def is_perf_file(f):
for line in f:
if (line[0] == '#'):
continue
r = event_regexp.search(line)
if r:
return True
return False
def get_file(file_path):
# ensure the file is below PROFILE_DIR:
if not abspath(file_path).startswith(abspath(config.PROFILE_DIR)):
raise InvalidFileError("File %s is not in PROFILE_DIR" % file_path)
if not validpath(file_path):
raise InvalidFileError("Invalid characters or file %s does not exist." % file_path)
mime = get_file_mime(file_path)
if mime in ['application/x-gzip', 'application/gzip']:
return gzip.open(file_path, 'rt')
elif mime == 'text/plain':
return open(file_path, 'r')
else:
raise InvalidFileError('Unknown mime type.')
def get_profile_type(file_path):
f = get_file(file_path)
if is_perf_file(f):
f.close()
return ('perf_script', None)
else:
try:
f.seek(0)
r = json.load(f)
f.close()
if isinstance(r, list):
if 'ph' in r[0]:
return ('trace_event', r)
elif 'nodes' in r:
return ('cpuprofile', r)
raise InvalidFileError('Unknown JSON file.')
except JSONDecodeError:
f.close()
raise InvalidFileError('Unknown file type.')
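# --- Hedged usage sketch (editorial addition, not part of the original module) ---
# ``get_profile_type`` both validates the path against config.PROFILE_DIR and
# sniffs the format; the file name below is hypothetical.
#   profile_type, parsed = get_profile_type('myapp/perf.script.gz')
#   if profile_type == 'perf_script':
#       pass   # parsed is None for perf output; callers re-open the file
#   elif profile_type in ('trace_event', 'cpuprofile'):
#       pass   # parsed already holds the decoded JSON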
| 29.206522
| 91
| 0.649051
|
113589f123679efad0fbe54ccdbfe3cac9860ccb
| 2,560
|
py
|
Python
|
vispy/gloo/gl/desktop.py
|
robmcmullen/vispy
|
8d5092fdae4a24fc364ae51c7e34e12d3fd6d0a2
|
[
"BSD-3-Clause"
] | null | null | null |
vispy/gloo/gl/desktop.py
|
robmcmullen/vispy
|
8d5092fdae4a24fc364ae51c7e34e12d3fd6d0a2
|
[
"BSD-3-Clause"
] | null | null | null |
vispy/gloo/gl/desktop.py
|
robmcmullen/vispy
|
8d5092fdae4a24fc364ae51c7e34e12d3fd6d0a2
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2014, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
""" GL ES 2.0 API implemented via desktop GL (i.e subset of normal OpenGL).
"""
import os
import sys
import ctypes.util
from . import _copy_gl_functions
from ._constants import * # noqa
# Ctypes stuff
# Load the OpenGL library. We more or less follow the same approach
# as PyOpenGL does internally
_have_get_proc_address = False
_lib = os.getenv('VISPY_GL_LIB', '')
if _lib != '':
if sys.platform.startswith('win'):
_lib = ctypes.windll.LoadLibrary(_lib)
else:
_lib = ctypes.cdll.LoadLibrary(_lib)
elif sys.platform.startswith('win'):
# Windows
_lib = ctypes.windll.opengl32
try:
wglGetProcAddress = _lib.wglGetProcAddress
wglGetProcAddress.restype = ctypes.CFUNCTYPE(
ctypes.POINTER(ctypes.c_int))
wglGetProcAddress.argtypes = [ctypes.c_char_p]
_have_get_proc_address = True
except AttributeError:
pass
else:
# Unix-ish
if sys.platform.startswith('darwin'):
_fname = ctypes.util.find_library('OpenGL')
else:
_fname = ctypes.util.find_library('GL')
if not _fname:
raise RuntimeError('Could not load OpenGL library.')
# Load lib
_lib = ctypes.cdll.LoadLibrary(_fname)
def _have_context():
return _lib.glGetError() != 1282 # GL_INVALID_OPERATION
def _get_gl_func(name, restype, argtypes):
# Based on a function in Pyglet
try:
# Try using normal ctypes stuff
func = getattr(_lib, name)
func.restype = restype
func.argtypes = argtypes
return func
except AttributeError:
if sys.platform.startswith('win'):
# Ask for a pointer to the function, this is the approach
# for OpenGL extensions on Windows
fargs = (restype,) + argtypes
ftype = ctypes.WINFUNCTYPE(*fargs)
if not _have_get_proc_address:
raise RuntimeError('Function %s not available.' % name)
if not _have_context():
raise RuntimeError('Using %s with no OpenGL context.' % name)
address = wglGetProcAddress(name.encode('utf-8'))
if address:
return ctypes.cast(address, ftype)
# If not Windows or if we did not return function object on Windows:
raise RuntimeError('Function %s not present in context.' % name)
# Inject
from . import _desktop
_copy_gl_functions(_desktop, globals())
| 30.47619
| 77
| 0.652344
|
5c8833e4cf914a7600f27982f9a03d6cd6b7cfac
| 283
|
py
|
Python
|
install.py
|
ia2c/aptamers
|
b4152c0383248bbbaa7c80581cee322bf4c6bb3a
|
[
"MIT"
] | 1
|
2017-03-21T12:30:29.000Z
|
2017-03-21T12:30:29.000Z
|
install.py
|
ia2c/aptamers
|
b4152c0383248bbbaa7c80581cee322bf4c6bb3a
|
[
"MIT"
] | null | null | null |
install.py
|
ia2c/aptamers
|
b4152c0383248bbbaa7c80581cee322bf4c6bb3a
|
[
"MIT"
] | null | null | null |
fichier = open("Rosetta/main/source/src/apps.src.settings","r")
commands = fichier.readlines()
fichier.close()
commands = commands[:-1] + [" 'cifparse',\n"] + [commands[-1]]
fichier = open("Rosetta/main/source/src/apps.src.settings","w")
fichier.writelines(commands)
fichier.close()
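# --- Editorial note (hedged): the script above splices " 'cifparse',\n" into
# Rosetta's apps.src.settings just before its final line, so it has to be run
# from the directory that contains the Rosetta/ checkout.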
| 35.375
| 63
| 0.713781
|
7fe9c4a7cb496a10f99e7632b34bc27194810bc6
| 38,771
|
py
|
Python
|
src/storage-preview/azext_storage_preview/vendored_sdks/azure_storagev2/fileshare/v2020_02_10/_generated/models/_models_py3.py
|
Mannan2812/azure-cli-extensions
|
e2b34efe23795f6db9c59100534a40f0813c3d95
|
[
"MIT"
] | 207
|
2017-11-29T06:59:41.000Z
|
2022-03-31T10:00:53.000Z
|
src/storage-preview/azext_storage_preview/vendored_sdks/azure_storagev2/fileshare/v2020_02_10/_generated/models/_models_py3.py
|
Mannan2812/azure-cli-extensions
|
e2b34efe23795f6db9c59100534a40f0813c3d95
|
[
"MIT"
] | 4,061
|
2017-10-27T23:19:56.000Z
|
2022-03-31T23:18:30.000Z
|
src/storage-preview/azext_storage_preview/vendored_sdks/azure_storagev2/fileshare/v2020_02_10/_generated/models/_models_py3.py
|
Mannan2812/azure-cli-extensions
|
e2b34efe23795f6db9c59100534a40f0813c3d95
|
[
"MIT"
] | 802
|
2017-10-11T17:36:26.000Z
|
2022-03-31T22:24:32.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
from azure.core.exceptions import HttpResponseError
class AccessPolicy(Model):
"""An Access policy.
:param start: The date-time the policy is active.
:type start: str
:param expiry: The date-time the policy expires.
:type expiry: str
:param permission: The permissions for the ACL policy.
:type permission: str
"""
_attribute_map = {
'start': {'key': 'Start', 'type': 'str', 'xml': {'name': 'Start'}},
'expiry': {'key': 'Expiry', 'type': 'str', 'xml': {'name': 'Expiry'}},
'permission': {'key': 'Permission', 'type': 'str', 'xml': {'name': 'Permission'}},
}
_xml_map = {
}
def __init__(self, *, start: str=None, expiry: str=None, permission: str=None, **kwargs) -> None:
super(AccessPolicy, self).__init__(**kwargs)
self.start = start
self.expiry = expiry
self.permission = permission
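# Hypothetical usage sketch (not part of the generated code): a stored access
# policy granting read, write and delete for one day, expressed as ISO 8601
# strings.
#
#     policy = AccessPolicy(start='2021-01-01T00:00:00Z',
#                           expiry='2021-01-02T00:00:00Z',
#                           permission='rwd')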
class ClearRange(Model):
"""ClearRange.
All required parameters must be populated in order to send to Azure.
:param start: Required.
:type start: long
:param end: Required.
:type end: long
"""
_validation = {
'start': {'required': True},
'end': {'required': True},
}
_attribute_map = {
'start': {'key': 'Start', 'type': 'long', 'xml': {'name': 'Start'}},
'end': {'key': 'End', 'type': 'long', 'xml': {'name': 'End'}},
}
_xml_map = {
'name': 'ClearRange'
}
def __init__(self, *, start: int, end: int, **kwargs) -> None:
super(ClearRange, self).__init__(**kwargs)
self.start = start
self.end = end
class CopyFileSmbInfo(Model):
"""Additional parameters for start_copy operation.
:param file_permission_copy_mode: Specifies the option to copy file
security descriptor from source file or to set it using the value which is
defined by the header value of x-ms-file-permission or
x-ms-file-permission-key. Possible values include: 'source', 'override'
:type file_permission_copy_mode: str or
~azure.storage.fileshare.models.PermissionCopyModeType
:param ignore_read_only: Specifies the option to overwrite the target file
if it already exists and has read-only attribute set.
:type ignore_read_only: bool
:param file_attributes: Specifies either the option to copy file
attributes from a source file(source) to a target file or a list of
attributes to set on a target file.
:type file_attributes: str
:param file_creation_time: Specifies either the option to copy file
creation time from a source file(source) to a target file or a time value
in ISO 8601 format to set as creation time on a target file.
:type file_creation_time: str
:param file_last_write_time: Specifies either the option to copy file last
write time from a source file(source) to a target file or a time value in
ISO 8601 format to set as last write time on a target file.
:type file_last_write_time: str
:param set_archive_attribute: Specifies the option to set archive
attribute on a target file. True means archive attribute will be set on a
target file despite attribute overrides or a source file state.
:type set_archive_attribute: bool
"""
_attribute_map = {
'file_permission_copy_mode': {'key': '', 'type': 'PermissionCopyModeType', 'xml': {'name': 'file_permission_copy_mode'}},
'ignore_read_only': {'key': '', 'type': 'bool', 'xml': {'name': 'ignore_read_only'}},
'file_attributes': {'key': '', 'type': 'str', 'xml': {'name': 'file_attributes'}},
'file_creation_time': {'key': '', 'type': 'str', 'xml': {'name': 'file_creation_time'}},
'file_last_write_time': {'key': '', 'type': 'str', 'xml': {'name': 'file_last_write_time'}},
'set_archive_attribute': {'key': '', 'type': 'bool', 'xml': {'name': 'set_archive_attribute'}},
}
_xml_map = {
}
def __init__(self, *, file_permission_copy_mode=None, ignore_read_only: bool=None, file_attributes: str=None, file_creation_time: str=None, file_last_write_time: str=None, set_archive_attribute: bool=None, **kwargs) -> None:
super(CopyFileSmbInfo, self).__init__(**kwargs)
self.file_permission_copy_mode = file_permission_copy_mode
self.ignore_read_only = ignore_read_only
self.file_attributes = file_attributes
self.file_creation_time = file_creation_time
self.file_last_write_time = file_last_write_time
self.set_archive_attribute = set_archive_attribute
class CorsRule(Model):
"""CORS is an HTTP feature that enables a web application running under one
domain to access resources in another domain. Web browsers implement a
security restriction known as same-origin policy that prevents a web page
from calling APIs in a different domain; CORS provides a secure way to
allow one domain (the origin domain) to call APIs in another domain.
All required parameters must be populated in order to send to Azure.
:param allowed_origins: Required. The origin domains that are permitted to
make a request against the storage service via CORS. The origin domain is
the domain from which the request originates. Note that the origin must be
    an exact case-sensitive match with the origin that the user agent sends to
the service. You can also use the wildcard character '*' to allow all
origin domains to make requests via CORS.
:type allowed_origins: str
:param allowed_methods: Required. The methods (HTTP request verbs) that
the origin domain may use for a CORS request. (comma separated)
:type allowed_methods: str
:param allowed_headers: Required. The request headers that the origin
domain may specify on the CORS request.
:type allowed_headers: str
:param exposed_headers: Required. The response headers that may be sent in
the response to the CORS request and exposed by the browser to the request
issuer.
:type exposed_headers: str
:param max_age_in_seconds: Required. The maximum amount time that a
browser should cache the preflight OPTIONS request.
:type max_age_in_seconds: int
"""
_validation = {
'allowed_origins': {'required': True},
'allowed_methods': {'required': True},
'allowed_headers': {'required': True},
'exposed_headers': {'required': True},
'max_age_in_seconds': {'required': True, 'minimum': 0},
}
_attribute_map = {
'allowed_origins': {'key': 'AllowedOrigins', 'type': 'str', 'xml': {'name': 'AllowedOrigins'}},
'allowed_methods': {'key': 'AllowedMethods', 'type': 'str', 'xml': {'name': 'AllowedMethods'}},
'allowed_headers': {'key': 'AllowedHeaders', 'type': 'str', 'xml': {'name': 'AllowedHeaders'}},
'exposed_headers': {'key': 'ExposedHeaders', 'type': 'str', 'xml': {'name': 'ExposedHeaders'}},
'max_age_in_seconds': {'key': 'MaxAgeInSeconds', 'type': 'int', 'xml': {'name': 'MaxAgeInSeconds'}},
}
_xml_map = {
}
def __init__(self, *, allowed_origins: str, allowed_methods: str, allowed_headers: str, exposed_headers: str, max_age_in_seconds: int, **kwargs) -> None:
super(CorsRule, self).__init__(**kwargs)
self.allowed_origins = allowed_origins
self.allowed_methods = allowed_methods
self.allowed_headers = allowed_headers
self.exposed_headers = exposed_headers
self.max_age_in_seconds = max_age_in_seconds
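# Hypothetical usage sketch: a permissive CORS rule for browser clients; the
# values below are illustrative, not defaults.
#
#     rule = CorsRule(allowed_origins='*', allowed_methods='GET,PUT',
#                     allowed_headers='*', exposed_headers='*',
#                     max_age_in_seconds=3600)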
class DirectoryItem(Model):
"""A listed directory item.
All required parameters must be populated in order to send to Azure.
:param name: Required.
:type name: str
"""
_validation = {
'name': {'required': True},
}
_attribute_map = {
'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}},
}
_xml_map = {
'name': 'Directory'
}
def __init__(self, *, name: str, **kwargs) -> None:
super(DirectoryItem, self).__init__(**kwargs)
self.name = name
class FileHTTPHeaders(Model):
"""Additional parameters for a set of operations, such as: File_create,
File_set_http_headers.
:param file_content_type: Sets the MIME content type of the file. The
default type is 'application/octet-stream'.
:type file_content_type: str
:param file_content_encoding: Specifies which content encodings have been
applied to the file.
:type file_content_encoding: str
:param file_content_language: Specifies the natural languages used by this
resource.
:type file_content_language: str
:param file_cache_control: Sets the file's cache control. The File service
stores this value but does not use or modify it.
:type file_cache_control: str
:param file_content_md5: Sets the file's MD5 hash.
:type file_content_md5: bytearray
:param file_content_disposition: Sets the file's Content-Disposition
header.
:type file_content_disposition: str
"""
_attribute_map = {
'file_content_type': {'key': '', 'type': 'str', 'xml': {'name': 'file_content_type'}},
'file_content_encoding': {'key': '', 'type': 'str', 'xml': {'name': 'file_content_encoding'}},
'file_content_language': {'key': '', 'type': 'str', 'xml': {'name': 'file_content_language'}},
'file_cache_control': {'key': '', 'type': 'str', 'xml': {'name': 'file_cache_control'}},
'file_content_md5': {'key': '', 'type': 'bytearray', 'xml': {'name': 'file_content_md5'}},
'file_content_disposition': {'key': '', 'type': 'str', 'xml': {'name': 'file_content_disposition'}},
}
_xml_map = {
}
def __init__(self, *, file_content_type: str=None, file_content_encoding: str=None, file_content_language: str=None, file_cache_control: str=None, file_content_md5: bytearray=None, file_content_disposition: str=None, **kwargs) -> None:
super(FileHTTPHeaders, self).__init__(**kwargs)
self.file_content_type = file_content_type
self.file_content_encoding = file_content_encoding
self.file_content_language = file_content_language
self.file_cache_control = file_cache_control
self.file_content_md5 = file_content_md5
self.file_content_disposition = file_content_disposition
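# Hypothetical usage sketch: typical headers for an uploaded text file; only
# the fields shown are set, the rest remain None.
#
#     headers = FileHTTPHeaders(file_content_type='text/plain',
#                               file_cache_control='no-cache')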
class FileItem(Model):
"""A listed file item.
All required parameters must be populated in order to send to Azure.
:param name: Required.
:type name: str
:param properties: Required.
:type properties: ~azure.storage.fileshare.models.FileProperty
"""
_validation = {
'name': {'required': True},
'properties': {'required': True},
}
_attribute_map = {
'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}},
'properties': {'key': 'Properties', 'type': 'FileProperty', 'xml': {'name': 'Properties'}},
}
_xml_map = {
'name': 'File'
}
def __init__(self, *, name: str, properties, **kwargs) -> None:
super(FileItem, self).__init__(**kwargs)
self.name = name
self.properties = properties
class FileProperty(Model):
"""File properties.
All required parameters must be populated in order to send to Azure.
:param content_length: Required. Content length of the file. This value
may not be up-to-date since an SMB client may have modified the file
locally. The value of Content-Length may not reflect that fact until the
handle is closed or the op-lock is broken. To retrieve current property
values, call Get File Properties.
:type content_length: long
"""
_validation = {
'content_length': {'required': True},
}
_attribute_map = {
'content_length': {'key': 'Content-Length', 'type': 'long', 'xml': {'name': 'Content-Length'}},
}
_xml_map = {
}
def __init__(self, *, content_length: int, **kwargs) -> None:
super(FileProperty, self).__init__(**kwargs)
self.content_length = content_length
class FileRange(Model):
"""An Azure Storage file range.
All required parameters must be populated in order to send to Azure.
:param start: Required. Start of the range.
:type start: long
:param end: Required. End of the range.
:type end: long
"""
_validation = {
'start': {'required': True},
'end': {'required': True},
}
_attribute_map = {
'start': {'key': 'Start', 'type': 'long', 'xml': {'name': 'Start'}},
'end': {'key': 'End', 'type': 'long', 'xml': {'name': 'End'}},
}
_xml_map = {
'name': 'Range'
}
def __init__(self, *, start: int, end: int, **kwargs) -> None:
super(FileRange, self).__init__(**kwargs)
self.start = start
self.end = end
class FilesAndDirectoriesListSegment(Model):
"""Abstract for entries that can be listed from Directory.
All required parameters must be populated in order to send to Azure.
:param directory_items: Required.
:type directory_items: list[~azure.storage.fileshare.models.DirectoryItem]
:param file_items: Required.
:type file_items: list[~azure.storage.fileshare.models.FileItem]
"""
_validation = {
'directory_items': {'required': True},
'file_items': {'required': True},
}
_attribute_map = {
'directory_items': {'key': 'DirectoryItems', 'type': '[DirectoryItem]', 'xml': {'name': 'DirectoryItems', 'itemsName': 'Directory'}},
'file_items': {'key': 'FileItems', 'type': '[FileItem]', 'xml': {'name': 'FileItems', 'itemsName': 'File'}},
}
_xml_map = {
'name': 'Entries'
}
def __init__(self, *, directory_items, file_items, **kwargs) -> None:
super(FilesAndDirectoriesListSegment, self).__init__(**kwargs)
self.directory_items = directory_items
self.file_items = file_items
class HandleItem(Model):
"""A listed Azure Storage handle item.
All required parameters must be populated in order to send to Azure.
:param handle_id: Required. XSMB service handle ID
:type handle_id: str
:param path: Required. File or directory name including full path starting
from share root
:type path: str
:param file_id: Required. FileId uniquely identifies the file or
directory.
:type file_id: str
:param parent_id: ParentId uniquely identifies the parent directory of the
object.
:type parent_id: str
:param session_id: Required. SMB session ID in context of which the file
handle was opened
:type session_id: str
:param client_ip: Required. Client IP that opened the handle
:type client_ip: str
:param open_time: Required. Time when the session that previously opened
the handle has last been reconnected. (UTC)
:type open_time: datetime
:param last_reconnect_time: Time handle was last connected to (UTC)
:type last_reconnect_time: datetime
"""
_validation = {
'handle_id': {'required': True},
'path': {'required': True},
'file_id': {'required': True},
'session_id': {'required': True},
'client_ip': {'required': True},
'open_time': {'required': True},
}
_attribute_map = {
'handle_id': {'key': 'HandleId', 'type': 'str', 'xml': {'name': 'HandleId'}},
'path': {'key': 'Path', 'type': 'str', 'xml': {'name': 'Path'}},
'file_id': {'key': 'FileId', 'type': 'str', 'xml': {'name': 'FileId'}},
'parent_id': {'key': 'ParentId', 'type': 'str', 'xml': {'name': 'ParentId'}},
'session_id': {'key': 'SessionId', 'type': 'str', 'xml': {'name': 'SessionId'}},
'client_ip': {'key': 'ClientIp', 'type': 'str', 'xml': {'name': 'ClientIp'}},
'open_time': {'key': 'OpenTime', 'type': 'rfc-1123', 'xml': {'name': 'OpenTime'}},
'last_reconnect_time': {'key': 'LastReconnectTime', 'type': 'rfc-1123', 'xml': {'name': 'LastReconnectTime'}},
}
_xml_map = {
'name': 'Handle'
}
def __init__(self, *, handle_id: str, path: str, file_id: str, session_id: str, client_ip: str, open_time, parent_id: str=None, last_reconnect_time=None, **kwargs) -> None:
super(HandleItem, self).__init__(**kwargs)
self.handle_id = handle_id
self.path = path
self.file_id = file_id
self.parent_id = parent_id
self.session_id = session_id
self.client_ip = client_ip
self.open_time = open_time
self.last_reconnect_time = last_reconnect_time
class LeaseAccessConditions(Model):
"""Additional parameters for a set of operations.
:param lease_id: If specified, the operation only succeeds if the
resource's lease is active and matches this ID.
:type lease_id: str
"""
_attribute_map = {
'lease_id': {'key': '', 'type': 'str', 'xml': {'name': 'lease_id'}},
}
_xml_map = {
}
def __init__(self, *, lease_id: str=None, **kwargs) -> None:
super(LeaseAccessConditions, self).__init__(**kwargs)
self.lease_id = lease_id
class ListFilesAndDirectoriesSegmentResponse(Model):
"""An enumeration of directories and files.
All required parameters must be populated in order to send to Azure.
:param service_endpoint: Required.
:type service_endpoint: str
:param share_name: Required.
:type share_name: str
:param share_snapshot:
:type share_snapshot: str
:param directory_path: Required.
:type directory_path: str
:param prefix: Required.
:type prefix: str
:param marker:
:type marker: str
:param max_results:
:type max_results: int
:param segment: Required.
:type segment:
~azure.storage.fileshare.models.FilesAndDirectoriesListSegment
:param next_marker: Required.
:type next_marker: str
"""
_validation = {
'service_endpoint': {'required': True},
'share_name': {'required': True},
'directory_path': {'required': True},
'prefix': {'required': True},
'segment': {'required': True},
'next_marker': {'required': True},
}
_attribute_map = {
'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'name': 'ServiceEndpoint', 'attr': True}},
'share_name': {'key': 'ShareName', 'type': 'str', 'xml': {'name': 'ShareName', 'attr': True}},
'share_snapshot': {'key': 'ShareSnapshot', 'type': 'str', 'xml': {'name': 'ShareSnapshot', 'attr': True}},
'directory_path': {'key': 'DirectoryPath', 'type': 'str', 'xml': {'name': 'DirectoryPath', 'attr': True}},
'prefix': {'key': 'Prefix', 'type': 'str', 'xml': {'name': 'Prefix'}},
'marker': {'key': 'Marker', 'type': 'str', 'xml': {'name': 'Marker'}},
'max_results': {'key': 'MaxResults', 'type': 'int', 'xml': {'name': 'MaxResults'}},
'segment': {'key': 'Segment', 'type': 'FilesAndDirectoriesListSegment', 'xml': {'name': 'Segment'}},
'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}},
}
_xml_map = {
'name': 'EnumerationResults'
}
def __init__(self, *, service_endpoint: str, share_name: str, directory_path: str, prefix: str, segment, next_marker: str, share_snapshot: str=None, marker: str=None, max_results: int=None, **kwargs) -> None:
super(ListFilesAndDirectoriesSegmentResponse, self).__init__(**kwargs)
self.service_endpoint = service_endpoint
self.share_name = share_name
self.share_snapshot = share_snapshot
self.directory_path = directory_path
self.prefix = prefix
self.marker = marker
self.max_results = max_results
self.segment = segment
self.next_marker = next_marker
class ListHandlesResponse(Model):
"""An enumeration of handles.
All required parameters must be populated in order to send to Azure.
:param handle_list:
:type handle_list: list[~azure.storage.fileshare.models.HandleItem]
:param next_marker: Required.
:type next_marker: str
"""
_validation = {
'next_marker': {'required': True},
}
_attribute_map = {
'handle_list': {'key': 'HandleList', 'type': '[HandleItem]', 'xml': {'name': 'Entries', 'itemsName': 'Entries', 'wrapped': True}},
'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}},
}
_xml_map = {
'name': 'EnumerationResults'
}
def __init__(self, *, next_marker: str, handle_list=None, **kwargs) -> None:
super(ListHandlesResponse, self).__init__(**kwargs)
self.handle_list = handle_list
self.next_marker = next_marker
class ListSharesResponse(Model):
"""An enumeration of shares.
All required parameters must be populated in order to send to Azure.
:param service_endpoint: Required.
:type service_endpoint: str
:param prefix:
:type prefix: str
:param marker:
:type marker: str
:param max_results:
:type max_results: int
:param share_items:
:type share_items: list[~azure.storage.fileshare.models.ShareItem]
:param next_marker: Required.
:type next_marker: str
"""
_validation = {
'service_endpoint': {'required': True},
'next_marker': {'required': True},
}
_attribute_map = {
'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'name': 'ServiceEndpoint', 'attr': True}},
'prefix': {'key': 'Prefix', 'type': 'str', 'xml': {'name': 'Prefix'}},
'marker': {'key': 'Marker', 'type': 'str', 'xml': {'name': 'Marker'}},
'max_results': {'key': 'MaxResults', 'type': 'int', 'xml': {'name': 'MaxResults'}},
'share_items': {'key': 'ShareItems', 'type': '[ShareItem]', 'xml': {'name': 'Shares', 'itemsName': 'Shares', 'wrapped': True}},
'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}},
}
_xml_map = {
'name': 'EnumerationResults'
}
def __init__(self, *, service_endpoint: str, next_marker: str, prefix: str=None, marker: str=None, max_results: int=None, share_items=None, **kwargs) -> None:
super(ListSharesResponse, self).__init__(**kwargs)
self.service_endpoint = service_endpoint
self.prefix = prefix
self.marker = marker
self.max_results = max_results
self.share_items = share_items
self.next_marker = next_marker
class Metrics(Model):
"""Storage Analytics metrics for file service.
All required parameters must be populated in order to send to Azure.
:param version: Required. The version of Storage Analytics to configure.
:type version: str
:param enabled: Required. Indicates whether metrics are enabled for the
File service.
:type enabled: bool
:param include_apis: Indicates whether metrics should generate summary
statistics for called API operations.
:type include_apis: bool
:param retention_policy:
:type retention_policy: ~azure.storage.fileshare.models.RetentionPolicy
"""
_validation = {
'version': {'required': True},
'enabled': {'required': True},
}
_attribute_map = {
'version': {'key': 'Version', 'type': 'str', 'xml': {'name': 'Version'}},
'enabled': {'key': 'Enabled', 'type': 'bool', 'xml': {'name': 'Enabled'}},
'include_apis': {'key': 'IncludeAPIs', 'type': 'bool', 'xml': {'name': 'IncludeAPIs'}},
'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy', 'xml': {'name': 'RetentionPolicy'}},
}
_xml_map = {
}
def __init__(self, *, version: str, enabled: bool, include_apis: bool=None, retention_policy=None, **kwargs) -> None:
super(Metrics, self).__init__(**kwargs)
self.version = version
self.enabled = enabled
self.include_apis = include_apis
self.retention_policy = retention_policy
class RetentionPolicy(Model):
"""The retention policy.
All required parameters must be populated in order to send to Azure.
:param enabled: Required. Indicates whether a retention policy is enabled
for the File service. If false, metrics data is retained, and the user is
responsible for deleting it.
:type enabled: bool
:param days: Indicates the number of days that metrics data should be
retained. All data older than this value will be deleted. Metrics data is
deleted on a best-effort basis after the retention period expires.
:type days: int
"""
_validation = {
'enabled': {'required': True},
'days': {'maximum': 365, 'minimum': 1},
}
_attribute_map = {
'enabled': {'key': 'Enabled', 'type': 'bool', 'xml': {'name': 'Enabled'}},
'days': {'key': 'Days', 'type': 'int', 'xml': {'name': 'Days'}},
}
_xml_map = {
}
def __init__(self, *, enabled: bool, days: int=None, **kwargs) -> None:
super(RetentionPolicy, self).__init__(**kwargs)
self.enabled = enabled
self.days = days
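# Hypothetical usage sketch combining Metrics and RetentionPolicy: hourly
# metrics kept for seven days. The Storage Analytics version string '1.0' is
# an assumption here.
#
#     hourly = Metrics(version='1.0', enabled=True, include_apis=True,
#                      retention_policy=RetentionPolicy(enabled=True, days=7))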
class ShareFileRangeList(Model):
"""The list of file ranges.
:param ranges:
:type ranges: list[~azure.storage.fileshare.models.FileRange]
:param clear_ranges:
:type clear_ranges: list[~azure.storage.fileshare.models.ClearRange]
"""
_attribute_map = {
'ranges': {'key': 'Ranges', 'type': '[FileRange]', 'xml': {'name': 'Ranges', 'itemsName': 'Range'}},
'clear_ranges': {'key': 'ClearRanges', 'type': '[ClearRange]', 'xml': {'name': 'ClearRanges', 'itemsName': 'ClearRange'}},
}
_xml_map = {
}
def __init__(self, *, ranges=None, clear_ranges=None, **kwargs) -> None:
super(ShareFileRangeList, self).__init__(**kwargs)
self.ranges = ranges
self.clear_ranges = clear_ranges
class ShareItem(Model):
"""A listed Azure Storage share item.
All required parameters must be populated in order to send to Azure.
:param name: Required.
:type name: str
:param snapshot:
:type snapshot: str
:param deleted:
:type deleted: bool
:param version:
:type version: str
:param properties: Required.
:type properties: ~azure.storage.fileshare.models.ShareProperties
:param metadata:
:type metadata: dict[str, str]
"""
_validation = {
'name': {'required': True},
'properties': {'required': True},
}
_attribute_map = {
'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}},
'snapshot': {'key': 'Snapshot', 'type': 'str', 'xml': {'name': 'Snapshot'}},
'deleted': {'key': 'Deleted', 'type': 'bool', 'xml': {'name': 'Deleted'}},
'version': {'key': 'Version', 'type': 'str', 'xml': {'name': 'Version'}},
'properties': {'key': 'Properties', 'type': 'ShareProperties', 'xml': {'name': 'Properties'}},
'metadata': {'key': 'Metadata', 'type': '{str}', 'xml': {'name': 'Metadata'}},
}
_xml_map = {
'name': 'Share'
}
def __init__(self, *, name: str, properties, snapshot: str=None, deleted: bool=None, version: str=None, metadata=None, **kwargs) -> None:
super(ShareItem, self).__init__(**kwargs)
self.name = name
self.snapshot = snapshot
self.deleted = deleted
self.version = version
self.properties = properties
self.metadata = metadata
class SharePermission(Model):
"""A permission (a security descriptor) at the share level.
All required parameters must be populated in order to send to Azure.
:param permission: Required. The permission in the Security Descriptor
Definition Language (SDDL).
:type permission: str
"""
_validation = {
'permission': {'required': True},
}
_attribute_map = {
'permission': {'key': 'permission', 'type': 'str', 'xml': {'name': 'permission'}},
}
_xml_map = {
}
def __init__(self, *, permission: str, **kwargs) -> None:
super(SharePermission, self).__init__(**kwargs)
self.permission = permission
class ShareProperties(Model):
"""Properties of a share.
All required parameters must be populated in order to send to Azure.
:param last_modified: Required.
:type last_modified: datetime
:param etag: Required.
:type etag: str
:param quota: Required.
:type quota: int
:param provisioned_iops:
:type provisioned_iops: int
:param provisioned_ingress_mbps:
:type provisioned_ingress_mbps: int
:param provisioned_egress_mbps:
:type provisioned_egress_mbps: int
:param next_allowed_quota_downgrade_time:
:type next_allowed_quota_downgrade_time: datetime
:param deleted_time:
:type deleted_time: datetime
:param remaining_retention_days:
:type remaining_retention_days: int
:param lease_status: Possible values include: 'locked', 'unlocked'
:type lease_status: str or ~azure.storage.fileshare.models.LeaseStatusType
:param lease_state: Possible values include: 'available', 'leased',
'expired', 'breaking', 'broken'
:type lease_state: str or ~azure.storage.fileshare.models.LeaseStateType
:param lease_duration: Possible values include: 'infinite', 'fixed'
:type lease_duration: str or
~azure.storage.fileshare.models.LeaseDurationType
"""
_validation = {
'last_modified': {'required': True},
'etag': {'required': True},
'quota': {'required': True},
}
_attribute_map = {
'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123', 'xml': {'name': 'Last-Modified'}},
'etag': {'key': 'Etag', 'type': 'str', 'xml': {'name': 'Etag'}},
'quota': {'key': 'Quota', 'type': 'int', 'xml': {'name': 'Quota'}},
'provisioned_iops': {'key': 'ProvisionedIops', 'type': 'int', 'xml': {'name': 'ProvisionedIops'}},
'provisioned_ingress_mbps': {'key': 'ProvisionedIngressMBps', 'type': 'int', 'xml': {'name': 'ProvisionedIngressMBps'}},
'provisioned_egress_mbps': {'key': 'ProvisionedEgressMBps', 'type': 'int', 'xml': {'name': 'ProvisionedEgressMBps'}},
'next_allowed_quota_downgrade_time': {'key': 'NextAllowedQuotaDowngradeTime', 'type': 'rfc-1123', 'xml': {'name': 'NextAllowedQuotaDowngradeTime'}},
'deleted_time': {'key': 'DeletedTime', 'type': 'rfc-1123', 'xml': {'name': 'DeletedTime'}},
'remaining_retention_days': {'key': 'RemainingRetentionDays', 'type': 'int', 'xml': {'name': 'RemainingRetentionDays'}},
'lease_status': {'key': 'LeaseStatus', 'type': 'LeaseStatusType', 'xml': {'name': 'LeaseStatus'}},
'lease_state': {'key': 'LeaseState', 'type': 'LeaseStateType', 'xml': {'name': 'LeaseState'}},
'lease_duration': {'key': 'LeaseDuration', 'type': 'LeaseDurationType', 'xml': {'name': 'LeaseDuration'}},
}
_xml_map = {
}
def __init__(self, *, last_modified, etag: str, quota: int, provisioned_iops: int=None, provisioned_ingress_mbps: int=None, provisioned_egress_mbps: int=None, next_allowed_quota_downgrade_time=None, deleted_time=None, remaining_retention_days: int=None, lease_status=None, lease_state=None, lease_duration=None, **kwargs) -> None:
super(ShareProperties, self).__init__(**kwargs)
self.last_modified = last_modified
self.etag = etag
self.quota = quota
self.provisioned_iops = provisioned_iops
self.provisioned_ingress_mbps = provisioned_ingress_mbps
self.provisioned_egress_mbps = provisioned_egress_mbps
self.next_allowed_quota_downgrade_time = next_allowed_quota_downgrade_time
self.deleted_time = deleted_time
self.remaining_retention_days = remaining_retention_days
self.lease_status = lease_status
self.lease_state = lease_state
self.lease_duration = lease_duration
class ShareProtocolSettings(Model):
"""Protocol settings.
:param smb: Settings for SMB protocol.
:type smb: ~azure.storage.fileshare.models.ShareSmbSettings
"""
_attribute_map = {
'smb': {'key': 'Smb', 'type': 'ShareSmbSettings', 'xml': {'name': 'SMB'}},
}
_xml_map = {
}
def __init__(self, *, smb=None, **kwargs) -> None:
super(ShareProtocolSettings, self).__init__(**kwargs)
self.smb = smb
class ShareSmbSettings(Model):
"""Settings for SMB protocol.
:param multichannel: Settings for SMB Multichannel.
:type multichannel: ~azure.storage.fileshare.models.SmbMultichannel
"""
_attribute_map = {
'multichannel': {'key': 'Multichannel', 'type': 'SmbMultichannel', 'xml': {'name': 'Multichannel'}},
}
_xml_map = {
}
def __init__(self, *, multichannel=None, **kwargs) -> None:
super(ShareSmbSettings, self).__init__(**kwargs)
self.multichannel = multichannel
class ShareStats(Model):
"""Stats for the share.
All required parameters must be populated in order to send to Azure.
:param share_usage_bytes: Required. The approximate size of the data
stored in bytes. Note that this value may not include all recently created
or recently resized files.
:type share_usage_bytes: int
"""
_validation = {
'share_usage_bytes': {'required': True},
}
_attribute_map = {
'share_usage_bytes': {'key': 'ShareUsageBytes', 'type': 'int', 'xml': {'name': 'ShareUsageBytes'}},
}
_xml_map = {
}
def __init__(self, *, share_usage_bytes: int, **kwargs) -> None:
super(ShareStats, self).__init__(**kwargs)
self.share_usage_bytes = share_usage_bytes
class SignedIdentifier(Model):
"""Signed identifier.
All required parameters must be populated in order to send to Azure.
:param id: Required. A unique id.
:type id: str
:param access_policy: The access policy.
:type access_policy: ~azure.storage.fileshare.models.AccessPolicy
"""
_validation = {
'id': {'required': True},
}
_attribute_map = {
'id': {'key': 'Id', 'type': 'str', 'xml': {'name': 'Id'}},
'access_policy': {'key': 'AccessPolicy', 'type': 'AccessPolicy', 'xml': {'name': 'AccessPolicy'}},
}
_xml_map = {
}
def __init__(self, *, id: str, access_policy=None, **kwargs) -> None:
super(SignedIdentifier, self).__init__(**kwargs)
self.id = id
self.access_policy = access_policy
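# Hypothetical usage sketch: wrapping the AccessPolicy sketched earlier in a
# SignedIdentifier so it can be applied as a share-level ACL entry.
#
#     identifier = SignedIdentifier(id='policy-1', access_policy=policy)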
class SmbMultichannel(Model):
"""Settings for SMB multichannel.
:param enabled: If SMB multichannel is enabled.
:type enabled: bool
"""
_attribute_map = {
'enabled': {'key': 'Enabled', 'type': 'bool', 'xml': {'name': 'Enabled'}},
}
_xml_map = {
'name': 'Multichannel'
}
def __init__(self, *, enabled: bool=None, **kwargs) -> None:
super(SmbMultichannel, self).__init__(**kwargs)
self.enabled = enabled
class SourceModifiedAccessConditions(Model):
"""Additional parameters for upload_range_from_url operation.
:param source_if_match_crc64: Specify the crc64 value to operate only on
range with a matching crc64 checksum.
:type source_if_match_crc64: bytearray
:param source_if_none_match_crc64: Specify the crc64 value to operate only
on range without a matching crc64 checksum.
:type source_if_none_match_crc64: bytearray
"""
_attribute_map = {
'source_if_match_crc64': {'key': '', 'type': 'bytearray', 'xml': {'name': 'source_if_match_crc64'}},
'source_if_none_match_crc64': {'key': '', 'type': 'bytearray', 'xml': {'name': 'source_if_none_match_crc64'}},
}
_xml_map = {
}
def __init__(self, *, source_if_match_crc64: bytearray=None, source_if_none_match_crc64: bytearray=None, **kwargs) -> None:
super(SourceModifiedAccessConditions, self).__init__(**kwargs)
self.source_if_match_crc64 = source_if_match_crc64
self.source_if_none_match_crc64 = source_if_none_match_crc64
class StorageError(Model):
"""StorageError.
:param message:
:type message: str
"""
_attribute_map = {
'message': {'key': 'Message', 'type': 'str', 'xml': {'name': 'Message'}},
}
_xml_map = {
}
def __init__(self, *, message: str=None, **kwargs) -> None:
super(StorageError, self).__init__(**kwargs)
self.message = message
class StorageErrorException(HttpResponseError):
"""Server responsed with exception of type: 'StorageError'.
:param deserialize: A deserializer
:param response: Server response to be deserialized.
"""
def __init__(self, response, deserialize, *args):
model_name = 'StorageError'
self.error = deserialize(model_name, response)
if self.error is None:
self.error = deserialize.dependencies[model_name]()
super(StorageErrorException, self).__init__(response=response)
class StorageServiceProperties(Model):
"""Storage service properties.
:param hour_metrics: A summary of request statistics grouped by API in
hourly aggregates for files.
:type hour_metrics: ~azure.storage.fileshare.models.Metrics
:param minute_metrics: A summary of request statistics grouped by API in
minute aggregates for files.
:type minute_metrics: ~azure.storage.fileshare.models.Metrics
:param cors: The set of CORS rules.
:type cors: list[~azure.storage.fileshare.models.CorsRule]
:param protocol: Protocol settings
:type protocol: ~azure.storage.fileshare.models.ShareProtocolSettings
"""
_attribute_map = {
'hour_metrics': {'key': 'HourMetrics', 'type': 'Metrics', 'xml': {'name': 'HourMetrics'}},
'minute_metrics': {'key': 'MinuteMetrics', 'type': 'Metrics', 'xml': {'name': 'MinuteMetrics'}},
'cors': {'key': 'Cors', 'type': '[CorsRule]', 'xml': {'name': 'Cors', 'itemsName': 'CorsRule', 'wrapped': True}},
'protocol': {'key': 'Protocol', 'type': 'ShareProtocolSettings', 'xml': {'name': 'ProtocolSettings'}},
}
_xml_map = {
}
def __init__(self, *, hour_metrics=None, minute_metrics=None, cors=None, protocol=None, **kwargs) -> None:
super(StorageServiceProperties, self).__init__(**kwargs)
self.hour_metrics = hour_metrics
self.minute_metrics = minute_metrics
self.cors = cors
self.protocol = protocol
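# Hypothetical usage sketch: assembling service properties from the sketches
# above (hourly metrics plus a single CORS rule).
#
#     props = StorageServiceProperties(hour_metrics=hourly, cors=[rule])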
| 37.936399
| 334
| 0.640298
|
dc2da35ff5c1ee96b1d44cff7a222af587f60f06
| 1,171
|
py
|
Python
|
fixtures/generate.py
|
bbengfort/iterfile
|
a212c9fbffe265aabc93feb0c7b8957368ceba86
|
[
"MIT"
] | 1
|
2021-07-06T01:16:08.000Z
|
2021-07-06T01:16:08.000Z
|
fixtures/generate.py
|
bbengfort/iterfile
|
a212c9fbffe265aabc93feb0c7b8957368ceba86
|
[
"MIT"
] | null | null | null |
fixtures/generate.py
|
bbengfort/iterfile
|
a212c9fbffe265aabc93feb0c7b8957368ceba86
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# This quick script generates fixtures for benchmarking.
import os
import random
# Number of words per line
MIN_LINE = 20
MAX_LINE = 100
# Words to randomly select to add to the line
WORDS = ("fizz", "buzz", "foo", "bar", "baz")
# Paths of fixtures to create
BASEDIR = os.path.dirname(__file__)
FIXTURES = {
os.path.join(BASEDIR, "small.txt"): 100,
os.path.join(BASEDIR, "medium.txt"): 1000,
os.path.join(BASEDIR, "large.txt"): 10000,
}
def make_fixture(path, lines, words=WORDS, minlen=MIN_LINE, maxlen=MAX_LINE):
"""
Writes a file to the specified path with the number of lines specified by
randomly choosing between minlen and maxlen words and writing them.
"""
with open(path, 'w') as f:
for _ in range(lines):
text = [
random.choice(words)
for _ in range(random.randint(minlen, maxlen))
]
f.write(" ".join(text) + "\n")
if __name__ == '__main__':
for path, lines in FIXTURES.items():
make_fixture(path, lines)
# Make the profiling fixture
# make_fixture('jumbo.txt', 750000, minlen=100, maxlen=2000)
| 26.022222
| 77
| 0.636208
|
9dac9b9435ead8da0275326d93cf15d37b41237b
| 2,170
|
py
|
Python
|
ccdproc/tests/test_keyword.py
|
cdeil/ccdproc
|
1bcfb0142669243325bfce05b4f2fc45ea013f02
|
[
"BSD-3-Clause"
] | null | null | null |
ccdproc/tests/test_keyword.py
|
cdeil/ccdproc
|
1bcfb0142669243325bfce05b4f2fc45ea013f02
|
[
"BSD-3-Clause"
] | null | null | null |
ccdproc/tests/test_keyword.py
|
cdeil/ccdproc
|
1bcfb0142669243325bfce05b4f2fc45ea013f02
|
[
"BSD-3-Clause"
] | null | null | null |
from astropy.tests.helper import pytest
from astropy import units as u
from astropy.units import Quantity
from astropy.io import fits
from ..ccdproc import Keyword
def test_keyword_init():
key_name = 'some_key'
key = Keyword(key_name, unit=u.second)
assert key.name == key_name
assert key.unit == u.second
def test_keyword_properties_read_only():
key = Keyword('observer')
with pytest.raises(AttributeError):
key.name = 'error'
with pytest.raises(AttributeError):
key.unit = u.hour
unit = u.second
numerical_value = 30
# The parameter "expected" below is:
#   - True if the expected result is key.value == numerical_value * key.unit
#   - an error class if an error is expected
#   - a string if the expected value is a string
@pytest.mark.parametrize('value,unit,expected', [
(numerical_value, unit, True),
(numerical_value, None, ValueError),
(numerical_value * unit, None, True),
(numerical_value * unit, unit, True),
(numerical_value * unit, u.km, True),
('some string', None, 'some string'),
('no strings with unit', unit, ValueError)
])
def test_value_setting(value, unit, expected):
name = 'exposure'
# Setting at initialization time with
try:
expected_is_error = issubclass(expected, Exception)
except TypeError:
expected_is_error = False
if expected_is_error:
with pytest.raises(expected):
key = Keyword(name, unit=unit, value=value)
else:
key = Keyword(name, unit=unit, value=value)
        if isinstance(expected, str):  # was 'basestring' (Python 2 only)
assert key.value == expected
else:
assert key.value == numerical_value * key.unit
def test_keyword_value_from_header():
name = 'exposure'
numerical_value = 30
unit = u.second
h = fits.Header()
h[name] = numerical_value
key = Keyword(name, unit=unit)
assert key.value_from(h) == numerical_value * unit
assert key.value == numerical_value * unit
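# Hypothetical usage sketch (mirrors what the tests above exercise): reading a
# unit-aware value out of a FITS header with Keyword.
#
#     header = fits.Header()
#     header['exposure'] = 30
#     exposure = Keyword('exposure', unit=u.second)
#     value = exposure.value_from(header)  # Quantity of 30 seconds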
| 31.449275
| 76
| 0.620276
|
a956a1261c77834af5bf1ea2a9a3b0853172f8ca
| 42,013
|
py
|
Python
|
content/post/Psychopy_Trajectories/Ppy/Example_Traj_lastrun.py
|
santiagoalonso/starter-academic
|
b88ddf3c3fc18935ef42ced413bd733255298371
|
[
"MIT"
] | null | null | null |
content/post/Psychopy_Trajectories/Ppy/Example_Traj_lastrun.py
|
santiagoalonso/starter-academic
|
b88ddf3c3fc18935ef42ced413bd733255298371
|
[
"MIT"
] | null | null | null |
content/post/Psychopy_Trajectories/Ppy/Example_Traj_lastrun.py
|
santiagoalonso/starter-academic
|
b88ddf3c3fc18935ef42ced413bd733255298371
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This experiment was created using PsychoPy3 Experiment Builder (v2021.1.4),
on July 07, 2021, at 16:25
If you publish work using this script the most relevant publication is:
Peirce J, Gray JR, Simpson S, MacAskill M, Höchenberger R, Sogo H, Kastman E, Lindeløv JK. (2019)
PsychoPy2: Experiments in behavior made easy Behav Res 51: 195.
https://doi.org/10.3758/s13428-018-01193-y
"""
from __future__ import absolute_import, division
from psychopy import locale_setup
from psychopy import prefs
from psychopy import sound, gui, visual, core, data, event, logging, clock, colors
from psychopy.constants import (NOT_STARTED, STARTED, PLAYING, PAUSED,
STOPPED, FINISHED, PRESSED, RELEASED, FOREVER)
import numpy as np # whole numpy lib is available, prepend 'np.'
from numpy import (sin, cos, tan, log, log10, pi, average,
sqrt, std, deg2rad, rad2deg, linspace, asarray)
from numpy.random import random, randint, normal, shuffle, choice as randchoice
import os # handy system and path functions
import sys # to get file system encoding
from psychopy.hardware import keyboard
# Ensure that relative paths start from the same directory as this script
_thisDir = os.path.dirname(os.path.abspath(__file__))
os.chdir(_thisDir)
# Store info about the experiment session
psychopyVersion = '2021.1.4'
expName = 'YOUNSTE' # from the Builder filename that created this script
expInfo = {'Cedula': ''}
dlg = gui.DlgFromDict(dictionary=expInfo, sortKeys=False, title=expName)
if dlg.OK == False:
core.quit() # user pressed cancel
expInfo['date'] = data.getDateStr() # add a simple timestamp
expInfo['expName'] = expName
expInfo['psychopyVersion'] = psychopyVersion
# Data file name stem = absolute path + name; later add .psyexp, .csv, .log, etc
filename = _thisDir + os.sep + u'data/%s_%s_%s' % (expInfo['Cedula'], expName, expInfo['date'])
# An ExperimentHandler isn't essential but helps with data saving
thisExp = data.ExperimentHandler(name=expName, version='',
extraInfo=expInfo, runtimeInfo=None,
originPath='E:\\Github Repos\\starter-academic\\content\\post\\Psychopy_Trajectories\\Ppy\\Example_Traj_lastrun.py',
savePickle=True, saveWideText=True,
dataFileName=filename)
# save a log file for detail verbose info
logFile = logging.LogFile(filename+'.log', level=logging.EXP)
logging.console.setLevel(logging.WARNING) # this outputs to the screen, not a file
endExpNow = False # flag for 'escape' or other condition => quit the exp
frameTolerance = 0.001 # how close to onset before 'same' frame
# Start Code - component code to be run after the window creation
# Setup the Window
win = visual.Window(
size=[1900, 1060], fullscr=False, screen=0,
winType='pyglet', allowGUI=True, allowStencil=False,
monitor='testMonitor', color=[0,0,0], colorSpace='rgb',
blendMode='avg', useFBO=True,
units='norm')
# store frame rate of monitor if we can measure it
expInfo['frameRate'] = win.getActualFrameRate()
if expInfo['frameRate'] != None:
frameDur = 1.0 / round(expInfo['frameRate'])
else:
frameDur = 1.0 / 60.0 # could not measure, so guess
# create a default keyboard (e.g. to check for escape)
defaultKeyboard = keyboard.Keyboard()
# Initialize components for Routine "Instructions"
InstructionsClock = core.Clock()
Instr_CategCaras_text = visual.TextStim(win=win, name='Instr_CategCaras_text',
text='',
font='Open Sans',
pos=(0, 0), height=0.06, wrapWidth=None, ori=0.0,
color='white', colorSpace='rgb', opacity=None,
languageStyle='LTR',
depth=0.0);
wind = Instr_CategCaras_text.win
stf = wind.size[1]/wind.size[0]
Next_button_CategCaras_image = visual.ImageStim(
win=win,
name='Next_button_CategCaras_image',
image='Images/next_button.png', mask=None,
ori=0.0, pos=(0.8, -0.8), size=(0.25*stf, 0.25),
color=[1,1,1], colorSpace='rgb', opacity=None,
flipHoriz=False, flipVert=False,
texRes=128.0, interpolate=True, depth=-2.0)
Instr_CategCaras_mouse = event.Mouse(win=win)
x, y = [None, None]
Instr_CategCaras_mouse.mouseClock = core.Clock()
# Initialize components for Routine "Train_Img_CategCaras"
Train_Img_CategCarasClock = core.Clock()
FruitVeg_image = visual.ImageStim(
win=win,
name='FruitVeg_image',
image='sin', mask=None,
ori=0.0, pos=(0, 0), size=(0.5*stf, 0.5),
color=[1,1,1], colorSpace='rgb', opacity=None,
flipHoriz=False, flipVert=False,
texRes=128.0, interpolate=True, depth=0.0)
# Initialize components for Routine "Train_CategCaras"
Train_CategCarasClock = core.Clock()
FingerLift_penalty = 5 #(seconds)
Mouse_tol = 0.5 #to detect finger jumps (norm. units)
Train_CategCaras_mouse = event.Mouse(win=win)
x, y = [None, None]
Train_CategCaras_mouse.mouseClock = core.Clock()
Train_CategCaras_Beh_mouse = event.Mouse(win=win)
x, y = [None, None]
Train_CategCaras_Beh_mouse.mouseClock = core.Clock()
Train_CategCaras_Izq_polygon = visual.Rect(
win=win, name='Train_CategCaras_Izq_polygon',
width=(0.25*stf, 0.25)[0], height=(0.25*stf, 0.25)[1],
ori=0.0, pos=(-0.75, 0.75),
lineWidth=1.0, colorSpace='rgb', lineColor=(-1.0000, -1.0000, -1.0000), fillColor='white',
opacity=None, depth=-3.0, interpolate=True)
Train_CategCaras_Der_polygon = visual.Rect(
win=win, name='Train_CategCaras_Der_polygon',
width=(0.25*stf, 0.25)[0], height=(0.25*stf, 0.25)[1],
ori=0.0, pos=(0.75, 0.75),
lineWidth=1.0, colorSpace='rgb', lineColor=(-1.0000, -1.0000, -1.0000), fillColor='white',
opacity=None, depth=-4.0, interpolate=True)
Train_CategCaras_Inferior_polygon = visual.Rect(
win=win, name='Train_CategCaras_Inferior_polygon',
width=(0.25*stf, 0.25)[0], height=(0.25*stf, 0.25)[1],
ori=0.0, pos=(0, -0.8),
lineWidth=1.0, colorSpace='rgb', lineColor=(-1.0000, -1.0000, -1.0000), fillColor='white',
opacity=None, depth=-5.0, interpolate=True)
Train_CategCaras_Male_text = visual.TextStim(win=win, name='Train_CategCaras_Male_text',
text='Fruta',
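    # 'Fruta' is Spanish for 'Fruit'.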
font='Open Sans',
pos=(-0.75, 0.5), height=0.08, wrapWidth=None, ori=0.0,
color='white', colorSpace='rgb', opacity=None,
languageStyle='LTR',
depth=-6.0);
Train_CategCaras_Female_text = visual.TextStim(win=win, name='Train_CategCaras_Female_text',
text='Verdura',
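    # 'Verdura' is Spanish for 'Vegetable'.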
font='Open Sans',
pos=(0.75, 0.5), height=0.08, wrapWidth=None, ori=0.0,
color='white', colorSpace='rgb', opacity=None,
languageStyle='LTR',
depth=-7.0);
# Initialize components for Routine "NoLiftFinger_Warning"
NoLiftFinger_WarningClock = core.Clock()
NoLiftFinger_Warning_text = visual.TextStim(win=win, name='NoLiftFinger_Warning_text',
text='Cuando escoja no levante el dedo. ',
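    # (Spanish for "When you choose, do not lift your finger.")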
font='Open Sans',
pos=(0, 0), height=0.1, wrapWidth=None, ori=0.0,
color='white', colorSpace='rgb', opacity=None,
languageStyle='LTR',
depth=0.0);
# Initialize components for Routine "Thanks"
ThanksClock = core.Clock()
Thanks_text = visual.TextStim(win=win, name='Thanks_text',
text='Gracias por su participación. ',
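    # (Spanish for "Thank you for your participation.")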
font='Open Sans',
pos=(0, 0), height=0.1, wrapWidth=None, ori=0.0,
color='white', colorSpace='rgb', opacity=None,
languageStyle='LTR',
depth=0.0);
# Create some handy timers
globalClock = core.Clock() # to track the time since experiment started
routineTimer = core.CountdownTimer() # to track time remaining of each (non-slip) routine
# set up handler to look after randomisation of conditions etc
Trials_Instr_CategCaras = data.TrialHandler(nReps=1.0, method='sequential',
extraInfo=expInfo, originPath=-1,
trialList=data.importConditions('CategCaras_instr.xlsx'),
seed=None, name='Trials_Instr_CategCaras')
thisExp.addLoop(Trials_Instr_CategCaras) # add the loop to the experiment
thisTrials_Instr_CategCara = Trials_Instr_CategCaras.trialList[0] # so we can initialise stimuli with some values
# abbreviate parameter names if possible (e.g. rgb = thisTrials_Instr_CategCara.rgb)
if thisTrials_Instr_CategCara != None:
for paramName in thisTrials_Instr_CategCara:
exec('{} = thisTrials_Instr_CategCara[paramName]'.format(paramName))
for thisTrials_Instr_CategCara in Trials_Instr_CategCaras:
currentLoop = Trials_Instr_CategCaras
# abbreviate parameter names if possible (e.g. rgb = thisTrials_Instr_CategCara.rgb)
if thisTrials_Instr_CategCara != None:
for paramName in thisTrials_Instr_CategCara:
exec('{} = thisTrials_Instr_CategCara[paramName]'.format(paramName))
# ------Prepare to start Routine "Instructions"-------
continueRoutine = True
# update component parameters for each repeat
Instr_CategCaras_text.setText(I_CategCaras)
wind = Instr_CategCaras_text.win
stf = wind.size[1]/wind.size[0]
# setup some python lists for storing info about the Instr_CategCaras_mouse
Instr_CategCaras_mouse.clicked_name = []
gotValidClick = False # until a click is received
# keep track of which components have finished
InstructionsComponents = [Instr_CategCaras_text, Next_button_CategCaras_image, Instr_CategCaras_mouse]
for thisComponent in InstructionsComponents:
thisComponent.tStart = None
thisComponent.tStop = None
thisComponent.tStartRefresh = None
thisComponent.tStopRefresh = None
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
InstructionsClock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
frameN = -1
# -------Run Routine "Instructions"-------
while continueRoutine:
# get current time
t = InstructionsClock.getTime()
tThisFlip = win.getFutureFlipTime(clock=InstructionsClock)
tThisFlipGlobal = win.getFutureFlipTime(clock=None)
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *Instr_CategCaras_text* updates
if Instr_CategCaras_text.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
Instr_CategCaras_text.frameNStart = frameN # exact frame index
Instr_CategCaras_text.tStart = t # local t and not account for scr refresh
Instr_CategCaras_text.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(Instr_CategCaras_text, 'tStartRefresh') # time at next scr refresh
Instr_CategCaras_text.setAutoDraw(True)
wind = Instr_CategCaras_text.win
stf = wind.size[1]/wind.size[0]
# *Next_button_CategCaras_image* updates
if Next_button_CategCaras_image.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
Next_button_CategCaras_image.frameNStart = frameN # exact frame index
Next_button_CategCaras_image.tStart = t # local t and not account for scr refresh
Next_button_CategCaras_image.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(Next_button_CategCaras_image, 'tStartRefresh') # time at next scr refresh
Next_button_CategCaras_image.setAutoDraw(True)
# *Instr_CategCaras_mouse* updates
if Instr_CategCaras_mouse.status == NOT_STARTED and t >= 0.0-frameTolerance:
# keep track of start time/frame for later
Instr_CategCaras_mouse.frameNStart = frameN # exact frame index
Instr_CategCaras_mouse.tStart = t # local t and not account for scr refresh
Instr_CategCaras_mouse.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(Instr_CategCaras_mouse, 'tStartRefresh') # time at next scr refresh
Instr_CategCaras_mouse.status = STARTED
Instr_CategCaras_mouse.mouseClock.reset()
prevButtonState = Instr_CategCaras_mouse.getPressed() # if button is down already this ISN'T a new click
if Instr_CategCaras_mouse.status == STARTED: # only update if started and not finished!
buttons = Instr_CategCaras_mouse.getPressed()
if buttons != prevButtonState: # button state changed?
prevButtonState = buttons
if sum(buttons) > 0: # state changed to a new click
# check if the mouse was inside our 'clickable' objects
gotValidClick = False
for obj in [Next_button_CategCaras_image,]:
if obj.contains(Instr_CategCaras_mouse):
gotValidClick = True
Instr_CategCaras_mouse.clicked_name.append(obj.name)
if gotValidClick: # abort routine on response
continueRoutine = False
# check for quit (typically the Esc key)
if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
core.quit()
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in InstructionsComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "Instructions"-------
for thisComponent in InstructionsComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
Trials_Instr_CategCaras.addData('Instr_CategCaras_text.started', Instr_CategCaras_text.tStartRefresh)
Trials_Instr_CategCaras.addData('Instr_CategCaras_text.stopped', Instr_CategCaras_text.tStopRefresh)
wind = Instr_CategCaras_text.win
stf = wind.size[1]/wind.size[0]
Trials_Instr_CategCaras.addData('Next_button_CategCaras_image.started', Next_button_CategCaras_image.tStartRefresh)
Trials_Instr_CategCaras.addData('Next_button_CategCaras_image.stopped', Next_button_CategCaras_image.tStopRefresh)
# store data for Trials_Instr_CategCaras (TrialHandler)
x, y = Instr_CategCaras_mouse.getPos()
buttons = Instr_CategCaras_mouse.getPressed()
if sum(buttons):
# check if the mouse was inside our 'clickable' objects
gotValidClick = False
for obj in [Next_button_CategCaras_image,]:
if obj.contains(Instr_CategCaras_mouse):
gotValidClick = True
Instr_CategCaras_mouse.clicked_name.append(obj.name)
Trials_Instr_CategCaras.addData('Instr_CategCaras_mouse.x', x)
Trials_Instr_CategCaras.addData('Instr_CategCaras_mouse.y', y)
Trials_Instr_CategCaras.addData('Instr_CategCaras_mouse.leftButton', buttons[0])
Trials_Instr_CategCaras.addData('Instr_CategCaras_mouse.midButton', buttons[1])
Trials_Instr_CategCaras.addData('Instr_CategCaras_mouse.rightButton', buttons[2])
if len(Instr_CategCaras_mouse.clicked_name):
Trials_Instr_CategCaras.addData('Instr_CategCaras_mouse.clicked_name', Instr_CategCaras_mouse.clicked_name[0])
Trials_Instr_CategCaras.addData('Instr_CategCaras_mouse.started', Instr_CategCaras_mouse.tStart)
Trials_Instr_CategCaras.addData('Instr_CategCaras_mouse.stopped', Instr_CategCaras_mouse.tStop)
# the Routine "Instructions" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
thisExp.nextEntry()
# completed 1.0 repeats of 'Trials_Instr_CategCaras'
# set up handler to look after randomisation of conditions etc
Trials_Train_CategCaras = data.TrialHandler(nReps=1.0, method='random',
extraInfo=expInfo, originPath=-1,
trialList=data.importConditions('CategCaras_Train_imgs.xlsx'),
seed=None, name='Trials_Train_CategCaras')
thisExp.addLoop(Trials_Train_CategCaras) # add the loop to the experiment
thisTrials_Train_CategCara = Trials_Train_CategCaras.trialList[0] # so we can initialise stimuli with some values
# abbreviate parameter names if possible (e.g. rgb = thisTrials_Train_CategCara.rgb)
if thisTrials_Train_CategCara != None:
for paramName in thisTrials_Train_CategCara:
exec('{} = thisTrials_Train_CategCara[paramName]'.format(paramName))
for thisTrials_Train_CategCara in Trials_Train_CategCaras:
currentLoop = Trials_Train_CategCaras
# abbreviate parameter names if possible (e.g. rgb = thisTrials_Train_CategCara.rgb)
if thisTrials_Train_CategCara != None:
for paramName in thisTrials_Train_CategCara:
exec('{} = thisTrials_Train_CategCara[paramName]'.format(paramName))
# ------Prepare to start Routine "Train_Img_CategCaras"-------
continueRoutine = True
routineTimer.add(1.000000)
# update component parameters for each repeat
FruitVeg_image.setImage(Train_CategCaras_img_file)
# keep track of which components have finished
Train_Img_CategCarasComponents = [FruitVeg_image]
for thisComponent in Train_Img_CategCarasComponents:
thisComponent.tStart = None
thisComponent.tStop = None
thisComponent.tStartRefresh = None
thisComponent.tStopRefresh = None
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
Train_Img_CategCarasClock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
frameN = -1
# -------Run Routine "Train_Img_CategCaras"-------
while continueRoutine and routineTimer.getTime() > 0:
# get current time
t = Train_Img_CategCarasClock.getTime()
tThisFlip = win.getFutureFlipTime(clock=Train_Img_CategCarasClock)
tThisFlipGlobal = win.getFutureFlipTime(clock=None)
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *FruitVeg_image* updates
if FruitVeg_image.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
FruitVeg_image.frameNStart = frameN # exact frame index
FruitVeg_image.tStart = t # local t and not account for scr refresh
FruitVeg_image.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(FruitVeg_image, 'tStartRefresh') # time at next scr refresh
FruitVeg_image.setAutoDraw(True)
if FruitVeg_image.status == STARTED:
# is it time to stop? (based on global clock, using actual start)
if tThisFlipGlobal > FruitVeg_image.tStartRefresh + 1.0-frameTolerance:
# keep track of stop time/frame for later
FruitVeg_image.tStop = t # not accounting for scr refresh
FruitVeg_image.frameNStop = frameN # exact frame index
win.timeOnFlip(FruitVeg_image, 'tStopRefresh') # time at next scr refresh
FruitVeg_image.setAutoDraw(False)
# check for quit (typically the Esc key)
if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
core.quit()
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in Train_Img_CategCarasComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "Train_Img_CategCaras"-------
for thisComponent in Train_Img_CategCarasComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
Trials_Train_CategCaras.addData('FruitVeg_image.started', FruitVeg_image.tStartRefresh)
Trials_Train_CategCaras.addData('FruitVeg_image.stopped', FruitVeg_image.tStopRefresh)
# ------Prepare to start Routine "Train_CategCaras"-------
continueRoutine = True
# update component parameters for each repeat
Options_Appear = False
    RT = -999999 # response time: (moment the finger leaves the bottom square) - (moment it arrived at the bottom square)
Finger_Jump = -1
penalty = 0
    x0 = 999 # initialize previous/current finger positions with placeholder values
y0 = 9999
x1 = 999
y1 = 999
# setup some python lists for storing info about the Train_CategCaras_mouse
Train_CategCaras_mouse.x = []
Train_CategCaras_mouse.y = []
Train_CategCaras_mouse.leftButton = []
Train_CategCaras_mouse.midButton = []
Train_CategCaras_mouse.rightButton = []
Train_CategCaras_mouse.time = []
gotValidClick = False # until a click is received
# setup some python lists for storing info about the Train_CategCaras_Beh_mouse
Train_CategCaras_Beh_mouse.x = []
Train_CategCaras_Beh_mouse.y = []
Train_CategCaras_Beh_mouse.leftButton = []
Train_CategCaras_Beh_mouse.midButton = []
Train_CategCaras_Beh_mouse.rightButton = []
Train_CategCaras_Beh_mouse.time = []
gotValidClick = False # until a click is received
# keep track of which components have finished
Train_CategCarasComponents = [Train_CategCaras_mouse, Train_CategCaras_Beh_mouse, Train_CategCaras_Izq_polygon, Train_CategCaras_Der_polygon, Train_CategCaras_Inferior_polygon, Train_CategCaras_Male_text, Train_CategCaras_Female_text]
for thisComponent in Train_CategCarasComponents:
thisComponent.tStart = None
thisComponent.tStop = None
thisComponent.tStartRefresh = None
thisComponent.tStopRefresh = None
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
Train_CategCarasClock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
frameN = -1
# -------Run Routine "Train_CategCaras"-------
while continueRoutine:
# get current time
t = Train_CategCarasClock.getTime()
tThisFlip = win.getFutureFlipTime(clock=Train_CategCarasClock)
tThisFlipGlobal = win.getFutureFlipTime(clock=None)
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
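        # The response options are revealed once the finger enters the bottom square;
        # RT is the time (on the Beh_mouse clock, which starts when the options appear)
        # at which the finger first leaves that square.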
if Train_CategCaras_Inferior_polygon.contains(Train_CategCaras_mouse):
Options_Appear = True
        if (RT < 0) and Options_Appear and (not Train_CategCaras_Inferior_polygon.contains(Train_CategCaras_mouse)):
RT = Train_CategCaras_Beh_mouse.mouseClock.getTime()
Trials_Train_CategCaras.addData('Train_CategCaras.RT', RT)
        if Options_Appear and Train_CategCaras_Izq_polygon.contains(Train_CategCaras_mouse):
            Trials_Train_CategCaras.addData('Train_CategCaras.Response', "Fruta")
            continueRoutine = False
        if Options_Appear and Train_CategCaras_Der_polygon.contains(Train_CategCaras_mouse):
            Trials_Train_CategCaras.addData('Train_CategCaras.Response', "Verdura")
            continueRoutine = False
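        # Finger-lift check: after the options appear, consecutive mouse samples are
        # compared; a jump larger than Mouse_tol in x or y is treated as a lifted
        # finger, which records FingerJump, sets the warning penalty and ends the routine.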
if Options_Appear == True:
if Finger_Jump < 0:
x0, y0 = Train_CategCaras_mouse.getPos()
Finger_Jump = 0
elif Finger_Jump == 0:
x1, y1 = Train_CategCaras_mouse.getPos()
if abs(y1-y0) > Mouse_tol or abs(x1-x0) > Mouse_tol:
Finger_Jump = 1
penalty = FingerLift_penalty
Trials_Train_CategCaras.addData('Train_CategCaras.FingerJump', Finger_Jump)
continueRoutine = False
else:
x0, y0 = Train_CategCaras_mouse.getPos()
# *Train_CategCaras_mouse* updates
if Train_CategCaras_mouse.status == NOT_STARTED and t >= 0.0-frameTolerance:
# keep track of start time/frame for later
Train_CategCaras_mouse.frameNStart = frameN # exact frame index
Train_CategCaras_mouse.tStart = t # local t and not account for scr refresh
Train_CategCaras_mouse.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(Train_CategCaras_mouse, 'tStartRefresh') # time at next scr refresh
Train_CategCaras_mouse.status = STARTED
Train_CategCaras_mouse.mouseClock.reset()
prevButtonState = Train_CategCaras_mouse.getPressed() # if button is down already this ISN'T a new click
if Train_CategCaras_mouse.status == STARTED: # only update if started and not finished!
x, y = Train_CategCaras_mouse.getPos()
Train_CategCaras_mouse.x.append(x)
Train_CategCaras_mouse.y.append(y)
buttons = Train_CategCaras_mouse.getPressed()
Train_CategCaras_mouse.leftButton.append(buttons[0])
Train_CategCaras_mouse.midButton.append(buttons[1])
Train_CategCaras_mouse.rightButton.append(buttons[2])
Train_CategCaras_mouse.time.append(Train_CategCaras_mouse.mouseClock.getTime())
# *Train_CategCaras_Beh_mouse* updates
if Train_CategCaras_Beh_mouse.status == NOT_STARTED and Options_Appear:
# keep track of start time/frame for later
Train_CategCaras_Beh_mouse.frameNStart = frameN # exact frame index
Train_CategCaras_Beh_mouse.tStart = t # local t and not account for scr refresh
Train_CategCaras_Beh_mouse.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(Train_CategCaras_Beh_mouse, 'tStartRefresh') # time at next scr refresh
Train_CategCaras_Beh_mouse.status = STARTED
Train_CategCaras_Beh_mouse.mouseClock.reset()
prevButtonState = Train_CategCaras_Beh_mouse.getPressed() # if button is down already this ISN'T a new click
if Train_CategCaras_Beh_mouse.status == STARTED: # only update if started and not finished!
x, y = Train_CategCaras_Beh_mouse.getPos()
Train_CategCaras_Beh_mouse.x.append(x)
Train_CategCaras_Beh_mouse.y.append(y)
buttons = Train_CategCaras_Beh_mouse.getPressed()
Train_CategCaras_Beh_mouse.leftButton.append(buttons[0])
Train_CategCaras_Beh_mouse.midButton.append(buttons[1])
Train_CategCaras_Beh_mouse.rightButton.append(buttons[2])
Train_CategCaras_Beh_mouse.time.append(Train_CategCaras_Beh_mouse.mouseClock.getTime())
# *Train_CategCaras_Izq_polygon* updates
if Train_CategCaras_Izq_polygon.status == NOT_STARTED and Options_Appear:
# keep track of start time/frame for later
Train_CategCaras_Izq_polygon.frameNStart = frameN # exact frame index
Train_CategCaras_Izq_polygon.tStart = t # local t and not account for scr refresh
Train_CategCaras_Izq_polygon.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(Train_CategCaras_Izq_polygon, 'tStartRefresh') # time at next scr refresh
Train_CategCaras_Izq_polygon.setAutoDraw(True)
# *Train_CategCaras_Der_polygon* updates
if Train_CategCaras_Der_polygon.status == NOT_STARTED and Options_Appear:
# keep track of start time/frame for later
Train_CategCaras_Der_polygon.frameNStart = frameN # exact frame index
Train_CategCaras_Der_polygon.tStart = t # local t and not account for scr refresh
Train_CategCaras_Der_polygon.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(Train_CategCaras_Der_polygon, 'tStartRefresh') # time at next scr refresh
Train_CategCaras_Der_polygon.setAutoDraw(True)
# *Train_CategCaras_Inferior_polygon* updates
if Train_CategCaras_Inferior_polygon.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
Train_CategCaras_Inferior_polygon.frameNStart = frameN # exact frame index
Train_CategCaras_Inferior_polygon.tStart = t # local t and not account for scr refresh
Train_CategCaras_Inferior_polygon.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(Train_CategCaras_Inferior_polygon, 'tStartRefresh') # time at next scr refresh
Train_CategCaras_Inferior_polygon.setAutoDraw(True)
# *Train_CategCaras_Male_text* updates
if Train_CategCaras_Male_text.status == NOT_STARTED and Options_Appear:
# keep track of start time/frame for later
Train_CategCaras_Male_text.frameNStart = frameN # exact frame index
Train_CategCaras_Male_text.tStart = t # local t and not account for scr refresh
Train_CategCaras_Male_text.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(Train_CategCaras_Male_text, 'tStartRefresh') # time at next scr refresh
Train_CategCaras_Male_text.setAutoDraw(True)
# *Train_CategCaras_Female_text* updates
if Train_CategCaras_Female_text.status == NOT_STARTED and Options_Appear:
# keep track of start time/frame for later
Train_CategCaras_Female_text.frameNStart = frameN # exact frame index
Train_CategCaras_Female_text.tStart = t # local t and not account for scr refresh
Train_CategCaras_Female_text.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(Train_CategCaras_Female_text, 'tStartRefresh') # time at next scr refresh
Train_CategCaras_Female_text.setAutoDraw(True)
# check for quit (typically the Esc key)
if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
core.quit()
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in Train_CategCarasComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "Train_CategCaras"-------
for thisComponent in Train_CategCarasComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
MT = Train_CategCaras_Beh_mouse.mouseClock.getTime() - RT
Trials_Train_CategCaras.addData('Train_CategCaras.MT', MT)
# store data for Trials_Train_CategCaras (TrialHandler)
Trials_Train_CategCaras.addData('Train_CategCaras_mouse.x', Train_CategCaras_mouse.x)
Trials_Train_CategCaras.addData('Train_CategCaras_mouse.y', Train_CategCaras_mouse.y)
Trials_Train_CategCaras.addData('Train_CategCaras_mouse.leftButton', Train_CategCaras_mouse.leftButton)
Trials_Train_CategCaras.addData('Train_CategCaras_mouse.midButton', Train_CategCaras_mouse.midButton)
Trials_Train_CategCaras.addData('Train_CategCaras_mouse.rightButton', Train_CategCaras_mouse.rightButton)
Trials_Train_CategCaras.addData('Train_CategCaras_mouse.time', Train_CategCaras_mouse.time)
Trials_Train_CategCaras.addData('Train_CategCaras_mouse.started', Train_CategCaras_mouse.tStart)
Trials_Train_CategCaras.addData('Train_CategCaras_mouse.stopped', Train_CategCaras_mouse.tStop)
# store data for Trials_Train_CategCaras (TrialHandler)
Trials_Train_CategCaras.addData('Train_CategCaras_Beh_mouse.x', Train_CategCaras_Beh_mouse.x)
Trials_Train_CategCaras.addData('Train_CategCaras_Beh_mouse.y', Train_CategCaras_Beh_mouse.y)
Trials_Train_CategCaras.addData('Train_CategCaras_Beh_mouse.leftButton', Train_CategCaras_Beh_mouse.leftButton)
Trials_Train_CategCaras.addData('Train_CategCaras_Beh_mouse.midButton', Train_CategCaras_Beh_mouse.midButton)
Trials_Train_CategCaras.addData('Train_CategCaras_Beh_mouse.rightButton', Train_CategCaras_Beh_mouse.rightButton)
Trials_Train_CategCaras.addData('Train_CategCaras_Beh_mouse.time', Train_CategCaras_Beh_mouse.time)
Trials_Train_CategCaras.addData('Train_CategCaras_Beh_mouse.started', Train_CategCaras_Beh_mouse.tStart)
Trials_Train_CategCaras.addData('Train_CategCaras_Beh_mouse.stopped', Train_CategCaras_Beh_mouse.tStop)
Trials_Train_CategCaras.addData('Train_CategCaras_Izq_polygon.started', Train_CategCaras_Izq_polygon.tStartRefresh)
Trials_Train_CategCaras.addData('Train_CategCaras_Izq_polygon.stopped', Train_CategCaras_Izq_polygon.tStopRefresh)
Trials_Train_CategCaras.addData('Train_CategCaras_Der_polygon.started', Train_CategCaras_Der_polygon.tStartRefresh)
Trials_Train_CategCaras.addData('Train_CategCaras_Der_polygon.stopped', Train_CategCaras_Der_polygon.tStopRefresh)
Trials_Train_CategCaras.addData('Train_CategCaras_Inferior_polygon.started', Train_CategCaras_Inferior_polygon.tStartRefresh)
Trials_Train_CategCaras.addData('Train_CategCaras_Inferior_polygon.stopped', Train_CategCaras_Inferior_polygon.tStopRefresh)
Trials_Train_CategCaras.addData('Train_CategCaras_Male_text.started', Train_CategCaras_Male_text.tStartRefresh)
Trials_Train_CategCaras.addData('Train_CategCaras_Male_text.stopped', Train_CategCaras_Male_text.tStopRefresh)
Trials_Train_CategCaras.addData('Train_CategCaras_Female_text.started', Train_CategCaras_Female_text.tStartRefresh)
Trials_Train_CategCaras.addData('Train_CategCaras_Female_text.stopped', Train_CategCaras_Female_text.tStopRefresh)
# the Routine "Train_CategCaras" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
# ------Prepare to start Routine "NoLiftFinger_Warning"-------
continueRoutine = True
# update component parameters for each repeat
# keep track of which components have finished
NoLiftFinger_WarningComponents = [NoLiftFinger_Warning_text]
for thisComponent in NoLiftFinger_WarningComponents:
thisComponent.tStart = None
thisComponent.tStop = None
thisComponent.tStartRefresh = None
thisComponent.tStopRefresh = None
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
NoLiftFinger_WarningClock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
frameN = -1
# -------Run Routine "NoLiftFinger_Warning"-------
while continueRoutine:
# get current time
t = NoLiftFinger_WarningClock.getTime()
tThisFlip = win.getFutureFlipTime(clock=NoLiftFinger_WarningClock)
tThisFlipGlobal = win.getFutureFlipTime(clock=None)
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *NoLiftFinger_Warning_text* updates
if NoLiftFinger_Warning_text.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
NoLiftFinger_Warning_text.frameNStart = frameN # exact frame index
NoLiftFinger_Warning_text.tStart = t # local t and not account for scr refresh
NoLiftFinger_Warning_text.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(NoLiftFinger_Warning_text, 'tStartRefresh') # time at next scr refresh
NoLiftFinger_Warning_text.setAutoDraw(True)
if NoLiftFinger_Warning_text.status == STARTED:
# is it time to stop? (based on global clock, using actual start)
if tThisFlipGlobal > NoLiftFinger_Warning_text.tStartRefresh + penalty-frameTolerance:
# keep track of stop time/frame for later
NoLiftFinger_Warning_text.tStop = t # not accounting for scr refresh
NoLiftFinger_Warning_text.frameNStop = frameN # exact frame index
win.timeOnFlip(NoLiftFinger_Warning_text, 'tStopRefresh') # time at next scr refresh
NoLiftFinger_Warning_text.setAutoDraw(False)
# check for quit (typically the Esc key)
if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
core.quit()
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in NoLiftFinger_WarningComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "NoLiftFinger_Warning"-------
for thisComponent in NoLiftFinger_WarningComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
Trials_Train_CategCaras.addData('NoLiftFinger_Warning_text.started', NoLiftFinger_Warning_text.tStartRefresh)
Trials_Train_CategCaras.addData('NoLiftFinger_Warning_text.stopped', NoLiftFinger_Warning_text.tStopRefresh)
# the Routine "NoLiftFinger_Warning" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
thisExp.nextEntry()
# completed 1.0 repeats of 'Trials_Train_CategCaras'
# ------Prepare to start Routine "Thanks"-------
continueRoutine = True
routineTimer.add(4.000000)
# update component parameters for each repeat
# keep track of which components have finished
ThanksComponents = [Thanks_text]
for thisComponent in ThanksComponents:
thisComponent.tStart = None
thisComponent.tStop = None
thisComponent.tStartRefresh = None
thisComponent.tStopRefresh = None
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
ThanksClock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
frameN = -1
# -------Run Routine "Thanks"-------
while continueRoutine and routineTimer.getTime() > 0:
# get current time
t = ThanksClock.getTime()
tThisFlip = win.getFutureFlipTime(clock=ThanksClock)
tThisFlipGlobal = win.getFutureFlipTime(clock=None)
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *Thanks_text* updates
if Thanks_text.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
Thanks_text.frameNStart = frameN # exact frame index
Thanks_text.tStart = t # local t and not account for scr refresh
Thanks_text.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(Thanks_text, 'tStartRefresh') # time at next scr refresh
Thanks_text.setAutoDraw(True)
if Thanks_text.status == STARTED:
# is it time to stop? (based on global clock, using actual start)
if tThisFlipGlobal > Thanks_text.tStartRefresh + 4.0-frameTolerance:
# keep track of stop time/frame for later
Thanks_text.tStop = t # not accounting for scr refresh
Thanks_text.frameNStop = frameN # exact frame index
win.timeOnFlip(Thanks_text, 'tStopRefresh') # time at next scr refresh
Thanks_text.setAutoDraw(False)
# check for quit (typically the Esc key)
if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
core.quit()
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in ThanksComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "Thanks"-------
for thisComponent in ThanksComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
thisExp.addData('Thanks_text.started', Thanks_text.tStartRefresh)
thisExp.addData('Thanks_text.stopped', Thanks_text.tStopRefresh)
# Flip one final time so any remaining win.callOnFlip()
# and win.timeOnFlip() tasks get executed before quitting
win.flip()
# these shouldn't be strictly necessary (should auto-save)
thisExp.saveAsWideText(filename+'.csv', delim='auto')
thisExp.saveAsPickle(filename)
logging.flush()
# make sure everything is closed down
thisExp.abort() # or data files will save again on exit
win.close()
core.quit()
| 52.780151
| 238
| 0.710542
|
700a8dc54d31f07b8aa04e8f1184d785209e2599
| 492
|
py
|
Python
|
authentication/urls.py
|
RAGNAROSaa/-
|
833688d556ecc70570a9b464160271ace07380d9
|
[
"Apache-2.0"
] | 5
|
2016-09-25T02:59:13.000Z
|
2018-07-18T05:20:58.000Z
|
authentication/urls.py
|
RAGNAROSaa/-
|
833688d556ecc70570a9b464160271ace07380d9
|
[
"Apache-2.0"
] | 1
|
2016-12-01T01:11:53.000Z
|
2016-12-01T01:11:53.000Z
|
authentication/urls.py
|
RAGNAROSaa/-
|
833688d556ecc70570a9b464160271ace07380d9
|
[
"Apache-2.0"
] | 6
|
2016-09-24T02:42:57.000Z
|
2016-11-10T13:35:13.000Z
|
from django.conf.urls import url
from authentication import views
urlpatterns = [
url(r'user/signup/$', views.SignupView.as_view(), name='user-signup'),
url(r'user/logout/$', views.LogoutView.as_view(), name='user-logout'),
url(r'user/login/$', views.LoginView.as_view(), name='user-login'),
url(r'user/list/$', views.UserListView.as_view(), name='myuser-list'),
url(r'user/photo/(?P<pk>[0-9]+)/update/$', views.UserPhotoChangeView.as_view(), name='user-change-photo')
]
| 44.727273
| 109
| 0.686992
|
5058f447ab91d5c3e136802a35454ba6dff83718
| 399
|
py
|
Python
|
.history/py/UserInput_20201230125908.py
|
minefarmer/Comprehensive-Python
|
f97b9b83ec328fc4e4815607e6a65de90bb8de66
|
[
"Unlicense"
] | null | null | null |
.history/py/UserInput_20201230125908.py
|
minefarmer/Comprehensive-Python
|
f97b9b83ec328fc4e4815607e6a65de90bb8de66
|
[
"Unlicense"
] | null | null | null |
.history/py/UserInput_20201230125908.py
|
minefarmer/Comprehensive-Python
|
f97b9b83ec328fc4e4815607e6a65de90bb8de66
|
[
"Unlicense"
] | null | null | null |
# person = input("Enter your name: ")
# print("Hello ", person)
# x = input("Enter a number: ") # 8
# y = input("Enter another number: ") # 7
# z = x + y
# print(z) # 87 ## this was the result of concatenation
person = input("Enter your name: ")
print("Hello ", person)
x = input("Enter a number: ") # 8
y = input("Enter another number: ") # 7
z = int(x) + int(y)
print(z) # 15 ## converting to int gives numeric addition instead of concatenation
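# Illustrative extra lines (not part of the original snippet): the same contrast
# shown with fixed example values instead of user input.
a = "8"
b = "7"
print(a + b)           # "87" -> "+" on strings concatenates
print(int(a) + int(b)) # 15   -> converting to int first gives arithmetic addition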
| 21
| 58
| 0.581454
|
d9e507ab08cfb04a79232b6ce6fdb36d2da31014
| 764
|
py
|
Python
|
django_proyecto/bin/django-admin.py
|
Nemo3003/Proyecto_Final_G_8
|
65bc0b74e5746801b373b8714fc6574f2de21f91
|
[
"Apache-2.0"
] | null | null | null |
django_proyecto/bin/django-admin.py
|
Nemo3003/Proyecto_Final_G_8
|
65bc0b74e5746801b373b8714fc6574f2de21f91
|
[
"Apache-2.0"
] | null | null | null |
django_proyecto/bin/django-admin.py
|
Nemo3003/Proyecto_Final_G_8
|
65bc0b74e5746801b373b8714fc6574f2de21f91
|
[
"Apache-2.0"
] | 1
|
2021-09-05T23:42:46.000Z
|
2021-09-05T23:42:46.000Z
|
#!/home/nemo/Documents/Programación/Informatorio-main/Etapa2/django_proyecto_carlos/Proyecto_Final_G_8/django_proyecto/bin/python
# When the django-admin.py deprecation ends, remove this script.
import warnings
from django.core import management
try:
from django.utils.deprecation import RemovedInDjango40Warning
except ImportError:
raise ImportError(
'django-admin.py was deprecated in Django 3.1 and removed in Django '
'4.0. Please manually remove this script from your virtual environment '
'and use django-admin instead.'
)
if __name__ == "__main__":
warnings.warn(
'django-admin.py is deprecated in favor of django-admin.',
RemovedInDjango40Warning,
)
management.execute_from_command_line()
| 34.727273
| 129
| 0.748691
|
969c7e3bb5369b5ef8499c6c3875abac9ade1502
| 13,250
|
py
|
Python
|
cluster_optimizer/scorer.py
|
ndgigliotti/cluster-optimizer
|
40505493ba2e9a2352c5c759d1b2d61c1bc72fe7
|
[
"BSD-3-Clause"
] | null | null | null |
cluster_optimizer/scorer.py
|
ndgigliotti/cluster-optimizer
|
40505493ba2e9a2352c5c759d1b2d61c1bc72fe7
|
[
"BSD-3-Clause"
] | null | null | null |
cluster_optimizer/scorer.py
|
ndgigliotti/cluster-optimizer
|
40505493ba2e9a2352c5c759d1b2d61c1bc72fe7
|
[
"BSD-3-Clause"
] | null | null | null |
from types import MappingProxyType
from typing import Iterable
import numpy as np
from sklearn import metrics
from sklearn.metrics._scorer import _BaseScorer, _passthrough_scorer
from sklearn.pipeline import Pipeline
from sklearn.utils.validation import check_consistent_length, check_is_fitted
def _get_labels(estimator):
"""Gets the cluster labels from an estimator or pipeline."""
if isinstance(estimator, Pipeline):
check_is_fitted(estimator._final_estimator, ["labels_"])
labels = estimator._final_estimator.labels_
else:
check_is_fitted(estimator, ["labels_"])
labels = estimator.labels_
return labels
def _noise_ratio(labels, noise_label=-1):
labels = np.asarray(labels)
return (labels == noise_label).mean()
def _remove_noise_cluster(*arrays, labels, noise_label=-1):
"""Removes the noise cluster found in `labels` (if any) from all `arrays`."""
is_noise = labels == noise_label
arrays = list(arrays)
for i, arr in enumerate(arrays):
arrays[i] = arr[~is_noise].copy()
check_consistent_length(*arrays)
return tuple(arrays)
class _LabelScorerSupervised(_BaseScorer):
def _score(self, estimator, X, labels_true):
"""Evaluate estimator labels relative to y_true.
Parameters
----------
estimator : object
Trained estimator to use for scoring. Must have `labels_` attribute.
X : {array-like, sparse matrix}
Does nothing, since estimator should already have `labels_`.
            Here for API compatibility.
labels_true : array-like
Ground truth target values for cluster labels.
Returns
-------
score : float
Score function applied to cluster labels.
"""
labels = _get_labels(estimator)
labels_true, labels = _remove_noise_cluster(labels_true, labels, labels=labels)
return self._sign * self._score_func(labels_true, labels, **self._kwargs)
def __call__(self, estimator, X, labels_true):
"""Evaluate estimator labels relative to y_true.
Parameters
----------
estimator : object
Trained estimator to use for scoring. Must have `labels_` attribute.
X : {array-like, sparse matrix}
Does nothing, since estimator should already have `labels_`.
            Here for API compatibility.
labels_true : array-like
Ground truth target values for cluster labels.
Returns
-------
score : float
Score function applied to cluster labels.
"""
return self._score(
estimator,
X,
labels_true,
)
class _LabelScorerUnsupervised(_BaseScorer):
def _score(self, estimator, X, labels_true=None):
"""Evaluate cluster labels on X.
Parameters
----------
estimator : object
Trained estimator to use for scoring. Must have `labels_` attribute.
X : {array-like, sparse matrix}
Data that will be used to evaluate cluster labels.
labels_true: array-like
            Does nothing. Here for API compatibility.
Returns
-------
score : float
Score function applied to cluster labels.
"""
labels = _get_labels(estimator)
if isinstance(estimator, Pipeline):
X = estimator[:-1].transform(X)
X, labels = _remove_noise_cluster(X, labels, labels=labels)
return self._sign * self._score_func(X, labels, **self._kwargs)
def __call__(self, estimator, X, labels_true=None):
"""Evaluate predicted target values for X relative to y_true.
Parameters
----------
estimator : object
Trained estimator to use for scoring. Must have `labels_` attribute.
X : {array-like, sparse matrix}
Data that will be used to evaluate cluster labels.
labels_true: array-like
            Does nothing. Here for API compatibility.
Returns
-------
score : float
            Score function applied to cluster labels.
"""
return self._score(
estimator,
X,
)
def make_scorer(
score_func,
*,
ground_truth=True,
greater_is_better=True,
**kwargs,
):
"""Make a clustering scorer from a performance metric or loss function.
This factory function wraps scoring functions for use in
    :class:`~cluster_opt.ClusterOptimizer`.
    It takes a score function, such as :func:`~sklearn.metrics.silhouette_score`,
    :func:`~sklearn.metrics.mutual_info_score`, or
    :func:`~sklearn.metrics.adjusted_rand_score`,
and returns a callable that scores an estimator's output.
The signature of the call is `(estimator, X, y)` where `estimator`
is the model to be evaluated, `X` is the data and `y` is the
ground truth labeling (or `None` in the case of unsupervised models).
Read more in the :ref:`User Guide <scoring>`.
Parameters
----------
score_func : callable
Score function (or loss function) with signature
``score_func(y, y_pred, **kwargs)``.
ground_truth : bool, default=True
Whether score_func uses ground truth labels.
greater_is_better : bool, default=True
Whether score_func is a score function (default), meaning high is good,
or a loss function, meaning low is good. In the latter case, the
scorer object will sign-flip the outcome of the score_func.
**kwargs : additional arguments
Additional parameters to be passed to score_func.
Returns
-------
scorer : callable
Callable object that returns a scalar score; greater is better.
"""
sign = 1 if greater_is_better else -1
if ground_truth:
cls = _LabelScorerSupervised
else:
cls = _LabelScorerUnsupervised
return cls(score_func, sign, kwargs)
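# For example (mirroring the registry below), a ground-truth scorer can be built as
# make_scorer(metrics.homogeneity_score), while an internal, loss-style metric becomes
# make_scorer(metrics.davies_bouldin_score, greater_is_better=False, ground_truth=False).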
SCORERS = {
"silhouette_score": make_scorer(metrics.silhouette_score, ground_truth=False),
"silhouette_score_euclidean": make_scorer(
metrics.silhouette_score, ground_truth=False
),
"silhouette_score_cosine": make_scorer(
metrics.silhouette_score, ground_truth=False, metric="cosine"
),
"davies_bouldin_score": make_scorer(
metrics.davies_bouldin_score, greater_is_better=False, ground_truth=False
),
"calinski_harabasz_score": make_scorer(
metrics.calinski_harabasz_score, ground_truth=False
),
"mutual_info_score": make_scorer(metrics.mutual_info_score),
"normalized_mutual_info_score": make_scorer(metrics.normalized_mutual_info_score),
"adjusted_mutual_info_score": make_scorer(metrics.adjusted_mutual_info_score),
"rand_score": make_scorer(metrics.rand_score),
"adjusted_rand_score": make_scorer(metrics.adjusted_rand_score),
"completeness_score": make_scorer(metrics.completeness_score),
"fowlkes_mallows_score": make_scorer(metrics.fowlkes_mallows_score),
"homogeneity_score": make_scorer(metrics.homogeneity_score),
"v_measure_score": make_scorer(metrics.v_measure_score),
}
SCORERS.update({k.replace("_score", ""): v for k, v in SCORERS.items()})
SCORERS = MappingProxyType(SCORERS)
def get_scorer(scoring):
"""Get a clustering scorer from string.
Parameters
----------
scoring : str or callable
Scoring method as string. If callable it is returned as is.
Returns
-------
scorer : callable
The scorer.
"""
if isinstance(scoring, str):
try:
scorer = SCORERS[scoring]
except KeyError:
raise ValueError(
f"'{scoring}' is not a valid scoring value. "
"Use sorted(cluster_optimizer.scorer.SCORERS.keys()) "
"to get valid options."
)
else:
scorer = scoring
return scorer
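# Illustrative sketch (not part of the original module): how these scorers might be used
# with a fitted clustering estimator. It assumes scikit-learn's KMeans and make_blobs;
# the helper below is hypothetical and is never called at import time.
def _example_scorer_usage():
    from sklearn.cluster import KMeans
    from sklearn.datasets import make_blobs
    X, y_true = make_blobs(n_samples=200, centers=3, random_state=0)
    km = KMeans(n_clusters=3, random_state=0).fit(X)
    # Unsupervised scorer: compares the fitted labels_ against the data itself.
    sil = get_scorer("silhouette")(km, X)
    # Supervised scorer: compares the fitted labels_ against ground-truth labels.
    ari = get_scorer("adjusted_rand")(km, X, y_true)
    return sil, ari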
def check_scoring(estimator, scoring=None):
"""Determine scorer from user options.
A TypeError will be thrown if the estimator cannot be scored.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
scoring : str or callable, default=None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
Returns
-------
scoring : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
"""
if not hasattr(estimator, "fit"):
raise TypeError(
"estimator should be an estimator implementing "
"'fit' method, %r was passed" % estimator
)
if isinstance(scoring, str):
return get_scorer(scoring)
elif callable(scoring):
# Heuristic to ensure user has not passed a metric
module = getattr(scoring, "__module__", None)
if (
hasattr(module, "startswith")
and module.startswith("sklearn.metrics.")
and not module.startswith("sklearn.metrics._scorer")
and not module.startswith("sklearn.metrics.tests.")
):
raise ValueError(
"scoring value %r looks like it is a metric "
"function rather than a scorer. A scorer should "
"require an estimator as its first parameter. "
"Please use `make_scorer` to convert a metric "
"to a scorer." % scoring
)
return get_scorer(scoring)
elif scoring is None:
if hasattr(estimator, "score"):
return _passthrough_scorer
else:
raise TypeError(
"If no scoring is specified, the estimator passed should "
"have a 'score' method. The estimator %r does not." % estimator
)
elif isinstance(scoring, Iterable):
raise ValueError(
"For evaluating multiple scores, use "
"sklearn.model_selection.cross_validate instead. "
"{0} was passed.".format(scoring)
)
else:
raise ValueError(
"scoring value should either be a callable, string or"
" None. %r was passed" % scoring
)
def check_multimetric_scoring(estimator, scoring):
"""Check the scoring parameter in cases when multiple metrics are allowed.
Parameters
----------
estimator : sklearn estimator instance
The estimator for which the scoring will be applied.
scoring : list, tuple or dict
A single string (see :ref:`scoring_parameter`) or a callable
(see :ref:`scoring`) to evaluate the predictions on the test set.
For evaluating multiple metrics, either give a list of (unique) strings
or a dict with names as keys and callables as values.
See :ref:`multimetric_grid_search` for an example.
Returns
-------
scorers_dict : dict
A dict mapping each scorer name to its validated scorer.
"""
err_msg_generic = (
f"scoring is invalid (got {scoring!r}). Refer to the "
"scoring glossary for details: "
"https://scikit-learn.org/stable/glossary.html#term-scoring"
)
if isinstance(scoring, (list, tuple, set)):
err_msg = (
"The list/tuple elements must be unique " "strings of predefined scorers. "
)
invalid = False
try:
keys = set(scoring)
except TypeError:
invalid = True
if invalid:
raise ValueError(err_msg)
if len(keys) != len(scoring):
raise ValueError(
f"{err_msg} Duplicate elements were found in"
f" the given list. {scoring!r}"
)
elif len(keys) > 0:
if not all(isinstance(k, str) for k in keys):
if any(callable(k) for k in keys):
raise ValueError(
f"{err_msg} One or more of the elements "
"were callables. Use a dict of score "
"name mapped to the scorer callable. "
f"Got {scoring!r}"
)
else:
raise ValueError(
f"{err_msg} Non-string types were found "
f"in the given list. Got {scoring!r}"
)
scorers = {
scorer: check_scoring(estimator, scoring=scorer) for scorer in scoring
}
else:
raise ValueError(f"{err_msg} Empty list was given. {scoring!r}")
elif isinstance(scoring, dict):
keys = set(scoring)
if not all(isinstance(k, str) for k in keys):
raise ValueError(
"Non-string types were found in the keys of "
f"the given dict. scoring={scoring!r}"
)
if len(keys) == 0:
raise ValueError(f"An empty dict was passed. {scoring!r}")
scorers = {
key: check_scoring(estimator, scoring=scorer)
for key, scorer in scoring.items()
}
else:
raise ValueError(err_msg_generic)
return scorers
| 33.715013
| 87
| 0.616453
|
8bc9cf88be4621018d56fc7c9e5672a569120c80
| 628
|
py
|
Python
|
autotest/manage.py
|
zuoleilei3253/zuoleilei
|
e188b15a0aa4a9fde00dba15e8300e4b87973e2d
|
[
"Apache-2.0"
] | null | null | null |
autotest/manage.py
|
zuoleilei3253/zuoleilei
|
e188b15a0aa4a9fde00dba15e8300e4b87973e2d
|
[
"Apache-2.0"
] | null | null | null |
autotest/manage.py
|
zuoleilei3253/zuoleilei
|
e188b15a0aa4a9fde00dba15e8300e4b87973e2d
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'autotest.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 28.545455
| 73
| 0.683121
|
7b53743d330e148051b4f33042dc93088260671c
| 1,753
|
py
|
Python
|
tests/conftest.py
|
yammesicka/arrow
|
b8a601139c6eef2c452666b84dcfd9130c300352
|
[
"Apache-2.0"
] | null | null | null |
tests/conftest.py
|
yammesicka/arrow
|
b8a601139c6eef2c452666b84dcfd9130c300352
|
[
"Apache-2.0"
] | null | null | null |
tests/conftest.py
|
yammesicka/arrow
|
b8a601139c6eef2c452666b84dcfd9130c300352
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from datetime import datetime
import pytest
from arrow import arrow, factory, formatter, locales, parser
@pytest.fixture(scope="class")
def time_utcnow(request):
request.cls.arrow = arrow.Arrow.utcnow()
@pytest.fixture(scope="class")
def time_2013_01_01(request):
request.cls.now = arrow.Arrow.utcnow()
request.cls.arrow = arrow.Arrow(2013, 1, 1)
request.cls.datetime = datetime(2013, 1, 1)
@pytest.fixture(scope="class")
def time_2013_02_03(request):
request.cls.arrow = arrow.Arrow(2013, 2, 3, 12, 30, 45, 1)
@pytest.fixture(scope="class")
def time_2013_02_15(request):
request.cls.datetime = datetime(2013, 2, 15, 3, 41, 22, 8923)
request.cls.arrow = arrow.Arrow.fromdatetime(request.cls.datetime)
@pytest.fixture(scope="class")
def arrow_formatter(request):
request.cls.formatter = formatter.DateTimeFormatter()
@pytest.fixture(scope="class")
def arrow_factory(request):
request.cls.factory = factory.ArrowFactory()
@pytest.fixture(scope="class")
def lang_locales(request):
request.cls.locales = locales._locales
@pytest.fixture(scope="class")
def lang_locale(request):
# As locale test classes are prefixed with Test, we are dynamically getting the locale by the test class name.
# TestEnglishLocale -> EnglishLocale
name = request.cls.__name__[4:]
request.cls.locale = locales.get_locale_by_class_name(name)
@pytest.fixture(scope="class")
def dt_parser(request):
request.cls.parser = parser.DateTimeParser()
@pytest.fixture(scope="class")
def dt_parser_regex(request):
request.cls.format_regex = parser.DateTimeParser._FORMAT_RE
@pytest.fixture(scope="class")
def tzinfo_parser(request):
request.cls.parser = parser.TzinfoParser()
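# Illustrative sketch (not part of the original fixtures): a test class would opt in to
# one of these class-scoped fixtures and read the attributes it sets, e.g.
# @pytest.mark.usefixtures("time_2013_01_01")
# class TestExample:
#     def test_year(self):
#         assert self.arrow.year == 2013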
| 25.779412
| 114
| 0.735311
|
af3fb719a951156f418ebb37b0df8f3f9ad8ce83
| 15,968
|
py
|
Python
|
official/nlp/modeling/layers/kernel_attention.py
|
mcasanova1445/models
|
37be0fdb4abccca633bb3199a4e6f3f71cd174d9
|
[
"Apache-2.0"
] | 1
|
2022-02-02T06:29:41.000Z
|
2022-02-02T06:29:41.000Z
|
official/nlp/modeling/layers/kernel_attention.py
|
mdsaifhaider/models
|
7214e17eb425963ec3d0295be215d5d26deaeb32
|
[
"Apache-2.0"
] | 8
|
2020-05-19T00:52:30.000Z
|
2020-06-04T23:57:20.000Z
|
official/nlp/modeling/layers/kernel_attention.py
|
mdsaifhaider/models
|
7214e17eb425963ec3d0295be215d5d26deaeb32
|
[
"Apache-2.0"
] | 2
|
2021-10-07T04:47:04.000Z
|
2021-12-18T04:18:19.000Z
|
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Keras-based kernel attention layer."""
import functools
import math
import tensorflow as tf
_NUMERIC_STABLER = 1e-6
class KernelMask(tf.keras.layers.Layer):
"""Creates kernel attention mask.
inputs: from_tensor: 2D or 3D Tensor of shape
[batch_size, from_seq_length, ...].
mask: a Tensor of shape [batch_size, from_seq_length] which indicates
which part of the inputs we should not attend.
Returns:
float Tensor of shape [batch_size, from_seq_length] that KernelAttention
takes as mask.
"""
def call(self, inputs, mask):
mask = tf.cast(mask, inputs.dtype)
return mask
def create_projection_matrix(m, d, seed=None):
r"""Constructs the matrix of random projections.
Constructs a matrix of random orthogonal projections. Each projection vector
  has direction chosen uniformly at random and length taken from the
  \chi(d) distribution.
Args:
m: number of random projections.
d: dimensionality of each random projection.
    seed: random seed used to construct projections. If None, we use the
      stateful API.
Returns:
The matrix of random projections of the shape [m, d].
"""
nb_full_blocks = math.ceil(m / d)
block_list = tf.TensorArray(tf.float32,
size=tf.cast(nb_full_blocks, dtype=tf.int32))
stateful = False
if seed is None:
stateful = True
# dummy seed to make sure the graph compiles though the path is not taken.
seed = tf.constant([0, 1])
current_seed = seed
for i in range(nb_full_blocks):
if stateful:
unstructured_block = tf.random.normal((d, d))
else:
unstructured_block = tf.random.stateless_normal((d, d), seed=current_seed)
current_seed = tf.random.stateless_uniform([2],
seed=current_seed,
minval=None,
dtype=tf.int32)
q, _ = tf.linalg.qr(unstructured_block)
q = tf.transpose(q)
block_list = block_list.write(i, q)
final_matrix = block_list.concat()[:m]
  if stateful:
multiplier = tf.norm(tf.random.normal((m, d)), axis=1)
else:
multiplier = tf.norm(
tf.random.stateless_normal((m, d), seed=current_seed), axis=1)
return tf.linalg.matmul(tf.linalg.diag(multiplier), final_matrix)
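# For example, create_projection_matrix(256, 64, seed=tf.constant([0, 1])) yields a
# [256, 64] float32 matrix: its rows come from orthogonal blocks and are then rescaled
# so that their norms follow the chi(64) distribution described above.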
def _generalized_kernel(x, projection_matrix, f, h):
"""Generalized kernel in RETHINKING ATTENTION WITH PERFORMERS.
Args:
x: The feature being transformed with shape [B, T, N ,H].
    projection_matrix: The matrix with shape [M, H] that we project x to, where
M is the number of projections.
f: A non-linear function applied on x or projected x.
    h: A multiplier which is a function of x, applied after x is projected and
transformed. Only applied if projection_matrix is not None.
Returns:
Transformed feature.
"""
if projection_matrix is None:
return h(x) * f(x)
else:
x_projected = tf.einsum("BTNH,MH->BTNM", x, projection_matrix)
return h(x) * f(x_projected) / tf.math.sqrt(
tf.cast(tf.shape(projection_matrix)[0], tf.float32))
# pylint: disable=g-long-lambda
_TRANSFORM_MAP = {
"elu":
functools.partial(
_generalized_kernel,
f=lambda x: tf.keras.activations.elu(x) + 1,
h=lambda x: 1),
"relu":
functools.partial(
_generalized_kernel, f=tf.keras.activations.relu, h=lambda x: 1),
"square":
functools.partial(
_generalized_kernel, f=tf.math.square, h=lambda x: 1),
"exp":
functools.partial(
_generalized_kernel,
# Avoid exp explosion by shifting.
f=lambda x: tf.math.exp(
x - tf.math.reduce_max(x, axis=[1, 2, 3], keepdims=True)),
h=lambda x: tf.math.exp(
-0.5 * tf.math.reduce_sum(
tf.math.square(x), axis=-1, keepdims=True)),),
"expmod":
functools.partial(
_generalized_kernel,
# Avoid exp explosion by shifting.
f=lambda x: tf.math.exp(x - tf.math.reduce_max(
x, axis=[1, 2, 3], keepdims=True)),
h=lambda x: tf.math.exp(-0.5 * tf.math.sqrt(
tf.cast(tf.shape(x)[-1], tf.float32))),
),
"identity":
functools.partial(_generalized_kernel, f=lambda x: x, h=lambda x: 1)
}
# pylint: enable=g-long-lambda
class KernelAttention(tf.keras.layers.MultiHeadAttention):
"""A variant of efficient transformers which replaces softmax with kernels.
This module combines ideas from the two following papers:
Rethinking Attention with Performers
(https://arxiv.org/abs/2009.14794)
- exp (Lemma 1, positive), relu
- random/deterministic projection
Transformers are RNNs: Fast Autoregressive Transformers with Linear Attention
(https://arxiv.org/abs/2006.16236)
- elu
with the theory of approximating angular Performer kernels from go/performer.
The module enables computing efficient attention in both: long sequence and
shorter sequence regimes. In the former setting, the attention matrix is never
explicitly computed and instead its low-rank decomposition obtained with given
kernel feature maps is leveraged to conduct attention module calculations
(see: https://arxiv.org/abs/2006.16236). In the latter setting, attention
matrix is constructed, but kernel features providing dimensionality reduction
are applied, resulting in more efficient computation of the attention matrix.
"""
def __init__(self,
feature_transform="exp",
num_random_features=256,
seed=0,
redraw=False,
is_short_seq=False,
begin_kernel=0,
scale=None,
**kwargs):
r"""Constructor of KernelAttention.
Args:
      feature_transform: A non-linear transform of the keys and queries.
Possible transforms are "elu", "relu", "square", "exp", "expmod",
"identity".
num_random_features: Number of random features to be used for projection.
        If num_random_features <= 0, no projection is used before the transform.
seed: The seed to begin drawing random features. Once the seed is set, the
        pseudo-random number generation is deterministic. Users should pass
        different seeds for different layers. For multi-worker, each layer will use the
same projection at each step.
redraw: Whether to redraw projection every forward pass during training.
The argument is only effective when num_random_features > 0.
is_short_seq: boolean predicate indicating whether input data consists of
very short sequences or not; in most cases this should be False
(default option).
begin_kernel: Apply kernel_attention after this sequence id and apply
softmax attention before this.
scale: The value to scale the dot product as described in `Attention Is
All You Need`. If None, we use 1/sqrt(dk) as described in the paper.
**kwargs: The same arguments `MultiHeadAttention` layer.
"""
if feature_transform not in _TRANSFORM_MAP:
raise ValueError("Unsupported feature_transform. The supported "
"feature_transform are %s. "
"Got '%s'." % (_TRANSFORM_MAP.keys(), feature_transform))
if num_random_features <= 0 and redraw:
raise ValueError(
"There is nothing to redraw when num_random_features <= 0.")
self._feature_transform = feature_transform
self._num_random_features = num_random_features
self._redraw = redraw
self._is_short_seq = is_short_seq
self._begin_kernel = begin_kernel
# We use the seed for two scenarios:
# 1. inference
# 2. no redraw
self._seed = seed
super().__init__(**kwargs)
if scale is None:
self._scale = 1.0 / math.sqrt(float(self._key_dim))
else:
self._scale = scale
self._projection_matrix = None
if num_random_features > 0:
self._projection_matrix = create_projection_matrix(
self._num_random_features, self._key_dim,
tf.constant([self._seed, self._seed + 1]))
def _compute_attention(self,
query,
key,
value,
feature_transform,
is_short_seq,
attention_mask=None,
training=False,
numeric_stabler=_NUMERIC_STABLER):
"""Applies kernel attention with query, key, value tensors.
This function defines the computation inside `call` with projected
multi-head Q, K, V inputs. Users can override this function for customized
attention implementation.
Args:
query: Projected query `Tensor` of shape `[B, T, N, key_dim]`.
key: Projected key `Tensor` of shape `[B, S, N, key_dim]`.
value: Projected value `Tensor` of shape `[B, S, N, value_dim]`.
      feature_transform: A non-linear transform of the keys and queries.
is_short_seq: boolean predicate indicating whether input data consists of
short or long sequences; usually short sequence is defined as having
length L <= 1024.
attention_mask: a boolean mask of shape `[B, S]`, that prevents
        attending to masked positions. Note that the mask is only applied to
the keys. User may want to mask the output if query contains pads.
training: Python boolean indicating whether the layer should behave in
training mode (adding dropout) or in inference mode (doing nothing).
numeric_stabler: A scalar value added to avoid divide by 0.
Returns:
attention_output: Multi-headed outputs of attention computation.
"""
projection_matrix = None
if self._num_random_features > 0:
if self._redraw and training:
projection_matrix = create_projection_matrix(self._num_random_features,
self._key_dim)
else:
projection_matrix = self._projection_matrix
if is_short_seq:
# Note: Applying scalar multiply at the smaller end of einsum improves
# XLA performance, but may introduce slight numeric differences in
# the Transformer attention head.
query = query * self._scale
else:
      # Note: we suspect splitting the scale to key, query yields smaller
# approximation variance when random projection is used.
# For simplicity, we also split when there's no random projection.
key *= math.sqrt(self._scale)
query *= math.sqrt(self._scale)
key = _TRANSFORM_MAP[feature_transform](key, projection_matrix)
query = _TRANSFORM_MAP[feature_transform](query, projection_matrix)
if attention_mask is not None:
key = tf.einsum("BSNH,BS->BSNH", key, attention_mask)
if is_short_seq:
attention_scores = tf.einsum("BTNH,BSNH->BTSN", query, key)
attention_scores = tf.nn.softmax(attention_scores, axis=2)
attention_output = tf.einsum("BTSN,BSNH->BTNH", attention_scores, value)
else:
kv = tf.einsum("BSNH,BSND->BNDH", key, value)
denominator = 1.0 / (
tf.einsum("BTNH,BNH->BTN", query, tf.reduce_sum(key, axis=1)) +
_NUMERIC_STABLER)
attention_output = tf.einsum(
"BTNH,BNDH,BTN->BTND", query, kv, denominator)
return attention_output
def _build_from_signature(self, query, value, key=None):
super()._build_from_signature(query=query, value=value, key=key) # pytype: disable=attribute-error # typed-keras
if self._begin_kernel > 0:
common_kwargs = dict(
kernel_initializer=self._kernel_initializer,
bias_initializer=self._bias_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activity_regularizer=self._activity_regularizer,
kernel_constraint=self._kernel_constraint,
bias_constraint=self._bias_constraint)
self._output_dense_softmax = self._make_output_dense(
self._query_shape.rank - 1, common_kwargs,
name="attention_output_softmax")
self._dropout_softmax = tf.keras.layers.Dropout(rate=self._dropout)
def call(self,
query,
value,
key=None,
attention_mask=None,
training=False):
"""Compute attention with kernel mechanism.
Args:
query: Query `Tensor` of shape `[B, T, dim]`.
value: Value `Tensor` of shape `[B, S, dim]`.
key: Optional key `Tensor` of shape `[B, S, dim]`. If not given, will use
`value` for both `key` and `value`, which is the most common case.
attention_mask: a boolean mask of shape `[B, S]`, that prevents
        attending to masked positions. Note that the mask is only applied to
the keys. User may want to mask the output if query contains pads.
training: Python boolean indicating whether the layer should behave in
training mode (adding dropout) or in inference mode (doing nothing).
Returns:
Multi-headed outputs of attention computation.
"""
if not self._built_from_signature:
self._build_from_signature(query=query, value=value, key=key)
if key is None:
key = value
# N = `num_attention_heads`
# H = `size_per_head`
# `query` = [B, T, N ,H]
query = self._query_dense(query)
# `key` = [B, S, N, H]
key = self._key_dense(key)
# `value` = [B, S, N, D]
value = self._value_dense(value)
if self._begin_kernel > 0:
attention_output_softmax = self._compute_attention(
query[:, :self._begin_kernel],
key, value, "identity", True, attention_mask, training)
attention_output_softmax = self._dropout_softmax(attention_output_softmax)
attention_output_softmax = self._output_dense_softmax(
attention_output_softmax)
attention_output_kernel = self._compute_attention(
query[:, self._begin_kernel:],
key, value, self._feature_transform, self._is_short_seq,
attention_mask, training)
attention_output_kernel = self._dropout_layer(attention_output_kernel)
attention_output_kernel = self._output_dense(
attention_output_kernel)
attention_output = tf.concat(
[attention_output_softmax, attention_output_kernel], axis=1)
else:
attention_output = self._compute_attention(
query, key, value, self._feature_transform,
self._is_short_seq, attention_mask, training)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_output = self._dropout_layer(attention_output)
attention_output = self._output_dense(attention_output)
return attention_output
def get_config(self):
config = {
"feature_transform": self._feature_transform,
"num_random_features": self._num_random_features,
"seed": self._seed,
"redraw": self._redraw,
"is_short_seq": self._is_short_seq,
"begin_kernel": self._begin_kernel,
"scale": self._scale,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
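# Illustrative sketch (not part of the original file): a minimal smoke test of the layer
# on random tensors. The helper name, shapes and hyperparameters are arbitrary examples.
def _example_kernel_attention():
  layer = KernelAttention(
      num_heads=4, key_dim=32, feature_transform="exp",
      num_random_features=128, seed=0)
  query = tf.random.normal((2, 16, 64))  # [batch, target_len, dim]
  value = tf.random.normal((2, 16, 64))  # [batch, source_len, dim]
  mask = tf.ones((2, 16))  # attend to every key position
  return layer(query, value, attention_mask=mask, training=False)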
| 40.221662
| 118
| 0.664704
|
1244fdfe1f5b183d2e6247fbd1fced33da02c2b4
| 2,720
|
py
|
Python
|
setup.py
|
jtpio/p5-kernel
|
35ebc2e3f0720cfd2af945b04dddc69744f1eb29
|
[
"BSD-3-Clause"
] | 5
|
2021-10-01T07:29:50.000Z
|
2022-02-27T01:03:16.000Z
|
setup.py
|
jtpio/p5-kernel
|
35ebc2e3f0720cfd2af945b04dddc69744f1eb29
|
[
"BSD-3-Clause"
] | 4
|
2021-09-29T14:57:08.000Z
|
2022-03-23T12:52:24.000Z
|
setup.py
|
jtpio/p5-kernel
|
35ebc2e3f0720cfd2af945b04dddc69744f1eb29
|
[
"BSD-3-Clause"
] | 4
|
2021-10-01T07:19:08.000Z
|
2022-03-21T09:32:43.000Z
|
"""
jupyterlite-p5-kernel setup
"""
import json
import sys
from pathlib import Path
import setuptools
HERE = Path(__file__).parent.resolve()
# The name of the project
NAME = "jupyterlite-p5-kernel"
PACKAGE = NAME.replace("-", "_")
src_path = HERE / "packages/p5-kernel-extension"
lab_path = HERE / NAME.replace("-", "_") / "labextension"
# Representative files that should exist after a successful build
ensured_targets = [str(lab_path / "package.json"), str(lab_path / "static/style.js")]
labext_name = "@jupyterlite/p5-kernel-extension"
data_files_spec = [
(
"share/jupyter/labextensions/%s" % labext_name,
str(lab_path.relative_to(HERE)),
"**",
),
("share/jupyter/labextensions/%s" % labext_name, str("."), "install.json"),
]
long_description = (HERE / "README.md").read_text()
# Get the package info from package.json
pkg_json = json.loads((src_path / "package.json").read_bytes())
version = (
pkg_json["version"]
.replace("-alpha.", "a")
.replace("-beta.", "b")
.replace("-rc.", "rc")
)
setup_args = dict(
name=NAME,
version=version,
url=pkg_json["homepage"],
author=pkg_json["author"]["name"],
author_email=pkg_json["author"]["email"],
description=pkg_json["description"],
license=pkg_json["license"],
long_description=long_description,
long_description_content_type="text/markdown",
packages=setuptools.find_packages(),
install_requires=[],
zip_safe=False,
include_package_data=True,
python_requires=">=3.6",
platforms="Linux, Mac OS X, Windows",
keywords=["Jupyter", "JupyterLab", "JupyterLab3"],
classifiers=[
"License :: OSI Approved :: BSD License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Framework :: Jupyter",
],
)
try:
from jupyter_packaging import wrap_installers, npm_builder, get_data_files
post_develop = npm_builder(build_cmd="build", build_dir=lab_path)
setup_args["cmdclass"] = wrap_installers(
post_develop=post_develop, ensured_targets=ensured_targets
)
setup_args["data_files"] = get_data_files(data_files_spec)
except ImportError as e:
import logging
logging.basicConfig(format="%(levelname)s: %(message)s")
logging.warning(
"Build tool `jupyter-packaging` is missing. Install it with pip or conda."
)
if not ("--name" in sys.argv or "--version" in sys.argv):
raise e
if __name__ == "__main__":
setuptools.setup(**setup_args)
| 29.247312
| 85
| 0.663235
|
494136e63baee6a326f46c20739603b84b59eb25
| 1,826
|
py
|
Python
|
selfdrive/locationd/models/constants.py
|
woori2875/test
|
79a0a597fed5a399cb5700ed39a847aca7f971c2
|
[
"MIT"
] | null | null | null |
selfdrive/locationd/models/constants.py
|
woori2875/test
|
79a0a597fed5a399cb5700ed39a847aca7f971c2
|
[
"MIT"
] | null | null | null |
selfdrive/locationd/models/constants.py
|
woori2875/test
|
79a0a597fed5a399cb5700ed39a847aca7f971c2
|
[
"MIT"
] | null | null | null |
import os
GENERATED_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), 'generated'))
class ObservationKind:
UNKNOWN = 0
NO_OBSERVATION = 1
GPS_NED = 2
ODOMETRIC_SPEED = 3
PHONE_GYRO = 4
GPS_VEL = 5
PSEUDORANGE_GPS = 6
PSEUDORANGE_RATE_GPS = 7
SPEED = 8
NO_ROT = 9
PHONE_ACCEL = 10
ORB_POINT = 11
ECEF_POS = 12
CAMERA_ODO_TRANSLATION = 13
CAMERA_ODO_ROTATION = 14
ORB_FEATURES = 15
MSCKF_TEST = 16
FEATURE_TRACK_TEST = 17
LANE_PT = 18
IMU_FRAME = 19
PSEUDORANGE_GLONASS = 20
PSEUDORANGE_RATE_GLONASS = 21
PSEUDORANGE = 22
PSEUDORANGE_RATE = 23
ECEF_VEL = 31
ECEF_ORIENTATION_FROM_GPS = 32
ROAD_FRAME_XY_SPEED = 24 # (x, y) [m/s]
ROAD_FRAME_YAW_RATE = 25 # [rad/s]
STEER_ANGLE = 26 # [rad]
ANGLE_OFFSET_FAST = 27 # [rad]
STIFFNESS = 28 # [-]
STEER_RATIO = 29 # [-]
ROAD_FRAME_X_SPEED = 30 # (x) [m/s]
ROAD_ROLL = 31 # [rad]
names = [
'Unknown',
'No observation',
'GPS NED',
'Odometric speed',
'Phone gyro',
'GPS velocity',
'GPS pseudorange',
'GPS pseudorange rate',
'Speed',
'No rotation',
'Phone acceleration',
'ORB point',
'ECEF pos',
'camera odometric translation',
'camera odometric rotation',
'ORB features',
'MSCKF test',
'Feature track test',
'Lane ecef point',
'imu frame eulers',
'GLONASS pseudorange',
'GLONASS pseudorange rate',
'Road Frame x,y speed',
'Road Frame yaw rate',
'Steer Angle',
'Fast Angle Offset',
'Stiffness',
'Steer Ratio',
]
@classmethod
def to_string(cls, kind):
return cls.names[kind]
SAT_OBS = [ObservationKind.PSEUDORANGE_GPS,
ObservationKind.PSEUDORANGE_RATE_GPS,
ObservationKind.PSEUDORANGE_GLONASS,
ObservationKind.PSEUDORANGE_RATE_GLONASS]
| 22
| 85
| 0.648412
|
98cb07dc94f0c16aa927397cfb54eb34b666bcef
| 212
|
py
|
Python
|
dependencies/amitools-0.1.0/amitools/vamos/lib/util/UtilStruct.py
|
limi/AGSImager
|
d3771800308e61a7a07df4a9b361e5bd5ba9e409
|
[
"MIT"
] | null | null | null |
dependencies/amitools-0.1.0/amitools/vamos/lib/util/UtilStruct.py
|
limi/AGSImager
|
d3771800308e61a7a07df4a9b361e5bd5ba9e409
|
[
"MIT"
] | null | null | null |
dependencies/amitools-0.1.0/amitools/vamos/lib/util/UtilStruct.py
|
limi/AGSImager
|
d3771800308e61a7a07df4a9b361e5bd5ba9e409
|
[
"MIT"
] | null | null | null |
from amitools.vamos.AmigaStruct import AmigaStruct
# TagItem
class TagItemStruct(AmigaStruct):
_name = "TagItem"
_format = [
('ULONG','ti_Tag'),
('ULONG','ti_Data')
]
TagItemDef = TagItemStruct()
| 17.666667
| 50
| 0.688679
|
hexsha: 26865bce9ebad1b77a7c6d58a03a501b6a8d98f9 | size: 3,147 | ext: py | lang: Python
max_stars_repo_path: tests/support/dirutils.py | max_stars_repo_name: dalito/linkml-runtime | max_stars_repo_head_hexsha: 192a33962aed06f727ffad1a697003ac6ec85c2c | max_stars_repo_licenses: ["CC0-1.0"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: tests/support/dirutils.py | max_issues_repo_name: dalito/linkml-runtime | max_issues_repo_head_hexsha: 192a33962aed06f727ffad1a697003ac6ec85c2c | max_issues_repo_licenses: ["CC0-1.0"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: tests/support/dirutils.py | max_forks_repo_name: dalito/linkml-runtime | max_forks_repo_head_hexsha: 192a33962aed06f727ffad1a697003ac6ec85c2c | max_forks_repo_licenses: ["CC0-1.0"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
import filecmp
import os
from contextlib import redirect_stdout
from io import StringIO
from typing import Optional

from tests.support.filters import ldcontext_metadata_filter


def make_and_clear_directory(dirbase: str) -> None:
    """ Make dirbase if necessary and then clear generated files """
    import shutil

    safety_file = os.path.join(dirbase, "generated")
    if os.path.exists(dirbase):
        if not os.path.exists(safety_file):
            raise FileNotFoundError("'generated' guard file not found in {}".format(safety_file))
        shutil.rmtree(dirbase)
    os.makedirs(dirbase)
    with open(os.path.join(dirbase, "generated"), "w") as f:
        f.write("Generated for safety. Directory will not be cleared if this file is not present")


def file_text(txt_or_fname: str) -> str:
    """
    Determine whether text_or_fname is a file name or a string and, if a file name, read it
    :param txt_or_fname:
    :return:
    """
    if len(txt_or_fname) > 4 and '\n' not in txt_or_fname:
        with open(txt_or_fname) as ef:
            return ef.read()
    return txt_or_fname


class dircmp(filecmp.dircmp):
    """
    Compare the content of dir1 and dir2. In contrast with filecmp.dircmp, this
    subclass compares the content of files with the same path.
    """

    def phase3(self):
        """
        Find out differences between common files.
        Ensure we are using content comparison with shallow=False.
        """
        fcomp = filecmp.cmpfiles(self.left, self.right, self.common_files,
                                 shallow=False)
        self.same_files, self.diff_files, self.funny_files = fcomp

    filecmp.dircmp.methodmap['same_files'] = phase3
    filecmp.dircmp.methodmap['diff_files'] = phase3
    filecmp.dircmp.methodmap['funny_files'] = phase3


def _do_cmp(f1, f2):
    bufsize = filecmp.BUFSIZE
    with open(f1, 'rb') as fp1, open(f2, 'rb') as fp2:
        while True:
            b1 = fp1.read(bufsize)
            b2 = fp2.read(bufsize)
            if f1.endswith('.context.jsonld'):
                b1 = ldcontext_metadata_filter(b1)
                b2 = ldcontext_metadata_filter(b2)
            if b1 != b2:
                return False
            if not b1:
                return True


filecmp._do_cmp = _do_cmp


def are_dir_trees_equal(dir1: str, dir2: str) -> Optional[str]:
    """
    Compare two directories recursively. Files in each directory are
    assumed to be equal if their names and contents are equal.

    @param dir1: First directory path
    @param dir2: Second directory path
    @return: None if directories match, else summary of differences
    """
    def has_local_diffs(dc: dircmp) -> bool:
        return bool(dc.diff_files or dc.funny_files or dc.left_only or dc.right_only)

    def has_diffs(dc: dircmp) -> bool:
        return has_local_diffs(dc) or any(has_diffs(sd) for sd in dc.subdirs.values())

    dirs_cmp = dircmp(dir1, dir2, ignore=['generated'])
    if has_diffs(dirs_cmp):
        output = StringIO()
        with redirect_stdout(output):
            dirs_cmp.report_full_closure()
        return output.getvalue()
    return None
avg_line_length: 32.78125 | max_line_length: 99 | alphanum_fraction: 0.653956
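A short usage sketch for the helpers above, assuming the module is importable as tests.support.dirutils inside that test suite:

import os
import tempfile

from tests.support.dirutils import are_dir_trees_equal, make_and_clear_directory

with tempfile.TemporaryDirectory() as base:
    expected = os.path.join(base, "expected")
    actual = os.path.join(base, "actual")
    for d in (expected, actual):
        os.makedirs(d)
        with open(os.path.join(d, "model.yaml"), "w") as f:
            f.write("id: https://example.org/test\n")

    # Identical trees compare equal (None); any difference yields a printable report.
    assert are_dir_trees_equal(expected, actual) is None

    with open(os.path.join(actual, "extra.txt"), "w") as f:
        f.write("surprise")
    assert are_dir_trees_equal(expected, actual) is not None

    # Creates the directory and drops the 'generated' guard file into it;
    # a later call will only wipe it because that guard file is present.
    make_and_clear_directory(os.path.join(base, "output"))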
hexsha: 701c70739bef1002514a0630e3ab4ff74611eae1 | size: 418 | ext: py | lang: Python
max_stars_repo_path: onadata/apps/fsforms/migrations/0047_fieldsightxf_from_project.py | max_stars_repo_name: awemulya/fieldsight-kobocat | max_stars_repo_head_hexsha: f302d084e30fb637d43ec638c701e01a3dddc721 | max_stars_repo_licenses: ["BSD-2-Clause"] | max_stars_count: 38 | max_stars_repo_stars_event_min_datetime: 2017-02-28T05:39:40.000Z | max_stars_repo_stars_event_max_datetime: 2019-01-16T04:39:04.000Z
max_issues_repo_path: onadata/apps/fsforms/migrations/0047_fieldsightxf_from_project.py | max_issues_repo_name: awemulya/fieldsightt | max_issues_repo_head_hexsha: f302d084e30fb637d43ec638c701e01a3dddc721 | max_issues_repo_licenses: ["BSD-2-Clause"] | max_issues_count: 20 | max_issues_repo_issues_event_min_datetime: 2017-04-27T09:14:27.000Z | max_issues_repo_issues_event_max_datetime: 2019-01-17T06:35:52.000Z
max_forks_repo_path: onadata/apps/fsforms/migrations/0047_fieldsightxf_from_project.py | max_forks_repo_name: awemulya/fieldsightt | max_forks_repo_head_hexsha: f302d084e30fb637d43ec638c701e01a3dddc721 | max_forks_repo_licenses: ["BSD-2-Clause"] | max_forks_count: 5 | max_forks_repo_forks_event_min_datetime: 2017-02-22T12:25:19.000Z | max_forks_repo_forks_event_max_datetime: 2019-01-15T11:16:40.000Z
content:
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('fsforms', '0046_fieldsightxf_is_survey'),
    ]

    operations = [
        migrations.AddField(
            model_name='fieldsightxf',
            name='from_project',
            field=models.BooleanField(default=True),
        ),
    ]
avg_line_length: 20.9 | max_line_length: 52 | alphanum_fraction: 0.619617
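The migration above adds a single boolean column. A small stand-alone sketch of the same AddField operation, inspected directly through Django's migration API (no project settings are needed for this much); the printed description text is approximate:

from django.db import migrations, models

op = migrations.AddField(
    model_name='fieldsightxf',
    name='from_project',
    field=models.BooleanField(default=True),
)
print(op.describe())  # e.g. "Add field from_project to fieldsightxf"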
hexsha: 2a7a855408557230b50334db33f4e966ba447fa5 | size: 24 | ext: py | lang: Python
max_stars_repo_path: mayan/apps/ocr/literals.py | max_stars_repo_name: wan1869/dushuhu | max_stars_repo_head_hexsha: 934dd178e67140cffc6b9203e793fdf8bbc73a54 | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: mayan/apps/ocr/literals.py | max_issues_repo_name: wan1869/dushuhu | max_issues_repo_head_hexsha: 934dd178e67140cffc6b9203e793fdf8bbc73a54 | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: mayan/apps/ocr/literals.py | max_forks_repo_name: wan1869/dushuhu | max_forks_repo_head_hexsha: 934dd178e67140cffc6b9203e793fdf8bbc73a54 | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: 1 | max_forks_repo_forks_event_min_datetime: 2021-04-30T09:44:14.000Z | max_forks_repo_forks_event_max_datetime: 2021-04-30T09:44:14.000Z
content:
DO_OCR_RETRY_DELAY = 10
avg_line_length: 12 | max_line_length: 23 | alphanum_fraction: 0.833333