hexsha (stringlengths 40–40) | size (int64 3–1.03M) | ext (stringclasses, 10 values) | lang (stringclasses, 1 value) | max_stars_repo_path (stringlengths 3–972) | max_stars_repo_name (stringlengths 6–130) | max_stars_repo_head_hexsha (stringlengths 40–78) | max_stars_repo_licenses (listlengths 1–10) | max_stars_count (int64 1–191k ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24–24 ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24–24 ⌀) | max_issues_repo_path (stringlengths 3–972) | max_issues_repo_name (stringlengths 6–130) | max_issues_repo_head_hexsha (stringlengths 40–78) | max_issues_repo_licenses (listlengths 1–10) | max_issues_count (int64 1–116k ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24–24 ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24–24 ⌀) | max_forks_repo_path (stringlengths 3–972) | max_forks_repo_name (stringlengths 6–130) | max_forks_repo_head_hexsha (stringlengths 40–78) | max_forks_repo_licenses (listlengths 1–10) | max_forks_count (int64 1–105k ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24–24 ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24–24 ⌀) | content (stringlengths 3–1.03M) | avg_line_length (float64 1.13–941k) | max_line_length (int64 2–941k) | alphanum_fraction (float64 0–1) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d7c4301e9525afa3df161c4bb149791dda0f63f5 | 7,781 | py | Python | flask_app/helpers/preprocess_text.py | julien-bonnefoy/website | a00d70697cc3a367dcdb32ca62ed29493029cf91 | ["Apache-2.0"] | null | null | null | flask_app/helpers/preprocess_text.py | julien-bonnefoy/website | a00d70697cc3a367dcdb32ca62ed29493029cf91 | ["Apache-2.0"] | null | null | null | flask_app/helpers/preprocess_text.py | julien-bonnefoy/website | a00d70697cc3a367dcdb32ca62ed29493029cf91 | ["Apache-2.0"] | null | null | null |
import pandas as pd
from tqdm import trange
from IPython.display import display, HTML
from sklearn.feature_extraction.text import CountVectorizer
from tqdm.notebook import tqdm
def drop_na_values(df, subset=None):
    n_rows_before = df.shape[0]
    if subset is None:
        print('no column specified...drop NA global')
        df.dropna(inplace=True)
    else:
        for column in subset:
            print(f'dropping NA in “{column}“')
            df.dropna(subset=[column], inplace=True)
    print('\tInitial number of rows: {}'.format(n_rows_before))
    n_rows_after = df.shape[0]
    print('\tnumber of NA rows removed: {}'.format(n_rows_before - n_rows_after))
    print('\tnumber of rows after drop NA : {}'.format(n_rows_after))
    df.reset_index(drop=True, inplace=True)
    print('\tindex reset\n')
    return df
def drop_duplicate_values(df, subset=None, printme=False):
n_rows_before = df.shape[0]
if subset is None:
if printme:
print(f'no column specified...drop duplicates')
df.drop_duplicates(inplace=True)
else:
if printme:
print(f'dropping duplicates in “{subset}“')
df.drop_duplicates(subset=subset, inplace=True)
if printme:
print('\tInitial number of rows: {}'.format(n_rows_before))
n_rows_after = df.shape[0]
if printme:
print('\tnumber of duplicate rows removed: {}'.format(n_rows_before - n_rows_after))
print('\tnumber of rows after drop duplicates : {}'.format(n_rows_after))
df.reset_index(drop=True, inplace=True)
if printme:
print('\tindex reset\n')
return df
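# Illustrative usage sketch (not part of the original module): both cleaning helpers
# mutate the passed DataFrame in place and return it with a reset index. The tiny
# DataFrame below is hypothetical example data.
if __name__ == "__main__":
    _example_df = pd.DataFrame({"text": ["formation python", "formation python", None, "formation excel"]})
    _example_df = drop_na_values(_example_df, subset=["text"])
    _example_df = drop_duplicate_values(_example_df, subset=["text"], printme=True)
    print(_example_df)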
def detect_language(df, column):
    import langid
    df['detected_lang'] = pd.Series(dtype=str)
    for i in trange(len(df)):
        text = df[column][i]
        detected_lang = langid.classify(text)[0]
        # use .loc instead of chained indexing so the assignment is not lost on a copy
        df.loc[i, 'detected_lang'] = detected_lang
    return df['detected_lang']
def remove_not_description(df, column):
import re
exp_5 = re.compile("[Dd]emande de formation n°")
exp_5_list = []
for index, text in enumerate(df[column]):
if exp_5.search(text):
exp_5_list.append(index)
print(f'Number of rows "Demande de formation n°" : {len(exp_5_list)}')
df = df.iloc[[index for index in range(df.shape[0]) if index not in exp_5_list]]
df.reset_index(drop=True, inplace=True)
exp_6 = re.compile("[Rr][ée]gularisation\s+[cfC]")
exp_6_list = []
for index, text in enumerate(df[column]):
if exp_6.search(text):
exp_6_list.append(index)
print(f'Number of rows "Régularisation CPF" : {len(exp_6_list)}')
df = df.iloc[[index for index in range(df.shape[0]) if index not in exp_6_list]]
df.reset_index(drop=True, inplace=True)
return df
def remove_accents(text):
import re
# remove accents
text = re.sub(r'[àâ]', r'a', str(text))
text = re.sub(r'[ÀÂ]', r'A', str(text))
text = re.sub(r'[éèêë]', r'e', str(text))
text = re.sub(r'[ÉÈÊ]', r'E', str(text))
text = re.sub(r'[îï]', r'i', str(text))
text = re.sub(r'[Ï]', r'I', str(text))
text = re.sub(r'[ô]', r'o', str(text))
text = re.sub(r'[Ô]', r'O', str(text))
text = re.sub(r'[ûùü]', r'u', str(text))
text = re.sub(r'[œ]', r'oe', str(text))
text = re.sub(r'[ç]', r'c', str(text))
return text
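# Illustrative usage sketch (not part of the original module): remove_accents maps
# common French accented characters to ASCII equivalents; the sample string is hypothetical.
if __name__ == "__main__":
    assert remove_accents("Régularisation créée à Paris") == "Regularisation creee a Paris"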
def space_lower_upper(df, column):
import re
df[column] = [re.sub(r'([a-z])([A-Z])', r'\g<1> \g<2>', text) for text in df[column]]
return df
def space_symbol_letter(df, column):
import re
df[column] = [re.sub(r'([a-zA-Zύ0-9])([^a-zA-Z\s0-9])', r'\g<1> \g<2>', text) for text in df[column]]
df[column] = [re.sub(r'([^a-zA-Z\s0-9])([a-zA-Zύ0-9])', r'\g<1> \g<2>', text) for text in df[column]]
return df
def space_digit_letter(df, column):
import re
df[column] = [re.sub(r'([a-zA-Z])([0-9])', r'\g<1> \g<2>', text) for text in df[column]]
df[column] = [re.sub(r'([0-9])([a-zA-Z])', r'\g<1> \g<2>', text) for text in df[column]]
return df
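# Illustrative usage sketch (not part of the original module): the three spacing helpers
# above are typically chained to break apart glued tokens. The DataFrame and column name
# below are hypothetical example data.
if __name__ == "__main__":
    _df_spacing = pd.DataFrame({"desc": ["FormationPython3:niveau2"]})
    _df_spacing = space_lower_upper(_df_spacing, "desc")
    _df_spacing = space_symbol_letter(_df_spacing, "desc")
    _df_spacing = space_digit_letter(_df_spacing, "desc")
    print(_df_spacing["desc"][0])  # "Formation Python 3 : niveau 2"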
def get_top_n_words(corpus, stopwords = [], n=15):
vec = CountVectorizer(lowercase=False, ngram_range=(1, 1), stop_words=stopwords).fit(corpus)
bag_of_words = vec.transform(corpus)
sum_words = bag_of_words.sum(axis=0)
words_freq = [(word, sum_words[0, idx]) for word, idx in vec.vocabulary_.items()]
words_freq = sorted(words_freq, key=lambda x: x[1], reverse=True)
return words_freq[:n]
def get_top_n2_words(corpus, stopwords = [], n=15):
vec = CountVectorizer(lowercase=False, ngram_range=(2, 2), stop_words=stopwords).fit(corpus)
bag_of_words = vec.transform(corpus)
sum_words = bag_of_words.sum(axis=0)
words_freq = [(word, sum_words[0, idx]) for word, idx in vec.vocabulary_.items()]
words_freq = sorted(words_freq, key=lambda x: x[1], reverse=True)
return words_freq[:n]
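# Illustrative usage sketch (not part of the original module): get_top_n_words and
# get_top_n2_words rank unigrams and bigrams by raw count with scikit-learn's
# CountVectorizer. The corpus below is hypothetical.
if __name__ == "__main__":
    _corpus = ["formation python pandas", "formation excel", "formation python avancee"]
    print(get_top_n_words(_corpus, stopwords=["formation"], n=3))   # e.g. [('python', 2), ...]
    print(get_top_n2_words(_corpus, stopwords=["formation"], n=3))  # bigrams such as ('python pandas', 1)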
def get_stop_words(stop_file_path, print=False):
"""load stop words """
with open(stop_file_path, 'r', encoding="utf-8") as f:
stopwords = f.readlines()
stop_set = set(m.strip() for m in stopwords)
# return frozenset(stop_set)
if print:
display(HTML("<h5>LOADED</h5>"))
display(HTML(f'<p>number of stopwords : <span style="fontSize: 18px; font-weight: bolder; display: inline; line-height: 24px; backgroundColor: #ff7900; color: #fff;">{len(stop_set)}</span></p>'))
return set(stop_set)
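# Illustrative usage sketch (not part of the original module): get_stop_words reads one
# stop word per line from a UTF-8 text file. The temporary file below is hypothetical
# example data.
if __name__ == "__main__":
    import os
    import tempfile
    with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False, encoding="utf-8") as _tmp:
        _tmp.write("le\nla\nles\n")
    print(get_stop_words(_tmp.name))  # {'le', 'la', 'les'}
    os.remove(_tmp.name)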
def add_stopwords(STOPWORDS, ADDITIONAL_STOPWORDS):
'''
:param STOPWORDS: list of strings
:param ADDITIONAL_STOPWORDS: other list of strings
:return: a set of words
'''
for word in ADDITIONAL_STOPWORDS:
STOPWORDS.add(word)
STOPWORDS = list(STOPWORDS)
STOPWORDS.sort()
STOPWORDS = set(STOPWORDS)
return STOPWORDS
def sort_coo(coo_matrix):
tuples = zip(coo_matrix.col, coo_matrix.data)
return sorted(tuples, key=lambda x: (x[1], x[0]), reverse=True)
def extract_topn_from_vector(feature_names, sorted_items, topn=10):
"""get the feature names and tf-idf score of top n items"""
# use only topn items from vector
sorted_items = sorted_items[:topn]
score_vals = []
feature_vals = []
# word index and corresponding tf-idf score
for idx, score in sorted_items:
# keep track of feature name and its corresponding score
score_vals.append(round(score, 3))
feature_vals.append(feature_names[idx])
# create a tuples of feature,score
# results = zip(feature_vals,score_vals)
results = {}
for idx in range(len(feature_vals)):
results[feature_vals[idx]] = score_vals[idx]
return results
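# Illustrative usage sketch (not part of the original module): sort_coo() and
# extract_topn_from_vector() are meant to rank tf-idf weights for a single document.
# The corpus and variable names are hypothetical; the sketch assumes scikit-learn >= 1.0
# for get_feature_names_out().
if __name__ == "__main__":
    from sklearn.feature_extraction.text import TfidfVectorizer
    _docs = ["formation python pandas", "formation excel", "python pandas numpy"]
    _tfidf = TfidfVectorizer().fit(_docs)
    _doc_vector = _tfidf.transform([_docs[0]]).tocoo()  # sparse COO row for one document
    _top = extract_topn_from_vector(_tfidf.get_feature_names_out(), sort_coo(_doc_vector), topn=3)
    print(_top)  # dict of {term: tf-idf score}, highest first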
def format_topics_sentences(ldamodel, corpus, texts):
    # Collect one row per document and build the DataFrame once
    # (DataFrame.append was removed in pandas 2.0).
    rows = []
    # Get main topic in each document
    for i, row in tqdm(enumerate(ldamodel[corpus])):
        row = sorted(row, key=lambda x: (x[1]), reverse=True)
        # Get the Dominant topic, Perc Contribution and Keywords for each document
        for j, (topic_num, prop_topic) in enumerate(row):
            if j == 0:  # => dominant topic
                wp = ldamodel.show_topic(topic_num)
                topic_keywords = ", ".join([word for word, prop in wp])
                rows.append([str(int(topic_num)), round(prop_topic, 4), topic_keywords])
            else:
                break
    sentences_topics = pd.DataFrame(rows, columns=['dominant_topic_id', 'topic_contribution_in_document', 'topic_keywords'])
    # Add original text to the end of the output
    contents = pd.Series(texts)
    sentences_topics = pd.concat([sentences_topics, contents], axis=1)
    return sentences_topics
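# Illustrative usage sketch (not part of the original module): format_topics_sentences()
# expects a trained gensim LdaModel and a corpus in gensim bag-of-words format. Everything
# below is hypothetical example data and assumes gensim is installed.
if __name__ == "__main__":
    from gensim.corpora import Dictionary
    from gensim.models import LdaModel
    _texts = [["python", "pandas"], ["excel", "tableau"], ["python", "numpy"]]
    _dictionary = Dictionary(_texts)
    _bow_corpus = [_dictionary.doc2bow(doc) for doc in _texts]
    _lda = LdaModel(corpus=_bow_corpus, id2word=_dictionary, num_topics=2, random_state=0)
    _df_topics = format_topics_sentences(_lda, _bow_corpus, [" ".join(t) for t in _texts])
    print(_df_topics.head())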
| 32.693277 | 208 | 0.63488 |
f13d79ed30319f0877f9a5a62f67ce60849a359b | 2,733 | py | Python | test/acceptance/e2e/delete_node/features/terrain.py | jesuspg/fiware-sdc | 1ceb1a4c2f9b0d9ec2c79155f960d5100a35f17d | ["Apache-2.0"] | null | null | null | test/acceptance/e2e/delete_node/features/terrain.py | jesuspg/fiware-sdc | 1ceb1a4c2f9b0d9ec2c79155f960d5100a35f17d | ["Apache-2.0"] | null | null | null | test/acceptance/e2e/delete_node/features/terrain.py | jesuspg/fiware-sdc | 1ceb1a4c2f9b0d9ec2c79155f960d5100a35f17d | ["Apache-2.0"] | 1 | 2018-03-05T23:30:48.000Z | 2018-03-05T23:30:48.000Z |
# -*- coding: utf-8 -*-
# Copyright 2014 Telefonica Investigación y Desarrollo, S.A.U
#
# This file is part of FI-WARE project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For those usages not covered by the Apache version 2.0 License please
# contact with opensource@tid.es
__author__ = 'jfernandez'
from lettuce import world, before, after
from commons.terrain_steps import setup_feature, setup_scenario, setup_outline, tear_down
from commons.provisioning_steps import ProvisioningSteps
from commons.rest_utils import RestUtils
from commons.fabric_utils import execute_chef_client_stop, execute_puppet_agent_stop, \
remove_chef_client_cert_file, remove_puppet_agent_cert_file, remove_all_generated_test_files, \
remove_puppet_agent_catalog
provisioning_steps = ProvisioningSteps()
rest_utils = RestUtils()
@before.each_feature
def before_each_feature(feature):
""" Hook: Will be executed before each feature. Configures global vars and gets token from keystone """
setup_feature(feature)
@before.each_scenario
def before_each_scenario(scenario):
""" Hook: Will be executed before each Scenario. Setup Scenario and initialize World vars """
setup_scenario(scenario)
world.agents_running = list()
world.list_of_installed_products = list()
@after.each_scenario
def after_each_scenario(scenario):
"""
Hook: Will be executed after each Scenario.
Removes Test data and cleans the system. Kills all agents running in the VM
"""
if world.node_name is not None:
execute_chef_client_stop()
execute_puppet_agent_stop()
remove_chef_client_cert_file()
remove_puppet_agent_cert_file()
remove_all_generated_test_files()
remove_puppet_agent_catalog()
rest_utils.delete_node(world.headers, world.tenant_id, world.node_name)
@before.outline
def before_outline(param1, param2, param3, param4):
""" Hook: Will be executed before each Scenario Outline. Same behaviour as 'before_each_scenario'"""
setup_outline(param1, param2, param3, param4)
@after.all
def after_all(scenario):
""" Hook: Will be executed after all Scenarios and Features. Removes Feature data and cleans the system """
tear_down(scenario)
| 35.493506 | 112 | 0.765459 |
6ed4132e5f12bdfc28f8e532278f3ee257f4c3ef | 688 | py | Python | Etap 3/Logia08/Zad4.py | aszokalski/Logia | 5e29745b01623df8a2f162f143656a76056af407 | ["MIT"] | null | null | null | Etap 3/Logia08/Zad4.py | aszokalski/Logia | 5e29745b01623df8a2f162f143656a76056af407 | ["MIT"] | null | null | null | Etap 3/Logia08/Zad4.py | aszokalski/Logia | 5e29745b01623df8a2f162f143656a76056af407 | ["MIT"] | null | null | null |
def anagramy(lista):
wynik = []
slista = set(lista)
processed = []
for slowo in lista:
if slowo in processed:
continue
slowa = an(slowo)
grupa = list(slowa & slista)
wynik.append(grupa)
processed += grupa
return wynik
def an(slowo):
war.clear()
wariacjabezpowt(slowo, len(slowo), "")
ret = war
return set(ret)
war = []
def wariacjabezpowt(zrodlo, n, wariacja):
if n == 0:
war.append(wariacja)
return
for litera in zrodlo:
if zrodlo.count(litera) > wariacja.count(litera):
wariacjabezpowt(zrodlo, n - 1, wariacja + litera)
return
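# Illustrative usage sketch (not part of the original file): anagramy() groups words that
# are anagrams of one another. The word list is a hypothetical example; group order and
# in-group order may vary because intermediate results pass through sets.
if __name__ == "__main__":
    print(anagramy(["kot", "kto", "tok", "dom", "mod"]))
    # e.g. [['kot', 'kto', 'tok'], ['dom', 'mod']]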
| 20.235294 | 61 | 0.556686 |
8b8eb4b6e39df0b1e8ba8b7ef51c9d2dc7e5212c | 22,556 | py | Python | catalyst/pipeline/engine.py | nongnoobjung/catalyst | 94583c2684ca5e43b3a21296789dd21378be66aa | ["Apache-2.0"] | 1 | 2018-01-25T23:49:06.000Z | 2018-01-25T23:49:06.000Z | catalyst/pipeline/engine.py | nongnoobjung/catalyst | 94583c2684ca5e43b3a21296789dd21378be66aa | ["Apache-2.0"] | null | null | null | catalyst/pipeline/engine.py | nongnoobjung/catalyst | 94583c2684ca5e43b3a21296789dd21378be66aa | ["Apache-2.0"] | null | null | null |
"""
Compute Engine definitions for the Pipeline API.
"""
from abc import (
ABCMeta,
abstractmethod,
)
from uuid import uuid4
from six import (
iteritems,
with_metaclass,
)
from numpy import array
from pandas import DataFrame, MultiIndex
from toolz import groupby, juxt
from toolz.curried.operator import getitem
from catalyst.lib.adjusted_array import ensure_adjusted_array, ensure_ndarray
from catalyst.errors import NoFurtherDataError
from catalyst.utils.numpy_utils import (
as_column,
repeat_first_axis,
repeat_last_axis,
)
from catalyst.utils.pandas_utils import explode
from .term import AssetExists, InputDates, LoadableTerm
from catalyst.utils.date_utils import compute_date_range_chunks
from catalyst.utils.pandas_utils import categorical_df_concat
from catalyst.utils.sharedoc import copydoc
class PipelineEngine(with_metaclass(ABCMeta)):
@abstractmethod
def run_pipeline(self, pipeline, start_date, end_date):
"""
Compute values for ``pipeline`` between ``start_date`` and
``end_date``.
Returns a DataFrame with a MultiIndex of (date, asset) pairs.
Parameters
----------
pipeline : catalyst.pipeline.Pipeline
The pipeline to run.
start_date : pd.Timestamp
Start date of the computed matrix.
end_date : pd.Timestamp
End date of the computed matrix.
Returns
-------
result : pd.DataFrame
A frame of computed results.
The ``result`` columns correspond to the entries of
`pipeline.columns`, which should be a dictionary mapping strings to
instances of :class:`catalyst.pipeline.term.Term`.
For each date between ``start_date`` and ``end_date``, ``result``
will contain a row for each asset that passed `pipeline.screen`.
A screen of ``None`` indicates that a row should be returned for
each asset that existed each day.
"""
raise NotImplementedError("run_pipeline")
@abstractmethod
def run_chunked_pipeline(self, pipeline, start_date, end_date, chunksize):
"""
Compute values for `pipeline` in number of days equal to `chunksize`
and return stitched up result. Computing in chunks is useful for
pipelines computed over a long period of time.
Parameters
----------
pipeline : Pipeline
The pipeline to run.
start_date : pd.Timestamp
The start date to run the pipeline for.
end_date : pd.Timestamp
The end date to run the pipeline for.
chunksize : int or None
The number of days to execute at a time. If None, then
results will be calculated for entire date range at once.
Returns
-------
result : pd.DataFrame
A frame of computed results.
The ``result`` columns correspond to the entries of
`pipeline.columns`, which should be a dictionary mapping strings to
instances of :class:`catalyst.pipeline.term.Term`.
For each date between ``start_date`` and ``end_date``, ``result``
will contain a row for each asset that passed `pipeline.screen`.
A screen of ``None`` indicates that a row should be returned for
each asset that existed each day.
See Also
--------
:meth:`catalyst.pipeline.engine.PipelineEngine.run_pipeline`
"""
raise NotImplementedError("run_chunked_pipeline")
class NoEngineRegistered(Exception):
"""
Raised if a user tries to call pipeline_output in an algorithm that hasn't
set up a pipeline engine.
"""
class ExplodingPipelineEngine(PipelineEngine):
"""
A PipelineEngine that doesn't do anything.
"""
def run_pipeline(self, pipeline, start_date, end_date):
raise NoEngineRegistered(
"Attempted to run a pipeline but no pipeline "
"resources were registered."
)
def run_chunked_pipeline(self, pipeline, start_date, end_date, chunksize):
raise NoEngineRegistered(
"Attempted to run a chunked pipeline but no pipeline "
"resources were registered."
)
def default_populate_initial_workspace(initial_workspace,
root_mask_term,
execution_plan,
dates,
assets):
"""The default implementation for ``populate_initial_workspace``. This
function returns the ``initial_workspace`` argument without making any
modifications.
Parameters
----------
initial_workspace : dict[array-like]
The initial workspace before we have populated it with any cached
terms.
root_mask_term : Term
The root mask term, normally ``AssetExists()``. This is needed to
compute the dates for individual terms.
execution_plan : ExecutionPlan
The execution plan for the pipeline being run.
dates : pd.DatetimeIndex
All of the dates being requested in this pipeline run including
the extra dates for look back windows.
assets : pd.Int64Index
All of the assets that exist for the window being computed.
Returns
-------
populated_initial_workspace : dict[term, array-like]
The workspace to begin computations with.
"""
return initial_workspace
class SimplePipelineEngine(PipelineEngine):
"""
PipelineEngine class that computes each term independently.
Parameters
----------
get_loader : callable
A function that is given a loadable term and returns a PipelineLoader
to use to retrieve raw data for that term.
calendar : DatetimeIndex
Array of dates to consider as trading days when computing a range
between a fixed start and end.
asset_finder : catalyst.assets.AssetFinder
An AssetFinder instance. We depend on the AssetFinder to determine
which assets are in the top-level universe at any point in time.
populate_initial_workspace : callable, optional
A function which will be used to populate the initial workspace when
computing a pipeline. See
:func:`catalyst.pipeline.engine.default_populate_initial_workspace`
for more info.
See Also
--------
:func:`catalyst.pipeline.engine.default_populate_initial_workspace`
"""
__slots__ = (
'_get_loader',
'_calendar',
'_finder',
'_root_mask_term',
'_root_mask_dates_term',
'_populate_initial_workspace',
)
def __init__(self,
get_loader,
calendar,
asset_finder,
populate_initial_workspace=None):
self._get_loader = get_loader
self._calendar = calendar
self._finder = asset_finder
self._root_mask_term = AssetExists()
self._root_mask_dates_term = InputDates()
self._populate_initial_workspace = (
populate_initial_workspace or default_populate_initial_workspace
)
def run_pipeline(self, pipeline, start_date, end_date):
"""
Compute a pipeline.
The algorithm implemented here can be broken down into the following
stages:
0. Build a dependency graph of all terms in `pipeline`. Topologically
sort the graph to determine an order in which we can compute the
terms.
1. Ask our AssetFinder for a "lifetimes matrix", which should contain,
for each date between start_date and end_date, a boolean value for
each known asset indicating whether the asset existed on that date.
2. Compute each term in the dependency order determined in (0), caching
the results in a dictionary so that they can be fed into future
terms.
3. For each date, determine the number of assets passing
pipeline.screen. The sum, N, of all these values is the total
number of rows in our output frame, so we pre-allocate an output
array of length N for each factor in `terms`.
4. Fill in the arrays allocated in (3) by copying computed values from
our output cache into the corresponding rows.
5. Stick the values computed in (4) into a DataFrame and return it.
Step 0 is performed by ``Pipeline.to_graph``.
Step 1 is performed in ``SimplePipelineEngine._compute_root_mask``.
Step 2 is performed in ``SimplePipelineEngine.compute_chunk``.
Steps 3, 4, and 5 are performed in ``SimplePipelineEngine._to_narrow``.
Parameters
----------
pipeline : catalyst.pipeline.Pipeline
The pipeline to run.
start_date : pd.Timestamp
Start date of the computed matrix.
end_date : pd.Timestamp
End date of the computed matrix.
Returns
-------
result : pd.DataFrame
A frame of computed results.
The ``result`` columns correspond to the entries of
`pipeline.columns`, which should be a dictionary mapping strings to
instances of :class:`catalyst.pipeline.term.Term`.
For each date between ``start_date`` and ``end_date``, ``result``
will contain a row for each asset that passed `pipeline.screen`.
A screen of ``None`` indicates that a row should be returned for
each asset that existed each day.
See Also
--------
:meth:`catalyst.pipeline.engine.PipelineEngine.run_pipeline`
:meth:`catalyst.pipeline.engine.PipelineEngine.run_chunked_pipeline`
"""
if end_date < start_date:
raise ValueError(
"start_date must be before or equal to end_date \n"
"start_date=%s, end_date=%s" % (start_date, end_date)
)
screen_name = uuid4().hex
graph = pipeline.to_execution_plan(
screen_name,
self._root_mask_term,
self._calendar,
start_date,
end_date,
)
extra_rows = graph.extra_rows[self._root_mask_term]
root_mask = self._compute_root_mask(start_date, end_date, extra_rows)
dates, assets, root_mask_values = explode(root_mask)
initial_workspace = self._populate_initial_workspace(
{
self._root_mask_term: root_mask_values,
self._root_mask_dates_term: as_column(dates.values)
},
self._root_mask_term,
graph,
dates,
assets,
)
results = self.compute_chunk(
graph,
dates,
assets,
initial_workspace,
)
return self._to_narrow(
graph.outputs,
results,
results.pop(screen_name),
dates[extra_rows:],
assets,
)
@copydoc(PipelineEngine.run_chunked_pipeline)
def run_chunked_pipeline(self, pipeline, start_date, end_date, chunksize):
ranges = compute_date_range_chunks(
self._calendar,
start_date,
end_date,
chunksize,
)
chunks = [self.run_pipeline(pipeline, s, e) for s, e in ranges]
return categorical_df_concat(chunks, inplace=True)
def _compute_root_mask(self, start_date, end_date, extra_rows):
"""
Compute a lifetimes matrix from our AssetFinder, then drop columns that
didn't exist at all during the query dates.
Parameters
----------
start_date : pd.Timestamp
Base start date for the matrix.
end_date : pd.Timestamp
End date for the matrix.
extra_rows : int
Number of extra rows to compute before `start_date`.
Extra rows are needed by terms like moving averages that require a
trailing window of data.
Returns
-------
lifetimes : pd.DataFrame
Frame of dtype `bool` containing dates from `extra_rows` days
before `start_date`, continuing through to `end_date`. The
returned frame contains as columns all assets in our AssetFinder
that existed for at least one day between `start_date` and
`end_date`.
"""
calendar = self._calendar
finder = self._finder
start_idx, end_idx = self._calendar.slice_locs(start_date, end_date)
if start_idx < extra_rows:
raise NoFurtherDataError.from_lookback_window(
initial_message="Insufficient data to compute Pipeline:",
first_date=calendar[0],
lookback_start=start_date,
lookback_length=extra_rows,
)
# Build lifetimes matrix reaching back to `extra_rows` days before
# `start_date.`
lifetimes = finder.lifetimes(
calendar[start_idx - extra_rows:end_idx],
include_start_date=False
)
assert lifetimes.index[extra_rows] == start_date
assert lifetimes.index[-1] == end_date
if not lifetimes.columns.unique:
columns = lifetimes.columns
duplicated = columns[columns.duplicated()].unique()
raise AssertionError("Duplicated sids: %d" % duplicated)
# Filter out columns that didn't exist between the requested start and
# end dates.
existed = lifetimes.iloc[extra_rows:].any()
ret = lifetimes.loc[:, existed]
shape = ret.shape
assert shape[0] * shape[1] != 0, 'root mask cannot be empty'
return ret
@staticmethod
def _inputs_for_term(term, workspace, graph):
"""
Compute inputs for the given term.
This is mostly complicated by the fact that for each input we store as
many rows as will be necessary to serve **any** computation requiring
that input.
"""
offsets = graph.offset
out = []
if term.windowed:
# If term is windowed, then all input data should be instances of
# AdjustedArray.
for input_ in term.inputs:
adjusted_array = ensure_adjusted_array(
workspace[input_], input_.missing_value,
)
out.append(
adjusted_array.traverse(
window_length=term.window_length,
offset=offsets[term, input_],
)
)
else:
# If term is not windowed, input_data may be an AdjustedArray or
# np.ndarray. Coerce the former to the latter.
for input_ in term.inputs:
input_data = ensure_ndarray(workspace[input_])
offset = offsets[term, input_]
# OPTIMIZATION: Don't make a copy by doing input_data[0:] if
# offset is zero.
if offset:
input_data = input_data[offset:]
out.append(input_data)
return out
def get_loader(self, term):
return self._get_loader(term)
def compute_chunk(self, graph, dates, assets, initial_workspace):
"""
Compute the Pipeline terms in the graph for the requested start and end
dates.
Parameters
----------
graph : catalyst.pipeline.graph.TermGraph
dates : pd.DatetimeIndex
Row labels for our root mask.
assets : pd.Int64Index
Column labels for our root mask.
initial_workspace : dict
Map from term -> output.
Must contain at least entry for `self._root_mask_term` whose shape
is `(len(dates), len(assets))`, but may contain additional
pre-computed terms for testing or optimization purposes.
Returns
-------
results : dict
Dictionary mapping requested results to outputs.
"""
self._validate_compute_chunk_params(dates, assets, initial_workspace)
get_loader = self.get_loader
# Copy the supplied initial workspace so we don't mutate it in place.
workspace = initial_workspace.copy()
# If loadable terms share the same loader and extra_rows, load them all
# together.
loader_group_key = juxt(get_loader, getitem(graph.extra_rows))
loader_groups = groupby(loader_group_key, graph.loadable_terms)
refcounts = graph.initial_refcounts(workspace)
for term in graph.execution_order(refcounts):
# `term` may have been supplied in `initial_workspace`, and in the
# future we may pre-compute loadable terms coming from the same
# dataset. In either case, we will already have an entry for this
# term, which we shouldn't re-compute.
if term in workspace:
continue
# Asset labels are always the same, but date labels vary by how
# many extra rows are needed.
mask, mask_dates = graph.mask_and_dates_for_term(
term,
self._root_mask_term,
workspace,
dates,
)
if isinstance(term, LoadableTerm):
to_load = sorted(
loader_groups[loader_group_key(term)],
key=lambda t: t.dataset
)
loader = get_loader(term)
loaded = loader.load_adjusted_array(
to_load, mask_dates, assets, mask,
)
workspace.update(loaded)
else:
workspace[term] = term._compute(
self._inputs_for_term(term, workspace, graph),
mask_dates,
assets,
mask,
)
if term.ndim == 2:
assert workspace[term].shape == mask.shape
else:
assert workspace[term].shape == (mask.shape[0], 1)
# Decref dependencies of ``term``, and clear any terms whose
# refcounts hit 0.
for garbage_term in graph.decref_dependencies(term, refcounts):
del workspace[garbage_term]
out = {}
graph_extra_rows = graph.extra_rows
for name, term in iteritems(graph.outputs):
# Truncate off extra rows from outputs.
out[name] = workspace[term][graph_extra_rows[term]:]
return out
def _to_narrow(self, terms, data, mask, dates, assets):
"""
Convert raw computed pipeline results into a DataFrame for public APIs.
Parameters
----------
terms : dict[str -> Term]
Dict mapping column names to terms.
data : dict[str -> ndarray[ndim=2]]
Dict mapping column names to computed results for those names.
mask : ndarray[bool, ndim=2]
Mask array of values to keep.
dates : ndarray[datetime64, ndim=1]
Row index for arrays `data` and `mask`
assets : ndarray[int64, ndim=2]
Column index for arrays `data` and `mask`
Returns
-------
results : pd.DataFrame
The indices of `results` are as follows:
index : two-tiered MultiIndex of (date, asset).
Contains an entry for each (date, asset) pair corresponding to
a `True` value in `mask`.
columns : Index of str
One column per entry in `data`.
If mask[date, asset] is True, then result.loc[(date, asset), colname]
will contain the value of data[colname][date, asset].
"""
if not mask.any():
# Manually handle the empty DataFrame case. This is a workaround
# to pandas failing to tz_localize an empty dataframe with a
# MultiIndex. It also saves us the work of applying a known-empty
# mask to each array.
#
# Slicing `dates` here to preserve pandas metadata.
empty_dates = dates[:0]
empty_assets = array([], dtype=object)
return DataFrame(
data={
name: array([], dtype=arr.dtype)
for name, arr in iteritems(data)
},
index=MultiIndex.from_arrays([empty_dates, empty_assets]),
)
resolved_assets = array(self._finder.retrieve_all(assets))
dates_kept = repeat_last_axis(dates.values, len(assets))[mask]
assets_kept = repeat_first_axis(resolved_assets, len(dates))[mask]
final_columns = {}
for name in data:
# Each term that computed an output has its postprocess method
# called on the filtered result.
#
# As of Mon May 2 15:38:47 2016, we only use this to convert
# LabelArrays into categoricals.
final_columns[name] = terms[name].postprocess(data[name][mask])
return DataFrame(
data=final_columns,
index=MultiIndex.from_arrays([dates_kept, assets_kept]),
).tz_localize('UTC', level=0)
def _validate_compute_chunk_params(self, dates, assets, initial_workspace):
"""
Verify that the values passed to compute_chunk are well-formed.
"""
root = self._root_mask_term
clsname = type(self).__name__
# Writing this out explicitly so this errors in testing if we change
# the name without updating this line.
compute_chunk_name = self.compute_chunk.__name__
if root not in initial_workspace:
raise AssertionError(
"root_mask values not supplied to {cls}.{method}".format(
cls=clsname,
method=compute_chunk_name,
)
)
shape = initial_workspace[root].shape
implied_shape = len(dates), len(assets)
if shape != implied_shape:
raise AssertionError(
"root_mask shape is {shape}, but received dates/assets "
"imply that shape should be {implied}".format(
shape=shape,
implied=implied_shape,
)
)
| 36.736156 | 79 | 0.600949 |
da07e1c62cf90ec6316c5e7dc07ebe2e08a980e6 | 33,766 | py | Python | venv/Lib/site-packages/PyInstaller/building/build_main.py | Gabs-Leo/Kay-O.Time.Limiter | 5cdc6ebd0a5117478d3bc5d3883098b4a532d671 | ["MIT"] | 6 | 2021-07-14T03:23:17.000Z | 2021-08-07T05:07:21.000Z | PyInstaller/building/build_main.py | 416426/pyinstaller | 0f2b2e921433ab5a510c7efdb21d9c1d7cfbc645 | ["Apache-2.0"] | null | null | null | PyInstaller/building/build_main.py | 416426/pyinstaller | 0f2b2e921433ab5a510c7efdb21d9c1d7cfbc645 | ["Apache-2.0"] | null | null | null |
#-----------------------------------------------------------------------------
# Copyright (c) 2005-2021, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License (version 2
# or later) with exception for distributing the bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#
# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
#-----------------------------------------------------------------------------
"""
Build packages using spec files.
NOTE: All global variables, classes and imported modules create API
for .spec files.
"""
import glob
import os
import pprint
import shutil
import sys
import pkg_resources
# Relative imports to PyInstaller modules.
from PyInstaller import HOMEPATH, DEFAULT_DISTPATH, DEFAULT_WORKPATH
from PyInstaller import compat
from PyInstaller import log as logging
from PyInstaller.utils.misc import absnormpath, compile_py_files
from PyInstaller.compat import is_win, PYDYLIB_NAMES
from PyInstaller.depend import bindepend
from PyInstaller.depend.analysis import initialize_modgraph
from PyInstaller.building.api import PYZ, EXE, COLLECT, MERGE
from PyInstaller.building.datastruct import TOC, Target, Tree, _check_guts_eq
from PyInstaller.building.splash import Splash
from PyInstaller.building.osx import BUNDLE
from PyInstaller.building.toc_conversion import DependencyProcessor
from PyInstaller.building.utils import \
_check_guts_toc_mtime, format_binaries_and_datas
from PyInstaller.depend.utils import \
create_py3_base_library, scan_code_for_ctypes
from PyInstaller.archive import pyz_crypto
from PyInstaller.utils.misc import \
get_path_to_toplevel_modules, get_unicode_modules, mtime
if is_win:
from PyInstaller.utils.win32 import winmanifest
logger = logging.getLogger(__name__)
STRINGTYPE = type('')
TUPLETYPE = type((None,))
rthooks = {}
# place where the loader modules and initialization scripts live
_init_code_path = os.path.join(HOMEPATH, 'PyInstaller', 'loader')
IMPORT_TYPES = ['top-level', 'conditional', 'delayed', 'delayed, conditional',
'optional', 'conditional, optional', 'delayed, optional',
'delayed, conditional, optional']
WARNFILE_HEADER = """\
This file lists modules PyInstaller was not able to find. This does not
necessarily mean this module is required for running your program. Python and
Python 3rd-party packages include a lot of conditional or optional modules. For
example the module 'ntpath' only exists on Windows, whereas the module
'posixpath' only exists on Posix systems.
Types of import:
* top-level: imported at the top-level - look at these first
* conditional: imported within an if-statement
* delayed: imported from within a function
* optional: imported within a try-except-statement
IMPORTANT: Do NOT post this list to the issue-tracker. Use it as a basis for
yourself tracking down the missing module. Thanks!
"""
# TODO find better place for function.
def setupUPXFlags():
f = compat.getenv("UPX", "")
if is_win:
# Binaries built with Visual Studio 7.1 require --strip-loadconf
# or they won't compress. Configure.py makes sure that UPX is new
# enough to support --strip-loadconf.
f = "--strip-loadconf " + f
# Do not compress any icon, so that additional icons in the executable
# can still be externally bound
f = "--compress-icons=0 " + f
f = "--best " + f
compat.setenv("UPX", f)
class Analysis(Target):
"""
Class does analysis of the user's main Python scripts.
An Analysis has five outputs, all TOCs (Table of Contents) accessed as
attributes of the analysis.
scripts
The scripts you gave Analysis as input, with any runtime hook scripts
prepended.
pure
The pure Python modules.
binaries
The extension modules and their dependencies. The secondary dependencies
are filtered. On Windows files from C:\\Windows are excluded by default.
On Linux/Unix only system libraries from /lib or /usr/lib are excluded.
datas
Data-file dependencies. These are data files that are found to be needed
by modules. They can be anything: plugins, font files, images, translations,
etc.
zipfiles
The zipfiles dependencies (usually .egg files).
"""
_old_scripts = {
absnormpath(os.path.join(HOMEPATH, "support", "_mountzlib.py")),
absnormpath(os.path.join(HOMEPATH, "support", "useUnicode.py")),
absnormpath(os.path.join(HOMEPATH, "support", "useTK.py")),
absnormpath(os.path.join(HOMEPATH, "support", "unpackTK.py")),
absnormpath(os.path.join(HOMEPATH, "support", "removeTK.py"))
}
def __init__(self, scripts, pathex=None, binaries=None, datas=None,
hiddenimports=None, hookspath=None, hooksconfig=None,
excludes=None, runtime_hooks=None, cipher=None,
win_no_prefer_redirects=False, win_private_assemblies=False,
noarchive=False):
"""
scripts
A list of scripts specified as file names.
pathex
An optional list of paths to be searched before sys.path.
binaries
An optional list of additional binaries (dlls, etc.) to include.
datas
An optional list of additional data files to include.
hiddenimports
An optional list of additional (hidden) modules to include.
hookspath
An optional list of additional paths to search for hooks.
(hook-modules).
hooksconfig
An optional dict of config settings for hooks.
(hook-modules).
excludes
An optional list of module or package names (their Python names,
not path names) that will be ignored (as though they were not found).
runtime_hooks
An optional list of scripts to use as users' runtime hooks. Specified
as file names.
cipher
Add optional instance of the pyz_crypto.PyiBlockCipher class
(with a provided key).
win_no_prefer_redirects
If True, prefers not to follow version redirects when searching for
Windows SxS Assemblies.
win_private_assemblies
If True, changes all bundled Windows SxS Assemblies into Private
Assemblies to enforce assembly versions.
noarchive
If True, don't place source files in an archive, but keep them as
individual files.
"""
super(Analysis, self).__init__()
from PyInstaller.config import CONF
self.inputs = []
spec_dir = os.path.dirname(CONF['spec'])
for script in scripts:
# If path is relative, it is relative to the location of .spec file.
if not os.path.isabs(script):
script = os.path.join(spec_dir, script)
if absnormpath(script) in self._old_scripts:
logger.warning('Ignoring obsolete auto-added script %s', script)
continue
# Normalize script path.
script = os.path.normpath(script)
if not os.path.exists(script):
raise SystemExit("script '%s' not found" % script)
self.inputs.append(script)
# Django hook requires this variable to find the script manage.py.
CONF['main_script'] = self.inputs[0]
self.pathex = self._extend_pathex(pathex, self.inputs)
# Set global config variable 'pathex' to make it available for
# PyInstaller.utils.hooks and import hooks. Path extensions for module
# search.
CONF['pathex'] = self.pathex
# Extend sys.path so PyInstaller could find all necessary modules.
logger.info('Extending PYTHONPATH with paths\n' + pprint.pformat(self.pathex))
sys.path.extend(self.pathex)
# Set global variable to hold assembly binding redirects
CONF['binding_redirects'] = []
self.hiddenimports = hiddenimports or []
# Include modules detected when parsing options, like 'codecs' and encodings.
self.hiddenimports.extend(CONF['hiddenimports'])
self.hookspath = []
# Append directories in `hookspath` (`--additional-hooks-dir`) to
# take precedence over those from the entry points.
if hookspath:
self.hookspath.extend(hookspath)
# Add hook directories from PyInstaller entry points.
for entry_point in pkg_resources.iter_entry_points(
'pyinstaller40', 'hook-dirs'):
self.hookspath += list(entry_point.load()())
self.hooksconfig = {}
if hooksconfig:
self.hooksconfig.update(hooksconfig)
# Custom runtime hook files that should be included and started before
# any existing PyInstaller runtime hooks.
self.custom_runtime_hooks = runtime_hooks or []
if cipher:
logger.info('Will encrypt Python bytecode with key: %s', cipher.key)
# Create a Python module which contains the decryption key which will
# be used at runtime by pyi_crypto.PyiBlockCipher.
pyi_crypto_key_path = os.path.join(CONF['workpath'], 'pyimod00_crypto_key.py')
with open(pyi_crypto_key_path, 'w', encoding='utf-8') as f:
f.write('# -*- coding: utf-8 -*-\n'
'key = %r\n' % cipher.key)
self.hiddenimports.append('tinyaes')
self.excludes = excludes or []
self.scripts = TOC()
self.pure = TOC()
self.binaries = TOC()
self.zipfiles = TOC()
self.zipped_data = TOC()
self.datas = TOC()
self.dependencies = TOC()
self.binding_redirects = CONF['binding_redirects'] = []
self.win_no_prefer_redirects = win_no_prefer_redirects
self.win_private_assemblies = win_private_assemblies
self._python_version = sys.version
self.noarchive = noarchive
self.__postinit__()
# TODO create function to convert datas/binaries from 'hook format' to TOC.
# Initialise 'binaries' and 'datas' with lists specified in .spec file.
if binaries:
logger.info("Appending 'binaries' from .spec")
for name, pth in format_binaries_and_datas(binaries, workingdir=spec_dir):
self.binaries.append((name, pth, 'BINARY'))
if datas:
logger.info("Appending 'datas' from .spec")
for name, pth in format_binaries_and_datas(datas, workingdir=spec_dir):
self.datas.append((name, pth, 'DATA'))
_GUTS = (# input parameters
('inputs', _check_guts_eq), # parameter `scripts`
('pathex', _check_guts_eq),
('hiddenimports', _check_guts_eq),
('hookspath', _check_guts_eq),
('hooksconfig', _check_guts_eq),
('excludes', _check_guts_eq),
('custom_runtime_hooks', _check_guts_eq),
('win_no_prefer_redirects', _check_guts_eq),
('win_private_assemblies', _check_guts_eq),
('noarchive', _check_guts_eq),
#'cipher': no need to check as it is implied by an
# additional hidden import
#calculated/analysed values
('_python_version', _check_guts_eq),
('scripts', _check_guts_toc_mtime),
('pure', lambda *args: _check_guts_toc_mtime(*args, **{'pyc': 1})),
('binaries', _check_guts_toc_mtime),
('zipfiles', _check_guts_toc_mtime),
('zipped_data', None), # TODO check this, too
('datas', _check_guts_toc_mtime),
# TODO: Need to add "dependencies"?
# cached binding redirects - loaded into CONF for PYZ/COLLECT to find.
('binding_redirects', None),
)
def _extend_pathex(self, spec_pathex, scripts):
"""
Normalize additional paths where PyInstaller will look for modules and
add paths with scripts to the list of paths.
:param spec_pathex: Additional paths defined in the .spec file.
:param scripts: Scripts to create executable from.
:return: list of updated paths
"""
# Based on main supplied script - add top-level modules directory to PYTHONPATH.
# Sometimes the main app script is not a top-level module but a submodule like 'mymodule.mainscript.py'.
# In that case PyInstaller will not be able to find modules in the directory containing 'mymodule'.
# Add this directory to PYTHONPATH so PyInstaller could find it.
pathex = []
# Add scripts paths first.
for script in scripts:
logger.debug('script: %s' % script)
script_toplevel_dir = get_path_to_toplevel_modules(script)
if script_toplevel_dir:
pathex.append(script_toplevel_dir)
# Append paths from .spec.
if spec_pathex is not None:
pathex.extend(spec_pathex)
# Normalize paths in pathex and make them absolute.
return [absnormpath(p) for p in pathex]
def _check_guts(self, data, last_build):
if Target._check_guts(self, data, last_build):
return True
for fnm in self.inputs:
if mtime(fnm) > last_build:
logger.info("Building because %s changed", fnm)
return True
# Now we know that none of the input parameters and none of
# the input files has changed. So take the values calculated
# resp. analysed in the last run and store them in `self`.
self.scripts = TOC(data['scripts'])
self.pure = TOC(data['pure'])
self.binaries = TOC(data['binaries'])
self.zipfiles = TOC(data['zipfiles'])
self.zipped_data = TOC(data['zipped_data'])
self.datas = TOC(data['datas'])
# Store previously found binding redirects in CONF for later use by PKG/COLLECT
from PyInstaller.config import CONF
self.binding_redirects = CONF['binding_redirects'] = data['binding_redirects']
return False
def assemble(self):
"""
This method is the MAIN method for finding all necessary files to be bundled.
"""
from PyInstaller.config import CONF
for m in self.excludes:
logger.debug("Excluding module '%s'" % m)
self.graph = initialize_modgraph(
excludes=self.excludes, user_hook_dirs=self.hookspath)
# TODO Find a better place where to put 'base_library.zip' and when to create it.
# For Python 3 it is necessary to create file 'base_library.zip'
# containing core Python modules. In Python 3 some built-in modules
# are written in pure Python. base_library.zip is a way to have
# those modules as "built-in".
libzip_filename = os.path.join(CONF['workpath'], 'base_library.zip')
create_py3_base_library(libzip_filename, graph=self.graph)
# Bundle base_library.zip as data file.
# Data format of TOC item: ('relative_path_in_dist_dir', 'absolute_path_on_disk', 'DATA')
self.datas.append((os.path.basename(libzip_filename), libzip_filename, 'DATA'))
# Expand sys.path of module graph.
# The attribute is the set of paths to use for imports: sys.path,
# plus our loader, plus other paths from e.g. --path option).
self.graph.path = self.pathex + self.graph.path
self.graph.set_setuptools_nspackages()
logger.info("running Analysis %s", self.tocbasename)
# Get paths to Python and, in Windows, the manifest.
python = compat.python_executable
if not is_win:
# Linux/MacOS: get a real, non-link path to the running Python executable.
while os.path.islink(python):
python = os.path.join(os.path.dirname(python), os.readlink(python))
depmanifest = None
else:
# Windows: Create a manifest to embed into built .exe, containing the same
# dependencies as python.exe.
depmanifest = winmanifest.Manifest(type_="win32", name=CONF['specnm'],
processorArchitecture=winmanifest.processor_architecture(),
version=(1, 0, 0, 0))
depmanifest.filename = os.path.join(CONF['workpath'],
CONF['specnm'] + ".exe.manifest")
# We record "binaries" separately from the modulegraph, as there
# is no way to record those dependencies in the graph. These include
# the python executable and any binaries added by hooks later.
# "binaries" are not the same as "extensions" which are .so or .dylib
# that are found and recorded as extension nodes in the graph.
# Reset seen variable before running bindepend. We use bindepend only for
# the python executable.
bindepend.seen.clear()
# Add binary and assembly dependencies of Python.exe.
# This also ensures that its assembly dependencies under Windows get added to the
# built .exe's manifest. Python 2.7 extension modules have no assembly
# dependencies, and rely on the app-global dependencies set by the .exe.
self.binaries.extend(bindepend.Dependencies([('', python, '')],
manifest=depmanifest,
redirects=self.binding_redirects)[1:])
if is_win:
depmanifest.writeprettyxml()
### Module graph.
#
# Construct the module graph of import relationships between modules
# required by this user's application. For each entry point (top-level
# user-defined Python script), all imports originating from this entry
# point are recursively parsed into a subgraph of the module graph. This
# subgraph is then connected to this graph's root node, ensuring
# imported module nodes will be reachable from the root node -- which is
# (arbitrarily) chosen to be the first entry point's node.
# List to hold graph nodes of scripts and runtime hooks in use order.
priority_scripts = []
# Assume that if the script does not exist, Modulegraph will raise error.
# Save the graph nodes of each in sequence.
for script in self.inputs:
logger.info("Analyzing %s", script)
priority_scripts.append(self.graph.add_script(script))
# Analyze the script's hidden imports (named on the command line)
self.graph.add_hiddenimports(self.hiddenimports)
### Post-graph hooks.
self.graph.process_post_graph_hooks(self)
# Update 'binaries' TOC and 'datas' TOC.
deps_proc = DependencyProcessor(self.graph,
self.graph._additional_files_cache)
self.binaries.extend(deps_proc.make_binaries_toc())
self.datas.extend(deps_proc.make_datas_toc())
self.zipped_data.extend(deps_proc.make_zipped_data_toc())
# Note: zipped eggs are collected below
### Look for dlls that are imported by Python 'ctypes' module.
# First get code objects of all modules that import 'ctypes'.
logger.info('Looking for ctypes DLLs')
# dict like: {'module1': code_obj, 'module2': code_obj}
ctypes_code_objs = self.graph.get_code_using("ctypes")
for name, co in ctypes_code_objs.items():
# Get dlls that might be needed by ctypes.
logger.debug('Scanning %s for shared libraries or dlls', name)
ctypes_binaries = scan_code_for_ctypes(co)
self.binaries.extend(set(ctypes_binaries))
self.datas.extend(
(dest, source, "DATA") for (dest, source) in
format_binaries_and_datas(self.graph.metadata_required())
)
# Analyze run-time hooks.
# Run-time hooks has to be executed before user scripts. Add them
# to the beginning of 'priority_scripts'.
priority_scripts = self.graph.analyze_runtime_hooks(self.custom_runtime_hooks) + priority_scripts
# 'priority_scripts' is now a list of the graph nodes of custom runtime
# hooks, then regular runtime hooks, then the PyI loader scripts.
# Further on, we will make sure they end up at the front of self.scripts
### Extract the nodes of the graph as TOCs for further processing.
# Initialize the scripts list with priority scripts in the proper order.
self.scripts = self.graph.nodes_to_toc(priority_scripts)
# Extend the binaries list with all the Extensions modulegraph has found.
self.binaries = self.graph.make_binaries_toc(self.binaries)
# Fill the "pure" list with pure Python modules.
assert len(self.pure) == 0
self.pure = self.graph.make_pure_toc()
# And get references to module code objects constructed by ModuleGraph
# to avoid writing .pyc/pyo files to hdd.
self.pure._code_cache = self.graph.get_code_objects()
# Add remaining binary dependencies - analyze Python C-extensions and what
# DLLs they depend on.
logger.info('Looking for dynamic libraries')
self.binaries.extend(bindepend.Dependencies(self.binaries,
redirects=self.binding_redirects))
### Include zipped Python eggs.
logger.info('Looking for eggs')
self.zipfiles.extend(deps_proc.make_zipfiles_toc())
# Verify that Python dynamic library can be found.
# Without dynamic Python library PyInstaller cannot continue.
self._check_python_library(self.binaries)
if is_win:
# Remove duplicate redirects
self.binding_redirects[:] = list(set(self.binding_redirects))
logger.info("Found binding redirects: \n%s", self.binding_redirects)
# Filter binaries to adjust path of extensions that come from
# python's lib-dynload directory. Prefix them with lib-dynload
# so that we'll collect them into subdirectory instead of
# directly into _MEIPASS
for idx, tpl in enumerate(self.binaries):
name, path, typecode = tpl
if typecode == 'EXTENSION' \
and not os.path.dirname(os.path.normpath(name)) \
and os.path.basename(os.path.dirname(path)) == 'lib-dynload':
name = os.path.join('lib-dynload', name)
self.binaries[idx] = (name, path, typecode)
# Place Python source in data files for the noarchive case.
if self.noarchive:
# Create a new TOC of ``(dest path for .pyc, source for .py, type)``.
new_toc = TOC()
for name, path, typecode in self.pure:
assert typecode == 'PYMODULE'
# Transform a python module name into a file name.
name = name.replace('.', os.sep)
# Special case: modules have an implied filename to add.
if os.path.splitext(os.path.basename(path))[0] == '__init__':
name += os.sep + '__init__'
# Append the extension for the compiled result.
# In python 3.5 (PEP-488) .pyo files were replaced by
# .opt-1.pyc and .opt-2.pyc. However, it seems that for
# bytecode-only module distribution, we always need to
# use the .pyc extension.
name += '.pyc'
new_toc.append((name, path, typecode))
# Put the result of byte-compiling this TOC in datas. Mark all entries as data.
for name, path, typecode in compile_py_files(new_toc, CONF['workpath']):
self.datas.append((name, path, 'DATA'))
# Store no source in the archive.
self.pure = TOC()
# Write warnings about missing modules.
self._write_warnings()
# Write debug information about the graph
self._write_graph_debug()
def _write_warnings(self):
"""
Write warnings about missing modules. Get them from the graph
and use the graph to figure out who tried to import them.
"""
def dependency_description(name, depInfo):
if not depInfo or depInfo == 'direct':
imptype = 0
else:
imptype = (depInfo.conditional
+ 2 * depInfo.function
+ 4 * depInfo.tryexcept)
return '%s (%s)' % (name, IMPORT_TYPES[imptype])
from PyInstaller.config import CONF
miss_toc = self.graph.make_missing_toc()
with open(CONF['warnfile'], 'w', encoding='utf-8') as wf:
wf.write(WARNFILE_HEADER)
for (n, p, status) in miss_toc:
importers = self.graph.get_importers(n)
print(status, 'module named', n, '- imported by',
', '.join(dependency_description(name, data)
for name, data in importers),
file=wf)
logger.info("Warnings written to %s", CONF['warnfile'])
def _write_graph_debug(self):
"""Write a xref (in html) and with `--log-level DEBUG` a dot-drawing
of the graph.
"""
from PyInstaller.config import CONF
with open(CONF['xref-file'], 'w', encoding='utf-8') as fh:
self.graph.create_xref(fh)
logger.info("Graph cross-reference written to %s", CONF['xref-file'])
if logger.getEffectiveLevel() > logging.DEBUG:
return
# The `DOT language's <https://www.graphviz.org/doc/info/lang.html>`_
# default character encoding (see the end of the linked page) is UTF-8.
with open(CONF['dot-file'], 'w', encoding='utf-8') as fh:
self.graph.graphreport(fh)
logger.info("Graph drawing written to %s", CONF['dot-file'])
def _check_python_library(self, binaries):
"""
Verify presence of the Python dynamic library in the binary dependencies.
Python library is an essential piece that has to be always included.
"""
# First check that libpython is in resolved binary dependencies.
for (nm, filename, typ) in binaries:
if typ == 'BINARY' and nm in PYDYLIB_NAMES:
# Just print its filename and return.
logger.info('Using Python library %s', filename)
# Checking was successful - end of function.
return
# Python lib not in dependencies - try to find it.
logger.info('Python library not in binary dependencies. Doing additional searching...')
python_lib = bindepend.get_python_library_path()
logger.debug('Adding Python library to binary dependencies')
binaries.append((os.path.basename(python_lib), python_lib, 'BINARY'))
logger.info('Using Python library %s', python_lib)
class ExecutableBuilder(object):
"""
Class that constructs the executable.
"""
# TODO wrap the 'main' and 'build' function into this class.
def build(spec, distpath, workpath, clean_build):
"""
Build the executable according to the created SPEC file.
"""
from PyInstaller.config import CONF
# Ensure starting tilde and environment variables get expanded in distpath / workpath.
# '~/path/abc', '${env_var_name}/path/abc/def'
distpath = compat.expand_path(distpath)
workpath = compat.expand_path(workpath)
CONF['spec'] = compat.expand_path(spec)
CONF['specpath'], CONF['specnm'] = os.path.split(spec)
CONF['specnm'] = os.path.splitext(CONF['specnm'])[0]
# Add 'specname' to workpath and distpath if they point to PyInstaller homepath.
if os.path.dirname(distpath) == HOMEPATH:
distpath = os.path.join(HOMEPATH, CONF['specnm'], os.path.basename(distpath))
CONF['distpath'] = distpath
if os.path.dirname(workpath) == HOMEPATH:
workpath = os.path.join(HOMEPATH, CONF['specnm'], os.path.basename(workpath), CONF['specnm'])
else:
workpath = os.path.join(workpath, CONF['specnm'])
CONF['warnfile'] = os.path.join(workpath, 'warn-%s.txt' % CONF['specnm'])
CONF['dot-file'] = os.path.join(workpath, 'graph-%s.dot' % CONF['specnm'])
CONF['xref-file'] = os.path.join(workpath, 'xref-%s.html' % CONF['specnm'])
# Clean PyInstaller cache (CONF['cachedir']) and temporary files (workpath)
# to be able start a clean build.
if clean_build:
logger.info('Removing temporary files and cleaning cache in %s', CONF['cachedir'])
for pth in (CONF['cachedir'], workpath):
if os.path.exists(pth):
# Remove all files in 'pth'.
for f in glob.glob(pth + '/*'):
# Remove dirs recursively.
if os.path.isdir(f):
shutil.rmtree(f)
else:
os.remove(f)
# Create DISTPATH and workpath if they do not exist.
for pth in (CONF['distpath'], workpath):
if not os.path.exists(pth):
os.makedirs(pth)
# Construct NAMESPACE for running the Python code from .SPEC file.
# NOTE: Passing NAMESPACE makes it possible to avoid global variables in this
# module and provides an isolated environment for running tests.
# NOTE: Defining NAMESPACE allows mapping any class to a specific name for .SPEC.
# FIXME: Some symbols might be missing. Add them if there are some failures.
# TODO: What from this .spec API is deprecated and could be removed?
spec_namespace = {
# Set of global variables that can be used while processing .spec file.
# Some of them act as configuration options.
'DISTPATH': CONF['distpath'],
'HOMEPATH': HOMEPATH,
'SPEC': CONF['spec'],
'specnm': CONF['specnm'],
'SPECPATH': CONF['specpath'],
'WARNFILE': CONF['warnfile'],
'workpath': workpath,
# PyInstaller classes for .spec.
'TOC': TOC,
'Analysis': Analysis,
'BUNDLE': BUNDLE,
'COLLECT': COLLECT,
'EXE': EXE,
'MERGE': MERGE,
'PYZ': PYZ,
'Tree': Tree,
'Splash': Splash,
# Python modules available for .spec.
'os': os,
'pyi_crypto': pyz_crypto,
}
# Set up module PyInstaller.config for passing some arguments to 'exec'
# function.
from PyInstaller.config import CONF
CONF['workpath'] = workpath
# Execute the specfile. Read it as a binary file...
try:
with open(spec, 'rb') as f:
# ... then let Python determine the encoding, since ``compile`` accepts
# byte strings.
code = compile(f.read(), spec, 'exec')
except FileNotFoundError as e:
raise SystemExit('spec "{}" not found'.format(spec))
exec(code, spec_namespace)
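# Illustrative sketch, not part of the original module: a minimal .spec file relies on
# the names injected through spec_namespace above. It is kept in a plain string so that
# nothing executes at import time; 'myscript.py' and the target name are placeholders.
_EXAMPLE_SPEC_SKETCH = """
a = Analysis(['myscript.py'])
pyz = PYZ(a.pure, a.zipped_data)
exe = EXE(pyz, a.scripts, a.binaries, a.datas, name='myscript')
"""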
def __add_options(parser):
parser.add_argument("--distpath", metavar="DIR",
default=DEFAULT_DISTPATH,
help=('Where to put the bundled app (default: %s)' %
os.path.join(os.curdir, 'dist')))
parser.add_argument('--workpath', default=DEFAULT_WORKPATH,
help=('Where to put all the temporary work files, '
'.log, .pyz, etc. (default: %s)' %
os.path.join(os.curdir, 'build')))
parser.add_argument('-y', '--noconfirm',
action="store_true", default=False,
help='Replace output directory (default: %s) without '
'asking for confirmation' % os.path.join('SPECPATH', 'dist', 'SPECNAME'))
parser.add_argument('--upx-dir', default=None,
help='Path to UPX utility (default: search the execution path)')
parser.add_argument("-a", "--ascii", action="store_true",
help="Do not include unicode encoding support "
"(default: included if available)")
parser.add_argument('--clean', dest='clean_build', action='store_true',
default=False,
help='Clean PyInstaller cache and remove temporary '
'files before building.')
def main(pyi_config, specfile, noconfirm, ascii=False, **kw):
from PyInstaller.config import CONF
CONF['noconfirm'] = noconfirm
# Some modules are included if they are detected at build time or
# if a command-line argument is specified (e.g. --ascii).
if CONF.get('hiddenimports') is None:
CONF['hiddenimports'] = []
# Test unicode support.
if not ascii:
CONF['hiddenimports'].extend(get_unicode_modules())
# FIXME: this should be a global import, but can't due to recursive imports
# If a configuration dict is supplied, skip the configuration step.
if pyi_config is None:
import PyInstaller.configure as configure
CONF.update(configure.get_config(kw.get('upx_dir')))
else:
CONF.update(pyi_config)
if CONF['hasUPX']:
setupUPXFlags()
CONF['ui_admin'] = kw.get('ui_admin', False)
CONF['ui_access'] = kw.get('ui_uiaccess', False)
build(specfile, kw.get('distpath'), kw.get('workpath'), kw.get('clean_build'))
| 44.487484
| 108
| 0.624326
|
2ce1c9641cc200b42b3792c5786ab1b371cd19ee
| 6,492
|
py
|
Python
|
plaso/output/tln.py
|
Defense-Cyber-Crime-Center/plaso
|
4f3a85fbea10637c1cdbf0cde9fc539fdcea9c47
|
[
"Apache-2.0"
] | 2
|
2016-02-18T12:46:29.000Z
|
2022-03-13T03:04:59.000Z
|
plaso/output/tln.py
|
CNR-ITTIG/plasodfaxp
|
923797fc00664fa9e3277781b0334d6eed5664fd
|
[
"Apache-2.0"
] | null | null | null |
plaso/output/tln.py
|
CNR-ITTIG/plasodfaxp
|
923797fc00664fa9e3277781b0334d6eed5664fd
|
[
"Apache-2.0"
] | 6
|
2016-12-18T08:05:36.000Z
|
2021-04-06T14:19:11.000Z
|
# -*- coding: utf-8 -*-
"""Output module for the TLN format.
For documentation on the TLN format see: http://forensicswiki.org/wiki/TLN
"""
from plaso.lib import errors
from plaso.lib import timelib
from plaso.output import interface
from plaso.output import manager
class TLNBaseOutputModule(interface.LinearOutputModule):
"""Base class for a TLN output module."""
# Stop pylint from complaining about missing WriteEventBody.
# pylint: disable=abstract-method
_FIELD_DELIMITER = u'|'
_DESCRIPTION_FIELD_DELIMITER = u';'
_HEADER = u''
def _FormatDescription(self, event_object):
"""Formats the description.
Args:
event_object: the event object (instance of EventObject).
Returns:
A string containing the value for the description field.
"""
date_time_string = timelib.Timestamp.CopyToIsoFormat(
event_object.timestamp, timezone=self._output_mediator.timezone)
timestamp_description = getattr(event_object, u'timestamp_desc', u'UNKNOWN')
message, _ = self._output_mediator.GetFormattedMessages(event_object)
if message is None:
raise errors.NoFormatterFound(
u'Unable to find event formatter for: {0:s}.'.format(
getattr(event_object, u'data_type', u'UNKNOWN')))
description = u'{0:s}; {1:s}; {2:s}'.format(
date_time_string, timestamp_description,
message.replace(self._DESCRIPTION_FIELD_DELIMITER, u' '))
return self._SanitizeField(description)
def _FormatHostname(self, event_object):
"""Formats the hostname.
Args:
event_object: the event object (instance of EventObject).
Returns:
A string containing the value for the hostname field.
"""
hostname = self._output_mediator.GetHostname(event_object)
return self._SanitizeField(hostname)
def _FormatSource(self, event_object):
"""Formats the source.
Args:
event_object: the event object (instance of EventObject).
Returns:
A string containing the value for the source field.
"""
source_short, _ = self._output_mediator.GetFormattedSources(event_object)
if source_short is None:
raise errors.NoFormatterFound(
u'Unable to find event formatter for: {0:s}.'.format(
getattr(event_object, u'data_type', u'UNKNOWN')))
return self._SanitizeField(source_short)
def _FormatUsername(self, event_object):
"""Formats the username.
Args:
event_object: the event object (instance of EventObject).
Returns:
A string containing the value for the username field.
"""
username = self._output_mediator.GetUsername(event_object)
return self._SanitizeField(username)
def _SanitizeField(self, field):
"""Sanitizes a field for output.
This method removes the field delimiter from the field string.
Args:
field: the string that makes up the field.
Returns:
A string containing the value for the field.
"""
if self._FIELD_DELIMITER:
return field.replace(self._FIELD_DELIMITER, u' ')
return field
def WriteHeader(self):
"""Writes the header to the output."""
self._WriteLine(self._HEADER)
class TLNOutputModule(TLNBaseOutputModule):
"""Output module for the TLN format.
TLN defines 5 | separated fields, namely:
* Time - 32-bit POSIX (or Unix) epoch timestamp.
* Source - The name of the parser or plugin that produced the event.
* Host - The source host system.
* User - The user associated with the data.
* Description - Message string describing the data.
"""
NAME = u'tln'
DESCRIPTION = u'TLN 5 field | delimited output.'
_HEADER = u'Time|Source|Host|User|Description\n'
def WriteEventBody(self, event_object):
"""Writes the body of an event object to the output.
Args:
event_object: the event object (instance of EventObject).
"""
if not hasattr(event_object, u'timestamp'):
return
posix_timestamp = timelib.Timestamp.CopyToPosix(event_object.timestamp)
source = self._FormatSource(event_object)
hostname = self._FormatHostname(event_object)
username = self._FormatUsername(event_object)
description = self._FormatDescription(event_object)
out_write = u'{0:d}|{1:s}|{2:s}|{3:s}|{4!s}\n'.format(
posix_timestamp, source, hostname, username, description)
self._WriteLine(out_write)
class L2TTLNOutputModule(TLNBaseOutputModule):
"""Output module for the log2timeline extended variant of the TLN format.
l2tTLN is an extended variant of TLN introduced in log2timeline 0.65.
l2tTLN extends basic TLN to 7 | separated fields, namely:
* Time - 32-bit POSIX (or Unix) epoch timestamp.
* Source - The name of the parser or plugin that produced the event.
* Host - The source host system.
* User - The user associated with the data.
* Description - Message string describing the data.
* TZ - L2T 0.65 field. Timezone of the event.
* Notes - L2T 0.65 field. Optional notes field or filename and inode.
"""
NAME = u'l2ttln'
DESCRIPTION = u'Extended TLN 7 field | delimited output.'
_HEADER = u'Time|Source|Host|User|Description|TZ|Notes\n'
def _FormatNotes(self, event_object):
"""Formats the notes.
Args:
event_object: the event object (instance of EventObject).
Returns:
A string containing the value for the notes field.
"""
notes = getattr(event_object, u'notes', u'')
if not notes:
notes = u'File: {0:s} inode: {1!s}'.format(
getattr(event_object, u'display_name', u''),
getattr(event_object, u'inode', u''))
return self._SanitizeField(notes)
def WriteEventBody(self, event_object):
"""Writes the body of an event object to the output.
Args:
event_object: the event object (instance of EventObject).
"""
if not hasattr(event_object, u'timestamp'):
return
posix_timestamp = timelib.Timestamp.CopyToPosix(event_object.timestamp)
source = self._FormatSource(event_object)
hostname = self._FormatHostname(event_object)
username = self._FormatUsername(event_object)
description = self._FormatDescription(event_object)
notes = self._FormatNotes(event_object)
out_write = u'{0:d}|{1:s}|{2:s}|{3:s}|{4:s}|{5!s}|{6!s}\n'.format(
posix_timestamp, source, hostname, username, description,
self._output_mediator.timezone, notes)
self._WriteLine(out_write)
manager.OutputManager.RegisterOutputs([L2TTLNOutputModule, TLNOutputModule])
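# Illustrative sketch, not part of plaso: with hypothetical field values, the TLN format
# string used in WriteEventBody above produces a line such as the one printed here.
if __name__ == '__main__':
    print(u'{0:d}|{1:s}|{2:s}|{3:s}|{4!s}'.format(
        1458131400, u'FILE', u'workstation1', u'jdoe',
        u'2016-03-16T12:30:00+00:00; crtime; example message'))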
| 32.138614
| 80
| 0.702557
|
2f59e4b60daab186d9eefcb05aad1c483eaf084c
| 781
|
py
|
Python
|
tests/test-full/extensions/test.py
|
robjwells/majestic
|
cdfb91c595055e975be84afc6ec04fe22ebddc51
|
[
"MIT"
] | null | null | null |
tests/test-full/extensions/test.py
|
robjwells/majestic
|
cdfb91c595055e975be84afc6ec04fe22ebddc51
|
[
"MIT"
] | null | null | null |
tests/test-full/extensions/test.py
|
robjwells/majestic
|
cdfb91c595055e975be84afc6ec04fe22ebddc51
|
[
"MIT"
] | null | null | null |
import majestic
def process_posts_and_pages(*, posts, pages, settings):
"""Dummy process function that sets an attribute on each object"""
for post in posts:
post.test_attr = 'post'
for page in pages:
page.test_attr = 'page'
new_page = majestic.Page(title='extension test', body='',
slug='extension-test', settings=settings)
new_page.test_attr = 'page' # Make it like every other page
return {'posts': posts, 'pages': pages, 'new_objects': [new_page]}
def process_objects_to_write(*, objects, settings):
new_page = majestic.Page(title='', body='', slug='objects_to_write',
settings=settings)
new_page.test_attr = 'page'
return {'objects': objects + [new_page]}
| 33.956522
| 72
| 0.629962
|
e6867b24584f2ae1b7b8df686629defbd2bcd6ef
| 8,503
|
py
|
Python
|
custom_components/xiaomi_cloud_map_extractor/xiaomi_vacuum_map_parser/const.py
|
licheng5625/Home-Assistant-custom-components-Xiaomi-Cloud-Map-Extractor
|
d1af6a71e8287c6f3f4002367146d42451efa1cb
|
[
"MIT"
] | null | null | null |
custom_components/xiaomi_cloud_map_extractor/xiaomi_vacuum_map_parser/const.py
|
licheng5625/Home-Assistant-custom-components-Xiaomi-Cloud-Map-Extractor
|
d1af6a71e8287c6f3f4002367146d42451efa1cb
|
[
"MIT"
] | null | null | null |
custom_components/xiaomi_cloud_map_extractor/xiaomi_vacuum_map_parser/const.py
|
licheng5625/Home-Assistant-custom-components-Xiaomi-Cloud-Map-Extractor
|
d1af6a71e8287c6f3f4002367146d42451efa1cb
|
[
"MIT"
] | null | null | null |
DOMAIN = "xiaomi_cloud_map_extractor"
PLATFORMS = ["camera"]
CONF_ATTRIBUTES = "attributes"
CONF_AUTO_UPDATE = "auto_update"
CONF_AVAILABLE_API_DREAME = "dreame"
CONF_AVAILABLE_API_ROIDMI = "roidmi"
CONF_AVAILABLE_API_VIOMI = "viomi"
CONF_AVAILABLE_API_XIAOMI = "xiaomi"
CONF_AVAILABLE_COUNTRIES = ["cn", "de", "us", "ru", "tw", "sg", "in", "i2"]
CONF_BOTTOM = "bottom"
CONF_COLOR = "color"
CONF_COLORS = "colors"
CONF_COUNTRY = "country"
CONF_DRAW = "draw"
CONF_FORCE_API = "force_api"
CONF_FONT = "font"
CONF_FONT_SIZE = "font_size"
CONF_LEFT = "left"
CONF_MAP_TRANSFORM = "map_transformation"
CONF_RIGHT = "right"
CONF_ROOM_COLORS = "room_colors"
CONF_ROTATE = "rotate"
CONF_SCALE = "scale"
CONF_SIZES = "sizes"
CONF_SIZE_CHARGER_RADIUS = "charger_radius"
CONF_SIZE_IGNORED_OBSTACLE_RADIUS = "ignored_obstacle_radius"
CONF_SIZE_IGNORED_OBSTACLE_WITH_PHOTO_RADIUS = "ignored_obstacle_with_photo_radius"
CONF_SIZE_OBSTACLE_RADIUS = "obstacle_radius"
CONF_SIZE_OBSTACLE_WITH_PHOTO_RADIUS = "obstacle_with_photo_radius"
CONF_SIZE_VACUUM_RADIUS = "vacuum_radius"
CONF_STORE_MAP_RAW = "store_map_raw"
CONF_STORE_MAP_IMAGE = "store_map_image"
CONF_STORE_MAP_PATH = "store_map_path"
CONF_TEXT = "text"
CONF_TEXTS = "texts"
CONF_TOP = "top"
CONF_TRIM = "trim"
CONF_X = "x"
CONF_Y = "y"
CONF_AVAILABLE_APIS = [CONF_AVAILABLE_API_XIAOMI, CONF_AVAILABLE_API_VIOMI, CONF_AVAILABLE_API_ROIDMI,
CONF_AVAILABLE_API_DREAME]
CONF_AVAILABLE_SIZES = [CONF_SIZE_VACUUM_RADIUS, CONF_SIZE_IGNORED_OBSTACLE_RADIUS,
CONF_SIZE_IGNORED_OBSTACLE_WITH_PHOTO_RADIUS, CONF_SIZE_OBSTACLE_RADIUS,
CONF_SIZE_OBSTACLE_WITH_PHOTO_RADIUS, CONF_SIZE_CHARGER_RADIUS]
MINIMAL_IMAGE_WIDTH = 20
MINIMAL_IMAGE_HEIGHT = 20
CONTENT_TYPE = "image/png"
DEFAULT_NAME = "Xiaomi Cloud Map Extractor"
ATTRIBUTE_CALIBRATION = "calibration_points"
ATTRIBUTE_CHARGER = "charger"
ATTRIBUTE_CLEANED_ROOMS = "cleaned_rooms"
ATTRIBUTE_COUNTRY = "country"
ATTRIBUTE_GOTO = "goto"
ATTRIBUTE_GOTO_PATH = "goto_path"
ATTRIBUTE_GOTO_PREDICTED_PATH = "goto_predicted_path"
ATTRIBUTE_IGNORED_OBSTACLES = "ignored_obstacles"
ATTRIBUTE_IGNORED_OBSTACLES_WITH_PHOTO = "ignored_obstacles_with_photo"
ATTRIBUTE_IMAGE = "image"
ATTRIBUTE_IS_EMPTY = "is_empty"
ATTRIBUTE_MAP_NAME = "map_name"
ATTRIBUTE_MAP_SAVED = "map_saved"
ATTRIBUTE_NO_GO_AREAS = "no_go_areas"
ATTRIBUTE_NO_MOPPING_AREAS = "no_mopping_areas"
ATTRIBUTE_OBSTACLES = "obstacles"
ATTRIBUTE_OBSTACLES_WITH_PHOTO = "obstacles_with_photo"
ATTRIBUTE_PATH = "path"
ATTRIBUTE_ROOMS = "rooms"
ATTRIBUTE_ROOM_NUMBERS = "room_numbers"
ATTRIBUTE_VACUUM_POSITION = "vacuum_position"
ATTRIBUTE_VACUUM_ROOM = "vacuum_room"
ATTRIBUTE_VACUUM_ROOM_NAME = "vacuum_room_name"
ATTRIBUTE_WALLS = "walls"
ATTRIBUTE_ZONES = "zones"
CONF_AVAILABLE_ATTRIBUTES = [ATTRIBUTE_CALIBRATION, ATTRIBUTE_CHARGER, ATTRIBUTE_CLEANED_ROOMS, ATTRIBUTE_COUNTRY,
ATTRIBUTE_GOTO, ATTRIBUTE_GOTO_PATH, ATTRIBUTE_GOTO_PREDICTED_PATH,
ATTRIBUTE_IGNORED_OBSTACLES, ATTRIBUTE_IGNORED_OBSTACLES_WITH_PHOTO, ATTRIBUTE_IMAGE,
ATTRIBUTE_IS_EMPTY, ATTRIBUTE_MAP_NAME, ATTRIBUTE_NO_GO_AREAS, ATTRIBUTE_NO_MOPPING_AREAS,
ATTRIBUTE_OBSTACLES, ATTRIBUTE_OBSTACLES_WITH_PHOTO, ATTRIBUTE_PATH, ATTRIBUTE_ROOMS,
ATTRIBUTE_ROOM_NUMBERS, ATTRIBUTE_VACUUM_POSITION, ATTRIBUTE_VACUUM_ROOM,
ATTRIBUTE_VACUUM_ROOM_NAME, ATTRIBUTE_WALLS, ATTRIBUTE_ZONES]
ATTR_A = "a"
ATTR_ANGLE = "angle"
ATTR_CONFIDENCE_LEVEL = "confidence_level"
ATTR_DESCRIPTION = "description"
ATTR_HEIGHT = "height"
ATTR_MODEL = "model"
ATTR_NAME = "name"
ATTR_OFFSET_X = "offset_x"
ATTR_OFFSET_Y = "offset_y"
ATTR_PATH = "path"
ATTR_PHOTO_NAME = "photo_name"
ATTR_POINT_LENGTH = "point_length"
ATTR_POINT_SIZE = "point_size"
ATTR_ROTATION = "rotation"
ATTR_SCALE = "scale"
ATTR_SIZE = "size"
ATTR_TYPE = "type"
ATTR_USED_API = "used_api"
ATTR_WIDTH = "width"
ATTR_X = "x"
ATTR_X0 = "x0"
ATTR_X1 = "x1"
ATTR_X2 = "x2"
ATTR_X3 = "x3"
ATTR_Y = "y"
ATTR_Y0 = "y0"
ATTR_Y1 = "y1"
ATTR_Y2 = "y2"
ATTR_Y3 = "y3"
DRAWABLE_ALL = "all"
DRAWABLE_CHARGER = "charger"
DRAWABLE_CLEANED_AREA = "cleaned_area"
DRAWABLE_GOTO_PATH = "goto_path"
DRAWABLE_IGNORED_OBSTACLES = "ignored_obstacles"
DRAWABLE_IGNORED_OBSTACLES_WITH_PHOTO = "ignored_obstacles_with_photo"
DRAWABLE_NO_GO_AREAS = "no_go_zones"
DRAWABLE_NO_MOPPING_AREAS = "no_mopping_zones"
DRAWABLE_OBSTACLES = "obstacles"
DRAWABLE_OBSTACLES_WITH_PHOTO = "obstacles_with_photo"
DRAWABLE_PATH = "path"
DRAWABLE_PREDICTED_PATH = "predicted_path"
DRAWABLE_ROOM_NAMES = "room_names"
DRAWABLE_VACUUM_POSITION = "vacuum_position"
DRAWABLE_VIRTUAL_WALLS = "virtual_walls"
DRAWABLE_ZONES = "zones"
CONF_AVAILABLE_DRAWABLES = [DRAWABLE_ALL, DRAWABLE_CLEANED_AREA, DRAWABLE_CHARGER, DRAWABLE_GOTO_PATH,
DRAWABLE_IGNORED_OBSTACLES, DRAWABLE_IGNORED_OBSTACLES_WITH_PHOTO, DRAWABLE_NO_GO_AREAS,
DRAWABLE_NO_MOPPING_AREAS, DRAWABLE_OBSTACLES, DRAWABLE_OBSTACLES_WITH_PHOTO, DRAWABLE_PATH,
DRAWABLE_PREDICTED_PATH, DRAWABLE_ROOM_NAMES, DRAWABLE_VACUUM_POSITION,
DRAWABLE_VIRTUAL_WALLS, DRAWABLE_ZONES]
COLOR_ROOM_PREFIX = "color_room_"
COLOR_CHARGER = "color_charger"
COLOR_CLEANED_AREA = "color_cleaned_area"
COLOR_GOTO_PATH = "color_goto_path"
COLOR_GREY_WALL = "color_grey_wall"
COLOR_IGNORED_OBSTACLE = "color_ignored_obstacle"
COLOR_IGNORED_OBSTACLE_WITH_PHOTO = "color_ignored_obstacle_with_photo"
COLOR_MAP_INSIDE = "color_map_inside"
COLOR_MAP_OUTSIDE = "color_map_outside"
COLOR_MAP_WALL = "color_map_wall"
COLOR_MAP_WALL_V2 = "color_map_wall_v2"
COLOR_NEW_DISCOVERED_AREA = "color_new_discovered_area"
COLOR_NO_GO_ZONES = "color_no_go_zones"
COLOR_NO_GO_ZONES_OUTLINE = "color_no_go_zones_outline"
COLOR_NO_MOPPING_ZONES = "color_no_mop_zones"
COLOR_NO_MOPPING_ZONES_OUTLINE = "color_no_mop_zones_outline"
COLOR_OBSTACLE = "color_obstacle"
COLOR_OBSTACLE_WITH_PHOTO = "color_obstacle_with_photo"
COLOR_PATH = "color_path"
COLOR_PREDICTED_PATH = "color_predicted_path"
COLOR_ROBO = "color_robo"
COLOR_ROOM_NAMES = "color_room_names"
COLOR_SCAN = "color_scan"
COLOR_UNKNOWN = "color_unknown"
COLOR_VIRTUAL_WALLS = "color_virtual_walls"
COLOR_ZONES = "color_zones"
COLOR_ZONES_OUTLINE = "color_zones_outline"
CONF_AVAILABLE_COLORS = [COLOR_CHARGER, COLOR_CLEANED_AREA, COLOR_GOTO_PATH, COLOR_GREY_WALL, COLOR_IGNORED_OBSTACLE,
COLOR_IGNORED_OBSTACLE_WITH_PHOTO, COLOR_MAP_INSIDE, COLOR_MAP_OUTSIDE, COLOR_MAP_WALL,
COLOR_MAP_WALL_V2, COLOR_NEW_DISCOVERED_AREA, COLOR_NO_GO_ZONES, COLOR_NO_GO_ZONES_OUTLINE,
COLOR_NO_MOPPING_ZONES, COLOR_NO_MOPPING_ZONES_OUTLINE, COLOR_OBSTACLE,
COLOR_OBSTACLE_WITH_PHOTO, COLOR_PATH, COLOR_PREDICTED_PATH, COLOR_ROBO, COLOR_ROOM_NAMES,
COLOR_SCAN, COLOR_UNKNOWN, COLOR_VIRTUAL_WALLS, COLOR_ZONES, COLOR_ZONES_OUTLINE]
COLOR_ROOM_1 = "color_room_1"
COLOR_ROOM_2 = "color_room_2"
COLOR_ROOM_3 = "color_room_3"
COLOR_ROOM_4 = "color_room_4"
COLOR_ROOM_5 = "color_room_5"
COLOR_ROOM_6 = "color_room_6"
COLOR_ROOM_7 = "color_room_7"
COLOR_ROOM_8 = "color_room_8"
COLOR_ROOM_9 = "color_room_9"
COLOR_ROOM_10 = "color_room_10"
COLOR_ROOM_11 = "color_room_11"
COLOR_ROOM_12 = "color_room_12"
COLOR_ROOM_13 = "color_room_13"
COLOR_ROOM_14 = "color_room_14"
COLOR_ROOM_15 = "color_room_15"
COLOR_ROOM_16 = "color_room_16"
CONF_DEFAULT_ROOM_COLORS = [COLOR_ROOM_1, COLOR_ROOM_2, COLOR_ROOM_3, COLOR_ROOM_4, COLOR_ROOM_5, COLOR_ROOM_6,
COLOR_ROOM_7, COLOR_ROOM_8, COLOR_ROOM_9, COLOR_ROOM_10, COLOR_ROOM_11, COLOR_ROOM_12,
COLOR_ROOM_13, COLOR_ROOM_14, COLOR_ROOM_15, COLOR_ROOM_16]
AVAILABLE_APIS = {
CONF_AVAILABLE_API_DREAME: ["dreame.vacuum."],
CONF_AVAILABLE_API_ROIDMI: ["roidmi.vacuum."],
CONF_AVAILABLE_API_VIOMI: ["viomi.vacuum."]
}
API_EXCEPTIONS = {
"viomi.vacuum.v18": CONF_AVAILABLE_API_ROIDMI
}
MM = 50
DEFAULT_TRIMS = {
CONF_LEFT: 0,
CONF_RIGHT: 0,
CONF_TOP: 0,
CONF_BOTTOM: 0
}
DEFAULT_SIZES = {
CONF_SIZE_VACUUM_RADIUS: 4,
CONF_SIZE_IGNORED_OBSTACLE_RADIUS: 3,
CONF_SIZE_IGNORED_OBSTACLE_WITH_PHOTO_RADIUS: 3,
CONF_SIZE_OBSTACLE_RADIUS: 3,
CONF_SIZE_OBSTACLE_WITH_PHOTO_RADIUS: 3,
CONF_SIZE_CHARGER_RADIUS: 4
}
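# Illustrative sketch, not part of the integration itself: one plausible way the tables
# above could be combined to resolve an API for a vacuum model string; the model value
# in the comment is only an example.
def _resolve_api_example(model):
    # e.g. _resolve_api_example("viomi.vacuum.v18") -> CONF_AVAILABLE_API_ROIDMI
    if model in API_EXCEPTIONS:
        return API_EXCEPTIONS[model]
    for api, prefixes in AVAILABLE_APIS.items():
        if any(model.startswith(prefix) for prefix in prefixes):
            return api
    return CONF_AVAILABLE_API_XIAOMI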
| 37.959821
| 120
| 0.777255
|
c1ecd0ac58789121a022c7b0a427d59e8ff0ef16
| 1,374
|
py
|
Python
|
appengine/predator/analysis/predator.py
|
mcgreevy/chromium-infra
|
09064105713603f7bf75c772e8354800a1bfa256
|
[
"BSD-3-Clause"
] | 1
|
2018-01-02T05:47:07.000Z
|
2018-01-02T05:47:07.000Z
|
appengine/predator/analysis/predator.py
|
mcgreevy/chromium-infra
|
09064105713603f7bf75c772e8354800a1bfa256
|
[
"BSD-3-Clause"
] | null | null | null |
appengine/predator/analysis/predator.py
|
mcgreevy/chromium-infra
|
09064105713603f7bf75c772e8354800a1bfa256
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from analysis.culprit import Culprit
# TODO(http://crbug.com/659346): write coverage tests.
class Predator(object): # pragma: no cover
"""The Main entry point into the Predator library."""
def __init__(self, cl_classifier, component_classifier, project_classifier):
self.cl_classifier = cl_classifier
self.component_classifier = component_classifier
self.project_classifier = project_classifier
def FindCulprit(self, report):
"""Given a CrashReport, return a Culprit."""
suspected_cls = self.cl_classifier(report)
assert suspected_cls is not None
suspected_project = (
self.project_classifier.ClassifySuspects(suspected_cls) or
self.project_classifier.ClassifyCallStack(
report.stacktrace.crash_stack))
suspected_components = (
self.component_classifier.ClassifySuspects(suspected_cls) or
self.component_classifier.ClassifyCallStack(
report.stacktrace.crash_stack))
return Culprit(project=suspected_project,
components=suspected_components,
cls=suspected_cls,
regression_range=report.regression_range,
algorithm='core_algorithm')
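# Illustrative usage sketch (hypothetical classifier objects and crash report):
#   predator = Predator(cl_classifier, component_classifier, project_classifier)
#   culprit = predator.FindCulprit(crash_report)
# The returned Culprit bundles the suspected project, components, CLs and the
# regression range, as constructed above.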
| 37.135135
| 78
| 0.720524
|
fcffbeed35687c5a50c1a6954488354ef689e4b0
| 677
|
py
|
Python
|
7. Event Handling/virtual_event.py
|
samujjwaal/Tkinter-GUI-Course
|
9ed492f49507333eec54b8b9262262def03fe00f
|
[
"MIT"
] | null | null | null |
7. Event Handling/virtual_event.py
|
samujjwaal/Tkinter-GUI-Course
|
9ed492f49507333eec54b8b9262262def03fe00f
|
[
"MIT"
] | null | null | null |
7. Event Handling/virtual_event.py
|
samujjwaal/Tkinter-GUI-Course
|
9ed492f49507333eec54b8b9262262def03fe00f
|
[
"MIT"
] | null | null | null |
from tkinter import *
from tkinter import ttk
root = Tk()
entry = ttk.Entry(root)
entry.pack()
# bind built-in virtual events
entry.bind("<<Copy>>", lambda e: print("Copy"))
entry.bind("<<Paste>>", lambda e: print("Paste"))
# define custom virtual event
entry.event_add("<<OddNumber>>", "1", "3", "5", "7", "9")
# bind custom virtual event
entry.bind("<<OddNumber>>", lambda e: print("Odd Number!"))
# to view details about a virtual event
print(entry.event_info("<<OddNumber>>"))
# programmatically invoke virtual event
entry.event_generate("<<OddNumber>>")
entry.event_generate("<<Paste>>")
# delete virtual event
entry.event_delete("<<OddNumber>>")
root.mainloop()
| 22.566667
| 59
| 0.691285
|
e4f55010fa5b6db24a535d5c002d279fa530c739
| 3,237
|
py
|
Python
|
notebooks/tfx_pipelines/pipeline/solutions/pipeline/runner.py
|
ghostofsheep/asl-ml-immersion
|
4ed74c9fb67d570a498219b347b905e82f525d6e
|
[
"Apache-2.0"
] | null | null | null |
notebooks/tfx_pipelines/pipeline/solutions/pipeline/runner.py
|
ghostofsheep/asl-ml-immersion
|
4ed74c9fb67d570a498219b347b905e82f525d6e
|
[
"Apache-2.0"
] | null | null | null |
notebooks/tfx_pipelines/pipeline/solutions/pipeline/runner.py
|
ghostofsheep/asl-ml-immersion
|
4ed74c9fb67d570a498219b347b905e82f525d6e
|
[
"Apache-2.0"
] | 1
|
2021-11-10T02:54:02.000Z
|
2021-11-10T02:54:02.000Z
|
# Lint as: python2, python3
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""KFP runner configuration"""
import kfp
from tfx.orchestration import data_types
from tfx.orchestration.kubeflow import kubeflow_dag_runner
from typing import Optional, Dict, List, Text
from distutils.util import strtobool
from config import Config
from pipeline import create_pipeline
if __name__ == '__main__':
# Set the values for the compile time parameters
ai_platform_training_args = {
'project': Config.PROJECT_ID,
'region': Config.GCP_REGION,
'serviceAccount': Config.CUSTOM_SERVICE_ACCOUNT,
'masterConfig': {
'imageUri': Config.TFX_IMAGE,
}
}
ai_platform_serving_args = {
'project_id': Config.PROJECT_ID,
'model_name': Config.MODEL_NAME,
'runtimeVersion': Config.RUNTIME_VERSION,
'pythonVersion': Config.PYTHON_VERSION,
'regions': [Config.GCP_REGION]
}
beam_tmp_folder = '{}/beam/tmp'.format(Config.ARTIFACT_STORE_URI)
beam_pipeline_args = [
'--runner=DataflowRunner',
'--experiments=shuffle_mode=auto',
'--project=' + Config.PROJECT_ID,
'--temp_location=' + beam_tmp_folder,
'--region=' + Config.GCP_REGION,
]
# Set the default values for the pipeline runtime parameters
data_root_uri = data_types.RuntimeParameter(
name='data-root-uri',
default=Config.DATA_ROOT_URI,
ptype=Text
)
train_steps = data_types.RuntimeParameter(
name='train-steps',
default=5000,
ptype=int
)
eval_steps = data_types.RuntimeParameter(
name='eval-steps',
default=500,
ptype=int
)
pipeline_root = '{}/{}/{}'.format(
Config.ARTIFACT_STORE_URI,
Config.PIPELINE_NAME,
kfp.dsl.RUN_ID_PLACEHOLDER)
# Set KubeflowDagRunner settings.
metadata_config = kubeflow_dag_runner.get_default_kubeflow_metadata_config()
runner_config = kubeflow_dag_runner.KubeflowDagRunnerConfig(
kubeflow_metadata_config = metadata_config,
pipeline_operator_funcs = kubeflow_dag_runner.get_default_pipeline_operator_funcs(
strtobool(Config.USE_KFP_SA)),
tfx_image=Config.TFX_IMAGE)
# Compile the pipeline.
kubeflow_dag_runner.KubeflowDagRunner(config=runner_config).run(
create_pipeline(
pipeline_name=Config.PIPELINE_NAME,
pipeline_root=pipeline_root,
data_root_uri=data_root_uri,
train_steps=train_steps,
eval_steps=eval_steps,
enable_tuning=strtobool(Config.ENABLE_TUNING),
ai_platform_training_args=ai_platform_training_args,
ai_platform_serving_args=ai_platform_serving_args,
beam_pipeline_args=beam_pipeline_args))
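# Illustrative follow-up, not part of this runner: KubeflowDagRunner typically writes a
# '<PIPELINE_NAME>.tar.gz' package in the working directory, which could then be
# submitted with the KFP SDK (the host value below is a placeholder), e.g.:
#   kfp.Client(host='https://<your-kfp-endpoint>').create_run_from_pipeline_package(
#       Config.PIPELINE_NAME + '.tar.gz',
#       arguments={'data-root-uri': Config.DATA_ROOT_URI})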
| 31.427184
| 88
| 0.72629
|
7941028c41441115c5951efdd014ed949ca5c0a9
| 14,534
|
py
|
Python
|
OmniMarkupLib/Renderers/libs/pygments/lexers/idl.py
|
henumohe/OmniMarkupPreviewer
|
a15382a8309fe04f2c515151c00c074ab9c0d1ab
|
[
"MIT"
] | 652
|
2015-07-26T00:00:17.000Z
|
2022-02-24T18:30:04.000Z
|
OmniMarkupLib/Renderers/libs/pygments/lexers/idl.py
|
henumohe/OmniMarkupPreviewer
|
a15382a8309fe04f2c515151c00c074ab9c0d1ab
|
[
"MIT"
] | 82
|
2015-01-15T12:30:43.000Z
|
2022-01-06T02:56:53.000Z
|
OmniMarkupLib/Renderers/libs/pygments/lexers/idl.py
|
henumohe/OmniMarkupPreviewer
|
a15382a8309fe04f2c515151c00c074ab9c0d1ab
|
[
"MIT"
] | 99
|
2015-01-14T19:53:45.000Z
|
2021-08-11T15:17:26.000Z
|
# -*- coding: utf-8 -*-
"""
pygments.lexers.idl
~~~~~~~~~~~~~~~~~~~
Lexers for IDL.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, words
from pygments.token import Text, Comment, Operator, Keyword, Name, Number
__all__ = ['IDLLexer']
class IDLLexer(RegexLexer):
"""
Pygments Lexer for IDL (Interactive Data Language).
.. versionadded:: 1.6
"""
name = 'IDL'
aliases = ['idl']
filenames = ['*.pro']
mimetypes = ['text/idl']
flags = re.IGNORECASE | re.MULTILINE
_RESERVED = (
'and', 'begin', 'break', 'case', 'common', 'compile_opt',
'continue', 'do', 'else', 'end', 'endcase', 'endelse',
'endfor', 'endforeach', 'endif', 'endrep', 'endswitch',
'endwhile', 'eq', 'for', 'foreach', 'forward_function',
'function', 'ge', 'goto', 'gt', 'if', 'inherits', 'le',
'lt', 'mod', 'ne', 'not', 'of', 'on_ioerror', 'or', 'pro',
'repeat', 'switch', 'then', 'until', 'while', 'xor')
"""Reserved words from: http://www.exelisvis.com/docs/reswords.html"""
_BUILTIN_LIB = (
'abs', 'acos', 'adapt_hist_equal', 'alog', 'alog10',
'amoeba', 'annotate', 'app_user_dir', 'app_user_dir_query',
'arg_present', 'array_equal', 'array_indices', 'arrow',
'ascii_template', 'asin', 'assoc', 'atan', 'axis',
'a_correlate', 'bandpass_filter', 'bandreject_filter',
'barplot', 'bar_plot', 'beseli', 'beselj', 'beselk',
'besely', 'beta', 'bilinear', 'binary_template', 'bindgen',
'binomial', 'bin_date', 'bit_ffs', 'bit_population',
'blas_axpy', 'blk_con', 'box_cursor', 'breakpoint',
'broyden', 'butterworth', 'bytarr', 'byte', 'byteorder',
'bytscl', 'caldat', 'calendar', 'call_external',
'call_function', 'call_method', 'call_procedure', 'canny',
'catch', 'cd', 'cdf_\w*', 'ceil', 'chebyshev',
'check_math',
'chisqr_cvf', 'chisqr_pdf', 'choldc', 'cholsol', 'cindgen',
'cir_3pnt', 'close', 'cluster', 'cluster_tree', 'clust_wts',
'cmyk_convert', 'colorbar', 'colorize_sample',
'colormap_applicable', 'colormap_gradient',
'colormap_rotation', 'colortable', 'color_convert',
'color_exchange', 'color_quan', 'color_range_map', 'comfit',
'command_line_args', 'complex', 'complexarr', 'complexround',
'compute_mesh_normals', 'cond', 'congrid', 'conj',
'constrained_min', 'contour', 'convert_coord', 'convol',
'convol_fft', 'coord2to3', 'copy_lun', 'correlate', 'cos',
'cosh', 'cpu', 'cramer', 'create_cursor', 'create_struct',
'create_view', 'crossp', 'crvlength', 'cti_test',
'ct_luminance', 'cursor', 'curvefit', 'cvttobm', 'cv_coord',
'cw_animate', 'cw_animate_getp', 'cw_animate_load',
'cw_animate_run', 'cw_arcball', 'cw_bgroup', 'cw_clr_index',
'cw_colorsel', 'cw_defroi', 'cw_field', 'cw_filesel',
'cw_form', 'cw_fslider', 'cw_light_editor',
'cw_light_editor_get', 'cw_light_editor_set', 'cw_orient',
'cw_palette_editor', 'cw_palette_editor_get',
'cw_palette_editor_set', 'cw_pdmenu', 'cw_rgbslider',
'cw_tmpl', 'cw_zoom', 'c_correlate', 'dblarr', 'db_exists',
'dcindgen', 'dcomplex', 'dcomplexarr', 'define_key',
'define_msgblk', 'define_msgblk_from_file', 'defroi',
'defsysv', 'delvar', 'dendrogram', 'dendro_plot', 'deriv',
'derivsig', 'determ', 'device', 'dfpmin', 'diag_matrix',
'dialog_dbconnect', 'dialog_message', 'dialog_pickfile',
'dialog_printersetup', 'dialog_printjob',
'dialog_read_image', 'dialog_write_image', 'digital_filter',
'dilate', 'dindgen', 'dissolve', 'dist', 'distance_measure',
'dlm_load', 'dlm_register', 'doc_library', 'double',
'draw_roi', 'edge_dog', 'efont', 'eigenql', 'eigenvec',
'ellipse', 'elmhes', 'emboss', 'empty', 'enable_sysrtn',
'eof', 'eos_\w*', 'erase', 'erf', 'erfc', 'erfcx',
'erode', 'errorplot', 'errplot', 'estimator_filter',
'execute', 'exit', 'exp', 'expand', 'expand_path', 'expint',
'extrac', 'extract_slice', 'factorial', 'fft', 'filepath',
'file_basename', 'file_chmod', 'file_copy', 'file_delete',
'file_dirname', 'file_expand_path', 'file_info',
'file_lines', 'file_link', 'file_mkdir', 'file_move',
'file_poll_input', 'file_readlink', 'file_same',
'file_search', 'file_test', 'file_which', 'findgen',
'finite', 'fix', 'flick', 'float', 'floor', 'flow3',
'fltarr', 'flush', 'format_axis_values', 'free_lun',
'fstat', 'fulstr', 'funct', 'fv_test', 'fx_root',
'fz_roots', 'f_cvf', 'f_pdf', 'gamma', 'gamma_ct',
'gauss2dfit', 'gaussfit', 'gaussian_function', 'gaussint',
'gauss_cvf', 'gauss_pdf', 'gauss_smooth', 'getenv',
'getwindows', 'get_drive_list', 'get_dxf_objects',
'get_kbrd', 'get_login_info', 'get_lun', 'get_screen_size',
'greg2jul', 'grib_\w*', 'grid3', 'griddata',
'grid_input', 'grid_tps', 'gs_iter',
'h5[adfgirst]_\w*', 'h5_browser', 'h5_close',
'h5_create', 'h5_get_libversion', 'h5_open', 'h5_parse',
'hanning', 'hash', 'hdf_\w*', 'heap_free',
'heap_gc', 'heap_nosave', 'heap_refcount', 'heap_save',
'help', 'hilbert', 'histogram', 'hist_2d', 'hist_equal',
'hls', 'hough', 'hqr', 'hsv', 'h_eq_ct', 'h_eq_int',
'i18n_multibytetoutf8', 'i18n_multibytetowidechar',
'i18n_utf8tomultibyte', 'i18n_widechartomultibyte',
'ibeta', 'icontour', 'iconvertcoord', 'idelete', 'identity',
'idlexbr_assistant', 'idlitsys_createtool', 'idl_base64',
'idl_validname', 'iellipse', 'igamma', 'igetcurrent',
'igetdata', 'igetid', 'igetproperty', 'iimage', 'image',
'image_cont', 'image_statistics', 'imaginary', 'imap',
'indgen', 'intarr', 'interpol', 'interpolate',
'interval_volume', 'int_2d', 'int_3d', 'int_tabulated',
'invert', 'ioctl', 'iopen', 'iplot', 'ipolygon',
'ipolyline', 'iputdata', 'iregister', 'ireset', 'iresolve',
'irotate', 'ir_filter', 'isa', 'isave', 'iscale',
'isetcurrent', 'isetproperty', 'ishft', 'isocontour',
'isosurface', 'isurface', 'itext', 'itranslate', 'ivector',
'ivolume', 'izoom', 'i_beta', 'journal', 'json_parse',
'json_serialize', 'jul2greg', 'julday', 'keyword_set',
'krig2d', 'kurtosis', 'kw_test', 'l64indgen', 'label_date',
'label_region', 'ladfit', 'laguerre', 'laplacian',
'la_choldc', 'la_cholmprove', 'la_cholsol', 'la_determ',
'la_eigenproblem', 'la_eigenql', 'la_eigenvec', 'la_elmhes',
'la_gm_linear_model', 'la_hqr', 'la_invert',
'la_least_squares', 'la_least_square_equality',
'la_linear_equation', 'la_ludc', 'la_lumprove', 'la_lusol',
'la_svd', 'la_tridc', 'la_trimprove', 'la_triql',
'la_trired', 'la_trisol', 'least_squares_filter', 'leefilt',
'legend', 'legendre', 'linbcg', 'lindgen', 'linfit',
'linkimage', 'list', 'll_arc_distance', 'lmfit', 'lmgr',
'lngamma', 'lnp_test', 'loadct', 'locale_get',
'logical_and', 'logical_or', 'logical_true', 'lon64arr',
'lonarr', 'long', 'long64', 'lsode', 'ludc', 'lumprove',
'lusol', 'lu_complex', 'machar', 'make_array', 'make_dll',
'make_rt', 'map', 'mapcontinents', 'mapgrid', 'map_2points',
'map_continents', 'map_grid', 'map_image', 'map_patch',
'map_proj_forward', 'map_proj_image', 'map_proj_info',
'map_proj_init', 'map_proj_inverse', 'map_set',
'matrix_multiply', 'matrix_power', 'max', 'md_test',
'mean', 'meanabsdev', 'mean_filter', 'median', 'memory',
'mesh_clip', 'mesh_decimate', 'mesh_issolid', 'mesh_merge',
'mesh_numtriangles', 'mesh_obj', 'mesh_smooth',
'mesh_surfacearea', 'mesh_validate', 'mesh_volume',
'message', 'min', 'min_curve_surf', 'mk_html_help',
'modifyct', 'moment', 'morph_close', 'morph_distance',
'morph_gradient', 'morph_hitormiss', 'morph_open',
'morph_thin', 'morph_tophat', 'multi', 'm_correlate',
'ncdf_\w*', 'newton', 'noise_hurl', 'noise_pick',
'noise_scatter', 'noise_slur', 'norm', 'n_elements',
'n_params', 'n_tags', 'objarr', 'obj_class', 'obj_destroy',
'obj_hasmethod', 'obj_isa', 'obj_new', 'obj_valid',
'online_help', 'on_error', 'open', 'oplot', 'oploterr',
'parse_url', 'particle_trace', 'path_cache', 'path_sep',
'pcomp', 'plot', 'plot3d', 'ploterr', 'plots', 'plot_3dbox',
'plot_field', 'pnt_line', 'point_lun', 'polarplot',
'polar_contour', 'polar_surface', 'poly', 'polyfill',
'polyfillv', 'polygon', 'polyline', 'polyshade', 'polywarp',
'poly_2d', 'poly_area', 'poly_fit', 'popd', 'powell',
'pref_commit', 'pref_get', 'pref_set', 'prewitt', 'primes',
'print', 'printd', 'product', 'profile', 'profiler',
'profiles', 'project_vol', 'psafm', 'pseudo',
'ps_show_fonts', 'ptrarr', 'ptr_free', 'ptr_new',
'ptr_valid', 'pushd', 'p_correlate', 'qgrid3', 'qhull',
'qromb', 'qromo', 'qsimp', 'query_ascii', 'query_bmp',
'query_csv', 'query_dicom', 'query_gif', 'query_image',
'query_jpeg', 'query_jpeg2000', 'query_mrsid', 'query_pict',
'query_png', 'query_ppm', 'query_srf', 'query_tiff',
'query_wav', 'radon', 'randomn', 'randomu', 'ranks',
'rdpix', 'read', 'reads', 'readu', 'read_ascii',
'read_binary', 'read_bmp', 'read_csv', 'read_dicom',
'read_gif', 'read_image', 'read_interfile', 'read_jpeg',
'read_jpeg2000', 'read_mrsid', 'read_pict', 'read_png',
'read_ppm', 'read_spr', 'read_srf', 'read_sylk',
'read_tiff', 'read_wav', 'read_wave', 'read_x11_bitmap',
'read_xwd', 'real_part', 'rebin', 'recall_commands',
'recon3', 'reduce_colors', 'reform', 'region_grow',
'register_cursor', 'regress', 'replicate',
'replicate_inplace', 'resolve_all', 'resolve_routine',
'restore', 'retall', 'return', 'reverse', 'rk4', 'roberts',
'rot', 'rotate', 'round', 'routine_filepath',
'routine_info', 'rs_test', 'r_correlate', 'r_test',
'save', 'savgol', 'scale3', 'scale3d', 'scope_level',
'scope_traceback', 'scope_varfetch', 'scope_varname',
'search2d', 'search3d', 'sem_create', 'sem_delete',
'sem_lock', 'sem_release', 'setenv', 'set_plot',
'set_shading', 'sfit', 'shade_surf', 'shade_surf_irr',
'shade_volume', 'shift', 'shift_diff', 'shmdebug', 'shmmap',
'shmunmap', 'shmvar', 'show3', 'showfont', 'simplex', 'sin',
'sindgen', 'sinh', 'size', 'skewness', 'skip_lun',
'slicer3', 'slide_image', 'smooth', 'sobel', 'socket',
'sort', 'spawn', 'spher_harm', 'sph_4pnt', 'sph_scat',
'spline', 'spline_p', 'spl_init', 'spl_interp', 'sprsab',
'sprsax', 'sprsin', 'sprstp', 'sqrt', 'standardize',
'stddev', 'stop', 'strarr', 'strcmp', 'strcompress',
'streamline', 'stregex', 'stretch', 'string', 'strjoin',
'strlen', 'strlowcase', 'strmatch', 'strmessage', 'strmid',
'strpos', 'strput', 'strsplit', 'strtrim', 'struct_assign',
'struct_hide', 'strupcase', 'surface', 'surfr', 'svdc',
'svdfit', 'svsol', 'swap_endian', 'swap_endian_inplace',
'symbol', 'systime', 's_test', 't3d', 'tag_names', 'tan',
'tanh', 'tek_color', 'temporary', 'tetra_clip',
'tetra_surface', 'tetra_volume', 'text', 'thin', 'threed',
'timegen', 'time_test2', 'tm_test', 'total', 'trace',
'transpose', 'triangulate', 'trigrid', 'triql', 'trired',
'trisol', 'tri_surf', 'truncate_lun', 'ts_coef', 'ts_diff',
'ts_fcast', 'ts_smooth', 'tv', 'tvcrs', 'tvlct', 'tvrd',
'tvscl', 'typename', 't_cvt', 't_pdf', 'uindgen', 'uint',
'uintarr', 'ul64indgen', 'ulindgen', 'ulon64arr', 'ulonarr',
'ulong', 'ulong64', 'uniq', 'unsharp_mask', 'usersym',
'value_locate', 'variance', 'vector', 'vector_field', 'vel',
'velovect', 'vert_t3d', 'voigt', 'voronoi', 'voxel_proj',
'wait', 'warp_tri', 'watershed', 'wdelete', 'wf_draw',
'where', 'widget_base', 'widget_button', 'widget_combobox',
'widget_control', 'widget_displaycontextmen', 'widget_draw',
'widget_droplist', 'widget_event', 'widget_info',
'widget_label', 'widget_list', 'widget_propertysheet',
'widget_slider', 'widget_tab', 'widget_table',
'widget_text', 'widget_tree', 'widget_tree_move',
'widget_window', 'wiener_filter', 'window', 'writeu',
'write_bmp', 'write_csv', 'write_gif', 'write_image',
'write_jpeg', 'write_jpeg2000', 'write_nrif', 'write_pict',
'write_png', 'write_ppm', 'write_spr', 'write_srf',
'write_sylk', 'write_tiff', 'write_wav', 'write_wave',
'wset', 'wshow', 'wtn', 'wv_applet', 'wv_cwt',
'wv_cw_wavelet', 'wv_denoise', 'wv_dwt', 'wv_fn_coiflet',
'wv_fn_daubechies', 'wv_fn_gaussian', 'wv_fn_haar',
'wv_fn_morlet', 'wv_fn_paul', 'wv_fn_symlet',
'wv_import_data', 'wv_import_wavelet', 'wv_plot3d_wps',
'wv_plot_multires', 'wv_pwt', 'wv_tool_denoise',
'xbm_edit', 'xdisplayfile', 'xdxf', 'xfont',
'xinteranimate', 'xloadct', 'xmanager', 'xmng_tmpl',
'xmtool', 'xobjview', 'xobjview_rotate',
'xobjview_write_image', 'xpalette', 'xpcolor', 'xplot3d',
'xregistered', 'xroi', 'xsq_test', 'xsurface', 'xvaredit',
'xvolume', 'xvolume_rotate', 'xvolume_write_image',
'xyouts', 'zoom', 'zoom_24')
"""Functions from: http://www.exelisvis.com/docs/routines-1.html"""
tokens = {
'root': [
(r'^\s*;.*?\n', Comment.Singleline),
(words(_RESERVED, prefix=r'\b', suffix=r'\b'), Keyword),
(words(_BUILTIN_LIB, prefix=r'\b', suffix=r'\b'), Name.Builtin),
(r'\+=|-=|\^=|\*=|/=|#=|##=|<=|>=|=', Operator),
(r'\+\+|--|->|\+|-|##|#|\*|/|<|>|&&|\^|~|\|\|\?|:', Operator),
(r'\b(mod=|lt=|le=|eq=|ne=|ge=|gt=|not=|and=|or=|xor=)', Operator),
(r'\b(mod|lt|le|eq|ne|ge|gt|not|and|or|xor)\b', Operator),
(r'\b[0-9]+(L|B|S|UL|ULL|LL)?\b', Number),  # integer literals with optional size suffix
(r'.', Text),
]
}
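# Illustrative usage sketch, not part of the lexer itself: tokenising a small, made-up
# IDL snippet with this lexer (guarded so nothing runs on import).
if __name__ == '__main__':
    from pygments import highlight
    from pygments.formatters import NullFormatter
    _snippet = "pro hello\n  print, 'Hello world'\nend\n"
    print(highlight(_snippet, IDLLexer(), NullFormatter()))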
| 55.262357
| 79
| 0.58862
|
d1f7706c2bdcd02b997bd03433be71eaf2145c87
| 13,070
|
py
|
Python
|
app/train_integratedml_bin.py
|
intersystems-community/iris-ml-suite
|
d82868e6d83cb7fb8da2a98dd98cf7c6d28d1210
|
[
"MIT"
] | null | null | null |
app/train_integratedml_bin.py
|
intersystems-community/iris-ml-suite
|
d82868e6d83cb7fb8da2a98dd98cf7c6d28d1210
|
[
"MIT"
] | null | null | null |
app/train_integratedml_bin.py
|
intersystems-community/iris-ml-suite
|
d82868e6d83cb7fb8da2a98dd98cf7c6d28d1210
|
[
"MIT"
] | 1
|
2020-09-09T21:48:37.000Z
|
2020-09-09T21:48:37.000Z
|
import jaydebeapi
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.model_selection import train_test_split
from pandas import DataFrame
import re, string
import pickle
import json
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.metrics import accuracy_score
from sklearn.multiclass import OneVsRestClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
import nltk
from nltk.corpus import stopwords
nltk.download('stopwords')
stop_words = set(stopwords.words('english'))
import pandas as pd
jdbc_server = "jdbc:IRIS://52.142.62.199:9091/PYTHON"
jdbc_driver = 'com.intersystems.jdbc.IRISDriver'
iris_jdbc_jar = "./intersystems-jdbc-3.1.0.jar"
iris_user = "_SYSTEM"
iris_password = "SYS"
conn = jaydebeapi.connect(jdbc_driver, jdbc_server, [iris_user, iris_password], iris_jdbc_jar)
with open('views.json') as json_file:
views = set(json.load(json_file))
with open('models.json') as json_file:
models = set(json.load(json_file))
with open('trains.json') as json_file:
trains = set(json.load(json_file))
curs = conn.cursor()
err_views = []
err_models = []
err_trains = []
for x in views:
try:
exec = x.split("\"")[1]
curs.execute(exec)
except Exception as inst:
err_views.append(inst)
print("erro em {}".format(exec))
curs = conn.cursor()
for x in models:
try:
exec = x.split("\"")[1]
curs.execute(exec)
except Exception as inst:
err_models.append(inst)
print("erro em {}".format(exec))
curs = conn.cursor()
for x in trains:
try:
exec = x.split("\"")[1]
curs.execute(exec)
except Exception as inst:
err_trains.append(inst)
print("erro em {}".format(exec))
print("")
conn = jaydebeapi.connect(jdbc_driver, jdbc_server, [iris_user, iris_password], iris_jdbc_jar)
curs = conn.cursor()
curs.execute("SELECT "
" id, Name, Tags, Text "
"FROM Community.Post_Train "
"Where "
"not text is null "
"order by id")
total_cache = curs.fetchall()
# getting the description of each tag to compose the vocabulary
curs_vocab = conn.cursor()
curs_vocab.execute("SELECT ID||' '||Description "
"FROM Community.Tag "
"where not id is null "
"and not Description is null "
"order by id")
total_vocab = curs_vocab.fetchall()
df_vocab = DataFrame(columns=["vocab"], data=total_vocab)
df_vocab = df_vocab.applymap(lambda s: s.lower() if type(s) == str else s)
curs_tags = conn.cursor()
curs_tags.execute("SELECT ID "
"FROM Community.Tag "
"where not id is null order by id")
total_tags = curs_tags.fetchall()
df_tags = DataFrame(columns=["tags"], data=total_tags)
df_tags = df_tags.applymap(lambda s: s.lower() if type(s) == str else s)
df = DataFrame(total_cache)
df.columns = [x[0].lower() for x in curs.description]
def clean_text(text):
text = re.sub(r"what's", "what is ", text)
text = re.sub(r"c\+\+", "cplusplus ", text)
text = re.sub(r"\'s", " ", text)
text = re.sub(r"\'ve", " have ", text)
text = re.sub(r"can't", "can not ", text)
text = re.sub(r"n't", " not ", text)
text = re.sub(r"i'm", "i am ", text)
text = re.sub(r"\'re", " are ", text)
text = re.sub(r"\'d", " would ", text)
text = re.sub(r"\'ll", " will ", text)
text = re.sub(r"\'scuse", " excuse ", text)
text = re.sub(r'\W', ' ', text)
text = re.sub(r'\s+', ' ', text)
text = text.strip(' ')
return text
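# Illustrative example (made-up input): once text has been lower-cased as in
# prepare_dataframe below, clean_text("i'm testing c++, isn't it great?!") returns
# roughly "i am testing cplusplus is not it great".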
def prepare_dataframe(_df):
#converting all to lower case
_df = _df.applymap(lambda s: s.lower() if type(s) == str else s)
_df["tags"] = tuple(_df["tags"].str.split(","))
_df["text"] = _df["text"].map(lambda com : clean_text(com))
return _df
def get_all_tags(tags_list):
real_tags = df_tags["tags"].values.tolist()
all_tags = []
for x in tags_list.values:
all_tags += x[0]
result = list(set(all_tags))
result = [x for x in result if x in real_tags]
#result.remove("article")
#result.remove("question")
#result.remove("caché")
with open('all_tags.json', 'w') as outfile:
json.dump(result, outfile)
return tuple(set(result))
df = prepare_dataframe(df)
all_tags = get_all_tags(df[["tags"]])
mlb = MultiLabelBinarizer(classes=all_tags)
y_total = mlb.fit_transform(df["tags"])
n = df.shape[0]
vec = CountVectorizer(ngram_range=(1,1), strip_accents='unicode',
max_features=900,stop_words=stop_words, binary=True)
vec.fit(df_vocab["vocab"])
percent_training = 0.8
line = int(percent_training * n)
df_x_train = df["text"][:line]
df_x_test = df["text"][line:]
x_train = vec.transform(df_x_train)
#saving a pickle with the vectorizer model
filename = 'vec_integratedml_bin.sav'
pickle.dump(vec, open(filename, 'wb'))
x_test = vec.transform(df_x_test)
# creating data frames to build CSV files to send to InterSystems IRIS
sp_matrix_x_train = pd.DataFrame.sparse.from_spmatrix(x_train)
sp_matrix_x_test = pd.DataFrame.sparse.from_spmatrix(x_test)
# adding a c prefix to create columns with alphanumeric names
sp_matrix_x_test.columns = ["c" + str(c) for c in sp_matrix_x_test.columns]
sp_matrix_x_train.columns = ["c" + str(c) for c in sp_matrix_x_train.columns]
sp_matrix_x_test.to_csv("xtest_bin.csv", index=False)
sp_matrix_x_train.to_csv("xtrain_bin.csv", index=False)
# formatting names to be usable in InterSystems IRIS
formated_columns = ["tag_" + re.subn(r"[\é\s\\\(\)\.\,\$\&\+\/\?\%\|\"\#\-]", "_", x.strip())[0] for x in mlb.classes_]
with open('formated_columns_bin.json', 'w') as outfile:
json.dump(formated_columns, outfile)
#x_train = x_total[:line]
y_train = DataFrame(y_total[:line])
#x_test = x_total[line:]
y_test = DataFrame(y_total[line:])
y_test.columns = mlb.classes_
y_train.columns = mlb.classes_
csv_y_train = y_train.copy()
csv_y_test = y_test.copy()
csv_y_test.columns = formated_columns
csv_y_train.columns = formated_columns
csv_y_test.to_csv("ytest_bin.csv", index=False)
csv_y_train.to_csv("ytrain_bin.csv", index=False)
#creating 2 dictionaries to convert and revert the names
iris_columns = {}
python_columns = {}
all_views = []
all_models = []
all_trains = []
curs_loop = conn.cursor()
for i, x in enumerate(formated_columns):
python_columns[x]=mlb.classes_[i]
iris_columns[mlb.classes_[i]]=x
#creating sql objects to perform the training
drop_view_text = "DROP VIEW community.view_train_{}".format(x)
view_text = "CREATE VIEW community.view_train_{} " \
"AS " \
"SELECT " \
"c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13, c14, c15, c16, c17, c18, c19, c20, c21, c22, c23, c24, c25, c26, c27, c28, c29, c30, c31, c32, c33, c34, c35, c36, c37, c38, c39, c40, c41, c42, c43, c44, c45, c46, c47, c48, c49, c50, c51, c52, c53, c54, c55, c56, c57, c58, c59, c60, c61, c62, c63, c64, c65, c66, c67, c68, c69, c70, c71, c72, c73, c74, c75, c76, c77, c78, c79, c80, c81, c82, c83, c84, c85, c86, c87, c88, c89, c90, c91, c92, c93, c94, c95, c96, c97, c98, c99, c100, c101, c102, c103, c104, c105, c106, c107, c108, c109, c110, c111, c112, c113, c114, c115, c116, c117, c118, c119, c120, c121, c122, c123, c124, c125, c126, c127, c128, c129, c130, c131, c132, c133, c134, c135, c136, c137, c138, c139, c140, c141, c142, c143, c144, c145, c146, c147, c148, c149, c150, c151, c152, c153, c154, c155, c156, c157, c158, c159, c160, c161, c162, c163, c164, c165, c166, c167, c168, c169, c170, c171, c172, c173, c174, c175, c176, c177, c178, c179, c180, c181, c182, c183, c184, c185, c186, c187, c188, c189, c190, c191, c192, c193, c194, c195, c196, c197, c198, c199, c200, c201, c202, c203, c204, c205, c206, c207, c208, c209, c210, c211, c212, c213, c214, c215, c216, c217, c218, c219, c220, c221, c222, c223, c224, c225, c226, c227, c228, c229, c230, c231, c232, c233, c234, c235, c236, c237, c238, c239, c240, c241, c242, c243, c244, c245, c246, c247, c248, c249, c250, c251, c252, c253, c254, c255, c256, c257, c258, c259, c260, c261, c262, c263, c264, c265, c266, c267, c268, c269, c270, c271, c272, c273, c274, c275, c276, c277, c278, c279, c280, c281, c282, c283, c284, c285, c286, c287, c288, c289, c290, c291, c292, c293, c294, c295, c296, c297, c298, c299, c300, c301, c302, c303, c304, c305, c306, c307, c308, c309, c310, c311, c312, c313, c314, c315, c316, c317, c318, c319, c320, c321, c322, c323, c324, c325, c326, c327, c328, c329, c330, c331, c332, c333, c334, c335, c336, c337, c338, c339, c340, c341, c342, c343, c344, c345, c346, c347, c348, c349, c350, c351, c352, c353, c354, c355, c356, c357, c358, c359, c360, c361, c362, c363, c364, c365, c366, c367, c368, c369, c370, c371, c372, c373, c374, c375, c376, c377, c378, c379, c380, c381, c382, c383, c384, c385, c386, c387, c388, c389, c390, c391, c392, c393, c394, c395, c396, c397, c398, c399, c400, c401, c402, c403, c404, c405, c406, c407, c408, c409, c410, c411, c412, c413, c414, c415, c416, c417, c418, c419, c420, c421, c422, c423, c424, c425, c426, c427, c428, c429, c430, c431, c432, c433, c434, c435, c436, c437, c438, c439, c440, c441, c442, c443, c444, c445, c446, c447, c448, c449, c450, c451, c452, c453, c454, c455, c456, c457, c458, c459, c460, c461, c462, c463, c464, c465, c466, c467, c468, c469, c470, c471, c472, c473, c474, c475, c476, c477, c478, c479, c480, c481, c482, c483, c484, c485, c486, c487, c488, c489, c490, c491, c492, c493, c494, c495, c496, c497, c498, c499, c500, c501, c502, c503, c504, c505, c506, c507, c508, c509, c510, c511, c512, c513, c514, c515, c516, c517, c518, c519, c520, c521, c522, c523, c524, c525, c526, c527, c528, c529, c530, c531, c532, c533, c534, c535, c536, c537, c538, c539, c540, c541, c542, c543, c544, c545, c546, c547, c548, c549, c550, c551, c552, c553, c554, c555, c556, c557, c558, c559, c560, c561, c562, c563, c564, c565, c566, c567, c568, c569, c570, c571, c572, c573, c574, c575, c576, c577, c578, c579, c580, c581, c582, c583, c584, c585, c586, c587, c588, c589, c590, c591, c592, c593, c594, c595, c596, c597, c598, c599, c600, c601, c602, c603, c604, c605, c606, c607, c608, c609, 
c610, c611, c612, c613, c614, c615, c616, c617, c618, c619, c620, c621, c622, c623, c624, c625, c626, c627, c628, c629, c630, c631, c632, c633, c634, c635, c636, c637, c638, c639, c640, c641, c642, c643, c644, c645, c646, c647, c648, c649, c650, c651, c652, c653, c654, c655, c656, c657, c658, c659, c660, c661, c662, c663, c664, c665, c666, c667, c668, c669, c670, c671, c672, c673, c674, c675, c676, c677, c678, c679, c680, c681, c682, c683, c684, c685, c686, c687, c688, c689, c690, c691, c692, c693, c694, c695, c696, c697, c698, c699, c700, c701, c702, c703, c704, c705, c706, c707, c708, c709, c710, c711, c712, c713, c714, c715, c716, c717, c718, c719, c720, c721, c722, c723, c724, c725, c726, c727, c728, c729, c730, c731, c732, c733, c734, c735, c736, c737, c738, c739, c740, c741, c742, c743, c744, c745, c746, c747, c748, c749, c750, c751, c752, c753, c754, c755, c756, c757, c758, c759, c760, c761, c762, c763, c764, c765, c766, c767, c768, c769, c770, c771, c772, c773, c774, c775, c776, c777, c778, c779, c780, c781, c782, c783, c784, c785, c786, c787, c788, c789, c790, c791, c792, c793, c794, c795, c796, c797, c798, c799, c800, c801, c802, c803, c804, c805, c806, c807, c808, c809, c810, c811, c812, c813, c814, c815, c816, c817, c818, c819, c820, c821, c822, c823, c824, c825, c826, c827, c828, c829, c830, c831, c832, c833, c834, c835, c836, c837, c838, c839, c840, c841, c842, c843, c844, c845, c846, c847, c848, c849, c850, c851, c852, c853, c854, c855, c856, c857, c858, c859, c860, c861, c862, c863, c864, c865, c866, c867, c868, c869, c870, c871, c872, c873, c874, c875, c876, c877, c878, c879, c880, c881, c882, c883, c884, c885, c886, c887, c888, c889, c890, c891, c892, c893, c894, c895, c896, c897, c898, c899, " \
"{} " \
"FROM " \
"community.xtrain as xtrain " \
"inner join " \
"community.ytrain as ytrain " \
"ON " \
"ytrain.id = xtrain.id".format(x,x)
model_text = "CREATE MODEL has_{}_tag PREDICTING ({}) FROM community.view_train_{}".format(x,x,x)
train_text = "TRAIN MODEL has_{}_tag FROM community.view_train_{}".format(x,x,x)
print(x)
try:
curs_loop.execute(view_text)
curs_loop.execute(model_text)
curs_loop.execute(train_text)
except:
print(x)
all_views.append("Set tSC = ##class(%SQL.Statement).%ExecDirect(, \"{}\")".format(view_text))
all_models.append("Set tSC = ##class(%SQL.Statement).%ExecDirect(, \"{}\")".format(model_text))
all_trains.append("Set tSC = ##class(%SQL.Statement).%ExecDirect(, \"{}\")".format(train_text))
predictors = {}
| 51.456693
| 5,310
| 0.657077
|
54ae8cfd9d0be07a46ac814aba753cffafd883fc
| 12,329
|
py
|
Python
|
venv/lib/python3.6/site-packages/ansible_collections/fortinet/fortios/plugins/modules/fortios_gtp_ie_allow_list.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | 1
|
2020-01-22T13:11:23.000Z
|
2020-01-22T13:11:23.000Z
|
venv/lib/python3.6/site-packages/ansible_collections/fortinet/fortios/plugins/modules/fortios_gtp_ie_allow_list.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | 12
|
2020-02-21T07:24:52.000Z
|
2020-04-14T09:54:32.000Z
|
venv/lib/python3.6/site-packages/ansible_collections/fortinet/fortios/plugins/modules/fortios_gtp_ie_allow_list.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019-2020 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_gtp_ie_allow_list
short_description: IE allow list in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify gtp feature and ie_allow_list category.
Examples include all parameters and values, which need to be adjusted to datasources before usage.
Tested with FOS v6.0.0
version_added: "2.10"
author:
- Link Zheng (@chillancezen)
- Jie Xue (@JieX19)
- Hongbin Lu (@fgtdev-hblu)
- Frank Shen (@frankshen01)
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Legacy fortiosapi has been deprecated; httpapi is the preferred way to run playbooks
requirements:
- ansible>=2.9.0
options:
access_token:
description:
- Token-based authentication.
Generated from GUI of Fortigate.
type: str
required: false
enable_log:
description:
- Enable/Disable logging for task.
type: bool
required: false
default: false
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
member_path:
type: str
description:
- Member attribute path to operate on.
- Delimited by a slash character if there are more than one attribute.
- A parameter marked with member_path is legitimate for member operations.
member_state:
type: str
description:
- Add or delete a member under specified attribute path.
- When member_state is specified, the state option is ignored.
choices:
- present
- absent
state:
description:
- Indicates whether to create or remove the object.
type: str
required: true
choices:
- present
- absent
gtp_ie_allow_list:
description:
- IE allow list.
default: null
type: dict
suboptions:
entries:
description:
- Entries of allow list for unknown or out-of-state IEs.
type: list
suboptions:
id:
description:
- Entry ID.
required: true
type: int
ie:
description:
- IE ID (1 - 255).
type: int
message:
description:
- Message ID (1 - 255).
type: int
name:
description:
- IE allow list name.
required: true
type: str
'''
EXAMPLES = '''
- hosts: fortigates
collections:
- fortinet.fortios
connection: httpapi
vars:
vdom: "root"
ansible_httpapi_use_ssl: yes
ansible_httpapi_validate_certs: no
ansible_httpapi_port: 443
tasks:
- name: IE allow list.
fortios_gtp_ie_allow_list:
vdom: "{{ vdom }}"
state: "present"
access_token: "<your_own_value>"
gtp_ie_allow_list:
entries:
-
id: "4"
ie: "5"
message: "6"
name: "default_name_7"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import FortiOSHandler
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import check_legacy_fortiosapi
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import schema_to_module_spec
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import check_schema_versioning
from ansible_collections.fortinet.fortios.plugins.module_utils.fortimanager.common import FAIL_SOCKET_MSG
def filter_gtp_ie_allow_list_data(json):
option_list = ['entries', 'name']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def underscore_to_hyphen(data):
if isinstance(data, list):
for i, elem in enumerate(data):
data[i] = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
def gtp_ie_allow_list(data, fos):
vdom = data['vdom']
state = data['state']
gtp_ie_allow_list_data = data['gtp_ie_allow_list']
filtered_data = underscore_to_hyphen(filter_gtp_ie_allow_list_data(gtp_ie_allow_list_data))
if state == "present" or state is True:
return fos.set('gtp',
'ie-allow-list',
data=filtered_data,
vdom=vdom)
elif state == "absent":
return fos.delete('gtp',
'ie-allow-list',
mkey=filtered_data['name'],
vdom=vdom)
else:
fos._module.fail_json(msg='state must be present or absent!')
def is_successful_status(resp):
    return ('status' in resp and resp['status'] == 'success') or \
        ('http_status' in resp and resp['http_status'] == 200) or \
        ('http_method' in resp and resp['http_method'] == "DELETE" and resp['http_status'] == 404)
def fortios_gtp(data, fos):
fos.do_member_operation('gtp_ie_allow_list')
if data['gtp_ie_allow_list']:
resp = gtp_ie_allow_list(data, fos)
else:
fos._module.fail_json(msg='missing task body: %s' % ('gtp_ie_allow_list'))
return not is_successful_status(resp), \
is_successful_status(resp) and \
(resp['revision_changed'] if 'revision_changed' in resp else True), \
resp
versioned_schema = {
"type": "list",
"children": {
"name": {
"type": "string",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
"entries": {
"type": "list",
"children": {
"message": {
"type": "integer",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
"ie": {
"type": "integer",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
"id": {
"type": "integer",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
}
},
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
}
},
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
}
def main():
module_spec = schema_to_module_spec(versioned_schema)
mkeyname = 'name'
fields = {
"access_token": {"required": False, "type": "str", "no_log": True},
"enable_log": {"required": False, "type": bool},
"vdom": {"required": False, "type": "str", "default": "root"},
"member_path": {"required": False, "type": "str"},
"member_state": {
"type": "str",
"required": False,
"choices": ["present", "absent"]
},
"state": {"required": True, "type": "str",
"choices": ["present", "absent"]},
"gtp_ie_allow_list": {
"required": False, "type": "dict", "default": None,
"options": {
}
}
}
for attribute_name in module_spec['options']:
fields["gtp_ie_allow_list"]['options'][attribute_name] = module_spec['options'][attribute_name]
if mkeyname and mkeyname == attribute_name:
fields["gtp_ie_allow_list"]['options'][attribute_name]['required'] = True
check_legacy_fortiosapi()
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
versions_check_result = None
if module._socket_path:
connection = Connection(module._socket_path)
if 'access_token' in module.params:
connection.set_option('access_token', module.params['access_token'])
if 'enable_log' in module.params:
connection.set_option('enable_log', module.params['enable_log'])
else:
connection.set_option('enable_log', False)
fos = FortiOSHandler(connection, module, mkeyname)
versions_check_result = check_schema_versioning(fos, versioned_schema, "gtp_ie_allow_list")
is_error, has_changed, result = fortios_gtp(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
if versions_check_result and versions_check_result['matched'] is False:
module.warn("Ansible has detected version mismatch between FortOS system and your playbook, see more details by specifying option -vvv")
if not is_error:
if versions_check_result and versions_check_result['matched'] is False:
module.exit_json(changed=has_changed, version_check_warning=versions_check_result, meta=result)
else:
module.exit_json(changed=has_changed, meta=result)
else:
if versions_check_result and versions_check_result['matched'] is False:
module.fail_json(msg="Error in repo", version_check_warning=versions_check_result, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| 31.371501
| 144
| 0.593884
|
97e5d354a6cc0e58440e88a7f95883c4c9f06d6d
| 9,020
|
py
|
Python
|
tests/inspectdb/tests.py
|
huicheese/Django-test
|
eb866b3c4c61eb990e68f4b6a02cb1b7d948adbd
|
[
"BSD-3-Clause"
] | 118
|
2015-01-03T15:42:32.000Z
|
2022-02-01T02:58:38.000Z
|
tests/inspectdb/tests.py
|
huicheese/Django-test
|
eb866b3c4c61eb990e68f4b6a02cb1b7d948adbd
|
[
"BSD-3-Clause"
] | 18
|
2015-01-14T07:51:48.000Z
|
2021-10-14T01:19:26.000Z
|
tests/inspectdb/tests.py
|
huicheese/Django-test
|
eb866b3c4c61eb990e68f4b6a02cb1b7d948adbd
|
[
"BSD-3-Clause"
] | 70
|
2015-01-01T00:33:24.000Z
|
2021-12-10T03:43:07.000Z
|
# -*- encoding: utf-8 -*-
from __future__ import unicode_literals
import re
from django.core.management import call_command
from django.db import connection
from django.test import TestCase, skipUnlessDBFeature
from django.utils.unittest import expectedFailure
from django.utils.six import PY3, StringIO
if connection.vendor == 'oracle':
expectedFailureOnOracle = expectedFailure
else:
expectedFailureOnOracle = lambda f: f
class InspectDBTestCase(TestCase):
def test_stealth_table_name_filter_option(self):
out = StringIO()
        # Let's limit the introspection to tables created for models of this
# application
call_command('inspectdb',
table_name_filter=lambda tn:tn.startswith('inspectdb_'),
stdout=out)
error_message = "inspectdb has examined a table that should have been filtered out."
# contrib.contenttypes is one of the apps always installed when running
# the Django test suite, check that one of its tables hasn't been
# inspected
self.assertNotIn("class DjangoContentType(models.Model):", out.getvalue(), msg=error_message)
def make_field_type_asserter(self):
"""Call inspectdb and return a function to validate a field type in its output"""
out = StringIO()
call_command('inspectdb',
table_name_filter=lambda tn: tn.startswith('inspectdb_columntypes'),
stdout=out)
output = out.getvalue()
def assertFieldType(name, definition):
out_def = re.search(r'^\s*%s = (models.*)$' % name, output, re.MULTILINE).groups()[0]
self.assertEqual(definition, out_def)
return assertFieldType
# Inspecting oracle DB doesn't produce correct results, see #19884
@expectedFailureOnOracle
def test_field_types(self):
"""Test introspection of various Django field types"""
assertFieldType = self.make_field_type_asserter()
assertFieldType('char_field', "models.CharField(max_length=10)")
assertFieldType('comma_separated_int_field', "models.CharField(max_length=99)")
assertFieldType('date_field', "models.DateField()")
assertFieldType('date_time_field', "models.DateTimeField()")
assertFieldType('email_field', "models.CharField(max_length=75)")
assertFieldType('file_field', "models.CharField(max_length=100)")
assertFieldType('file_path_field', "models.CharField(max_length=100)")
if connection.vendor == 'postgresql':
# Only PostgreSQL has a specific type
assertFieldType('ip_address_field', "models.GenericIPAddressField()")
assertFieldType('gen_ip_adress_field', "models.GenericIPAddressField()")
else:
assertFieldType('ip_address_field', "models.CharField(max_length=15)")
assertFieldType('gen_ip_adress_field', "models.CharField(max_length=39)")
assertFieldType('slug_field', "models.CharField(max_length=50)")
assertFieldType('text_field', "models.TextField()")
assertFieldType('time_field', "models.TimeField()")
assertFieldType('url_field', "models.CharField(max_length=200)")
def test_number_field_types(self):
"""Test introspection of various Django field types"""
assertFieldType = self.make_field_type_asserter()
assertFieldType('id', "models.IntegerField(primary_key=True)")
assertFieldType('big_int_field', "models.BigIntegerField()")
if connection.vendor == 'mysql':
# No native boolean type on MySQL
assertFieldType('bool_field', "models.IntegerField()")
assertFieldType('null_bool_field', "models.IntegerField(blank=True, null=True)")
else:
assertFieldType('bool_field', "models.BooleanField()")
assertFieldType('null_bool_field', "models.NullBooleanField()")
if connection.vendor == 'sqlite':
# Guessed arguments, see #5014
assertFieldType('decimal_field', "models.DecimalField(max_digits=10, decimal_places=5) "
"# max_digits and decimal_places have been guessed, as this database handles decimal fields as float")
else:
assertFieldType('decimal_field', "models.DecimalField(max_digits=6, decimal_places=1)")
assertFieldType('float_field', "models.FloatField()")
assertFieldType('int_field', "models.IntegerField()")
if connection.vendor == 'sqlite':
assertFieldType('pos_int_field', "models.PositiveIntegerField()")
assertFieldType('pos_small_int_field', "models.PositiveSmallIntegerField()")
else:
# 'unsigned' property undetected on other backends
assertFieldType('pos_int_field', "models.IntegerField()")
if connection.vendor == 'postgresql':
assertFieldType('pos_small_int_field', "models.SmallIntegerField()")
else:
assertFieldType('pos_small_int_field', "models.IntegerField()")
if connection.vendor in ('sqlite', 'postgresql'):
assertFieldType('small_int_field', "models.SmallIntegerField()")
else:
assertFieldType('small_int_field', "models.IntegerField()")
@skipUnlessDBFeature('can_introspect_foreign_keys')
def test_attribute_name_not_python_keyword(self):
out = StringIO()
        # Let's limit the introspection to tables created for models of this
# application
call_command('inspectdb',
table_name_filter=lambda tn:tn.startswith('inspectdb_'),
stdout=out)
output = out.getvalue()
error_message = "inspectdb generated an attribute name which is a python keyword"
# Recursive foreign keys should be set to 'self'
self.assertIn("parent = models.ForeignKey('self')", output)
self.assertNotIn("from = models.ForeignKey(InspectdbPeople)", output, msg=error_message)
# As InspectdbPeople model is defined after InspectdbMessage, it should be quoted
self.assertIn("from_field = models.ForeignKey('InspectdbPeople', db_column='from_id')",
output)
self.assertIn("people_pk = models.ForeignKey(InspectdbPeople, primary_key=True)",
output)
self.assertIn("people_unique = models.ForeignKey(InspectdbPeople, unique=True)",
output)
def test_digits_column_name_introspection(self):
"""Introspection of column names consist/start with digits (#16536/#17676)"""
out = StringIO()
        # Let's limit the introspection to tables created for models of this
# application
call_command('inspectdb',
table_name_filter=lambda tn:tn.startswith('inspectdb_'),
stdout=out)
output = out.getvalue()
error_message = "inspectdb generated a model field name which is a number"
self.assertNotIn(" 123 = models.CharField", output, msg=error_message)
self.assertIn("number_123 = models.CharField", output)
error_message = "inspectdb generated a model field name which starts with a digit"
self.assertNotIn(" 4extra = models.CharField", output, msg=error_message)
self.assertIn("number_4extra = models.CharField", output)
self.assertNotIn(" 45extra = models.CharField", output, msg=error_message)
self.assertIn("number_45extra = models.CharField", output)
def test_special_column_name_introspection(self):
"""
Introspection of column names containing special characters,
unsuitable for Python identifiers
"""
out = StringIO()
call_command('inspectdb', stdout=out)
output = out.getvalue()
base_name = 'Field' if connection.vendor != 'oracle' else 'field'
self.assertIn("field = models.IntegerField()", output)
self.assertIn("field_field = models.IntegerField(db_column='%s_')" % base_name, output)
self.assertIn("field_field_0 = models.IntegerField(db_column='%s__')" % base_name, output)
self.assertIn("field_field_1 = models.IntegerField(db_column='__field')", output)
self.assertIn("prc_x = models.IntegerField(db_column='prc(%) x')", output)
if PY3:
# Python 3 allows non-ascii identifiers
self.assertIn("tamaño = models.IntegerField()", output)
else:
self.assertIn("tama_o = models.IntegerField(db_column='tama\\xf1o')", output)
def test_managed_models(self):
"""Test that by default the command generates models with `Meta.managed = False` (#14305)"""
out = StringIO()
call_command('inspectdb',
table_name_filter=lambda tn:tn.startswith('inspectdb_columntypes'),
stdout=out)
output = out.getvalue()
self.longMessage = False
self.assertIn(" managed = False", output, msg='inspectdb should generate unmanaged models.')
| 50.960452
| 118
| 0.667738
|
b775cd418d33bc5c80d9ec1581ecd24cef40cdfd
| 14,248
|
py
|
Python
|
train.py
|
kevinghst/UDA_sup
|
c970622370d5de6b8c48b458cb8b4fe59e37effb
|
[
"Apache-2.0"
] | null | null | null |
train.py
|
kevinghst/UDA_sup
|
c970622370d5de6b8c48b458cb8b4fe59e37effb
|
[
"Apache-2.0"
] | null | null | null |
train.py
|
kevinghst/UDA_sup
|
c970622370d5de6b8c48b458cb8b4fe59e37effb
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 SanghunYun, Korea University.
# (Strongly inspired by Dong-Hyun Lee, Kakao Brain)
#
# Except load and save function, the whole codes of file has been modified and added by
# SanghunYun, Korea University for UDA.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import json
from copy import deepcopy
from typing import NamedTuple
from tqdm import tqdm
import time
import shutil
from logger import *
import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss
from utils import checkpoint
# from utils.logger import Logger
from tensorboardX import SummaryWriter
from utils.utils import output_logging, bin_accuracy, multi_accuracy, AverageMeterSet
import pdb
class Trainer(object):
"""Training Helper class"""
def __init__(self, cfg, model, data_iter, optimizer, device, ema_model, ema_optimizer):
self.cfg = cfg
self.model = model
self.optimizer = optimizer
self.device = device
self.ema_model = ema_model
self.ema_optimizer = ema_optimizer
# data iter
if len(data_iter) == 1:
self.sup_iter = data_iter[0]
elif len(data_iter) == 2:
self.sup_iter = self.repeat_dataloader(data_iter[0])
self.unsup_iter = None
self.eval_iter = data_iter[1]
elif len(data_iter) == 3:
self.sup_iter = self.repeat_dataloader(data_iter[0])
self.unsup_iter = self.repeat_dataloader(data_iter[1])
self.eval_iter = data_iter[2]
def train(self, get_loss, get_acc, model_file, pretrain_file):
        """ train uda"""
        if self.cfg.uda_mode or self.cfg.mixmatch_mode:
            ssl_mode = True
        else:
            ssl_mode = False
# tensorboardX logging
if self.cfg.results_dir:
dir = os.path.join('results', self.cfg.results_dir)
if os.path.exists(dir) and os.path.isdir(dir):
shutil.rmtree(dir)
writer = SummaryWriter(log_dir=dir)
#logger_path = dir + 'log.txt'
#logger = Logger(logger_path, title='uda')
#if self.cfg.no_unsup_loss:
# logger.set_names(['Train Loss', 'Valid Acc', 'Valid Loss', 'LR'])
#else:
# logger.set_names(['Train Loss', 'Train Loss X', 'Train Loss U', 'Train Loss W U', 'Valid Acc', 'Valid Loss', 'LR'])
meters = AverageMeterSet()
self.model.train()
if self.cfg.model == "custom":
self.load(model_file, pretrain_file) # between model_file and pretrain_file, only one model will be loaded
model = self.model.to(self.device)
ema_model = self.ema_model.to(self.device) if self.ema_model else None
if self.cfg.data_parallel: # Parallel GPU mode
model = nn.DataParallel(model)
ema_model = nn.DataParallel(ema_model) if ema_model else None
global_step = 0
loss_sum = 0.
max_acc = [0., 0, 0., 0.] # acc, step, val_loss, train_loss
no_improvement = 0
sup_batch_size = None
unsup_batch_size = None
# Progress bar is set by unsup or sup data
# uda_mode == True --> sup_iter is repeated
# uda_mode == False --> sup_iter is not repeated
iter_bar = tqdm(self.unsup_iter, total=self.cfg.total_steps, disable=self.cfg.hide_tqdm) if ssl_mode \
else tqdm(self.sup_iter, total=self.cfg.total_steps, disable=self.cfg.hide_tqdm)
start = time.time()
for i, batch in enumerate(iter_bar):
# Device assignment
if ssl_mode:
sup_batch = [t.to(self.device) for t in next(self.sup_iter)]
unsup_batch = [t.to(self.device) for t in batch]
unsup_batch_size = unsup_batch_size or unsup_batch[0].shape[0]
if unsup_batch[0].shape[0] != unsup_batch_size:
continue
else:
sup_batch = [t.to(self.device) for t in batch]
unsup_batch = None
# update
self.optimizer.zero_grad()
final_loss, sup_loss, unsup_loss, weighted_unsup_loss = get_loss(model, sup_batch, unsup_batch, global_step)
if self.cfg.no_sup_loss:
final_loss = unsup_loss
elif self.cfg.no_unsup_loss:
final_loss = sup_loss
meters.update('train_loss', final_loss.item())
meters.update('sup_loss', sup_loss.item())
meters.update('unsup_loss', unsup_loss.item())
meters.update('w_unsup_loss', weighted_unsup_loss.item())
meters.update('lr', self.optimizer.get_lr()[0])
final_loss.backward()
self.optimizer.step()
if self.ema_optimizer:
self.ema_optimizer.step()
# print loss
global_step += 1
loss_sum += final_loss.item()
if not self.cfg.hide_tqdm:
if ssl_mode:
iter_bar.set_description('final=%5.3f unsup=%5.3f sup=%5.3f'\
% (final_loss.item(), unsup_loss.item(), sup_loss.item()))
else:
iter_bar.set_description('loss=%5.3f' % (final_loss.item()))
if global_step % self.cfg.save_steps == 0:
self.save(global_step)
if get_acc and global_step % self.cfg.check_steps == 0 and global_step > self.cfg.check_after:
if self.cfg.mixmatch_mode:
results = self.eval(get_acc, None, ema_model)
else:
total_accuracy, avg_val_loss = self.validate()
# logging
writer.add_scalars('data/eval_acc', {'eval_acc' : total_accuracy}, global_step)
writer.add_scalars('data/eval_loss', {'eval_loss': avg_val_loss}, global_step)
if self.cfg.no_unsup_loss:
writer.add_scalars('data/train_loss', {'train_loss': meters['train_loss'].avg}, global_step)
writer.add_scalars('data/lr', {'lr': meters['lr'].avg}, global_step)
else:
writer.add_scalars('data/train_loss', {'train_loss': meters['train_loss'].avg}, global_step)
writer.add_scalars('data/sup_loss', {'sup_loss': meters['sup_loss'].avg}, global_step)
writer.add_scalars('data/unsup_loss', {'unsup_loss': meters['unsup_loss'].avg}, global_step)
writer.add_scalars('data/w_unsup_loss', {'w_unsup_loss': meters['w_unsup_loss'].avg}, global_step)
writer.add_scalars('data/lr', {'lr': meters['lr'].avg}, global_step)
meters.reset()
if max_acc[0] < total_accuracy:
self.save(global_step)
max_acc = total_accuracy, global_step, avg_val_loss, final_loss.item()
no_improvement = 0
else:
no_improvement += 1
print(" Top 1 Accuracy: {0:.4f}".format(total_accuracy))
print(" Validation Loss: {0:.4f}".format(avg_val_loss))
print(" Train Loss: {0:.4f}".format(final_loss.item()))
if ssl_mode:
print(" Sup Loss: {0:.4f}".format(sup_loss.item()))
print(" Unsup Loss: {0:.4f}".format(unsup_loss.item()))
print(" Learning rate: {0:.7f}".format(self.optimizer.get_lr()[0]))
print(
'Max Accuracy : %5.3f Best Val Loss : %5.3f Best Train Loss : %5.4f Max global_steps : %d Cur global_steps : %d'
%(max_acc[0], max_acc[2], max_acc[3], max_acc[1], global_step), end='\n\n'
)
if no_improvement == self.cfg.early_stopping:
print("Early stopped")
total_time = time.time() - start
print('Total Training Time: %d' %(total_time), end='\n')
break
if self.cfg.total_steps and self.cfg.total_steps < global_step:
print('The total steps have been reached')
total_time = time.time() - start
print('Total Training Time: %d' %(total_time), end='\n')
if get_acc:
if self.cfg.mixmatch_mode:
results = self.eval(get_acc, None, ema_model)
else:
total_accuracy, avg_val_loss = self.validate()
if max_acc[0] < total_accuracy:
max_acc = total_accuracy, global_step, avg_val_loss, final_loss.item()
print(" Top 1 Accuracy: {0:.4f}".format(total_accuracy))
print(" Validation Loss: {0:.2f}".format(avg_val_loss))
print(" Train Loss: {0:.2f}".format(final_loss.item()))
print('Max Accuracy : %5.3f Best Val Loss : %5.3f Best Train Loss : %5.3f Max global_steps : %d Cur global_steps : %d' %(max_acc[0], max_acc[2], max_acc[3], max_acc[1], global_step), end='\n\n')
self.save(global_step)
return
writer.close()
return global_step
def eval(self, evaluate, model_file, model):
""" evaluation function """
if model_file:
self.load(model_file, None)
model = self.model.to(self.device)
if self.cfg.data_parallel:
model = nn.DataParallel(model)
model.eval()
results = []
iter_bar = tqdm(self.sup_iter) if model_file \
else tqdm(deepcopy(self.eval_iter))
for batch in iter_bar:
batch = [t.to(self.device) for t in batch]
with torch.no_grad():
accuracy, result = evaluate(model, batch)
results.append(result)
iter_bar.set_description('Eval Acc=%5.3f' % accuracy)
return results
def validate(self):
print("Running validation")
model = self.model
device = self.device
val_loader = self.eval_iter
cfg = self.cfg
# Put the model in evaluation mode--the dropout layers behave differently
# during evaluation.
model.eval()
# Tracking variables
total_eval_accuracy = 0
total_eval_loss = 0
nb_eval_steps = 0
total_prec1 = 0
total_prec5 = 0
# Evaluate data for one epoch
for batch in val_loader:
batch = [t.to(device) for t in batch]
b_input_ids, b_input_mask, b_segment_ids, b_labels = batch
batch_size = b_input_ids.size(0)
with torch.no_grad():
if cfg.model == "bert":
logits = model(
input_ids = b_input_ids,
attention_mask = b_input_mask,
no_pretrained_pool = cfg.no_pretrained_pool
)
else:
logits = model(
b_input_ids,
b_segment_ids,
b_input_mask
)
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits, b_labels)
# Accumulate the validation loss.
total_eval_loss += loss.item()
# Calculate the accuracy for this batch of test sentences, and
# accumulate it over all batches.
if cfg.num_labels == 2:
logits = logits.detach().cpu().numpy()
b_labels = b_labels.to('cpu').numpy()
total_prec1 += bin_accuracy(logits, b_labels)
else:
prec1, prec5 = multi_accuracy(logits, b_labels, topk=(1,5))
total_prec1 += prec1
total_prec5 += prec5
avg_prec1 = total_prec1 / len(val_loader)
avg_prec5 = total_prec5 / len(val_loader)
avg_val_loss = total_eval_loss / len(val_loader)
return avg_prec1, avg_val_loss
def load(self, model_file, pretrain_file):
""" between model_file and pretrain_file, only one model will be loaded """
if model_file:
print('Loading the model from', model_file)
if torch.cuda.is_available():
self.model.load_state_dict(torch.load(model_file))
else:
self.model.load_state_dict(torch.load(model_file, map_location='cpu'))
elif pretrain_file:
print('Loading the pretrained model from', pretrain_file)
if pretrain_file.endswith('.ckpt'): # checkpoint file in tensorflow
checkpoint.load_model(self.model.transformer, pretrain_file)
elif pretrain_file.endswith('.pt'): # pretrain model file in pytorch
self.model.transformer.load_state_dict(
{key[12:]: value
for key, value in torch.load(pretrain_file).items()
if key.startswith('transformer')}
) # load only transformer parts
def save(self, i):
""" save model """
if not os.path.isdir(os.path.join('results', self.cfg.results_dir, 'save')):
os.makedirs(os.path.join('results', self.cfg.results_dir, 'save'))
torch.save(self.model.state_dict(),
os.path.join('results', self.cfg.results_dir, 'save', 'model_steps_'+str(i)+'.pt'))
def repeat_dataloader(self, iterable):
""" repeat dataloader """
while True:
for x in iterable:
yield x
| 40.477273
| 216
| 0.566887
|
8c87775ec72d65dbc462a55041e05078cb0cdfd9
| 3,032
|
py
|
Python
|
py/qaviton/utils/databases/sqlite.py
|
qaviton/qaviton
|
112f1620af36e09031909bd36b7e388df577b75b
|
[
"Apache-2.0"
] | 9
|
2018-09-06T10:27:55.000Z
|
2020-01-02T16:50:13.000Z
|
py/qaviton/utils/databases/sqlite.py
|
idanhakimi/qaviton
|
6ed17e4131c35cae902b95498e60de53e5b05a56
|
[
"Apache-2.0"
] | 6
|
2019-06-05T09:44:21.000Z
|
2022-03-11T23:26:41.000Z
|
py/qaviton/utils/databases/sqlite.py
|
idanhakimi/qaviton
|
6ed17e4131c35cae902b95498e60de53e5b05a56
|
[
"Apache-2.0"
] | 9
|
2018-09-21T14:47:40.000Z
|
2021-12-21T01:37:20.000Z
|
import sqlite3
class DataBase:
"""
a very simple implementation
to create local SQLite database connection
specified by db_file
you can integrate your database with your custom commands class
example usage:
class command:
get_users = '''SELECT user_id, password FROM users;'''
get_user_by_user_id = '''SELECT user_id, password FROM users WHERE user_id = '{}';'''
get_user_by_id = '''SELECT user_id, password FROM users WHERE id = '{}';'''
get_last_user = '''SELECT user_id, password FROM users ORDER BY ID DESC LIMIT 1'''
add_user = '''INSERT INTO users (user_id, password) VALUES ('{}', '{}');'''
drop_table = '''DROP TABLE IF EXISTS {};'''
create_users_table = '''CREATE TABLE IF NOT EXISTS users (
id INTEGER PRIMARY KEY AUTOINCREMENT,
user_id text,
password text);'''
# preferably use 'with' to automatically commit and close the connection
with DataBase('db') as db:
db.command = command
db.execute(db.command.drop_table.format('users'))
db.execute(db.command.create_users_table)
db.execute(db.command.add_user.format('id123', 'password1'))
db = DataBase('db')
db.command = command
db.execute(db.command.add_user.format('id777', 'password1'))
    user1 = db.execute(db.command.get_user_by_user_id.format('id123'))
    user2 = db.execute(db.command.get_user_by_user_id.format('id777'))
db.commit()
db.close()
"""
def __init__(self, db_file: str, commands=None):
""" create database connection
:param db_file: path of db file
:param commands: custom useful commands
"""
self.connection = sqlite3.connect(db_file)
self.cursor = self.connection.cursor()
self.version = sqlite3.version
self.command = commands
def __enter__(self):
return self
def __exit__(self, *tp):
self.commit()
self.close()
def __call__(self, command):
""":rtype: list"""
return self.execute(command)
def commit(self):
self.connection.commit()
def close(self):
self.connection.close()
def execute(self, command):
""":rtype: list"""
return self.cursor.execute(command).fetchall()
def export_table_to_file(self, table, file, titles, permission='w'):
""" export an entire table to a file
:param table: table name
:param file: path to export
:param titles: table titles
:param permission: probably write
"""
self.cursor.execute("select * from {}".format(table))
table_list = self.cursor.fetchall()
with open(file, permission) as f:
f.write(','.join(titles) + '\n')
for i in table_list:
s = []
for a in i:
s.append(str(a))
f.write(','.join(s) + '\n')
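# A minimal usage sketch for export_table_to_file (not part of the original module);
# the database file, table, output path and column titles below are illustrative only.
if __name__ == "__main__":
    with DataBase('example.db') as db:
        db.execute('''CREATE TABLE IF NOT EXISTS users (
                          id INTEGER PRIMARY KEY AUTOINCREMENT,
                          user_id text,
                          password text);''')
        db.execute('''INSERT INTO users (user_id, password) VALUES ('id123', 'pw1');''')
        # writes a CSV-style dump: a header row followed by one line per record
        db.export_table_to_file('users', 'users.csv', ['id', 'user_id', 'password'])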
| 34.850575
| 97
| 0.582784
|
052e420f9122cd31306e4ccc3fa0c4c9567e303b
| 171
|
py
|
Python
|
django_decadence/apps.py
|
ksiazkowicz/django-decadence
|
0a29a7c748feb0d98cb8f4783a93159fc290ea67
|
[
"MIT"
] | 1
|
2019-08-26T11:30:02.000Z
|
2019-08-26T11:30:02.000Z
|
django_decadence/apps.py
|
ksiazkowicz/django-decadence
|
0a29a7c748feb0d98cb8f4783a93159fc290ea67
|
[
"MIT"
] | null | null | null |
django_decadence/apps.py
|
ksiazkowicz/django-decadence
|
0a29a7c748feb0d98cb8f4783a93159fc290ea67
|
[
"MIT"
] | 1
|
2021-02-21T13:30:16.000Z
|
2021-02-21T13:30:16.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import AppConfig
class DjangoDecadenceConfig(AppConfig):
name = 'django_decadence'
| 19
| 39
| 0.760234
|
5bccc1112d4e1e5f5f99d31f812d04d9445a560f
| 326
|
py
|
Python
|
Python/P1.py
|
tinkpo/Curso-Introducci-n-a-la-Computaci-n-para-Matem-ticos
|
72b5db944c759c0d977553fa1bc15a11ac110909
|
[
"MIT"
] | null | null | null |
Python/P1.py
|
tinkpo/Curso-Introducci-n-a-la-Computaci-n-para-Matem-ticos
|
72b5db944c759c0d977553fa1bc15a11ac110909
|
[
"MIT"
] | null | null | null |
Python/P1.py
|
tinkpo/Curso-Introducci-n-a-la-Computaci-n-para-Matem-ticos
|
72b5db944c759c0d977553fa1bc15a11ac110909
|
[
"MIT"
] | null | null | null |
x=list(input("Ingrese la lista "))
i=0
j=1
a=[0]
b=[0]
while(i<=len(x)-2):
if(x[i]==x[i+1]):
j=j+1
b.append(x[i])
a.append(j)
else:
j=1
i=i+1
print('The length of the longest plateau is: ', max(a), '\n', 'And the value of the longest plateau is: ', b[a.index(max(a))])
| 21.733333
| 126
| 0.48773
|
62b58b1e8ea77c8b210add38e81cda63ee33ca59
| 1,606
|
py
|
Python
|
angstrom/2019/misc/TI-1337/wrapper.py
|
mystickev/ctf-archives
|
89e99a5cd5fb6b2923cad3fe1948d3ff78649b4e
|
[
"MIT"
] | 1
|
2021-11-02T20:53:58.000Z
|
2021-11-02T20:53:58.000Z
|
angstrom/2019/misc/TI-1337/wrapper.py
|
ruhan-islam/ctf-archives
|
8c2bf6a608c821314d1a1cfaa05a6cccef8e3103
|
[
"MIT"
] | null | null | null |
angstrom/2019/misc/TI-1337/wrapper.py
|
ruhan-islam/ctf-archives
|
8c2bf6a608c821314d1a1cfaa05a6cccef8e3103
|
[
"MIT"
] | 1
|
2021-12-19T11:06:24.000Z
|
2021-12-19T11:06:24.000Z
|
#!/usr/bin/env python3
"""
Wrapper that filters all input to the calculator program to make sure it follows the blacklist.
It is not necessary to fully understand this code. Just know it doesn't allow any of the characters in the following string:
"()[]{}_.#\"\'\\ABCDEFGHIJKLMNOPQRSTUVWXYZ"
Check ti1337.py to see what the program actually does with valid input.
"""
import subprocess, fcntl, os, sys, selectors
os.chdir("app")
p = subprocess.Popen(["python3", "ti1337.py"], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# set files descriptors to nonblocking and create selectors
fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
fcntl.fcntl(sys.stdin, fcntl.F_SETFL, fcntl.fcntl(sys.stdin, fcntl.F_GETFL) | os.O_NONBLOCK)
selector = selectors.DefaultSelector()
selector.register(sys.stdin, selectors.EVENT_READ, 'stdin')
selector.register(p.stdout, selectors.EVENT_READ, 'stdout')
blacklist = "()[]{}_.#\"\'\\ABCDEFGHIJKLMNOPQRSTUVWXYZ"
# until the program has finished
while p.poll() is None:
events = selector.select()
for key, mask in events:
if key.data == 'stdin':
# write input
line = key.fileobj.readline()
for c in line:
if c in blacklist:
print("That doesn't seem like math!")
sys.exit()
p.stdin.write(bytes(line, encoding="utf-8"))
p.stdin.flush()
elif key.data == 'stdout':
# print output
output = key.fileobj.read()
sys.stdout.write(output.decode("utf-8"))
sys.stdout.flush()
output, error = p.communicate()
if error: sys.stdout.write(error.decode("utf-8"))
sys.stdout.flush()
| 40.15
| 124
| 0.722914
|
4900292d68396da77072f843b5d6304a0b873c26
| 8,089
|
py
|
Python
|
client/commands/kill.py
|
KevinHock/pyre-check
|
09b0f53e23353b16a59ef76d8e5c5b78bdf08447
|
[
"MIT"
] | 5
|
2019-02-14T19:46:47.000Z
|
2020-01-16T05:48:45.000Z
|
client/commands/kill.py
|
KevinHock/pyre-check
|
09b0f53e23353b16a59ef76d8e5c5b78bdf08447
|
[
"MIT"
] | null | null | null |
client/commands/kill.py
|
KevinHock/pyre-check
|
09b0f53e23353b16a59ef76d8e5c5b78bdf08447
|
[
"MIT"
] | 2
|
2019-02-14T19:46:23.000Z
|
2020-07-13T03:53:04.000Z
|
# Copyright (c) 2016-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging
import os
import shutil
import signal
import subprocess
import tempfile
from itertools import chain
from pathlib import Path
from typing import Optional
import psutil
from .. import BINARY_NAME, CLIENT_NAME, LOG_DIRECTORY, configuration_monitor
from ..analysis_directory import AnalysisDirectory
from ..configuration import Configuration
from ..project_files_monitor import ProjectFilesMonitor
from ..watchman_subscriber import WatchmanSubscriber
from .command import Command
from .rage import Rage
LOG: logging.Logger = logging.getLogger(__name__)
PYRE_FIRE = """
.
,,,
.,,,,
,,,,,,,,
,,,*/*,,,,.
,,,*///*,,,,,
,,,,*/////*,,,,,
,,,,////////*,,,,,
,,,*/////(/////*,,, ,
,,,**/////(((//*/*,,.,.
,,,*/////((((((////,,,,*
,,.,,,/////((((((((//***/,,,
.,,,,/*////((((((((((/////*,,.
,,,,*//////((((((((((/////*,,,
,,,,*/////((((((((((((////*,,,,,
,,,,,//////((((((((((/////,,,,,*
.,,,,,*/////(((((((/////*,,,,,*.
##, *,,,,,,*//////////////,,,,,,**
.,*###(/**,,,*,,,,,,,,,,,,,,,*,**/
/(##################(****///***,**,*****//*(#############//
///#####%%%%%%%%#############((####(########%%##########*//
///#%%#, /**/((#########%#######. ############%%%%##/*/
//*/*//#####%%..#((#(### *######////* ////
,/**///#%%#. ,////////* .%##/*/*//,
.///* .///*////* #/*////,
"""
class Kill(Command):
NAME = "kill"
def __init__(
self,
arguments: argparse.Namespace,
original_directory: str,
configuration: Optional[Configuration] = None,
analysis_directory: Optional[AnalysisDirectory] = None,
) -> None:
super(Kill, self).__init__(
arguments, original_directory, configuration, analysis_directory
)
self._with_fire: bool = arguments.with_fire
@classmethod
def add_subparser(cls, parser: argparse._SubParsersAction) -> None:
kill = parser.add_parser(cls.NAME)
kill.set_defaults(command=cls)
kill.add_argument(
"--with-fire", action="store_true", help="A no-op flag that adds emphasis."
)
def generate_analysis_directory(self) -> AnalysisDirectory:
return AnalysisDirectory(".")
@staticmethod
def _delete_linked_path(link_path: Path) -> None:
try:
actual_path = os.readlink(link_path)
os.remove(actual_path)
except OSError:
pass
try:
os.unlink(link_path)
except OSError:
pass
def _delete_caches(self) -> None:
root_log_directory = Path(self._current_directory, LOG_DIRECTORY)
# If a resource cache exists, delete it to remove corrupted artifacts.
try:
shutil.rmtree(str(root_log_directory / "resource_cache"))
except OSError:
pass
# If a buck builder cache exists, also remove it.
scratch_path = None
try:
scratch_path = (
subprocess.check_output(
f"mkscratch path --subdir pyre {self._current_directory}".split()
)
.decode()
.strip()
)
except Exception as exception:
LOG.debug("Could not find scratch path because of exception: %s", exception)
if scratch_path is not None:
buck_builder_cache_directory = str(
Path(scratch_path, ".buck_builder_cache")
)
try:
LOG.debug(
"Deleting buck builder cache at %s", buck_builder_cache_directory
)
shutil.rmtree(buck_builder_cache_directory)
except OSError as exception:
LOG.debug(
"Failed to delete buck builder cache due to exception: %s.",
exception,
)
def _kill_client_processes(self) -> None:
for process in psutil.process_iter(attrs=["name"]):
if process.info["name"] != CLIENT_NAME:
continue
# We need to be careful about how we kill the client here, as otherwise we
# might cause a race where we attempt to kill the `pyre kill` command.
pid_to_kill = process.pid
if pid_to_kill == os.getpgid(os.getpid()):
continue
try:
LOG.info(
"Killing process {} with pid {}.".format(
process.info["name"], pid_to_kill
)
)
os.kill(pid_to_kill, signal.SIGKILL)
except (ProcessLookupError, PermissionError) as exception:
LOG.debug(
"Failed to kill process {} with pid {} due to exception {}".format(
process.info["name"], pid_to_kill, exception
)
)
WatchmanSubscriber.stop_subscriber(
ProjectFilesMonitor.base_path(self._configuration), ProjectFilesMonitor.NAME
)
WatchmanSubscriber.stop_subscriber(
configuration_monitor.ConfigurationMonitor.base_path(self._configuration),
configuration_monitor.ConfigurationMonitor.NAME,
)
@staticmethod
def _kill_binary_processes() -> None:
# Kills all processes that have the same binary as the one specified
# in the configuration.
binary_name = _get_process_name("PYRE_BINARY", BINARY_NAME)
subprocess.run(["pkill", binary_name])
def _delete_server_files(self) -> None:
root_log_directory = Path(self._current_directory, LOG_DIRECTORY)
LOG.info("Deleting server files under %s", root_log_directory)
socket_paths = root_log_directory.glob("**/server.sock")
json_server_paths = root_log_directory.glob("**/json_server.sock")
pid_paths = root_log_directory.glob("**/server.pid")
for path in chain(socket_paths, json_server_paths, pid_paths):
self._delete_linked_path(path)
def _rage(self) -> None:
with tempfile.NamedTemporaryFile(
prefix="pyre-rage-", suffix=".log", delete=False
) as output:
LOG.warning("Saving the output of `pyre rage` into `%s`", output.name)
arguments = self._arguments
arguments.output_path = output.name
Rage(
arguments,
self._original_directory,
self._configuration,
self._analysis_directory,
).run()
def _run(self) -> None:
explicit_local = self._arguments.local_configuration
if explicit_local:
LOG.warning(
"Pyre kill will terminate all running servers. "
"Specifying local path `{}` is unnecessary.".format(explicit_local)
)
self._rage()
self._kill_binary_processes()
self._delete_server_files()
self._delete_caches()
self._kill_client_processes()
if self._arguments.with_fire is True:
LOG.warning(
"All --with-fire functionality has now been included in `pyre kill`.\n "
"The flag is now a no-op, but here is a pyre for your troubles."
)
LOG.info(PYRE_FIRE)
def _get_process_name(environment_variable_name: str, default: str) -> str:
overridden = os.getenv(environment_variable_name)
if overridden is not None:
return os.path.basename(overridden)
else:
return default
| 36.936073
| 90
| 0.525281
|
a99175618d146a1a89d3cb72ee925b6693d162b3
| 5,495
|
py
|
Python
|
infinisdk/entry_point.py
|
kobutton/infinisdk
|
4ae5c66a2d7926636a52cb0a1452f9a2809b2ec2
|
[
"BSD-3-Clause"
] | null | null | null |
infinisdk/entry_point.py
|
kobutton/infinisdk
|
4ae5c66a2d7926636a52cb0a1452f9a2809b2ec2
|
[
"BSD-3-Clause"
] | null | null | null |
infinisdk/entry_point.py
|
kobutton/infinisdk
|
4ae5c66a2d7926636a52cb0a1452f9a2809b2ec2
|
[
"BSD-3-Clause"
] | null | null | null |
import sys
import arrow
import dateutil
import pkg_resources
import click
import logbook
from infinisdk import Q
from infinisdk.infinibox import InfiniBox
from infinisdk.core.config import config
_logger = logbook.Logger('sdk-cli')
logbook.set_datetime_format('local')
_DEFAULT_CONSOLE_LEVEL = logbook.INFO
CUSTOMIZE_ENTRY_POINT = 'infinisdk.cli.customize'
@click.group()
@click.option("-v", "--verbose", count=True)
@click.option("-q", "--quiet", count=True)
@click.pass_context
def cli(ctx, verbose, quiet):
console_handler = logbook.StderrHandler()
console_handler.level = min(max(logbook.TRACE, _DEFAULT_CONSOLE_LEVEL-verbose+quiet), logbook.CRITICAL)
ctx.obj['console_handler'] = console_handler
console_handler.format_string = '{record.message}'
console_handler.push_application()
def _get_system_object(system_name, should_login=False):
system = InfiniBox(system_name)
has_auth = bool(system.api.get_auth())
if not has_auth:
msg = "Auth (username & password) wasn't set at {!r} file".format(config.root.ini_file_path)
click.echo(click.style(msg, fg='yellow'))
if should_login:
if not has_auth:
click.echo('Please provide authentication for your system')
username = click.prompt('Username')
password = click.prompt('Password', hide_input=True)
system.api.set_auth(username, password=password, login=False)
try:
system.login()
except Exception:
_logger.debug('Caught exception while trying to login', exc_info=True)
raise click.ClickException('Failed to login to system {}'.format(system_name))
return system
def _interact(**local_vars):
try:
from IPython import embed
embed(user_ns=local_vars, display_banner=False)
except ImportError:
from code import interact
interact(local=local_vars)
@cli.command()
@click.option("-s", "--system-name", required=True)
def interact(system_name):
system = _get_system_object(system_name)
_interact(system=system)
@cli.group()
def events():
pass
TIME_TEMPLATE = 'YYYY-MM-DD HH:mm:ss'
def _convert_time_string_to_arrow(time_string, tzinfo):
datetime_obj = dateutil.parser.parse(time_string)
return arrow.get(datetime_obj, tzinfo=tzinfo)
@events.command(name='query')
@click.option("-s", "--system-name", required=True)
@click.option("--show-reporter/--hide-reporter", default=False, is_flag=True)
@click.option("--show-visibility/--hide-visibility", default=False, is_flag=True)
@click.option("--show-source-node-id/--hide-source-node-id", default=False, is_flag=True)
@click.option("--force-color/--no-color", "enable_color", default=None, is_flag=True)
@click.option("--local-time/--utc-time", "display_in_local_time", default=True, is_flag=True)
@click.option("-l", "--level", "min_level", default=None)
@click.option("-S", "--since", default=None)
@click.option("-U", "--until", default=None)
@click.option("--asc/--desc", "sorting_order", default=None, is_flag=True)
def events_query(system_name, show_reporter, show_visibility, show_source_node_id, display_in_local_time,
enable_color, min_level, since, until, sorting_order):
tzinfo = 'local' if display_in_local_time else 'utc'
if enable_color is None:
enable_color = sys.stdout.isatty()
if enable_color:
colorize = {'WARNING': 'yellow', 'ERROR': 'red', 'CRITICAL': 'red', 'INFO': 'green'}.get
else:
colorize = lambda _: None
system = _get_system_object(system_name, should_login=True)
system.login()
filters = []
if min_level:
supported_levels = system.events.get_levels()
try:
min_index = supported_levels.index(min_level)
except ValueError:
raise click.ClickException('Unsupported level {!r}'.format(min_level))
filters.append(Q.level.in_(supported_levels[min_index:]))
if since is not None:
filters.append(Q.timestamp > _convert_time_string_to_arrow(since, tzinfo))
if until is not None:
filters.append(Q.timestamp < _convert_time_string_to_arrow(until, tzinfo))
query = system.events.find(*filters)
if sorting_order is not None:
query = query.sort(+Q.id if sorting_order else -Q.id)
for event in query:
event_info = event.get_fields(from_cache=True)
event_time = event_info['timestamp']
if display_in_local_time:
event_time = event_time.to('local')
        formatted = '{} {:5}'.format(event_time.format(TIME_TEMPLATE), event_info['id'])
if show_reporter:
formatted += ' {:10}'.format(event_info['reporter'])
if show_visibility:
formatted += ' {:9}'.format(event['visibility'])
if show_source_node_id:
formatted += ' node-{}'.format(event['source_node_id'])
click.echo(formatted+' ', nl=False)
level = event_info['level']
click.echo(click.style(level, fg=colorize(level)), nl=False)
click.echo(' {code} {desc}'.format(code=event_info['code'], desc=event_info['description'].replace('\n', ' ')))
def main_entry_point():
for customize_function_cli in pkg_resources.iter_entry_points(CUSTOMIZE_ENTRY_POINT): # pylint: disable=no-member
func = customize_function_cli.load()
func()
return cli(obj={}) # pylint: disable=no-value-for-parameter,unexpected-keyword-arg
if __name__ == '__main__':
sys.exit(main_entry_point())
| 38.971631
| 119
| 0.688808
|
d4f82812d661a7cee86e5299a37dc1b5e2272f7f
| 766
|
py
|
Python
|
garnahata_in_ua/elastic_models.py
|
dchaplinsky/ragoogle
|
dccb3d29334c3220ea12c46c725c443c8bd725c0
|
[
"MIT"
] | 3
|
2018-06-10T21:20:56.000Z
|
2021-04-04T11:21:06.000Z
|
garnahata_in_ua/elastic_models.py
|
dchaplinsky/ragoogle
|
dccb3d29334c3220ea12c46c725c443c8bd725c0
|
[
"MIT"
] | 7
|
2018-08-14T20:54:49.000Z
|
2020-06-05T18:17:30.000Z
|
garnahata_in_ua/elastic_models.py
|
dchaplinsky/ragoogle
|
dccb3d29334c3220ea12c46c725c443c8bd725c0
|
[
"MIT"
] | 3
|
2018-06-27T12:53:13.000Z
|
2020-09-25T19:41:46.000Z
|
from elasticsearch_dsl import DocType, Index
from django.template.loader import render_to_string
GARNAHATA_INDEX = "garnahata_ownerships"
garnahata_idx = Index(GARNAHATA_INDEX)
@garnahata_idx.doc_type
class ElasticGarnahataModel(DocType):
def render_infocard(self):
from .apps import GarnahataInUaConfig as AppConfig
return render_to_string(
"garnahata_in_ua/infocard.html",
{
"res": self,
"url": "https://garnahata.in.ua{}".format(self.url),
"datasource_name": AppConfig.name,
"datasource_verbose_name": AppConfig.verbose_name,
},
)
class Meta:
index = GARNAHATA_INDEX
doc_type = "garnahata_ownerships_doctype"
| 28.37037
| 68
| 0.652742
|
b0796fcd875d83fe3917ce53b4923c5be0d9198b
| 220
|
py
|
Python
|
firmware/Heat controller/copy_hex.py
|
atoomnetmarc/IoT12
|
7706f69758a800da70bf8034a91a331206706824
|
[
"Apache-2.0"
] | 1
|
2021-11-11T23:48:05.000Z
|
2021-11-11T23:48:05.000Z
|
firmware/Heat controller/copy_hex.py
|
atoomnetmarc/IoT12
|
7706f69758a800da70bf8034a91a331206706824
|
[
"Apache-2.0"
] | null | null | null |
firmware/Heat controller/copy_hex.py
|
atoomnetmarc/IoT12
|
7706f69758a800da70bf8034a91a331206706824
|
[
"Apache-2.0"
] | 1
|
2021-09-17T16:21:44.000Z
|
2021-09-17T16:21:44.000Z
|
Import("env", "projenv")
from shutil import copyfile
def copyhex(*args, **kwargs):
copyfile(str(kwargs['target'][0]), 'hex/'+env['BOARD_MCU']+'.hex')
env.AddPostAction("$BUILD_DIR/${PROGNAME}.hex", copyhex)
| 27.5
| 71
| 0.659091
|
87202ac59b086ceb020b206ffbef6bda08878afe
| 3,773
|
py
|
Python
|
scripts/train.py
|
MathieuTuli/autoHyper
|
d7410741b5963518d704f422c0502695885a4093
|
[
"MIT"
] | 9
|
2021-11-30T18:58:05.000Z
|
2022-03-07T16:55:54.000Z
|
scripts/train.py
|
MathieuTuli/autoHyper
|
d7410741b5963518d704f422c0502695885a4093
|
[
"MIT"
] | null | null | null |
scripts/train.py
|
MathieuTuli/autoHyper
|
d7410741b5963518d704f422c0502695885a4093
|
[
"MIT"
] | null | null | null |
"""
MIT License
Copyright (c) 2020
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from pathlib import Path
from typing import Dict
from autohyper import optimize, LowRankMetrics, HyperParameters
from torchvision import datasets, transforms
from torch.optim import Adam
from gutils import init_logger
import torchvision.models as models
import numpy as np
import torch
def main():
# indicate which hyper-parameters to optimize
dataset = torch.utils.data.DataLoader(
datasets.CIFAR10('.', download=True, transform=transforms.ToTensor()),
batch_size=128)
def epoch_trainer(hyper_parameters: Dict[str, float],
epochs) -> LowRankMetrics:
# update model/optimizer parameters based on values in @argument:
# hyper_parameters
print('Run epochs:', hyper_parameters)
model = models.resnet18()
model.train()
model = model.cuda()
metrics = LowRankMetrics(list(model.parameters()))
optimizer = Adam(model.parameters(),
lr=hyper_parameters['lr'],
weight_decay=hyper_parameters['weight_decay'],)
criterion = torch.nn.CrossEntropyLoss().cuda()
accs = list()
for epoch in epochs:
for inputs, targets in dataset:
inputs = inputs.cuda()
targets = targets.cuda()
optimizer.zero_grad()
outputs = model(inputs)
loss = criterion(outputs, targets)
loss.backward()
optimizer.step()
accs.append(accuracy(outputs, targets)[0].item())
# run epoch training...
# at every epoch, evaluate low_rank metrics
print(f"Epoch {epoch} | Loss {np.mean(accs)}")
metrics.evaluate()
return metrics
hyper_parameters = HyperParameters(lr=True, weight_decay=True)
final_hp = optimize(epoch_trainer=epoch_trainer,
hyper_parameters=hyper_parameters)
final_hyper_parameters_dict = final_hp.final()
# do your final training will optimized hyper parameters
epoch_trainer(final_hyper_parameters_dict, epochs=range(250))
def accuracy(outputs, targets, topk=(1,)):
with torch.no_grad():
maxk = max(topk)
batch_size = targets.size(0)
_, pred = outputs.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(targets.contiguous().view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].contiguous(
).view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
if __name__ == "__main__":
logger = init_logger(Path('logs'))
main()
| 37.356436
| 78
| 0.666578
|
6528b23649338721a5302ac7c4e4eac3dafd00da
| 2,674
|
py
|
Python
|
tests/pyquil_algorithms.py
|
myQLM/myqlm-interop
|
9d77cb7c719f82be05d9f88493522940b8142124
|
[
"Apache-2.0"
] | 5
|
2020-09-09T09:44:31.000Z
|
2021-07-02T09:49:21.000Z
|
tests/pyquil_algorithms.py
|
myQLM/myqlm-interop
|
9d77cb7c719f82be05d9f88493522940b8142124
|
[
"Apache-2.0"
] | null | null | null |
tests/pyquil_algorithms.py
|
myQLM/myqlm-interop
|
9d77cb7c719f82be05d9f88493522940b8142124
|
[
"Apache-2.0"
] | 3
|
2020-07-10T17:51:47.000Z
|
2021-04-13T16:33:44.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import sys
import pytest
from collections import Counter
def compare_results(expected, result, aggregate=True):
if aggregate:
expected_arr = [entry.state for entry in expected.raw_data]
        result_arr = [entry.state for entry in result.raw_data]
expected_counts = Counter(expected_arr)
result_counts = Counter(result_arr)
else:
expected_counts = {entry.state:entry.probability for entry in
expected.raw_data}
result_counts = {entry.state:entry.probability for entry in
result.raw_data}
distance = {}
for state in expected_counts.keys():
if state in result_counts.keys():
distance[state] = abs(expected_counts[state] - result_counts[state])
else:
distance[state] = expected_counts[state]
for state in result_counts.keys():
if state not in expected_counts.keys():
distance[state] = result_counts[state]
return distance
def analyze_distance(distance):
import statistics
return statistics.median(list(distance.values()))
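# Illustrative usage (hedged; `expected` and `observed` are placeholder QLM Result
# objects from a reference run and the backend under test):
#   distance = compare_results(expected, observed, aggregate=False)
#   print(analyze_distance(distance))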
if __name__=="__main__":
if sys.version_info < (3, 7):
pytest.skip("python version < 3.7: skipping pyquil_binder tests", allow_module_level=True)
from qat.interop.pyquil.algorithms import run_simon
from qat.linalg import LinAlg
from qat.mps import MPS
from qat.interop.pyquil.providers import generate_qlm_result
from pyquil.api import QVMConnection
qvm = QVMConnection(endpoint="http://127.0.0.1:15011")
linalg = LinAlg()
mps = MPS(lnnize=True)
qvm_res = run_simon(qvm, trials=10024)
simon_res = {entry.state:entry.probability for entry in generate_qlm_result(qvm_res).raw_data}
print(simon_res)
print(max(simon_res.values()))
| 33.848101
| 98
| 0.698205
|
22e77b272f6ea6aaa771a9c4cca37612ab23834a
| 2,463
|
py
|
Python
|
ucr/core/architecture/neck/craft_fpn.py
|
DocYard-ai/UCR
|
7618aa336f56e71d9fd8cdc2d591e3d138e3dc68
|
[
"Apache-2.0"
] | 10
|
2021-04-06T15:57:20.000Z
|
2021-11-14T23:00:13.000Z
|
ucr/core/architecture/neck/craft_fpn.py
|
felixdittrich92/UCR
|
7618aa336f56e71d9fd8cdc2d591e3d138e3dc68
|
[
"Apache-2.0"
] | 5
|
2021-04-22T10:28:17.000Z
|
2022-03-02T07:47:53.000Z
|
ucr/core/architecture/neck/craft_fpn.py
|
felixdittrich92/UCR
|
7618aa336f56e71d9fd8cdc2d591e3d138e3dc68
|
[
"Apache-2.0"
] | 2
|
2021-04-30T10:27:51.000Z
|
2021-08-02T15:12:10.000Z
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
class CRAFTFPN(nn.Module):
def __init__(self, in_channels):
super(CRAFTFPN, self).__init__()
""" U network """
self.upconv1 = double_conv(1024, 512, 256)
self.upconv2 = double_conv(512, 256, 128)
self.upconv3 = double_conv(256, 128, 64)
self.upconv4 = double_conv(128, 64, 32)
init_weights(self.upconv1.modules())
init_weights(self.upconv2.modules())
init_weights(self.upconv3.modules())
init_weights(self.upconv4.modules())
self.out_channels = None
def forward(self, x):
""" Base network """
sources = x
""" U network """
y = torch.cat([sources[0], sources[1]], dim=1)
y = self.upconv1(y)
y = F.interpolate(
y, size=sources[2].size()[2:], mode="bilinear", align_corners=False
)
y = torch.cat([y, sources[2]], dim=1)
y = self.upconv2(y)
y = F.interpolate(
y, size=sources[3].size()[2:], mode="bilinear", align_corners=False
)
y = torch.cat([y, sources[3]], dim=1)
y = self.upconv3(y)
y = F.interpolate(
y, size=sources[4].size()[2:], mode="bilinear", align_corners=False
)
y = torch.cat([y, sources[4]], dim=1)
feature = self.upconv4(y)
return feature
class double_conv(nn.Module):
def __init__(self, in_ch, mid_ch, out_ch):
super(double_conv, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(in_ch + mid_ch, mid_ch, kernel_size=1),
nn.BatchNorm2d(mid_ch),
nn.ReLU(inplace=True),
nn.Conv2d(mid_ch, out_ch, kernel_size=3, padding=1),
nn.BatchNorm2d(out_ch),
nn.ReLU(inplace=True),
)
def forward(self, x):
x = self.conv(x)
return x
def init_weights(modules):
for m in modules:
if isinstance(m, nn.Conv2d):
init.xavier_uniform_(m.weight.data)
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.weight.data.normal_(0, 0.01)
m.bias.data.zero_()
| 28.976471
| 79
| 0.566788
|
862468a2902b8b4459a357c84fbd337084831be0
| 13,188
|
py
|
Python
|
presalytics/lib/tools/ooxml_tools.py
|
presalytics/python-client
|
5d80b78562126feeeb49af4738e2c1aed12dce3a
|
[
"MIT"
] | 4
|
2020-02-21T16:30:46.000Z
|
2021-01-12T12:22:03.000Z
|
presalytics/lib/tools/ooxml_tools.py
|
presalytics/python-client
|
5d80b78562126feeeb49af4738e2c1aed12dce3a
|
[
"MIT"
] | 4
|
2019-12-28T19:30:08.000Z
|
2020-03-31T19:27:45.000Z
|
presalytics/lib/tools/ooxml_tools.py
|
presalytics/python-client
|
5d80b78562126feeeb49af4738e2c1aed12dce3a
|
[
"MIT"
] | null | null | null |
import datetime
import typing
import logging
import presalytics
import presalytics.story.outline
import presalytics.client.api
import presalytics.lib.widgets.ooxml
import presalytics.lib.themes.ooxml
if typing.TYPE_CHECKING:
from presalytics.client.presalytics_story import Story
from presalytics.client.presalytics_ooxml_automation import Document
from presalytics.story.components import ThemeBase
from presalytics.story.outline import StoryOutline
logger = logging.getLogger(__name__)
def add_ooxml_document_to_story(story_id, new_document_filepath, replace_id=None, username=None, password=None):
"""
    Utility method for associating a new Document in the Presalytics API Ooxml Automation service
    with a Story.
    Parameters
    ----------
    story_id: str
        The Presalytics API Story service Id for the story
    new_document_filepath: str
        The local filepath to the document that you want to associate with the story
    replace_id: str, optional
        If you want to replace a document, this is the document's Ooxml Automation service Id.
        Passing this value will also update the references to the Ooxml Automation
        service Document object in the Story Outline. This is a good option if you have
        not made significant changes to the new version of the document. Widgets may
        fail to render if more than minor changes are made to the object tree.
    username: str, optional
        Presalytics API username. Defaults to workspace username.
    password: str, optional
Presalytics API password.
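    Example
    ----------
    A minimal sketch; the story id, file path, and document id below are
    placeholders rather than values taken from this module::
        add_ooxml_document_to_story(
            story_id="<story-id>",
            new_document_filepath="slides_v2.pptx",
            replace_id="<existing-ooxml-document-id>")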
"""
client = presalytics.client.api.Client(username=username, password=password)
if story_id == "empty" or not story_id:
message = "The story_id must not be empty"
raise presalytics.lib.exceptions.InvalidArgumentException(message)
replace = True if replace_id else False
story = client.story.story_id_file_post(story_id, file=new_document_filepath, replace_existing=replace, obsolete_id=replace_id)
def create_story_from_ooxml_file(filename: str, client_info={}) -> 'Story':
"""
    Utility method for building stories in the Presalytics API directly from a presentation or spreadsheet file.
Parameters
----------
filename : str
        A string containing the local path to a presentation or spreadsheet file.
    client_info : dict, optional
        A dictionary containing arguments that will be unpacked and passed to a `presalytics.client.api.Client` object on initialization.
        This dictionary can include the `token`, `cache_tokens`, `delegate_login` values. See `presalytics.client.api.Client` for more information.
Returns:
----------
A `presalytics.client.presalytics_story.models.story.Story` containing information about the Story object in the Presalytics API
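    Example
    ----------
    A minimal sketch; the file name is a placeholder::
        story = create_story_from_ooxml_file("quarterly_report.pptx")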
"""
story: 'Story'
logger.info("Starting presalytics tool: create_story_from_ooxml_file")
logger.info("Intializing presalytics client.")
client = presalytics.Client(**client_info)
logger.info("Sending file to presalytics server for document processing and base story creation")
story = client.story.story_post_file(file=filename)
logger.info("Creating local instances of file widgets")
outline = presalytics.StoryOutline.load(story.outline)
for i in range(0, len(outline.pages)):
page = outline.pages[i]
for j in range(0, len(page.widgets)):
widget = page.widgets[j]
logger.info('Creating OoxmlFileWidget with name "{}"'.format(widget.name))
inst = presalytics.OoxmlFileWidget.deserialize(widget, **client_info)
presalytics.COMPONENTS.register(inst)
logger.info('Rewriting outline with widget: "{}"'.format(widget.name))
outline.pages[i].widgets[j] = inst.serialize()
story.outline = outline.dump()
return story
def create_outline_from_ooxml_document(story_api: 'Story',
ooxml_document: 'Document',
title: str = None,
description: str = None,
themes: typing.Sequence['ThemeBase'] = None,
client_info={}):
"""
    Utility Method for generating a story outline from a presentation or spreadsheet file
    This method encapsulates a standard workflow for interacting with the Presalytics
    API in a single function. It associates a known `presalytics.client.presalytics_ooxml_automation.models.document.Document`
    with a known `presalytics.client.presalytics_story.models.story.Story` and returns a `presalytics.story.outline.StoryOutline`
    that can be used to replace the existing `presalytics.story.outline.StoryOutline` on the story if the user chooses.
Parameters
----------
story_api : presalytics.client.presalytics_story.models.story.Story
        The representation of the Presalytics API Story that will receive a new association with the **ooxml_document**
ooxml_document : presalytics.client.presalytics_ooxml_automation.models.document.Document
        The new ooxml_document that will be associated with the story; the outline will be generated from this object
title : str, optional
If not provided, the story will take on the title of the `presalytics.client.presalytics_ooxml_automation.models.document.Document` object
description : str, optional
If not provided, the story description will initialize as the empty string
themes : list of presalytics.story.components.ThemeBase, optional
        A list of the themes to be added to the returned `presalytics.story.outline.StoryOutline`
client_info : dict, optional
        A dictionary containing arguments that will be unpacked and passed to a `presalytics.client.api.Client` object on initialization.
This dictionary can include the `token`, `cache_tokens`, `delegate_login` values. See `presalytics.client.api.Client` for more information.
Returns
----------
A `presalytics.story.outline.StoryOutline` for the user to optionally serialize and append to the story in downstream operations
"""
info = presalytics.story.outline.Info(
revision=0,
date_created=datetime.datetime.utcnow().isoformat(),
date_modified=datetime.datetime.utcnow().isoformat(),
created_by=presalytics.CONFIG.get("USERNAME", ""),
modified_by=presalytics.CONFIG.get("USERNAME", ""),
        revision_notes='Created via the "create_outline_from_ooxml_file" method'
)
if description:
_description = description
else:
_description = ""
try:
pages = create_pages_from_ooxml_document(story_api, ooxml_document, client_info=client_info)
except Exception as ex:
logger.error("Error adding pages to outline", exc_info=True)
pages = []
try:
if themes:
_themes = themes
else:
ooxml_id = pages[0].widgets[0].data["document_ooxml_id"]
_themes = [create_theme_from_ooxml_document(ooxml_id, client_info=client_info)]
except Exception:
logger.error("Unable to add theme to ooxml_story", exc_info=True)
_themes = []
if title:
_title = title
else:
_title = pages[0].widgets[0].name
outline = presalytics.story.outline.StoryOutline(
info=info,
pages=pages,
description=_description,
title=_title,
themes=_themes,
plugins=[]
)
return outline
def create_pages_from_ooxml_document(story: 'Story',
ooxml_document: 'Document',
client_info={}):
"""
    Utility Method for building stories into the Presalytics API directly from a Presentation or Spreadsheet file.
Parameters
----------
story : presalytics.client.presalytics_story.models.story.Story
Representation for Presalytics API Story object
ooxml_document: presalytics.client.presalytics_ooxml_automation.models.document.Document
The ooxml_document on the story that you want to create pages from
client_info : dict, optional
        A dictionary containing arguments that will be unpacked and passed to a `presalytics.client.api.Client` object on initialization.
This dictionary can include the `token`, `cache_tokens`, `delegate_login` values. See `presalytics.client.api.Client` for more information.
Returns:
----------
A list of `presalytics.story.outline.Page` objects representing the slides, pages and charts in the source document
"""
pages = []
order = []
pages_unordered = []
client = presalytics.Client(**client_info)
child_objects = client.ooxml_automation.documents_childobjects_get_id(ooxml_document.id)
document_type = client.ooxml_automation.documents_documenttype_typeid_get_type_id(ooxml_document.document_type_id)
if document_type.file_extension == "pptx":
slides_meta = [x for x in child_objects if x.object_type == "Slide.Slides"]
ep_map = presalytics.OoxmlEndpointMap.slide()
for slide in slides_meta:
try:
widget = presalytics.OoxmlFileWidget(
filename=ooxml_document.filename,
name=slide.entity_name,
endpoint_map=ep_map,
object_name=slide.entity_name,
object_ooxml_id=slide.entity_id,
document_ooxml_id=ooxml_document.id,
story_id=story.id,
client_info=client_info
)
widget_kind = "widget-page"
widget_name = slide.entity_name
widgets = [widget.serialize()]
page = presalytics.story.outline.Page(
kind=widget_kind,
name=widget_name,
widgets=widgets
)
this_slide_meta = client.ooxml_automation.slides_slides_get_id(slide.entity_id)
order.append(this_slide_meta.number -1)
pages_unordered.append(page)
            except Exception:
                logger.error("Unable to add widget {0} to outline ooxml document {1}".format(slide.entity_name, ooxml_document.id), exc_info=True)
# TODO: insert excel chart handling here
for j in range(0, len(order)):
idx = order.index(j)
pages.append(pages_unordered[idx])
return pages
def create_theme_from_ooxml_document(document_id: str, client_info={}):
"""
    Creates a `presalytics.story.outline.Theme` object from a Presalytics API ooxml_document object
Parameters:
----------
document_id: str
A string containing a uuid that corresponds to a document in the Ooxml Automation service of the Presalytics API
client_info : dict, optional
        A dictionary containing arguments that will be unpacked and passed to a `presalytics.client.api.Client` object on initialization.
This dictionary can include the `token`, `cache_tokens`, `delegate_login` values. See `presalytics.client.api.Client` for more information.
Returns:
----------
    A dict (a serialized `presalytics.lib.themes.ooxml.OoxmlTheme`) with formats extracted from the ooxml_document
"""
client = presalytics.Client(**client_info)
child_objects = client.ooxml_automation.documents_childobjects_get_id(document_id)
themes = [x for x in child_objects if x.object_type == "Theme.Themes"]
if len(themes) > 1:
slide_no = None
for theme_data in themes:
theme_info = client.ooxml_automation.theme_themes_get_id(theme_data.entity_id)
parent_slide = client.ooxml_automation.slides_slides_get_id(theme_info.slide_id)
if slide_no is None:
slide_no = parent_slide.number
theme_meta = theme_info
else:
if parent_slide.number < slide_no:
theme_meta = theme_info
if parent_slide.number == 0:
break
else:
theme_meta = client.ooxml_automation.theme_themes_get_id(themes[0].entity_id)
theme = presalytics.lib.themes.ooxml.OoxmlTheme(
theme_meta.name,
theme_meta.id,
client_info=client_info
)
return theme.serialize().to_dict()
def get_mime_type_from_filename(client: presalytics.Client, filename) -> typing.Optional[str]:
"""
Determines the mimetype from a file's type extension
Parameters:
----------
client : presalytics.client.api.Client
A client object for making api calls
filename : str
A filename against which the method can lookup mimetypes
Returns:
----------
A string containing a mimetype to attach to file uploads
"""
doc_types = client.ooxml_automation.documents_documenttype_get()
file_extension = filename.split(".")[-1]
try:
return next(x.mime_type for x in doc_types if x.file_extension == file_extension)
except StopIteration:
return None
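# Hedged usage sketch (not part of the original module). It assumes valid
# Presalytics credentials are available to the default Client configuration;
# the file names and the replace_id value below are placeholders.
if __name__ == "__main__":
    example_story = create_story_from_ooxml_file("report.pptx")
    print("Created story:", example_story.id)
    # To swap an updated copy of the same deck into an existing story:
    # add_ooxml_document_to_story(example_story.id, "report_v2.pptx", replace_id="<ooxml-document-id>")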
| 42.405145
| 148
| 0.676752
|
778e25cefe986106444c960ba91e891457ec86a5
| 655
|
py
|
Python
|
tests/node/test_softsign.py
|
gglin001/onnx_jax
|
08e2a1181250db48f4436f6430903fc895a3a1d6
|
[
"Apache-2.0"
] | 9
|
2021-04-12T02:37:14.000Z
|
2022-03-28T23:31:40.000Z
|
tests/node/test_softsign.py
|
gglin001/onnx-jax
|
08e2a1181250db48f4436f6430903fc895a3a1d6
|
[
"Apache-2.0"
] | null | null | null |
tests/node/test_softsign.py
|
gglin001/onnx-jax
|
08e2a1181250db48f4436f6430903fc895a3a1d6
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import onnx
from tests.tools import expect
class Softsign:
@staticmethod
def export(): # type: () -> None
node = onnx.helper.make_node(
'Softsign',
inputs=['x'],
outputs=['y'],
)
x = np.array([-1, 0, 1]).astype(np.float32)
y = np.array([-0.5, 0, 0.5]).astype(np.float32)
expect(node, inputs=[x], outputs=[y], name='test_softsign_example')
x = np.random.randn(3, 4, 5).astype(np.float32)
y = x / (1 + np.abs(x))
expect(node, inputs=[x], outputs=[y], name='test_softsign')
if __name__ == '__main__':
Softsign.export()
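# Hedged aside (not part of the original test): Softsign(x) = x / (1 + |x|),
# which is exactly the reference computation used for the random case above
# and maps the example inputs [-1, 0, 1] to [-0.5, 0, 0.5].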
| 24.259259
| 75
| 0.546565
|
5c04973bd78a53d1fd0b81f3c91ccb08213f3f9f
| 1,096
|
py
|
Python
|
setup.py
|
cscutcher/snapper_systemd_boot
|
0a86e6c089c2ef7f39e45a191cc91c960c6b69b5
|
[
"Apache-2.0",
"MIT"
] | 8
|
2019-04-25T03:01:25.000Z
|
2022-03-27T12:25:05.000Z
|
setup.py
|
cscutcher/snapper_systemd_boot
|
0a86e6c089c2ef7f39e45a191cc91c960c6b69b5
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
setup.py
|
cscutcher/snapper_systemd_boot
|
0a86e6c089c2ef7f39e45a191cc91c960c6b69b5
|
[
"Apache-2.0",
"MIT"
] | 3
|
2021-12-06T12:54:44.000Z
|
2022-02-10T19:29:18.000Z
|
from setuptools import setup, find_packages
setup(
name="snapper_systemd_boot",
description="Generate systemd-boot entries from snapper btrfs snapshots.",
version="0.1.0-prealpha1",
packages=find_packages(),
install_requires=[
"reprutils",
"argh",
"sh",
],
extras_require={
"dev": ["pytest"],
},
entry_points={
"console_scripts": [
"snapper-systemd-boot = snapper_systemd_boot.cli:main",
]
},
python_requires=">=3.6",
license="Apache-2.0",
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"License :: OSI Approved :: Apache Software License",
"Environment :: Console",
"Intended Audience :: Developers",
"Natural Language :: English",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.6",
"Topic :: System :: Archiving :: Backup",
"Topic :: System :: Systems Administration",
],
keywords="systemd-boot snapper btrfs",
)
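# Hedged usage note (not part of the original file): with this configuration an
# editable install including the dev extras would typically be
#     pip install -e ".[dev]"
# after which the console script declared above is available as
#     snapper-systemd-boot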
| 30.444444
| 78
| 0.583942
|
cf2fb4fd93a921bf43b393d6a5a6df5ec1343484
| 659
|
py
|
Python
|
hardware/opentrons_hardware/drivers/can_bus/__init__.py
|
anuwrag/opentrons
|
28c8d76a19e367c6bd38f5290faaa32abf378715
|
[
"Apache-2.0"
] | 2
|
2015-11-10T17:49:51.000Z
|
2016-01-15T04:43:37.000Z
|
hardware/opentrons_hardware/drivers/can_bus/__init__.py
|
anuwrag/opentrons
|
28c8d76a19e367c6bd38f5290faaa32abf378715
|
[
"Apache-2.0"
] | null | null | null |
hardware/opentrons_hardware/drivers/can_bus/__init__.py
|
anuwrag/opentrons
|
28c8d76a19e367c6bd38f5290faaa32abf378715
|
[
"Apache-2.0"
] | null | null | null |
"""Can bus drivers package."""
from .driver import CanDriver
from .can_messenger import CanMessenger, WaitableCallback
from opentrons_hardware.firmware_bindings.message import CanMessage
from opentrons_hardware.firmware_bindings.arbitration_id import (
ArbitrationId,
ArbitrationIdParts,
)
from opentrons_hardware.firmware_bindings.constants import (
NodeId,
FunctionCode,
MessageId,
)
from .settings import DriverSettings
__all__ = [
"CanMessage",
"CanDriver",
"ArbitrationId",
"NodeId",
"FunctionCode",
"MessageId",
"ArbitrationIdParts",
"CanMessenger",
"DriverSettings",
"WaitableCallback",
]
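# Hedged usage sketch (not part of the original file): downstream code would
# typically import the re-exported names directly from this package, e.g.
#     from opentrons_hardware.drivers.can_bus import CanDriver, CanMessenger, DriverSettings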
| 21.966667
| 67
| 0.738998
|
c1472854c47858e22cd1ec64e55dbb3463499dd1
| 1,061
|
py
|
Python
|
share/lib/python/neuron/rxd/options.py
|
shhong/nrn
|
0d64e94330c6072529e31033d579b270f742454e
|
[
"BSD-3-Clause"
] | null | null | null |
share/lib/python/neuron/rxd/options.py
|
shhong/nrn
|
0d64e94330c6072529e31033d579b270f742454e
|
[
"BSD-3-Clause"
] | 1
|
2022-03-25T13:30:45.000Z
|
2022-03-25T13:30:45.000Z
|
share/lib/python/neuron/rxd/options.py
|
shhong/nrn
|
0d64e94330c6072529e31033d579b270f742454e
|
[
"BSD-3-Clause"
] | 1
|
2018-12-18T13:52:16.000Z
|
2018-12-18T13:52:16.000Z
|
from .rxdException import RxDException
import re
#TODO: This option is not currently observed
use_reaction_contribution_to_jacobian = True
concentration_nodes_3d = "surface"
# how far inside must a voxel be to be considered inside
# the value is necessary to account for floating point precision
ics_distance_threshold = -1e-12
# resolution (relative to dx) of sampling points used to determine surface
# volume fractions.
ics_partial_volume_resolution = 2
# resolution (relative to dx) of sampling points used to determine surface
# areas of surface voxels.
ics_partial_surface_resolution = 1
# the number of electrophysiology fixed steps per rxd step
# WARNING: setting this to anything other than 1 is probably a very bad
# idea, numerically speaking, at least for now
fixed_step_factor = 1
class _OverrideLockouts:
def __init__(self):
self._extracellular = True
@property
def extracellular(self):
return self._extracellular
@extracellular.setter
def extracellular(self, val):
self._extracellular = val
enable = _OverrideLockouts()
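# Hedged usage sketch (not part of the original file); assuming this module is
# importable as neuron.rxd.options, the lockout flag can be toggled like so:
#     from neuron.rxd import options
#     options.enable.extracellular = False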
| 27.921053
| 73
| 0.789821
|
8d35cdc7cdc8a4cbe5898a04cbf45731dd5b6360
| 2,262
|
py
|
Python
|
retinopathy_test/models/run_prediction.py
|
vykozlov/retinopathy_test
|
f75491f0ebe5373f7401a83abe24abbf58c4bd64
|
[
"MIT"
] | null | null | null |
retinopathy_test/models/run_prediction.py
|
vykozlov/retinopathy_test
|
f75491f0ebe5373f7401a83abe24abbf58c4bd64
|
[
"MIT"
] | null | null | null |
retinopathy_test/models/run_prediction.py
|
vykozlov/retinopathy_test
|
f75491f0ebe5373f7401a83abe24abbf58c4bd64
|
[
"MIT"
] | null | null | null |
import os, argparse, sys
import tensorflow as tf
import tensorflow.contrib
import numpy as np
import glob
# The original freeze_graph function
# from tensorflow.python.tools.freeze_graph import freeze_graph
dir = os.path.dirname(os.path.realpath(__file__))
from tensorflow.contrib import predictor
import cv2
def load_image(addr, img_size):
    # read an image and resize it to (img_size, img_size)
    # cv2 loads images as BGR; convert to RGB
#record pipeline
img = cv2.imread(addr)
img = cv2.resize(img, (img_size, img_size), interpolation=cv2.INTER_CUBIC)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) #COLOR_BGR2RGB
img = img.astype(np.float32)
#iterator pipeline
img = img / 255.0
img = img - 0.5
return img.reshape([1,256,256,3])
def predict_image(model_dir, image_file): #ki: this is the main function performing prediction
# f = glob.glob(model_dir + '*/')[0] #ki: get the model parameters from the first subdirectory in model_dir
# f = glob.glob(model_dir + '1540408813/')[0] #ki: directory 1540408813 contains model parameters for cpu only machine
f = glob.glob(os.path.join(model_dir,'1540408813'))[0]
print (f)
imgs = image_file.split(',')
predictor_fn = tf.contrib.predictor.from_saved_model(export_dir = f, signature_def_key='predict')#ki: create predictor function using the graph and model parameters
results={}
for imgfile in imgs:
img = load_image(imgfile, 256)
output = predictor_fn({'input': img})
print(imgfile, output)
results['%s'%imgfile]=output
return results
if __name__ == '__main__':
parser = argparse.ArgumentParser() #ki: create command line parser to get model parameters and image files.
parser.add_argument(
'--model_dir',
type=str,
default='./retinopathy_serve/',
# default='./',
help="""\
Path to classify_image_graph_def.pb\
"""
)
parser.add_argument(
'--image_file',
type=str,
default='dr4.tiff',
help='Absolute path to image file.'
)
args = parser.parse_args()
results = predict_image(args.model_dir, args.image_file)#ki: take the model parameter and input images and return predicted class (probabilities)
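# Hedged usage note (not part of the original script); the values below are the
# script's own defaults and may need adjusting:
#     python run_prediction.py --model_dir ./retinopathy_serve/ --image_file dr4.tiff
# Several images can be passed to --image_file as a comma-separated list.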
| 37.081967
| 168
| 0.683024
|
331195d8ee6fa39e2b682c860a6fe795656824e5
| 500
|
py
|
Python
|
User/migrations/0005_auto_20210705_1037.py
|
judeakinwale/SMS-backup
|
30636591b43bec94e7406f4c02fde402a5a2e38f
|
[
"MIT"
] | null | null | null |
User/migrations/0005_auto_20210705_1037.py
|
judeakinwale/SMS-backup
|
30636591b43bec94e7406f4c02fde402a5a2e38f
|
[
"MIT"
] | null | null | null |
User/migrations/0005_auto_20210705_1037.py
|
judeakinwale/SMS-backup
|
30636591b43bec94e7406f4c02fde402a5a2e38f
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2 on 2021-07-05 10:37
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('User', '0004_auto_20210705_1029'),
]
operations = [
migrations.RemoveField(
model_name='biodata',
name='matric_no',
),
migrations.AddField(
model_name='user',
name='matric_no',
field=models.CharField(default='', max_length=300),
),
]
| 21.73913
| 63
| 0.57
|
8f35fb83a01a9cb9110d5d081f9d0de5da9130ff
| 3,489
|
py
|
Python
|
floris/wake.py
|
WISDEM/FLORIS
|
0df374de787e4035e7081fa2a454d0eeabded2f8
|
[
"Apache-2.0"
] | 19
|
2017-11-07T11:15:47.000Z
|
2020-12-22T08:40:51.000Z
|
floris/wake.py
|
WISDEM/FLORIS
|
0df374de787e4035e7081fa2a454d0eeabded2f8
|
[
"Apache-2.0"
] | 28
|
2017-11-27T17:45:41.000Z
|
2019-05-08T22:01:10.000Z
|
floris/wake.py
|
WISDEM/FLORIS
|
0df374de787e4035e7081fa2a454d0eeabded2f8
|
[
"Apache-2.0"
] | 27
|
2017-11-16T19:53:20.000Z
|
2021-04-19T18:38:58.000Z
|
# Copyright 2017 NREL
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use
# this file except in compliance with the License. You may obtain a copy of the
# License at http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
from . import wake_deflection
from . import wake_velocity
from . import wake_combination
class Wake():
"""
Wake is a container class for the various wake model objects. In particular,
Wake holds references to the velocity and deflection models as well as their
parameters.
inputs:
instance_dictionary: dict - the input dictionary;
it should have the following key-value pairs:
{
"description": str,
"properties": dict({
velocity_model: WakeVelocity
                    deflection_model: WakeDeflection
                    combination_model: WakeCombination
parameters: dict({
see WakeVelocity, WakeDeflection
})
}),
}
outputs:
self: Wake - an instantiated Wake object
"""
def __init__(self, instance_dictionary):
self.description = instance_dictionary["description"]
properties = instance_dictionary["properties"]
parameters = properties["parameters"]
self.velocity_models = {
"jensen": wake_velocity.Jensen(parameters),
"floris": wake_velocity.Floris(parameters),
"gauss": wake_velocity.Gauss(parameters),
"curl": wake_velocity.Curl(parameters)
}
self._velocity_model = self.velocity_models[properties["velocity_model"]]
self.deflection_models = {
"jimenez": wake_deflection.Jimenez(parameters),
"gauss_deflection": wake_deflection.Gauss(parameters),
"curl": wake_deflection.Curl(parameters)
}
self._deflection_model = self.deflection_models[properties["deflection_model"]]
self.combination_models = {
"fls": wake_combination.FLS(),
"sosfs": wake_combination.SOSFS()
}
self._combination_model = self.combination_models[properties["combination_model"]]
# Getters & Setters
@property
def velocity_model(self):
return self._velocity_model
@velocity_model.setter
def velocity_model(self, value):
self._velocity_model = self.velocity_models[value]
@property
def deflection_model(self):
return self._deflection_model
@deflection_model.setter
def deflection_model(self, value):
self._deflection_model = self.deflection_models[value]
@property
def combination_model(self):
return self._combination_model
@combination_model.setter
def combination_model(self, value):
self._combination_model = self.combination_models[value]
@property
def deflection_function(self):
return self._deflection_model.function
@property
def velocity_function(self):
return self._velocity_model.function
@property
def combination_function(self):
return self._combination_model.function
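# Hedged usage sketch (not part of the original module); only the shape of the
# input dictionary is illustrated, since the keys inside "parameters" depend on
# the chosen velocity/deflection models:
#     wake = Wake({
#         "description": "example wake configuration",
#         "properties": {
#             "velocity_model": "jensen",
#             "deflection_model": "jimenez",
#             "combination_model": "fls",
#             "parameters": {...},  # see WakeVelocity / WakeDeflection
#         },
#     })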
| 31.432432
| 90
| 0.656635
|
f79ebe83193b665d8b3e89be6a5363f09d2a83dd
| 904
|
py
|
Python
|
ACME/linear_problem/add_constraints.py
|
mauriziokovacic/ACME
|
2615b66dd4addfd5c03d9d91a24c7da414294308
|
[
"MIT"
] | 3
|
2019-10-23T23:10:55.000Z
|
2021-09-01T07:30:14.000Z
|
ACME/linear_problem/add_constraints.py
|
mauriziokovacic/ACME-Python
|
2615b66dd4addfd5c03d9d91a24c7da414294308
|
[
"MIT"
] | null | null | null |
ACME/linear_problem/add_constraints.py
|
mauriziokovacic/ACME-Python
|
2615b66dd4addfd5c03d9d91a24c7da414294308
|
[
"MIT"
] | 1
|
2020-07-11T11:35:43.000Z
|
2020-07-11T11:35:43.000Z
|
import torch
from ..utility.cat import *
from ..utility.issparse import *
from ..math.unitvec import *
def add_constraints(A, hard=None, soft=None):
"""
Adds hard and soft constraints to the given constraints matrix
Parameters
----------
A : Tensor or SparseTensor
the (N,M,) constraints matrix
hard : iterable
the hard constraints indices
soft : LongTensor
the (S,) indices tensor for the soft constraints
Returns
-------
Tensor
the (N+S,M,) constraints matrix
"""
M = A.clone()
if hard is not None:
S = unitvec(A.size(-1), hard, sparse=False, dtype=torch.float, device=A.device)
if issparse(A):
M = M.to_dense()
M[hard] = S
if soft is not None:
S = unitvec(A.size(-1), soft, sparse=True, dtype=torch.float, device=A.device)
M = cat((M, S), dim=-2)
return M
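# Hedged usage sketch (not part of the original module); the tensors below are
# placeholders and unitvec/cat come from the surrounding ACME package:
#     A = torch.eye(5)                                  # (N,M) constraint matrix
#     M = add_constraints(A, hard=[0], soft=torch.tensor([2, 4]))
#     # row 0 is overwritten by a hard constraint and two soft rows are
#     # appended, so M has shape (N+2, M)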
| 25.828571
| 87
| 0.581858
|
505753f2e1e7596e49511e02f53b2cb67aa11842
| 1,025
|
py
|
Python
|
manage.py
|
MahmoudRizk/twemo
|
51a5022c32f402a7fcc7c5a09b30f2fc5a15b60c
|
[
"MIT"
] | null | null | null |
manage.py
|
MahmoudRizk/twemo
|
51a5022c32f402a7fcc7c5a09b30f2fc5a15b60c
|
[
"MIT"
] | null | null | null |
manage.py
|
MahmoudRizk/twemo
|
51a5022c32f402a7fcc7c5a09b30f2fc5a15b60c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.local")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django # noqa
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
# This allows easy placement of apps within the interior
# twemo directory.
current_path = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(current_path, "twemo"))
execute_from_command_line(sys.argv)
| 33.064516
| 77
| 0.654634
|
83dc6587a25795d6879646e05cd154a428a8c31b
| 1,090
|
py
|
Python
|
examples/plotting/file/brewer.py
|
g-parki/bokeh
|
664ead5306bba64609e734d4105c8aa8cfb76d81
|
[
"BSD-3-Clause"
] | 1
|
2020-05-26T15:21:22.000Z
|
2020-05-26T15:21:22.000Z
|
examples/plotting/file/brewer.py
|
g-parki/bokeh
|
664ead5306bba64609e734d4105c8aa8cfb76d81
|
[
"BSD-3-Clause"
] | 1
|
2021-12-15T17:32:31.000Z
|
2021-12-21T18:11:05.000Z
|
examples/plotting/file/brewer.py
|
g-parki/bokeh
|
664ead5306bba64609e734d4105c8aa8cfb76d81
|
[
"BSD-3-Clause"
] | 1
|
2021-12-20T05:50:00.000Z
|
2021-12-20T05:50:00.000Z
|
''' A plot of randomly stacked area styled using the Brewer palette
from the `brewer` dictionary.
.. bokeh-example-metadata::
:apis: bokeh.palettes.brewer
:refs: :ref:`userguide_styling` > :ref:`userguide_styling_using_palettes`
:keywords: brewer, palettes, patches
'''
import numpy as np
import pandas as pd
from bokeh.palettes import brewer
from bokeh.plotting import figure, output_file, show
N = 20
cats = 10
df = pd.DataFrame(np.random.randint(10, 100, size=(N, cats))).add_prefix('y')
def stacked(df):
    # running top edge of each stacked series: cumulative sum across columns
    df_top = df.cumsum(axis=1)
    # bottom edge is the previous series' top; shifting right leaves the first
    # column ('y0') empty, so fill it with 0, then reverse the rows so the
    # bottom boundary is traced back toward the start of the x-range
    df_bottom = df_top.shift(axis=1).fillna({'y0': 0})[::-1]
    # bottom (reversed) followed by top yields closed polygon boundaries
    df_stack = pd.concat([df_bottom, df_top], ignore_index=True)
    return df_stack
areas = stacked(df)
colors = brewer['Spectral'][areas.shape[1]]
x2 = np.hstack((df.index[::-1], df.index))
p = figure(x_range=(0, N-1), y_range=(0, 800))
p.grid.minor_grid_line_color = '#eeeeee'
p.patches([x2] * areas.shape[1], [areas[c].values for c in areas],
color=colors, alpha=0.8, line_color=None)
output_file('stacked_area.html', title='brewer.py example')
show(p)
| 27.25
| 77
| 0.695413
|
8ac721b06a5b2c3fb83b086c3e843a73f677ac93
| 6,470
|
py
|
Python
|
launch_BURN.py
|
Km3888/oyster
|
391ae4ba3fedc4354fa3688654211899a72465b0
|
[
"MIT"
] | null | null | null |
launch_BURN.py
|
Km3888/oyster
|
391ae4ba3fedc4354fa3688654211899a72465b0
|
[
"MIT"
] | 1
|
2020-04-01T16:35:43.000Z
|
2020-04-01T16:35:43.000Z
|
launch_BURN.py
|
Km3888/oyster
|
391ae4ba3fedc4354fa3688654211899a72465b0
|
[
"MIT"
] | null | null | null |
"""
Launcher for experiments with PEARL
"""
import os
import pathlib
import numpy as np
import click
import json
import torch
from rlkit.envs import ENVS
from rlkit.envs.wrappers import NormalizedBoxEnv
from rlkit.torch.sac.policies import TanhGaussianPolicy
from rlkit.torch.networks import FlattenMlp, MlpEncoder, RecurrentEncoder
from rlkit.torch.sac.sac import PEARLSoftActorCritic
from rlkit.torch.sac.burn_sac import BURNSoftActorCritic
from rlkit.torch.sac.agent import PEARLAgent
from rlkit.torch.sac.BURNAgent import BURNAgent
from rlkit.launchers.launcher_util import setup_logger
import rlkit.torch.pytorch_util as ptu
from configs.default import default_config
from configs.short_burn import short_config
def experiment(variant):
# create multi-task environment and sample tasks
env = NormalizedBoxEnv(ENVS[variant['env_name']](**variant['env_params']))
tasks = env.get_all_task_idx()
obs_dim = int(np.prod(env.observation_space.shape))
action_dim = int(np.prod(env.action_space.shape))
reward_dim = 1
# instantiate networks
latent_dim = variant['latent_size']
context_encoder_input_dim = 2 * obs_dim + action_dim + reward_dim if variant['algo_params']['use_next_obs_in_context'] else obs_dim + action_dim + reward_dim
context_encoder_output_dim = latent_dim * 2 if variant['algo_params']['use_information_bottleneck'] else latent_dim
net_size = variant['net_size']
recurrent = variant['algo_params']['recurrent']
encoder_model = RecurrentEncoder if recurrent else MlpEncoder
context_encoder = encoder_model(
hidden_sizes=[200, 200, 200],
input_size=context_encoder_input_dim,
output_size=context_encoder_output_dim,
)
#low Qs first and then high Qs
q_list=[
[FlattenMlp(
hidden_sizes=[net_size, net_size, net_size],
input_size=2*obs_dim + action_dim,
output_size=1,
),
FlattenMlp(
hidden_sizes=[net_size, net_size, net_size],
input_size=2*obs_dim + action_dim,
output_size=1,
)]
,[FlattenMlp(
hidden_sizes=[net_size, net_size, net_size],
input_size=obs_dim + action_dim + latent_dim,
output_size=1,
),
FlattenMlp(
hidden_sizes=[net_size, net_size, net_size],
input_size=obs_dim + action_dim + latent_dim,
output_size=1,
)]]
#low vf first and then high vf
vf_list = [FlattenMlp(
hidden_sizes=[net_size, net_size, net_size],
input_size=2*obs_dim,
output_size=1,
),FlattenMlp(
hidden_sizes=[net_size, net_size, net_size],
input_size=obs_dim + latent_dim,
output_size=1,
)]
#NOTE: Reduced number of hidden layers in h_policy from 3 to 2 (idea being it's not doing as much as the whole policy in PEARL)
h_policy = TanhGaussianPolicy(
hidden_sizes=[net_size, net_size],
obs_dim=obs_dim + latent_dim,
latent_dim=latent_dim,
action_dim=obs_dim,
)
    #NOTE: Kept a deeper hidden-layer stack for the low-level policy since it will see plenty of data
l_policy=TanhGaussianPolicy(
hidden_sizes=[net_size,net_size,net_size,net_size],
obs_dim=2*obs_dim,
latent_dim=0,
action_dim=action_dim,
)
#TODO Implement BernAgent
agent = BURNAgent(
latent_dim,
context_encoder,
h_policy,
l_policy,
c=2,
**variant['algo_params']
)
algorithm = BURNSoftActorCritic(
env=env,
train_tasks=list(tasks[:variant['n_train_tasks']]),
eval_tasks=list(tasks[-variant['n_eval_tasks']:]),
nets=[agent, q_list, vf_list],
latent_dim=latent_dim,
**variant['algo_params']
)
# optionally load pre-trained weights
#TODO Make sure weights are properly saved
if variant['path_to_weights'] is not None:
path = variant['path_to_weights']
context_encoder.load_state_dict(torch.load(os.path.join(path, 'context_encoder.pth')))
q_list[0][0].load_state_dict(torch.load(os.path.join(path, 'l_qf1.pth')))
q_list[0][1].load_state_dict(torch.load(os.path.join(path, 'l_qf2.pth')))
q_list[1][0].load_state_dict(torch.load(os.path.join(path, 'h_qf1.pth')))
q_list[1][1].load_state_dict(torch.load(os.path.join(path, 'h_qf2.pth')))
vf_list[0].load_state_dict(torch.load(os.path.join(path, 'l_vf.pth')))
vf_list[1].load_state_dict(torch.load(os.path.join(path, 'h_vf.pth')))
# TODO hacky, revisit after model refactor
algorithm.networks[-2].load_state_dict(torch.load(os.path.join(path, 'target_vf.pth')))
h_policy.load_state_dict(torch.load(os.path.join(path, 'h_policy.pth')))
l_policy.load_state_dict(torch.load(os.path.join(path, 'l_policy.pth')))
# optional GPU mode
ptu.set_gpu_mode(variant['util_params']['use_gpu'], variant['util_params']['gpu_id'])
if ptu.gpu_enabled():
algorithm.to()
# debugging triggers a lot of printing and logs to a debug directory
DEBUG = variant['util_params']['debug']
os.environ['DEBUG'] = str(int(DEBUG))
# create logging directory
# TODO support Docker
exp_id = 'debug' if DEBUG else None
experiment_log_dir = setup_logger(variant['env_name'], variant=variant, exp_id=exp_id, base_log_dir=variant['util_params']['base_log_dir'])
# optionally save eval trajectories as pkl files
if variant['algo_params']['dump_eval_paths']:
pickle_dir = experiment_log_dir + '/eval_trajectories'
pathlib.Path(pickle_dir).mkdir(parents=True, exist_ok=True)
# run the algorithm
algorithm.train()
def deep_update_dict(fr, to):
''' update dict of dicts with new values '''
# assume dicts have same keys
for k, v in fr.items():
if type(v) is dict:
deep_update_dict(v, to[k])
else:
to[k] = v
return to
@click.command()
@click.argument('config', default=None)
@click.option('--gpu', default=0)
@click.option('--docker', is_flag=True, default=False)
@click.option('--debug', is_flag=True, default=False)
def main(config, gpu, docker, debug):
variant = short_config
print(variant['algo_params'])
if config:
with open(os.path.join(config)) as f:
exp_params = json.load(f)
variant = deep_update_dict(exp_params, variant)
variant['util_params']['gpu_id'] = gpu
experiment(variant)
if __name__ == "__main__":
main()
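# Hedged usage note (not part of the original script); the config path is a
# placeholder and the flags are the ones declared on main() above:
#     python launch_BURN.py configs/my_experiment.json --gpu 0 --debug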
| 35.944444
| 161
| 0.681607
|
be739ca5230ecfbeb3d8542ed80fb414d7a4ae49
| 8,655
|
py
|
Python
|
pygcn/ABC_gcn.py
|
d222nguy/project_602
|
0ce7a3ad0083b22977b558d4d2aceeca839df249
|
[
"MIT"
] | null | null | null |
pygcn/ABC_gcn.py
|
d222nguy/project_602
|
0ce7a3ad0083b22977b558d4d2aceeca839df249
|
[
"MIT"
] | null | null | null |
pygcn/ABC_gcn.py
|
d222nguy/project_602
|
0ce7a3ad0083b22977b558d4d2aceeca839df249
|
[
"MIT"
] | null | null | null |
import numpy as np
from scipy import optimize
from deap.benchmarks import schwefel
from abc import ABCMeta
from abc import abstractmethod
from six import add_metaclass
import matplotlib.pyplot as plt
from copy import deepcopy
import config
@add_metaclass(ABCMeta)
class ObjectiveFunction(object):
def __init__(self, name, dim, minf, maxf):
self.name = name
self.dim = dim
self.minf = minf
self.maxf = maxf
def sample(self):
return np.random.uniform(low = self.minf, high = self.maxf, size = self.dim)
def custom_sample(self):
return np.repeat(self.minf, repeats = self.dim) + np.random.uniform(low = 0, high = 1, size = self.dim) * np.repeat(self.maxf - self.minf, repeats = self.dim)
@abstractmethod
def evaluate(self, x):
pass
class Sphere(ObjectiveFunction):
def __init__(self, dim):
super(Sphere, self).__init__('Sphere', dim, -100.0, 100.0)
def evaluate(self, x):
return sum(np.power(x, 2))
@add_metaclass(ABCMeta)
class ArtificialBee(object):
TRIAL_INITIAL_DEFAULT_VALUE = 0
INITIAL_DEFAULT_PROBABILITY = 0.0
def __init__(self, obj_function):
self.pos = obj_function.custom_sample()
self.obj_function = obj_function
self.minf = obj_function.minf
self.maxf = obj_function.maxf
self.fitness = obj_function.evaluate(self.pos)
self.trial = ArtificialBee.TRIAL_INITIAL_DEFAULT_VALUE
self.prob = ArtificialBee.INITIAL_DEFAULT_PROBABILITY
def evaluate_boundaries(self, pos):
if (pos < self.minf).any() or (pos > self.maxf).any():
pos[pos > self.maxf] = self.maxf
pos[pos < self.minf] = self.minf
return pos
def update_bee(self, pos, fitness):
if fitness <= self.fitness:
print('Improved!')
self.pos = pos
self.fitness = fitness
self.trial = 0
else:
self.trial += 1
def reset_bee(self, max_trials):
if self.trial >= max_trials:
self.__reset_bee()
def __reset_bee(self):
self.pos = self.obj_function.custom_sample()
self.fitness = self.obj_function.evaluate(self.pos)
self.trial = ArtificialBee.TRIAL_INITIAL_DEFAULT_VALUE
self.prob = ArtificialBee.INITIAL_DEFAULT_PROBABILITY
class EmployeeBee(ArtificialBee):
def explore(self, max_trials):
#print('==========================================')
if self.trial <= max_trials:
component = np.random.choice(self.pos)
print('component = ', component)
print('self.pos = ', self.pos)
phi = np.random.uniform(low=-1, high=1, size = len(self.pos))
n_pos = self.pos + (self.pos - component) * phi
n_pos = self.evaluate_boundaries(n_pos)
n_fitness = self.obj_function.evaluate(n_pos)
self.update_bee(n_pos, n_fitness)
def get_fitness(self):
return 1/(1 + self.fitness) if self.fitness >= 0 else 1 + np.abs(self.fitness)
def compute_prob(self, max_fitness):
self.prob = self.get_fitness() / max_fitness
class OnLookerBee(ArtificialBee):
def onlook(self, best_food_sources, max_trials):
# for source in best_food_sources:
# print(source.pos)
candidate = np.random.choice(best_food_sources)
self.__exploit(candidate.pos, candidate.fitness, max_trials)
def __exploit(self, candidate, fitness, max_trials):
if self.trial <= max_trials:
component = np.random.choice(candidate)
phi = np.random.uniform(low=-1, high=1, size = len(candidate))
n_pos = candidate + (candidate - component) * phi
n_pos = self.evaluate_boundaries(n_pos)
n_fitness = self.obj_function.evaluate(n_pos)
if n_fitness <= fitness:
self.pos = n_pos
self.fitness = n_fitness
self.trial = 0
else:
self.trial += 1
class ABC(object):
def __init__(self, obj_function, colony_size = 30, n_iter = 50, max_trials = 10):
self.colony_size = colony_size
self.obj_function = obj_function
self.n_iter = n_iter
self.max_trials = max_trials
self.optimal_solution = None
self.optimality_tracking = []
def __reset_algorithm(self):
self.optimal_solution = None
self.optimality_tracking = []
def __update_optimality_tracking(self):
self.optimality_tracking.append(self.optimal_solution.fitness)
def __update_optimal_solution(self):
n_optimal_solution = min(self.onlooker_bees + self.employee_bees, key = lambda bee: bee.fitness)
if not self.optimal_solution:
self.optimal_solution = deepcopy(n_optimal_solution)
else:
if n_optimal_solution.fitness < self.optimal_solution.fitness:
self.optimal_solution = deepcopy(n_optimal_solution)
def __initialize_employees(self):
self.employee_bees = []
for itr in range(self.colony_size // 2):
self.employee_bees.append(EmployeeBee(self.obj_function))
def __initialize_onlookers(self):
self.onlooker_bees = []
for itr in range(self.colony_size // 2):
self.onlooker_bees.append(OnLookerBee(self.obj_function))
def __employee_bees_phase(self):
#print('================================')
#print(len(self.employee_bees))
for bee in self.employee_bees:
bee.explore(self.max_trials)
# map(lambda bee: bee.explore(self.max_trials), self.employee_bees)
def __calculate_probabilities(self):
sum_fitness = sum(map(lambda bee: bee.get_fitness(), self.employee_bees))
for bee in self.employee_bees:
bee.compute_prob(sum_fitness)
#map(lambda bee: bee.compute_prob(sum_fitness), self.employee_bees)
def __select_best_food_sources(self):
self.best_food_sources = list(filter (lambda bee: bee.prob > np.random.uniform(low = 0, high = 1), self.employee_bees))
while not self.best_food_sources:
self.best_food_sources = list(filter(lambda bee: bee.prob > np.random.uniform(low = 0, high = 1), self.employee_bees))
def __onlooker_bees_phase(self):
for bee in self.onlooker_bees:
bee.onlook(self.best_food_sources, self.max_trials)
# map(lambda bee: bee.onlook(self.best_food_sources, self.max_trials), self.onlooker_bees)
def __scout_bee_phase(self):
for bee in self.employee_bees + self.onlooker_bees:
bee.reset_bee(self.max_trials)
# map(lambda bee: bee.reset_bee(self.max_trials), self.onlooker_bees + self.employee_bees)
def optimize(self):
self.__reset_algorithm()
self.__initialize_employees()
self.__initialize_onlookers()
for itr in range(self.n_iter):
self.__employee_bees_phase()
self.__update_optimal_solution()
self.__calculate_probabilities()
self.__select_best_food_sources()
self.__onlooker_bees_phase()
self.__scout_bee_phase()
self.__update_optimal_solution()
self.__update_optimality_tracking()
print('Optimal solution: ', self.optimal_solution.pos)
print("iter: {} = cost: {}"
.format(itr, "%04.03e" % self.optimal_solution.fitness))
def get_objective(objective, dimension=30):
objectives = {'Sphere': Sphere(dimension)}
# 'Rastrigin': Rastrigin(dimension),
# 'Rosenbrock': Rosenbrock(dimension),
# 'Schwefel': Schwefel(dimension)}
return objectives[objective]
def simulate(obj_function, colony_size=30, n_iter=50,
max_trials=10, simulations=1):
itr = range(n_iter)
values = np.zeros(n_iter)
box_optimal = []
for _ in range(simulations):
optimizer = ABC(obj_function=get_objective(obj_function),
colony_size=colony_size, n_iter=n_iter,
max_trials=max_trials)
optimizer.optimize()
values += np.array(optimizer.optimality_tracking)
box_optimal.append(optimizer.optimal_solution.fitness)
print(optimizer.optimal_solution.pos)
values /= simulations
plt.plot(itr, values, lw=0.5, label=obj_function)
plt.legend(loc='upper right')
def main():
plt.figure(figsize=(10, 7))
print("Hello!")
simulate('Sphere')
plt.ticklabel_format(axis='y', style='sci', scilimits=(-2, 2))
plt.xticks(rotation=45)
plt.show()
if __name__ == '__main__':
main()
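# Hedged usage note (not part of the original script): simulate() exposes the
# main knobs used in main(); for example, a larger colony and longer run could
# be tried with
#     simulate('Sphere', colony_size=50, n_iter=200, max_trials=20, simulations=3)
# (only 'Sphere' is currently wired into get_objective).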
| 41.018957
| 166
| 0.640324
|
82694510539a1a46ec030f7d07157bd06002c820
| 18,761
|
py
|
Python
|
simulator/models/cifar10_models_v2.py
|
vlad-user/parallel-tempering
|
42ebecdea2a597e706382462dc90aab7e7ca098f
|
[
"MIT"
] | null | null | null |
simulator/models/cifar10_models_v2.py
|
vlad-user/parallel-tempering
|
42ebecdea2a597e706382462dc90aab7e7ca098f
|
[
"MIT"
] | null | null | null |
simulator/models/cifar10_models_v2.py
|
vlad-user/parallel-tempering
|
42ebecdea2a597e706382462dc90aab7e7ca098f
|
[
"MIT"
] | null | null | null |
import sys
import os
import tensorflow as tf
from simulator.graph.device_placer import _gpu_device_name
from simulator.simulator_utils import DTYPE
from simulator.models.helpers import DEFAULT_INITIALIZER
from simulator.models import utils
def lenet5_with_dropout_original(graph, verbose=False):
with graph.as_default():
with tf.device('gpu:0'):
is_train = tf.placeholder_with_default(False, shape=(), name='is_train')
with tf.name_scope('inputs'):
x = tf.placeholder(DTYPE, shape=(None, 32, 32, 3), name='x')
y = tf.placeholder(tf.int32, shape=(None), name='y')
keep_prob = tf.placeholder_with_default(input=1.0,
shape=(),
name='keep_prob')
with tf.name_scope('augmentation'):
augmented = utils.augment_images(x, is_train)
with tf.name_scope('C1-conv'):
conv1 = tf.layers.conv2d(augmented,
filters=6,
kernel_size=5,
strides=(1, 1),
padding='VALID',
activation=tf.nn.tanh,
kernel_initializer=DEFAULT_INITIALIZER)
if verbose:
print('C1-outshape:', conv1.get_shape().as_list())
with tf.variable_scope('S2-avg_pool'):
conv1 = utils.average_pooling_with_weights(value=conv1,
ksize=(1, 2, 2, 1),
strides=(1, 2, 2, 1),
padding='VALID',
activation=tf.nn.tanh)
if verbose:
print('S2-outshape:', conv1.get_shape().as_list())
with tf.variable_scope('dropout1'):
conv1 = tf.nn.dropout(conv1, keep_prob)
with tf.variable_scope('C3-conv'):
conv2 = tf.layers.conv2d(conv1,
filters=16,
kernel_size=5,
padding='VALID',
activation=tf.nn.tanh,
kernel_initializer=DEFAULT_INITIALIZER)
if verbose:
print('C3-outshape:', conv2.get_shape().as_list())
with tf.variable_scope('S4-avg_pool'):
conv2 = utils.average_pooling_with_weights(value=conv2,
ksize=(1, 2, 2, 1),
strides=(1, 2, 2, 1),
padding='VALID',
activation=tf.nn.tanh)
if verbose:
print('S4-outshape:', conv2.get_shape().as_list())
with tf.variable_scope('dropout2'):
conv2 = tf.nn.dropout(conv2, keep_prob)
with tf.variable_scope('C5-conv'):
conv3 = tf.layers.conv2d(conv2,
filters=120,
kernel_size=5,
padding='VALID',
activation=tf.nn.tanh,
kernel_initializer=DEFAULT_INITIALIZER)
if verbose:
print('C5-outshape:', conv3.get_shape().as_list())
conv3 = tf.nn.dropout(conv3, keep_prob)
with tf.name_scope('F6-fc'):
flatten = tf.layers.Flatten()(conv3)
fc1 = tf.layers.dense(inputs=flatten,
units=84,
activation=tf.nn.tanh,
kernel_initializer=DEFAULT_INITIALIZER,
name='fc2')
with tf.name_scope('dropout3'):
fc1 = tf.nn.dropout(fc1, keep_prob)
if verbose:
print('F6-outshape:', fc1.get_shape().as_list())
with tf.name_scope('logits'):
'''
logits = tf.layers.dense(inputs=fc1,
units=10,
kernel_initializer=DEFAULT_INITIALIZER)
'''
logits = utils.rbf_euclidean_layer(inputs=fc1, units=10)
return x, y, is_train, keep_prob, logits
def lenet5_with_dropout(graph):
with graph.as_default():
istrain = tf.placeholder(tf.bool, shape=(), name='is_train')
with tf.device('gpu:0'):
with tf.name_scope('inputs'):
x = tf.placeholder(DTYPE, shape=(None, 32, 32, 3))
y = tf.placeholder(tf.int32, shape=(None))
keep_prob = tf.placeholder(tf.float32, shape=(), name='keep_prob')
with tf.name_scope('conv1'):
conv1 = tf.layers.conv2d(x,
filters=6,
kernel_size=5,
strides=(1, 1),
padding='VALID',
activation=tf.nn.relu,
kernel_initializer=DEFAULT_INITIALIZER)
with tf.name_scope('max_pool1'):
conv1 = tf.nn.max_pool(value=conv1,
ksize=(1, 2, 2, 1),
strides=(1, 2, 2, 1),
padding='VALID')
with tf.name_scope('dropout1'):
conv1 = tf.nn.dropout(conv1, keep_prob)
with tf.name_scope('conv2'):
        conv2 = tf.layers.conv2d(conv1,
filters=16,
kernel_size=5,
padding='VALID',
activation=tf.nn.relu,
kernel_initializer=DEFAULT_INITIALIZER)
with tf.name_scope('max_pool2'):
conv2 = tf.nn.max_pool(value=conv2,
ksize=(1, 2, 2, 1),
strides=(1, 2, 2, 1),
padding='VALID',
name='max_pool2')
with tf.name_scope('dropout2'):
conv2 = tf.nn.dropout(conv2, keep_prob)
with tf.name_scope('conv3'):
conv3 = tf.layers.conv2d(conv2,
filters=120,
kernel_size=5,
padding='VALID',
activation=tf.nn.relu,
kernel_initializer=DEFAULT_INITIALIZER)
with tf.name_scope('fc1'):
flattened = tf.layers.Flatten()(conv3)
flattened = tf.nn.dropout(flattened, keep_prob)
fc1 = tf.layers.dense(flattened, 84, activation=tf.nn.relu, kernel_initializer=DEFAULT_INITIALIZER)
        fc1 = tf.nn.dropout(fc1, keep_prob)
with tf.name_scope('logits'):
logits = tf.layers.dense(inputs=fc1,
units=10,
activation=None,
kernel_initializer=DEFAULT_INITIALIZER)
return x, y, istrain, keep_prob, logits
def lenet5_with_dropout(graph):
with graph.as_default():
istrain = tf.placeholder(tf.bool, shape=(), name='is_train')
with tf.device('gpu:0'):
with tf.name_scope('inputs'):
x = tf.placeholder(DTYPE, shape=(None, 32, 32, 3))
y = tf.placeholder(tf.int32, shape=(None))
keep_prob = tf.placeholder_with_default(input=1.0,
shape=(),
name='keep_prob')
with tf.name_scope('conv1'):
conv1 = tf.layers.conv2d(x,
filters=6,
kernel_size=5,
strides=(1, 1),
padding='VALID',
activation=tf.nn.relu,
kernel_initializer=DEFAULT_INITIALIZER)
with tf.name_scope('max_pool1'):
conv1 = tf.nn.max_pool(value=conv1,
ksize=(1, 2, 2, 1),
strides=(1, 2, 2, 1),
padding='VALID')
with tf.name_scope('dropout1'):
conv1 = tf.nn.dropout(conv1, keep_prob)
with tf.name_scope('conv2'):
        conv2 = tf.layers.conv2d(conv1,
filters=16,
kernel_size=5,
padding='VALID',
activation=tf.nn.relu,
kernel_initializer=DEFAULT_INITIALIZER)
with tf.name_scope('max_pool2'):
conv2 = tf.nn.max_pool(value=conv2,
ksize=(1, 2, 2, 1),
strides=(1, 2, 2, 1),
padding='VALID',
name='max_pool2')
with tf.name_scope('dropout2'):
conv2 = tf.nn.dropout(conv2, keep_prob)
with tf.name_scope('conv3'):
conv3 = tf.layers.conv2d(conv2,
filters=120,
kernel_size=5,
padding='VALID',
activation=tf.nn.relu,
kernel_initializer=DEFAULT_INITIALIZER)
with tf.name_scope('fc1'):
flattened = tf.layers.Flatten()(conv3)
fc1 = tf.layers.dense(flattened, 84, activation=tf.nn.relu, kernel_initializer=DEFAULT_INITIALIZER)
        fc1 = tf.nn.dropout(fc1, keep_prob)
with tf.name_scope('logits'):
logits = tf.layers.dense(inputs=fc1,
units=10,
activation=None,
kernel_initializer=DEFAULT_INITIALIZER)
return x, y, istrain, keep_prob, logits
def lenet5_lr_with_const_dropout(graph, keep_prob=0.6):
with graph.as_default():
istrain = tf.placeholder(tf.bool, shape=(), name='is_train')
with tf.device('gpu:0'):
with tf.name_scope('inputs'):
x = tf.placeholder(DTYPE, shape=(None, 32, 32, 3))
y = tf.placeholder(tf.int32, shape=(None))
lr = tf.placeholder(tf.float32, shape=(), name='lr')
with tf.name_scope('conv1'):
conv1 = tf.layers.conv2d(x,
filters=6,
kernel_size=5,
strides=(1, 1),
padding='VALID',
activation=tf.nn.relu,
kernel_initializer=DEFAULT_INITIALIZER)
with tf.name_scope('max_pool1'):
conv1 = tf.nn.max_pool(value=conv1,
ksize=(1, 2, 2, 1),
strides=(1, 2, 2, 1),
padding='VALID')
with tf.name_scope('dropout1'):
conv1 = tf.nn.dropout(conv1, keep_prob)
with tf.name_scope('conv2'):
        conv2 = tf.layers.conv2d(conv1,
filters=16,
kernel_size=5,
padding='VALID',
activation=tf.nn.relu,
kernel_initializer=DEFAULT_INITIALIZER)
with tf.name_scope('max_pool2'):
conv2 = tf.nn.max_pool(value=conv2,
ksize=(1, 2, 2, 1),
strides=(1, 2, 2, 1),
padding='VALID',
name='max_pool2')
with tf.name_scope('dropout2'):
conv2 = tf.nn.dropout(conv2, keep_prob)
with tf.name_scope('conv3'):
conv3 = tf.layers.conv2d(conv2,
filters=120,
kernel_size=5,
padding='VALID',
activation=tf.nn.relu,
kernel_initializer=DEFAULT_INITIALIZER)
with tf.name_scope('fc1'):
flattened = tf.layers.Flatten()(conv3)
fc1 = tf.layers.dense(flattened, 84, activation=tf.nn.relu, kernel_initializer=DEFAULT_INITIALIZER)
        fc1 = tf.nn.dropout(fc1, keep_prob)
with tf.name_scope('logits'):
logits = tf.layers.dense(inputs=fc1,
units=10,
activation=None,
kernel_initializer=DEFAULT_INITIALIZER)
return x, y, istrain, lr, logits
def lenet5(graph):
x, y, istrain, _, logits = lenet5_lr_with_const_dropout(graph, keep_prob=1.0)
return x, y, istrain, logits
def lenet5_l2_with_const_dropout(graph):
x, y, istrain, _, logits = lenet5_lr_with_const_dropout(graph, keep_prob=1.0)
return x, y, istrain, logits
def lenet5_with_input_noise(graph, verbose=False):
with graph.as_default():
with tf.device('gpu:0'):
is_train = tf.placeholder_with_default(False, shape=(), name='is_train')
with tf.name_scope('inputs'):
x = tf.placeholder(DTYPE, shape=(None, 32, 32, 3), name='x')
y = tf.placeholder(tf.int32, shape=(None), name='y')
stddev_ph = tf.placeholder_with_default(input=.001,
shape=(),
name='stddev')
noisy_inputs = utils.apply_gaussian_noise(x, is_train, stddev_ph)
with tf.name_scope('C1-conv'):
conv1 = tf.layers.conv2d(noisy_inputs,
filters=6,
kernel_size=5,
strides=(1, 1),
padding='VALID',
activation=tf.nn.relu,
kernel_initializer=DEFAULT_INITIALIZER)
with tf.name_scope('max_pool1'):
conv1 = tf.nn.max_pool(value=conv1,
ksize=(1, 2, 2, 1),
strides=(1, 2, 2, 1),
padding='VALID')
with tf.variable_scope('C3-conv'):
conv2 = tf.layers.conv2d(conv1,
filters=16,
kernel_size=5,
padding='VALID',
activation=tf.nn.relu,
kernel_initializer=DEFAULT_INITIALIZER)
with tf.name_scope('max_pool2'):
conv2 = tf.nn.max_pool(value=conv2,
ksize=(1, 2, 2, 1),
strides=(1, 2, 2, 1),
padding='VALID',
name='max_pool2')
with tf.variable_scope('C5-conv'):
conv3 = tf.layers.conv2d(conv2,
filters=120,
kernel_size=5,
padding='VALID',
activation=tf.nn.relu,
kernel_initializer=DEFAULT_INITIALIZER)
with tf.name_scope('F6-fc'):
flatten = tf.layers.Flatten()(conv3)
fc1 = tf.layers.dense(inputs=flatten,
units=84,
activation=tf.nn.relu,
kernel_initializer=DEFAULT_INITIALIZER,
name='fc2')
with tf.name_scope('logits'):
logits = tf.layers.dense(inputs=fc1,
units=10,
kernel_initializer=DEFAULT_INITIALIZER)
return x, y, is_train, stddev_ph, logits
def lenet5_with_input_noise_const_dropout(graph, keep_prob=0.7, verbose=False):
with graph.as_default():
with tf.device('gpu:0'):
is_train = tf.placeholder_with_default(False, shape=(), name='is_train')
with tf.name_scope('inputs'):
x = tf.placeholder(DTYPE, shape=(None, 32, 32, 3), name='x')
y = tf.placeholder(tf.int32, shape=(None), name='y')
stddev_ph = tf.placeholder_with_default(input=.001,
shape=(),
name='stddev')
noisy_inputs = utils.apply_gaussian_noise(x, is_train, stddev_ph)
with tf.name_scope('C1-conv'):
conv1 = tf.layers.conv2d(noisy_inputs,
filters=6,
kernel_size=5,
strides=(1, 1),
padding='VALID',
activation=tf.nn.relu,
kernel_initializer=DEFAULT_INITIALIZER)
with tf.name_scope('max_pool1'):
conv1 = tf.nn.max_pool(value=conv1,
ksize=(1, 2, 2, 1),
strides=(1, 2, 2, 1),
padding='VALID')
conv1 = tf.nn.dropout(conv1, keep_prob)
with tf.variable_scope('C3-conv'):
conv2 = tf.layers.conv2d(conv1,
filters=16,
kernel_size=5,
padding='VALID',
activation=tf.nn.relu,
kernel_initializer=DEFAULT_INITIALIZER)
with tf.name_scope('max_pool2'):
conv2 = tf.nn.max_pool(value=conv2,
ksize=(1, 2, 2, 1),
strides=(1, 2, 2, 1),
padding='VALID',
name='max_pool2')
conv2 = tf.nn.dropout(conv2, keep_prob)
with tf.variable_scope('C5-conv'):
conv3 = tf.layers.conv2d(conv2,
filters=120,
kernel_size=5,
padding='VALID',
activation=tf.nn.relu,
kernel_initializer=DEFAULT_INITIALIZER)
with tf.name_scope('F6-fc'):
flatten = tf.layers.Flatten()(conv3)
fc1 = tf.layers.dense(inputs=flatten,
units=84,
activation=tf.nn.relu,
kernel_initializer=DEFAULT_INITIALIZER,
name='fc2')
fc1 = tf.nn.dropout(fc1, keep_prob)
with tf.name_scope('logits'):
logits = tf.layers.dense(inputs=fc1,
units=10,
kernel_initializer=DEFAULT_INITIALIZER)
return x, y, is_train, stddev_ph, logits
if __name__ == '__main__':
os.system('clear')
  lenet5_with_dropout(tf.Graph())
| 41.232967
| 107
| 0.459197
|
10a39aa4d9a637ca25faa9350e5ed69acf3d1425
| 362
|
py
|
Python
|
cpmd/webServer/publish.py
|
computePods/majorDomo
|
0d59f105592c375f16dcd7dcb6ec5bbbae25ce5f
|
[
"Apache-2.0"
] | null | null | null |
cpmd/webServer/publish.py
|
computePods/majorDomo
|
0d59f105592c375f16dcd7dcb6ec5bbbae25ce5f
|
[
"Apache-2.0"
] | null | null | null |
cpmd/webServer/publish.py
|
computePods/majorDomo
|
0d59f105592c375f16dcd7dcb6ec5bbbae25ce5f
|
[
"Apache-2.0"
] | null | null | null |
from typing import Optional, List
from fastapi import FastAPI
from pydantic import BaseModel
# WebServer interface definition....
class NatsMsg(BaseModel):
subject: str
message: List
def addPublishInterface(self) :
@self.app.put("/publish")
async def update_item(msg: NatsMsg):
await self.natsClient.sendMessage(msg)
return { "done": True }
| 20.111111
| 42
| 0.740331
|
8d1a9fd570630604ecbb41e2cac21e21583c3456
| 30,598
|
py
|
Python
|
grr/client/grr_response_client/client_actions/artifact_collector_test.py
|
magnologan/grr
|
06eeb071e9a925b34f67caf776c3330b39154850
|
[
"Apache-2.0"
] | null | null | null |
grr/client/grr_response_client/client_actions/artifact_collector_test.py
|
magnologan/grr
|
06eeb071e9a925b34f67caf776c3330b39154850
|
[
"Apache-2.0"
] | null | null | null |
grr/client/grr_response_client/client_actions/artifact_collector_test.py
|
magnologan/grr
|
06eeb071e9a925b34f67caf776c3330b39154850
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
"""Tests the client artifactor collection."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import glob
import io
import os
from absl import app
from future.builtins import filter
import mock
import psutil
from grr_response_client.client_actions import artifact_collector
from grr_response_core import config
from grr_response_core.lib import factory
from grr_response_core.lib import parser
from grr_response_core.lib import parsers
from grr_response_core.lib import utils
from grr_response_core.lib.rdfvalues import anomaly as rdf_anomaly
from grr_response_core.lib.rdfvalues import artifacts as rdf_artifact
from grr_response_core.lib.rdfvalues import client as rdf_client
from grr_response_core.lib.rdfvalues import client_action as rdf_client_action
from grr_response_core.lib.rdfvalues import client_fs as rdf_client_fs
from grr_response_core.lib.rdfvalues import client_network as rdf_client_network
from grr_response_core.lib.rdfvalues import paths as rdf_paths
from grr_response_core.lib.rdfvalues import protodict as rdf_protodict
from grr_response_core.lib.util import compatibility
from grr.test_lib import artifact_test_lib
from grr.test_lib import client_test_lib
from grr.test_lib import filesystem_test_lib
from grr.test_lib import osx_launchd_testdata
from grr.test_lib import test_lib
from grr.test_lib import vfs_test_lib
def GetRequest(source, artifact_name, knowledge_base=None):
expanded_source = rdf_artifact.ExpandedSource(base_source=source)
expanded_artifact = rdf_artifact.ExpandedArtifact(
name=artifact_name, sources=[expanded_source])
return rdf_artifact.ClientArtifactCollectorArgs(
artifacts=[expanded_artifact],
apply_parsers=False,
knowledge_base=knowledge_base)
class ArtifactCollectorTest(client_test_lib.EmptyActionTest):
"""Test the artifact collection on the client."""
def setUp(self):
super(ArtifactCollectorTest, self).setUp()
self.source_type = rdf_artifact.ArtifactSource.SourceType
self.test_artifacts_file = os.path.join(config.CONFIG["Test.data_dir"],
"artifacts", "test_artifacts.json")
def RunArtifactCollector(self, request):
result = self.RunAction(artifact_collector.ArtifactCollector, request)[0]
collected_artifact = result.collected_artifacts[0]
return collected_artifact
@artifact_test_lib.PatchCleanArtifactRegistry
def testCommandArtifact(self, registry):
"""Test the basic ExecuteCommand action."""
filesystem_test_lib.Command(
"/usr/bin/dpkg", args=["--list"], system="Linux")
registry.AddFileSource(self.test_artifacts_file)
artifact = registry.GetArtifact("TestCmdArtifact")
request = GetRequest(artifact.sources[0], artifact.name)
collected_artifact = self.RunArtifactCollector(request)
execute_response = collected_artifact.action_results[0].value
self.assertEqual(collected_artifact.name, "TestCmdArtifact")
self.assertGreater(execute_response.time_used, 0)
def testGRRClientActionGetHostname(self):
"""Test the GRR Client Action GetHostname."""
source = rdf_artifact.ArtifactSource(
type=self.source_type.GRR_CLIENT_ACTION,
attributes={"client_action": "GetHostname"})
request = GetRequest(source, "TestClientActionArtifact")
collected_artifact = self.RunArtifactCollector(request)
for action_result in collected_artifact.action_results:
value = action_result.value
self.assertTrue(value.string)
def testGRRClientActionListProcesses(self):
"""Test the GRR Client Action ListProcesses."""
def ProcessIter():
return iter([client_test_lib.MockWindowsProcess()])
source = rdf_artifact.ArtifactSource(
type=self.source_type.GRR_CLIENT_ACTION,
attributes={"client_action": "ListProcesses"})
request = GetRequest(source, "TestClientActionArtifact")
with utils.Stubber(psutil, "process_iter", ProcessIter):
collected_artifact = self.RunArtifactCollector(request)
value = collected_artifact.action_results[0].value
self.assertIsInstance(value, rdf_client.Process)
self.assertEqual(value.pid, 10)
def testGRRClientActionEnumerateInterfaces(self):
"""Test the GRR Client Action EnumerateInterfaces."""
source = rdf_artifact.ArtifactSource(
type=self.source_type.GRR_CLIENT_ACTION,
attributes={"client_action": "EnumerateInterfaces"})
request = GetRequest(source, "TestClientActionArtifact")
collected_artifact = self.RunArtifactCollector(request)
self.assertNotEmpty(collected_artifact.action_results)
for action_result in collected_artifact.action_results:
value = action_result.value
self.assertIsInstance(value, rdf_client_network.Interface)
def testGRRClientActionEnumerateUsers(self):
"""Test the GRR Client Action EnumerateUsers."""
def MockedOpen(requested_path, mode="rb"):
try:
fixture_path = os.path.join(self.base_path, "VFSFixture",
requested_path.lstrip("/"))
return compatibility.builtins.open.old_target(fixture_path, mode)
except IOError:
return compatibility.builtins.open.old_target(requested_path, mode)
source = rdf_artifact.ArtifactSource(
type=self.source_type.GRR_CLIENT_ACTION,
attributes={"client_action": "EnumerateUsers"})
request = GetRequest(source, "TestClientActionArtifact")
with utils.MultiStubber((compatibility.builtins, "open", MockedOpen),
(glob, "glob", lambda x: ["/var/log/wtmp"])):
result = self.RunAction(artifact_collector.ArtifactCollector, request)[0]
collected_artifact = result.collected_artifacts[0]
self.assertLen(collected_artifact.action_results, 4)
for action_result in collected_artifact.action_results:
value = action_result.value
self.assertIsInstance(value, rdf_client.User)
if value.username not in ["user1", "user2", "user3", "utuser"]:
self.fail("Unexpected user found: %s" % value.username)
# Test that the users were added to the knowledge base
self.assertLen(result.knowledge_base.users, 4)
for user in result.knowledge_base.users:
self.assertIn(user.username, ["user1", "user2", "user3", "utuser"])
def testGRRClientActionListNetworkConnections(self):
"""Test the GRR Client Action ListNetworkConnections."""
source = rdf_artifact.ArtifactSource(
type=self.source_type.GRR_CLIENT_ACTION,
attributes={"client_action": "ListNetworkConnections"})
request = GetRequest(source, "TestClientActionArtifact")
collected_artifact = self.RunArtifactCollector(request)
for action_result in collected_artifact.action_results:
value = action_result.value
self.assertIsInstance(value, rdf_client_network.NetworkConnection)
def testGRRClientActionStatFS(self):
"""Test the GRR Client Action StatFS."""
file_path = os.path.join(self.base_path, "numbers.txt")
source = rdf_artifact.ArtifactSource(
type=self.source_type.GRR_CLIENT_ACTION,
attributes={
"client_action": "StatFS",
"action_args": {
"path_list": [file_path]
}
})
request = GetRequest(source, "TestClientActionArtifact")
collected_artifact = self.RunArtifactCollector(request)
self.assertLen(collected_artifact.action_results, 1)
action_result = collected_artifact.action_results[0].value
self.assertIsInstance(action_result, rdf_client_fs.Volume)
def testRegistryValueArtifact(self):
"""Test the basic Registry Value collection."""
source = rdf_artifact.ArtifactSource(
type=self.source_type.REGISTRY_VALUE,
attributes={
"key_value_pairs": [{
"key": (r"HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet"
r"\Control\Session Manager"),
"value": "BootExecute"
}]
})
request = GetRequest(source, "FakeRegistryValue")
with vfs_test_lib.VFSOverrider(rdf_paths.PathSpec.PathType.REGISTRY,
vfs_test_lib.FakeRegistryVFSHandler):
with vfs_test_lib.VFSOverrider(rdf_paths.PathSpec.PathType.OS,
vfs_test_lib.FakeFullVFSHandler):
collected_artifact = self.RunArtifactCollector(request)
file_stat = collected_artifact.action_results[0].value
self.assertIsInstance(file_stat, rdf_client_fs.StatEntry)
urn = file_stat.pathspec.AFF4Path(self.SetupClient(0))
self.assertEndsWith(str(urn), "BootExecute")
def testRegistryKeyArtifact(self):
"""Test the basic Registry Key collection."""
source = rdf_artifact.ArtifactSource(
type=self.source_type.REGISTRY_KEY,
attributes={
"keys": [
r"HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet"
r"\Control\Session Manager\*"
],
})
request = GetRequest(source, "TestRegistryKey")
with vfs_test_lib.VFSOverrider(rdf_paths.PathSpec.PathType.REGISTRY,
vfs_test_lib.FakeRegistryVFSHandler):
with vfs_test_lib.VFSOverrider(rdf_paths.PathSpec.PathType.OS,
vfs_test_lib.FakeFullVFSHandler):
collected_artifact = self.RunArtifactCollector(request)
self.assertLen(collected_artifact.action_results, 1)
file_stat = collected_artifact.action_results[0].value
self.assertIsInstance(file_stat, rdf_client_fs.StatEntry)
def testRegistryNoKeysArtifact(self):
"""Test the basic Registry Key collection."""
source = rdf_artifact.ArtifactSource(
type=self.source_type.REGISTRY_KEY, attributes={
"keys": [],
})
request = GetRequest(source, "TestRegistryKey")
with vfs_test_lib.VFSOverrider(rdf_paths.PathSpec.PathType.REGISTRY,
vfs_test_lib.FakeRegistryVFSHandler):
with vfs_test_lib.VFSOverrider(rdf_paths.PathSpec.PathType.OS,
vfs_test_lib.FakeFullVFSHandler):
collected_artifact = self.RunArtifactCollector(request)
self.assertEmpty(collected_artifact.action_results)
def testDirectoryArtifact(self):
"""Test the source type `DIRECTORY`."""
paths = [
os.path.join(self.base_path, "%%Users.username%%*"),
os.path.join(self.base_path, "VFSFixture", "var", "*", "wtmp")
]
expected = [
os.path.join(self.base_path, "test.plist"),
os.path.join(self.base_path, "test_img.dd"),
os.path.join(self.base_path, "tests"),
os.path.join(self.base_path, "tests_long"),
os.path.join(self.base_path, "syslog"),
os.path.join(self.base_path, "syslog_compress.gz"),
os.path.join(self.base_path, "syslog_false.gz"),
os.path.join(self.base_path, "VFSFixture", "var", "log", "wtmp"),
]
source = rdf_artifact.ArtifactSource(
type=self.source_type.DIRECTORY, attributes={"paths": paths})
knowledge_base = rdf_client.KnowledgeBase(users=[
rdf_client.User(username="test"),
rdf_client.User(username="syslog")
])
request = GetRequest(source, "TestDirectory", knowledge_base)
collected_artifact = self.RunArtifactCollector(request)
self.assertNotEmpty(collected_artifact.action_results)
for file_stat in collected_artifact.action_results:
self.assertIsInstance(file_stat.value, rdf_client_fs.StatEntry)
self.assertIn(file_stat.value.pathspec.path, expected)
def testGrepArtifact(self):
"""Test the source type `GREP`."""
paths = [
os.path.join(self.base_path, "searching", "dpkg.log"),
os.path.join(self.base_path, "searching", "dpkg_false.log"),
os.path.join(self.base_path, "searching", "auth.log")
]
content_regex_list = [br"mydo....\.com"]
source = rdf_artifact.ArtifactSource(
type=self.source_type.GREP,
attributes={
"paths": paths,
"content_regex_list": content_regex_list
})
request = GetRequest(source, "TestGrep")
collected_artifact = self.RunArtifactCollector(request)
self.assertLen(collected_artifact.action_results, 1)
result = collected_artifact.action_results[0].value
self.assertIsInstance(result, rdf_client_fs.StatEntry)
self.assertEndsWith(result.pathspec.path, "auth.log")
@artifact_test_lib.PatchCleanArtifactRegistry
def testMultipleArtifacts(self, registry):
"""Test collecting multiple artifacts."""
filesystem_test_lib.Command(
"/usr/bin/dpkg", args=["--list"], system="Linux")
registry.AddFileSource(self.test_artifacts_file)
artifact = registry.GetArtifact("TestCmdArtifact")
ext_src = rdf_artifact.ExpandedSource(base_source=artifact.sources[0])
ext_art = rdf_artifact.ExpandedArtifact(
name=artifact.name, sources=[ext_src])
request = rdf_artifact.ClientArtifactCollectorArgs(
artifacts=[ext_art], apply_parsers=False)
request.artifacts.append(ext_art)
result = self.RunAction(artifact_collector.ArtifactCollector, request)[0]
collected_artifacts = list(result.collected_artifacts)
self.assertLen(collected_artifacts, 2)
self.assertEqual(collected_artifacts[0].name, "TestCmdArtifact")
self.assertEqual(collected_artifacts[1].name, "TestCmdArtifact")
execute_response_1 = collected_artifacts[0].action_results[0].value
execute_response_2 = collected_artifacts[1].action_results[0].value
self.assertGreater(execute_response_1.time_used, 0)
self.assertGreater(execute_response_2.time_used, 0)
@artifact_test_lib.PatchCleanArtifactRegistry
def testFilterRequestedArtifactResults(self, registry):
"""Test that only artifacts requested by the user are sent to the server."""
filesystem_test_lib.Command(
"/usr/bin/dpkg", args=["--list"], system="Linux")
registry.AddFileSource(self.test_artifacts_file)
artifact = registry.GetArtifact("TestCmdArtifact")
ext_src = rdf_artifact.ExpandedSource(base_source=artifact.sources[0])
ext_art = rdf_artifact.ExpandedArtifact(
name=artifact.name, sources=[ext_src])
request = rdf_artifact.ClientArtifactCollectorArgs(
artifacts=[ext_art], apply_parsers=False)
ext_art = rdf_artifact.ExpandedArtifact(
name=artifact.name, sources=[ext_src], requested_by_user=False)
request.artifacts.append(ext_art)
result = self.RunAction(artifact_collector.ArtifactCollector, request)[0]
collected_artifacts = list(result.collected_artifacts)
self.assertLen(collected_artifacts, 1)
self.assertEqual(collected_artifacts[0].name, "TestCmdArtifact")
execute_response = collected_artifacts[0].action_results[0].value
self.assertGreater(execute_response.time_used, 0)
@artifact_test_lib.PatchCleanArtifactRegistry
def testTSKRaiseValueError(self, registry):
"""Test Raise Error if path type is not OS."""
registry.AddFileSource(self.test_artifacts_file)
ext_src = rdf_artifact.ExpandedSource(
path_type=rdf_paths.PathSpec.PathType.TSK)
ext_art = rdf_artifact.ExpandedArtifact(
name="TestArtifact", sources=[ext_src])
request = rdf_artifact.ClientArtifactCollectorArgs(
artifacts=[ext_art], apply_parsers=False)
artifact = registry.GetArtifact("FakeFileArtifact")
ext_src.base_source = artifact.sources[0]
with self.assertRaises(ValueError):
self.RunAction(artifact_collector.ArtifactCollector, request)
artifact = registry.GetArtifact("BadPathspecArtifact")
ext_src.base_source = artifact.sources[0]
with self.assertRaises(ValueError):
self.RunAction(artifact_collector.ArtifactCollector, request)
@artifact_test_lib.PatchCleanArtifactRegistry
def testUnsupportedSourceType(self, registry):
"""Test that an unsupported source type raises an Error."""
registry.AddFileSource(self.test_artifacts_file)
artifact = registry.GetArtifact("TestAggregationArtifact")
ext_src = rdf_artifact.ExpandedSource(base_source=artifact.sources[0])
ext_art = rdf_artifact.ExpandedArtifact(
name=artifact.name, sources=[ext_src])
request = rdf_artifact.ClientArtifactCollectorArgs(
artifacts=[ext_art],
knowledge_base=None,
ignore_interpolation_errors=True,
apply_parsers=False)
# The type ARTIFACT_GROUP will raise an error because the group should have
# been expanded on the server.
with self.assertRaises(ValueError):
self.RunAction(artifact_collector.ArtifactCollector, request)
class OSXArtifactCollectorTests(client_test_lib.OSSpecificClientTests):
def setUp(self):
super(OSXArtifactCollectorTests, self).setUp()
# pylint: disable=g-import-not-at-top
from grr_response_client.client_actions import operating_system
from grr_response_client.client_actions.osx import osx
# pylint: enable=g-import-not-at-top
self.os = operating_system
self.osx = osx
self.source_type = rdf_artifact.ArtifactSource.SourceType
def EnumerateFilesystemsStub(self, args):
del args # Unused.
path = os.path.join(self.base_path, "osx_fsdata")
with io.open(path, "rb") as f:
filesystems = self.osx.client_utils_osx.ParseFileSystemsStruct(
self.osx.client_utils_osx.StatFS64Struct, 7, f.read())
for fs_struct in filesystems:
yield rdf_client_fs.Filesystem(
device=fs_struct.f_mntfromname,
mount_point=fs_struct.f_mntonname,
type=fs_struct.f_fstypename)
def OSXEnumerateRunningServicesStub(self, args):
del args # Unused.
job = osx_launchd_testdata.JOB[0]
yield rdf_client.OSXServiceInformation(
label=job.get("Label"),
program=job.get("Program"),
sessiontype=job.get("LimitLoadToSessionType"),
lastexitstatus=int(job["LastExitStatus"]),
timeout=int(job["TimeOut"]),
ondemand=bool(job["OnDemand"]))
def testGRRClientActionEnumerateFilesystems(self):
"""Test the GRR Client Action EnumerateFilesystems."""
source = rdf_artifact.ArtifactSource(
type=self.source_type.GRR_CLIENT_ACTION,
attributes={"client_action": "EnumerateFilesystems"})
request = GetRequest(source, "TestClientActionArtifact")
with utils.Stubber(self.os, "EnumerateFilesystemsFromClient",
self.EnumerateFilesystemsStub):
result = self.RunAction(artifact_collector.ArtifactCollector, request)[0]
collected_artifact = result.collected_artifacts[0]
self.assertLen(collected_artifact.action_results, 7)
res = collected_artifact.action_results[0].value
self.assertIsInstance(res, rdf_client_fs.Filesystem)
self.assertEqual(res.type, "hfs")
def testGRRClientActionOSXEnumerateRunningServices(self):
"""Test the GRR Client Action OSXEnumerateRunningServices."""
source = rdf_artifact.ArtifactSource(
type=self.source_type.GRR_CLIENT_ACTION,
attributes={"client_action": "OSXEnumerateRunningServices"})
request = GetRequest(source, "TestClientActionArtifact")
with utils.Stubber(self.os, "EnumerateRunningServices",
self.OSXEnumerateRunningServicesStub):
result = self.RunAction(artifact_collector.ArtifactCollector, request)[0]
collected_artifact = result.collected_artifacts[0]
self.assertLen(collected_artifact.action_results, 1)
res = collected_artifact.action_results[0].value
self.assertIsInstance(res, rdf_client.OSXServiceInformation)
self.assertEqual(res.label, "com.apple.FileSyncAgent.PHD")
class WindowsArtifactCollectorTests(client_test_lib.OSSpecificClientTests):
def setUp(self):
super(WindowsArtifactCollectorTests, self).setUp()
self.test_artifacts_file = os.path.join(config.CONFIG["Test.data_dir"],
"artifacts", "test_artifacts.json")
windows_mock = mock.MagicMock()
modules = {
("grr_response_client.client_actions"
".windows"):
windows_mock
}
module_patcher = mock.patch.dict("sys.modules", modules)
module_patcher.start()
self.addCleanup(module_patcher.stop)
self.windows = windows_mock.windows
@artifact_test_lib.PatchCleanArtifactRegistry
def testWMIArtifact(self, registry):
"""Test collecting a WMI artifact."""
registry.AddFileSource(self.test_artifacts_file)
artifact = registry.GetArtifact("WMIActiveScriptEventConsumer")
ext_src = rdf_artifact.ExpandedSource(base_source=artifact.sources[0])
ext_art = rdf_artifact.ExpandedArtifact(
name=artifact.name, sources=[ext_src])
request = rdf_artifact.ClientArtifactCollectorArgs(
artifacts=[ext_art],
knowledge_base=None,
ignore_interpolation_errors=True,
apply_parsers=False)
result = self.RunAction(artifact_collector.ArtifactCollector, request)[0]
self.assertIsInstance(result, rdf_artifact.ClientArtifactCollectorResult)
coll = artifact_collector.ArtifactCollector()
coll.knowledge_base = None
coll.ignore_interpolation_errors = True
expected = rdf_client_action.WMIRequest(
query="SELECT * FROM ActiveScriptEventConsumer",
base_object="winmgmts:\\root\\subscription")
for action, request in coll._ProcessWmiSource(ext_src):
self.assertEqual(request, expected)
self.assertEqual(action, self.windows.WmiQueryFromClient)
self.windows.WmiQueryFromClient.assert_called_with(request)
class TestEchoCmdParser(parser.CommandParser):
output_types = [rdf_client.SoftwarePackages]
supported_artifacts = ["TestEchoCmdArtifact"]
def Parse(self, cmd, args, stdout, stderr, return_val, time_taken,
knowledge_base):
del cmd, args, stderr, return_val, time_taken, knowledge_base # Unused
installed = rdf_client.SoftwarePackage.InstallState.INSTALLED
yield rdf_client.SoftwarePackages(packages=[
rdf_client.SoftwarePackage(
name="Package",
description=stdout,
version="1",
architecture="amd64",
install_state=installed)
])
class FakeFileParser(parser.FileParser):
output_types = [rdf_protodict.AttributedDict]
supported_artifacts = ["FakeFileArtifact"]
def Parse(self, stat, file_obj, knowledge_base):
del knowledge_base # Unused.
lines = set(l.strip() for l in file_obj.read().splitlines())
users = list(filter(None, lines))
filename = stat.pathspec.path
cfg = {"filename": filename, "users": users}
yield rdf_protodict.AttributedDict(**cfg)
class FakeFileMultiParser(parser.FileMultiParser):
output_types = [rdf_protodict.AttributedDict]
supported_artifacts = ["FakeFileArtifact2"]
def ParseMultiple(self, stats, file_objects, knowledge_base):
del knowledge_base # Unused.
lines = set()
for file_obj in file_objects:
lines.update(set(l.strip() for l in file_obj.read().splitlines()))
users = list(filter(None, lines))
for stat in stats:
filename = stat.pathspec.path
cfg = {"filename": filename, "users": users}
yield rdf_protodict.AttributedDict(**cfg)
class ParseResponsesTest(client_test_lib.EmptyActionTest):
@mock.patch.object(parsers, "SINGLE_RESPONSE_PARSER_FACTORY",
factory.Factory(parsers.SingleResponseParser))
def testCmdArtifactAction(self):
"""Test the actual client action with parsers."""
parsers.SINGLE_RESPONSE_PARSER_FACTORY.Register("Cmd", TestEchoCmdParser)
filesystem_test_lib.Command("/bin/echo", args=["1"])
source = rdf_artifact.ArtifactSource(
type=rdf_artifact.ArtifactSource.SourceType.COMMAND,
attributes={
"cmd": "/bin/echo",
"args": ["1"]
})
ext_src = rdf_artifact.ExpandedSource(base_source=source)
ext_art = rdf_artifact.ExpandedArtifact(
name="TestEchoCmdArtifact", sources=[ext_src])
request = rdf_artifact.ClientArtifactCollectorArgs(
artifacts=[ext_art],
knowledge_base=None,
ignore_interpolation_errors=True,
apply_parsers=True)
result = self.RunAction(artifact_collector.ArtifactCollector, request)[0]
self.assertIsInstance(result, rdf_artifact.ClientArtifactCollectorResult)
self.assertLen(result.collected_artifacts, 1)
res = result.collected_artifacts[0].action_results[0].value
self.assertIsInstance(res, rdf_client.SoftwarePackages)
self.assertEqual(res.packages[0].description, "1\n")
@mock.patch.object(parsers, "SINGLE_FILE_PARSER_FACTORY",
factory.Factory(parsers.SingleFileParser))
def testFakeFileArtifactAction(self):
"""Test collecting a file artifact and parsing the response."""
parsers.SINGLE_FILE_PARSER_FACTORY.Register("Fake", FakeFileParser)
file_path = os.path.join(self.base_path, "numbers.txt")
source = rdf_artifact.ArtifactSource(
type=rdf_artifact.ArtifactSource.SourceType.FILE,
attributes={"paths": [file_path]})
ext_src = rdf_artifact.ExpandedSource(base_source=source)
ext_art = rdf_artifact.ExpandedArtifact(
name="FakeFileArtifact", sources=[ext_src])
request = rdf_artifact.ClientArtifactCollectorArgs(
artifacts=[ext_art],
knowledge_base=None,
ignore_interpolation_errors=True,
apply_parsers=True)
result = self.RunAction(artifact_collector.ArtifactCollector, request)[0]
self.assertLen(result.collected_artifacts[0].action_results, 1)
res = result.collected_artifacts[0].action_results[0].value
self.assertIsInstance(res, rdf_protodict.AttributedDict)
self.assertLen(res.users, 1000)
self.assertEqual(res.filename, file_path)
@mock.patch.object(parsers, "MULTI_FILE_PARSER_FACTORY",
factory.Factory(parsers.MultiFileParser))
def testFakeFileArtifactActionProcessTogether(self):
"""Test collecting a file artifact and parsing the responses together."""
parsers.MULTI_FILE_PARSER_FACTORY.Register("Fake", FakeFileMultiParser)
file_path = os.path.join(self.base_path, "numbers.txt")
source = rdf_artifact.ArtifactSource(
type=rdf_artifact.ArtifactSource.SourceType.FILE,
attributes={"paths": [file_path]})
ext_src = rdf_artifact.ExpandedSource(base_source=source)
ext_art = rdf_artifact.ExpandedArtifact(
name="FakeFileArtifact2", sources=[ext_src])
request = rdf_artifact.ClientArtifactCollectorArgs(
artifacts=[ext_art],
knowledge_base=None,
ignore_interpolation_errors=True,
apply_parsers=True)
result = self.RunAction(artifact_collector.ArtifactCollector, request)[0]
self.assertLen(result.collected_artifacts[0].action_results, 1)
res = result.collected_artifacts[0].action_results[0].value
self.assertIsInstance(res, rdf_protodict.AttributedDict)
self.assertLen(res.users, 1000)
self.assertEqual(res.filename, file_path)
class KnowledgeBaseUpdateTest(client_test_lib.EmptyActionTest):
def InitializeRequest(self, initial_knowledge_base=None, provides=None):
"""Prepare ClientArtifactCollectorArgs."""
expanded_source = rdf_artifact.ExpandedSource()
expanded_artifact = rdf_artifact.ExpandedArtifact(
name="EmptyArtifact", sources=[expanded_source], provides=provides)
return rdf_artifact.ClientArtifactCollectorArgs(
artifacts=[expanded_artifact], knowledge_base=initial_knowledge_base)
def GetUpdatedKnowledgeBase(self):
"""Runs the artifact collector with the specified client action result."""
with utils.Stubber(artifact_collector.ArtifactCollector, "_ProcessSources",
self.GetActionResult):
result = self.RunAction(artifact_collector.ArtifactCollector,
self.request)[0]
return result.knowledge_base
def GetActionResult(self, *args):
del args # Unused.
yield [self.response]
def testAnomaly(self):
"""Test the knowledge base stays uninitialized if an anomaly is returned."""
self.request = self.InitializeRequest()
self.response = rdf_anomaly.Anomaly()
knowledge_base = self.GetUpdatedKnowledgeBase()
self.assertEqual(knowledge_base, rdf_client.KnowledgeBase())
def testAddUser(self):
"""Test a user response is added to the knowledge_base."""
self.request = self.InitializeRequest()
self.response = rdf_client.User(username="user1", homedir="/home/foo")
knowledge_base = self.GetUpdatedKnowledgeBase()
self.assertLen(knowledge_base.users, 1)
user = knowledge_base.users[0]
self.assertEqual(user.username, "user1")
self.assertEqual(user.homedir, "/home/foo")
def testUpdateUser(self):
"""Test a user response is updated if present in the knowledge base."""
user = rdf_client.User(username="user1")
initial_knowledge_base = rdf_client.KnowledgeBase(users=[user])
self.request = self.InitializeRequest(initial_knowledge_base)
self.response = rdf_client.User(username="user1", homedir="/home/foo")
knowledge_base = self.GetUpdatedKnowledgeBase()
self.assertLen(knowledge_base.users, 1)
user = knowledge_base.users[0]
self.assertEqual(user.username, "user1")
self.assertEqual(user.homedir, "/home/foo")
def testProvidesMultiple(self):
"""Test provides values are updated from a dictionary."""
provides = ["domain", "current_control_set"]
self.request = self.InitializeRequest(provides=provides)
self.response = rdf_protodict.Dict(
domain="MICROSOFT",
current_control_set="HKEY_LOCAL_MACHINE\\SYSTEM\\ControlSet001",
environ_systemdrive="C:")
knowledge_base = self.GetUpdatedKnowledgeBase()
self.assertEqual(knowledge_base.domain, "MICROSOFT")
self.assertEqual(knowledge_base.current_control_set,
"HKEY_LOCAL_MACHINE\\SYSTEM\\ControlSet001")
self.assertEqual(knowledge_base.environ_systemdrive, "")
def testProvidesSingleValue(self):
"""Test a single provides value is updated from registry data."""
provides = ["code_page"]
self.request = self.InitializeRequest(provides=provides)
self.response = rdf_client_fs.StatEntry(
registry_data=rdf_protodict.DataBlob(string="value1"))
knowledge_base = self.GetUpdatedKnowledgeBase()
self.assertEqual(knowledge_base.code_page, "value1")
def main(argv):
test_lib.main(argv)
if __name__ == "__main__":
app.run(main)
avg_line_length: 40.68883 | max_line_length: 80 | alphanum_fraction: 0.726943
hexsha: 5943ef5f212bc77fbfcf1eeacb37ada6136d107d | size: 1,995 | ext: py | lang: Python
max_stars: path=pytests/bucket_collections/collection_ops_specs/dgm_load.py | repo=bkumaran/TAF | head=27f39eb913fa89b55cdd88ee1c7ef0bb8c094407 | licenses=["Apache-2.0"] | count=9 | 2019-02-19T05:55:00.000Z to 2022-01-20T10:37:28.000Z
max_issues: path=pytests/bucket_collections/collection_ops_specs/dgm_load.py | repo=bkumaran/TAF | head=27f39eb913fa89b55cdd88ee1c7ef0bb8c094407 | licenses=["Apache-2.0"] | count=2 | 2019-02-19T07:28:54.000Z to 2019-06-18T11:22:29.000Z
max_forks: path=pytests/bucket_collections/collection_ops_specs/dgm_load.py | repo=bkumaran/TAF | head=27f39eb913fa89b55cdd88ee1c7ef0bb8c094407 | licenses=["Apache-2.0"] | count=155 | 2018-11-13T14:57:07.000Z to 2022-03-28T11:53:22.000Z
from collections_helper.collections_spec_constants import MetaCrudParams
spec = {
# Scope/Collection ops params
MetaCrudParams.COLLECTIONS_TO_FLUSH: 0,
MetaCrudParams.COLLECTIONS_TO_DROP: 0,
MetaCrudParams.SCOPES_TO_DROP: 0,
MetaCrudParams.SCOPES_TO_ADD_PER_BUCKET: 0,
MetaCrudParams.COLLECTIONS_TO_ADD_FOR_NEW_SCOPES: 0,
MetaCrudParams.COLLECTIONS_TO_ADD_PER_BUCKET: 0,
MetaCrudParams.BUCKET_CONSIDERED_FOR_OPS: "all",
MetaCrudParams.SCOPES_CONSIDERED_FOR_OPS: "all",
MetaCrudParams.COLLECTIONS_CONSIDERED_FOR_OPS: "all",
# Doc loading params
"doc_crud": {
MetaCrudParams.DocCrud.COMMON_DOC_KEY: "test_collections",
MetaCrudParams.DocCrud.CREATE_PERCENTAGE_PER_COLLECTION: 5,
MetaCrudParams.DocCrud.READ_PERCENTAGE_PER_COLLECTION: 0,
MetaCrudParams.DocCrud.UPDATE_PERCENTAGE_PER_COLLECTION: 0,
MetaCrudParams.DocCrud.REPLACE_PERCENTAGE_PER_COLLECTION: 0,
MetaCrudParams.DocCrud.DELETE_PERCENTAGE_PER_COLLECTION: 0,
},
"subdoc_crud": {
MetaCrudParams.SubDocCrud.XATTR_TEST: False,
MetaCrudParams.SubDocCrud.INSERT_PER_COLLECTION: 0,
MetaCrudParams.SubDocCrud.UPSERT_PER_COLLECTION: 0,
MetaCrudParams.SubDocCrud.REMOVE_PER_COLLECTION: 0,
MetaCrudParams.SubDocCrud.LOOKUP_PER_COLLECTION: 0,
},
# Doc_loading task options
MetaCrudParams.DOC_TTL: 0,
MetaCrudParams.DURABILITY_LEVEL: "",
MetaCrudParams.SDK_TIMEOUT: 120, # Default is 60
MetaCrudParams.SDK_TIMEOUT_UNIT: "seconds",
MetaCrudParams.TARGET_VBUCKETS: "all",
MetaCrudParams.SKIP_READ_ON_ERROR: True,
MetaCrudParams.SUPPRESS_ERROR_TABLE: True,
# The below is to skip populating success dictionary for reads
MetaCrudParams.SKIP_READ_SUCCESS_RESULTS: True, # Default is False
MetaCrudParams.COLLECTIONS_CONSIDERED_FOR_CRUD: "all",
MetaCrudParams.SCOPES_CONSIDERED_FOR_CRUD: "all",
MetaCrudParams.BUCKETS_CONSIDERED_FOR_CRUD: "all"
}
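# Illustrative sketch only, not part of the spec: assuming the
# *_PERCENTAGE_PER_COLLECTION values are applied against the current item count
# of each collection (an assumption about how the loader consumes this spec),
# the per-collection doc counts for this file would work out as follows.
def _example_docs_per_op(items_in_collection):
    """Return how many docs the create/read ops would touch for one collection."""
    doc_crud = spec["doc_crud"]
    create_pct = doc_crud[MetaCrudParams.DocCrud.CREATE_PERCENTAGE_PER_COLLECTION]
    read_pct = doc_crud[MetaCrudParams.DocCrud.READ_PERCENTAGE_PER_COLLECTION]
    return {
        "create": items_in_collection * create_pct // 100,
        "read": items_in_collection * read_pct // 100,
    }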
avg_line_length: 38.365385 | max_line_length: 72 | alphanum_fraction: 0.768421
hexsha: 4b1102300ca699753ab61e7fdfe78d2ecc40fa86 | size: 3,185 | ext: py | lang: Python
max_stars: path=env/lib/python3.7/site-packages/masonite/api/controllers/TokenController.py | repo=Kolawole39/masonite-guides-tutorial | head=9a21cc635291a42f0722f69925be1809bb20e01c | licenses=["MIT"] | count=null | dates=null
max_issues: path=env/lib/python3.7/site-packages/masonite/api/controllers/TokenController.py | repo=Kolawole39/masonite-guides-tutorial | head=9a21cc635291a42f0722f69925be1809bb20e01c | licenses=["MIT"] | count=null | dates=null
max_forks: path=env/lib/python3.7/site-packages/masonite/api/controllers/TokenController.py | repo=Kolawole39/masonite-guides-tutorial | head=9a21cc635291a42f0722f69925be1809bb20e01c | licenses=["MIT"] | count=null | dates=null
import jwt
import pendulum
from masonite.auth import Auth, Sign
from masonite.helpers import password as bcrypt_password
from masonite.helpers.misc import random_string
from masonite.request import Request
from ..exceptions import NoApiTokenFound
from config.application import KEY
class TokenController:
"""Placeholder for the authentication model. This is set via the corresponding TokenRoutes function.
This will default to the auth.py authentication class.
"""
__auth__ = None
def __init__(self):
if self.__auth__ is None:
from config import auth
self.__auth__ = auth.AUTH['guards']['api']['model']
def token(self):
return {'token': Sign().sign(random_string(10))}
def jwt(self, request: Request, auth: Auth):
if not request.input('username') or not request.input('password'):
request.status(401)
return {'error': 'missing username or password'}
user = auth.once().login(
request.input('username'),
request.input('password'),
)
if user:
user.__hidden__ = ['password']
payload = {
'issued': str(pendulum.now()),
'expires': str(pendulum.now().add(minutes=5)),
'refresh': str(pendulum.now().add(days=14)),
'scopes': request.input('scopes'),
'user': user.serialize()
}
return {
'token': bytes(jwt.encode(payload, KEY, algorithm='HS256')).decode('utf-8'),
'expires_at': payload['expires'],
'refresh_expires_at': payload['refresh'],
}
return {'error': 'invalid authentication credentials'}
def jwt_refresh(self, request: Request):
try:
token = jwt.decode(self.fetch_token(request),
KEY, algorithms=['HS256'])
except jwt.exceptions.DecodeError:
return {'error': 'invalid JWT token'}
if not pendulum.parse(token['refresh']).is_past():
payload = {
'issued': str(pendulum.now()),
'expires': str(pendulum.now().add(minutes=5)),
'refresh': str(pendulum.now().add(days=14)),
'scopes': token['scopes'],
}
return {
'token': bytes(jwt.encode(payload, KEY, algorithm='HS256')).decode('utf-8'),
'expires_at': payload['expires'],
'refresh_expires_at': payload['refresh'],
}
return {'error': 'the refresh key on the jwt token has expired'}
def fetch_token(self, request):
"""Gets the token from the request object
Raises:
NoApiTokenFound -- Raised if no API token can be located
Returns:
string -- Returns the token as a string
"""
if request.input('token'):
token = request.input('token')
elif request.header('HTTP_AUTHORIZATION'):
token = request.header(
'HTTP_AUTHORIZATION').replace('Basic ', '')
else:
raise NoApiTokenFound
return token
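# Illustrative sketch only (not part of the package): verifying a token issued
# by TokenController.jwt() outside the controller, using the same KEY and
# HS256 algorithm that jwt_refresh() uses above. The function name is a
# placeholder.
def example_verify_jwt(token):
    """Decode a JWT and report whether its refresh window is still open."""
    try:
        payload = jwt.decode(token, KEY, algorithms=['HS256'])
    except jwt.exceptions.DecodeError:
        return {'error': 'invalid JWT token'}
    return {
        'payload': payload,
        'refresh_still_valid': not pendulum.parse(payload['refresh']).is_past(),
    }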
avg_line_length: 32.835052 | max_line_length: 104 | alphanum_fraction: 0.564521
hexsha: 2a3d709d854f76703bc896061f66a2d51a4e1f7a | size: 277 | ext: py | lang: Python
max_stars: path=toontown/safezone/DistributedOZTreasureAI.py | repo=TheFamiliarScoot/open-toontown | head=678313033174ea7d08e5c2823bd7b473701ff547 | licenses=["BSD-3-Clause"] | count=99 | 2019-11-02T22:25:00.000Z to 2022-02-03T03:48:00.000Z
max_issues: path=toontown/safezone/DistributedOZTreasureAI.py | repo=TheFamiliarScoot/open-toontown | head=678313033174ea7d08e5c2823bd7b473701ff547 | licenses=["BSD-3-Clause"] | count=42 | 2019-11-03T05:31:08.000Z to 2022-03-16T22:50:32.000Z
max_forks: path=toontown/safezone/DistributedOZTreasureAI.py | repo=TheFamiliarScoot/open-toontown | head=678313033174ea7d08e5c2823bd7b473701ff547 | licenses=["BSD-3-Clause"] | count=57 | 2019-11-03T07:47:37.000Z to 2022-03-22T00:41:49.000Z
from . import DistributedSZTreasureAI
class DistributedOZTreasureAI(DistributedSZTreasureAI.DistributedSZTreasureAI):
def __init__(self, air, treasurePlanner, x, y, z):
DistributedSZTreasureAI.DistributedSZTreasureAI.__init__(self, air, treasurePlanner, x, y, z)
avg_line_length: 39.571429 | max_line_length: 101 | alphanum_fraction: 0.801444
hexsha: 17185ed1b6e940eb169dc89527637bd47234e51e | size: 936 | ext: py | lang: Python
max_stars: path=constants.py | repo=triplejberger/Queue | head=0d0a62dc8d690abe2ddadc6c425bec36108bae38 | licenses=["MIT"] | count=null | dates=null
max_issues: path=constants.py | repo=triplejberger/Queue | head=0d0a62dc8d690abe2ddadc6c425bec36108bae38 | licenses=["MIT"] | count=null | dates=null
max_forks: path=constants.py | repo=triplejberger/Queue | head=0d0a62dc8d690abe2ddadc6c425bec36108bae38 | licenses=["MIT"] | count=null | dates=null
# General
BUILDINGS = 'buildings'
UNITS = 'units'
RESEARCH = 'tech'
AGE_UP_TIMES = 'age_up_times'
CIV = 'civilization'
PLAYER_ID = 'player_id'
BUILDING_ID = 'building_id'
TECHNOLOGY_ID = 'technology_id'
UNIT_ID = 'unit_id'
DEQUEUE_EVENTS_AT_INITIAL_TC = 'dequeue_event_at_initial_TC'
INITIAL_TC_ID = 'initial_TC_id'
# Buildings
ID_HOUSE = 70
ID_BARRACKS = 12
ID_STABLE = 101
ID_RANGE = 87
ID_MILL = 68
ID_BLACKSMITH = 103
ID_MARKET = 84
ID_CASTLE = 82
ID_DOCK = 45
ID_TC = 621
ID_TOWER = 79
# Technology
ID_LOOM = 22
ID_WHEELBARROW = 213
ID_TOWN_WATCH = 8
ID_DOUBLE_BIT_AXE = 202
ID_HORSE_COLLAR = 14
ID_FEUDAL_AGE = 101
ID_CASTLE_AGE = 102
ID_IMPERIAL_AGE = 103
ID_MAN_AT_ARMS_UPGRADE = 222
# Units
ID_MILITA = 74
ID_ARCHER = 4
ID_SKIRMISHER = 7
ID_SCOUT = 448
ID_KNIGHT = 38
ID_EAGLE = 751
ID_BATTLE_ELEPHANT = 1132
ID_CAMEL = 329
ID_VILLAGER_MALE = 83
ID_VILLAGER_FEMALE = 293
# Special order types
SPECIAL_ORDER_TYPE_DEQUEUE = 4
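# Illustrative sketch only (hypothetical helper, not part of this module): the
# IDs above can be used to translate raw queue/dequeue events into readable
# names, e.g. when tallying what was queued at the initial Town Center.
_EXAMPLE_UNIT_NAMES = {
    ID_VILLAGER_MALE: 'villager',
    ID_VILLAGER_FEMALE: 'villager',
    ID_MILITA: 'militia',
    ID_SCOUT: 'scout',
}
def _example_count_units(unit_ids):
    """Count occurrences of known unit IDs in a list of queued unit IDs."""
    counts = {}
    for unit_id in unit_ids:
        name = _EXAMPLE_UNIT_NAMES.get(unit_id, 'unknown')
        counts[name] = counts.get(name, 0) + 1
    return counts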
avg_line_length: 18.352941 | max_line_length: 60 | alphanum_fraction: 0.775641
hexsha: bd602cd1b4ed580f6e307a2ea3851d40bd3dcbb3 | size: 48,286 | ext: py | lang: Python
max_stars: path=tests/validation/tests/v3_api/test_workload.py | repo=arthurh4/rancher | head=1e7e336de5ee546a8dd8291211af5cff8754f6f7 | licenses=["Apache-2.0"] | count=3 | 2021-01-19T20:20:27.000Z to 2021-01-19T21:35:40.000Z
max_issues: path=tests/validation/tests/v3_api/test_workload.py | repo=arthurh4/rancher | head=1e7e336de5ee546a8dd8291211af5cff8754f6f7 | licenses=["Apache-2.0"] | count=15 | 2021-04-03T21:35:06.000Z to 2022-01-01T00:16:14.000Z
max_forks: path=tests/validation/tests/v3_api/test_workload.py | repo=rcairesoliveira43/rancher | head=5da52c07665ac0950dc23f42227095d3ba429c74 | licenses=["Apache-2.0"] | count=1 | 2022-01-13T09:47:59.000Z to 2022-01-13T09:47:59.000Z
import pytest
from .common import * # NOQA
from rancher import ApiError
namespace = {"p_client": None, "ns": None, "cluster": None, "project": None}
RBAC_ROLES = [CLUSTER_OWNER, PROJECT_MEMBER, PROJECT_OWNER,
PROJECT_READ_ONLY, CLUSTER_MEMBER]
WORKLOAD_TYPES = ["daemonSet", "statefulSet", "cronJob", "job"]
if_check_lb = os.environ.get('RANCHER_CHECK_FOR_LB', "False")
if_check_lb = pytest.mark.skipif(
if_check_lb != "True",
reason='Lb test case skipped')
ENABLE_HOST_NODE_PORT_TESTS = ast.literal_eval(
os.environ.get('RANCHER_ENABLE_HOST_NODE_PORT_TESTS', "True"))
skip_host_node_port = pytest.mark.skipif(
not ENABLE_HOST_NODE_PORT_TESTS,
reason='Tests Skipped for AKS,GKE,EKS Clusters')
def test_wl_sidekick():
p_client = namespace["p_client"]
ns = namespace["ns"]
con = [{"name": "test1",
"image": TEST_IMAGE}]
name = random_test_name("sidekick")
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id)
validate_workload(p_client, workload, "deployment", ns.name)
side_con = {"name": "test2",
"image": TEST_IMAGE_NGINX,
"stdin": True,
"tty": True}
con.append(side_con)
workload = p_client.update(workload,
containers=con)
time.sleep(90)
validate_workload_with_sidekicks(
p_client, workload, "deployment", ns.name)
def test_wl_deployment():
p_client = namespace["p_client"]
ns = namespace["ns"]
con = [{"name": "test1",
"image": TEST_IMAGE}]
name = random_test_name("default")
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id)
validate_workload(p_client, workload, "deployment", ns.name)
def test_wl_statefulset():
p_client = namespace["p_client"]
ns = namespace["ns"]
con = [{"name": "test1",
"image": TEST_IMAGE}]
name = random_test_name("default")
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
statefulSetConfig={}
)
validate_workload(p_client, workload, "statefulSet", ns.name)
def test_wl_daemonset():
p_client = namespace["p_client"]
ns = namespace["ns"]
cluster = namespace["cluster"]
con = [{"name": "test1",
"image": TEST_IMAGE}]
name = random_test_name("default")
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
daemonSetConfig={})
schedulable_node_count = len(get_schedulable_nodes(cluster))
validate_workload(p_client, workload, "daemonSet",
ns.name, schedulable_node_count)
def test_wl_cronjob():
p_client = namespace["p_client"]
ns = namespace["ns"]
con = [{"name": "test1",
"image": TEST_IMAGE}]
name = random_test_name("default")
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
cronJobConfig={
"concurrencyPolicy": "Allow",
"failedJobsHistoryLimit": 10,
"schedule": "*/1 * * * *",
"successfulJobsHistoryLimit": 10})
validate_workload(p_client, workload, "cronJob", ns.name)
def test_wl_upgrade():
p_client = namespace["p_client"]
ns = namespace["ns"]
con = [{"name": "test1",
"image": TEST_IMAGE}]
name = random_test_name("default")
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
scale=2)
wait_for_pods_in_workload(p_client, workload, 2)
validate_workload(p_client, workload, "deployment", ns.name, 2)
revisions = workload.revisions()
assert len(revisions) == 1
for revision in revisions:
if revision["containers"][0]["image"] == TEST_IMAGE:
firstrevision = revision.id
con = [{"name": "test1",
"image": TEST_IMAGE_NGINX}]
p_client.update(workload, containers=con)
wait_for_pod_images(p_client, workload, ns.name, TEST_IMAGE_NGINX, 2)
wait_for_pods_in_workload(p_client, workload, 2)
validate_workload(p_client, workload, "deployment", ns.name, 2)
validate_workload_image(p_client, workload, TEST_IMAGE_NGINX, ns)
revisions = workload.revisions()
assert len(revisions) == 2
for revision in revisions:
if revision["containers"][0]["image"] == TEST_IMAGE_NGINX:
secondrevision = revision.id
con = [{"name": "test1",
"image": TEST_IMAGE_OS_BASE,
"tty": True,
"stdin": True}]
p_client.update(workload, containers=con)
wait_for_pod_images(p_client, workload, ns.name, TEST_IMAGE_OS_BASE, 2)
wait_for_pods_in_workload(p_client, workload, 2)
validate_workload(p_client, workload, "deployment", ns.name, 2)
validate_workload_image(p_client, workload, TEST_IMAGE_OS_BASE, ns)
revisions = workload.revisions()
assert len(revisions) == 3
for revision in revisions:
if revision["containers"][0]["image"] == TEST_IMAGE_OS_BASE:
thirdrevision = revision.id
p_client.action(workload, "rollback", replicaSetId=firstrevision)
wait_for_pod_images(p_client, workload, ns.name, TEST_IMAGE, 2)
wait_for_pods_in_workload(p_client, workload, 2)
validate_workload(p_client, workload, "deployment", ns.name, 2)
validate_workload_image(p_client, workload, TEST_IMAGE, ns)
p_client.action(workload, "rollback", replicaSetId=secondrevision)
wait_for_pod_images(p_client, workload, ns.name, TEST_IMAGE_NGINX, 2)
wait_for_pods_in_workload(p_client, workload, 2)
validate_workload(p_client, workload, "deployment", ns.name, 2)
validate_workload_image(p_client, workload, TEST_IMAGE_NGINX, ns)
p_client.action(workload, "rollback", replicaSetId=thirdrevision)
wait_for_pod_images(p_client, workload, ns.name, TEST_IMAGE_OS_BASE, 2)
wait_for_pods_in_workload(p_client, workload, 2)
validate_workload(p_client, workload, "deployment", ns.name, 2)
validate_workload_image(p_client, workload, TEST_IMAGE_OS_BASE, ns)
def test_wl_pod_scale_up():
p_client = namespace["p_client"]
ns = namespace["ns"]
con = [{"name": "test1",
"image": TEST_IMAGE}]
name = random_test_name("default")
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id)
workload = wait_for_wl_to_active(p_client, workload)
for key, value in workload.workloadLabels.items():
label = key + "=" + value
get_pods = "get pods -l" + label + " -n " + ns.name
allpods = execute_kubectl_cmd(get_pods)
wait_for_pods_in_workload(p_client, workload, 1)
p_client.update(workload, scale=2, containers=con)
validate_workload(p_client, workload, "deployment", ns.name, 2)
validate_pods_are_running_by_id(allpods, workload, ns.name)
for key, value in workload.workloadLabels.items():
label = key + "=" + value
allpods = execute_kubectl_cmd(get_pods)
wait_for_pods_in_workload(p_client, workload, 2)
p_client.update(workload, scale=3, containers=con)
validate_workload(p_client, workload, "deployment", ns.name, 3)
validate_pods_are_running_by_id(allpods, workload, ns.name)
def test_wl_pod_scale_down():
p_client = namespace["p_client"]
ns = namespace["ns"]
con = [{"name": "test1",
"image": TEST_IMAGE}]
name = random_test_name("default")
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
scale=3)
wait_for_wl_to_active(p_client, workload)
wait_for_pods_in_workload(p_client, workload, 3)
p_client.update(workload, scale=2, containers=con)
wait_for_pods_in_workload(p_client, workload, 2)
for key, value in workload.workloadLabels.items():
label = key + "=" + value
get_pods = "get pods -l" + label + " -n " + ns.name
allpods = execute_kubectl_cmd(get_pods)
validate_workload(p_client, workload, "deployment", ns.name, 2)
validate_pods_are_running_by_id(allpods, workload, ns.name)
p_client.update(workload, scale=1, containers=con)
wait_for_pods_in_workload(p_client, workload, 1)
for key, value in workload.workloadLabels.items():
label = key + "=" + value
allpods = execute_kubectl_cmd(get_pods)
validate_workload(p_client, workload, "deployment", ns.name)
validate_pods_are_running_by_id(allpods, workload, ns.name)
def test_wl_pause_orchestration():
p_client = namespace["p_client"]
ns = namespace["ns"]
con = [{"name": "test1",
"image": TEST_IMAGE}]
name = random_test_name("default")
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
scale=2)
workload = wait_for_wl_to_active(p_client, workload)
wait_for_pods_in_workload(p_client, workload, 2)
p_client.action(workload, "pause")
validate_workload_paused(p_client, workload, True)
con = [{"name": "test1",
"image": TEST_IMAGE_NGINX}]
p_client.update(workload, containers=con)
validate_pod_images(TEST_IMAGE, workload, ns.name)
p_client.action(workload, "resume")
workload = wait_for_wl_to_active(p_client, workload)
wait_for_pods_in_workload(p_client, workload, 2)
validate_workload_paused(p_client, workload, False)
validate_pod_images(TEST_IMAGE_NGINX, workload, ns.name)
# Windows does not support host ports for now.
@skip_test_windows_os
@skip_host_node_port
def test_wl_with_hostPort():
p_client = namespace["p_client"]
ns = namespace["ns"]
source_port = 9999
port = {"containerPort": 80,
"type": "containerPort",
"kind": "HostPort",
"protocol": "TCP",
"sourcePort": source_port}
con = [{"name": "test1",
"image": TEST_IMAGE,
"ports": [port]}]
name = random_test_name("default")
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
daemonSetConfig={})
workload = wait_for_wl_to_active(p_client, workload)
validate_hostPort(p_client, workload, source_port, namespace["cluster"])
@skip_host_node_port
def test_wl_with_nodePort():
p_client = namespace["p_client"]
ns = namespace["ns"]
source_port = 30456
port = {"containerPort": 80,
"type": "containerPort",
"kind": "NodePort",
"protocol": "TCP",
"sourcePort": source_port}
con = [{"name": "test1",
"image": TEST_IMAGE,
"ports": [port]}]
name = random_test_name("default")
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
daemonSetConfig={})
workload = wait_for_wl_to_active(p_client, workload)
validate_nodePort(p_client, workload, namespace["cluster"], source_port)
def test_wl_with_clusterIp():
p_client = namespace["p_client"]
ns = namespace["ns"]
source_port = 30458
port = {"containerPort": "80",
"type": "containerPort",
"kind": "ClusterIP",
"protocol": "TCP",
"sourcePort": source_port}
con = [{"name": "test1",
"image": TEST_IMAGE,
"ports": [port]}]
name = random_test_name("default")
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
daemonSetConfig={})
workload = wait_for_wl_to_active(p_client, workload)
# Get cluster Ip
sd_records = p_client.list_dns_record(name=name).data
assert len(sd_records) == 1
cluster_ip = sd_records[0].clusterIp
    # Deploy test pods used for clusterIp resolution check
wlname = random_test_name("testclusterip-client")
con = [{"name": "test1",
"image": TEST_IMAGE}]
workload_for_test = p_client.create_workload(name=wlname,
containers=con,
namespaceId=ns.id,
scale=2)
wait_for_wl_to_active(p_client, workload_for_test)
test_pods = wait_for_pods_in_workload(p_client, workload_for_test, 2)
validate_clusterIp(p_client, workload, cluster_ip, test_pods, source_port)
@if_check_lb
def test_wl_with_lb():
p_client = namespace["p_client"]
ns = namespace["ns"]
source_port = 9001
port = {"containerPort": 80,
"type": "containerPort",
"kind": "LoadBalancer",
"protocol": "TCP",
"sourcePort": source_port}
con = [{"name": "test1",
"image": TEST_IMAGE,
"ports": [port]}]
name = random_test_name("default")
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
daemonSetConfig={})
workload = wait_for_wl_to_active(p_client, workload)
validate_lb(p_client, workload, source_port)
def test_wl_with_clusterIp_scale_and_upgrade():
p_client = namespace["p_client"]
ns = namespace["ns"]
source_port = 30459
port = {"containerPort": "80",
"type": "containerPort",
"kind": "ClusterIP",
"protocol": "TCP",
"sourcePort": source_port}
con = [{"name": "test-cluster-ip",
"image": TEST_IMAGE,
"ports": [port]}]
name = random_test_name("cluster-ip-scale-upgrade")
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
scale=1)
workload = wait_for_wl_to_active(p_client, workload)
wait_for_pods_in_workload(p_client, workload, 1)
sd_records = p_client.list_dns_record(name=name).data
assert len(sd_records) == 1
cluster_ip = sd_records[0].clusterIp
# get test pods
wlname = random_test_name("testclusterip-client")
wl_con = [{"name": "test1", "image": TEST_IMAGE}]
workload_for_test = p_client.create_workload(name=wlname,
containers=wl_con,
namespaceId=ns.id,
scale=2)
wait_for_wl_to_active(p_client, workload_for_test)
test_pods = wait_for_pods_in_workload(p_client, workload_for_test, 2)
validate_clusterIp(p_client, workload, cluster_ip, test_pods, source_port)
# scale up
    p_client.update(workload, scale=3, containers=con)
workload = wait_for_wl_to_active(p_client, workload)
wait_for_pods_in_workload(p_client, workload, 3)
validate_clusterIp(p_client, workload, cluster_ip, test_pods, source_port)
# scale down
p_client.update(workload, scale=2, containers=con)
workload = wait_for_wl_to_active(p_client, workload)
wait_for_pods_in_workload(p_client, workload, 2)
validate_clusterIp(p_client, workload, cluster_ip, test_pods, source_port)
# upgrade
con = [{"name": "test-cluster-ip-upgrade-new",
"image": TEST_IMAGE,
"ports": [port]}]
p_client.update(workload, containers=con)
workload = wait_for_wl_to_active(p_client, workload)
wait_for_pods_in_workload(p_client, workload, 2)
validate_clusterIp(p_client, workload, cluster_ip, test_pods, source_port)
@skip_host_node_port
def test_wl_with_nodePort_scale_and_upgrade():
p_client = namespace["p_client"]
ns = namespace["ns"]
source_port = 30457
port = {"containerPort": 80,
"type": "containerPort",
"kind": "NodePort",
"protocol": "TCP",
"sourcePort": source_port}
con = [{"name": "test1",
"image": TEST_IMAGE,
"ports": [port]}]
name = random_test_name("test-node-port-scale-upgrade")
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
scale=1)
workload = wait_for_wl_to_active(p_client, workload)
wait_for_pods_in_workload(p_client, workload, 1)
validate_nodePort(p_client, workload, namespace["cluster"], source_port)
# scale up
p_client.update(workload, scale=3, containers=con)
workload = wait_for_wl_to_active(p_client, workload)
wait_for_pods_in_workload(p_client, workload, 3)
validate_nodePort(p_client, workload, namespace["cluster"], source_port)
# scale down
p_client.update(workload, scale=2, containers=con)
workload = wait_for_wl_to_active(p_client, workload)
wait_for_pods_in_workload(p_client, workload, 2)
validate_nodePort(p_client, workload, namespace["cluster"], source_port)
# upgrade
con = [{"name": "test-node-port-scale-upgrade-new",
"image": TEST_IMAGE,
"ports": [port]}]
p_client.update(workload, containers=con)
workload = wait_for_wl_to_active(p_client, workload)
wait_for_pods_in_workload(p_client, workload, 2)
validate_nodePort(p_client, workload, namespace["cluster"], source_port)
# Windows does not support host ports for now.
@skip_test_windows_os
@skip_host_node_port
def test_wl_with_hostPort_scale_and_upgrade():
p_client = namespace["p_client"]
ns = namespace["ns"]
source_port = 8888
port = {"containerPort": 80,
"type": "containerPort",
"kind": "HostPort",
"protocol": "TCP",
"sourcePort": source_port}
con = [{"name": "test-host-port-upgrade",
"image": TEST_IMAGE,
"ports": [port]}]
name = random_test_name("hostport-scale")
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
scale=1)
workload = wait_for_wl_to_active(p_client, workload)
wait_for_pods_in_workload(p_client, workload, 1)
validate_hostPort(p_client, workload, source_port, namespace["cluster"])
# scale up
p_client.update(workload, scale=2, containers=con)
workload = wait_for_wl_to_active(p_client, workload)
wait_for_pods_in_workload(p_client, workload, 2)
validate_hostPort(p_client, workload, source_port, namespace["cluster"])
# scale down
p_client.update(workload, scale=1, containers=con)
workload = wait_for_wl_to_active(p_client, workload)
wait_for_pods_in_workload(p_client, workload, 1)
validate_hostPort(p_client, workload, source_port, namespace["cluster"])
    # From observation, it is necessary to wait until the number of pods
    # equals the expected number, since the workload's state is 'active' but
    # the pods are not ready yet, especially after scaling down and upgrading.
    # (A generic sketch of this wait pattern follows this test.)
# upgrade
con = [{"name": "test-host-port-upgrade-new",
"image": TEST_IMAGE,
"ports": [port]}]
p_client.update(workload, containers=con)
workload = wait_for_wl_to_active(p_client, workload)
wait_for_pods_in_workload(p_client, workload, 1)
validate_hostPort(p_client, workload, source_port, namespace["cluster"])
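# Generic sketch (not part of this suite) of the wait-until-expected-count
# pattern referenced in the comment inside the test above; the real helper used
# throughout is wait_for_pods_in_workload from .common, whose implementation
# may differ.
def _example_wait_for_count(get_count, expected, timeout=300, interval=5):
    """Poll get_count() until it returns `expected` or the timeout elapses."""
    start = time.time()
    while time.time() - start < timeout:
        if get_count() == expected:
            return True
        time.sleep(interval)
    return False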
@if_check_lb
def test_wl_with_lb_scale_and_upgrade():
p_client = namespace["p_client"]
ns = namespace["ns"]
source_port = 9001
port = {"containerPort": 80,
"type": "containerPort",
"kind": "LoadBalancer",
"protocol": "TCP",
"sourcePort": source_port}
con = [{"name": "test1",
"image": TEST_IMAGE,
"ports": [port]}]
name = random_test_name("lb-scale-upgrade")
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
scale=1)
workload = wait_for_wl_to_active(p_client, workload)
wait_for_pods_in_workload(p_client, workload, 1)
validate_lb(p_client, workload, source_port)
# scale up
p_client.update(workload, scale=3, containers=con)
workload = wait_for_wl_to_active(p_client, workload)
wait_for_pods_in_workload(p_client, workload, 3)
validate_lb(p_client, workload, source_port)
# scale down
p_client.update(workload, scale=2, containers=con)
workload = wait_for_wl_to_active(p_client, workload)
wait_for_pods_in_workload(p_client, workload, 2)
validate_lb(p_client, workload, source_port)
# upgrade
con = [{"name": "test-load-balance-upgrade-new",
"image": TEST_IMAGE,
"ports": [port]}]
p_client.update(workload, containers=con)
workload = wait_for_wl_to_active(p_client, workload)
wait_for_pods_in_workload(p_client, workload, 2)
validate_lb(p_client, workload, source_port)
# --------------------- rbac tests for cluster owner -----------------------
@if_test_rbac
def test_rbac_cluster_owner_wl_create(remove_resource):
# cluster owner can create project and deploy workload in it
p_client, project, ns, workload = setup_project_by_role(CLUSTER_OWNER,
remove_resource)
validate_workload(p_client, workload, "deployment", ns.name)
@if_test_rbac
def test_rbac_cluster_owner_wl_create_2(remove_resource):
# cluster owner can deploy workload in any project in the cluster
user_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
p2 = rbac_get_unshared_project()
p_client2 = get_project_client_for_token(p2, user_token)
ns2 = rbac_get_unshared_ns()
name = random_test_name("default")
con = [{"name": "test1",
"image": TEST_IMAGE}]
wl = p_client2.create_workload(name=name, containers=con,
namespaceId=ns2.id)
validate_workload(p_client2, wl, "deployment", ns2.name)
remove_resource(wl)
@if_test_rbac
def test_rbac_cluster_owner_wl_edit(remove_resource):
p_client, project, ns, workload = setup_project_by_role(CLUSTER_OWNER,
remove_resource)
validate_workload(p_client, workload, "deployment", ns.name)
# cluster owner can edit workload in the project
p_client.update(workload, scale=2)
validate_workload(p_client, workload, "deployment", ns.name, 2)
con = [{"name": "test1",
"image": "nginx"}]
p_client.update(workload, containers=con)
validate_workload(p_client, workload, "deployment", ns.name, 2)
validate_workload_image(p_client, workload, "nginx", ns)
@if_test_rbac
def test_rbac_cluster_owner_wl_delete(remove_resource):
p_client, project, ns, workload = setup_project_by_role(CLUSTER_OWNER,
remove_resource)
validate_workload(p_client, workload, "deployment", ns.name)
# cluster owner can delete workload in the project
p_client.delete(workload)
assert len(p_client.list_workload(uuid=workload.uuid).data) == 0
# --------------------- rbac tests for cluster member -----------------------
@if_test_rbac
def test_rbac_cluster_member_wl_create(remove_resource):
# cluster member can create project and deploy workload in it
p_client, project, ns, workload = setup_project_by_role(CLUSTER_MEMBER,
remove_resource)
validate_workload(p_client, workload, "deployment", ns.name)
@if_test_rbac
def test_rbac_cluster_member_wl_create_2():
user_token = rbac_get_user_token_by_role(CLUSTER_MEMBER)
name = random_test_name("default")
con = [{"name": "test1", "image": TEST_IMAGE}]
    # cluster member can NOT deploy workload in a project they cannot access
with pytest.raises(ApiError) as e:
p2 = rbac_get_unshared_project()
ns2 = rbac_get_unshared_ns()
new_p_client = get_project_client_for_token(p2, user_token)
new_p_client.create_workload(name=name, containers=con,
namespaceId=ns2.id)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
@if_test_rbac
def test_rbac_cluster_member_wl_edit(remove_resource):
p_client, project, ns, workload = setup_project_by_role(CLUSTER_MEMBER,
remove_resource)
validate_workload(p_client, workload, "deployment", ns.name)
# cluster member can edit workload in the project
p_client.update(workload, scale=2)
validate_workload(p_client, workload, "deployment", ns.name, 2)
con = [{"name": "test1", "image": "nginx"}]
p_client.update(workload, containers=con)
validate_workload(p_client, workload, "deployment", ns.name, 2)
validate_workload_image(p_client, workload, "nginx", ns)
@if_test_rbac
def test_rbac_cluster_member_wl_delete(remove_resource):
p_client, project, ns, workload = setup_project_by_role(CLUSTER_MEMBER,
remove_resource)
validate_workload(p_client, workload, "deployment", ns.name)
# cluster member can delete workload in the project
p_client.delete(workload)
assert len(p_client.list_workload(uuid=workload.uuid).data) == 0
# --------------------- rbac tests for project member -----------------------
@if_test_rbac
def test_rbac_project_member_wl_create(remove_resource):
# project member can deploy workload in his project
p_client, project, ns, workload = setup_project_by_role(PROJECT_MEMBER,
remove_resource)
validate_workload(p_client, workload, "deployment", ns.name)
@if_test_rbac
def test_rbac_project_member_wl_create_2():
    # project member can NOT deploy workload in a project they cannot access
user_token = rbac_get_user_token_by_role(PROJECT_MEMBER)
name = random_test_name("default")
con = [{"name": "test1", "image": TEST_IMAGE}]
with pytest.raises(ApiError) as e:
p2 = rbac_get_unshared_project()
ns2 = rbac_get_unshared_ns()
new_p_client = get_project_client_for_token(p2, user_token)
new_p_client.create_workload(name=name, containers=con,
namespaceId=ns2.id)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
@if_test_rbac
def test_rbac_project_member_wl_edit(remove_resource):
p_client, project, ns, workload = setup_project_by_role(PROJECT_MEMBER,
remove_resource)
validate_workload(p_client, workload, "deployment", ns.name)
# project member can edit workload in the project
p_client.update(workload, scale=2)
validate_workload(p_client, workload, "deployment", ns.name, 2)
con = [{"name": "test1", "image": "nginx"}]
p_client.update(workload, containers=con)
validate_workload(p_client, workload, "deployment", ns.name, 2)
validate_workload_image(p_client, workload, "nginx", ns)
@if_test_rbac
def test_rbac_project_member_wl_delete(remove_resource):
p_client, project, ns, workload = setup_project_by_role(PROJECT_MEMBER,
remove_resource)
validate_workload(p_client, workload, "deployment", ns.name)
# project member can delete workload in the project
p_client.delete(workload)
assert len(p_client.list_workload(uuid=workload.uuid).data) == 0
# --------------------- rbac tests for project owner -----------------------
@if_test_rbac
def test_rbac_project_owner_wl_create(remove_resource):
# project owner can deploy workload in his project
p_client, project, ns, workload = setup_project_by_role(PROJECT_OWNER,
remove_resource)
validate_workload(p_client, workload, "deployment", ns.name)
@if_test_rbac
def test_rbac_project_owner_wl_create_2():
    # project owner can NOT deploy workload in a project they cannot access
user_token = rbac_get_user_token_by_role(PROJECT_OWNER)
name = random_test_name("default")
con = [{"name": "test1", "image": TEST_IMAGE}]
with pytest.raises(ApiError) as e:
p2 = rbac_get_unshared_project()
ns2 = rbac_get_unshared_ns()
new_p_client = get_project_client_for_token(p2, user_token)
new_p_client.create_workload(name=name, containers=con,
namespaceId=ns2.id)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
@if_test_rbac
def test_rbac_project_owner_wl_edit(remove_resource):
p_client, project, ns, workload = setup_project_by_role(PROJECT_OWNER,
remove_resource)
validate_workload(p_client, workload, "deployment", ns.name)
# project owner can edit workload in his project
p_client.update(workload, scale=2)
validate_workload(p_client, workload, "deployment", ns.name, 2)
con = [{"name": "test1", "image": "nginx"}]
p_client.update(workload, containers=con)
validate_workload(p_client, workload, "deployment", ns.name, 2)
validate_workload_image(p_client, workload, "nginx", ns)
@if_test_rbac
def test_rbac_project_owner_wl_delete(remove_resource):
p_client, project, ns, workload = setup_project_by_role(PROJECT_OWNER,
remove_resource)
validate_workload(p_client, workload, "deployment", ns.name)
# project owner can delete workload in his project
p_client.delete(workload)
assert len(p_client.list_workload(uuid=workload.uuid).data) == 0
# --------------------- rbac tests for project read-only --------------------
@if_test_rbac
def test_rbac_project_read_only_wl_create():
# project read-only can NOT deploy workloads in the project
project = rbac_get_project()
user_token = rbac_get_user_token_by_role(PROJECT_READ_ONLY)
p_client = get_project_client_for_token(project, user_token)
ns = rbac_get_namespace()
con = [{"name": "test1", "image": TEST_IMAGE}]
name = random_test_name("default")
with pytest.raises(ApiError) as e:
p_client.create_workload(name=name, containers=con,
namespaceId=ns.id)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
@if_test_rbac
def test_rbac_project_read_only_wl_edit(remove_resource):
project = rbac_get_project()
user_token = rbac_get_user_token_by_role(PROJECT_READ_ONLY)
p_client = get_project_client_for_token(project, user_token)
# deploy a workload as cluster owner
cluster_owner_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
cluster_owner_p_client = get_project_client_for_token(project,
cluster_owner_token)
ns = rbac_get_namespace()
con = [{"name": "test1", "image": TEST_IMAGE}]
name = random_test_name("default")
workload = cluster_owner_p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id)
# project read-only can NOT edit existing workload
with pytest.raises(ApiError) as e:
p_client.update(workload, scale=2)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
remove_resource(workload)
@if_test_rbac
def test_rbac_project_read_only_wl_list():
# project read-only can NOT see workloads in the project he has no access to
p2 = rbac_get_unshared_project()
user_token = rbac_get_user_token_by_role(PROJECT_READ_ONLY)
p_client = get_project_client_for_token(p2, user_token)
workloads = p_client.list_workload().data
assert len(workloads) == 0
@pytest.fixture(scope='module', autouse="True")
def create_project_client(request):
client, cluster = get_user_client_and_cluster()
create_kubeconfig(cluster)
p, ns = create_project_and_ns(
USER_TOKEN, cluster, random_test_name("testworkload"))
p_client = get_project_client_for_token(p, USER_TOKEN)
namespace["p_client"] = p_client
namespace["ns"] = ns
namespace["cluster"] = cluster
namespace["project"] = p
def fin():
client = get_user_client()
client.delete(namespace["project"])
request.addfinalizer(fin)
def setup_project_by_role(role, remove_resource):
""" set up a project for a specific role used for rbac testing
- for cluster owner or cluster member:
it creates a project and namespace, then deploys a workload
- for project owner or project member:
it deploys a workload to the existing project and namespace
"""
user_token = rbac_get_user_token_by_role(role)
con = [{"name": "test1", "image": TEST_IMAGE}]
name = random_test_name("default")
if role in [CLUSTER_OWNER, CLUSTER_MEMBER]:
project, ns = create_project_and_ns(user_token, namespace["cluster"],
random_test_name("test-rbac"))
p_client = get_project_client_for_token(project, user_token)
workload = p_client.create_workload(name=name, containers=con,
namespaceId=ns.id)
remove_resource(project)
remove_resource(ns)
remove_resource(workload)
return p_client, project, ns, workload
elif role in [PROJECT_OWNER, PROJECT_MEMBER]:
project = rbac_get_project()
ns = rbac_get_namespace()
p_client = get_project_client_for_token(project, user_token)
workload = p_client.create_workload(name=name, containers=con,
namespaceId=ns.id)
remove_resource(workload)
return p_client, project, ns, workload
else:
return None, None, None, None
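# Illustrative usage (hypothetical call, mirroring the rbac tests above):
#   p_client, project, ns, workload = setup_project_by_role(PROJECT_MEMBER,
#                                                           remove_resource)
# For cluster roles a fresh project/namespace is created and registered for
# cleanup; for unknown roles the helper returns (None, None, None, None).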
# --------------------- rbac tests by workload types -----------------------
@if_test_rbac
@pytest.mark.parametrize("role", RBAC_ROLES)
@pytest.mark.parametrize("config", WORKLOAD_TYPES)
def test_rbac_wl_parametrize_create(role, config, remove_resource):
p_client, project, ns = setup_wl_project_by_role(role)
cluster = namespace["cluster"]
con = [{"name": "test1", "image": TEST_IMAGE}]
name = random_test_name("default")
if role != PROJECT_READ_ONLY:
workload = create_workload_by_type(p_client, name, con, ns, config)
wait_for_wl_to_active(p_client, workload)
remove_resource(workload)
if role == CLUSTER_MEMBER:
remove_resource(project)
return None
else:
with pytest.raises(ApiError) as e:
workload = create_workload_by_type(p_client, name, con, ns, config)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
@if_test_rbac
@pytest.mark.parametrize("role", RBAC_ROLES)
@pytest.mark.parametrize("config", WORKLOAD_TYPES)
def test_rbac_wl_parametrize_create_negative(role, remove_resource, config):
if role == CLUSTER_OWNER:
# cluster owner can deploy workloads in any project in the cluster
user_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
p2 = rbac_get_unshared_project()
p_client2 = get_project_client_for_token(p2, user_token)
ns2 = rbac_get_unshared_ns()
name = random_test_name("default")
con = [{"name": "test1", "image": TEST_IMAGE}]
wl = create_workload_by_type(p_client2, name, con, ns2, config)
wait_for_wl_to_active(p_client2, wl)
remove_resource(wl)
else:
# roles cannot deploy workloads in projects they cannot access
user_token = rbac_get_user_token_by_role(role)
name = random_test_name("default")
con = [{"name": "test1", "image": TEST_IMAGE}]
with pytest.raises(ApiError) as e:
p2 = rbac_get_unshared_project()
ns2 = rbac_get_unshared_ns()
new_p_client = get_project_client_for_token(p2, user_token)
workload = create_workload_by_type(new_p_client, name, con, ns2, config)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
@if_test_rbac
@pytest.mark.parametrize("role", RBAC_ROLES)
@pytest.mark.parametrize("config", WORKLOAD_TYPES)
def test_rbac_wl_parametrize_list(role, remove_resource, config):
if role == CLUSTER_MEMBER:
p_client, project, ns = setup_wl_project_by_role(role)
else:
p_client, project, ns = setup_wl_project_by_role(CLUSTER_OWNER)
con = [{"name": "test1", "image": TEST_IMAGE}]
name = random_test_name("default")
workload = create_workload_by_type(p_client, name, con, ns, config)
wait_for_wl_to_active(p_client, workload)
# switch to rbac role
user_token = rbac_get_user_token_by_role(role)
p_client_rbac = get_project_client_for_token(project, user_token)
assert len(p_client_rbac.list_workload(uuid=workload.uuid).data) == 1
remove_resource(workload)
@if_test_rbac
@pytest.mark.parametrize("role", RBAC_ROLES)
@pytest.mark.parametrize("config", WORKLOAD_TYPES)
def test_rbac_wl_parametrize_list_negative(role, remove_resource, config):
unshared_project = rbac_get_unshared_project()
ns = rbac_get_unshared_ns()
user_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
p_client = get_project_client_for_token(unshared_project, user_token)
con = [{"name": "test1", "image": TEST_IMAGE}]
name = random_test_name("default")
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
daemonSetConfig={})
wait_for_wl_to_active(p_client, workload)
# switch to rbac role
user_token = rbac_get_user_token_by_role(role)
p_client_rbac = get_project_client_for_token(unshared_project, user_token)
if role != CLUSTER_OWNER:
assert len(p_client_rbac.list_workload(uuid=workload.uuid).data) == 0
else:
assert len(p_client_rbac.list_workload(uuid=workload.uuid).data) == 1
remove_resource(workload)
@if_test_rbac
@pytest.mark.parametrize("role", RBAC_ROLES)
@pytest.mark.parametrize("config", WORKLOAD_TYPES)
def test_rbac_wl_parametrize_update(role, remove_resource, config):
# workloads of type job cannot be edited
if config == "job":
return
p_client, project, ns = setup_wl_project_by_role(role)
con = [{"name": "test1", "image": TEST_IMAGE}]
name = random_test_name("default")
if role != PROJECT_READ_ONLY:
workload = create_workload_by_type(p_client, name, con, ns, config)
wait_for_wl_to_active(p_client, workload)
con = [{"name": "test1", "image": os.environ.get('RANCHER_TEST_IMAGE',
"nginx")}]
p_client.update(workload, containers=con)
remove_resource(workload)
if role == CLUSTER_MEMBER:
remove_resource(project)
else:
user_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
p_client = get_project_client_for_token(project, user_token)
ns = rbac_get_namespace()
workload = create_workload_by_type(p_client, name, con, ns, config)
wait_for_wl_to_active(p_client, workload)
with pytest.raises(ApiError) as e:
user_token = rbac_get_user_token_by_role(role)
p_client = get_project_client_for_token(project, user_token)
con = [{"name": "test1", "image": os.environ.get('RANCHER_TEST_IMAGE',
"nginx")}]
p_client.update(workload, containers=con)
wait_for_pods_in_workload(p_client, workload)
validate_workload(p_client, workload, config, ns.name)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
remove_resource(workload)
@if_test_rbac
@pytest.mark.parametrize("role", RBAC_ROLES)
@pytest.mark.parametrize("config", WORKLOAD_TYPES)
def test_rbac_wl_parametrize_update_negative(role, remove_resource, config):
# workloads of type job cannot be edited
if config == "job":
return
if role == CLUSTER_OWNER:
# cluster owner can edit workloads in any project in the cluster
user_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
p_client, project, ns = setup_wl_project_by_role(role)
name = random_test_name("default")
con = [{"name": "test1", "image": TEST_IMAGE}]
workload = create_workload_by_type(p_client, name, con, ns, config)
wait_for_wl_to_active(p_client, workload)
con = [{"name": "test1", "image": "nginx"}]
p_client.update(workload, containers=con)
remove_resource(workload)
else:
project2 = rbac_get_unshared_project()
user_token = rbac_get_user_token_by_role(role)
# roles cannot edit workloads in projects they cannot access
# deploy a workload as cluster owner
cluster_owner_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
cluster_owner_p_client = get_project_client_for_token(
project2, cluster_owner_token)
ns = rbac_get_unshared_ns()
con = [{"name": "test1", "image": TEST_IMAGE}]
name = random_test_name("default")
workload = create_workload_by_type(cluster_owner_p_client,
name, con, ns, config)
with pytest.raises(ApiError) as e:
p_client = get_project_client_for_token(project2, user_token)
con = [{"name": "test1", "image": "nginx"}]
p_client.update(workload, containers=con)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
remove_resource(workload)
@if_test_rbac
@pytest.mark.parametrize("role", RBAC_ROLES)
@pytest.mark.parametrize("config", WORKLOAD_TYPES)
def test_rbac_wl_parametrize_delete(role, remove_resource, config):
p_client, project, ns = setup_wl_project_by_role(role)
con = [{"name": "test1", "image": TEST_IMAGE}]
name = random_test_name("default")
if role != PROJECT_READ_ONLY:
workload = create_workload_by_type(p_client, name, con, ns, config)
wait_for_wl_to_active(p_client, workload)
p_client.delete(workload)
assert len(p_client.list_workload(uuid=workload.uuid).data) == 0
remove_resource(workload)
else:
user_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
p_client = get_project_client_for_token(project, user_token)
ns = rbac_get_namespace()
workload = create_workload_by_type(p_client, name, con, ns, config)
wait_for_wl_to_active(p_client, workload)
user_token = rbac_get_user_token_by_role(role)
p_client = get_project_client_for_token(project, user_token)
with pytest.raises(ApiError) as e:
p_client.delete(workload)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
remove_resource(workload)
if role == CLUSTER_MEMBER:
remove_resource(project)
@if_test_rbac
@pytest.mark.parametrize("role", RBAC_ROLES)
@pytest.mark.parametrize("config", WORKLOAD_TYPES)
def test_rbac_wl_parametrize_delete_negative(role, remove_resource, config):
if role == CLUSTER_OWNER:
# cluster owner can delete workloads in any project in the cluster
user_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
project = rbac_get_unshared_project()
p_client = get_project_client_for_token(project, user_token)
ns = rbac_get_namespace()
name = random_test_name("default")
con = [{"name": "test1", "image": TEST_IMAGE}]
workload = create_workload_by_type(p_client, name, con, ns, config)
p_client.delete(workload)
else:
project = rbac_get_unshared_project()
user_token = rbac_get_user_token_by_role(role)
# roles cannot delete workloads in projects they cannot access
# deploy a workload as cluster owner
cluster_owner_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
cluster_owner_p_client = get_project_client_for_token(
project, cluster_owner_token)
ns = rbac_get_unshared_ns()
con = [{"name": "test1", "image": TEST_IMAGE}]
name = random_test_name("default")
workload = create_workload_by_type(cluster_owner_p_client,
name, con, ns, config)
p_client = get_project_client_for_token(project, user_token)
with pytest.raises(ApiError) as e:
p_client.delete(workload)
assert e.value.error.status == 403
assert e.value.error.code == 'Forbidden'
remove_resource(workload)
def setup_wl_project_by_role(role):
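""" set up a project client for a specific role used for rbac testing
- for cluster member: a new project and namespace are created
- for all other roles: the shared rbac project and namespace are reused
returns (p_client, project, ns)
"""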
if role == CLUSTER_MEMBER:
user_token = rbac_get_user_token_by_role(role)
project, ns = create_project_and_ns(user_token, namespace["cluster"],
random_test_name("test-rbac"))
p_client = get_project_client_for_token(project, user_token)
return p_client, project, ns
else:
project = rbac_get_project()
user_token = rbac_get_user_token_by_role(role)
p_client = get_project_client_for_token(project, user_token)
ns = rbac_get_namespace()
return p_client, project, ns
def create_workload_by_type(client, name, con, ns, config):
if config == "daemonSet":
return client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
daemonSetConfig={})
elif config == "statefulSet":
return client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
statefulSetConfig={})
elif config == "cronJob":
return client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
cronJobConfig={
"concurrencyPolicy": "Allow",
"failedJobsHistoryLimit": 10,
"schedule": "*/1 * * * *",
"successfulJobsHistoryLimit": 10})
elif config == "job":
return client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
jobConfig={})
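# Illustrative call (hypothetical values):
#   create_workload_by_type(p_client, name, con, ns, "cronJob")
# creates a workload scheduled every minute via the cronJobConfig above;
# config values not listed here fall through and return None.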
| 42.244969
| 84
| 0.633579
|
e527eb09a7a12e5107bf7c394f67efac898c8cce
| 5,260
|
py
|
Python
|
tests/test_imagenet_format.py
|
IRDonch/datumaro
|
d029e67549b7359c887bd15039997bd8bbae7c0c
|
[
"MIT"
] | 237
|
2020-09-07T14:29:26.000Z
|
2022-03-31T12:53:48.000Z
|
tests/test_imagenet_format.py
|
IRDonch/datumaro
|
d029e67549b7359c887bd15039997bd8bbae7c0c
|
[
"MIT"
] | 330
|
2020-09-09T21:27:29.000Z
|
2022-03-31T12:36:18.000Z
|
tests/test_imagenet_format.py
|
IRDonch/datumaro
|
d029e67549b7359c887bd15039997bd8bbae7c0c
|
[
"MIT"
] | 63
|
2020-09-09T07:44:28.000Z
|
2022-03-17T16:07:26.000Z
|
from unittest import TestCase
import os.path as osp
import numpy as np
from datumaro.components.annotation import (
AnnotationType, Label, LabelCategories,
)
from datumaro.components.dataset import Dataset
from datumaro.components.extractor import DatasetItem
from datumaro.plugins.imagenet_format import ImagenetConverter, ImagenetImporter
from datumaro.util.image import Image
from datumaro.util.test_utils import TestDir, compare_datasets
from .requirements import Requirements, mark_requirement
class ImagenetFormatTest(TestCase):
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_save_and_load(self):
source_dataset = Dataset.from_iterable([
DatasetItem(id='label_0/1',
image=np.ones((8, 8, 3)),
annotations=[Label(0)]
),
DatasetItem(id='label_1/2',
image=np.ones((10, 10, 3)),
annotations=[Label(1)]
),
], categories={
AnnotationType.label: LabelCategories.from_iterable(
'label_' + str(label) for label in range(2)),
})
with TestDir() as test_dir:
ImagenetConverter.convert(source_dataset, test_dir, save_images=True)
parsed_dataset = Dataset.import_from(test_dir, 'imagenet')
compare_datasets(self, source_dataset, parsed_dataset,
require_images=True)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_save_and_load_with_multiple_labels(self):
source_dataset = Dataset.from_iterable([
DatasetItem(id='1',
image=np.ones((8, 8, 3)),
annotations=[Label(0), Label(1)]
),
DatasetItem(id='2',
image=np.ones((8, 8, 3))
),
], categories={
AnnotationType.label: LabelCategories.from_iterable(
'label_' + str(label) for label in range(2)),
})
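# The ImageNet layout stores an item once per assigned label directory (and
# under no_label/ when it has no annotations), so the multi-label item above
# is expected to expand into several single-label items: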
expected_dataset = Dataset.from_iterable([
DatasetItem(id='label_0/1',
image=np.ones((8, 8, 3)),
annotations=[Label(0)]
),
DatasetItem(id='label_1/1',
image=np.ones((8, 8, 3)),
annotations=[Label(1)]
),
DatasetItem(id='no_label/2',
image=np.ones((8, 8, 3))
),
], categories=['label_0', 'label_1'])
with TestDir() as test_dir:
ImagenetConverter.convert(source_dataset, test_dir, save_images=True)
parsed_dataset = Dataset.import_from(test_dir, 'imagenet')
compare_datasets(self, expected_dataset, parsed_dataset,
require_images=True)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_save_dataset_with_cyrillic_and_spaces_in_filename(self):
source_dataset = Dataset.from_iterable([
DatasetItem(id="label_0/кириллица с пробелом",
image=np.ones((8, 8, 3)),
annotations=[Label(0)]
),
], categories=['label_0'])
with TestDir() as test_dir:
ImagenetConverter.convert(source_dataset, test_dir, save_images=True)
parsed_dataset = Dataset.import_from(test_dir, 'imagenet')
compare_datasets(self, source_dataset, parsed_dataset,
require_images=True)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_save_and_load_image_with_arbitrary_extension(self):
dataset = Dataset.from_iterable([
DatasetItem(id='no_label/a', image=Image(path='a.JPEG',
data=np.zeros((4, 3, 3)))),
DatasetItem(id='no_label/b', image=Image(path='b.bmp',
data=np.zeros((3, 4, 3)))),
], categories=[])
with TestDir() as test_dir:
ImagenetConverter.convert(dataset, test_dir, save_images=True)
parsed_dataset = Dataset.import_from(test_dir, 'imagenet')
compare_datasets(self, dataset, parsed_dataset,
require_images=True)
DUMMY_DATASET_DIR = osp.join(osp.dirname(__file__), 'assets', 'imagenet_dataset')
class ImagenetImporterTest(TestCase):
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_import(self):
expected_dataset = Dataset.from_iterable([
DatasetItem(id='label_0/label_0_1',
image=np.ones((8, 8, 3)),
annotations=[Label(0)]
),
DatasetItem(id='label_0/label_0_2',
image=np.ones((10, 10, 3)),
annotations=[Label(0)]
),
DatasetItem(id='label_1/label_1_1',
image=np.ones((8, 8, 3)),
annotations=[Label(1)]
)
], categories={
AnnotationType.label: LabelCategories.from_iterable(
'label_' + str(label) for label in range(2)),
})
dataset = Dataset.import_from(DUMMY_DATASET_DIR, 'imagenet')
compare_datasets(self, expected_dataset, dataset, require_images=True)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_detect_imagenet(self):
self.assertTrue(ImagenetImporter.detect(DUMMY_DATASET_DIR))
| 36.527778
| 81
| 0.611977
|
cd7bef90235e07665b34d64e798dffdb7699cc0f
| 22,177
|
py
|
Python
|
raiden/tests/conftest.py
|
ExchangeUnion/raiden
|
2217bcb698fcfce3499dc1f41ad919ed82e8e45f
|
[
"MIT"
] | null | null | null |
raiden/tests/conftest.py
|
ExchangeUnion/raiden
|
2217bcb698fcfce3499dc1f41ad919ed82e8e45f
|
[
"MIT"
] | 12
|
2019-08-09T19:12:17.000Z
|
2019-12-05T15:49:29.000Z
|
raiden/tests/conftest.py
|
ExchangeUnion/raiden
|
2217bcb698fcfce3499dc1f41ad919ed82e8e45f
|
[
"MIT"
] | null | null | null |
# pylint: disable=wrong-import-position,redefined-outer-name,unused-wildcard-import,wildcard-import
from gevent import monkey # isort:skip # noqa
monkey.patch_all(subprocess=False, thread=False) # isort:skip # noqa
import signal # isort:skip # noqa
import pytest # isort:skip
# Execute these before the other imports because rewrites can't work after the
# module has been imported.
pytest.register_assert_rewrite("raiden.tests.utils.eth_node") # isort:skip
pytest.register_assert_rewrite("raiden.tests.utils.factories") # isort:skip
pytest.register_assert_rewrite("raiden.tests.utils.messages") # isort:skip
pytest.register_assert_rewrite("raiden.tests.utils.network") # isort:skip
pytest.register_assert_rewrite("raiden.tests.utils.protocol") # isort:skip
pytest.register_assert_rewrite("raiden.tests.utils.smartcontracts") # isort:skip
pytest.register_assert_rewrite("raiden.tests.utils.smoketest") # isort:skip
pytest.register_assert_rewrite("raiden.tests.utils.transfer") # isort:skip
import contextlib
import datetime
import os
import re
import subprocess
import sys
import tempfile
import time
from pathlib import Path
import gevent
import structlog
from _pytest.pathlib import LOCK_TIMEOUT, ensure_reset_dir, make_numbered_dir_with_cleanup
from _pytest.tmpdir import get_user
from raiden.constants import (
HIGHEST_SUPPORTED_GETH_VERSION,
HIGHEST_SUPPORTED_PARITY_VERSION,
LOWEST_SUPPORTED_GETH_VERSION,
LOWEST_SUPPORTED_PARITY_VERSION,
EthClient,
)
from raiden.log_config import configure_logging
from raiden.tests.fixtures.blockchain import * # noqa: F401,F403
from raiden.tests.fixtures.variables import * # noqa: F401,F403
from raiden.tests.utils.transport import make_requests_insecure
from raiden.utils.cli import LogLevelConfigType
from raiden.utils.debugging import enable_gevent_monitoring_signal
from raiden.utils.ethereum_clients import is_supported_client
log = structlog.get_logger()
def pytest_addoption(parser):
parser.addoption(
"--blockchain-type", choices=[client.value for client in EthClient], default="geth"
)
parser.addoption(
"--log-config", action="store", default=None, help="Configure tests log output"
)
parser.addoption(
"--plain-log",
action="store_true",
default=False,
help="Do not colorize console log output",
)
parser.addoption(
"--base-port",
action="store",
default=8500,
type="int",
help="Base port number to use for tests.",
)
# The goal here is to ensure the test runner will print something to the
# stdout; this should be done frequently enough for the runner to /not/ get
# killed by the CI. The settings below are defined in such a way as to
# guarantee that the test fails before the CI kills the runner.
#
# When something is printed depends on the verbosity used. If the tests are
# executed with verbosity zero (the default), the only phase that prints to
# the stdout is pytest_runtest_call.
#
# Consider the following:
#
# 1. test1.setup
# 2. test1.call
# 3. test1.teardown
# 4. test2.setup
# 5. test2.call
# 6. test2.teardown
#
# From the start of step 3 until the end of step 5 there will be no output,
# which is a full test cycle. Because of this, the settings below are
# defined in terms of their addition being smaller than the CI settings.
#
# Higher verbosities change the analysis above, however this is set for the
# worst case.
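# Hypothetical example: with timeout_limit_for_setup_and_call = 300 and
# timeout_limit_teardown = 120 in setup.cfg, a single test may take at most
# 420s, so the CI no-output timeout must be configured above that value.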
timeout_limit_setup_and_call_help = (
"This setting defines the timeout in seconds for the setup *and* call "
"phases of a test. Every test will be allowed to use at most "
"`timeout_limit_setup_and_call` seconds to complete these phases. This "
"setting together with the timeout_limit_teardown defines the total "
"runtime for a single test. The total timeout must be lower than the no "
"output timeout of the continuous integration."
)
parser.addini("timeout_limit_for_setup_and_call", timeout_limit_setup_and_call_help)
timeout_limit_teardown_help = (
"This setting defines the timeout in seconds for the teardown phase. It "
"must be a non-zero value to allow for proper cleanup of fixtures. This "
"setting together with the timeout_limit_setup_and_call defines the "
"total runtime for a single test. The total timeout must be lower than "
"the no output timeout of the continuous integration."
)
parser.addini("timeout_limit_teardown", timeout_limit_teardown_help)
@pytest.fixture(autouse=True, scope="session")
def check_geth_version_for_tests(blockchain_type):
if blockchain_type != "geth":
return
geth_version_string, _ = subprocess.Popen(
["geth", "version"], stdout=subprocess.PIPE, stderr=subprocess.PIPE
).communicate()
supported, _, our_version = is_supported_client(geth_version_string.decode())
if not supported:
sys.exit(
f"You are trying to run tests with an unsupported GETH version. "
f"Your Version: {our_version} "
f"Min Supported Version {LOWEST_SUPPORTED_GETH_VERSION} "
f"Max Supported Version {HIGHEST_SUPPORTED_GETH_VERSION}"
)
@pytest.fixture(autouse=True, scope="session")
def check_parity_version_for_tests(blockchain_type):
if blockchain_type != "parity":
return
parity_version_string, _ = subprocess.Popen(
["parity", "--version"], stdout=subprocess.PIPE, stderr=subprocess.PIPE
).communicate()
supported, _, our_version = is_supported_client(parity_version_string.decode())
if not supported:
sys.exit(
f"You are trying to run tests with an unsupported PARITY version. "
f"Your Version: {our_version} "
f"Min Supported Version {LOWEST_SUPPORTED_PARITY_VERSION} "
f"Max Supported Version {HIGHEST_SUPPORTED_PARITY_VERSION}"
)
@pytest.fixture(scope="session", autouse=True)
def auto_enable_gevent_monitoring_signal():
enable_gevent_monitoring_signal()
@pytest.fixture(scope="session", autouse=True)
def enable_greenlet_debugger(request):
""" Enable the pdb debugger for gevent's greenlets.
This extends the flag `--pdb` from pytest to enable debugging of greenlets
which have raised an exception to the top-level. Without this hook the
exception raised in a greenlet is printed, and the thread state is
discarded, making it impossible to execute a post_mortem.
"""
if request.config.option.usepdb:
import pdb
import bdb
# Do not run pdb again if an exception hits top-level for a second
# greenlet and the previous pdb session is still running
enabled = False
hub = gevent.get_hub()
def debugger(context, type_, value, tb):
# Always print the exception, because once the pdb REPL is started
# we cannot retrieve it with `sys.exc_info()`.
#
# Using gevent's hub print_exception because it properly handles
# corner cases.
hub.print_exception(context, type_, value, tb)
# Don't enter nested sessions
# Ignore exceptions used to quit the debugger / interpreter
nonlocal enabled
if not enabled and type_ not in (bdb.BdbQuit, KeyboardInterrupt):
enabled = True
pdb.post_mortem() # pylint: disable=no-member
enabled = False
# Hooking the debugger on the hub error handler. Exceptions that are
# not handled on a given greenlet are forwarded to the
# parent.handle_error, until the hub is reached.
#
# Note: for this to work properly, it's really important to use
# gevent's spawn function.
hub.handle_error = debugger
@pytest.fixture(autouse=True)
def logging_level(request, logs_storage):
""" Configure the structlog level for each test run.
For integration tests this also sets the geth verbosity.
"""
# disable pytest's built in log capture, otherwise logs are printed twice
request.config.option.showcapture = "stdout"
if request.config.option.log_cli_level:
level = request.config.option.log_cli_level
elif request.config.option.verbose > 3:
level = "DEBUG"
elif request.config.option.verbose > 1:
level = "INFO"
else:
level = "WARNING"
if request.config.option.log_config:
config_converter = LogLevelConfigType()
logging_levels = config_converter.convert(
value=request.config.option.log_config, param=None, ctx=None
)
else:
logging_levels = {"": level}
# configure_logging requires the path to exist
os.makedirs(logs_storage, exist_ok=True)
time = datetime.datetime.utcnow().isoformat()
debug_path = os.path.join(logs_storage, f"raiden-debug_{time}.log")
configure_logging(
logging_levels,
colorize=not request.config.option.plain_log,
log_file=request.config.option.log_file,
cache_logger_on_first_use=False,
debug_log_file_name=debug_path,
)
log.info("Running test", nodeid=request.node.nodeid)
@pytest.fixture(scope="session", autouse=True)
def dont_exit_pytest():
""" Raiden will quit on any unhandled exception.
This allows the test suite to finish in case an exception is unhandled.
"""
gevent.get_hub().NOT_ERROR = (gevent.GreenletExit, SystemExit)
@pytest.fixture(scope="session", autouse=True)
def insecure_tls():
make_requests_insecure()
@contextlib.contextmanager
def timeout_for_setup_and_call(item):
"""Sets a timeout up to `item.remaining_timeout`, if the timeout is reached
an exception is raised, otherwise the amount of time used by the run is
deducted from the `item.remaining_timeout`.
This function is only used for setup and call, which share the same
timeout. The teardown must have a separate timeout, because even if either
the setup or the call timed out the teardown must still be called to do
fixture clean up.
"""
def report():
gevent.util.print_run_info()
pytest.fail(f"Setup and Call timeout >{item.timeout_setup_and_call}s")
def handler(signum, frame): # pylint: disable=unused-argument
report()
# The handler must be installed before the timer is set, otherwise it is
# possible for the default handler to be used, which would not raise our
# exception. This can happen if the setup phase uses most of the available
# time, leaving just enough for the call to install the new timer and get
# the event.
signal.signal(signal.SIGALRM, handler)
# Negative values are invalid and will raise an exception.
#
# This is not a problem because:
# - pytest_runtest_setup is the first called, it follows the call to
# pytest_runtest_protocol, which validates and sets the timeout values.
# - pytest_runtest_call is the second call, and it will only run if the
# setup was successful, i.e. a timeout did not happen. This implies that
# the remaining_timeout is positive.
remaining_timeout = item.remaining_timeout
started_at = time.time()
signal.setitimer(signal.ITIMER_REAL, remaining_timeout)
yield
# The timer must be disabled *before* the handler is unset, otherwise it is
# possible for a timeout event to be handled by the default handler.
signal.setitimer(signal.ITIMER_REAL, 0)
signal.signal(signal.SIGALRM, signal.SIG_DFL)
elapsed = time.time() - started_at
# It is possible for elapsed to be negative, this can happen if the
# time.time clock and the clock used by the signal are different. To
# guarantee the next iteration will only have positive values, raise an
# exception, failing the setup and skipping the call.
item.remaining_timeout -= elapsed
if item.remaining_timeout < 0:
report()
def timeout_from_marker(marker):
"""Return None or the value of the timeout."""
timeout = None
if marker is not None:
if len(marker.args) == 1 and len(marker.kwargs) == 0:
timeout = marker.args[0]
elif len(marker.args) == 0 and len(marker.kwargs) == 1 and "timeout" in marker.kwargs:
timeout = marker.kwargs["timeout"]
else:
raise Exception(
"Invalid marker. It must have only one argument for the "
"timeout, which may be named or not."
)
return timeout
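# Both marker spellings are accepted, e.g. (hypothetical values):
#   @pytest.mark.timeout(120)  or  @pytest.mark.timeout(timeout=120)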
def set_item_timeouts(item):
"""Limit the tests runtime
The timeout is read from the following places (last one takes precedence):
* setup.cfg (ini).
* pytest timeout marker at the specific test.
"""
timeout_limit_setup_and_call = item.config.getini("timeout_limit_for_setup_and_call")
if timeout_limit_setup_and_call == "":
raise Exception("timeout_limit_for_setup_and_call must be set in setup.cfg")
timeout_limit_setup_and_call = float(timeout_limit_setup_and_call)
timeout_limit_teardown = item.config.getini("timeout_limit_teardown")
if timeout_limit_teardown == "":
raise Exception("timeout_limit_teardown must be set in setup.cfg")
timeout_limit_teardown = float(timeout_limit_teardown)
timeout_teardown = timeout_limit_teardown
# There is no marker to configure the teardown timeout
marker = item.get_closest_marker("timeout")
timeout_setup_and_call = timeout_from_marker(marker) or timeout_limit_setup_and_call
if timeout_setup_and_call > timeout_limit_setup_and_call:
raise Exception(
f"Invalid value for the timeout marker {timeout_setup_and_call}. This "
f"value must be smaller than {timeout_limit_setup_and_call}. This is "
f"necessary because the runtime of a test has to be synchronized with "
f"the continuous integration output timeout, e.g. no_output_timeout "
f"in CircleCI. If the timeout is larger than that value the whole "
f"build will be killed because of the lack of output, this will not "
f"produce a failure report nor log files, which makes the build run "
f"useless."
)
if timeout_setup_and_call <= 0:
raise Exception("timeout must not be negative")
if timeout_teardown <= 0:
raise Exception("timeout_limit_teardown must not be negative")
item.timeout_setup_and_call = timeout_setup_and_call
item.remaining_timeout = timeout_setup_and_call
item.timeout_teardown = timeout_teardown
@pytest.hookimpl()
def pytest_runtest_protocol(item, nextitem): # pylint:disable=unused-argument
# The timeouts cannot be configured in the pytest_runtest_setup, because if
# the required value is not set, an exception is raised, but then it is
# swallowed by the `CallInfo.from_call`
set_item_timeouts(item)
# Pytest's test protocol is defined by `pytest.runner:pytest_runtest_protocol`,
# it has three steps where exceptions can safely be raised at:
#
# - setup
# - call
# - teardown
#
# Below one hook for each of the steps is used. This is necessary to guarantee
# that a Timeout exception will be raised only inside these steps that handle
# exceptions, otherwise the test executor could be killed by the timeout
# exception.
@pytest.hookimpl(hookwrapper=True, trylast=True)
def pytest_runtest_setup(item):
with timeout_for_setup_and_call(item):
yield
@pytest.hookimpl(hookwrapper=True, trylast=True)
def pytest_runtest_call(item):
""" More feedback for flaky tests.
In verbose mode this outputs 'FLAKY' every time a test marked as flaky fails.
This doesn't happen when:
- Tests are executed under xdist.
- The fixture setup fails.
"""
# pytest_runtest_call is only called if the test setup finished
# successfully; this means the code below may not be executed if the fixture
# setup has already timed out.
with timeout_for_setup_and_call(item):
outcome = yield
did_fail = isinstance(outcome._excinfo, tuple) and isinstance(
outcome._excinfo[1], BaseException
)
is_xdist = "PYTEST_XDIST_WORKER" in os.environ
is_flaky_test = item.get_closest_marker("flaky") is not None
should_print = (
did_fail and item.config.option.verbose > 0 and is_flaky_test and not is_xdist
)
if should_print:
capmanager = item.config.pluginmanager.getplugin("capturemanager")
with capmanager.global_and_fixture_disabled():
item.config.pluginmanager.get_plugin("terminalreporter")._tw.write(
"FLAKY ", yellow=True
)
@pytest.hookimpl(hookwrapper=True, trylast=True)
def pytest_runtest_teardown(item):
# The teardown must be executed to clear up the fixtures, even if the
# fixture setup itself failed. Because of this the timeout for the teardown
# is different than the timeout for the setup and call.
def report():
gevent.util.print_run_info()
pytest.fail(
f"Teardown timeout >{item.timeout_setup_and_call}s. This must not happen, when "
f"the teardown times out not all finalizers got a chance to run. This "
f"means not all fixtures are cleaned up, which can make subsequent "
f"tests flaky. This would be the case for pending greenlets which are "
f"not cleared by previous run."
)
def handler(signum, frame): # pylint: disable=unused-argument
report()
# The order of the signal setup/teardown is important, check
# timeout_for_setup_and_call for details
signal.signal(signal.SIGALRM, handler)
signal.setitimer(signal.ITIMER_REAL, item.timeout_teardown)
yield
signal.setitimer(signal.ITIMER_REAL, 0)
signal.signal(signal.SIGALRM, signal.SIG_DFL)
def pytest_generate_tests(metafunc):
fixtures = metafunc.fixturenames
if "transport" in fixtures:
parametrize_private_rooms = True
transport_and_privacy = list()
number_of_transports = list()
# Filter existing parametrization which is already done in the test
for mark in metafunc.definition.own_markers:
if mark.name == "parametrize":
# Check if 'private_rooms' gets parameterized
if "private_rooms" in mark.args[0]:
parametrize_private_rooms = False
# Check if more than one transport is used
if "number_of_transports" == mark.args[0]:
number_of_transports = mark.args[1]
if "public_and_private_rooms" in fixtures:
if number_of_transports:
transport_and_privacy.extend(
[
("matrix", [False for _ in range(number_of_transports[0])]),
("matrix", [True for _ in range(number_of_transports[0])]),
]
)
else:
transport_and_privacy.extend([("matrix", False), ("matrix", True)])
else:
if number_of_transports:
transport_and_privacy.extend(
[("matrix", [False for _ in range(number_of_transports[0])])]
)
else:
transport_and_privacy.append(("matrix", False))
if not parametrize_private_rooms or "private_rooms" not in fixtures:
# If the test does not expect the private_rooms parameter or parametrizes
# `private_rooms` itself, only give the transport values
metafunc.parametrize(
"transport",
list(set(transport_type for transport_type, _ in transport_and_privacy)),
)
else:
metafunc.parametrize("transport,private_rooms", transport_and_privacy)
if sys.platform == "darwin":
# On macOS the temp directory base path is already very long.
# To avoid failures on ipc tests (ipc path length is limited to 104/108 chars on macOS/linux)
# we override the pytest tmpdir machinery to produce shorter paths.
@pytest.fixture(scope="session", autouse=True)
def _tmpdir_short():
"""Shorten tmpdir paths"""
from _pytest.tmpdir import TempPathFactory
def getbasetemp(self):
""" return base temporary directory. """
if self._basetemp is None:
if self._given_basetemp is not None:
basetemp = Path(self._given_basetemp)
ensure_reset_dir(basetemp)
else:
from_env = os.environ.get("PYTEST_DEBUG_TEMPROOT")
temproot = Path(from_env or tempfile.gettempdir())
user = get_user() or "unknown"
# use a sub-directory in the temproot to speed-up
# make_numbered_dir() call
rootdir = temproot.joinpath(f"pyt-{user}")
rootdir.mkdir(exist_ok=True)
basetemp = make_numbered_dir_with_cleanup(
prefix="", root=rootdir, keep=3, lock_timeout=LOCK_TIMEOUT
)
assert basetemp is not None
self._basetemp = t = basetemp
self._trace("new basetemp", t)
return t
else:
return self._basetemp
TempPathFactory.getbasetemp = getbasetemp
@pytest.fixture
def tmpdir(request, tmpdir_factory):
"""Return a temporary directory path object
which is unique to each test function invocation,
created as a sub directory of the base temporary
directory. The returned object is a `py.path.local`_
path object.
"""
name = request.node.name
name = re.sub(r"[\W]", "_", name)
MAXVAL = 15
if len(name) > MAXVAL:
name = name[:MAXVAL]
tdir = tmpdir_factory.mktemp(name, numbered=True)
return tdir
| 38.236207
| 99
| 0.678947
|
daac9dae8067d03f06d44a328d731533d307dac0
| 288
|
py
|
Python
|
CVContent.py
|
alpaylan/cvdl
|
bb4981af134770074460e620c2c65054d62cf32b
|
[
"MIT"
] | 33
|
2021-12-19T16:51:26.000Z
|
2022-03-10T12:07:20.000Z
|
CVContent.py
|
onuratakan/cvdl
|
bb4981af134770074460e620c2c65054d62cf32b
|
[
"MIT"
] | null | null | null |
CVContent.py
|
onuratakan/cvdl
|
bb4981af134770074460e620c2c65054d62cf32b
|
[
"MIT"
] | 4
|
2021-12-19T18:26:12.000Z
|
2022-01-11T15:46:29.000Z
|
class CVContent:
"""
CV Content is defined as a set of sections, including
"Header" as a special section type. Each section
conforms to a Schema defined in the CV Schema Document (CVSD)
using a set of pre-defined data types.
"""
def __init__(self):
pass
| 24
| 61
| 0.663194
|
232bed1d48af38703954fb1a4e79fd4294e2b7be
| 14,715
|
py
|
Python
|
from_3b1b/old/counting_in_binary.py
|
tigerking/manim2
|
93e8957e433b8e59acb5a5213a4074ee0125b823
|
[
"MIT"
] | null | null | null |
from_3b1b/old/counting_in_binary.py
|
tigerking/manim2
|
93e8957e433b8e59acb5a5213a4074ee0125b823
|
[
"MIT"
] | null | null | null |
from_3b1b/old/counting_in_binary.py
|
tigerking/manim2
|
93e8957e433b8e59acb5a5213a4074ee0125b823
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import numpy as np
import itertools as it
from copy import deepcopy
import sys
from manim2.imports import *
from script_wrapper import command_line_create_scene
MOVIE_PREFIX = "counting_in_binary/"
BASE_HAND_FILE = os.path.join(VIDEO_DIR, MOVIE_PREFIX, "Base.mp4")
FORCED_FRAME_DURATION = 0.02
TIME_RANGE = (0, 42)
INITIAL_PADDING = 27
NUM_GOOD_FRAMES = 1223
ALGORITHM_TEXT = [
"""
\\begin{flushleft}
Turn up the rightmost finger that is down.
""", """
Turn down all fingers to its right.
\\end{flushleft}
"""
]
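# A minimal sketch of the increment rule stated in ALGORITHM_TEXT (hypothetical
# helper, not used by any scene below). fingers[0] is the thumb, True means
# "up", and at least one finger is assumed to be down.
def _increment_fingers_example(fingers):
    i = fingers.index(False)  # rightmost finger that is down
    fingers[i] = True  # turn it up
    for j in range(i):  # turn down all fingers to its right
        fingers[j] = False
    return fingers
# e.g. [True, True, False, False, False] (3) becomes [False, False, True, False, False] (4)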
FINGER_WORDS = [
"Thumb",
"Index Finger",
"Middle Finger",
"Ring Finger",
"Pinky",
]
COUNT_TO_FRAME_NUM = {
0 : 0,
1 : 27,
2 : 76,
3 : 110,
4 : 163,
5 : 189,
6 : 226,
7 : 264,
8 : 318,
9 : 356,
10 : 384,
11 : 423,
12 : 457,
13 : 513,
14 : 528,
15 : 590,
16 : 620,
17 : 671,
18 : 691,
19 : 740,
20 : 781,
21 : 810,
22 : 855,
23 : 881,
24 : 940,
25 : 970,
26 : 1014,
27 : 1055,
28 : 1092,
29 : 1143,
30 : 1184,
31 : 1219,
}
COUNT_TO_TIP_POS = {
0 : [5.0, 0.5, 0.0],
1 : [3.1, 2.5, 0.0],
2 : [1.5, 2.3, 0.0],
3 : [0.7, 2.0, 0.0],
4 : [0.0, 1.0, 0.0],
}
def finger_tip_power_of_2(finger_no):
return TexMobject(str(2**finger_no)).shift(COUNT_TO_TIP_POS[finger_no])
class Hand(ImageMobject):
STARTING_BOTTOM_RIGHT = [4.61111111e+00, -3.98888889e+00, 9.80454690e-16]
def __init__(self, num, small = False, **kwargs):
Mobject2D.__init__(self, **kwargs)
path = os.path.join(
VIDEO_DIR, MOVIE_PREFIX, "images", "Hand%d.png"%num
)
invert = False
if not self.read_in_cached_attrs(path, invert):
ImageMobject.__init__(self, path, invert = invert)
center = self.get_center()
self.center()
self.rotate(np.pi, axis = RIGHT+UP)
self.sort_points(lambda p : np.log(complex(*p[:2])).imag)
self.rotate(np.pi, axis = RIGHT+UP)
self.shift(center)
self.cache_attrs(path, invert = False)
self.shift(self.STARTING_BOTTOM_RIGHT-self.get_boundary_point(DOWN+RIGHT))
if small:
self.shrink()
def shrink(self):
self.scale_in_place(0.8).to_edge(DOWN, buff = 0.0)
# def set_color_thumb(self, color = "yellow"):
# self.set_color(
# color = color,
# condition = lambda p : p[0] > 4.5 and p[1] > -1.5
# )
def get_algorithm():
return TextMobject(ALGORITHM_TEXT)
def get_finger_colors():
return list(Color("yellow").range_to("red", 5))
def five_char_binary(num):
result = bin(num)[2:]
return (5-len(result))*"0" + result
def read_reversed_binary(string):
return sum([
2**count if char == '1' else 0
for count, char in zip(it.count(), string)
])
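# e.g. five_char_binary(5) == "00101" and read_reversed_binary("00101") == 20,
# since the string is read least-significant-bit first (2**2 + 2**4).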
class LeftHand(Hand):
def __init__(self, num, **kwargs):
Hand.__init__(
self,
read_reversed_binary(five_char_binary(num)),
**kwargs
)
self.rotate(np.pi, UP)
self.shift(LEFT)
def get_hand_map(which_hand = "right"):
if which_hand == "right":
Class = Hand
elif which_hand == "left":
Class = LeftHand
else:
print("Bad arg, bro")
return
return dict([
(num, Class(num, small=True))
for num in range(32)
])
class OverHand(SceneFromVideo):
def construct(self):
SceneFromVideo.construct(self, BASE_HAND_FILE)
self.frame_duration = FORCED_FRAME_DURATION
self.frames = self.frames[:NUM_GOOD_FRAMES]
class SaveEachNumber(OverHand):
def construct(self):
OverHand.construct(self)
for count in COUNT_TO_FRAME_NUM:
path = os.path.join(
VIDEO_DIR, MOVIE_PREFIX, "images",
"Hand%d.png"%count
)
Image.fromarray(self.frames[COUNT_TO_FRAME_NUM[count]]).save(path)
def write_to_movie(self, name = None):
print("Why bother writing to movie...")
class ShowCounting(OverHand):
def construct(self):
OverHand.construct(self)
self.frames = INITIAL_PADDING*[self.frames[0]] + self.frames
num_frames = len(self.frames)
self.frames = [
disp.paint_mobject(
self.get_counting_mob(32*count // num_frames),
frame
)
for frame, count in zip(self.frames, it.count())
]
def get_counting_mob(self, count):
mob = TexMobject(str(count))
mob.scale(2)
mob.shift(LEFT)
mob.to_edge(UP, buff = 0.1)
return mob
class ShowFrameNum(OverHand):
def construct(self):
OverHand.construct(self)
for frame, count in zip(self.frames, it.count()):
print(count + "of" + len(self.frames))
mob = Mobject(*[
TexMobject(char).shift(0.3*x*RIGHT)
for char, x, in zip(str(count), it.count())
])
self.frames[count] = disp.paint_mobject(
mob.to_corner(UP+LEFT),
frame
)
class CountTo1023(Scene):
def construct(self):
rh_map = get_hand_map("right")
lh_map = get_hand_map("left")
def get_num(count):
return Mobject(*[
TexMobject(char).shift(0.35*x*RIGHT)
for char, x, in zip(str(count), it.count())
]).center().to_edge(UP)
self.frames = [
disp.paint_mobject(Mobject(
rh_map[count%32], lh_map[count//32], get_num(count)
))
for count in range(2**10)
]
class Introduction(Scene):
def construct(self):
words = TextMobject("""
First, let's see how to count
to 31 on just one hand...
""")
hand = Hand(0)
for mob in words, hand:
mob.sort_points(lambda p : p[0])
self.add(words)
self.wait()
self.play(DelayByOrder(Transform(words, hand)))
self.wait()
class ShowReadingRule(Scene):
def construct(self):
sample_counts = [6, 17, 27, 31]
question = TextMobject("""
How do you recognize what number a given configuration represents?
""", size = "\\Huge").scale(0.75).to_corner(UP+LEFT)
answer = TextMobject([
"Think of each finger as representing a power of 2, ",
"then add up the numbers represented by the standing fingers."
], size = "\\Huge").scale(0.75).to_corner(UP+LEFT).split()
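# e.g. if the fingers representing 1, 2 and 16 are standing, the hand reads
# 1 + 2 + 16 = 19.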
self.add(question)
for count in sample_counts:
hand = Hand(count, small = True)
self.add(hand)
self.wait()
self.remove(hand)
self.add(hand)
self.wait()
self.remove(question)
self.add(answer[0])
counts = list(map(finger_tip_power_of_2, list(range(5))))
for count in counts:
self.play(SpinInFromNothing(count, run_time = 0.3))
self.wait()
self.play(ShimmerIn(answer[1]))
for count in sample_counts:
self.clear()
self.add(*answer)
self.read_sample(count)
def read_sample(self, num):
hand = Hand(num, small = True)
bool_arr = [c == '1' for c in five_char_binary(num)]
counts = [4-count for count in range(5) if bool_arr[count]]
count_mobs = list(map(finger_tip_power_of_2, counts))
if num in [6, 27]:
count_mobs[1].shift(0.2*DOWN + 0.2*LEFT)
if num in [6, 17]:
hand.shift(0.8*LEFT)
sum_mobs = TexMobject(
" + ".join([str(2**c) for c in counts]).split(" ") + ["=%d"%num]
).to_corner(UP+RIGHT).split()
self.add(hand, *count_mobs)
self.wait()
self.play(*[
Transform(count_mobs[n//2], sum_mobs[n])
if n%2 == 0 and n//2 < len(counts)
else FadeIn(sum_mobs[n])
for n in range(len(sum_mobs))
])
self.wait(2.0)
class ShowIncrementRule(Scene):
def construct(self):
#First count from 18 to 22
def to_left(words):
return "\\begin{flushleft}" + words + "\\end{flushleft}"
phrases = [
TextMobject(to_left(words), size = "\\Huge").scale(0.75).to_corner(UP+LEFT)
for words in [
"But while you're counting, you don't need to think about powers of 2.",
"Can you see the pattern for incrementing?",
"If the thumb is down, turn it up.",
"If the thumb is up, but the forefinger is down, flip them both.",
"If the thumb and forefinger are up, but the middle finger is down, flip all three.",
"In general, flip all of the fingers up to the rightmost one which is down.",
"After practicing for a minute or two, you're mind starts doing it automatically.",
"Question: Why does this rule for incrementation work?",
]
]
ranges = [
(0, 14, False),
(14, 28, False),
(12, 13, True),
(29, 31, True),
(27, 29, True),
(23, 24, True),
(14, 20, False),
(20, 26, False)
]
oh = OverHand()
for phrase, (start, end, pause) in zip(phrases, ranges):
if pause:
self.background = oh.frames[COUNT_TO_FRAME_NUM[start]]
self.add(phrase)
self.play(ShimmerIn(self.get_arrow_set(start)))
self.wait()
self.clear()
self.reset_background()
self.frames += [
disp.paint_mobject(phrase, frame)
for frame in oh.frames[COUNT_TO_FRAME_NUM[start]:COUNT_TO_FRAME_NUM[end]]
]
if pause:
self.frames += [self.frames[-1]]*int(1.0/self.frame_duration)
def get_arrow_set(self, num):
arrow = TexMobject("\\downarrow", size = "\\Huge")
arrow.set_color("green")
arrow.shift(-arrow.get_bottom())
if num == 12:
tips = [(4, 1, 0)]
elif num == 29:
tips = [
(6, 1.5, 0),
(3, 1.5, 0),
]
elif num == 27:
tips = [
(5.5, 1.5, 0),
(2.75, 3.5, 0),
(2, 1.0, 0),
]
elif num == 23:
tips = [
(6, 1, 0),
(3.25, 3.5, 0),
(2.25, 3.5, 0),
(1.5, 0.75, 0),
]
return Mobject(*[
deepcopy(arrow).shift(tip)
for tip in tips
])
class MindFindsShortcuts(Scene):
def construct(self):
words1 = TextMobject("""
Before long, your mind starts to recognize certain
patterns without needing to perform the addition.
""", size = "\\Huge").scale(0.75).to_corner(LEFT+UP)
words2 = TextMobject("""
Which makes it faster to recognize other patterns...
""", size = "\\Huge").scale(0.75).to_corner(LEFT+UP)
hand = Hand(7).scale(0.5).center().shift(DOWN+2*LEFT)
sum421 = TexMobject("4+2+1").shift(DOWN+2*RIGHT)
seven = TexMobject("7").shift(DOWN+6*RIGHT)
compound = Mobject(
Arrow(hand, sum421),
sum421,
Arrow(sum421, seven)
)
self.add(
words1,
hand,
compound,
seven
)
self.wait()
self.play(
Transform(compound, Arrow(hand, seven).set_color("yellow")),
ShimmerIn(TextMobject("Directly recognize").shift(1.5*DOWN+2*RIGHT))
)
self.wait()
self.clear()
hands = dict([
(num, Hand(num).center().scale(0.5).shift(DOWN))
for num in [23, 16, 7]
])
hands[23].shift(5*LEFT)
hands[16].shift(LEFT)
hands[7].shift(3*RIGHT)
for num in 7, 16:
hands[num].add(TexMobject(str(num)).shift(hands[num].get_top()+0.5*UP))
plus = TexMobject("+").shift(DOWN + RIGHT)
equals = TexMobject("=").shift(DOWN + 2.5*LEFT)
equals23 = TexMobject("=23").shift(DOWN + 5.5*RIGHT)
self.add(words2, hands[23])
self.wait()
self.play(
Transform(
deepcopy(hands[16]).set_color("black").center().shift(hands[23].get_center()),
hands[16]
),
Transform(
deepcopy(hands[7]).set_color("black").center().shift(hands[23].get_center()),
hands[7]
),
Animation(hands[23]),
FadeIn(equals),
FadeIn(plus)
)
self.wait()
self.play(ShimmerIn(equals23))
self.wait()
class CountingExampleSentence(ShowCounting):
def construct(self):
words = "As an example, this is me counting the number of words in this sentence on just one hand!"
self.words = TextMobject(words.split(), size = "\\Huge").scale(0.7).to_corner(UP+LEFT, buff = 0.25).split()
ShowCounting.construct(self)
def get_counting_mob(self, num):
return Mobject(*self.words[:num])
class FinishCountingExampleSentence(Scene):
def construct(self):
words = "As an example, this is me counting the number of words in this sentence on just one hand!"
words = TextMobject(words, size = "\\Huge").scale(0.7).to_corner(UP+LEFT, buff = 0.25)
hand = Hand(18)
sixteen = TexMobject("16").shift([0, 2.25, 0])
two = TexMobject("2").shift([3, 3.65, 0])
eightteen = TexMobject("18").shift([1.5, 2.5, 0])
eightteen.sort_points()
comp = Mobject(sixteen, two)
self.add(hand, comp, words)
self.wait()
self.play(Transform(comp, eightteen))
self.wait()
class Question(Scene):
def construct(self):
self.add(TextMobject("Left to ponder: Why does this rule for incrementing work?"))
class TwoHandStatement(Scene):
def construct(self):
self.add(TextMobject(
"With 10 fingers, you can count up to $2^{10} - 1 = 1023$..."
))
class WithToes(Scene):
def construct(self):
words = TextMobject([
"If you were dexterous enough to use your toes as well,",
"you could count to 1,048,575"
]).split()
self.add(words[0])
self.wait()
self.play(ShimmerIn(words[1]))
self.wait()
if __name__ == "__main__":
command_line_create_scene(MOVIE_PREFIX)
| 30.720251
| 115
| 0.539925
|
263ba45d23a8bbff839009a8420cac0bdeaf378f
| 1,373
|
py
|
Python
|
create_connect.py
|
christophschubert/ccloud-python-api
|
077e58f60a05b5df6cfbda700a3fb05dcaa10ec6
|
[
"Apache-2.0"
] | null | null | null |
create_connect.py
|
christophschubert/ccloud-python-api
|
077e58f60a05b5df6cfbda700a3fb05dcaa10ec6
|
[
"Apache-2.0"
] | null | null | null |
create_connect.py
|
christophschubert/ccloud-python-api
|
077e58f60a05b5df6cfbda700a3fb05dcaa10ec6
|
[
"Apache-2.0"
] | null | null | null |
import argparse
import ccloud
parser = argparse.ArgumentParser(description='Set up local connect cluster.')
parser.add_argument('--kafka-cluster', dest='kafka', metavar='lkc-xxxx', required=True, help='logical Kafka cluster to connect to')
parser.add_argument('--connect-group-id', dest='group_id', metavar='id', required=True, help='group.id of the Connect cluster')
parser.add_argument('--schema-cluster', dest='sr', metavar='lsrc-xxxx', help='schema registry resource to connect to')
args = parser.parse_args()
topic_config = {
'connect_status': {
'partitions': 2,
}
#connect command and other topics as well
}
configs = [] #accumulator for the various configs
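# (Assumption) each entry is expected to be one Connect worker property line,
# e.g. 'group.id={}'.format(group_id) plus the bootstrap/SASL settings derived
# from cluster_api_key below.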
kafka_cluster = args.kafka
group_id = args.group_id
# create service account for connect
cluster_sa = ccloud.create_service_account('Connect {}'.format(group_id), 'Service account for Connect cluster {}'.format(group_id))
cluster_api_key = ccloud.create_api_key(kafka_cluster, cluster_sa.id, 'Key for Connect cluster {} (Kafka)'.format(group_id))
# create topics
for name, config in topic_config.items():
ccloud.create_topic(name, cluster=args.kafka, config=config)
# set ACLs for topic
if args.sr:
# configure schema-registry connection
schema_cluster = args.sr
#create schema registry for connect
#create API key
for line in configs:
print(line)
| 30.511111
| 132
| 0.737072
|
8649de3e2a37c3bcec04cdb9e198446597691c78
| 13,053
|
py
|
Python
|
duo_auth/vendor/duo_client_python/tests/test_admin.py
|
killstrelok/insightconnect-plugins
|
911358925f4233ab273dbd8172e8b7b9188ebc01
|
[
"MIT"
] | 1
|
2020-03-18T09:14:55.000Z
|
2020-03-18T09:14:55.000Z
|
duo_auth/vendor/duo_client_python/tests/test_admin.py
|
killstrelok/insightconnect-plugins
|
911358925f4233ab273dbd8172e8b7b9188ebc01
|
[
"MIT"
] | 1
|
2021-02-23T23:57:37.000Z
|
2021-02-23T23:57:37.000Z
|
duo_auth/vendor/duo_client_python/tests/test_admin.py
|
killstrelok/insightconnect-plugins
|
911358925f4233ab273dbd8172e8b7b9188ebc01
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
import unittest
import warnings
from . import util
import duo_client.admin
class TestAdmin(unittest.TestCase):
def setUp(self):
self.client = duo_client.admin.Admin(
'test_ikey', 'test_akey', 'example.com')
self.client.account_id = 'DA012345678901234567'
# monkeypatch client's _connect()
self.client._connect = lambda: util.MockHTTPConnection()
# if you are wanting to simulate getting lists of objects
# rather than a single object
self.client_list = duo_client.admin.Admin(
'test_ikey', 'test_akey', 'example.com')
self.client_list.account_id = 'DA012345678901234567'
self.client_list._connect = \
lambda: util.MockHTTPConnection(data_response_should_be_list=True)
# if you are wanting to get a response from a call to get
# authentication logs
self.client_authlog = duo_client.admin.Admin(
'test_ikey', 'test_akey', 'example.com')
self.client_authlog.account_id = 'DA012345678901234567'
self.client_authlog._connect = \
lambda: util.MockHTTPConnection(data_response_from_get_authlog=True)
# GET with no params
def test_get_users(self):
response = self.client.get_users()
self.assertEqual(response['method'], 'GET')
self.assertEqual(
response['uri'],
'/admin/v1/users?account_id=%s' % self.client.account_id)
self.assertEqual(response['body'], None)
# GET with params
def test_get_users_by_name(self):
response = self.client.get_users_by_name('foo')
(uri, args) = response['uri'].split('?')
self.assertEqual(response['method'], 'GET')
self.assertEqual(uri, '/admin/v1/users')
self.assertEqual(
util.params_to_dict(args),
{'username':['foo'],
'account_id':[self.client.account_id]})
self.assertEqual(response['body'], None)
response = self.client.get_users_by_name('foo')
(uri, args) = response['uri'].split('?')
self.assertEqual(response['method'], 'GET')
self.assertEqual(uri, '/admin/v1/users')
self.assertEqual(
util.params_to_dict(args),
{'username':['foo'],
'account_id':[self.client.account_id]})
self.assertEqual(response['body'], None)
# POST with params
def test_add_user(self):
# all params given
response = self.client.add_user(
'foo', realname='bar', status='active', notes='notes',
email='foobar@baz.com', firstname='fName', lastname='lName',
alias1='alias1', alias2='alias2', alias3='alias3', alias4='alias4')
self.assertEqual(response['method'], 'POST')
self.assertEqual(response['uri'], '/admin/v1/users')
self.assertEqual(
util.params_to_dict(response['body']),
{
'realname': ['bar'],
'notes': ['notes'],
'username': ['foo'],
'status': ['active'],
'email': ['foobar%40baz.com'],
'firstname': ['fName'],
'lastname': ['lName'],
'account_id': [self.client.account_id],
'alias1': ['alias1'],
'alias2': ['alias2'],
'alias3': ['alias3'],
'alias4': ['alias4'],
})
# defaults
response = self.client.add_user('bar')
self.assertEqual(response['method'], 'POST')
self.assertEqual(response['uri'], '/admin/v1/users')
self.assertEqual(
util.params_to_dict(response['body']),
{'username':['bar'], 'account_id':[self.client.account_id]})
def test_update_user(self):
response = self.client.update_user(
'DU012345678901234567', username='foo', realname='bar',
status='active', notes='notes', email='foobar@baz.com',
firstname='fName', lastname='lName', alias1='alias1',
alias2='alias2', alias3='alias3', alias4='alias4')
self.assertEqual(response['method'], 'POST')
self.assertEqual(
response['uri'], '/admin/v1/users/DU012345678901234567')
self.assertEqual(
util.params_to_dict(response['body']),
{
'realname': ['bar'],
'notes': ['notes'],
'username': ['foo'],
'status': ['active'],
'email': ['foobar%40baz.com'],
'firstname': ['fName'],
'lastname': ['lName'],
'account_id': [self.client.account_id],
'alias1': ['alias1'],
'alias2': ['alias2'],
'alias3': ['alias3'],
'alias4': ['alias4'],
})
def test_get_endpoints(self):
response = self.client.get_endpoints()
self.assertEqual(response['method'], 'GET')
(uri, args) = response['uri'].split('?')
self.assertEqual(uri, '/admin/v1/endpoints')
self.assertEqual(
util.params_to_dict(args),
{'account_id': [self.client.account_id]})
def test_get_user_u2ftokens(self):
""" Test to get u2ftokens by user id.
"""
response = self.client.get_user_u2ftokens('DU012345678901234567')
uri, args = response['uri'].split('?')
self.assertEqual(response['method'], 'GET')
self.assertEqual(uri, '/admin/v1/users/DU012345678901234567/u2ftokens')
self.assertEqual(util.params_to_dict(args),
{'account_id':[self.client.account_id]})
def test_get_u2ftokens_with_params(self):
""" Test to get u2ftokens with params.
"""
response = list(self.client_list.get_u2ftokens())[0]
uri, args = response['uri'].split('?')
self.assertEqual(response['method'], 'GET')
self.assertEqual(uri, '/admin/v1/u2ftokens')
self.assertEqual(util.params_to_dict(args),
{'account_id':[self.client_list.account_id]})
def test_get_u2ftokens_without_params(self):
""" Test to get u2ftokens without params.
"""
response = list(self.client_list.get_u2ftokens())[0]
uri, args = response['uri'].split('?')
self.assertEqual(response['method'], 'GET')
self.assertEqual(uri, '/admin/v1/u2ftokens')
self.assertEqual(util.params_to_dict(args),
{'account_id':[self.client_list.account_id]})
def test_get_u2ftoken_by_id(self):
""" Test to get u2ftoken by registration id.
"""
response = self.client.get_u2ftoken_by_id('DU012345678901234567')
uri, args = response['uri'].split('?')
self.assertEqual(response['method'], 'GET')
self.assertEqual(uri, '/admin/v1/u2ftokens/DU012345678901234567')
self.assertEqual(util.params_to_dict(args),
{'account_id':[self.client.account_id]})
def test_delete_u2ftoken(self):
""" Test to delete u2ftoken by registration id.
"""
response = self.client.delete_u2ftoken('DU012345678901234567')
uri, args = response['uri'].split('?')
self.assertEqual(response['method'], 'DELETE')
self.assertEqual(uri, '/admin/v1/u2ftokens/DU012345678901234567')
self.assertEqual(util.params_to_dict(args),
{'account_id':[self.client.account_id]})
def test_get_user_bypass_codes(self):
""" Test to get bypass codes by user id.
"""
response = self.client.get_user_bypass_codes('DU012345678901234567')
uri, args = response['uri'].split('?')
self.assertEqual(response['method'], 'GET')
self.assertEqual(
uri,
'/admin/v1/users/DU012345678901234567/bypass_codes')
self.assertEqual(util.params_to_dict(args),
{'account_id': [self.client.account_id]})
def test_get_bypass_codes(self):
""" Test to get bypass codes.
"""
response = list(self.client_list.get_bypass_codes())[0]
uri, args = response['uri'].split('?')
self.assertEqual(response['method'], 'GET')
self.assertEqual(uri, '/admin/v1/bypass_codes')
self.assertEqual(util.params_to_dict(args),
{'account_id': [self.client_list.account_id]})
def test_delete_bypass_code_by_id(self):
""" Test to delete a bypass code by id.
"""
response = self.client.delete_bypass_code_by_id('DU012345678901234567')
uri, args = response['uri'].split('?')
self.assertEqual(response['method'], 'DELETE')
self.assertEqual(uri, '/admin/v1/bypass_codes/DU012345678901234567')
self.assertEqual(util.params_to_dict(args),
{'account_id': [self.client.account_id]})
def test_get_authentication_log_v1(self):
""" Test to get authentication log on version 1 api.
"""
response = self.client_list.get_authentication_log(api_version=1)[0]
uri, args = response['uri'].split('?')
self.assertEqual(response['method'], 'GET')
self.assertEqual(uri, '/admin/v1/logs/authentication')
self.assertEqual(
util.params_to_dict(args)['account_id'],
[self.client_list.account_id])
def test_get_authentication_log_v2(self):
""" Test to get authentication log on version 1 api.
"""
response = self.client_authlog.get_authentication_log(api_version=2)
uri, args = response['uri'].split('?')
self.assertEqual(response['method'], 'GET')
self.assertEqual(uri, '/admin/v2/logs/authentication')
self.assertEqual(
util.params_to_dict(args)['account_id'],
[self.client_authlog.account_id])
def test_get_groups(self):
""" Test for getting list of all groups
"""
response = self.client.get_groups()
uri, args = response['uri'].split('?')
self.assertEqual(response['method'], 'GET')
self.assertEqual(uri, '/admin/v1/groups')
self.assertEqual(util.params_to_dict(args),
{'account_id': [self.client.account_id]})
def test_get_group_v1(self):
""" Test for v1 API of getting specific group details
"""
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
response = self.client.get_group('ABC123', api_version=1)
uri, args = response['uri'].split('?')
# Assert deprecation warning generated
self.assertEqual(len(w), 1)
self.assertTrue(issubclass(w[0].category, DeprecationWarning))
self.assertIn('Please migrate to the v2 API', str(w[0].message))
self.assertEqual(response['method'], 'GET')
self.assertEqual(uri, '/admin/v1/groups/ABC123')
self.assertEqual(util.params_to_dict(args),
{'account_id': [self.client.account_id]})
def test_get_group_v2(self):
""" Test for v2 API of getting specific group details
"""
response = self.client.get_group('ABC123', api_version=2)
uri, args = response['uri'].split('?')
self.assertEqual(response['method'], 'GET')
self.assertEqual(uri, '/admin/v2/groups/ABC123')
self.assertEqual(util.params_to_dict(args),
{'account_id': [self.client.account_id]})
def test_get_group_users(self):
""" Test for getting list of users associated with a group
"""
response = self.client.get_group_users('ABC123')
uri, args = response['uri'].split('?')
self.assertEqual(response['method'], 'GET')
self.assertEqual(uri, '/admin/v2/groups/ABC123/users')
self.assertEqual(
util.params_to_dict(args),
{
'account_id': [self.client.account_id],
'limit': ['100'],
'offset': ['0'],
})
def test_delete_group(self):
""" Test for deleting a group
"""
response = self.client.delete_group('ABC123')
uri, args = response['uri'].split('?')
self.assertEqual(response['method'], 'DELETE')
self.assertEqual(uri, '/admin/v1/groups/ABC123')
self.assertEqual(util.params_to_dict(args),
{'account_id': [self.client.account_id]})
def test_modify_group(self):
""" Test for modifying a group
"""
response = self.client.modify_group('ABC123')
self.assertEqual(response['method'], 'POST')
self.assertEqual(response['uri'], '/admin/v1/groups/ABC123')
self.assertEqual(util.params_to_dict(response['body']),
{'account_id': [self.client.account_id]})
if __name__ == '__main__':
unittest.main()
| 40.039877
| 80
| 0.586532
|
a8adda23c6be1cbba71574297ec354a1dbe487b7
| 5,389
|
py
|
Python
|
plugins/modules/ovirt_group.py
|
mnecas/ansible_collections_ovirt
|
99d97cc72fafa580e1934d44dd751a94ba840718
|
[
"Apache-2.0"
] | null | null | null |
plugins/modules/ovirt_group.py
|
mnecas/ansible_collections_ovirt
|
99d97cc72fafa580e1934d44dd751a94ba840718
|
[
"Apache-2.0"
] | null | null | null |
plugins/modules/ovirt_group.py
|
mnecas/ansible_collections_ovirt
|
99d97cc72fafa580e1934d44dd751a94ba840718
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_group
short_description: Module to manage groups in oVirt/RHV
version_added: "2.3"
author: "Ondra Machacek (@machacekondra)"
description:
- "Module to manage groups in oVirt/RHV"
options:
name:
description:
- "Name of the group to manage."
required: true
state:
description:
- "Should the group be present/absent."
choices: ['present', 'absent']
default: present
authz_name:
description:
- "Authorization provider of the group. In previous versions of oVirt/RHV known as domain."
required: true
aliases: ['domain']
namespace:
description:
- "Namespace of the authorization provider, where group resides."
required: false
extends_documentation_fragment: ovirt
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Add group group1 from authorization provider example.com-authz
- ovirt_group:
name: group1
domain: example.com-authz
# Add group group1 from authorization provider example.com-authz
# In case of multi-domain Active Directory setup, you should pass
# also namespace, so it adds correct group:
- ovirt_group:
name: group1
namespace: dc=ad2,dc=example,dc=com
domain: example.com-authz
# Remove group group1 with authorization provider example.com-authz
- ovirt_group:
state: absent
name: group1
domain: example.com-authz
'''
RETURN = '''
id:
description: ID of the group which is managed
returned: On success if group is found.
type: str
sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
group:
description: "Dictionary of all the group attributes. Group attributes can be found on your oVirt/RHV instance
at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/group."
returned: On success if group is found.
type: dict
'''
import traceback
try:
import ovirtsdk4.types as otypes
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.mnecas.ovirt.plugins.module_utils.ovirt import (
BaseModule,
check_sdk,
check_params,
create_connection,
equal,
ovirt_full_argument_spec,
)
def _group(connection, module):
groups = connection.system_service().groups_service().list(
search="name={name}".format(
name=module.params['name'],
)
)
# If found more groups, filter them by namespace and authz name:
# (filtering here, as oVirt/RHV backend doesn't support it)
if len(groups) > 1:
groups = [
g for g in groups if (
equal(module.params['namespace'], g.namespace) and
equal(module.params['authz_name'], g.domain.name)
)
]
return groups[0] if groups else None
class GroupsModule(BaseModule):
def build_entity(self):
return otypes.Group(
domain=otypes.Domain(
name=self._module.params['authz_name']
),
name=self._module.params['name'],
namespace=self._module.params['namespace'],
)
def main():
argument_spec = ovirt_full_argument_spec(
state=dict(
choices=['present', 'absent'],
default='present',
),
name=dict(required=True),
authz_name=dict(required=True, aliases=['domain']),
namespace=dict(default=None),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
check_sdk(module)
check_params(module)
try:
auth = module.params.pop('auth')
connection = create_connection(auth)
groups_service = connection.system_service().groups_service()
groups_module = GroupsModule(
connection=connection,
module=module,
service=groups_service,
)
group = _group(connection, module)
state = module.params['state']
if state == 'present':
ret = groups_module.create(entity=group)
elif state == 'absent':
ret = groups_module.remove(entity=group)
module.exit_json(**ret)
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
finally:
connection.close(logout=auth.get('token') is None)
if __name__ == "__main__":
main()
| 28.973118
| 114
| 0.652997
|
b4668175155dbf7e60bdc884ce3307fde13019d5
| 4,128
|
py
|
Python
|
test/functional/feature_minchainwork.py
|
patentcoin/patentcoin
|
5dd4ba6cf18946be6be0f564952a55358c81fe95
|
[
"MIT"
] | 1
|
2021-12-11T19:14:32.000Z
|
2021-12-11T19:14:32.000Z
|
test/functional/feature_minchainwork.py
|
Cryptogits/patentcoin
|
5dd4ba6cf18946be6be0f564952a55358c81fe95
|
[
"MIT"
] | null | null | null |
test/functional/feature_minchainwork.py
|
Cryptogits/patentcoin
|
5dd4ba6cf18946be6be0f564952a55358c81fe95
|
[
"MIT"
] | 2
|
2020-06-27T10:26:01.000Z
|
2021-12-11T15:33:50.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2017-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test logic for setting nMinimumChainWork on command line.
Nodes don't consider themselves out of "initial block download" until
their active chain has more work than nMinimumChainWork.
Nodes don't download blocks from a peer unless the peer's best known block
has more work than nMinimumChainWork.
While in initial block download, nodes won't relay blocks to their peers, so
test that this parameter functions as intended by verifying that block relay
only succeeds past a given node once its nMinimumChainWork has been exceeded.
"""
import time
from test_framework.test_framework import PatentcoinTestFramework
from test_framework.util import connect_nodes, assert_equal
# 2 hashes required per regtest block (with no difficulty adjustment)
REGTEST_WORK_PER_BLOCK = 2
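# Illustrative arithmetic (added comment, not part of the original test): with
# -minimumchainwork=0x65 (decimal 101), REGTEST_WORK_PER_BLOCK = 2 and the genesis
# block already contributing 2 units of work, node0 has to mine
# int((101 - 2) / 2) = 49 blocks before node1's minimum chain work is exceeded,
# which is exactly what run_test() computes for num_blocks_to_generate below.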
class MinimumChainWorkTest(PatentcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
self.extra_args = [[], ["-minimumchainwork=0x65"], ["-minimumchainwork=0x65"]]
self.node_min_work = [0, 101, 101]
def setup_network(self):
# This test relies on the chain setup being:
# node0 <- node1 <- node2
# Before leaving IBD, nodes prefer to download blocks from outbound
# peers, so ensure that we're mining on an outbound peer and testing
# block relay to inbound peers.
self.setup_nodes()
for i in range(self.num_nodes-1):
connect_nodes(self.nodes[i+1], i)
def run_test(self):
# Start building a chain on node0. node2 shouldn't be able to sync until node1's
# minchainwork is exceeded
starting_chain_work = REGTEST_WORK_PER_BLOCK # Genesis block's work
self.log.info("Testing relay across node %d (minChainWork = %d)", 1, self.node_min_work[1])
starting_blockcount = self.nodes[2].getblockcount()
num_blocks_to_generate = int((self.node_min_work[1] - starting_chain_work) / REGTEST_WORK_PER_BLOCK)
self.log.info("Generating %d blocks on node0", num_blocks_to_generate)
hashes = self.nodes[0].generatetoaddress(num_blocks_to_generate,
self.nodes[0].get_deterministic_priv_key().address)
self.log.info("Node0 current chain work: %s", self.nodes[0].getblockheader(hashes[-1])['chainwork'])
# Sleep a few seconds and verify that node2 didn't get any new blocks
# or headers. We sleep, rather than sync_blocks(node0, node1) because
# it's reasonable either way for node1 to get the blocks, or not get
# them (since they're below node1's minchainwork).
time.sleep(3)
self.log.info("Verifying node 2 has no more blocks than before")
self.log.info("Blockcounts: %s", [n.getblockcount() for n in self.nodes])
# Node2 shouldn't have any new headers yet, because node1 should not
# have relayed anything.
assert_equal(len(self.nodes[2].getchaintips()), 1)
assert_equal(self.nodes[2].getchaintips()[0]['height'], 0)
assert self.nodes[1].getbestblockhash() != self.nodes[0].getbestblockhash()
assert_equal(self.nodes[2].getblockcount(), starting_blockcount)
self.log.info("Generating one more block")
self.nodes[0].generatetoaddress(1, self.nodes[0].get_deterministic_priv_key().address)
self.log.info("Verifying nodes are all synced")
# Because nodes in regtest are all manual connections (eg using
# addnode), node1 should not have disconnected node0. If not for that,
# we'd expect node1 to have disconnected node0 for serving an
# insufficient work chain, in which case we'd need to reconnect them to
# continue the test.
self.sync_all()
self.log.info("Blockcounts: %s", [n.getblockcount() for n in self.nodes])
if __name__ == '__main__':
MinimumChainWorkTest().main()
| 45.362637
| 108
| 0.698643
|
ea944c82bb12f999c40cd26c90c2a596a58bd720
| 1,020
|
py
|
Python
|
src/crypt1.py
|
xiaonanln/python-usaco
|
8f0fef19cb5f89232d985f79d955f0de5ef4e10d
|
[
"MIT"
] | null | null | null |
src/crypt1.py
|
xiaonanln/python-usaco
|
8f0fef19cb5f89232d985f79d955f0de5ef4e10d
|
[
"MIT"
] | null | null | null |
src/crypt1.py
|
xiaonanln/python-usaco
|
8f0fef19cb5f89232d985f79d955f0de5ef4e10d
|
[
"MIT"
] | null | null | null |
"""
ID: isaiahl1
LANG: PYTHON2
TASK: crypt1
"""
TASK = 'crypt1'
def readints(fin):
return tuple(int(x) for x in fin.readline().split())
def readint(fin):
return int(fin.readline())
def main(fin, fout):
N = readint(fin)
digits = readints(fin)
count = 0
print 'digits', digits
for a in digits:
if a == 0: continue
for b in digits:
for c in digits:
for d in digits:
if d == 0: continue
for e in digits:
# abc * de
x = (a*100+b*10+c) * e
y = (a*100+b*10+c) * d
if x >= 1000 or y >= 1000: continue
if not fit(x, digits) or not fit(y, digits): continue
s = y*10 + x
if fit(s, digits):
print a,b,c,d,e, x, y, s
count += 1
print 'result', count
print >>fout, count
def fit(n, digits):
d = n % 10
n //= 10
if d not in digits:
return False
while n:
d = n % 10
n //= 10
if d not in digits:
return False
return True
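# Quick illustration of fit() (added comments only, values are made up):
#   fit(124, (1, 2, 3, 4, 6)) -> True   every decimal digit of 124 is in the digit set
#   fit(150, (1, 2, 3, 4, 6)) -> False  the digits 5 and 0 are not in the digit set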
fin = open (TASK + '.in', 'r')
fout = open (TASK + '.out', 'w')
with fin:
with fout:
main(fin, fout)
| 17
| 59
| 0.562745
|
a3f8ffe9c342d0388cb2c63ce70edfa290ec2731
| 3,795
|
py
|
Python
|
alexber/rpsgame/app.py
|
alex-ber/RocketPaperScissorsGame
|
c38c82a17d508c892c686454864ee2356f441d1a
|
[
"BSD-2-Clause"
] | null | null | null |
alexber/rpsgame/app.py
|
alex-ber/RocketPaperScissorsGame
|
c38c82a17d508c892c686454864ee2356f441d1a
|
[
"BSD-2-Clause"
] | 1
|
2019-03-20T10:35:36.000Z
|
2019-03-21T12:46:44.000Z
|
alexber/rpsgame/app.py
|
alex-ber/RocketPaperScissorsGame
|
c38c82a17d508c892c686454864ee2356f441d1a
|
[
"BSD-2-Clause"
] | null | null | null |
import logging.config
from collections import OrderedDict
from alexber.rpsgame import app_conf as conf
from alexber.rpsgame.app_create_instance import importer, create_instance
def _create_player_factory(player_d):
assert player_d is not None
def factory_player():
name_player = player_d.get(conf.NAME_PLAYER_KEY, None)
player = create_instance(**player_d)
return name_player, player
return factory_player
def _mask_engine_params(engine_d):
d = OrderedDict()
for name, value in engine_d.items():
if not name.startswith("init."):
logger.debug(f"Skipping {name}, doesn't have prefix 'init'")
continue
real_name = name[len("init."):]
d[real_name] = value
return d
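# Illustrative example of _mask_engine_params() (added comment, hypothetical keys):
#   {'init.rounds': 3, 'cls': 'SomeEngine'} -> OrderedDict([('rounds', 3)])
# keys without the 'init.' prefix are skipped; the prefix is stripped from the rest.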
def run(**kwargs):
"""
    This method receives all conf params in kwargs.
    All unexpected values will be ignored.
    It is expected that value types are correct.
    No conversion on the values of the dict kwargs will be applied.
    This method will build playerA, playerB, and the engine,
    and run the engine with these players.
    Please, consult alexber.rpsgame.app_conf in order to construct kwargs.
    Command-line arguments and ini-files are supported out of the box.
    JSON/YML, etc. can be easily handled as well.
"""
    # filter out unrelated params without implicit_convert
    # if you want to convert values, do it explicitly
kwargs = conf.parse_dict(kwargs, implicit_convert=False)
engine_d = kwargs.pop(conf.ENGINE_KEY, {})
engine_str = engine_d.pop(conf.CLS_KEY, None)
if engine_str is None:
engine_str = conf.DEFAULT_ENGINE_CLS
engine_cls = importer(engine_str)
p_engine_d = _mask_engine_params(engine_d)
kwargs.update(p_engine_d)
playera_d = kwargs.pop(conf.PLAYER_A_KEY, {})
playerb_d = kwargs.pop(conf.PLAYER_B_KEY, {})
the_engine = engine_cls.from_configuration(playera_factory=_create_player_factory(playera_d),
playerb_factory=_create_player_factory(playerb_d),
**kwargs)
the_engine.play()
def main(args=None):
"""
main method
:param args: if not None, suppresses sys.args
"""
dd = conf.parse_config(args)
run(**dd)
#see https://terryoy.github.io/2016/05/short-ref-python-logging.html
_config = {
"log_config": {
"version": 1,
"formatters": {
"brief": {
"format": "%(message)s",
},
"detail": {
"format": "%(asctime)-15s %(levelname)s [%(name)s.%(funcName)s] %(message)s",
"datefmt": '%Y-%m-%d %H:%M:%S',
},
},
"handlers": {
"console": {
"class": "logging.StreamHandler",
"level": "DEBUG",
"formatter": "brief",
},
# "file": {
# "class": "logging.handlers.RotatingFileHandler",
# "filename": "dev.log",
# "level": "DEBUG",
# "formatter": "detail",
# },
},
"root": {
# "handlers": ["console", "file"],
"handlers": ["console"],
"level": "DEBUG",
},
"loggers": {
"requests": {
# "handlers": ["file"],
"handlers": ["console"],
"level": "DEBUG",
"propagate": False,
}
},
},
}
if __name__ == '__main__':
logging.config.dictConfig(_config["log_config"])
del _config
logger = logging.getLogger(__name__)
main()
| 29.648438
| 97
| 0.549671
|
e643ad353a143ce04f071646c88bd4d22490f221
| 1,544
|
py
|
Python
|
python/books/hard_way/p32/ex39.py
|
ShenJinXiang/example
|
9d3bdf73079092791d3f96d73573ee51d66774ab
|
[
"MIT"
] | null | null | null |
python/books/hard_way/p32/ex39.py
|
ShenJinXiang/example
|
9d3bdf73079092791d3f96d73573ee51d66774ab
|
[
"MIT"
] | null | null | null |
python/books/hard_way/p32/ex39.py
|
ShenJinXiang/example
|
9d3bdf73079092791d3f96d73573ee51d66774ab
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# create a mapping of state to abbreviation
states = {
'Oregon': 'OR',
'Florida': 'FL',
'California': 'CA',
'New York': 'NY',
'Michigan': 'MI'
}
# create a basic set of states and some cities in them
cities = {'CA': 'San Francisco', 'MI': 'Detroit', 'FL': 'Jacksonville', 'NY': 'New York', 'OR': 'Portland'}
# add some more cities
# print out some cities
print('-' * 10)
print("NY State has: ", cities['NY']);
print("OR State has: ", cities['OR']);
# print some states
print('-' * 10)
print("Michigan's abbreviation is: ", states['Michigan'])
print("Florida's abbreviation is: ", states['Florida'])
# do it by using the state then cities dict
print('-' * 10)
print("Michigan has: ", cities[states['Michigan']])
print("Florida has: ", cities[states['Florida']])
# print every state abbreviation
print('-' * 10)
for state, abbrev in list(states.items()):
print(f"{state} is abbreviated {abbrev}")
# print every city in state
print('-' * 10)
for abbrev, city in list(cities.items()):
print(f"{abbrev} has the city {city}")
# now do both at the same time
print('-' * 10)
for state, abbrev in list(states.items()):
print(f"{state} state is abbreviated {abbrev}")
print(f"and has city {cities[abbrev]}")
print('-' * 10)
# safely get a abbreviation by state that might not be there
state = states.get('Texas')
if not state:
print("Sorry, no Texas.")
# get a city with a default value
city = cities.get('TX', 'Does Not Exist')
print(f"The city for the state 'TX' is: {city}")
| 26.62069
| 107
| 0.646373
|
187f7cef9029e014e09c9e1979a02437efc73876
| 5,069
|
py
|
Python
|
tests/gateways/disk/test_delete.py
|
richmoore1962/conda
|
ef36713bfeca5b9a8505ff8ae6d7899c2d7c6306
|
[
"BSD-3-Clause"
] | 1
|
2017-06-11T01:32:33.000Z
|
2017-06-11T01:32:33.000Z
|
tests/gateways/disk/test_delete.py
|
richmoore1962/conda
|
ef36713bfeca5b9a8505ff8ae6d7899c2d7c6306
|
[
"BSD-3-Clause"
] | 1
|
2019-04-02T23:35:13.000Z
|
2019-04-02T23:35:13.000Z
|
tests/gateways/disk/test_delete.py
|
richmoore1962/conda
|
ef36713bfeca5b9a8505ff8ae6d7899c2d7c6306
|
[
"BSD-3-Clause"
] | 1
|
2020-12-31T08:15:39.000Z
|
2020-12-31T08:15:39.000Z
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
from errno import ENOENT
import os
from os.path import isdir, isfile, islink, join, lexists
import pytest
from conda.compat import TemporaryDirectory
from conda.gateways.disk.create import create_link
from conda.gateways.disk.delete import move_to_trash, rm_rf
from conda.gateways.disk.link import islink, symlink
from conda.gateways.disk.update import touch
from conda.models.enums import LinkType
from .test_permissions import _make_read_only, _try_open, tempdir
def _write_file(path, content):
with open(path, "a") as fh:
fh.write(content)
fh.close()
def test_remove_file():
with tempdir() as td:
test_path = join(td, 'test_path')
touch(test_path)
assert isfile(test_path)
_try_open(test_path)
_make_read_only(test_path)
pytest.raises((IOError, OSError), _try_open, test_path)
assert rm_rf(test_path)
assert not isfile(test_path)
def test_remove_file_to_trash():
with tempdir() as td:
test_path = join(td, 'test_path')
touch(test_path)
assert isfile(test_path)
_try_open(test_path)
_make_read_only(test_path)
pytest.raises((IOError, OSError), _try_open, test_path)
assert rm_rf(test_path)
assert not isfile(test_path)
def test_remove_dir():
with tempdir() as td:
test_path = join(td, 'test_path')
touch(test_path)
_try_open(test_path)
assert isfile(test_path)
assert isdir(td)
assert not islink(test_path)
assert rm_rf(td)
assert rm_rf(test_path)
assert not isdir(td)
assert not isfile(test_path)
assert not lexists(test_path)
def test_remove_link_to_file():
with tempdir() as td:
dst_link = join(td, "test_link")
src_file = join(td, "test_file")
_write_file(src_file, "welcome to the ministry of silly walks")
symlink(src_file, dst_link)
assert isfile(src_file)
assert not islink(src_file)
assert islink(dst_link)
assert rm_rf(dst_link)
assert isfile(src_file)
assert rm_rf(src_file)
assert not isfile(src_file)
assert not islink(dst_link)
assert not lexists(dst_link)
def test_remove_link_to_dir():
with tempdir() as td:
dst_link = join(td, "test_link")
src_dir = join(td, "test_dir")
_write_file(src_dir, "welcome to the ministry of silly walks")
symlink(src_dir, dst_link)
assert not islink(src_dir)
assert islink(dst_link)
assert rm_rf(dst_link)
assert not isdir(dst_link)
assert not islink(dst_link)
assert rm_rf(src_dir)
assert not isdir(src_dir)
assert not islink(src_dir)
assert not lexists(dst_link)
def test_rm_rf_does_not_follow_symlinks():
with TemporaryDirectory() as tmp:
# make a file in some temp folder
real_file = os.path.join(tmp, 'testfile')
with open(real_file, 'w') as f:
f.write('weee')
# make a subfolder
subdir = os.path.join(tmp, 'subfolder')
os.makedirs(subdir)
# link to the file in the subfolder
link_path = join(subdir, 'file_link')
create_link(real_file, link_path, link_type=LinkType.softlink)
assert islink(link_path)
# rm_rf the subfolder
rm_rf(subdir)
# assert that the file still exists in the root folder
assert os.path.isfile(real_file)
def test_move_to_trash():
with tempdir() as td:
test_path = join(td, 'test_path')
touch(test_path)
_try_open(test_path)
assert isdir(td)
assert isfile(test_path)
move_to_trash(td, test_path)
assert not isfile(test_path)
def test_move_path_to_trash_couldnt():
from conda.gateways.disk.delete import move_path_to_trash
with tempdir() as td:
test_path = join(td, 'test_path')
touch(test_path)
_try_open(test_path)
assert isdir(td)
assert isfile(test_path)
assert move_path_to_trash(test_path)
def test_backoff_unlink():
from conda.gateways.disk.delete import backoff_rmdir
with tempdir() as td:
test_path = join(td, 'test_path')
touch(test_path)
_try_open(test_path)
assert isdir(td)
backoff_rmdir(td)
assert not isdir(td)
def test_backoff_unlink_doesnt_exist():
from conda.gateways.disk.delete import backoff_rmdir
with tempdir() as td:
test_path = join(td, 'test_path')
touch(test_path)
try:
backoff_rmdir(join(test_path, 'some', 'path', 'in', 'utopia'))
except Exception as e:
            assert e.errno == ENOENT
def test_try_rmdir_all_empty_doesnt_exist():
from conda.gateways.disk.delete import try_rmdir_all_empty
with tempdir() as td:
assert isdir(td)
try_rmdir_all_empty(td)
assert not isdir(td)
| 30.353293
| 82
| 0.657526
|
8f800cd19c6adff568a5298e5cc74bbe6dc6cc9c
| 2,112
|
py
|
Python
|
pyoutline/outline/executor.py
|
srbhss/OpenCue
|
c0b8aeab0ea324c3c46541d98b4a968230c668eb
|
[
"Apache-2.0"
] | null | null | null |
pyoutline/outline/executor.py
|
srbhss/OpenCue
|
c0b8aeab0ea324c3c46541d98b4a968230c668eb
|
[
"Apache-2.0"
] | 2
|
2019-10-02T23:58:20.000Z
|
2020-03-30T19:18:48.000Z
|
pyoutline/outline/executor.py
|
srbhss/OpenCue
|
c0b8aeab0ea324c3c46541d98b4a968230c668eb
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2018 Sony Pictures Imageworks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple python thread pool."""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import range
from builtins import object
import queue
import threading
import logging
__all__ = ["TaskExecutor"]
logger = logging.getLogger("outline.executor")
class TaskExecutor(object):
def __init__(self, threads):
self.__queue = queue.Queue()
for i in range(0, threads):
logger.debug("executor creating thread #%d" % i)
t = threading.Thread(target=self.worker)
t.setDaemon(True)
t.start()
def execute(self, callable_, *args):
"""
Queue up a function for execution by the thread pool.
"""
self.__queue.put((callable_, args))
def wait(self):
"""
        Wait until all queued work in the pool is complete, then
        return.
"""
self.__queue.join()
def worker(self):
"""
        Worker-thread loop: pull queued (callable, args) items off the
        queue and execute them.
"""
while True:
item = self.__queue.get()
try:
if item[1]:
item[0](*item[1])
else:
item[0]()
except Exception as e:
logger.warn("Worker thread exception: %s" % e)
self.__queue.task_done()
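# Minimal usage sketch (added comment, not part of the original module): the pool
# is created with a fixed number of daemon threads, work is queued via execute(),
# and wait() blocks until the queue has been drained.
#
#   def _job(name):
#       logger.info("running %s", name)
#
#   executor = TaskExecutor(4)
#   for n in ("a", "b", "c"):
#       executor.execute(_job, n)
#   executor.wait()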
| 28.16
| 75
| 0.633523
|
8006931c21ea8c3f27c4af3630c7fc44db363826
| 1,311
|
py
|
Python
|
swtstore/classes/views/app.py
|
janastu/swtstore
|
7326138bf2fbf2a4ed8c7300c68092f91709dfc2
|
[
"BSD-2-Clause"
] | 2
|
2015-04-28T00:35:21.000Z
|
2016-02-11T19:31:15.000Z
|
swtstore/classes/views/app.py
|
janastu/swtstore
|
7326138bf2fbf2a4ed8c7300c68092f91709dfc2
|
[
"BSD-2-Clause"
] | 9
|
2015-02-02T11:24:23.000Z
|
2017-12-29T07:49:07.000Z
|
swtstore/classes/views/app.py
|
janastu/swtstore
|
7326138bf2fbf2a4ed8c7300c68092f91709dfc2
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# classes/views/apps.py
from flask import Module, request, render_template, redirect,\
url_for, abort
from werkzeug.security import gen_salt
from swtstore.classes.models import Client, User
from swtstore.classes.utils import urlnorm
app = Module(__name__)
@app.route('/register', methods=['GET', 'POST'])
def register():
current_user = User.getCurrentUser()
if current_user is None:
return redirect(url_for('frontend.index'))
if request.method == 'GET':
return render_template('app/register.html')
elif request.method == 'POST':
req_fields = ['name', 'host_url', 'redirect_uris', 'scopes']
for field in req_fields:
if not request.form.get(field):
abort(404)
new_app = Client(
id=gen_salt(40),
client_secret=gen_salt(50),
name=request.form.get('name'),
description=request.form.get('desc'),
user_id=current_user.id,
_host_url=request.form.get('host_url'),
_redirect_uris=urlnorm(request.form.get('redirect_uris')),
_default_scopes=' '.join(request.form.get('scopes').split(',')),
_is_private=False
)
new_app.persist()
return redirect(url_for('user.myApps'))
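# Illustrative request sketch (added comment; the host below is hypothetical and any
# module/blueprint prefix would prepend to '/register'):
#
#   POST /register
#   name=My App&host_url=https://app.example.org&redirect_uris=https://app.example.org/cb&scopes=read,write&desc=optional
#
# Missing any required field (name, host_url, redirect_uris, scopes) aborts with 404;
# on success the new Client is persisted and the user is redirected to user.myApps.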
| 29.133333
| 76
| 0.623188
|
856327a242acb8b83b1abe381a66212bee36dc9c
| 71,690
|
py
|
Python
|
core/controllers/suggestion_test.py
|
EishaMazhar/oppia
|
ab4f3cf20764b27f567798e4b1184471aaf7f73b
|
[
"Apache-2.0"
] | 1
|
2020-09-30T17:55:05.000Z
|
2020-09-30T17:55:05.000Z
|
core/controllers/suggestion_test.py
|
gitter-badger/oppia
|
7d8e659264582d7ce74bc6c139e597b82bca0e04
|
[
"Apache-2.0"
] | 1
|
2020-05-27T06:08:17.000Z
|
2020-05-27T06:08:17.000Z
|
core/controllers/suggestion_test.py
|
gitter-badger/oppia
|
7d8e659264582d7ce74bc6c139e597b82bca0e04
|
[
"Apache-2.0"
] | 1
|
2018-03-20T14:12:31.000Z
|
2018-03-20T14:12:31.000Z
|
# coding: utf-8
#
# Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for suggestion controllers."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import os
from constants import constants
from core.domain import exp_domain
from core.domain import exp_fetchers
from core.domain import exp_services
from core.domain import feedback_services
from core.domain import fs_domain
from core.domain import question_domain
from core.domain import question_services
from core.domain import rights_domain
from core.domain import rights_manager
from core.domain import skill_services
from core.domain import state_domain
from core.domain import story_domain
from core.domain import story_services
from core.domain import suggestion_services
from core.domain import topic_domain
from core.domain import topic_services
from core.domain import user_services
from core.platform import models
from core.tests import test_utils
import feconf
import python_utils
(suggestion_models, feedback_models) = models.Registry.import_models([
models.NAMES.suggestion, models.NAMES.feedback])
class SuggestionUnitTests(test_utils.GenericTestBase):
IMAGE_UPLOAD_URL_PREFIX = '/createhandler/imageupload'
ASSET_HANDLER_URL_PREFIX = '/assetsdevhandler'
EXP_ID = 'exp1'
TRANSLATION_LANGUAGE_CODE = 'en'
AUTHOR_EMAIL = 'author@example.com'
AUTHOR_EMAIL_2 = 'author2@example.com'
REVIEWER_EMAIL = 'reviewer@example.com'
TRANSLATOR_EMAIL = 'translator@example.com'
NORMAL_USER_EMAIL = 'user@example.com'
def setUp(self):
super(SuggestionUnitTests, self).setUp()
self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
self.signup(self.AUTHOR_EMAIL, 'author')
self.signup(self.AUTHOR_EMAIL_2, 'author2')
self.signup(self.NORMAL_USER_EMAIL, 'normalUser')
self.signup(self.REVIEWER_EMAIL, 'reviewer')
self.signup(self.TRANSLATOR_EMAIL, 'translator')
self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
self.author_id = self.get_user_id_from_email(self.AUTHOR_EMAIL)
self.author_id_2 = self.get_user_id_from_email(self.AUTHOR_EMAIL_2)
self.reviewer_id = self.get_user_id_from_email(self.REVIEWER_EMAIL)
self.translator_id = self.get_user_id_from_email(self.TRANSLATOR_EMAIL)
self.set_admins([self.ADMIN_USERNAME])
user_services.allow_user_to_review_translation_in_language(
self.reviewer_id, 'hi')
self.editor = user_services.UserActionsInfo(self.editor_id)
# Login and create exploration and suggestions.
self.login(self.EDITOR_EMAIL)
exploration = (
self.save_new_linear_exp_with_state_names_and_interactions(
self.EXP_ID, self.editor_id, ['State 1', 'State 2', 'State 3'],
['TextInput'], category='Algebra'))
self.old_content = state_domain.SubtitledHtml(
'content', '<p>old content html</p>').to_dict()
exploration.states['State 1'].update_content(
state_domain.SubtitledHtml.from_dict(self.old_content))
exploration.states['State 2'].update_content(
state_domain.SubtitledHtml.from_dict(self.old_content))
exploration.states['State 3'].update_content(
state_domain.SubtitledHtml.from_dict(self.old_content))
exp_services._save_exploration(self.editor_id, exploration, '', []) # pylint: disable=protected-access
rights_manager.publish_exploration(self.editor, self.EXP_ID)
rights_manager.assign_role_for_exploration(
self.editor, self.EXP_ID, self.owner_id, rights_domain.ROLE_EDITOR)
self.new_content = state_domain.SubtitledHtml(
'content', '<p>new content html</p>').to_dict()
self.resubmit_change_content = state_domain.SubtitledHtml(
'content', '<p>resubmit change content html</p>').to_dict()
self.logout()
self.login(self.AUTHOR_EMAIL)
csrf_token = self.get_new_csrf_token()
self.post_json(
'%s/' % feconf.SUGGESTION_URL_PREFIX, {
'suggestion_type': (
suggestion_models
.SUGGESTION_TYPE_EDIT_STATE_CONTENT),
'target_type': (
suggestion_models.TARGET_TYPE_EXPLORATION),
'target_id': 'exp1',
'target_version_at_submission': exploration.version,
'change': {
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': exp_domain.STATE_PROPERTY_CONTENT,
'state_name': 'State 1',
'old_value': self.old_content,
'new_value': self.new_content
},
'description': 'change to state 1',
}, csrf_token=csrf_token)
self.logout()
self.login(self.AUTHOR_EMAIL_2)
csrf_token = self.get_new_csrf_token()
self.post_json(
'%s/' % feconf.SUGGESTION_URL_PREFIX, {
'suggestion_type': (
suggestion_models
.SUGGESTION_TYPE_EDIT_STATE_CONTENT),
'target_type': (
suggestion_models.TARGET_TYPE_EXPLORATION),
'target_id': 'exp1',
'target_version_at_submission': exploration.version,
'change': {
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': exp_domain.STATE_PROPERTY_CONTENT,
'state_name': 'State 2',
'old_value': self.old_content,
'new_value': self.new_content
},
'description': 'change to state 2',
}, csrf_token=csrf_token)
self.post_json(
'%s/' % feconf.SUGGESTION_URL_PREFIX, {
'suggestion_type': (
suggestion_models
.SUGGESTION_TYPE_EDIT_STATE_CONTENT),
'target_type': (
suggestion_models.TARGET_TYPE_EXPLORATION),
'target_id': 'exp1',
'target_version_at_submission': exploration.version,
'change': {
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': exp_domain.STATE_PROPERTY_CONTENT,
'state_name': 'State 3',
'old_value': self.old_content,
'new_value': self.new_content
},
'description': 'change to state 3',
}, csrf_token=csrf_token)
self.logout()
self.login(self.TRANSLATOR_EMAIL)
csrf_token = self.get_new_csrf_token()
self.post_json(
'%s/' % feconf.SUGGESTION_URL_PREFIX, {
'suggestion_type': (
suggestion_models.SUGGESTION_TYPE_TRANSLATE_CONTENT),
'target_type': suggestion_models.TARGET_TYPE_EXPLORATION,
'target_id': 'exp1',
'target_version_at_submission': exploration.version,
'change': {
'cmd': exp_domain.CMD_ADD_TRANSLATION,
'state_name': 'State 3',
'content_id': 'content',
'language_code': 'hi',
'content_html': '<p>old content html</p>',
'translation_html': '<p>In Hindi</p>'
},
'description': 'change to state 3',
}, csrf_token=csrf_token)
self.logout()
def test_create_suggestion(self):
self.login(self.AUTHOR_EMAIL_2)
csrf_token = self.get_new_csrf_token()
exploration = exp_fetchers.get_exploration_by_id(self.EXP_ID)
self.post_json(
'%s/' % feconf.SUGGESTION_URL_PREFIX, {
'suggestion_type': (
suggestion_models
.SUGGESTION_TYPE_EDIT_STATE_CONTENT),
'target_type': (
suggestion_models.TARGET_TYPE_EXPLORATION),
'target_id': 'exp1',
'target_version_at_submission': exploration.version,
'change': {
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': exp_domain.STATE_PROPERTY_CONTENT,
'state_name': 'State 3',
'new_value': self.new_content
},
'description': 'change again to state 3',
}, csrf_token=csrf_token)
suggestions = self.get_json(
'%s?author_id=%s' % (
feconf.SUGGESTION_LIST_URL_PREFIX,
self.author_id_2))['suggestions']
self.assertEqual(len(suggestions), 3)
self.logout()
def test_create_suggestion_invalid_target_version_input(self):
self.login(self.AUTHOR_EMAIL_2)
csrf_token = self.get_new_csrf_token()
response = self.post_json(
'%s/' % feconf.SUGGESTION_URL_PREFIX, {
'suggestion_type': (
suggestion_models
.SUGGESTION_TYPE_EDIT_STATE_CONTENT),
'target_type': (
suggestion_models.TARGET_TYPE_EXPLORATION),
'target_id': 'exp1',
'target_version_at_submission': 'invalid target version',
'change': {
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': exp_domain.STATE_PROPERTY_CONTENT,
'state_name': 'State 3',
'new_value': self.new_content
},
'description': 'change again to state 3',
}, csrf_token=csrf_token, expected_status_int=400)
suggestions = self.get_json(
'%s?author_id=%s' % (
feconf.SUGGESTION_LIST_URL_PREFIX,
self.author_id_2))['suggestions']
self.assertEqual(
response['error'],
'Expected target_version_at_submission to be an int, received <type'
' \'unicode\'>')
self.assertEqual(len(suggestions), 2)
self.logout()
def test_suggestion_to_exploration_handler_with_invalid_suggestion_id(self):
self.login(self.EDITOR_EMAIL)
csrf_token = self.get_new_csrf_token()
suggestion_to_accept = self.get_json(
'%s?author_id=%s' % (
feconf.SUGGESTION_LIST_URL_PREFIX,
self.author_id))['suggestions'][0]
csrf_token = self.get_new_csrf_token()
# Invalid format of suggestion id.
response = self.put_json(
'%s/exploration/%s/%s' % (
feconf.SUGGESTION_ACTION_URL_PREFIX,
suggestion_to_accept['target_id'], 'invalid_suggestion_id'), {
'action': u'reject',
'review_message': u'Rejected!'
}, csrf_token=csrf_token, expected_status_int=400)
self.assertEqual(
response['error'],
'Invalid format for suggestion_id. It must contain 3 parts '
'separated by \'.\'')
csrf_token = self.get_new_csrf_token()
# Suggestion does not exist.
self.put_json(
'%s/exploration/%s/%s' % (
feconf.SUGGESTION_ACTION_URL_PREFIX,
suggestion_to_accept['target_id'],
'exploration.target_id.id'), {
'action': u'reject',
'review_message': u'Rejected!'
}, csrf_token=csrf_token,
expected_status_int=404)
self.logout()
def test_suggestion_to_exploration_handler_with_invalid_target_type(self):
self.login(self.EDITOR_EMAIL)
question_dict = {
'question_state_data': self._create_valid_question_data(
'default_state').to_dict(),
'language_code': 'en',
'question_state_data_schema_version': (
feconf.CURRENT_STATE_SCHEMA_VERSION),
'linked_skill_ids': ['skill_id'],
'inapplicable_skill_misconception_ids': ['skillid12345-1']
}
exp_id = 'new_exp_id'
self.save_new_default_exploration(exp_id, self.editor_id)
suggestion_services.create_suggestion(
suggestion_models.SUGGESTION_TYPE_ADD_QUESTION,
suggestion_models.TARGET_TYPE_TOPIC, exp_id, 1,
self.author_id, {
'cmd': (
question_domain
.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION),
'question_dict': question_dict,
'skill_id': None,
'skill_difficulty': 0.3
}, None)
suggestion_id = suggestion_services.query_suggestions(
[('author_id', self.author_id), (
'target_id', exp_id)])[0].suggestion_id
csrf_token = self.get_new_csrf_token()
response = self.put_json(
'%s/exploration/%s/%s' % (
feconf.SUGGESTION_ACTION_URL_PREFIX, exp_id,
suggestion_id), {
'action': u'reject',
'review_message': u'Rejected!'
}, csrf_token=csrf_token, expected_status_int=400)
self.assertEqual(
response['error'],
'This handler allows actions only on suggestions to explorations.')
self.logout()
def test_suggestion_to_exploration_handler_with_invalid_target_id(self):
self.login(self.EDITOR_EMAIL)
csrf_token = self.get_new_csrf_token()
suggestion_to_accept = self.get_json(
'%s?author_id=%s' % (
feconf.SUGGESTION_LIST_URL_PREFIX,
self.author_id))['suggestions'][0]
self.save_new_default_exploration('exp_id', self.editor_id)
csrf_token = self.get_new_csrf_token()
response = self.put_json(
'%s/exploration/%s/%s' % (
feconf.SUGGESTION_ACTION_URL_PREFIX, 'exp_id',
suggestion_to_accept['suggestion_id']), {
'action': u'reject',
'review_message': u'Rejected!'
}, csrf_token=csrf_token, expected_status_int=400)
self.assertEqual(
response['error'],
'The exploration id provided does not match the exploration id '
'present as part of the suggestion_id')
self.logout()
    def test_owner_of_exploration_cannot_respond_to_own_suggestion(self):
self.login(self.EDITOR_EMAIL)
exp_id = 'new_exp_id'
self.save_new_default_exploration(exp_id, self.editor_id)
new_content = state_domain.SubtitledHtml(
'content', '<p>new content html</p>').to_dict()
change_cmd = {
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': exp_domain.STATE_PROPERTY_CONTENT,
'state_name': 'State 1',
'new_value': new_content
}
suggestion_services.create_suggestion(
suggestion_models.SUGGESTION_TYPE_EDIT_STATE_CONTENT,
suggestion_models.TARGET_TYPE_EXPLORATION, exp_id, 1,
self.editor_id, change_cmd, 'sample description')
suggestion_id = suggestion_services.query_suggestions(
[('author_id', self.editor_id), (
'target_id', exp_id)])[0].suggestion_id
csrf_token = self.get_new_csrf_token()
response = self.put_json(
'%s/exploration/%s/%s' % (
feconf.SUGGESTION_ACTION_URL_PREFIX,
exp_id, suggestion_id), {
'action': u'reject',
'review_message': u'Rejected!'
}, csrf_token=csrf_token, expected_status_int=401)
self.assertEqual(
response['error'], 'You cannot accept/reject your own suggestion.')
self.logout()
def test_suggestion_to_exploration_handler_with_invalid_action(self):
self.login(self.EDITOR_EMAIL)
csrf_token = self.get_new_csrf_token()
suggestion_to_accept = self.get_json(
'%s?author_id=%s' % (
feconf.SUGGESTION_LIST_URL_PREFIX,
self.author_id))['suggestions'][0]
csrf_token = self.get_new_csrf_token()
response = self.put_json(
'%s/exploration/%s/%s' % (
feconf.SUGGESTION_ACTION_URL_PREFIX,
suggestion_to_accept['target_id'],
suggestion_to_accept['suggestion_id']),
{'action': 'invalid_action'}, csrf_token=csrf_token,
expected_status_int=400)
self.assertEqual(
response['error'], 'Invalid action.')
self.logout()
def test_reject_suggestion_to_exploration(self):
self.login(self.EDITOR_EMAIL)
csrf_token = self.get_new_csrf_token()
suggestion_to_reject = self.get_json(
'%s?author_id=%s' % (
feconf.SUGGESTION_LIST_URL_PREFIX,
self.author_id))['suggestions'][0]
suggestion = suggestion_services.get_suggestion_by_id(
suggestion_to_reject['suggestion_id'])
self.assertEqual(
suggestion.status, suggestion_models.STATUS_IN_REVIEW)
csrf_token = self.get_new_csrf_token()
self.put_json('%s/exploration/%s/%s' % (
feconf.SUGGESTION_ACTION_URL_PREFIX,
suggestion_to_reject['target_id'],
suggestion_to_reject['suggestion_id']), {
'action': u'reject',
'review_message': u'Rejected!'
}, csrf_token=csrf_token)
suggestion = suggestion_services.get_suggestion_by_id(
suggestion_to_reject['suggestion_id'])
self.assertEqual(
suggestion.status, suggestion_models.STATUS_REJECTED)
self.logout()
    def test_suggestion_to_exploration_handler_with_long_commit_message(self):
self.login(self.EDITOR_EMAIL)
csrf_token = self.get_new_csrf_token()
suggestion_to_accept = self.get_json(
'%s?author_id=%s' % (
feconf.SUGGESTION_LIST_URL_PREFIX,
self.author_id))['suggestions'][0]
csrf_token = self.get_new_csrf_token()
response = self.put_json('%s/exploration/%s/%s' % (
feconf.SUGGESTION_ACTION_URL_PREFIX,
suggestion_to_accept['target_id'],
suggestion_to_accept['suggestion_id']), {
'action': u'accept',
'commit_message':
u'a' * (feconf.MAX_COMMIT_MESSAGE_LENGTH + 1),
'review_message': u'Accepted'
}, csrf_token=csrf_token, expected_status_int=400)
self.assertEqual(
response['error'],
'Commit messages must be at most 1000 characters long.'
)
def test_accept_suggestion(self):
exploration = exp_fetchers.get_exploration_by_id(self.EXP_ID)
# Test editor can accept successfully.
self.login(self.EDITOR_EMAIL)
csrf_token = self.get_new_csrf_token()
suggestion_to_accept = self.get_json(
'%s?author_id=%s' % (
feconf.SUGGESTION_LIST_URL_PREFIX,
self.author_id))['suggestions'][0]
# By default, when a suggestion is accepted and the recording of scores
# is enabled, the score of the author of that suggestion is increased
# by 1. Therefore, by setting that increment to the minimum score
# required to review, we can ensure that the author of this suggestion
# has a high enough score to review suggestions in this category. This
# will be used to test whether the author can review a suggestion in
# the same category because of the author's high score in a later test.
enable_recording_of_scores_swap = self.swap(
feconf, 'ENABLE_RECORDING_OF_SCORES', True)
increment_score_of_author_swap = self.swap(
suggestion_models, 'INCREMENT_SCORE_OF_AUTHOR_BY',
feconf.MINIMUM_SCORE_REQUIRED_TO_REVIEW)
with enable_recording_of_scores_swap, increment_score_of_author_swap:
csrf_token = self.get_new_csrf_token()
self.put_json('%s/exploration/%s/%s' % (
feconf.SUGGESTION_ACTION_URL_PREFIX,
suggestion_to_accept['target_id'],
suggestion_to_accept['suggestion_id']), {
'action': u'accept',
'commit_message': u'commit message',
'review_message': u'Accepted'
}, csrf_token=csrf_token)
suggestion_post_accept = self.get_json(
'%s?author_id=%s' % (
feconf.SUGGESTION_LIST_URL_PREFIX,
self.author_id))['suggestions'][0]
self.assertEqual(
suggestion_post_accept['status'],
suggestion_models.STATUS_ACCEPTED)
exploration = exp_fetchers.get_exploration_by_id(self.EXP_ID)
self.assertEqual(
exploration.states[suggestion_to_accept[
'change']['state_name']].content.html,
suggestion_to_accept['change']['new_value']['html'])
self.logout()
# Testing user without permissions cannot accept.
self.login(self.NORMAL_USER_EMAIL)
suggestion_to_accept = self.get_json(
'%s?author_id=%s' % (
feconf.SUGGESTION_LIST_URL_PREFIX,
self.author_id_2))['suggestions'][0]
csrf_token = self.get_new_csrf_token()
self.put_json('%s/exploration/%s/%s' % (
feconf.SUGGESTION_ACTION_URL_PREFIX,
suggestion_to_accept['target_id'],
suggestion_to_accept['suggestion_id']), {
'action': u'accept',
'commit_message': u'commit message',
'review_message': u'Accepted'
}, csrf_token=csrf_token, expected_status_int=401)
self.logout()
# Testing that author cannot accept own suggestion.
self.login(self.AUTHOR_EMAIL_2)
suggestion_to_accept = self.get_json(
'%s?author_id=%s' % (
feconf.SUGGESTION_LIST_URL_PREFIX,
self.author_id_2))['suggestions'][0]
csrf_token = self.get_new_csrf_token()
self.put_json('%s/exploration/%s/%s' % (
feconf.SUGGESTION_ACTION_URL_PREFIX,
suggestion_to_accept['target_id'],
suggestion_to_accept['suggestion_id']), {
'action': u'accept',
'commit_message': u'commit message',
'review_message': u'Accepted'
}, csrf_token=csrf_token, expected_status_int=401)
# Testing users with scores above threshold can accept.
# The score of this author was increased to the review threshold amount
# when the editor accepted a suggestion that was authored by this user.
self.login(self.AUTHOR_EMAIL)
csrf_token = self.get_new_csrf_token()
self.put_json('%s/exploration/%s/%s' % (
feconf.SUGGESTION_ACTION_URL_PREFIX,
suggestion_to_accept['target_id'],
suggestion_to_accept['suggestion_id']), {
'action': u'accept',
'commit_message': u'commit message',
'review_message': u'Accepted'
}, csrf_token=csrf_token)
suggestion_post_accept = self.get_json(
'%s?author_id=%s' % (
feconf.SUGGESTION_LIST_URL_PREFIX,
self.author_id_2))['suggestions'][0]
self.assertEqual(
suggestion_post_accept['status'],
suggestion_models.STATUS_ACCEPTED)
self.logout()
# Testing admins can accept suggestions.
self.login(self.ADMIN_EMAIL)
csrf_token = self.get_new_csrf_token()
suggestion_to_accept = self.get_json(
'%s?author_id=%s' % (
feconf.SUGGESTION_LIST_URL_PREFIX,
self.author_id_2))['suggestions'][1]
self.put_json('%s/exploration/%s/%s' % (
feconf.SUGGESTION_ACTION_URL_PREFIX,
suggestion_to_accept['target_id'],
suggestion_to_accept['suggestion_id']), {
'action': u'accept',
'commit_message': u'commit message',
'review_message': u'Accepted'
}, csrf_token=csrf_token)
suggestion_post_accept = self.get_json(
'%s?author_id=%s' % (
feconf.SUGGESTION_LIST_URL_PREFIX,
self.author_id_2))['suggestions'][1]
self.assertEqual(
suggestion_post_accept['status'],
suggestion_models.STATUS_ACCEPTED)
self.logout()
def test_suggestion_list_handler_with_invalid_query_field(self):
response = self.get_json(
'%s?invalid_query_field=value' % (
feconf.SUGGESTION_LIST_URL_PREFIX), expected_status_int=400)
self.assertEqual(
response['error'],
'Not allowed to query on field invalid_query_field')
def test_suggestion_list_handler(self):
suggestions = self.get_json(
'%s?author_id=%s&target_type=%s&target_id=%s' % (
feconf.SUGGESTION_LIST_URL_PREFIX, self.author_id_2,
suggestion_models.TARGET_TYPE_EXPLORATION, self.EXP_ID)
)['suggestions']
self.assertEqual(len(suggestions), 2)
def test_cannot_resubmit_suggestion_with_invalid_suggestion_id(self):
self.login(self.EDITOR_EMAIL)
csrf_token = self.get_new_csrf_token()
response = self.put_json(
'%s/resubmit/%s' % (
feconf.SUGGESTION_ACTION_URL_PREFIX, 'invalid_suggestion_id'), {
'action': u'reject',
'review_message': u'Rejected!'
}, csrf_token=csrf_token, expected_status_int=400)
self.assertEqual(
response['error'], 'No suggestion found with given suggestion id')
def test_resubmit_rejected_suggestion(self):
self.login(self.EDITOR_EMAIL)
csrf_token = self.get_new_csrf_token()
suggestion = suggestion_services.query_suggestions(
[('author_id', self.author_id), ('target_id', self.EXP_ID)])[0]
suggestion_services.reject_suggestion(
suggestion.suggestion_id, self.reviewer_id, 'reject message')
self.logout()
self.login(self.AUTHOR_EMAIL)
csrf_token = self.get_new_csrf_token()
self.put_json('%s/resubmit/%s' % (
feconf.SUGGESTION_ACTION_URL_PREFIX, suggestion.suggestion_id), {
'summary_message': 'summary message',
'action': u'resubmit',
'change': {
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': exp_domain.STATE_PROPERTY_CONTENT,
'state_name': 'State 1',
'new_value': self.resubmit_change_content,
'old_value': self.old_content
}
}, csrf_token=csrf_token)
suggestion = suggestion_services.query_suggestions(
[('author_id', self.author_id), ('target_id', self.EXP_ID)])[0]
self.assertEqual(
suggestion.status, suggestion_models.STATUS_IN_REVIEW)
self.assertEqual(
suggestion.change.new_value['html'],
self.resubmit_change_content['html'])
self.assertEqual(
suggestion.change.cmd, exp_domain.CMD_EDIT_STATE_PROPERTY)
self.assertEqual(
suggestion.change.property_name, exp_domain.STATE_PROPERTY_CONTENT)
self.assertEqual(
suggestion.change.state_name, 'State 1')
self.logout()
def test_translation_accept_suggestion_by_reviewer(self):
# Test reviewer can accept successfully.
self.login(self.REVIEWER_EMAIL)
csrf_token = self.get_new_csrf_token()
suggestion_to_accept = self.get_json(
'%s?author_id=%s' % (
feconf.SUGGESTION_LIST_URL_PREFIX,
self.translator_id))['suggestions'][0]
csrf_token = self.get_new_csrf_token()
self.put_json('%s/exploration/%s/%s' % (
feconf.SUGGESTION_ACTION_URL_PREFIX,
suggestion_to_accept['target_id'],
suggestion_to_accept['suggestion_id']), {
'action': u'accept',
'commit_message': u'commit message',
'review_message': u'Accepted'
}, csrf_token=csrf_token)
suggestion_post_accept = self.get_json(
'%s?author_id=%s' % (
feconf.SUGGESTION_LIST_URL_PREFIX,
self.translator_id))['suggestions'][0]
self.assertEqual(
suggestion_post_accept['status'],
suggestion_models.STATUS_ACCEPTED)
self.logout()
def test_translation_suggestion_creation_with_new_images(self):
exp_id = '12345678exp1'
exploration = (
self.save_new_linear_exp_with_state_names_and_interactions(
exp_id, self.editor_id, ['State 1'],
['EndExploration'], category='Algebra'))
state_content_dict = {
'content_id': 'content',
'html': (
'<oppia-noninteractive-image filepath-with-value='
'""img.png"" caption-with-value="""" '
'alt-with-value=""Image"">'
'</oppia-noninteractive-image>')
}
self.login(self.EDITOR_EMAIL)
csrf_token = self.get_new_csrf_token()
with python_utils.open_file(
os.path.join(feconf.TESTS_DATA_DIR, 'img.png'),
'rb', encoding=None) as f:
raw_image = f.read()
self.post_json(
'%s/exploration/%s' % (self.IMAGE_UPLOAD_URL_PREFIX, exp_id),
{'filename': 'img.png'},
csrf_token=csrf_token,
upload_files=(('image', 'unused_filename', raw_image),))
exp_services.update_exploration(
self.editor_id, exp_id, [exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': exp_domain.STATE_PROPERTY_CONTENT,
'state_name': 'State 1',
'new_value': state_content_dict
})], 'Changes content.')
rights_manager.publish_exploration(self.editor, exp_id)
exploration = exp_fetchers.get_exploration_by_id(exp_id)
text_to_translate = exploration.states['State 1'].content.html
self.logout()
fs = fs_domain.AbstractFileSystem(
fs_domain.GcsFileSystem(feconf.ENTITY_TYPE_EXPLORATION, exp_id))
self.assertTrue(fs.isfile('image/img.png'))
self.login(self.TRANSLATOR_EMAIL)
csrf_token = self.get_new_csrf_token()
self.post_json(
'%s/' % feconf.SUGGESTION_URL_PREFIX, {
'suggestion_type': (
suggestion_models.SUGGESTION_TYPE_TRANSLATE_CONTENT),
'target_type': suggestion_models.TARGET_TYPE_EXPLORATION,
'target_id': exp_id,
'target_version_at_submission': exploration.version,
'change': {
'cmd': exp_domain.CMD_ADD_TRANSLATION,
'state_name': 'State 1',
'content_id': 'content',
'language_code': 'hi',
'content_html': text_to_translate,
'translation_html': (
'<oppia-noninteractive-image filepath-with-value='
'""translation_image.png"" '
'caption-with-value="""" '
'alt-with-value=""Image"">'
'</oppia-noninteractive-image>')
},
}, csrf_token=csrf_token,
upload_files=(
('translation_image.png', 'translation_image.png', raw_image), )
)
fs = fs_domain.AbstractFileSystem(
fs_domain.GcsFileSystem(
feconf.IMAGE_CONTEXT_EXPLORATION_SUGGESTIONS, exp_id))
self.assertTrue(fs.isfile('image/img.png'))
self.assertTrue(fs.isfile('image/img_compressed.png'))
self.assertTrue(fs.isfile('image/translation_image.png'))
self.assertTrue(fs.isfile('image/img_compressed.png'))
suggestion_to_accept = self.get_json(
'%s?author_id=%s' % (
feconf.SUGGESTION_LIST_URL_PREFIX,
self.translator_id))['suggestions'][0]
self.logout()
self.login(self.EDITOR_EMAIL)
csrf_token = self.get_new_csrf_token()
self.put_json('%s/exploration/%s/%s' % (
feconf.SUGGESTION_ACTION_URL_PREFIX,
suggestion_to_accept['target_id'],
suggestion_to_accept['suggestion_id']), {
'action': u'accept',
'commit_message': u'Translated content of State 1',
'review_message': u'This looks good!',
}, csrf_token=csrf_token)
fs = fs_domain.AbstractFileSystem(
fs_domain.GcsFileSystem(feconf.ENTITY_TYPE_EXPLORATION, exp_id))
self.assertTrue(fs.isfile('image/img.png'))
self.assertTrue(fs.isfile('image/translation_image.png'))
self.assertTrue(fs.isfile('image/img_compressed.png'))
class QuestionSuggestionTests(test_utils.GenericTestBase):
AUTHOR_EMAIL = 'author@example.com'
AUTHOR_EMAIL_2 = 'author2@example.com'
# Needs to be 12 characters long.
SKILL_ID = 'skill1234567'
SKILL_DESCRIPTION = 'skill to link question to'
def setUp(self):
super(QuestionSuggestionTests, self).setUp()
self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
self.signup(self.AUTHOR_EMAIL, 'author')
self.admin_id = self.get_user_id_from_email(self.ADMIN_EMAIL)
self.author_id = self.get_user_id_from_email(self.AUTHOR_EMAIL)
self.set_admins([self.ADMIN_USERNAME])
self.save_new_skill(
self.SKILL_ID, self.admin_id, description=self.SKILL_DESCRIPTION)
self.question_dict = {
'question_state_data': self._create_valid_question_data(
'default_state').to_dict(),
'language_code': 'en',
'question_state_data_schema_version': (
feconf.CURRENT_STATE_SCHEMA_VERSION),
'linked_skill_ids': [self.SKILL_ID],
'inapplicable_skill_misconception_ids': ['skillid12345-1']
}
self.login(self.AUTHOR_EMAIL)
csrf_token = self.get_new_csrf_token()
self.post_json(
'%s/' % feconf.SUGGESTION_URL_PREFIX, {
'suggestion_type': (
suggestion_models.SUGGESTION_TYPE_ADD_QUESTION),
'target_type': suggestion_models.TARGET_TYPE_SKILL,
'target_id': self.SKILL_ID,
'target_version_at_submission': 1,
'change': {
'cmd': (
question_domain
.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION),
'question_dict': self.question_dict,
'skill_id': self.SKILL_ID,
'skill_difficulty': 0.3
},
'description': 'Add new question to skill'
}, csrf_token=csrf_token)
self.logout()
def test_query_question_suggestions(self):
suggestions = self.get_json(
'%s?suggestion_type=%s' % (
feconf.SUGGESTION_LIST_URL_PREFIX,
suggestion_models.SUGGESTION_TYPE_ADD_QUESTION)
)['suggestions']
self.assertEqual(len(suggestions), 1)
suggestion = suggestions[0]
self.assertEqual(
suggestion['suggestion_type'],
suggestion_models.SUGGESTION_TYPE_ADD_QUESTION)
self.assertEqual(suggestion['target_id'], self.SKILL_ID)
self.assertEqual(
suggestion['target_type'], suggestion_models.TARGET_TYPE_SKILL)
self.assertEqual(
suggestion['change']['cmd'],
question_domain.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION)
def test_accept_question_suggestion(self):
suggestion_to_accept = self.get_json(
'%s?suggestion_type=%s' % (
feconf.SUGGESTION_LIST_URL_PREFIX,
suggestion_models.SUGGESTION_TYPE_ADD_QUESTION)
)['suggestions'][0]
self.login(self.ADMIN_EMAIL)
csrf_token = self.get_new_csrf_token()
with self.swap(constants, 'ENABLE_NEW_STRUCTURE_VIEWER_UPDATES', True):
self.put_json('%s/skill/%s/%s' % (
feconf.SUGGESTION_ACTION_URL_PREFIX,
suggestion_to_accept['target_id'],
suggestion_to_accept['suggestion_id']), {
'action': u'accept',
'commit_message': u'commit message',
'review_message': u'This looks good!',
'skill_id': self.SKILL_ID
}, csrf_token=csrf_token)
suggestion_post_accept = self.get_json(
'%s?suggestion_type=%s' % (
feconf.SUGGESTION_LIST_URL_PREFIX,
suggestion_models.SUGGESTION_TYPE_ADD_QUESTION)
)['suggestions'][0]
self.assertEqual(
suggestion_post_accept['status'],
suggestion_models.STATUS_ACCEPTED)
(
questions, merged_question_skill_links, _) = (
question_services.get_displayable_question_skill_link_details(
1, [self.SKILL_ID], ''))
self.assertEqual(len(questions), 1)
self.assertEqual(
merged_question_skill_links[0].skill_descriptions,
[self.SKILL_DESCRIPTION])
self.assertEqual(
merged_question_skill_links[0].skill_difficulties, [0.3])
self.assertEqual(
questions[0].question_content,
self.question_dict['question_state_data']['content']['html']
)
thread_messages = feedback_services.get_messages(
suggestion_to_accept['suggestion_id'])
last_message = thread_messages[len(thread_messages) - 1]
self.assertEqual(last_message.text, 'This looks good!')
def test_suggestion_creation_with_valid_images(self):
self.save_new_skill(
'skill_id2', self.admin_id, description='description')
question_state_data_dict = self._create_valid_question_data(
'default_state').to_dict()
valid_html = (
'<oppia-noninteractive-math math_content-with-value="{&q'
'uot;raw_latex&quot;: &quot;(x - a_1)(x - a_2)(x - a'
'_3)...(x - a_n-1)(x - a_n)&quot;, &quot;svg_filenam'
'e&quot;: &quot;file.svg&quot;}"></oppia-noninte'
'ractive-math>'
)
question_state_data_dict['content']['html'] = valid_html
self.question_dict = {
'question_state_data': question_state_data_dict,
'language_code': 'en',
'question_state_data_schema_version': (
feconf.CURRENT_STATE_SCHEMA_VERSION),
'linked_skill_ids': ['skill_id2'],
'inapplicable_skill_misconception_ids': []
}
self.login(self.AUTHOR_EMAIL)
csrf_token = self.get_new_csrf_token()
with python_utils.open_file(
os.path.join(feconf.TESTS_DATA_DIR, 'test_svg.svg'),
'rb', encoding=None) as f:
raw_image = f.read()
self.post_json(
'%s/' % feconf.SUGGESTION_URL_PREFIX, {
'suggestion_type': (
suggestion_models.SUGGESTION_TYPE_ADD_QUESTION),
'target_type': suggestion_models.TARGET_TYPE_SKILL,
'target_id': self.SKILL_ID,
'target_version_at_submission': 1,
'change': {
'cmd': (
question_domain
.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION),
'question_dict': self.question_dict,
'skill_id': self.SKILL_ID,
'skill_difficulty': 0.3
},
'description': 'Add new question to skill'
}, csrf_token=csrf_token, upload_files=(
('file.svg', 'file.svg', raw_image), ))
self.logout()
def test_suggestion_creation_when_images_are_not_provided(self):
self.save_new_skill(
'skill_id2', self.admin_id, description='description')
question_state_data_dict = self._create_valid_question_data(
'default_state').to_dict()
valid_html = (
'<oppia-noninteractive-math math_content-with-value="{&q'
'uot;raw_latex&quot;: &quot;(x - a_1)(x - a_2)(x - a'
'_3)...(x - a_n-1)(x - a_n)&quot;, &quot;svg_filenam'
'e&quot;: &quot;file.svg&quot;}"></oppia-noninte'
'ractive-math>'
)
question_state_data_dict['content']['html'] = valid_html
self.question_dict = {
'question_state_data': question_state_data_dict,
'language_code': 'en',
'question_state_data_schema_version': (
feconf.CURRENT_STATE_SCHEMA_VERSION),
'linked_skill_ids': ['skill_id2'],
'inapplicable_skill_misconception_ids': []
}
self.login(self.AUTHOR_EMAIL)
csrf_token = self.get_new_csrf_token()
response_dict = self.post_json(
'%s/' % feconf.SUGGESTION_URL_PREFIX, {
'suggestion_type': (
suggestion_models.SUGGESTION_TYPE_ADD_QUESTION),
'target_type': suggestion_models.TARGET_TYPE_SKILL,
'target_id': self.SKILL_ID,
'target_version_at_submission': 1,
'change': {
'cmd': (
question_domain
.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION),
'question_dict': self.question_dict,
'skill_id': self.SKILL_ID,
'skill_difficulty': 0.3
},
'description': 'Add new question to skill'
}, csrf_token=csrf_token, expected_status_int=400)
self.assertIn(
'No image data provided for file with name file.svg.',
response_dict['error'])
self.logout()
def test_suggestion_creation_when_images_are_not_valid(self):
self.save_new_skill(
'skill_id2', self.admin_id, description='description')
question_state_data_dict = self._create_valid_question_data(
'default_state').to_dict()
valid_html = (
'<oppia-noninteractive-math math_content-with-value="{&q'
'uot;raw_latex&quot;: &quot;(x - a_1)(x - a_2)(x - a'
'_3)...(x - a_n-1)(x - a_n)&quot;, &quot;svg_filenam'
'e&quot;: &quot;file.svg&quot;}"></oppia-noninte'
'ractive-math>'
)
question_state_data_dict['content']['html'] = valid_html
self.question_dict = {
'question_state_data': question_state_data_dict,
'language_code': 'en',
'question_state_data_schema_version': (
feconf.CURRENT_STATE_SCHEMA_VERSION),
'linked_skill_ids': ['skill_id2'],
'inapplicable_skill_misconception_ids': []
}
self.login(self.AUTHOR_EMAIL)
csrf_token = self.get_new_csrf_token()
large_image = '<svg><path d="%s" /></svg>' % (
'M150 0 L75 200 L225 200 Z ' * 4000)
response_dict = self.post_json(
'%s/' % feconf.SUGGESTION_URL_PREFIX, {
'suggestion_type': (
suggestion_models.SUGGESTION_TYPE_ADD_QUESTION),
'target_type': suggestion_models.TARGET_TYPE_SKILL,
'target_id': self.SKILL_ID,
'target_version_at_submission': 1,
'change': {
'cmd': (
question_domain
.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION),
'question_dict': self.question_dict,
'skill_id': self.SKILL_ID,
'skill_difficulty': 0.3
},
'description': 'Add new question to skill'
}, csrf_token=csrf_token,
upload_files=(
('file.svg', 'file.svg', large_image),),
expected_status_int=400)
self.assertIn(
'Image exceeds file size limit of 100 KB.',
response_dict['error'])
self.logout()
class SkillSuggestionTests(test_utils.GenericTestBase):
AUTHOR_EMAIL = 'author@example.com'
REVIEWER_EMAIL = 'reviewer@example.com'
def setUp(self):
super(SkillSuggestionTests, self).setUp()
self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
self.signup(self.AUTHOR_EMAIL, 'author')
self.signup(self.REVIEWER_EMAIL, 'reviewer')
self.admin_id = self.get_user_id_from_email(self.ADMIN_EMAIL)
self.author_id = self.get_user_id_from_email(self.AUTHOR_EMAIL)
self.reviewer_id = self.get_user_id_from_email(self.REVIEWER_EMAIL)
self.set_admins([self.ADMIN_USERNAME])
user_services.allow_user_to_review_question(self.reviewer_id)
self.skill_id = skill_services.get_new_skill_id()
self.save_new_skill(
self.skill_id, self.admin_id, description='Description')
self.question_dict = {
'question_state_data': self._create_valid_question_data(
'default_state').to_dict(),
'language_code': 'en',
'question_state_data_schema_version': (
feconf.CURRENT_STATE_SCHEMA_VERSION),
'linked_skill_ids': [self.skill_id],
'inapplicable_skill_misconception_ids': ['skillid12345-1']
}
self.login(self.AUTHOR_EMAIL)
csrf_token = self.get_new_csrf_token()
self.post_json(
'%s/' % feconf.SUGGESTION_URL_PREFIX, {
'suggestion_type': (
suggestion_models.SUGGESTION_TYPE_ADD_QUESTION),
'target_type': suggestion_models.TARGET_TYPE_SKILL,
'target_id': self.skill_id,
'target_version_at_submission': 1,
'change': {
'cmd': (
question_domain
.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION),
'question_dict': self.question_dict,
'skill_id': self.skill_id,
'skill_difficulty': 0.3
},
'description': 'Add new question to skill'
}, csrf_token=csrf_token)
self.logout()
def test_cannot_access_suggestion_to_skill_handler(self):
self.login(self.ADMIN_EMAIL)
thread_id = feedback_services.create_thread(
suggestion_models.TARGET_TYPE_QUESTION, self.skill_id,
self.author_id, 'description', '', has_suggestion=True)
csrf_token = self.get_new_csrf_token()
self.put_json(
'%s/skill/%s/%s' % (
feconf.SUGGESTION_ACTION_URL_PREFIX, self.skill_id,
thread_id), {
'action': u'reject',
'review_message': u'Rejected!'
}, csrf_token=csrf_token, expected_status_int=400)
self.logout()
def test_suggestion_to_skill_handler_with_invalid_target_type(self):
self.login(self.ADMIN_EMAIL)
exp_id = 'new_exp_id'
self.save_new_default_exploration(exp_id, self.admin_id)
new_content = state_domain.SubtitledHtml(
'content', '<p>new content html</p>').to_dict()
change_cmd = {
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': exp_domain.STATE_PROPERTY_CONTENT,
'state_name': 'State 1',
'new_value': new_content
}
suggestion_services.create_suggestion(
suggestion_models.SUGGESTION_TYPE_EDIT_STATE_CONTENT,
suggestion_models.TARGET_TYPE_EXPLORATION, exp_id, 1,
self.author_id, change_cmd, 'sample description')
suggestion_id = suggestion_services.query_suggestions(
[('author_id', self.author_id), (
'target_id', exp_id)])[0].suggestion_id
csrf_token = self.get_new_csrf_token()
with self.swap(constants, 'ENABLE_NEW_STRUCTURE_VIEWER_UPDATES', True):
response = self.put_json(
'%s/skill/%s/%s' % (
feconf.SUGGESTION_ACTION_URL_PREFIX,
self.skill_id, suggestion_id), {
'action': u'reject',
'review_message': u'Rejected!'
}, csrf_token=csrf_token, expected_status_int=400)
self.assertEqual(
response['error'],
'This handler allows actions only on suggestions to skills.')
self.logout()
def test_suggestion_to_skill_handler_with_invalid_target_id(self):
self.login(self.ADMIN_EMAIL)
csrf_token = self.get_new_csrf_token()
suggestion_to_accept = self.get_json(
'%s?author_id=%s' % (
feconf.SUGGESTION_LIST_URL_PREFIX,
self.author_id))['suggestions'][0]
csrf_token = self.get_new_csrf_token()
with self.swap(constants, 'ENABLE_NEW_STRUCTURE_VIEWER_UPDATES', True):
response = self.put_json(
'%s/skill/%s/%s' % (
feconf.SUGGESTION_ACTION_URL_PREFIX,
'skill_id', suggestion_to_accept['suggestion_id']),
{
'action': u'reject',
'review_message': u'Rejected!'
},
csrf_token=csrf_token, expected_status_int=400)
self.assertEqual(
response['error'],
'The skill id provided does not match the skill id '
'present as part of the suggestion_id')
self.logout()
def test_suggestion_to_skill_handler_with_invalid_action(self):
self.login(self.ADMIN_EMAIL)
csrf_token = self.get_new_csrf_token()
suggestion_to_accept = self.get_json(
'%s?author_id=%s' % (
feconf.SUGGESTION_LIST_URL_PREFIX,
self.author_id))['suggestions'][0]
csrf_token = self.get_new_csrf_token()
with self.swap(constants, 'ENABLE_NEW_STRUCTURE_VIEWER_UPDATES', True):
response = self.put_json(
'%s/skill/%s/%s' % (
feconf.SUGGESTION_ACTION_URL_PREFIX,
suggestion_to_accept['target_id'],
suggestion_to_accept['suggestion_id']),
{'action': 'invalid_action'}, csrf_token=csrf_token,
expected_status_int=400)
self.assertEqual(
response['error'], 'Invalid action.')
self.logout()
def test_reject_suggestion_to_skill(self):
self.login(self.ADMIN_EMAIL)
csrf_token = self.get_new_csrf_token()
suggestion_to_reject = self.get_json(
'%s?author_id=%s' % (
feconf.SUGGESTION_LIST_URL_PREFIX,
self.author_id))['suggestions'][0]
suggestion = suggestion_services.get_suggestion_by_id(
suggestion_to_reject['suggestion_id'])
self.assertEqual(
suggestion.status, suggestion_models.STATUS_IN_REVIEW)
csrf_token = self.get_new_csrf_token()
with self.swap(constants, 'ENABLE_NEW_STRUCTURE_VIEWER_UPDATES', True):
self.put_json('%s/skill/%s/%s' % (
feconf.SUGGESTION_ACTION_URL_PREFIX,
suggestion_to_reject['target_id'],
suggestion_to_reject['suggestion_id']), {
'action': u'reject',
'review_message': u'Rejected!'
}, csrf_token=csrf_token)
suggestion = suggestion_services.get_suggestion_by_id(
suggestion_to_reject['suggestion_id'])
self.assertEqual(
suggestion.status, suggestion_models.STATUS_REJECTED)
self.logout()
def test_accept_suggestion_to_skill(self):
self.login(self.ADMIN_EMAIL)
csrf_token = self.get_new_csrf_token()
suggestion_to_accept = self.get_json(
'%s?author_id=%s' % (
feconf.SUGGESTION_LIST_URL_PREFIX,
self.author_id))['suggestions'][0]
suggestion = suggestion_services.get_suggestion_by_id(
suggestion_to_accept['suggestion_id'])
self.assertEqual(
suggestion.status, suggestion_models.STATUS_IN_REVIEW)
csrf_token = self.get_new_csrf_token()
with self.swap(constants, 'ENABLE_NEW_STRUCTURE_VIEWER_UPDATES', True):
self.put_json('%s/skill/%s/%s' % (
feconf.SUGGESTION_ACTION_URL_PREFIX,
suggestion_to_accept['target_id'],
suggestion_to_accept['suggestion_id']), {
'action': u'accept',
'commit_message': u'commit message',
'review_message': u'Accepted!',
'skill_id': self.skill_id
}, csrf_token=csrf_token)
suggestion = suggestion_services.get_suggestion_by_id(
suggestion_to_accept['suggestion_id'])
self.assertEqual(
suggestion.status, suggestion_models.STATUS_ACCEPTED)
self.logout()
def test_reviewer_accept_suggestion_to_skill(self):
self.login(self.REVIEWER_EMAIL)
csrf_token = self.get_new_csrf_token()
suggestion_to_accept = self.get_json(
'%s?author_id=%s' % (
feconf.SUGGESTION_LIST_URL_PREFIX,
self.author_id))['suggestions'][0]
suggestion = suggestion_services.get_suggestion_by_id(
suggestion_to_accept['suggestion_id'])
self.assertEqual(
suggestion.status, suggestion_models.STATUS_IN_REVIEW)
csrf_token = self.get_new_csrf_token()
with self.swap(constants, 'ENABLE_NEW_STRUCTURE_VIEWER_UPDATES', True):
self.put_json('%s/skill/%s/%s' % (
feconf.SUGGESTION_ACTION_URL_PREFIX,
suggestion_to_accept['target_id'],
suggestion_to_accept['suggestion_id']), {
'action': u'accept',
'commit_message': u'commit message',
'review_message': u'Accepted!',
'skill_id': self.skill_id
}, csrf_token=csrf_token)
suggestion = suggestion_services.get_suggestion_by_id(
suggestion_to_accept['suggestion_id'])
self.assertEqual(
suggestion.status, suggestion_models.STATUS_ACCEPTED)
self.logout()
class UserSubmittedSuggestionsHandlerTest(test_utils.GenericTestBase):
"""Unit test for the UserSubmittedSuggestionsHandler."""
AUTHOR_EMAIL = 'author@example.com'
def setUp(self):
super(UserSubmittedSuggestionsHandlerTest, self).setUp()
self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
self.signup(self.AUTHOR_EMAIL, 'author')
self.admin_id = self.get_user_id_from_email(self.ADMIN_EMAIL)
self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
self.set_admins([self.ADMIN_USERNAME])
self.TOPIC_ID = 'topic'
self.STORY_ID = 'story'
self.EXP_ID = 'exp1'
# Needs to be 12 characters long.
self.SKILL_ID = 'skill1234567'
self.SKILL_DESCRIPTION = 'skill to link question to'
exploration = self.save_new_valid_exploration(
self.EXP_ID, self.owner_id, title='Exploration title',
category='Algebra', end_state_name='End State')
self.publish_exploration(self.owner_id, self.EXP_ID)
topic = topic_domain.Topic.create_default_topic(
self.TOPIC_ID, 'topic', 'abbrev', 'description')
topic.thumbnail_filename = 'thumbnail.svg'
topic.thumbnail_bg_color = '#C6DCDA'
topic.subtopics = [
topic_domain.Subtopic(
1, 'Title', ['skill_id_333'], 'image.svg',
constants.ALLOWED_THUMBNAIL_BG_COLORS['subtopic'][0],
'dummy-subtopic-three')]
topic.next_subtopic_id = 2
topic_services.save_new_topic(self.owner_id, topic)
topic_services.publish_topic(self.TOPIC_ID, self.admin_id)
story = story_domain.Story.create_default_story(
self.STORY_ID, 'A story', 'Description', self.TOPIC_ID, 'story-a')
story_services.save_new_story(self.owner_id, story)
topic_services.add_canonical_story(
self.owner_id, self.TOPIC_ID, self.STORY_ID)
topic_services.publish_story(
self.TOPIC_ID, self.STORY_ID, self.admin_id)
story_services.update_story(
self.owner_id, self.STORY_ID, [story_domain.StoryChange({
'cmd': 'add_story_node',
'node_id': 'node_1',
'title': 'Node1',
}), story_domain.StoryChange({
'cmd': 'update_story_node_property',
'property_name': 'exploration_id',
'node_id': 'node_1',
'old_value': None,
'new_value': self.EXP_ID
})], 'Changes.')
self.save_new_skill(
self.SKILL_ID, self.owner_id, description=self.SKILL_DESCRIPTION)
self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
self.author_id = self.get_user_id_from_email(self.AUTHOR_EMAIL)
self.reviewer_id = self.editor_id
self.set_admins([self.ADMIN_USERNAME])
self.editor = user_services.UserActionsInfo(self.editor_id)
# Login and create exploration and suggestions.
self.login(self.EDITOR_EMAIL)
exp_services.update_exploration(
self.owner_id, self.EXP_ID, [
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': exp_domain.STATE_PROPERTY_CONTENT,
'state_name': 'Introduction',
'new_value': {
'content_id': 'content',
'html': '<p>new content html</p>'
}
})], 'Add content')
self.logout()
self.login(self.AUTHOR_EMAIL)
csrf_token = self.get_new_csrf_token()
self.post_json(
'%s/' % feconf.SUGGESTION_URL_PREFIX, {
'suggestion_type': (
suggestion_models.SUGGESTION_TYPE_TRANSLATE_CONTENT),
'target_type': (suggestion_models.TARGET_TYPE_EXPLORATION),
'target_id': self.EXP_ID,
'target_version_at_submission': exploration.version,
'change': {
'cmd': exp_domain.CMD_ADD_TRANSLATION,
'state_name': 'Introduction',
'content_id': 'content',
'language_code': 'hi',
'content_html': '<p>new content html</p>',
'translation_html': '<p>new content html in Hindi</p>'
},
'description': 'Adds translation',
}, csrf_token=csrf_token)
self.question_dict = {
'question_state_data': self._create_valid_question_data(
'default_state').to_dict(),
'language_code': 'en',
'question_state_data_schema_version': (
feconf.CURRENT_STATE_SCHEMA_VERSION),
'linked_skill_ids': [self.SKILL_ID],
'inapplicable_skill_misconception_ids': ['skillid12345-1']
}
self.post_json(
'%s/' % feconf.SUGGESTION_URL_PREFIX, {
'suggestion_type': (
suggestion_models.SUGGESTION_TYPE_ADD_QUESTION),
'target_type': suggestion_models.TARGET_TYPE_SKILL,
'target_id': self.SKILL_ID,
'target_version_at_submission': 1,
'change': {
'cmd': (
question_domain
.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION),
'question_dict': self.question_dict,
'skill_id': None,
'skill_difficulty': 0.3
},
'description': 'Add new question to skill'
}, csrf_token=csrf_token)
self.logout()
def test_exploration_handler_returns_data(self):
self.login(self.AUTHOR_EMAIL)
response = self.get_json(
'/getsubmittedsuggestions/exploration/translate_content')
self.assertEqual(len(response['suggestions']), 1)
self.assertEqual(len(response['target_id_to_opportunity_dict']), 1)
response = self.get_json(
'/getsubmittedsuggestions/topic/translate_content')
self.assertEqual(response, {})
def test_skill_handler_returns_data(self):
self.login(self.AUTHOR_EMAIL)
response = self.get_json(
'/getsubmittedsuggestions/skill/add_question')
self.assertEqual(len(response['suggestions']), 1)
self.assertEqual(len(response['target_id_to_opportunity_dict']), 1)
response = self.get_json(
'/getsubmittedsuggestions/topic/add_question')
self.assertEqual(response, {})
def test_handler_with_invalid_suggestion_type_raise_error(self):
self.login(self.AUTHOR_EMAIL)
response = self.get_json(
'/getsubmittedsuggestions/exploration/translate_content')
self.assertEqual(len(response['suggestions']), 1)
self.get_json(
'/getsubmittedsuggestions/exploration/invalid_suggestion_type',
expected_status_int=400)
def test_handler_with_invalid_target_type_raise_error(self):
self.login(self.AUTHOR_EMAIL)
response = self.get_json(
'/getsubmittedsuggestions/exploration/translate_content')
self.assertEqual(len(response['suggestions']), 1)
self.get_json(
'/getsubmittedsuggestions/invalid_target_type'
'/translate_content', expected_status_int=400)
class ReviewableSuggestionsHandlerTest(test_utils.GenericTestBase):
"""Unit test for the ReviewableSuggestionsHandler."""
def setUp(self):
super(ReviewableSuggestionsHandlerTest, self).setUp()
self.AUTHOR_EMAIL = 'author@example.com'
self.REVIEWER_EMAIL = 'reviewer@example.com'
self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
self.signup(self.AUTHOR_EMAIL, 'author')
self.signup(self.REVIEWER_EMAIL, 'reviewer')
self.admin_id = self.get_user_id_from_email(self.ADMIN_EMAIL)
self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
self.author_id = self.get_user_id_from_email(self.AUTHOR_EMAIL)
self.reviewer_id = self.get_user_id_from_email(self.REVIEWER_EMAIL)
self.set_admins([self.ADMIN_USERNAME])
self.editor = user_services.UserActionsInfo(self.editor_id)
self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
self.TOPIC_ID = 'topic'
self.STORY_ID = 'story'
self.EXP_ID = 'exp1'
# Needs to be 12 characters long.
self.SKILL_ID = 'skill1234567'
self.SKILL_DESCRIPTION = 'skill to link question to'
exploration = self.save_new_valid_exploration(
self.EXP_ID, self.owner_id, title='Exploration title',
category='Algebra', end_state_name='End State')
self.publish_exploration(self.owner_id, self.EXP_ID)
topic = topic_domain.Topic.create_default_topic(
self.TOPIC_ID, 'topic', 'abbrev', 'description')
topic.thumbnail_filename = 'thumbnail.svg'
topic.thumbnail_bg_color = '#C6DCDA'
topic.subtopics = [
topic_domain.Subtopic(
1, 'Title', ['skill_id_333'], 'image.svg',
constants.ALLOWED_THUMBNAIL_BG_COLORS['subtopic'][0],
'dummy-subtopic-three')]
topic.next_subtopic_id = 2
topic_services.save_new_topic(self.owner_id, topic)
topic_services.publish_topic(self.TOPIC_ID, self.admin_id)
story = story_domain.Story.create_default_story(
self.STORY_ID, 'A story', 'Description', self.TOPIC_ID, 'story-b')
story_services.save_new_story(self.owner_id, story)
topic_services.add_canonical_story(
self.owner_id, self.TOPIC_ID, self.STORY_ID)
topic_services.publish_story(
self.TOPIC_ID, self.STORY_ID, self.admin_id)
story_services.update_story(
self.owner_id, self.STORY_ID, [story_domain.StoryChange({
'cmd': 'add_story_node',
'node_id': 'node_1',
'title': 'Node1',
}), story_domain.StoryChange({
'cmd': 'update_story_node_property',
'property_name': 'exploration_id',
'node_id': 'node_1',
'old_value': None,
'new_value': self.EXP_ID
})], 'Changes.')
self.save_new_skill(
self.SKILL_ID, self.owner_id, description=self.SKILL_DESCRIPTION)
user_services.allow_user_to_review_question(self.reviewer_id)
user_services.allow_user_to_review_translation_in_language(
self.reviewer_id, 'hi')
# Login and update exploration and suggestions.
self.login(self.EDITOR_EMAIL)
exp_services.update_exploration(
self.owner_id, self.EXP_ID, [
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': exp_domain.STATE_PROPERTY_CONTENT,
'state_name': 'Introduction',
'new_value': {
'content_id': 'content',
'html': '<p>new content html</p>'
}
})], 'Add content')
self.logout()
self.login(self.AUTHOR_EMAIL)
csrf_token = self.get_new_csrf_token()
self.post_json(
'%s/' % feconf.SUGGESTION_URL_PREFIX, {
'suggestion_type': (
suggestion_models.SUGGESTION_TYPE_TRANSLATE_CONTENT),
'target_type': (suggestion_models.TARGET_TYPE_EXPLORATION),
'target_id': self.EXP_ID,
'target_version_at_submission': exploration.version,
'change': {
'cmd': exp_domain.CMD_ADD_TRANSLATION,
'state_name': 'Introduction',
'content_id': 'content',
'language_code': 'hi',
'content_html': '<p>new content html</p>',
'translation_html': '<p>new content html in Hindi</p>'
},
'description': 'Adds translation',
}, csrf_token=csrf_token)
self.question_dict = {
'question_state_data': self._create_valid_question_data(
'default_state').to_dict(),
'language_code': 'en',
'question_state_data_schema_version': (
feconf.CURRENT_STATE_SCHEMA_VERSION),
'linked_skill_ids': [self.SKILL_ID],
'inapplicable_skill_misconception_ids': ['skillid12345-1']
}
self.post_json(
'%s/' % feconf.SUGGESTION_URL_PREFIX, {
'suggestion_type': (
suggestion_models.SUGGESTION_TYPE_ADD_QUESTION),
'target_type': suggestion_models.TARGET_TYPE_SKILL,
'target_id': self.SKILL_ID,
'target_version_at_submission': 1,
'change': {
'cmd': (
question_domain
.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION),
'question_dict': self.question_dict,
'skill_id': None,
'skill_difficulty': 0.3
},
'description': 'Add new question to skill'
}, csrf_token=csrf_token)
self.logout()
def test_exploration_handler_returns_data(self):
self.login(self.REVIEWER_EMAIL)
response = self.get_json(
'/getreviewablesuggestions/exploration/translate_content')
self.assertEqual(len(response['suggestions']), 1)
self.assertEqual(len(response['target_id_to_opportunity_dict']), 1)
response = self.get_json(
'/getreviewablesuggestions/topic/translate_content')
self.assertEqual(response, {})
def test_skill_handler_returns_data(self):
self.login(self.REVIEWER_EMAIL)
response = self.get_json(
'/getreviewablesuggestions/skill/add_question')
self.assertEqual(len(response['suggestions']), 1)
self.assertEqual(len(response['target_id_to_opportunity_dict']), 1)
response = self.get_json(
'/getreviewablesuggestions/topic/add_question')
self.assertEqual(response, {})
def test_handler_with_invalid_suggestion_type_raise_error(self):
self.login(self.REVIEWER_EMAIL)
response = self.get_json(
'/getreviewablesuggestions/exploration/translate_content')
self.assertEqual(len(response['suggestions']), 1)
self.get_json(
'/getreviewablesuggestions/exploration/invalid_suggestion_type',
expected_status_int=404)
def test_handler_with_invalid_target_type_raise_error(self):
self.login(self.REVIEWER_EMAIL)
response = self.get_json(
'/getreviewablesuggestions/exploration/translate_content')
self.assertEqual(len(response['suggestions']), 1)
self.get_json(
'/getreviewablesuggestions/invalid_target_type'
'/translate_content', expected_status_int=400)
avg_line_length: 41.391455 | max_line_length: 111 | alphanum_fraction: 0.60205

hexsha: db2b70ef1b086a4f2912233a9df83803b5a5843a | size: 6528 | ext: py | lang: Python
max_stars_repo_path: bpy_lambda/2.78/scripts/addons_contrib/cmu_mocap_browser/__init__.py | max_stars_repo_name: resultant-gamedev/bpy_lambda | max_stars_repo_head_hexsha: c8cf46c10c69e74a0892b621d76c62edaa5b04bc | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: bpy_lambda/2.78/scripts/addons_contrib/cmu_mocap_browser/__init__.py | max_issues_repo_name: resultant-gamedev/bpy_lambda | max_issues_repo_head_hexsha: c8cf46c10c69e74a0892b621d76c62edaa5b04bc | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: bpy_lambda/2.78/scripts/addons_contrib/cmu_mocap_browser/__init__.py | max_forks_repo_name: resultant-gamedev/bpy_lambda | max_forks_repo_head_hexsha: c8cf46c10c69e74a0892b621d76c62edaa5b04bc | max_forks_repo_licenses: ["MIT"] | max_forks_count: 1 | max_forks_repo_forks_event_min_datetime: 2019-11-24T18:43:42.000Z | max_forks_repo_forks_event_max_datetime: 2019-11-24T18:43:42.000Z
content:
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8-80 compliant>
# This script was developed with financial support from the Foundation for
# Science and Technology of Portugal, under the grant SFRH/BD/66452/2009.
bl_info = {
"name": "Carnegie Mellon University Mocap Library Browser",
"author": "Daniel Monteiro Basso <daniel@basso.inf.br>",
"version": (2015, 3, 20),
"blender": (2, 66, 6),
"location": "View3D > Tools",
"description": "Assistant for using CMU Motion Capture data",
"wiki_url": "http://wiki.blender.org/index.php/Extensions:2.6/Py/"
"Scripts/3D_interaction/CMU_Mocap_Library_Browser",
"tracker_url": "https://developer.blender.org/maniphest/task/edit/form/2/",
"category": "Animation"}
if 'bpy' in locals():
import importlib
library = importlib.reload(library)
download = importlib.reload(download)
makehuman = importlib.reload(makehuman)
data = importlib.reload(data)
else:
from . import library
from . import download
from . import makehuman
from . import data
import os
import bpy
class CMUMocapSubjectBrowser(bpy.types.Panel):
bl_idname = "object.cmu_mocap_subject_browser"
bl_label = "CMU Mocap Subject Browser"
bl_space_type = 'VIEW_3D'
bl_region_type = 'TOOLS'
bl_category = 'Animation'
bl_options = {'DEFAULT_CLOSED'}
def draw(self, context):
data.initialize_subjects(context)
layout = self.layout
cml = context.user_preferences.addons['cmu_mocap_browser'].preferences
layout.template_list("UI_UL_list", "SB", cml, "subject_list",
cml, "subject_active")
layout.prop(cml, "subject_import_name")
if cml.subject_active != -1:
sidx = cml.subject_list[cml.subject_active].idx
remote_fname = library.skeleton_url.format(sidx)
tid = "{0:02d}".format(sidx)
local_path = os.path.expanduser(cml.local_storage)
if cml.follow_structure:
local_path = os.path.join(local_path, tid)
local_fname = os.path.join(local_path, tid + ".asf")
do_import = False
if os.path.exists(local_fname):
label = "Import Selected"
do_import = True
elif cml.automatically_import:
label = "Download and Import Selected"
else:
label = "Download Selected"
props = layout.operator("mocap.download_import",
text=label, icon='ARMATURE_DATA')
props.remote_file = remote_fname
props.local_file = local_fname
props.do_import = do_import
class CMUMocapMotionBrowser(bpy.types.Panel):
bl_idname = "object.cmu_mocap_motion_browser"
bl_label = "CMU Mocap Motion Browser"
bl_space_type = 'VIEW_3D'
bl_region_type = 'TOOLS'
bl_category = 'Animation'
bl_options = {'DEFAULT_CLOSED'}
def draw(self, context):
layout = self.layout
cml = context.user_preferences.addons['cmu_mocap_browser'].preferences
layout.template_list("UI_UL_list", "MB", cml, "motion_list",
cml, "motion_active")
if cml.motion_active == -1:
return
sidx = cml.subject_list[cml.subject_active].idx
midx = cml.motion_list[cml.motion_active].idx
motion = library.subjects[sidx]['motions'][midx]
fps = motion['fps']
ifps = fps // cml.frame_skip
row = layout.row()
row.column().label("Original: {0:d} fps.".format(fps))
row.column().label("Importing: {0:d} fps.".format(ifps))
layout.prop(cml, "frame_skip")
layout.prop(cml, "cloud_scale")
remote_fname = library.motion_url.format(sidx, midx)
tid = "{0:02d}".format(sidx)
local_path = os.path.expanduser(cml.local_storage)
if cml.follow_structure:
local_path = os.path.join(local_path, tid)
for target, icon, ext in (
('Motion Data', 'POSE_DATA', 'amc'),
('Marker Cloud', 'EMPTY_DATA', 'c3d'),
('Movie', 'FILE_MOVIE', 'mpg')):
action = "Import" if ext != 'mpg' else "Open"
fname = "{0:02d}_{1:02d}.{2}".format(sidx, midx, ext)
local_fname = os.path.join(local_path, fname)
do_import = False
if os.path.exists(local_fname):
label = "{0} {1}".format(action, target)
do_import = True
elif cml.automatically_import:
label = "Download and {0} {1}".format(action, target)
else:
label = "Download {0}".format(target)
row = layout.row()
props = row.operator("mocap.download_import", text=label, icon=icon)
props.remote_file = remote_fname + ext
props.local_file = local_fname
props.do_import = do_import
row.active = ext in motion['files']
class CMUMocapToMakeHuman(bpy.types.Panel):
bl_idname = "object.cmu_mocap_makehuman"
bl_label = "CMU Mocap to MakeHuman"
bl_space_type = 'VIEW_3D'
bl_region_type = 'TOOLS'
bl_category = 'Animation'
bl_options = {'DEFAULT_CLOSED'}
def draw(self, context):
layout = self.layout
cml = context.user_preferences.addons['cmu_mocap_browser'].preferences
layout.prop_search(cml, "floor", context.scene, "objects")
layout.prop(cml, "feet_angle")
layout.operator("object.cmu_align", text='Align armatures')
layout.operator("object.cmu_transfer", text='Transfer animation')
def register():
bpy.utils.register_module(__name__)
def unregister():
bpy.utils.unregister_module(__name__)
if __name__ == "__main__":
register()
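# Hedged note (not part of the original add-on file): as declared by
# bl_space_type='VIEW_3D', bl_region_type='TOOLS' and bl_category='Animation'
# in the panel classes above, the three panels appear in the 3D View tool
# shelf under the "Animation" tab once the add-on is enabled in Blender's
# User Preferences.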
avg_line_length: 37.734104 | max_line_length: 80 | alphanum_fraction: 0.633885

hexsha: 3524ddca590cbb63983c3249f1b5ccf01c782510 | size: 6894 | ext: py | lang: Python
max_stars_repo_path: this is crazy airline 6 upgraded.py | max_stars_repo_name: Abhay-Sastha-S/twitter-sentiment-analysis-cbse | max_stars_repo_head_hexsha: 4bb39ef645cc00df332d33f56adc214ce81b2397 | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: this is crazy airline 6 upgraded.py | max_issues_repo_name: Abhay-Sastha-S/twitter-sentiment-analysis-cbse | max_issues_repo_head_hexsha: 4bb39ef645cc00df332d33f56adc214ce81b2397 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: this is crazy airline 6 upgraded.py | max_forks_repo_name: Abhay-Sastha-S/twitter-sentiment-analysis-cbse | max_forks_repo_head_hexsha: 4bb39ef645cc00df332d33f56adc214ce81b2397 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
import pandas as pd
project_data_file_raw = open("C:/Users/MSI/Desktop/project_twitter_data updated.csv")
results_data_file = open("C:/Users/MSI/Desktop/resulting_data.csv", "w+")
project_data_file = open("C:/Users/MSI/Desktop/project_twitter_data_filtered.csv", 'r+')
project_data_file.truncate(0)
results_data_file.truncate(0)
#csv1 = pd.read_csv("C:/Users/MSI/Desktop/project_twitter_data updated.csv")
#print(csv1.head())
punctuation_chars = ["'", '"', ",", ".", "!", ":", ";", '#', '@']
# lists of words to use
positive_words = []
with open("C:/Users/MSI/Desktop/positive_words.txt") as pos_words_list:
for lin in pos_words_list:
if lin[0] != ';' and lin[0] != '\n':
positive_words.append(lin.strip())
def get_pos(str_sentences):
str_sentences = remove_punctuation(str_sentences)
list_str_sentences = str_sentences.split()
count = 0
for word in list_str_sentences:
for positiveWord in positive_words:
if word == positiveWord:
count += 1
return count
negative_words = []
with open("C:/Users/MSI/Desktop/negative_words.txt") as neg_words_list:
for lin in neg_words_list:
if lin[0] != ';' and lin[0] != '\n':
negative_words.append(lin.strip())
def get_neg(str_sentences):
str_sentences = remove_punctuation(str_sentences)
list_str_sentences = str_sentences.split()
count = 0
for word in list_str_sentences:
for negativeWord in negative_words:
if word == negativeWord:
count += 1
return count
def remove_punctuation(str_word):
for charPunct in punctuation_chars:
str_word = str_word.replace(charPunct, "")
return str_word
def write_in_data_file(resultingdatafile, pos_count=0, neg_count=0, neut_count=0, tweet_count=0):
search_in_data_file(project_data_file)
resultingdatafile.write(
"Tweet ID, Tweet Text, Number of Retweets, Number of Replies, Positive Score, Negative Score, Net Score, "
"Sentiment, Created On, Location Coord")
resultingdatafile.write("\n")
lines_project_data_file = project_data_file.readlines()
#header_dont_used = lines_project_data_file.pop(0)
#print(header_dont_used)
#print("\n", "Sample Data of Tweets", '\n', lines_project_data_file, "\n")
for lines_tweet_data in lines_project_data_file:
list_tweet_data = lines_tweet_data.strip().split(',')
#print("TWEET TEXT--", list_tweet_data[0], "\n", "-- Retweet Count", list_tweet_data[1], "-- Reply Count",
# list_tweet_data[2])
# create a variable to store the tweet text, list_tweet_data[0] is the tweet text
tweettext = list_tweet_data[0]
tweetlocation = list_tweet_data[4]
tweetcreated = list_tweet_data[5]
tweetid = list_tweet_data[3]
net_score = get_pos(list_tweet_data[0]) - get_neg(list_tweet_data[0])
x = 0
tweet_count += 1
print(tweet_count)
if net_score > 0:
x = "positive"
pos_count += 1
elif net_score < 0:
x = "negative"
neg_count += 1
elif net_score == 0:
x = "neutral"
neut_count += 1
resultingdatafile.write(
f"{tweetid}, {tweettext}, {list_tweet_data[1]}, {list_tweet_data[2]}, {get_pos(list_tweet_data[0])}, {get_neg(list_tweet_data[0])}, {(get_pos(list_tweet_data[0]) - get_neg(list_tweet_data[0]))}, {x}, {tweetcreated}, {tweetlocation} ")
resultingdatafile.write("\n")
if tweet_count > 0:
print("\n", "total no of tweets -", tweet_count, "\n", "no of positive tweets -", pos_count, "\n",
"no of negative counts -", neg_count, "\n",
"no of neutral tweets -", neut_count, "\n")
print("\n", "Percentage of postive tweets -", ((pos_count / tweet_count) * 100), "%", "\n",
"Percentage of negative tweets -", ((neg_count / tweet_count) * 100), "%", "\n",
"Percentage of neutral tweets -", ((neut_count / tweet_count) * 100), "%", "\n")
elif tweet_count == 0 or tweet_count < 0:
print("\n", "NO RESULTS FOR CHOSEN SEARCH, CHOOSE ANOTHER SEARCH TERM ", "\n")
##graphing
def search_in_data_file(searchdatafile, search_word= input('enter search term --')):
searchdatafile.write(
"Tweet Text, Number of Retweets, Number of Replies, Tweet ID, Location Coord, Created On")
searchdatafile.write("\n")
lines_project_data_file_search = project_data_file_raw.readlines()
#header_dont_used_search = lines_project_data_file_search.pop(0)
#print(header_dont_used_search)
#print("\n", "Sample Data of Tweets", '\n', lines_project_data_file_search, "\n")
list_tweet_data_search = []
tweet_text_words = []
data_line_current = []
search_condition = 0
for lines_tweet_data_search in lines_project_data_file_search:
count = 0
list_tweet_data_temp = lines_tweet_data_search.strip().split(',')
list_tweet_data_search.append(lines_tweet_data_search.strip().split(','))
#print(list_tweet_data_search)
#print('\n')
# create a variable to store the tweet text, list_tweet_data[0] is the tweet text
data_line_current = list_tweet_data_search[count]
#print(data_line_current)
tweet_text = 0
tweet_text = data_line_current[0]
#print(tweet_text)
tweet_text_words = tweet_text.split()
#print("here is the one -", "\n", tweet_text_words)
#search_condition = 0
for tweettext_search in tweet_text_words:
if tweettext_search == search_word:
searchdatafile.write(
f"{list_tweet_data_temp[0]}, {list_tweet_data_temp[1]}, {list_tweet_data_temp[2]}, {list_tweet_data_temp[3]}, {list_tweet_data_temp[4]}, {list_tweet_data_temp[5]}")
searchdatafile.write("\n")
search_condition = 1
if search_condition == 0:
print("\n", "NO SEARCH TERM SPOTTED", "\n")
elif search_condition == 1:
print("\n", "SEARCH TERM SPOTTED", "\n")
list_tweet_data_search.pop(0)
write_in_data_file(results_data_file)
project_data_file.close()
results_data_file.close()
project_data_file_raw.close()
#csv2 = pd.read_csv("C:/Users/MSI/Desktop/resulting_data.csv")
#print("\n", "Sample Result Data of Tweets-- ", '\n', csv2.head(), "\n")
# The block below referenced `search_print_count` and `search_condition`, which
# are never defined at module scope, so running it raised a NameError; the
# spotted/not-spotted summary is already printed inside search_in_data_file().
# if search_print_count != 1 and search_print_count > 1:
#     pass
#     # print(search_condition)
#     # if print_count
# else:
#     if search_condition == 0:
#         print("\n", "NO SEARCH TERM SPOTTED", "\n")
#     elif search_condition >= 1:
#         print("\n", "SEARCH TERM SPOTTED", "\n")
#         pass
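# Hedged sketch (not part of the original script): a small helper that reuses
# get_pos()/get_neg() defined above and mirrors the classification rule inside
# write_in_data_file() (net score > 0 positive, < 0 negative, 0 neutral). The
# sample sentence in the comment is purely illustrative.
def classify_text(text):
    net_score = get_pos(text) - get_neg(text)
    if net_score > 0:
        return "positive"
    if net_score < 0:
        return "negative"
    return "neutral"
# Example: classify_text("great flight but terrible delay") returns whichever
# label the loaded positive/negative word lists imply.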
avg_line_length: 39.170455 | max_line_length: 247 | alphanum_fraction: 0.632289

hexsha: e97c7053b712437ddd9adb3801c6bf654177920e | size: 2717 | ext: py | lang: Python
max_stars_repo_path: PersonManage/role/views.py | max_stars_repo_name: ahriknow/ahriknow | max_stars_repo_head_hexsha: 817b5670c964e01ffe19ed182ce0a7b42e17ce09 | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: PersonManage/role/views.py | max_issues_repo_name: ahriknow/ahriknow | max_issues_repo_head_hexsha: 817b5670c964e01ffe19ed182ce0a7b42e17ce09 | max_issues_repo_licenses: ["MIT"] | max_issues_count: 3 | max_issues_repo_issues_event_min_datetime: 2021-03-19T01:28:43.000Z | max_issues_repo_issues_event_max_datetime: 2021-04-08T19:57:19.000Z
max_forks_repo_path: PersonManage/role/views.py | max_forks_repo_name: ahriknow/ahriknow | max_forks_repo_head_hexsha: 817b5670c964e01ffe19ed182ce0a7b42e17ce09 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
from django.conf import settings
from redis import StrictRedis
from rest_framework.response import Response
from rest_framework.views import APIView
from PersonManage.role.models import Role
from PersonManage.role.serializer import OneRole, ManyRole
from PersonManage.jurisdiction.models import Jurisdiction
class RoleView(APIView):
def get(self, request, id=None):
if id:
if role := Role.objects.filter(pk=id).first():
data = OneRole(instance=role, many=False).data
return Response({'code': 200, 'msg': 'Query was successful!', 'data': data})
return Response({'code': 400, 'msg': 'Data does not exist!', 'data': None})
else:
roles = Role.objects.all()
data = ManyRole(instance=roles, many=True).data
return Response({'code': 200, 'msg': 'Query was successful!', 'data': data})
def post(self, request):
try:
role = Role(name=request.data['name'], describe=request.data['describe'])
role.save()
return Response({'code': 200, 'msg': 'Create successful!', 'data': None})
except Exception as ex:
if 'UNIQUE' in str(ex):
return Response({'code': 400, 'msg': 'Data duplication!', 'data': None})
return Response({'code': 500, 'msg': str(ex), 'data': None})
def put(self, request, id=None):
if role := Role.objects.filter(pk=id).first():
data = request.data
if name := data.get('name'):
role.name = name
if describe := data.get('describe'):
role.describe = describe
if 'jurisdictions' in data:
redis = StrictRedis(host=settings.DATABASES['redis']['HOST'],
port=settings.DATABASES['redis']['PORT'],
db=settings.DATABASES['redis']['NAME_2'],
password=settings.DATABASES['redis']['PASS'])
redis.flushdb()
role.jurisdictions.clear()
for i in data['jurisdictions']:
jur = Jurisdiction.objects.filter(pk=i).first()
role.jurisdictions.add(jur)
role.save()
return Response({'code': 200, 'msg': 'Update successful!', 'data': None})
return Response({'code': 400, 'msg': 'Data does not exist!', 'data': None})
def delete(self, request, id=None):
if role := Role.objects.filter(pk=id).first():
role.delete()
return Response({'code': 200, 'msg': 'Delete successful!'})
return Response({'code': 400, 'msg': 'Data does not exist!', 'data': None})
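# Hedged routing sketch (not part of the original file): one plausible way to
# wire RoleView into a Django URLconf so the optional `id` used by get/put/
# delete is captured from the URL. The route prefixes are illustrative
# assumptions, not taken from the original project.
from django.urls import path

example_urlpatterns = [
    path('roles/', RoleView.as_view()),           # GET (list) and POST
    path('roles/<int:id>/', RoleView.as_view()),  # GET, PUT, DELETE by id
]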
avg_line_length: 46.844828 | max_line_length: 92 | alphanum_fraction: 0.560177

hexsha: d280ab15421e54a7b67d4fd6e756157f8cd08783 | size: 1315 | ext: py | lang: Python
max_stars_repo_path: qiskit/utils/units.py | max_stars_repo_name: ajavadia/qiskit-sdk-py | max_stars_repo_head_hexsha: a59e8e6be1793197e19998c1f7dcfc45e6f2f3af | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 11 | max_stars_repo_stars_event_min_datetime: 2019-06-27T09:53:29.000Z | max_stars_repo_stars_event_max_datetime: 2021-03-02T04:40:30.000Z
max_issues_repo_path: qiskit/utils/units.py | max_issues_repo_name: ajavadia/qiskit-sdk-py | max_issues_repo_head_hexsha: a59e8e6be1793197e19998c1f7dcfc45e6f2f3af | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: 24 | max_issues_repo_issues_event_min_datetime: 2021-01-27T08:20:27.000Z | max_issues_repo_issues_event_max_datetime: 2021-07-06T09:42:28.000Z
max_forks_repo_path: qiskit/utils/units.py | max_forks_repo_name: ajavadia/qiskit-sdk-py | max_forks_repo_head_hexsha: a59e8e6be1793197e19998c1f7dcfc45e6f2f3af | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: 4 | max_forks_repo_forks_event_min_datetime: 2019-08-05T15:35:33.000Z | max_forks_repo_forks_event_max_datetime: 2020-09-18T18:55:02.000Z
content:
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""SI unit utilities"""
def apply_prefix(value: float, unit: str) -> float:
"""
Given a SI unit prefix and value, apply the prefix to convert to
standard SI unit.
Args:
value: The number to apply prefix to.
unit: String prefix.
Returns:
Converted value.
Raises:
Exception: If the units aren't recognized.
"""
downfactors = {
'p': 1e12,
'n': 1e9,
'u': 1e6,
'µ': 1e6,
'm': 1e3
}
upfactors = {
'k': 1e3,
'M': 1e6,
'G': 1e9
}
if not unit:
return value
if unit[0] in downfactors:
return value / downfactors[unit[0]]
elif unit[0] in upfactors:
return value * upfactors[unit[0]]
else:
raise Exception("Could not understand units: {u}".format(u=unit))
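# Hedged usage sketch (not part of the original module); the quantities are
# illustrative and simply exercise the factor tables defined above.
if __name__ == '__main__':
    print(apply_prefix(100.0, 'ns'))  # 1e-07  (100 ns in seconds)
    print(apply_prefix(4.5, 'GHz'))   # 4500000000.0  (4.5 GHz in hertz)
    print(apply_prefix(2.0, 'mV'))    # 0.002  (2 mV in volts)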
avg_line_length: 25.784314 | max_line_length: 77 | alphanum_fraction: 0.613688

hexsha: 42395a5378bd8c6f7ab333908bac7a974d27cf53 | size: 2750 | ext: py | lang: Python
max_stars_repo_path: tensorlayer/layers/extend.py | max_stars_repo_name: Windaway/tensorlayer | max_stars_repo_head_hexsha: 7afd8f0a39a4f1864a82e508f7a326fc998dc033 | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 1 | max_stars_repo_stars_event_min_datetime: 2019-05-16T13:27:57.000Z | max_stars_repo_stars_event_max_datetime: 2019-05-16T13:27:57.000Z
max_issues_repo_path: tensorlayer/layers/extend.py | max_issues_repo_name: Helilysyt/tensorlayer | max_issues_repo_head_hexsha: 2dc4482a13aff3833a246b4d85b69a5d9079f01d | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: tensorlayer/layers/extend.py | max_forks_repo_name: Helilysyt/tensorlayer | max_forks_repo_head_hexsha: 2dc4482a13aff3833a246b4d85b69a5d9079f01d | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: 1 | max_forks_repo_forks_event_min_datetime: 2021-04-13T06:34:48.000Z | max_forks_repo_forks_event_max_datetime: 2021-04-13T06:34:48.000Z
content:
#! /usr/bin/python
# -*- coding: utf-8 -*-
import tensorflow as tf
from tensorlayer import logging
from tensorlayer.decorators import deprecated_alias
from tensorlayer.layers.core import Layer
__all__ = [
'ExpandDims',
'Tile',
]
class ExpandDims(Layer):
"""
The :class:`ExpandDims` class inserts a dimension of 1 into a tensor's shape,
see `tf.expand_dims() <https://www.tensorflow.org/api_docs/python/tf/expand_dims>`__ .
Parameters
----------
axis : int
The dimension index at which to expand the shape of input.
name : str
A unique layer name. If None, a unique name will be automatically assigned.
Examples
--------
>>> x = tl.layers.Input([10, 3], name='in')
>>> y = tl.layers.ExpandDims(axis=-1)(x)
[10, 3, 1]
"""
def __init__(
self,
axis,
name=None # 'expand_dims',
):
super(ExpandDims, self).__init__(name)
self.axis = axis
self.build((None, ))
self._built = True
logging.info("ExpandDims %s: axis: %d" % (self.name, self.axis))
def __repr__(self):
s = '{classname}('
s += 'axis={axis},'
s += 'name={name}'
s += ")"
return s.format(classname=self.__class__.__name__, **self.__dict__)
def build(self, inputs_shape):
pass
# @tf.function
def forward(self, inputs):
outputs = tf.expand_dims(inputs, axis=self.axis, name=self.name)
return outputs
class Tile(Layer):
"""
The :class:`Tile` class constructs a tensor by tiling a given tensor,
see `tf.tile() <https://www.tensorflow.org/api_docs/python/tf/tile>`__ .
Parameters
----------
multiples: tensor
Must be one of the following types: int32, int64.
1-D Length must be the same as the number of dimensions in input.
name : None or str
A unique layer name.
Examples
--------
>>> x = tl.layers.Input([10, 3], name='in')
>>> y = tl.layers.Tile(multiples=[2, 3])(x)
[20, 9]
"""
def __init__(self, multiples=None, name=None): #'tile'):
super(Tile, self).__init__(name)
self.multiples = multiples
self.build((None, ))
self._built = True
logging.info("Tile %s: multiples: %s" % (self.name, self.multiples))
def __repr__(self):
s = '{classname}('
s += 'multiples={multiples},'
s += 'name={name}'
s += ")"
return s.format(classname=self.__class__.__name__, **self.__dict__)
def build(self, inputs_shape):
pass
# @tf.function
def forward(self, inputs):
outputs = tf.tile(inputs, multiples=self.multiples, name=self.name)
return outputs
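# Hedged usage sketch, following the docstring examples above and assuming a
# TensorLayer 2.x environment where `import tensorlayer as tl` is available:
# >>> x = tl.layers.Input([10, 3], name='in')
# >>> y = tl.layers.ExpandDims(axis=-1)(x)        # shape becomes [10, 3, 1]
# >>> z = tl.layers.Tile(multiples=[2, 3, 1])(y)  # shape becomes [20, 9, 1]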
avg_line_length: 25.462963 | max_line_length: 90 | alphanum_fraction: 0.581091

hexsha: bc2d148f4d4666dafb99a6f4e156fae150b8487b | size: 243 | ext: py | lang: Python
max_stars_repo_path: 01_basic/exercise_068.py | max_stars_repo_name: sideroff/python-exercises | max_stars_repo_head_hexsha: 6a9cc55735d977a71697204c734b3ade84a0c4fd | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: 01_basic/exercise_068.py | max_issues_repo_name: sideroff/python-exercises | max_issues_repo_head_hexsha: 6a9cc55735d977a71697204c734b3ade84a0c4fd | max_issues_repo_licenses: ["MIT"] | max_issues_count: 4 | max_issues_repo_issues_event_min_datetime: 2020-03-24T18:00:07.000Z | max_issues_repo_issues_event_max_datetime: 2021-06-02T00:51:22.000Z
max_forks_repo_path: 01_basic/exercise_068.py | max_forks_repo_name: sideroff/python-exercises | max_forks_repo_head_hexsha: 6a9cc55735d977a71697204c734b3ade84a0c4fd | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
from math import sqrt
def main():
digit = int(input("Choose an int: "))
sum_of_digits = sum(list(map(int, list(str(digit)))))
print("The sum of the digits in %i is %i" % (digit, sum_of_digits))
if __name__ == '__main__': main()
avg_line_length: 24.3 | max_line_length: 71 | alphanum_fraction: 0.63786

hexsha: 7179969513adacbfeb949c1c273d73332aca36a1 | size: 42217 | ext: py | lang: Python
max_stars_repo_path: src/azure-cli-core/azure/cli/core/__init__.py | max_stars_repo_name: akashsinghal/azure-cli | max_stars_repo_head_hexsha: 8ab2f7604a834de790bdea849b3e83f2466428b9 | max_stars_repo_licenses: ["MIT"] | max_stars_count: 1 | max_stars_repo_stars_event_min_datetime: 2016-09-15T23:10:48.000Z | max_stars_repo_stars_event_max_datetime: 2016-09-15T23:10:48.000Z
max_issues_repo_path: src/azure-cli-core/azure/cli/core/__init__.py | max_issues_repo_name: akashsinghal/azure-cli | max_issues_repo_head_hexsha: 8ab2f7604a834de790bdea849b3e83f2466428b9 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: src/azure-cli-core/azure/cli/core/__init__.py | max_forks_repo_name: akashsinghal/azure-cli | max_forks_repo_head_hexsha: 8ab2f7604a834de790bdea849b3e83f2466428b9 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
from __future__ import print_function
__version__ = "2.9.1"
import os
import sys
import timeit
import six
from knack.cli import CLI
from knack.commands import CLICommandsLoader
from knack.completion import ARGCOMPLETE_ENV_NAME
from knack.introspection import extract_args_from_signature, extract_full_summary_from_signature
from knack.log import get_logger
from knack.preview import PreviewItem
from knack.experimental import ExperimentalItem
from knack.util import CLIError
from knack.arguments import ArgumentsContext, CaseInsensitiveList # pylint: disable=unused-import
from .local_context import AzCLILocalContext, LocalContextAction
logger = get_logger(__name__)
EXCLUDED_PARAMS = ['self', 'raw', 'polling', 'custom_headers', 'operation_config',
'content_version', 'kwargs', 'client', 'no_wait']
EVENT_FAILED_EXTENSION_LOAD = 'MainLoader.OnFailedExtensionLoad'
# [Reserved, in case of future usage]
# Modules that will always be loaded. They don't expose commands but hook into CLI core.
ALWAYS_LOADED_MODULES = []
# Extensions that will always be loaded if installed. They don't expose commands but hook into CLI core.
ALWAYS_LOADED_EXTENSIONS = ['azext_ai_examples', 'azext_ai_did_you_mean_this']
class AzCli(CLI):
def __init__(self, **kwargs):
super(AzCli, self).__init__(**kwargs)
from azure.cli.core.commands import register_cache_arguments
from azure.cli.core.commands.arm import (
register_ids_argument, register_global_subscription_argument)
from azure.cli.core.cloud import get_active_cloud
from azure.cli.core.commands.transform import register_global_transforms
from azure.cli.core._session import ACCOUNT, CONFIG, SESSION, INDEX
from knack.util import ensure_dir
self.data['headers'] = {}
self.data['command'] = 'unknown'
self.data['command_extension_name'] = None
self.data['completer_active'] = ARGCOMPLETE_ENV_NAME in os.environ
self.data['query_active'] = False
azure_folder = self.config.config_dir
ensure_dir(azure_folder)
ACCOUNT.load(os.path.join(azure_folder, 'azureProfile.json'))
CONFIG.load(os.path.join(azure_folder, 'az.json'))
SESSION.load(os.path.join(azure_folder, 'az.sess'), max_age=3600)
INDEX.load(os.path.join(azure_folder, 'commandIndex.json'))
self.cloud = get_active_cloud(self)
logger.debug('Current cloud config:\n%s', str(self.cloud.name))
self.local_context = AzCLILocalContext(self)
register_global_transforms(self)
register_global_subscription_argument(self)
register_ids_argument(self) # global subscription must be registered first!
register_cache_arguments(self)
self.progress_controller = None
def refresh_request_id(self):
"""Assign a new random GUID as x-ms-client-request-id
The method must be invoked before each command execution in order to ensure
unique client-side request ID is generated.
"""
import uuid
self.data['headers']['x-ms-client-request-id'] = str(uuid.uuid1())
def get_progress_controller(self, det=False):
import azure.cli.core.commands.progress as progress
if not self.progress_controller:
self.progress_controller = progress.ProgressHook()
self.progress_controller.init_progress(progress.get_progress_view(det))
return self.progress_controller
def get_cli_version(self):
return __version__
def show_version(self):
from azure.cli.core.util import get_az_version_string, show_updates
from azure.cli.core.commands.constants import (SURVEY_PROMPT, SURVEY_PROMPT_COLOR,
UX_SURVEY_PROMPT, UX_SURVEY_PROMPT_COLOR)
ver_string, updates_available = get_az_version_string()
print(ver_string)
show_updates(updates_available)
show_link = self.config.getboolean('output', 'show_survey_link', True)
if show_link:
print('\n' + (SURVEY_PROMPT_COLOR if self.enable_color else SURVEY_PROMPT))
print(UX_SURVEY_PROMPT_COLOR if self.enable_color else UX_SURVEY_PROMPT)
def exception_handler(self, ex): # pylint: disable=no-self-use
from azure.cli.core.util import handle_exception
return handle_exception(ex)
def save_local_context(self, parsed_args, argument_definitions, specified_arguments):
""" Local Context Attribute arguments
Save argument value to local context if it is defined as SET and user specify a value for it.
:param parsed_args: Parsed args which return by AzCliCommandParser parse_args
:type parsed_args: Namespace
:param argument_definitions: All available argument definitions
:type argument_definitions: dict
:param specified_arguments: Arguments which user specify in this command
:type specified_arguments: list
"""
local_context_args = []
for argument_name in specified_arguments:
# make sure SET is defined
if argument_name not in argument_definitions:
continue
argtype = argument_definitions[argument_name].type
lca = argtype.settings.get('local_context_attribute', None)
if not lca or not lca.actions or LocalContextAction.SET not in lca.actions:
continue
# get the specified value
value = getattr(parsed_args, argument_name)
# save when name and scopes have value
if lca.name and lca.scopes:
self.local_context.set(lca.scopes, lca.name, value)
options = argtype.settings.get('options_list', None)
if options:
local_context_args.append((options[0], value))
# print warning if there are values saved to local context
if local_context_args:
logger.warning('Local context is turned on. Its information is saved in working directory %s. You can '
'run `az local-context off` to turn it off.',
self.local_context.effective_working_directory())
args_str = []
for name, value in local_context_args:
args_str.append('{}: {}'.format(name, value))
logger.warning('Your preference of %s now saved to local context. To learn more, type in `az '
'local-context --help`', ', '.join(args_str) + ' is' if len(args_str) == 1 else ' are')
class MainCommandsLoader(CLICommandsLoader):
# Format string for pretty-print the command module table
header_mod = "%-20s %10s %9s %9s" % ("Name", "Load Time", "Groups", "Commands")
item_format_string = "%-20s %10.3f %9d %9d"
header_ext = header_mod + " Directory"
item_ext_format_string = item_format_string + " %s"
def __init__(self, cli_ctx=None):
super(MainCommandsLoader, self).__init__(cli_ctx)
self.cmd_to_loader_map = {}
self.loaders = []
def _update_command_definitions(self):
for cmd_name in self.command_table:
loaders = self.cmd_to_loader_map[cmd_name]
for loader in loaders:
loader.command_table = self.command_table
loader._update_command_definitions() # pylint: disable=protected-access
# pylint: disable=too-many-statements, too-many-locals
def load_command_table(self, args):
from importlib import import_module
import pkgutil
import traceback
from azure.cli.core.commands import (
_load_module_command_loader, _load_extension_command_loader, BLOCKED_MODS, ExtensionCommandSource)
from azure.cli.core.extension import (
get_extensions, get_extension_path, get_extension_modname)
def _update_command_table_from_modules(args, command_modules=None):
"""Loads command tables from modules and merge into the main command table.
:param args: Arguments of the command.
:param list command_modules: Command modules to load, in the format like ['resource', 'profile'].
If None, will do module discovery and load all modules.
If [], only ALWAYS_LOADED_MODULES will be loaded.
Otherwise, the list will be extended using ALWAYS_LOADED_MODULES.
"""
# As command modules are built-in, the existence of modules in ALWAYS_LOADED_MODULES is NOT checked
if command_modules is not None:
command_modules.extend(ALWAYS_LOADED_MODULES)
else:
# Perform module discovery
command_modules = []
try:
mods_ns_pkg = import_module('azure.cli.command_modules')
command_modules = [modname for _, modname, _ in
pkgutil.iter_modules(mods_ns_pkg.__path__)]
logger.debug('Discovered command modules: %s', command_modules)
except ImportError as e:
logger.warning(e)
count = 0
cumulative_elapsed_time = 0
cumulative_group_count = 0
cumulative_command_count = 0
logger.debug("Loading command modules:")
logger.debug(self.header_mod)
for mod in [m for m in command_modules if m not in BLOCKED_MODS]:
try:
start_time = timeit.default_timer()
module_command_table, module_group_table = _load_module_command_loader(self, args, mod)
for cmd in module_command_table.values():
cmd.command_source = mod
self.command_table.update(module_command_table)
self.command_group_table.update(module_group_table)
elapsed_time = timeit.default_timer() - start_time
logger.debug(self.item_format_string, mod, elapsed_time,
len(module_group_table), len(module_command_table))
count += 1
cumulative_elapsed_time += elapsed_time
cumulative_group_count += len(module_group_table)
cumulative_command_count += len(module_command_table)
except Exception as ex: # pylint: disable=broad-except
# Changing this error message requires updating CI script that checks for failed
# module loading.
import azure.cli.core.telemetry as telemetry
logger.error("Error loading command module '%s': %s", mod, ex)
telemetry.set_exception(exception=ex, fault_type='module-load-error-' + mod,
summary='Error loading module: {}'.format(mod))
logger.debug(traceback.format_exc())
# Summary line
logger.debug(self.item_format_string,
"Total ({})".format(count), cumulative_elapsed_time,
cumulative_group_count, cumulative_command_count)
def _update_command_table_from_extensions(ext_suppressions, extension_modname=None):
"""Loads command tables from extensions and merge into the main command table.
:param ext_suppressions: Extension suppression information.
:param extension_modname: Command modules to load, in the format like ['azext_timeseriesinsights'].
If None, will do extension discovery and load all extensions.
If [], only ALWAYS_LOADED_EXTENSIONS will be loaded.
Otherwise, the list will be extended using ALWAYS_LOADED_EXTENSIONS.
If the extensions in the list are not installed, it will be skipped.
"""
def _handle_extension_suppressions(extensions):
filtered_extensions = []
for ext in extensions:
should_include = True
for suppression in ext_suppressions:
if should_include and suppression.handle_suppress(ext):
should_include = False
if should_include:
filtered_extensions.append(ext)
return filtered_extensions
def _filter_modname(extensions):
# Extension's name may not be the same as its modname. eg. name: virtual-wan, modname: azext_vwan
filtered_extensions = []
for ext in extensions:
ext_mod = get_extension_modname(ext.name, ext.path)
# Filter the extensions according to the index
if ext_mod in extension_modname:
filtered_extensions.append(ext)
extension_modname.remove(ext_mod)
if extension_modname:
logger.debug("These extensions are not installed and will be skipped: %s", extension_modname)
return filtered_extensions
extensions = get_extensions()
if extensions:
if extension_modname is not None:
extension_modname.extend(ALWAYS_LOADED_EXTENSIONS)
extensions = _filter_modname(extensions)
allowed_extensions = _handle_extension_suppressions(extensions)
module_commands = set(self.command_table.keys())
count = 0
cumulative_elapsed_time = 0
cumulative_group_count = 0
cumulative_command_count = 0
logger.debug("Loading extensions:")
logger.debug(self.header_ext)
for ext in allowed_extensions:
try:
# Import in the `for` loop because `allowed_extensions` can be []. In such case we
# don't need to import `check_version_compatibility` at all.
from azure.cli.core.extension.operations import check_version_compatibility
check_version_compatibility(ext.get_metadata())
except CLIError as ex:
# issue warning and skip loading extensions that aren't compatible with the CLI core
logger.warning(ex)
continue
ext_name = ext.name
ext_dir = ext.path or get_extension_path(ext_name)
sys.path.append(ext_dir)
try:
ext_mod = get_extension_modname(ext_name, ext_dir=ext_dir)
# Add to the map. This needs to happen before we load commands as registering a command
# from an extension requires this map to be up-to-date.
# self._mod_to_ext_map[ext_mod] = ext_name
start_time = timeit.default_timer()
extension_command_table, extension_group_table = \
_load_extension_command_loader(self, args, ext_mod)
for cmd_name, cmd in extension_command_table.items():
cmd.command_source = ExtensionCommandSource(
extension_name=ext_name,
overrides_command=cmd_name in module_commands,
preview=ext.preview,
experimental=ext.experimental)
self.command_table.update(extension_command_table)
self.command_group_table.update(extension_group_table)
elapsed_time = timeit.default_timer() - start_time
logger.debug(self.item_ext_format_string, ext_name, elapsed_time,
len(extension_group_table), len(extension_command_table),
ext_dir)
count += 1
cumulative_elapsed_time += elapsed_time
cumulative_group_count += len(extension_group_table)
cumulative_command_count += len(extension_command_table)
except Exception as ex: # pylint: disable=broad-except
self.cli_ctx.raise_event(EVENT_FAILED_EXTENSION_LOAD, extension_name=ext_name)
logger.warning("Unable to load extension '%s: %s'. Use --debug for more information.",
ext_name, ex)
logger.debug(traceback.format_exc())
# Summary line
logger.debug(self.item_ext_format_string,
"Total ({})".format(count), cumulative_elapsed_time,
cumulative_group_count, cumulative_command_count, "")
def _wrap_suppress_extension_func(func, ext):
""" Wrapper method to handle centralization of log messages for extension filters """
res = func(ext)
should_suppress = res
reason = "Use --debug for more information."
if isinstance(res, tuple):
should_suppress, reason = res
suppress_types = (bool, type(None))
if not isinstance(should_suppress, suppress_types):
raise ValueError("Command module authoring error: "
"Valid extension suppression values are {} in {}".format(suppress_types, func))
if should_suppress:
logger.warning("Extension %s (%s) has been suppressed. %s",
ext.name, ext.version, reason)
logger.debug("Extension %s (%s) suppressed from being loaded due "
"to %s", ext.name, ext.version, func)
return should_suppress
def _get_extension_suppressions(mod_loaders):
res = []
for m in mod_loaders:
suppressions = getattr(m, 'suppress_extension', None)
if suppressions:
suppressions = suppressions if isinstance(suppressions, list) else [suppressions]
for sup in suppressions:
if isinstance(sup, ModExtensionSuppress):
res.append(sup)
return res
def _roughly_parse_command(args):
# Roughly parse the command part: <az vm create> --name vm1
# Similar to knack.invocation.CommandInvoker._rudimentary_get_command, but we don't need to bother with
# positional args
nouns = []
for arg in args:
if arg and arg[0] != '-':
nouns.append(arg)
else:
break
return ' '.join(nouns).lower()
# Clear the tables to make this method idempotent
self.command_group_table.clear()
self.command_table.clear()
command_index = None
# Set fallback=False to turn off command index in case of regression
use_command_index = self.cli_ctx.config.getboolean('core', 'use_command_index', fallback=True)
if use_command_index:
command_index = CommandIndex(self.cli_ctx)
index_result = command_index.get(args)
if index_result:
index_modules, index_extensions = index_result
# Always load modules and extensions, because some of them (like those in
# ALWAYS_LOADED_EXTENSIONS) don't expose a command, but hooks into handlers in CLI core
_update_command_table_from_modules(args, index_modules)
# The index won't contain suppressed extensions
_update_command_table_from_extensions([], index_extensions)
logger.debug("Loaded %d groups, %d commands.", len(self.command_group_table), len(self.command_table))
# The index may be outdated. Make sure the command appears in the loaded command table
command_str = _roughly_parse_command(args)
if command_str in self.command_table:
logger.debug("Found a match in the command table for '%s'", command_str)
return self.command_table
if command_str in self.command_group_table:
logger.debug("Found a match in the command group table for '%s'", command_str)
return self.command_table
logger.debug("Could not find a match in the command table for '%s'. The index may be outdated",
command_str)
else:
logger.debug("No module found from index for '%s'", args)
# No module found from the index. Load all command modules and extensions
logger.debug("Loading all modules and extensions")
_update_command_table_from_modules(args)
ext_suppressions = _get_extension_suppressions(self.loaders)
# We always load extensions even if the appropriate module has been loaded
# as an extension could override the commands already loaded.
_update_command_table_from_extensions(ext_suppressions)
logger.debug("Loaded %d groups, %d commands.", len(self.command_group_table), len(self.command_table))
if use_command_index:
command_index.update(self.command_table)
return self.command_table
def load_arguments(self, command=None):
from azure.cli.core.commands.parameters import (
resource_group_name_type, get_location_type, deployment_name_type, vnet_name_type, subnet_name_type)
from knack.arguments import ignore_type
# omit specific command to load everything
if command is None:
command_loaders = set()
for loaders in self.cmd_to_loader_map.values():
command_loaders = command_loaders.union(set(loaders))
logger.info('Applying %s command loaders...', len(command_loaders))
else:
command_loaders = self.cmd_to_loader_map.get(command, None)
if command_loaders:
for loader in command_loaders:
# register global args
with loader.argument_context('') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('location', get_location_type(self.cli_ctx))
c.argument('vnet_name', vnet_name_type)
c.argument('subnet', subnet_name_type)
c.argument('deployment_name', deployment_name_type)
c.argument('cmd', ignore_type)
if command is None:
# load all arguments via reflection
for cmd in loader.command_table.values():
cmd.load_arguments() # this loads the arguments via reflection
loader.skip_applicability = True
loader.load_arguments('') # this adds entries to the argument registries
else:
loader.command_name = command
self.command_table[command].load_arguments() # this loads the arguments via reflection
loader.load_arguments(command) # this adds entries to the argument registries
self.argument_registry.arguments.update(loader.argument_registry.arguments)
self.extra_argument_registry.update(loader.extra_argument_registry)
loader._update_command_definitions() # pylint: disable=protected-access
class CommandIndex:
_COMMAND_INDEX = 'commandIndex'
_COMMAND_INDEX_VERSION = 'version'
_COMMAND_INDEX_CLOUD_PROFILE = 'cloudProfile'
def __init__(self, cli_ctx=None):
"""Class to manage command index.
:param cli_ctx: Only needed when `get` or `update` is called.
"""
from azure.cli.core._session import INDEX
self.INDEX = INDEX
if cli_ctx:
self.version = __version__
self.cloud_profile = cli_ctx.cloud.profile
def get(self, args):
"""Get the corresponding module and extension list of a command.
:param args: command arguments, like ['network', 'vnet', 'create', '-h']
:return: a tuple containing a list of modules and a list of extensions.
"""
# If the command index version or cloud profile doesn't match those of the current command,
# invalidate the command index.
index_version = self.INDEX[self._COMMAND_INDEX_VERSION]
cloud_profile = self.INDEX[self._COMMAND_INDEX_CLOUD_PROFILE]
if not (index_version and index_version == self.version and
cloud_profile and cloud_profile == self.cloud_profile):
logger.debug("Command index version or cloud profile is invalid or doesn't match the current command.")
self.invalidate()
return None
# Make sure the top-level command is provided, like `az version`.
# Skip command index for `az` or `az --help`.
if not args or args[0].startswith('-'):
return None
# Get the top-level command, like `network` in `network vnet create -h`
top_command = args[0]
index = self.INDEX[self._COMMAND_INDEX]
# Check the command index for (command: [module]) mapping, like
# "network": ["azure.cli.command_modules.natgateway", "azure.cli.command_modules.network", "azext_firewall"]
index_modules_extensions = index.get(top_command)
if index_modules_extensions:
# This list contains both built-in modules and extensions
index_builtin_modules = []
index_extensions = []
# Found modules from index
logger.debug("Modules found from index for '%s': %s", top_command, index_modules_extensions)
command_module_prefix = 'azure.cli.command_modules.'
for m in index_modules_extensions:
if m.startswith(command_module_prefix):
# The top-level command is from a command module
index_builtin_modules.append(m[len(command_module_prefix):])
elif m.startswith('azext_'):
# The top-level command is from an extension
index_extensions.append(m)
else:
logger.warning("Unrecognized module: %s", m)
return index_builtin_modules, index_extensions
return None
def update(self, command_table):
"""Update the command index according to the given command table.
:param command_table: The command table built by azure.cli.core.MainCommandsLoader.load_command_table
"""
start_time = timeit.default_timer()
self.INDEX[self._COMMAND_INDEX_VERSION] = __version__
self.INDEX[self._COMMAND_INDEX_CLOUD_PROFILE] = self.cloud_profile
from collections import defaultdict
index = defaultdict(list)
# self.cli_ctx.invocation.commands_loader.command_table doesn't exist in DummyCli due to the lack of invocation
for command_name, command in command_table.items():
# Get the top-level name: <vm> create
top_command = command_name.split()[0]
# Get module name, like azure.cli.command_modules.vm, azext_webapp
module_name = command.loader.__module__
if module_name not in index[top_command]:
index[top_command].append(module_name)
elapsed_time = timeit.default_timer() - start_time
self.INDEX[self._COMMAND_INDEX] = index
logger.debug("Updated command index in %.3f seconds.", elapsed_time)
def invalidate(self):
"""Invalidate the command index.
This function MUST be called when installing or updating extensions. Otherwise, when an extension
1. overrides a built-in command, or
2. extends an existing command group,
the command or command group will only be loaded from the command modules as per the stale command index,
        causing the newly installed extension to be ignored.
This function can be called when removing extensions.
"""
self.INDEX[self._COMMAND_INDEX_VERSION] = ""
self.INDEX[self._COMMAND_INDEX_CLOUD_PROFILE] = ""
self.INDEX[self._COMMAND_INDEX] = {}
logger.debug("Command index has been invalidated.")
class ModExtensionSuppress: # pylint: disable=too-few-public-methods
def __init__(self, mod_name, suppress_extension_name, suppress_up_to_version, reason=None, recommend_remove=False,
recommend_update=False):
self.mod_name = mod_name
self.suppress_extension_name = suppress_extension_name
self.suppress_up_to_version = suppress_up_to_version
self.reason = reason
self.recommend_remove = recommend_remove
self.recommend_update = recommend_update
def handle_suppress(self, ext):
from pkg_resources import parse_version
should_suppress = ext.name == self.suppress_extension_name and ext.version and \
parse_version(ext.version) <= parse_version(self.suppress_up_to_version)
if should_suppress:
reason = self.reason or "Use --debug for more information."
logger.warning("Extension %s (%s) has been suppressed. %s",
ext.name, ext.version, reason)
logger.debug("Extension %s (%s) suppressed from being loaded due "
"to %s", ext.name, ext.version, self.mod_name)
if self.recommend_remove:
logger.warning("Remove this extension with 'az extension remove --name %s'", ext.name)
if self.recommend_update:
logger.warning("Update this extension with 'az extension update --name %s'", ext.name)
return should_suppress
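# --- Illustrative sketch (editor's addition, not part of the original source) ---
# AzCommandsLoader (below) accepts a `suppress_extension` keyword argument, and
# _get_extension_suppressions() above collects those objects from the loaded module loaders.
# The module name, extension name and version here are invented for illustration only.
def _example_suppression():  # pragma: no cover
    return ModExtensionSuppress('azure.cli.command_modules.example', 'example-extension',
                                suppress_up_to_version='0.1.0',
                                reason='Functionality moved into the CLI core (illustrative).',
                                recommend_remove=True)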
class AzCommandsLoader(CLICommandsLoader): # pylint: disable=too-many-instance-attributes
def __init__(self, cli_ctx=None, command_group_cls=None, argument_context_cls=None,
suppress_extension=None, **kwargs):
from azure.cli.core.commands import AzCliCommand, AzCommandGroup, AzArgumentContext
super(AzCommandsLoader, self).__init__(cli_ctx=cli_ctx,
command_cls=AzCliCommand,
excluded_command_handler_args=EXCLUDED_PARAMS)
self.suppress_extension = suppress_extension
self.module_kwargs = kwargs
self.command_name = None
self.skip_applicability = False
self._command_group_cls = command_group_cls or AzCommandGroup
self._argument_context_cls = argument_context_cls or AzArgumentContext
def _update_command_definitions(self):
master_arg_registry = self.cli_ctx.invocation.commands_loader.argument_registry
master_extra_arg_registry = self.cli_ctx.invocation.commands_loader.extra_argument_registry
for command_name, command in self.command_table.items():
# Add any arguments explicitly registered for this command
for argument_name, argument_definition in master_extra_arg_registry[command_name].items():
command.arguments[argument_name] = argument_definition
for argument_name in command.arguments:
overrides = master_arg_registry.get_cli_argument(command_name, argument_name)
command.update_argument(argument_name, overrides)
def _apply_doc_string(self, dest, command_kwargs):
from azure.cli.core.profiles._shared import APIVersionException
doc_string_source = command_kwargs.get('doc_string_source', None)
if not doc_string_source:
return
if not isinstance(doc_string_source, str):
raise CLIError("command authoring error: applying doc_string_source '{}' directly will cause slowdown. "
'Import by string name instead.'.format(doc_string_source.__name__))
model = doc_string_source
try:
model = self.get_models(doc_string_source)
except APIVersionException:
model = None
if not model:
from importlib import import_module
(path, model_name) = doc_string_source.split('#', 1)
method_name = None
if '.' in model_name:
(model_name, method_name) = model_name.split('.', 1)
module = import_module(path)
model = getattr(module, model_name)
if method_name:
model = getattr(model, method_name, None)
if not model:
raise CLIError("command authoring error: source '{}' not found.".format(doc_string_source))
dest.__doc__ = model.__doc__
def _get_resource_type(self):
resource_type = self.module_kwargs.get('resource_type', None)
if not resource_type:
command_type = self.module_kwargs.get('command_type', None)
resource_type = command_type.settings.get('resource_type', None) if command_type else None
return resource_type
def get_api_version(self, resource_type=None, operation_group=None):
from azure.cli.core.profiles import get_api_version
resource_type = resource_type or self._get_resource_type()
version = get_api_version(self.cli_ctx, resource_type)
if isinstance(version, str):
return version
version = getattr(version, operation_group, None)
if version:
return version
from azure.cli.core.profiles._shared import APIVersionException
raise APIVersionException(operation_group, self.cli_ctx.cloud.profile)
def supported_api_version(self, resource_type=None, min_api=None, max_api=None, operation_group=None):
from azure.cli.core.profiles import supported_api_version
if not min_api and not max_api:
# optimistically assume that fully supported if no api restriction listed
return True
api_support = supported_api_version(
cli_ctx=self.cli_ctx,
resource_type=resource_type or self._get_resource_type(),
min_api=min_api,
max_api=max_api,
operation_group=operation_group)
if isinstance(api_support, bool):
return api_support
if operation_group:
return getattr(api_support, operation_group)
return api_support
def supported_resource_type(self, resource_type=None):
from azure.cli.core.profiles import supported_resource_type
return supported_resource_type(
cli_ctx=self.cli_ctx,
resource_type=resource_type or self._get_resource_type())
def get_sdk(self, *attr_args, **kwargs):
from azure.cli.core.profiles import get_sdk
return get_sdk(self.cli_ctx, kwargs.pop('resource_type', self._get_resource_type()),
*attr_args, **kwargs)
def get_models(self, *attr_args, **kwargs):
from azure.cli.core.profiles import get_sdk
resource_type = kwargs.get('resource_type', self._get_resource_type())
operation_group = kwargs.get('operation_group', self.module_kwargs.get('operation_group', None))
return get_sdk(self.cli_ctx, resource_type, *attr_args, mod='models', operation_group=operation_group)
def command_group(self, group_name, command_type=None, **kwargs):
if command_type:
kwargs['command_type'] = command_type
if 'deprecate_info' in kwargs:
kwargs['deprecate_info'].target = group_name
if kwargs.get('is_preview', False):
kwargs['preview_info'] = PreviewItem(
cli_ctx=self.cli_ctx,
target=group_name,
object_type='command group'
)
if kwargs.get('is_experimental', False):
kwargs['experimental_info'] = ExperimentalItem(
cli_ctx=self.cli_ctx,
target=group_name,
object_type='command group'
)
return self._command_group_cls(self, group_name, **kwargs)
def argument_context(self, scope, **kwargs):
return self._argument_context_cls(self, scope, **kwargs)
def _cli_command(self, name, operation=None, handler=None, argument_loader=None, description_loader=None, **kwargs):
from knack.deprecation import Deprecated
kwargs['deprecate_info'] = Deprecated.ensure_new_style_deprecation(self.cli_ctx, kwargs, 'command')
if operation and not isinstance(operation, six.string_types):
raise TypeError("Operation must be a string. Got '{}'".format(operation))
if handler and not callable(handler):
            raise TypeError("Handler must be a callable. Got '{}'".format(handler))
if bool(operation) == bool(handler):
raise TypeError("Must specify exactly one of either 'operation' or 'handler'")
name = ' '.join(name.split())
client_factory = kwargs.get('client_factory', None)
def default_command_handler(command_args):
from azure.cli.core.util import get_arg_list, augment_no_wait_handler_args
from azure.cli.core.commands.client_factory import resolve_client_arg_name
op = handler or self.get_op_handler(operation, operation_group=kwargs.get('operation_group'))
op_args = get_arg_list(op)
cmd = command_args.get('cmd') if 'cmd' in op_args else command_args.pop('cmd')
client = client_factory(cmd.cli_ctx, command_args) if client_factory else None
supports_no_wait = kwargs.get('supports_no_wait', None)
if supports_no_wait:
no_wait_enabled = command_args.pop('no_wait', False)
augment_no_wait_handler_args(no_wait_enabled, op, command_args)
if client:
client_arg_name = resolve_client_arg_name(operation, kwargs)
if client_arg_name in op_args:
command_args[client_arg_name] = client
return op(**command_args)
def default_arguments_loader():
op = handler or self.get_op_handler(operation, operation_group=kwargs.get('operation_group'))
self._apply_doc_string(op, kwargs)
cmd_args = list(extract_args_from_signature(op, excluded_params=self.excluded_command_handler_args))
return cmd_args
def default_description_loader():
op = handler or self.get_op_handler(operation, operation_group=kwargs.get('operation_group'))
self._apply_doc_string(op, kwargs)
return extract_full_summary_from_signature(op)
kwargs['arguments_loader'] = argument_loader or default_arguments_loader
kwargs['description_loader'] = description_loader or default_description_loader
if self.supported_api_version(resource_type=kwargs.get('resource_type'),
min_api=kwargs.get('min_api'),
max_api=kwargs.get('max_api'),
operation_group=kwargs.get('operation_group')):
self._populate_command_group_table_with_subgroups(' '.join(name.split()[:-1]))
self.command_table[name] = self.command_cls(self, name,
handler or default_command_handler,
**kwargs)
def get_op_handler(self, operation, operation_group=None):
""" Import and load the operation handler """
# Patch the unversioned sdk path to include the appropriate API version for the
# resource type in question.
from importlib import import_module
import types
from azure.cli.core.profiles import AZURE_API_PROFILES
from azure.cli.core.profiles._shared import get_versioned_sdk_path
for rt in AZURE_API_PROFILES[self.cli_ctx.cloud.profile]:
if operation.startswith(rt.import_prefix + '.'):
operation = operation.replace(rt.import_prefix,
get_versioned_sdk_path(self.cli_ctx.cloud.profile, rt,
operation_group=operation_group))
try:
mod_to_import, attr_path = operation.split('#')
op = import_module(mod_to_import)
for part in attr_path.split('.'):
op = getattr(op, part)
if isinstance(op, types.FunctionType):
return op
return six.get_method_function(op)
except (ValueError, AttributeError):
raise ValueError("The operation '{}' is invalid.".format(operation))
def get_default_cli():
from azure.cli.core.azlogging import AzCliLogging
from azure.cli.core.commands import AzCliCommandInvoker
from azure.cli.core.parser import AzCliCommandParser
from azure.cli.core._config import GLOBAL_CONFIG_DIR, ENV_VAR_PREFIX
from azure.cli.core._help import AzCliHelp
from azure.cli.core._output import AzOutputProducer
return AzCli(cli_name='az',
config_dir=GLOBAL_CONFIG_DIR,
config_env_var_prefix=ENV_VAR_PREFIX,
commands_loader_cls=MainCommandsLoader,
invocation_cls=AzCliCommandInvoker,
parser_cls=AzCliCommandParser,
logging_cls=AzCliLogging,
output_cls=AzOutputProducer,
help_cls=AzCliHelp)
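# --- Illustrative usage sketch (editor's addition, not part of the original source) ---
# Drives the CLI programmatically through the factory above. invoke() returns the
# command's exit code; anything beyond that (e.g. reading result data off the CLI object)
# depends on knack internals, so treat this as a sketch rather than a documented API.
def _example_programmatic_invoke():  # pragma: no cover
    cli = get_default_cli()
    exit_code = cli.invoke(['version'])  # e.g. the `az version` command
    return exit_code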
| 49.901891
| 120
| 0.63112
|
79974320f4093fec90a2f9bff2924fdcaaa3d6db
| 101
|
py
|
Python
|
python/y2019/d24/__main__.py
|
luke-dixon/aoc
|
94851a5866a1ef29e3ba10098160cba883882683
|
[
"MIT"
] | 1
|
2021-01-12T20:04:01.000Z
|
2021-01-12T20:04:01.000Z
|
python/y2019/d24/__main__.py
|
luke-dixon/aoc
|
94851a5866a1ef29e3ba10098160cba883882683
|
[
"MIT"
] | null | null | null |
python/y2019/d24/__main__.py
|
luke-dixon/aoc
|
94851a5866a1ef29e3ba10098160cba883882683
|
[
"MIT"
] | null | null | null |
import sys
from .day24 import Day24
if __name__ == '__main__':
Day24(args=sys.argv[1:]).main()
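# (Editor's note) Because of the relative import above, this entry point is meant to be run
# as a package module, e.g. `python -m y2019.d24 <args>`, from the directory containing the
# `y2019` package (presumably the repository's `python/` directory, judging by its path).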
| 14.428571
| 35
| 0.673267
|
3f8e6cab1d9cd448331778dafe73793da7999b2d
| 5,328
|
py
|
Python
|
model/optimizers_lib/bgd_optimizer.py
|
ksasi/La-MAML
|
05fff370282312e87eceda51dbe8c1678e2ac1e2
|
[
"Apache-2.0"
] | 51
|
2020-10-24T16:11:58.000Z
|
2022-03-10T06:39:04.000Z
|
model/optimizers_lib/bgd_optimizer.py
|
Johswald/La-MAML
|
cff3524b0f23b932d574931c3a7495c42cbd8d9d
|
[
"Apache-2.0"
] | 2
|
2020-12-09T06:42:45.000Z
|
2021-04-24T05:52:52.000Z
|
model/optimizers_lib/bgd_optimizer.py
|
Johswald/La-MAML
|
cff3524b0f23b932d574931c3a7495c42cbd8d9d
|
[
"Apache-2.0"
] | 17
|
2020-11-24T20:21:18.000Z
|
2022-01-20T19:45:57.000Z
|
import torch
from torch.optim.optimizer import Optimizer
class BGD(Optimizer):
"""Implements BGD.
A simple usage of BGD would be:
for samples, labels in batches:
for mc_iter in range(mc_iters):
optimizer.randomize_weights()
output = model.forward(samples)
            loss = criterion(output, labels)
optimizer.zero_grad()
loss.backward()
optimizer.aggregate_grads()
optimizer.step()
"""
def __init__(self, params, std_init, mean_eta=1, mc_iters=10):
"""
Initialization of BGD optimizer
group["mean_param"] is the learned mean.
group["std_param"] is the learned STD.
:param params: List of model parameters
:param std_init: Initialization value for STD parameter
:param mean_eta: Eta value
        :param mc_iters: Number of Monte Carlo iterations. Used for correctness check.
Use None to disable the check.
"""
super(BGD, self).__init__(params, defaults={})
assert mc_iters is None or (type(mc_iters) == int and mc_iters > 0), "mc_iters should be positive int or None."
self.std_init = std_init
self.mean_eta = mean_eta
self.mc_iters = mc_iters
# Initialize mu (mean_param) and sigma (std_param)
for group in self.param_groups:
assert len(group["params"]) == 1, "BGD optimizer does not support multiple params in a group"
# group['params'][0] is the weights
assert isinstance(group["params"][0], torch.Tensor), "BGD expect param to be a tensor"
# We use the initialization of weights to initialize the mean.
group["mean_param"] = group["params"][0].data.clone()
group["std_param"] = torch.zeros_like(group["params"][0].data).add_(self.std_init)
self._init_accumulators()
def get_mc_iters(self):
return self.mc_iters
def _init_accumulators(self):
self.mc_iters_taken = 0
for group in self.param_groups:
group["eps"] = None
group["grad_mul_eps_sum"] = torch.zeros_like(group["params"][0].data).cuda()
group["grad_sum"] = torch.zeros_like(group["params"][0].data).cuda()
def randomize_weights(self, force_std=-1):
"""
Randomize the weights according to N(mean, std).
:param force_std: If force_std>=0 then force_std is used for STD instead of the learned STD.
:return: None
"""
for group in self.param_groups:
mean = group["mean_param"]
std = group["std_param"]
if force_std >= 0:
std = std.mul(0).add(force_std)
group["eps"] = torch.normal(torch.zeros_like(mean), 1).cuda()
# Reparameterization trick (here we set the weights to their randomized value):
group["params"][0].data.copy_(mean.add(std.mul(group["eps"])))
def aggregate_grads(self, batch_size):
"""
        Aggregates the gradients of a single Monte Carlo iteration. Used in step() for the expectation calculations.
optimizer.zero_grad() should be used before calling .backward() once again.
:param batch_size: BGD is using non-normalized gradients, but PyTorch gives normalized gradients.
Therefore, we multiply the gradients by the batch size.
:return: None
"""
self.mc_iters_taken += 1
groups_cnt = 0
for group in self.param_groups:
if group["params"][0].grad is None:
continue
assert group["eps"] is not None, "Must randomize weights before using aggregate_grads"
groups_cnt += 1
grad = group["params"][0].grad.data.mul(batch_size)
group["grad_sum"].add_(grad)
group["grad_mul_eps_sum"].add_(grad.mul(group["eps"]))
group["eps"] = None
assert groups_cnt > 0, "Called aggregate_grads, but all gradients were None. Make sure you called .backward()"
def step(self, closure=None, print_std = False):
"""
Updates the learned mean and STD.
:return:
"""
# Makes sure that self.mc_iters had been taken.
assert self.mc_iters is None or self.mc_iters == self.mc_iters_taken, "MC iters is set to " \
+ str(self.mc_iters) \
+ ", but took " + \
str(self.mc_iters_taken) + " MC iters"
for group in self.param_groups:
mean = group["mean_param"]
std = group["std_param"]
# Divide gradients by MC iters to get expectation
e_grad = group["grad_sum"].div(self.mc_iters_taken)
e_grad_eps = group["grad_mul_eps_sum"].div(self.mc_iters_taken)
# Update mean and STD params
mean.add_(-std.pow(2).mul(e_grad).mul(self.mean_eta))
sqrt_term = torch.sqrt(e_grad_eps.mul(std).div(2).pow(2).add(1)).mul(std)
std.copy_(sqrt_term.add(-e_grad_eps.mul(std.pow(2)).div(2)))
self.randomize_weights(force_std=0)
self._init_accumulators()
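# --- Illustrative usage sketch (editor's addition, not part of the original source) ---
# Mirrors the Monte Carlo training loop described in the BGD class docstring above.
# `model` and `criterion` are placeholders, std_init/mean_eta are arbitrary example values,
# and (as written above) the optimizer assumes CUDA tensors because of the .cuda() calls.
def train_one_batch(model, criterion, samples, labels,
                    std_init=0.02, mean_eta=1.0, mc_iters=10):
    # BGD asserts exactly one tensor per parameter group, hence the per-parameter groups here.
    optimizer = BGD([{'params': [p]} for p in model.parameters()],
                    std_init=std_init, mean_eta=mean_eta, mc_iters=mc_iters)
    for _ in range(optimizer.get_mc_iters()):
        optimizer.randomize_weights()                 # sample weights ~ N(mean, std)
        loss = criterion(model(samples), labels)
        optimizer.zero_grad()
        loss.backward()
        optimizer.aggregate_grads(batch_size=samples.size(0))
    optimizer.step()                                  # update the learned mean and STD
    return loss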
| 47.571429
| 119
| 0.584835
|
cec3047405310e1e8c50a96dac4d084ca7db5a1e
| 28,556
|
py
|
Python
|
python-siren/siren/interface/classGridTable.py
|
zliobaite/redescription-China
|
cab8ee08f21a0207bed5b48fd3493a3909d2a5ad
|
[
"MIT"
] | null | null | null |
python-siren/siren/interface/classGridTable.py
|
zliobaite/redescription-China
|
cab8ee08f21a0207bed5b48fd3493a3909d2a5ad
|
[
"MIT"
] | null | null | null |
python-siren/siren/interface/classGridTable.py
|
zliobaite/redescription-China
|
cab8ee08f21a0207bed5b48fd3493a3909d2a5ad
|
[
"MIT"
] | null | null | null |
import wx
import wx.grid
### from wx import grid
### from wx import Size, Brush, Colour, NullBrush, NullPen
### from wx import DC, EVT_KEY_UP, NORMAL_FONT, SOLID, TRANSPARENT_PEN
import re, colorsys, random, datetime, math
from ..reremi.toolICList import ICList
from ..reremi.classData import ColM, NA_str_def
from ..reremi.classQuery import SYM, Query, Literal
from ..reremi.classRedescription import Redescription
import pdb
def getRGB(h,l, s):
Brgb = map(int, [255*v for v in colorsys.hls_to_rgb(h, l, s)])
if l > 0.5:
Frgb = map(int, [255*v for v in colorsys.hls_to_rgb(h, 0, s)])
else:
Frgb = map(int, [255*v for v in colorsys.hls_to_rgb(h, 1, s)])
return Brgb, Frgb
class CustRenderer(wx.grid.PyGridCellRenderer):
BACKGROUND = wx.Colour(255, 255, 255, 255) # wx.Colour(100,100,100)
TEXT = wx.Colour(76, 76, 76, 255) #wx.Colour(100,100,100)
SBRUSH = wx.SOLID
BACKGROUND_SELECTED = wx.Colour(240, 119, 70, 255) # wx.Colour(100,100,100)
TEXT_SELECTED = wx.Colour(255, 255, 255, 255) # wx.Colour(100,100,100)
SBRUSH_SELECTED = wx.SOLID
BACKGROUND_GREY = wx.Colour(240,255,240)
TEXT_GREY = wx.Colour(131,139,131)
SBRUSH_GREY = wx.SOLID
MAP_SORT_NAN = {float('Nan'): None}
"""Base class for editors"""
### Customisation points
def Draw(self, grid, attr, dc, rect, row, col, isSelected):
"""Customisation Point: Draw the data from grid in the rectangle with attributes using the dc"""
dc.SetClippingRegion( rect.x, rect.y, rect.width, rect.height )
back, fore, bstyle = self.BACKGROUND, self.TEXT, self.SBRUSH
value = grid.GetCellValue( row, col )
if row in grid.GetSelectedRows():
back, fore, bstyle = self.BACKGROUND_SELECTED, self.TEXT_SELECTED, self.SBRUSH_SELECTED
elif grid.GetTable().getEnabled(row) == 0:
back, fore, bstyle = self.BACKGROUND_GREY, self.TEXT_GREY, self.SBRUSH_GREY
try:
dc.SetTextForeground( fore )
dc.SetTextBackground( back)
dc.SetBrush( wx.Brush( back, bstyle) )
dc.SetPen(wx.TRANSPARENT_PEN)
dc.DrawRectangle( rect.x, rect.y, rect.width, rect.height )
dc.SetFont( wx.NORMAL_FONT )
dc.DrawText( value, rect.x+2,rect.y+2 )
finally:
dc.SetTextForeground( self.TEXT)
dc.SetTextBackground( self.BACKGROUND)
dc.SetPen( wx.NullPen )
dc.SetBrush( wx.NullBrush )
dc.DestroyClippingRegion( )
# def GetBestSize(self, grid, attr, dc, row, col):
# """Customisation Point: Determine the appropriate (best) size for the control, return as wxSize
# Note: You _must_ return a wxSize object. Returning a two-value-tuple
# won't raise an error, but the value won't be respected by wxPython.
# """
# x,y = dc.GetTextExtent( "%s" % grid.GetCellValue( row, col ) )
# # note that the two-tuple returned by GetTextExtent won't work,
# # need to give a wxSize object back!
# return wx.Size( min(x, 10), min(y, 10))
class ColorRenderer(CustRenderer):
### Customisation points
def Draw(self, grid, attr, dc, rect, row, col, isSelected):
"""Customisation Point: Draw the data from grid in the rectangle with attributes using the dc"""
dc.SetClippingRegion( rect.x, rect.y, rect.width, rect.height )
back, fore, bstyle = self.BACKGROUND, self.TEXT, self.SBRUSH
value = grid.GetCellValue( row, col )
tmp = re.match("^#h(?P<h>[0-9]*)l(?P<l>[0-9]*)#(?P<val>.*)$", value)
if tmp is not None:
s = 1
if row in grid.GetSelectedRows(): s=0.5
elif grid.GetTable().getEnabled(row) == 0: s= 0.2
rgb_back, rgb_fore = getRGB(int(tmp.group("h"))/255.0, int(tmp.group("l"))/255.0, s)
back, fore, bstyle = wx.Colour(*rgb_back), wx.Colour(*rgb_fore), self.SBRUSH
value = tmp.group("val")
elif row in grid.GetSelectedRows():
back, fore, bstyle = self.BACKGROUND_SELECTED, self.TEXT_SELECTED, self.SBRUSH_SELECTED
elif grid.GetTable().getEnabled(row) == 0:
back, fore, bstyle = self.BACKGROUND_GREY, self.TEXT_GREY, self.SBRUSH_GREY
try:
dc.SetTextForeground( fore )
dc.SetTextBackground( back)
dc.SetBrush( wx.Brush( back, bstyle) )
dc.SetPen(wx.TRANSPARENT_PEN)
dc.DrawRectangle( rect.x, rect.y, rect.width, rect.height )
dc.SetFont( wx.NORMAL_FONT )
dc.DrawText( value, rect.x+2,rect.y+2 )
finally:
dc.SetTextForeground( self.TEXT)
dc.SetTextBackground( self.BACKGROUND)
dc.SetPen( wx.NullPen )
dc.SetBrush( wx.NullBrush )
dc.DestroyClippingRegion( )
# def GetBestSize(self, grid, attr, dc, row, col):
# """Customisation Point: Determine the appropriate (best) size for the control, return as wxSize
# Note: You _must_ return a wxSize object. Returning a two-value-tuple
# won't raise an error, but the value won't be respected by wxPython.
# """
# x,y = dc.GetTextExtent( "%s" % grid.GetCellValue( row, col ) )
# # note that the two-tuple returned by GetTextExtent won't work,
# # need to give a wxSize object back!
# return wx.Size( min(x, 10), min(y, 10))
class GridTable(wx.grid.PyGridTableBase):
fields_def = []
renderer = CustRenderer
name_m = None
#### COLUMN WIDTHS
width_colcheck = 25
width_colid = 50
width_colname = 150
width_colnamew = 300
width_colinfo = 80
width_colinfow = 100
width_colinfon = 8
def __init__(self, parent, tabId, frame, short=None):
wx.grid.PyGridTableBase.__init__(self)
self.details = {}
self.short = short
self.sc = set() # show column (for collapsed/expanded columns)
self.parent = parent
self.tabId = tabId
self.fields = self.fields_def
self.data = ICList()
self.sortids = ICList()
self.sortP = (None, False)
self.currentRows = self.nbItems()
self.currentColumns = len(self.fields)
self.matching = [] ### for find function
#### GRID
self.grid = wx.grid.Grid(frame)
self.grid.SetTable(self)
self.setSelectedRow(0)
self.grid.EnableEditing(False)
#self.grid.AutoSizeColumns(True)
self.grid.RegisterDataType(wx.grid.GRID_VALUE_STRING,
self.renderer(),
wx.grid.GridCellAutoWrapStringEditor())
self.grid.RegisterDataType(wx.grid.GRID_VALUE_BOOL,
wx.grid.GridCellBoolRenderer(),
wx.grid.GridCellBoolEditor())
# attr = wx.grid.GridCellAttr()
# attr.SetEditor(wx.grid.GridCellBoolEditor())
# attr.SetRenderer(wx.grid.GridCellBoolRenderer())
# self.grid.SetColAttr(0,attr)
self.grid.Bind(wx.EVT_KEY_UP, self.OnKU)
self.grid.Bind(wx.grid.EVT_GRID_LABEL_LEFT_CLICK, self.setSort)
self.grid.Bind(wx.grid.EVT_GRID_LABEL_RIGHT_CLICK, self.setFocus)
self.grid.Bind(wx.grid.EVT_GRID_CELL_LEFT_DCLICK, self.OnViewData)
self.grid.Bind(wx.grid.EVT_GRID_CELL_RIGHT_CLICK, self.OnRightClick)
self.grid.Bind(wx.grid.EVT_GRID_CELL_LEFT_CLICK, self.OnMouse)
# def showPopupMenu(self, event):
# self.table.highlightRow(event.GetRow())
# parent.currentList = self
def GetCellBackgroundColor(self, row, col):
"""Return the value of a cell"""
return wx.Colour(100,100,100)
def Hide(self):
self.grid.Hide()
def Show(self):
self.grid.Show()
### GRID METHOD
def GetNumberRows(self):
"""Return the number of rows in the grid"""
return self.nbItems()
### GRID METHOD
def GetColLabelValue(self, col):
"""Return the number of rows in the grid"""
direct = ' '
if col == self.sortP[0]:
if self.sortP[1]:
direct = SYM.SYM_ARRTOP
else:
direct = SYM.SYM_ARRBOT
return " %s %s" % (self.fields[col][0], direct)
### GRID METHOD
def GetNumberCols(self):
"""Return the number of columns in the grid"""
return len(self.fields)
### GRID METHOD
def IsEmptyCell(self, row, col):
"""Return True if the cell is empty"""
return self.GetValue(row, col) is None
### GRID METHOD
def GetTypeName(self, row, col):
"""Return the name of the data type of the value in the cell"""
if (col == 0):
return wx.grid.GRID_VALUE_BOOL
else:
return wx.grid.GRID_VALUE_STRING
# if row < len(self.sortids) and col < len(self.fields):
# return self.getFieldV(self.sortids[row], self.fields[col], dict(self.details))
# else:
# return None
def getFieldV(self, x, field, details):
methode = eval(field[1])
if callable(methode):
if len(field) > 2 and field[2] is not None:
details.update(field[2])
try:
return methode(details)
except IndexError:
methode(details)
else:
return methode
### GRID METHOD
def GetValue(self, row, col):
"""Return the value of a cell"""
if row >= 0 and row < self.nbItems() and col >= 0 and col < len(self.fields):
details = {"aim": "list"}
details.update(self.details)
# print "Here!", self.tabId, self.parent.selectedTab['id']#, self.sortids, row, self.fields, col
#pdb.set_trace()
return "%s" % self.getFieldV(self.sortids[row], self.fields[col], details)
else:
return None
### GRID METHOD
def SetValue(self, row, col, value):
pass
def getNamesList(self):
"""Return the value of a cell"""
names_list = []
details = {"aim": "list"}
details.update(self.details)
if self.name_m is not None:
for x in self.sortids:
v = "%s" % self.getFieldV(x, (0, self.name_m), details)
names_list.append((x,v))
return names_list
def nbItems(self):
return len(self.sortids)
def getItemAtRow(self, row):
"""Return the data of a row"""
if row < self.nbItems() and self.sortids[row] < len(self.data):
return self.data[self.sortids[row]]
else:
return None
def getRowForItem(self, rid):
"""Return the row of an entry"""
try:
return self.sortids.index(rid)
except:
return None
def getPositionFromRow(self, row):
if row is not None and row < self.nbItems() and self.sortids[row] < len(self.data):
return self.sortids[row]
else:
return None
def getRowFromPosition(self, pos):
try:
return self.sortids.index(pos)
except:
return None
def resetSizes(self):
self.GetView().AutoSize()
for coli, f in enumerate(self.fields):
if len(f) > 3:
self.GetView().SetColSize(coli, f[3])
def resetDetails(self, details={}, review=True):
self.sortP = (None, False)
self.details = details
if review:
self.ResetView()
self.resetSizes()
def resetData(self, data=None, srids=None):
if data is not None:
self.data = data
else:
self.data = ICList()
if srids is not None:
self.sortids = srids
else:
self.sortids = ICList([idi for idi in range(len(self.data))], True)
self.resetFields()
self.updateSort()
self.ResetView()
self.resetSizes()
def resetFields(self, dw=None, review=True):
self.sortP = (None, False)
def getEnabled(self, row):
return self.getItemAtRow(row).getEnabled()
def notify_change(self):
if type(self.data) == ICList:
self.data.isChanged = True
def flipEnabled(self, row):
self.data[self.sortids[row]].flipEnabled()
self.notify_change()
self.ResetView()
def flipAllEnabled(self, dids=None):
if dids is None:
dids = range(len(self.data))
for did in dids:
self.data[did].flipEnabled()
self.notify_change()
self.ResetView()
def setAllDisabled(self, dids=None):
if dids is None:
dids = range(len(self.data))
for did in dids:
self.data[did].setDisabled()
self.notify_change()
self.ResetView()
def setAllEnabled(self, dids=None):
if dids is None:
dids = range(len(self.data))
for did in dids:
self.data[did].setEnabled()
self.notify_change()
self.ResetView()
def OnMouse(self,event):
if event.GetRow() < self.nbItems():
self.setSelectedRow(event.GetRow(), event.GetCol())
if event.Col == 0:
self.flipEnabled(event.GetRow())
def ResetView(self):
"""Trim/extend the control's rows and update all values"""
self.GetView().BeginBatch()
for current, new, delmsg, addmsg in [
(self.currentRows, self.GetNumberRows(), wx.grid.GRIDTABLE_NOTIFY_ROWS_DELETED, wx.grid.GRIDTABLE_NOTIFY_ROWS_APPENDED),
(self.currentColumns, self.GetNumberCols(), wx.grid.GRIDTABLE_NOTIFY_COLS_DELETED, wx.grid.GRIDTABLE_NOTIFY_COLS_APPENDED),
]:
if new < current:
msg = wx.grid.GridTableMessage(
self,
delmsg,
new, # position
current-new,
)
self.GetView().ProcessTableMessage(msg)
elif new > current:
msg = wx.grid.GridTableMessage(
self,
addmsg,
new-current
)
self.GetView().ProcessTableMessage(msg)
self.GetView().EndBatch()
self.currentRows = self.nbItems()
self.currentColumns = len(self.fields)
if self.getSelectedRow() is not None and not self.grid.IsVisible(self.getSelectedRow(), 0):
self.grid.MakeCellVisible(self.getSelectedRow(), 0)
def deleteDisabled(self):
pass
def getSelectedItem(self):
if self.getSelectedRow() is not None:
return self.getItemAtRow(self.getSelectedRow())
return
def getSelectedPos(self):
if self.getSelectedRow() is not None:
return self.getPositionFromRow(self.getSelectedRow())
return
def getSelectedRow(self):
if len(self.GetView().GetSelectedRows()) > 0:
return self.GetView().GetSelectedRows()[0]
else:
return None
def getSelectedCol(self):
return max(0,self.GetView().GetGridCursorCol())
def setSelectedRow(self, row, col=0):
if row is None: row = 0
if col is None: col = 0
self.GetView().SetGridCursor(row,col)
self.GetView().SelectRow(row)
def neutraliseSort(self):
self.sortP = (None, False)
def setSort(self, event):
colS = event.GetCol()
if colS == -1:
pass ### TODO select all
else:
old = self.sortP[0]
if self.sortP[0] == colS:
self.sortP = (self.sortP[0], not self.sortP[1])
else:
self.sortP = (colS, True)
self.updateSort()
self.ResetView()
def setFocus(self, event):
pass
def updateSort(self):
selected_row = self.getSelectedRow()
selected_col = self.getSelectedCol()
selected_id = None
if selected_row is not None:
selected_id = self.getPositionFromRow(selected_row)
if self.sortP[0] is not None:
details = {"aim": "sort"}
details.update(self.details)
self.sortids.sort(key= lambda x: self.getFieldV(x, self.fields[self.sortP[0]], details), reverse=self.sortP[1])
if selected_id is not None:
self.setSelectedRow(self.getRowFromPosition(selected_id), selected_col)
def quitFind(self):
pass
def updateFindO(self, matching, non_matching, cid=None):
if len(matching) > 0:
self.sortP = (None, False)
selected_col = self.getSelectedCol()
self.sortids = matching+non_matching
self.matching = matching
self.setSelectedRow(len(matching)-1, selected_col)
self.ResetView()
def updateFind(self, matching=None, non_matching=None, cid=None):
if matching is not None:
self.matching = matching
if matching is None or len(matching) > 0:
self.setSelectedRow(self.getNextMatch(cid), self.getSelectedCol())
self.ResetView()
def getNextMatch(self, n=None):
if n is None:
n = self.getSelectedRow()
if len(self.matching) > 0:
if n >= self.getRowForItem(self.matching[-1]):
return self.getRowForItem(self.matching[0])
else:
for si in range(len(self.matching)):
if self.getRowForItem(self.matching[si]) > n:
return self.getRowForItem(self.matching[si])
else:
n += 1
if n == self.nbItems():
n = 0
return n
def OnRightClick(self, event):
if event.GetRow() < self.nbItems():
self.setSelectedRow(event.GetRow(), event.GetCol())
self.parent.makePopupMenu(self.parent.toolFrame)
def OnKU(self, event):
if self.grid.GetGridCursorRow() < self.nbItems():
self.setSelectedRow(self.grid.GetGridCursorRow(), self.grid.GetGridCursorCol())
event.Skip()
def OnViewData(self, event):
if event.GetRow() < self.nbItems():
self.setSelectedRow(event.GetRow(), event.GetCol())
self.viewData()
class VarTable(GridTable):
fields_def = [('','self.data[x].getEnabled', None, GridTable.width_colcheck),
('id', 'self.data[x].getId', None, GridTable.width_colid),
('name', 'self.data[x].getName', None, GridTable.width_colnamew),
('type', 'self.data[x].getType', None, GridTable.width_colinfow)]
fields_miss = [('missing', 'self.data[x].getMissInfo', None, GridTable.width_colinfo)]
fields_var = {1: [('density', 'self.data[x].getDensity', None, GridTable.width_colinfo)],
2:[('categories', 'self.data[x].getCategories', None, GridTable.width_colinfo)],
3:[('min', 'self.data[x].getMin', None, GridTable.width_colinfo),
('max', 'self.data[x].getMax', None, GridTable.width_colinfo)]}
name_m = 'self.data[x].getName'
def notify_change(self):
self.parent.updateDataInfo()
if type(self.data) == ICList:
self.data.isChanged = True
def viewData(self, pos=None, viewT=None):
if viewT is None:
viewT = self.parent.viewsm.getDefaultViewT("R", self.parent.tabs[self.tabId]["type"])
if pos is None:
datVar = self.getSelectedItem()
else:
datVar = self.getItemAtRow(pos)
queries = [Query(), Query()]
queries[datVar.side].extend(-1, Literal(False, datVar.getTerm()))
self.parent.viewsm.newRedVHist(queries, viewT)
def resetFields(self, dw=None, review=True):
self.sortP = (None, False)
self.fields = []
self.fields.extend(self.fields_def)
if len([r for r in self.data if r.hasMissing()]) > 0:
self.fields.extend(self.fields_miss)
for tyid in set([r.typeId() for r in self.data]):
self.fields.extend(self.fields_var[tyid])
class RowTable(GridTable):
## (#NBROWS)
fields_def = [('','self.data[x].getEnabled'),
('id', 'self.data[x].getId')]
name_m = 'self.data[x].getRName'
renderer = ColorRenderer
def __init__(self, parent, tabId, frame, short=None):
GridTable.__init__(self, parent, tabId, frame, short)
self.fix_col = 0
def viewData(self, pos=None, viewT=None):
if viewT is None:
viewT = self.parent.viewsm.getDefaultViewT("R", self.parent.tabs[self.tabId]["type"])
queries = [Query(), Query()]
self.parent.viewsm.newRedVHist(queries, viewT)
def resetFields(self, dw=None, review=True):
self.sortP = (None, False)
self.sc = set() # show column (for collapsed/expanded columns)
self.fix_col = 2
if dw is not None:
self.cols_map = {}
self.fields = []
for f in self.fields_def:
f = (re.sub("NBROWS", "%d" % dw.getData().nbRows(), f[0]), f[1])
self.fields.append(f)
## self.fields.extend(self.fields_def)
if dw.getData().hasRNames():
self.fields.append(('name', 'self.data[x].getRName'))
name_m = 'self.data[x].getRName'
self.fix_col += 1
for side, sideS in [(0, "LHS"),(1, "RHS")]:
nb = max(1,len(dw.getDataCols(side))-1.0)
for ci, col in enumerate(dw.getDataCols(side)):
self.cols_map[(side, col.getId())] = len(self.fields)
self.fields.append(("%s:%s" % (sideS, col.getName()), 'self.data[x].getValue', {"side":side, "col": col.getId(), "range": col.getRange(), "NA": col.NA, "r":ci/nb}))
if len(self.cols_map) <= 20:
self.sc = set(self.cols_map.values())
if review:
self.ResetView()
### GRID METHOD
def GetValue(self, row, col):
"""Return the value of a cell"""
if row >= 0 and row < self.nbItems() and col >= 0 and col < len(self.fields):
details = {"aim": "row"}
details.update(self.details)
tmp = self.getFieldV(self.sortids[row], self.fields[col], details)
details = {"aim": "list"}
details.update(self.details)
labl = self.getFieldV(self.sortids[row], self.fields[col], details)
if col >= self.fix_col:
h = 125*self.fields[col][2]["side"] + int(100*self.fields[col][2]["r"])
if tmp == "-" or (math.isnan(tmp) and math.isnan(self.fields[col][2]["NA"])) \
or (tmp == self.fields[col][2]["NA"]):
l = 255
labl = NA_str_def
else:
rangeV = self.fields[col][2]["range"]
lr = row/(1.0*self.nbItems())
if type(rangeV) is dict:
if len(rangeV) > 1:
lr = rangeV.get(tmp, 0)/(len(rangeV)-1.0)
else:
lr = 1
elif type(rangeV) is tuple:
if rangeV[0] != rangeV[1]:
lr = (rangeV[1]-tmp)/(rangeV[1]-rangeV[0])
else:
lr = 1
l = 125*lr+100
# sc = 1.0*self.fields[col][2]["max"] - self.fields[col][2]["min"]
# if sc == 0:
# lr = 0.5
# else:
# lr = (tmp - self.fields[col][2]["min"])/sc
if col in self.sc:
return "#h%dl%d#%s" % (h,l,labl)
else:
try:
return "#h%dl%d#%s" % (h,l,"")
except TypeError:
print h,l, tmp, self.fields[col][2]["range"], self.fields[col][2]["NA"]
else:
return tmp
else:
# print "Get Value RowTable", row, col
return None
### GRID METHOD
def GetColLabelValue(self, col):
"""Return the column label"""
if col >= self.fix_col and col not in self.sc:
name = ""
else:
name = " %s " % self.fields[col][0]
direct = ' '
if col == self.sortP[0]:
if self.sortP[1]:
direct = SYM.SYM_ARRTOP
else:
direct = SYM.SYM_ARRBOT
return name + direct
def notify_change(self):
self.parent.updateDataInfo()
self.parent.recomputeAll()
def resetData(self, data=None, srids=None):
if data is not None:
self.data = data
else:
self.data = ICList()
if srids is not None:
self.sortids = srids
else:
self.sortids = ICList([idi for idi in range(len(self.data))], True)
self.sortP = (None, False)
self.redraw()
def resetDetails(self, details={}, review=True):
self.sortP = (None, False)
self.details = details
if review:
self.redraw()
def redraw(self, details={}, review=True):
crow, ccol = self.GetView().GetGridCursorRow(), self.GetView().GetGridCursorCol()
self.ResetView()
self.GetView().SetColMinimalAcceptableWidth(8)
#self.GetView().SetRowMinimalAcceptableHeight(5)
self.GetView().SetDefaultColSize(8, True)
#self.GetView().SetDefaultRowSize(1, True)
self.GetView().SetColSize(0, self.width_colcheck)
self.GetView().SetColSize(1, self.width_colid)
for i in range(2, self.fix_col):
# details = {"aim": "list"}
# details.update(self.details)
# sz = max([len("%s" % self.getFieldV(sid, self.fields[i], details)) for sid in self.sortids])
self.GetView().SetColSize(i, self.width_colname) #10*(sz+2))
for cid in self.sc:
pls = 2
if cid == self.sortP[0]:
pls = 4
self.GetView().SetColSize(cid, 10*(len(self.fields[cid][0])+pls))
# self.GetView().SetRowSize(self.getSelectedRow(), 10)
# # self.GetView().SetColSize(cid, wx.DC().GetTextExtent(self.fields[cid][0]))
# self.GetView().DisableDragColSize()
self.GetView().DisableDragRowSize()
self.GetView().SetGridCursor(crow,ccol)
def setFocus(self, event):
self.flipFocusCol(event.GetCol())
def flipFocusCol(self, cid):
if cid >= self.fix_col:
if cid in self.sc:
self.sc.remove(cid)
else:
self.sc.add(cid)
self.redraw()
if self.getSelectedRow() is not None:
row = self.getSelectedRow()
else:
row = 0
if not self.grid.IsVisible(row, cid):
self.grid.MakeCellVisible(row, cid)
def setFocusCol(self, cid):
if cid >= self.fix_col:
if cid not in self.sc:
self.sc.add(cid)
self.redraw()
def delFocusCol(self, cid):
if cid >= self.fix_col:
if cid in self.sc:
self.sc.remove(cid)
self.redraw()
def showCol(self, side, col):
if (side, col) in self.cols_map and self.cols_map[(side, col)] not in self.sc:
self.sc.add(self.cols_map[(side, col)])
self.redraw()
self.grid.MakeCellVisible(self.getSelectedRow(), self.cols_map[(side, col)])
def showRidRed(self, rid, red=None):
row = self.getRowForItem(rid)
if row is not None:
self.setSelectedRow(row)
if isinstance(red, Redescription):
for side in [0,1]:
for l in red.queries[side].listLiterals():
self.sc.add(self.cols_map[(side, l.colId())])
elif isinstance(red, ColM):
self.sc.add(self.cols_map[(red.getSide(), red.getId())])
self.redraw()
return row
def setSort(self, event):
self.setFocusCol(event.GetCol())
GridTable.setSort(self, event)
def resetSizes(self):
pass
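# --- Illustrative note (editor's addition, not part of the original source) ---
# RowTable.GetValue embeds colour hints in the cell text as "#h<hue>l<lightness>#<label>",
# which ColorRenderer.Draw recovers with the regex shown above. A minimal round trip
# (the numbers here are arbitrary examples):
def _example_colour_cell(h=125, l=200, label="3.14"):
    value = "#h%dl%d#%s" % (h, l, label)
    m = re.match("^#h(?P<h>[0-9]*)l(?P<l>[0-9]*)#(?P<val>.*)$", value)
    return int(m.group("h")), int(m.group("l")), m.group("val")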
| 37.085714
| 184
| 0.557291
|
c0e096adbfefa21d8ff915e8ac347b9cec7d82a3
| 2,494
|
py
|
Python
|
twitch.py
|
mtejada/twitchplayscoffeeedition
|
a66b7070b28181851e0750506cbcfa04d23e0728
|
[
"MIT"
] | null | null | null |
twitch.py
|
mtejada/twitchplayscoffeeedition
|
a66b7070b28181851e0750506cbcfa04d23e0728
|
[
"MIT"
] | null | null | null |
twitch.py
|
mtejada/twitchplayscoffeeedition
|
a66b7070b28181851e0750506cbcfa04d23e0728
|
[
"MIT"
] | null | null | null |
import socket
import sys
import re
class Twitch:
user = ""
oauth = ""
s = None
def twitch_login_status(self, data):
if not re.match(r'^:(testserver\.local|tmi\.twitch\.tv) NOTICE \* :Login unsuccessful\r\n$', data.decode()):
return True
else:
return False
def twitch_connect(self, user, key):
self.user = user
self.oauth = key
print("Connecting to twitch.tv")
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(0.6)
connect_host = "irc.twitch.tv"
connect_port = 6667
try:
s.connect((connect_host, connect_port))
except:
print("Failed to connect to twitch")
sys.exit()
print("Connected to twitch")
print("Sending our details to twitch...")
s.send(('USER %s\r\n' % user).encode())
s.send(('PASS %s\r\n' % key).encode())
s.send(('NICK %s\r\n' % user).encode())
if not self.twitch_login_status(s.recv(1024)):
print("... and they didn't accept our details")
sys.exit()
else:
print("... they accepted our details")
print("Connected to twitch.tv!")
self.s = s
s.send(('JOIN #%s\r\n' % user).encode())
s.recv(1024)
def check_has_message(self, data):
return re.match(
r'^:[a-zA-Z0-9_]+\![a-zA-Z0-9_]+@[a-zA-Z0-9_]+(\.tmi\.twitch\.tv|\.testserver\.local) PRIVMSG #[a-zA-Z0-9_]+ :.+$',
data.decode())
def parse_message(self, data):
return {
'channel': re.findall(r'^:.+\![a-zA-Z0-9_]+@[a-zA-Z0-9_]+.+ PRIVMSG (.*?) :', data)[0],
'username': re.findall(r'^:([a-zA-Z0-9_]+)\!', data)[0],
'message': re.findall(r'PRIVMSG #[a-zA-Z0-9_]+ :(.+)', data)[0]
}
def twitch_recieve_messages(self, amount=1024):
data = None
try:
data = self.s.recv(1024)
except:
return False
if not data:
print("Lost connection to Twitch, attempting to reconnect...")
self.twitch_connect(self.user, self.oauth)
return None
if data.decode().startswith("PING"):
            self.s.send(("PONG " + data.decode().split(" ", 3)[1] + "\r\n").encode())
return None
if self.check_has_message(data):
return [self.parse_message(line) for line in filter(None, data.decode()
.split('\r\n'))]
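# --- Illustrative usage sketch (editor's addition, not part of the original source) ---
# A minimal polling loop for the class above. The username and oauth key are placeholders;
# Twitch IRC expects the key in the form "oauth:<token>".
def example_chat_loop(user="your_twitch_username", key="oauth:your_token_here"):
    t = Twitch()
    t.twitch_connect(user, key)
    while True:
        messages = t.twitch_recieve_messages()
        if messages:
            for message in messages:
                print("%s: %s" % (message['username'], message['message']))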
| 32.38961
| 127
| 0.526864
|
f79235052c732b4573c21ea4fe42b9568ef62eaf
| 1,370
|
py
|
Python
|
Chapter_3/Chapter_3_1_4_2.py
|
flytian/python_machinelearning
|
004707c3e66429f102272a7da97e532255cca293
|
[
"Apache-2.0"
] | null | null | null |
Chapter_3/Chapter_3_1_4_2.py
|
flytian/python_machinelearning
|
004707c3e66429f102272a7da97e532255cca293
|
[
"Apache-2.0"
] | null | null | null |
Chapter_3/Chapter_3_1_4_2.py
|
flytian/python_machinelearning
|
004707c3e66429f102272a7da97e532255cca293
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
# Import the 20-newsgroups text fetcher from sklearn.datasets.
from sklearn.datasets import fetch_20newsgroups
# Import numpy and alias it as np.
import numpy as np
# Use the fetcher to download the full dataset from the internet and store it in the variable news.
news = fetch_20newsgroups(subset='all')
# Import train_test_split from sklearn.cross_validation to split the data.
from sklearn.cross_validation import train_test_split
# Split the first 3000 news documents, holding out 25% of the texts for testing.
X_train, X_test, y_train, y_test = train_test_split(news.data[:3000], news.target[:3000], test_size=0.25,
random_state=33)
# Import the support vector machine (classification) model.
from sklearn.svm import SVC
# Import the TfidfVectorizer text feature extractor.
from sklearn.feature_extraction.text import TfidfVectorizer
# Import Pipeline.
from sklearn.pipeline import Pipeline
# Use a Pipeline to simplify the workflow, chaining the text feature extraction and the classifier together.
clf = Pipeline([('vect', TfidfVectorizer(stop_words='english', analyzer='word')), ('svc', SVC())])
# The two hyperparameters searched here take 4 and 3 candidate values respectively (svc__gamma covers 10^-2, 10^-1, ...), giving 12 hyperparameter combinations, i.e. 12 models with different settings.
parameters = {'svc__gamma': np.logspace(-2, 1, 4), 'svc__C': np.logspace(-1, 1, 3)}
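# (Editor's note) np.logspace(-2, 1, 4) evaluates to [0.01, 0.1, 1.0, 10.0] and
# np.logspace(-1, 1, 3) to [0.1, 1.0, 10.0], so the grid above contains 4 * 3 = 12
# candidate hyperparameter combinations, matching the comment above.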
# Import the grid search module GridSearchCV from sklearn.grid_search.
from sklearn.grid_search import GridSearchCV
# Initialize and configure a parallel grid search; n_jobs=-1 means use all of this machine's CPUs.
gs = GridSearchCV(clf, parameters, verbose=2, refit=True, cv=3, n_jobs=-1)
# Run the parallel grid search.
%time _ = gs.fit(X_train, y_train)
gs.best_params_, gs.best_score_
# Print the accuracy of the best model on the test set.
print gs.score(X_test, y_test)
| 31.136364
| 105
| 0.751825
|
107fd7d2b07a1de86d9a61186f1e8a09a7d173bb
| 6,607
|
py
|
Python
|
tests/pytests/integration/reactor/test_reactor.py
|
WeDoSoftware/salt
|
cfd83e7cd63636f1f23230b9ebf5db496e93fd95
|
[
"Apache-2.0"
] | null | null | null |
tests/pytests/integration/reactor/test_reactor.py
|
WeDoSoftware/salt
|
cfd83e7cd63636f1f23230b9ebf5db496e93fd95
|
[
"Apache-2.0"
] | null | null | null |
tests/pytests/integration/reactor/test_reactor.py
|
WeDoSoftware/salt
|
cfd83e7cd63636f1f23230b9ebf5db496e93fd95
|
[
"Apache-2.0"
] | null | null | null |
"""
tests.pytests.integration.reactor.test_reactor
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Test Salt's reactor system
"""
import logging
import pathlib
import time
import pytest
import salt.utils.event
import salt.utils.reactor
from salt.serializers import yaml
from tests.support.helpers import PRE_PYTEST_SKIP_REASON
pytestmark = [
pytest.mark.slow_test,
pytest.mark.windows_whitelisted,
]
log = logging.getLogger(__name__)
@pytest.fixture(scope="module")
def event_listener(salt_factories):
return salt_factories.event_listener
@pytest.fixture
def master_event_bus(salt_master):
with salt.utils.event.get_master_event(
salt_master.config.copy(),
salt_master.config["sock_dir"],
listen=True,
raise_errors=True,
) as event:
yield event
def test_ping_reaction(event_listener, salt_minion):
"""
Fire an event on the master and ensure that it pings the minion
"""
event_tag = "/test_event"
start_time = time.time()
# Create event bus connection
with salt.utils.event.get_event(
"minion",
sock_dir=salt_minion.config["sock_dir"],
opts=salt_minion.config.copy(),
) as event:
event.fire_event({"a": "b"}, event_tag)
event_pattern = (salt_minion.id, event_tag)
matched_events = event_listener.wait_for_events(
[event_pattern], after_time=start_time, timeout=90
)
assert matched_events.found_all_events
for event in matched_events:
assert event.data == {"a": "b"}
@pytest.mark.skip_on_windows(reason=PRE_PYTEST_SKIP_REASON)
def test_reactor_reaction(
event_listener, salt_master, salt_minion, master_event_bus, reactor_event
):
"""
Fire an event on the master and ensure the reactor event responds
"""
start_time = time.time()
master_event_bus.fire_event({"id": salt_minion.id}, reactor_event.tag)
event_pattern = (salt_master.id, reactor_event.event_tag)
matched_events = event_listener.wait_for_events(
[event_pattern], after_time=start_time, timeout=90
)
assert matched_events.found_all_events
for event in matched_events:
assert event.data["test_reaction"] is True
@pytest.mark.skip_on_windows(reason=PRE_PYTEST_SKIP_REASON)
def test_reactor_is_leader(
event_listener,
salt_master,
salt_run_cli,
master_event_bus,
reactor_event,
salt_minion,
):
"""
    If the reactor system is unavailable, an exception is thrown.
    When leader is true (the default), the reaction event should fire.
    When leader is set to false, the reactor should time out and not react.
"""
ret = salt_run_cli.run("reactor.is_leader")
assert ret.exitcode == 0
assert (
"salt.exceptions.CommandExecutionError: Reactor system is not running."
in ret.stdout
)
ret = salt_run_cli.run("reactor.set_leader", value=True)
assert ret.exitcode == 0
assert (
"salt.exceptions.CommandExecutionError: Reactor system is not running."
in ret.stdout
)
ret = salt_run_cli.run("reactor.is_leader")
assert ret.exitcode == 0
assert (
"salt.exceptions.CommandExecutionError: Reactor system is not running."
in ret.stdout
)
# make reactor not the leader; ensure reactor engine is available
engines_config = salt_master.config.get("engines").copy()
for idx, engine in enumerate(list(engines_config)):
if "reactor" in engine:
engines_config.pop(idx)
engines_config.append(
{
"reactor": {
"refresh_interval": 60,
"worker_threads": 10,
"worker_hwm": 10000,
}
}
)
config_overrides = yaml.serialize({"engines": engines_config})
confd_dir = (
pathlib.Path(salt_master.config_dir)
/ pathlib.Path(salt_master.config["default_include"]).parent
)
confd_dir.mkdir(exist_ok=True)
# Now, with the temp config in place, ensure the reactor engine is running
with pytest.helpers.temp_file("reactor-test.conf", config_overrides, confd_dir):
ret = salt_run_cli.run("reactor.set_leader", value=True)
assert ret.exitcode == 0
assert (
"CommandExecutionError" not in ret.stdout
), "reactor engine is not running"
ret = salt_run_cli.run("reactor.is_leader")
assert ret.exitcode == 0
assert ret.stdout.endswith("\ntrue\n")
ret = salt_run_cli.run("reactor.set_leader", value=False)
assert ret.exitcode == 0
ret = salt_run_cli.run("reactor.is_leader")
assert ret.exitcode == 0
assert ret.stdout.endswith("\nfalse\n")
start_time = time.time()
log.warning("START\n\n\n")
master_event_bus.fire_event({"id": salt_minion.id}, reactor_event.tag)
# Since leader is false, let's try and get the fire event to ensure it was triggered
event_pattern = (salt_master.id, reactor_event.tag)
matched_events = event_listener.wait_for_events(
[event_pattern], after_time=start_time, timeout=90
)
assert matched_events.found_all_events
# Now that we matched the trigger event, let's confirm we don't get the reaction event
event_pattern = (salt_master.id, reactor_event.event_tag)
matched_events = event_listener.wait_for_events(
[event_pattern], after_time=start_time, timeout=30
)
assert matched_events.found_all_events is not True
# make reactor the leader again; ensure reactor engine is available
ret = salt_run_cli.run("reactor.set_leader", value=True)
assert ret.exitcode == 0
ret = salt_run_cli.run("reactor.is_leader")
assert ret.exitcode == 0
assert ret.stdout.endswith("\ntrue\n")
# trigger a reaction
start_time = time.time()
master_event_bus.fire_event({"id": salt_minion.id}, reactor_event.tag)
event_pattern = (salt_master.id, reactor_event.event_tag)
matched_events = event_listener.wait_for_events(
[event_pattern], after_time=start_time, timeout=90
)
assert matched_events.found_all_events
for event in matched_events:
assert event.data["test_reaction"] is True
# Let's just confirm the engine is not running once again(because the config file is deleted by now)
ret = salt_run_cli.run("reactor.is_leader")
assert ret.exitcode == 0
assert (
"salt.exceptions.CommandExecutionError: Reactor system is not running."
in ret.stdout
)
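For context, a minimal sketch (editorial addition) of firing a custom event on the master bus outside of pytest, mirroring what the master_event_bus fixture and the fire_event() calls above do; the master config path and the event tag are assumptions, and a running Salt master is required.

import salt.config
import salt.utils.event

opts = salt.config.master_config("/etc/salt/master")  # assumed default config path
with salt.utils.event.get_master_event(opts, opts["sock_dir"], listen=False) as event:
    event.fire_event({"id": "my-minion"}, "salt/test/reactor/trigger")  # hypothetical tag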
| 32.707921
| 104
| 0.67262
|
843497db467876b9471930678ffb35592f59fec2
| 1,548
|
py
|
Python
|
examples/AdCreativePreviewsEdge.py
|
pasha-r/facebook-python-ads-sdk
|
76feadd77baed839516b53297628e7a254c8c3c0
|
[
"CNRI-Python"
] | null | null | null |
examples/AdCreativePreviewsEdge.py
|
pasha-r/facebook-python-ads-sdk
|
76feadd77baed839516b53297628e7a254c8c3c0
|
[
"CNRI-Python"
] | null | null | null |
examples/AdCreativePreviewsEdge.py
|
pasha-r/facebook-python-ads-sdk
|
76feadd77baed839516b53297628e7a254c8c3c0
|
[
"CNRI-Python"
] | 1
|
2018-09-24T14:04:48.000Z
|
2018-09-24T14:04:48.000Z
|
# Copyright 2014 Facebook, Inc.
# You are hereby granted a non-exclusive, worldwide, royalty-free license to
# use, copy, modify, and distribute this software in source code or binary
# form for use in connection with the web services and APIs provided by
# Facebook.
# As with any software that integrates with the Facebook platform, your use
# of this software is subject to the Facebook Developer Principles and
# Policies [http://developers.facebook.com/policy/]. This copyright notice
# shall be included in all copies or substantial portions of the software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from facebook_business.adobjects.adcreative import AdCreative
from facebook_business.adobjects.adpreview import AdPreview
from facebook_business.api import FacebookAdsApi
access_token = '<ACCESS_TOKEN>'
app_secret = '<APP_SECRET>'
app_id = '<APP_ID>'
id = '<AD_CREATIVE_ID>'
FacebookAdsApi.init(access_token=access_token)
fields = [
]
params = {
'ad_format': 'DESKTOP_FEED_STANDARD',
'product_item_ids': ['<productItemID>'],
}
print(AdCreative(id).get_previews(
fields=fields,
params=params,
))
| 38.7
| 76
| 0.781008
|
95cce9d0961eb918b9549b1815f3e7049d43f2d1
| 4,627
|
py
|
Python
|
src/backend/build_models/build_emnist_balanced_model.py
|
Nauheimer/swtp-ocr
|
5590a510bfee81f2ac48ea4b56ea6c6bd48607be
|
[
"Apache-2.0"
] | null | null | null |
src/backend/build_models/build_emnist_balanced_model.py
|
Nauheimer/swtp-ocr
|
5590a510bfee81f2ac48ea4b56ea6c6bd48607be
|
[
"Apache-2.0"
] | null | null | null |
src/backend/build_models/build_emnist_balanced_model.py
|
Nauheimer/swtp-ocr
|
5590a510bfee81f2ac48ea4b56ea6c6bd48607be
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python3
import os
#os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tensorflow as tf
import numpy as np
import pandas as pd
from random import randint
import matplotlib.pyplot as plt
from tensorflow.python.keras.layers.core import Dropout
keras = tf.keras
""" loads each data from emnist dataset. The csv mapp file for future predictions is also read. """
def load_emnist():
mapp = pd.read_csv("docs/emnist-balanced-mapping.txt", delimiter= ',', header=None, squeeze=True)
test = pd.read_csv("docs/emnist-balanced-test.csv", delimiter= ',')
train = pd.read_csv("docs/emnist-balanced-train.csv", delimiter= ',')
return test, train, mapp
"""
Extracts the test and the train data from the EMNIST CSV files.
The labels of both sets are taken from the first column of the CSV file,
and the pixel data from the second column to the last.
Both sets are converted to arrays so the data can be reshaped later.
Returns the extracted data and labels of the train and test sets respectively.
"""
def extract_datas(test, train):
test_data = test.iloc[:, 1:]
test_label = np.array(test.iloc[:, 0].values)
train_data = train.iloc[:, 1:]
train_label = np.array(train.iloc[:, 0].values)
print(train_data.shape,train_label.shape,test_data.shape,test_label.shape)
print('\n')
train_data = np.array(train_data)
test_data = np.array(test_data)
return (train_data, train_label), (test_data, test_label)
""" reshapes each data array and downscales the images pixels for a better training with CNN. """
def image_preprocessing(train_data, test_data):
#reshape each image of train and test set
train_data = [data.reshape(28, 28) for data in train_data]
train_data = [np.fliplr(image) for image in train_data]
train_data = [np.rot90(image) for image in train_data]
train_data = np.asarray(train_data)
test_data = [data.reshape(28, 28) for data in test_data]
test_data = [np.fliplr(data) for data in test_data]
test_data = [np.rot90(data) for data in test_data]
test_data = np.asarray(test_data)
train_data = train_data.astype('float32')
test_data = test_data.astype('float32')
# downscale image pixels from [0, 255] to [0, 1]
train_data /= 255.0
test_data /= 255.0
print(train_data.shape, test_data.shape)
return train_data, test_data
""" plots some sample images. """
def plot_sample_images(data, label, mapp):
plt.figure(figsize = (10,10))
row, colums = 4, 4
for i in range(16):
plt.subplot(colums, row, i+1)
index = randint(0, len(data))
plt.savefig("plot_pics/emnist_plot")
print(data[index].shape)
plt.imshow(tf.squeeze(data[index]), cmap='gray_r')
print(label[index])
num = int(label[index])
print(num)
plt.title(chr(mapp[num]))
plt.show()
""" builds and train the CNN Network to get a performant model for future predictions. """
def train_emnist():
train, test, mapp = load_emnist()
(train_data, train_label), (test_data, test_label) = extract_datas(train, test)
train_data, test_data = image_preprocessing(train_data, test_data)
plot_sample_images(train_data, train_label, mapp)
model = keras.Sequential([
keras.layers.Flatten(input_shape=(28, 28, 1)), # input layer (1)
keras.layers.Dense(128, activation='relu'), # hidden layer (2)
        keras.layers.Dense(512, activation='relu'), # hidden layer (3)
        keras.layers.Dense(47, activation='softmax') # output layer (4)
])
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(train_data, train_label, epochs=10)
test_loss, test_acc = model.evaluate(test_data, test_label)
print('Test accuracy:', test_acc)
#saving the model
save_dir = "results"
model_name = "trained_emnist"
model_path = os.path.join(save_dir, model_name)
model.save(model_path)
print('saved trained model at %s ', model_path)
prediction = model.predict(test_data)
plt.figure(figsize = (10,10))
row, colums = 4, 4
for i in range(16):
plt.subplot(colums, row, i + 1)
index = randint(0, len(test_data))
plt.imshow(tf.squeeze(test_data[index]), cmap='Greys')
plt.title(f"pre={chr(mapp[np.argmax(prediction[index])])} real={chr(mapp[test_label[index]])}")
plt.axis('off')
plt.savefig("demo_emnist.png", bbox_inches='tight')
plt.show()
train_emnist()
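A small inference sketch (editorial addition): it assumes train_emnist() has already saved the model under results/trained_emnist and that the input is a single 28x28 grayscale array preprocessed the same way as above (flipped, rotated and scaled to [0, 1]); a channel axis is added to match the (28, 28, 1) input shape declared in the Sequential model.

import numpy as np
import tensorflow as tf

model = tf.keras.models.load_model("results/trained_emnist")
image = np.zeros((28, 28), dtype="float32")                 # placeholder preprocessed input
probs = model.predict(image[np.newaxis, :, :, np.newaxis])  # shape (1, 47)
predicted_class = int(np.argmax(probs, axis=-1)[0])         # index into the EMNIST mapping file
print(predicted_class)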
| 35.868217
| 109
| 0.681219
|
bda521c2b8ae22ba551f8aeee907bb169f966f42
| 502
|
py
|
Python
|
app.py
|
piian/bilibili_video
|
4740653aab2b8255e325716e1ca3c6b62723b2c3
|
[
"MIT"
] | null | null | null |
app.py
|
piian/bilibili_video
|
4740653aab2b8255e325716e1ca3c6b62723b2c3
|
[
"MIT"
] | 2
|
2021-04-06T18:11:25.000Z
|
2021-06-02T02:41:51.000Z
|
app.py
|
piian/bilibili_video
|
4740653aab2b8255e325716e1ca3c6b62723b2c3
|
[
"MIT"
] | null | null | null |
import os
import time
from flask import Flask
import api
import video
app = Flask(__name__, instance_relative_config=True)
app.config.from_mapping(
SECRET_KEY='dev',
DATABASE=os.path.join(app.instance_path, 'video.db'),
)
app.debug = True
@app.template_filter('datetime')
def filter_datetime(s):
return time.strftime('%Y-%m-%d %H:%M', time.localtime(s))
app.register_blueprint(video.bp)
app.register_blueprint(api.api_bp, url_prefix='/api')
if __name__ == '__main__':
app.run()
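A small usage note (editorial addition): the datetime filter simply formats a Unix timestamp on the local clock, so "{{ some_timestamp|datetime }}" in a Jinja template renders the same string as the direct call below; the value and output shown are illustrative only.

import time
print(filter_datetime(time.time()))  # e.g. "2021-06-02 10:41", depending on the local timezone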
| 18.592593
| 61
| 0.729084
|
cf2bd6096db6f39f066ffef15b070cf86aef00f0
| 5,008
|
py
|
Python
|
users/views.py
|
Yuri-Lima/SharePay
|
18547053f7e86571366abf4ec4310bf1553395c5
|
[
"MIT"
] | 1
|
2021-06-14T00:42:52.000Z
|
2021-06-14T00:42:52.000Z
|
users/views.py
|
Yuri-Lima/SharePay
|
18547053f7e86571366abf4ec4310bf1553395c5
|
[
"MIT"
] | 72
|
2021-06-08T14:18:23.000Z
|
2021-07-19T05:33:40.000Z
|
users/views.py
|
Yuri-Lima/SharePay
|
18547053f7e86571366abf4ec4310bf1553395c5
|
[
"MIT"
] | null | null | null |
from django.contrib.auth.models import UserManager
from users.models import CustomUser
from django.urls import reverse_lazy
from django.views.generic.edit import DeleteView, UpdateView
from django.shortcuts import render
from django.http.response import HttpResponseRedirect
from allauth.account import signals
from allauth.account.views import(
LoginView,
SignupView,
LogoutView,
PasswordSetView,
PasswordChangeView,
PasswordResetView,
EmailView,
PasswordResetFromKeyView,
PasswordResetFromKeyDoneView,
ConfirmEmailView,
)
from allauth.socialaccount.views import(
ConnectionsView,
)
from .forms import (
CustomLoginAccount,
CustomSignupAccount,
SetPasswordFormAccount,
ChangePasswordFormAccount,
ResetPasswordFormAccount,
CustomAddEmailAccount,
SignupFormSocialAccount,
DisconnectFormAccount,
CustomUserChangeForm
)
from django.contrib.auth.models import AnonymousUser
"""
[Source]
https://docs.djangoproject.com/en/3.1/topics/auth/default/#django.contrib.auth.views.PasswordChangeView
"""
class ProfileUserView(UpdateView):
model = CustomUser
template_name = 'account/profile.html'
form_class = CustomUserChangeForm
success_url = reverse_lazy('share:index')
def get(self, request, *args, **kwargs):
self.object = self.get_object(queryset=CustomUser.objects.all())
return super(ProfileUserView, self).get(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
super(ProfileUserView, self).post(request, *args, **kwargs)
self.object = self.get_object(queryset=CustomUser.objects.all())
form = self.get_form()
if form.is_valid():
# print(form)
return self.form_valid(form)
else:
# print(form.errors)
return self.form_invalid(form)
# return super(ProfileUserView, self).post(request, *args, **kwargs)
class LoginUserView(LoginView):
template_name = 'account/login.html'
success_url = reverse_lazy('share:index')
# form_class = CustomLoginAccount
def get_context_data(self, **kwargs):
ctx = {
'AnonymousUser': AnonymousUser.id
}
return super(LoginUserView, self).get_context_data(**kwargs)
class SignUpUserView(SignupView):
template_name = 'account/signup.html'
form_class = CustomSignupAccount
class LogoutUserView(LogoutView):
pass
# template_name = 'account/signup.html'
# form_class = CustomLoginAccount
# success_url = reverse_lazy('users:account_login')
class PasswordSetUserView(PasswordSetView):
form_class = SetPasswordFormAccount
class PasswordChangeUserView(PasswordChangeView):
form_class = ChangePasswordFormAccount
class PasswordResetUserView(PasswordResetView):
template_name = 'account/password_reset.html'
form_class = ResetPasswordFormAccount
# success_url= reverse_lazy('users:password_reset_done')
# email_template_name = 'account/resets_template/password_reset_email.html'
# extra_context = 'Dict'
class PasswordResetFromKeyUserView(PasswordResetFromKeyView):
template_name = 'account/password_reset.html'
form_class = ResetPasswordFormAccount
class PasswordResetFromKeyDoneUserView(PasswordResetFromKeyDoneView):
template_name = 'account/password_reset_from_key_done.html'
class EmailUserView(EmailView):
template_name= 'account/email.html'
form_class = CustomAddEmailAccount
class ConnectionsUserView(ConnectionsView):
# template_name = 'account/password_reset_confirm_new.html'
form_class = DisconnectFormAccount
# success_url = reverse_lazy('users:password_reset_complete')
post_reset_login = True #A boolean indicating if the user should be automatically authenticated after a successful password reset
# extra_context = 'Dict'
class ExcludeUserData(DeleteView):
queryset = CustomUser.objects.all()
template_name = 'account/delete_user_data.html'
success_url = reverse_lazy('share:index')
def delete(self, request, *args, **kwargs):
"""
Call the delete() method on the fetched object and then redirect to the
success URL.
"""
self.object = self.get_object()
success_url = self.get_success_url()
self.object.delete()
return HttpResponseRedirect(success_url)
# Add support for browsers which only accept GET and POST for now.
def post(self, request, *args, **kwargs):
return self.delete(request, *args, **kwargs)
def privacy_policy(request):
template_name = 'account/privacy_policy.html'
content_type = 'text/html'
return render(request, template_name)
def service_term(request):
template_name = 'account/service_term.html'
content_type = 'text/html'
return render(request, template_name)
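A hypothetical urls.py sketch (editorial addition; the route paths and names are assumptions, not taken from the project) showing how a few of the class-based views above would typically be wired up:

from django.urls import path
from . import views

app_name = "users"
urlpatterns = [
    path("login/", views.LoginUserView.as_view(), name="account_login"),
    path("signup/", views.SignUpUserView.as_view(), name="account_signup"),
    path("profile/<int:pk>/", views.ProfileUserView.as_view(), name="profile"),
    path("delete/<int:pk>/", views.ExcludeUserData.as_view(), name="delete_user_data"),
    path("privacy-policy/", views.privacy_policy, name="privacy_policy"),
]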
| 33.610738
| 134
| 0.708267
|
4b7bdb993b1001e68db11274ee1b9e24ed7f7fc8
| 1,190
|
py
|
Python
|
fostool/task/logger.py
|
meng-zha/FOST
|
2fc32ded470cc465a64f46a2c06c2a081e02f3e5
|
[
"MIT"
] | 181
|
2021-11-12T09:17:54.000Z
|
2022-03-22T05:53:35.000Z
|
fostool/task/logger.py
|
meng-zha/FOST
|
2fc32ded470cc465a64f46a2c06c2a081e02f3e5
|
[
"MIT"
] | 12
|
2021-11-12T16:50:47.000Z
|
2022-03-07T09:22:16.000Z
|
fostool/task/logger.py
|
meng-zha/FOST
|
2fc32ded470cc465a64f46a2c06c2a081e02f3e5
|
[
"MIT"
] | 41
|
2021-11-13T14:33:49.000Z
|
2022-03-11T04:19:58.000Z
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# coding:utf-8
from __future__ import absolute_import, division, print_function
import logging
import os, sys
class PackagePathFilter(logging.Filter):
def filter(self, record):
"""add relative path to record
"""
pathname = record.pathname
record.relativepath = None
abs_sys_paths = map(os.path.abspath, sys.path)
for path in sorted(abs_sys_paths, key=len, reverse=True): # longer paths first
if not path.endswith(os.sep):
path += os.sep
if pathname.startswith(path):
record.relativepath = os.path.relpath(pathname, path)
break
return True
def setLogger():
"""set logger formatter and level
"""
    LOG_FORMAT = "%(asctime)s %(relativepath)s %(lineno)s - %(levelname)s - %(message)s"
DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
    logger = logging.StreamHandler()
    logger.setLevel(logging.INFO)
    logger.addFilter(PackagePathFilter())
    formatter = logging.Formatter(LOG_FORMAT, DATE_FORMAT)
    logger.setFormatter(formatter)
logging.getLogger().addHandler(logger)
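A short usage sketch (editorial addition): setLogger() attaches the configured StreamHandler to the root logger, so module-level loggers pick up the relative-path format added by PackagePathFilter. The handler level is INFO, but the root logger still defaults to WARNING, so INFO records also need the root level lowered as shown.

import logging

setLogger()
logging.getLogger().setLevel(logging.INFO)
logging.getLogger(__name__).info("logger configured")
# emits something like: 2022-01-01 12:00:00 fostool/task/logger.py 5 - INFO - logger configured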
| 32.162162
| 90
| 0.652941
|
331a0fcd42f72cce35cf4aa601c6be78b75eab21
| 7,139
|
py
|
Python
|
qiskit/transpiler/passes/scheduling/asap.py
|
QAMP-Spring-2022-Transpiler-Hackathon/qiskit-terra
|
aee0dc4d538991560f212411db92cde5f511f65b
|
[
"Apache-2.0"
] | 1,456
|
2017-08-05T16:33:05.000Z
|
2018-06-05T04:15:35.000Z
|
qiskit/transpiler/passes/scheduling/asap.py
|
QAMP-Spring-2022-Transpiler-Hackathon/qiskit-terra
|
aee0dc4d538991560f212411db92cde5f511f65b
|
[
"Apache-2.0"
] | 365
|
2017-08-04T06:09:16.000Z
|
2018-06-05T08:33:37.000Z
|
qiskit/transpiler/passes/scheduling/asap.py
|
QAMP-Spring-2022-Transpiler-Hackathon/qiskit-terra
|
aee0dc4d538991560f212411db92cde5f511f65b
|
[
"Apache-2.0"
] | 463
|
2017-08-05T04:10:01.000Z
|
2018-06-05T06:43:21.000Z
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""ASAP Scheduling."""
import warnings
from qiskit.circuit import Delay, Qubit, Measure
from qiskit.dagcircuit import DAGCircuit
from qiskit.transpiler.exceptions import TranspilerError
from .base_scheduler import BaseSchedulerTransform
class ASAPSchedule(BaseSchedulerTransform):
"""ASAP Scheduling pass, which schedules the start time of instructions as early as possible..
See :class:`~qiskit.transpiler.passes.scheduling.base_scheduler.BaseSchedulerTransform` for the
detailed behavior of the control flow operation, i.e. ``c_if``.
.. note::
This base class has been superseded by :class:`~.ASAPScheduleAnalysis` and
the new scheduling workflow. It will be deprecated and subsequently
removed in a future release.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
        warnings.warn(
            "The ASAPSchedule class has been superseded by the ASAPScheduleAnalysis class, "
            "which performs the analysis pass that requires a padding pass to later modify "
"the circuit. This class will be deprecated in a future release and subsequently "
"removed after that.",
PendingDeprecationWarning,
)
def run(self, dag):
"""Run the ASAPSchedule pass on `dag`.
Args:
dag (DAGCircuit): DAG to schedule.
Returns:
DAGCircuit: A scheduled DAG.
Raises:
TranspilerError: if the circuit is not mapped on physical qubits.
TranspilerError: if conditional bit is added to non-supported instruction.
"""
if len(dag.qregs) != 1 or dag.qregs.get("q", None) is None:
raise TranspilerError("ASAP schedule runs on physical circuits only")
time_unit = self.property_set["time_unit"]
new_dag = DAGCircuit()
for qreg in dag.qregs.values():
new_dag.add_qreg(qreg)
for creg in dag.cregs.values():
new_dag.add_creg(creg)
idle_after = {q: 0 for q in dag.qubits + dag.clbits}
bit_indices = {q: index for index, q in enumerate(dag.qubits)}
for node in dag.topological_op_nodes():
op_duration = self._get_node_duration(node, bit_indices, dag)
# compute t0, t1: instruction interval, note that
# t0: start time of instruction
# t1: end time of instruction
if isinstance(node.op, self.CONDITIONAL_SUPPORTED):
t0q = max(idle_after[q] for q in node.qargs)
if node.op.condition_bits:
                    # conditionals are a bit tricky due to conditional_latency
t0c = max(idle_after[bit] for bit in node.op.condition_bits)
if t0q > t0c:
# This is situation something like below
#
# |t0q
# Q ▒▒▒▒▒▒▒▒▒░░
# C ▒▒▒░░░░░░░░
# |t0c
#
# In this case, you can insert readout access before tq0
#
# |t0q
# Q ▒▒▒▒▒▒▒▒▒▒▒
# C ▒▒▒░░░▒▒░░░
# |t0q - conditional_latency
#
t0c = max(t0q - self.conditional_latency, t0c)
t1c = t0c + self.conditional_latency
for bit in node.op.condition_bits:
# Lock clbit until state is read
idle_after[bit] = t1c
# It starts after register read access
t0 = max(t0q, t1c)
else:
t0 = t0q
t1 = t0 + op_duration
else:
if node.op.condition_bits:
raise TranspilerError(
f"Conditional instruction {node.op.name} is not supported in ASAP scheduler."
)
if isinstance(node.op, Measure):
                    # measure instruction handling is a bit tricky due to clbit_write_latency
t0q = max(idle_after[q] for q in node.qargs)
t0c = max(idle_after[c] for c in node.cargs)
# Assume following case (t0c > t0q)
#
# |t0q
# Q ▒▒▒▒░░░░░░░░░░░░
# C ▒▒▒▒▒▒▒▒░░░░░░░░
# |t0c
#
# In this case, there is no actual clbit access until clbit_write_latency.
# The node t0 can be push backward by this amount.
#
# |t0q' = t0c - clbit_write_latency
# Q ▒▒▒▒░░▒▒▒▒▒▒▒▒▒▒
# C ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒
# |t0c' = t0c
#
# rather than naively doing
#
# |t0q' = t0c
# Q ▒▒▒▒░░░░▒▒▒▒▒▒▒▒
# C ▒▒▒▒▒▒▒▒░░░▒▒▒▒▒
# |t0c' = t0c + clbit_write_latency
#
t0 = max(t0q, t0c - self.clbit_write_latency)
t1 = t0 + op_duration
for clbit in node.cargs:
idle_after[clbit] = t1
else:
# It happens to be directives such as barrier
t0 = max(idle_after[bit] for bit in node.qargs + node.cargs)
t1 = t0 + op_duration
# Add delay to qubit wire
for bit in node.qargs:
delta = t0 - idle_after[bit]
if delta > 0 and isinstance(bit, Qubit):
new_dag.apply_operation_back(Delay(delta, time_unit), [bit], [])
idle_after[bit] = t1
new_dag.apply_operation_back(node.op, node.qargs, node.cargs)
circuit_duration = max(idle_after.values())
for bit, after in idle_after.items():
delta = circuit_duration - after
if not (delta > 0 and isinstance(bit, Qubit)):
continue
new_dag.apply_operation_back(Delay(delta, time_unit), [bit], [])
new_dag.name = dag.name
new_dag.metadata = dag.metadata
new_dag.calibrations = dag.calibrations
# set circuit duration and unit to indicate it is scheduled
new_dag.duration = circuit_duration
new_dag.unit = time_unit
return new_dag
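A hedged usage sketch (editorial addition; the durations are made-up dt values, not taken from any real backend): TimeUnitConversion fills in the property_set["time_unit"] entry this pass reads, and the result is a circuit padded with Delay instructions.

from qiskit import QuantumCircuit
from qiskit.transpiler import PassManager, InstructionDurations
from qiskit.transpiler.passes import TimeUnitConversion, ASAPSchedule

durations = InstructionDurations(
    [("h", None, 160), ("cx", None, 800), ("measure", None, 4000)]  # illustrative dt values
)
qc = QuantumCircuit(2, 2)
qc.h(0)
qc.cx(0, 1)
qc.measure([0, 1], [0, 1])

pm = PassManager([TimeUnitConversion(durations), ASAPSchedule(durations)])
scheduled = pm.run(qc)  # emits the PendingDeprecationWarning from __init__ above
print(scheduled.duration, scheduled.unit)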
| 41.028736
| 101
| 0.517299
|
c6e3f20dfa7543bdfb515a1f84daaa3d41f2ed6c
| 276
|
py
|
Python
|
BIT MANIPULATION/Missing Number/Code.py
|
HassanRahim26/LEETCODE
|
c0ec81b037ff7b2d6e6030ac9835c21ed825100f
|
[
"MIT"
] | 3
|
2021-08-31T11:02:28.000Z
|
2022-01-17T08:07:00.000Z
|
BIT MANIPULATION/Missing Number/Code.py
|
HassanRahim26/LEETCODE
|
c0ec81b037ff7b2d6e6030ac9835c21ed825100f
|
[
"MIT"
] | null | null | null |
BIT MANIPULATION/Missing Number/Code.py
|
HassanRahim26/LEETCODE
|
c0ec81b037ff7b2d6e6030ac9835c21ed825100f
|
[
"MIT"
] | null | null | null |
# PROBLEM LINK:- https://leetcode.com/problems/missing-number/
class Solution:
def missingNumber(self, nums: List[int]) -> int:
n = len(nums)
i = 0
for x in nums:
            n ^= x
n ^= i
i += 1
return n
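A short worked example (editorial addition) of why the XOR trick works: the loop folds in every value and every index 0..n-1, the initial n supplies the one index that has no loop iteration, and every number that is present cancels against its equal counterpart, leaving only the missing value.

nums = [3, 0, 1]
n = len(nums)                # start with 3
for i, x in enumerate(nums):
    n ^= x ^ i               # fold in value and index together
assert n == 2                # 3 ^ (3^0) ^ (0^1) ^ (1^2) == 2, the missing number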
| 21.230769
| 62
| 0.463768
|
04ed2808c5626e82429bc6009be797ff87908cfa
| 52,833
|
py
|
Python
|
lbry/wallet/ledger.py
|
freakypie/lbry-sdk
|
2cc7e5dfdcc3ba2dfbdf165c5aaf1569795aa920
|
[
"MIT"
] | null | null | null |
lbry/wallet/ledger.py
|
freakypie/lbry-sdk
|
2cc7e5dfdcc3ba2dfbdf165c5aaf1569795aa920
|
[
"MIT"
] | null | null | null |
lbry/wallet/ledger.py
|
freakypie/lbry-sdk
|
2cc7e5dfdcc3ba2dfbdf165c5aaf1569795aa920
|
[
"MIT"
] | null | null | null |
import os
import copy
import time
import asyncio
import logging
from datetime import datetime
from functools import partial
from operator import itemgetter
from collections import defaultdict
from binascii import hexlify, unhexlify
from typing import Dict, Tuple, Type, Iterable, List, Optional, DefaultDict, NamedTuple
from lbry.schema.result import Outputs, INVALID, NOT_FOUND
from lbry.schema.url import URL
from lbry.crypto.hash import hash160, double_sha256, sha256
from lbry.crypto.base58 import Base58
from lbry.utils import LRUCacheWithMetrics
from .tasks import TaskGroup
from .database import Database
from .stream import StreamController
from .dewies import dewies_to_lbc
from .account import Account, AddressManager, SingleKey
from .network import Network
from .transaction import Transaction, Output
from .header import Headers, UnvalidatedHeaders
from .checkpoints import HASHES
from .constants import TXO_TYPES, CLAIM_TYPES, COIN, NULL_HASH32
from .bip32 import PubKey, PrivateKey
from .coinselection import CoinSelector
log = logging.getLogger(__name__)
LedgerType = Type['BaseLedger']
class LedgerRegistry(type):
ledgers: Dict[str, LedgerType] = {}
def __new__(mcs, name, bases, attrs):
cls: LedgerType = super().__new__(mcs, name, bases, attrs)
if not (name == 'BaseLedger' and not bases):
ledger_id = cls.get_id()
assert ledger_id not in mcs.ledgers, \
f'Ledger with id "{ledger_id}" already registered.'
mcs.ledgers[ledger_id] = cls
return cls
@classmethod
def get_ledger_class(mcs, ledger_id: str) -> LedgerType:
return mcs.ledgers[ledger_id]
class TransactionEvent(NamedTuple):
address: str
tx: Transaction
class AddressesGeneratedEvent(NamedTuple):
address_manager: AddressManager
addresses: List[str]
class BlockHeightEvent(NamedTuple):
height: int
change: int
class TransactionCacheItem:
__slots__ = '_tx', 'lock', 'has_tx', 'pending_verifications'
def __init__(self, tx: Optional[Transaction] = None, lock: Optional[asyncio.Lock] = None):
self.has_tx = asyncio.Event()
self.lock = lock or asyncio.Lock()
self._tx = self.tx = tx
self.pending_verifications = 0
@property
def tx(self) -> Optional[Transaction]:
return self._tx
@tx.setter
def tx(self, tx: Transaction):
self._tx = tx
if tx is not None:
self.has_tx.set()
class Ledger(metaclass=LedgerRegistry):
name = 'LBRY Credits'
symbol = 'LBC'
network_name = 'mainnet'
headers_class = Headers
secret_prefix = bytes((0x1c,))
pubkey_address_prefix = bytes((0x55,))
script_address_prefix = bytes((0x7a,))
extended_public_key_prefix = unhexlify('0488b21e')
extended_private_key_prefix = unhexlify('0488ade4')
max_target = 0x0000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
genesis_hash = '9c89283ba0f3227f6c03b70216b9f665f0118d5e0fa729cedf4fb34d6a34f463'
genesis_bits = 0x1f00ffff
target_timespan = 150
default_fee_per_byte = 50
default_fee_per_name_char = 200000
checkpoints = HASHES
def __init__(self, config=None):
self.config = config or {}
self.db: Database = self.config.get('db') or Database(
os.path.join(self.path, "blockchain.db")
)
self.db.ledger = self
self.headers: Headers = self.config.get('headers') or self.headers_class(
os.path.join(self.path, "headers")
)
self.headers.checkpoints = self.checkpoints
self.network: Network = self.config.get('network') or Network(self)
self.network.on_header.listen(self.receive_header)
self.network.on_status.listen(self.process_status_update)
self.accounts = []
self.fee_per_byte: int = self.config.get('fee_per_byte', self.default_fee_per_byte)
self._on_transaction_controller = StreamController()
self.on_transaction = self._on_transaction_controller.stream
self.on_transaction.listen(
lambda e: log.info(
'(%s) on_transaction: address=%s, height=%s, is_verified=%s, tx.id=%s',
self.get_id(), e.address, e.tx.height, e.tx.is_verified, e.tx.id
)
)
self._on_address_controller = StreamController()
self.on_address = self._on_address_controller.stream
self.on_address.listen(
lambda e: log.info('(%s) on_address: %s', self.get_id(), e.addresses)
)
self._on_header_controller = StreamController()
self.on_header = self._on_header_controller.stream
self.on_header.listen(
lambda change: log.info(
'%s: added %s header blocks, final height %s',
self.get_id(), change, self.headers.height
)
)
self._download_height = 0
self._on_ready_controller = StreamController()
self.on_ready = self._on_ready_controller.stream
self._tx_cache = LRUCacheWithMetrics(self.config.get("tx_cache_size", 1024), metric_name='tx')
self._update_tasks = TaskGroup()
        self._other_tasks = TaskGroup()  # tasks that we don't need to start
self._utxo_reservation_lock = asyncio.Lock()
self._header_processing_lock = asyncio.Lock()
self._address_update_locks: DefaultDict[str, asyncio.Lock] = defaultdict(asyncio.Lock)
self._history_lock = asyncio.Lock()
self.coin_selection_strategy = None
self._known_addresses_out_of_sync = set()
self.fee_per_name_char = self.config.get('fee_per_name_char', self.default_fee_per_name_char)
self._balance_cache = LRUCacheWithMetrics(2 ** 15)
@classmethod
def get_id(cls):
return '{}_{}'.format(cls.symbol.lower(), cls.network_name.lower())
@classmethod
def hash160_to_address(cls, h160):
raw_address = cls.pubkey_address_prefix + h160
return Base58.encode(bytearray(raw_address + double_sha256(raw_address)[0:4]))
@staticmethod
def address_to_hash160(address):
return Base58.decode(address)[1:21]
@classmethod
def is_valid_address(cls, address):
decoded = Base58.decode_check(address)
return decoded[0] == cls.pubkey_address_prefix[0]
@classmethod
def public_key_to_address(cls, public_key):
return cls.hash160_to_address(hash160(public_key))
@staticmethod
def private_key_to_wif(private_key):
return b'\x1c' + private_key + b'\x01'
@property
def path(self):
return os.path.join(self.config['data_path'], self.get_id())
def add_account(self, account: Account):
self.accounts.append(account)
async def _get_account_and_address_info_for_address(self, wallet, address):
match = await self.db.get_address(accounts=wallet.accounts, address=address)
if match:
for account in wallet.accounts:
if match['account'] == account.public_key.address:
return account, match
async def get_private_key_for_address(self, wallet, address) -> Optional[PrivateKey]:
match = await self._get_account_and_address_info_for_address(wallet, address)
if match:
account, address_info = match
return account.get_private_key(address_info['chain'], address_info['pubkey'].n)
return None
async def get_public_key_for_address(self, wallet, address) -> Optional[PubKey]:
match = await self._get_account_and_address_info_for_address(wallet, address)
if match:
_, address_info = match
return address_info['pubkey']
return None
async def get_account_for_address(self, wallet, address):
match = await self._get_account_and_address_info_for_address(wallet, address)
if match:
return match[0]
async def get_effective_amount_estimators(self, funding_accounts: Iterable[Account]):
estimators = []
for account in funding_accounts:
utxos = await account.get_utxos(no_tx=True, no_channel_info=True)
for utxo in utxos:
estimators.append(utxo.get_estimator(self))
return estimators
async def get_addresses(self, **constraints):
return await self.db.get_addresses(**constraints)
def get_address_count(self, **constraints):
return self.db.get_address_count(**constraints)
async def get_spendable_utxos(self, amount: int, funding_accounts: Optional[Iterable['Account']], min_amount=1):
min_amount = min(amount // 10, min_amount)
fee = Output.pay_pubkey_hash(COIN, NULL_HASH32).get_fee(self)
selector = CoinSelector(amount, fee)
async with self._utxo_reservation_lock:
if self.coin_selection_strategy == 'sqlite':
return await self.db.get_spendable_utxos(self, amount + fee, funding_accounts, min_amount=min_amount,
fee_per_byte=self.fee_per_byte)
txos = await self.get_effective_amount_estimators(funding_accounts)
spendables = selector.select(txos, self.coin_selection_strategy)
if spendables:
await self.reserve_outputs(s.txo for s in spendables)
return spendables
def reserve_outputs(self, txos):
return self.db.reserve_outputs(txos)
def release_outputs(self, txos):
return self.db.release_outputs(txos)
def release_tx(self, tx):
return self.release_outputs([txi.txo_ref.txo for txi in tx.inputs])
def get_utxos(self, **constraints):
self.constraint_spending_utxos(constraints)
return self.db.get_utxos(**constraints)
def get_utxo_count(self, **constraints):
self.constraint_spending_utxos(constraints)
return self.db.get_utxo_count(**constraints)
async def get_txos(self, resolve=False, **constraints) -> List[Output]:
txos = await self.db.get_txos(**constraints)
if resolve:
return await self._resolve_for_local_results(constraints.get('accounts', []), txos)
return txos
def get_txo_count(self, **constraints):
return self.db.get_txo_count(**constraints)
def get_txo_sum(self, **constraints):
return self.db.get_txo_sum(**constraints)
def get_txo_plot(self, **constraints):
return self.db.get_txo_plot(**constraints)
def get_transactions(self, **constraints):
return self.db.get_transactions(**constraints)
def get_transaction_count(self, **constraints):
return self.db.get_transaction_count(**constraints)
async def get_local_status_and_history(self, address, history=None):
if not history:
address_details = await self.db.get_address(address=address)
history = (address_details['history'] if address_details else '') or ''
parts = history.split(':')[:-1]
return (
hexlify(sha256(history.encode())).decode() if history else None,
list(zip(parts[0::2], map(int, parts[1::2])))
)
@staticmethod
def get_root_of_merkle_tree(branches, branch_positions, working_branch):
for i, branch in enumerate(branches):
other_branch = unhexlify(branch)[::-1]
other_branch_on_left = bool((branch_positions >> i) & 1)
if other_branch_on_left:
combined = other_branch + working_branch
else:
combined = working_branch + other_branch
working_branch = double_sha256(combined)
return hexlify(working_branch[::-1])
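    # Editorial illustration (not in the original source): maybe_verify_transaction()
    # below feeds this helper the server-supplied proof, roughly
    #
    #     root = self.get_root_of_merkle_tree(merkle['merkle'], merkle['pos'], tx.hash)
    #     tx.is_verified = root == header['merkle_root']
    #
    # where bit i of merkle['pos'] tells the loop whether the i-th sibling hash is
    # combined on the left or the right of the running hash at that tree level.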
async def start(self):
if not os.path.exists(self.path):
os.mkdir(self.path)
await asyncio.wait([
self.db.open(),
self.headers.open()
])
fully_synced = self.on_ready.first
asyncio.create_task(self.network.start())
await self.network.on_connected.first
async with self._header_processing_lock:
await self._update_tasks.add(self.initial_headers_sync())
self.network.on_connected.listen(self.join_network)
asyncio.ensure_future(self.join_network())
await fully_synced
await self.db.release_all_outputs()
await asyncio.gather(*(a.maybe_migrate_certificates() for a in self.accounts))
await asyncio.gather(*(a.save_max_gap() for a in self.accounts))
if len(self.accounts) > 10:
log.info("Loaded %i accounts", len(self.accounts))
else:
await self._report_state()
self.on_transaction.listen(self._reset_balance_cache)
async def join_network(self, *_):
log.info("Subscribing and updating accounts.")
await self._update_tasks.add(self.subscribe_accounts())
await self._update_tasks.done.wait()
self._on_ready_controller.add(True)
async def stop(self):
self._update_tasks.cancel()
self._other_tasks.cancel()
await self._update_tasks.done.wait()
await self._other_tasks.done.wait()
await self.network.stop()
await self.db.close()
await self.headers.close()
@property
def local_height_including_downloaded_height(self):
return max(self.headers.height, self._download_height)
async def initial_headers_sync(self):
get_chunk = partial(self.network.retriable_call, self.network.get_headers, count=1000, b64=True)
self.headers.chunk_getter = get_chunk
async def doit():
for height in reversed(sorted(self.headers.known_missing_checkpointed_chunks)):
async with self._header_processing_lock:
await self.headers.ensure_chunk_at(height)
self._other_tasks.add(doit())
await self.update_headers()
async def update_headers(self, height=None, headers=None, subscription_update=False):
rewound = 0
while True:
if height is None or height > len(self.headers):
# sometimes header subscription updates are for a header in the future
# which can't be connected, so we do a normal header sync instead
height = len(self.headers)
headers = None
subscription_update = False
if not headers:
header_response = await self.network.retriable_call(self.network.get_headers, height, 2001)
headers = header_response['hex']
if not headers:
# Nothing to do, network thinks we're already at the latest height.
return
added = await self.headers.connect(height, unhexlify(headers))
if added > 0:
height += added
self._on_header_controller.add(
BlockHeightEvent(self.headers.height, added))
if rewound > 0:
# we started rewinding blocks and apparently found
# a new chain
rewound = 0
await self.db.rewind_blockchain(height)
if subscription_update:
# subscription updates are for latest header already
# so we don't need to check if there are newer / more
# on another loop of update_headers(), just return instead
return
elif added == 0:
# we had headers to connect but none got connected, probably a reorganization
height -= 1
rewound += 1
log.warning(
"Blockchain Reorganization: attempting rewind to height %s from starting height %s",
height, height+rewound
)
self._tx_cache.clear()
else:
raise IndexError(f"headers.connect() returned negative number ({added})")
if height < 0:
raise IndexError(
"Blockchain reorganization rewound all the way back to genesis hash. "
"Something is very wrong. Maybe you are on the wrong blockchain?"
)
if rewound >= 100:
raise IndexError(
"Blockchain reorganization dropped {} headers. This is highly unusual. "
"Will not continue to attempt reorganizing. Please, delete the ledger "
"synchronization directory inside your wallet directory (folder: '{}') and "
"restart the program to synchronize from scratch."
.format(rewound, self.get_id())
)
headers = None # ready to download some more headers
# if we made it this far and this was a subscription_update
# it means something went wrong and now we're doing a more
# robust sync, turn off subscription update shortcut
subscription_update = False
async def receive_header(self, response):
async with self._header_processing_lock:
header = response[0]
await self.update_headers(
height=header['height'], headers=header['hex'], subscription_update=True
)
async def subscribe_accounts(self):
if self.network.is_connected and self.accounts:
log.info("Subscribe to %i accounts", len(self.accounts))
await asyncio.wait([
self.subscribe_account(a) for a in self.accounts
])
async def subscribe_account(self, account: Account):
for address_manager in account.address_managers.values():
await self.subscribe_addresses(address_manager, await address_manager.get_addresses())
await account.ensure_address_gap()
async def unsubscribe_account(self, account: Account):
for address in await account.get_addresses():
await self.network.unsubscribe_address(address)
async def announce_addresses(self, address_manager: AddressManager, addresses: List[str]):
await self.subscribe_addresses(address_manager, addresses)
await self._on_address_controller.add(
AddressesGeneratedEvent(address_manager, addresses)
)
async def subscribe_addresses(self, address_manager: AddressManager, addresses: List[str], batch_size: int = 1000):
if self.network.is_connected and addresses:
addresses_remaining = list(addresses)
while addresses_remaining:
batch = addresses_remaining[:batch_size]
results = await self.network.subscribe_address(*batch)
for address, remote_status in zip(batch, results):
self._update_tasks.add(self.update_history(address, remote_status, address_manager))
addresses_remaining = addresses_remaining[batch_size:]
if self.network.client and self.network.client.server_address_and_port:
log.info("subscribed to %i/%i addresses on %s:%i", len(addresses) - len(addresses_remaining),
len(addresses), *self.network.client.server_address_and_port)
if self.network.client and self.network.client.server_address_and_port:
log.info(
"finished subscribing to %i addresses on %s:%i", len(addresses),
*self.network.client.server_address_and_port
)
def process_status_update(self, update):
address, remote_status = update
self._update_tasks.add(self.update_history(address, remote_status))
async def update_history(self, address, remote_status, address_manager: AddressManager = None,
reattempt_update: bool = True):
async with self._address_update_locks[address]:
self._known_addresses_out_of_sync.discard(address)
local_status, local_history = await self.get_local_status_and_history(address)
if local_status == remote_status:
return True
remote_history = await self.network.retriable_call(self.network.get_history, address)
remote_history = list(map(itemgetter('tx_hash', 'height'), remote_history))
we_need = set(remote_history) - set(local_history)
if not we_need:
remote_missing = set(local_history) - set(remote_history)
if remote_missing:
log.warning(
"%i transactions we have for %s are not in the remote address history",
len(remote_missing), address
)
return True
to_request = {}
pending_synced_history = {}
already_synced = set()
already_synced_offset = 0
for i, (txid, remote_height) in enumerate(remote_history):
if i == already_synced_offset and i < len(local_history) and local_history[i] == (txid, remote_height):
pending_synced_history[i] = f'{txid}:{remote_height}:'
already_synced.add((txid, remote_height))
already_synced_offset += 1
continue
tx_indexes = {}
for i, (txid, remote_height) in enumerate(remote_history):
tx_indexes[txid] = i
if (txid, remote_height) in already_synced:
continue
to_request[i] = (txid, remote_height)
log.debug(
"request %i transactions, %i/%i for %s are already synced", len(to_request), len(already_synced),
len(remote_history), address
)
remote_history_txids = set(txid for txid, _ in remote_history)
async for tx in self.request_synced_transactions(to_request, remote_history_txids, address):
pending_synced_history[tx_indexes[tx.id]] = f"{tx.id}:{tx.height}:"
if len(pending_synced_history) % 100 == 0:
log.info("Syncing address %s: %d/%d", address, len(pending_synced_history), len(to_request))
log.info("Sync finished for address %s: %d/%d", address, len(pending_synced_history), len(to_request))
assert len(pending_synced_history) == len(remote_history), \
f"{len(pending_synced_history)} vs {len(remote_history)}"
synced_history = ""
for remote_i, i in zip(range(len(remote_history)), sorted(pending_synced_history.keys())):
assert i == remote_i, f"{i} vs {remote_i}"
txid, height = remote_history[remote_i]
if f"{txid}:{height}:" != pending_synced_history[i]:
log.warning("history mismatch: %s vs %s", remote_history[remote_i], pending_synced_history[i])
synced_history += pending_synced_history[i]
await self.db.set_address_history(address, synced_history)
if address_manager is None:
address_manager = await self.get_address_manager_for_address(address)
if address_manager is not None:
await address_manager.ensure_address_gap()
local_status, local_history = \
await self.get_local_status_and_history(address, synced_history)
if local_status != remote_status:
if local_history == remote_history:
log.warning(
"%s has a synced history but a mismatched status", address
)
return True
remote_set = set(remote_history)
local_set = set(local_history)
log.warning(
"%s is out of sync after syncing.\n"
"Remote: %s with %d items (%i unique), local: %s with %d items (%i unique).\n"
"Histories are mismatched on %i items.\n"
"Local is missing\n"
"%s\n"
"Remote is missing\n"
"%s\n"
"******",
address, remote_status, len(remote_history), len(remote_set),
local_status, len(local_history), len(local_set), len(remote_set.symmetric_difference(local_set)),
"\n".join([f"{txid} - {height}" for txid, height in local_set.difference(remote_set)]),
"\n".join([f"{txid} - {height}" for txid, height in remote_set.difference(local_set)])
)
self._known_addresses_out_of_sync.add(address)
return False
else:
log.debug("finished syncing transaction history for %s, %i known txs", address, len(local_history))
return True
async def maybe_verify_transaction(self, tx, remote_height, merkle=None):
tx.height = remote_height
if 0 < remote_height < len(self.headers):
# can't be tx.pending_verifications == 1 because we have to handle the transaction_show case
if not merkle:
merkle = await self.network.retriable_call(self.network.get_merkle, tx.id, remote_height)
if 'merkle' not in merkle:
return
merkle_root = self.get_root_of_merkle_tree(merkle['merkle'], merkle['pos'], tx.hash)
header = await self.headers.get(remote_height)
tx.position = merkle['pos']
tx.is_verified = merkle_root == header['merkle_root']
return tx
async def request_transactions(self, to_request: Tuple[Tuple[str, int], ...], cached=False):
batches = [[]]
remote_heights = {}
cache_hits = set()
for txid, height in sorted(to_request, key=lambda x: x[1]):
if cached:
cached_tx = self._tx_cache.get(txid)
if cached_tx is not None:
if cached_tx.tx is not None and cached_tx.tx.is_verified:
cache_hits.add(txid)
continue
else:
self._tx_cache[txid] = TransactionCacheItem()
remote_heights[txid] = height
if len(batches[-1]) == 100:
batches.append([])
batches[-1].append(txid)
if not batches[-1]:
batches.pop()
if cached and cache_hits:
yield {txid: self._tx_cache[txid].tx for txid in cache_hits}
for batch in batches:
txs = await self._single_batch(batch, remote_heights)
if cached:
for txid, tx in txs.items():
self._tx_cache[txid].tx = tx
yield txs
async def request_synced_transactions(self, to_request, remote_history, address):
async for txs in self.request_transactions(((txid, height) for txid, height in to_request.values())):
for tx in txs.values():
yield tx
await self._sync_and_save_batch(address, remote_history, txs)
async def _single_batch(self, batch, remote_heights):
heights = {remote_heights[txid] for txid in batch}
        unrestricted = 0 < min(heights) < max(heights) < max(self.headers.checkpoints or [0])
        batch_result = await self.network.retriable_call(self.network.get_transaction_batch, batch, not unrestricted)
txs = {}
for txid, (raw, merkle) in batch_result.items():
remote_height = remote_heights[txid]
tx = Transaction(unhexlify(raw), height=remote_height)
txs[tx.id] = tx
await self.maybe_verify_transaction(tx, remote_height, merkle)
return txs
async def _sync_and_save_batch(self, address, remote_history, pending_txs):
await asyncio.gather(*(self._sync(tx, remote_history, pending_txs) for tx in pending_txs.values()))
await self.db.save_transaction_io_batch(
pending_txs.values(), address, self.address_to_hash160(address), ""
)
while pending_txs:
self._on_transaction_controller.add(TransactionEvent(address, pending_txs.popitem()[1]))
async def _sync(self, tx, remote_history, pending_txs):
check_db_for_txos = {}
for txi in tx.inputs:
if txi.txo_ref.txo is not None:
continue
wanted_txid = txi.txo_ref.tx_ref.id
if wanted_txid not in remote_history:
continue
if wanted_txid in pending_txs:
txi.txo_ref = pending_txs[wanted_txid].outputs[txi.txo_ref.position].ref
else:
check_db_for_txos[txi] = txi.txo_ref.id
referenced_txos = {} if not check_db_for_txos else {
txo.id: txo for txo in await self.db.get_txos(
txoid__in=list(check_db_for_txos.values()), order_by='txo.txoid', no_tx=True
)
}
for txi in check_db_for_txos:
if txi.txo_ref.id in referenced_txos:
txi.txo_ref = referenced_txos[txi.txo_ref.id].ref
else:
tx_from_db = await self.db.get_transaction(txid=txi.txo_ref.tx_ref.id)
if tx_from_db is None:
log.warning("%s not on db, not on cache, but on remote history!", txi.txo_ref.id)
else:
txi.txo_ref = tx_from_db.outputs[txi.txo_ref.position].ref
return tx
async def get_address_manager_for_address(self, address) -> Optional[AddressManager]:
details = await self.db.get_address(address=address)
for account in self.accounts:
if account.id == details['account']:
return account.address_managers[details['chain']]
return None
def broadcast(self, tx):
# broadcast can't be a retriable call yet
return self.network.broadcast(hexlify(tx.raw).decode())
async def wait(self, tx: Transaction, height=-1, timeout=1):
timeout = timeout or 600 # after 10 minutes there is almost 0 hope
addresses = set()
for txi in tx.inputs:
if txi.txo_ref.txo is not None:
addresses.add(
self.hash160_to_address(txi.txo_ref.txo.pubkey_hash)
)
for txo in tx.outputs:
if txo.has_address:
addresses.add(self.hash160_to_address(txo.pubkey_hash))
start = int(time.perf_counter())
while timeout and (int(time.perf_counter()) - start) <= timeout:
if await self._wait_round(tx, height, addresses):
return
raise asyncio.TimeoutError('Timed out waiting for transaction.')
async def _wait_round(self, tx: Transaction, height: int, addresses: Iterable[str]):
records = await self.db.get_addresses(address__in=addresses)
_, pending = await asyncio.wait([
self.on_transaction.where(partial(
lambda a, e: a == e.address and e.tx.height >= height and e.tx.id == tx.id,
address_record['address']
)) for address_record in records
], timeout=1)
if not pending:
return True
records = await self.db.get_addresses(address__in=addresses)
for record in records:
local_history = (await self.get_local_status_and_history(
record['address'], history=record['history']
))[1] if record['history'] else []
for txid, local_height in local_history:
if txid == tx.id:
if local_height >= height:
return True
log.warning(
"local history has higher height than remote for %s (%i vs %i)", txid,
local_height, height
)
return False
log.warning(
"local history does not contain %s, requested height %i", tx.id, height
)
return False
async def _inflate_outputs(
self, query, accounts,
include_purchase_receipt=False,
include_is_my_output=False,
include_sent_supports=False,
include_sent_tips=False,
include_received_tips=False) -> Tuple[List[Output], dict, int, int]:
encoded_outputs = await query
outputs = Outputs.from_base64(encoded_outputs or b'') # TODO: why is the server returning None?
txs: List[Transaction] = []
if len(outputs.txs) > 0:
async for tx in self.request_transactions(tuple(outputs.txs), cached=True):
txs.extend(tx.values())
_txos, blocked = outputs.inflate(txs)
txos = []
for txo in _txos:
if isinstance(txo, Output):
# transactions and outputs are cached and shared between wallets
                # we don't want to leak information between wallets, so we add the
                # wallet-specific metadata on throwaway copies of the txos
txo = copy.copy(txo)
channel = txo.channel
txo.purchase_receipt = None
txo.update_annotations(None)
txo.channel = channel
txos.append(txo)
includes = (
include_purchase_receipt, include_is_my_output,
include_sent_supports, include_sent_tips
)
if accounts and any(includes):
receipts = {}
if include_purchase_receipt:
priced_claims = []
for txo in txos:
if isinstance(txo, Output) and txo.has_price:
priced_claims.append(txo)
if priced_claims:
receipts = {
txo.purchased_claim_id: txo for txo in
await self.db.get_purchases(
accounts=accounts,
purchased_claim_id__in=[c.claim_id for c in priced_claims]
)
}
for txo in txos:
if isinstance(txo, Output) and txo.can_decode_claim:
if include_purchase_receipt:
txo.purchase_receipt = receipts.get(txo.claim_id)
if include_is_my_output:
mine = await self.db.get_txo_count(
claim_id=txo.claim_id, txo_type__in=CLAIM_TYPES, is_my_output=True,
is_spent=False, accounts=accounts
)
if mine:
txo.is_my_output = True
else:
txo.is_my_output = False
if include_sent_supports:
supports = await self.db.get_txo_sum(
claim_id=txo.claim_id, txo_type=TXO_TYPES['support'],
is_my_input=True, is_my_output=True,
is_spent=False, accounts=accounts
)
txo.sent_supports = supports
if include_sent_tips:
tips = await self.db.get_txo_sum(
claim_id=txo.claim_id, txo_type=TXO_TYPES['support'],
is_my_input=True, is_my_output=False,
accounts=accounts
)
txo.sent_tips = tips
if include_received_tips:
tips = await self.db.get_txo_sum(
claim_id=txo.claim_id, txo_type=TXO_TYPES['support'],
is_my_input=False, is_my_output=True,
accounts=accounts
)
txo.received_tips = tips
return txos, blocked, outputs.offset, outputs.total
async def resolve(self, accounts, urls, new_sdk_server=None, **kwargs):
txos = []
urls_copy = list(urls)
if new_sdk_server:
resolve = partial(self.network.new_resolve, new_sdk_server)
else:
resolve = partial(self.network.retriable_call, self.network.resolve)
while urls_copy:
batch, urls_copy = urls_copy[:100], urls_copy[100:]
txos.extend(
(await self._inflate_outputs(
resolve(batch), accounts, **kwargs
))[0]
)
assert len(urls) == len(txos), "Mismatch between urls requested for resolve and responses received."
result = {}
for url, txo in zip(urls, txos):
if txo:
if isinstance(txo, Output) and URL.parse(url).has_stream_in_channel:
if not txo.channel or not txo.is_signed_by(txo.channel, self):
txo = {'error': {'name': INVALID, 'text': f'{url} has invalid channel signature'}}
else:
txo = {'error': {'name': NOT_FOUND, 'text': f'{url} did not resolve to a claim'}}
result[url] = txo
return result
async def sum_supports(self, new_sdk_server, **kwargs) -> List[Dict]:
return await self.network.sum_supports(new_sdk_server, **kwargs)
async def claim_search(
self, accounts, include_purchase_receipt=False, include_is_my_output=False,
new_sdk_server=None, **kwargs) -> Tuple[List[Output], dict, int, int]:
if new_sdk_server:
claim_search = partial(self.network.new_claim_search, new_sdk_server)
else:
claim_search = self.network.claim_search
return await self._inflate_outputs(
claim_search(**kwargs), accounts,
include_purchase_receipt=include_purchase_receipt,
include_is_my_output=include_is_my_output,
)
async def get_claim_by_claim_id(self, accounts, claim_id, **kwargs) -> Output:
for claim in (await self.claim_search(accounts, claim_id=claim_id, **kwargs))[0]:
return claim
async def _report_state(self):
try:
for account in self.accounts:
balance = dewies_to_lbc(await account.get_balance(include_claims=True))
channel_count = await account.get_channel_count()
claim_count = await account.get_claim_count()
if isinstance(account.receiving, SingleKey):
log.info("Loaded single key account %s with %s LBC. "
"%d channels, %d certificates and %d claims",
account.id, balance, channel_count, len(account.channel_keys), claim_count)
else:
total_receiving = len(await account.receiving.get_addresses())
total_change = len(await account.change.get_addresses())
log.info("Loaded account %s with %s LBC, %d receiving addresses (gap: %d), "
"%d change addresses (gap: %d), %d channels, %d certificates and %d claims. ",
account.id, balance, total_receiving, account.receiving.gap, total_change,
account.change.gap, channel_count, len(account.channel_keys), claim_count)
except Exception as err:
if isinstance(err, asyncio.CancelledError): # TODO: remove when updated to 3.8
raise
log.exception(
'Failed to display wallet state, please file issue '
'for this bug along with the traceback you see below:')
async def _reset_balance_cache(self, e: TransactionEvent):
account_ids = [
r['account'] for r in await self.db.get_addresses(('account',), address=e.address)
]
for account_id in account_ids:
if account_id in self._balance_cache:
del self._balance_cache[account_id]
@staticmethod
def constraint_spending_utxos(constraints):
constraints['txo_type__in'] = (0, TXO_TYPES['purchase'])
async def get_purchases(self, resolve=False, **constraints):
purchases = await self.db.get_purchases(**constraints)
if resolve:
claim_ids = [p.purchased_claim_id for p in purchases]
try:
resolved, _, _, _ = await self.claim_search([], claim_ids=claim_ids)
except Exception as err:
if isinstance(err, asyncio.CancelledError): # TODO: remove when updated to 3.8
raise
log.exception("Resolve failed while looking up purchased claim ids:")
resolved = []
lookup = {claim.claim_id: claim for claim in resolved}
for purchase in purchases:
purchase.purchased_claim = lookup.get(purchase.purchased_claim_id)
return purchases
def get_purchase_count(self, resolve=False, **constraints):
return self.db.get_purchase_count(**constraints)
async def _resolve_for_local_results(self, accounts, txos):
txos = await self._resolve_for_local_claim_results(accounts, txos)
txos = await self._resolve_for_local_support_results(accounts, txos)
return txos
async def _resolve_for_local_claim_results(self, accounts, txos):
results = []
response = await self.resolve(
accounts, [txo.permanent_url for txo in txos if txo.can_decode_claim]
)
for txo in txos:
resolved = response.get(txo.permanent_url) if txo.can_decode_claim else None
if isinstance(resolved, Output):
resolved.update_annotations(txo)
results.append(resolved)
else:
if isinstance(resolved, dict) and 'error' in resolved:
txo.meta['error'] = resolved['error']
results.append(txo)
return results
async def _resolve_for_local_support_results(self, accounts, txos):
channel_ids = set()
signed_support_txos = []
for txo in txos:
support = txo.can_decode_support
if support and support.signing_channel_id:
channel_ids.add(support.signing_channel_id)
signed_support_txos.append(txo)
if channel_ids:
channels = {
channel.claim_id: channel for channel in
(await self.claim_search(accounts, claim_ids=list(channel_ids)))[0]
}
for txo in signed_support_txos:
txo.channel = channels.get(txo.support.signing_channel_id)
return txos
async def get_claims(self, resolve=False, **constraints):
claims = await self.db.get_claims(**constraints)
if resolve:
return await self._resolve_for_local_results(constraints.get('accounts', []), claims)
return claims
def get_claim_count(self, **constraints):
return self.db.get_claim_count(**constraints)
async def get_streams(self, resolve=False, **constraints):
streams = await self.db.get_streams(**constraints)
if resolve:
return await self._resolve_for_local_results(constraints.get('accounts', []), streams)
return streams
def get_stream_count(self, **constraints):
return self.db.get_stream_count(**constraints)
async def get_channels(self, resolve=False, **constraints):
channels = await self.db.get_channels(**constraints)
if resolve:
return await self._resolve_for_local_results(constraints.get('accounts', []), channels)
return channels
def get_channel_count(self, **constraints):
return self.db.get_channel_count(**constraints)
async def resolve_collection(self, collection, offset=0, page_size=1):
claim_ids = collection.claim.collection.claims.ids[offset:page_size + offset]
try:
resolve_results, _, _, _ = await self.claim_search([], claim_ids=claim_ids)
except Exception as err:
if isinstance(err, asyncio.CancelledError): # TODO: remove when updated to 3.8
raise
log.exception("Resolve failed while looking up collection claim ids:")
return []
claims = []
for claim_id in claim_ids:
found = False
for txo in resolve_results:
if txo.claim_id == claim_id:
claims.append(txo)
found = True
break
if not found:
claims.append(None)
return claims
async def get_collections(self, resolve_claims=0, resolve=False, **constraints):
collections = await self.db.get_collections(**constraints)
if resolve:
collections = await self._resolve_for_local_results(constraints.get('accounts', []), collections)
if resolve_claims > 0:
for collection in collections:
collection.claims = await self.resolve_collection(collection, page_size=resolve_claims)
return collections
def get_collection_count(self, resolve_claims=0, **constraints):
return self.db.get_collection_count(**constraints)
def get_supports(self, **constraints):
return self.db.get_supports(**constraints)
def get_support_count(self, **constraints):
return self.db.get_support_count(**constraints)
async def get_transaction_history(self, read_only=False, **constraints):
txs: List[Transaction] = await self.db.get_transactions(
include_is_my_output=True, include_is_spent=True,
read_only=read_only, **constraints
)
headers = self.headers
history = []
for tx in txs: # pylint: disable=too-many-nested-blocks
ts = headers.estimated_timestamp(tx.height)
item = {
'txid': tx.id,
'timestamp': ts,
'date': datetime.fromtimestamp(ts).isoformat(' ')[:-3] if tx.height > 0 else None,
'confirmations': (headers.height + 1) - tx.height if tx.height > 0 else 0,
'claim_info': [],
'update_info': [],
'support_info': [],
'abandon_info': [],
'purchase_info': []
}
is_my_inputs = all([txi.is_my_input for txi in tx.inputs])
if is_my_inputs:
# fees only matter if we are the ones paying them
item['value'] = dewies_to_lbc(tx.net_account_balance + tx.fee)
item['fee'] = dewies_to_lbc(-tx.fee)
else:
# someone else paid the fees
item['value'] = dewies_to_lbc(tx.net_account_balance)
item['fee'] = '0.0'
for txo in tx.my_claim_outputs:
item['claim_info'].append({
'address': txo.get_address(self),
'balance_delta': dewies_to_lbc(-txo.amount),
'amount': dewies_to_lbc(txo.amount),
'claim_id': txo.claim_id,
'claim_name': txo.claim_name,
'nout': txo.position,
'is_spent': txo.is_spent,
})
for txo in tx.my_update_outputs:
if is_my_inputs: # updating my own claim
previous = None
for txi in tx.inputs:
if txi.txo_ref.txo is not None:
other_txo = txi.txo_ref.txo
if (other_txo.is_claim or other_txo.script.is_support_claim) \
and other_txo.claim_id == txo.claim_id:
previous = other_txo
break
if previous is not None:
item['update_info'].append({
'address': txo.get_address(self),
'balance_delta': dewies_to_lbc(previous.amount - txo.amount),
'amount': dewies_to_lbc(txo.amount),
'claim_id': txo.claim_id,
'claim_name': txo.claim_name,
'nout': txo.position,
'is_spent': txo.is_spent,
})
else: # someone sent us their claim
item['update_info'].append({
'address': txo.get_address(self),
'balance_delta': dewies_to_lbc(0),
'amount': dewies_to_lbc(txo.amount),
'claim_id': txo.claim_id,
'claim_name': txo.claim_name,
'nout': txo.position,
'is_spent': txo.is_spent,
})
for txo in tx.my_support_outputs:
item['support_info'].append({
'address': txo.get_address(self),
'balance_delta': dewies_to_lbc(txo.amount if not is_my_inputs else -txo.amount),
'amount': dewies_to_lbc(txo.amount),
'claim_id': txo.claim_id,
'claim_name': txo.claim_name,
'is_tip': not is_my_inputs,
'nout': txo.position,
'is_spent': txo.is_spent,
})
if is_my_inputs:
for txo in tx.other_support_outputs:
item['support_info'].append({
'address': txo.get_address(self),
'balance_delta': dewies_to_lbc(-txo.amount),
'amount': dewies_to_lbc(txo.amount),
'claim_id': txo.claim_id,
'claim_name': txo.claim_name,
'is_tip': is_my_inputs,
'nout': txo.position,
'is_spent': txo.is_spent,
})
for txo in tx.my_abandon_outputs:
item['abandon_info'].append({
'address': txo.get_address(self),
'balance_delta': dewies_to_lbc(txo.amount),
'amount': dewies_to_lbc(txo.amount),
'claim_id': txo.claim_id,
'claim_name': txo.claim_name,
'nout': txo.position
})
for txo in tx.any_purchase_outputs:
item['purchase_info'].append({
'address': txo.get_address(self),
'balance_delta': dewies_to_lbc(txo.amount if not is_my_inputs else -txo.amount),
'amount': dewies_to_lbc(txo.amount),
'claim_id': txo.purchased_claim_id,
'nout': txo.position,
'is_spent': txo.is_spent,
})
history.append(item)
return history
def get_transaction_history_count(self, read_only=False, **constraints):
return self.db.get_transaction_count(read_only=read_only, **constraints)
async def get_detailed_balance(self, accounts, confirmations=0):
result = {
'total': 0,
'available': 0,
'reserved': 0,
'reserved_subtotals': {
'claims': 0,
'supports': 0,
'tips': 0
}
}
for account in accounts:
balance = self._balance_cache.get(account.id)
if not balance:
balance = self._balance_cache[account.id] = \
await account.get_detailed_balance(confirmations, reserved_subtotals=True)
for key, value in balance.items():
if key == 'reserved_subtotals':
for subkey, subvalue in value.items():
result['reserved_subtotals'][subkey] += subvalue
else:
result[key] += value
return result
class TestNetLedger(Ledger):
network_name = 'testnet'
pubkey_address_prefix = bytes((111,))
script_address_prefix = bytes((196,))
extended_public_key_prefix = unhexlify('043587cf')
extended_private_key_prefix = unhexlify('04358394')
checkpoints = {}
class RegTestLedger(Ledger):
network_name = 'regtest'
headers_class = UnvalidatedHeaders
pubkey_address_prefix = bytes((111,))
script_address_prefix = bytes((196,))
extended_public_key_prefix = unhexlify('043587cf')
extended_private_key_prefix = unhexlify('04358394')
max_target = 0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
genesis_hash = '6e3fcf1299d4ec5d79c3a4c91d624a4acf9e2e173d95a1a0504f677669687556'
genesis_bits = 0x207fffff
target_timespan = 1
checkpoints = {}
| 43.881229
| 119
| 0.596597
|
d85eacb006b6ff34ec06ca166b36599b314cde19
| 987
|
py
|
Python
|
app/core/admin.py
|
krismwas/ingredients
|
33c5718117c0bc2fd65d16fd1fdfbfae69840d62
|
[
"MIT"
] | null | null | null |
app/core/admin.py
|
krismwas/ingredients
|
33c5718117c0bc2fd65d16fd1fdfbfae69840d62
|
[
"MIT"
] | null | null | null |
app/core/admin.py
|
krismwas/ingredients
|
33c5718117c0bc2fd65d16fd1fdfbfae69840d62
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.utils.translation import ugettext as _
from core.models import User, Ingredient, Tag, Recipe
class UserAdmin(BaseUserAdmin):
ordering = ['id']
list_display = ['email', 'name']
fieldsets = (
(None, {'fields': ('email', 'password')}),
(_('Personal Info'), {'fields': ('name',)}),
(
_('Permissions'),
{
'fields': (
'is_active',
'is_staff',
'is_superuser',
)
}
),
(_('Important dates'), {'fields': ('last_login',)}),
)
add_fieldsets = (
(None, {
'classes': ('wide',),
'fields': ('email', 'password1', 'password2')
}),
)
admin.site.register(User, UserAdmin)
admin.site.register(Tag)
admin.site.register(Ingredient)
admin.site.register(Recipe)
| 24.675
| 64
| 0.524823
|
ff6cdf74e5bcaf7a4e87069779bea66494b9b37d
| 2,316
|
py
|
Python
|
compiler_gym/util/timer.py
|
thecoblack/CompilerGym
|
ade54e2f1829cf41722decb0942a4d6fd3102c2c
|
[
"MIT"
] | null | null | null |
compiler_gym/util/timer.py
|
thecoblack/CompilerGym
|
ade54e2f1829cf41722decb0942a4d6fd3102c2c
|
[
"MIT"
] | null | null | null |
compiler_gym/util/timer.py
|
thecoblack/CompilerGym
|
ade54e2f1829cf41722decb0942a4d6fd3102c2c
|
[
"MIT"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from time import time
from typing import Callable, Optional
from absl.logging import skip_log_prefix
def humanize_duration(seconds: float) -> str:
"""Format a time for humans."""
value = abs(seconds)
sign = "-" if seconds < 0 else ""
if value < 1e-6:
return f"{sign}{value*1e9:.1f}ns"
elif value < 1e-3:
return f"{sign}{value*1e6:.1f}us"
if value < 1:
return f"{sign}{value*1e3:.1f}ms"
elif value < 60:
return f"{sign}{value:.3f}s"
else:
return f"{sign}{value:.1f}s"
def humanize_duration_hms(seconds: float) -> str:
"""Format a time in to :code:`hours:minutes:seconds` format."""
seconds = int(seconds)
return f"{seconds // 3600}:{(seconds % 3600) // 60:02d}:{seconds % 60:02d}"
class Timer:
"""A very simple scoped timer.
Example:
>>> with Timer() as timer:
time.sleep(10)
print(f"That took {timer}")
That took 10.0s
If you're feeling even more terse:
>>> with Timer("Did stuff"):
# do stuff ...
Did stuff in 5.6ms
You can control where the print out should be logged to:
>>> with Timer("Did stuff", logging.getLogger().info)
# do stuff ...
[log] Did stuff in 11us
"""
def __init__(
self, label: Optional[str] = None, print_fn: Callable[[str], None] = print
):
self._start_time = None
self._elapsed = None
self.label = label
self.print_fn = print_fn
def reset(self) -> "Timer":
self._start_time = time()
return self
def __enter__(self) -> "Timer":
return self.reset()
@property
def time(self) -> float:
if self._elapsed:
return self._elapsed
elif self._start_time:
return time() - self._start_time
else:
return 0
@skip_log_prefix
def __exit__(self, *args):
self._elapsed = time() - self._start_time
if self.label:
self.print_fn(f"{self.label} in {self}")
def __str__(self):
return humanize_duration(self.time)
| 26.318182
| 82
| 0.582038
|
4b2441c71feef22adf22f4e5e44d413c9b118abe
| 616
|
py
|
Python
|
src/appstore/views.py
|
revanth-reddy/ns-3-Appstore
|
32ac93acf1c9a721adf9f43c0799d6daff9beb7f
|
[
"MIT"
] | null | null | null |
src/appstore/views.py
|
revanth-reddy/ns-3-Appstore
|
32ac93acf1c9a721adf9f43c0799d6daff9beb7f
|
[
"MIT"
] | null | null | null |
src/appstore/views.py
|
revanth-reddy/ns-3-Appstore
|
32ac93acf1c9a721adf9f43c0799d6daff9beb7f
|
[
"MIT"
] | null | null | null |
from django.views import generic
from django.shortcuts import render
from apps.models import App, Tag
from apps.views import findTags
def homePage(request):
new_releases = App.objects.all().filter(active=True).order_by('-latest_release_date')[:4]
top_tags, not_top_tags = findTags()
context = {
'new_releases':new_releases,
'top_tags':top_tags,
'not_top_tags':not_top_tags,
}
return render(request, 'home.html', context)
class AboutPage(generic.TemplateView):
template_name = "about.html"
def handler404(request):
return render(request, '404.html', status=404)
| 29.333333
| 93
| 0.717532
|
7c20fda064db53476d100b8b6a3bc454a07fca32
| 19,661
|
py
|
Python
|
pyracmon/graph/graph.py
|
sozu/py-pyracmon
|
4a7789ae5432a3197c58ae194de4b8065bbc2846
|
[
"MIT"
] | null | null | null |
pyracmon/graph/graph.py
|
sozu/py-pyracmon
|
4a7789ae5432a3197c58ae194de4b8065bbc2846
|
[
"MIT"
] | null | null | null |
pyracmon/graph/graph.py
|
sozu/py-pyracmon
|
4a7789ae5432a3197c58ae194de4b8065bbc2846
|
[
"MIT"
] | null | null | null |
from typing import *
from .identify import neverPolicy
from .template import GraphTemplate
def new_graph(template: GraphTemplate, *bases: GraphTemplate):
"""
Create a graph from a template.
Use this function instead of invoking constructor directly.
:param template: A template of a graph.
:param bases: Other graphs whose nodes are appended to created graph.
:returns: Created graph.
"""
graph = Graph(template)
for b in bases:
graph += b
return graph
class Graph:
"""
This class represents a graph composed of tree-structured node containers.
The structure is determined by `GraphTemplate`. Use `new_graph` instead of the constructor to create a new graph instance.
>>> template = GraphSpec().new_template(
>>> a = (int, lambda x:x),
>>> b = (str, lambda x:x),
>>> c = (str, lambda x:x),
>>> )
>>> template.a << template.b << template.c
>>> graph = new_graph(template)
`append` ( `replace` ) is a method that stores entities in the graph, tying them to each other according to the structure.
Entities are encapsulated by `Node`, which can have an edge to its parent node.
>>> graph.append(a=1, b="a", c="x").append(a=2, b="b", c="y")
In `append`, entities are first sorted in descending order of the graph structure (ancestors before descendants), and then:
- Search the corresponding node container for a node whose entity is *identical* to the first entity.
- If found, no new node is created and the *identical* node becomes the next parent.
- Otherwise, a new node is appended and it becomes the next parent.
- Repeat the above for the following entities; the difference is that the *identical* node is searched for among the children of the parent set in the previous step.
In the example here, identification is done by the entity value itself. The next code is an example where *identical* nodes are found.
>>> graph.append(a=1, b="a", c="z").append(a=2, b="c", c="y")
In the first `append`, ``a`` and ``b`` each have an *identical* node, and ``a`` is *identical* in the second.
``c`` in the second one is not *identical* to any node because its parent node ``b="c"`` is added as a new node.
Due to the identification mechanism, repeating `append` is sufficient to reconstruct entity relationships in the graph.
:param template: Graph template.
:param Dict[str, NodeContainer] containers: Node containers mapped by their names.
"""
def __init__(self, template: GraphTemplate):
self.template = template
self.containers = {p.name:self._to_container(p) for p in template._properties}
self._view = None
def _to_container(self, prop):
if isinstance(prop.kind, GraphTemplate):
return GraphNodeContainer(prop)
else:
return NodeContainer(prop)
def _container_of(self, prop):
cands = [c for c in self.containers.values() if c.prop.is_compatible(prop)]
if len(cands) > 1:
raise ValueError(f"Container can't be determined from property '{prop.name}'.")
return cands[0] if cands else None
def __add__(self, another):
"""
Create a new graph by adding this graph and another graph.
The new graph has the same template as this graph's.
Because this method delegates to `__iadd__()`, the other graph does not have to share the same template.
Parameters
----------
another: Graph | GraphView
Graph or its view.
Returns
-------
Graph
Created graph.
"""
graph = Graph(self.template)
graph += self
graph += another
return graph
def __iadd__(self, another):
"""
Append nodes from another graph.
The template of the other graph does not have to be the same as this graph's.
Nodes of the other graph are traversed from its roots and appended to the compatible containers of this graph.
Parameters
----------
another: Graph | GraphView
Graph or its view.
Returns
-------
Graph
This graph.
"""
another = another if isinstance(another, Graph) else another()
roots_ = filter(lambda c: c.prop.parent is None, another.containers.values())
def add(n, anc):
c = self._container_of(n.prop)
if c:
c.append(n.entity, anc)
for ch_ in n.children.values():
for m in ch_.nodes:
add(m, anc.copy())
for c_ in roots_:
for n_ in c_.nodes:
add(n_, {})
return self
@property
def view(self) -> 'GraphView':
"""
Returns an unmodifiable view of this graph.
The view object works as the accessor to graph components.
- Returns a graph instance when invoked as callable object.
- The attribute of a container name returns the container view.
- In iteration context, it iterates pairs of root container name and its view.
- A root container is a container which has no parent.
>>> template = GraphSpec().new_template(a=int, b=str, c=str)
>>> template.a << template.b
>>> graph = new_graph(template)
>>> view = graph.view
>>> assert view() is graph # invocation
>>> assert view.a is graph.containers["a"].view # attribute
>>> assert [name for name, _ in view] == ["a", "c"] # iteration
:getter: The view of this graph.
"""
if self._view is None:
graph = self
class GraphView:
def __call__(self):
"""Returns the greph of this view."""
return graph
def __iter__(self):
"""Iterates views of root containers."""
return map(lambda c: (c.name, c.view), filter(lambda c: c.prop.parent is None, graph.containers.values()))
def __getattr__(self, name):
"""Returns a view of a container of the name."""
return graph.containers[name].view
self._view = GraphView()
return self._view
def _append(self, to_replace, entities):
props = [p for p in self.template if p.name in entities]
filtered = set()
for p in props:
if (p.parent is None) or (p.parent.name not in entities) or (p.parent.name in filtered):
if p.entity_filter is None or p.entity_filter(entities[p.name]):
filtered.add(p.name)
ancestors = {}
for k in [p.name for p in props if p.name in filtered]:
self.containers[k].append(entities[k], ancestors, to_replace)
return self
def append(self, **entities: Any) -> 'Graph':
"""
Append entities with associated property names.
:param entities: Entities keyed with associated property names.
:returns: This graph.
"""
return self._append(False, entities)
def replace(self, **entities: Any) -> 'Graph':
"""
Works similarly to `append` , but entities of identical nodes are replaced with given entities.
:param entities: Entities keyed with associated property names.
:returns: This graph.
"""
return self._append(True, entities)
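# Illustrative sketch (assumed usage, not part of the original source):
#   g = new_graph(template).append(a=1, b="a")   # creates nodes for a=1 and b="a"
#   g.append(a=1, b="b")                         # a=1 is identical, so only a node for b="b" is added
#   g.replace(a=1, b="b")                        # identical nodes have their entities replaced instead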
class _EmptyNodeView:
"""
:meta private:
"""
def __init__(self, prop, result):
self.prop = prop
self.result = result
def __call__(self, alt=None):
return self.result
def __iter__(self):
return iter([])
def __getattr__(self, key):
child = next(filter(lambda c: c.name == key, self.prop.children), None)
if child:
return _EmptyContainerView(child)
else:
raise KeyError(f"Graph property '{self.prop.name}' does not have a child property '{key}'.")
class _EmptyContainerView:
"""
:meta private:
"""
def __init__(self, prop):
self.prop = prop
def __bool__(self):
return False
def __call__(self):
return []
def __iter__(self):
return iter([])
def __len__(self):
return 0
def __getitem__(self, index):
if isinstance(index, slice):
return []
else:
raise IndexError(f"Index for container '{self.prop.name}' is out of range.")
def __getattr__(self, key):
child = next(filter(lambda c: c.name == key, self.prop.children), None)
if child:
return _EmptyContainerView(child)
else:
raise KeyError(f"Graph property '{self.prop.name}' does not have a child property '{key}'.")
class NodeContainer:
"""
This class represents a node container of a template property.
:param prop: Template property.
"""
def __init__(self, prop: GraphTemplate.Property):
self.nodes = []
self.keys = {}
self.prop = prop
self._view = None
@property
def name(self) -> str:
"""
Returns the container name, which is the same as the name of the template property.
:getter: Container name.
"""
return self.prop.name
@property
def view(self) -> 'ContainerView':
"""
Returns an unmodifiable view of this container.
The view object works as the accessor to container components.
- Returns a container instance when invoked as callable object.
- The attribute of a child name returns the child container view of the first node in this container.
- Index access returns the view of node at the index.
- In iteration context, it iterates views of nodes.
- The number of nodes is returned by being applied to `len` .
>>> template = GraphSpec().new_template(a=int, b=str, c=str)
>>> template.a << template.b
>>> graph = new_graph(template).append(a=1, b="a").append(a=1, b="b").append(a=2, b="c")
>>> container = graph.containers["a"]
>>> view = graph.view.a
>>> assert view() is container # invocation
>>> assert view.b is container.nodes[0].children["b"].view # attribute
>>> assert view[1] is container.nodes[1].view # index
>>> assert [n() for n in view] == [1, 2] # iteration
>>> assert len(view) == 2 # length
:getter: The view of this container.
"""
if self._view is None:
container = self
class ContainerView:
def __bool__(self):
"""Returns whether this container is not empty."""
return len(container.nodes) != 0
def __call__(self):
"""Returns a base container."""
return container
def __len__(self):
"""Returns the number of nodes."""
return len(container.nodes)
def __iter__(self):
"""Iterates views of nodes."""
return map(lambda n: n.view, container.nodes)
def __getitem__(self, index):
"""Returns a view of a node at the index."""
if isinstance(index, slice):
return [n.view for n in container.nodes[index]]
else:
return container.nodes[index].view
def __getattr__(self, key):
"""Returns a view of the first node or empty container view if it does not exist."""
child = next(filter(lambda c: c.name == key, container.prop.children), None)
if child:
return container.nodes[0].children[key].view if len(container.nodes) > 0 else _EmptyContainerView(child)
else:
raise KeyError(f"Graph property '{container.prop.name}' does not have a child property '{key}'.")
self._view = ContainerView()
return self._view
def append(self, entity: Any, ancestors: Dict[str, List['Node']], to_replace: bool = False):
"""
Add an entity to this container.
An identical node is searched for by examining whether this container already contains a node with the identical entity
whose parent is found in `ancestors`.
:param entity: An entity to be stored in the node.
:param ancestors: Parent nodes mapped by property names.
:param to_replace: If ``True`` , the entity of identical node is replaced. Otherwise, it is not changed.
"""
def get_nodes(k):
return [self.nodes[i] for i in self.keys.get(k, [])]
policy = self.prop.policy or neverPolicy()
key = policy.get_identifier(entity)
parents, identicals = policy.identify(self.prop, get_nodes(key), ancestors)
new_nodes = identicals.copy()
for p in parents:
index = len(self.nodes)
node = Node(self.prop, entity, key, index)
self.nodes.append(node)
if key is not None:
self.keys.setdefault(key, []).append(index)
new_nodes.append(node)
if p is not None:
p.add_child(node)
if to_replace:
for n in identicals:
n.entity = entity
ancestors[self.prop.name] = new_nodes
class GraphNodeContainer(NodeContainer):
"""
:meta private:
"""
def append(self, entity, ancestors, to_replace):
if not isinstance(entity, (dict, Graph)):
raise ValueError(f"Node of graph only accepts dict or Graph object.")
policy = self.prop.policy or neverPolicy()
parents, _ = policy.identify(self.prop, [], ancestors)
for p in parents:
index = len(self.nodes)
graphs = []
if p is None or len(p.children[self.name].nodes) == 0:
g = Graph(self.prop.kind)
node = GraphNode(self.prop, g, None, index)
self.nodes.append(node)
if p is not None:
p.add_child(node)
graphs.append(g)
else:
graphs.extend([n.entity for n in p.children[self.name].nodes])
for g in graphs:
if isinstance(entity, dict):
g.append(**entity)
else:
g += entity
class Node:
"""
This class represents a node which contains an entity.
:param prop: Template property.
:param entity: Entity.
"""
class Children:
def __init__(self, prop: GraphTemplate.Property):
self.nodes = []
self.keys = set()
self.prop = prop
self._view = None
@property
def view(self):
if self._view is None:
base = self
class ChildrenView:
def __bool__(self):
"""Returns whether this container is not empty."""
return len(base.nodes) != 0
def __call__(self):
"""Returns children container."""
return base
def __iter__(self):
"""Iterates views of child nodes."""
return map(lambda n: n.view, base.nodes)
def __len__(self):
"""Returns the number of child nodes."""
return len(base.nodes)
def __getitem__(self, index):
"""Returns a view of child node at the index."""
if isinstance(index, slice):
return [n.view for n in base.nodes[index]]
else:
return base.nodes[index].view
def __getattr__(self, key):
"""Returns a view of the first node or empty container view if it does not exist."""
child = next(filter(lambda c: c.name == key, base.prop.children), None)
if child:
return base.nodes[0].children[key].view if len(base.nodes) > 0 else _EmptyContainerView(child)
else:
raise KeyError(f"Graph property '{base.prop.name}' does not have a child property '{key}'.")
self._view = ChildrenView()
return self._view
def has(self, node):
return node in self.keys
def append(self, node):
if node not in self.keys:
self.keys.add(node)
self.nodes.append(node)
def __init__(self, prop: GraphTemplate.Property, entity: Any, key: Optional[Any], index: int):
self.prop = prop
self.entity = entity
self.key = key
self.parents = set()
self.children = {c.name: Node.Children(c) for c in prop.children}
self._index = index
self._view = None
@property
def name(self) -> str:
"""
Returns the node name, which is the same as the name of the template property.
:getter: Node name.
"""
return self.prop.name
@property
def view(self) -> 'NodeView':
"""
Returns an unmodifiable view of this node.
The view object works as the accessor to node components.
- Returns the node's entity when invoked as a callable object.
- The attribute of a child name returns the child container view.
- In iteration context, it iterates pairs of child container name and its view.
:returns: The view of this node.
"""
if self._view is None:
node = self
class NodeView:
def __call__(self, alt=None):
"""Returns an entity of this node."""
return node.entity
def __getattr__(self, name):
"""Returns a view of child nodes by its name."""
return node.children[name].view
def __iter__(self):
"""Iterate key-value pairs of child nodes."""
return map(lambda nc: (nc[0], nc[1].view), node.children.items())
self._view = NodeView()
return self._view
def add_child(self, child: 'Node') -> 'Node':
"""
Adds a child node.
:param child: Child node.
:returns: This instance.
"""
if child.prop.template != self.prop.template:
raise ValueError(f"Nodes from difference graph template can't be associated.")
self.children[child.prop.name].append(child)
child.parents.add(self)
return self
def has_child(self, child: 'Node') -> bool:
"""
Checks whether this node contains a child identical to the given node.
:param child: Node to search.
:returns: ``True`` if exists.
"""
if child.prop.template != self.prop.template:
return False
elif child.prop.name in self.children:
return child in self.children[child.prop.name].keys
else:
return False
class GraphNode(Node):
"""
:meta private:
"""
@property
def view(self):
return self.entity.view
def add_child(self, child):
raise TypeError(f"GraphNode does not have child.")
def has_child(self, child):
return False
| 35.108929
| 134
| 0.560602
|
abf145709e7fb615ea54f862b9b790f0474bbd58
| 1,176
|
py
|
Python
|
migrations/versions/e42c76e8aa8e_local_path_and_yd_path_removed_from_db.py
|
ShiawasenaHoshi/telegram_photo_saver_bot
|
7b9b20ed89268da9c64f94958bdf935c18cc85f8
|
[
"MIT"
] | null | null | null |
migrations/versions/e42c76e8aa8e_local_path_and_yd_path_removed_from_db.py
|
ShiawasenaHoshi/telegram_photo_saver_bot
|
7b9b20ed89268da9c64f94958bdf935c18cc85f8
|
[
"MIT"
] | 1
|
2021-06-02T02:33:29.000Z
|
2021-06-02T02:33:29.000Z
|
migrations/versions/e42c76e8aa8e_local_path_and_yd_path_removed_from_db.py
|
ShiawasenaHoshi/telegram_photo_saver_bot
|
7b9b20ed89268da9c64f94958bdf935c18cc85f8
|
[
"MIT"
] | null | null | null |
"""local_path and yd_path removed from db
Revision ID: e42c76e8aa8e
Revises: a336639792e2
Create Date: 2019-11-15 18:27:45.236183
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'e42c76e8aa8e'
down_revision = 'a336639792e2'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
# op.drop_column('chat', 'local_folder')
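# Note (added comment): batch_alter_table is used because SQLite cannot drop a column with a
# plain ALTER TABLE; Alembic's batch mode recreates the table to apply the change.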
with op.batch_alter_table("chat") as batch_op:
batch_op.drop_column('local_folder')
# op.add_column('chat', sa.Column('local_folder', sa.Boolean(), default=1))
# op.drop_column('chat', 'yd_folder')
with op.batch_alter_table("chat") as batch_op:
batch_op.drop_column('yd_folder')
# op.add_column('chat', sa.Column('yd_folder', sa.Boolean(), default=1))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('chat', sa.Column('yd_folder', sa.VARCHAR(length=240), nullable=False))
op.add_column('chat', sa.Column('local_folder', sa.VARCHAR(length=240), nullable=False))
# ### end Alembic commands ###
| 31.783784
| 92
| 0.693027
|
b0bbf51fabf126f2ff5b4d5d947c9b52cdc725c2
| 549
|
py
|
Python
|
namingalgorithm/urls.py
|
Amrithasuresh/BPPRC
|
6ee01914a612d65f7084db7ce377da3bab682e66
|
[
"BSD-3-Clause"
] | 2
|
2020-01-10T18:36:37.000Z
|
2020-01-10T18:42:41.000Z
|
namingalgorithm/urls.py
|
Amrithasuresh/BPPRC
|
6ee01914a612d65f7084db7ce377da3bab682e66
|
[
"BSD-3-Clause"
] | 12
|
2020-06-05T23:39:18.000Z
|
2022-03-12T00:48:18.000Z
|
namingalgorithm/urls.py
|
Amrithasuresh/BPPRC_v1
|
6ee01914a612d65f7084db7ce377da3bab682e66
|
[
"BSD-3-Clause"
] | null | null | null |
from django.contrib import admin
from django.urls import path, include
from namingalgorithm import views
urlpatterns = [
path('submit_home/', views.submit_home, name='submit_home'),
path('submit/', views.submit, name='submit'),
path('naming_algorithm/', views.naming_algorithm, name='naming_algorithm'),
path('run_naming_algorithm/', views.run_naming_algorithm,
name='run_naming_algorithm'),
path('run_align/', views.run_align, name='run_align'),
path('align_results/', views.align_results, name='align_results')
]
| 39.214286
| 79
| 0.734062
|
4716ea102ac70ac4793728bb7ff886169c472113
| 49,485
|
py
|
Python
|
src/marshmallow/schema.py
|
theirix/marshmallow
|
43db1072f94ae6af40cbedc058f1c02f3cedb379
|
[
"MIT"
] | null | null | null |
src/marshmallow/schema.py
|
theirix/marshmallow
|
43db1072f94ae6af40cbedc058f1c02f3cedb379
|
[
"MIT"
] | null | null | null |
src/marshmallow/schema.py
|
theirix/marshmallow
|
43db1072f94ae6af40cbedc058f1c02f3cedb379
|
[
"MIT"
] | null | null | null |
"""The :class:`Schema` class, including its metaclass and options (class Meta)."""
from collections import defaultdict, OrderedDict
from collections.abc import Mapping
from functools import lru_cache
import datetime as dt
import uuid
import decimal
import copy
import inspect
import json
import typing
import warnings
from marshmallow import base, fields as ma_fields, class_registry, types
from marshmallow.error_store import ErrorStore
from marshmallow.exceptions import ValidationError, StringNotCollectionError
from marshmallow.orderedset import OrderedSet
from marshmallow.decorators import (
POST_DUMP,
POST_LOAD,
PRE_DUMP,
PRE_LOAD,
VALIDATES,
VALIDATES_SCHEMA,
)
from marshmallow.utils import (
RAISE,
EXCLUDE,
INCLUDE,
missing,
set_value,
get_value,
is_collection,
is_instance_or_subclass,
is_iterable_but_not_string,
)
from marshmallow.warnings import RemovedInMarshmallow4Warning
_T = typing.TypeVar("_T")
def _get_fields(attrs, field_class, pop=False, ordered=False):
"""Get fields from a class. If ordered=True, fields will sorted by creation index.
:param attrs: Mapping of class attributes
:param type field_class: Base field class
:param bool pop: Remove matching fields
"""
fields = [
(field_name, field_value)
for field_name, field_value in attrs.items()
if is_instance_or_subclass(field_value, field_class)
]
if pop:
for field_name, _ in fields:
del attrs[field_name]
if ordered:
fields.sort(key=lambda pair: pair[1]._creation_index)
return fields
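# Note (added comment): _creation_index is a counter assigned to each Field instance at
# construction time, so sorting by it recovers the order in which the fields were declared.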
# This function allows Schemas to inherit from non-Schema classes and ensures
# inheritance according to the MRO
def _get_fields_by_mro(klass, field_class, ordered=False):
"""Collect fields from a class, following its method resolution order. The
class itself is excluded from the search; only its parents are checked. Get
fields from ``_declared_fields`` if available, else use ``__dict__``.
:param type klass: Class whose fields to retrieve
:param type field_class: Base field class
"""
mro = inspect.getmro(klass)
# Loop over mro in reverse to maintain correct order of fields
return sum(
(
_get_fields(
getattr(base, "_declared_fields", base.__dict__),
field_class,
ordered=ordered,
)
for base in mro[:0:-1]
),
[],
)
class SchemaMeta(type):
"""Metaclass for the Schema class. Binds the declared fields to
a ``_declared_fields`` attribute, which is a dictionary mapping attribute
names to field objects. Also sets the ``opts`` class attribute, which is
the Schema class's ``class Meta`` options.
"""
def __new__(mcs, name, bases, attrs):
meta = attrs.get("Meta")
ordered = getattr(meta, "ordered", False)
if not ordered:
# Inherit 'ordered' option
# Warning: We loop through bases instead of MRO because we don't
# yet have access to the class object
# (i.e. can't call super before we have fields)
for base_ in bases:
if hasattr(base_, "Meta") and hasattr(base_.Meta, "ordered"):
ordered = base_.Meta.ordered
break
else:
ordered = False
cls_fields = _get_fields(attrs, base.FieldABC, pop=True, ordered=ordered)
klass = super().__new__(mcs, name, bases, attrs)
inherited_fields = _get_fields_by_mro(klass, base.FieldABC, ordered=ordered)
meta = klass.Meta
# Set klass.opts in __new__ rather than __init__ so that it is accessible in
# get_declared_fields
klass.opts = klass.OPTIONS_CLASS(meta, ordered=ordered)
# Add fields specified in the `include` class Meta option
cls_fields += list(klass.opts.include.items())
dict_cls = OrderedDict if ordered else dict
# Assign _declared_fields on class
klass._declared_fields = mcs.get_declared_fields(
klass=klass,
cls_fields=cls_fields,
inherited_fields=inherited_fields,
dict_cls=dict_cls,
)
return klass
@classmethod
def get_declared_fields(
mcs,
klass: type,
cls_fields: typing.List,
inherited_fields: typing.List,
dict_cls: type,
):
"""Returns a dictionary of field_name => `Field` pairs declared on the class.
This is exposed mainly so that plugins can add additional fields, e.g. fields
computed from class Meta options.
:param klass: The class object.
:param cls_fields: The fields declared on the class, including those added
by the ``include`` class Meta option.
:param inherited_fields: Inherited fields.
:param dict_cls: Either `dict` or `OrderedDict`, depending on whether
the user specified `ordered=True`.
"""
return dict_cls(inherited_fields + cls_fields)
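# Illustrative sketch (an assumption, not part of marshmallow itself): a plugin could inject
# extra fields by overriding this classmethod on a SchemaMeta subclass, for example:
#
#   class AutoFieldMeta(SchemaMeta):
#       @classmethod
#       def get_declared_fields(mcs, klass, cls_fields, inherited_fields, dict_cls):
#           declared = super().get_declared_fields(klass, cls_fields, inherited_fields, dict_cls)
#           declared.setdefault("id", ma_fields.Integer(dump_only=True))
#           return declared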
def __init__(cls, name, bases, attrs):
super().__init__(name, bases, attrs)
if name and cls.opts.register:
class_registry.register(name, cls)
cls._hooks = cls.resolve_hooks()
def resolve_hooks(cls) -> typing.Dict[types.Tag, typing.List[str]]:
"""Add in the decorated processors
By doing this after constructing the class, we let standard inheritance
do all the hard work.
"""
mro = inspect.getmro(cls)
hooks = defaultdict(list) # type: typing.Dict[types.Tag, typing.List[str]]
for attr_name in dir(cls):
# Need to look up the actual descriptor, not whatever might be
# bound to the class. This needs to come from the __dict__ of the
# declaring class.
for parent in mro:
try:
attr = parent.__dict__[attr_name]
except KeyError:
continue
else:
break
else:
# In case we didn't find the attribute and didn't break above.
# We should never hit this - it's just here for completeness
# to exclude the possibility of attr being undefined.
continue
try:
hook_config = attr.__marshmallow_hook__
except AttributeError:
pass
else:
for key in hook_config.keys():
# Use name here so we can get the bound method later, in
# case the processor was a descriptor or something.
hooks[key].append(attr_name)
return hooks
class SchemaOpts:
"""class Meta options for the :class:`Schema`. Defines defaults."""
def __init__(self, meta, ordered: bool = False):
self.fields = getattr(meta, "fields", ())
if not isinstance(self.fields, (list, tuple)):
raise ValueError("`fields` option must be a list or tuple.")
self.additional = getattr(meta, "additional", ())
if not isinstance(self.additional, (list, tuple)):
raise ValueError("`additional` option must be a list or tuple.")
if self.fields and self.additional:
raise ValueError(
"Cannot set both `fields` and `additional` options"
" for the same Schema."
)
self.exclude = getattr(meta, "exclude", ())
if not isinstance(self.exclude, (list, tuple)):
raise ValueError("`exclude` must be a list or tuple.")
self.dateformat = getattr(meta, "dateformat", None)
self.datetimeformat = getattr(meta, "datetimeformat", None)
if hasattr(meta, "json_module"):
warnings.warn(
"The json_module class Meta option is deprecated. Use render_module instead.",
RemovedInMarshmallow4Warning,
)
render_module = getattr(meta, "json_module", json)
else:
render_module = json
self.render_module = getattr(meta, "render_module", render_module)
self.ordered = getattr(meta, "ordered", ordered)
self.index_errors = getattr(meta, "index_errors", True)
self.include = getattr(meta, "include", {})
self.load_only = getattr(meta, "load_only", ())
self.dump_only = getattr(meta, "dump_only", ())
self.unknown = getattr(meta, "unknown", RAISE)
self.register = getattr(meta, "register", True)
class Schema(base.SchemaABC, metaclass=SchemaMeta):
"""Base schema class with which to define custom schemas.
Example usage:
.. code-block:: python
import datetime as dt
from dataclasses import dataclass
from marshmallow import Schema, fields
@dataclass
class Album:
title: str
release_date: dt.date
class AlbumSchema(Schema):
title = fields.Str()
release_date = fields.Date()
album = Album("Beggars Banquet", dt.date(1968, 12, 6))
schema = AlbumSchema()
data = schema.dump(album)
data # {'release_date': '1968-12-06', 'title': 'Beggars Banquet'}
:param only: Whitelist of the declared fields to select when
instantiating the Schema. If None, all fields are used. Nested fields
can be represented with dot delimiters.
:param exclude: Blacklist of the declared fields to exclude
when instantiating the Schema. If a field appears in both `only` and
`exclude`, it is not used. Nested fields can be represented with dot
delimiters.
:param many: Should be set to `True` if ``obj`` is a collection
so that the object will be serialized to a list.
:param context: Optional context passed to :class:`fields.Method` and
:class:`fields.Function` fields.
:param load_only: Fields to skip during serialization (write-only fields)
:param dump_only: Fields to skip during deserialization (read-only fields)
:param partial: Whether to ignore missing fields and not require
any fields declared. Propagates down to ``Nested`` fields as well. If
its value is an iterable, only missing fields listed in that iterable
will be ignored. Use dot delimiters to specify nested fields.
:param unknown: Whether to exclude, include, or raise an error for unknown
fields in the data. Use `EXCLUDE`, `INCLUDE` or `RAISE`.
.. versionchanged:: 3.0.0
`prefix` parameter removed.
.. versionchanged:: 2.0.0
`__validators__`, `__preprocessors__`, and `__data_handlers__` are removed in favor of
`marshmallow.decorators.validates_schema`,
`marshmallow.decorators.pre_load` and `marshmallow.decorators.post_dump`.
`__accessor__` and `__error_handler__` are deprecated. Implement the
`handle_error` and `get_attribute` methods instead.
"""
TYPE_MAPPING = {
str: ma_fields.String,
bytes: ma_fields.String,
dt.datetime: ma_fields.DateTime,
float: ma_fields.Float,
bool: ma_fields.Boolean,
tuple: ma_fields.Raw,
list: ma_fields.Raw,
set: ma_fields.Raw,
int: ma_fields.Integer,
uuid.UUID: ma_fields.UUID,
dt.time: ma_fields.Time,
dt.date: ma_fields.Date,
dt.timedelta: ma_fields.TimeDelta,
decimal.Decimal: ma_fields.Decimal,
} # type: typing.Dict[type, typing.Type[ma_fields.Field]]
#: Overrides for default schema-level error messages
error_messages = {} # type: typing.Dict[str, str]
_default_error_messages = {
"type": "Invalid input type.",
"unknown": "Unknown field.",
} # type: typing.Dict[str, str]
OPTIONS_CLASS = SchemaOpts # type: type
# These get set by SchemaMeta
opts = None # type: SchemaOpts
_declared_fields = {} # type: typing.Dict[str, ma_fields.Field]
_hooks = {} # type: typing.Dict[types.Tag, typing.List[str]]
class Meta:
"""Options object for a Schema.
Example usage: ::
class Meta:
fields = ("id", "email", "date_created")
exclude = ("password", "secret_attribute")
Available options:
- ``fields``: Tuple or list of fields to include in the serialized result.
- ``additional``: Tuple or list of fields to include *in addition* to the
explicitly declared fields. ``additional`` and ``fields`` are
mutually-exclusive options.
- ``include``: Dictionary of additional fields to include in the schema. It is
usually better to define fields as class variables, but you may need to
use this option, e.g., if your fields are Python keywords. May be an
`OrderedDict`.
- ``exclude``: Tuple or list of fields to exclude in the serialized result.
Nested fields can be represented with dot delimiters.
- ``dateformat``: Default format for `Date <fields.Date>` fields.
- ``datetimeformat``: Default format for `DateTime <fields.DateTime>` fields.
- ``render_module``: Module to use for `loads <Schema.loads>` and `dumps <Schema.dumps>`.
Defaults to `json` from the standard library.
- ``ordered``: If `True`, order serialization output according to the
order in which fields were declared. Output of `Schema.dump` will be a
`collections.OrderedDict`.
- ``index_errors``: If `True`, errors dictionaries will include the index
of invalid items in a collection.
- ``load_only``: Tuple or list of fields to exclude from serialized results.
- ``dump_only``: Tuple or list of fields to exclude from deserialization
- ``unknown``: Whether to exclude, include, or raise an error for unknown
fields in the data. Use `EXCLUDE`, `INCLUDE` or `RAISE`.
- ``register``: Whether to register the `Schema` with marshmallow's internal
class registry. Must be `True` if you intend to refer to this `Schema`
by class name in `Nested` fields. Only set this to `False` when memory
usage is critical. Defaults to `True`.
"""
def __init__(
self,
*,
only: typing.Optional[types.StrSequenceOrSet] = None,
exclude: types.StrSequenceOrSet = (),
many: bool = False,
context: typing.Optional[typing.Dict] = None,
load_only: types.StrSequenceOrSet = (),
dump_only: types.StrSequenceOrSet = (),
partial: typing.Union[bool, types.StrSequenceOrSet] = False,
unknown: typing.Optional[str] = None
):
# Raise error if only or exclude is passed as string, not list of strings
if only is not None and not is_collection(only):
raise StringNotCollectionError('"only" should be a list of strings')
if not is_collection(exclude):
raise StringNotCollectionError('"exclude" should be a list of strings')
# copy declared fields from metaclass
self.declared_fields = copy.deepcopy(self._declared_fields)
self.many = many
self.only = only
self.exclude = set(self.opts.exclude) | set(exclude)
self.ordered = self.opts.ordered
self.load_only = set(load_only) or set(self.opts.load_only)
self.dump_only = set(dump_only) or set(self.opts.dump_only)
self.partial = partial
self.unknown = unknown or self.opts.unknown
self.context = context or {}
self._normalize_nested_options()
#: Dictionary mapping field_names -> :class:`Field` objects
self.fields = {} # type: typing.Dict[str, ma_fields.Field]
self.load_fields = {} # type: typing.Dict[str, ma_fields.Field]
self.dump_fields = {} # type: typing.Dict[str, ma_fields.Field]
self._init_fields()
messages = {}
messages.update(self._default_error_messages)
for cls in reversed(self.__class__.__mro__):
messages.update(getattr(cls, "error_messages", {}))
messages.update(self.error_messages or {})
self.error_messages = messages
def __repr__(self) -> str:
return "<{ClassName}(many={self.many})>".format(
ClassName=self.__class__.__name__, self=self
)
@property
def dict_class(self) -> type:
return OrderedDict if self.ordered else dict
@property
def set_class(self) -> type:
return OrderedSet if self.ordered else set
@classmethod
def from_dict(
cls,
fields: typing.Dict[str, typing.Union[ma_fields.Field, type]],
*,
name: str = "GeneratedSchema"
) -> type:
"""Generate a `Schema` class given a dictionary of fields.
.. code-block:: python
from marshmallow import Schema, fields
PersonSchema = Schema.from_dict({"name": fields.Str()})
print(PersonSchema().load({"name": "David"})) # => {'name': 'David'}
Generated schemas are not added to the class registry and therefore cannot
be referred to by name in `Nested` fields.
:param dict fields: Dictionary mapping field names to field instances.
:param str name: Optional name for the class, which will appear in
the ``repr`` for the class.
.. versionadded:: 3.0.0
"""
attrs = fields.copy()
attrs["Meta"] = type(
"GeneratedMeta", (getattr(cls, "Meta", object),), {"register": False}
)
schema_cls = type(name, (cls,), attrs)
return schema_cls
##### Override-able methods #####
def handle_error(
self, error: ValidationError, data: typing.Any, *, many: bool, **kwargs
):
"""Custom error handler function for the schema.
:param error: The `ValidationError` raised during (de)serialization.
:param data: The original input data.
:param many: Value of ``many`` on dump or load.
:param partial: Value of ``partial`` on load.
.. versionadded:: 2.0.0
.. versionchanged:: 3.0.0rc9
Receives `many` and `partial` (on deserialization) as keyword arguments.
"""
pass
def get_attribute(self, obj: typing.Any, attr: str, default: typing.Any):
"""Defines how to pull values from an object to serialize.
.. versionadded:: 2.0.0
.. versionchanged:: 3.0.0a1
Changed position of ``obj`` and ``attr``.
"""
return get_value(obj, attr, default)
##### Serialization/Deserialization API #####
@staticmethod
def _call_and_store(getter_func, data, *, field_name, error_store, index=None):
"""Call ``getter_func`` with ``data`` as its argument, and store any `ValidationErrors`.
:param callable getter_func: Function for getting the serialized/deserialized
value from ``data``.
:param data: The data passed to ``getter_func``.
:param str field_name: Field name.
:param int index: Index of the item being validated, if validating a collection,
otherwise `None`.
"""
try:
value = getter_func(data)
except ValidationError as error:
error_store.store_error(error.messages, field_name, index=index)
# When a Nested field fails validation, the marshalled data is stored
# on the ValidationError's valid_data attribute
return error.valid_data or missing
return value
def _serialize(
self, obj: typing.Union[_T, typing.Iterable[_T]], *, many: bool = False
):
"""Serialize ``obj``.
:param obj: The object(s) to serialize.
:param bool many: `True` if ``data`` should be serialized as a collection.
:return: A dictionary of the serialized data
.. versionchanged:: 1.0.0
Renamed from ``marshal``.
"""
if many and obj is not None:
return [
self._serialize(d, many=False)
for d in typing.cast(typing.Iterable[_T], obj)
]
ret = self.dict_class()
for attr_name, field_obj in self.dump_fields.items():
value = field_obj.serialize(attr_name, obj, accessor=self.get_attribute)
if value is missing:
continue
key = field_obj.data_key if field_obj.data_key is not None else attr_name
ret[key] = value
return ret
def dump(self, obj: typing.Any, *, many: typing.Optional[bool] = None):
"""Serialize an object to native Python data types according to this
Schema's fields.
:param obj: The object to serialize.
:param many: Whether to serialize `obj` as a collection. If `None`, the value
for `self.many` is used.
:return: A dict of serialized data
:rtype: dict
.. versionadded:: 1.0.0
.. versionchanged:: 3.0.0b7
This method returns the serialized data rather than a ``(data, errors)`` duple.
A :exc:`ValidationError <marshmallow.exceptions.ValidationError>` is raised
if ``obj`` is invalid.
.. versionchanged:: 3.0.0rc9
Validation no longer occurs upon serialization.
"""
many = self.many if many is None else bool(many)
if many and is_iterable_but_not_string(obj):
obj = list(obj)
if self._has_processors(PRE_DUMP):
processed_obj = self._invoke_dump_processors(
PRE_DUMP, obj, many=many, original_data=obj
)
else:
processed_obj = obj
result = self._serialize(processed_obj, many=many)
if self._has_processors(POST_DUMP):
result = self._invoke_dump_processors(
POST_DUMP, result, many=many, original_data=obj
)
return result
def dumps(
self, obj: typing.Any, *args, many: typing.Optional[bool] = None, **kwargs
):
"""Same as :meth:`dump`, except return a JSON-encoded string.
:param obj: The object to serialize.
:param many: Whether to serialize `obj` as a collection. If `None`, the value
for `self.many` is used.
:return: A ``json`` string
:rtype: str
.. versionadded:: 1.0.0
.. versionchanged:: 3.0.0b7
This method returns the serialized data rather than a ``(data, errors)`` duple.
A :exc:`ValidationError <marshmallow.exceptions.ValidationError>` is raised
if ``obj`` is invalid.
"""
serialized = self.dump(obj, many=many)
return self.opts.render_module.dumps(serialized, *args, **kwargs)
def _deserialize(
self,
data: typing.Union[
typing.Mapping[str, typing.Any],
typing.Iterable[typing.Mapping[str, typing.Any]],
],
*,
error_store: ErrorStore,
many: bool = False,
partial=False,
unknown=RAISE,
index=None
) -> typing.Union[_T, typing.List[_T]]:
"""Deserialize ``data``.
:param dict data: The data to deserialize.
:param ErrorStore error_store: Structure to store errors.
:param bool many: `True` if ``data`` should be deserialized as a collection.
:param bool|tuple partial: Whether to ignore missing fields and not require
any fields declared. Propagates down to ``Nested`` fields as well. If
its value is an iterable, only missing fields listed in that iterable
will be ignored. Use dot delimiters to specify nested fields.
:param unknown: Whether to exclude, include, or raise an error for unknown
fields in the data. Use `EXCLUDE`, `INCLUDE` or `RAISE`.
:param int index: Index of the item being serialized (for storing errors) if
serializing a collection, otherwise `None`.
:return: A dictionary of the deserialized data.
"""
index_errors = self.opts.index_errors
index = index if index_errors else None
if many:
if not is_collection(data):
error_store.store_error([self.error_messages["type"]], index=index)
ret = [] # type: typing.List[_T]
else:
ret = [
typing.cast(
_T,
self._deserialize(
typing.cast(typing.Mapping[str, typing.Any], d),
error_store=error_store,
many=False,
partial=partial,
unknown=unknown,
index=idx,
),
)
for idx, d in enumerate(data)
]
return ret
ret = self.dict_class()
# Check data is a dict
if not isinstance(data, Mapping):
error_store.store_error([self.error_messages["type"]], index=index)
else:
partial_is_collection = is_collection(partial)
for attr_name, field_obj in self.load_fields.items():
field_name = (
field_obj.data_key if field_obj.data_key is not None else attr_name
)
raw_value = data.get(field_name, missing)
if raw_value is missing:
# Ignore missing field if we're allowed to.
if partial is True or (
partial_is_collection and attr_name in partial
):
continue
d_kwargs = {}
# Allow partial loading of nested schemas.
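# e.g. (illustrative): partial=("user.name",) passes partial=["name"] down to a nested "user" field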
if partial_is_collection:
prefix = field_name + "."
len_prefix = len(prefix)
sub_partial = [
f[len_prefix:] for f in partial if f.startswith(prefix)
]
d_kwargs["partial"] = sub_partial
else:
d_kwargs["partial"] = partial
getter = lambda val: field_obj.deserialize(
val, field_name, data, **d_kwargs
)
value = self._call_and_store(
getter_func=getter,
data=raw_value,
field_name=field_name,
error_store=error_store,
index=index,
)
if value is not missing:
key = field_obj.attribute or attr_name
set_value(typing.cast(typing.Dict, ret), key, value)
if unknown != EXCLUDE:
fields = {
field_obj.data_key if field_obj.data_key is not None else field_name
for field_name, field_obj in self.load_fields.items()
}
for key in set(data) - fields:
value = data[key]
if unknown == INCLUDE:
set_value(typing.cast(typing.Dict, ret), key, value)
elif unknown == RAISE:
error_store.store_error(
[self.error_messages["unknown"]],
key,
(index if index_errors else None),
)
return ret
def load(
self,
data: typing.Union[
typing.Mapping[str, typing.Any],
typing.Iterable[typing.Mapping[str, typing.Any]],
],
*,
many: typing.Optional[bool] = None,
partial: typing.Optional[typing.Union[bool, types.StrSequenceOrSet]] = None,
unknown: typing.Optional[str] = None
):
"""Deserialize a data structure to an object defined by this Schema's fields.
:param data: The data to deserialize.
:param many: Whether to deserialize `data` as a collection. If `None`, the
value for `self.many` is used.
:param partial: Whether to ignore missing fields and not require
any fields declared. Propagates down to ``Nested`` fields as well. If
its value is an iterable, only missing fields listed in that iterable
will be ignored. Use dot delimiters to specify nested fields.
:param unknown: Whether to exclude, include, or raise an error for unknown
fields in the data. Use `EXCLUDE`, `INCLUDE` or `RAISE`.
If `None`, the value for `self.unknown` is used.
:return: Deserialized data
.. versionadded:: 1.0.0
.. versionchanged:: 3.0.0b7
This method returns the deserialized data rather than a ``(data, errors)`` duple.
A :exc:`ValidationError <marshmallow.exceptions.ValidationError>` is raised
if invalid data are passed.
"""
return self._do_load(
data, many=many, partial=partial, unknown=unknown, postprocess=True
)
def loads(
self,
json_data: str,
*,
many: typing.Optional[bool] = None,
partial: typing.Optional[typing.Union[bool, types.StrSequenceOrSet]] = None,
unknown: typing.Optional[str] = None,
**kwargs
):
"""Same as :meth:`load`, except it takes a JSON string as input.
:param json_data: A JSON string of the data to deserialize.
:param many: Whether to deserialize `json_data` as a collection. If `None`, the
value for `self.many` is used.
:param partial: Whether to ignore missing fields and not require
any fields declared. Propagates down to ``Nested`` fields as well. If
its value is an iterable, only missing fields listed in that iterable
will be ignored. Use dot delimiters to specify nested fields.
:param unknown: Whether to exclude, include, or raise an error for unknown
fields in the data. Use `EXCLUDE`, `INCLUDE` or `RAISE`.
If `None`, the value for `self.unknown` is used.
:return: Deserialized data
.. versionadded:: 1.0.0
.. versionchanged:: 3.0.0b7
This method returns the deserialized data rather than a ``(data, errors)`` duple.
A :exc:`ValidationError <marshmallow.exceptions.ValidationError>` is raised
if invalid data are passed.
"""
data = self.opts.render_module.loads(json_data, **kwargs)
return self.load(data, many=many, partial=partial, unknown=unknown)
def _run_validator(
self,
validator_func,
output,
*,
original_data,
error_store,
many,
partial,
pass_original,
index=None
):
try:
if pass_original: # Pass original, raw data (before unmarshalling)
validator_func(output, original_data, partial=partial, many=many)
else:
validator_func(output, partial=partial, many=many)
except ValidationError as err:
error_store.store_error(err.messages, err.field_name, index=index)
def validate(
self,
data: typing.Mapping,
*,
many: typing.Optional[bool] = None,
partial: typing.Optional[typing.Union[bool, types.StrSequenceOrSet]] = None
) -> typing.Dict[str, typing.List[str]]:
"""Validate `data` against the schema, returning a dictionary of
validation errors.
:param data: The data to validate.
:param many: Whether to validate `data` as a collection. If `None`, the
value for `self.many` is used.
:param partial: Whether to ignore missing fields and not require
any fields declared. Propagates down to ``Nested`` fields as well. If
its value is an iterable, only missing fields listed in that iterable
will be ignored. Use dot delimiters to specify nested fields.
:return: A dictionary of validation errors.
.. versionadded:: 1.1.0
"""
try:
self._do_load(data, many=many, partial=partial, postprocess=False)
except ValidationError as exc:
return typing.cast(typing.Dict[str, typing.List[str]], exc.messages)
return {}
##### Private Helpers #####
def _do_load(
self,
data: typing.Union[
typing.Mapping[str, typing.Any],
typing.Iterable[typing.Mapping[str, typing.Any]],
],
*,
many: typing.Optional[bool] = None,
partial: typing.Optional[typing.Union[bool, types.StrSequenceOrSet]] = None,
unknown: typing.Optional[str] = None,
postprocess: bool = True
):
"""Deserialize `data`, returning the deserialized result.
This method is private API.
:param data: The data to deserialize.
:param many: Whether to deserialize `data` as a collection. If `None`, the
value for `self.many` is used.
:param partial: Whether to ignore missing fields and not require
any fields declared. If its value is an iterable, only fields listed
in that iterable will be allowed to be missing. If `True`, all fields will be allowed to be missing.
If `None`, the value for `self.partial` is used.
:param unknown: Whether to exclude, include, or raise an error for unknown
fields in the data. Use `EXCLUDE`, `INCLUDE` or `RAISE`.
If `None`, the value for `self.unknown` is used.
:param postprocess: Whether to run post_load methods.
:return: Deserialized data
"""
error_store = ErrorStore()
errors = {} # type: typing.Dict[str, typing.List[str]]
many = self.many if many is None else bool(many)
unknown = unknown or self.unknown
if partial is None:
partial = self.partial
# Run preprocessors
if self._has_processors(PRE_LOAD):
try:
processed_data = self._invoke_load_processors(
PRE_LOAD, data, many=many, original_data=data, partial=partial
)
except ValidationError as err:
errors = err.normalized_messages()
result = (
None
) # type: typing.Optional[typing.Union[typing.List, typing.Dict]]
else:
processed_data = data
if not errors:
# Deserialize data
result = self._deserialize(
processed_data,
error_store=error_store,
many=many,
partial=partial,
unknown=unknown,
)
# Run field-level validation
self._invoke_field_validators(
error_store=error_store, data=result, many=many
)
# Run schema-level validation
if self._has_processors(VALIDATES_SCHEMA):
field_errors = bool(error_store.errors)
self._invoke_schema_validators(
error_store=error_store,
pass_many=True,
data=result,
original_data=data,
many=many,
partial=partial,
field_errors=field_errors,
)
self._invoke_schema_validators(
error_store=error_store,
pass_many=False,
data=result,
original_data=data,
many=many,
partial=partial,
field_errors=field_errors,
)
errors = error_store.errors
# Run post processors
if not errors and postprocess and self._has_processors(POST_LOAD):
try:
result = self._invoke_load_processors(
POST_LOAD,
result,
many=many,
original_data=data,
partial=partial,
)
except ValidationError as err:
errors = err.normalized_messages()
if errors:
exc = ValidationError(errors, data=data, valid_data=result)
self.handle_error(exc, data, many=many, partial=partial)
raise exc
return result
def _normalize_nested_options(self) -> None:
"""Apply then flatten nested schema options.
This method is private API.
"""
if self.only is not None:
# Apply the only option to nested fields.
self.__apply_nested_option("only", self.only, "intersection")
# Remove the child field names from the only option.
self.only = self.set_class([field.split(".", 1)[0] for field in self.only])
if self.exclude:
# Apply the exclude option to nested fields.
self.__apply_nested_option("exclude", self.exclude, "union")
# Remove the parent field names from the exclude option.
self.exclude = self.set_class(
[field for field in self.exclude if "." not in field]
)
def __apply_nested_option(self, option_name, field_names, set_operation) -> None:
"""Apply nested options to nested fields"""
# Split nested field names on the first dot.
nested_fields = [name.split(".", 1) for name in field_names if "." in name]
# Partition the nested field names by parent field.
nested_options = defaultdict(list) # type: defaultdict
for parent, nested_names in nested_fields:
nested_options[parent].append(nested_names)
# Apply the nested field options.
for key, options in iter(nested_options.items()):
new_options = self.set_class(options)
original_options = getattr(self.declared_fields[key], option_name, ())
if original_options:
if set_operation == "union":
new_options |= self.set_class(original_options)
if set_operation == "intersection":
new_options &= self.set_class(original_options)
setattr(self.declared_fields[key], option_name, new_options)
def _init_fields(self) -> None:
"""Update self.fields, self.load_fields, and self.dump_fields based on schema options.
This method is private API.
"""
if self.opts.fields:
available_field_names = self.set_class(self.opts.fields)
else:
available_field_names = self.set_class(self.declared_fields.keys())
if self.opts.additional:
available_field_names |= self.set_class(self.opts.additional)
invalid_fields = self.set_class()
if self.only is not None:
# Return only fields specified in only option
field_names = self.set_class(self.only)
invalid_fields |= field_names - available_field_names
else:
field_names = available_field_names
# If "exclude" option or param is specified, remove those fields.
if self.exclude:
# Note that this isn't available_field_names, since we want to
# apply "only" for the actual calculation.
field_names = field_names - self.exclude
invalid_fields |= self.exclude - available_field_names
if invalid_fields:
message = "Invalid fields for {}: {}.".format(self, invalid_fields)
raise ValueError(message)
fields_dict = self.dict_class()
for field_name in field_names:
field_obj = self.declared_fields.get(field_name, ma_fields.Inferred())
self._bind_field(field_name, field_obj)
fields_dict[field_name] = field_obj
load_fields, dump_fields = self.dict_class(), self.dict_class()
for field_name, field_obj in fields_dict.items():
if not field_obj.dump_only:
load_fields[field_name] = field_obj
if not field_obj.load_only:
dump_fields[field_name] = field_obj
dump_data_keys = [
field_obj.data_key if field_obj.data_key is not None else name
for name, field_obj in dump_fields.items()
]
if len(dump_data_keys) != len(set(dump_data_keys)):
data_keys_duplicates = {
x for x in dump_data_keys if dump_data_keys.count(x) > 1
}
raise ValueError(
"The data_key argument for one or more fields collides "
"with another field's name or data_key argument. "
"Check the following field names and "
"data_key arguments: {}".format(list(data_keys_duplicates))
)
load_attributes = [obj.attribute or name for name, obj in load_fields.items()]
if len(load_attributes) != len(set(load_attributes)):
attributes_duplicates = {
x for x in load_attributes if load_attributes.count(x) > 1
}
raise ValueError(
"The attribute argument for one or more fields collides "
"with another field's name or attribute argument. "
"Check the following field names and "
"attribute arguments: {}".format(list(attributes_duplicates))
)
self.fields = fields_dict
self.dump_fields = dump_fields
self.load_fields = load_fields
def on_bind_field(self, field_name: str, field_obj: ma_fields.Field) -> None:
"""Hook to modify a field when it is bound to the `Schema`.
No-op by default.
"""
return None
def _bind_field(self, field_name: str, field_obj: ma_fields.Field) -> None:
"""Bind field to the schema, setting any necessary attributes on the
field (e.g. parent and name).
Also set field load_only and dump_only values if field_name was
specified in ``class Meta``.
"""
if field_name in self.load_only:
field_obj.load_only = True
if field_name in self.dump_only:
field_obj.dump_only = True
try:
field_obj._bind_to_schema(field_name, self)
except TypeError as error:
# Field declared as a class, not an instance. Ignore type checking because
# we handle unsupported arg types, i.e. this is dead code from
# the type checker's perspective.
if isinstance(field_obj, type) and issubclass(field_obj, base.FieldABC):
msg = (
'Field for "{}" must be declared as a '
"Field instance, not a class. "
'Did you mean "fields.{}()"?'.format(field_name, field_obj.__name__)
)
raise TypeError(msg) from error
raise error
self.on_bind_field(field_name, field_obj)
@lru_cache(maxsize=8)
def _has_processors(self, tag) -> bool:
return bool(self._hooks[(tag, True)] or self._hooks[(tag, False)])
def _invoke_dump_processors(
self, tag: str, data, *, many: bool, original_data=None
):
# The pass_many post-dump processors may do things like add an envelope, so
# invoke those after invoking the non-pass_many processors which will expect
# to get a list of items.
data = self._invoke_processors(
tag, pass_many=False, data=data, many=many, original_data=original_data
)
data = self._invoke_processors(
tag, pass_many=True, data=data, many=many, original_data=original_data
)
return data
def _invoke_load_processors(
self,
tag: str,
data,
*,
many: bool,
original_data,
partial: typing.Union[bool, types.StrSequenceOrSet]
):
# This has to invert the order of the dump processors, so run the pass_many
# processors first.
data = self._invoke_processors(
tag,
pass_many=True,
data=data,
many=many,
original_data=original_data,
partial=partial,
)
data = self._invoke_processors(
tag,
pass_many=False,
data=data,
many=many,
original_data=original_data,
partial=partial,
)
return data
def _invoke_field_validators(self, *, error_store: ErrorStore, data, many: bool):
for attr_name in self._hooks[VALIDATES]:
validator = getattr(self, attr_name)
validator_kwargs = validator.__marshmallow_hook__[VALIDATES]
field_name = validator_kwargs["field_name"]
try:
field_obj = self.fields[field_name]
except KeyError as error:
if field_name in self.declared_fields:
continue
raise ValueError(
'"{}" field does not exist.'.format(field_name)
) from error
data_key = (
field_obj.data_key if field_obj.data_key is not None else field_name
)
if many:
for idx, item in enumerate(data):
try:
value = item[field_obj.attribute or field_name]
except KeyError:
pass
else:
validated_value = self._call_and_store(
getter_func=validator,
data=value,
field_name=data_key,
error_store=error_store,
index=(idx if self.opts.index_errors else None),
)
if validated_value is missing:
data[idx].pop(field_name, None)
else:
try:
value = data[field_obj.attribute or field_name]
except KeyError:
pass
else:
validated_value = self._call_and_store(
getter_func=validator,
data=value,
field_name=data_key,
error_store=error_store,
)
if validated_value is missing:
data.pop(field_name, None)
def _invoke_schema_validators(
self,
*,
error_store: ErrorStore,
pass_many: bool,
data,
original_data,
many: bool,
partial: typing.Union[bool, types.StrSequenceOrSet],
field_errors: bool = False
):
for attr_name in self._hooks[(VALIDATES_SCHEMA, pass_many)]:
validator = getattr(self, attr_name)
validator_kwargs = validator.__marshmallow_hook__[
(VALIDATES_SCHEMA, pass_many)
]
if field_errors and validator_kwargs["skip_on_field_errors"]:
continue
pass_original = validator_kwargs.get("pass_original", False)
if many and not pass_many:
for idx, (item, orig) in enumerate(zip(data, original_data)):
self._run_validator(
validator,
item,
original_data=orig,
error_store=error_store,
many=many,
partial=partial,
index=idx,
pass_original=pass_original,
)
else:
self._run_validator(
validator,
data,
original_data=original_data,
error_store=error_store,
many=many,
pass_original=pass_original,
partial=partial,
)
def _invoke_processors(
self,
tag: str,
*,
pass_many: bool,
data,
many: bool,
original_data=None,
**kwargs
):
key = (tag, pass_many)
for attr_name in self._hooks[key]:
# This will be a bound method.
processor = getattr(self, attr_name)
processor_kwargs = processor.__marshmallow_hook__[key]
pass_original = processor_kwargs.get("pass_original", False)
if many and not pass_many:
if pass_original:
data = [
processor(item, original, many=many, **kwargs)
for item, original in zip(data, original_data)
]
else:
data = [processor(item, many=many, **kwargs) for item in data]
else:
if pass_original:
data = processor(data, original_data, many=many, **kwargs)
else:
data = processor(data, many=many, **kwargs)
return data
BaseSchema = Schema # for backwards compatibility
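# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, standalone example of how the load()/loads()/validate() methods
# defined above are typically exercised through the public marshmallow API.
# The schema, field names, and sample payloads below are hypothetical.
def _example_schema_usage():
    from marshmallow import Schema, fields, ValidationError

    class ArtistSchema(Schema):
        name = fields.Str(required=True)
        year = fields.Int()

    schema = ArtistSchema()
    # load() returns deserialized data, raising ValidationError on bad input.
    print(schema.load({"name": "Bowie", "year": 1972}))
    # loads() does the same but parses a JSON string first.
    print(schema.loads('{"name": "Bowie", "year": 1972}'))
    # validate() collects errors into a dict instead of raising.
    print(schema.validate({"year": "not-an-int"}))
    try:
        schema.load({"year": "not-an-int"})
    except ValidationError as err:
        print(err.messages)  # {'name': [...], 'year': [...]}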
| 40.231707
| 97
| 0.584339
|
628a5bf65aee34084af7ff08939f96a0ae9dd24d
| 166
|
py
|
Python
|
accounting/calculations/admin.py
|
m-molecula741/web-accounting-service-for-enterprises
|
6d60abf4f7d95b2b02c40979bfe253e8f311b8b5
|
[
"Apache-2.0"
] | null | null | null |
accounting/calculations/admin.py
|
m-molecula741/web-accounting-service-for-enterprises
|
6d60abf4f7d95b2b02c40979bfe253e8f311b8b5
|
[
"Apache-2.0"
] | null | null | null |
accounting/calculations/admin.py
|
m-molecula741/web-accounting-service-for-enterprises
|
6d60abf4f7d95b2b02c40979bfe253e8f311b8b5
|
[
"Apache-2.0"
] | null | null | null |
from django.contrib import admin
from .models import AccInfo, Income, Expenses
admin.site.register(AccInfo)
admin.site.register(Income)
admin.site.register(Expenses)
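# --- Illustrative extension sketch (not part of the original file) ---
# If the admin list pages later need custom columns or filters, the plain
# admin.site.register() calls above would be replaced by ModelAdmin
# subclasses. The attribute names used below ("amount", "date") are
# hypothetical and would have to exist on the Income model; the sketch is
# kept commented out because Income is already registered above.
#
# @admin.register(Income)
# class IncomeAdmin(admin.ModelAdmin):
#     list_display = ("amount", "date")   # columns shown in the change list
#     list_filter = ("date",)             # sidebar filter
#     ordering = ("-date",)               # newest entries first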
| 27.666667
| 45
| 0.825301
|
b70cf7ce6eedaef70d468a807c678dbced03bd99
| 3,709
|
py
|
Python
|
utils/multithreadeddownloader.py
|
kaustubhhiware/swift
|
7089c4e05fd831b864de46ee37b681a6249909e3
|
[
"MIT"
] | null | null | null |
utils/multithreadeddownloader.py
|
kaustubhhiware/swift
|
7089c4e05fd831b864de46ee37b681a6249909e3
|
[
"MIT"
] | null | null | null |
utils/multithreadeddownloader.py
|
kaustubhhiware/swift
|
7089c4e05fd831b864de46ee37b681a6249909e3
|
[
"MIT"
] | null | null | null |
"""
Performs Multithreaded download
"""
import urllib3
import logging
import os
import sys
import shutil
import threading
import pathlib
from utils import misc
from utils import request
from utils import constants
from utils import calculation
class MultithreadedDownloader:
"""
Main class providing interface for download
"""
def __init__(self):
self.url = None
self.range_left = None
self.range_right = None
self.threads = None
self.filepath = None
logging.getLogger("urllib3").setLevel(logging.WARNING)
def range_download_support(self, resp):
"""
returns boolean value indicating support for range downloading
"""
try:
supported = (resp.headers['Accept-Ranges'] == 'bytes')
except KeyError:
supported = False
return supported
def multithreaded_download(self, ranges_list):
"""
function to perform multithreaded download
"""
# downloading each segment
for f in range(self.threads):
# calling download_range() for each thread
t = threading.Thread(target=request.download_range,
kwargs={
'url': self.url,
'filepath': constants.SERVER_TEMP_DIR + "/temp" + str(f),
'range_left': ranges_list[f][0],
'range_right': ranges_list[f][1],
})
t.daemon = True  # daemon threads exit together with the main thread
t.start()
# call join() on every thread except the main thread;
# this ensures that merging of parts occurs only after each thread has completed downloading
main_thread = threading.current_thread()
for t in threading.enumerate():
if t is main_thread:
continue
t.join()
def merge_multithreaded_download_parts(self):
""" function to perform merging of parts performed by multiple threads on single system """
# merging parts
with open(self.filepath,'wb') as wfd:
for f in range(self.threads):
tempfilepath = constants.SERVER_TEMP_DIR + "/temp" + str(f)
with open(tempfilepath, "rb") as fd:
shutil.copyfileobj(fd, wfd)
# delete copied segment
misc.delete_file(tempfilepath)
def download(self, url, range_left, range_right, filepath, response, threads):
"""
function to perform file download
"""
self.url = url
self.range_right = range_right
self.range_left = range_left
self.filepath = filepath
self.threads = threads
# if server supports segmented download
if self.range_download_support(response):
# get ranges for download for each thread
ranges_list = calculation.get_download_ranges_list( self.range_left,
self.range_right,
self.threads)
# perform multithreaded download on single system
self.multithreaded_download(ranges_list)
# merge multithreaded download parts
self.merge_multithreaded_download_parts()
else:
misc.print_log('''[i] Server doesn't support multithreaded downloads!
Download will be performed using single thread, on master system.''')
request.download_range( self.url,
self.filepath,
self.range_left,
self.range_right)
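# --- Illustrative sketch (not part of the original module) ---
# The downloader above delegates range splitting to
# calculation.get_download_ranges_list(range_left, range_right, threads).
# The helper below is only a guess at that behaviour (a list of inclusive
# (left, right) pairs, one per thread), shown to make the threading logic
# easier to follow; the real project helper may differ.
def _split_byte_range_example(range_left, range_right, threads):
    """Split an inclusive byte range into `threads` contiguous sub-ranges."""
    total = range_right - range_left + 1
    chunk = total // threads
    ranges = []
    for i in range(threads):
        left = range_left + i * chunk
        # The last chunk absorbs any remainder so the whole range is covered.
        right = range_right if i == threads - 1 else left + chunk - 1
        ranges.append((left, right))
    return ranges

# Example: _split_byte_range_example(0, 999, 4)
# -> [(0, 249), (250, 499), (500, 749), (750, 999)]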
| 36.009709
| 99
| 0.571852
|
fc7752c444843770bea7d4787963de010a58d57d
| 58,557
|
py
|
Python
|
python/paddle/optimizer/lr.py
|
ikingye/Paddle
|
766b35152713d4410d4c5b05a3fa5e5a64a6fa60
|
[
"Apache-2.0"
] | null | null | null |
python/paddle/optimizer/lr.py
|
ikingye/Paddle
|
766b35152713d4410d4c5b05a3fa5e5a64a6fa60
|
[
"Apache-2.0"
] | null | null | null |
python/paddle/optimizer/lr.py
|
ikingye/Paddle
|
766b35152713d4410d4c5b05a3fa5e5a64a6fa60
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import numpy
import warnings
from paddle import Tensor
__all__ = [
'LRScheduler', 'NoamDecay', 'PiecewiseDecay', 'NaturalExpDecay',
'InverseTimeDecay', 'PolynomialDecay', 'LinearWarmup', 'ExponentialDecay',
'MultiStepDecay', 'StepDecay', 'LambdaDecay', 'ReduceOnPlateau',
'CosineAnnealingDecay'
]
class LRScheduler(object):
"""
LRScheduler Base class. Define the common interface of a learning rate scheduler.
Users can import it with ``from paddle.optimizer.lr import LRScheduler`` ,
then subclass it and provide a custom implementation of ``get_lr()`` .
Otherwise, a ``NotImplementedError`` exception will be thrown.
Args:
learning_rate (float): The initial learning rate. It is a python float number.
last_epoch (int, optional): The index of last epoch. Can be set to restart training. Default: -1, means initial learning rate.
verbose (bool, optional): If ``True``, prints a message to stdout for each update. Default: ``False`` .
Returns:
instance to schedule learning rate.
Examples:
Here is an example of a simple ``StepDecay`` implementation.
.. code-block:: python
import paddle
from paddle.optimizer.lr import LRScheduler
class StepDecay(LRScheduler):
def __init__(self,
learning_rate,
step_size,
gamma=0.1,
last_epoch=-1,
verbose=False):
if not isinstance(step_size, int):
raise TypeError(
"The type of 'step_size' must be 'int', but received %s." %
type(step_size))
if gamma >= 1.0:
raise ValueError('gamma should be < 1.0.')
self.step_size = step_size
self.gamma = gamma
super(StepDecay, self).__init__(learning_rate, last_epoch, verbose)
def get_lr(self):
i = self.last_epoch // self.step_size
return self.base_lr * (self.gamma**i)
"""
def __init__(self, learning_rate=0.1, last_epoch=-1, verbose=False):
if not isinstance(learning_rate, (float, int)):
raise TypeError(
"The type of learning rate must be float, but received {}".
format(type(learning_rate)))
self.base_lr = float(learning_rate)
self.last_lr = float(learning_rate)
self.last_epoch = last_epoch
self.verbose = verbose
self._var_name = None
self.step()
def __call__(self):
"""
Return the latest computed learning rate of the current epoch.
"""
return self.last_lr
def step(self, epoch=None):
"""
``step`` should be called after ``optimizer.step`` . It will update the learning rate in optimizer according to current ``epoch`` .
The new learning rate will take effect on next ``optimizer.step`` .
Args:
epoch (int, None): specify current epoch. Default: None. Auto-increment from last_epoch=-1.
Returns:
None
"""
if epoch is None:
self.last_epoch += 1
self.last_lr = self.get_lr()
else:
self.last_epoch = epoch
if hasattr(self, "_get_closed_form_lr"):
self.last_lr = self._get_closed_form_lr()
else:
self.last_lr = self.get_lr()
if self.verbose:
print('Epoch {}: {} set learning rate to {}.'.format(
self.last_epoch, self.__class__.__name__, self.last_lr))
def state_dict(self):
"""
Returns the state of the scheduler as a :class:`dict`.
It is a subset of ``self.__dict__`` .
"""
self.state_keys()
state_dict = {}
for key in self.keys:
if key not in self.__dict__:
continue
value = self.__dict__[key]
if isinstance(value, Tensor):
assert value.shape == [
1
], "shape of Tensor in state_dict must be [1] {}".format(
value.shape)
value = value.numpy()[0]
state_dict[key] = value
return state_dict
# For those subclass who overload LRScheduler, "last_epoch, last_lr" will be saved by default.
# (Note): you can change it for your subclass.
def state_keys(self):
"""
For subclasses that override ``LRScheduler`` (the base class): by default, "last_epoch" and "last_lr" will be saved by ``self.keys = ['last_epoch', 'last_lr']`` .
``last_epoch`` is the current epoch number, and ``last_lr`` is the current learning rate.
If you want to change the default behavior, you should provide a custom implementation of ``_state_keys()`` to redefine ``self.keys`` .
"""
self.keys = ['last_epoch', 'last_lr']
def set_state_dict(self, state_dict):
"""
Loads the schedulers state.
"""
self.state_keys()
for key in self.keys:
if key in state_dict:
self.__dict__[key] = state_dict[key]
else:
raise RuntimeError(
"Please check whether state_dict is correct for optimizer. Can't find [ {} ] in state_dict".
format(key))
if len(state_dict) > len(self.keys):
warnings.warn(
"There are some unused values in state_dict. Maybe the optimizer have different 'LearningRateDecay' when invoking state_dict and set_dict"
)
# alias for set_state_dict
set_dict = set_state_dict
def get_lr(self):
"""
Subclasses that override ``LRScheduler`` (the base class) should provide a custom implementation of ``get_lr()`` .
Otherwise, a ``NotImplementedError`` exception will be thrown.
"""
# calculate by python float
raise NotImplementedError
class NoamDecay(LRScheduler):
"""
Applies Noam Decay to the initial learning rate.
The algorithm can be described as following.
.. math::
new\_learning\_rate = learning\_rate * d_{model}^{-0.5} * min(epoch^{-0.5}, epoch * warmup\_steps^{-1.5})
Please reference `attention is all you need <https://arxiv.org/pdf/1706.03762.pdf>`_
Args:
d_model(int): The dimensionality of input and output feature vector of model. It is a python int number.
warmup_steps(int): The number of warmup steps. A super parameter. It is a python int number
learning_rate (float): The initial learning rate. It is a python float number. Default: 1.0.
last_epoch (int, optional): The index of last epoch. Can be set to restart training. Default: -1, means initial learning rate.
verbose (bool, optional): If ``True``, prints a message to stdout for each update. Default: ``False`` .
Returns:
``NoamDecay`` instance to schedule learning rate.
Examples:
.. code-block:: python
import paddle
import numpy as np
# train on default dynamic graph mode
linear = paddle.nn.Linear(10, 10)
scheduler = paddle.optimizer.lr.NoamDecay(d_model=0.01, warmup_steps=100, verbose=True)
sgd = paddle.optimizer.SGD(learning_rate=scheduler, parameters=linear.parameters())
for epoch in range(20):
for batch_id in range(2):
x = paddle.uniform([10, 10])
out = linear(x)
loss = paddle.reduce_mean(out)
loss.backward()
sgd.step()
sgd.clear_gradients()
scheduler.step()
# train on static graph mode
paddle.enable_static()
main_prog = paddle.static.Program()
start_prog = paddle.static.Program()
with paddle.static.program_guard(main_prog, start_prog):
x = paddle.static.data(name='x', shape=[None, 4, 5])
y = paddle.static.data(name='y', shape=[None, 4, 5])
z = paddle.static.nn.fc(x, 100)
loss = paddle.mean(z)
scheduler = paddle.optimizer.lr.NoamDecay(d_model=0.01, warmup_steps=100, verbose=True)
sgd = paddle.optimizer.SGD(learning_rate=scheduler)
sgd.minimize(loss)
exe = paddle.static.Executor()
exe.run(start_prog)
for epoch in range(20):
for batch_id in range(2):
out = exe.run(
main_prog,
feed={
'x': np.random.randn(3, 4, 5).astype('float32'),
'y': np.random.randn(3, 4, 5).astype('float32')
},
fetch_list=loss.name)
scheduler.step()
"""
def __init__(self,
d_model,
warmup_steps,
learning_rate=1.0,
last_epoch=-1,
verbose=False):
self.d_model = d_model
self.warmup_steps = warmup_steps
super(NoamDecay, self).__init__(learning_rate, last_epoch, verbose)
def get_lr(self):
if self.last_epoch == 0:
a = 1
else:
a = self.last_epoch**-0.5
b = self.warmup_steps**-1.5 * self.last_epoch
return self.base_lr * (self.d_model**-0.5) * min(a, b)
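# --- Illustrative check (not part of the original module) ---
# A standalone recomputation of the Noam schedule implemented in
# NoamDecay.get_lr above, using arbitrary example values; it only assumes
# the formula given in the class docstring.
def _noam_lr_example(base_lr, d_model, warmup_steps, epoch):
    a = 1 if epoch == 0 else epoch ** -0.5
    b = warmup_steps ** -1.5 * epoch
    return base_lr * (d_model ** -0.5) * min(a, b)

# The rate grows linearly during warmup and then decays as epoch ** -0.5:
# _noam_lr_example(1.0, 512, 4000, 100)   < _noam_lr_example(1.0, 512, 4000, 4000)
# _noam_lr_example(1.0, 512, 4000, 20000) < _noam_lr_example(1.0, 512, 4000, 4000)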
class PiecewiseDecay(LRScheduler):
"""
Piecewise learning rate scheduler.
The algorithm can be described as the code below:
.. code-block:: text
boundaries = [100, 200]
values = [1.0, 0.5, 0.1]
if epoch < 100:
learning_rate = 1.0
elif 100 <= global_step < 200:
learning_rate = 0.5
else:
learning_rate = 0.1
Args:
boundaries(list): A list of steps numbers. The type of element in the list is python int.
values(list): A list of learning rate values that will be picked during different epoch boundaries.
The type of element in the list is python float.
last_epoch (int, optional): The index of last epoch. Can be set to restart training. Default: -1, means initial learning rate.
verbose (bool, optional): If ``True``, prints a message to stdout for each update. Default: ``False`` .
Returns:
``PiecewiseDecay`` instance to schedule learning rate.
Examples:
.. code-block:: python
import paddle
import numpy as np
# train on default dynamic graph mode
linear = paddle.nn.Linear(10, 10)
scheduler = paddle.optimizer.lr.PiecewiseDecay(boundaries=[3, 6, 9], values=[0.1, 0.2, 0.3, 0.4], verbose=True)
sgd = paddle.optimizer.SGD(learning_rate=scheduler, parameters=linear.parameters())
for epoch in range(20):
for batch_id in range(2):
x = paddle.uniform([10, 10])
out = linear(x)
loss = paddle.reduce_mean(out)
loss.backward()
sgd.step()
sgd.clear_gradients()
scheduler.step()
# train on static graph mode
paddle.enable_static()
main_prog = paddle.static.Program()
start_prog = paddle.static.Program()
with paddle.static.program_guard(main_prog, start_prog):
x = paddle.static.data(name='x', shape=[None, 4, 5])
y = paddle.static.data(name='y', shape=[None, 4, 5])
z = paddle.static.nn.fc(x, 100)
loss = paddle.mean(z)
scheduler = paddle.optimizer.lr.PiecewiseDecay(boundaries=[3, 6, 9], values=[0.1, 0.2, 0.3, 0.4], verbose=True)
sgd = paddle.optimizer.SGD(learning_rate=scheduler)
sgd.minimize(loss)
exe = paddle.static.Executor()
exe.run(start_prog)
for epoch in range(20):
for batch_id in range(2):
out = exe.run(
main_prog,
feed={
'x': np.random.randn(3, 4, 5).astype('float32'),
'y': np.random.randn(3, 4, 5).astype('float32')
},
fetch_list=loss.name)
scheduler.step()
"""
def __init__(self, boundaries, values, last_epoch=-1, verbose=False):
self.boundaries = boundaries
self.values = values
super(PiecewiseDecay, self).__init__(
last_epoch=last_epoch, verbose=verbose)
def get_lr(self):
for i in range(len(self.boundaries)):
if self.last_epoch < self.boundaries[i]:
return self.values[i]
return self.values[len(self.values) - 1]
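# --- Illustrative equivalent (not part of the original module) ---
# PiecewiseDecay.get_lr above scans the boundaries linearly; the same lookup
# can be expressed with bisect, shown here only as a standalone sketch with
# the example numbers from the docstring.
import bisect

def _piecewise_lr_example(boundaries, values, epoch):
    # values has len(boundaries) + 1 entries; bisect_right picks the segment.
    return values[bisect.bisect_right(boundaries, epoch)]

# _piecewise_lr_example([100, 200], [1.0, 0.5, 0.1], 99)  -> 1.0
# _piecewise_lr_example([100, 200], [1.0, 0.5, 0.1], 100) -> 0.5
# _piecewise_lr_example([100, 200], [1.0, 0.5, 0.1], 250) -> 0.1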
class NaturalExpDecay(LRScheduler):
"""
Applies natural exponential decay to the initial learning rate.
The algorithm can be described as following:
.. math::
new\_learning\_rate = learning\_rate * e^{- gamma * epoch}
Args:
learning_rate (float): The initial learning rate. It is a python float number.
gamma (float, optional): A Ratio to update the learning rate. Default: 0.1.
last_epoch (int, optional): The index of last epoch. Can be set to restart training. Default: -1, means initial learning rate.
verbose (bool, optional): If ``True``, prints a message to stdout for each update. Default: ``False`` .
Returns:
``NaturalExpDecay`` instance to schedule learning rate.
Examples:
.. code-block:: python
import paddle
import numpy as np
# train on default dynamic graph mode
linear = paddle.nn.Linear(10, 10)
scheduler = paddle.optimizer.lr.NaturalExpDecay(learning_rate=0.5, gamma=0.1, verbose=True)
sgd = paddle.optimizer.SGD(learning_rate=scheduler, parameters=linear.parameters())
for epoch in range(20):
for batch_id in range(2):
x = paddle.uniform([10, 10])
out = linear(x)
loss = paddle.reduce_mean(out)
loss.backward()
sgd.step()
sgd.clear_gradients()
scheduler.step()
# train on static graph mode
paddle.enable_static()
main_prog = paddle.static.Program()
start_prog = paddle.static.Program()
with paddle.static.program_guard(main_prog, start_prog):
x = paddle.static.data(name='x', shape=[None, 4, 5])
y = paddle.static.data(name='y', shape=[None, 4, 5])
z = paddle.static.nn.fc(x, 100)
loss = paddle.mean(z)
scheduler = paddle.optimizer.lr.NaturalExpDecay(learning_rate=0.5, gamma=0.1, verbose=True)
sgd = paddle.optimizer.SGD(learning_rate=scheduler)
sgd.minimize(loss)
exe = paddle.static.Executor()
exe.run(start_prog)
for epoch in range(20):
for batch_id in range(2):
out = exe.run(
main_prog,
feed={
'x': np.random.randn(3, 4, 5).astype('float32'),
'y': np.random.randn(3, 4, 5).astype('float32')
},
fetch_list=loss.name)
scheduler.step()
"""
def __init__(self, learning_rate, gamma, last_epoch=-1, verbose=False):
self.gamma = gamma
super(NaturalExpDecay, self).__init__(learning_rate, last_epoch,
verbose)
def get_lr(self):
return self.base_lr * math.exp(-1 * self.gamma * self.last_epoch)
class InverseTimeDecay(LRScheduler):
"""
Applies inverse time decay to the initial learning rate.
The algorithm can be described as following:
.. math::
new\_learning\_rate = \\frac{learning\_rate}{1 + gamma * epoch}
Args:
learning_rate (float): The initial learning rate. It is a python float number.
gamma (float, optional): The decay rate. The learning rate is computed as ``new_lr = learning_rate / (1 + gamma * epoch)`` .
Default: 0.1.
last_epoch (int, optional): The index of last epoch. Can be set to restart training. Default: -1, means initial learning rate.
verbose (bool, optional): If ``True``, prints a message to stdout for each update. Default: ``False`` .
Returns:
``InverseTimeDecay`` instance to schedule learning rate.
Examples:
.. code-block:: python
import paddle
import numpy as np
# train on default dynamic graph mode
linear = paddle.nn.Linear(10, 10)
scheduler = paddle.optimizer.lr.InverseTimeDecay(learning_rate=0.5, gamma=0.1, verbose=True)
sgd = paddle.optimizer.SGD(learning_rate=scheduler, parameters=linear.parameters())
for epoch in range(20):
for batch_id in range(2):
x = paddle.uniform([10, 10])
out = linear(x)
loss = paddle.reduce_mean(out)
loss.backward()
sgd.step()
sgd.clear_gradients()
scheduler.step()
# train on static graph mode
paddle.enable_static()
main_prog = paddle.static.Program()
start_prog = paddle.static.Program()
with paddle.static.program_guard(main_prog, start_prog):
x = paddle.static.data(name='x', shape=[None, 4, 5])
y = paddle.static.data(name='y', shape=[None, 4, 5])
z = paddle.static.nn.fc(x, 100)
loss = paddle.mean(z)
scheduler = paddle.optimizer.lr.InverseTimeDecay(learning_rate=0.5, gamma=0.1, verbose=True)
sgd = paddle.optimizer.SGD(learning_rate=scheduler)
sgd.minimize(loss)
exe = paddle.static.Executor()
exe.run(start_prog)
for epoch in range(20):
for batch_id in range(2):
out = exe.run(
main_prog,
feed={
'x': np.random.randn(3, 4, 5).astype('float32'),
'y': np.random.randn(3, 4, 5).astype('float32')
},
fetch_list=loss.name)
scheduler.step()
"""
def __init__(self, learning_rate, gamma, last_epoch=-1, verbose=False):
self.gamma = gamma
super(InverseTimeDecay, self).__init__(learning_rate, last_epoch,
verbose)
def get_lr(self):
return self.base_lr / (1 + self.gamma * self.last_epoch)
class PolynomialDecay(LRScheduler):
"""
Applies polynomial decay to the initial learning rate.
The algorithm can be described as following.
If cycle is set to True, then:
.. math::
decay\_steps & = decay\_steps * math.ceil(\\frac{epoch}{decay\_steps})
new\_learning\_rate & = (learning\_rate-end\_lr)*(1-\\frac{epoch}{decay\_steps})^{power}+end\_lr
If cycle is set to False, then:
.. math::
epoch & = min(epoch, decay\_steps)
new\_learning\_rate & = (learning\_rate-end\_lr)*(1-\\frac{epoch}{decay\_steps})^{power}+end\_lr
Args:
learning_rate (float): The initial learning rate. It is a python float number.
decay_steps(int): The decay step size. It determines the decay cycle.
end_lr(float, optional): The minimum final learning rate. Default: 0.0001.
power(float, optional): Power of polynomial. Default: 1.0.
cycle(bool, optional): Whether the learning rate rises again. If True, then the learning rate will rise when it decreases
to ``end_lr`` . If False, the learning rate is monotone decreasing. Default: False.
last_epoch (int, optional): The index of last epoch. Can be set to restart training. Default: -1, means initial learning rate.
verbose (bool, optional): If ``True``, prints a message to stdout for each update. Default: ``False`` .
Returns:
``PolynomialDecay`` instance to schedule learning rate.
Examples:
.. code-block:: python
import paddle
import numpy as np
# train on default dynamic graph mode
linear = paddle.nn.Linear(10, 10)
scheduler = paddle.optimizer.lr.PolynomialDecay(learning_rate=0.5, decay_steps=20, verbose=True)
sgd = paddle.optimizer.SGD(learning_rate=scheduler, parameters=linear.parameters())
for epoch in range(20):
for batch_id in range(2):
x = paddle.uniform([10, 10])
out = linear(x)
loss = paddle.reduce_mean(out)
loss.backward()
sgd.step()
sgd.clear_gradients()
scheduler.step()
# train on static graph mode
paddle.enable_static()
main_prog = paddle.static.Program()
start_prog = paddle.static.Program()
with paddle.static.program_guard(main_prog, start_prog):
x = paddle.static.data(name='x', shape=[None, 4, 5])
y = paddle.static.data(name='y', shape=[None, 4, 5])
z = paddle.static.nn.fc(x, 100)
loss = paddle.mean(z)
scheduler = paddle.optimizer.lr.PolynomialDecay(learning_rate=0.5, decay_steps=20, verbose=True)
sgd = paddle.optimizer.SGD(learning_rate=scheduler)
sgd.minimize(loss)
exe = paddle.static.Executor()
exe.run(start_prog)
for epoch in range(20):
for batch_id in range(2):
out = exe.run(
main_prog,
feed={
'x': np.random.randn(3, 4, 5).astype('float32'),
'y': np.random.randn(3, 4, 5).astype('float32')
},
fetch_list=loss.name)
scheduler.step()
"""
def __init__(self,
learning_rate,
decay_steps,
end_lr=0.0001,
power=1.0,
cycle=False,
last_epoch=-1,
verbose=False):
self.decay_steps = decay_steps
self.end_lr = end_lr
self.power = power
self.cycle = cycle
super(PolynomialDecay, self).__init__(learning_rate, last_epoch,
verbose)
def get_lr(self):
tmp_epoch_num = self.last_epoch
tmp_decay_steps = self.decay_steps
if self.cycle:
div_res = math.ceil(
float(self.last_epoch) / float(self.decay_steps))
if self.last_epoch == 0:
div_res = 1
tmp_decay_steps = self.decay_steps * div_res
else:
tmp_epoch_num = min(self.last_epoch, self.decay_steps)
return (self.base_lr - self.end_lr) * (
(1 - float(tmp_epoch_num) / float(tmp_decay_steps)
)**self.power) + self.end_lr
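# --- Illustrative check (not part of the original module) ---
# A standalone recomputation of PolynomialDecay.get_lr above for the
# non-cyclic case (cycle=False), with example values, showing how the rate
# moves from the initial learning_rate towards end_lr.
def _polynomial_lr_example(base_lr, decay_steps, end_lr, power, epoch):
    epoch = min(epoch, decay_steps)
    return (base_lr - end_lr) * (1 - epoch / decay_steps) ** power + end_lr

# With base_lr=0.5, decay_steps=20, end_lr=0.0001, power=1.0:
# epoch 0  -> 0.5
# epoch 10 -> roughly 0.25 (halfway between 0.5 and 0.0001)
# epoch 20 and beyond -> 0.0001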
class LinearWarmup(LRScheduler):
"""
Linear learning rate warm-up strategy. The learning rate is first warmed up linearly before the normal learning rate scheduler takes over.
For more information, please refer to `Bag of Tricks for Image Classification with Convolutional Neural Networks <https://arxiv.org/abs/1812.01187>`_
When epoch < warmup_steps, learning rate is updated as:
.. math::
lr = start\_lr + (end\_lr - start\_lr) * \\frac{epoch}{warmup\_steps}
where start_lr is the initial learning rate, and end_lr is the final learning rate;
When epoch >= warmup_steps, learning rate is updated as:
.. math::
lr = learning_rate
where ``learning_rate`` is float or any subclass of ``LRScheduler`` .
Args:
learning_rate (float|LRScheduler): The learning rate after warm-up. It is a python float number or any subclass of ``LRScheduler`` .
warmup_steps (int): total steps of warm up.
start_lr (float): Initial learning rate of warm up.
end_lr (float): Final learning rate of warm up.
last_epoch (int, optional): The index of last epoch. Can be set to restart training. Default: -1, means initial learning rate.
verbose (bool, optional): If ``True``, prints a message to stdout for each update. Default: ``False`` .
Returns:
``LinearWarmup`` instance to schedule learning rate.
Examples:
.. code-block:: python
import paddle
import numpy as np
# train on default dynamic graph mode
linear = paddle.nn.Linear(10, 10)
scheduler = paddle.optimizer.lr.LinearWarmup(
learning_rate=0.5, warmup_steps=20, start_lr=0, end_lr=0.5, verbose=True)
sgd = paddle.optimizer.SGD(learning_rate=scheduler, parameters=linear.parameters())
for epoch in range(20):
for batch_id in range(2):
x = paddle.uniform([10, 10])
out = linear(x)
loss = paddle.reduce_mean(out)
loss.backward()
sgd.step()
sgd.clear_gradients()
scheduler.step()
# train on static graph mode
paddle.enable_static()
main_prog = paddle.static.Program()
start_prog = paddle.static.Program()
with paddle.static.program_guard(main_prog, start_prog):
x = paddle.static.data(name='x', shape=[None, 4, 5])
y = paddle.static.data(name='y', shape=[None, 4, 5])
z = paddle.static.nn.fc(x, 100)
loss = paddle.mean(z)
scheduler = paddle.optimizer.lr.LinearWarmup(
learning_rate=0.5, warmup_steps=20, start_lr=0, end_lr=0.5, verbose=True)
sgd = paddle.optimizer.SGD(learning_rate=scheduler)
sgd.minimize(loss)
exe = paddle.static.Executor()
exe.run(start_prog)
for epoch in range(20):
for batch_id in range(2):
out = exe.run(
main_prog,
feed={
'x': np.random.randn(3, 4, 5).astype('float32'),
'y': np.random.randn(3, 4, 5).astype('float32')
},
fetch_list=loss.name)
scheduler.step()
"""
def __init__(self,
learning_rate,
warmup_steps,
start_lr,
end_lr,
last_epoch=-1,
verbose=False):
type_check = isinstance(learning_rate, float) or isinstance(
learning_rate, int) or isinstance(learning_rate, LRScheduler)
if not type_check:
raise TypeError(
"the type of learning_rate should be [int, float or LRScheduler], the current type is {}".
format(learning_rate))
self.learning_rate = learning_rate
self.warmup_steps = warmup_steps
self.start_lr = start_lr
self.end_lr = end_lr
assert end_lr > start_lr, "end_lr {} must be greater than start_lr {}".format(
end_lr, start_lr)
super(LinearWarmup, self).__init__(start_lr, last_epoch, verbose)
def get_lr(self):
if self.last_epoch < self.warmup_steps:
return (self.end_lr - self.start_lr) * float(
self.last_epoch) / float(self.warmup_steps) + self.start_lr
else:
if isinstance(self.learning_rate, LRScheduler):
self.learning_rate.step()
return self.learning_rate()
return self.learning_rate
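# --- Illustrative check (not part of the original module) ---
# Standalone values for the warm-up phase implemented in LinearWarmup.get_lr
# above, using the same configuration as the docstring example
# (warmup_steps=20, start_lr=0, end_lr=0.5).
def _linear_warmup_lr_example(start_lr, end_lr, warmup_steps, epoch):
    return (end_lr - start_lr) * epoch / warmup_steps + start_lr

# epoch 0  -> 0.0
# epoch 5  -> 0.125
# epoch 10 -> 0.25
# epoch 20 -> warm-up finished; the wrapped learning_rate takes over.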
class ExponentialDecay(LRScheduler):
"""
Update learning rate by `gamma` each epoch.
The algorithm can be described as following.
.. math::
new\_learning\_rate = last\_learning\_rate * gamma
Args:
learning_rate (float): The initial learning rate. It is a python float number.
gamma (float): The Ratio that the learning rate will be reduced. ``new_lr = origin_lr * gamma`` .
It should be less than 1.0.
last_epoch (int, optional): The index of last epoch. Can be set to restart training. Default: -1, means initial learning rate.
verbose (bool, optional): If ``True``, prints a message to stdout for each update. Default: ``False`` .
Returns:
``ExponentialDecay`` instance to schedule learning rate.
Examples:
.. code-block:: python
import paddle
import numpy as np
# train on default dynamic graph mode
linear = paddle.nn.Linear(10, 10)
scheduler = paddle.optimizer.lr.ExponentialDecay(learning_rate=0.5, gamma=0.9, verbose=True)
sgd = paddle.optimizer.SGD(learning_rate=scheduler, parameters=linear.parameters())
for epoch in range(20):
for batch_id in range(2):
x = paddle.uniform([10, 10])
out = linear(x)
loss = paddle.reduce_mean(out)
loss.backward()
sgd.step()
sgd.clear_gradients()
scheduler.step()
# train on static graph mode
paddle.enable_static()
main_prog = paddle.static.Program()
start_prog = paddle.static.Program()
with paddle.static.program_guard(main_prog, start_prog):
x = paddle.static.data(name='x', shape=[None, 4, 5])
y = paddle.static.data(name='y', shape=[None, 4, 5])
z = paddle.static.nn.fc(x, 100)
loss = paddle.mean(z)
scheduler = paddle.optimizer.lr.ExponentialDecay(learning_rate=0.5, gamma=0.9, verbose=True)
sgd = paddle.optimizer.SGD(learning_rate=scheduler)
sgd.minimize(loss)
exe = paddle.static.Executor()
exe.run(start_prog)
for epoch in range(20):
for batch_id in range(2):
out = exe.run(
main_prog,
feed={
'x': np.random.randn(3, 4, 5).astype('float32'),
'y': np.random.randn(3, 4, 5).astype('float32')
},
fetch_list=loss.name)
scheduler.step()
"""
def __init__(self, learning_rate, gamma, last_epoch=-1, verbose=False):
self.gamma = gamma
super(ExponentialDecay, self).__init__(learning_rate, last_epoch,
verbose)
def get_lr(self):
return self.base_lr * (self.gamma**self.last_epoch)
class MultiStepDecay(LRScheduler):
"""
Update the learning rate by ``gamma`` once ``epoch`` reaches one of the milestones.
The algorithm can be described as the code below.
.. code-block:: text
learning_rate = 0.5
milestones = [30, 50]
gamma = 0.1
if epoch < 30:
learning_rate = 0.5
elif epoch < 50:
learning_rate = 0.05
else:
learning_rate = 0.005
Args:
learning_rate (float): The initial learning rate. It is a python float number.
milestones (tuple|list): List or tuple of each boundaries. Must be increasing.
gamma (float, optional): The Ratio that the learning rate will be reduced. ``new_lr = origin_lr * gamma`` .
It should be less than 1.0. Default: 0.1.
last_epoch (int, optional): The index of last epoch. Can be set to restart training. Default: -1, means initial learning rate.
verbose (bool, optional): If ``True``, prints a message to stdout for each update. Default: ``False`` .
Returns:
``MultiStepDecay`` instance to schedule learning rate.
Examples:
.. code-block:: python
import paddle
import numpy as np
# train on default dynamic graph mode
linear = paddle.nn.Linear(10, 10)
scheduler = paddle.optimizer.lr.MultiStepDecay(learning_rate=0.5, milestones=[2, 4, 6], gamma=0.8, verbose=True)
sgd = paddle.optimizer.SGD(learning_rate=scheduler, parameters=linear.parameters())
for epoch in range(20):
for batch_id in range(2):
x = paddle.uniform([10, 10])
out = linear(x)
loss = paddle.reduce_mean(out)
loss.backward()
sgd.step()
sgd.clear_gradients()
scheduler.step()
# train on static graph mode
paddle.enable_static()
main_prog = paddle.static.Program()
start_prog = paddle.static.Program()
with paddle.static.program_guard(main_prog, start_prog):
x = paddle.static.data(name='x', shape=[None, 4, 5])
y = paddle.static.data(name='y', shape=[None, 4, 5])
z = paddle.static.nn.fc(x, 100)
loss = paddle.mean(z)
scheduler = paddle.optimizer.lr.MultiStepDecay(learning_rate=0.5, milestones=[2, 4, 6], gamma=0.8, verbose=True)
sgd = paddle.optimizer.SGD(learning_rate=scheduler)
sgd.minimize(loss)
exe = paddle.static.Executor()
exe.run(start_prog)
for epoch in range(20):
for batch_id in range(2):
out = exe.run(
main_prog,
feed={
'x': np.random.randn(3, 4, 5).astype('float32'),
'y': np.random.randn(3, 4, 5).astype('float32')
},
fetch_list=loss.name)
scheduler.step()
"""
def __init__(self,
learning_rate,
milestones,
gamma=0.1,
last_epoch=-1,
verbose=False):
if not isinstance(milestones, (tuple, list)):
raise TypeError(
"The type of 'milestones' in 'MultiStepDecay' must be 'tuple, list', but received %s."
% type(milestones))
if not all([
milestones[i] < milestones[i + 1]
for i in range(len(milestones) - 1)
]):
raise ValueError('The elements of milestones must be increasing')
if gamma >= 1.0:
raise ValueError('gamma should be < 1.0.')
self.milestones = milestones
self.gamma = gamma
super(MultiStepDecay, self).__init__(learning_rate, last_epoch, verbose)
def get_lr(self):
for i in range(len(self.milestones)):
if self.last_epoch < self.milestones[i]:
return self.base_lr * (self.gamma**i)
return self.base_lr * (self.gamma**len(self.milestones))
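# --- Illustrative equivalent (not part of the original module) ---
# MultiStepDecay.get_lr above multiplies gamma once per milestone already
# passed; the same rule written with bisect, as a standalone sketch.
import bisect

def _multistep_lr_example(base_lr, milestones, gamma, epoch):
    # The number of milestones <= epoch is the exponent of gamma.
    return base_lr * gamma ** bisect.bisect_right(milestones, epoch)

# With base_lr=0.5, milestones=[30, 50], gamma=0.1 (as in the docstring):
# epoch 29 -> 0.5, epoch 30 -> 0.05, epoch 50 -> 0.005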
class StepDecay(LRScheduler):
"""
Update the learning rate of ``optimizer`` by ``gamma`` every ``step_size`` number of epochs.
The algorithm can be described as the code below.
.. code-block:: text
learning_rate = 0.5
step_size = 30
gamma = 0.1
learning_rate = 0.5 if epoch < 30
learning_rate = 0.05 if 30 <= epoch < 60
learning_rate = 0.005 if 60 <= epoch < 90
...
Args:
learning_rate (float): The initial learning rate. It is a python float number.
step_size (int): the interval to update.
gamma (float, optional): The Ratio that the learning rate will be reduced. ``new_lr = origin_lr * gamma`` .
It should be less than 1.0. Default: 0.1.
last_epoch (int, optional): The index of last epoch. Can be set to restart training. Default: -1, means initial learning rate.
verbose (bool, optional): If ``True``, prints a message to stdout for each update. Default: ``False`` .
Returns:
``StepDecay`` instance to schedule learning rate.
Examples:
.. code-block:: python
import paddle
import numpy as np
# train on default dynamic graph mode
linear = paddle.nn.Linear(10, 10)
scheduler = paddle.optimizer.lr.StepDecay(learning_rate=0.5, step_size=5, gamma=0.8, verbose=True)
sgd = paddle.optimizer.SGD(learning_rate=scheduler, parameters=linear.parameters())
for epoch in range(20):
for batch_id in range(2):
x = paddle.uniform([10, 10])
out = linear(x)
loss = paddle.reduce_mean(out)
loss.backward()
sgd.step()
sgd.clear_gradients()
scheduler.step()
# train on static graph mode
paddle.enable_static()
main_prog = paddle.static.Program()
start_prog = paddle.static.Program()
with paddle.static.program_guard(main_prog, start_prog):
x = paddle.static.data(name='x', shape=[None, 4, 5])
y = paddle.static.data(name='y', shape=[None, 4, 5])
z = paddle.static.nn.fc(x, 100)
loss = paddle.mean(z)
scheduler = paddle.optimizer.lr.StepDecay(learning_rate=0.5, step_size=5, gamma=0.8, verbose=True)
sgd = paddle.optimizer.SGD(learning_rate=scheduler)
sgd.minimize(loss)
exe = paddle.static.Executor()
exe.run(start_prog)
for epoch in range(20):
for batch_id in range(2):
out = exe.run(
main_prog,
feed={
'x': np.random.randn(3, 4, 5).astype('float32'),
'y': np.random.randn(3, 4, 5).astype('float32')
},
fetch_list=loss.name)
scheduler.step()
"""
def __init__(self,
learning_rate,
step_size,
gamma=0.1,
last_epoch=-1,
verbose=False):
if not isinstance(step_size, int):
raise TypeError(
"The type of 'step_size' must be 'int', but received %s." %
type(step_size))
if gamma >= 1.0:
raise ValueError('gamma should be < 1.0.')
self.step_size = step_size
self.gamma = gamma
super(StepDecay, self).__init__(learning_rate, last_epoch, verbose)
def get_lr(self):
i = self.last_epoch // self.step_size
return self.base_lr * (self.gamma**i)
class LambdaDecay(LRScheduler):
"""
Sets the learning rate of ``optimizer`` by function ``lr_lambda`` . ``lr_lambda`` is a function which receives ``epoch`` .
The algorithm can be described as the code below.
.. code-block:: text
learning_rate = 0.5 # init learning_rate
lr_lambda = lambda epoch: 0.95 ** epoch
learning_rate = 0.5 # epoch 0, 0.5*0.95**0
learning_rate = 0.475 # epoch 1, 0.5*0.95**1
learning_rate = 0.45125 # epoch 2, 0.5*0.95**2
Args:
learning_rate (float): The initial learning rate. It is a python float number.
lr_lambda (function): A function which computes a factor by ``epoch`` , and then multiply the initial learning rate by this factor.
last_epoch (int, optional): The index of last epoch. Can be set to restart training. Default: -1, means initial learning rate.
verbose (bool, optional): If ``True``, prints a message to stdout for each update. Default: ``False`` .
Returns:
``LambdaDecay`` instance to schedule learning rate.
Examples:
.. code-block:: python
import paddle
import numpy as np
# train on default dynamic graph mode
linear = paddle.nn.Linear(10, 10)
scheduler = paddle.optimizer.lr.LambdaDecay(learning_rate=0.5, lr_lambda=lambda x:0.95**x, verbose=True)
sgd = paddle.optimizer.SGD(learning_rate=scheduler, parameters=linear.parameters())
for epoch in range(20):
for batch_id in range(2):
x = paddle.uniform([10, 10])
out = linear(x)
loss = paddle.reduce_mean(out)
loss.backward()
sgd.step()
sgd.clear_gradients()
scheduler.step()
# train on static graph mode
paddle.enable_static()
main_prog = paddle.static.Program()
start_prog = paddle.static.Program()
with paddle.static.program_guard(main_prog, start_prog):
x = paddle.static.data(name='x', shape=[None, 4, 5])
y = paddle.static.data(name='y', shape=[None, 4, 5])
z = paddle.static.nn.fc(x, 100)
loss = paddle.mean(z)
scheduler = paddle.optimizer.lr.LambdaDecay(learning_rate=0.5, lr_lambda=lambda x:0.95**x, verbose=True)
sgd = paddle.optimizer.SGD(learning_rate=scheduler)
sgd.minimize(loss)
exe = paddle.static.Executor()
exe.run(start_prog)
for epoch in range(20):
for batch_id in range(2):
out = exe.run(
main_prog,
feed={
'x': np.random.randn(3, 4, 5).astype('float32'),
'y': np.random.randn(3, 4, 5).astype('float32')
},
fetch_list=loss.name)
scheduler.step()
"""
def __init__(self, learning_rate, lr_lambda, last_epoch=-1, verbose=False):
if not callable(lr_lambda):
raise TypeError(
"The type of 'lr_lambda' in 'LambdaDecay' must be 'function', but received %s."
% type(lr_lambda))
self.lr_lambda = lr_lambda
super(LambdaDecay, self).__init__(learning_rate, last_epoch, verbose)
def get_lr(self):
return self.base_lr * self.lr_lambda(self.last_epoch)
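# --- Illustrative check (not part of the original module) ---
# The schedule produced by LambdaDecay.get_lr above with the docstring's
# lr_lambda, recomputed standalone.
def _lambda_lr_example(base_lr, epoch):
    def lr_lambda(e):
        return 0.95 ** e
    return base_lr * lr_lambda(epoch)

# _lambda_lr_example(0.5, 0) -> 0.5
# _lambda_lr_example(0.5, 1) -> 0.475
# _lambda_lr_example(0.5, 2) -> 0.45125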
class ReduceOnPlateau(LRScheduler):
"""
Reduce learning rate when ``metrics`` has stopped descending. Models often benefit from reducing the learning rate
by a factor of 2 to 10 once model performance stops improving.
The ``metrics`` is the value passed into ``step`` ; it must be a 1-D Tensor with shape [1]. When ``metrics``
stops descending for a ``patience`` number of epochs, the learning rate will be reduced to ``learning_rate * factor`` .
(Specially, ``mode`` can also be set to ``'max'`` ; in this case, when ``metrics`` stops ascending for a ``patience``
number of epochs, the learning rate will be reduced.)
In addition, after each reduction, it will wait a ``cooldown`` number of epochs before resuming the above operation.
Args:
learning_rate (float): The initial learning rate. It is a python float number.
mode (str, optional): ``'min'`` or ``'max'`` can be selected. Normally, it is ``'min'`` , which means that the
learning rate will reduce when ``loss`` stops descending. Specially, if it's set to ``'max'`` , the learning
rate will reduce when ``loss`` stops ascending. Default: ``'min'`` .
factor (float, optional): The Ratio that the learning rate will be reduced. ``new_lr = origin_lr * factor`` .
It should be less than 1.0. Default: 0.1.
patience (int, optional): When ``loss`` doesn't improve for this number of epochs, the learning rate will be reduced.
Default: 10.
threshold (float, optional): ``threshold`` and ``threshold_mode`` will determine the minimum change of ``loss`` .
This means tiny changes of ``loss`` will be ignored. Default: 1e-4.
threshold_mode (str, optional): ``'rel'`` or ``'abs'`` can be selected. In ``'rel'`` mode, the minimum change of ``loss``
is ``last_loss * threshold`` , where ``last_loss`` is ``loss`` in last epoch. In ``'abs'`` mode, the minimum
change of ``loss`` is ``threshold`` . Default: ``'rel'`` .
cooldown (int, optional): The number of epochs to wait before resuming normal operation. Default: 0.
min_lr (float, optional): The lower bound of the learning rate after reduction. Default: 0.
epsilon (float, optional): Minimal decay applied to lr. If the difference between new and old lr is smaller than epsilon,
the update is ignored. Default: 1e-8.
verbose (bool, optional): If ``True``, prints a message to stdout for each update. Default: ``False``.
Returns:
``ReduceOnPlateau`` instance to schedule learning rate.
Examples:
.. code-block:: python
import paddle
import numpy as np
# train on default dynamic graph mode
linear = paddle.nn.Linear(10, 10)
scheduler = paddle.optimizer.lr.ReduceOnPlateau(learning_rate=1.0, factor=0.5, patience=5, verbose=True)
sgd = paddle.optimizer.SGD(learning_rate=scheduler, parameters=linear.parameters())
for epoch in range(20):
for batch_id in range(2):
x = paddle.uniform([10, 10])
out = linear(x)
loss = paddle.reduce_mean(out)
loss.backward()
sgd.step()
sgd.clear_gradients()
scheduler.step(loss)
# train on static graph mode
paddle.enable_static()
main_prog = paddle.static.Program()
start_prog = paddle.static.Program()
with paddle.static.program_guard(main_prog, start_prog):
x = paddle.static.data(name='x', shape=[None, 4, 5])
y = paddle.static.data(name='y', shape=[None, 4, 5])
z = paddle.static.nn.fc(x, 100)
loss = paddle.mean(z)
scheduler = paddle.optimizer.lr.ReduceOnPlateau(learning_rate=1.0, factor=0.5, patience=5, verbose=True)
sgd = paddle.optimizer.SGD(learning_rate=scheduler)
sgd.minimize(loss)
exe = paddle.static.Executor()
exe.run(start_prog)
for epoch in range(20):
for batch_id in range(2):
out = exe.run(
main_prog,
feed={
'x': np.random.randn(3, 4, 5).astype('float32'),
'y': np.random.randn(3, 4, 5).astype('float32')
},
fetch_list=loss.name)
scheduler.step(out[0])
"""
def __init__(self,
learning_rate,
mode='min',
factor=0.1,
patience=10,
threshold=1e-4,
threshold_mode='rel',
cooldown=0,
min_lr=0,
epsilon=1e-8,
verbose=False):
mode = mode.lower()
if mode not in ['min', 'max']:
raise ValueError('mode: ' + mode + ' is unknown!')
self.mode = mode
if factor >= 1.0:
raise ValueError(
'new_lr = origin_lr * factor and factor should be < 1.0.')
self.factor = factor
threshold_mode = threshold_mode.lower()
if threshold_mode not in ['rel', 'abs']:
raise ValueError('threshold mode: ' + threshold_mode +
' is unknown!')
self.threshold_mode = threshold_mode
if not isinstance(learning_rate, (float, int)):
raise TypeError(
"The type of 'learning_rate' in 'ReduceOnPlateau' must be 'float', but received %s."
% type(learning_rate))
self.verbose = verbose
self.patience = patience
self.threshold = threshold
self.threshold_mode = threshold_mode
self.cooldown = cooldown
self.min_lr = min_lr
self.epsilon = epsilon
self.cooldown_counter = 0
self.best = None
self.num_bad_epochs = 0
# Can not call Parent __init__, so implement here.
self.base_lr = float(learning_rate)
self.last_lr = float(learning_rate)
self.last_epoch = 0
self.verbose = verbose
self._var_name = None
# "cooldown_counter / best / num_bad_epochs / last_epoch / last_lr" will be stored.
def state_keys(self):
self.keys = [
'cooldown_counter', 'best', 'num_bad_epochs', 'last_epoch',
'last_lr'
]
def step(self, metrics, epoch=None):
"""
step should be called after `optimizer.step()` . It will update the learning rate in optimizer according to ``metrics`` .
The new learning rate will take effect on next epoch.
Args:
metrics (Tensor|numpy.ndarray|float): Which will be monitored to determine whether the learning rate will reduce.
If it stops descending for a ``patience`` number of epochs, the learning rate will be reduced. If it's 'Tensor' or
'numpy.ndarray', its shape must be [1].
epoch (int, None): specify current epoch. Default: None. Auto-increment from last_epoch=-1.
Returns:
None
Examples:
Please refer to the example of current LRScheduler.
"""
if epoch is None:
self.last_epoch = self.last_epoch + 1
else:
self.last_epoch = epoch
# loss must be float, numpy.ndarray or 1-D Tensor with shape [1]
if isinstance(metrics, (Tensor, numpy.ndarray)):
assert len(metrics.shape) == 1 and metrics.shape[0] == 1, "the metrics.shape " \
    "should be (1,), but the current metrics.shape is {}. Maybe you " \
    "should call paddle.mean to process it first.".format(metrics.shape)
elif not isinstance(metrics,
(int, float, numpy.float32, numpy.float64)):
raise TypeError(
"metrics must be 'int', 'float', 'np.float', 'numpy.ndarray' or 'paddle.Tensor', but receive {}".
format(type(metrics)))
if self.cooldown_counter > 0:
self.cooldown_counter -= 1
else:
if self.best is None or self._is_better(metrics, self.best):
self.best = metrics
self.num_bad_epochs = 0
else:
self.num_bad_epochs += 1
if self.num_bad_epochs > self.patience:
self.cooldown_counter = self.cooldown
self.num_bad_epochs = 0
new_lr = max(self.last_lr * self.factor, self.min_lr)
if self.last_lr - new_lr > self.epsilon:
self.last_lr = new_lr
if self.verbose:
print('Epoch {}: {} set learning rate to {}.'.format(
self.last_epoch, self.__class__.__name__,
self.last_lr))
def _is_better(self, current, best):
if self.mode == 'min' and self.threshold_mode == 'rel':
return current < best - best * self.threshold
elif self.mode == 'min' and self.threshold_mode == 'abs':
return current < best - self.threshold
elif self.mode == 'max' and self.threshold_mode == 'rel':
return current > best + best * self.threshold
else:
return current > best + self.threshold
class CosineAnnealingDecay(LRScheduler):
"""
Set the learning rate using a cosine annealing schedule, where :math:`\eta_{max}` is set to
the initial learning_rate. :math:`T_{cur}` is the number of epochs since the last restart in
SGDR.
The algorithm can be described as follows.
.. math::

    \begin{aligned}
        \eta_t & = \eta_{min} + \frac{1}{2}(\eta_{max} - \eta_{min})\left(1
        + \cos\left(\frac{T_{cur}}{T_{max}}\pi\right)\right),
        & T_{cur} \neq (2k+1)T_{max}; \\
        \eta_{t+1} & = \eta_{t} + \frac{1}{2}(\eta_{max} - \eta_{min})
        \left(1 - \cos\left(\frac{1}{T_{max}}\pi\right)\right),
        & T_{cur} = (2k+1)T_{max}.
    \end{aligned}
It has been proposed in `SGDR: Stochastic Gradient Descent with Warm Restarts <https://arxiv.org/abs/1608.03983>`_.
Note that this only implements the cosine annealing part of SGDR, and not the restarts.
Args:
learning_rate (float): The initial learning rate, that is :math:`\eta_{max}` . It can be set to python float or int number.
T_max (int): Maximum number of iterations. It is half of the decay cycle of learning rate.
eta_min (float|int, optional): Minimum learning rate, that is :math:`\eta_{min}` . Default: 0.
last_epoch (int, optional): The index of last epoch. Can be set to restart training. Default: -1, means initial learning rate.
verbose (bool, optional): If ``True``, prints a message to stdout for each update. Default: ``False`` .
Returns:
``CosineAnnealingDecay`` instance to schedule learning rate.
Examples:
.. code-block:: python
import paddle
import numpy as np
# train on default dynamic graph mode
linear = paddle.nn.Linear(10, 10)
scheduler = paddle.optimizer.lr.CosineAnnealingDecay(learning_rate=0.5, T_max=10, verbose=True)
sgd = paddle.optimizer.SGD(learning_rate=scheduler, parameters=linear.parameters())
for epoch in range(20):
for batch_id in range(2):
x = paddle.uniform([10, 10])
out = linear(x)
loss = paddle.mean(out)
loss.backward()
sgd.step()
sgd.clear_gradients()
scheduler.step()
# train on static graph mode
paddle.enable_static()
main_prog = paddle.static.Program()
start_prog = paddle.static.Program()
with paddle.static.program_guard(main_prog, start_prog):
x = paddle.static.data(name='x', shape=[None, 4, 5])
y = paddle.static.data(name='y', shape=[None, 4, 5])
z = paddle.static.nn.fc(x, 100)
loss = paddle.mean(z)
scheduler = paddle.optimizer.lr.CosineAnnealingDecay(learning_rate=0.5, T_max=10, verbose=True)
sgd = paddle.optimizer.SGD(learning_rate=scheduler)
sgd.minimize(loss)
exe = paddle.static.Executor()
exe.run(start_prog)
for epoch in range(20):
for batch_id in range(2):
out = exe.run(
main_prog,
feed={
'x': np.random.randn(3, 4, 5).astype('float32'),
'y': np.random.randn(3, 4, 5).astype('float32')
},
fetch_list=loss.name)
scheduler.step()
"""
def __init__(self,
learning_rate,
T_max,
eta_min=0,
last_epoch=-1,
verbose=False):
if not isinstance(T_max, int):
raise TypeError(
"The type of 'T_max' in 'CosineAnnealingDecay' must be 'int', but received %s."
% type(T_max))
if not isinstance(eta_min, (float, int)):
raise TypeError(
"The type of 'eta_min' in 'CosineAnnealingDecay' must be 'float, int', but received %s."
% type(eta_min))
self.T_max = T_max
self.eta_min = float(eta_min)
super(CosineAnnealingDecay, self).__init__(learning_rate, last_epoch,
verbose)
def get_lr(self):
if self.last_epoch == 0:
return self.base_lr
elif (self.last_epoch - 1 - self.T_max) % (2 * self.T_max) == 0:
return self.last_lr + (self.base_lr - self.eta_min) * (1 - math.cos(
math.pi / self.T_max)) / 2
return (1 + math.cos(math.pi * self.last_epoch / self.T_max)) / (
1 + math.cos(math.pi * (self.last_epoch - 1) / self.T_max)) * (
self.last_lr - self.eta_min) + self.eta_min
def _get_closed_form_lr(self):
return self.eta_min + (self.base_lr - self.eta_min) * (1 + math.cos(
math.pi * self.last_epoch / self.T_max)) / 2
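A minimal stand-alone sketch, not part of the Paddle source, of the closed-form schedule that _get_closed_form_lr implements; the defaults mirror the base_lr, eta_min, and T_max attributes above, and the printed values are easy to verify by hand.

import math

def cosine_annealing_lr(epoch, base_lr=0.5, eta_min=0.0, T_max=10):
    # eta_min + (eta_max - eta_min) * (1 + cos(pi * T_cur / T_max)) / 2
    return eta_min + (base_lr - eta_min) * (1 + math.cos(math.pi * epoch / T_max)) / 2

# Decays from base_lr at epoch 0 down to eta_min at epoch T_max.
print([round(cosine_annealing_lr(e), 4) for e in (0, 5, 10)])  # [0.5, 0.25, 0.0]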
| 40.135024 | 167 | 0.552846 |
39d69130f273553e775f424452d4442ac16da4e4 | 5,445 | py | Python | office365/graph_client.py | andreas-j-hauser/Office365-REST-Python-Client | 4bf8ee0b65985980b50fc3b74b32fd2db34561ba | ["MIT"] | null | null | null | office365/graph_client.py | andreas-j-hauser/Office365-REST-Python-Client | 4bf8ee0b65985980b50fc3b74b32fd2db34561ba | ["MIT"] | null | null | null | office365/graph_client.py | andreas-j-hauser/Office365-REST-Python-Client | 4bf8ee0b65985980b50fc3b74b32fd2db34561ba | ["MIT"] | null | null | null |
from office365.actions.download_content_query import DownloadContentQuery
from office365.actions.search_query import SearchQuery
from office365.actions.upload_content_query import UploadContentQuery
from office365.directory.directory import Directory
from office365.directory.directoryObjectCollection import DirectoryObjectCollection
from office365.directory.group_collection import GroupCollection
from office365.directory.groupSettingTemplateCollection import GroupSettingTemplateCollection
from office365.directory.user import User
from office365.directory.userCollection import UserCollection
from office365.onedrive.driveCollection import DriveCollection
from office365.onedrive.sharedDriveItemCollection import SharedDriveItemCollection
from office365.onedrive.siteCollection import SiteCollection
from office365.outlook.contact_collection import ContactCollection
from office365.runtime.auth.token_response import TokenResponse
from office365.runtime.client_runtime_context import ClientRuntimeContext
from office365.runtime.http.http_method import HttpMethod
from office365.runtime.http.request_options import RequestOptions
from office365.runtime.odata.odata_request import ODataRequest
from office365.runtime.odata.odata_v4_batch_request import ODataV4BatchRequest
from office365.runtime.odata.v4_json_format import V4JsonFormat
from office365.runtime.queries.delete_entity_query import DeleteEntityQuery
from office365.runtime.queries.update_entity_query import UpdateEntityQuery
from office365.runtime.resource_path import ResourcePath
from office365.teams.team_collection import TeamCollection
class GraphClient(ClientRuntimeContext):
"""Graph client"""
def __init__(self, acquire_token_callback):
"""
:param () -> dict acquire_token_callback: Acquire token function
"""
super(GraphClient, self).__init__()
self._pending_request = ODataRequest(self, V4JsonFormat("minimal"))
self._pending_request.beforeExecute += self._build_specific_query
self._resource = "https://graph.microsoft.com"
self._authority_host_url = "https://login.microsoftonline.com"
self._acquire_token_callback = acquire_token_callback
def execute_batch(self):
"""Construct and submit a batch request"""
batch_request = ODataV4BatchRequest(self)
batch_request.execute_query()
def pending_request(self):
return self._pending_request
def service_root_url(self):
return "https://graph.microsoft.com/v1.0/"
def _build_specific_query(self, request):
"""
Builds Graph specific request
:type request: RequestOptions
"""
query = self.pending_request().current_query
if isinstance(query, UpdateEntityQuery):
request.method = HttpMethod.Patch
elif isinstance(query, DeleteEntityQuery):
request.method = HttpMethod.Delete
if isinstance(query, DownloadContentQuery):
request.method = HttpMethod.Get
elif isinstance(query, UploadContentQuery):
request.method = HttpMethod.Put
elif isinstance(query, SearchQuery):
request.method = HttpMethod.Get
def authenticate_request(self, request):
"""
:type request: RequestOptions
"""
token_json = self._acquire_token_callback()
token = TokenResponse.from_json(token_json)
request.set_header('Authorization', 'Bearer {0}'.format(token.accessToken))
def execute_request(self, url_or_options):
"""
Constructs and submits request directly
:type url_or_options: str or RequestOptions
"""
if not isinstance(url_or_options, RequestOptions):
url_or_options = RequestOptions("{0}/{1}".format(self.service_root_url(), url_or_options))
return self.execute_request_direct(url_or_options)
@property
def me(self):
"""The Me endpoint is provided as a shortcut for specifying the current user"""
return User(self, ResourcePath("me"))
@property
def drives(self):
"""Get one drives"""
return DriveCollection(self, ResourcePath("drives"))
@property
def users(self):
"""Get users"""
return UserCollection(self, ResourcePath("users"))
@property
def groups(self):
"""Get groups"""
return GroupCollection(self, ResourcePath("groups"))
@property
def sites(self):
"""Get sites"""
return SiteCollection(self, ResourcePath("sites"))
@property
def shares(self):
"""Get shares"""
return SharedDriveItemCollection(self, ResourcePath("shares"))
@property
def directoryObjects(self):
"""Get Directory Objects"""
return DirectoryObjectCollection(self, ResourcePath("directoryObjects"))
@property
def teams(self):
"""Get teams"""
return TeamCollection(self, ResourcePath("teams"))
@property
def groupSettingTemplates(self):
"""Get teams"""
return GroupSettingTemplateCollection(self, ResourcePath("groupSettingTemplates"))
@property
def contacts(self):
"""o get all the contacts in a user's mailbox"""
return ContactCollection(self, ResourcePath("contacts"))
@property
def directory(self):
"""Represents a deleted item in the directory"""
return Directory(self, ResourcePath("directory"))
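A hedged usage sketch for the GraphClient above, not taken from the library's documentation: acquire_token is a hypothetical callback, and the exact token keys it must return are an assumption (the client only needs something TokenResponse.from_json can parse into an accessToken). The sketch builds resource paths but does not submit any request.

from office365.graph_client import GraphClient

def acquire_token():
    # Hypothetical placeholder: return the token dictionary produced by your
    # OAuth flow; the key names below are assumed, not confirmed by this file.
    return {"tokenType": "Bearer", "accessToken": "<access token>"}

client = GraphClient(acquire_token)
me = client.me          # shortcut for the current user
users = client.users    # users collection
drives = client.drives  # drives collection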
| 37.8125 | 102 | 0.72562 |
6c828d7b0ceacb1400a52a0c44d6d5e12702c982 | 10,914 | py | Python | xlmhg/mhg.py | flo-compbio/xlmhg | c29d913386443396254774b8cff5cff2b5731323 | ["BSD-3-Clause"] | 15 | 2015-05-22T23:58:25.000Z | 2021-12-31T03:01:54.000Z | xlmhg/mhg.py | flo-compbio/xlmhg | c29d913386443396254774b8cff5cff2b5731323 | ["BSD-3-Clause"] | 6 | 2016-07-18T05:02:04.000Z | 2021-08-04T09:53:35.000Z | xlmhg/mhg.py | flo-compbio/xlmhg | c29d913386443396254774b8cff5cff2b5731323 | ["BSD-3-Clause"] | 1 | 2015-05-24T13:10:10.000Z | 2015-05-24T13:10:10.000Z |
# Copyright (c) 2016-2019 Florian Wagner
#
# This file is part of XL-mHG.
"""XL-mHG Python implementation."""
import numpy as np
DEFAULT_TOL = 1e-12
def is_equal(a, b, tol):
"""Ratio test to check if two floating point numbers are equal.
Parameters
----------
a: float
The first floating point number.
b: float
The second floating point number.
tol: float
The tolerance used.
Returns
-------
bool
Whether or not the two numbers are deemed equal.
"""
if a == b or abs(a-b) <= tol * max(abs(a), abs(b)):
return True
else:
return False
def get_hgp(p, k, N, K, n):
"""Calculate the hypergeometric p-value when p = f(k; N,K,n) is already known.
"""
pval = p
while k < min(K, n):
p *= (float((n-k)*(K-k) / float((k+1)*(N-K-n+k+1))))
pval += p
k += 1
return pval
def get_xlmhg_stat(v, X, L, tol=DEFAULT_TOL):
"""Calculate the XL-mHG test statistic using recurrence relations.
Parameters
----------
v: np.ndarray with dtype=np.uint8
The ranked list.
X: int
The XL-mHG ``X`` parameter.
L: int
The XL-mHG ``L`` parameter.
tol: float, optional
The tolerance used for comparing floats. [1e-12]
Returns
-------
s: float
The XL-mHG test statistic.
n_star: int
The (first) cutoff at which s was attained.
"""
assert isinstance(v, np.ndarray) and v.ndim == 1 and \
np.issubdtype(v.dtype, np.integer)
assert isinstance(X, int)
assert isinstance(L, int)
assert isinstance(tol, float)
N = v.size
if not N > 0:
raise ValueError('List is empty!')
if not (1 <= X <= N):
raise ValueError(
'Invalid value X=%d; should be >= 1 and <= %d.' % (X, N)
)
if not (1 <= L <= N):
raise ValueError(
'Invalid value L=%d; should be >= 1 and <= %d.' % (L, N)
)
if not (0.0 <= tol < 1.0):
raise ValueError('Invalid value tol=%.1e; should be in [0,1)' % (tol))
K = int(np.sum(v != 0))
if K == 0:
return 1.0, 0
p = 1.0
stat = 1.1
n_star = 0
k = 0
for n in range(L):
if v[n] == 0:
# calculate f(k; N,K,n+1) from f(k; N,K,n)
p *= (float((n+1)*(N-K-n+k)) / float((N-n)*(n-k+1)))
else:
# we hit a 1
# calculate f(k+1; N,K,n+1) from f(k; N,K,n)
p *= (float((n+1)*(K-k)) / float((N-n)*(k+1)))
k += 1
# calculate hypergeometric p-value only if enough elements have
# been seen
if k >= X:
hgp = get_hgp(p, k, N, K, n+1)
if hgp < stat and not is_equal(hgp, stat, tol):
stat = hgp
n_star = n+1
stat = min(stat, 1.0)
return stat, n_star
def get_xlmhg_pval1(N, K, X, L, stat, tol=DEFAULT_TOL):
"""Calculate the XL-mHG p-value using "Algorithm 1".
Parameters
----------
N: int
The length of the list.
K: int
The number of 1's in the list.
X: int
The XL-mHG ``X`` parameter.
L: int
The XL-mHG ``L`` parameter.
stat: float
The XL-mHG test statistic.
tol: float, optional
The tolerance used for comparing floats. [1e-12]
Returns
-------
float
The XL-mHG p-value. NaN if floating point precision was insufficient
for calculating the p-value.
"""
# type checking
assert isinstance(N, int)
assert isinstance(X, int)
assert isinstance(L, int)
assert isinstance(stat, float)
assert isinstance(tol, float)
# raise exceptions for invalid parameters
if not (N >= 1):
raise ValueError('Invalid value N=%d; should be >= 1.' % (N))
if not (1 <= X <= N):
raise ValueError(
'Invalid value X=%d; should be >= 1 and <= %d.' % (X, N)
)
if not (1 <= L <= N):
raise ValueError(
'Invalid value L=%d; should be >= 1 and <= %d.' % (L, N)
)
if not (0 < stat <= 1.0):
raise ValueError(
'Invalid value stat=%.1e; should be in (0;1].' % (stat,)
)
if not (0.0 <= tol < 1.0):
raise ValueError('Invalid value tol=%.1e; should be in [0,1)' % (tol))
# special case: stat = 1.0 => pval = 1.0
if stat == 1.0:
return 1.0
found_R = False
p_start = 1.0
p = None
hgp = None
# fill dynamic programming table by going over all cutoffs n
W = N-K
table = np.empty((K+1, W+1), dtype=np.float64)
table[0,0] = 1.0
for n in range(1, N+1):
if K >= n:
k = n
p_start *= (float(K-n+1) / float(N-n+1))
else:
k = K
p_start *= (float(n) / float(n-K))
if p_start == 0.0:
# not enough floating point accuracy to calculate the
# hypergeometric p-value
return float('nan')
p = p_start
hgp = p
w = n - k
# no configuration with n > L or n < X can be in R
if n <= L and n >= X:
while k >= X and w < W and \
(hgp < stat or is_equal(hgp, stat, tol)):
# we're still in R
found_R = True
table[k, w] = 0.0 # !
p *= (float(k*(N-K-n+k)) / float((n-k+1)*(K-k+1)))
hgp += p
w += 1
k -= 1
# fill in rest of the table based on entries for cutoff n-1
while k >= 0 and w <= W:
if k > 0 and w > 0:
table[k, w] = table[k, w-1] * (float(W-w+1)/float(N-n+1)) + \
table[k-1, w] * (float(K-k+1)/float(N-n+1))
elif k > 0:
table[k, w] = table[k-1, w] * (float(K-k+1)/float(N-n+1))
elif w > 0:
table[k, w] = table[k, w-1] * (float(W-w+1)/float(N-n+1))
w += 1
k -= 1
if found_R:
pval = 1.0 - table[K, W]
if pval < 0.0:
# insufficient floating point accuracy, set p-value to NaN
pval = float('nan')
else:
# we've never encountered R => set p-value to 0
pval = 0.0
return pval
def get_xlmhg_pval2(N, K, X, L, stat, tol=DEFAULT_TOL):
"""Calculate the XL-mHG p-value using "Algorithm 2".
Parameters
----------
N: int
The length of the list.
K: int
The number of 1's in the list.
X: int
The XL-mHG ``X`` parameter.
L: int
The XL-mHG ``L`` parameter.
stat: float
The XL-mHG test statistic.
tol: float, optional
The tolerance used for comparing floats. [1e-12]
Returns
-------
float
The XL-mHG p-value. NaN if floating point precision was insufficient
for calculating the p-value.
"""
# type checking
assert isinstance(N, int)
assert isinstance(X, int)
assert isinstance(L, int)
assert isinstance(stat, float)
assert isinstance(tol, float)
# raise exceptions for invalid parameters
if not (N >= 1):
raise ValueError('Invalid value N=%d; must be >= 1.' %(N))
if not (1 <= X <= N):
raise ValueError(
'Invalid value X=%d; must be >= 1 and <= %d.' %(X, N)
)
if not (1 <= L <= N):
raise ValueError(
'Invalid value L=%d; must be >= 1 and <= %d.' %(L, N)
)
if not (0 < stat <= 1.0):
raise ValueError(
'Invalid value s=%.1e; must be in (0,1].' %(stat)
)
if not (0.0 <= tol < 1.0):
raise ValueError('Invalid value tol=%.1e; must be in [0,1)' %(tol))
# special case: stat = 1.0 => pval = 1.0
if stat == 1.0:
return 1.0
W = N-K
table = np.empty((K+1, L+1), dtype=np.float64)
table[0,0] = 1.0 # initially, *all* paths have never entered R before
pval = 0.0
p_start = 1.0
p = None
hgp = None
k = None
w = None
# fill dynamic programming table and calculate XL-mHG p-value
# note: we only need to go over the first L cutoffs, since lower cutoffs
# cannot be in R (by definition)
for n in range(1, L+1):
if K >= n:
k = n
p_start *= (float(K-n+1) / float(N-n+1))
else:
k = K
p_start *= (float(n) / float(n-K))
if p_start == 0.0:
# not enough floating point precision to calculate
# the hypergeometric p-value
return float('nan')
p = p_start
hgp = p
w = n - k
if k == K and (hgp > stat and not is_equal(hgp, stat, tol)):
# We've exited R (or we were never in it).
# That means we're done here!
break
# Check if we've reached R. If so, "go down the diagonal" until we exit R.
# Three conditions:
# 1. k >= X // No configuration with k < X can be in R.
# 2. w < W // No configuration with w = W can be in R.
# 3. pval <= s // The basic criterion for being in R.
while k >= X and w < W and (hgp < stat or is_equal(hgp, stat, tol)):
# We're in R!
# Note:
# For w = W, we always have hgp = 1.0. Since stat < 1.0,
# we could just assume that w < W. But this assumption might fail
# due to limited floating point accuracy.
# First things first: set table[k, w] to 0 to indicate that this is
# R territory.
table[k, w] = 0
# check if we've "just entered" R (this is only possible "from below")
if table[k-1, w] > 0:
# calculate the fraction of "fresh" paths (paths which have never entered R before)
# that enter here, and add that number to r
pval += (table[k-1, w] * (float(K-k+1)/float(N-n+1)))
p *= (float(k*(N-K-n+k)) / float((n-k+1)*(K-k+1)))
hgp += p
w += 1
k -= 1
# now we're no longer in R
while k >= 0 and w <= W:
if k == 0:
# paths only come in "from the left"
table[k, w] = table[k, w-1] * (float(W-w+1)/float(N-n+1))
elif w == 0:
# paths only come in "from below"
table[k, w] = table[k-1, w] * (float(K-k+1)/float(N-n+1))
else:
# paths come in "from the left" and "from below"
table[k, w] = table[k, w-1] * (float(W-w+1)/float(N-n+1)) + \
table[k-1, w] * (float(K-k+1)/float(N-n+1))
w += 1
k -= 1
return pval
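A short usage sketch, assuming the module is importable under the path shown for this file (xlmhg/mhg.py): it computes the XL-mHG test statistic for a small ranked 0/1 list and then the corresponding p-value with "Algorithm 2". The list and parameters are illustrative only.

import numpy as np
from xlmhg.mhg import get_xlmhg_stat, get_xlmhg_pval2  # import path assumed from the file layout above

v = np.array([1, 0, 1, 1, 0, 0, 1, 0, 0, 0], dtype=np.uint8)  # ranked list, 1 = "hit"
N, K = v.size, int(v.sum())
X, L = 1, N                                  # no X/L restriction
stat, n_star = get_xlmhg_stat(v, X, L)       # test statistic and the cutoff attaining it
pval = get_xlmhg_pval2(N, K, X, L, stat)     # p-value via the dynamic programming table
print(stat, n_star, pval)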
| 30.066116 | 99 | 0.48195 |
b7f4039a92939c501636083557c32e37d82c9539 | 1,349 | py | Python | helpers/volatilityCube.py | neoyung/IrLib | 942793c49a477c9f5747410be74daf868391f289 | ["MIT"] | 1 | 2021-10-04T03:15:50.000Z | 2021-10-04T03:15:50.000Z | helpers/volatilityCube.py | neoyung/IrLib | 942793c49a477c9f5747410be74daf868391f289 | ["MIT"] | null | null | null | helpers/volatilityCube.py | neoyung/IrLib | 942793c49a477c9f5747410be74daf868391f289 | ["MIT"] | null | null | null |
from irLib.mathTools.estimationEngine import linearInterpolatorND
import numpy as np
class volatilityCube:
def __init__(self, tenorIndex, strikeIndex, time2MaturityIndex, volatilityValues):
self.tenorIndex = tenorIndex
self.strikeIndex = strikeIndex
self.time2MaturityIndex = time2MaturityIndex
assert len(volatilityValues) == len(tenorIndex) * len(strikeIndex) * \
len(time2MaturityIndex), 'no of vol. values doesnt match dimensions'
self.values = volatilityValues
self.linearInterpolator = None
def getVol(self, tenor, strike, time2Maturity):
assert tenor >= min(self.tenorIndex) and tenor <= max(self.tenorIndex)\
and strike >= min(self.strikeIndex) and strike <= max(self.strikeIndex)\
and time2Maturity >= min(self.time2MaturityIndex) and time2Maturity <= max(self.time2MaturityIndex), 'no extrapolation allowed'
if self.linearInterpolator is None:
xv, yv, zv = np.meshgrid(
self.tenorIndex, self.strikeIndex, self.time2MaturityIndex)
self.points = np.vstack([xv.ravel(), yv.ravel(), zv.ravel()]).T
self.linearInterpolator = linearInterpolatorND(
self.points, self.values)
return self.linearInterpolator.getValues([tenor, strike, time2Maturity])
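A hedged usage sketch for the volatilityCube class above, assuming the repository layout shown (helpers/volatilityCube.py) and that the irLib package providing linearInterpolatorND is installed. All index values and volatilities are made-up illustration data, and the flattened ordering of vols must match the meshgrid/ravel call inside getVol.

import numpy as np
from helpers.volatilityCube import volatilityCube  # import path assumed from the file layout above

tenors = [1, 5, 10]            # e.g. underlying swap tenors in years
strikes = [0.01, 0.02, 0.03]   # e.g. absolute strikes
maturities = [1, 2]            # times to maturity in years
vols = list(np.linspace(0.10, 0.27, 18))  # 3 * 3 * 2 = 18 illustrative vol values

cube = volatilityCube(tenors, strikes, maturities, vols)
# Interpolates inside the cube; extrapolation is rejected by the assert in getVol.
print(cube.getVol(tenor=7, strike=0.015, time2Maturity=1.5))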
| 46.517241 | 139 | 0.682728 |
3f59e0d26f10064566eda8e33975ec03537292b7 | 4,942 | py | Python | tools/cygprofile/patch_orderfile_unittest.py | zipated/src | 2b8388091c71e442910a21ada3d97ae8bc1845d3 | ["BSD-3-Clause"] | 2,151 | 2020-04-18T07:31:17.000Z | 2022-03-31T08:39:18.000Z | tools/cygprofile/patch_orderfile_unittest.py | cangulcan/src | 2b8388091c71e442910a21ada3d97ae8bc1845d3 | ["BSD-3-Clause"] | 395 | 2020-04-18T08:22:18.000Z | 2021-12-08T13:04:49.000Z | tools/cygprofile/patch_orderfile_unittest.py | cangulcan/src | 2b8388091c71e442910a21ada3d97ae8bc1845d3 | ["BSD-3-Clause"] | 338 | 2020-04-18T08:03:10.000Z | 2022-03-29T12:33:22.000Z |
#!/usr/bin/env vpython
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
import patch_orderfile
import symbol_extractor
class TestPatchOrderFile(unittest.TestCase):
def testRemoveSuffixes(self):
no_clone = 'this.does.not.contain.clone'
self.assertEquals(no_clone, patch_orderfile.RemoveSuffixes(no_clone))
with_clone = 'this.does.contain.clone.'
self.assertEquals(
'this.does.contain', patch_orderfile.RemoveSuffixes(with_clone))
with_part = 'this.is.a.part.42'
self.assertEquals(
'this.is.a', patch_orderfile.RemoveSuffixes(with_part))
def testSymbolsWithSameOffset(self):
symbol_name = "dummySymbol"
symbol_name2 = "other"
name_to_symbol_infos = {symbol_name: [
symbol_extractor.SymbolInfo(symbol_name, 0x42, 0x12,
section='.text')]}
offset_to_symbol_infos = {
0x42: [symbol_extractor.SymbolInfo(symbol_name, 0x42, 0x12,
section='.text'),
symbol_extractor.SymbolInfo(symbol_name2, 0x42, 0x12,
section='.text')]}
symbol_names = patch_orderfile._SymbolsWithSameOffset(
symbol_name, name_to_symbol_infos, offset_to_symbol_infos)
self.assertEquals(len(symbol_names), 2)
self.assertEquals(symbol_names[0], symbol_name)
self.assertEquals(symbol_names[1], symbol_name2)
self.assertEquals([], patch_orderfile._SymbolsWithSameOffset(
"symbolThatShouldntMatch",
name_to_symbol_infos, offset_to_symbol_infos))
def testSectionNameToSymbols(self):
mapping = {'.text.foo': ['foo'],
'.text.hot.bar': ['bar', 'bar1']}
self.assertEquals(list(patch_orderfile._SectionNameToSymbols(
'.text.foo', mapping)),
['foo'])
self.assertEquals(list(patch_orderfile._SectionNameToSymbols(
'.text.hot.bar', mapping)),
['bar', 'bar1'])
self.assertEquals(list(patch_orderfile._SectionNameToSymbols(
'.text.hot.bar', mapping)),
['bar', 'bar1'])
self.assertEquals(list(patch_orderfile._SectionNameToSymbols(
'.text.hot.foobar', mapping)),
['foobar'])
self.assertEquals(list(patch_orderfile._SectionNameToSymbols(
'.text.unlikely.*', mapping)),
[])
def testSectionMatchingRules(self):
symbol_name1 = 'symbol1'
symbol_name2 = 'symbol2'
symbol_name3 = 'symbol3'
section_name1 = '.text.' + symbol_name1
section_name3 = '.text.foo'
suffixed = set([section_name3])
name_to_symbol_infos = {symbol_name1: [
symbol_extractor.SymbolInfo(symbol_name1, 0x42, 0x12,
section='.text')]}
offset_to_symbol_infos = {
0x42: [symbol_extractor.SymbolInfo(symbol_name1, 0x42, 0x12,
section='.text'),
symbol_extractor.SymbolInfo(symbol_name2, 0x42, 0x12,
section='.text')]}
section_to_symbols_map = {section_name1: [symbol_name1],
section_name3: [symbol_name1, symbol_name3]}
symbol_to_sections_map = {symbol_name1:
[section_name1, section_name3],
symbol_name3: [section_name3]}
expected = [
section_name1,
section_name3,
section_name3 + '.*',
'.text.hot.' + symbol_name1,
'.text.unlikely.' + symbol_name1,
symbol_name1,
'.text.hot.symbol2',
'.text.unlikely.symbol2',
'.text.symbol2',
'symbol2']
self.assertEqual(expected, list(patch_orderfile._SectionMatchingRules(
section_name1, name_to_symbol_infos, offset_to_symbol_infos,
section_to_symbols_map, symbol_to_sections_map, suffixed)))
def testUniqueGenerator(self):
@patch_orderfile._UniqueGenerator
def TestIterator():
yield 1
yield 2
yield 1
yield 3
self.assertEqual(list(TestIterator()), [1,2,3])
def testCombineSectionListsByPrimaryName(self):
self.assertEqual(patch_orderfile._CombineSectionListsByPrimaryName(
{'foo': ['.text.foo', '.text.bar.constprop.1'],
'foo.part.1': ['.text.baz'],
'foobar': ['.text.foobar']}),
{'foo': ['.text.foo', '.text.bar', '.text.baz'],
'foobar': ['.text.foobar']})
def testSectionsWithSuffixes(self):
self.assertEqual(patch_orderfile._SectionsWithSuffixes(
{'foo': ['.text.foo', '.text.bar.constprop.1'],
'foo.part.1': ['.text.baz'],
'foobar': ['.text.foobar']}),
set(['.text.bar']))
if __name__ == "__main__":
unittest.main()
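testUniqueGenerator above exercises a decorator that drops duplicate values from a generator's output. The following stand-alone sketch is an assumption about how such a decorator can be written, inferred only from the test's expected behaviour; it is not the actual patch_orderfile._UniqueGenerator implementation.

import functools

def unique_generator(gen_func):
    """Wrap a generator function so each distinct value is yielded only once."""
    @functools.wraps(gen_func)
    def wrapper(*args, **kwargs):
        seen = set()
        for item in gen_func(*args, **kwargs):
            if item not in seen:
                seen.add(item)
                yield item
    return wrapper

@unique_generator
def numbers():
    for value in (1, 2, 1, 3):
        yield value

assert list(numbers()) == [1, 2, 3]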
| 39.536 | 74 | 0.610684 |
d25a0d6b387da86a9c67ea6317bbb7f33a9cffe2 | 632 | py | Python | anyfunction.py | PRASAD-DANGARE/PYTHON | 36214f7dc3762d327e5a29e40752edeb098249c8 | ["MIT"] | 1 | 2021-06-07T07:55:28.000Z | 2021-06-07T07:55:28.000Z | anyfunction.py | PRASAD-DANGARE/PYTHON | 36214f7dc3762d327e5a29e40752edeb098249c8 | ["MIT"] | null | null | null | anyfunction.py | PRASAD-DANGARE/PYTHON | 36214f7dc3762d327e5a29e40752edeb098249c8 | ["MIT"] | null | null | null |
# python program to know the effect of any() and all() function
'''
Function Name : Use Of Any And All Functions.
Function Date : 28 Aug 2020
Function Author : Prasad Dangare
Input : String
Output : String
'''
from numpy import *
a = array([1, 2, 3, 0])
b = array([0, 2, 3, 1])
c = a > b
print('\n Result Of a > b : ', c)
print("\n")
print('Check If Any One Elements Is True : ', any(c))
print("\n")
print('Check If All Elements Are True : ', all(c))
print("\n")
if(any(a > b)):
print('a Contains At Least One Element Greater Than Those Of b')
print("\n")
| 21.793103 | 69 | 0.56962 |
e5e86327b95e2e7ea18208fcc0d7e9173f249363 | 32,126 | py | Python | test/functional/fundrawtransaction.py | andyfreer/dash | 351fbf65efc9459cb69a3c843cc205a8b94c95b3 | ["MIT"] | 20 | 2019-07-24T07:07:49.000Z | 2021-09-06T15:25:55.000Z | test/functional/fundrawtransaction.py | andyfreer/dash | 351fbf65efc9459cb69a3c843cc205a8b94c95b3 | ["MIT"] | 13 | 2019-10-15T13:32:33.000Z | 2021-12-03T14:48:49.000Z | test/functional/fundrawtransaction.py | andyfreer/dash | 351fbf65efc9459cb69a3c843cc205a8b94c95b3 | ["MIT"] | 35 | 2019-07-17T22:36:45.000Z | 2021-06-02T15:36:46.000Z |
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the fundrawtransaction RPC."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
def get_unspent(listunspent, amount):
for utx in listunspent:
if utx['amount'] == amount:
return utx
raise AssertionError('Could not find unspent with amount={}'.format(amount))
class RawTransactionsTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 4
self.setup_clean_chain = True
self.extra_args = [['-usehd=0']] * self.num_nodes
def setup_network(self, split=False):
self.setup_nodes()
connect_nodes_bi(self.nodes, 0, 1)
connect_nodes_bi(self.nodes, 1, 2)
connect_nodes_bi(self.nodes, 0, 2)
connect_nodes_bi(self.nodes, 0, 3)
def run_test(self):
min_relay_tx_fee = self.nodes[0].getnetworkinfo()['relayfee']
# This test is not meant to test fee estimation and we'd like
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
# if the fee's positive delta is higher than this value the tests will fail,
# a negative delta always fails the tests.
# The size of the signature of every input may be at most 2 bytes larger
# than a minimum sized signature.
# = 2 bytes * minRelayTxFeePerByte
feeTolerance = 2 * min_relay_tx_fee/1000
self.nodes[2].generate(1)
self.sync_all()
self.nodes[0].generate(121)
self.sync_all()
# ensure that setting changePosition in fundraw with an exact match is handled properly
rawmatch = self.nodes[2].createrawtransaction([], {self.nodes[2].getnewaddress():500})
rawmatch = self.nodes[2].fundrawtransaction(rawmatch, {"changePosition":1, "subtractFeeFromOutputs":[0]})
assert_equal(rawmatch["changepos"], -1)
watchonly_address = self.nodes[0].getnewaddress()
watchonly_pubkey = self.nodes[0].validateaddress(watchonly_address)["pubkey"]
watchonly_amount = Decimal(2000)
self.nodes[3].importpubkey(watchonly_pubkey, "", True)
watchonly_txid = self.nodes[0].sendtoaddress(watchonly_address, watchonly_amount)
self.nodes[0].sendtoaddress(self.nodes[3].getnewaddress(), watchonly_amount / 10)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 15)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 10)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 50)
self.nodes[0].generate(1)
self.sync_all()
###############
# simple test #
###############
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 10 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0) #test that we have enough inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 22 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0) #test if we have enough inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 26 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
################################
# simple test with two outputs #
################################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 26, self.nodes[1].getnewaddress() : 25 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert(len(dec_tx['vin']) > 0)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
#########################################################################
# test a fundrawtransaction with a VIN greater than the required amount #
#########################################################################
utx = get_unspent(self.nodes[2].listunspent(), 50)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 10 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
#####################################################################
# test a fundrawtransaction that will not get a change output       #
#####################################################################
utx = get_unspent(self.nodes[2].listunspent(), 50)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : Decimal(50) - fee - feeTolerance }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(rawtxfund['changepos'], -1)
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
####################################################
# test a fundrawtransaction with an invalid option #
####################################################
utx = get_unspent(self.nodes[2].listunspent(), 50)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(40) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_raises_rpc_error(-3, "Unexpected key foo", self.nodes[2].fundrawtransaction, rawtx, {'foo':'bar'})
############################################################
# test a fundrawtransaction with an invalid change address #
############################################################
utx = get_unspent(self.nodes[2].listunspent(), 50)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(40) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_raises_rpc_error(-5, "changeAddress must be a valid dash address", self.nodes[2].fundrawtransaction, rawtx, {'changeAddress':'foobar'})
############################################################
# test a fundrawtransaction with a provided change address #
############################################################
utx = get_unspent(self.nodes[2].listunspent(), 50)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(40) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
change = self.nodes[2].getnewaddress()
assert_raises_rpc_error(-8, "changePosition out of bounds", self.nodes[2].fundrawtransaction, rawtx, {'changeAddress':change, 'changePosition':2})
rawtxfund = self.nodes[2].fundrawtransaction(rawtx, {'changeAddress': change, 'changePosition': 0})
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
out = dec_tx['vout'][0]
assert_equal(change, out['scriptPubKey']['addresses'][0])
#########################################################################
# test a fundrawtransaction with a VIN smaller than the required amount #
#########################################################################
utx = get_unspent(self.nodes[2].listunspent(), 10)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 10 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
# 4-byte version + 1-byte vin count + 36-byte prevout then script_len
rawtx = rawtx[:82] + "0100" + rawtx[84:]
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for i, out in enumerate(dec_tx['vout']):
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
else:
assert_equal(i, rawtxfund['changepos'])
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
###########################################
# test a fundrawtransaction with two VINs #
###########################################
utx = get_unspent(self.nodes[2].listunspent(), 10)
utx2 = get_unspent(self.nodes[2].listunspent(), 50)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 60 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
matchingIns = 0
for vinOut in dec_tx['vin']:
for vinIn in inputs:
if vinIn['txid'] == vinOut['txid']:
matchingIns+=1
assert_equal(matchingIns, 2) #we now must see two vins identical to vins given as params
#########################################################
# test a fundrawtransaction with two VINs and two vOUTs #
#########################################################
utx = get_unspent(self.nodes[2].listunspent(), 10)
utx2 = get_unspent(self.nodes[2].listunspent(), 50)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 60, self.nodes[0].getnewaddress() : 10 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
assert_equal(matchingOuts, 2)
assert_equal(len(dec_tx['vout']), 3)
##############################################
# test a fundrawtransaction with invalid vin #
##############################################
listunspent = self.nodes[2].listunspent()
inputs = [ {'txid' : "1c7f966dab21119bac53213a2bc7532bff1fa844c124fd750a7d0b1332440bd1", 'vout' : 0} ] #invalid vin!
outputs = { self.nodes[0].getnewaddress() : 10}
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_raises_rpc_error(-4, "Insufficient funds", self.nodes[2].fundrawtransaction, rawtx)
############################################################
#compare fee of a standard pubkeyhash transaction
inputs = []
outputs = {self.nodes[1].getnewaddress():11}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 11)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a standard pubkeyhash transaction with multiple outputs
inputs = []
outputs = {self.nodes[1].getnewaddress():11,self.nodes[1].getnewaddress():12,self.nodes[1].getnewaddress():1,self.nodes[1].getnewaddress():13,self.nodes[1].getnewaddress():2,self.nodes[1].getnewaddress():3}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendmany("", outputs)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a 2of2 multisig p2sh transaction
# create 2of2 addr
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[1].getnewaddress()
addr1Obj = self.nodes[1].validateaddress(addr1)
addr2Obj = self.nodes[1].validateaddress(addr2)
mSigObj = self.nodes[1].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
inputs = []
outputs = {mSigObj:11}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(mSigObj, 11)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a standard pubkeyhash transaction
# create 4of5 addr
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[1].getnewaddress()
addr3 = self.nodes[1].getnewaddress()
addr4 = self.nodes[1].getnewaddress()
addr5 = self.nodes[1].getnewaddress()
addr1Obj = self.nodes[1].validateaddress(addr1)
addr2Obj = self.nodes[1].validateaddress(addr2)
addr3Obj = self.nodes[1].validateaddress(addr3)
addr4Obj = self.nodes[1].validateaddress(addr4)
addr5Obj = self.nodes[1].validateaddress(addr5)
mSigObj = self.nodes[1].addmultisigaddress(4, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey'], addr4Obj['pubkey'], addr5Obj['pubkey']])
inputs = []
outputs = {mSigObj:11}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(mSigObj, 11)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
# spend a 2of2 multisig transaction over fundraw
# create 2of2 addr
addr1 = self.nodes[2].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[2].validateaddress(addr1)
addr2Obj = self.nodes[2].validateaddress(addr2)
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
# send 12 DASH to msig addr
txId = self.nodes[0].sendtoaddress(mSigObj, 12)
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
oldBalance = self.nodes[1].getbalance()
inputs = []
outputs = {self.nodes[1].getnewaddress():11}
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[2].fundrawtransaction(rawtx)
signedTx = self.nodes[2].signrawtransaction(fundedTx['hex'])
txId = self.nodes[2].sendrawtransaction(signedTx['hex'])
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
# make sure funds are received at node1
assert_equal(oldBalance+Decimal('11.0000000'), self.nodes[1].getbalance())
############################################################
# locked wallet test
self.stop_node(0)
self.nodes[1].node_encrypt_wallet("test")
self.stop_node(2)
self.stop_node(3)
self.start_nodes()
# This test is not meant to test fee estimation and we'd like
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
connect_nodes_bi(self.nodes,0,3)
self.sync_all()
# drain the keypool
self.nodes[1].getnewaddress()
inputs = []
outputs = {self.nodes[0].getnewaddress():1.1}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
# fund a transaction that requires a new key for the change output
# creating the key must be impossible because the wallet is locked
assert_raises_rpc_error(-4, "Keypool ran out, please call keypoolrefill first", self.nodes[1].fundrawtransaction, rawtx)
#refill the keypool
self.nodes[1].walletpassphrase("test", 100)
self.nodes[1].walletlock()
assert_raises_rpc_error(-13, "walletpassphrase", self.nodes[1].sendtoaddress, self.nodes[0].getnewaddress(), 12)
oldBalance = self.nodes[0].getbalance()
inputs = []
outputs = {self.nodes[0].getnewaddress():11}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawtx)
#now we need to unlock
self.nodes[1].walletpassphrase("test", 600)
signedTx = self.nodes[1].signrawtransaction(fundedTx['hex'])
txId = self.nodes[1].sendrawtransaction(signedTx['hex'])
self.nodes[1].generate(1)
self.sync_all()
# make sure funds are received at node0
assert_equal(oldBalance+Decimal('511.0000000'), self.nodes[0].getbalance())
###############################################
# multiple (~19) inputs tx test | Compare fee #
###############################################
#empty node1, send some small coins from node0 to node1
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
for i in range(0,20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.nodes[0].generate(1)
self.sync_all()
#fund a tx with ~20 small inputs
inputs = []
outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[1].sendmany("", outputs)
signedFee = self.nodes[1].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance*19) #~19 inputs
#############################################
# multiple (~19) inputs tx test | sign/send #
#############################################
#again, empty node1, send some small coins from node0 to node1
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
for i in range(0,20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.nodes[0].generate(1)
self.sync_all()
#fund a tx with ~20 small inputs
oldBalance = self.nodes[0].getbalance()
inputs = []
outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawtx)
fundedAndSignedTx = self.nodes[1].signrawtransaction(fundedTx['hex'])
txId = self.nodes[1].sendrawtransaction(fundedAndSignedTx['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(oldBalance+Decimal('500.19000000'), self.nodes[0].getbalance()) #0.19+block reward
#####################################################
# test fundrawtransaction with OP_RETURN and no vin #
#####################################################
rawtx = "0100000000010000000000000000066a047465737400000000"
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(len(dec_tx['vin']), 0)
assert_equal(len(dec_tx['vout']), 1)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert_greater_than(len(dec_tx['vin']), 0) # at least one vin
assert_equal(len(dec_tx['vout']), 2) # one change output added
##################################################
# test a fundrawtransaction using only watchonly #
##################################################
inputs = []
outputs = {self.nodes[2].getnewaddress() : watchonly_amount / 2}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = self.nodes[3].fundrawtransaction(rawtx, {'includeWatching': True })
res_dec = self.nodes[0].decoderawtransaction(result["hex"])
assert_equal(len(res_dec["vin"]), 1)
assert_equal(res_dec["vin"][0]["txid"], watchonly_txid)
assert("fee" in result.keys())
assert_greater_than(result["changepos"], -1)
###############################################################
# test fundrawtransaction using the entirety of watched funds #
###############################################################
inputs = []
outputs = {self.nodes[2].getnewaddress() : watchonly_amount}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
# Backward compatibility test (2nd param is includeWatching)
result = self.nodes[3].fundrawtransaction(rawtx, True)
res_dec = self.nodes[0].decoderawtransaction(result["hex"])
assert_equal(len(res_dec["vin"]), 2)
assert(res_dec["vin"][0]["txid"] == watchonly_txid or res_dec["vin"][1]["txid"] == watchonly_txid)
assert_greater_than(result["fee"], 0)
assert_greater_than(result["changepos"], -1)
assert_equal(result["fee"] + res_dec["vout"][result["changepos"]]["value"], watchonly_amount / 10)
signedtx = self.nodes[3].signrawtransaction(result["hex"])
assert(not signedtx["complete"])
signedtx = self.nodes[0].signrawtransaction(signedtx["hex"])
assert(signedtx["complete"])
self.nodes[0].sendrawtransaction(signedtx["hex"])
self.nodes[0].generate(1)
self.sync_all()
#######################
# Test feeRate option #
#######################
# Make sure there is exactly one input so coin selection can't skew the result
assert_equal(len(self.nodes[3].listunspent(1)), 1)
inputs = []
outputs = {self.nodes[3].getnewaddress() : 1}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = self.nodes[3].fundrawtransaction(rawtx) # uses min_relay_tx_fee (set by settxfee)
result2 = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee})
result3 = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 10*min_relay_tx_fee})
result_fee_rate = result['fee'] * 1000 / count_bytes(result['hex'])
assert_fee_amount(result2['fee'], count_bytes(result2['hex']), 2 * result_fee_rate)
assert_fee_amount(result3['fee'], count_bytes(result3['hex']), 10 * result_fee_rate)
################################
# Test no address reuse occurs #
################################
result3 = self.nodes[3].fundrawtransaction(rawtx)
res_dec = self.nodes[0].decoderawtransaction(result3["hex"])
changeaddress = ""
for out in res_dec['vout']:
if out['value'] > 1.0:
changeaddress += out['scriptPubKey']['addresses'][0]
assert(changeaddress != "")
nextaddr = self.nodes[3].getnewaddress()
# Now the change address key should be removed from the keypool
assert(changeaddress != nextaddr)
######################################
# Test subtractFeeFromOutputs option #
######################################
# Make sure there is exactly one input so coin selection can't skew the result
assert_equal(len(self.nodes[3].listunspent(1)), 1)
inputs = []
outputs = {self.nodes[2].getnewaddress(): 1}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = [self.nodes[3].fundrawtransaction(rawtx), # uses min_relay_tx_fee (set by settxfee)
self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": []}), # empty subtraction list
self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": [0]}), # uses min_relay_tx_fee (set by settxfee)
self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee}),
self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee, "subtractFeeFromOutputs": [0]})]
dec_tx = [self.nodes[3].decoderawtransaction(tx['hex']) for tx in result]
output = [d['vout'][1 - r['changepos']]['value'] for d, r in zip(dec_tx, result)]
change = [d['vout'][r['changepos']]['value'] for d, r in zip(dec_tx, result)]
assert_equal(result[0]['fee'], result[1]['fee'], result[2]['fee'])
assert_equal(result[3]['fee'], result[4]['fee'])
assert_equal(change[0], change[1])
assert_equal(output[0], output[1])
assert_equal(output[0], output[2] + result[2]['fee'])
assert_equal(change[0] + result[0]['fee'], change[2])
assert_equal(output[3], output[4] + result[4]['fee'])
assert_equal(change[3] + result[3]['fee'], change[4])
inputs = []
outputs = {self.nodes[2].getnewaddress(): value for value in (1.0, 1.1, 1.2, 1.3)}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
# Add changePosition=4 to circumvent BIP69 input/output sorting
result = [self.nodes[3].fundrawtransaction(rawtx, {"changePosition": 4}),
# split the fee between outputs 0, 2, and 3, but not output 1
self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": [0, 2, 3], "changePosition": 4})]
dec_tx = [self.nodes[3].decoderawtransaction(result[0]['hex']),
self.nodes[3].decoderawtransaction(result[1]['hex'])]
# Nested list of non-change output amounts for each transaction
output = [[out['value'] for i, out in enumerate(d['vout']) if i != r['changepos']]
for d, r in zip(dec_tx, result)]
# List of differences in output amounts between normal and subtractFee transactions
share = [o0 - o1 for o0, o1 in zip(output[0], output[1])]
# output 1 is the same in both transactions
assert_equal(share[1], 0)
# the other 3 outputs are smaller as a result of subtractFeeFromOutputs
assert_greater_than(share[0], 0)
assert_greater_than(share[2], 0)
assert_greater_than(share[3], 0)
# outputs 2 and 3 take the same share of the fee
assert_equal(share[2], share[3])
# output 0 takes at least as much share of the fee, and no more than 2 satoshis more, than outputs 2 and 3
assert_greater_than_or_equal(share[0], share[2])
assert_greater_than_or_equal(share[2] + Decimal(2e-8), share[0])
# the fee is the same in both transactions
assert_equal(result[0]['fee'], result[1]['fee'])
# the total subtracted from the outputs is equal to the fee
assert_equal(share[0] + share[2] + share[3], result[0]['fee'])
if __name__ == '__main__':
RawTransactionsTest().main()
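Many of the assertions above compare the fee chosen by fundrawtransaction against the fee of an equivalent wallet-built transaction, allowing only a small positive delta. A minimal sketch of that tolerance check, with made-up numbers (the real values come from the running nodes), is:

from decimal import Decimal

min_relay_tx_fee = Decimal('0.00001')        # illustrative relay fee per kB
fee_tolerance = 2 * min_relay_tx_fee / 1000  # allow up to 2 extra signature bytes

funded_fee = Decimal('0.00000227')           # hypothetical fundrawtransaction fee
signed_fee = Decimal('0.00000226')           # hypothetical wallet transaction fee
fee_delta = funded_fee - signed_fee
assert 0 <= fee_delta <= fee_tolerance       # positive and within tolerance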
| 44.619444 | 214 | 0.570286 |