hexsha (stringlengths 40..40) | size (int64 3..1.03M) | ext (stringclasses 10 values) | lang (stringclasses 1 value) | max_stars_repo_path (stringlengths 3..972) | max_stars_repo_name (stringlengths 6..130) | max_stars_repo_head_hexsha (stringlengths 40..78) | max_stars_repo_licenses (listlengths 1..10) | max_stars_count (int64 1..191k ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24..24 ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24..24 ⌀) | max_issues_repo_path (stringlengths 3..972) | max_issues_repo_name (stringlengths 6..130) | max_issues_repo_head_hexsha (stringlengths 40..78) | max_issues_repo_licenses (listlengths 1..10) | max_issues_count (int64 1..116k ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24..24 ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24..24 ⌀) | max_forks_repo_path (stringlengths 3..972) | max_forks_repo_name (stringlengths 6..130) | max_forks_repo_head_hexsha (stringlengths 40..78) | max_forks_repo_licenses (listlengths 1..10) | max_forks_count (int64 1..105k ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24..24 ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24..24 ⌀) | content (stringlengths 3..1.03M) | avg_line_length (float64 1.13..941k) | max_line_length (int64 2..941k) | alphanum_fraction (float64 0..1) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
018cf7e099d6e514931e82d159dcf5b78e6d9a05 | 9,387 | py | Python | qlib/utils/resam.py | SunsetWolf/qlib | 89972f6c6f9fa629b4f74093d4ba1e93c9f7a5e5 | ["MIT"] | 1 | 2021-12-14T13:48:38.000Z | 2021-12-14T13:48:38.000Z | qlib/utils/resam.py | SunsetWolf/qlib | 89972f6c6f9fa629b4f74093d4ba1e93c9f7a5e5 | ["MIT"] | null | null | null | qlib/utils/resam.py | SunsetWolf/qlib | 89972f6c6f9fa629b4f74093d4ba1e93c9f7a5e5 | ["MIT"] | null | null | null |
import numpy as np
import pandas as pd
from functools import partial
from typing import Union, Callable
from . import lazy_sort_index
from .time import Freq, cal_sam_minute
from ..config import C
def resam_calendar(
calendar_raw: np.ndarray, freq_raw: Union[str, Freq], freq_sam: Union[str, Freq], region: str = None
) -> np.ndarray:
"""
Resample the calendar with frequency freq_raw into the calendar with frequency freq_sam
Assumption:
        - Fixed length (240) of the calendar in each day.
Parameters
----------
calendar_raw : np.ndarray
The calendar with frequency freq_raw
freq_raw : str
Frequency of the raw calendar
freq_sam : str
Sample frequency
region: str
Region, for example, "cn", "us"
Returns
-------
np.ndarray
The calendar with frequency freq_sam
"""
if region is None:
region = C["region"]
freq_raw = Freq(freq_raw)
freq_sam = Freq(freq_sam)
if not len(calendar_raw):
return calendar_raw
# if freq_sam is xminute, divide each trading day into several bars evenly
if freq_sam.base == Freq.NORM_FREQ_MINUTE:
if freq_raw.base != Freq.NORM_FREQ_MINUTE:
raise ValueError("when sampling minute calendar, freq of raw calendar must be minute or min")
else:
if freq_raw.count > freq_sam.count:
raise ValueError("raw freq must be higher than sampling freq")
_calendar_minute = np.unique(list(map(lambda x: cal_sam_minute(x, freq_sam.count, region), calendar_raw)))
return _calendar_minute
# else, convert the raw calendar into day calendar, and divide the whole calendar into several bars evenly
else:
_calendar_day = np.unique(list(map(lambda x: pd.Timestamp(x.year, x.month, x.day, 0, 0, 0), calendar_raw)))
if freq_sam.base == Freq.NORM_FREQ_DAY:
return _calendar_day[:: freq_sam.count]
elif freq_sam.base == Freq.NORM_FREQ_WEEK:
_day_in_week = np.array(list(map(lambda x: x.dayofweek, _calendar_day)))
_calendar_week = _calendar_day[np.ediff1d(_day_in_week, to_begin=-1) < 0]
return _calendar_week[:: freq_sam.count]
elif freq_sam.base == Freq.NORM_FREQ_MONTH:
_day_in_month = np.array(list(map(lambda x: x.day, _calendar_day)))
_calendar_month = _calendar_day[np.ediff1d(_day_in_month, to_begin=-1) < 0]
return _calendar_month[:: freq_sam.count]
else:
raise ValueError("sampling freq must be xmin, xd, xw, xm")
def get_higher_eq_freq_feature(instruments, fields, start_time=None, end_time=None, freq="day", disk_cache=1):
"""get the feature with higher or equal frequency than `freq`.
Returns
-------
pd.DataFrame
the feature with higher or equal frequency
"""
from ..data.data import D # pylint: disable=C0415
try:
_result = D.features(instruments, fields, start_time, end_time, freq=freq, disk_cache=disk_cache)
_freq = freq
except (ValueError, KeyError) as value_key_e:
_, norm_freq = Freq.parse(freq)
if norm_freq in [Freq.NORM_FREQ_MONTH, Freq.NORM_FREQ_WEEK, Freq.NORM_FREQ_DAY]:
try:
_result = D.features(instruments, fields, start_time, end_time, freq="day", disk_cache=disk_cache)
_freq = "day"
except (ValueError, KeyError):
_result = D.features(instruments, fields, start_time, end_time, freq="1min", disk_cache=disk_cache)
_freq = "1min"
elif norm_freq == Freq.NORM_FREQ_MINUTE:
_result = D.features(instruments, fields, start_time, end_time, freq="1min", disk_cache=disk_cache)
_freq = "1min"
else:
raise ValueError(f"freq {freq} is not supported") from value_key_e
return _result, _freq
def resam_ts_data(
ts_feature: Union[pd.DataFrame, pd.Series],
start_time: Union[str, pd.Timestamp] = None,
end_time: Union[str, pd.Timestamp] = None,
method: Union[str, Callable] = "last",
method_kwargs: dict = {},
):
"""
Resample value from time-series data
    - If `feature` has MultiIndex[instrument, datetime], apply the `method` to each instrument's data with datetime in [start_time, end_time]
Example:
.. code-block::
print(feature)
$close $volume
instrument datetime
SH600000 2010-01-04 86.778313 16162960.0
2010-01-05 87.433578 28117442.0
2010-01-06 85.713585 23632884.0
2010-01-07 83.788803 20813402.0
2010-01-08 84.730675 16044853.0
SH600655 2010-01-04 2699.567383 158193.328125
2010-01-08 2612.359619 77501.406250
2010-01-11 2712.982422 160852.390625
2010-01-12 2788.688232 164587.937500
2010-01-13 2790.604004 145460.453125
            print(resam_ts_data(feature, start_time="2010-01-04", end_time="2010-01-05", method="last"))
$close $volume
instrument
SH600000 87.433578 28117442.0
SH600655 2699.567383 158193.328125
- Else, the `feature` should have Index[datetime], just apply the `method` to `feature` directly
Example:
.. code-block::
print(feature)
$close $volume
datetime
2010-01-04 86.778313 16162960.0
2010-01-05 87.433578 28117442.0
2010-01-06 85.713585 23632884.0
2010-01-07 83.788803 20813402.0
2010-01-08 84.730675 16044853.0
print(resam_ts_data(feature, start_time="2010-01-04", end_time="2010-01-05", method="last"))
$close 87.433578
$volume 28117442.0
print(resam_ts_data(feature['$close'], start_time="2010-01-04", end_time="2010-01-05", method="last"))
87.433578
Parameters
----------
ts_feature : Union[pd.DataFrame, pd.Series]
Raw time-series feature to be resampled
start_time : Union[str, pd.Timestamp], optional
start sampling time, by default None
end_time : Union[str, pd.Timestamp], optional
end sampling time, by default None
method : Union[str, Callable], optional
        sampling method, applied to each stock's sliced series data, by default "last"
        - If method is a str or a callable function, it should be an attribute of SeriesGroupBy or DataFrameGroupBy, and groupby.method is applied to the sliced time-series data
        - If method is None, do nothing for the sliced time-series data.
method_kwargs : dict, optional
arguments of method, by default {}
Returns
-------
The resampled DataFrame/Series/value, return None when the resampled data is empty.
"""
selector_datetime = slice(start_time, end_time)
from ..data.dataset.utils import get_level_index # pylint: disable=C0415
feature = lazy_sort_index(ts_feature)
datetime_level = get_level_index(feature, level="datetime") == 0
if datetime_level:
feature = feature.loc[selector_datetime]
else:
feature = feature.loc(axis=0)[(slice(None), selector_datetime)]
if feature.empty:
return None
if isinstance(feature.index, pd.MultiIndex):
if callable(method):
method_func = method
return feature.groupby(level="instrument").apply(method_func, **method_kwargs)
elif isinstance(method, str):
return getattr(feature.groupby(level="instrument"), method)(**method_kwargs)
else:
if callable(method):
method_func = method
return method_func(feature, **method_kwargs)
elif isinstance(method, str):
return getattr(feature, method)(**method_kwargs)
return feature
def get_valid_value(series, last=True):
"""get the first/last not nan value of pd.Series with single level index
Parameters
----------
series : pd.Series
series should not be empty
last : bool, optional
whether to get the last valid value, by default True
- if last is True, get the last valid value
- else, get the first valid value
Returns
-------
Nan | float
the first/last valid value
"""
return series.fillna(method="ffill").iloc[-1] if last else series.fillna(method="bfill").iloc[0]
def _ts_data_valid(ts_feature, last=False):
"""get the first/last not nan value of pd.Series|DataFrame with single level index"""
if isinstance(ts_feature, pd.DataFrame):
return ts_feature.apply(lambda column: get_valid_value(column, last=last))
elif isinstance(ts_feature, pd.Series):
return get_valid_value(ts_feature, last=last)
else:
raise TypeError(f"ts_feature should be pd.DataFrame/Series, not {type(ts_feature)}")
ts_data_last = partial(_ts_data_valid, last=True)
ts_data_first = partial(_ts_data_valid, last=False)
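# A minimal usage sketch of resam_ts_data (illustrative only, not part of the upstream
# module; it assumes qlib is installed, since the helpers imported inside the function
# need the package context). The frame mirrors the MultiIndex layout in the docstring.
def _resam_ts_data_demo():
    index = pd.MultiIndex.from_product(
        [["SH600000"], pd.date_range("2010-01-04", periods=3)],
        names=["instrument", "datetime"],
    )
    feature = pd.DataFrame({"$close": [86.78, 87.43, 85.71]}, index=index)
    # A string method resolves to DataFrameGroupBy.last on each instrument's slice.
    print(resam_ts_data(feature, "2010-01-04", "2010-01-05", method="last"))
    # A callable is applied per instrument; ts_data_last keeps the last non-NaN value.
    print(resam_ts_data(feature, "2010-01-04", "2010-01-05", method=ts_data_last))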
| 39.1125 | 176 | 0.623415 |
8b910030e3fe69e09948f83e72934cb7c7e36c3d | 11,261 | py | Python | workspaceclient/tests/osc/v1/test_policy.py | huaweicloudsdk/osc_workspace | ced0c58f724aa04137132da0116e866f320978ec | ["Apache-2.0"] | 4 | 2017-02-28T08:06:01.000Z | 2021-01-30T14:00:32.000Z | workspaceclient/tests/osc/v1/test_policy.py | huaweicloudsdk/osc_workspace | ced0c58f724aa04137132da0116e866f320978ec | ["Apache-2.0"] | 13 | 2017-02-17T06:31:56.000Z | 2017-04-19T01:57:53.000Z | workspaceclient/tests/osc/v1/test_policy.py | Huawei/OpenStackClient_Workspace | ced0c58f724aa04137132da0116e866f320978ec | ["Apache-2.0"] | 2 | 2017-07-07T02:24:36.000Z | 2017-09-18T22:04:23.000Z |
# Copyright 2016 Huawei, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import mock
from workspaceclient.common import resource as base_resource
from workspaceclient.osc.v1 import policy
from workspaceclient.tests import base
from workspaceclient.v1 import policy_mgr
from workspaceclient.v1 import resource
class TestPolicy(base.WorkspaceV1BaseTestCase):
instance = {
"usb_port_redirection": {
"enable": True,
"options": {
"usb_image_enable": False,
"usb_video_enable": True,
"usb_printer_enable": False,
"usb_storage_enable": True,
"usb_smart_card_enable": False
}
},
"printer_redirection": {
"enable": True,
"options": {
"sync_client_default_printer_enable": False,
"universal_printer_driver": "Universal Printing PCL 6"
}
},
"file_redirection": {
"redirection_mode": "READ_AND_WRITE",
"options": {
"fixed_drive_enable": True,
"removable_drive_enable": False,
"cd_rom_drive_enable": True,
"network_drive_enable": True
}
},
"clipboard_redirection": "TWO_WAY_ENABLED",
"hdp_plus": {
"hdp_plus_enable": False,
"display_level": "QUALITY_FIRST",
"options": {
"bandwidth": 24315,
"frame_rate": 18,
"video_frame_rate": 20,
"smoothing_factor": 58,
"lossy_compression_quality": 88
}
}
}
def __init__(self, *args, **kwargs):
super(TestPolicy, self).__init__(*args, **kwargs)
self._policy = None
def setUp(self):
super(TestPolicy, self).setUp()
self._policy = resource.Policy(None, self.instance, attached=True)
class TestPolicyShow(TestPolicy):
def setUp(self):
super(TestPolicyShow, self).setUp()
self.cmd = policy.ShowPolicy(self.app, None)
@mock.patch.object(policy_mgr.PolicyManager, "_get")
def test_desktop_show_with_computer_name(self, mocked_get):
self.check_parser(self.cmd, [], ())
mocked_get.return_value = self._policy
columns, data = self.cmd.take_action(None)
expect_data = (
'Enabled', 'Disabled', 'Enabled', 'Disabled', 'Enabled',
'Disabled', 'Enabled', 'Disabled', 'Universal Printing PCL 6',
'READ_AND_WRITE', 'Enabled', 'Disabled', 'Enabled', 'Enabled',
'TWO_WAY_ENABLED', 'Disabled', 'QUALITY_FIRST', 24315, 18, 20,
58, 88)
self.assertEqual(expect_data, data)
class TestPolicyEdit(TestPolicy):
def setUp(self):
super(TestPolicyEdit, self).setUp()
self.cmd = policy.EditPolicy(self.app, None)
@mock.patch.object(policy_mgr.PolicyManager, "_update_all")
def test_enable_redirection_options(self, mocked_put):
args = [
"--enable-usb-port-redirection",
"--enable-usb-image",
"--enable-usb-video",
"--enable-usb-printer",
"--enable-usb-storage",
"--enable-usb-smart-card",
"--enable-printer-redirection",
"--enable-sync-client-default-printer",
"--universal-printer-driver", "Universal Printing PCL 6",
"--file-redirection-mode", "READ_AND_WRITE",
"--enable-fixed-drive",
"--enable-removable-drive",
"--enable-cd-rom-drive",
"--enable-network-drive",
"--enable-network-drive",
"--clipboard-redirection", "DISABLED",
"--enable-hdp-plus",
"--display-level", "QUALITY_FIRST",
"--bandwidth", "24315",
"--frame-rate", "18",
"--video-frame-rate", "20",
"--smoothing-factor", "58",
"--lossy-compression-quality", "88"
]
verify_args = [
("enable_usb_port_redirection", True),
("enable_usb_image", True),
("enable_usb_video", True),
("enable_usb_printer", True),
("enable_usb_storage", True),
("enable_usb_smart_card", True),
("enable_printer_redirection", True),
("enable_sync_client_default_printer", True),
("universal_printer_driver", "Universal Printing PCL 6"),
("file_redirection_mode", "READ_AND_WRITE"),
("enable_fixed_drive", True),
("enable_removable_drive", True),
("enable_cd_rom_drive", True),
("enable_network_drive", True),
("clipboard_redirection", "DISABLED"),
("enable_hdp_plus", True),
("display_level", "QUALITY_FIRST"),
("bandwidth", 24315),
("frame_rate", 18),
("video_frame_rate", 20),
("smoothing_factor", 58),
("lossy_compression_quality", 88),
]
parsed_args = self.check_parser(self.cmd, args, verify_args)
mocked_put.return_value = base_resource.StrWithMeta("", "Request-Id")
result = self.cmd.take_action(parsed_args)
json = {
"policies": {
"usb_port_redirection": {
"enable": True,
"options": {
"usb_image_enable": True,
"usb_video_enable": True,
"usb_printer_enable": True,
"usb_storage_enable": True,
"usb_smart_card_enable": True,
}
},
"printer_redirection": {
"enable": True,
"options": {
"sync_client_default_printer_enable": True,
"universal_printer_driver": "Universal Printing PCL 6"
}
},
"file_redirection": {
"redirection_mode": "READ_AND_WRITE",
"options": {
"fixed_drive_enable": True,
"removable_drive_enable": True,
"cd_rom_drive_enable": True,
"network_drive_enable": True
}
},
"clipboard_redirection": "DISABLED",
"hdp_plus": {
"hdp_plus_enable": True,
"display_level": "QUALITY_FIRST",
"options": {
"lossy_compression_quality": 88
}
}
}
}
mocked_put.assert_called_once_with(
"/policies", json=json
)
self.assertEqual('done', result)
@mock.patch.object(policy_mgr.PolicyManager, "_update_all")
def test_disable_redirection_options(self, mocked_put):
args = [
"--disable-usb-port-redirection",
"--enable-usb-image",
"--enable-usb-video",
"--enable-usb-printer",
"--enable-usb-storage",
"--enable-usb-smart-card",
"--disable-printer-redirection",
"--enable-sync-client-default-printer",
"--universal-printer-driver", "Universal Printing PCL 6",
"--file-redirection-mode", "DISABLED",
"--enable-fixed-drive",
"--enable-removable-drive",
"--enable-cd-rom-drive",
"--enable-network-drive",
"--enable-network-drive",
"--clipboard-redirection", "DISABLED",
]
verify_args = [
("enable_usb_port_redirection", False),
("enable_usb_image", True),
("enable_usb_video", True),
("enable_usb_printer", True),
("enable_usb_storage", True),
("enable_usb_smart_card", True),
("enable_printer_redirection", False),
("enable_sync_client_default_printer", True),
("universal_printer_driver", "Universal Printing PCL 6"),
("file_redirection_mode", "DISABLED"),
("enable_fixed_drive", True),
("enable_removable_drive", True),
("enable_cd_rom_drive", True),
("enable_network_drive", True),
("clipboard_redirection", "DISABLED"),
]
parsed_args = self.check_parser(self.cmd, args, verify_args)
mocked_put.return_value = base_resource.StrWithMeta("", "Request-Id")
result = self.cmd.take_action(parsed_args)
json = {
"policies": {
"usb_port_redirection": {
"enable": False,
},
"printer_redirection": {
"enable": False,
},
"file_redirection": {
"redirection_mode": "DISABLED",
},
"clipboard_redirection": "DISABLED",
}
}
mocked_put.assert_called_once_with(
"/policies", json=json
)
self.assertEqual('done', result)
@mock.patch.object(policy_mgr.PolicyManager, "_update_all")
def test_hdp_plus_disable(self, mocked_put):
args = [
"--disable-hdp-plus",
"--display-level", "QUALITY_FIRST",
"--bandwidth", "24315",
"--frame-rate", "18",
"--video-frame-rate", "20",
"--smoothing-factor", "58",
"--lossy-compression-quality", "88"
]
verify_args = [
("enable_hdp_plus", False),
("display_level", "QUALITY_FIRST"),
("bandwidth", 24315),
("frame_rate", 18),
("video_frame_rate", 20),
("smoothing_factor", 58),
("lossy_compression_quality", 88),
]
parsed_args = self.check_parser(self.cmd, args, verify_args)
mocked_put.return_value = base_resource.StrWithMeta("", "Request-Id")
result = self.cmd.take_action(parsed_args)
json = {
"policies": {
"hdp_plus": {
"hdp_plus_enable": False,
"display_level": "QUALITY_FIRST",
"options": {
"bandwidth": 24315,
"frame_rate": 18,
"video_frame_rate": 20,
"smoothing_factor": 58,
}
}
}
}
mocked_put.assert_called_once_with(
"/policies", json=json
)
self.assertEqual('done', result)
| 35.523659 | 78 | 0.523311 |
3dc899db97800483246d8c934eacf9ae7a922518 | 2,721 | py | Python | scripts/calculate_coverages.py | ccivit/DeepMoji | 98f498956951bf2da8eeb098834a71b6729535cc | ["MIT"] | 1,401 | 2017-08-03T07:03:32.000Z | 2022-03-28T15:26:16.000Z | scripts/calculate_coverages.py | ccivit/DeepMoji | 98f498956951bf2da8eeb098834a71b6729535cc | ["MIT"] | 62 | 2017-08-08T11:15:30.000Z | 2022-03-15T17:26:57.000Z | scripts/calculate_coverages.py | ccivit/DeepMoji | 98f498956951bf2da8eeb098834a71b6729535cc | ["MIT"] | 343 | 2017-08-05T17:39:40.000Z | 2022-03-26T17:38:01.000Z |
from __future__ import print_function
import pickle
import json
import csv
import sys
# Allow us to import the deepmoji directory
from os.path import dirname, abspath
sys.path.insert(0, dirname(dirname(abspath(__file__))))
from deepmoji.sentence_tokenizer import SentenceTokenizer, coverage
OUTPUT_PATH = 'coverage.csv'
DATASET_PATHS = [
'../data/Olympic/raw.pickle',
'../data/PsychExp/raw.pickle',
'../data/SCv1/raw.pickle',
'../data/SCv2-GEN/raw.pickle',
'../data/SE0714/raw.pickle',
# '../data/SE1604/raw.pickle', # Excluded due to Twitter's ToS
'../data/SS-Twitter/raw.pickle',
'../data/SS-Youtube/raw.pickle',
]
with open('../model/vocabulary.json', 'r') as f:
vocab = json.load(f)
results = []
for p in DATASET_PATHS:
coverage_result = [p]
print('Calculating coverage for {}'.format(p))
with open(p) as f:
s = pickle.load(f)
# Decode data
try:
s['texts'] = [unicode(x) for x in s['texts']]
except UnicodeDecodeError:
s['texts'] = [x.decode('utf-8') for x in s['texts']]
# Own
st = SentenceTokenizer({}, 30)
tests, dicts, _ = st.split_train_val_test(s['texts'], s['info'],
[s['train_ind'],
s['val_ind'],
s['test_ind']],
extend_with=10000)
coverage_result.append(coverage(tests[2]))
# Last
st = SentenceTokenizer(vocab, 30)
tests, dicts, _ = st.split_train_val_test(s['texts'], s['info'],
[s['train_ind'],
s['val_ind'],
s['test_ind']],
extend_with=0)
coverage_result.append(coverage(tests[2]))
# Full
st = SentenceTokenizer(vocab, 30)
tests, dicts, _ = st.split_train_val_test(s['texts'], s['info'],
[s['train_ind'],
s['val_ind'],
s['test_ind']],
extend_with=10000)
coverage_result.append(coverage(tests[2]))
results.append(coverage_result)
with open(OUTPUT_PATH, 'wb') as csvfile:
writer = csv.writer(csvfile, delimiter='\t', lineterminator='\n')
writer.writerow(['Dataset', 'Own', 'Last', 'Full'])
for i, row in enumerate(results):
try:
writer.writerow(row)
except Exception:
print("Exception at row {}!".format(i))
print('Saved to {}'.format(OUTPUT_PATH))
| 34.0125 | 69 | 0.513414 |
15c0cf6925665089f631e41cb23e77786cdc52eb | 7,209 | py | Python | tools/make_cmakelists.py | chwarr/upb | de800540183195751c80f551c665f6ed228bbd97 | ["BSD-3-Clause"] | null | null | null | tools/make_cmakelists.py | chwarr/upb | de800540183195751c80f551c665f6ed228bbd97 | ["BSD-3-Clause"] | null | null | null | tools/make_cmakelists.py | chwarr/upb | de800540183195751c80f551c665f6ed228bbd97 | ["BSD-3-Clause"] | null | null | null |
#!/usr/bin/env python
"""TODO(haberman): DO NOT SUBMIT without one-line documentation for make_cmakelists.
TODO(haberman): DO NOT SUBMIT without a detailed description of make_cmakelists.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import textwrap
import os
def StripColons(deps):
return map(lambda x: x[1:], deps)
def IsSourceFile(name):
return name.endswith(".c") or name.endswith(".cc")
class BuildFileFunctions(object):
def __init__(self, converter):
self.converter = converter
def _add_deps(self, kwargs, keyword=""):
if "deps" not in kwargs:
return
self.converter.toplevel += "target_link_libraries(%s%s\n %s)\n" % (
kwargs["name"],
keyword,
"\n ".join(StripColons(kwargs["deps"]))
)
def load(self, *args):
pass
def cc_library(self, **kwargs):
if kwargs["name"].endswith("amalgamation"):
return
if kwargs["name"] == "upbc_generator":
return
if kwargs["name"] == "lupb":
return
files = kwargs.get("srcs", []) + kwargs.get("hdrs", [])
found_files = []
for file in files:
if os.path.isfile(file):
found_files.append(file)
elif os.path.isfile("generated_for_cmake/" + file):
found_files.append("generated_for_cmake/" + file)
else:
print("Warning: no such file: " + file)
if list(filter(IsSourceFile, files)):
# Has sources, make this a normal library.
self.converter.toplevel += "add_library(%s\n %s)\n" % (
kwargs["name"],
"\n ".join(found_files)
)
self._add_deps(kwargs)
else:
# Header-only library, have to do a couple things differently.
# For some info, see:
# http://mariobadr.com/creating-a-header-only-library-with-cmake.html
self.converter.toplevel += "add_library(%s INTERFACE)\n" % (
kwargs["name"]
)
self._add_deps(kwargs, " INTERFACE")
def cc_binary(self, **kwargs):
pass
def cc_test(self, **kwargs):
# Disable this until we properly support upb_proto_library().
# self.converter.toplevel += "add_executable(%s\n %s)\n" % (
# kwargs["name"],
# "\n ".join(kwargs["srcs"])
# )
# self.converter.toplevel += "add_test(NAME %s COMMAND %s)\n" % (
# kwargs["name"],
# kwargs["name"],
# )
# if "data" in kwargs:
# for data_dep in kwargs["data"]:
# self.converter.toplevel += textwrap.dedent("""\
# add_custom_command(
# TARGET %s POST_BUILD
# COMMAND ${CMAKE_COMMAND} -E copy
# ${CMAKE_SOURCE_DIR}/%s
# ${CMAKE_CURRENT_BINARY_DIR}/%s)\n""" % (
# kwargs["name"], data_dep, data_dep
# ))
# self._add_deps(kwargs)
pass
def py_library(self, **kwargs):
pass
def py_binary(self, **kwargs):
pass
def lua_proto_library(self, **kwargs):
pass
def sh_test(self, **kwargs):
pass
def make_shell_script(self, **kwargs):
pass
def exports_files(self, files, **kwargs):
pass
def proto_library(self, **kwargs):
pass
def cc_proto_library(self, **kwargs):
pass
def generated_file_staleness_test(self, **kwargs):
pass
def upb_amalgamation(self, **kwargs):
pass
def upb_proto_library(self, **kwargs):
pass
def upb_proto_reflection_library(self, **kwargs):
pass
def upb_proto_srcs(self, **kwargs):
pass
def genrule(self, **kwargs):
pass
def config_setting(self, **kwargs):
pass
def select(self, arg_dict):
return []
def glob(self, *args):
return []
def licenses(self, *args):
pass
def filegroup(self, **kwargs):
pass
def map_dep(self, arg):
return arg
class WorkspaceFileFunctions(object):
def __init__(self, converter):
self.converter = converter
def load(self, *args):
pass
def workspace(self, **kwargs):
self.converter.prelude += "project(%s)\n" % (kwargs["name"])
def http_archive(self, **kwargs):
pass
def git_repository(self, **kwargs):
pass
def bazel_version_repository(self, **kwargs):
pass
def upb_deps(self):
pass
class Converter(object):
def __init__(self):
self.prelude = ""
self.toplevel = ""
self.if_lua = ""
def convert(self):
return self.template % {
"prelude": converter.prelude,
"toplevel": converter.toplevel,
}
template = textwrap.dedent("""\
# This file was generated from BUILD using tools/make_cmakelists.py.
cmake_minimum_required(VERSION 3.1)
if(${CMAKE_VERSION} VERSION_LESS 3.12)
cmake_policy(VERSION ${CMAKE_MAJOR_VERSION}.${CMAKE_MINOR_VERSION})
else()
cmake_policy(VERSION 3.12)
endif()
cmake_minimum_required (VERSION 3.0)
cmake_policy(SET CMP0048 NEW)
%(prelude)s
# Prevent CMake from setting -rdynamic on Linux (!!).
SET(CMAKE_SHARED_LIBRARY_LINK_C_FLAGS "")
SET(CMAKE_SHARED_LIBRARY_LINK_CXX_FLAGS "")
# Set default build type.
if(NOT CMAKE_BUILD_TYPE)
message(STATUS "Setting build type to 'RelWithDebInfo' as none was specified.")
set(CMAKE_BUILD_TYPE "RelWithDebInfo" CACHE STRING
"Choose the type of build, options are: Debug Release RelWithDebInfo MinSizeRel."
FORCE)
endif()
# When using Ninja, compiler output won't be colorized without this.
include(CheckCXXCompilerFlag)
CHECK_CXX_COMPILER_FLAG(-fdiagnostics-color=always SUPPORTS_COLOR_ALWAYS)
if(SUPPORTS_COLOR_ALWAYS)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fdiagnostics-color=always")
endif()
# Implement ASAN/UBSAN options
if(UPB_ENABLE_ASAN)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsanitize=address")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fsanitize=address")
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fsanitize=address")
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -fsanitize=address")
endif()
if(UPB_ENABLE_UBSAN)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsanitize=undefined")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fsanitize=address")
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fsanitize=address")
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -fsanitize=address")
endif()
include_directories(.)
include_directories(generated_for_cmake)
include_directories(${CMAKE_CURRENT_BINARY_DIR})
if(APPLE)
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -undefined dynamic_lookup -flat_namespace")
elseif(UNIX)
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--build-id")
endif()
enable_testing()
%(toplevel)s
""")
data = {}
converter = Converter()
def GetDict(obj):
ret = {}
for k in dir(obj):
if not k.startswith("_"):
      ret[k] = getattr(obj, k)
return ret
globs = GetDict(converter)
exec(open("WORKSPACE").read(), GetDict(WorkspaceFileFunctions(converter)))
exec(open("BUILD").read(), GetDict(BuildFileFunctions(converter)))
with open(sys.argv[1], "w") as f:
f.write(converter.convert())
| 25.931655 | 109 | 0.650992 |
38de0f117e86e1345c99ccdef4f23b8ecbb2f209 | 1,378 | py | Python | main.py | tinytengu/postman | 5f79e548d9de915ddbec74ab8cf255ee0c20792a | ["Apache-2.0"] | 1 | 2021-08-18T14:10:19.000Z | 2021-08-18T14:10:19.000Z | main.py | tinytengu/postman | 5f79e548d9de915ddbec74ab8cf255ee0c20792a | ["Apache-2.0"] | null | null | null | main.py | tinytengu/postman | 5f79e548d9de915ddbec74ab8cf255ee0c20792a | ["Apache-2.0"] | null | null | null |
import requests
import json
from flask import Flask, render_template, request
class CustomFlask(Flask):
jinja_options = Flask.jinja_options.copy()
jinja_options.update({
'variable_start_string': '%%',
'variable_end_string': '%%',
})
app = CustomFlask(__name__)
@app.route('/')
def index():
return render_template('index.html')
@app.route('/headers', methods=['GET'])
def headers():
return dict(requests.utils.default_headers())
@app.route('/send', methods=['POST'])
def send():
methods = {
'GET': requests.get,
'POST': requests.post,
'PUT': requests.put,
'DELETE': requests.delete
}
data = json.loads(request.data)
print(data)
response = methods[data.get('method', 'GET')](
data.get('url'),
data=data.get('data', {}),
params=data.get('params', {}),
headers=data.get('headers', {})
)
try:
json_data = response.json()
except json.decoder.JSONDecodeError:
json_data = {}
return {
'url': response.url,
'status': f'{response.status_code} {requests.status_codes._codes[response.status_code][0]}',
'headers': dict(response.headers),
'raw': response.content.decode('utf-8'),
'json': json_data
}
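# A client-side usage sketch (illustrative; it assumes the app above is running locally
# on port 80 and uses httpbin.org purely as an example target for the proxied request).
def demo_send_request():
    payload = {
        "method": "GET",
        "url": "https://httpbin.org/get",
        "params": {"q": "postman"},
        "headers": {},
    }
    result = requests.post("http://127.0.0.1/send", data=json.dumps(payload)).json()
    # /send echoes the final URL, status line, response headers, raw body and parsed JSON.
    print(result["status"], result["url"])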
if __name__ == '__main__':
app.run(host='127.0.0.1', port=80, debug=True)
| 21.873016 | 100 | 0.598694 |
353f98445a1863462c2e4301ed01b9f779673c38 | 132 | py | Python | test/output/079.py | EliRibble/pyfmt | e84a5531a7c06703eddd9dbc2072b0c8deae8c57 | ["MIT"] | null | null | null | test/output/079.py | EliRibble/pyfmt | e84a5531a7c06703eddd9dbc2072b0c8deae8c57 | ["MIT"] | null | null | null | test/output/079.py | EliRibble/pyfmt | e84a5531a7c06703eddd9dbc2072b0c8deae8c57 | ["MIT"] | null | null | null |
def decorator(f, g):
return print
class A():
@decorator("foo", max)
@decorator("bar", min)
def my_function():
print("hello")
| 14.666667 | 23 | 0.643939 |
caaf45551302a900cddb2766e9aa86b06be88035 | 248 | py | Python | eggs/bx_python-0.7.1_7b95ff194725-py2.7-linux-x86_64-ucs4.egg/EGG-INFO/scripts/nib_length.py | bopopescu/phyG | 023f505b705ab953f502cbc55e90612047867583 | ["CC-BY-3.0"] | null | null | null | eggs/bx_python-0.7.1_7b95ff194725-py2.7-linux-x86_64-ucs4.egg/EGG-INFO/scripts/nib_length.py | bopopescu/phyG | 023f505b705ab953f502cbc55e90612047867583 | ["CC-BY-3.0"] | null | null | null | eggs/bx_python-0.7.1_7b95ff194725-py2.7-linux-x86_64-ucs4.egg/EGG-INFO/scripts/nib_length.py | bopopescu/phyG | 023f505b705ab953f502cbc55e90612047867583 | ["CC-BY-3.0"] | 1 | 2020-07-25T21:03:18.000Z | 2020-07-25T21:03:18.000Z |
#!/afs/bx.psu.edu/project/pythons/linux-x86_64-ucs4/bin/python2.7
"""
Print the number of bases in a nib file.
usage: %prog nib_file
"""
from bx.seq import nib as seq_nib
import sys
nib = seq_nib.NibFile( file( sys.argv[1] ) )
print nib.length
| 17.714286 | 65 | 0.717742 |
e084bab9ab919c0d0a62b14928dda23911ae6751 | 16,143 | py | Python | leo/plugins/mod_leo2ascd.py | ATikhonov2/leo-editor | 225aac990a9b2804aaa9dea29574d6e072e30474 | ["MIT"] | 1 | 2021-02-08T21:22:38.000Z | 2021-02-08T21:22:38.000Z | leo/plugins/mod_leo2ascd.py | ATikhonov2/leo-editor | 225aac990a9b2804aaa9dea29574d6e072e30474 | ["MIT"] | null | null | null | leo/plugins/mod_leo2ascd.py | ATikhonov2/leo-editor | 225aac990a9b2804aaa9dea29574d6e072e30474 | ["MIT"] | null | null | null |
#@+leo-ver=5-thin
#@+node:ekr.20101110093449.5822: * @file ../plugins/mod_leo2ascd.py
import re
import os
from leo.core import leoGlobals as g
from leo.core import leoPlugins
#@+<< patterns >>
#@+node:ekr.20101110094152.5834: ** << patterns >> (mod_leo2ascd.py)
# compile the patterns we'll be searching for frequently
patternSectionName = re.compile(r"\<\< *(.+?) *\>\>")
patternSectionDefinition = re.compile(r"(\<\< *)(.+?)( *\>\>)(=)")
patternDirective = re.compile(r"^@")
patternCodeDirective = re.compile(r"^(@c *$)|(@code)")
patternDocDirective = re.compile(r"^(@ |@doc)(.*)")
patternRootDirective = re.compile(r"^@root\s+(.+)")
patternAscDirective = re.compile(r"^@asc")
# New Leo2AsciiDoc directives
patternAscDirectiveConfig = re.compile(r'^@ascconfig\W+(\w+)\s+(\S+)')
patternAscDirectiveFile = re.compile(r'^@ascfile *"*([\w\\/\.]*)"*')
patternAscDirectiveExit = re.compile(r"^@ascexit")
patternAscDirectiveIgnore = re.compile(r"^@ascignore")
patternAscDirectiveSkip = re.compile(r"^@ascskip")
patternAscDirectiveSkipToggle = re.compile(r"^@ascskip\s*(\w+)+.*")
#@-<< patterns >>
#@+others
#@+node:ekr.20140920145803.17999: ** init
def init():
'''Return True if the plugin has loaded successfully.'''
leoPlugins.registerHandler(('new','menu2'), CreateAscMenu)
g.plugin_signon(__name__)
return True
#@+node:ekr.20140920145803.18000: ** Functions
#@+node:ekr.20101110094152.5837: *3* CodeChunk
def CodeChunk(text, width=72):
"""Split a line of text into a list of chunks not longer
than width."""
chunkList = []
chunkStart = 0
chunkEnd = 0
lastSpacePosition = 0
shortWidth = width - 4
prefix = ''
# suffix = ' \\'
textLen = len(text)
if width > textLen:
chunkList.append(text)
else:
while chunkEnd < textLen:
if chunkList:
prefix = ' '
chunkEnd = chunkStart + shortWidth
if chunkEnd > textLen:
chunkList.append(prefix + text[chunkStart:])
chunkEnd = textLen # get out of jail
else:
lastSpacePosition = text.rfind(' ',chunkStart, chunkEnd +1)
if lastSpacePosition != -1: # success
chunkList.append(prefix + text[chunkStart:lastSpacePosition] + ' \\')
chunkStart = lastSpacePosition + 1
else:
chunkEnd = chunkStart + shortWidth
chunkList.append(prefix + text[chunkStart:chunkEnd] + ' \\')
chunkStart = chunkEnd
return chunkList
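# Worked example (illustrative input, not from the plugin): with width=10 the usable
# chunk width is width - 4 = 6, each split happens at the last space inside the current
# window, and every chunk except the final one gets a trailing " \" continuation marker:
#   CodeChunk("aaa bbb ccc ddd", width=10)
#   -> ['aaa \\', ' bbb \\', ' ccc \\', ' ddd']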
#@+node:ekr.20101110094152.5847: *3* CreateAscMenu
def CreateAscMenu(tag,keywords):
"""Create the Outline to AsciiDoc menu item in the Export menu."""
c = keywords.get('c')
if not c: return
exportMenu = c.frame.menu.getMenu('export')
table = (
("-", None, None),
("Export all to &AsciiDoc", "Alt+Shift+A", WriteAll),
("Export current tree to AsciiDoc","Alt+Shift+T", WriteTreeOfCurrentNode),
("Log all root and ascfile to log pane","Alt+Shift+L", WriteAllRoots),
)
c.frame.menu.createMenuEntries(exportMenu, table)
#@+node:ekr.20101110094152.5836: *3* GetAscFilename
def GetAscFilename(c,p):
'Checks a node for a filename directive.'
    # c is the Leo outline (commander); p is the position being checked.
ascFileName = None
lines = p.b.splitlines()
for line in lines:
containsAscFileDirective = patternAscDirectiveFile.match(line)
if containsAscFileDirective:
ascFileName = containsAscFileDirective.group(1)
if (ascFileName is not None):
base = os.path.split(c.mFileName)[0] # linux or windows
if (((base[0]=="/") and (ascFileName[0] != "/")) or
((base[1]==":") and (ascFileName[1] != ":"))):
# no full pathname specified
ascFileName = os.path.join(base, ascFileName)
Conf.GetCurrentOptions(c,p)
return ascFileName
#@+node:ekr.20101110094152.5835: *3* SectionUnderline
def SectionUnderline(h,level,v):
'Return a section underline string.'
asciiDocSectionLevels = int(Conf.current["asciiDocSectionLevels"])
if level < 0:
g.es("Section level is less than 1:\n %s" % v.headString())
level = 1
elif level > asciiDocSectionLevels - 1:
g.es("Section level is more than maximum Section Levels: %d\n %s" % (
asciiDocSectionLevels, v.headString()))
level = asciiDocSectionLevels - 1
str = Conf.current["headingUnderlines"][level] #'
return str*max(len(h),1)
#@+node:ekr.20101110094152.5843: *3* WriteAll
def WriteAll(c):
p = c.rootPosition()
while p:
ascFileN = GetAscFilename(c,p)
if ascFileN:
WriteTreeAsAsc(p,ascFileN)
p.moveToNodeAfterTree()
else:
p.moveToThreadNext()
#@+node:ekr.20101110094152.5845: *3* WriteAllRoots
def WriteAllRoots(c):
"Writes @root directive and/or @ascfile directive to log pane."
patternAscDirectiveFile = re.compile(r'^@ascfile')
patternRoot = re.compile(r'^@root')
g.es('Looking for @root or @ascfile.')
for p in c.all_positions():
printedHeading = False
for line in p.b.splitlines():
printLine = False
containsAscFileDirective = patternAscDirectiveFile.match(line)
if containsAscFileDirective:
printLine = True
containsRootDirective = patternRoot.match(line)
if containsRootDirective:
printLine = True
if printLine:
if not printedHeading:
g.es(p.h)
printedHeading = True
g.es(' ' + line)
#@+node:ekr.20101110094152.5839: *3* WriteNode
def WriteNode(v,startinglevel, ascFile):
'Writes the contents of the node v to the ascFile.'
containsAscIignore = None
skippingDocLines = False
startingCodeExtract = False
inCodeExtract = False
statusOfWriteOutputLine = None
def WriteOutputLine(lineString):
'Writes a line of text to the output file.'
try:
ascFile.write("%s\n" % lineString)
except IOError:
g.es("Could not write to output file: %s" % ascFile.name)
# statusOfWriteOutputLine = CV.END_PROGRAM
# Get the headline text.
h = v.headString()
markedupAsSection = patternSectionName.match(h)
if markedupAsSection:
h = markedupAsSection.group(1) # dump the angle brackets
# Put the body text into a list of lines.
bodyString = v.bodyString()
lines = bodyString.splitlines()
lastLinePrintedType = CV.LINE_WAS_NONE
# By default, nodes start with a code section.
pendinglineType = CV.LINE_PENDING_CODE
for line in lines:
containsRootDirective = None
containsSectionDefinition = patternSectionDefinition.match(line)
if containsSectionDefinition:
# dump the angle brackets, etc.
# line = containsSectionDefinition.group(2) + '\n' + \
# (SectionUnderline(containsSectionDefinition.group(2),2,v))
line = '.' + containsSectionDefinition.group(2)
pendinglineType = CV.LINE_PENDING_CODE
startingCodeExtract = True
containsCodeDirective = patternCodeDirective.match(line)
if containsCodeDirective:
pendinglineType = CV.LINE_PENDING_CODE
skippingDocLines = False
continue # don't print this line
containsDocDirective = patternDocDirective.match(line)
if containsDocDirective:
pendinglineType = CV.LINE_PENDING_DOC
if containsDocDirective.group(2):
# it is legal to have text on the same line
# as a doc directive.
line = containsDocDirective.group(2)
else:
continue
containsAscDirective = patternAscDirective.match(line)
if containsAscDirective:
containsAscIignore = patternAscDirectiveIgnore.match(line)
if containsAscIignore:
break
containsAscExit = patternAscDirectiveExit.match(line)
if containsAscExit:
break
containsAscSkip = patternAscDirectiveSkip.match(line)
if containsAscSkip:
containsAscSkipDirectiveToggle = patternAscDirectiveSkipToggle.match(line)
if containsAscSkipDirectiveToggle:
if containsAscSkipDirectiveToggle.group(1).lower() == "on":
skippingDocLines = True
elif containsAscSkipDirectiveToggle.group(1).lower() == "off":
skippingDocLines = False
continue
containsOtherDirective = patternDirective.match(line)
if containsOtherDirective:
containsRootDirective = patternRootDirective.match(line)
if containsRootDirective:
line = "*note*\nThe code sections that follow, when extracted from a " + \
"Leo outline, will be located in: %s\n*note*" % \
containsRootDirective.group(1)
else:
continue
# We have something to print, so print heading.
if lastLinePrintedType == CV.LINE_WAS_NONE:
if h and (Conf.current["PrintHeadings"] == "on"):
WriteOutputLine("\n\n%s" % h)
WriteOutputLine(SectionUnderline(h,v.level()-startinglevel,v))
lastLinePrintedType = CV.LINE_WAS_HEAD
if pendinglineType == CV.LINE_PENDING_DOC:
if lastLinePrintedType != CV.LINE_WAS_DOC and \
lastLinePrintedType != CV.LINE_WAS_HEAD:
WriteOutputLine("%s" % Conf.current["delimiterForCodeEnd"])
if inCodeExtract:
WriteOutputLine("\n%s" % Conf.current["delimiterForCodeSectionDefinition"])
inCodeExtract = False
lastLinePrintedType = CV.LINE_WAS_DOC
if skippingDocLines:
if not containsRootDirective: # always document a root directive
continue
if pendinglineType == CV.LINE_PENDING_CODE:
if lastLinePrintedType != CV.LINE_WAS_CODE:
if startingCodeExtract:
WriteOutputLine("\n%s" % line)
WriteOutputLine("%s" % Conf.current["delimiterForCodeSectionDefinition"])
inCodeExtract = True
line = ''
WriteOutputLine("%s" % Conf.current["delimiterForCodeStart"])
lastLinePrintedType = CV.LINE_WAS_CODE
if startingCodeExtract:
startingCodeExtract = False
continue
maxCodeLineLength = int(Conf.current["maxCodeLineLength"])
if len(line) <= maxCodeLineLength:
WriteOutputLine("%s" % line)
elif len(line.rstrip()) <= maxCodeLineLength:
WriteOutputLine("%s" % line.rstrip())
else:
lineList = CodeChunk(line, maxCodeLineLength)
for ln in lineList:
WriteOutputLine("%s" % ln)
lastLinePrintedType = CV.LINE_WAS_CODE
else:
WriteOutputLine("%s" % line)
if statusOfWriteOutputLine is not None:
return statusOfWriteOutputLine
if lastLinePrintedType == CV.LINE_WAS_CODE:
WriteOutputLine("%s" % Conf.current["delimiterForCodeEnd"])
if inCodeExtract:
WriteOutputLine("\n%s" % Conf.current["delimiterForCodeSectionDefinition"])
inCodeExtract = False
if containsAscIignore is not None:
return CV.NODE_IGNORE # flag ignore tree to caller
return None
#@+node:ekr.20101110094152.5838: *3* WriteTreeAsAsc
def WriteTreeAsAsc(p,fn):
'Writes the tree under p to the file ascFile'
try:
ascFile = open(fn,'w')
except IOError:
g.es("Could not open output file: %s" % fn)
return
stopHere = p.nodeAfterTree()
startinglevel = p.level()
while p and p != stopHere:
val = WriteNode(p,startinglevel,ascFile)
if val == CV.END_PROGRAM:
ascFile.close()
return
if val == CV.NODE_IGNORE:
p.moveToNodeAfterTree() # ran into an @ascignore
else:
p.moveToThreadNext()
ascFile.close()
g.es('wrote: %s' % fn)
#@+node:ekr.20101110094152.5841: *3* WriteTreeOfCurrentNode (not used)
def WriteTreeOfCurrentNode(c):
p = c.p
while p:
ascFileN = GetAscFilename(c,p)
if ascFileN:
break
else:
p.moveToParent()
if ascFileN is None:
g.es("Sorry, there was no @ascfile directive in this outline tree.")
else:
WriteTreeAsAsc(p,ascFileN)
#@+node:ekr.20101110094152.5824: ** class _AssignUniqueConstantValue
class _AssignUniqueConstantValue:
""" Provide unique value to be used as a constant """
#@+others
#@+node:ekr.20101110094152.5825: *3* __init__
def __init__(self):
self.UniqueInternalValue = 0
self.Assign_at_start()
#@+node:ekr.20101110094152.5826: *3* class ConstError
class ConstError(TypeError):
pass
#@+node:ekr.20101110094152.5827: *3* __setattr__
def __setattr__(self,name,value):
if name in self.__dict__:
if name != "UniqueInternalValue":
raise self.ConstError("Can't rebind const(%s)"%name)
self.__dict__[name]=value
#@+node:ekr.20101110094152.5828: *3* Assign_at_start
def Assign_at_start(self):
self.END_PROGRAM = self.Next() # signal abort
self.LINE_WAS_NONE = self.Next() # describe last line printed
self.LINE_WAS_CODE = self.Next()
self.LINE_WAS_DOC = self.Next()
self.LINE_WAS_HEAD = self.Next()
self.LINE_PENDING_NONE = self.Next() # describe next line to be printed
self.LINE_PENDING_CODE = self.Next()
self.LINE_PENDING_DOC = self.Next()
#@+node:ekr.20101110094152.5829: *3* Next
def Next(self):
self.UniqueInternalValue += 1
return(self.UniqueInternalValue)
#@-others
CV = _AssignUniqueConstantValue()
CV.NODE_IGNORE = CV.Next() # demo of adding in code
#@+node:ekr.20101110094152.5830: ** class _ConfigOptions
class _ConfigOptions:
"""Hold current configuration options."""
#@+others
#@+node:ekr.20101110094152.5831: *3* __init__
def __init__(self):
self.current = {}
self.default = {}
self.default["maxCodeLineLength"] = '76'
self.default["delimiterForCodeStart"] = '~-~--- code starts --------'
self.default["delimiterForCodeEnd"] = '~-~--- code ends ----------'
self.default["delimiterForCodeSectionDefinition"] = '*example*'
self.default["headingUnderlines"] = '=-~^+'
self.default["asciiDocSectionLevels"] = '5'
self.default["PrintHeadings"] = "on"
#@+node:ekr.20101110094152.5832: *3* __GetNodeOptions
def __GetNodeOptions(self, vnode):
bodyString = vnode.bodyString()
lines = bodyString.splitlines()
for line in lines:
containsAscConfigDirective = patternAscDirectiveConfig.match(line)
if containsAscConfigDirective:
# Leo uses unicode, convert to plain ascii
name = str(containsAscConfigDirective.group(1))
value = str(containsAscConfigDirective.group(2))
if name in self.current:
self.current[name] = value
else:
g.es(vnode.headString())
g.es(" No such config option: %s" % name)
#@+node:ekr.20101110094152.5833: *3* GetCurrentOptions
def GetCurrentOptions(self,c,p):
self.current.clear()
self.current = self.default.copy()
self.__GetNodeOptions(c.rootPosition())
self.__GetNodeOptions(p)
#@-others
Conf = _ConfigOptions()
#@-others
#@@language python
#@@tabwidth -4
#@-leo
| 40.662469 | 95 | 0.610295 |
b18dd47c0798a5130290b69b77b4af19af546ffd | 1,549 | py | Python | Other/d3PercentileCalc.py | Wanganator414/python | afa7a931bd9da8a5235a6cd889bfc417950165fe | ["MIT"] | 1 | 2019-08-17T03:31:19.000Z | 2019-08-17T03:31:19.000Z | Other/d3PercentileCalc.py | Wanganator414/python | afa7a931bd9da8a5235a6cd889bfc417950165fe | ["MIT"] | null | null | null | Other/d3PercentileCalc.py | Wanganator414/python | afa7a931bd9da8a5235a6cd889bfc417950165fe | ["MIT"] | null | null | null |
import math
import subprocess
# subprocess.call([r'C:\Users\ericw\Desktop\Programming\python\Other\Matrix.bat'])
#preformat input data via .bat
subprocess.call([r"C:\Users\ericw\Desktop\Programming\python\Other\preformat.bat"])
numbers = open("C:/Users/ericw/Desktop/Programming/python/Other/d3Formatted.txt", "r+")
numb = numbers.read()
# split by spaces and remove all empty values
print("Len:", len(numb))
numb = list(filter(None, numb.split(" ")))
print("Len2:", len(numb))
# print(numb)
numb = [float(x) for x in numb]
numb = sorted(numb)
# set file pointer to 0 so windows doesn't create /x00 placeholders
numbers.seek(0)
# delete text in file from cursor pt 0
numbers.truncate(0)
# repopulate with ordered data
for i in range(len(numb)):
numbers.write(str(numb[i]))
    if i != len(numb) - 1:  # avoid writing a trailing comma after the last value
numbers.write(",")
numbers.close()
def findPercentile(percentile, data):
"""Find value of given percentile given dataset (def3 percentile)
percentile given as integer value, such as 50, for 50%
data should be type array, sorted small to large
"""
# R=P/100 * (N+1)
R = (percentile / 100) * (len(data) + 1)
# R -> IR and FR etc 10.25 -> 10 and .25
FR, IR = math.modf(R)
IR = int(IR)
print(percentile / 100, "%")
print(len(data) + 1, "N+1")
print(f"IR:{IR}, FR:{FR}")
print(R)
if FR == 0:
print(f"FR=0, {percentile}% of the data = {data[IR-1]}")
else:
print(f"{percentile}% of the data = {abs(data[IR]-data[IR+1])*FR+data[IR]}")
findPercentile(25, numb)
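# Worked example with made-up data (illustrative): for the sorted list
# [15, 20, 35, 40, 50] and percentile 50, R = 0.5 * (5 + 1) = 3.0, so IR = 3 and FR = 0;
# since FR == 0 the function reports data[IR - 1] = data[2] = 35, i.e. the median.
findPercentile(50, [15, 20, 35, 40, 50])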
| 29.788462 | 87 | 0.646223 |
56efe5b1fa931a324284f14e9fa95b1b0d4af413 | 56,739 | py | Python | AppServer/google/appengine/api/appinfo.py | christianbaun/appscale | c24ddfd987c8eed8ed8864cc839cc0556a8af3c7 | ["Apache-2.0"] | 2 | 2018-10-09T17:48:12.000Z | 2019-01-15T10:18:19.000Z | AppServer/google/appengine/api/appinfo.py | christianbaun/appscale | c24ddfd987c8eed8ed8864cc839cc0556a8af3c7 | ["Apache-2.0"] | null | null | null | AppServer/google/appengine/api/appinfo.py | christianbaun/appscale | c24ddfd987c8eed8ed8864cc839cc0556a8af3c7 | ["Apache-2.0"] | 1 | 2022-02-20T20:57:12.000Z | 2022-02-20T20:57:12.000Z |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""AppInfo tools.
Library for working with AppInfo records in memory, store and load from
configuration files.
"""
import os
import logging
import re
import string
import wsgiref.util
if os.environ.get('APPENGINE_RUNTIME') == 'python27':
from google.appengine.api import pagespeedinfo
from google.appengine.api import validation
from google.appengine.api import yaml_builder
from google.appengine.api import yaml_listener
from google.appengine.api import yaml_object
else:
from google.appengine.api import pagespeedinfo
from google.appengine.api import validation
from google.appengine.api import yaml_builder
from google.appengine.api import yaml_listener
from google.appengine.api import yaml_object
from google.appengine.api import appinfo_errors
from google.appengine.api import backendinfo
_URL_REGEX = r'(?!\^)/.*|\..*|(\(.).*(?!\$).'
_FILES_REGEX = r'(?!\^).*(?!\$).'
_URL_ROOT_REGEX = r'/.*'
_DELTA_REGEX = r'([0-9]+)([DdHhMm]|[sS]?)'
_EXPIRATION_REGEX = r'\s*(%s)(\s+%s)*\s*' % (_DELTA_REGEX, _DELTA_REGEX)
_START_PATH = '/_ah/start'
_ALLOWED_SERVICES = ['mail', 'mail_bounce', 'xmpp_message', 'xmpp_subscribe',
'xmpp_presence', 'xmpp_error', 'channel_presence', 'rest',
'warmup']
_SERVICE_RE_STRING = '(' + '|'.join(_ALLOWED_SERVICES) + ')'
_PAGE_NAME_REGEX = r'^.+$'
_EXPIRATION_CONVERSIONS = {
'd': 60 * 60 * 24,
'h': 60 * 60,
'm': 60,
's': 1,
}
APP_ID_MAX_LEN = 100
SERVER_ID_MAX_LEN = 63
SERVER_VERSION_ID_MAX_LEN = 63
MAX_URL_MAPS = 100
PARTITION_SEPARATOR = '~'
DOMAIN_SEPARATOR = ':'
VERSION_SEPARATOR = '.'
SERVER_SEPARATOR = ':'
DEFAULT_SERVER = 'default'
PARTITION_RE_STRING = (r'[a-z\d\-]{1,%d}\%s' %
(APP_ID_MAX_LEN, PARTITION_SEPARATOR))
DOMAIN_RE_STRING = (r'(?!\-)[a-z\d\-\.]{1,%d}%s' %
(APP_ID_MAX_LEN, DOMAIN_SEPARATOR))
DISPLAY_APP_ID_RE_STRING = r'(?!-)[a-z\d\-]{0,%d}[a-z\d]' % (APP_ID_MAX_LEN - 1)
APPLICATION_RE_STRING = (r'(?:%s)?(?:%s)?%s' %
(PARTITION_RE_STRING,
DOMAIN_RE_STRING,
DISPLAY_APP_ID_RE_STRING))
SERVER_ID_RE_STRING = r'^(?!-)[a-z\d\-]{0,%d}[a-z\d]$' % (SERVER_ID_MAX_LEN - 1)
SERVER_VERSION_ID_RE_STRING = (r'^(?!-)[a-z\d\-]{0,%d}[a-z\d]$' %
(SERVER_VERSION_ID_MAX_LEN - 1))
_IDLE_INSTANCES_REGEX = r'^([\d]+|automatic)$'
_INSTANCES_REGEX = r'^[\d]+$'
_INSTANCE_CLASS_REGEX = r'^([fF](1|2|4|4_1G)|[bB](1|2|4|8|4_1G))$'
_CONCURRENT_REQUESTS_REGEX = r'^([1-9]\d*)$'
_PENDING_LATENCY_REGEX = r'^(\d+((\.\d{1,3})?s|ms)|automatic)$'
_IDLE_TIMEOUT_REGEX = r'^[\d]+(s|m)$'
ALTERNATE_HOSTNAME_SEPARATOR = '-dot-'
BUILTIN_NAME_PREFIX = 'ah-builtin'
RUNTIME_RE_STRING = r'[a-z][a-z0-9]{0,29}'
API_VERSION_RE_STRING = r'[\w.]{1,32}'
SOURCE_LANGUAGE_RE_STRING = r'[\w.\-]{1,32}'
HANDLER_STATIC_FILES = 'static_files'
HANDLER_STATIC_DIR = 'static_dir'
HANDLER_SCRIPT = 'script'
HANDLER_API_ENDPOINT = 'api_endpoint'
LOGIN_OPTIONAL = 'optional'
LOGIN_REQUIRED = 'required'
LOGIN_ADMIN = 'admin'
AUTH_FAIL_ACTION_REDIRECT = 'redirect'
AUTH_FAIL_ACTION_UNAUTHORIZED = 'unauthorized'
SECURE_HTTP = 'never'
SECURE_HTTPS = 'always'
SECURE_HTTP_OR_HTTPS = 'optional'
SECURE_DEFAULT = 'default'
REQUIRE_MATCHING_FILE = 'require_matching_file'
DEFAULT_SKIP_FILES = (r'^(.*/)?('
r'(#.*#)|'
r'(.*~)|'
r'(.*\.py[co])|'
r'(.*/RCS/.*)|'
r'(\..*)|'
r')$')
DEFAULT_NOBUILD_FILES = (r'^$')
LOGIN = 'login'
AUTH_FAIL_ACTION = 'auth_fail_action'
SECURE = 'secure'
URL = 'url'
POSITION = 'position'
POSITION_HEAD = 'head'
POSITION_TAIL = 'tail'
STATIC_FILES = 'static_files'
UPLOAD = 'upload'
STATIC_DIR = 'static_dir'
MIME_TYPE = 'mime_type'
SCRIPT = 'script'
EXPIRATION = 'expiration'
API_ENDPOINT = 'api_endpoint'
HTTP_HEADERS = 'http_headers'
APPLICATION_READABLE = 'application_readable'
APPLICATION = 'application'
SERVER = 'server'
AUTOMATIC_SCALING = 'automatic_scaling'
MANUAL_SCALING = 'manual_scaling'
BASIC_SCALING = 'basic_scaling'
VM = 'vm'
VM_SETTINGS = 'vm_settings'
VERSION = 'version'
MAJOR_VERSION = 'major_version'
MINOR_VERSION = 'minor_version'
RUNTIME = 'runtime'
API_VERSION = 'api_version'
SOURCE_LANGUAGE = 'source_language'
BUILTINS = 'builtins'
INCLUDES = 'includes'
HANDLERS = 'handlers'
LIBRARIES = 'libraries'
DEFAULT_EXPIRATION = 'default_expiration'
SKIP_FILES = 'skip_files'
NOBUILD_FILES = 'nobuild_files'
SERVICES = 'inbound_services'
DERIVED_FILE_TYPE = 'derived_file_type'
JAVA_PRECOMPILED = 'java_precompiled'
PYTHON_PRECOMPILED = 'python_precompiled'
ADMIN_CONSOLE = 'admin_console'
ERROR_HANDLERS = 'error_handlers'
BACKENDS = 'backends'
THREADSAFE = 'threadsafe'
API_CONFIG = 'api_config'
CODE_LOCK = 'code_lock'
ENV_VARIABLES = 'env_variables'
PAGESPEED = 'pagespeed'
INSTANCE_CLASS = 'instance_class'
MINIMUM_PENDING_LATENCY = 'min_pending_latency'
MAXIMUM_PENDING_LATENCY = 'max_pending_latency'
MINIMUM_IDLE_INSTANCES = 'min_idle_instances'
MAXIMUM_IDLE_INSTANCES = 'max_idle_instances'
MAXIMUM_CONCURRENT_REQUEST = 'max_concurrent_requests'
INSTANCES = 'instances'
MAX_INSTANCES = 'max_instances'
IDLE_TIMEOUT = 'idle_timeout'
PAGES = 'pages'
NAME = 'name'
ERROR_CODE = 'error_code'
FILE = 'file'
_ERROR_CODE_REGEX = r'(default|over_quota|dos_api_denial|timeout)'
ON = 'on'
ON_ALIASES = ['yes', 'y', 'True', 't', '1', 'true']
OFF = 'off'
OFF_ALIASES = ['no', 'n', 'False', 'f', '0', 'false']
class _VersionedLibrary(object):
"""A versioned library supported by App Engine."""
def __init__(self,
name,
url,
description,
supported_versions,
default_version=None,
deprecated_versions=None,
experimental_versions=None):
"""Initializer for _VersionedLibrary.
Args:
name: The name of the library e.g. "django".
url: The URL for the library's project page e.g.
"http://www.djangoproject.com/".
description: A short description of the library e.g. "A framework...".
supported_versions: A list of supported version names ordered by release
date e.g. ["v1", "v2", "v3"].
default_version: The version of the library that is enabled by default
in the Python 2.7 runtime or None if the library is not available by
default e.g. "v1".
deprecated_versions: A list of the versions of the library that have been
deprecated e.g. ["v1", "v2"].
experimental_versions: A list of the versions of the library that are
current experimental e.g. ["v1"].
"""
self.name = name
self.url = url
self.description = description
self.supported_versions = supported_versions
self.default_version = default_version
self.deprecated_versions = deprecated_versions or []
self.experimental_versions = experimental_versions or []
@property
def non_deprecated_versions(self):
return [version for version in self.supported_versions
if version not in self.deprecated_versions]
_SUPPORTED_LIBRARIES = [
_VersionedLibrary(
'django',
'http://www.djangoproject.com/',
'A full-featured web application framework for Python.',
['1.2', '1.3', '1.4', '1.5'],
experimental_versions=['1.5'],
),
_VersionedLibrary(
'jinja2',
'http://jinja.pocoo.org/docs/',
'A modern and designer friendly templating language for Python.',
['2.6']),
_VersionedLibrary(
'lxml',
'http://lxml.de/',
'A Pythonic binding for the C libraries libxml2 and libxslt.',
['2.3']),
_VersionedLibrary(
'markupsafe',
'http://pypi.python.org/pypi/MarkupSafe',
'A XML/HTML/XHTML markup safe string for Python.',
['0.15']),
_VersionedLibrary(
'matplotlib',
'http://matplotlib.org/',
'A 2D plotting library which produces publication-quality figures.',
['1.2.0'],
experimental_versions=['1.2.0'],
),
_VersionedLibrary(
'MySQLdb',
'http://mysql-python.sourceforge.net/',
'A Python DB API v2.0 compatible interface to MySQL.',
['1.2.4b4'],
experimental_versions=['1.2.4b4']
),
_VersionedLibrary(
'numpy',
'http://numpy.scipy.org/',
'A general-purpose library for array-processing.',
['1.6.1']),
_VersionedLibrary(
'PIL',
'http://www.pythonware.com/library/pil/handbook/',
'A library for creating and transforming images.',
['1.1.7']),
_VersionedLibrary(
'PyAMF',
'http://www.pyamf.org/',
'A library that provides (AMF) Action Message Format functionality.',
['0.6.1']),
_VersionedLibrary(
'pycrypto',
'https://www.dlitz.net/software/pycrypto/',
      'A library of cryptography functions such as random number generation.',
['2.3', '2.6'],
),
_VersionedLibrary(
'setuptools',
'http://pypi.python.org/pypi/setuptools',
'A library that provides package and module discovery capabilities.',
['0.6c11']),
_VersionedLibrary(
'ssl',
'http://docs.python.org/dev/library/ssl.html',
'The SSL socket wrapper built-in module.',
['2.7'],
experimental_versions=['2.7']),
_VersionedLibrary(
'webapp2',
'http://webapp-improved.appspot.com/',
'A lightweight Python web framework.',
['2.3', '2.5.1', '2.5.2'],
default_version='2.3',
deprecated_versions=['2.3']
),
_VersionedLibrary(
'webob',
'http://www.webob.org/',
'A library that provides wrappers around the WSGI request environment.',
['1.1.1', '1.2.3'],
default_version='1.1.1',
),
_VersionedLibrary(
'yaml',
'http://www.yaml.org/',
'A library for YAML serialization and deserialization.',
['3.10'],
default_version='3.10'
),
]
_NAME_TO_SUPPORTED_LIBRARY = dict((library.name, library)
for library in _SUPPORTED_LIBRARIES)
REQUIRED_LIBRARIES = {
('jinja2', '2.6'): [('markupsafe', '0.15'), ('setuptools', '0.6c11')],
('jinja2', 'latest'): [('markupsafe', 'latest'), ('setuptools', 'latest')],
('matplotlib', '1.1.1'): [('numpy', '1.6.1')],
('matplotlib', '1.2.0'): [('numpy', '1.6.1')],
('matplotlib', 'latest'): [('numpy', 'latest')],
}
_USE_VERSION_FORMAT = ('use one of: "%s" or "latest" '
'("latest" recommended for development only)')
_HTTP_SEPARATOR_CHARS = frozenset('()<>@,;:\\"/[]?={} \t')
_HTTP_TOKEN_CHARS = frozenset(string.printable[:-5]) - _HTTP_SEPARATOR_CHARS
_HTTP_TOKEN_RE = re.compile('[%s]+$' % re.escape(''.join(_HTTP_TOKEN_CHARS)))
_HTTP_REQUEST_HEADERS = frozenset([
'accept',
'accept-charset',
'accept-encoding',
'accept-language',
'authorization',
'expect',
'from',
'host',
'if-match',
'if-modified-since',
'if-none-match',
'if-range',
'if-unmodified-since',
'max-forwards',
'proxy-authorization',
'range',
'referer',
'te',
'user-agent',
])
_MAX_COOKIE_LENGTH = 4096
_MAX_URL_LENGTH = 2047
class HandlerBase(validation.Validated):
"""Base class for URLMap and ApiConfigHandler."""
ATTRIBUTES = {
URL: validation.Optional(_URL_REGEX),
LOGIN: validation.Options(LOGIN_OPTIONAL,
LOGIN_REQUIRED,
LOGIN_ADMIN,
default=LOGIN_OPTIONAL),
AUTH_FAIL_ACTION: validation.Options(AUTH_FAIL_ACTION_REDIRECT,
AUTH_FAIL_ACTION_UNAUTHORIZED,
default=AUTH_FAIL_ACTION_REDIRECT),
SECURE: validation.Options(SECURE_HTTP,
SECURE_HTTPS,
SECURE_HTTP_OR_HTTPS,
SECURE_DEFAULT,
default=SECURE_DEFAULT),
HANDLER_SCRIPT: validation.Optional(_FILES_REGEX)
}
class HttpHeadersDict(validation.ValidatedDict):
  """A dict that limits keys and values to what http_headers allows.
  http_headers is a static handler key, i.e. it applies to handlers with
  static_dir or static_files keys. An example of how http_headers is used:
handlers:
- url: /static
static_dir: static
http_headers:
X-Foo-Header: foo value
X-Bar-Header: bar value
"""
DISALLOWED_HEADERS = frozenset([
'content-encoding',
'content-length',
'date',
'server'
])
MAX_HEADER_LENGTH = 500
MAX_HEADER_VALUE_LENGTHS = {
'set-cookie': _MAX_COOKIE_LENGTH,
'set-cookie2': _MAX_COOKIE_LENGTH,
'location': _MAX_URL_LENGTH}
MAX_LEN = 500
  class KeyValidator(validation.Validator):
    """Ensures that keys in HttpHeadersDict, i.e. header names, are valid.
An instance is used as HttpHeadersDict's KEY_VALIDATOR.
"""
def Validate(self, name, unused_key=None):
"""Returns argument, or raises an exception if it is invalid.
HTTP header names are defined by RFC 2616 section 4.2.
Args:
name: HTTP header field value.
unused_key: Unused.
Returns:
name argument, unchanged.
Raises:
appinfo_errors.InvalidHttpHeaderName: argument cannot be used as an HTTP
header name.
"""
original_name = name
if isinstance(name, unicode):
try:
name = name.encode('ascii')
except UnicodeEncodeError:
raise appinfo_errors.InvalidHttpHeaderName(
            'HTTP header names must not contain non-ASCII data')
name = name.lower()
if not _HTTP_TOKEN_RE.match(name):
raise appinfo_errors.InvalidHttpHeaderName(
'An HTTP header must be a non-empty RFC 2616 token.')
if name in _HTTP_REQUEST_HEADERS:
raise appinfo_errors.InvalidHttpHeaderName(
'%r can only be used in HTTP requests, not responses.'
% original_name)
if name.startswith('x-appengine'):
raise appinfo_errors.InvalidHttpHeaderName(
'HTTP header names that begin with X-Appengine are reserved.')
if wsgiref.util.is_hop_by_hop(name):
raise appinfo_errors.InvalidHttpHeaderName(
            'Only end-to-end headers may be used. See RFC 2616 section'
' 13.5.1.')
if name in HttpHeadersDict.DISALLOWED_HEADERS:
raise appinfo_errors.InvalidHttpHeaderName(
'%s is a disallowed header.' % name)
return original_name
  class ValueValidator(validation.Validator):
    """Ensures that values in HttpHeadersDict, i.e. header values, are valid.
An instance is used as HttpHeadersDict's VALUE_VALIDATOR.
"""
def Validate(self, value, key=None):
"""Returns value, or raises an exception if it is invalid.
According to RFC 2616 section 4.2, header field values must consist "of
either *TEXT or combinations of token, separators, and quoted-string".
TEXT = <any OCTET except CTLs, but including LWS>
Args:
value: HTTP header field value.
key: HTTP header field name.
Returns:
value argument.
Raises:
appinfo_errors.InvalidHttpHeaderValue: argument cannot be used as an
HTTP header value.
"""
if isinstance(value, unicode):
try:
value = value.encode('ascii')
except UnicodeEncodeError:
raise appinfo_errors.InvalidHttpHeaderValue(
'HTTP header values must not contain non-ASCII data')
key = key.lower()
printable = set(string.printable[:-5])
if not all(char in printable for char in value):
raise appinfo_errors.InvalidHttpHeaderValue(
'HTTP header field values must consist of printable characters.')
HttpHeadersDict.ValueValidator.AssertHeaderNotTooLong(key, value)
return value
@staticmethod
def AssertHeaderNotTooLong(name, value):
header_length = len('%s: %s\r\n' % (name, value))
if header_length >= HttpHeadersDict.MAX_HEADER_LENGTH:
try:
max_len = HttpHeadersDict.MAX_HEADER_VALUE_LENGTHS[name]
except KeyError:
raise appinfo_errors.InvalidHttpHeaderValue(
'HTTP header (name + value) is too long.')
if len(value) > max_len:
insert = name, len(value), max_len
raise appinfo_errors.InvalidHttpHeaderValue(
              '%r header value has length %d, which exceeds the maximum allowed,'
' %d.' % insert)
KEY_VALIDATOR = KeyValidator()
VALUE_VALIDATOR = ValueValidator()
def Get(self, header_name):
"""Gets a header value.
Args:
header_name: HTTP header name to look for.
Returns:
A header value that corresponds to header_name. If more than one such
      value is in self, one of the values is selected arbitrarily and
      returned; the selection is not deterministic.
"""
for name in self:
if name.lower() == header_name.lower():
return self[name]
def __setitem__(self, key, value):
is_addition = self.Get(key) is None
if is_addition and len(self) >= self.MAX_LEN:
raise appinfo_errors.TooManyHttpHeaders(
'Tried to add another header when the current set of HTTP headers'
' already has the maximum allowed number of headers, %d.'
% HttpHeadersDict.MAX_LEN)
super(HttpHeadersDict, self).__setitem__(key, value)
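# A minimal usage sketch of HttpHeadersDict, assuming direct construction is
# supported; the helper name below is illustrative only and is never called.
def _http_headers_example():
  headers = HttpHeadersDict()
  headers['X-Foo-Header'] = 'foo value'
  try:
    headers['Content-Encoding'] = 'gzip'  # rejected: listed in DISALLOWED_HEADERS
  except appinfo_errors.InvalidHttpHeaderName:
    pass
  return headers.Get('x-foo-header')  # Get() compares names case-insensitively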
class URLMap(HandlerBase):
"""Mapping from URLs to handlers.
This class acts like something of a union type. Its purpose is to
describe a mapping between a set of URLs and their handlers. What
handler type a given instance has is determined by which handler-id
attribute is used.
Each mapping can have one and only one handler type. Attempting to
use more than one handler-id attribute will cause an UnknownHandlerType
to be raised during validation. Failure to provide any handler-id
attributes will cause MissingHandlerType to be raised during validation.
The regular expression used by the url field will be used to match against
the entire URL path and query string of the request. This means that
partial maps will not be matched. Specifying a url, say /admin, is the
same as matching against the regular expression '^/admin$'. Don't begin
  your matching url with ^ or end it with $. These regular expressions
won't be accepted and will raise ValueError.
Attributes:
login: Whether or not login is required to access URL. Defaults to
'optional'.
secure: Restriction on the protocol which can be used to serve
this URL/handler (HTTP, HTTPS or either).
url: Regular expression used to fully match against the request URLs path.
See Special Cases for using static_dir.
static_files: Handler id attribute that maps URL to the appropriate
      file. Can use regex backreferences to the groups matched in url.
upload: Regular expression used by the application configuration
program to know which files are uploaded as blobs. It's very
difficult to determine this using just the url and static_files
so this attribute must be included. Required when defining a
static_files mapping.
A matching file name must fully match against the upload regex, similar
to how url is matched against the request path. Do not begin upload
with ^ or end it with $.
static_dir: Handler id that maps the provided url to a sub-directory
within the application directory. See Special Cases.
mime_type: When used with static_files and static_dir the mime-type
of files served from those directories are overridden with this
value.
    script: Handler id that maps URLs to a script handler within the application
directory that will run using CGI.
position: Used in AppInclude objects to specify whether a handler
should be inserted at the beginning of the primary handler list or at the
end. If 'tail' is specified, the handler is inserted at the end,
otherwise, the handler is inserted at the beginning. This means that
'head' is the effective default.
expiration: When used with static files and directories, the time delta to
use for cache expiration. Has the form '4d 5h 30m 15s', where each letter
signifies days, hours, minutes, and seconds, respectively. The 's' for
      seconds may be omitted. At least one amount must be specified; combining
      multiple amounts is optional. Example good values: '10', '1d 6h',
'1h 30m', '7d 7d 7d', '5m 30'.
api_endpoint: Handler id that identifies endpoint as an API endpoint,
calls that terminate here will be handled by the api serving framework.
Special cases:
When defining a static_dir handler, do not use a regular expression
in the url attribute. Both the url and static_dir attributes are
automatically mapped to these equivalents:
<url>/(.*)
<static_dir>/\1
For example:
url: /images
static_dir: images_folder
Is the same as this static_files declaration:
url: /images/(.*)
static_files: images_folder/\1
upload: images_folder/(.*)
"""
ATTRIBUTES = {
HANDLER_STATIC_FILES: validation.Optional(_FILES_REGEX),
UPLOAD: validation.Optional(_FILES_REGEX),
APPLICATION_READABLE: validation.Optional(bool),
HANDLER_STATIC_DIR: validation.Optional(_FILES_REGEX),
MIME_TYPE: validation.Optional(str),
EXPIRATION: validation.Optional(_EXPIRATION_REGEX),
REQUIRE_MATCHING_FILE: validation.Optional(bool),
HTTP_HEADERS: validation.Optional(HttpHeadersDict),
POSITION: validation.Optional(validation.Options(POSITION_HEAD,
POSITION_TAIL)),
HANDLER_API_ENDPOINT: validation.Optional(validation.Options(
(ON, ON_ALIASES),
(OFF, OFF_ALIASES))),
}
ATTRIBUTES.update(HandlerBase.ATTRIBUTES)
COMMON_FIELDS = set([URL, LOGIN, AUTH_FAIL_ACTION, SECURE])
ALLOWED_FIELDS = {
HANDLER_STATIC_FILES: (MIME_TYPE, UPLOAD, EXPIRATION,
REQUIRE_MATCHING_FILE, HTTP_HEADERS,
APPLICATION_READABLE),
HANDLER_STATIC_DIR: (MIME_TYPE, EXPIRATION, REQUIRE_MATCHING_FILE,
HTTP_HEADERS, APPLICATION_READABLE),
HANDLER_SCRIPT: (POSITION),
HANDLER_API_ENDPOINT: (POSITION, SCRIPT),
}
def GetHandler(self):
"""Get handler for mapping.
Returns:
Value of the handler (determined by handler id attribute).
"""
return getattr(self, self.GetHandlerType())
def GetHandlerType(self):
"""Get handler type of mapping.
Returns:
Handler type determined by which handler id attribute is set.
Raises:
      UnknownHandlerType: when none of the handler id attributes are set.
UnexpectedHandlerAttribute: when an unexpected attribute is set for the
discovered handler type.
HandlerTypeMissingAttribute: when the handler is missing a
required attribute for its handler type.
"""
if getattr(self, HANDLER_API_ENDPOINT) is not None:
mapping_type = HANDLER_API_ENDPOINT
else:
for id_field in URLMap.ALLOWED_FIELDS.iterkeys():
if getattr(self, id_field) is not None:
mapping_type = id_field
break
else:
raise appinfo_errors.UnknownHandlerType(
'Unknown url handler type.\n%s' % str(self))
allowed_fields = URLMap.ALLOWED_FIELDS[mapping_type]
for attribute in self.ATTRIBUTES.iterkeys():
if (getattr(self, attribute) is not None and
not (attribute in allowed_fields or
attribute in URLMap.COMMON_FIELDS or
attribute == mapping_type)):
raise appinfo_errors.UnexpectedHandlerAttribute(
'Unexpected attribute "%s" for mapping type %s.' %
(attribute, mapping_type))
if mapping_type == HANDLER_STATIC_FILES and not self.upload:
raise appinfo_errors.MissingHandlerAttribute(
'Missing "%s" attribute for URL "%s".' % (UPLOAD, self.url))
return mapping_type
def CheckInitialized(self):
"""Adds additional checking to make sure handler has correct fields.
    In addition to the normal Validated checks, this calls GetHandlerType,
    which validates that all the handler fields are configured
    properly.
Raises:
      UnknownHandlerType: when none of the handler id attributes are set.
UnexpectedHandlerAttribute: when an unexpected attribute is set for the
discovered handler type.
HandlerTypeMissingAttribute: when the handler is missing a required
attribute for its handler type.
ContentTypeSpecifiedMultipleTimes: when mime_type is inconsistent with
http_headers.
"""
super(URLMap, self).CheckInitialized()
if self.GetHandlerType() in (STATIC_DIR, STATIC_FILES):
self.AssertUniqueContentType()
def AssertUniqueContentType(self):
"""Makes sure that self.http_headers is consistent with self.mime_type.
Assumes self is a static handler i.e. either self.static_dir or
self.static_files is set (to not None).
Raises:
appinfo_errors.ContentTypeSpecifiedMultipleTimes: Raised when
self.http_headers contains a Content-Type header, and self.mime_type is
set. For example, the following configuration would be rejected:
handlers:
- url: /static
static_dir: static
mime_type: text/html
http_headers:
content-type: text/html
As this example shows, a configuration will be rejected when
http_headers and mime_type specify a content type, even when they
specify the same content type.
"""
used_both_fields = self.mime_type and self.http_headers
if not used_both_fields:
return
content_type = self.http_headers.Get('Content-Type')
if content_type is not None:
raise appinfo_errors.ContentTypeSpecifiedMultipleTimes(
'http_header specified a Content-Type header of %r in a handler that'
' also specified a mime_type of %r.' % (content_type, self.mime_type))
def FixSecureDefaults(self):
"""Force omitted 'secure: ...' handler fields to 'secure: optional'.
The effect is that handler.secure is never equal to the (nominal)
default.
See http://b/issue?id=2073962.
"""
if self.secure == SECURE_DEFAULT:
self.secure = SECURE_HTTP_OR_HTTPS
def WarnReservedURLs(self):
"""Generates a warning for reserved URLs.
See:
https://developers.google.com/appengine/docs/python/config/appconfig#Reserved_URLs
"""
if self.url == '/form':
logging.warning(
'The URL path "/form" is reserved and will not be matched.')
def ErrorOnPositionForAppInfo(self):
"""Raises an error if position is specified outside of AppInclude objects.
"""
if self.position:
raise appinfo_errors.PositionUsedInAppYamlHandler(
'The position attribute was specified for this handler, but this is '
'an app.yaml file. Position attribute is only valid for '
'include.yaml files.')
class AdminConsolePage(validation.Validated):
"""Class representing admin console page in AdminConsole object.
"""
ATTRIBUTES = {
URL: _URL_REGEX,
NAME: _PAGE_NAME_REGEX,
}
class AdminConsole(validation.Validated):
"""Class representing admin console directives in application info.
"""
ATTRIBUTES = {
PAGES: validation.Optional(validation.Repeated(AdminConsolePage)),
}
@classmethod
def Merge(cls, adminconsole_one, adminconsole_two):
"""Return the result of merging two AdminConsole objects."""
if not adminconsole_one or not adminconsole_two:
return adminconsole_one or adminconsole_two
if adminconsole_one.pages:
if adminconsole_two.pages:
adminconsole_one.pages.extend(adminconsole_two.pages)
else:
adminconsole_one.pages = adminconsole_two.pages
return adminconsole_one
class ErrorHandlers(validation.Validated):
"""Class representing error handler directives in application info.
"""
ATTRIBUTES = {
ERROR_CODE: validation.Optional(_ERROR_CODE_REGEX),
FILE: _FILES_REGEX,
MIME_TYPE: validation.Optional(str),
}
class BuiltinHandler(validation.Validated):
"""Class representing builtin handler directives in application info.
Permits arbitrary keys but their values must be described by the
validation.Options object returned by ATTRIBUTES.
"""
class DynamicAttributes(dict):
"""Provide a dictionary object that will always claim to have a key.
This dictionary returns a fixed value for any get operation. The fixed
value passed in as a constructor parameter should be a
validation.Validated object.
"""
def __init__(self, return_value, **parameters):
self.__return_value = return_value
dict.__init__(self, parameters)
def __contains__(self, _):
return True
def __getitem__(self, _):
return self.__return_value
ATTRIBUTES = DynamicAttributes(
validation.Optional(validation.Options((ON, ON_ALIASES),
(OFF, OFF_ALIASES))))
def __init__(self, **attributes):
"""Ensure that all BuiltinHandler objects at least have attribute 'default'.
"""
self.builtin_name = ''
super(BuiltinHandler, self).__init__(**attributes)
def __setattr__(self, key, value):
"""Permit ATTRIBUTES.iteritems() to return set of items that have values.
Whenever validate calls iteritems(), it is always called on ATTRIBUTES,
not on __dict__, so this override is important to ensure that functions
such as ToYAML() return the correct set of keys.
"""
if key == 'builtin_name':
object.__setattr__(self, key, value)
elif not self.builtin_name:
self.ATTRIBUTES[key] = ''
self.builtin_name = key
super(BuiltinHandler, self).__setattr__(key, value)
else:
raise appinfo_errors.MultipleBuiltinsSpecified(
'More than one builtin defined in list element. Each new builtin '
'should be prefixed by "-".')
def __getattr__(self, key):
if key.startswith('_'):
raise AttributeError
return None
  def ToDict(self):
    """Convert BuiltinHandler object to a dictionary.
Returns:
dictionary of the form: {builtin_handler_name: on/off}
"""
return {self.builtin_name: getattr(self, self.builtin_name)}
@classmethod
def IsDefined(cls, builtins_list, builtin_name):
"""Find if a builtin is defined in a given list of builtin handler objects.
Args:
builtins_list: list of BuiltinHandler objects (typically yaml.builtins)
builtin_name: name of builtin to find whether or not it is defined
Returns:
true if builtin_name is defined by a member of builtins_list,
false otherwise
"""
for b in builtins_list:
if b.builtin_name == builtin_name:
return True
return False
@classmethod
def ListToTuples(cls, builtins_list):
"""Converts a list of BuiltinHandler objects to a list of (name, status)."""
return [(b.builtin_name, getattr(b, b.builtin_name)) for b in builtins_list]
@classmethod
def Validate(cls, builtins_list, runtime=None):
"""Verify that all BuiltinHandler objects are valid and not repeated.
Args:
builtins_list: list of BuiltinHandler objects to validate.
runtime: if set then warnings are generated for builtins that have been
deprecated in the given runtime.
Raises:
      InvalidBuiltinFormat if the name of a BuiltinHandler object
cannot be determined.
DuplicateBuiltinSpecified if a builtin handler name is used
more than once in the list.
"""
seen = set()
for b in builtins_list:
if not b.builtin_name:
raise appinfo_errors.InvalidBuiltinFormat(
'Name of builtin for list object %s could not be determined.'
% b)
if b.builtin_name in seen:
raise appinfo_errors.DuplicateBuiltinsSpecified(
'Builtin %s was specified more than once in one yaml file.'
% b.builtin_name)
if b.builtin_name == 'datastore_admin' and runtime == 'python':
logging.warning(
'The datastore_admin builtin is deprecated. You can find '
'information on how to enable it through the Administrative '
'Console here: '
'http://developers.google.com/appengine/docs/adminconsole/'
'datastoreadmin.html')
elif b.builtin_name == 'mapreduce' and runtime == 'python':
logging.warning(
'The mapreduce builtin is deprecated. You can find more '
'information on how to configure and use it here: '
'http://developers.google.com/appengine/docs/python/dataprocessing/'
'overview.html')
seen.add(b.builtin_name)
class ApiConfigHandler(HandlerBase):
"""Class representing api_config handler directives in application info."""
ATTRIBUTES = HandlerBase.ATTRIBUTES
ATTRIBUTES.update({
URL: validation.Regex(_URL_REGEX),
HANDLER_SCRIPT: validation.Regex(_FILES_REGEX)
})
class Library(validation.Validated):
"""Class representing the configuration of a single library."""
ATTRIBUTES = {'name': validation.Type(str),
'version': validation.Type(str)}
def CheckInitialized(self):
"""Raises if the library configuration is not valid."""
super(Library, self).CheckInitialized()
if self.name not in _NAME_TO_SUPPORTED_LIBRARY:
raise appinfo_errors.InvalidLibraryName(
'the library "%s" is not supported' % self.name)
supported_library = _NAME_TO_SUPPORTED_LIBRARY[self.name]
if self.version != 'latest':
if self.version not in supported_library.supported_versions:
raise appinfo_errors.InvalidLibraryVersion(
('%s version "%s" is not supported, ' + _USE_VERSION_FORMAT) % (
self.name,
self.version,
'", "'.join(supported_library.non_deprecated_versions)))
elif self.version in supported_library.deprecated_versions:
logging.warning(
('%s version "%s" is deprecated, ' + _USE_VERSION_FORMAT) % (
self.name,
self.version,
'", "'.join(supported_library.non_deprecated_versions)))
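# A small sketch of how Library validation behaves, assuming direct
# construction; the version strings refer to entries in _SUPPORTED_LIBRARIES
# above, and the helper name is illustrative only.
def _library_example():
  lib = Library(name='jinja2', version='2.6')
  lib.CheckInitialized()  # accepted: supported name and version
  try:
    bad = Library(name='jinja2', version='9.9')
    bad.CheckInitialized()
  except appinfo_errors.InvalidLibraryVersion:
    pass  # rejected: version not in supported_versions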
class AutomaticScaling(validation.Validated):
"""Class representing automatic scaling settings in the AppInfoExternal."""
ATTRIBUTES = {
MINIMUM_IDLE_INSTANCES: validation.Optional(_IDLE_INSTANCES_REGEX),
MAXIMUM_IDLE_INSTANCES: validation.Optional(_IDLE_INSTANCES_REGEX),
MINIMUM_PENDING_LATENCY: validation.Optional(_PENDING_LATENCY_REGEX),
MAXIMUM_PENDING_LATENCY: validation.Optional(_PENDING_LATENCY_REGEX),
MAXIMUM_CONCURRENT_REQUEST: validation.Optional(
_CONCURRENT_REQUESTS_REGEX),
}
class ManualScaling(validation.Validated):
"""Class representing manual scaling settings in the AppInfoExternal."""
ATTRIBUTES = {
INSTANCES: validation.Regex(_INSTANCES_REGEX),
}
class BasicScaling(validation.Validated):
"""Class representing basic scaling settings in the AppInfoExternal."""
ATTRIBUTES = {
MAX_INSTANCES: validation.Regex(_INSTANCES_REGEX),
IDLE_TIMEOUT: validation.Optional(_IDLE_TIMEOUT_REGEX),
}
class VmSettings(validation.ValidatedDict):
"""Class for VM settings.
We don't validate these further because the feature is in flux.
"""
KEY_VALIDATOR = validation.Regex('[a-zA-Z_][a-zA-Z0-9_]*')
VALUE_VALIDATOR = str
@classmethod
def Merge(cls, vm_settings_one, vm_settings_two):
result_vm_settings = (vm_settings_two or {}).copy()
result_vm_settings.update(vm_settings_one or {})
return VmSettings(**result_vm_settings) if result_vm_settings else None
class EnvironmentVariables(validation.ValidatedDict):
"""Class representing a mapping of environment variable key value pairs."""
KEY_VALIDATOR = validation.Regex('[a-zA-Z_][a-zA-Z0-9_]*')
VALUE_VALIDATOR = str
class AppInclude(validation.Validated):
"""Class representing the contents of an included app.yaml file.
Used for both builtins and includes directives.
"""
ATTRIBUTES = {
BUILTINS: validation.Optional(validation.Repeated(BuiltinHandler)),
INCLUDES: validation.Optional(validation.Type(list)),
HANDLERS: validation.Optional(validation.Repeated(URLMap)),
ADMIN_CONSOLE: validation.Optional(AdminConsole),
MANUAL_SCALING: validation.Optional(ManualScaling),
VM_SETTINGS: validation.Optional(VmSettings),
}
@classmethod
def MergeManualScaling(cls, appinclude_one, appinclude_two):
"""Takes the greater of <manual_scaling.instances> from the args.
Note that appinclude_one is mutated to be the merged result in this process.
Also, this function needs to be updated if ManualScaling gets additional
fields.
Args:
appinclude_one: object one to merge. Must have a "manual_scaling" field
which contains a ManualScaling().
appinclude_two: object two to merge. Must have a "manual_scaling" field
which contains a ManualScaling().
Returns:
Object that is the result of merging
appinclude_one.manual_scaling.instances and
appinclude_two.manual_scaling.instances. I.e., <appinclude_one>
after the mutations are complete.
"""
def _Instances(appinclude):
if appinclude.manual_scaling:
if appinclude.manual_scaling.instances:
return int(appinclude.manual_scaling.instances)
return None
instances = max(_Instances(appinclude_one), _Instances(appinclude_two))
if instances is not None:
appinclude_one.manual_scaling = ManualScaling(instances=str(instances))
return appinclude_one
@classmethod
def MergeAppYamlAppInclude(cls, appyaml, appinclude):
"""This function merges an app.yaml file with referenced builtins/includes.
"""
if not appinclude:
return appyaml
if appinclude.handlers:
tail = appyaml.handlers or []
appyaml.handlers = []
for h in appinclude.handlers:
if not h.position or h.position == 'head':
appyaml.handlers.append(h)
else:
tail.append(h)
h.position = None
appyaml.handlers.extend(tail)
AppInclude.MergeManualScaling(appyaml, appinclude)
appyaml.admin_console = AdminConsole.Merge(appyaml.admin_console,
appinclude.admin_console)
appyaml.vm_settings = VmSettings.Merge(appyaml.vm_settings,
appinclude.vm_settings)
return appyaml
@classmethod
def MergeAppIncludes(cls, appinclude_one, appinclude_two):
"""This function merges the non-referential state of the provided AppInclude
objects. That is, builtins and includes directives are not preserved, but
any static objects are copied into an aggregate AppInclude object that
preserves the directives of both provided AppInclude objects.
Note that appinclude_one is mutated to be the merged result in this process.
Args:
appinclude_one: object one to merge
appinclude_two: object two to merge
Returns:
AppInclude object that is the result of merging the static directives of
appinclude_one and appinclude_two. I.e., <appinclude_one> after the
mutations are complete.
"""
if not appinclude_one or not appinclude_two:
return appinclude_one or appinclude_two
if appinclude_one.handlers:
if appinclude_two.handlers:
appinclude_one.handlers.extend(appinclude_two.handlers)
else:
appinclude_one.handlers = appinclude_two.handlers
appinclude_one = AppInclude.MergeManualScaling(
appinclude_one,
appinclude_two)
appinclude_one.admin_console = (
AdminConsole.Merge(appinclude_one.admin_console,
appinclude_two.admin_console))
appinclude_one.vm_settings = VmSettings.Merge(
appinclude_one.vm_settings,
appinclude_two.vm_settings)
return appinclude_one
class AppInfoExternal(validation.Validated):
  """Class representing a user's application info.
This class is passed to a yaml_object builder to provide the validation
for the application information file format parser.
Attributes:
application: Unique identifier for application.
version: Application's major version.
runtime: Runtime used by application.
api_version: Which version of APIs to use.
source_language: Optional specification of the source language.
      For example, we specify "php-quercus" if this is a Java app
      that was generated from PHP source using Quercus.
handlers: List of URL handlers.
default_expiration: Default time delta to use for cache expiration for
all static files, unless they have their own specific 'expiration' set.
See the URLMap.expiration field's documentation for more information.
skip_files: An re object. Files that match this regular expression will
not be uploaded by appcfg.py. For example:
skip_files: |
.svn.*|
#.*#
nobuild_files: An re object. Files that match this regular expression will
not be built into the app. Go only.
api_config: URL root and script/servlet path for enhanced api serving
"""
ATTRIBUTES = {
APPLICATION: APPLICATION_RE_STRING,
SERVER: validation.Optional(SERVER_ID_RE_STRING),
VERSION: validation.Optional(SERVER_VERSION_ID_RE_STRING),
RUNTIME: RUNTIME_RE_STRING,
API_VERSION: API_VERSION_RE_STRING,
INSTANCE_CLASS: validation.Optional(_INSTANCE_CLASS_REGEX),
SOURCE_LANGUAGE: validation.Optional(
validation.Regex(SOURCE_LANGUAGE_RE_STRING)),
AUTOMATIC_SCALING: validation.Optional(AutomaticScaling),
MANUAL_SCALING: validation.Optional(ManualScaling),
BASIC_SCALING: validation.Optional(BasicScaling),
VM: validation.Optional(bool),
VM_SETTINGS: validation.Optional(VmSettings),
BUILTINS: validation.Optional(validation.Repeated(BuiltinHandler)),
INCLUDES: validation.Optional(validation.Type(list)),
HANDLERS: validation.Optional(validation.Repeated(URLMap)),
LIBRARIES: validation.Optional(validation.Repeated(Library)),
SERVICES: validation.Optional(validation.Repeated(
validation.Regex(_SERVICE_RE_STRING))),
DEFAULT_EXPIRATION: validation.Optional(_EXPIRATION_REGEX),
SKIP_FILES: validation.RegexStr(default=DEFAULT_SKIP_FILES),
NOBUILD_FILES: validation.RegexStr(default=DEFAULT_NOBUILD_FILES),
DERIVED_FILE_TYPE: validation.Optional(validation.Repeated(
validation.Options(JAVA_PRECOMPILED, PYTHON_PRECOMPILED))),
ADMIN_CONSOLE: validation.Optional(AdminConsole),
ERROR_HANDLERS: validation.Optional(validation.Repeated(ErrorHandlers)),
BACKENDS: validation.Optional(validation.Repeated(
backendinfo.BackendEntry)),
THREADSAFE: validation.Optional(bool),
API_CONFIG: validation.Optional(ApiConfigHandler),
CODE_LOCK: validation.Optional(bool),
ENV_VARIABLES: validation.Optional(EnvironmentVariables),
PAGESPEED: validation.Optional(pagespeedinfo.PagespeedEntry),
}
_skip_runtime_checks = False
def CheckInitialized(self):
"""Performs non-regex-based validation.
The following are verified:
- At least one url mapping is provided in the URL mappers.
- Number of url mappers doesn't exceed MAX_URL_MAPS.
- Major version does not contain the string -dot-.
- If api_endpoints are defined, an api_config stanza must be defined.
- If the runtime is python27 and threadsafe is set, then no CGI handlers
can be used.
- That the version name doesn't start with BUILTIN_NAME_PREFIX
Raises:
      DuplicateLibrary: if the same library name is specified more than once.
MissingURLMapping: if no URLMap object is present in the object.
TooManyURLMappings: if there are too many URLMap entries.
MissingApiConfig: if api_endpoints exist without an api_config.
MissingThreadsafe: if threadsafe is not set but the runtime requires it.
ThreadsafeWithCgiHandler: if the runtime is python27, threadsafe is set
and CGI handlers are specified.
TooManyScalingSettingsError: if more than one scaling settings block is
present.
"""
super(AppInfoExternal, self).CheckInitialized()
if not self.handlers and not self.builtins and not self.includes:
raise appinfo_errors.MissingURLMapping(
'No URLMap entries found in application configuration')
if self.handlers and len(self.handlers) > MAX_URL_MAPS:
raise appinfo_errors.TooManyURLMappings(
'Found more than %d URLMap entries in application configuration' %
MAX_URL_MAPS)
if (self.threadsafe is None and
self.runtime == 'python27' and
not self._skip_runtime_checks):
raise appinfo_errors.MissingThreadsafe(
'threadsafe must be present and set to either "yes" or "no"')
if self.libraries:
if self.runtime != 'python27' and not self._skip_runtime_checks:
raise appinfo_errors.RuntimeDoesNotSupportLibraries(
'libraries entries are only supported by the "python27" runtime')
library_names = [library.name for library in self.libraries]
for library_name in library_names:
if library_names.count(library_name) > 1:
raise appinfo_errors.DuplicateLibrary(
'Duplicate library entry for %s' % library_name)
if self.version and self.version.find(ALTERNATE_HOSTNAME_SEPARATOR) != -1:
raise validation.ValidationError(
'Version "%s" cannot contain the string "%s"' % (
self.version, ALTERNATE_HOSTNAME_SEPARATOR))
if self.version and self.version.startswith(BUILTIN_NAME_PREFIX):
raise validation.ValidationError(
('Version "%s" cannot start with "%s" because it is a '
'reserved version name prefix.') % (self.version,
BUILTIN_NAME_PREFIX))
if self.handlers:
api_endpoints = [handler.url for handler in self.handlers
if handler.GetHandlerType() == HANDLER_API_ENDPOINT]
if api_endpoints and not self.api_config:
raise appinfo_errors.MissingApiConfig(
'An api_endpoint handler was specified, but the required '
'api_config stanza was not configured.')
if (self.threadsafe and
self.runtime == 'python27' and
not self._skip_runtime_checks):
for handler in self.handlers:
if (handler.script and (handler.script.endswith('.py') or
'/' in handler.script)):
raise appinfo_errors.ThreadsafeWithCgiHandler(
'threadsafe cannot be enabled with CGI handler: %s' %
handler.script)
if sum([bool(self.automatic_scaling),
bool(self.manual_scaling),
bool(self.basic_scaling)]) > 1:
raise appinfo_errors.TooManyScalingSettingsError(
"There may be only one of 'automatic_scaling', 'manual_scaling', "
"or 'basic_scaling'.")
def GetAllLibraries(self):
"""Returns a list of all Library instances active for this configuration.
Returns:
The list of active Library instances for this configuration. This includes
directly-specified libraries as well as any required dependencies.
"""
if not self.libraries:
return []
library_names = set(library.name for library in self.libraries)
required_libraries = []
for library in self.libraries:
for required_name, required_version in REQUIRED_LIBRARIES.get(
(library.name, library.version), []):
if required_name not in library_names:
required_libraries.append(Library(name=required_name,
version=required_version))
return [Library(**library.ToDict())
for library in self.libraries + required_libraries]
def GetNormalizedLibraries(self):
"""Returns a list of normalized Library instances for this configuration.
Returns:
The list of active Library instances for this configuration. This includes
directly-specified libraries, their required dependencies as well as any
libraries enabled by default. Any libraries with "latest" as their version
will be replaced with the latest available version.
"""
libraries = self.GetAllLibraries()
enabled_libraries = set(library.name for library in libraries)
for library in _SUPPORTED_LIBRARIES:
if library.default_version and library.name not in enabled_libraries:
libraries.append(Library(name=library.name,
version=library.default_version))
for library in libraries:
if library.version == 'latest':
library.version = _NAME_TO_SUPPORTED_LIBRARY[
library.name].supported_versions[-1]
return libraries
def ApplyBackendSettings(self, backend_name):
"""Applies settings from the indicated backend to the AppInfoExternal.
Backend entries may contain directives that modify other parts of the
app.yaml, such as the 'start' directive, which adds a handler for the start
request. This method performs those modifications.
Args:
backend_name: The name of a backend defined in 'backends'.
Raises:
BackendNotFound: If the indicated backend was not listed in 'backends'.
"""
if backend_name is None:
return
if self.backends is None:
raise appinfo_errors.BackendNotFound
self.version = backend_name
match = None
for backend in self.backends:
if backend.name != backend_name:
continue
if match:
raise appinfo_errors.DuplicateBackend
else:
match = backend
if match is None:
raise appinfo_errors.BackendNotFound
if match.start is None:
return
start_handler = URLMap(url=_START_PATH, script=match.start)
self.handlers.insert(0, start_handler)
def ValidateHandlers(handlers, is_include_file=False):
"""Validates a list of handler (URLMap) objects.
Args:
handlers: A list of a handler (URLMap) objects.
    is_include_file: If true, indicates that we are performing validation
for handlers in an AppInclude file, which may contain special directives.
"""
if not handlers:
return
for handler in handlers:
handler.FixSecureDefaults()
handler.WarnReservedURLs()
if not is_include_file:
handler.ErrorOnPositionForAppInfo()
def LoadSingleAppInfo(app_info):
"""Load a single AppInfo object where one and only one is expected.
Args:
app_info: A file-like object or string. If it is a string, parse it as
a configuration file. If it is a file-like object, read in data and
parse.
Returns:
An instance of AppInfoExternal as loaded from a YAML file.
Raises:
ValueError: if a specified service is not valid.
EmptyConfigurationFile: when there are no documents in YAML file.
MultipleConfigurationFile: when there is more than one document in YAML
file.
"""
builder = yaml_object.ObjectBuilder(AppInfoExternal)
handler = yaml_builder.BuilderHandler(builder)
listener = yaml_listener.EventListener(handler)
listener.Parse(app_info)
app_infos = handler.GetResults()
if len(app_infos) < 1:
raise appinfo_errors.EmptyConfigurationFile()
if len(app_infos) > 1:
raise appinfo_errors.MultipleConfigurationFile()
appyaml = app_infos[0]
ValidateHandlers(appyaml.handlers)
if appyaml.builtins:
BuiltinHandler.Validate(appyaml.builtins, appyaml.runtime)
if appyaml.vm:
if not appyaml.vm_settings:
appyaml.vm_settings = VmSettings()
if not 'vm_runtime' in appyaml.vm_settings:
appyaml.vm_settings['vm_runtime'] = appyaml.runtime
appyaml.runtime = 'vm'
return appyaml
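# A minimal sketch of LoadSingleAppInfo on an in-memory configuration; the
# YAML below is illustrative and assumes the classic "python" runtime, and the
# helper names are not part of the original module.
_EXAMPLE_APP_YAML = """\
application: example-app
version: alpha
runtime: python
api_version: 1
handlers:
- url: /.*
  script: main.py
"""
def _load_single_app_info_example():
  config = LoadSingleAppInfo(_EXAMPLE_APP_YAML)
  return config.application, [h.url for h in config.handlers]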
class AppInfoSummary(validation.Validated):
"""This class contains only basic summary information about an app.
It is used to pass back information about the newly created app to users
after a new version has been created.
"""
ATTRIBUTES = {
APPLICATION: APPLICATION_RE_STRING,
MAJOR_VERSION: SERVER_VERSION_ID_RE_STRING,
MINOR_VERSION: validation.TYPE_LONG
}
def LoadAppInclude(app_include):
"""Load a single AppInclude object where one and only one is expected.
Args:
app_include: A file-like object or string. If it is a string, parse it as
a configuration file. If it is a file-like object, read in data and
parse.
Returns:
An instance of AppInclude as loaded from a YAML file.
Raises:
EmptyConfigurationFile: when there are no documents in YAML file.
MultipleConfigurationFile: when there is more than one document in YAML
file.
"""
builder = yaml_object.ObjectBuilder(AppInclude)
handler = yaml_builder.BuilderHandler(builder)
listener = yaml_listener.EventListener(handler)
listener.Parse(app_include)
includes = handler.GetResults()
if len(includes) < 1:
raise appinfo_errors.EmptyConfigurationFile()
if len(includes) > 1:
raise appinfo_errors.MultipleConfigurationFile()
includeyaml = includes[0]
if includeyaml.handlers:
for handler in includeyaml.handlers:
handler.FixSecureDefaults()
handler.WarnReservedURLs()
if includeyaml.builtins:
BuiltinHandler.Validate(includeyaml.builtins)
return includeyaml
def ParseExpiration(expiration):
"""Parses an expiration delta string.
Args:
expiration: String that matches _DELTA_REGEX.
Returns:
Time delta in seconds.
"""
delta = 0
for match in re.finditer(_DELTA_REGEX, expiration):
amount = int(match.group(1))
units = _EXPIRATION_CONVERSIONS.get(match.group(2).lower(), 1)
delta += amount * units
return delta
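# A quick worked example of ParseExpiration, assuming the usual
# _EXPIRATION_CONVERSIONS mapping (d=86400, h=3600, m=60, s=1); the helper
# name is illustrative only.
def _parse_expiration_examples():
  return {
      '4d 5h': ParseExpiration('4d 5h'),    # 4*86400 + 5*3600 = 363600
      '1h 30m': ParseExpiration('1h 30m'),  # 3600 + 30*60 = 5400
  }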
_file_path_positive_re = re.compile(r'^[ 0-9a-zA-Z\._\+/\$-]{1,256}$')
_file_path_negative_1_re = re.compile(r'\.\.|^\./|\.$|/\./|^-|^_ah/')
_file_path_negative_2_re = re.compile(r'//|/$')
_file_path_negative_3_re = re.compile(r'^ | $|/ | /')
def ValidFilename(filename):
"""Determines if filename is valid.
filename must be a valid pathname.
- It must contain only letters, numbers, _, +, /, $, ., and -.
- It must be less than 256 chars.
- It must not contain "/./", "/../", or "//".
- It must not end in "/".
- All spaces must be in the middle of a directory or file name.
Args:
filename: The filename to validate.
Returns:
An error string if the filename is invalid. Returns '' if the filename
is valid.
"""
if _file_path_positive_re.match(filename) is None:
return 'Invalid character in filename: %s' % filename
if _file_path_negative_1_re.search(filename) is not None:
return ('Filename cannot contain "." or ".." '
'or start with "-" or "_ah/": %s' %
filename)
if _file_path_negative_2_re.search(filename) is not None:
return 'Filename cannot have trailing / or contain //: %s' % filename
if _file_path_negative_3_re.search(filename) is not None:
return 'Any spaces must be in the middle of a filename: %s' % filename
return ''
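# A short sketch of ValidFilename on a few representative paths; an empty
# return value means the filename passed every check above.
def _valid_filename_examples():
  assert ValidFilename('static/images/logo.png') == ''
  assert ValidFilename('static//logo.png') != ''  # double slash rejected
  assert ValidFilename('../secrets.txt') != ''    # parent reference rejected
  assert ValidFilename('_ah/internal') != ''      # reserved prefix rejected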
| 30.736186
| 107
| 0.678193
|
ce4b79e5ae5ad6d170b58e1a9070ebe19cacb1c4
| 112
|
py
|
Python
|
app_name/views.py
|
hotbaby/django-app-skeleton
|
db965ee14dd377681e14a02a70c258b8c1cb73d8
|
[
"MIT"
] | null | null | null |
app_name/views.py
|
hotbaby/django-app-skeleton
|
db965ee14dd377681e14a02a70c258b8c1cb73d8
|
[
"MIT"
] | 1
|
2019-02-12T09:21:19.000Z
|
2019-02-12T09:21:19.000Z
|
app_name/views.py
|
hotbaby/django-app-skeleton
|
db965ee14dd377681e14a02a70c258b8c1cb73d8
|
[
"MIT"
] | null | null | null |
# encoding: utf8
from . import models
from . import filters
from . import exceptions
from . import serializers
| 16
| 25
| 0.767857
|
54c49422110dfd590b34eec2ad843b2a55880a33
| 8,171
|
py
|
Python
|
packages/pytea/pytest/benchmarks/transformers/missing_idx/src/transformers/tokenization_gpt2_fast.py
|
lego0901/pytea
|
8ede650def2e68f4610ba816451d8b9e28f09f76
|
[
"MIT"
] | 96
|
2021-06-16T09:06:52.000Z
|
2022-03-26T09:56:32.000Z
|
packages/pytea/pytest/benchmarks/transformers/missing_idx/src/transformers/tokenization_gpt2_fast.py
|
lego0901/pytea
|
8ede650def2e68f4610ba816451d8b9e28f09f76
|
[
"MIT"
] | 16
|
2021-07-01T05:34:48.000Z
|
2022-03-28T09:40:15.000Z
|
packages/pytea/pytest/benchmarks/transformers/missing_idx/src/transformers/tokenization_gpt2_fast.py
|
lego0901/pytea
|
8ede650def2e68f4610ba816451d8b9e28f09f76
|
[
"MIT"
] | 24
|
2021-06-19T15:58:31.000Z
|
2022-03-14T09:17:19.000Z
|
# coding=utf-8
# Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for OpenAI GPT."""
import json
import warnings
from typing import Optional, Tuple
from tokenizers import pre_tokenizers
from .tokenization_gpt2 import GPT2Tokenizer
from .tokenization_utils_base import BatchEncoding
from .tokenization_utils_fast import PreTrainedTokenizerFast
from .utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/vocab.json",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/vocab.json",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/vocab.json",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/vocab.json",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/vocab.json",
},
"merges_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/merges.txt",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/merges.txt",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/merges.txt",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/merges.txt",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/merges.txt",
},
"tokenizer_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/tokenizer.json",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/tokenizer.json",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"gpt2": 1024,
"gpt2-medium": 1024,
"gpt2-large": 1024,
"gpt2-xl": 1024,
"distilgpt2": 1024,
}
class GPT2TokenizerFast(PreTrainedTokenizerFast):
"""
Construct a "fast" GPT-2 tokenizer (backed by HuggingFace's `tokenizers` library). Based on byte-level
Byte-Pair-Encoding.
    This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will
    be encoded differently depending on whether it is at the beginning of the sentence (without space) or not:
::
>>> from transformers import GPT2TokenizerFast
>>> tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
>>> tokenizer("Hello world")['input_ids']
[15496, 995]
>>> tokenizer(" Hello world")['input_ids']
[18435, 995]
You can get around that behavior by passing ``add_prefix_space=True`` when instantiating this tokenizer or when you
call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance.
.. note::
When used with ``is_split_into_words=True``, this tokenizer needs to be instantiated with
``add_prefix_space=True``.
This tokenizer inherits from :class:`~transformers.PreTrainedTokenizerFast` which contains most of the main
methods. Users should refer to this superclass for more information regarding those methods.
Args:
vocab_file (:obj:`str`):
Path to the vocabulary file.
merges_file (:obj:`str`):
Path to the merges file.
errors (:obj:`str`, `optional`, defaults to :obj:`"replace"`):
Paradigm to follow when decoding bytes to UTF-8. See `bytes.decode
<https://docs.python.org/3/library/stdtypes.html#bytes.decode>`__ for more information.
unk_token (:obj:`str`, `optional`, defaults to :obj:`<|endoftext|>`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
bos_token (:obj:`str`, `optional`, defaults to :obj:`<|endoftext|>`):
The beginning of sequence token.
eos_token (:obj:`str`, `optional`, defaults to :obj:`<|endoftext|>`):
The end of sequence token.
add_prefix_space (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether or not to add an initial space to the input. This allows treating the leading word just like any
            other word. (The GPT-2 tokenizer detects the beginning of words by the preceding space.)
trim_offsets (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not the post-processing step should trim offsets to avoid including whitespaces.
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ["attention_mask"]
slow_tokenizer_class = GPT2Tokenizer
def __init__(
self,
vocab_file,
merges_file,
tokenizer_file=None,
unk_token="<|endoftext|>",
bos_token="<|endoftext|>",
eos_token="<|endoftext|>",
add_prefix_space=False,
**kwargs
):
super().__init__(
vocab_file,
merges_file,
tokenizer_file=tokenizer_file,
unk_token=unk_token,
bos_token=bos_token,
eos_token=eos_token,
add_prefix_space=add_prefix_space,
**kwargs,
)
pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
pre_tok_state["add_prefix_space"] = add_prefix_space
self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
self.add_prefix_space = add_prefix_space
def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
if "is_pretokenized" in kwargs:
warnings.warn(
"`is_pretokenized` is deprecated and will be removed in a future version, use `is_split_into_words` instead.",
FutureWarning,
)
is_split_into_words = kwargs.pop("is_pretokenized")
        else:
            is_split_into_words = kwargs.get("is_split_into_words", False)
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*args, **kwargs)
def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
if "is_pretokenized" in kwargs:
warnings.warn(
"`is_pretokenized` is deprecated and will be removed in a future version, use `is_split_into_words` instead.",
FutureWarning,
)
is_split_into_words = kwargs.pop("is_pretokenized")
else:
is_split_into_words = kwargs.get("is_split_into_words", False)
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._encode_plus(*args, **kwargs)
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
files = self._tokenizer.model.save(save_directory, name=filename_prefix)
return tuple(files)
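# A minimal usage sketch, assuming the "gpt2" files are cached locally or can
# be downloaded; the helper name is illustrative only. save_vocabulary writes
# the vocabulary/merges files into the given directory, optionally prefixed.
def _gpt2_fast_example():
    tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
    ids = tokenizer(" Hello world")["input_ids"]  # [18435, 995], as in the class docstring
    saved_files = tokenizer.save_vocabulary(".", filename_prefix="demo")
    return ids, saved_files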
| 43.232804
| 126
| 0.674581
|
36ce65d4ac5c2a2838368298c28b6cda5c3d976e
| 2,163
|
py
|
Python
|
benchmarks/kernels.py
|
probabilistic-numerics/probnum
|
93c26c935282b48cca4c4db72616f1e4ebad2266
|
[
"MIT"
] | 226
|
2019-11-01T09:44:09.000Z
|
2022-03-30T23:17:17.000Z
|
benchmarks/kernels.py
|
probabilistic-numerics/probnum
|
93c26c935282b48cca4c4db72616f1e4ebad2266
|
[
"MIT"
] | 590
|
2019-11-21T08:32:30.000Z
|
2022-03-31T12:37:37.000Z
|
benchmarks/kernels.py
|
probabilistic-numerics/probnum
|
93c26c935282b48cca4c4db72616f1e4ebad2266
|
[
"MIT"
] | 39
|
2020-01-13T16:29:45.000Z
|
2022-03-28T16:16:54.000Z
|
"""Benchmarks for kernels."""
import numpy as np
import probnum.kernels as kernels
# Module level variables
KERNEL_NAMES = [
"white_noise",
"linear",
"polynomial",
"exp_quad",
"rat_quad",
"matern12",
"matern32",
"matern52",
"matern72",
]
N_DATAPOINTS = [10, 100, 1000]
def get_kernel(kernel_name, input_dim):
"""Return a kernel for a given name."""
if kernel_name == "white_noise":
kernel = kernels.WhiteNoise(input_dim=input_dim)
elif kernel_name == "linear":
kernel = kernels.Linear(input_dim=input_dim)
elif kernel_name == "polynomial":
kernel = kernels.Polynomial(input_dim=input_dim)
elif kernel_name == "exp_quad":
kernel = kernels.ExpQuad(input_dim=input_dim)
elif kernel_name == "rat_quad":
kernel = kernels.RatQuad(input_dim=input_dim)
elif kernel_name == "matern12":
kernel = kernels.Matern(input_dim=input_dim, nu=0.5)
elif kernel_name == "matern32":
kernel = kernels.Matern(input_dim=input_dim, nu=1.5)
elif kernel_name == "matern52":
kernel = kernels.Matern(input_dim=input_dim, nu=2.5)
elif kernel_name == "matern72":
kernel = kernels.Matern(input_dim=input_dim, nu=3.5)
else:
raise ValueError(f"Kernel name '{kernel_name}' not recognized.")
return kernel
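# A small usage sketch of get_kernel, assuming probnum's kernel API where
# kernel.matrix(data) returns the Gram matrix of the inputs; the helper name
# is illustrative only.
def _get_kernel_example():
    rng = np.random.default_rng(0)
    data = rng.normal(size=(5, 3))
    kernel = get_kernel("exp_quad", input_dim=3)
    return kernel.matrix(data).shape  # (5, 5)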
class Kernels:
"""Benchmark evaluation of a kernel at a set of inputs."""
param_names = ["kernel", "n_datapoints"]
params = [KERNEL_NAMES, N_DATAPOINTS]
def setup(self, kernel, n_datapoints):
rng = np.random.default_rng(42)
self.input_dim = 100
self.data = rng.normal(size=(n_datapoints, self.input_dim))
self.kernel = get_kernel(kernel_name=kernel, input_dim=self.input_dim)
def time_kernel_call(self, kernel, n_datapoints):
self.kernel(self.data, None)
    def time_kernel_matrix(self, kernel, n_datapoints):
        """Times computation of the kernel (Gram) matrix."""
self.kernel.matrix(self.data)
    def peakmem_kernel_matrix(self, kernel, n_datapoints):
        """Peak memory of computing the kernel (Gram) matrix."""
self.kernel.matrix(self.data)
| 30.464789
| 78
| 0.665742
|
44baae71232989d69ecfc7f8c912c394024772b9
| 4,392
|
py
|
Python
|
irc/tests/test_bot.py
|
kratz00/irc
|
1331ba85b5d093f06304d316a03a832959eaf4da
|
[
"MIT"
] | 1
|
2022-02-13T08:02:02.000Z
|
2022-02-13T08:02:02.000Z
|
irc/tests/test_bot.py
|
kratz00/irc
|
1331ba85b5d093f06304d316a03a832959eaf4da
|
[
"MIT"
] | null | null | null |
irc/tests/test_bot.py
|
kratz00/irc
|
1331ba85b5d093f06304d316a03a832959eaf4da
|
[
"MIT"
] | null | null | null |
import time
import threading
import pytest
import irc.client
import irc.bot
import irc.server
from irc.bot import ServerSpec
__metaclass__ = type
class TestServerSpec:
def test_with_host(self):
server_spec = ServerSpec('irc.example.com')
assert server_spec.host == 'irc.example.com'
assert server_spec.port == 6667
assert server_spec.password is None
def test_with_host_and_port(self):
server_spec = ServerSpec('irc.example.org', port=6669)
assert server_spec.host == 'irc.example.org'
assert server_spec.port == 6669
assert server_spec.password is None
def test_with_host_and_password(self):
server_spec = ServerSpec('irc.example.net', password='heres johnny!')
assert server_spec.host == 'irc.example.net'
assert server_spec.port == 6667
assert server_spec.password == 'heres johnny!'
def test_with_host_and_port_and_password(self):
server_spec = ServerSpec(
'irc.example.gov', port=6668, password='there-is-only-zuul'
)
assert server_spec.host == 'irc.example.gov'
assert server_spec.port == 6668
assert server_spec.password == 'there-is-only-zuul'
class TestChannel:
def test_add_remove_nick(self):
channel = irc.bot.Channel()
channel.add_user('tester1')
channel.remove_user('tester1')
assert 'tester1' not in channel.users()
channel.add_user('tester1')
assert 'tester1' in channel.users()
def test_change_nick(self):
channel = irc.bot.Channel()
channel.add_user('tester1')
channel.change_nick('tester1', 'was_tester')
def test_has_user(self):
channel = irc.bot.Channel()
channel.add_user('tester1')
assert channel.has_user('Tester1')
def test_set_mode_clear_mode(self):
channel = irc.bot.Channel()
channel.add_user('tester1')
channel.set_mode('o', 'tester1')
assert channel.is_oper('tester1')
channel.clear_mode('o', 'tester1')
assert not channel.is_oper('tester1')
def test_remove_add_clears_mode(self):
channel = irc.bot.Channel()
channel.add_user('tester1')
channel.set_mode('v', 'tester1')
assert channel.is_voiced('tester1')
channel.remove_user('tester1')
channel.add_user('tester1')
assert not channel.is_voiced('tester1')
class DisconnectHandler(irc.server.IRCClient):
"""
Immediately disconnect the client after connecting
"""
def handle(self):
self.request.close()
@pytest.yield_fixture
def disconnecting_server():
"""
An IRC server that disconnects the client immediately.
"""
# bind to localhost on an ephemeral port
bind_address = '127.0.0.1', 0
try:
srv = irc.server.IRCServer(bind_address, DisconnectHandler)
threading.Thread(target=srv.serve_forever).start()
yield srv
finally:
srv.shutdown()
srv.server_close()
class TestBot:
def test_construct_bot(self):
bot = irc.bot.SingleServerIRCBot(
server_list=[('localhost', '9999')],
realname='irclibbot',
nickname='irclibbot',
)
svr = bot.servers.peek()
assert svr.host == 'localhost'
assert svr.port == '9999'
assert svr.password is None
def test_namreply_no_channel(self):
"""
If channel is '*', _on_namreply should not crash.
Regression test for #22
"""
event = irc.client.Event(
type=None, source=None, target=None, arguments=['*', '*', 'nick']
)
irc.bot.SingleServerIRCBot._on_namreply(None, None, event)
def test_reconnects_are_stable(self, disconnecting_server):
"""
Ensure that disconnects from the server don't lead to
exponential growth in reconnect attempts.
"""
recon = irc.bot.ExponentialBackoff(min_interval=0.01)
bot = irc.bot.SingleServerIRCBot(
server_list=[disconnecting_server.socket.getsockname()],
realname='reconnect_test',
nickname='reconnect_test',
recon=recon,
)
bot._connect()
for x in range(4):
bot.reactor.process_once()
time.sleep(0.01)
assert len(bot.reactor.scheduler.queue) <= 1
| 30.5
| 77
| 0.63388
|
a259d5c894100415e941bae66c060e874c958fea
| 42
|
py
|
Python
|
viz/tda/__init__.py
|
chudur-budur/visualization
|
8013fbdef55fac770d439454207dc07be88fe7c3
|
[
"Apache-2.0"
] | null | null | null |
viz/tda/__init__.py
|
chudur-budur/visualization
|
8013fbdef55fac770d439454207dc07be88fe7c3
|
[
"Apache-2.0"
] | 2
|
2022-01-13T03:22:47.000Z
|
2022-03-12T00:48:54.000Z
|
viz/tda/__init__.py
|
chudur-budur/pviz
|
8013fbdef55fac770d439454207dc07be88fe7c3
|
[
"Apache-2.0"
] | null | null | null |
# __init__.py
__all__ = ['simple_shape']
| 10.5
| 26
| 0.690476
|
60b66486521c664c6d8bd7d298f7d2ad1484fb30
| 36,465
|
py
|
Python
|
cnn_discriminator.py
|
angshenting/NMT_GAN
|
9464fc5df2dd006f4e13d9cc3a6be7071e28d16c
|
[
"Apache-2.0"
] | null | null | null |
cnn_discriminator.py
|
angshenting/NMT_GAN
|
9464fc5df2dd006f4e13d9cc3a6be7071e28d16c
|
[
"Apache-2.0"
] | null | null | null |
cnn_discriminator.py
|
angshenting/NMT_GAN
|
9464fc5df2dd006f4e13d9cc3a6be7071e28d16c
|
[
"Apache-2.0"
] | null | null | null |
# This code implements a discriminator that classifies sentences.
import tensorflow as tf
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope as vs
from data_iterator import disTextIterator
from data_iterator import disThreeTextIterator
from share_function import dis_length_prepare
from share_function import average_clip_gradient
from share_function import average_clip_gradient_by_value
from share_function import dis_three_length_prepare
from model import split_tensor
import time
import numpy
import os
from tensorflow.contrib.layers.python.layers import batch_norm as batch_norm
def conv_batch_norm(x, is_train, scope='bn', decay=0.9, reuse_var = False):
out = batch_norm(x,
decay=decay,
center=True,
scale=True,
updates_collections=None,
is_training=is_train,
reuse=reuse_var,
trainable=True,
scope=scope)
return out
def linear(inputs, output_size, use_bias, scope='linear'):
if not scope:
scope=tf.compat.v1.get_variable_scope()
input_size = inputs.get_shape()[1].value
dtype=inputs.dtype
with tf.compat.v1.variable_scope(scope):
weights=tf.compat.v1.get_variable('weights', [input_size, output_size], dtype=dtype)
res = tf.matmul(inputs, weights)
if not use_bias:
return res
biases=tf.compat.v1.get_variable('biases', [output_size], dtype=dtype)
return tf.add(res, biases)
def highway(input_, size, layer_size=1, bias=-2, f=tf.nn.relu, reuse_var=False):
output = input_
if reuse_var == True:
tf.compat.v1.get_variable_scope().reuse_variables()
    for idx in range(layer_size):
output = f(linear(output, size, 0, scope='output_lin_%d' %idx))
transform_gate = tf.sigmoid(linear(input_, size, 0, scope='transform_lin_%d'%idx) +bias)
carry_gate = 1. - transform_gate
output = transform_gate * output + carry_gate * input_
return output
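# The highway layer above implements, following Srivastava et al. (2015):
#   output = T(x) * H(x) + (1 - T(x)) * x
# where H(x) = f(W_H x) is the transform, T(x) = sigmoid(W_T x + bias) is the
# transform gate, and carry_gate = 1 - T(x) lets the input pass through unchanged.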
def highway_s(input_, size, layer_size=1, bias=-2, f=tf.nn.relu, reuse_var=False):
output = input_
if reuse_var == True:
tf.compat.v1.get_variable_scope().reuse_variables()
    for idx in range(layer_size):
output = f(linear(output, size, 0, scope='output_s_lin_%d' %idx))
transform_gate = tf.sigmoid(linear(input_, size, 0, scope='transform_s_lin_%d'%idx) +bias)
carry_gate = 1. - transform_gate
output = transform_gate * output + carry_gate * input_
return output
class cnn_layer(object):
def __init__(self, filter_size, dim_word, num_filter, scope='cnn_layer', init_device='/cpu:0', reuse_var=False):
self.filter_size = filter_size
self.dim_word = dim_word
self.num_filter = num_filter
self.scope = scope
self.reuse_var = reuse_var
if reuse_var == False:
with tf.compat.v1.variable_scope(self.scope or 'cnn_layer'):
with tf.compat.v1.variable_scope('self_model'):
with tf.device(init_device):
filter_shape = [filter_size, dim_word, 1, num_filter]
b = tf.compat.v1.get_variable('b', initializer = tf.constant(0.1, shape=[num_filter]))
W = tf.compat.v1.get_variable('W', initializer = tf.random.truncated_normal(filter_shape, stddev=0.1))
    ## convolution with batch normalization
def conv_op(self, input_sen, stride, is_train, padding='VALID', is_batch_norm = True, f_activation=tf.nn.relu):
with tf.compat.v1.variable_scope(self.scope):
with tf.compat.v1.variable_scope('self_model'):
tf.compat.v1.get_variable_scope().reuse_variables()
b = tf.compat.v1.get_variable('b')
W = tf.compat.v1.get_variable('W')
conv = tf.nn.conv2d(
input=input_sen,
filters=W,
strides=stride,
padding=padding,
name='conv')
bias_add = tf.nn.bias_add(conv, b)
if is_batch_norm :
with tf.compat.v1.variable_scope('conv_batch_norm'):
conv_bn = conv_batch_norm(bias_add, is_train = is_train, scope='bn', reuse_var = self.reuse_var)
h = f_activation(conv_bn, name='relu')
else:
h = f_activation(bias_add, name='relu')
return h
class DisCNN(object):
"""
A CNN for sentence classification
Uses an embedding layer, followed by a convolutional layer, max_pooling and softmax layer.
"""
def __init__(self, sess, max_len, num_classes, vocab_size, batch_size, dim_word, filter_sizes, num_filters, source_dict, target_dict, gpu_device, positive_data, negative_data, source_data,
vocab_size_s = None, dev_positive_data=None, dev_negative_data=None, dev_source_data=None, max_epoches=10, dispFreq = 1, saveFreq = 10, devFreq=1000, clip_c = 1.0, optimizer='adadelta', saveto='discriminator',
reload=False, reshuffle = False, l2_reg_lambda=0.0, scope='discnn', init_device="/cpu:0", reuse_var=False):
self.sess = sess
self.max_len = max_len
self.num_classes = num_classes
self.vocab_size = vocab_size
self.dim_word = dim_word
self.filter_sizes = filter_sizes
self.num_filters = num_filters
self.l2_reg_lambda = l2_reg_lambda
self.num_filters_total = sum(self.num_filters)
self.scope = scope
self.positive_data = positive_data
self.negative_data = negative_data
self.source_data = source_data
self.dev_positive_data = dev_positive_data
self.dev_negative_data = dev_negative_data
self.dev_source_data = dev_source_data
self.reshuffle = reshuffle
self.batch_size = batch_size
self.max_epoches = max_epoches
self.dispFreq = dispFreq
self.saveFreq = saveFreq
self.devFreq = devFreq
self.clip_c = clip_c
self.saveto = saveto
self.reload = reload
if vocab_size_s is None:
self.vocab_size_s = self.vocab_size
else:
self.vocab_size_s = vocab_size_s
print('num_filters_total is ', self.num_filters_total)
if optimizer == 'adam':
self.optimizer = tf.compat.v1.train.AdamOptimizer()
print("using adam as the optimizer for the discriminator")
elif optimizer == 'adadelta':
self.optimizer = tf.compat.v1.train.AdadeltaOptimizer(learning_rate=1.,rho=0.95,epsilon=1e-6)
print("using adadelta as the optimizer for the discriminator")
elif optimizer == 'sgd':
self.optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.0001)
print("using sgd as the optimizer for the discriminator")
elif optimizer == 'rmsprop':
self.optimizer = tf.compat.v1.train.RMSPropOptimizer(0.0001)
print("using rmsprop as the optimizer for the discriminator")
        else:
            raise ValueError("optimizer must be adam, adadelta, sgd or rmsprop.")
dictionaries=[]
dictionaries.append(source_dict)
dictionaries.append(target_dict)
self.dictionaries = dictionaries
gpu_string = gpu_device
gpu_devices = []
gpu_devices = gpu_string.split('-')
self.gpu_devices = gpu_devices[1:]
self.gpu_num = len(self.gpu_devices)
#print('the gpu_num is ', self.gpu_num)
self.build_placeholder()
if reuse_var == False:
with tf.compat.v1.variable_scope(self.scope or 'disCNN'):
with tf.compat.v1.variable_scope('model_self'):
with tf.device(init_device):
embeddingtable = tf.compat.v1.get_variable('embeddingtable', initializer = tf.random.uniform([self.vocab_size, self.dim_word], -1.0, 1.0))
embeddingtable_s = tf.compat.v1.get_variable('embeddingtable_s', initializer = tf.random.uniform([self.vocab_size_s, self.dim_word], -1.0, 1.0))
W = tf.compat.v1.get_variable('W', initializer = tf.random.truncated_normal([self.num_filters_total * 2, self.num_classes], stddev=0.1))
b = tf.compat.v1.get_variable('b', initializer = tf.constant(0.1, shape=[self.num_classes]))
## build_model ##########
print('building train model')
self.build_train_model()
print('done')
print('build_discriminate ')
#self.build_discriminate(gpu_device=self.gpu_devices[-1])
self.build_discriminator_model(dis_devices=self.gpu_devices)
print('done')
params = [param for param in tf.compat.v1.global_variables() if self.scope in param.name]
if not self.sess.run(tf.compat.v1.is_variable_initialized(params[0])):
init_op = tf.compat.v1.variables_initializer(params)
self.sess.run(init_op)
saver = tf.compat.v1.train.Saver(params)
self.saver = saver
if self.reload:
#ckpt = tf.train.get_checkpoint_state('./')
#if ckpt and ckpt.model_checkpoint_path:
# print('reloading file from %s' % ckpt.model_checkpoint_path)
# self.saver.restore(self.sess, ckpt.model_checkpoint_path)
#else:
print('reloading file from %s' % self.saveto)
self.saver.restore(self.sess, self.saveto)
print('reloading file done')
def build_placeholder(self, gpu_num = None):
self.x_list = []
self.xs_list = [] ##for the source side
self.y_list = []
self.drop_list = []
if gpu_num is None:
gpu_num = self.gpu_num
for i in range(gpu_num):
input_x = tf.compat.v1.placeholder(tf.int32, [self.max_len, None], name='input_x')
input_xs = tf.compat.v1.placeholder(tf.int32, [self.max_len, None], name='input_xs')
input_y = tf.compat.v1.placeholder(tf.float32, [self.num_classes, None], name='input_y')
drop_prob = tf.compat.v1.placeholder(tf.float32, name='dropout_prob')
self.x_list.append(input_x)
self.xs_list.append(input_xs)
self.y_list.append(input_y)
self.drop_list.append(drop_prob)
def get_inputs(self, gpu_device):
try:
gpu_id = self.gpu_devices.index(gpu_device)
except:
raise ValueError('get inputs error!')
return self.x_list[gpu_id], self.xs_list[gpu_id], self.y_list[gpu_id], self.drop_list[gpu_id]
def build_model(self, reuse_var=False, gpu_device='0'):
with tf.compat.v1.variable_scope(self.scope):
with tf.device('/gpu:%d' % int(gpu_device)):
# self.input_x = tf.placeholder(tf.int32, [self.max_len, None], name='input_x')
# self.input_y = tf.placeholder(tf.float32, [self.num_classes, None], name='input_y')
#print('build model on gpu_device is ', gpu_device)
#print('building model on device %s, reuse : %d' %(gpu_device, reuse_var))
input_x, input_xs, input_y, drop_keep_prob = self.get_inputs(gpu_device)
input_x_trans = tf.transpose(a=input_x, perm=[1,0])
input_xs_trans = tf.transpose(a=input_xs, perm=[1,0])
input_y_trans = tf.transpose(a=input_y, perm=[1,0])
#self.dropout_keep_prob = tf.placeholder(tf.float32, name='dropout_keep_prob')
with tf.compat.v1.variable_scope('model_self'):
tf.compat.v1.get_variable_scope().reuse_variables()
W = tf.compat.v1.get_variable('W')
b = tf.compat.v1.get_variable('b')
embeddingtable = tf.compat.v1.get_variable('embeddingtable')
embeddingtable_s = tf.compat.v1.get_variable('embeddingtable_s')
sentence_embed = tf.nn.embedding_lookup(params=embeddingtable, ids=input_x_trans)
sentence_embed_expanded = tf.expand_dims(sentence_embed, -1)
pooled_outputs = []
for filter_size, num_filter in zip(self.filter_sizes, self.num_filters):
#print('the filter size is ', filter_size)
scope = "conv_maxpool-%s" % filter_size
filter_shape = [filter_size, self.dim_word, 1, num_filter]
strides=[1,1,1,1]
conv = cnn_layer(filter_size, self.dim_word, num_filter, scope=scope, reuse_var = reuse_var)
is_train = True
conv_out = conv.conv_op(sentence_embed_expanded, strides, is_train=is_train)
pooled = tf.nn.max_pool2d(input=conv_out, ksize=[1, (self.max_len - filter_size +1), 1, 1], strides=strides, padding='VALID', name='pool')
#print('the shape of the pooled is ', pooled.get_shape())
pooled_outputs.append(pooled)
h_pool = tf.concat(axis=3, values=pooled_outputs)
#print('the shape of h_pool is ', h_pool.get_shape())
#print('the num_filters_total is ', self.num_filters_total)
h_pool_flat = tf.reshape(h_pool, [-1, self.num_filters_total])
#print('the shape of h_pool_flat is ', h_pool_flat.get_shape())
h_highway = highway(h_pool_flat, h_pool_flat.get_shape()[1], 1, 0, reuse_var=reuse_var)
h_drop = tf.nn.dropout(h_highway, 1 - (drop_keep_prob))
#print('the shape of h_drop is ', h_drop.get_shape())
## for the source sentence
sentence_embed_s = tf.nn.embedding_lookup(params=embeddingtable_s, ids=input_xs_trans)
sentence_embed_expanded_s = tf.expand_dims(sentence_embed_s, -1)
pooled_outputs_s = []
for filter_size_s, num_filter_s in zip(self.filter_sizes, self.num_filters):
scope = "conv_s_maxpool-%s" % filter_size_s
filter_shape = [filter_size_s, self.dim_word, 1, num_filter_s]
strides=[1,1,1,1]
conv = cnn_layer(filter_size_s, self.dim_word, num_filter_s, scope=scope, reuse_var=reuse_var)
is_train = True
conv_out = conv.conv_op(sentence_embed_expanded_s, strides, is_train=is_train)
pooled = tf.nn.max_pool2d(input=conv_out, ksize=[1, (self.max_len - filter_size_s + 1), 1, 1], strides=strides, padding='VALID', name='pool')
pooled_outputs_s.append(pooled)
h_pool_s = tf.concat(axis=3, values=pooled_outputs_s)
h_pool_flat_s = tf.reshape(h_pool_s, [-1, self.num_filters_total])
h_highway_s = highway_s(h_pool_flat_s, h_pool_flat_s.get_shape()[1], 1, 0, reuse_var = reuse_var)
h_drop_s = tf.nn.dropout(h_highway_s, 1 - (drop_keep_prob))
h_concat = tf.concat(axis=1, values=[h_drop, h_drop_s])
#print('the shape of h_concat is ', h_concat.get_shape())
scores = tf.compat.v1.nn.xw_plus_b(h_concat, W, b, name='scores')
ypred_for_auc = tf.nn.softmax(scores)
predictions = tf.argmax(input=scores, axis=1, name='prediction')
losses = tf.nn.softmax_cross_entropy_with_logits(logits=scores, labels=tf.stop_gradient(input_y_trans))
correct_predictions = tf.equal(predictions, tf.argmax(input=input_y_trans, axis=1))
accuracy = tf.reduce_mean(input_tensor=tf.cast(correct_predictions, 'float'), name='accuracy')
params = [param for param in tf.compat.v1.trainable_variables() if self.scope in param.name]
#for param in params:
# print param.name
#self.params = params
grads_and_vars = self.optimizer.compute_gradients(losses, params)
#for grad, var in grads_and_vars:
# print (var.name, grad)
l2_loss = tf.constant(0.0)
l2_loss += tf.nn.l2_loss(W)
l2_loss += tf.nn.l2_loss(b)
loss = tf.reduce_mean(input_tensor=losses) + self.l2_reg_lambda * l2_loss
return input_x, input_y, drop_keep_prob, ypred_for_auc, predictions, loss, correct_predictions, accuracy, grads_and_vars
def build_discriminate(self, reuse_var=True, gpu_device='0'):
with tf.compat.v1.variable_scope(self.scope):
with tf.device('/gpu:%d' % int(gpu_device)):
self.dis_input_x = tf.compat.v1.placeholder(tf.int32, [self.max_len, None], name='input_x')
self.dis_input_xs = tf.compat.v1.placeholder(tf.int32, [self.max_len, None], name='input_xs')
self.dis_input_y = tf.compat.v1.placeholder(tf.float32, [self.num_classes, None], name='input_y')
input_x_trans = tf.transpose(a=self.dis_input_x, perm=[1,0])
input_xs_trans = tf.transpose(a=self.dis_input_xs, perm=[1,0])
input_y_trans = tf.transpose(a=self.dis_input_y, perm=[1,0])
self.dis_dropout_keep_prob = tf.compat.v1.placeholder(tf.float32, name='dropout_keep_prob')
with tf.compat.v1.variable_scope('model_self'):
tf.compat.v1.get_variable_scope().reuse_variables()
W = tf.compat.v1.get_variable('W')
b = tf.compat.v1.get_variable('b')
embeddingtable = tf.compat.v1.get_variable('embeddingtable')
embeddingtable_s = tf.compat.v1.get_variable('embeddingtable_s')
sentence_embed = tf.nn.embedding_lookup(params=embeddingtable, ids=input_x_trans)
sentence_embed_expanded = tf.expand_dims(sentence_embed, -1)
#print('the shape of sentence_embed is ', sentence_embed.get_shape())
#print('the shape of sentence_embed_expanded is ', sentence_embed_expanded.get_shape())
pooled_outputs = []
for filter_size, num_filter in zip(self.filter_sizes, self.num_filters):
#print('the filter size is ', filter_size)
scope = "conv_maxpool-%s" % filter_size
filter_shape = [filter_size, self.dim_word, 1, num_filter]
strides=[1,1,1,1]
conv = cnn_layer(filter_size, self.dim_word, num_filter, scope=scope, reuse_var = reuse_var)
is_train = False
conv_out = conv.conv_op(sentence_embed_expanded, strides, is_train=is_train)
pooled = tf.nn.max_pool2d(input=conv_out, ksize=[1, (self.max_len - filter_size +1), 1, 1], strides=strides, padding='VALID', name='pool')
#print('the shape of the pooled is ', pooled.get_shape())
pooled_outputs.append(pooled)
h_pool = tf.concat(axis=3, values=pooled_outputs)
#print('the shape of h_pool is ', h_pool.get_shape())
#print('the num_filters_total is ', self.num_filters_total)
h_pool_flat = tf.reshape(h_pool, [-1, self.num_filters_total])
#print('the shape of h_pool_flat is ', h_pool_flat.get_shape())
h_highway = highway(h_pool_flat, h_pool_flat.get_shape()[1], 1, 0, reuse_var=reuse_var)
h_drop = tf.nn.dropout(h_highway, 1 - (self.dis_dropout_keep_prob))
#print('the shape of h_drop is ', h_drop.get_shape())
sentence_embed_s = tf.nn.embedding_lookup(params=embeddingtable_s, ids=input_xs_trans)
sentence_embed_expanded_s = tf.expand_dims(sentence_embed_s, -1)
pooled_output_s = []
for filter_size_s, num_filter_s in zip(self.filter_sizes, self.num_filters):
scope= "conv_s_maxpool-%s" % filter_size_s
filter_shape = [filter_size_s, self.dim_word, 1, num_filter_s]
strides=[1,1,1,1]
conv = cnn_layer(filter_size_s, self.dim_word, num_filter_s, scope=scope, reuse_var=reuse_var)
is_train = False
conv_out = conv.conv_op(sentence_embed_expanded_s, strides, is_train=is_train)
pooled = tf.nn.max_pool2d(input=conv_out, ksize=[1, (self.max_len - filter_size_s +1), 1, 1], strides=strides, padding='VALID', name='pool')
pooled_output_s.append(pooled)
h_pool_s = tf.concat(axis=3, values=pooled_output_s)
h_pool_flat_s = tf.reshape(h_pool_s, [-1, self.num_filters_total])
h_highway_s = highway_s(h_pool_flat_s, h_pool_flat_s.get_shape()[1], 1, 0, reuse_var=reuse_var)
h_drop_s = tf.nn.dropout(h_highway_s, 1 - (self.dis_dropout_keep_prob))
h_concat = tf.concat(axis=1, values=[h_drop, h_drop_s])
scores = tf.compat.v1.nn.xw_plus_b(h_concat, W, b, name='scores')
ypred_for_auc = tf.nn.softmax(scores)
predictions = tf.argmax(input=scores, axis=1, name='prediction')
losses = tf.nn.softmax_cross_entropy_with_logits(logits=scores, labels=tf.stop_gradient(input_y_trans))
correct_predictions = tf.equal(predictions, tf.argmax(input=input_y_trans, axis=1))
accuracy = tf.reduce_mean(input_tensor=tf.cast(correct_predictions, 'float'), name='accuracy')
grads_and_vars = self.optimizer.compute_gradients(losses)
l2_loss = tf.constant(0.0)
l2_loss += tf.nn.l2_loss(W)
l2_loss += tf.nn.l2_loss(b)
loss = tf.reduce_mean(input_tensor=losses) + self.l2_reg_lambda * l2_loss
self.dis_ypred_for_auc = ypred_for_auc
self.dis_prediction = predictions
self.dis_loss = loss
self.dis_accuracy = accuracy
self.dis_grads_and_vars = grads_and_vars
def build_discriminator_body(self, input_x, input_xs, input_y, dropout_keep_prob, reuse_var=True):
input_x_trans = input_x
input_xs_trans = input_xs
input_y_trans = input_y
dis_dropout_keep_prob = dropout_keep_prob
with tf.compat.v1.variable_scope('model_self'):
tf.compat.v1.get_variable_scope().reuse_variables()
W = tf.compat.v1.get_variable('W')
b = tf.compat.v1.get_variable('b')
embeddingtable = tf.compat.v1.get_variable('embeddingtable')
embeddingtable_s = tf.compat.v1.get_variable('embeddingtable_s')
sentence_embed = tf.nn.embedding_lookup(params=embeddingtable, ids=input_x_trans)
sentence_embed_expanded = tf.expand_dims(sentence_embed, -1)
#print('the shape of sentence_embed is ', sentence_embed.get_shape())
#print('the shape of sentence_embed_expanded is ', sentence_embed_expanded.get_shape())
pooled_outputs = []
for filter_size, num_filter in zip(self.filter_sizes, self.num_filters):
#print('the filter size is ', filter_size)
scope = "conv_maxpool-%s" % filter_size
filter_shape = [filter_size, self.dim_word, 1, num_filter]
strides=[1,1,1,1]
conv = cnn_layer(filter_size, self.dim_word, num_filter, scope=scope, reuse_var = reuse_var)
is_train = False
conv_out = conv.conv_op(sentence_embed_expanded, strides, is_train=is_train)
pooled = tf.nn.max_pool2d(input=conv_out, ksize=[1, (self.max_len - filter_size +1), 1, 1], strides=strides, padding='VALID', name='pool')
#print('the shape of the pooled is ', pooled.get_shape())
pooled_outputs.append(pooled)
h_pool = tf.concat(axis=3, values=pooled_outputs)
#print('the shape of h_pool is ', h_pool.get_shape())
#print('the num_filters_total is ', self.num_filters_total)
h_pool_flat = tf.reshape(h_pool, [-1, self.num_filters_total])
#print('the shape of h_pool_flat is ', h_pool_flat.get_shape())
h_highway = highway(h_pool_flat, h_pool_flat.get_shape()[1], 1, 0, reuse_var=reuse_var)
h_drop = tf.nn.dropout(h_highway, 1 - (dis_dropout_keep_prob))
#print('the shape of h_drop is ', h_drop.get_shape())
sentence_embed_s = tf.nn.embedding_lookup(params=embeddingtable_s, ids=input_xs_trans)
sentence_embed_expanded_s = tf.expand_dims(sentence_embed_s, -1)
pooled_output_s = []
for filter_size_s, num_filter_s in zip(self.filter_sizes, self.num_filters):
scope= "conv_s_maxpool-%s" % filter_size_s
filter_shape = [filter_size_s, self.dim_word, 1, num_filter_s]
strides=[1,1,1,1]
conv = cnn_layer(filter_size_s, self.dim_word, num_filter_s, scope=scope, reuse_var=reuse_var)
is_train = False
conv_out = conv.conv_op(sentence_embed_expanded_s, strides, is_train=is_train)
pooled = tf.nn.max_pool2d(input=conv_out, ksize=[1, (self.max_len - filter_size_s +1), 1, 1], strides=strides, padding='VALID', name='pool')
pooled_output_s.append(pooled)
h_pool_s = tf.concat(axis=3, values=pooled_output_s)
h_pool_flat_s = tf.reshape(h_pool_s, [-1, self.num_filters_total])
h_highway_s = highway_s(h_pool_flat_s, h_pool_flat_s.get_shape()[1], 1, 0, reuse_var=reuse_var)
h_drop_s = tf.nn.dropout(h_highway_s, 1 - (dis_dropout_keep_prob))
h_concat = tf.concat(axis=1, values=[h_drop, h_drop_s])
scores = tf.compat.v1.nn.xw_plus_b(h_concat, W, b, name='scores')
ypred_for_auc = tf.nn.softmax(scores)
predictions = tf.argmax(input=scores, axis=1, name='prediction')
losses = tf.nn.softmax_cross_entropy_with_logits(logits=scores, labels=tf.stop_gradient(input_y_trans))
correct_predictions = tf.equal(predictions, tf.argmax(input=input_y_trans, axis=1))
accuracy = tf.reduce_mean(input_tensor=tf.cast(correct_predictions, 'float'), name='accuracy')
grads_and_vars = self.optimizer.compute_gradients(losses)
l2_loss = tf.constant(0.0)
l2_loss += tf.nn.l2_loss(W)
l2_loss += tf.nn.l2_loss(b)
loss = tf.reduce_mean(input_tensor=losses) + self.l2_reg_lambda * l2_loss
return ypred_for_auc
#self.dis_ypred_for_auc = ypred_for_auc
#self.dis_prediction = predictions
#self.dis_loss = loss
#self.dis_accuracy = accuracy
#self.dis_grads_and_vars = grads_and_vars
def build_discriminator_model(self, dis_devices):
with tf.compat.v1.variable_scope(self.scope):
with tf.device('/cpu:0'):
self.dis_input_x = tf.compat.v1.placeholder(tf.int32, [self.max_len, None], name='input_x')
self.dis_input_xs = tf.compat.v1.placeholder(tf.int32, [self.max_len, None], name='input_xs')
self.dis_input_y = tf.compat.v1.placeholder(tf.float32, [self.num_classes, None], name='input_y')
self.dis_dropout_keep_prob = tf.compat.v1.placeholder(tf.float32, name='dropout_keep_prob')
dis_input_x = tf.transpose(a=self.dis_input_x, perm=[1, 0])
dis_input_xs = tf.transpose(a=self.dis_input_xs, perm=[1, 0])
dis_input_y = tf.transpose(a=self.dis_input_y, perm=[1, 0])
devices = ['/gpu:' + i for i in dis_devices]
input_x_list = split_tensor(dis_input_x, len(devices))
input_xs_list = split_tensor(dis_input_xs, len(devices))
input_y_list = split_tensor(dis_input_y, len(devices))
dis_dropout_keep_prob = [self.dis_dropout_keep_prob] * len(devices)
batch_size_list = [tf.shape(input=x)[0] for x in input_x_list]
pred_list = [None] * len(devices)
for i, (input_x, input_xs, input_y, drop, device) in enumerate(zip(input_x_list, input_xs_list, input_y_list, dis_dropout_keep_prob, devices)):
with tf.device(device):
print("building discriminator model on device %s" % device)
ypred_for_auc = self.build_discriminator_body(input_x, input_xs, input_y, drop, reuse_var=True)
pred_list[i] = ypred_for_auc
self.dis_ypred_for_auc = tf.concat(pred_list, axis=0)
def build_train_model(self):
loss = tf.convert_to_tensor(value=0.)
grads = []
accu = tf.convert_to_tensor(value=0.)
reuse_var = False
for i, gpu_device in enumerate(self.gpu_devices):
#print('i is %d, gpu is %s' %(i, gpu_device))
if i > 0:
reuse_var = True
#print('reuse_var is ', reuse_var)
_, _, _, ypred_for_auc, predictions, losses, correct_predictions, accuracy, grads_and_vars = self.build_model(reuse_var=reuse_var, gpu_device=gpu_device)
loss += losses
accu += accuracy
grads.append(grads_and_vars)
loss = loss / self.gpu_num
accuracy = accu / self.gpu_num
#grads_and_vars = average_clip_gradient(grads, self.clip_c)
grads_and_vars = average_clip_gradient_by_value(grads, -1.0, 1.0)
optm = self.optimizer.apply_gradients(grads_and_vars)
clip_ops = []
var_s = [var for var in tf.compat.v1.trainable_variables() if self.scope in var.name]
for var in var_s:
clip_ops.append(tf.compat.v1.assign(var, tf.clip_by_value(var, -1., 1.)))
clip_ops = tf.group(*clip_ops)
self.clip_ops = clip_ops
self.train_loss = loss
self.train_accuracy = accuracy
self.train_grads_and_vars = grads_and_vars
self.train_optm = optm
self.train_ypred = ypred_for_auc
def train(self, max_epoch = None, positive_data=None, negative_data=None, source_data=None):
if positive_data is None or negative_data is None or source_data is None:
positive_data = self.positive_data
negative_data = self.negative_data
source_data = self.source_data
print('the source data in training cnn is %s' %source_data)
print('the positive data in training cnn is %s' %positive_data)
print('the negative data in training cnn is %s' %negative_data)
if max_epoch is None:
max_epoch = self.max_epoches
def train_iter():
Epoch = 0
while True:
if self.reshuffle:
os.popen('python shuffle.py ' + positive_data + ' ' + negative_data +' ' + source_data)
os.popen('mv ' + positive_data + '.shuf '+ positive_data)
os.popen('mv ' + negative_data + '.shuf '+ negative_data)
os.popen('mv ' + source_data + '.shuf ' + source_data)
#disTrain = disTextIterator(positive_data, negative_data, self.dictionaries[1],
# batch=self.batch_size * self.gpu_num,
# maxlen=self.max_len, n_words_target = self.vocab_size)
disTrain = disThreeTextIterator(positive_data, negative_data, source_data, self.dictionaries[1], self.dictionaries[0],
batch = self.batch_size * self.gpu_num,
maxlen = self.max_len,
n_words_target = self.vocab_size,
n_words_source = self.vocab_size_s)
ExampleNum = 0
                print('Epoch :', Epoch)
EpochStart = time.time()
for x, y, xs in disTrain:
if len(x) < self.gpu_num:
continue
ExampleNum+=len(x)
yield x, y, xs, Epoch
TimeCost = time.time() - EpochStart
Epoch +=1
print('Seen ', ExampleNum, ' examples for discriminator. Time Cost : ', TimeCost)
train_it = train_iter()
drop_prob = 1.0
#if self.reload:
# print('reload params from %s' %self.saveto)
# saver.restore(self.sess, self.saveto)
# print('reload params done')
TrainStart = time.time()
epoch = 0
uidx = 0
HourIdx = 0
print('train begin')
while epoch < max_epoch:
if time.time() - TrainStart >= 3600 * HourIdx:
print('------------------------------------------Hour %d --------------------' % HourIdx)
HourIdx +=1
BatchStart = time.time()
x, y, xs, epoch = next(train_it)
uidx +=1
#print('uidx is ', uidx)
            if len(x) % self.gpu_num != 0:
                print('batch size is not divisible by the number of GPUs, skipping this batch')
continue
x_data_list = numpy.split(numpy.array(x), self.gpu_num)
y_data_list = numpy.split(numpy.array(y), self.gpu_num)
xs_data_list = numpy.split(numpy.array(xs), self.gpu_num)
myFeed_dict={}
for i, x, y, xs in zip(range(self.gpu_num), x_data_list, y_data_list, xs_data_list):
x = x.tolist()
x, y, xs = dis_three_length_prepare(x, y, xs, self.max_len)
myFeed_dict[self.x_list[i]]=x
myFeed_dict[self.y_list[i]]=y
myFeed_dict[self.xs_list[i]]=xs
myFeed_dict[self.drop_list[i]]=drop_prob
_, loss_out, accuracy_out, grads_out = self.sess.run([self.train_optm, self.train_loss, self.train_accuracy, self.train_grads_and_vars], feed_dict=myFeed_dict)
if uidx == 1:
_ = self.sess.run(self.clip_ops)
#x_variable = [self.sess.run(tf.assign(x, tf.clip_by_value(x, -1.0, 1.0))) for x in tf.trainable_variables() if self.scope in x.name] # clip the value into -0.01 to 0.01
#print('ypred_for_auc is ', ypred_out)
BatchTime = time.time()-BatchStart
if numpy.mod(uidx, self.dispFreq) == 0:
print("epoch %d, samples %d, loss %f, accuracy %f BatchTime %f, for discriminator pretraining " % (epoch, uidx * self.gpu_num * self.batch_size, loss_out, accuracy_out, BatchTime))
if numpy.mod(uidx, self.saveFreq) == 0:
print('save params when epoch %d, samples %d' %(epoch, uidx * self.gpu_num * self.batch_size))
self.saver.save(self.sess, self.saveto)
if numpy.mod(uidx, self.devFreq) == 0:
print('testing the accuracy on the evaluation sets')
# def dis_train_iter():
# Epoch = 0
# while True:
# disTrain = disThreeTextIterator(self.dev_positive_data, self.dev_negative_data, self.dev_source_data, self.dictionaries[1], self.dictionaries[0],
# batch=self.batch_size,
# maxlen=self.max_len,
# n_words_target = self.vocab_size,
# n_words_source = self.vocab_size_s)
# ExampleNum = 0
# EpochStart = time.time()
# for x, y, xs in disTrain:
# ExampleNum+=len(x)
# yield x, y, xs, Epoch
# TimeCost = time.time() - EpochStart
# Epoch +=1
# dev_it = dis_train_iter()
# dev_epoch = 0
# dev_uidx = 0
# while dev_epoch < 1:
# dev_x, dev_y, dev_xs,dev_epoch = next(dev_it)
# dev_uidx +=1
#
# dev_x = numpy.array(dev_x)
# dev_y = numpy.array(dev_y)
# dev_xs = numpy.array(dev_xs)
# x, y, xs = dis_three_length_prepare(dev_x, dev_y, dev_xs, self.max_len)
# myFeed_dict={self.dis_input_x:x, self.dis_input_y:y, self.dis_input_xs:xs, self.dis_dropout_keep_prob:1.0}
# dev_ypred_out, dev_accuracy_out = self.sess.run([self.dis_ypred_for_auc, self.dis_accuracy], feed_dict=myFeed_dict)
# print('the accuracy_out in evaluation is %f' % dev_accuracy_out)
| 49.343708
| 225
| 0.609845
|
8664716f2aae4743b901149adadd8f4ff7739dab
| 8,770
|
py
|
Python
|
Day_15/part2.py
|
Uklusi/AdventOfCode2018
|
adf8bc8f5654ee5caa6a0c5611bd7eb2bba89107
|
[
"MIT"
] | null | null | null |
Day_15/part2.py
|
Uklusi/AdventOfCode2018
|
adf8bc8f5654ee5caa6a0c5611bd7eb2bba89107
|
[
"MIT"
] | null | null | null |
Day_15/part2.py
|
Uklusi/AdventOfCode2018
|
adf8bc8f5654ee5caa6a0c5611bd7eb2bba89107
|
[
"MIT"
] | null | null | null |
from AOCClasses import *
from queue import SimpleQueue as Queue
from copy import deepcopy
log = open("logp2.txt", "w")
image = open("imagep2.txt", "w")
def printLog(txt="", other=""):
txt = str(txt)
other = str(other)
log.write(txt + other + "\n")
def normal(val):
if isinstance(val, tuple):
return f"({val[0]:>2d}, {val[1]:>2d})"
return normal(val.coords(inverted=True))
powAndNumHits = [
( 3, 67), # 10
( 4, 50), # 10
( 5, 40), # 10
( 6, 34), # 10
( 7, 29), # 10
( 8, 25), # 7
( 9, 23), # 7
(10, 20), # 5
(11, 19), # 5
(12, 17), # 5
(13, 16), # 4
(14, 15), # 3
(15, 14), # 1
(16, 13), # 3
(17, 12), # 0
(19, 11), # 2
(20, 10), # 0
(23, 9), # 0
(25, 8), # 0
(29, 7), # 1
(34, 6) # 0
]
POWER = 17
def stepOrientation(f, t):
q = t - f
if q.distance() != 1:
raise(Exception("Not a step"))
elif q.y == -1:
return "U"
elif q.y == 1:
return "D"
elif q.x == -1:
return "L"
elif q.x == 1:
return "R"
else:
raise(Exception("Not a step"))
class Unit(SolidPosition):
def __init__(self, x,y,frame=None, solid=None, unitType=None):
super().__init__(x,y, reverseY=True, frame=frame, solid=solid)
# self.hp = 67 if unitType == "E" else int(200/POWER -0.1)+1
# self.atk = 1
self.hp = 200
self.atk = 3 if unitType == "G" else POWER
self.type = unitType
self.dead = False
def moveTo(self, pos):
if self == pos:
return
self.move(n=1, direction=stepOrientation(self, pos))
def attack(self, enemy):
enemy.hp -= self.atk
if enemy.hp <= 0:
enemy.dead = True
def gridAdj(self, include=None):
ret = super().gridAdj()
if include is not None and self.distance(include) == 1:
ret.append(include)
return ret
def __str__(self):
at = "X" if self.dead else "@"
return f"<{self.type} {at} {normal(self)} - HP: {self.hp:>3d}>"
def __repr__(self):
return str(self)
result = 0
units = []
enemies = {"G":[], "E":[]}
frame = []
with open("input.txt", "r") as input:
for line in input:
line = line.strip().split()[0]
frame.append(line)
def isSolid(p):
return frame[p.y][p.x] == "#" or (p in units and not all([q.dead for q in units if q == p]))
def listStr(l, indent = 0):
return "\n".join([" " * indent + str(e) for e in l])
for (y, line) in enumerate(frame):
for (x, c) in enumerate(line):
if c == "G":
g = Unit(x, y, frame=frame, solid=isSolid, unitType="G")
units.append(g)
enemies["E"].append(g)
elif c == "E":
e = Unit(x, y, frame=frame, solid=isSolid, unitType="E")
units.append(e)
enemies["G"].append(e)
def distanceAndStep(start, end, maxd):
#start is a unit, end is a target position (nonsolid or start)
# if start == Position(5,1) and end == Position(1,2):
# breakpoint()
if start == end:
return (0, start)
visited = [start]
current = Queue()
current.put((start, 0, end))
while not current.empty():
(currentPos, steps, firstStep) = current.get()
neighs = currentPos.gridAdj()
if end in currentPos.gridAdj():
return (steps + 1, firstStep)
if steps >= maxd:
return (None, None)
else:
neighs.sort()
for neigh in neighs:
if neigh not in visited:
if steps == 0:
firstStep = neigh
current.put((neigh, steps + 1, firstStep))
visited.append(neigh)
return (None, None)
def distanceAndStepList(start, end):
#start is a unit, end is a list of target positions (nonsolid or start)
# if start == Position(5,1) and end == Position(1,2):
# breakpoint()
if start in end:
return [(0, start, start)]
visited = [start]
current = Queue()
current.put((start, 0, start))
returnValues = []
returnDistance = 99999
while not current.empty():
(currentPos, steps, firstStep) = current.get()
if returnDistance < steps + 1:
return returnValues
neighs = currentPos.gridAdj()
neighs.sort()
for neigh in neighs:
if steps == 0:
firstStep = neigh
if neigh in end:
returnValues.append((steps + 1, neigh, firstStep))
returnDistance = steps + 1
elif neigh not in visited:
current.put((neigh, steps + 1, firstStep))
visited.append(neigh)
return returnValues
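# Both searches above are plain breadth-first search: neighbours are expanded in
# sorted (reading) order, and distanceAndStepList keeps every target found at the
# minimal distance together with the first step that reaches it. The caller can
# then sort the returned (distance, target, firstStep) triples to get the
# reading-order tie-breaking the combat rules require, e.g. (hypothetical call):
#   best = sorted(distanceAndStepList(unit, targets))[0]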
# solid = "#"
empty = path
def createImage(oldUnits):
positions = [a for (a, b) in oldUnits if not b.dead]
units = [b for (_, b) in oldUnits if not b.dead]
deadPositions = [a for (a, b) in oldUnits if b.dead]
deadUnits = [b for (_, b) in oldUnits if b.dead]
imageList = [" " + "".join([str(x % 10) for x in range(len(frame[0]))])]
for y in range(len(frame)):
imagerowList = [f"{y:>2d} "]
hpList = []
for x in range(len(frame[0])):
p = Position(x,y)
if p in units:
i = units.index(p)
unit = units[i]
imagerowList.append(unit.type)
hpList.append(f"{unit.hp:3d}")
elif p in deadUnits:
imagerowList.append("X")
elif p in positions:
i = positions.index(p)
t = units[i]
arrow = dirToArrow(stepOrientation(p, t))
imagerowList.append(arrow)
elif frame[y][x] == "#":
imagerowList.append(solid)
else:
imagerowList.append(empty)
imagerowList.append(" " + " ".join(hpList))
imageList.append("".join(imagerowList))
return "\n".join(imageList)
oldUnits = [(deepcopy(u), u) for u in units if not u.dead]
rounds = 0
while not all([e.dead for e in enemies["E"]]) and not all([e.dead for e in enemies["G"]]):
# for _ in range(1):
roundFinished = True
units.sort()
printLog(f"Units at round {rounds:>2d}:\n", listStr([u for u in units if not u.dead], indent=2))
printLog()
image.write(f"ROUND {rounds:>2d}\n")
image.write(createImage(oldUnits))
image.write("\n\n")
oldUnits = [(deepcopy(u), u) for u in units if not u.dead]
for unit in units:
if unit.dead:
continue
printLog("Current Unit: ", unit)
enemyList = [e for e in enemies[unit.type] if not e.dead]
enemyList.sort(key=lambda e: unit.distance(e))
# printLog("Enemy List for current unit:\n", listStr(enemyList, indent=2))
if len(enemyList) == 0:
printLog("No enemies, ending")
printLog()
roundFinished = False
break
targets = set()
for e in enemyList:
targets.update(e.gridAdj(include=unit))
distances = distanceAndStepList(unit, targets)
if len(distances) == 0:
printLog("No reachable enemies")
printLog("Unit did not move")
printLog()
continue
distances.sort()
printLog("Distances array (distance, targetTile, firstStep):\n", listStr([(d, normal(t), normal(f)) for (d,t,f) in distances], indent=2))
(distance, _, firstStep) = distances[0]
unit.moveTo(firstStep)
if distance == 0:
printLog("Unit did not move")
else:
printLog("Unit moved to ", normal(unit))
targettableEnemies = [(e.hp, e) for e in enemyList if unit.distance(e) == 1]
targettableEnemies.sort()
if len(targettableEnemies) > 0:
printLog("Targettable Enemies:\n", listStr(targettableEnemies, indent=2) )
(_, e) = targettableEnemies[0]
printLog("Attacking unit ", e)
unit.attack(e)
printLog("Result: ", e)
printLog()
# image.write(createImage(oldUnits))
# image.write("\n\n")
printLog()
if roundFinished:
rounds += 1
if rounds % 10 == 0:
print(rounds)
image.write(createImage(oldUnits))
image.write("\n\n")
printLog(f"Units at round {rounds:03d}:\n", listStr([u for u in units if not u.dead], indent=2))
E = "E"
print(f"Elves dead with power {POWER}: {len([u for u in units if u.dead and u.type == E])}")
result = sum([u.hp for u in units if not u.dead])
result *= rounds
with open("output2.txt", "w") as output:
output.write(str(result))
print(str(result))
| 30.034247
| 145
| 0.534322
|
972a4ab91015937caee84afa92222382040ceea4
| 15,935
|
py
|
Python
|
tests/test_confirmable.py
|
Kishi85/flask-security
|
e5a6899c3596ec140c12fe2924bdc0346a2a4115
|
[
"MIT"
] | null | null | null |
tests/test_confirmable.py
|
Kishi85/flask-security
|
e5a6899c3596ec140c12fe2924bdc0346a2a4115
|
[
"MIT"
] | null | null | null |
tests/test_confirmable.py
|
Kishi85/flask-security
|
e5a6899c3596ec140c12fe2924bdc0346a2a4115
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
test_confirmable
~~~~~~~~~~~~~~~~
Confirmable tests
"""
import json
import time
import pytest
from flask import Flask
from utils import authenticate, logout
from flask_security.core import UserMixin
from flask_security.confirmable import generate_confirmation_token
from flask_security.signals import confirm_instructions_sent, user_confirmed
from flask_security.utils import capture_flashes, capture_registrations, string_types
try:
from urlparse import parse_qsl, urlsplit
except ImportError: # pragma: no cover
from urllib.parse import parse_qsl, urlsplit
pytestmark = pytest.mark.confirmable()
@pytest.mark.registerable()
def test_confirmable_flag(app, client, sqlalchemy_datastore, get_message):
recorded_confirms = []
recorded_instructions_sent = []
@user_confirmed.connect_via(app)
def on_confirmed(app, user):
assert isinstance(app, Flask)
assert isinstance(user, UserMixin)
recorded_confirms.append(user)
@confirm_instructions_sent.connect_via(app)
def on_instructions_sent(app, user, token):
assert isinstance(app, Flask)
assert isinstance(user, UserMixin)
assert isinstance(token, string_types)
recorded_instructions_sent.append(user)
# Test login before confirmation
email = "dude@lp.com"
with capture_registrations() as registrations:
data = dict(email=email, password="password", next="")
response = client.post("/register", data=data)
assert response.status_code == 302
response = authenticate(client, email=email)
assert get_message("CONFIRMATION_REQUIRED") in response.data
# Test invalid token
response = client.get("/confirm/bogus", follow_redirects=True)
assert get_message("INVALID_CONFIRMATION_TOKEN") in response.data
# Test JSON
response = client.post(
"/confirm",
data='{"email": "matt@lp.com"}',
headers={"Content-Type": "application/json"},
)
assert response.status_code == 200
assert response.headers["Content-Type"] == "application/json"
assert "user" in response.jdata["response"]
assert len(recorded_instructions_sent) == 1
# Test ask for instructions with invalid email
response = client.post("/confirm", data=dict(email="bogus@bogus.com"))
assert get_message("USER_DOES_NOT_EXIST") in response.data
# Test resend instructions
response = client.post("/confirm", data=dict(email=email))
assert get_message("CONFIRMATION_REQUEST", email=email) in response.data
assert len(recorded_instructions_sent) == 2
# Test confirm
token = registrations[0]["confirm_token"]
response = client.get("/confirm/" + token, follow_redirects=True)
assert get_message("EMAIL_CONFIRMED") in response.data
assert len(recorded_confirms) == 1
# Test already confirmed
response = client.get("/confirm/" + token, follow_redirects=True)
assert get_message("ALREADY_CONFIRMED") in response.data
assert len(recorded_instructions_sent) == 2
# Test already confirmed and expired token
app.config["SECURITY_CONFIRM_EMAIL_WITHIN"] = "-1 days"
with app.app_context():
user = registrations[0]["user"]
expired_token = generate_confirmation_token(user)
response = client.get("/confirm/" + expired_token, follow_redirects=True)
assert get_message("ALREADY_CONFIRMED") in response.data
assert len(recorded_instructions_sent) == 2
# Test already confirmed when asking for confirmation instructions
logout(client)
response = client.get("/confirm")
assert response.status_code == 200
response = client.post("/confirm", data=dict(email=email))
assert get_message("ALREADY_CONFIRMED") in response.data
# Test user was deleted before confirmation
with capture_registrations() as registrations:
data = dict(email="mary@lp.com", password="password", next="")
client.post("/register", data=data)
user = registrations[0]["user"]
token = registrations[0]["confirm_token"]
with app.app_context():
sqlalchemy_datastore.delete(user)
sqlalchemy_datastore.commit()
response = client.get("/confirm/" + token, follow_redirects=True)
assert get_message("INVALID_CONFIRMATION_TOKEN") in response.data
@pytest.mark.registerable()
@pytest.mark.settings(confirm_email_within="1 milliseconds")
def test_expired_confirmation_token(client, get_message):
with capture_registrations() as registrations:
data = dict(email="mary@lp.com", password="password", next="")
client.post("/register", data=data, follow_redirects=True)
user = registrations[0]["user"]
token = registrations[0]["confirm_token"]
time.sleep(1)
response = client.get("/confirm/" + token, follow_redirects=True)
msg = get_message("CONFIRMATION_EXPIRED", within="1 milliseconds", email=user.email)
assert msg in response.data
@pytest.mark.registerable()
def test_email_conflict_for_confirmation_token(
app, client, get_message, sqlalchemy_datastore
):
with capture_registrations() as registrations:
data = dict(email="mary@lp.com", password="password", next="")
client.post("/register", data=data, follow_redirects=True)
user = registrations[0]["user"]
token = registrations[0]["confirm_token"]
# Change the user's email
user.email = "tom@lp.com"
with app.app_context():
sqlalchemy_datastore.put(user)
sqlalchemy_datastore.commit()
response = client.get("/confirm/" + token, follow_redirects=True)
msg = get_message("INVALID_CONFIRMATION_TOKEN")
assert msg in response.data
@pytest.mark.registerable()
@pytest.mark.settings(login_without_confirmation=True)
def test_login_when_unconfirmed(client, get_message):
data = dict(email="mary@lp.com", password="password", next="")
response = client.post("/register", data=data, follow_redirects=True)
assert b"mary@lp.com" in response.data
@pytest.mark.registerable()
def test_no_auth_token(client_nc):
""" Make sure that register doesn't return Authentication Token
if user isn't confirmed.
"""
response = client_nc.post(
"/register?include_auth_token",
data='{"email": "dude@lp.com", "password": "password"}',
headers={"Content-Type": "application/json"},
)
assert response.status_code == 200
user = response.json["response"]["user"]
assert len(user) == 2 and all(k in user for k in ["id", "last_update"])
@pytest.mark.registerable()
@pytest.mark.settings(login_without_confirmation=True)
def test_auth_token_unconfirmed(client_nc):
""" Make sure that register returns Authentication Token
if user isn't confirmed, but the 'login_without_confirmation' flag is set.
"""
response = client_nc.post(
"/register?include_auth_token",
data='{"email": "dude@lp.com", "password": "password"}',
headers={"Content-Type": "application/json"},
)
assert response.status_code == 200
user = response.json["response"]["user"]
assert len(user) == 3 and all(
k in user for k in ["id", "last_update", "authentication_token"]
)
@pytest.mark.registerable()
@pytest.mark.settings(login_without_confirmation=True, auto_login_after_confirm=False)
def test_confirmation_different_user_when_logged_in_no_auto(client, get_message):
""" Default - AUTO_LOGIN == false so shouldn't log in second user. """
e1 = "dude@lp.com"
e2 = "lady@lp.com"
with capture_registrations() as registrations:
for e in e1, e2:
data = dict(email=e, password="password", next="")
client.post("/register", data=data)
logout(client)
token1 = registrations[0]["confirm_token"]
token2 = registrations[1]["confirm_token"]
client.get("/confirm/" + token1, follow_redirects=True)
logout(client)
authenticate(client, email=e1)
response = client.get("/confirm/" + token2, follow_redirects=True)
assert get_message("EMAIL_CONFIRMED") in response.data
# should get a login view
assert (
b'<input id="password" name="password" required type="password" value="">'
in response.data
)
@pytest.mark.registerable()
@pytest.mark.settings(login_without_confirmation=True)
def test_confirmation_different_user_when_logged_in(client, get_message):
e1 = "dude@lp.com"
e2 = "lady@lp.com"
with capture_registrations() as registrations:
for e in e1, e2:
data = dict(email=e, password="password", next="")
client.post("/register", data=data)
logout(client)
token1 = registrations[0]["confirm_token"]
token2 = registrations[1]["confirm_token"]
client.get("/confirm/" + token1, follow_redirects=True)
logout(client)
authenticate(client, email=e1)
response = client.get("/confirm/" + token2, follow_redirects=True)
assert get_message("EMAIL_CONFIRMED") in response.data
assert b"Welcome lady@lp.com" in response.data
@pytest.mark.registerable()
@pytest.mark.settings(recoverable=True)
def test_cannot_reset_password_when_email_is_not_confirmed(client, get_message):
email = "dude@lp.com"
data = dict(email=email, password="password", next="")
response = client.post("/register", data=data, follow_redirects=True)
response = client.post("/reset", data=dict(email=email), follow_redirects=True)
assert get_message("CONFIRMATION_REQUIRED") in response.data
@pytest.mark.registerable()
@pytest.mark.settings(auto_login_after_confirm=False)
def test_confirm_redirect(client, get_message):
with capture_registrations() as registrations:
data = dict(email="jane@lp.com", password="password", next="")
client.post("/register", data=data, follow_redirects=True)
token = registrations[0]["confirm_token"]
response = client.get("/confirm/" + token)
assert "location" in response.headers
assert "/login" in response.location
response = client.get(response.location)
assert get_message("EMAIL_CONFIRMED") in response.data
@pytest.mark.registerable()
@pytest.mark.settings(post_confirm_view="/post_confirm")
def test_confirm_redirect_to_post_confirm(client, get_message):
with capture_registrations() as registrations:
data = dict(email="john@lp.com", password="password", next="")
client.post("/register", data=data, follow_redirects=True)
token = registrations[0]["confirm_token"]
response = client.get("/confirm/" + token, follow_redirects=True)
assert b"Post Confirm" in response.data
@pytest.mark.registerable()
@pytest.mark.settings(
redirect_host="localhost:8081",
redirect_behavior="spa",
post_confirm_view="/confirm-redirect",
)
def test_spa_get(app, client):
"""
Test 'single-page-application' style redirects
This uses json only.
"""
with capture_flashes() as flashes:
with capture_registrations() as registrations:
response = client.post(
"/register",
data='{"email": "dude@lp.com",\
"password": "password"}',
headers={"Content-Type": "application/json"},
)
assert response.headers["Content-Type"] == "application/json"
token = registrations[0]["confirm_token"]
response = client.get("/confirm/" + token)
assert response.status_code == 302
split = urlsplit(response.headers["Location"])
assert "localhost:8081" == split.netloc
assert "/confirm-redirect" == split.path
qparams = dict(parse_qsl(split.query))
assert qparams["email"] == "dude@lp.com"
    # Arguably there shouldn't be any flashes for a JSON request - the flash is
    # buried in register_user but really shouldn't be there.
assert len(flashes) == 1
@pytest.mark.registerable()
@pytest.mark.settings(
confirm_email_within="1 milliseconds",
redirect_host="localhost:8081",
redirect_behavior="spa",
confirm_error_view="/confirm-error",
)
def test_spa_get_bad_token(app, client, get_message):
""" Test expired and invalid token"""
with capture_flashes() as flashes:
with capture_registrations() as registrations:
response = client.post(
"/register",
data='{"email": "dude@lp.com",\
"password": "password"}',
headers={"Content-Type": "application/json"},
)
assert response.headers["Content-Type"] == "application/json"
token = registrations[0]["confirm_token"]
time.sleep(1)
response = client.get("/confirm/" + token)
assert response.status_code == 302
split = urlsplit(response.headers["Location"])
assert "localhost:8081" == split.netloc
assert "/confirm-error" == split.path
qparams = dict(parse_qsl(split.query))
assert len(qparams) == 2
assert all(k in qparams for k in ["email", "error"])
msg = get_message(
"CONFIRMATION_EXPIRED", within="1 milliseconds", email="dude@lp.com"
)
assert msg == qparams["error"].encode("utf-8")
# Test mangled token
token = (
"WyIxNjQ2MzYiLCIxMzQ1YzBlZmVhM2VhZjYwODgwMDhhZGU2YzU0MzZjMiJd."
"BZEw_Q.lQyo3npdPZtcJ_sNHVHP103syjM"
"&url_id=fbb89a8328e58c181ea7d064c2987874bc54a23d"
)
response = client.get("/confirm/" + token)
assert response.status_code == 302
split = urlsplit(response.headers["Location"])
assert "localhost:8081" == split.netloc
assert "/confirm-error" == split.path
qparams = dict(parse_qsl(split.query))
assert len(qparams) == 1
assert all(k in qparams for k in ["error"])
msg = get_message("INVALID_CONFIRMATION_TOKEN")
assert msg == qparams["error"].encode("utf-8")
assert len(flashes) == 1
@pytest.mark.two_factor()
@pytest.mark.registerable()
@pytest.mark.settings(two_factor_required=True)
def test_two_factor(app, client):
""" If two-factor is enabled, the confirm shouldn't login, but start the
2-factor setup.
"""
with capture_registrations() as registrations:
data = dict(email="mary@lp.com", password="password", next="")
client.post("/register", data=data, follow_redirects=True)
# make sure not logged in
response = client.get("/profile")
assert response.status_code == 302
assert "/login?next=%2Fprofile" in response.location
token = registrations[0]["confirm_token"]
response = client.get("/confirm/" + token, follow_redirects=False)
assert "tf-setup" in response.location
@pytest.mark.two_factor()
@pytest.mark.registerable()
@pytest.mark.settings(two_factor_required=True)
def test_two_factor_json(app, client, get_message):
with capture_registrations() as registrations:
data = dict(email="dude@lp.com", password="password")
response = client.post(
"/register", content_type="application/json", data=json.dumps(data)
)
assert response.headers["content-type"] == "application/json"
assert response.jdata["meta"]["code"] == 200
assert len(response.jdata["response"]) == 2
assert all(k in response.jdata["response"] for k in ["csrf_token", "user"])
# make sure not logged in
response = client.get("/profile", headers={"accept": "application/json"})
assert response.status_code == 401
assert response.jdata["response"]["error"].encode("utf-8") == get_message(
"UNAUTHENTICATED"
)
token = registrations[0]["confirm_token"]
response = client.get("/confirm/" + token, headers={"Accept": "application/json"})
assert response.status_code == 200
assert response.jdata["response"]["tf_required"]
assert response.jdata["response"]["tf_state"] == "setup_from_login"
| 36.215909
| 88
| 0.681268
|
02797de3388554b2f9fcc7e59904ec57a23a5315
| 6,161
|
py
|
Python
|
tests/emulator/test_config.py
|
pp81381/nicett6
|
addace8fbd5350105bf4fb27d1b485bb9cf20236
|
[
"MIT"
] | null | null | null |
tests/emulator/test_config.py
|
pp81381/nicett6
|
addace8fbd5350105bf4fb27d1b485bb9cf20236
|
[
"MIT"
] | 1
|
2021-06-06T20:43:09.000Z
|
2021-06-06T20:43:09.000Z
|
tests/emulator/test_config.py
|
pp81381/nicett6
|
addace8fbd5350105bf4fb27d1b485bb9cf20236
|
[
"MIT"
] | null | null | null |
from nicett6.emulator.config import build_config, default_config_file
from nicett6.emulator.cover_emulator import percent_pos_to_step_num
from nicett6.emulator.line_handler import PRESET_POS_5
from contextlib import redirect_stderr
from io import StringIO
from unittest import TestCase
from unittest.mock import patch, mock_open
class TestConfig(TestCase):
def setUp(self):
self.filename = default_config_file()
def test_build_config1(self):
config = build_config(["-f", self.filename])
self.assertEqual(config["port"], 50200)
self.assertEqual(config["web_on"], False)
self.assertEqual(len(config["covers"]), 2)
screen = config["covers"][0]
self.assertEqual(screen.name, "screen")
self.assertAlmostEqual(screen.step_len, 0.01)
self.assertAlmostEqual(screen.unadjusted_max_drop, 1.77)
self.assertAlmostEqual(screen.speed, 0.08)
self.assertAlmostEqual(screen.percent_pos, 1.0)
mask = config["covers"][1]
self.assertEqual(mask.name, "mask")
def test_build_config2(self):
config = build_config(["-f", self.filename, "-w", "-p", "50300"])
self.assertEqual(config["web_on"], True)
self.assertEqual(config["port"], 50300)
def test_build_config3(self):
config = build_config(["-f", self.filename, "-i", "screen", "0.75"])
self.assertEqual(len(config["covers"]), 2)
screen = config["covers"][0]
self.assertAlmostEqual(screen.percent_pos, 0.75)
def test_build_config4(self):
"""Test web_on=true, preset_pos_5 set in config file"""
test_json = """
{
"web_on": true,
"covers": [
{
"address": 2,
"node": 4,
"name": "screen",
"step_len": 0.01,
"max_drop": 1.77,
"speed": 0.08,
"percent_pos": 1.0,
"preset_pos_5": 0.5
}
]
}
"""
with patch("nicett6.emulator.config.open", mock_open(read_data=test_json)) as m:
dummy_filename = "dummy"
config = build_config(["-f", dummy_filename])
m.assert_called_once_with(dummy_filename)
self.assertEqual(config["port"], 50200)
self.assertEqual(config["web_on"], True)
self.assertEqual(len(config["covers"]), 1)
screen = config["covers"][0]
self.assertEqual(screen.name, "screen")
self.assertAlmostEqual(screen.step_len, 0.01)
self.assertAlmostEqual(screen.unadjusted_max_drop, 1.77)
self.assertAlmostEqual(screen.speed, 0.08)
self.assertAlmostEqual(screen.percent_pos, 1.0)
self.assertEqual(len(screen.presets), 1)
expected_pos_5 = percent_pos_to_step_num(0.5, screen.max_steps)
self.assertEqual(screen.presets[PRESET_POS_5], expected_pos_5)
def test_build_config5a(self):
"""Test web_on config"""
test_json = """{"web_on": true}"""
with patch("nicett6.emulator.config.open", mock_open(read_data=test_json)):
config = build_config([])
self.assertEqual(config["web_on"], True)
def test_build_config5b(self):
"""Test web_on config"""
test_json = """{"web_on": false}"""
with patch("nicett6.emulator.config.open", mock_open(read_data=test_json)):
config = build_config([])
self.assertEqual(config["web_on"], False)
def test_build_config5c(self):
"""Test --web_on override"""
test_json = """{"web_on": false}"""
with patch("nicett6.emulator.config.open", mock_open(read_data=test_json)):
config = build_config(["-w"])
self.assertEqual(config["web_on"], True)
def test_build_config5d(self):
"""Test --web_off override"""
test_json = """{"web_on": true}"""
with patch("nicett6.emulator.config.open", mock_open(read_data=test_json)):
config = build_config(["-W"])
self.assertEqual(config["web_on"], False)
def test_build_config5e(self):
"""Test combination of --web_off and --web_on override"""
test_json = """{"web_on": false}"""
with patch("nicett6.emulator.config.open", mock_open(read_data=test_json)):
config = build_config(["-W", "-w"])
self.assertEqual(config["web_on"], True)
def test_build_config5f(self):
"""Test combination of --web_on and --web_off override"""
test_json = """{"web_on": true}"""
with patch("nicett6.emulator.config.open", mock_open(read_data=test_json)):
config = build_config(["-w", "-W"])
self.assertEqual(config["web_on"], False)
def test_build_config_err1(self):
ioerr = StringIO()
with redirect_stderr(ioerr):
with self.assertRaises(SystemExit):
build_config(["-f", self.filename, "-i", "screen", "xyz"])
expected_message = "error: Invalid value specified for screen: xyz\n"
message = ioerr.getvalue()[-len(expected_message) :]
self.assertEqual(expected_message, message)
def test_build_config_err2(self):
ioerr = StringIO()
with redirect_stderr(ioerr):
with self.assertRaises(SystemExit):
build_config(["-f", self.filename, "-i", "screen", "1.01"])
expected_message = "error: Invalid percentage specified for screen (range is 0.0 for fully down to 1.0 for fully up)\n"
message = ioerr.getvalue()[-len(expected_message) :]
self.assertEqual(expected_message, message)
def test_build_config_err3(self):
ioerr = StringIO()
with redirect_stderr(ioerr):
with self.assertRaises(SystemExit):
build_config(["-f", self.filename, "-i", "screen2", "0.0"])
expected_message = "error: Invalid cover_name: screen2\n"
message = ioerr.getvalue()[-len(expected_message) :]
self.assertEqual(expected_message, message)
| 43.387324
| 131
| 0.60396
|
a51264a4db6f8e2927e8d51116f9cc769feea380
| 18,386
|
py
|
Python
|
2-OOPDesignPatternsInPython/final_project/Service.py
|
mamoudmatook/PythonSpecializaionInRussian
|
3340780b2f8a876a0f59e22036147fd98909d545
|
[
"MIT"
] | null | null | null |
2-OOPDesignPatternsInPython/final_project/Service.py
|
mamoudmatook/PythonSpecializaionInRussian
|
3340780b2f8a876a0f59e22036147fd98909d545
|
[
"MIT"
] | null | null | null |
2-OOPDesignPatternsInPython/final_project/Service.py
|
mamoudmatook/PythonSpecializaionInRussian
|
3340780b2f8a876a0f59e22036147fd98909d545
|
[
"MIT"
] | null | null | null |
import pygame
import random
import yaml
import os
import Objects
OBJECT_TEXTURE = os.path.join("texture", "objects")
ENEMY_TEXTURE = os.path.join("texture", "enemies")
ALLY_TEXTURE = os.path.join("texture", "ally")
def create_sprite(img, sprite_size):
icon = pygame.image.load(img).convert_alpha()
icon = pygame.transform.scale(icon, (sprite_size, sprite_size))
sprite = pygame.Surface((sprite_size, sprite_size), pygame.HWSURFACE)
sprite.blit(icon, (0, 0))
return sprite
def reload_game(engine, hero):
global level_list
level_list_max = len(level_list) - 1
engine.level += 1
hero.position = [1, 1]
engine.objects = []
generator = level_list[min(engine.level, level_list_max)]
_map = generator['map'].get_map()
engine.load_map(_map)
engine.add_objects(generator['obj'].get_objects(_map))
engine.add_hero(hero)
def restore_hp(engine, hero):
engine.score += 0.1
hero.hp = hero.max_hp
engine.notify("HP restored")
def apply_blessing(engine, hero):
if hero.gold >= int(20 * 1.5**engine.level) - 2 * hero.stats["intelligence"]:
engine.score += 0.2
hero.gold -= int(20 * 1.5**engine.level) - \
2 * hero.stats["intelligence"]
if random.randint(0, 1) == 0:
engine.hero = Objects.Blessing(hero)
engine.notify("Blessing applied")
else:
engine.hero = Objects.Berserk(hero)
engine.notify("Berserk applied")
else:
engine.score -= 0.1
def remove_effect(engine, hero):
if hero.gold >= int(10 * 1.5**engine.level) - 2 * hero.stats["intelligence"] and "base" in dir(hero):
hero.gold -= int(10 * 1.5**engine.level) - \
2 * hero.stats["intelligence"]
engine.hero = hero.base
engine.hero.calc_max_HP()
engine.notify("Effect removed")
def add_gold(engine, hero):
if random.randint(1, 10) == 1:
engine.score -= 0.05
engine.hero = Objects.Weakness(hero)
engine.notify("You were cursed")
else:
engine.score += 0.1
gold = int(random.randint(10, 1000) * (1.1**(engine.hero.level - 1)))
hero.gold += gold
engine.notify(f"{gold} gold added")
class MapFactory(yaml.YAMLObject):
@classmethod
def from_yaml(cls, loader, node):
# FIXME
# get _map and _obj
# FIXED
        _map = cls.Map()
        _obj = cls.Objects()
_obj.config = loader.construct_mapping(node)
return {'map': _map, 'obj': _obj}
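# Each MapFactory subclass below registers a YAML tag, so levels can be declared
# in the YAML config instead of in code. A hypothetical entry (the exact layout
# comes from the project's own YAML file, which is not shown here) might be:
#   levels:
#     - !random_map {}
#     - !empty_map {}
#     - !end_map {}
# Loading such a node calls from_yaml(), which returns the {'map': ..., 'obj': ...}
# pair that reload_game() consumes above.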
class EndMap(MapFactory):
yaml_tag = "!end_map"
class Map:
def __init__(self):
self.Map = ['000000000000000000000000000000000000000',
'0 0',
'0 0',
'0 0 0 000 0 0 00000 0 0 0',
'0 0 0 0 0 0 0 0 0 0 0',
'0 000 0 0 00000 0000 0 0 0',
'0 0 0 0 0 0 0 0 0 0 0',
'0 0 0 000 0 0 00000 00000 0',
'0 0 0',
'0 0',
'000000000000000000000000000000000000000'
]
self.Map = list(map(list, self.Map))
for i in self.Map:
for j in range(len(i)):
i[j] = wall if i[j] == '0' else floor1
def get_map(self):
return self.Map
class Objects:
def __init__(self):
self.objects = []
def get_objects(self, _map):
return self.objects
class RandomMap(MapFactory):
yaml_tag = "!random_map"
class Map:
def __init__(self):
self.Map = [[0 for _ in range(41)] for _ in range(41)]
for i in range(41):
for j in range(41):
if i == 0 or j == 0 or i == 40 or j == 40:
self.Map[j][i] = wall
else:
self.Map[j][i] = [wall, floor1, floor2, floor3, floor1,
floor2, floor3, floor1, floor2][random.randint(0, 8)]
def get_map(self):
return self.Map
class Objects:
def __init__(self):
self.objects = []
def get_objects(self, _map):
for obj_name in object_list_prob['objects']:
prop = object_list_prob['objects'][obj_name]
for i in range(random.randint(prop['min-count'], prop['max-count'])):
coord = (random.randint(1, 39), random.randint(1, 39))
intersect = True
while intersect:
intersect = False
if _map[coord[1]][coord[0]] == wall:
intersect = True
coord = (random.randint(1, 39),
random.randint(1, 39))
continue
for obj in self.objects:
if coord == obj.position or coord == (1, 1):
intersect = True
coord = (random.randint(1, 39),
random.randint(1, 39))
self.objects.append(Objects.Ally(
prop['sprite'], prop['action'], coord))
for obj_name in object_list_prob['ally']:
prop = object_list_prob['ally'][obj_name]
for i in range(random.randint(prop['min-count'], prop['max-count'])):
coord = (random.randint(1, 39), random.randint(1, 39))
intersect = True
while intersect:
intersect = False
if _map[coord[1]][coord[0]] == wall:
intersect = True
coord = (random.randint(1, 39),
random.randint(1, 39))
continue
for obj in self.objects:
if coord == obj.position or coord == (1, 1):
intersect = True
coord = (random.randint(1, 39),
random.randint(1, 39))
self.objects.append(Objects.Ally(
prop['sprite'], prop['action'], coord))
for obj_name in object_list_prob['enemies']:
prop = object_list_prob['enemies'][obj_name]
for i in range(random.randint(0, 5)):
coord = (random.randint(1, 30), random.randint(1, 22))
intersect = True
while intersect:
intersect = False
if _map[coord[1]][coord[0]] == wall:
intersect = True
coord = (random.randint(1, 39),
random.randint(1, 39))
continue
for obj in self.objects:
if coord == obj.position or coord == (1, 1):
intersect = True
coord = (random.randint(1, 39),
random.randint(1, 39))
self.objects.append(Objects.Enemy(
prop['sprite'], prop, prop['experience'], coord))
return self.objects
# FIXME
# FIXED
# add classes for YAML !empty_map and !special_map{}
class Empty_Map(MapFactory):
yaml_tag = "!empty_map"
class Map:
def __init__(self):
self.Map = ['000000000000000000000000000000000000000',
'0 0',
'0 0',
'0 0',
'0 0',
'0 0',
'0 0',
'0 0',
'0 0',
'0 0',
'000000000000000000000000000000000000000'
]
self.Map = list(map(list, self.Map))
for i in self.Map:
for j in range(len(i)):
i[j] = wall if i[j] == '0' else floor1
def get_map(self):
return self.Map
class Objects:
def __init__(self):
self.objects = []
def get_objects(self, _map):
for obj_name in object_list_prob['objects']:
prop = object_list_prob['objects'][obj_name]
for i in range(random.randint(prop['min-count'], prop['max-count'])):
coord = (5, 5)
intersect = True
while intersect:
intersect = False
if _map[coord[1]][coord[0]] == wall:
intersect = True
coord = (random.randint(1, 10),
random.randint(1, 10))
continue
for obj in self.objects:
if coord == obj.position or coord == (1, 1):
intersect = True
coord = (random.randint(1, 10),
random.randint(1, 10))
self.objects.append(Objects.Ally(
prop['sprite'], prop['action'], coord))
for obj_name in object_list_prob['ally']:
prop = object_list_prob['ally'][obj_name]
for i in range(random.randint(prop['min-count'], prop['max-count'])):
coord = (random.randint(1, 10), random.randint(1, 10))
intersect = True
while intersect:
intersect = False
if _map[coord[1]][coord[0]] == wall:
intersect = True
coord = (random.randint(1, 10),
random.randint(1, 10))
continue
for obj in self.objects:
if coord == obj.position or coord == (1, 1):
intersect = True
coord = (random.randint(1, 10),
random.randint(1, 10))
self.objects.append(Objects.Ally(
prop['sprite'], prop['action'], coord))
return self.objects
class Special_Map(MapFactory):
yaml_tag = "!special_map"
class Map:
def __init__(self):
self.Map = ['000000000000000000000000000000000000000',
'0 0',
'0 0000000000 0',
'0 00 0',
'0 00 0',
'0 00 0',
'0 0000000000 0',
'0 00 0',
'0 0000000000 0',
'0 0',
'000000000000000000000000000000000000000'
]
self.Map = list(map(list, self.Map))
for i in self.Map:
for j in range(len(i)):
i[j] = wall if i[j] == '0' else floor1
def get_map(self):
return self.Map
class Objects:
def __init__(self):
self.objects = []
def get_objects(self, _map):
for obj_name in object_list_prob['objects']:
prop = object_list_prob['objects'][obj_name]
for i in range(random.randint(prop['min-count'], prop['max-count'])):
coord = (random.randint(2, 37), random.randint(2, 9))
intersect = True
while intersect:
intersect = False
if _map[coord[1]][coord[0]] == wall:
intersect = True
coord = (random.randint(2, 37),
random.randint(2, 9))
continue
for obj in self.objects:
if coord == obj.position or coord == (1, 1):
intersect = True
coord = (random.randint(2, 37),
random.randint(2, 9))
self.objects.append(Objects.Ally(
prop['sprite'], prop['action'], coord))
for obj_name in object_list_prob['ally']:
prop = object_list_prob['ally'][obj_name]
for i in range(random.randint(prop['min-count'], prop['max-count'])):
coord = (random.randint(2, 37), random.randint(2, 9))
intersect = True
while intersect:
intersect = False
if _map[coord[1]][coord[0]] == wall:
intersect = True
coord = (random.randint(2, 37),
random.randint(2, 9))
continue
for obj in self.objects:
if coord == obj.position or coord == (1, 1):
intersect = True
coord = (random.randint(2, 37),
random.randint(2, 9))
self.objects.append(Objects.Ally(
prop['sprite'], prop['action'], coord))
for obj_name in object_list_prob['enemies']:
prop = object_list_prob['enemies'][obj_name]
for i in range(random.randint(0, 5)):
coord = (random.randint(2, 37), random.randint(2, 9))
intersect = True
while intersect:
intersect = False
if _map[coord[1]][coord[0]] == wall:
intersect = True
coord = (random.randint(2, 37),
random.randint(2, 9))
continue
for obj in self.objects:
if coord == obj.position or coord == (1, 1):
intersect = True
coord = (random.randint(2, 37),
random.randint(2, 9))
self.objects.append(Objects.Enemy(
prop['sprite'], prop, prop['experience'], coord))
return self.objects
wall = [0]
floor1 = [0]
floor2 = [0]
floor3 = [0]
def service_init(sprite_size, full=True):
global object_list_prob, level_list
global wall
global floor1
global floor2
global floor3
wall[0] = create_sprite(os.path.join("texture", "wall.png"), sprite_size)
floor1[0] = create_sprite(os.path.join("texture", "Ground_1.png"), sprite_size)
floor2[0] = create_sprite(os.path.join("texture", "Ground_2.png"), sprite_size)
floor3[0] = create_sprite(os.path.join("texture", "Ground_3.png"), sprite_size)
file = open("objects.yml", "r")
object_list_tmp = yaml.load(file.read(), Loader=yaml.Loader)  # explicit Loader: avoids the bare yaml.load() warning/TypeError on newer PyYAML
if full:
object_list_prob = object_list_tmp
object_list_actions = {'reload_game': reload_game,
'add_gold': add_gold,
'apply_blessing': apply_blessing,
'remove_effect': remove_effect,
'restore_hp': restore_hp}
for obj in object_list_prob['objects']:
prop = object_list_prob['objects'][obj]
prop_tmp = object_list_tmp['objects'][obj]
prop['sprite'][0] = create_sprite(
os.path.join(OBJECT_TEXTURE, prop_tmp['sprite'][0]), sprite_size)
prop['action'] = object_list_actions[prop_tmp['action']]
for ally in object_list_prob['ally']:
prop = object_list_prob['ally'][ally]
prop_tmp = object_list_tmp['ally'][ally]
prop['sprite'][0] = create_sprite(
os.path.join(ALLY_TEXTURE, prop_tmp['sprite'][0]), sprite_size)
prop['action'] = object_list_actions[prop_tmp['action']]
for enemy in object_list_prob['enemies']:
prop = object_list_prob['enemies'][enemy]
prop_tmp = object_list_tmp['enemies'][enemy]
prop['sprite'][0] = create_sprite(
os.path.join(ENEMY_TEXTURE, prop_tmp['sprite'][0]), sprite_size)
file.close()
if full:
file = open("levels.yml", "r")
level_list = yaml.load(file.read(), Loader=yaml.Loader)['levels']  # yaml.Loader resolves the custom !*_map tags registered via YAMLObject
level_list.append({'map': EndMap.Map(), 'obj': EndMap.Objects()})
file.close()
| 41.40991
| 106
| 0.419504
|
e7dde53f6c4f4e4dfa3d77cccf7579cd39c51275
| 1,048
|
py
|
Python
|
common.py
|
sankovalev/vk_reg_bot
|
a9ad89b768d99a15d8bca0aa0e266a429a733ed1
|
[
"MIT"
] | 2
|
2019-08-16T19:36:41.000Z
|
2020-01-27T11:19:54.000Z
|
common.py
|
sankovalev/vk_reg_bot
|
a9ad89b768d99a15d8bca0aa0e266a429a733ed1
|
[
"MIT"
] | null | null | null |
common.py
|
sankovalev/vk_reg_bot
|
a9ad89b768d99a15d8bca0aa0e266a429a733ed1
|
[
"MIT"
] | 2
|
2020-01-27T11:20:00.000Z
|
2021-03-25T16:53:47.000Z
|
import MySQLdb
from settings import DB_params as p
def check_register(user_id):
db=MySQLdb.connect(host=p['host'], user=p['user'], passwd=p['passwd'], db=p['db'], charset = "utf8", use_unicode = True)
c=db.cursor()
c.execute("""SELECT count(*) FROM Students WHERE idVK=%s""", [str(user_id)])
count = c.fetchone()[0]
db.close()
if int(count) == 0:
return False
else:
return True
def check_letter(user_id):
db=MySQLdb.connect(host=p['host'], user=p['user'], passwd=p['passwd'], db=p['db'], charset = "utf8", use_unicode = True)
c=db.cursor()
c.execute("""SELECT count(*) FROM Letter WHERE idVK=%s""", [str(user_id)])
count = c.fetchone()[0]
db.close()
if int(count) == 0:
return False
else:
return True
def add_letter(user_id):
db=MySQLdb.connect(host=p['host'], user=p['user'], passwd=p['passwd'], db=p['db'], charset = "utf8", use_unicode = True)
c=db.cursor()
c.execute("""INSERT INTO Letter (idVK) VALUES (%s)""", [str(user_id)])
db.commit()
db.close()
return
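# Note (editorial sketch, not part of the original bot): the three helpers above each
# open and close their own MySQLdb connection. A small helper plus contextlib.closing
# would remove the duplication while keeping the parameterised queries, e.g.
#
#   from contextlib import closing
#   def _connect():
#       return MySQLdb.connect(host=p['host'], user=p['user'], passwd=p['passwd'],
#                              db=p['db'], charset="utf8", use_unicode=True)
#   with closing(_connect()) as db, closing(db.cursor()) as c:
#       c.execute("""SELECT count(*) FROM Students WHERE idVK=%s""", [str(user_id)])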
| 32.75
| 123
| 0.622137
|
e2bfe19a4d2e8cba54d73133cd35bf5996072385
| 971
|
py
|
Python
|
jsonier/util/datetimeutil.py
|
malaya-zemlya/jsonier
|
43fbb9d40aa29e9a1af9cf179691c13d7e027677
|
[
"MIT"
] | null | null | null |
jsonier/util/datetimeutil.py
|
malaya-zemlya/jsonier
|
43fbb9d40aa29e9a1af9cf179691c13d7e027677
|
[
"MIT"
] | null | null | null |
jsonier/util/datetimeutil.py
|
malaya-zemlya/jsonier
|
43fbb9d40aa29e9a1af9cf179691c13d7e027677
|
[
"MIT"
] | null | null | null |
from datetime import datetime
from typing import (
Union
)
def datetime_to_int(dt: datetime) -> int:
return int(dt.timestamp())
def int_to_datetime(t: int) -> datetime:
return datetime.utcfromtimestamp(t)
def datetime_to_float(dt: datetime) -> float:
return dt.timestamp()
def float_to_datetime(t: float) -> datetime:
return datetime.utcfromtimestamp(t)
def datetime_to_str(dt: datetime) -> str:
return dt.isoformat() + 'Z'
def str_to_datetime(t: str) -> datetime:
if t.endswith('Z'):
t = t[:-1]
return datetime.fromisoformat(t)
def auto_to_datetime(t: Union[int, float, str, datetime]) -> datetime:
if isinstance(t, datetime):
return t
elif isinstance(t, int):
return int_to_datetime(t)
elif isinstance(t, float):
return float_to_datetime(t)
elif isinstance(t, str):
return str_to_datetime(t)
else:
raise TypeError(f'Cannot convert {type(t)} to datetime')
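# Illustrative round-trips (all three yield the same naive UTC datetime):
#   auto_to_datetime(1609459200)               -> datetime(2021, 1, 1, 0, 0)
#   auto_to_datetime(1609459200.0)             -> datetime(2021, 1, 1, 0, 0)
#   auto_to_datetime('2021-01-01T00:00:00Z')   -> datetime(2021, 1, 1, 0, 0)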
| 22.068182
| 70
| 0.668383
|
21cd84b4d070ace35b88615eb841521e50d1be10
| 1,216
|
py
|
Python
|
examples/evm/coverage.py
|
Srinivas11789/manticore
|
af3c6aada811833864efaccef7477f14e9b5e0dd
|
[
"Apache-2.0"
] | null | null | null |
examples/evm/coverage.py
|
Srinivas11789/manticore
|
af3c6aada811833864efaccef7477f14e9b5e0dd
|
[
"Apache-2.0"
] | null | null | null |
examples/evm/coverage.py
|
Srinivas11789/manticore
|
af3c6aada811833864efaccef7477f14e9b5e0dd
|
[
"Apache-2.0"
] | null | null | null |
from manticore.ethereum import ManticoreEVM
m = ManticoreEVM()
m.verbosity(3)
# Create the contract account to analyze
source_code = open('coverage.sol').read()
user_account = m.create_account(balance=1000)
bytecode = m.compile(source_code)
# Initialize contract
contract_account = m.create_contract(owner=user_account,
balance=0,
init=bytecode)
m.transaction(caller=user_account,
address=contract_account,
value=m.make_symbolic_value(),
data=m.make_symbolic_buffer(164),
)
# Up to here we get only ~30% coverage.
# We need 2 transactions to fully explore the contract
m.transaction(caller=user_account,
address=contract_account,
value=m.make_symbolic_value(),
data=m.make_symbolic_buffer(164),
)
print("[+] There are %d reverted states now" % m.count_terminated_states())
print("[+] There are %d alive states now" % m.count_running_states())
# for state_id in m.running_state_ids:
# print(m.report(state_id))
print("[+] Global coverage: %x" % contract_account.address)
print(m.global_coverage(contract_account))
| 32.864865
| 75
| 0.657895
|
305ea44e252f2b7a4f415189d447820a148a1b1d
| 5,430
|
py
|
Python
|
spherical/spherical_J_integrals.py
|
jls713/jfactors
|
2427f4bad052b37fb7eefffaa4ee46d398f33504
|
[
"MIT"
] | null | null | null |
spherical/spherical_J_integrals.py
|
jls713/jfactors
|
2427f4bad052b37fb7eefffaa4ee46d398f33504
|
[
"MIT"
] | null | null | null |
spherical/spherical_J_integrals.py
|
jls713/jfactors
|
2427f4bad052b37fb7eefffaa4ee46d398f33504
|
[
"MIT"
] | null | null | null |
import numpy as np
from scipy.integrate import quad
import matplotlib.pyplot as plt
import seaborn as sns
import sys
sys.path.append('/home/jls/work/data/jfactors/')
from spherical_Jfactors import wyns_formulaJ_NFW, wyns_formulaD_NFW
from matplotlib.ticker import MaxNLocator # added
GEV2cm5toMsol2kpc5 = 2.2482330e-07
GEVcm2toMsolkpc2 = 8.5358230e-15
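# Unit-conversion factors: dividing a quantity in M_sun^2 kpc^-5 by GEV2cm5toMsol2kpc5
# gives GeV^2 cm^-5 (used for the J-factor below), and dividing M_sun kpc^-2 by
# GEVcm2toMsolkpc2 gives GeV cm^-2 (used for the D-factor).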
def integrate_Jthetamax_spherical_alphabetagamma(thetamax,D,rho0,rs,alpha,beta,gamma,rt):
def rho(r):
return np.power(r/rs,-gamma)*np.power(1+np.power(r/rs,alpha),((gamma-beta)/alpha))*np.sqrt(1-np.tanh(r/rt)**2)
def J(ll,b):
z = ll
x = np.sqrt(b*b+z*z)
return b*(rho(x)**2)
return np.log10(rho0*rho0*2.*np.pi*quad(lambda y: quad(lambda z: J(y,z), 0., thetamax*D)[0],-np.inf,np.inf)[0]/D/D/GEV2cm5toMsol2kpc5)
def integrate_Dthetamax_spherical_alphabetagamma(thetamax,D,rho0,rs,alpha,beta,gamma,rt):
def rho(r):
return np.power(r/rs,-gamma)*np.power(1+np.power(r/rs,alpha),((gamma-beta)/alpha))*np.sqrt(1-np.tanh(r/rt)**2)
def J(ll,b):
z = ll
x = np.sqrt(b*b+z*z)
return b*rho(x)
return np.log10(rho0*2.*np.pi*quad(lambda y: quad(lambda z: J(y,z), 0., thetamax*D)[0],-np.inf,np.inf)[0]/D/D/GEVcm2toMsolkpc2)
def integrate_rho_spherical_alphabetagamma(R,rho0,rs,alpha,beta,gamma,rt):
def rho(r):
return np.power(r/rs,-gamma)*np.power(1+np.power(r/rs,alpha),((gamma-beta)/alpha))*np.sqrt(1-np.tanh(r/rt)**2)
def J(x):
return x*x*rho(x)
return 4.*np.pi*rho0*quad(J, 0., R)[0]
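# The three integrals above share the same truncated (alpha, beta, gamma) profile,
#   rho(r) = rho0 * (r/rs)^(-gamma) * [1 + (r/rs)^alpha]^((gamma - beta)/alpha) * sech(r/rt),
# with J integrating rho^2 and D integrating rho along the line of sight out to a
# projected radius thetamax*D, and M integrating 4*pi*r^2*rho out to radius R.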
angs = np.deg2rad(np.logspace(np.log10(5e-3),np.log10(0.6),30))
Rhalf = 0.03 ## 30pc
sig = 3. ## 3km/s
G = 4.300918e-6 ## in units solar mass, km/s kpc
Mhalf = 2.5*sig**2*Rhalf/G ## Walker formula
rs = 0.15 ## scale radius of NFW units kpc
D = 30. ## distance kpc
gamma = [0.,0.2,0.4,0.6,0.8,1.,1.2]
beta = [3.,3.5,4.,4.5,5.,5.5,6.]
alpha = [1.,1.5,2.]
rt = 10.
angs_dimless = angs*D/Rhalf
max_M,min_M = np.zeros(len(angs)),np.ones(len(angs))*1e50
max_J,min_J = np.zeros(len(angs)),np.ones(len(angs))*1e50
max_D,min_D = np.zeros(len(angs)),np.ones(len(angs))*1e50
f,a=plt.subplots(3,1,figsize=[3.32,5.5])
plt.subplots_adjust(hspace=0.)
for b,c in zip(beta,sns.color_palette()):
for g in gamma:
for al in alpha:
rho0 = 1.
M = integrate_rho_spherical_alphabetagamma(Rhalf,rho0,rs,al,b,g,rt)
rho0=Mhalf/M
for n,x in enumerate(angs):
mm = integrate_rho_spherical_alphabetagamma(x*D,rho0,rs,al,b,g,rt)
if(mm>max_M[n]):
max_M[n]=mm
if(mm<min_M[n]):
min_M[n]=mm
jj = integrate_Jthetamax_spherical_alphabetagamma(x,D,rho0,rs,al,b,g,rt)
if(jj>max_J[n]):
max_J[n]=jj
if(jj<min_J[n]):
min_J[n]=jj
dd = integrate_Dthetamax_spherical_alphabetagamma(x,D,rho0,rs,al,b,g,rt)
if(dd>max_D[n]):
max_D[n]=dd
if(dd<min_D[n]):
min_D[n]=dd
# a[0].plot(angs_dimless,map(lambda x:integrate_rho_spherical_alphabetagamma(x*D,rho0,rs,alpha,b,g,rt),angs),color=c)
# a[1].plot(angs_dimless,map(lambda x:integrate_Jthetamax_spherical_alphabetagamma(x,D,rho0,rs,alpha,b,g,rt),angs),color=c)
# a[2].plot(angs_dimless,map(lambda x:integrate_Dthetamax_spherical_alphabetagamma(x,D,rho0,rs,alpha,b,g,rt),angs),color=c)
a[0].fill_between(angs_dimless,min_M,max_M,alpha=0.5,color=sns.color_palette()[0])
a[1].fill_between(angs_dimless,min_J,max_J,alpha=0.5,color=sns.color_palette()[0])
a[2].fill_between(angs_dimless,min_D,max_D,alpha=0.5,color=sns.color_palette()[0])
a[1].plot(angs_dimless,wyns_formulaJ_NFW(sig,Rhalf*1000.,D,np.rad2deg(angs),rs,walker_or_wolf="walker"),color='k')
a[2].plot(angs_dimless,wyns_formulaD_NFW(sig,Rhalf*1000.,D,np.rad2deg(angs),rs,walker_or_wolf="walker"),color='k')
a[0].semilogx()
a[1].semilogx()
a[2].semilogx()
a[0].semilogy()
a[0].set_xlim(0.1,10.)
a[1].set_xlim(0.1,10.)
a[2].set_xlim(0.1,10.)
# a[1].set_xlabel(r'$\alpha/^\circ$')
a[2].set_xlabel(r'$D\theta/R_h$')
a[0].set_xticklabels([])
a[1].set_xticklabels([])
a[1].yaxis.set_major_locator(MaxNLocator(prune='upper'))
a[2].yaxis.set_major_locator(MaxNLocator(prune='upper'))
a[0].annotate(r'$\sigma_{\mathrm{los}}=3\,\mathrm{km\,s}^{-1},\,R_{\mathrm{half}}=30\,\mathrm{pc}$',xy=(0.1,2e8),annotation_clip=False,fontsize=14)
l=a[0].axvline(1.,ls='dashed',color='k')
l.set_dashes((3,1))
l=a[1].axvline(2.,ls='dashed',color='k')
l.set_dashes((3,1))
l=a[1].axvline(1.,ls='dashed',color='k')
l.set_dashes((3,1))
a[1].annotate('Walker et al. (2011)', xy=(2.1,18.5),rotation=90.,annotation_clip=False)
l=a[2].axvline(2.,ls='dashed',color='k')
l.set_dashes((3,1))
l=a[2].axvline(1.,ls='dashed',color='k')
l.set_dashes((3,1))
l=a[1].axvline(8.72,ls='dashed',color='r')
l.set_dashes((3,1))
l=a[2].axvline(8.72,ls='dashed',color='r')
l.set_dashes((3,1))
a[2].annotate(r'$\theta=0.5^\circ$', xy=(7.,17.),rotation=90.,annotation_clip=False)
a[2].set_ylim(15.5,19.)
a[0].set_ylabel(r'$M(D\theta)/\mathrm{M}_\odot$')
a[1].set_ylabel(r'$\log_{10} [J(\theta)/\mathrm{GeV\,cm}^{-5}]$')
a[2].set_ylabel(r'$\log_{10} [D(\theta)/\mathrm{GeV\,cm}^{-2}]$')
plt.savefig('spherical_comparison.pdf',bbox_inches='tight')
| 40.222222
| 147
| 0.687661
|
0dd0577f166efbc4d06550dd2dda571351819011
| 1,559
|
py
|
Python
|
research_files/multiprocess.py
|
RexValkering/socialforcemodel
|
83950fcb34557ac4a849748c1b055cafea04013d
|
[
"MIT"
] | 9
|
2017-03-23T18:58:12.000Z
|
2021-03-24T09:55:35.000Z
|
research_files/multiprocess.py
|
bazylip/socialforcemodel
|
5599d1736cdaf7e7c3e203671208a7baeec85732
|
[
"MIT"
] | 2
|
2019-07-31T09:38:47.000Z
|
2021-04-29T09:03:56.000Z
|
research_files/multiprocess.py
|
bazylip/socialforcemodel
|
5599d1736cdaf7e7c3e203671208a7baeec85732
|
[
"MIT"
] | 12
|
2018-03-29T08:29:25.000Z
|
2021-03-29T14:10:15.000Z
|
from multiprocessing import Pool
from continuous_test import main
def run_multiple():
import argparse
import sys
from copy import deepcopy
# Setup the parser
parser = argparse.ArgumentParser()
parser.add_argument('file', help='YAML-file')
parser.add_argument('-s', '--steps', help='Number of steps', type=int, default=500)
parser.add_argument('-o', '--outfile', help='File for measurements', default='measurements')
parser.add_argument('-p', '--pedestrian_file', help='Pedestrian file', default='')
parser.add_argument('-m', '--max_pedestrians', help='max pedestrians', type=int, default=100)
parser.add_argument('-r', '--repetitions', default=1, type=int)
parser.add_argument('-b', '--barriertime', default=0, type=int)
# Get basic parser object
file = "situations/turbulence_parameters_normal.yaml"
args = parser.parse_args([file])
# Set default parameters
args.steps = 30000
# nums = range(300, 510, 20)
# barriers = [8, 16, 24]
# repetitions = range(1, 6)
nums = [300]
barriers = [8]
repetitions = range(1, 6)
arg_list = []
for m in nums:
args.max_pedestrians = m
for b in barriers:
args.barriertime = b
for r in repetitions:
args.outfile = "tbn_{}_{}_{}".format(m, b, r)
args.pedestrian_file = "stable/tbn_{}_{}.pickle".format(m, r)
arg_list.append(deepcopy(args))
p = Pool(5)
p.map(main, arg_list)
if __name__ == '__main__':
run_multiple()
| 31.816327
| 97
| 0.626684
|
99d501e57578d6d12782fdea900e39d5bdf888d9
| 7,882
|
py
|
Python
|
ckan_cloud_operator/providers/solr/manager.py
|
OriHoch/ckan-cloud-operator
|
125c3eb10f843ac62fc85659e756bd1d9620eae7
|
[
"MIT"
] | null | null | null |
ckan_cloud_operator/providers/solr/manager.py
|
OriHoch/ckan-cloud-operator
|
125c3eb10f843ac62fc85659e756bd1d9620eae7
|
[
"MIT"
] | null | null | null |
ckan_cloud_operator/providers/solr/manager.py
|
OriHoch/ckan-cloud-operator
|
125c3eb10f843ac62fc85659e756bd1d9620eae7
|
[
"MIT"
] | null | null | null |
import subprocess
import json
import os
import glob
from ckan_cloud_operator.config import manager as config_manager
from ckan_cloud_operator.infra import CkanInfra
from ckan_cloud_operator import logs
from ckan_cloud_operator import kubectl
from .constants import PROVIDER_SUBMODULE
from .solrcloud.constants import PROVIDER_ID as solrcloud_provider_id
def initialize(interactive=False, dry_run=False):
solr_skip = config_manager.interactive_set(
{
'skip-solr': False
},
secret_name='solr-skip',
interactive=interactive
)
if solr_skip['skip-solr'] == 'y':
return
ckan_infra = CkanInfra(required=False)
solr_config = config_manager.interactive_set(
{
'self-hosted': True
},
secret_name='solr-config',
interactive=interactive
)
if is_self_hosted(solr_config):
initialize_self_hosted(interactive=interactive, dry_run=dry_run)
else:
config_manager.interactive_set(
{
'http-endpoint': ckan_infra.SOLR_HTTP_ENDPOINT,
},
secret_name='solr-config',
interactive=interactive
)
config_manager.interactive_set(
{
'num-shards': ckan_infra.SOLR_NUM_SHARDS or '1',
'replication-factor': ckan_infra.SOLR_REPLICATION_FACTOR or '3'
},
secret_name='solr-config',
interactive=interactive
)
def initialize_self_hosted(interactive=False, dry_run=False):
get_provider(default=solrcloud_provider_id, verbose=True).initialize(interactive=interactive, dry_run=dry_run)
zk_set_url_scheme()
def get_internal_http_endpoint():
if is_self_hosted():
return get_provider().get_internal_http_endpoint()
else:
return config_get('http-endpoint', required=True)
def is_self_hosted(config_vals=None):
if config_vals:
config_val = config_vals['self-hosted']
else:
config_val = config_get('self-hosted', required=True)
return config_val == 'y'
def get_num_shards():
return config_get('num-shards')
def get_replication_factor():
return config_get('replication-factor')
def config_get(key, required=False):
return config_manager.get(key, secret_name='solr-config', required=required)
def get_provider(default=None, verbose=False):
from ckan_cloud_operator.providers import manager as providers_manager
return providers_manager.get_provider(PROVIDER_SUBMODULE, default=default, verbose=verbose)
def start_zoonavigator_port_forward():
get_provider().start_zoonavigator_port_forward()
def start_solrcloud_port_forward(suffix='sc-0'):
get_provider().start_solrcloud_port_forward(suffix=suffix)
def delete_collection(collection_name):
solr_curl(f'/admin/collections?action=DELETE&name={collection_name}', required=True)
def get_collection_status(collection_name):
output = solr_curl(f'/{collection_name}/schema')
if output is False:
return {'ready': False,
'collection_name': collection_name,
'solr_http_endpoint': get_internal_http_endpoint()}
else:
res = json.loads(output)
return {'ready': True,
'collection_name': collection_name,
'solr_http_endpoint': get_internal_http_endpoint(),
'schemaVersion': res['schema']['version'],
'schemaName': res['schema']['name']}
def create_collection(collection_name, config_name):
logs.info(f'creating solrcloud collection {collection_name} using config {config_name}')
replication_factor = get_replication_factor()
num_shards = get_num_shards()
output = solr_curl(f'/admin/collections?action=CREATE'
f'&name={collection_name}'
f'&collection.configName={config_name}'
f'&replicationFactor={replication_factor}'
f'&numShards={num_shards}', required=True)
logs.info(output)
def solr_curl(path, required=False, debug=False):
if is_self_hosted():
return get_provider().solr_curl(path, required=required, debug=debug)
else:
http_endpoint = get_internal_http_endpoint()
if debug:
subprocess.check_call(f'curl \'{http_endpoint}{path}\'')
else:
exitcode, output = subprocess.getstatusoutput(f'curl -s -f \'{http_endpoint}{path}\'')
if exitcode == 0:
return output
elif required:
logs.critical(output)
raise Exception(f'Failed to run solr curl: {http_endpoint}{path}')
else:
logs.warning(output)
return False
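# Usage sketch (the collection and config names are placeholders, not values defined
# in this module):
#   status = get_collection_status('ckan')
#   if not status['ready']:
#       create_collection('ckan', config_name='ckan_default')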
def zk_set_url_scheme(scheme='http'):
pod_name = kubectl.get('pods', '-l', 'app=provider-solr-solrcloud-zk', required=True)['items'][0]['metadata']['name']
kubectl.check_output('exec %s zkCli.sh set /clusterprops.json \'{"urlScheme":"%s"}\'' % (pod_name, scheme))
def zk_list_configs():
pod_name = kubectl.get('pods', '-l', 'app=provider-solr-solrcloud-zk', required=True)['items'][0]['metadata']['name']
lines = list(kubectl.check_output(f'exec {pod_name} zkCli.sh ls /configs').decode().splitlines())[5:]
if len(lines) == 1:
return [name.strip() for name in lines[0][1:-1].split(',')]
else:
return []
def zk_list_config_files(config_name, config_files, base_path=''):
path = f'/configs/{config_name}{base_path}'
# print(f'path={path}')
pod_name = kubectl.get('pods', '-l', 'app=provider-solr-solrcloud-zk', required=True)['items'][0]['metadata']['name']
lines = list(kubectl.check_output(f'exec {pod_name} zkCli.sh ls {path}').decode().splitlines())[5:]
# print(f'lines={lines}')
assert len(lines) == 1
num_files = 0
for name in lines[0][1:-1].split(','):
name = name.strip()
if not name: continue
# print(f'name={name}')
if zk_list_config_files(config_name, config_files, base_path=f'{base_path}/{name}') == 0:
config_files.append(f'{base_path}/{name}')
num_files += 1
return num_files
def zk_get_config_file(config_name, config_file, output_filename):
path = f'/configs/{config_name}{config_file}'
# print(f'path={path}')
pod_name = kubectl.get('pods', '-l', 'app=provider-solr-solrcloud-zk', required=True)['items'][0]['metadata']['name']
lines = list(kubectl.check_output(f'exec {pod_name} zkCli.sh get {path} 2>/dev/null').decode().splitlines())[5:]
assert len(lines) > 0
os.makedirs(os.path.dirname(output_filename), exist_ok=True)
with open(output_filename, 'w') as f:
f.write('\n'.join(lines))
def zk_put_configs(configs_dir):
pod_name = kubectl.get('pods', '-l', 'app=provider-solr-solrcloud-zk', required=True)['items'][0]['metadata']['name']
for input_filename in glob.glob(f'{configs_dir}/**/*', recursive=True):
if not os.path.isfile(input_filename): continue
output_filename = '/configs' + input_filename.replace(configs_dir, '')
print(f'{input_filename} --> {output_filename}')
output_filepath = ''
for output_filepart in output_filename.split('/')[:-1]:
output_filepart = output_filepart.strip()
if not output_filepart:
continue
output_filepath += f'/{output_filepart}'
print(f'create {output_filepath} null')
print(kubectl.call(
f'exec {pod_name} zkCli.sh create {output_filepath} null'
))
print(f'copy {output_filename}')
print(kubectl.call(
f'cp {input_filename} {pod_name}:/tmp/zk_input'
))
print(f'create {output_filename}')
print(kubectl.call(
f"exec {pod_name} bash -- -c 'zkCli.sh create {output_filename} \"$(cat /tmp/zk_input)\"'"
))
| 36.831776
| 121
| 0.651992
|
43d1d47ce23b6d0a38a9d4c5f881e316d2aa8e33
| 3,302
|
py
|
Python
|
whats_fresh/whats_fresh_api/tests/views/entry/test_new_video.py
|
osu-cass/whats-fresh-api
|
0ace76c3d7d423e95d5e3b3c7cd0f74abcf975bd
|
[
"Apache-2.0"
] | 4
|
2015-08-20T19:38:03.000Z
|
2016-01-20T18:52:24.000Z
|
whats_fresh/whats_fresh_api/tests/views/entry/test_new_video.py
|
osu-cass/whats-fresh-api
|
0ace76c3d7d423e95d5e3b3c7cd0f74abcf975bd
|
[
"Apache-2.0"
] | 39
|
2015-01-08T23:50:47.000Z
|
2021-01-05T20:19:15.000Z
|
whats_fresh/whats_fresh_api/tests/views/entry/test_new_video.py
|
osu-cass/whats-fresh-api
|
0ace76c3d7d423e95d5e3b3c7cd0f74abcf975bd
|
[
"Apache-2.0"
] | 8
|
2015-03-07T23:52:30.000Z
|
2015-12-25T04:25:23.000Z
|
from django.test import TestCase
from django.core.urlresolvers import reverse
from whats_fresh.whats_fresh_api.models import Video
from django.contrib.auth.models import User, Group
class NewVideoTestCase(TestCase):
"""
Test that the New Video page works as expected.
Things tested:
URLs reverse correctly
The outputted page has the correct form fields
POSTing "correct" data will result in the creation of a new
object with the specified details
POSTing data with all fields missing (hitting "save" without entering
data) returns the same field with notations of missing fields
"""
def setUp(self):
user = User.objects.create_user(
'temporary', 'temporary@gmail.com', 'temporary')
user.save()
admin_group = Group(name='Administration Users')
admin_group.save()
user.groups.add(admin_group)
response = self.client.login(
username='temporary', password='temporary')
self.assertEqual(response, True)
def test_not_logged_in(self):
self.client.logout()
response = self.client.get(
reverse('new-video'))
self.assertRedirects(response, '/login?next=/entry/videos/new')
def test_url_endpoint(self):
url = reverse('new-video')
self.assertEqual(url, '/entry/videos/new')
def test_form_fields(self):
"""
Tests to see if the form contains all of the right fields
"""
response = self.client.get(reverse('new-video'))
fields = {'video': 'input', 'name': 'input', 'caption': 'input'}
form = response.context['video_form']
for field in fields:
# for the Edit tests, you should be able to access
# form[field].value
self.assertIn(fields[field], str(form[field]))
def test_successful_video_creation(self):
"""
POST a proper "new video" command to the server, and see if the
new video appears in the database. All optional fields are null.
"""
Video.objects.all().delete()
# Data that we'll post to the server to get the new video created
new_video = {
'caption': "A thrilling display of utmost might",
'name': "You won't believe number 3!",
'video': 'http://www.youtube.com/watch?v=dQw4w9WgXcQ'}
self.client.post(reverse('new-video'), new_video)
video = Video.objects.all()[0]
for field in new_video:
self.assertEqual(
getattr(video, field), new_video[field])
def test_no_data_error(self):
"""
POST a "new video" command to the server missing all of the
required fields, and test to see what the error comes back as.
"""
# Create a list of all objects before sending bad POST data
all_videos = Video.objects.all()
response = self.client.post(reverse('new-video'))
required_fields = ['video']
for field_name in required_fields:
self.assertIn(field_name,
response.context['video_form'].errors)
# Test that we didn't add any new objects
self.assertEqual(
list(Video.objects.all()), list(all_videos))
| 34.395833
| 77
| 0.620533
|
4f1d77de5909566b9905b35155567b8438a09d20
| 14,797
|
py
|
Python
|
pycollo/solution/solution_abc.py
|
oscarliu99/pycollo
|
298f983108c3c1f7c9293c3ad4f5eda1ee6f0cf6
|
[
"MIT"
] | null | null | null |
pycollo/solution/solution_abc.py
|
oscarliu99/pycollo
|
298f983108c3c1f7c9293c3ad4f5eda1ee6f0cf6
|
[
"MIT"
] | null | null | null |
pycollo/solution/solution_abc.py
|
oscarliu99/pycollo
|
298f983108c3c1f7c9293c3ad4f5eda1ee6f0cf6
|
[
"MIT"
] | null | null | null |
import collections
from abc import ABC, abstractmethod
import numpy as np
from pyproprop import processed_property
from ..mesh_refinement import MESH_REFINEMENT_ALGORITHMS
from ..vis.plot import plot_solution
import warnings
nlp_result_fields = ("solution", "info", "solve_time")
NlpResult = collections.namedtuple("NlpResult", nlp_result_fields)
phase_solution_data_fields = ("tau", "y", "dy", "u", "q", "t", "t0", "tF",
"T", "stretch", "shift", "time")
PhaseSolutionData = collections.namedtuple("PhaseSolutionData",
phase_solution_data_fields)
Polys = collections.namedtuple("Polys", ("y", "dy", "u"))
class SolutionABC(ABC):
objective = processed_property("objective", read_only=True)
initial_time = processed_property("initial_time", read_only=True)
final_time = processed_property("final_time", read_only=True)
state = processed_property("state", read_only=True)
state_derivative = processed_property("state_derivative", read_only=True)
control = processed_property("control", read_only=True)
integral = processed_property("integral", read_only=True)
time = processed_property("time", read_only=True)
parameter = processed_property("parameter", read_only=True)
def __init__(self, iteration, nlp_result):
self.it = iteration
self.ocp = iteration.optimal_control_problem
self.backend = iteration.backend
self.tau = iteration.mesh.tau
self.nlp_result = nlp_result
self.backend_specific_init()
self.process_solution()
@abstractmethod
def backend_specific_init(self):
pass
def process_solution(self):
self.extract_full_solution()
self.set_user_attributes()
quadrature_method = self.ocp.settings.quadrature_method
if quadrature_method in ("lobatto", "lobatto differential"):
    self.interpolate_solution_lobatto_trial()
elif quadrature_method in ("radau", "radau differential"):
    self.interpolate_solution_radau_trial()
elif quadrature_method in ("gauss", "gauss differential"):
    self.interpolate_solution_gauss_trial()
@abstractmethod
def extract_full_solution(self):
pass
@abstractmethod
def set_user_attributes(self):
pass
def interpolate_solution_lobatto(self):
self.phase_polys = []
zipped = zip(self.backend.p,
self.phase_data,
self.it.mesh.K,
self.it.mesh.N_K,
self.it.mesh.mesh_index_boundaries)
for p, p_data, K, N_K, mesh_index_boundaries in zipped:
# print('p')
# print(p)
# print('p_data')
# print(p_data)
# print('k')
# print(K)
# print('N_k')
# print(N_K)
# print('mesh_index_boundaries')
# print(mesh_index_boundaries)
y_polys = np.empty((p.num_y_var, K), dtype=object)
dy_polys = np.empty((p.num_y_var, K), dtype=object)
u_polys = np.empty((p.num_u_var, K), dtype=object)
for i_y, (state, state_deriv) in enumerate(zip(p_data.y, p_data.dy)):
for i_k, (i_start, i_stop) in enumerate(zip(mesh_index_boundaries[:-1], mesh_index_boundaries[1:])):
t_k = p_data.tau[i_start:i_stop + 1]
dy_k = state_deriv[i_start:i_stop + 1]
dy_poly = np.polynomial.Legendre.fit(t_k,
dy_k,
deg=N_K[i_k] - 1,
window=[0, 1])
dy_polys[i_y, i_k] = dy_poly
scale_factor = p_data.T / self.it.mesh._PERIOD
y_k = state[i_start:i_stop + 1]
dy_k = state_deriv[i_start:i_stop + 1] * scale_factor
dy_poly = np.polynomial.Legendre.fit(t_k,
dy_k,
deg=N_K[i_k] - 1,
window=[0, 1])
y_poly = dy_poly.integ(k=state[i_start])
y_polys[i_y, i_k] = y_poly
t_data = np.linspace(t_k[0], t_k[-1])
for i_u, control in enumerate(p_data.u):
for i_k, (i_start, i_stop) in enumerate(zip(mesh_index_boundaries[:-1], mesh_index_boundaries[1:])):
t_k = p_data.tau[i_start:i_stop + 1]
u_k = control[i_start:i_stop + 1]
u_poly = np.polynomial.Polynomial.fit(
t_k, u_k, deg=N_K[i_k] - 1, window=[0, 1])
u_polys[i_u, i_k] = u_poly
phase_polys = Polys(y_polys, dy_polys, u_polys)
self.phase_polys.append(phase_polys)
def interpolate_solution_radau(self):
self.phase_polys = []
zipped = zip(self.backend.p,
self.phase_data,
self.it.mesh.K,
self.it.mesh.N_K,
self.it.mesh.mesh_index_boundaries)
for p, p_data, K, N_K, mesh_index_boundaries in zipped:
y_polys = np.empty((p.num_y_var, K), dtype=object)
dy_polys = np.empty((p.num_y_var, K), dtype=object)
u_polys = np.empty((p.num_u_var, K), dtype=object)
for i_y, (state, state_deriv) in enumerate(zip(p_data.y, p_data.dy)):
for i_k, (i_start, i_stop) in enumerate(zip(mesh_index_boundaries[:-1], mesh_index_boundaries[1:])):
scale_factor = p_data.T / self.it.mesh._PERIOD
t_k = p_data.tau[i_start:i_stop + 1]
dy_k = state_deriv[i_start:i_stop + 1] * scale_factor
dy_poly = np.polynomial.Legendre.fit(t_k[:-1],
dy_k[:-1],
deg=N_K[i_k] - 2,
domain=[t_k[0], t_k[-1]],
window=[0, 1])
dy_polys[i_y, i_k] = dy_poly
y_poly = dy_poly.integ(k=state[i_start])
y_polys[i_y, i_k] = y_poly
dy_poly = np.polynomial.Legendre.fit(t_k[:-1],
state_deriv[i_start:i_stop + 1][:-1],
deg=N_K[i_k] - 2,
domain=[t_k[0], t_k[-1]],
window=[0, 1])
dy_polys[i_y, i_k] = dy_poly
for i_u, control in enumerate(p_data.u):
for i_k, (i_start, i_stop) in enumerate(zip(mesh_index_boundaries[:-1], mesh_index_boundaries[1:])):
t_k = p_data.tau[i_start:i_stop + 1]
u_k = control[i_start:i_stop + 1]
u_poly = np.polynomial.Polynomial.fit(
t_k, u_k, deg=N_K[i_k] - 1, window=[0, 1])
u_polys[i_u, i_k] = u_poly
phase_polys = Polys(y_polys, dy_polys, u_polys)
self.phase_polys.append(phase_polys)
def interpolate_solution_lobatto_trial(self):
self.phase_polys = []
zipped = zip(self.backend.p,
self.phase_data,
self.it.mesh.K,
self.it.mesh.N_K,
self.it.mesh.mesh_index_boundaries)
for p, p_data, K, N_K, mesh_index_boundaries in zipped:
y_polys = np.empty((p.num_y_var, K), dtype=object)
dy_polys = np.empty((p.num_y_var, K), dtype=object)
u_polys = np.empty((p.num_u_var, K), dtype=object)
for i_y, (state, state_deriv) in enumerate(zip(p_data.y, p_data.dy)):
y_tem = []
dy_tem = []
t_tem = []
for i_k, (i_start, i_stop) in enumerate(zip(mesh_index_boundaries[:-1], mesh_index_boundaries[1:])):
t_k = p_data.tau[i_start:i_stop + 1]
dy_k = state_deriv[i_start:i_stop + 1]
y_k = state[i_start:i_stop + 1]
y_poly = np.polynomial.Legendre.fit(t_k,
y_k,
deg=N_K[i_k]-1,
window=[0, 1])
dy_poly = y_poly.deriv()
y_tem.extend([y_poly(i) for i in np.linspace(t_k[0],t_k[-1],10)])
dy_tem.extend([dy_poly(i) for i in np.linspace(t_k[0],t_k[-1],10)])
t_tem.extend(list(np.linspace(t_k[0],t_k[-1],10)))
y_polys[i_y, i_k] = y_poly
dy_polys[i_y, i_k] = dy_poly
# print(y_tem)
# print(dy_tem)
for i_u, control in enumerate(p_data.u):
for i_k, (i_start, i_stop) in enumerate(zip(mesh_index_boundaries[:-1], mesh_index_boundaries[1:])):
t_k = p_data.tau[i_start:i_stop + 1]
u_k = control[i_start:i_stop + 1]
u_poly = np.polynomial.Polynomial.fit(
t_k, u_k, deg=N_K[i_k] - 1, window=[0, 1])
u_polys[i_u, i_k] = u_poly
phase_polys = Polys(y_polys, dy_polys, u_polys)
self.phase_polys.append(phase_polys)
def interpolate_solution_radau_trial(self):
self.phase_polys = []
zipped = zip(self.backend.p,
self.phase_data,
self.it.mesh.K,
self.it.mesh.N_K,
self.it.mesh.mesh_index_boundaries)
for p, p_data, K, N_K, mesh_index_boundaries in zipped:
y_polys = np.empty((p.num_y_var, K), dtype=object)
dy_polys = np.empty((p.num_y_var, K), dtype=object)
u_polys = np.empty((p.num_u_var, K), dtype=object)
for i_y, (state, state_deriv) in enumerate(zip(p_data.y, p_data.dy)):
y_tem = []
dy_tem = []
t_tem = []
for i_k, (i_start, i_stop) in enumerate(zip(mesh_index_boundaries[:-1], mesh_index_boundaries[1:])):
t_k = p_data.tau[i_start:i_stop + 1]
dy_k = state_deriv[i_start:i_stop + 1]
y_k = state[i_start:i_stop + 1]
y_poly = np.polynomial.Legendre.fit(t_k,
y_k,
deg=N_K[i_k]-1,
window=[0, 1])
dy_poly = y_poly.deriv()
y_tem.extend([y_poly(i) for i in np.linspace(t_k[0],t_k[-1],10)])
dy_tem.extend([dy_poly(i) for i in np.linspace(t_k[0],t_k[-1],10)])
t_tem.extend(list(np.linspace(t_k[0],t_k[-1],10)))
y_polys[i_y, i_k] = y_poly
dy_polys[i_y, i_k] = dy_poly
# print(y_tem)
# print(dy_tem)
for i_u, control in enumerate(p_data.u):
for i_k, (i_start, i_stop) in enumerate(zip(mesh_index_boundaries[:-1], mesh_index_boundaries[1:])):
t_k = p_data.tau[i_start:i_stop + 1]
u_k = control[i_start:i_stop + 1]
u_poly = np.polynomial.Polynomial.fit(
t_k[:-1], u_k[:-1], deg=N_K[i_k] - 2, window=[0, 1])
u_polys[i_u, i_k] = u_poly
phase_polys = Polys(y_polys, dy_polys, u_polys)
self.phase_polys.append(phase_polys)
def interpolate_solution_gauss_trial(self):
self.phase_polys = []
zipped = zip(self.backend.p,
self.phase_data,
self.it.mesh.K,
self.it.mesh.N_K,
self.it.mesh.mesh_index_boundaries)
for p, p_data, K, N_K, mesh_index_boundaries in zipped:
y_polys = np.empty((p.num_y_var, K), dtype=object)
dy_polys = np.empty((p.num_y_var, K), dtype=object)
u_polys = np.empty((p.num_u_var, K), dtype=object)
for i_y, (state, state_deriv) in enumerate(zip(p_data.y, p_data.dy)):
y_tem = []
dy_tem = []
t_tem = []
for i_k, (i_start, i_stop) in enumerate(zip(mesh_index_boundaries[:-1], mesh_index_boundaries[1:])):
t_k = p_data.tau[i_start:i_stop + 1]
dy_k = state_deriv[i_start:i_stop + 1]
y_k = state[i_start:i_stop + 1]
y_poly = np.polynomial.Legendre.fit(t_k[:-1],
y_k[:-1],
deg=N_K[i_k]-2,
window=[0, 1])
dy_poly = y_poly.deriv()
y_tem.extend([y_poly(i) for i in np.linspace(t_k[0],t_k[-1],10)])
dy_tem.extend([dy_poly(i) for i in np.linspace(t_k[0],t_k[-1],10)])
t_tem.extend(list(np.linspace(t_k[0],t_k[-1],10)))
y_polys[i_y, i_k] = y_poly
dy_polys[i_y, i_k] = dy_poly
# print(y_tem)
# print(dy_tem)
for i_u, control in enumerate(p_data.u):
for i_k, (i_start, i_stop) in enumerate(zip(mesh_index_boundaries[:-1], mesh_index_boundaries[1:])):
t_k = p_data.tau[i_start:i_stop + 1]
u_k = control[i_start:i_stop + 1]
u_poly = np.polynomial.Polynomial.fit(
t_k[1:-1], u_k[1:-1], deg=N_K[i_k] - 3, window=[0, 1])
u_polys[i_u, i_k] = u_poly
phase_polys = Polys(y_polys, dy_polys, u_polys)
self.phase_polys.append(phase_polys)
def plot(self):
plot_solution(self)
def refine_mesh(self):
dispatcher = MESH_REFINEMENT_ALGORITHMS.dispatcher
algorithm = self.ocp.settings.mesh_refinement_algorithm
self.mesh_refinement = dispatcher[algorithm](self)
return self.mesh_refinement.next_iter_mesh
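# Typical flow for a concrete subclass of SolutionABC (illustrative only; the concrete
# class name is a placeholder):
#   solution = ConcreteSolution(iteration, nlp_result)   # runs process_solution()
#   solution.plot()                                      # delegates to plot_solution(self)
#   next_mesh = solution.refine_mesh()                   # mesh for the next iteration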
| 48.674342
| 125
| 0.503751
|
37a9311abab5e84e7f9cbb26d33da131172a157d
| 196
|
py
|
Python
|
Programs/2.py
|
jishanshaikh4/statistical-methods-primer
|
f226622b03660c60b91b56a9ff73068c6bb8cca8
|
[
"MIT"
] | 12
|
2021-05-31T13:16:54.000Z
|
2021-07-20T22:51:29.000Z
|
Programs/2.py
|
Jishanshaikh4/statistical-methods-primer
|
f226622b03660c60b91b56a9ff73068c6bb8cca8
|
[
"MIT"
] | null | null | null |
Programs/2.py
|
Jishanshaikh4/statistical-methods-primer
|
f226622b03660c60b91b56a9ff73068c6bb8cca8
|
[
"MIT"
] | 1
|
2019-10-22T17:21:38.000Z
|
2019-10-22T17:21:38.000Z
|
# Regression
def regression-fitting(a, z):
b = a;
# Process the data a
# Code for regression
# Sample data for processing
a = [1, 2, 3, 4, 5, 6, 7, 8, 9]
z = [9, 8, 7, 6, 5, 4, 3, 2, 1]
| 17.818182
| 31
| 0.556122
|
6a733284c577e6ebf4f20cf752bcff3792dc01d2
| 17,900
|
py
|
Python
|
tests/asyncio/test_asyncio_socket.py
|
StoneMoe/python-engineio
|
43f8fb5cd3fefe96768f8a8d91006787fa9c1c19
|
[
"MIT"
] | 16
|
2020-08-31T19:58:45.000Z
|
2022-03-07T03:37:54.000Z
|
tests/asyncio/test_asyncio_socket.py
|
StoneMoe/python-engineio
|
43f8fb5cd3fefe96768f8a8d91006787fa9c1c19
|
[
"MIT"
] | 7
|
2020-11-16T11:13:22.000Z
|
2022-03-25T19:01:47.000Z
|
libs/tests/asyncio/test_asyncio_socket.py
|
thekingofkings/focusread
|
3c9978417433cca42bb03a154b22dd9c59f290e4
|
[
"MIT"
] | 3
|
2020-10-15T12:31:31.000Z
|
2020-12-28T02:20:46.000Z
|
import asyncio
import sys
import time
import unittest
import six
if six.PY3:
from unittest import mock
else:
import mock
from engineio import asyncio_socket
from engineio import exceptions
from engineio import packet
from engineio import payload
def AsyncMock(*args, **kwargs):
"""Return a mock asynchronous function."""
m = mock.MagicMock(*args, **kwargs)
async def mock_coro(*args, **kwargs):
return m(*args, **kwargs)
mock_coro.mock = m
return mock_coro
def _run(coro):
"""Run the given coroutine."""
return asyncio.get_event_loop().run_until_complete(coro)
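# These two helpers let the synchronous unittest methods below drive coroutines:
# coroutine attributes are patched with AsyncMock() and awaited via _run(), after
# which assertions are made against the underlying ``.mock`` object.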
@unittest.skipIf(sys.version_info < (3, 5), 'only for Python 3.5+')
class TestSocket(unittest.TestCase):
def _get_read_mock_coro(self, payload):
mock_input = mock.MagicMock()
mock_input.read = AsyncMock()
mock_input.read.mock.return_value = payload
return mock_input
def _get_mock_server(self):
mock_server = mock.Mock()
mock_server.ping_timeout = 0.2
mock_server.ping_interval = 0.2
mock_server.async_handlers = False
mock_server._async = {'asyncio': True,
'create_route': mock.MagicMock(),
'translate_request': mock.MagicMock(),
'make_response': mock.MagicMock(),
'websocket': 'w'}
mock_server._async['translate_request'].return_value = 'request'
mock_server._async['make_response'].return_value = 'response'
mock_server._trigger_event = AsyncMock()
def create_queue(*args, **kwargs):
queue = asyncio.Queue(*args, **kwargs)
queue.Empty = asyncio.QueueEmpty
return queue
mock_server.create_queue = create_queue
return mock_server
def test_create(self):
mock_server = self._get_mock_server()
s = asyncio_socket.AsyncSocket(mock_server, 'sid')
self.assertEqual(s.server, mock_server)
self.assertEqual(s.sid, 'sid')
self.assertFalse(s.upgraded)
self.assertFalse(s.closed)
self.assertTrue(hasattr(s.queue, 'get'))
self.assertTrue(hasattr(s.queue, 'put'))
self.assertTrue(hasattr(s.queue, 'task_done'))
self.assertTrue(hasattr(s.queue, 'join'))
def test_empty_poll(self):
mock_server = self._get_mock_server()
s = asyncio_socket.AsyncSocket(mock_server, 'sid')
self.assertRaises(exceptions.QueueEmpty, _run, s.poll())
def test_poll(self):
mock_server = self._get_mock_server()
s = asyncio_socket.AsyncSocket(mock_server, 'sid')
pkt1 = packet.Packet(packet.MESSAGE, data='hello')
pkt2 = packet.Packet(packet.MESSAGE, data='bye')
_run(s.send(pkt1))
_run(s.send(pkt2))
self.assertEqual(_run(s.poll()), [pkt1, pkt2])
def test_poll_none(self):
mock_server = self._get_mock_server()
s = asyncio_socket.AsyncSocket(mock_server, 'sid')
_run(s.queue.put(None))
self.assertEqual(_run(s.poll()), [])
def test_ping_pong(self):
mock_server = self._get_mock_server()
s = asyncio_socket.AsyncSocket(mock_server, 'sid')
_run(s.receive(packet.Packet(packet.PING, data='abc')))
r = _run(s.poll())
self.assertEqual(len(r), 1)
self.assertEqual(r[0].encode(), b'3abc')
def test_message_sync_handler(self):
mock_server = self._get_mock_server()
s = asyncio_socket.AsyncSocket(mock_server, 'sid')
_run(s.receive(packet.Packet(packet.MESSAGE, data='foo')))
mock_server._trigger_event.mock.assert_called_once_with(
'message', 'sid', 'foo', run_async=False)
def test_message_async_handler(self):
mock_server = self._get_mock_server()
s = asyncio_socket.AsyncSocket(mock_server, 'sid')
mock_server.async_handlers = True
_run(s.receive(packet.Packet(packet.MESSAGE, data='foo')))
mock_server._trigger_event.mock.assert_called_once_with(
'message', 'sid', 'foo', run_async=True)
def test_invalid_packet(self):
mock_server = self._get_mock_server()
s = asyncio_socket.AsyncSocket(mock_server, 'sid')
self.assertRaises(exceptions.UnknownPacketError, _run,
s.receive(packet.Packet(packet.OPEN)))
def test_timeout(self):
mock_server = self._get_mock_server()
mock_server.ping_interval = -6
s = asyncio_socket.AsyncSocket(mock_server, 'sid')
s.last_ping = time.time() - 1
s.close = AsyncMock()
_run(s.send('packet'))
s.close.mock.assert_called_once_with(wait=False, abort=False)
def test_polling_read(self):
mock_server = self._get_mock_server()
s = asyncio_socket.AsyncSocket(mock_server, 'foo')
pkt1 = packet.Packet(packet.MESSAGE, data='hello')
pkt2 = packet.Packet(packet.MESSAGE, data='bye')
_run(s.send(pkt1))
_run(s.send(pkt2))
environ = {'REQUEST_METHOD': 'GET', 'QUERY_STRING': 'sid=foo'}
packets = _run(s.handle_get_request(environ))
self.assertEqual(packets, [pkt1, pkt2])
def test_polling_read_error(self):
mock_server = self._get_mock_server()
s = asyncio_socket.AsyncSocket(mock_server, 'foo')
environ = {'REQUEST_METHOD': 'GET', 'QUERY_STRING': 'sid=foo'}
self.assertRaises(exceptions.QueueEmpty, _run,
s.handle_get_request(environ))
def test_polling_write(self):
mock_server = self._get_mock_server()
mock_server.max_http_buffer_size = 1000
pkt1 = packet.Packet(packet.MESSAGE, data='hello')
pkt2 = packet.Packet(packet.MESSAGE, data='bye')
p = payload.Payload(packets=[pkt1, pkt2]).encode()
s = asyncio_socket.AsyncSocket(mock_server, 'foo')
s.receive = AsyncMock()
environ = {'REQUEST_METHOD': 'POST', 'QUERY_STRING': 'sid=foo',
'CONTENT_LENGTH': len(p),
'wsgi.input': self._get_read_mock_coro(p)}
_run(s.handle_post_request(environ))
self.assertEqual(s.receive.mock.call_count, 2)
def test_polling_write_too_large(self):
mock_server = self._get_mock_server()
pkt1 = packet.Packet(packet.MESSAGE, data='hello')
pkt2 = packet.Packet(packet.MESSAGE, data='bye')
p = payload.Payload(packets=[pkt1, pkt2]).encode()
mock_server.max_http_buffer_size = len(p) - 1
s = asyncio_socket.AsyncSocket(mock_server, 'foo')
s.receive = AsyncMock()
environ = {'REQUEST_METHOD': 'POST', 'QUERY_STRING': 'sid=foo',
'CONTENT_LENGTH': len(p),
'wsgi.input': self._get_read_mock_coro(p)}
self.assertRaises(exceptions.ContentTooLongError, _run,
s.handle_post_request(environ))
def test_upgrade_handshake(self):
mock_server = self._get_mock_server()
s = asyncio_socket.AsyncSocket(mock_server, 'foo')
s._upgrade_websocket = AsyncMock()
environ = {'REQUEST_METHOD': 'GET', 'QUERY_STRING': 'sid=foo',
'HTTP_CONNECTION': 'Foo,Upgrade,Bar',
'HTTP_UPGRADE': 'websocket'}
_run(s.handle_get_request(environ))
s._upgrade_websocket.mock.assert_called_once_with(environ)
def test_upgrade(self):
mock_server = self._get_mock_server()
mock_server._async['websocket'] = mock.MagicMock()
mock_ws = AsyncMock()
mock_server._async['websocket'].return_value = mock_ws
s = asyncio_socket.AsyncSocket(mock_server, 'sid')
s.connected = True
environ = "foo"
_run(s._upgrade_websocket(environ))
mock_server._async['websocket'].assert_called_once_with(
s._websocket_handler)
mock_ws.mock.assert_called_once_with(environ)
def test_upgrade_twice(self):
mock_server = self._get_mock_server()
mock_server._async['websocket'] = mock.MagicMock()
s = asyncio_socket.AsyncSocket(mock_server, 'sid')
s.connected = True
s.upgraded = True
environ = "foo"
self.assertRaises(IOError, _run, s._upgrade_websocket(environ))
def test_upgrade_packet(self):
mock_server = self._get_mock_server()
s = asyncio_socket.AsyncSocket(mock_server, 'sid')
s.connected = True
_run(s.receive(packet.Packet(packet.UPGRADE)))
r = _run(s.poll())
self.assertEqual(len(r), 1)
self.assertEqual(r[0].encode(), packet.Packet(packet.NOOP).encode())
def test_upgrade_no_probe(self):
mock_server = self._get_mock_server()
s = asyncio_socket.AsyncSocket(mock_server, 'sid')
s.connected = True
ws = mock.MagicMock()
ws.wait = AsyncMock()
ws.wait.mock.return_value = packet.Packet(packet.NOOP).encode(
always_bytes=False)
_run(s._websocket_handler(ws))
self.assertFalse(s.upgraded)
def test_upgrade_no_upgrade_packet(self):
mock_server = self._get_mock_server()
s = asyncio_socket.AsyncSocket(mock_server, 'sid')
s.connected = True
s.queue.join = AsyncMock(return_value=None)
ws = mock.MagicMock()
ws.send = AsyncMock()
ws.wait = AsyncMock()
probe = six.text_type('probe')
ws.wait.mock.side_effect = [
packet.Packet(packet.PING, data=probe).encode(
always_bytes=False),
packet.Packet(packet.NOOP).encode(always_bytes=False)]
_run(s._websocket_handler(ws))
ws.send.mock.assert_called_once_with(packet.Packet(
packet.PONG, data=probe).encode(always_bytes=False))
self.assertEqual(_run(s.queue.get()).packet_type, packet.NOOP)
self.assertFalse(s.upgraded)
def test_upgrade_not_supported(self):
mock_server = self._get_mock_server()
mock_server._async['websocket'] = None
s = asyncio_socket.AsyncSocket(mock_server, 'sid')
s.connected = True
environ = "foo"
_run(s._upgrade_websocket(environ))
mock_server._bad_request.assert_called_once_with()
def test_close_packet(self):
mock_server = self._get_mock_server()
s = asyncio_socket.AsyncSocket(mock_server, 'sid')
s.connected = True
s.close = AsyncMock()
_run(s.receive(packet.Packet(packet.CLOSE)))
s.close.mock.assert_called_once_with(wait=False, abort=True)
def test_websocket_read_write(self):
mock_server = self._get_mock_server()
s = asyncio_socket.AsyncSocket(mock_server, 'sid')
s.connected = False
s.queue.join = AsyncMock(return_value=None)
foo = six.text_type('foo')
bar = six.text_type('bar')
s.poll = AsyncMock(side_effect=[
[packet.Packet(packet.MESSAGE, data=bar)], None])
ws = mock.MagicMock()
ws.send = AsyncMock()
ws.wait = AsyncMock()
ws.wait.mock.side_effect = [
packet.Packet(packet.MESSAGE, data=foo).encode(
always_bytes=False),
None]
_run(s._websocket_handler(ws))
self.assertTrue(s.connected)
self.assertTrue(s.upgraded)
self.assertEqual(mock_server._trigger_event.mock.call_count, 2)
mock_server._trigger_event.mock.assert_has_calls([
mock.call('message', 'sid', 'foo', run_async=False),
mock.call('disconnect', 'sid')])
ws.send.mock.assert_called_with('4bar')
def test_websocket_upgrade_read_write(self):
mock_server = self._get_mock_server()
s = asyncio_socket.AsyncSocket(mock_server, 'sid')
s.connected = True
s.queue.join = AsyncMock(return_value=None)
foo = six.text_type('foo')
bar = six.text_type('bar')
probe = six.text_type('probe')
s.poll = AsyncMock(side_effect=[
[packet.Packet(packet.MESSAGE, data=bar)], exceptions.QueueEmpty])
ws = mock.MagicMock()
ws.send = AsyncMock()
ws.wait = AsyncMock()
ws.wait.mock.side_effect = [
packet.Packet(packet.PING, data=probe).encode(
always_bytes=False),
packet.Packet(packet.UPGRADE).encode(always_bytes=False),
packet.Packet(packet.MESSAGE, data=foo).encode(
always_bytes=False),
None]
_run(s._websocket_handler(ws))
self.assertTrue(s.upgraded)
self.assertEqual(mock_server._trigger_event.mock.call_count, 2)
mock_server._trigger_event.mock.assert_has_calls([
mock.call('message', 'sid', 'foo', run_async=False),
mock.call('disconnect', 'sid')])
ws.send.mock.assert_called_with('4bar')
def test_websocket_upgrade_with_payload(self):
mock_server = self._get_mock_server()
s = asyncio_socket.AsyncSocket(mock_server, 'sid')
s.connected = True
s.queue.join = AsyncMock(return_value=None)
probe = six.text_type('probe')
ws = mock.MagicMock()
ws.send = AsyncMock()
ws.wait = AsyncMock()
ws.wait.mock.side_effect = [
packet.Packet(packet.PING, data=probe).encode(
always_bytes=False),
packet.Packet(packet.UPGRADE, data=b'2').encode(
always_bytes=False)]
_run(s._websocket_handler(ws))
self.assertTrue(s.upgraded)
def test_websocket_upgrade_with_backlog(self):
mock_server = self._get_mock_server()
s = asyncio_socket.AsyncSocket(mock_server, 'sid')
s.connected = True
s.queue.join = AsyncMock(return_value=None)
probe = six.text_type('probe')
foo = six.text_type('foo')
ws = mock.MagicMock()
ws.send = AsyncMock()
ws.wait = AsyncMock()
ws.wait.mock.side_effect = [
packet.Packet(packet.PING, data=probe).encode(
always_bytes=False),
packet.Packet(packet.UPGRADE, data=b'2').encode(
always_bytes=False)]
s.upgrading = True
_run(s.send(packet.Packet(packet.MESSAGE, data=foo)))
_run(s._websocket_handler(ws))
self.assertTrue(s.upgraded)
self.assertFalse(s.upgrading)
self.assertEqual(s.packet_backlog, [])
ws.send.mock.assert_called_with('4foo')
def test_websocket_read_write_wait_fail(self):
mock_server = self._get_mock_server()
s = asyncio_socket.AsyncSocket(mock_server, 'sid')
s.connected = False
s.queue.join = AsyncMock(return_value=None)
foo = six.text_type('foo')
bar = six.text_type('bar')
s.poll = AsyncMock(side_effect=[
[packet.Packet(packet.MESSAGE, data=bar)],
[packet.Packet(packet.MESSAGE, data=bar)], exceptions.QueueEmpty])
ws = mock.MagicMock()
ws.send = AsyncMock()
ws.wait = AsyncMock()
ws.wait.mock.side_effect = [
packet.Packet(packet.MESSAGE, data=foo).encode(
always_bytes=False),
RuntimeError]
ws.send.mock.side_effect = [None, RuntimeError]
_run(s._websocket_handler(ws))
self.assertEqual(s.closed, True)
def test_websocket_ignore_invalid_packet(self):
mock_server = self._get_mock_server()
s = asyncio_socket.AsyncSocket(mock_server, 'sid')
s.connected = False
s.queue.join = AsyncMock(return_value=None)
foo = six.text_type('foo')
bar = six.text_type('bar')
s.poll = AsyncMock(side_effect=[
[packet.Packet(packet.MESSAGE, data=bar)], exceptions.QueueEmpty])
ws = mock.MagicMock()
ws.send = AsyncMock()
ws.wait = AsyncMock()
ws.wait.mock.side_effect = [
packet.Packet(packet.OPEN).encode(always_bytes=False),
packet.Packet(packet.MESSAGE, data=foo).encode(
always_bytes=False),
None]
_run(s._websocket_handler(ws))
self.assertTrue(s.connected)
self.assertEqual(mock_server._trigger_event.mock.call_count, 2)
mock_server._trigger_event.mock.assert_has_calls([
mock.call('message', 'sid', foo, run_async=False),
mock.call('disconnect', 'sid')])
ws.send.mock.assert_called_with('4bar')
def test_send_after_close(self):
mock_server = self._get_mock_server()
s = asyncio_socket.AsyncSocket(mock_server, 'sid')
_run(s.close(wait=False))
self.assertRaises(exceptions.SocketIsClosedError, _run,
s.send(packet.Packet(packet.NOOP)))
def test_close_after_close(self):
mock_server = self._get_mock_server()
s = asyncio_socket.AsyncSocket(mock_server, 'sid')
_run(s.close(wait=False))
self.assertTrue(s.closed)
self.assertEqual(mock_server._trigger_event.mock.call_count, 1)
mock_server._trigger_event.mock.assert_called_once_with('disconnect',
'sid')
_run(s.close())
self.assertEqual(mock_server._trigger_event.mock.call_count, 1)
def test_close_and_wait(self):
mock_server = self._get_mock_server()
s = asyncio_socket.AsyncSocket(mock_server, 'sid')
s.queue = mock.MagicMock()
s.queue.put = AsyncMock()
s.queue.join = AsyncMock()
_run(s.close(wait=True))
s.queue.join.mock.assert_called_once_with()
def test_close_without_wait(self):
mock_server = self._get_mock_server()
s = asyncio_socket.AsyncSocket(mock_server, 'sid')
s.queue = mock.MagicMock()
s.queue.put = AsyncMock()
s.queue.join = AsyncMock()
_run(s.close(wait=False))
self.assertEqual(s.queue.join.mock.call_count, 0)
| 40.497738
| 78
| 0.629721
|
0eaf3884011a7ee45d8d563dbe38964beb1e60ec
| 20
|
py
|
Python
|
agpy/__version__.py
|
ufoym/agpy
|
2bac609f9a63a3aa3bf847ac9cbbad363b0dc853
|
[
"MIT"
] | null | null | null |
agpy/__version__.py
|
ufoym/agpy
|
2bac609f9a63a3aa3bf847ac9cbbad363b0dc853
|
[
"MIT"
] | null | null | null |
agpy/__version__.py
|
ufoym/agpy
|
2bac609f9a63a3aa3bf847ac9cbbad363b0dc853
|
[
"MIT"
] | null | null | null |
__version__='0.1.3'
| 10
| 19
| 0.7
|
440824fac601a396de2f46237265e75df120f6f6
| 488
|
py
|
Python
|
Scripts/set_replay_flag_2writebacks.py
|
samehattia/StateMover
|
8e242f6d3c370569f7267ffadda3d6e4f4b028b3
|
[
"MIT"
] | 1
|
2021-10-12T16:18:04.000Z
|
2021-10-12T16:18:04.000Z
|
Scripts/set_replay_flag_2writebacks.py
|
samehattia/StateMover
|
8e242f6d3c370569f7267ffadda3d6e4f4b028b3
|
[
"MIT"
] | null | null | null |
Scripts/set_replay_flag_2writebacks.py
|
samehattia/StateMover
|
8e242f6d3c370569f7267ffadda3d6e4f4b028b3
|
[
"MIT"
] | 1
|
2021-12-06T14:10:29.000Z
|
2021-12-06T14:10:29.000Z
|
#!/usr/bin/env python3
'''
The script sets the replay flags to the given value
'''
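# Illustrative usage sketch (the file name and value below are hypothetical):
#   python set_replay_flag_2writebacks.py state.txt 1
# Every line containing 'replay_flag' is rewritten in place as "<first word> <value>".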
import sys
import os.path
import fileinput
# First argument: state file, Second argument: value
if len(sys.argv) == 3:
file_name = sys.argv[1]
value = sys.argv[2]
else:
print("Expects two arguments")
exit()
for line in fileinput.input(file_name, inplace = 1):
# Set replay flags
if 'replay_flag' in line:
words = line.split()
print(words[0] + ' ' + value)
else:
print(line.rstrip('\n'))
| 18.074074
| 52
| 0.682377
|
c909189ecd66ebf276cb7d91bc4c7191d184e582
| 7,855
|
py
|
Python
|
fairseq/tasks/sentence_prediction.py
|
nedo0shki/fairseq-editor
|
a0f09787bc0d302be5833ec0dad3e568440f4551
|
[
"MIT"
] | 429
|
2020-08-04T01:27:22.000Z
|
2022-03-20T06:37:52.000Z
|
fairseq/tasks/sentence_prediction.py
|
TokisakiKurumi2001/delight
|
ba340f2f4c2c541ceb126f87b219864058565505
|
[
"MIT"
] | 42
|
2020-11-19T09:49:21.000Z
|
2022-03-21T09:55:24.000Z
|
fairseq/tasks/sentence_prediction.py
|
TokisakiKurumi2001/delight
|
ba340f2f4c2c541ceb126f87b219864058565505
|
[
"MIT"
] | 49
|
2020-08-05T11:21:32.000Z
|
2022-02-25T12:13:38.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import numpy as np
from fairseq.data import (
ConcatSentencesDataset,
data_utils,
Dictionary,
IdDataset,
NestedDictionaryDataset,
NumSamplesDataset,
NumelDataset,
OffsetTokensDataset,
PrependTokenDataset,
RawLabelDataset,
RightPadDataset,
RollDataset,
SortDataset,
StripTokenDataset,
TruncateDataset,
)
from fairseq.tasks import FairseqTask, register_task
logger = logging.getLogger(__name__)
@register_task('sentence_prediction')
class SentencePredictionTask(FairseqTask):
"""
Sentence (or sentence pair) prediction (classification or regression) task.
Args:
dictionary (Dictionary): the dictionary for the input of the task
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
parser.add_argument('data', metavar='FILE',
help='file prefix for data')
parser.add_argument('--num-classes', type=int, default=-1,
help='number of classes')
parser.add_argument('--init-token', type=int, default=None,
help='add token at the beginning of each batch item')
parser.add_argument('--separator-token', type=int, default=None,
help='add separator token between inputs')
parser.add_argument('--regression-target', action='store_true', default=False)
parser.add_argument('--no-shuffle', action='store_true', default=False)
parser.add_argument('--truncate-sequence', action='store_true', default=False,
help='truncate sequence to max-positions')
parser.add_argument('--add-prev-output-tokens', action='store_true', default=False,
help='add prev_output_tokens to sample, used for encoder-decoder arch')
def __init__(self, args, data_dictionary, label_dictionary):
super().__init__(args)
self.dictionary = data_dictionary
self._label_dictionary = label_dictionary
if not hasattr(args, 'max_positions'):
self._max_positions = (
args.max_source_positions,
args.max_target_positions,
)
else:
self._max_positions = args.max_positions
args.tokens_per_sample = self._max_positions
@classmethod
def load_dictionary(cls, args, filename, source=True):
"""Load the dictionary from the filename
Args:
filename (str): the filename
"""
dictionary = Dictionary.load(filename)
dictionary.add_symbol('<mask>')
return dictionary
@classmethod
def setup_task(cls, args, **kwargs):
assert args.num_classes > 0, 'Must set --num-classes'
# load data dictionary
data_dict = cls.load_dictionary(
args,
os.path.join(args.data, 'input0', 'dict.txt'),
source=True,
)
logger.info('[input] dictionary: {} types'.format(len(data_dict)))
label_dict = None
if not args.regression_target:
# load label dictionary
label_dict = cls.load_dictionary(
args,
os.path.join(args.data, 'label', 'dict.txt'),
source=False,
)
logger.info('[label] dictionary: {} types'.format(len(label_dict)))
else:
label_dict = data_dict
return SentencePredictionTask(args, data_dict, label_dict)
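    # Illustrative sketch of the data layout assumed by load_dataset below
    # (paths are examples, not an official spec); for a split such as 'train':
    #   <args.data>/input0/train.*      tokenized first input
    #   <args.data>/input1/train.*      optional second input (sentence pairs)
    #   <args.data>/label/train.*       indexed label data (classification), or
    #   <args.data>/label/train.label   raw targets (regression)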
def load_dataset(self, split, combine=False, **kwargs):
"""Load a given dataset split (e.g., train, valid, test)."""
def get_path(type, split):
return os.path.join(self.args.data, type, split)
def make_dataset(type, dictionary):
split_path = get_path(type, split)
dataset = data_utils.load_indexed_dataset(
split_path,
dictionary,
self.args.dataset_impl,
combine=combine,
)
return dataset
input0 = make_dataset('input0', self.source_dictionary)
        assert input0 is not None, 'could not find dataset: {}'.format(get_path('input0', split))
input1 = make_dataset('input1', self.source_dictionary)
if self.args.init_token is not None:
input0 = PrependTokenDataset(input0, self.args.init_token)
if input1 is None:
src_tokens = input0
else:
if self.args.separator_token is not None:
input1 = PrependTokenDataset(input1, self.args.separator_token)
src_tokens = ConcatSentencesDataset(input0, input1)
with data_utils.numpy_seed(self.args.seed):
shuffle = np.random.permutation(len(src_tokens))
if self.args.truncate_sequence:
src_tokens = TruncateDataset(src_tokens, self.args.max_positions)
dataset = {
'id': IdDataset(),
'net_input': {
'src_tokens': RightPadDataset(
src_tokens,
pad_idx=self.source_dictionary.pad(),
),
'src_lengths': NumelDataset(src_tokens, reduce=False),
},
'nsentences': NumSamplesDataset(),
'ntokens': NumelDataset(src_tokens, reduce=True),
}
if self.args.add_prev_output_tokens:
prev_tokens_dataset = RightPadDataset(
RollDataset(src_tokens, 1),
pad_idx=self.dictionary.pad(),
)
dataset['net_input'].update(
prev_output_tokens=prev_tokens_dataset,
)
if not self.args.regression_target:
label_dataset = make_dataset('label', self.label_dictionary)
if label_dataset is not None:
dataset.update(
target=OffsetTokensDataset(
StripTokenDataset(
label_dataset,
id_to_strip=self.label_dictionary.eos(),
),
offset=-self.label_dictionary.nspecial,
)
)
else:
label_path = "{0}.label".format(get_path('label', split))
if os.path.exists(label_path):
dataset.update(
target=RawLabelDataset([
float(x.strip()) for x in open(label_path).readlines()
])
)
nested_dataset = NestedDictionaryDataset(
dataset,
sizes=[src_tokens.sizes],
)
if self.args.no_shuffle:
dataset = nested_dataset
else:
dataset = SortDataset(
nested_dataset,
# shuffle
sort_order=[shuffle],
)
logger.info("Loaded {0} with #samples: {1}".format(split, len(dataset)))
self.datasets[split] = dataset
return self.datasets[split]
def build_model(self, args):
from fairseq import models
model = models.build_model(args, self)
model.register_classification_head(
getattr(args, 'classification_head_name', 'sentence_classification_head'),
num_classes=self.args.num_classes,
)
return model
def max_positions(self):
return self._max_positions
@property
def source_dictionary(self):
return self.dictionary
@property
def target_dictionary(self):
return self.dictionary
@property
def label_dictionary(self):
return self._label_dictionary
| 33.568376
| 99
| 0.587397
|
e5e76134abfe51ef287a91788913d07d277edc53
| 5,311
|
py
|
Python
|
lib/galaxy/datatypes/dataproviders/hierarchy.py
|
lawrence14701/galaxy
|
7eb2fcb708e7b63e17800c87613ddfa5497c0654
|
[
"CC-BY-3.0"
] | 4
|
2018-10-29T18:34:38.000Z
|
2021-09-29T23:30:42.000Z
|
lib/galaxy/datatypes/dataproviders/hierarchy.py
|
lawrence14701/galaxy
|
7eb2fcb708e7b63e17800c87613ddfa5497c0654
|
[
"CC-BY-3.0"
] | 30
|
2016-10-20T15:35:12.000Z
|
2018-10-02T15:59:54.000Z
|
lib/galaxy/datatypes/dataproviders/hierarchy.py
|
lawrence14701/galaxy
|
7eb2fcb708e7b63e17800c87613ddfa5497c0654
|
[
"CC-BY-3.0"
] | 7
|
2016-11-03T19:11:01.000Z
|
2020-05-11T14:23:52.000Z
|
"""
Dataproviders that iterate over hierarchical data (e.g. XML) from their sources.
"""
import logging
from xml.etree.ElementTree import (
Element,
iterparse
)
from . import line
_TODO = """
"""
log = logging.getLogger(__name__)
# ----------------------------------------------------------------------------- hierarchal/tree data providers
class HierarchalDataProvider(line.BlockDataProvider):
"""
Class that uses formats where a datum may have a parent or children
data.
e.g. XML, HTML, GFF3, Phylogenetic
"""
def __init__(self, source, **kwargs):
# TODO: (and defer to better (than I can write) parsers for each subtype)
super(HierarchalDataProvider, self).__init__(source, **kwargs)
# ----------------------------------------------------------------------------- xml
class XMLDataProvider(HierarchalDataProvider):
"""
Data provider that converts selected XML elements to dictionaries.
"""
# using xml.etree's iterparse method to keep mem down
# TODO: this, however (AFAIK), prevents the use of xpath
settings = {
'selector' : 'str', # urlencoded
'max_depth' : 'int',
}
ITERPARSE_ALL_EVENTS = ('start', 'end', 'start-ns', 'end-ns')
# TODO: move appropo into super
def __init__(self, source, selector=None, max_depth=None, **kwargs):
"""
:param selector: some partial string in the desired tags to return
        :param max_depth: the number of generations of descendants to return
"""
self.selector = selector
self.max_depth = max_depth
self.namespaces = {}
super(XMLDataProvider, self).__init__(source, **kwargs)
def matches_selector(self, element, selector=None):
"""
Returns true if the ``element`` matches the ``selector``.
:param element: an XML ``Element``
:param selector: some partial string in the desired tags to return
Change point for more sophisticated selectors.
"""
# search for partial match of selector to the element tag
# TODO: add more flexibility here w/o re-implementing xpath
# TODO: fails with '#' - browser thinks it's an anchor - use urlencode
# TODO: need removal/replacement of etree namespacing here - then move to string match
return bool((selector is None) or
(isinstance(element, Element) and selector in element.tag))
def element_as_dict(self, element):
"""
Converts an XML element (its text, tag, and attributes) to dictionary form.
:param element: an XML ``Element``
"""
# TODO: Key collision is unlikely here, but still should be better handled
return {
'tag' : element.tag,
'text' : element.text.strip() if element.text else None,
# needs shallow copy to protect v. element.clear()
'attrib' : dict(element.attrib)
}
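    # Illustrative sketch (the element below is hypothetical): parsing
    #   <name lang="en">Homo sapiens</name>
    # through element_as_dict would yield approximately:
    #   {'tag': 'name', 'text': 'Homo sapiens', 'attrib': {'lang': 'en'}}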
def get_children(self, element, max_depth=None):
"""
Yield all children of element (and their children - recursively)
in dictionary form.
:param element: an XML ``Element``
        :param max_depth: the number of generations of descendants to return
"""
if not isinstance(max_depth, int) or max_depth >= 1:
for child in element:
child_data = self.element_as_dict(child)
next_depth = max_depth - 1 if isinstance(max_depth, int) else None
grand_children = list(self.get_children(child, next_depth))
if grand_children:
child_data['children'] = grand_children
yield child_data
def __iter__(self):
context = iterparse(self.source, events=self.ITERPARSE_ALL_EVENTS)
context = iter(context)
selected_element = None
for event, element in context:
if event == 'start-ns':
ns, uri = element
self.namespaces[ns] = uri
elif event == 'start':
if((selected_element is None) and
(self.matches_selector(element, self.selector))):
# start tag of selected element - wait for 'end' to emit/yield
selected_element = element
elif event == 'end':
if((selected_element is not None) and (element == selected_element)):
self.num_valid_data_read += 1
# offset
if self.num_valid_data_read > self.offset:
# convert to dict and yield
selected_element_dict = self.element_as_dict(selected_element)
children = list(self.get_children(selected_element, self.max_depth))
if children:
selected_element_dict['children'] = children
yield selected_element_dict
# limit
self.num_data_returned += 1
if self.limit is not None and self.num_data_returned >= self.limit:
break
selected_element.clear()
selected_element = None
self.num_data_read += 1
| 37.13986
| 110
| 0.569384
|
5d39636f633ef2cf7399c14192435d695d0f5e8b
| 955
|
py
|
Python
|
newbitcoin/newbitcoin/code-ch01/examples.py
|
tys-hiroshi/test_programmingbitcoin
|
6eb6fb1c087f6dd2cb2b01f527488a904065efa6
|
[
"MIT"
] | null | null | null |
newbitcoin/newbitcoin/code-ch01/examples.py
|
tys-hiroshi/test_programmingbitcoin
|
6eb6fb1c087f6dd2cb2b01f527488a904065efa6
|
[
"MIT"
] | null | null | null |
newbitcoin/newbitcoin/code-ch01/examples.py
|
tys-hiroshi/test_programmingbitcoin
|
6eb6fb1c087f6dd2cb2b01f527488a904065efa6
|
[
"MIT"
] | null | null | null |
"""
# tag::example1[]
>>> from ecc import FieldElement
>>> a = FieldElement(7, 13)
>>> b = FieldElement(6, 13)
>>> print(a == b)
False
>>> print(a == a)
True
# end::example1[]
# tag::example2[]
>>> print(7 % 3)
1
# end::example2[]
# tag::example3[]
>>> print(-27 % 13)
12
# end::example3[]
# tag::example4[]
>>> from ecc import FieldElement
>>> a = FieldElement(7, 13)
>>> b = FieldElement(12, 13)
>>> c = FieldElement(6, 13)
>>> print(a+b==c)
True
# end::example4[]
# tag::example5[]
>>> from ecc import FieldElement
>>> a = FieldElement(3, 13)
>>> b = FieldElement(12, 13)
>>> c = FieldElement(10, 13)
>>> print(a*b==c)
True
# end::example5[]
# tag::example6[]
>>> from ecc import FieldElement
>>> a = FieldElement(3, 13)
>>> b = FieldElement(1, 13)
>>> print(a**3==b)
True
# end::example6[]
# tag::example7[]
>>> from ecc import FieldElement
>>> a = FieldElement(7, 13)
>>> b = FieldElement(8, 13)
>>> print(a**-3==b)
True
# end::example7[]
"""
| 16.754386
| 32
| 0.590576
|
9660a7daaf2d8b2d85ca030cb5af4e15dc20287f
| 1,668
|
py
|
Python
|
wordcensor/utils.py
|
R3v1L/django-wordcensor
|
e79941b4467a2e35811450f014df799acdfebfd9
|
[
"MIT"
] | null | null | null |
wordcensor/utils.py
|
R3v1L/django-wordcensor
|
e79941b4467a2e35811450f014df799acdfebfd9
|
[
"MIT"
] | null | null | null |
wordcensor/utils.py
|
R3v1L/django-wordcensor
|
e79941b4467a2e35811450f014df799acdfebfd9
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Django word censoring application utilities module
===============================================
.. module::
:platform: Django
:synopsis: Django word censoring application utilities module
.. moduleauthor:: (C) 2014 Oliver Gutiérrez
"""
# Python imports
import string
# Django imports
from django.conf import settings
# Application imports
from wordcensor.models import CensoredWord
def profanity_word_handler(word):
"""
Returns a word censored
"""
    return word[0] + ''.join([settings.CENSOR_PROFANITY_REPLACEMENT_CHARACTER for _ in range(len(word)-2)]) + word[-1]
def banned_word_handler(word):
"""
Returns a word totally replaced by a character
"""
return ''.join([settings.CENSOR_BANNED_REPLACEMENT_CHARACTER for x in word])
def word_filter(text,wordlist,replace_handler,separator=' '):
"""
Generic word filter
"""
for word in wordlist:
if word in text:
try:
replacement=replace_handler(word)
except Exception, e:
print e
replacement=replace_handler
text=text.replace(word,replacement)
return text
def filter_profanities(text,replace_handler=profanity_word_handler):
"""
Replaces profanities in a given text
"""
profanities=CensoredWord.objects.get_profanities_wordlist()
return word_filter(text,profanities,replace_handler)
def filter_banned(text,replace_handler=banned_word_handler):
"""
    Replaces banned words in a given text
"""
banned=CensoredWord.objects.get_banned_wordlist()
return word_filter(text,banned,replace_handler)
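# Illustrative sketch (assumes both replacement characters are configured as '*'):
#   word_filter('go away now', ['away'], banned_word_handler)     -> 'go **** now'
#   word_filter('go away now', ['away'], profanity_word_handler)  -> 'go a**y now'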
| 28.271186
| 119
| 0.672062
|
4e705a813b323512ec23af735c00da3a4e615f15
| 1,037
|
py
|
Python
|
manage.py
|
yusufhilmi/findhelp
|
87fa0f1127ecb7ab17c2625a355f3e350d2296b1
|
[
"MIT"
] | 1
|
2020-11-18T19:29:37.000Z
|
2020-11-18T19:29:37.000Z
|
manage.py
|
yusufhilmi/findhelp
|
87fa0f1127ecb7ab17c2625a355f3e350d2296b1
|
[
"MIT"
] | null | null | null |
manage.py
|
yusufhilmi/findhelp
|
87fa0f1127ecb7ab17c2625a355f3e350d2296b1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import os
import sys
from pathlib import Path
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.local")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django # noqa
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
# This allows easy placement of apps within the interior
# findhelp directory.
current_path = Path(__file__).parent.resolve()
sys.path.append(str(current_path / "findhelp"))
execute_from_command_line(sys.argv)
| 32.40625
| 77
| 0.657666
|
d8fab93f50cc926242468a923150c54d57deed99
| 1,492
|
py
|
Python
|
nipype/interfaces/mipav/tests/test_auto_JistBrainMp2rageDuraEstimation.py
|
Conxz/nipype
|
1281723ae56eacd103597ff4081a205583706e62
|
[
"Apache-2.0"
] | null | null | null |
nipype/interfaces/mipav/tests/test_auto_JistBrainMp2rageDuraEstimation.py
|
Conxz/nipype
|
1281723ae56eacd103597ff4081a205583706e62
|
[
"Apache-2.0"
] | 2
|
2017-10-05T21:08:38.000Z
|
2018-10-09T23:01:23.000Z
|
nipype/interfaces/mipav/tests/test_auto_JistBrainMp2rageDuraEstimation.py
|
Conxz/nipype
|
1281723ae56eacd103597ff4081a205583706e62
|
[
"Apache-2.0"
] | 1
|
2016-10-11T19:18:53.000Z
|
2016-10-11T19:18:53.000Z
|
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from ....testing import assert_equal
from ..developer import JistBrainMp2rageDuraEstimation
def test_JistBrainMp2rageDuraEstimation_inputs():
input_map = dict(args=dict(argstr='%s',
),
environ=dict(nohash=True,
usedefault=True,
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
inDistance=dict(argstr='--inDistance %f',
),
inSecond=dict(argstr='--inSecond %s',
),
inSkull=dict(argstr='--inSkull %s',
),
inoutput=dict(argstr='--inoutput %s',
),
null=dict(argstr='--null %s',
),
outDura=dict(argstr='--outDura %s',
hash_files=False,
),
terminal_output=dict(nohash=True,
),
xDefaultMem=dict(argstr='-xDefaultMem %d',
),
xMaxProcess=dict(argstr='-xMaxProcess %d',
usedefault=True,
),
xPrefExt=dict(argstr='--xPrefExt %s',
),
)
inputs = JistBrainMp2rageDuraEstimation.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_JistBrainMp2rageDuraEstimation_outputs():
output_map = dict(outDura=dict(),
)
outputs = JistBrainMp2rageDuraEstimation.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
yield assert_equal, getattr(outputs.traits()[key], metakey), value
| 28.150943
| 78
| 0.657507
|
b661df1a3e7e6d9f32094e38098340925bd80a5a
| 6,153
|
py
|
Python
|
tests/test_sqlserver.py
|
njdanielsen/aws-data-wrangler
|
5cdb316224370e952dfb3a701825e1b1ab331105
|
[
"Apache-2.0"
] | null | null | null |
tests/test_sqlserver.py
|
njdanielsen/aws-data-wrangler
|
5cdb316224370e952dfb3a701825e1b1ab331105
|
[
"Apache-2.0"
] | null | null | null |
tests/test_sqlserver.py
|
njdanielsen/aws-data-wrangler
|
5cdb316224370e952dfb3a701825e1b1ab331105
|
[
"Apache-2.0"
] | null | null | null |
import logging
from decimal import Decimal
import boto3
import pandas as pd
import pyarrow as pa
import pyodbc
import pytest
import awswrangler as wr
from ._utils import ensure_data_types, get_df
logging.getLogger("awswrangler").setLevel(logging.DEBUG)
@pytest.fixture(scope="module", autouse=True)
def create_sql_server_database(databases_parameters):
connection_str = (
f"DRIVER={{ODBC Driver 17 for SQL Server}};"
f"SERVER={databases_parameters['sqlserver']['host']},{databases_parameters['sqlserver']['port']};"
f"UID={databases_parameters['user']};"
f"PWD={databases_parameters['password']}"
)
con = pyodbc.connect(connection_str, autocommit=True)
sql_create_db = (
f"IF NOT EXISTS(SELECT * FROM sys.databases WHERE name = '{databases_parameters['sqlserver']['database']}') "
"BEGIN "
f"CREATE DATABASE {databases_parameters['sqlserver']['database']} "
"END"
)
with con.cursor() as cursor:
cursor.execute(sql_create_db)
con.commit()
con.close()
yield
@pytest.fixture(scope="function")
def sqlserver_con():
con = wr.sqlserver.connect("aws-data-wrangler-sqlserver")
yield con
con.close()
def test_connection():
wr.sqlserver.connect("aws-data-wrangler-sqlserver", timeout=10).close()
def test_read_sql_query_simple(databases_parameters, sqlserver_con):
df = wr.sqlserver.read_sql_query("SELECT 1", con=sqlserver_con)
assert df.shape == (1, 1)
def test_to_sql_simple(sqlserver_table, sqlserver_con):
df = pd.DataFrame({"c0": [1, 2, 3], "c1": ["foo", "boo", "bar"]})
wr.sqlserver.to_sql(df, sqlserver_con, sqlserver_table, "dbo", "overwrite", True)
def test_sql_types(sqlserver_table, sqlserver_con):
table = sqlserver_table
df = get_df()
df.drop(["binary"], axis=1, inplace=True)
wr.sqlserver.to_sql(
df=df,
con=sqlserver_con,
table=table,
schema="dbo",
mode="overwrite",
index=True,
dtype={"iint32": "INTEGER"},
)
df = wr.sqlserver.read_sql_query(f"SELECT * FROM dbo.{table}", sqlserver_con)
ensure_data_types(df, has_list=False)
dfs = wr.sqlserver.read_sql_query(
sql=f"SELECT * FROM dbo.{table}",
con=sqlserver_con,
chunksize=1,
dtype={
"iint8": pa.int8(),
"iint16": pa.int16(),
"iint32": pa.int32(),
"iint64": pa.int64(),
"float": pa.float32(),
"ddouble": pa.float64(),
"decimal": pa.decimal128(3, 2),
"string_object": pa.string(),
"string": pa.string(),
"date": pa.date32(),
"timestamp": pa.timestamp(unit="ns"),
"binary": pa.binary(),
"category": pa.float64(),
},
)
for df in dfs:
ensure_data_types(df, has_list=False)
def test_to_sql_cast(sqlserver_table, sqlserver_con):
table = sqlserver_table
df = pd.DataFrame(
{
"col": [
"".join([str(i)[-1] for i in range(1_024)]),
"".join([str(i)[-1] for i in range(1_024)]),
"".join([str(i)[-1] for i in range(1_024)]),
]
},
dtype="string",
)
wr.sqlserver.to_sql(
df=df,
con=sqlserver_con,
table=table,
schema="dbo",
mode="overwrite",
index=False,
dtype={"col": "VARCHAR(1024)"},
)
df2 = wr.sqlserver.read_sql_query(sql=f"SELECT * FROM dbo.{table}", con=sqlserver_con)
assert df.equals(df2)
def test_null(sqlserver_table, sqlserver_con):
table = sqlserver_table
df = pd.DataFrame({"id": [1, 2, 3], "nothing": [None, None, None]})
wr.sqlserver.to_sql(
df=df,
con=sqlserver_con,
table=table,
schema="dbo",
mode="overwrite",
index=False,
dtype={"nothing": "INTEGER"},
)
wr.sqlserver.to_sql(
df=df,
con=sqlserver_con,
table=table,
schema="dbo",
mode="append",
index=False,
)
df2 = wr.sqlserver.read_sql_table(table=table, schema="dbo", con=sqlserver_con)
df["id"] = df["id"].astype("Int64")
assert pd.concat(objs=[df, df], ignore_index=True).equals(df2)
def test_decimal_cast(sqlserver_table, sqlserver_con):
table = sqlserver_table
df = pd.DataFrame(
{
"col0": [Decimal((0, (1, 9, 9), -2)), None, Decimal((0, (1, 9, 0), -2))],
"col1": [Decimal((0, (1, 9, 9), -2)), None, Decimal((0, (1, 9, 0), -2))],
"col2": [Decimal((0, (1, 9, 9), -2)), None, Decimal((0, (1, 9, 0), -2))],
}
)
wr.sqlserver.to_sql(df, sqlserver_con, table, "dbo")
df2 = wr.sqlserver.read_sql_table(
schema="dbo", table=table, con=sqlserver_con, dtype={"col0": "float32", "col1": "float64", "col2": "Int64"}
)
assert df2.dtypes.to_list() == ["float32", "float64", "Int64"]
assert 3.88 <= df2.col0.sum() <= 3.89
assert 3.88 <= df2.col1.sum() <= 3.89
assert df2.col2.sum() == 2
def test_read_retry(sqlserver_con):
try:
wr.sqlserver.read_sql_query("ERROR", sqlserver_con)
except: # noqa
pass
df = wr.sqlserver.read_sql_query("SELECT 1", sqlserver_con)
assert df.shape == (1, 1)
def test_table_name(sqlserver_con):
df = pd.DataFrame({"col0": [1]})
wr.sqlserver.to_sql(df, sqlserver_con, "Test Name", "dbo", mode="overwrite")
df = wr.sqlserver.read_sql_table(schema="dbo", con=sqlserver_con, table="Test Name")
assert df.shape == (1, 1)
with sqlserver_con.cursor() as cursor:
cursor.execute('DROP TABLE "Test Name"')
sqlserver_con.commit()
@pytest.mark.parametrize("dbname", [None, "test"])
def test_connect_secret_manager(dbname):
try:
con = wr.sqlserver.connect(secret_id="aws-data-wrangler/sqlserver", dbname=dbname)
df = wr.sqlserver.read_sql_query("SELECT 1", con=con)
con.close()
assert df.shape == (1, 1)
except boto3.client("secretsmanager").exceptions.ResourceNotFoundException:
        pass  # Workaround for Secrets Manager inconsistency
| 31.233503
| 117
| 0.60117
|
71635c162d76f7a003c9ecfaa54d523d05a47e0a
| 26,509
|
py
|
Python
|
graphql/type/definition.py
|
Suggestic/graphql-core-legacy
|
607e1834c3f467fc8d5dbd1d593098e4c3565f84
|
[
"MIT"
] | null | null | null |
graphql/type/definition.py
|
Suggestic/graphql-core-legacy
|
607e1834c3f467fc8d5dbd1d593098e4c3565f84
|
[
"MIT"
] | null | null | null |
graphql/type/definition.py
|
Suggestic/graphql-core-legacy
|
607e1834c3f467fc8d5dbd1d593098e4c3565f84
|
[
"MIT"
] | null | null | null |
import collections
try:
from collections.abc import Hashable, Mapping
except ImportError: # Python < 3.3
from collections import Hashable, Mapping
import copy
from typing import Union
from ..language import ast
from ..pyutils.cached_property import cached_property
from ..pyutils.ordereddict import OrderedDict
from ..pyutils.compat import Enum as PyEnum
from ..utils.assert_valid_name import assert_valid_name
from ..utils.undefined import Undefined
# Necessary for static type checking
if False: # flake8: noqa
from typing import List, Dict, Any, Callable, Optional, Type
def is_type(type_):
# type: (Any) -> bool
return isinstance(
type_,
(
GraphQLScalarType,
GraphQLObjectType,
GraphQLInterfaceType,
GraphQLUnionType,
GraphQLEnumType,
GraphQLInputObjectType,
GraphQLList,
GraphQLNonNull,
),
)
def is_input_type(type_):
# type: (Any) -> bool
named_type = get_named_type(type_)
return isinstance(
named_type, (GraphQLScalarType, GraphQLEnumType, GraphQLInputObjectType)
)
def get_nullable_type(type_):
if isinstance(type_, GraphQLNonNull):
return type_.of_type
return type_
def get_named_type(type_):
# type: (Optional[GraphQLType]) -> Optional[GraphQLType]
unmodified_type = type_
while isinstance(unmodified_type, (GraphQLList, GraphQLNonNull)):
unmodified_type = unmodified_type.of_type
return unmodified_type
class GraphQLType(object):
pass
class GraphQLNamedType(GraphQLType):
__slots__ = ("name",)
def __init__(self, name):
# type: (str) -> None
self.name = name
def __str__(self):
# type: () -> str
return self.name
def is_same_type(self, other):
# type: (Any) -> bool
return self.__class__ is other.__class__ and self.name == other.name
def none_func(x):
return None
class GraphQLScalarType(GraphQLNamedType):
"""Scalar Type Definition
The leaf values of any request and input values to arguments are
Scalars (or Enums) and are defined with a name and a series of coercion
functions used to ensure validity.
Example:
def coerce_odd(value):
if value % 2 == 1:
return value
return None
OddType = GraphQLScalarType(name='Odd', serialize=coerce_odd)
"""
__slots__ = "name", "description", "serialize", "parse_value", "parse_literal"
def __init__(
self,
name, # type: str
description=None, # type: Optional[str]
serialize=None, # type: Optional[Callable]
parse_value=None, # type: Optional[Callable]
parse_literal=None, # type: Optional[Callable]
):
# type: (...) -> None
assert name, "Type must be named."
assert_valid_name(name)
super(GraphQLScalarType, self).__init__(name)
self.name = name
self.description = description
assert callable(serialize), (
'{} must provide "serialize" function. If this custom Scalar is '
'also used as an input type, ensure "parse_value" and "parse_literal" '
"functions are also provided."
).format(self)
if parse_value is not None or parse_literal is not None:
assert callable(parse_value) and callable(
parse_literal
), '{} must provide both "parse_value" and "parse_literal" functions.'.format(
self
)
self.serialize = serialize
self.parse_value = parse_value or none_func
self.parse_literal = parse_literal or none_func
def __str__(self):
# type: () -> str
return self.name
class GraphQLObjectType(GraphQLNamedType):
"""Object Type Definition
Almost all of the GraphQL types you define will be object types.
Object types have a name, but most importantly describe their fields.
Example:
AddressType = GraphQLObjectType('Address', {
'street': GraphQLField(GraphQLString),
'number': GraphQLField(GraphQLInt),
'formatted': GraphQLField(GraphQLString,
resolver=lambda obj, info, **args: obj.number + ' ' + obj.street),
})
When two types need to refer to each other, or a type needs to refer to
itself in a field, you can use a static method to supply the fields
lazily.
Example:
PersonType = GraphQLObjectType('Person', lambda: {
'name': GraphQLField(GraphQLString),
'bestFriend': GraphQLField(PersonType)
})
"""
def __init__(
self,
name, # type: str
fields, # type: Union[Callable[[], Dict[str, GraphQLField]], Dict[str, GraphQLField]]
interfaces=None, # type: Optional[List[GraphQLInterfaceType]]
is_type_of=None, # type: Optional[Callable]
description=None, # type: Optional[Any]
):
# type: (...) -> None
assert name, "Type must be named."
assert_valid_name(name)
super(GraphQLObjectType, self).__init__(name)
self.name = name
self.description = description
if is_type_of is not None:
assert callable(
is_type_of
), '{} must provide "is_type_of" as a function.'.format(self)
self.is_type_of = is_type_of
self._fields = fields
self._provided_interfaces = interfaces
self._interfaces = None
@cached_property
def fields(self):
# type: () -> Dict[str, GraphQLField]
return define_field_map(self, self._fields)
@cached_property
def interfaces(self):
# type: () -> List[GraphQLInterfaceType]
return define_interfaces(self, self._provided_interfaces)
def define_field_map(
type_, # type: Union[GraphQLInterfaceType, GraphQLObjectType]
field_map, # type: Union[Callable, Dict[str, GraphQLField], OrderedDict]
):
# type: (...) -> OrderedDict
if callable(field_map):
field_map = field_map()
assert isinstance(field_map, Mapping) and len(field_map) > 0, (
"{} fields must be a mapping (dict / OrderedDict) with field names as keys or a "
"function which returns such a mapping."
).format(type_)
for field_name, field in field_map.items():
assert_valid_name(field_name)
assert isinstance(
field, GraphQLField
), "{}.{} must be an instance of GraphQLField.".format(type_, field_name)
field_args = getattr(field, "args", None)
if field_args:
assert isinstance(
field_args, Mapping
), "{}.{} args must be a mapping (dict / OrderedDict) with argument names as keys.".format(
type_, field_name
)
for arg_name, arg in field_args.items():
assert_valid_name(arg_name)
return OrderedDict(field_map)
def define_interfaces(
type_, # type: GraphQLObjectType
interfaces, # type: Optional[List[GraphQLInterfaceType]]
):
# type: (...) -> List[GraphQLInterfaceType]
if callable(interfaces):
interfaces = interfaces()
if interfaces is None:
interfaces = []
assert isinstance(
interfaces, (list, tuple)
), "{} interfaces must be a list/tuple or a function which returns a list/tuple.".format(
type_
)
for interface in interfaces:
assert isinstance(
interface, GraphQLInterfaceType
), "{} may only implement Interface types, it cannot implement: {}.".format(
type_, interface
)
if not callable(interface.resolve_type):
assert callable(type_.is_type_of), (
'Interface Type {} does not provide a "resolve_type" function '
'and implementing Type {} does not provide a "is_type_of" '
"function. There is no way to resolve this implementing type "
"during execution."
).format(interface, type_)
return interfaces
class GraphQLField(object):
__slots__ = "type", "args", "resolver", "deprecation_reason", "description"
def __init__(
self,
type_, # type: Any
args=None, # type: Optional[Dict[str, GraphQLArgument]]
resolver=None, # type: Optional[Callable]
deprecation_reason=None, # type: Optional[Any]
description=None, # type: Optional[Any]
):
# type: (...) -> None
self.type = type_
self.args = args or OrderedDict()
self.resolver = resolver
self.deprecation_reason = deprecation_reason
self.description = description
def __eq__(self, other):
return self is other or (
isinstance(other, GraphQLField)
and self.type == other.type
and self.args == other.args
and self.resolver == other.resolver
and self.deprecation_reason == other.deprecation_reason
and self.description == other.description
)
def __hash__(self):
# type: () -> int
return id(self)
@property
def is_deprecated(self):
return bool(self.deprecation_reason)
class GraphQLArgument(object):
__slots__ = "type", "default_value", "description", "out_name"
def __init__(
self,
type_, # type: Union[GraphQLInputObjectType, GraphQLNonNull, GraphQLList, GraphQLScalarType]
default_value=None, # type: Optional[Any]
description=None, # type: Optional[Any]
out_name=None, # type: Optional[str]
):
# type: (...) -> None
self.type = type_
self.default_value = default_value
self.description = description
self.out_name = out_name
def __eq__(self, other):
return self is other or (
isinstance(other, GraphQLArgument)
and self.type == other.type
and self.default_value == other.default_value
and self.description == other.description
and self.out_name == other.out_name
)
def __hash__(self):
return id(self)
class GraphQLInterfaceType(GraphQLNamedType):
"""Interface Type Definition
    When a field can return one of a heterogeneous set of types, an Interface type is used to describe what types
are possible, what fields are in common across all types, as well as a function to determine which type
is actually used when the field is resolved.
Example:
EntityType = GraphQLInterfaceType(
name='Entity',
fields={
'name': GraphQLField(GraphQLString),
})
"""
def __init__(
self,
name, # type: str
fields=None, # type: Union[Callable[[], Dict[str, GraphQLField]], Dict[str, GraphQLField]]
resolve_type=None, # type: Optional[Callable]
description=None, # type: Optional[Any]
):
# type: (...) -> None
assert name, "Type must be named."
assert_valid_name(name)
super(GraphQLInterfaceType, self).__init__(name)
self.name = name
self.description = description
if resolve_type is not None:
assert callable(
resolve_type
), '{} must provide "resolve_type" as a function.'.format(self)
self.resolve_type = resolve_type
self._fields = fields
@cached_property
def fields(self):
# type: () -> Dict[str, GraphQLField]
assert self._fields is not None, '"fields" cannot be None'
return define_field_map(self, self._fields)
class GraphQLUnionType(GraphQLNamedType):
"""Union Type Definition
When a field can return one of a heterogeneous set of types, a Union type is used to describe what types are possible
as well as providing a function to determine which type is actually used when the field is resolved.
Example:
class PetType(GraphQLUnionType):
name = 'Pet'
types = [DogType, CatType]
def resolve_type(self, value):
if isinstance(value, Dog):
return DogType()
if isinstance(value, Cat):
return CatType()
"""
def __init__(
self,
name, # type: str
types, # type: Union[Callable[[], List[GraphQLObjectType]], List[GraphQLObjectType]]
resolve_type=None, # type: Optional[Callable]
description=None, # type: Optional[Any]
):
# type: (...) -> None
assert name, "Type must be named."
assert_valid_name(name)
super(GraphQLUnionType, self).__init__(name)
self.name = name
self.description = description
if resolve_type is not None:
assert callable(
resolve_type
), '{} must provide "resolve_type" as a function.'.format(self)
self.resolve_type = resolve_type
self._types = types
@cached_property
def types(self):
# type: () -> List[GraphQLObjectType]
return define_types(self, self._types)
# fmt: off
def define_types(
union_type, # type: GraphQLUnionType
types, # type: Union[Callable[[], List[GraphQLObjectType]], List[GraphQLObjectType]]
):
# type: (...) -> List[GraphQLObjectType]
# fmt: on
if callable(types):
types = types()
assert (
isinstance(types, (list, tuple)) and len(types) > 0
), "Must provide types for Union {}.".format(union_type.name)
has_resolve_type_fn = callable(union_type.resolve_type)
for type_ in types:
assert isinstance(
type_, GraphQLObjectType
), "{} may only contain Object types, it cannot contain: {}.".format(
union_type, type_
)
if not has_resolve_type_fn:
assert callable(type_.is_type_of), (
'Union Type {} does not provide a "resolve_type" function '
'and possible Type {} does not provide a "is_type_of" '
"function. There is no way to resolve this possible type "
"during execution."
).format(union_type, type_)
return types
class GraphQLEnumType(GraphQLNamedType):
"""Enum Type Definition
Some leaf values of requests and input values are Enums. GraphQL serializes Enum values as strings,
however internally Enums can be represented by any kind of type, often integers.
Example:
RGBType = GraphQLEnumType(
name='RGB',
values=OrderedDict([
('RED', GraphQLEnumValue(0)),
('GREEN', GraphQLEnumValue(1)),
('BLUE', GraphQLEnumValue(2))
])
)
    Note: If a value is not provided in a definition, the name of the enum value will be used as its internal value.
"""
def __init__(self, name, values, description=None):
assert name, "Type must provide name."
assert_valid_name(name)
super(GraphQLEnumType, self).__init__(name)
self.name = name
self.description = description
# update `values` to make it read-only, forcing any update to
# raise AttributeError
self._values = define_enum_values(self, values)
@property
def values(self):
return self._values
def get_values(self):
return self.values
def get_value(self, name):
return self._name_lookup.get(name)
def serialize(self, value):
# type: (Union[str, PyEnum]) -> Optional[str]
if isinstance(value, PyEnum):
# We handle PyEnum values
value = value.value
if isinstance(value, Hashable):
enum_value = self._value_lookup.get(value)
if enum_value:
return enum_value.name
return None
def parse_value(self, value):
if isinstance(value, Hashable):
enum_value = self._name_lookup.get(value)
if enum_value:
return enum_value.value
return None
def parse_literal(self, value_ast):
if isinstance(value_ast, ast.EnumValue):
enum_value = self._name_lookup.get(value_ast.value)
if enum_value:
return enum_value.value
@cached_property
def _value_lookup(self):
# type: () -> Dict[str, GraphQLEnumValue]
return {
value.value.value if isinstance(value.value, PyEnum) else value.value: value
for value in self.values
}
@cached_property
def _name_lookup(self):
return {value.name: value for value in self.values}
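    # Illustrative sketch based on the RGBType example in the class docstring:
    #   RGBType.serialize(0)          # -> 'RED'
    #   RGBType.parse_value('RED')    # -> 0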
def define_enum_values(type, value_map):
assert (
isinstance(value_map, Mapping) and len(value_map) > 0
), "{} values must be a mapping (dict / OrderedDict) with value names as keys.".format(
type
)
values = []
if not isinstance(value_map, (collections.OrderedDict, OrderedDict)):
value_map = OrderedDict(sorted(list(value_map.items())))
for value_name, value in value_map.items():
assert_valid_name(value_name)
assert isinstance(
value, GraphQLEnumValue
), "{}.{} must be an instance of GraphQLEnumValue, but got: {}".format(
type, value_name, value
)
value = copy.copy(value)
value.name = value_name
if value.value == Undefined:
value.value = value_name
values.append(value)
return values
class GraphQLEnumValue(object):
__slots__ = "name", "value", "deprecation_reason", "description"
def __init__(
self, value=Undefined, deprecation_reason=None, description=None, name=None
):
self.name = name
self.value = value
self.deprecation_reason = deprecation_reason
self.description = description
@property
def is_deprecated(self):
return bool(self.deprecation_reason)
def __eq__(self, other):
return self is other or (
isinstance(other, GraphQLEnumValue)
and self.name == other.name
and self.value == other.value
and self.deprecation_reason == other.deprecation_reason
and self.description == other.description
)
class GraphQLInputObjectType(GraphQLNamedType):
"""Input Object Type Definition
An input object defines a structured collection of fields which may be
supplied to a field argument.
Using `NonNull` will ensure that a value must be provided by the query
Example:
NonNullFloat = GraphQLNonNull(GraphQLFloat())
class GeoPoint(GraphQLInputObjectType):
name = 'GeoPoint'
fields = {
'lat': GraphQLInputObjectField(NonNullFloat),
'lon': GraphQLInputObjectField(NonNullFloat),
'alt': GraphQLInputObjectField(GraphQLFloat(),
default_value=0)
}
"""
def __init__(
self,
name, # type: str
fields, # type: Union[Callable[[], Dict[str, GraphQLInputObjectField]], Dict[str, GraphQLInputObjectField]]
description=None, # type: Optional[str]
container_type=None, # type: Type[Dict[str, Any]]
):
# type: (...) -> None
assert name, "Type must be named."
self.name = name
self.description = description
super(GraphQLInputObjectType, self).__init__(name)
if container_type is None:
container_type = OrderedDict # type: ignore
assert callable(container_type), "container_type must be callable"
self.container_type = container_type
self._fields = fields
def create_container(self, data):
# type: (Dict[str, Any]) -> Dict[str, Any]
return self.container_type(data)
@cached_property
def fields(self):
# type: () -> Dict[str, GraphQLInputObjectField]
return self._define_field_map()
def _define_field_map(self):
# type: () -> OrderedDict
if callable(self._fields):
fields = self._fields()
else:
fields = self._fields
assert isinstance(fields, Mapping) and len(fields) > 0, (
"{} fields must be a mapping (dict / OrderedDict) with field names as keys or a "
"function which returns such a mapping."
).format(self)
if not isinstance(fields, (collections.OrderedDict, OrderedDict)):
fields = OrderedDict(sorted(list(fields.items())))
for field_name, field in fields.items():
assert_valid_name(field_name)
return fields
class GraphQLInputObjectField(object):
__slots__ = "type", "default_value", "description", "out_name"
def __init__(
self,
type_, # type: Union[GraphQLInputObjectType, GraphQLScalarType]
default_value=None, # type: Optional[Any]
description=None, # type: Optional[Any]
out_name=None, # type: str
):
# type: (...) -> None
self.type = type_ # type: Union[GraphQLInputObjectType, GraphQLScalarType]
self.default_value = default_value
self.description = description
self.out_name = out_name
def __eq__(self, other):
return self is other or (
isinstance(other, GraphQLInputObjectField)
and self.type == other.type
and self.description == other.description
and self.out_name == other.out_name
)
class GraphQLList(GraphQLType):
"""List Modifier
A list is a kind of type marker, a wrapping type which points to another
type. Lists are often created within the context of defining the fields
of an object type.
Example:
class PersonType(GraphQLObjectType):
name = 'Person'
def get_fields(self):
return {
'parents': GraphQLField(GraphQLList(PersonType())),
'children': GraphQLField(GraphQLList(PersonType())),
}
"""
__slots__ = ("of_type",)
def __init__(self, type_):
# type: (Any) -> None
assert is_type(
type_
), "Can only create List of a GraphQLType but got: {}.".format(type_)
self.of_type = type_
def __str__(self):
# type: () -> str
return "[" + str(self.of_type) + "]"
def is_same_type(self, other):
return isinstance(other, GraphQLList) and self.of_type.is_same_type(
other.of_type
)
# These types can all accept null as a value.
graphql_nullable_types = (
GraphQLScalarType,
GraphQLObjectType,
GraphQLInterfaceType,
GraphQLUnionType,
GraphQLEnumType,
GraphQLInputObjectType,
GraphQLList,
)
GraphQLNullableType = Union[
GraphQLScalarType,
GraphQLObjectType,
GraphQLInterfaceType,
GraphQLUnionType,
GraphQLEnumType,
GraphQLInputObjectType,
GraphQLList,
]
def is_nullable_type(type_):
# type: (Any) -> bool
return isinstance(type_, graphql_nullable_types)
class GraphQLNonNull(GraphQLType):
"""Non-Null Modifier
A non-null is a kind of type marker, a wrapping type which points to another type. Non-null types enforce
that their values are never null and can ensure an error is raised if this ever occurs during a request.
It is useful for fields which you can make a strong guarantee on non-nullability,
for example usually the id field of a database row will never be null.
Example:
class RowType(GraphQLObjectType):
name = 'Row'
fields = {
'id': GraphQLField(type=GraphQLNonNull(GraphQLString()))
}
Note: the enforcement of non-nullability occurs within the executor.
"""
__slots__ = ("of_type",)
def __init__(
self,
type_, # type: GraphQLNullableType
):
# type: (...) -> None
assert is_type(type_) and not isinstance(
type_, GraphQLNonNull
), "Can only create NonNull of a Nullable GraphQLType but got: {}.".format(type_)
self.of_type = type_ # type: GraphQLNullableType
def __str__(self):
# type: () -> str
return str(self.of_type) + "!"
def is_same_type(self, other):
return isinstance(other, GraphQLNonNull) and self.of_type.is_same_type(
other.of_type
)
# These types may be used as output types as the result of fields.
graphql_output_types = (
GraphQLScalarType,
GraphQLObjectType,
GraphQLInterfaceType,
GraphQLUnionType,
GraphQLEnumType,
)
GraphQLOutputType = Union[
GraphQLScalarType,
GraphQLObjectType,
GraphQLInterfaceType,
GraphQLUnionType,
GraphQLEnumType,
GraphQLNullableType
]
def is_output_type(type_):
# type: (Any) -> bool
named_type = get_named_type(type_)
return isinstance(named_type, graphql_output_types)
def is_union_type(type_):
# type: (Any) -> bool
return isinstance(type_, GraphQLUnionType)
def is_interface_type(type_):
# type: (Any) -> bool
return isinstance(type_, GraphQLInterfaceType)
def is_list_type(type_):
# type: (Any) -> bool
return isinstance(type_, GraphQLList)
def is_non_null_type(type_):
# type: (Any) -> bool
return isinstance(type_, GraphQLNonNull)
def is_object_type(type_):
# type: (Any) -> bool
return isinstance(type_, GraphQLObjectType)
# These types may describe types which may be leaf values.
graphql_leaf_types = (GraphQLScalarType, GraphQLEnumType)
GraphQLLeafType = Union[GraphQLScalarType, GraphQLEnumType]
def is_leaf_type(type_):
# type: (Any) -> bool
return isinstance(type_, (GraphQLScalarType, GraphQLEnumType))
# These types may describe the parent context of a selection set.
graphql_composite_types = (GraphQLObjectType, GraphQLInterfaceType, GraphQLUnionType)
GraphQLCompositeType = Union[GraphQLObjectType, GraphQLInterfaceType, GraphQLUnionType]
def is_composite_type(type_):
# type: (Any) -> bool
named_type = get_named_type(type_)
return isinstance(named_type, graphql_composite_types)
# These types may describe abstract types.
graphql_abstract_types = (GraphQLInterfaceType, GraphQLUnionType)
GraphQLAbstractType = Union[GraphQLInterfaceType, GraphQLUnionType]
def is_abstract_type(type_):
# type: (Any) -> bool
return isinstance(type_, graphql_abstract_types)
| 30.055556
| 121
| 0.627259
|
c31e88c48d9bfa040af500b977c51fa4f4a5fef4
| 2,338
|
py
|
Python
|
tests/test_timecontrol.py
|
ClNo/safe-chicken
|
8ee6d6746f9a660ae7f1ffb6a5483b459307e19e
|
[
"MIT"
] | 2
|
2021-01-21T17:45:31.000Z
|
2021-01-31T01:51:51.000Z
|
tests/test_timecontrol.py
|
ClNo/safe-chicken
|
8ee6d6746f9a660ae7f1ffb6a5483b459307e19e
|
[
"MIT"
] | null | null | null |
tests/test_timecontrol.py
|
ClNo/safe-chicken
|
8ee6d6746f9a660ae7f1ffb6a5483b459307e19e
|
[
"MIT"
] | null | null | null |
from datetime import datetime
from dateutil.tz import tzlocal
from safechicken import timecontrol
time_control_1 = {
"latitude": 47.03,
"longitude": 7.31,
"minutes_after_sunrise": 15,
"minutes_after_sunset": 15
}
time_control_2 = {
"latitude": 47.03,
"longitude": 7.31,
"minutes_after_sunrise": 0,
"minutes_after_sunset": 0
}
time_control_3 = {
"latitude": 47.03,
"longitude": 7.31,
"minutes_after_sunrise": -10,
"minutes_after_sunset": -20
}
def test_timecontrol_1():
door_times, door_times_converted =\
timecontrol.recalc_door_times(time_control_1, datetime.fromisoformat('2020-01-01T09:00:00'), None, None)
# door_times_converted =
# {'sunrise_time': '08:17', 'sunset_time': '16:52', 'sunrise_open_time': '08:32', 'sunset_close_time': '17:07'}
assert door_times_converted['sunrise_time'] == '08:17'
assert door_times_converted['sunset_time'] == '16:52'
assert door_times_converted['sunrise_open_time'] == '08:32'
assert door_times_converted['sunset_close_time'] == '17:07'
assert door_times['sunrise_time'] == datetime.fromisoformat('2020-01-01T08:17').astimezone(tz=tzlocal())
assert door_times['sunset_time'] == datetime.fromisoformat('2020-01-01T16:52').astimezone(tz=tzlocal())
assert door_times['sunrise_open_time'] == datetime.fromisoformat('2020-01-01T08:32').astimezone(tz=tzlocal())
assert door_times['sunset_close_time'] == datetime.fromisoformat('2020-01-01T17:07').astimezone(tz=tzlocal())
def test_timecontrol_2():
door_times, door_times_converted =\
timecontrol.recalc_door_times(time_control_2, datetime.fromisoformat('2020-01-01T09:00:00'), None, None)
assert door_times_converted['sunrise_time'] == '08:17'
assert door_times_converted['sunset_time'] == '16:52'
assert door_times_converted['sunrise_open_time'] == '08:17'
assert door_times_converted['sunset_close_time'] == '16:52'
door_times, door_times_converted =\
timecontrol.recalc_door_times(time_control_3, datetime.fromisoformat('2020-01-01T09:00:00'), None, None)
assert door_times_converted['sunrise_time'] == '08:17'
assert door_times_converted['sunset_time'] == '16:52'
assert door_times_converted['sunrise_open_time'] == '08:07'
assert door_times_converted['sunset_close_time'] == '16:32'
| 39.627119
| 115
| 0.718135
|
18a20e2a6ba0f22e04e894014a6efb4eb51d9e5e
| 8,991
|
py
|
Python
|
vedo/ugrid.py
|
charliekind/vtkplotter
|
e16daac258dc0b383043575f2916ac4ea84a60b1
|
[
"MIT"
] | null | null | null |
vedo/ugrid.py
|
charliekind/vtkplotter
|
e16daac258dc0b383043575f2916ac4ea84a60b1
|
[
"MIT"
] | null | null | null |
vedo/ugrid.py
|
charliekind/vtkplotter
|
e16daac258dc0b383043575f2916ac4ea84a60b1
|
[
"MIT"
] | null | null | null |
import numpy as np
import vedo
import vedo.colors as colors
import vedo.utils as utils
import vtk
from vedo import settings
from vedo.base import BaseGrid
__all__ = ["UGrid"]
#########################################################################
class UGrid(vtk.vtkActor, BaseGrid):
"""Support for UnstructuredGrid objects."""
def __init__(self, inputobj=None):
vtk.vtkActor.__init__(self)
BaseGrid.__init__(self)
inputtype = str(type(inputobj))
self._data = None
self._polydata = None
self.name = "UGrid"
###################
if inputobj is None:
self._data = vtk.vtkUnstructuredGrid()
elif utils.isSequence(inputobj):
pts, cells, celltypes = inputobj
self._data = vtk.vtkUnstructuredGrid()
if not utils.isSequence(cells[0]):
tets=[]
nf=cells[0]+1
for i, cl in enumerate(cells):
if i==nf or i==0:
k = i+1
nf = cl+k
cell = [cells[j+k] for j in range(cl)]
tets.append(cell)
cells = tets
# This would fill the points and use those to define orientation
vpts = utils.numpy2vtk(pts, dtype=float)
points = vtk.vtkPoints()
points.SetData(vpts)
self._data.SetPoints(points)
# This fill the points and use cells to define orientation
# points = vtk.vtkPoints()
# for c in cells:
# for pid in c:
# points.InsertNextPoint(pts[pid])
# self._data.SetPoints(points)
# Fill cells
# https://vtk.org/doc/nightly/html/vtkCellType_8h_source.html
for i, ct in enumerate(celltypes):
cell_conn = cells[i]
if ct == vtk.VTK_HEXAHEDRON:
cell = vtk.vtkHexahedron()
elif ct == vtk.VTK_TETRA:
cell = vtk.vtkTetra()
elif ct == vtk.VTK_VOXEL:
cell = vtk.vtkVoxel()
elif ct == vtk.VTK_WEDGE:
cell = vtk.vtkWedge()
elif ct == vtk.VTK_PYRAMID:
cell = vtk.vtkPyramid()
elif ct == vtk.VTK_HEXAGONAL_PRISM:
cell = vtk.vtkHexagonalPrism()
elif ct == vtk.VTK_PENTAGONAL_PRISM:
cell = vtk.vtkPentagonalPrism()
else:
print("UGrid: cell type", ct, "not implemented. Skip.")
continue
cpids = cell.GetPointIds()
for j, pid in enumerate(cell_conn):
cpids.SetId(j, pid)
self._data.InsertNextCell(ct, cpids)
elif "UnstructuredGrid" in inputtype:
self._data = inputobj
elif isinstance(inputobj, str):
from vedo.io import download, loadUnStructuredGrid
if "https://" in inputobj:
inputobj = download(inputobj, verbose=False)
self._data = loadUnStructuredGrid(inputobj)
self.filename = inputobj
else:
vedo.logger.error(f"cannot understand input type {inputtype}")
return
# self._mapper = vtk.vtkDataSetMapper()
self._mapper = vtk.vtkPolyDataMapper()
self._mapper.SetInterpolateScalarsBeforeMapping(settings.interpolateScalarsBeforeMapping)
if settings.usePolygonOffset:
self._mapper.SetResolveCoincidentTopologyToPolygonOffset()
pof, pou = settings.polygonOffsetFactor, settings.polygonOffsetUnits
self._mapper.SetResolveCoincidentTopologyPolygonOffsetParameters(pof, pou)
self.GetProperty().SetInterpolationToFlat()
if not self._data:
return
# now fill the representation of the vtk unstr grid
sf = vtk.vtkShrinkFilter()
sf.SetInputData(self._data)
sf.SetShrinkFactor(1.0)
sf.Update()
gf = vtk.vtkGeometryFilter()
gf.SetInputData(sf.GetOutput())
gf.Update()
self._polydata = gf.GetOutput()
self._mapper.SetInputData(self._polydata)
sc = None
if self.useCells:
sc = self._polydata.GetCellData().GetScalars()
else:
sc = self._polydata.GetPointData().GetScalars()
if sc:
self._mapper.SetScalarRange(sc.GetRange())
self.SetMapper(self._mapper)
self.property = self.GetProperty()
# ------------------------------------------------------------------
def clone(self):
"""Clone the UGrid object to yield an exact copy."""
ugCopy = vtk.vtkUnstructuredGrid()
ugCopy.DeepCopy(self._data)
cloned = UGrid(ugCopy)
pr = self.GetProperty()
if isinstance(pr, vtk.vtkVolumeProperty):
prv = vtk.vtkVolumeProperty()
else:
prv = vtk.vtkProperty()
prv.DeepCopy(pr)
cloned.SetProperty(prv)
cloned.property = prv
#assign the same transformation to the copy
cloned.SetOrigin(self.GetOrigin())
cloned.SetScale(self.GetScale())
cloned.SetOrientation(self.GetOrientation())
cloned.SetPosition(self.GetPosition())
cloned.name = self.name
return cloned
def color(self, c=False, alpha=None):
"""
Set/get UGrid color.
If None is passed as input, will use colors from active scalars.
Same as `ugrid.c()`.
"""
if c is False:
return np.array(self.GetProperty().GetColor())
elif c is None:
self._mapper.ScalarVisibilityOn()
return self
self._mapper.ScalarVisibilityOff()
cc = colors.getColor(c)
self.property.SetColor(cc)
if self.trail:
self.trail.GetProperty().SetColor(cc)
if alpha is not None:
self.alpha(alpha)
return self
def alpha(self, opacity=None):
"""Set/get mesh's transparency. Same as `mesh.opacity()`."""
if opacity is None:
return self.property.GetOpacity()
self.property.SetOpacity(opacity)
bfp = self.GetBackfaceProperty()
if bfp:
if opacity < 1:
self._bfprop = bfp
self.SetBackfaceProperty(None)
else:
self.SetBackfaceProperty(self._bfprop)
return self
def opacity(self, alpha=None):
"""Set/get mesh's transparency. Same as `mesh.alpha()`."""
return self.alpha(alpha)
def wireframe(self, value=True):
"""Set mesh's representation as wireframe or solid surface.
Same as `mesh.wireframe()`."""
if value:
self.property.SetRepresentationToWireframe()
else:
self.property.SetRepresentationToSurface()
return self
def lineWidth(self, lw=None):
"""Set/get width of mesh edges. Same as `lw()`."""
if lw is not None:
if lw == 0:
self.property.EdgeVisibilityOff()
self.property.SetRepresentationToSurface()
return self
self.property.EdgeVisibilityOn()
self.property.SetLineWidth(lw)
else:
return self.property.GetLineWidth()
return self
def lw(self, lineWidth=None):
"""Set/get width of mesh edges. Same as `lineWidth()`."""
return self.lineWidth(lineWidth)
def lineColor(self, lc=None):
"""Set/get color of mesh edges. Same as `lc()`."""
if lc is not None:
if "ireframe" in self.property.GetRepresentationAsString():
self.property.EdgeVisibilityOff()
self.color(lc)
return self
self.property.EdgeVisibilityOn()
self.property.SetEdgeColor(colors.getColor(lc))
else:
return self.property.GetEdgeColor()
return self
def lc(self, lineColor=None):
"""Set/get color of mesh edges. Same as `lineColor()`."""
return self.lineColor(lineColor)
def extractCellType(self, ctype):
"""Extract a specific cell type and return a new UGrid."""
uarr = self._data.GetCellTypesArray()
        ctarrtyp = np.where(utils.vtk2numpy(uarr) == ctype)[0]
uarrtyp = utils.numpy2vtk(ctarrtyp, deep=False, dtype='id')
selectionNode = vtk.vtkSelectionNode()
selectionNode.SetFieldType(vtk.vtkSelectionNode.CELL)
selectionNode.SetContentType(vtk.vtkSelectionNode.INDICES)
selectionNode.SetSelectionList(uarrtyp)
selection = vtk.vtkSelection()
selection.AddNode(selectionNode)
es = vtk.vtkExtractSelection()
es.SetInputData(0, self._data)
es.SetInputData(1, selection)
es.Update()
return UGrid(es.GetOutput())
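# --- Illustrative usage (a sketch, not part of the class above; assumes an
# existing vtkUnstructuredGrid instance named `vtk_ug`) ---
#
#   ug = UGrid(vtk_ug)
#   ug.color('tomato').alpha(0.5).lw(2)       # chainable appearance setters
#   tets = ug.extractCellType(vtk.VTK_TETRA)  # new UGrid holding only the tetrahedra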
| 34.580769
| 97
| 0.561339
|
719fe4ed368f91c124ef36ce2fd1673c30e89ba8
| 45,752
|
py
|
Python
|
tests/test_manager/test_manager_graph.py
|
cthoyt/pybel
|
ed66f013a77f9cbc513892b0dad1025b8f68bb46
|
[
"Apache-2.0"
] | null | null | null |
tests/test_manager/test_manager_graph.py
|
cthoyt/pybel
|
ed66f013a77f9cbc513892b0dad1025b8f68bb46
|
[
"Apache-2.0"
] | 11
|
2017-12-28T08:03:14.000Z
|
2019-01-15T02:13:58.000Z
|
tests/test_manager/test_manager_graph.py
|
cthoyt/pybel
|
ed66f013a77f9cbc513892b0dad1025b8f68bb46
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Tests for manager functions handling BEL networks."""
import time
import unittest
from collections import Counter
from random import randint
from pybel import BELGraph, from_bel_script, from_database, to_database
from pybel.constants import (
ABUNDANCE,
BIOPROCESS,
CITATION_TYPE_PUBMED,
DECREASES,
HAS_PRODUCT,
HAS_REACTANT,
IDENTIFIER,
INCREASES,
METADATA_NAME,
METADATA_VERSION,
MIRNA,
NAMESPACE,
PART_OF,
PATHOLOGY,
PROTEIN,
RELATION,
)
from pybel.dsl import (
BaseEntity,
ComplexAbundance,
CompositeAbundance,
EnumeratedFusionRange,
Fragment,
Gene,
GeneFusion,
GeneModification,
Hgvs,
NamedComplexAbundance,
Pathology,
Protein,
ProteinModification,
Reaction,
activity,
degradation,
location,
secretion,
translocation,
)
from pybel.dsl.namespaces import chebi, hgnc, mirbase
from pybel.examples import ras_tloc_graph, sialic_acid_graph
from pybel.language import Entity
from pybel.manager import models
from pybel.manager.models import Author, Citation, Edge, Evidence, NamespaceEntry, Node
from pybel.testing.cases import (
FleetingTemporaryCacheMixin,
TemporaryCacheClsMixin,
TemporaryCacheMixin,
)
from pybel.testing.constants import test_bel_simple
from pybel.testing.mocks import mock_bel_resources
from pybel.testing.utils import make_dummy_annotations, make_dummy_namespaces, n
from tests.constants import (
BelReconstitutionMixin,
akt1,
casp8,
egfr,
expected_test_simple_metadata,
fadd,
test_citation_dict,
test_evidence_text,
)
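# Shared node fixtures and helper objects reused across the test cases below.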
fos = hgnc(name="FOS")
jun = hgnc(name="JUN")
mirna_1 = mirbase(name=n())
mirna_2 = mirbase(name=n())
pathology_1 = Pathology("DO", n())
ap1_complex = ComplexAbundance([fos, jun])
egfr_dimer = ComplexAbundance([egfr, egfr])
yfg_data = hgnc(name="YFG")
e2f4_data = hgnc(name="E2F4")
bound_ap1_e2f4 = ComplexAbundance([ap1_complex, e2f4_data])
superoxide = chebi(name="superoxide")
hydrogen_peroxide = chebi(name="hydrogen peroxide")
oxygen = chebi(name="oxygen")
superoxide_decomposition = Reaction(reactants=[superoxide], products=[hydrogen_peroxide, oxygen])
def assert_unqualified_edge(test_case, u: BaseEntity, v: BaseEntity, rel: str) -> None:
"""Assert there's only one edge and get the data for it"""
test_case.assertIn(u, test_case.graph)
test_case.assertIn(v, test_case.graph[u])
edges = list(test_case.graph[u][v].values())
test_case.assertEqual(1, len(edges))
data = edges[0]
test_case.assertEqual(rel, data[RELATION])
class TestNetworkCache(BelReconstitutionMixin, FleetingTemporaryCacheMixin):
def test_get_network_missing(self):
network = self.manager.get_most_recent_network_by_name("This network is not here")
self.assertIsNone(network)
def test_get_graph_missing(self):
network = self.manager.get_graph_by_most_recent("This network is not here")
self.assertIsNone(network)
@mock_bel_resources
def test_reload(self, mock_get):
"""Test that a graph with the same name and version can't be added twice."""
graph = sialic_acid_graph.copy()
self.assertEqual("1.0.0", graph.version)
to_database(graph, manager=self.manager)
time.sleep(1)
self.assertEqual(1, self.manager.count_networks())
networks = self.manager.list_networks()
self.assertEqual(1, len(networks))
network = networks[0]
self.assertEqual(graph.name, network.name)
self.assertEqual(graph.version, network.version)
self.assertEqual(graph.description, network.description)
reconstituted = self.manager.get_graph_by_name_version(graph.name, graph.version)
self.assertIsInstance(reconstituted, BELGraph)
self.assertEqual(graph.nodes(data=True), reconstituted.nodes(data=True))
# self.bel_thorough_reconstituted(reconstituted)
self.assertEqual(1, self.manager.count_networks())
graph_copy = graph.copy()
graph_copy.version = "1.0.1"
network_copy = self.manager.insert_graph(graph_copy)
time.sleep(1) # Sleep so the first graph always definitely goes in first
self.assertNotEqual(network.id, network_copy.id)
self.assertTrue(self.manager.has_name_version(graph_copy.name, graph_copy.version))
self.assertFalse(self.manager.has_name_version("wrong name", "0.1.2"))
self.assertFalse(self.manager.has_name_version(graph_copy.name, "0.1.2"))
self.assertFalse(self.manager.has_name_version("wrong name", graph_copy.version))
self.assertEqual(2, self.manager.count_networks())
self.assertEqual("1.0.1", self.manager.get_most_recent_network_by_name(graph.name).version)
query_ids = {-1, network.id, network_copy.id}
query_networks_result = self.manager.get_networks_by_ids(query_ids)
self.assertEqual(2, len(query_networks_result))
self.assertEqual(
{network.id, network_copy.id},
{network.id for network in query_networks_result},
)
expected_versions = {"1.0.1", "1.0.0"}
self.assertEqual(expected_versions, set(self.manager.get_network_versions(graph.name)))
exact_name_version = from_database(graph.name, graph.version, manager=self.manager)
self.assertEqual(graph.name, exact_name_version.name)
self.assertEqual(graph.version, exact_name_version.version)
exact_name_version = from_database(graph.name, "1.0.1", manager=self.manager)
self.assertEqual(graph.name, exact_name_version.name)
self.assertEqual("1.0.1", exact_name_version.version)
most_recent_version = from_database(graph.name, manager=self.manager)
self.assertEqual(graph.name, most_recent_version.name)
self.assertEqual("1.0.1", exact_name_version.version)
recent_networks = list(self.manager.list_recent_networks()) # just try it to see if it fails
self.assertIsNotNone(recent_networks)
self.assertEqual([(network.name, "1.0.1")], [(n.name, n.version) for n in recent_networks])
self.assertEqual("1.0.1", recent_networks[0].version)
@mock_bel_resources
def test_upload_with_tloc(self, mock_get):
"""Test that the RAS translocation example graph can be uploaded."""
make_dummy_namespaces(self.manager, ras_tloc_graph)
to_database(ras_tloc_graph, manager=self.manager)
class TestTemporaryInsertNetwork(TemporaryCacheMixin):
def test_insert_with_list_annotations(self):
"""This test checks that graphs that contain list annotations, which aren't cached, can be loaded properly
into the database."""
graph = BELGraph(name="test", version="0.0.0")
graph.annotation_list["TEST"] = {"a", "b", "c"}
graph.add_increases(
fos,
jun,
evidence=test_evidence_text,
citation=test_citation_dict,
annotations={"TEST": "a"},
)
make_dummy_namespaces(self.manager, graph)
with mock_bel_resources:
self.manager.insert_graph(graph)
# TODO check that the database doesn't have anything for TEST in it
class TestTypedQuery(TemporaryCacheMixin):
def setUp(self):
super().setUp()
graph = BELGraph(name="test", version="0.0.0")
graph.annotation_list["TEST"] = {"a", "b", "c"}
graph.add_positive_correlation(
mirna_1,
pathology_1,
evidence=n(),
citation=n(),
)
graph.add_negative_correlation(
mirna_2,
pathology_1,
evidence=n(),
citation=n(),
)
make_dummy_namespaces(self.manager, graph)
make_dummy_annotations(self.manager, graph)
with mock_bel_resources:
self.manager.insert_graph(graph)
def test_query_edge_source_type(self):
rv = self.manager.query_edges(source_function=MIRNA).all()
self.assertEqual(2, len(rv))
rv = self.manager.query_edges(target_function=PATHOLOGY).all()
self.assertEqual(2, len(rv))
class TestQuery(TemporaryCacheMixin):
def setUp(self):
super(TestQuery, self).setUp()
graph = BELGraph(name="test", version="0.0.0")
graph.annotation_list["TEST"] = {"a", "b", "c"}
graph.add_increases(
fos,
jun,
evidence=test_evidence_text,
citation=test_citation_dict,
annotations={"TEST": "a"},
)
make_dummy_namespaces(self.manager, graph)
make_dummy_annotations(self.manager, graph)
with mock_bel_resources:
self.manager.insert_graph(graph)
def test_query_node_bel_1(self):
rv = self.manager.query_nodes(bel="p(HGNC:FOS)").all()
self.assertEqual(1, len(rv))
self.assertEqual(fos, rv[0].to_json())
def test_query_node_bel_2(self):
rv = self.manager.query_nodes(bel="p(HGNC:JUN)").all()
self.assertEqual(1, len(rv))
self.assertEqual(jun, rv[0].to_json())
def test_query_node_namespace_wildcard(self):
rv = self.manager.query_nodes(namespace="HG%").all()
self.assertEqual(2, len(rv))
self.assertTrue(any(x.to_json() == fos for x in rv))
self.assertTrue(any(x.to_json() == jun for x in rv))
def test_query_node_name_wildcard(self):
rv = self.manager.query_nodes(name="%J%").all()
        self.assertEqual(1, len(rv))
self.assertEqual(jun, rv[0].to_json())
def test_query_node_type(self):
rv = self.manager.query_nodes(type=PROTEIN).all()
self.assertEqual(2, len(rv))
def test_query_node_type_missing(self):
rv = self.manager.query_nodes(type=ABUNDANCE).all()
self.assertEqual(0, len(rv))
def test_query_edge_by_bel(self):
rv = self.manager.query_edges(bel="p(HGNC:FOS) increases p(HGNC:JUN)").all()
self.assertEqual(1, len(rv))
def test_query_edge_by_relation_wildcard(self):
# relation like, data
increased_list = self.manager.query_edges(relation="increase%").all()
self.assertEqual(1, len(increased_list))
# self.assertIn(..., increased_list)
def test_query_edge_by_evidence_wildcard(self):
# evidence like, data
evidence_list = self.manager.search_edges_with_evidence(evidence="%3%")
self.assertEqual(len(evidence_list), 0)
evidence_list = self.manager.search_edges_with_evidence(evidence="%Twit%")
self.assertEqual(len(evidence_list), 1)
def test_query_edge_by_mixed_no_result(self):
# no result
# FIXME what should this return
empty_list = self.manager.query_edges(source="p(HGNC:FADD)", relation=DECREASES)
self.assertEqual(len(empty_list), 0)
def test_query_edge_by_mixed(self):
# source, relation, data
source_list = self.manager.query_edges(source="p(HGNC:FOS)", relation=INCREASES).all()
self.assertEqual(len(source_list), 1)
def test_query_edge_by_source_function(self):
edges = self.manager.query_edges(source_function=PROTEIN).all()
self.assertEqual(1, len(edges), msg="Wrong number of edges: {}".format(edges))
edges = self.manager.query_edges(source_function=BIOPROCESS).all()
self.assertEqual(0, len(edges), msg="Wrong number of edges: {}".format(edges))
def test_query_edge_by_target_function(self):
edges = self.manager.query_edges(target_function=PROTEIN).all()
self.assertEqual(1, len(edges), msg="Wrong number of edges: {}".format(edges))
edges = self.manager.query_edges(target_function=PATHOLOGY).all()
self.assertEqual(0, len(edges), msg="Wrong number of edges: {}".format(edges))
def test_query_citation_by_type(self):
rv = self.manager.query_citations(db=CITATION_TYPE_PUBMED)
self.assertEqual(1, len(rv))
self.assertTrue(rv[0].is_pubmed)
self.assertFalse(rv[0].is_enriched)
    def test_query_citation_by_reference(self):
rv = self.manager.query_citations(db=CITATION_TYPE_PUBMED, db_id=test_citation_dict[IDENTIFIER])
self.assertEqual(1, len(rv))
self.assertTrue(rv[0].is_pubmed)
self.assertFalse(rv[0].is_enriched)
self.assertEqual(test_citation_dict, rv[0].to_json())
@unittest.skip
def test_query_by_author_wildcard(self):
author_list = self.manager.query_citations(author="Example%")
self.assertEqual(len(author_list), 1)
@unittest.skip
def test_query_by_name(self):
# type, name, data
name_dict_list = self.manager.query_citations(
db=CITATION_TYPE_PUBMED,
name="That other article from last week",
)
self.assertEqual(len(name_dict_list), 1)
# self.assertIn(..., name_dict_list)
@unittest.skip
def test_query_by_name_wildcard(self):
# type, name like, data
name_dict_list2 = self.manager.query_citations(db=CITATION_TYPE_PUBMED, name="%article from%")
self.assertEqual(len(name_dict_list2), 2)
# self.assertIn(..., name_dict_list2)
# self.assertIn(..., name_dict_list2)
class TestEnsure(TemporaryCacheMixin):
def test_get_or_create_citation(self):
reference = str(randint(1, 1000000))
citation_dict = {
NAMESPACE: CITATION_TYPE_PUBMED,
IDENTIFIER: reference,
}
citation = self.manager.get_or_create_citation(**citation_dict)
self.manager.session.commit()
self.assertIsInstance(citation, Citation)
self.assertEqual(citation_dict, citation.to_json())
citation_reloaded_from_reference = self.manager.get_citation_by_pmid(reference)
self.assertIsNotNone(citation_reloaded_from_reference)
self.assertEqual(citation_dict, citation_reloaded_from_reference.to_json())
citation_reloaded_from_dict = self.manager.get_or_create_citation(**citation_dict)
self.assertIsNotNone(citation_reloaded_from_dict)
self.assertEqual(citation_dict, citation_reloaded_from_dict.to_json())
citation_reloaded_from_reference = self.manager.get_citation_by_reference(
citation_dict[NAMESPACE],
citation_dict[IDENTIFIER],
)
self.assertIsNotNone(citation_reloaded_from_reference)
self.assertEqual(citation_dict, citation_reloaded_from_reference.to_json())
def test_get_or_create_evidence(self):
citation_db, citation_ref = CITATION_TYPE_PUBMED, str(randint(1, 1000000))
basic_citation = self.manager.get_or_create_citation(namespace=citation_db, identifier=citation_ref)
utf8_test_evidence = "Yes, all the information is true! This contains a unicode alpha: α"
evidence = self.manager.get_or_create_evidence(basic_citation, utf8_test_evidence)
self.assertIsInstance(evidence, Evidence)
self.assertIn(evidence, self.manager.object_cache_evidence.values())
# Objects cached?
reloaded_evidence = self.manager.get_or_create_evidence(basic_citation, utf8_test_evidence)
self.assertEqual(evidence, reloaded_evidence)
def test_get_or_create_author(self):
"""This tests getting or creating author with unicode characters"""
author_name = "Jαckson M"
# Create
author = self.manager.get_or_create_author(author_name)
self.manager.session.commit()
self.assertIsInstance(author, Author)
self.assertEqual(author.name, author_name)
author_from_name = self.manager.get_author_by_name(author_name)
self.assertIsNotNone(author_from_name)
self.assertEqual(author_name, author_from_name.name)
# Get
author_from_get = self.manager.get_or_create_author(author_name)
self.assertEqual(author.name, author_from_get.name)
self.assertEqual(author, author_from_get)
class TestEdgeStore(TemporaryCacheClsMixin, BelReconstitutionMixin):
"""Tests that the cache can be queried."""
@classmethod
def setUpClass(cls):
"""Set up the class with a BEL graph and its corresponding SQLAlchemy model."""
super().setUpClass()
with mock_bel_resources:
cls.graph = from_bel_script(test_bel_simple, manager=cls.manager, disallow_nested=False)
cls.network = cls.manager.insert_graph(cls.graph)
def test_citations(self):
citations = self.manager.session.query(Citation).all()
self.assertEqual(2, len(citations), msg="Citations: {}".format(citations))
citation_db_ids = {"123455", "123456"}
self.assertEqual(citation_db_ids, {citation.db_id for citation in citations})
def test_evidences(self):
evidences = self.manager.session.query(Evidence).all()
self.assertEqual(3, len(evidences))
evidences_texts = {"Evidence 1 w extra notes", "Evidence 2", "Evidence 3"}
self.assertEqual(evidences_texts, {evidence.text for evidence in evidences})
def test_nodes(self):
nodes = self.manager.session.query(Node).all()
self.assertEqual(4, len(nodes))
def test_edges(self):
edges = self.manager.session.query(Edge).all()
x = Counter((e.source.bel, e.target.bel) for e in edges)
d = {
(akt1.as_bel(), egfr.as_bel()): 1,
(egfr.as_bel(), fadd.as_bel()): 1,
(egfr.as_bel(), casp8.as_bel()): 1,
(fadd.as_bel(), casp8.as_bel()): 1,
(akt1.as_bel(), casp8.as_bel()): 1, # two way association
(casp8.as_bel(), akt1.as_bel()): 1, # two way association
}
self.assertEqual(dict(x), d)
network_edge_associations = (
self.manager.session.query(models.network_edge).filter_by(network_id=self.network.id).all()
)
self.assertEqual(
{network_edge_association.edge_id for network_edge_association in network_edge_associations},
{edge.id for edge in edges},
)
def test_reconstitute(self):
g2 = self.manager.get_graph_by_name_version(
expected_test_simple_metadata[METADATA_NAME],
expected_test_simple_metadata[METADATA_VERSION],
)
self.bel_simple_reconstituted(g2)
class TestAddNodeFromData(unittest.TestCase):
def setUp(self):
self.graph = BELGraph()
def test_simple(self):
self.graph.add_node_from_data(yfg_data)
self.assertIn(yfg_data, self.graph)
self.assertEqual(1, self.graph.number_of_nodes())
def test_single_variant(self):
node_data = Gene("HGNC", "AKT1", variants=Hgvs("p.Phe508del"))
node_parent_data = node_data.get_parent()
self.graph.add_node_from_data(node_data)
self.assertIn(node_data, self.graph)
self.assertIn(node_parent_data, self.graph)
self.assertEqual(2, self.graph.number_of_nodes())
self.assertEqual(1, self.graph.number_of_edges())
def test_multiple_variants(self):
node_data = Gene("HGNC", "AKT1", variants=[Hgvs("p.Phe508del"), Hgvs("p.Phe509del")])
node_parent_data = node_data.get_parent()
node_parent_tuple = node_parent_data
self.graph.add_node_from_data(node_data)
self.assertIn(node_data, self.graph)
self.assertIn(node_parent_tuple, self.graph)
self.assertEqual(2, self.graph.number_of_nodes())
self.assertEqual(1, self.graph.number_of_edges())
def test_fusion(self):
node_data = GeneFusion(
partner_5p=Gene("HGNC", "TMPRSS2"),
partner_3p=Gene("HGNC", "ERG"),
range_5p=EnumeratedFusionRange("c", 1, 79),
range_3p=EnumeratedFusionRange("c", 312, 5034),
)
self.graph.add_node_from_data(node_data)
self.assertIn(node_data, self.graph)
self.assertEqual(1, self.graph.number_of_nodes())
self.assertEqual(0, self.graph.number_of_edges())
def test_composite(self):
il23 = NamedComplexAbundance(namespace="GO", name="interleukin-23 complex")
il6 = Protein(namespace="HGNC", name="IL6")
node_data = CompositeAbundance([il23, il6])
self.graph.add_node_from_data(node_data)
self.assertIn(node_data, self.graph)
self.assertEqual(3, self.graph.number_of_nodes())
        self.assertIn(il6, self.graph, msg="Nodes:\n{}".format("\n".join(map(str, self.graph))))
self.assertIn(il23, self.graph)
self.assertEqual(2, self.graph.number_of_edges())
self.assertIn(node_data, self.graph[il6])
edges = list(self.graph[il6][node_data].values())
self.assertEqual(1, len(edges))
data = edges[0]
self.assertEqual(PART_OF, data[RELATION])
self.assertIn(node_data, self.graph[il23])
edges = list(self.graph[il23][node_data].values())
self.assertEqual(1, len(edges))
data = edges[0]
self.assertEqual(PART_OF, data[RELATION])
def test_reaction(self):
self.graph.add_node_from_data(superoxide_decomposition)
self.assertIn(superoxide_decomposition, self.graph)
self.assertEqual(4, self.graph.number_of_nodes())
self.assertEqual(3, self.graph.number_of_edges())
assert_unqualified_edge(self, superoxide_decomposition, superoxide, HAS_REACTANT)
assert_unqualified_edge(self, superoxide_decomposition, hydrogen_peroxide, HAS_PRODUCT)
assert_unqualified_edge(self, superoxide_decomposition, oxygen, HAS_PRODUCT)
def test_complex(self):
node = ComplexAbundance(members=[fos, jun])
self.graph.add_node_from_data(node)
self.assertIn(node, self.graph)
self.assertEqual(3, self.graph.number_of_nodes())
self.assertEqual(2, self.graph.number_of_edges())
assert_unqualified_edge(self, fos, node, PART_OF)
assert_unqualified_edge(self, jun, node, PART_OF)
def test_dimer_complex(self):
"""Tests what happens if a BEL statement complex(p(X), p(X)) is added"""
self.graph.add_node_from_data(egfr_dimer)
self.assertIn(egfr, self.graph)
self.assertIn(egfr_dimer, self.graph)
self.assertEqual(2, self.graph.number_of_nodes())
self.assertEqual(1, self.graph.number_of_edges())
assert_unqualified_edge(self, egfr, egfr_dimer, PART_OF)
def test_nested_complex(self):
"""Checks what happens if a theoretical BEL statement `complex(p(X), complex(p(Y), p(Z)))` is added"""
self.graph.add_node_from_data(bound_ap1_e2f4)
self.assertIn(bound_ap1_e2f4, self.graph)
self.assertEqual(5, self.graph.number_of_nodes())
self.assertIn(fos, self.graph)
self.assertIn(jun, self.graph)
self.assertIn(e2f4_data, self.graph)
self.assertIn(ap1_complex, self.graph)
self.assertEqual(4, self.graph.number_of_edges())
assert_unqualified_edge(self, fos, ap1_complex, PART_OF)
assert_unqualified_edge(self, jun, ap1_complex, PART_OF)
assert_unqualified_edge(self, ap1_complex, bound_ap1_e2f4, PART_OF)
assert_unqualified_edge(self, e2f4_data, bound_ap1_e2f4, PART_OF)
class TestReconstituteNodeTuples(TemporaryCacheMixin):
"""Tests the ability to go from PyBEL to relational database"""
def _help_reconstitute(self, node: BaseEntity, number_nodes: int, number_edges: int):
"""Help test the round-trip conversion from PyBEL data dictionary to node model."""
self.assertIsInstance(node, BaseEntity)
graph = BELGraph(name="test", version="0.0.0")
graph.add_node_from_data(node)
make_dummy_namespaces(self.manager, graph)
self.manager.insert_graph(graph)
self.assertEqual(number_nodes, self.manager.count_nodes())
self.assertEqual(number_edges, self.manager.count_edges())
node_model = self.manager.get_or_create_node(graph, node)
self.assertEqual(node.md5, node_model.md5)
self.manager.session.commit()
self.assertEqual(node, node_model.to_json())
self.assertEqual(node, self.manager.get_dsl_by_hash(node.md5))
@mock_bel_resources
def test_simple(self, mock):
self._help_reconstitute(yfg_data, 1, 0)
@mock_bel_resources
def test_Hgvs(self, mock):
node_data = Gene(namespace="HGNC", name="AKT1", variants=Hgvs("p.Phe508del"))
self._help_reconstitute(node_data, 2, 1)
@mock_bel_resources
def test_fragment_unspecified(self, mock):
dummy_namespace = n()
dummy_name = n()
node_data = Protein(namespace=dummy_namespace, name=dummy_name, variants=[Fragment()])
self._help_reconstitute(node_data, 2, 1)
@mock_bel_resources
def test_fragment_specified(self, mock):
dummy_namespace = n()
dummy_name = n()
node_data = Protein(
namespace=dummy_namespace,
name=dummy_name,
variants=[Fragment(start=5, stop=8)],
)
self._help_reconstitute(node_data, 2, 1)
@mock_bel_resources
def test_fragment_specified_start_only(self, mock):
dummy_namespace = n()
dummy_name = n()
node_data = Protein(
namespace=dummy_namespace,
name=dummy_name,
variants=[Fragment(start=5, stop="*")],
)
self._help_reconstitute(node_data, 2, 1)
@mock_bel_resources
def test_fragment_specified_end_only(self, mock):
dummy_namespace = n()
dummy_name = n()
node_data = Protein(
namespace=dummy_namespace,
name=dummy_name,
variants=[Fragment(start="*", stop=1000)],
)
self._help_reconstitute(node_data, 2, 1)
@mock_bel_resources
def test_gmod_custom(self, mock):
"""Tests a gene modification that uses a non-default namespace"""
dummy_namespace = "HGNC"
dummy_name = "AKT1"
dummy_mod_namespace = "GO"
dummy_mod_name = "DNA Methylation"
node_data = Gene(
namespace=dummy_namespace,
name=dummy_name,
variants=[GeneModification(name=dummy_mod_name, namespace=dummy_mod_namespace)],
)
self._help_reconstitute(node_data, 2, 1)
@mock_bel_resources
def test_gmod_default(self, mock):
"""Test a gene modification that uses the BEL default namespace."""
dummy_namespace = n()
dummy_name = n()
node_data = Gene(
namespace=dummy_namespace,
name=dummy_name,
variants=[GeneModification("Me")],
)
self._help_reconstitute(node_data, 2, 1)
@mock_bel_resources
def test_pmod_default_simple(self, mock):
dummy_namespace = n()
dummy_name = n()
node_data = Protein(
namespace=dummy_namespace,
name=dummy_name,
variants=[ProteinModification("Me")],
)
self._help_reconstitute(node_data, 2, 1)
@mock_bel_resources
def test_pmod_custom_simple(self, mock):
dummy_namespace = "HGNC"
dummy_name = "AKT1"
dummy_mod_namespace = "GO"
dummy_mod_name = "Protein phosphorylation"
node_data = Protein(
namespace=dummy_namespace,
name=dummy_name,
variants=[ProteinModification(name=dummy_mod_name, namespace=dummy_mod_namespace)],
)
self._help_reconstitute(node_data, 2, 1)
@mock_bel_resources
def test_pmod_default_with_residue(self, mock):
dummy_namespace = n()
dummy_name = n()
node_data = Protein(
namespace=dummy_namespace,
name=dummy_name,
variants=[ProteinModification("Me", code="Ser")],
)
self._help_reconstitute(node_data, 2, 1)
@mock_bel_resources
def test_pmod_custom_with_residue(self, mock):
dummy_namespace = "HGNC"
dummy_name = "AKT1"
dummy_mod_namespace = "GO"
dummy_mod_name = "Protein phosphorylation"
node_data = Protein(
namespace=dummy_namespace,
name=dummy_name,
variants=[ProteinModification(name=dummy_mod_name, namespace=dummy_mod_namespace, code="Ser")],
)
self._help_reconstitute(node_data, 2, 1)
@mock_bel_resources
def test_pmod_default_full(self, mock):
dummy_namespace = n()
dummy_name = n()
node_data = Protein(
namespace=dummy_namespace,
name=dummy_name,
variants=[ProteinModification("Me", code="Ser", position=5)],
)
self._help_reconstitute(node_data, 2, 1)
@mock_bel_resources
def test_pmod_custom_full(self, mock):
dummy_namespace = "HGNC"
dummy_name = "AKT1"
dummy_mod_namespace = "GO"
dummy_mod_name = "Protein phosphorylation"
node_data = Protein(
namespace=dummy_namespace,
name=dummy_name,
variants=[
ProteinModification(
name=dummy_mod_name,
namespace=dummy_mod_namespace,
code="Ser",
position=5,
)
],
)
self._help_reconstitute(node_data, 2, 1)
@mock_bel_resources
def test_multiple_variants(self, mock):
node_data = Gene(
namespace="HGNC",
name="AKT1",
variants=[Hgvs("p.Phe508del"), Hgvs("p.Phe509del")],
)
self._help_reconstitute(node_data, 2, 1)
@mock_bel_resources
def test_fusion_specified(self, mock):
node_data = GeneFusion(
Gene("HGNC", "TMPRSS2"),
Gene("HGNC", "ERG"),
EnumeratedFusionRange("c", 1, 79),
EnumeratedFusionRange("c", 312, 5034),
)
self._help_reconstitute(node_data, 1, 0)
@mock_bel_resources
def test_fusion_unspecified(self, mock):
node_data = GeneFusion(
Gene("HGNC", "TMPRSS2"),
Gene("HGNC", "ERG"),
)
self._help_reconstitute(node_data, 1, 0)
@mock_bel_resources
def test_composite(self, mock):
interleukin_23_complex = NamedComplexAbundance("GO", "interleukin-23 complex")
il6 = hgnc(name="IL6")
interleukin_23_and_il6 = CompositeAbundance([interleukin_23_complex, il6])
self._help_reconstitute(interleukin_23_and_il6, 3, 2)
@mock_bel_resources
def test_reaction(self, mock):
self._help_reconstitute(superoxide_decomposition, 4, 3)
@mock_bel_resources
def test_complex(self, mock):
self._help_reconstitute(ap1_complex, 3, 2)
@mock_bel_resources
def test_nested_complex(self, mock):
self._help_reconstitute(bound_ap1_e2f4, 5, 4)
class TestReconstituteEdges(TemporaryCacheMixin):
"""This class tests that edges with varying properties can be added and extracted losslessly"""
def setUp(self):
"""Creates a unit test with a manager and graph"""
super().setUp()
self.graph = BELGraph(name=n(), version=n())
self.graph.annotation_pattern["Species"] = r"\d+"
@mock_bel_resources
def test_translocation_default(self, mock):
"""This test checks that a translocation gets in the database properly"""
self.graph.add_increases(
Protein(name="F2", namespace="HGNC"),
Protein(name="EDN1", namespace="HGNC"),
evidence="In endothelial cells, ET-1 secretion is detectable under basal conditions, whereas thrombin "
"induces its secretion.",
citation="10473669",
source_modifier=secretion(),
)
make_dummy_namespaces(self.manager, self.graph)
network = self.manager.insert_graph(self.graph)
self.assertEqual(2, network.nodes.count(), msg="Missing one or both of the nodes.")
self.assertEqual(1, network.edges.count(), msg="Missing the edge")
# edge = network.edges.first()
# self.assertEqual(2, edge.properties.count())
@mock_bel_resources
def test_subject_translocation_custom_to_loc(self, mock):
self.graph.add_increases(
Protein(name="F2", namespace="HGNC"),
Protein(name="EDN1", namespace="HGNC"),
evidence="In endothelial cells, ET-1 secretion is detectable under basal conditions, whereas thrombin induces its secretion.",
citation="10473669",
source_modifier=translocation(
from_loc=Entity(namespace="TEST", name="A"),
to_loc=Entity(namespace="GO", name="extracellular space"),
),
)
make_dummy_namespaces(self.manager, self.graph)
network = self.manager.insert_graph(self.graph)
self.assertEqual(2, network.nodes.count())
self.assertEqual(1, network.edges.count())
edge = network.edges.first()
# self.assertEqual(2, edge.properties.count())
@mock_bel_resources
def test_subject_activity_default(self, mock):
p1_name = n()
p2_name = n()
self.graph.add_increases(
Protein(name=p1_name, namespace="HGNC"),
Protein(name=p2_name, namespace="HGNC"),
evidence=n(),
citation=n(),
source_modifier=activity("kin"),
)
make_dummy_namespaces(self.manager, self.graph)
network = self.manager.insert_graph(self.graph)
self.assertEqual(2, network.nodes.count(), msg="number of nodes")
self.assertEqual(1, network.edges.count(), msg="number of edges")
kin_list = self.manager.session.query(NamespaceEntry).filter(NamespaceEntry.name == "kinase activity").all()
self.assertEqual(1, len(kin_list), msg="number of kinase NamespaceEntrys")
kin = list(kin_list)[0]
self.assertEqual("kinase activity", kin.name)
@mock_bel_resources
def test_subject_activity_custom(self, mock):
p1_name = n()
p2_name = n()
dummy_activity_namespace = n()
dummy_activity_name = n()
self.graph.add_increases(
Protein(name=p1_name, namespace="HGNC"),
Protein(name=p2_name, namespace="HGNC"),
evidence=n(),
citation=n(),
source_modifier=activity(name=dummy_activity_name, namespace=dummy_activity_namespace),
)
make_dummy_namespaces(self.manager, self.graph)
network = self.manager.insert_graph(self.graph)
self.assertEqual(2, network.nodes.count())
self.assertEqual(1, network.edges.count())
kin_list = self.manager.session.query(NamespaceEntry).filter(NamespaceEntry.name == dummy_activity_name).all()
self.assertEqual(1, len(kin_list))
kin = list(kin_list)[0]
self.assertEqual(dummy_activity_name, kin.name)
@mock_bel_resources
def test_object_activity_default(self, mock):
p1_name = n()
p2_name = n()
self.graph.add_increases(
Protein(name=p1_name, namespace="HGNC"),
Protein(name=p2_name, namespace="HGNC"),
evidence=n(),
citation=n(),
target_modifier=activity("kin"),
)
make_dummy_namespaces(self.manager, self.graph)
network = self.manager.insert_graph(self.graph)
self.assertEqual(2, network.nodes.count())
self.assertEqual(1, network.edges.count())
kin_list = self.manager.session.query(NamespaceEntry).filter(NamespaceEntry.name == "kinase activity").all()
self.assertEqual(1, len(kin_list))
kin = list(kin_list)[0]
self.assertEqual("kinase activity", kin.name)
@mock_bel_resources
def test_object_activity_custom(self, mock):
p1_name = n()
p2_name = n()
dummy_activity_namespace = n()
dummy_activity_name = n()
self.graph.add_increases(
Protein(name=p1_name, namespace="HGNC"),
Protein(name=p2_name, namespace="HGNC"),
evidence=n(),
citation=n(),
target_modifier=activity(name=dummy_activity_name, namespace=dummy_activity_namespace),
)
make_dummy_namespaces(self.manager, self.graph)
network = self.manager.insert_graph(self.graph)
self.assertEqual(2, network.nodes.count())
self.assertEqual(1, network.edges.count())
kin_list = self.manager.session.query(NamespaceEntry).filter(NamespaceEntry.name == dummy_activity_name).all()
self.assertEqual(1, len(kin_list))
kin = list(kin_list)[0]
self.assertEqual(dummy_activity_name, kin.name)
def test_subject_degradation(self):
self.graph.add_increases(
Protein(name="YFG", namespace="HGNC"),
Protein(name="YFG2", namespace="HGNC"),
evidence=n(),
citation=n(),
source_modifier=degradation(),
)
make_dummy_namespaces(self.manager, self.graph)
network = self.manager.insert_graph(self.graph)
self.assertEqual(2, network.nodes.count())
self.assertEqual(1, network.edges.count())
edge = network.edges.first()
# self.assertEqual(1, edge.properties.count())
def test_object_degradation(self):
self.graph.add_increases(
Protein(name="YFG", namespace="HGNC"),
Protein(name="YFG2", namespace="HGNC"),
evidence=n(),
citation=n(),
target_modifier=degradation(),
)
make_dummy_namespaces(self.manager, self.graph)
network = self.manager.insert_graph(self.graph)
self.assertEqual(2, network.nodes.count())
self.assertEqual(1, network.edges.count())
edge = network.edges.first()
# self.assertEqual(1, edge.properties.count())
def test_subject_location(self):
self.graph.add_increases(
Protein(name="YFG", namespace="HGNC"),
Protein(name="YFG2", namespace="HGNC"),
evidence=n(),
citation=n(),
source_modifier=location(Entity(namespace="GO", name="nucleus", identifier="0005634")),
)
make_dummy_namespaces(self.manager, self.graph)
network = self.manager.insert_graph(self.graph)
self.assertEqual(2, network.nodes.count())
self.assertEqual(1, network.edges.count())
edge = network.edges.first()
# self.assertEqual(1, edge.properties.count())
def test_mixed_1(self):
"""Test mixed having location and something else."""
self.graph.add_increases(
Protein(namespace="HGNC", name="CDC42"),
Protein(namespace="HGNC", name="PAK2"),
evidence="""Summary: PAK proteins, a family of serine/threonine p21-activating kinases, include PAK1, PAK2,
PAK3 and PAK4. PAK proteins are critical effectors that link Rho GTPases to cytoskeleton reorganization
and nuclear signaling. They serve as targets for the small GTP binding proteins Cdc42 and Rac and have
been implicated in a wide range of biological activities. PAK4 interacts specifically with the GTP-bound
form of Cdc42Hs and weakly activates the JNK family of MAP kinases. PAK4 is a mediator of filopodia
formation and may play a role in the reorganization of the actin cytoskeleton. Multiple alternatively
spliced transcript variants encoding distinct isoforms have been found for this gene.""",
citation={
NAMESPACE: "Online Resource",
IDENTIFIER: "PAK4 Hs ENTREZ Gene Summary",
},
annotations={"Species": "9606"},
source_modifier=activity("gtp"),
target_modifier=activity("kin"),
)
make_dummy_namespaces(self.manager, self.graph)
make_dummy_annotations(self.manager, self.graph)
network = self.manager.insert_graph(self.graph)
self.assertEqual(2, network.nodes.count())
self.assertEqual(1, network.edges.count())
edge = network.edges.first()
# self.assertEqual(2, edge.properties.count())
# FIXME
def test_mixed_2(self):
"""Tests both subject and object activity with location information as well."""
self.graph.add_directly_increases(
Protein(namespace="HGNC", name="HDAC4"),
Protein(namespace="HGNC", name="MEF2A"),
citation="10487761",
evidence=""""In the nucleus, HDAC4 associates with the myocyte enhancer factor MEF2A. Binding of HDAC4 to
MEF2A results in the repression of MEF2A transcriptional activation, a function that requires the
deacetylase domain of HDAC4.""",
annotations={"Species": "9606"},
source_modifier=activity("cat", location=Entity(namespace="GO", name="nucleus")),
target_modifier=activity("tscript", location=Entity(namespace="GO", name="nucleus")),
)
make_dummy_namespaces(self.manager, self.graph)
make_dummy_annotations(self.manager, self.graph)
network = self.manager.insert_graph(self.graph)
self.assertEqual(2, network.nodes.count())
self.assertEqual(1, network.edges.count())
edge = network.edges.first()
# self.assertEqual(4, edge.properties.count())
# self.assertEqual(2, edge.properties.filter(Property.is_subject).count())
# self.assertEqual(2, edge.properties.filter(not_(Property.is_subject)).count())
class TestNoAddNode(TemporaryCacheMixin):
"""Tests scenarios where an instance of :class:`BELGraph` may contain edges that refer to uncached resources, and
therefore should not be added to the edge store."""
@mock_bel_resources
def test_regex_lookup(self, mock): # FIXME this test needs to be put somewhere else
"""Test that regular expression nodes get love too."""
graph = BELGraph(
name="Regular Expression Test Graph",
description="Help test regular expression namespaces",
version="1.0.0",
)
dbsnp = "dbSNP"
DBSNP_PATTERN = "rs[0-9]+"
graph.namespace_pattern[dbsnp] = DBSNP_PATTERN
rs1234 = Gene(namespace=dbsnp, name="rs1234")
rs1235 = Gene(namespace=dbsnp, name="rs1235")
graph.add_node_from_data(rs1234)
graph.add_node_from_data(rs1235)
rs1234_hash = rs1234.md5
rs1235_hash = rs1235.md5
self.manager.insert_graph(graph)
rs1234_lookup = self.manager.get_node_by_hash(rs1234_hash)
self.assertIsNotNone(rs1234_lookup)
self.assertEqual("Gene", rs1234_lookup.type)
self.assertEqual("g(dbSNP:rs1234)", rs1234_lookup.bel)
self.assertEqual(rs1234_hash, rs1234_lookup.md5)
self.assertIsNotNone(rs1234_lookup.namespace_entry)
self.assertEqual("rs1234", rs1234_lookup.namespace_entry.name)
self.assertEqual("dbSNP", rs1234_lookup.namespace_entry.namespace.keyword)
self.assertEqual(DBSNP_PATTERN, rs1234_lookup.namespace_entry.namespace.pattern)
rs1235_lookup = self.manager.get_node_by_hash(rs1235_hash)
self.assertIsNotNone(rs1235_lookup)
self.assertEqual("Gene", rs1235_lookup.type)
self.assertEqual("g(dbSNP:rs1235)", rs1235_lookup.bel)
self.assertEqual(rs1235_hash, rs1235_lookup.md5)
self.assertIsNotNone(rs1235_lookup.namespace_entry)
self.assertEqual("rs1235", rs1235_lookup.namespace_entry.name)
self.assertEqual("dbSNP", rs1235_lookup.namespace_entry.namespace.keyword)
self.assertEqual(DBSNP_PATTERN, rs1235_lookup.namespace_entry.namespace.pattern)
class TestEquivalentNodes(unittest.TestCase):
def test_direct_has_namespace(self):
graph = BELGraph()
n1 = Protein(namespace="HGNC", name="CD33", identifier="1659")
n2 = Protein(namespace="NOPE", name="NOPE", identifier="NOPE")
graph.add_increases(n1, n2, citation=n(), evidence=n())
self.assertEqual({n1}, graph.get_equivalent_nodes(n1))
self.assertTrue(graph.node_has_namespace(n1, "HGNC"))
self.assertFalse(graph.node_has_namespace(n2, "HGNC"))
def test_indirect_has_namespace(self):
graph = BELGraph()
a = Protein(namespace="HGNC", name="CD33")
b = Protein(namespace="HGNCID", identifier="1659")
graph.add_equivalence(a, b)
self.assertEqual({a, b}, graph.get_equivalent_nodes(a))
self.assertEqual({a, b}, graph.get_equivalent_nodes(b))
self.assertTrue(graph.node_has_namespace(a, "HGNC"))
self.assertTrue(graph.node_has_namespace(b, "HGNC"))
def test_triangle_has_namespace(self):
graph = BELGraph()
a = Protein(namespace="A", name="CD33")
b = Protein(namespace="B", identifier="1659")
c = Protein(namespace="C", identifier="1659")
d = Protein(namespace="HGNC", identifier="1659")
graph.add_equivalence(a, b)
graph.add_equivalence(b, c)
graph.add_equivalence(c, a)
graph.add_equivalence(c, d)
self.assertEqual({a, b, c, d}, graph.get_equivalent_nodes(a))
self.assertEqual({a, b, c, d}, graph.get_equivalent_nodes(b))
self.assertEqual({a, b, c, d}, graph.get_equivalent_nodes(c))
self.assertEqual({a, b, c, d}, graph.get_equivalent_nodes(d))
self.assertTrue(graph.node_has_namespace(a, "HGNC"))
self.assertTrue(graph.node_has_namespace(b, "HGNC"))
self.assertTrue(graph.node_has_namespace(c, "HGNC"))
self.assertTrue(graph.node_has_namespace(d, "HGNC"))
if __name__ == "__main__":
unittest.main()
| 37.318108
| 138
| 0.661392
|
d95a4b1b14c72fffe795ba4cd4072bc5c9800d94
| 1,916
|
py
|
Python
|
glance/cmd/registry.py
|
cloudbau/glance
|
616b097c052f5bf59b05326ed1d2d1ae1c703dc9
|
[
"Apache-2.0"
] | null | null | null |
glance/cmd/registry.py
|
cloudbau/glance
|
616b097c052f5bf59b05326ed1d2d1ae1c703dc9
|
[
"Apache-2.0"
] | null | null | null |
glance/cmd/registry.py
|
cloudbau/glance
|
616b097c052f5bf59b05326ed1d2d1ae1c703dc9
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Reference implementation server for Glance Registry
"""
import eventlet
import os
import sys
# Monkey patch socket and time
eventlet.patcher.monkey_patch(all=False, socket=True, time=True)
# If ../glance/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')):
sys.path.insert(0, possible_topdir)
from glance.common import config
from glance.common import wsgi
from glance.openstack.common import log
def main():
try:
config.parse_args()
log.setup('glance')
server = wsgi.Server()
server.start(config.load_paste_app('glance-registry'),
default_port=9191)
server.wait()
except RuntimeError as e:
sys.exit("ERROR: %s" % e)
if __name__ == '__main__':
main()
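# Illustrative invocation (a sketch; the paste app name and default port come from
# the code above, the oslo.config style --config-file path is a placeholder):
#
#   python glance/cmd/registry.py --config-file /etc/glance/glance-registry.conf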
| 31.933333
| 78
| 0.688935
|
dd0738e4627f08183f55cbc961da2457611d454c
| 15,885
|
py
|
Python
|
sapmon/payload/sapmon.py
|
shaaga/AzureMonitorForSAPSolutions
|
5514ca0b813799a7565828a0d54007e8eb9d9d90
|
[
"MIT"
] | 2
|
2019-10-09T19:54:01.000Z
|
2019-11-12T14:18:55.000Z
|
sapmon/payload/sapmon.py
|
ne-msft/AzureMonitorForSAPSolutions
|
3913b701f58d8152b43dc17afcf168f0d39372fe
|
[
"MIT"
] | null | null | null |
sapmon/payload/sapmon.py
|
ne-msft/AzureMonitorForSAPSolutions
|
3913b701f58d8152b43dc17afcf168f0d39372fe
|
[
"MIT"
] | null | null | null |
#
# Azure Monitor for SAP Solutions - Payload
# (to be deployed on collector VM)
#
# License: GNU General Public License (GPL)
# (c) 2020 Microsoft Corp.
#
# Python modules
from abc import ABC, abstractmethod
import argparse
import json
import os
import re
import sys
import threading
import traceback
# Payload modules
from const import *
from helper.azure import *
from helper.context import Context
from helper.tools import *
from helper.tracing import *
from helper.providerfactory import *
from helper.updateprofile import *
from helper.updatefactory import *
###############################################################################
class ProviderInstanceThread(threading.Thread):
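    # One worker thread per provider instance: runs every check that is enabled
    # and due, ingests each result into Log Analytics (and, if enabled, Customer
    # Analytics), and persists the updated provider state afterwards.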
def __init__(self, providerInstance):
threading.Thread.__init__(self)
self.providerInstance = providerInstance
def run(self):
global ctx, tracer
for check in self.providerInstance.checks:
tracer.info("starting check %s" % (check.fullName))
# Skip this check if it's not enabled or not due yet
if (check.isEnabled() == False) or (check.isDue() == False):
continue
# Run all actions that are part of this check
resultJson = check.run()
# Ingest result into Log Analytics
ctx.azLa.ingest(check.customLog,
resultJson,
check.colTimeGenerated)
# Persist updated internal state to provider state file
self.providerInstance.writeState()
# Ingest result into Customer Analytics
enableCustomerAnalytics = ctx.globalParams.get("enableCustomerAnalytics", True)
if enableCustomerAnalytics and check.includeInCustomerAnalytics:
tracing.ingestCustomerAnalytics(tracer,
ctx,
check.customLog,
resultJson)
tracer.info("finished check %s" % (check.fullName))
return
###############################################################################
# Load entire config from KeyVault (global parameters and provider instances)
def loadConfig() -> bool:
global ctx, tracer
tracer.info("loading config from KeyVault")
secrets = ctx.azKv.getCurrentSecrets()
for secretName in secrets.keys():
tracer.debug("parsing KeyVault secret %s" % secretName)
secretValue = secrets[secretName]
try:
providerProperties = json.loads(secretValue)
except json.decoder.JSONDecodeError as e:
tracer.error("invalid JSON format for secret %s (%s)" % (secretName,
e))
continue
if secretName == CONFIG_SECTION_GLOBAL:
ctx.globalParams = providerProperties
tracer.debug("successfully loaded global config")
else:
instanceName = providerProperties.get("name", None)
providerType = providerProperties.get("type", None)
try:
providerInstance = ProviderFactory.makeProviderInstance(providerType,
tracer,
ctx,
providerProperties,
skipContent = False)
except Exception as e:
tracer.error("could not validate provider instance %s (%s)" % (instanceName,
e))
continue
ctx.instances.append(providerInstance)
tracer.debug("successfully loaded config for provider instance %s" % instanceName)
if ctx.globalParams == {} or len(ctx.instances) == 0:
tracer.error("did not find any provider instances in KeyVault")
return False
return True
# Save specific instance properties to customer KeyVault
def saveInstanceToConfig(instance: Dict[str, str]) -> bool:
global ctx, tracer
instanceName = instance.get("name", None)
tracer.info("saving instance %s to customer KeyVault" % instanceName)
try:
secretValue = json.dumps(instance)
    except (TypeError, ValueError) as e:  # json.dumps raises TypeError/ValueError for unserializable content
tracer.error("cannot JSON encode instance properties (%s)" % e)
return False
result = ctx.azKv.setSecret(instanceName, secretValue)
return result
# Store credentials in the customer KeyVault
# (To be executed as custom script upon initial deployment of collector VM)
def onboard(args: str) -> None:
global ctx, tracer
tracer.info("starting onboarding")
# Update global parameters and save them to KeyVault
ctx.globalParams = {"logAnalyticsWorkspaceId": args.logAnalyticsWorkspaceId,
"logAnalyticsSharedKey": args.logAnalyticsSharedKey,
"enableCustomerAnalytics": args.enableCustomerAnalytics}
if not ctx.azKv.setSecret(CONFIG_SECTION_GLOBAL,
json.dumps(ctx.globalParams)):
tracer.critical("could not save global config to KeyVault")
sys.exit(ERROR_SETTING_KEYVAULT_SECRET)
tracer.info("onboarding successfully completed")
return
# Used by "onboard" to set each provider instance,
# or by "provider add" to set a single provider instance
def addProvider(args: str = None,
instanceProperties: Dict[str, str] = None) -> bool:
global ctx, tracer
try:
# TODO: validate metadata
instanceProperties = {"name": args.name,
"type": args.type,
"properties": json.loads(args.properties),
"metadata": json.loads(args.metadata)}
except json.decoder.JSONDecodeError as e:
tracer.error("invalid JSON format (%s)" % e)
return False
tracer.info("trying to add new provider instance (name=%s, type=%s)" % (args.name,
args.type))
# Instantiate provider, so we can run validation check
try:
newProviderInstance = ProviderFactory.makeProviderInstance(args.type,
tracer,
ctx,
instanceProperties,
skipContent = True)
except Exception as e:
tracer.critical("could not instantiate %s (%s)" % (args.type,
e))
sys.exit(ERROR_ADDING_PROVIDER)
if not newProviderInstance.validate():
tracer.critical("validation check for provider instance %s failed" % newProviderInstance.fullName)
sys.exit(ERROR_ADDING_PROVIDER)
if not saveInstanceToConfig(instanceProperties):
tracer.error("could not save provider instance %s to KeyVault" % newProviderInstance.fullName)
sys.exit(ERROR_ADDING_PROVIDER)
tracer.info("successfully added provider instance %s to KeyVault" % newProviderInstance.fullName)
return True
# Delete a single provider instance by name
def deleteProvider(args: str) -> None:
global ctx, tracer
tracer.info("retrieving provider list from KeyVault")
# Clean up state file
fileToDelete = "%s.state" % args.name
found = False
for f in os.listdir(PATH_STATE):
if f == fileToDelete:
os.remove(os.path.join(PATH_STATE, f))
tracer.info("state file %s successfully deleted" % fileToDelete)
found = True
break
if not found:
tracer.error("state file %s not found" % fileToDelete)
# Delete corresponding secret from KeyVault
secretToDelete = args.name
secrets = ctx.azKv.getCurrentSecrets()
if secretToDelete not in secrets.keys():
tracer.error("provider instance %s not found in KeyVault (already deleted?)" % secretToDelete)
else:
if not ctx.azKv.deleteSecret(secretToDelete):
tracer.error("error deleting KeyVault secret %s (already marked for deletion?)" % secretToDelete)
else:
tracer.info("provider %s successfully deleted from KeyVault" % secretToDelete)
return
# Execute the actual monitoring payload
def monitor(args: str) -> None:
global ctx, tracer
tracer.info("starting monitor payload")
threads = []
if not loadConfig():
tracer.critical("failed to load config from KeyVault")
sys.exit(ERROR_LOADING_CONFIG)
logAnalyticsWorkspaceId = ctx.globalParams.get("logAnalyticsWorkspaceId", None)
logAnalyticsSharedKey = ctx.globalParams.get("logAnalyticsSharedKey", None)
if not logAnalyticsWorkspaceId or not logAnalyticsSharedKey:
tracer.critical("global config must contain logAnalyticsWorkspaceId and logAnalyticsSharedKey")
sys.exit(ERROR_GETTING_LOG_CREDENTIALS)
ctx.azLa = AzureLogAnalytics(tracer,
logAnalyticsWorkspaceId,
logAnalyticsSharedKey)
for i in ctx.instances:
thread = ProviderInstanceThread(i)
thread.start()
threads.append(thread)
for t in threads:
t.join()
tracer.info("monitor payload successfully completed")
return
# prepareUpdate will prepare the resources like keyvault, log analytics etc for the version passed as an argument
# prepareUpdate needs to be run when a version upgrade requires specific update to the content of the resources
def prepareUpdate(args: str) -> None:
global ctx, tracer
tracer.info("Preparing for %s" % args.toVersion)
try:
updateProfileFactoryObj = updateProfileFactory()
updateprofile = updateProfileFactoryObj.createUpdateProfile(args.toVersion)
updateprofile.update(ctx, args.fromVersion)
except Exception as e:
sys.stderr.write("Could not fulfill the update requirements for %s" % args.toVersion)
# Ensures the required directory structure exists
def ensureDirectoryStructure() -> None:
for path in [PATH_STATE, PATH_TRACE]:
try:
if not os.path.exists(path):
os.makedirs(path)
except Exception as e:
sys.stderr.write("could not create required directory %s; please check permissions (%s)" % (path,
e))
sys.exit(ERROR_FILE_PERMISSION_DENIED)
return
# Main function with argument parser
def main() -> None:
def addVerboseToParser(p: argparse.ArgumentParser) -> None:
p.add_argument("--verbose",
action = "store_true",
dest = "verbose",
help = "run in verbose mode")
return
global ctx, tracer
# Make sure we have all directories in place
ensureDirectoryStructure()
# Build the argument parser
parser = argparse.ArgumentParser(description = "SAP Monitor Payload")
subParsers = parser.add_subparsers(title = "actions",
help = "Select action to run")
subParsers.required = True
subParsers.dest = "command"
# Parsers for "provider" command
prvParser = subParsers.add_parser("provider",
description = "Configuration of monitoring providers",
help = "Configure monitoring providers and their properties")
prvSubParsers = prvParser.add_subparsers(title = "action",
help = "Select provider action to run")
prvSubParsers.required = True
prvSubParsers.dest = "command"
prvAddParser = prvSubParsers.add_parser("add",
description = "Add a provider",
help = "Add a new monitoring provider to this SAP Monitor")
prvAddParser.add_argument("--name",
required = True,
type = str,
help = "Name of the monitoring provider")
prvAddParser.add_argument("--type",
required = True,
type = str,
help = "Type of the monitoring provider")
prvAddParser.add_argument("--properties",
required = True,
type = str,
help = "Properties of the monitoring provider")
prvAddParser.add_argument("--metadata",
required = False,
type = str,
help = "Metadata of the monitoring provider",
default = "{}")
addVerboseToParser(prvAddParser)
prvAddParser.set_defaults(func = addProvider)
prvDelParser = prvSubParsers.add_parser("delete",
description = "Delete a provider",
help = "Delete an existing monitoring provider from this SAP Monitor")
prvDelParser.add_argument("--name",
required = True,
type = str,
help = "Name of the monitoring provider")
addVerboseToParser(prvDelParser)
prvDelParser.set_defaults(func = deleteProvider)
# Parsers for "monitor" command
monParser = subParsers.add_parser("monitor",
description = "Monitoring payload",
help = "Execute the monitoring payload")
addVerboseToParser(monParser)
monParser.set_defaults(func = monitor)
# Parsers for "onboard" command
onbParser = subParsers.add_parser("onboard",
description = "Onboard payload",
help = "Onboard payload by adding credentials into KeyVault")
onbParser.set_defaults(func = onboard,
command = "onboard")
onbParser.add_argument("--logAnalyticsWorkspaceId",
required = True,
type = str,
help = "Workspace ID (customer ID) of the Log Analytics Workspace")
onbParser.add_argument("--logAnalyticsSharedKey",
required = True,
type = str,
help = "Shared key (primary) of the Log Analytics Workspace")
onbParser.add_argument("--enableCustomerAnalytics",
required = False,
help = "Setting to enable sending metrics to Microsoft",
action = "store_true",
dest="enableCustomerAnalytics")
addVerboseToParser(onbParser)
onbParser.set_defaults(enableCustomerAnalytics=False)
# Parsers for "update" command
updParser = subParsers.add_parser("update",
description = "Prepares resources for the given version",
help = "Run this before starting the next version")
updParser.add_argument("--toVersion",
required = True,
type = str,
help = "Prepare resources for this target version")
updParser.add_argument("--fromVersion",
required = True,
type = str,
help = "Pass the previous version (i.e. the currently running version)")
addVerboseToParser(updParser)
updParser.set_defaults(func = prepareUpdate)
args = parser.parse_args()
tracer = tracing.initTracer(args)
ctx = Context(tracer, args.command)
args.func(args)
return
ctx = None
tracer = None
if __name__ == "__main__":
main()
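# --- Illustrative command lines (a sketch; flag names are taken from the argparse
# setup above, all values are placeholders) ---
#
#   python3 sapmon.py onboard --logAnalyticsWorkspaceId <workspace-id> \
#                             --logAnalyticsSharedKey <shared-key>
#   python3 sapmon.py provider add --name myProvider --type <provider-type> \
#                                  --properties '{"key": "value"}'
#   python3 sapmon.py monitor --verbose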
| 42.473262
| 113
| 0.584199
|
135ffd49cdb03e8f2c94c9f5f1d54697f553b262
| 219
|
py
|
Python
|
terrascript/data/ptyng/null.py
|
mjuenema/python-terrascript
|
6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d
|
[
"BSD-2-Clause"
] | 507
|
2017-07-26T02:58:38.000Z
|
2022-01-21T12:35:13.000Z
|
terrascript/data/ptyng/null.py
|
mjuenema/python-terrascript
|
6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d
|
[
"BSD-2-Clause"
] | 135
|
2017-07-20T12:01:59.000Z
|
2021-10-04T22:25:40.000Z
|
terrascript/data/ptyng/null.py
|
mjuenema/python-terrascript
|
6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d
|
[
"BSD-2-Clause"
] | 81
|
2018-02-20T17:55:28.000Z
|
2022-01-31T07:08:40.000Z
|
# terrascript/data/ptyng/null.py
# Automatically generated by tools/makecode.py (24-Aug-2021 11:33:29 UTC)
import terrascript
class null_data_source(terrascript.Data):
pass
__all__ = [
"null_data_source",
]
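# Illustrative usage (a minimal sketch; assumes this generated module is importable
# from the installed terrascript package):
#
#   import terrascript
#   from terrascript.data.ptyng.null import null_data_source
#
#   config = terrascript.Terrascript()
#   config += null_data_source("example")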
| 16.846154
| 73
| 0.744292
|
5db87710158c00985c443569e29938c94fd78745
| 447
|
py
|
Python
|
tensormap-server/endpoints/DataProcess/models.py
|
SahanDisa/tensormap
|
ff2f26673ea745b6cb625692707a7d6dea677537
|
[
"Apache-2.0"
] | 34
|
2019-01-29T21:13:33.000Z
|
2020-09-09T15:23:35.000Z
|
tensormap-server/endpoints/DataProcess/models.py
|
SahanDisa/tensormap
|
ff2f26673ea745b6cb625692707a7d6dea677537
|
[
"Apache-2.0"
] | 83
|
2019-01-30T09:06:14.000Z
|
2020-07-05T18:12:25.000Z
|
tensormap-server/endpoints/DataProcess/models.py
|
SahanDisa/tensormap
|
ff2f26673ea745b6cb625692707a7d6dea677537
|
[
"Apache-2.0"
] | 87
|
2019-02-24T09:48:41.000Z
|
2020-08-08T17:00:06.000Z
|
from shared.utils import get_db_ref
db = get_db_ref()
class DataProcess(db.Model):
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
target = db.Column(db.String(10), nullable=False)
file_id = db.Column(db.Integer, db.ForeignKey('data_file.id'))
created_on = db.Column(db.DateTime, server_default=db.func.now())
updated_on = db.Column(db.DateTime, server_default=db.func.now(), server_onupdate=db.func.now())
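# Illustrative usage (a sketch; assumes an active Flask application context and the
# shared SQLAlchemy session returned by get_db_ref()):
#
#   record = DataProcess(target='label', file_id=1)
#   db.session.add(record)
#   db.session.commit()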
| 37.25
| 100
| 0.731544
|
9038660c88246ec0d361fae07743b2dfad40d6b9
| 99,037
|
py
|
Python
|
mypy/messages.py
|
just-linux/mypy
|
eb6f09288625dcc147db51939dfaa524be67fc0a
|
[
"PSF-2.0"
] | 1
|
2021-08-06T13:31:04.000Z
|
2021-08-06T13:31:04.000Z
|
mypy/messages.py
|
just-linux/mypy
|
eb6f09288625dcc147db51939dfaa524be67fc0a
|
[
"PSF-2.0"
] | null | null | null |
mypy/messages.py
|
just-linux/mypy
|
eb6f09288625dcc147db51939dfaa524be67fc0a
|
[
"PSF-2.0"
] | null | null | null |
"""Facilities for generating error messages during type checking.
Don't add any non-trivial message construction logic to the type
checker, as it can compromise clarity and make messages less
consistent. Add such logic to this module instead. Literal messages, including those
with format args, should be defined as constants in mypy.message_registry.
Historically we tried to avoid all message string literals in the type
checker but we are moving away from this convention.
"""
from mypy.ordered_dict import OrderedDict
import re
import difflib
from textwrap import dedent
from typing import cast, List, Dict, Any, Sequence, Iterable, Tuple, Set, Optional, Union
from typing_extensions import Final
from mypy.erasetype import erase_type
from mypy.errors import Errors
from mypy.types import (
Type, CallableType, Instance, TypeVarType, TupleType, TypedDictType, LiteralType,
UnionType, NoneType, AnyType, Overloaded, FunctionLike, DeletedType, TypeType, TypeVarDef,
UninhabitedType, TypeOfAny, UnboundType, PartialType, get_proper_type, ProperType,
get_proper_types
)
from mypy.typetraverser import TypeTraverserVisitor
from mypy.nodes import (
TypeInfo, Context, MypyFile, op_methods, op_methods_to_symbols,
FuncDef, reverse_builtin_aliases,
ARG_POS, ARG_OPT, ARG_NAMED, ARG_NAMED_OPT, ARG_STAR, ARG_STAR2,
ReturnStmt, NameExpr, Var, CONTRAVARIANT, COVARIANT, SymbolNode,
CallExpr, IndexExpr, StrExpr, SymbolTable, TempNode
)
from mypy.subtypes import (
is_subtype, find_member, get_member_flags,
IS_SETTABLE, IS_CLASSVAR, IS_CLASS_OR_STATIC,
)
from mypy.sametypes import is_same_type
from mypy.util import unmangle
from mypy.errorcodes import ErrorCode
from mypy import message_registry, errorcodes as codes
TYPES_FOR_UNIMPORTED_HINTS = {
'typing.Any',
'typing.Callable',
'typing.Dict',
'typing.Iterable',
'typing.Iterator',
'typing.List',
'typing.Optional',
'typing.Set',
'typing.Tuple',
'typing.TypeVar',
'typing.Union',
'typing.cast',
} # type: Final
ARG_CONSTRUCTOR_NAMES = {
ARG_POS: "Arg",
ARG_OPT: "DefaultArg",
ARG_NAMED: "NamedArg",
ARG_NAMED_OPT: "DefaultNamedArg",
ARG_STAR: "VarArg",
ARG_STAR2: "KwArg",
} # type: Final
# Map from the full name of a missing definition to the test fixture (under
# test-data/unit/fixtures/) that provides the definition. This is used for
# generating better error messages when running mypy tests only.
SUGGESTED_TEST_FIXTURES = {
'builtins.list': 'list.pyi',
'builtins.dict': 'dict.pyi',
'builtins.set': 'set.pyi',
'builtins.tuple': 'tuple.pyi',
'builtins.bool': 'bool.pyi',
'builtins.Exception': 'exception.pyi',
'builtins.BaseException': 'exception.pyi',
'builtins.isinstance': 'isinstancelist.pyi',
'builtins.property': 'property.pyi',
'builtins.classmethod': 'classmethod.pyi',
} # type: Final
class MessageBuilder:
"""Helper class for reporting type checker error messages with parameters.
The methods of this class need to be provided with the context within a
file; the errors member manages the wider context.
IDEA: Support a 'verbose mode' that includes full information about types
in error messages and that may otherwise produce more detailed error
messages.
"""
# Report errors using this instance. It knows about the current file and
# import context.
errors = None # type: Errors
modules = None # type: Dict[str, MypyFile]
# Number of times errors have been disabled.
disable_count = 0
# Hack to deduplicate error messages from union types
disable_type_names = 0
def __init__(self, errors: Errors, modules: Dict[str, MypyFile]) -> None:
self.errors = errors
self.modules = modules
self.disable_count = 0
self.disable_type_names = 0
#
# Helpers
#
def copy(self) -> 'MessageBuilder':
new = MessageBuilder(self.errors.copy(), self.modules)
new.disable_count = self.disable_count
new.disable_type_names = self.disable_type_names
return new
def clean_copy(self) -> 'MessageBuilder':
errors = self.errors.copy()
errors.error_info_map = OrderedDict()
return MessageBuilder(errors, self.modules)
def add_errors(self, messages: 'MessageBuilder') -> None:
"""Add errors in messages to this builder."""
if self.disable_count <= 0:
for errs in messages.errors.error_info_map.values():
for info in errs:
self.errors.add_error_info(info)
def disable_errors(self) -> None:
self.disable_count += 1
def enable_errors(self) -> None:
self.disable_count -= 1
def is_errors(self) -> bool:
return self.errors.is_errors()
def most_recent_context(self) -> Context:
"""Return a dummy context matching the most recent generated error in current file."""
line, column = self.errors.most_recent_error_location()
node = TempNode(NoneType())
node.line = line
node.column = column
return node
def report(self,
msg: str,
context: Optional[Context],
severity: str,
*,
code: Optional[ErrorCode] = None,
file: Optional[str] = None,
origin: Optional[Context] = None,
offset: int = 0) -> None:
"""Report an error or note (unless disabled)."""
if origin is not None:
end_line = origin.end_line
elif context is not None:
end_line = context.end_line
else:
end_line = None
if self.disable_count <= 0:
self.errors.report(context.get_line() if context else -1,
context.get_column() if context else -1,
msg, severity=severity, file=file, offset=offset,
origin_line=origin.get_line() if origin else None,
end_line=end_line,
code=code)
def fail(self,
msg: str,
context: Optional[Context],
*,
code: Optional[ErrorCode] = None,
file: Optional[str] = None,
origin: Optional[Context] = None) -> None:
"""Report an error message (unless disabled)."""
self.report(msg, context, 'error', code=code, file=file, origin=origin)
def note(self,
msg: str,
context: Context,
file: Optional[str] = None,
origin: Optional[Context] = None,
offset: int = 0,
*,
code: Optional[ErrorCode] = None) -> None:
"""Report a note (unless disabled)."""
self.report(msg, context, 'note', file=file, origin=origin,
offset=offset, code=code)
def note_multiline(self, messages: str, context: Context, file: Optional[str] = None,
origin: Optional[Context] = None, offset: int = 0,
code: Optional[ErrorCode] = None) -> None:
"""Report as many notes as lines in the message (unless disabled)."""
for msg in messages.splitlines():
self.report(msg, context, 'note', file=file, origin=origin,
offset=offset, code=code)
#
# Specific operations
#
# The following operations are for generating specific error messages. They
# get some information as arguments, and they build an error message based
# on them.
def has_no_attr(self,
original_type: Type,
typ: Type,
member: str,
context: Context,
module_symbol_table: Optional[SymbolTable] = None) -> Type:
"""Report a missing or non-accessible member.
original_type is the top-level type on which the error occurred.
typ is the actual type that is missing the member. These can be
different, e.g., in a union, original_type will be the union and typ
will be the specific item in the union that does not have the member
attribute.
'module_symbol_table' is passed to this function if the type for which we
are trying to get a member was originally a module. The SymbolTable allows
        us to look up and suggest attributes of the module, since they are not
        directly available on original_type.
If member corresponds to an operator, use the corresponding operator
name in the messages. Return type Any.
"""
original_type = get_proper_type(original_type)
typ = get_proper_type(typ)
if (isinstance(original_type, Instance) and
original_type.type.has_readable_member(member)):
self.fail('Member "{}" is not assignable'.format(member), context)
elif member == '__contains__':
self.fail('Unsupported right operand type for in ({})'.format(
format_type(original_type)), context, code=codes.OPERATOR)
elif member in op_methods.values():
            # Access to a binary operator member (e.g. __add__). This case does
# not handle indexing operations.
for op, method in op_methods.items():
if method == member:
self.unsupported_left_operand(op, original_type, context)
break
elif member == '__neg__':
self.fail('Unsupported operand type for unary - ({})'.format(
format_type(original_type)), context, code=codes.OPERATOR)
elif member == '__pos__':
self.fail('Unsupported operand type for unary + ({})'.format(
format_type(original_type)), context, code=codes.OPERATOR)
elif member == '__invert__':
self.fail('Unsupported operand type for ~ ({})'.format(
format_type(original_type)), context, code=codes.OPERATOR)
elif member == '__getitem__':
# Indexed get.
# TODO: Fix this consistently in format_type
if isinstance(original_type, CallableType) and original_type.is_type_obj():
self.fail('The type {} is not generic and not indexable'.format(
format_type(original_type)), context)
else:
self.fail('Value of type {} is not indexable'.format(
format_type(original_type)), context, code=codes.INDEX)
elif member == '__setitem__':
# Indexed set.
self.fail('Unsupported target for indexed assignment ({})'.format(
format_type(original_type)), context, code=codes.INDEX)
elif member == '__call__':
if isinstance(original_type, Instance) and \
(original_type.type.fullname == 'builtins.function'):
# "'function' not callable" is a confusing error message.
# Explain that the problem is that the type of the function is not known.
self.fail('Cannot call function of unknown type', context, code=codes.OPERATOR)
else:
self.fail('{} not callable'.format(format_type(original_type)), context,
code=codes.OPERATOR)
else:
# The non-special case: a missing ordinary attribute.
extra = ''
if member == '__iter__':
extra = ' (not iterable)'
elif member == '__aiter__':
extra = ' (not async iterable)'
if not self.disable_type_names:
failed = False
if isinstance(original_type, Instance) and original_type.type.names:
alternatives = set(original_type.type.names.keys())
if module_symbol_table is not None:
alternatives |= {key for key in module_symbol_table.keys()}
# in some situations, the member is in the alternatives set
# but since we're in this function, we shouldn't suggest it
if member in alternatives:
alternatives.remove(member)
matches = [m for m in COMMON_MISTAKES.get(member, []) if m in alternatives]
matches.extend(best_matches(member, alternatives)[:3])
if member == '__aiter__' and matches == ['__iter__']:
matches = [] # Avoid misleading suggestion
if member == '__div__' and matches == ['__truediv__']:
# TODO: Handle differences in division between Python 2 and 3 more cleanly
matches = []
if matches:
self.fail(
'{} has no attribute "{}"; maybe {}?{}'.format(
format_type(original_type),
member,
pretty_seq(matches, "or"),
extra,
),
context,
code=codes.ATTR_DEFINED)
failed = True
if not failed:
self.fail(
'{} has no attribute "{}"{}'.format(
format_type(original_type), member, extra),
context,
code=codes.ATTR_DEFINED)
elif isinstance(original_type, UnionType):
# The checker passes "object" in lieu of "None" for attribute
# checks, so we manually convert it back.
typ_format, orig_type_format = format_type_distinctly(typ, original_type)
if typ_format == '"object"' and \
any(type(item) == NoneType for item in original_type.items):
typ_format = '"None"'
self.fail('Item {} of {} has no attribute "{}"{}'.format(
typ_format, orig_type_format, member, extra), context,
code=codes.UNION_ATTR)
return AnyType(TypeOfAny.from_error)
def unsupported_operand_types(self,
op: str,
left_type: Any,
right_type: Any,
context: Context,
*,
code: ErrorCode = codes.OPERATOR) -> None:
"""Report unsupported operand types for a binary operation.
Types can be Type objects or strings.
"""
left_str = ''
if isinstance(left_type, str):
left_str = left_type
else:
left_str = format_type(left_type)
right_str = ''
if isinstance(right_type, str):
right_str = right_type
else:
right_str = format_type(right_type)
if self.disable_type_names:
msg = 'Unsupported operand types for {} (likely involving Union)'.format(op)
else:
msg = 'Unsupported operand types for {} ({} and {})'.format(
op, left_str, right_str)
self.fail(msg, context, code=code)
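    # For example, with both operand types known, the message above reads:
    #   Unsupported operand types for + ("int" and "str")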
def unsupported_left_operand(self, op: str, typ: Type,
context: Context) -> None:
if self.disable_type_names:
msg = 'Unsupported left operand type for {} (some union)'.format(op)
else:
msg = 'Unsupported left operand type for {} ({})'.format(
op, format_type(typ))
self.fail(msg, context, code=codes.OPERATOR)
def not_callable(self, typ: Type, context: Context) -> Type:
self.fail('{} not callable'.format(format_type(typ)), context)
return AnyType(TypeOfAny.from_error)
def untyped_function_call(self, callee: CallableType, context: Context) -> Type:
name = callable_name(callee) or '(unknown)'
self.fail('Call to untyped function {} in typed context'.format(name), context,
code=codes.NO_UNTYPED_CALL)
return AnyType(TypeOfAny.from_error)
def incompatible_argument(self,
n: int,
m: int,
callee: CallableType,
arg_type: Type,
arg_kind: int,
object_type: Optional[Type],
context: Context,
outer_context: Context) -> Optional[ErrorCode]:
"""Report an error about an incompatible argument type.
The argument type is arg_type, argument number is n and the
callee type is 'callee'. If the callee represents a method
that corresponds to an operator, use the corresponding
operator name in the messages.
        Return the error code that was used for the argument (multiple error
codes are possible).
"""
arg_type = get_proper_type(arg_type)
target = ''
callee_name = callable_name(callee)
if callee_name is not None:
name = callee_name
if callee.bound_args and callee.bound_args[0] is not None:
base = format_type(callee.bound_args[0])
else:
base = extract_type(name)
for method, op in op_methods_to_symbols.items():
for variant in method, '__r' + method[2:]:
# FIX: do not rely on textual formatting
if name.startswith('"{}" of'.format(variant)):
if op == 'in' or variant != method:
# Reversed order of base/argument.
self.unsupported_operand_types(op, arg_type, base,
context, code=codes.OPERATOR)
else:
self.unsupported_operand_types(op, base, arg_type,
context, code=codes.OPERATOR)
return codes.OPERATOR
if name.startswith('"__cmp__" of'):
self.unsupported_operand_types("comparison", arg_type, base,
context, code=codes.OPERATOR)
return codes.INDEX
if name.startswith('"__getitem__" of'):
self.invalid_index_type(arg_type, callee.arg_types[n - 1], base, context,
code=codes.INDEX)
return codes.INDEX
if name.startswith('"__setitem__" of'):
if n == 1:
self.invalid_index_type(arg_type, callee.arg_types[n - 1], base, context,
code=codes.INDEX)
return codes.INDEX
else:
msg = '{} (expression has type {}, target has type {})'
arg_type_str, callee_type_str = format_type_distinctly(arg_type,
callee.arg_types[n - 1])
self.fail(msg.format(message_registry.INCOMPATIBLE_TYPES_IN_ASSIGNMENT,
arg_type_str, callee_type_str),
context, code=codes.ASSIGNMENT)
return codes.ASSIGNMENT
target = 'to {} '.format(name)
msg = ''
code = codes.MISC
notes = [] # type: List[str]
if callee_name == '<list>':
name = callee_name[1:-1]
n -= 1
actual_type_str, expected_type_str = format_type_distinctly(arg_type,
callee.arg_types[0])
msg = '{} item {} has incompatible type {}; expected {}'.format(
name.title(), n, actual_type_str, expected_type_str)
code = codes.LIST_ITEM
elif callee_name == '<dict>':
name = callee_name[1:-1]
n -= 1
key_type, value_type = cast(TupleType, arg_type).items
expected_key_type, expected_value_type = cast(TupleType, callee.arg_types[0]).items
# don't increase verbosity unless there is need to do so
if is_subtype(key_type, expected_key_type):
key_type_str = format_type(key_type)
expected_key_type_str = format_type(expected_key_type)
else:
key_type_str, expected_key_type_str = format_type_distinctly(
key_type, expected_key_type)
if is_subtype(value_type, expected_value_type):
value_type_str = format_type(value_type)
expected_value_type_str = format_type(expected_value_type)
else:
value_type_str, expected_value_type_str = format_type_distinctly(
value_type, expected_value_type)
msg = '{} entry {} has incompatible type {}: {}; expected {}: {}'.format(
name.title(), n, key_type_str, value_type_str,
expected_key_type_str, expected_value_type_str)
code = codes.DICT_ITEM
elif callee_name == '<list-comprehension>':
actual_type_str, expected_type_str = map(strip_quotes,
format_type_distinctly(arg_type,
callee.arg_types[0]))
msg = 'List comprehension has incompatible type List[{}]; expected List[{}]'.format(
actual_type_str, expected_type_str)
elif callee_name == '<set-comprehension>':
actual_type_str, expected_type_str = map(strip_quotes,
format_type_distinctly(arg_type,
callee.arg_types[0]))
msg = 'Set comprehension has incompatible type Set[{}]; expected Set[{}]'.format(
actual_type_str, expected_type_str)
elif callee_name == '<dictionary-comprehension>':
actual_type_str, expected_type_str = format_type_distinctly(arg_type,
callee.arg_types[n - 1])
msg = ('{} expression in dictionary comprehension has incompatible type {}; '
'expected type {}').format(
'Key' if n == 1 else 'Value',
actual_type_str,
expected_type_str)
elif callee_name == '<generator>':
actual_type_str, expected_type_str = format_type_distinctly(arg_type,
callee.arg_types[0])
msg = 'Generator has incompatible item type {}; expected {}'.format(
actual_type_str, expected_type_str)
else:
try:
expected_type = callee.arg_types[m - 1]
except IndexError: # Varargs callees
expected_type = callee.arg_types[-1]
arg_type_str, expected_type_str = format_type_distinctly(
arg_type, expected_type, bare=True)
if arg_kind == ARG_STAR:
arg_type_str = '*' + arg_type_str
elif arg_kind == ARG_STAR2:
arg_type_str = '**' + arg_type_str
# For function calls with keyword arguments, display the argument name rather than the
# number.
arg_label = str(n)
if isinstance(outer_context, CallExpr) and len(outer_context.arg_names) >= n:
arg_name = outer_context.arg_names[n - 1]
if arg_name is not None:
arg_label = '"{}"'.format(arg_name)
if (arg_kind == ARG_STAR2
and isinstance(arg_type, TypedDictType)
and m <= len(callee.arg_names)
and callee.arg_names[m - 1] is not None
and callee.arg_kinds[m - 1] != ARG_STAR2):
arg_name = callee.arg_names[m - 1]
assert arg_name is not None
arg_type_str, expected_type_str = format_type_distinctly(
arg_type.items[arg_name],
expected_type,
bare=True)
arg_label = '"{}"'.format(arg_name)
if isinstance(outer_context, IndexExpr) and isinstance(outer_context.index, StrExpr):
msg = 'Value of "{}" has incompatible type {}; expected {}' .format(
outer_context.index.value, quote_type_string(arg_type_str),
quote_type_string(expected_type_str))
else:
msg = 'Argument {} {}has incompatible type {}; expected {}'.format(
arg_label, target, quote_type_string(arg_type_str),
quote_type_string(expected_type_str))
object_type = get_proper_type(object_type)
if isinstance(object_type, TypedDictType):
code = codes.TYPEDDICT_ITEM
else:
code = codes.ARG_TYPE
expected_type = get_proper_type(expected_type)
if isinstance(expected_type, UnionType):
expected_types = list(expected_type.items)
else:
expected_types = [expected_type]
for type in get_proper_types(expected_types):
if isinstance(arg_type, Instance) and isinstance(type, Instance):
notes = append_invariance_notes(notes, arg_type, type)
self.fail(msg, context, code=code)
if notes:
for note_msg in notes:
self.note(note_msg, context, code=code)
return code
def incompatible_argument_note(self,
original_caller_type: ProperType,
callee_type: ProperType,
context: Context,
code: Optional[ErrorCode]) -> None:
if isinstance(original_caller_type, (Instance, TupleType, TypedDictType)):
if isinstance(callee_type, Instance) and callee_type.type.is_protocol:
self.report_protocol_problems(original_caller_type, callee_type,
context, code=code)
if isinstance(callee_type, UnionType):
for item in callee_type.items:
item = get_proper_type(item)
if isinstance(item, Instance) and item.type.is_protocol:
self.report_protocol_problems(original_caller_type, item,
context, code=code)
if (isinstance(callee_type, CallableType) and
isinstance(original_caller_type, Instance)):
call = find_member('__call__', original_caller_type, original_caller_type,
is_operator=True)
if call:
self.note_call(original_caller_type, call, context, code=code)
def invalid_index_type(self, index_type: Type, expected_type: Type, base_str: str,
context: Context, *, code: ErrorCode) -> None:
index_str, expected_str = format_type_distinctly(index_type, expected_type)
self.fail('Invalid index type {} for {}; expected type {}'.format(
index_str, base_str, expected_str), context, code=code)
def too_few_arguments(self, callee: CallableType, context: Context,
argument_names: Optional[Sequence[Optional[str]]]) -> None:
if argument_names is not None:
num_positional_args = sum(k is None for k in argument_names)
arguments_left = callee.arg_names[num_positional_args:callee.min_args]
diff = [k for k in arguments_left if k not in argument_names]
if len(diff) == 1:
msg = 'Missing positional argument'
else:
msg = 'Missing positional arguments'
callee_name = callable_name(callee)
if callee_name is not None and diff and all(d is not None for d in diff):
args = '", "'.join(cast(List[str], diff))
msg += ' "{}" in call to {}'.format(args, callee_name)
else:
msg = 'Too few arguments' + for_function(callee)
else:
msg = 'Too few arguments' + for_function(callee)
self.fail(msg, context, code=codes.CALL_ARG)
def missing_named_argument(self, callee: CallableType, context: Context, name: str) -> None:
msg = 'Missing named argument "{}"'.format(name) + for_function(callee)
self.fail(msg, context, code=codes.CALL_ARG)
def too_many_arguments(self, callee: CallableType, context: Context) -> None:
msg = 'Too many arguments' + for_function(callee)
self.fail(msg, context, code=codes.CALL_ARG)
def too_many_arguments_from_typed_dict(self,
callee: CallableType,
arg_type: TypedDictType,
context: Context) -> None:
# Try to determine the name of the extra argument.
for key in arg_type.items:
if key not in callee.arg_names:
msg = 'Extra argument "{}" from **args'.format(key) + for_function(callee)
break
else:
self.too_many_arguments(callee, context)
return
self.fail(msg, context)
def too_many_positional_arguments(self, callee: CallableType,
context: Context) -> None:
msg = 'Too many positional arguments' + for_function(callee)
self.fail(msg, context)
def unexpected_keyword_argument(self, callee: CallableType, name: str, arg_type: Type,
context: Context) -> None:
msg = 'Unexpected keyword argument "{}"'.format(name) + for_function(callee)
        # Suggest the intended keyword: prefer arguments whose type matches, else fall back to any match.
matching_type_args = []
not_matching_type_args = []
for i, kwarg_type in enumerate(callee.arg_types):
callee_arg_name = callee.arg_names[i]
if callee_arg_name is not None and callee.arg_kinds[i] != ARG_STAR:
if is_subtype(arg_type, kwarg_type):
matching_type_args.append(callee_arg_name)
else:
not_matching_type_args.append(callee_arg_name)
matches = best_matches(name, matching_type_args)
if not matches:
matches = best_matches(name, not_matching_type_args)
if matches:
msg += "; did you mean {}?".format(pretty_seq(matches[:3], "or"))
self.fail(msg, context, code=codes.CALL_ARG)
module = find_defining_module(self.modules, callee)
if module:
assert callee.definition is not None
fname = callable_name(callee)
if not fname: # an alias to function with a different name
fname = 'Called function'
self.note('{} defined here'.format(fname), callee.definition,
file=module.path, origin=context, code=codes.CALL_ARG)
def duplicate_argument_value(self, callee: CallableType, index: int,
context: Context) -> None:
self.fail('{} gets multiple values for keyword argument "{}"'.
format(callable_name(callee) or 'Function', callee.arg_names[index]),
context)
def does_not_return_value(self, callee_type: Optional[Type], context: Context) -> None:
"""Report an error about use of an unusable type."""
name = None # type: Optional[str]
callee_type = get_proper_type(callee_type)
if isinstance(callee_type, FunctionLike):
name = callable_name(callee_type)
if name is not None:
self.fail('{} does not return a value'.format(capitalize(name)), context,
code=codes.FUNC_RETURNS_VALUE)
else:
self.fail('Function does not return a value', context, code=codes.FUNC_RETURNS_VALUE)
def deleted_as_rvalue(self, typ: DeletedType, context: Context) -> None:
"""Report an error about using an deleted type as an rvalue."""
if typ.source is None:
s = ""
else:
s = ' "{}"'.format(typ.source)
self.fail('Trying to read deleted variable{}'.format(s), context)
def deleted_as_lvalue(self, typ: DeletedType, context: Context) -> None:
"""Report an error about using an deleted type as an lvalue.
Currently, this only occurs when trying to assign to an
exception variable outside the local except: blocks.
"""
if typ.source is None:
s = ""
else:
s = ' "{}"'.format(typ.source)
self.fail('Assignment to variable{} outside except: block'.format(s), context)
def no_variant_matches_arguments(self,
plausible_targets: List[CallableType],
overload: Overloaded,
arg_types: List[Type],
context: Context,
*,
code: Optional[ErrorCode] = None) -> None:
code = code or codes.CALL_OVERLOAD
name = callable_name(overload)
if name:
name_str = ' of {}'.format(name)
else:
name_str = ''
arg_types_str = ', '.join(format_type(arg) for arg in arg_types)
num_args = len(arg_types)
if num_args == 0:
self.fail('All overload variants{} require at least one argument'.format(name_str),
context, code=code)
elif num_args == 1:
self.fail('No overload variant{} matches argument type {}'
.format(name_str, arg_types_str), context, code=code)
else:
self.fail('No overload variant{} matches argument types {}'
.format(name_str, arg_types_str), context, code=code)
self.pretty_overload_matches(plausible_targets, overload, context, offset=2, max_items=2,
code=code)
def wrong_number_values_to_unpack(self, provided: int, expected: int,
context: Context) -> None:
if provided < expected:
if provided == 1:
self.fail('Need more than 1 value to unpack ({} expected)'.format(expected),
context)
else:
self.fail('Need more than {} values to unpack ({} expected)'.format(
provided, expected), context)
elif provided > expected:
self.fail('Too many values to unpack ({} expected, {} provided)'.format(
expected, provided), context)
def unpacking_strings_disallowed(self, context: Context) -> None:
self.fail("Unpacking a string is disallowed", context)
def type_not_iterable(self, type: Type, context: Context) -> None:
self.fail('"{}" object is not iterable'.format(type), context)
def incompatible_operator_assignment(self, op: str,
context: Context) -> None:
self.fail('Result type of {} incompatible in assignment'.format(op),
context)
def overload_signature_incompatible_with_supertype(
self, name: str, name_in_super: str, supertype: str,
overload: Overloaded, context: Context) -> None:
target = self.override_target(name, name_in_super, supertype)
self.fail('Signature of "{}" incompatible with {}'.format(
name, target), context, code=codes.OVERRIDE)
note_template = 'Overload variants must be defined in the same order as they are in "{}"'
self.note(note_template.format(supertype), context, code=codes.OVERRIDE)
def signature_incompatible_with_supertype(
self, name: str, name_in_super: str, supertype: str,
context: Context) -> None:
target = self.override_target(name, name_in_super, supertype)
self.fail('Signature of "{}" incompatible with {}'.format(
name, target), context, code=codes.OVERRIDE)
def argument_incompatible_with_supertype(
self, arg_num: int, name: str, type_name: Optional[str],
name_in_supertype: str, arg_type_in_supertype: Type, supertype: str,
context: Context) -> None:
target = self.override_target(name, name_in_supertype, supertype)
arg_type_in_supertype_f = format_type_bare(arg_type_in_supertype)
self.fail('Argument {} of "{}" is incompatible with {}; '
'supertype defines the argument type as "{}"'
.format(arg_num, name, target, arg_type_in_supertype_f),
context,
code=codes.OVERRIDE)
self.note(
'This violates the Liskov substitution principle',
context,
code=codes.OVERRIDE)
self.note(
'See https://mypy.readthedocs.io/en/stable/common_issues.html#incompatible-overrides',
context,
code=codes.OVERRIDE)
if name == "__eq__" and type_name:
multiline_msg = self.comparison_method_example_msg(class_name=type_name)
self.note_multiline(multiline_msg, context, code=codes.OVERRIDE)
def comparison_method_example_msg(self, class_name: str) -> str:
return dedent('''\
It is recommended for "__eq__" to work with arbitrary objects, for example:
def __eq__(self, other: object) -> bool:
if not isinstance(other, {class_name}):
return NotImplemented
return <logic to compare two {class_name} instances>
'''.format(class_name=class_name))
def return_type_incompatible_with_supertype(
self, name: str, name_in_supertype: str, supertype: str,
original: Type, override: Type,
context: Context) -> None:
target = self.override_target(name, name_in_supertype, supertype)
override_str, original_str = format_type_distinctly(override, original)
self.fail('Return type {} of "{}" incompatible with return type {} in {}'
.format(override_str, name, original_str, target),
context,
code=codes.OVERRIDE)
def override_target(self, name: str, name_in_super: str,
supertype: str) -> str:
target = 'supertype "{}"'.format(supertype)
if name_in_super != name:
target = '"{}" of {}'.format(name_in_super, target)
return target
def incompatible_type_application(self, expected_arg_count: int,
actual_arg_count: int,
context: Context) -> None:
if expected_arg_count == 0:
self.fail('Type application targets a non-generic function or class',
context)
elif actual_arg_count > expected_arg_count:
self.fail('Type application has too many types ({} expected)'
.format(expected_arg_count), context)
else:
self.fail('Type application has too few types ({} expected)'
.format(expected_arg_count), context)
def could_not_infer_type_arguments(self, callee_type: CallableType, n: int,
context: Context) -> None:
callee_name = callable_name(callee_type)
if callee_name is not None and n > 0:
self.fail('Cannot infer type argument {} of {}'.format(n, callee_name), context)
else:
self.fail('Cannot infer function type argument', context)
def invalid_var_arg(self, typ: Type, context: Context) -> None:
self.fail('List or tuple expected as variable arguments', context)
def invalid_keyword_var_arg(self, typ: Type, is_mapping: bool, context: Context) -> None:
typ = get_proper_type(typ)
if isinstance(typ, Instance) and is_mapping:
self.fail('Keywords must be strings', context)
else:
suffix = ''
if isinstance(typ, Instance):
suffix = ', not {}'.format(format_type(typ))
self.fail(
'Argument after ** must be a mapping{}'.format(suffix),
context, code=codes.ARG_TYPE)
def undefined_in_superclass(self, member: str, context: Context) -> None:
self.fail('"{}" undefined in superclass'.format(member), context)
def first_argument_for_super_must_be_type(self, actual: Type, context: Context) -> None:
actual = get_proper_type(actual)
if isinstance(actual, Instance):
# Don't include type of instance, because it can look confusingly like a type
# object.
type_str = 'a non-type instance'
else:
type_str = format_type(actual)
self.fail('Argument 1 for "super" must be a type object; got {}'.format(type_str), context,
code=codes.ARG_TYPE)
def too_few_string_formatting_arguments(self, context: Context) -> None:
self.fail('Not enough arguments for format string', context,
code=codes.STRING_FORMATTING)
def too_many_string_formatting_arguments(self, context: Context) -> None:
self.fail('Not all arguments converted during string formatting', context,
code=codes.STRING_FORMATTING)
def unsupported_placeholder(self, placeholder: str, context: Context) -> None:
self.fail('Unsupported format character \'%s\'' % placeholder, context,
code=codes.STRING_FORMATTING)
def string_interpolation_with_star_and_key(self, context: Context) -> None:
self.fail('String interpolation contains both stars and mapping keys', context,
code=codes.STRING_FORMATTING)
def requires_int_or_char(self, context: Context,
format_call: bool = False) -> None:
self.fail('"{}c" requires int or char'.format(':' if format_call else '%'),
context, code=codes.STRING_FORMATTING)
def key_not_in_mapping(self, key: str, context: Context) -> None:
self.fail('Key \'%s\' not found in mapping' % key, context,
code=codes.STRING_FORMATTING)
def string_interpolation_mixing_key_and_non_keys(self, context: Context) -> None:
self.fail('String interpolation mixes specifier with and without mapping keys', context,
code=codes.STRING_FORMATTING)
def cannot_determine_type(self, name: str, context: Context) -> None:
self.fail('Cannot determine type of "%s"' % name, context, code=codes.HAS_TYPE)
def cannot_determine_type_in_base(self, name: str, base: str, context: Context) -> None:
self.fail('Cannot determine type of "%s" in base class "%s"' % (name, base), context)
def no_formal_self(self, name: str, item: CallableType, context: Context) -> None:
self.fail('Attribute function "%s" with type %s does not accept self argument'
% (name, format_type(item)), context)
def incompatible_self_argument(self, name: str, arg: Type, sig: CallableType,
is_classmethod: bool, context: Context) -> None:
kind = 'class attribute function' if is_classmethod else 'attribute function'
self.fail('Invalid self argument %s to %s "%s" with type %s'
% (format_type(arg), kind, name, format_type(sig)), context)
def incompatible_conditional_function_def(self, defn: FuncDef) -> None:
self.fail('All conditional function variants must have identical '
'signatures', defn)
def cannot_instantiate_abstract_class(self, class_name: str,
abstract_attributes: List[str],
context: Context) -> None:
attrs = format_string_list(['"%s"' % a for a in abstract_attributes])
self.fail('Cannot instantiate abstract class "%s" with abstract '
'attribute%s %s' % (class_name, plural_s(abstract_attributes),
attrs),
context, code=codes.ABSTRACT)
def base_class_definitions_incompatible(self, name: str, base1: TypeInfo,
base2: TypeInfo,
context: Context) -> None:
self.fail('Definition of "{}" in base class "{}" is incompatible '
'with definition in base class "{}"'.format(
name, base1.name, base2.name), context)
def cant_assign_to_method(self, context: Context) -> None:
self.fail(message_registry.CANNOT_ASSIGN_TO_METHOD, context,
code=codes.ASSIGNMENT)
def cant_assign_to_classvar(self, name: str, context: Context) -> None:
self.fail('Cannot assign to class variable "%s" via instance' % name, context)
def final_cant_override_writable(self, name: str, ctx: Context) -> None:
self.fail('Cannot override writable attribute "{}" with a final one'.format(name), ctx)
def cant_override_final(self, name: str, base_name: str, ctx: Context) -> None:
self.fail('Cannot override final attribute "{}"'
' (previously declared in base class "{}")'.format(name, base_name), ctx)
def cant_assign_to_final(self, name: str, attr_assign: bool, ctx: Context) -> None:
"""Warn about a prohibited assignment to a final attribute.
Pass `attr_assign=True` if the assignment assigns to an attribute.
"""
kind = "attribute" if attr_assign else "name"
self.fail('Cannot assign to final {} "{}"'.format(kind, unmangle(name)), ctx)
def protocol_members_cant_be_final(self, ctx: Context) -> None:
self.fail("Protocol member cannot be final", ctx)
def final_without_value(self, ctx: Context) -> None:
self.fail("Final name must be initialized with a value", ctx)
def read_only_property(self, name: str, type: TypeInfo,
context: Context) -> None:
self.fail('Property "{}" defined in "{}" is read-only'.format(
name, type.name), context)
def incompatible_typevar_value(self,
callee: CallableType,
typ: Type,
typevar_name: str,
context: Context) -> None:
self.fail(message_registry.INCOMPATIBLE_TYPEVAR_VALUE
.format(typevar_name, callable_name(callee) or 'function', format_type(typ)),
context,
code=codes.TYPE_VAR)
def dangerous_comparison(self, left: Type, right: Type, kind: str, ctx: Context) -> None:
left_str = 'element' if kind == 'container' else 'left operand'
right_str = 'container item' if kind == 'container' else 'right operand'
message = 'Non-overlapping {} check ({} type: {}, {} type: {})'
left_typ, right_typ = format_type_distinctly(left, right)
self.fail(message.format(kind, left_str, left_typ, right_str, right_typ), ctx,
code=codes.COMPARISON_OVERLAP)
def overload_inconsistently_applies_decorator(self, decorator: str, context: Context) -> None:
self.fail(
'Overload does not consistently use the "@{}" '.format(decorator)
+ 'decorator on all function signatures.',
context)
def overloaded_signatures_overlap(self, index1: int, index2: int, context: Context) -> None:
self.fail('Overloaded function signatures {} and {} overlap with '
'incompatible return types'.format(index1, index2), context)
def overloaded_signature_will_never_match(self, index1: int, index2: int,
context: Context) -> None:
self.fail(
'Overloaded function signature {index2} will never be matched: '
'signature {index1}\'s parameter type(s) are the same or broader'.format(
index1=index1,
index2=index2),
context)
def overloaded_signatures_typevar_specific(self, index: int, context: Context) -> None:
self.fail('Overloaded function implementation cannot satisfy signature {} '.format(index) +
'due to inconsistencies in how they use type variables', context)
def overloaded_signatures_arg_specific(self, index: int, context: Context) -> None:
self.fail('Overloaded function implementation does not accept all possible arguments '
'of signature {}'.format(index), context)
def overloaded_signatures_ret_specific(self, index: int, context: Context) -> None:
self.fail('Overloaded function implementation cannot produce return type '
'of signature {}'.format(index), context)
def warn_both_operands_are_from_unions(self, context: Context) -> None:
self.note('Both left and right operands are unions', context, code=codes.OPERATOR)
def warn_operand_was_from_union(self, side: str, original: Type, context: Context) -> None:
self.note('{} operand is of type {}'.format(side, format_type(original)), context,
code=codes.OPERATOR)
def operator_method_signatures_overlap(
self, reverse_class: TypeInfo, reverse_method: str, forward_class: Type,
forward_method: str, context: Context) -> None:
self.fail('Signatures of "{}" of "{}" and "{}" of {} '
'are unsafely overlapping'.format(
reverse_method, reverse_class.name,
forward_method, format_type(forward_class)),
context)
def forward_operator_not_callable(
self, forward_method: str, context: Context) -> None:
self.fail('Forward operator "{}" is not callable'.format(
forward_method), context)
def signatures_incompatible(self, method: str, other_method: str,
context: Context) -> None:
self.fail('Signatures of "{}" and "{}" are incompatible'.format(
method, other_method), context)
def yield_from_invalid_operand_type(self, expr: Type, context: Context) -> Type:
text = format_type(expr) if format_type(expr) != 'object' else expr
self.fail('"yield from" can\'t be applied to {}'.format(text), context)
return AnyType(TypeOfAny.from_error)
def invalid_signature(self, func_type: Type, context: Context) -> None:
self.fail('Invalid signature "{}"'.format(func_type), context)
def invalid_signature_for_special_method(
self, func_type: Type, context: Context, method_name: str) -> None:
self.fail('Invalid signature "{}" for "{}"'.format(func_type, method_name), context)
def reveal_type(self, typ: Type, context: Context) -> None:
self.note('Revealed type is "{}"'.format(typ), context)
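    # For example, reveal_type() on an int expression produces the note:
    #   Revealed type is "builtins.int"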
def reveal_locals(self, type_map: Dict[str, Optional[Type]], context: Context) -> None:
# To ensure that the output is predictable on Python < 3.6,
# use an ordered dictionary sorted by variable name
sorted_locals = OrderedDict(sorted(type_map.items(), key=lambda t: t[0]))
self.note("Revealed local types are:", context)
for line in [' {}: {}'.format(k, v) for k, v in sorted_locals.items()]:
self.note(line, context)
def unsupported_type_type(self, item: Type, context: Context) -> None:
self.fail('Cannot instantiate type "Type[{}]"'.format(format_type_bare(item)), context)
def redundant_cast(self, typ: Type, context: Context) -> None:
self.fail('Redundant cast to {}'.format(format_type(typ)), context,
code=codes.REDUNDANT_CAST)
def unimported_type_becomes_any(self, prefix: str, typ: Type, ctx: Context) -> None:
self.fail("{} becomes {} due to an unfollowed import".format(prefix, format_type(typ)),
ctx, code=codes.NO_ANY_UNIMPORTED)
def need_annotation_for_var(self, node: SymbolNode, context: Context,
python_version: Optional[Tuple[int, int]] = None) -> None:
hint = ''
has_variable_annotations = not python_version or python_version >= (3, 6)
# Only gives hint if it's a variable declaration and the partial type is a builtin type
if (python_version and isinstance(node, Var) and isinstance(node.type, PartialType) and
node.type.type and node.type.type.fullname in reverse_builtin_aliases):
alias = reverse_builtin_aliases[node.type.type.fullname]
alias = alias.split('.')[-1]
type_dec = '<type>'
if alias == 'Dict':
type_dec = '{}, {}'.format(type_dec, type_dec)
if has_variable_annotations:
hint = ' (hint: "{}: {}[{}] = ...")'.format(node.name, alias, type_dec)
else:
hint = ' (hint: "{} = ... # type: {}[{}]")'.format(node.name, alias, type_dec)
if has_variable_annotations:
needed = 'annotation'
else:
needed = 'comment'
self.fail('Need type {} for "{}"{}'.format(needed, unmangle(node.name), hint), context,
code=codes.VAR_ANNOTATED)
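    # For example, when the partial list type of "x = []" cannot be inferred on
    # Python 3.6+, this typically reports:
    #   Need type annotation for "x" (hint: "x: List[<type>] = ...")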
def explicit_any(self, ctx: Context) -> None:
self.fail('Explicit "Any" is not allowed', ctx)
def unexpected_typeddict_keys(
self,
typ: TypedDictType,
expected_keys: List[str],
actual_keys: List[str],
context: Context) -> None:
actual_set = set(actual_keys)
expected_set = set(expected_keys)
if not typ.is_anonymous():
# Generate simpler messages for some common special cases.
if actual_set < expected_set:
# Use list comprehension instead of set operations to preserve order.
missing = [key for key in expected_keys if key not in actual_set]
self.fail('Missing {} for TypedDict {}'.format(
format_key_list(missing, short=True), format_type(typ)),
context, code=codes.TYPEDDICT_ITEM)
return
else:
extra = [key for key in actual_keys if key not in expected_set]
if extra:
# If there are both extra and missing keys, only report extra ones for
# simplicity.
self.fail('Extra {} for TypedDict {}'.format(
format_key_list(extra, short=True), format_type(typ)),
context, code=codes.TYPEDDICT_ITEM)
return
found = format_key_list(actual_keys, short=True)
if not expected_keys:
self.fail('Unexpected TypedDict {}'.format(found), context)
return
expected = format_key_list(expected_keys)
if actual_keys and actual_set < expected_set:
found = 'only {}'.format(found)
self.fail('Expected {} but found {}'.format(expected, found), context,
code=codes.TYPEDDICT_ITEM)
def typeddict_key_must_be_string_literal(
self,
typ: TypedDictType,
context: Context) -> None:
self.fail(
'TypedDict key must be a string literal; expected one of {}'.format(
format_item_name_list(typ.items.keys())), context)
def typeddict_key_not_found(
self,
typ: TypedDictType,
item_name: str,
context: Context) -> None:
if typ.is_anonymous():
self.fail('"{}" is not a valid TypedDict key; expected one of {}'.format(
item_name, format_item_name_list(typ.items.keys())), context)
else:
self.fail('TypedDict {} has no key "{}"'.format(
format_type(typ), item_name), context, code=codes.TYPEDDICT_ITEM)
matches = best_matches(item_name, typ.items.keys())
if matches:
self.note("Did you mean {}?".format(
pretty_seq(matches[:3], "or")), context)
def typeddict_context_ambiguous(
self,
types: List[TypedDictType],
context: Context) -> None:
formatted_types = ', '.join(list(format_type_distinctly(*types)))
self.fail('Type of TypedDict is ambiguous, could be any of ({})'.format(
formatted_types), context)
def typeddict_key_cannot_be_deleted(
self,
typ: TypedDictType,
item_name: str,
context: Context) -> None:
if typ.is_anonymous():
self.fail('TypedDict key "{}" cannot be deleted'.format(item_name),
context)
else:
self.fail('Key "{}" of TypedDict {} cannot be deleted'.format(
item_name, format_type(typ)), context)
def typeddict_setdefault_arguments_inconsistent(
self,
default: Type,
expected: Type,
context: Context) -> None:
msg = 'Argument 2 to "setdefault" of "TypedDict" has incompatible type {}; expected {}'
self.fail(msg.format(format_type(default), format_type(expected)), context,
code=codes.TYPEDDICT_ITEM)
def type_arguments_not_allowed(self, context: Context) -> None:
self.fail('Parameterized generics cannot be used with class or instance checks', context)
def disallowed_any_type(self, typ: Type, context: Context) -> None:
typ = get_proper_type(typ)
if isinstance(typ, AnyType):
message = 'Expression has type "Any"'
else:
message = 'Expression type contains "Any" (has type {})'.format(format_type(typ))
self.fail(message, context)
def incorrectly_returning_any(self, typ: Type, context: Context) -> None:
message = 'Returning Any from function declared to return {}'.format(
format_type(typ))
self.fail(message, context, code=codes.NO_ANY_RETURN)
def incorrect__exit__return(self, context: Context) -> None:
self.fail(
'"bool" is invalid as return type for "__exit__" that always returns False', context,
code=codes.EXIT_RETURN)
self.note(
'Use "typing_extensions.Literal[False]" as the return type or change it to "None"',
context, code=codes.EXIT_RETURN)
self.note(
'If return type of "__exit__" implies that it may return True, '
'the context manager may swallow exceptions',
context, code=codes.EXIT_RETURN)
def untyped_decorated_function(self, typ: Type, context: Context) -> None:
typ = get_proper_type(typ)
if isinstance(typ, AnyType):
self.fail("Function is untyped after decorator transformation", context)
else:
self.fail('Type of decorated function contains type "Any" ({})'.format(
format_type(typ)), context)
def typed_function_untyped_decorator(self, func_name: str, context: Context) -> None:
self.fail('Untyped decorator makes function "{}" untyped'.format(func_name), context)
def bad_proto_variance(self, actual: int, tvar_name: str, expected: int,
context: Context) -> None:
msg = capitalize('{} type variable "{}" used in protocol where'
' {} one is expected'.format(variance_string(actual),
tvar_name,
variance_string(expected)))
self.fail(msg, context)
def concrete_only_assign(self, typ: Type, context: Context) -> None:
self.fail("Can only assign concrete classes to a variable of type {}"
.format(format_type(typ)), context)
def concrete_only_call(self, typ: Type, context: Context) -> None:
self.fail("Only concrete class can be given where {} is expected"
.format(format_type(typ)), context)
def cannot_use_function_with_type(
self, method_name: str, type_name: str, context: Context) -> None:
self.fail("Cannot use {}() with {} type".format(method_name, type_name), context)
def report_non_method_protocol(self, tp: TypeInfo, members: List[str],
context: Context) -> None:
self.fail("Only protocols that don't have non-method members can be"
" used with issubclass()", context)
if len(members) < 3:
attrs = ', '.join(members)
self.note('Protocol "{}" has non-method member(s): {}'
.format(tp.name, attrs), context)
def note_call(self,
subtype: Type,
call: Type,
context: Context,
*,
code: Optional[ErrorCode]) -> None:
self.note('"{}.__call__" has type {}'.format(format_type_bare(subtype),
format_type(call, verbosity=1)),
context, code=code)
def unreachable_statement(self, context: Context) -> None:
self.fail("Statement is unreachable", context, code=codes.UNREACHABLE)
def redundant_left_operand(self, op_name: str, context: Context) -> None:
"""Indicates that the left operand of a boolean expression is redundant:
it does not change the truth value of the entire condition as a whole.
'op_name' should either be the string "and" or the string "or".
"""
self.redundant_expr('Left operand of "{}"'.format(op_name), op_name == 'and', context)
def unreachable_right_operand(self, op_name: str, context: Context) -> None:
"""Indicates that the right operand of a boolean expression is redundant:
it does not change the truth value of the entire condition as a whole.
'op_name' should either be the string "and" or the string "or".
"""
self.fail('Right operand of "{}" is never evaluated'.format(op_name),
context, code=codes.UNREACHABLE)
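    # e.g. for "x or f()" where "x" is determined to be always true, this reports:
    #   Right operand of "or" is never evaluated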
def redundant_condition_in_comprehension(self, truthiness: bool, context: Context) -> None:
self.redundant_expr("If condition in comprehension", truthiness, context)
def redundant_condition_in_if(self, truthiness: bool, context: Context) -> None:
self.redundant_expr("If condition", truthiness, context)
def redundant_condition_in_assert(self, truthiness: bool, context: Context) -> None:
self.redundant_expr("Condition in assert", truthiness, context)
def redundant_expr(self, description: str, truthiness: bool, context: Context) -> None:
self.fail("{} is always {}".format(description, str(truthiness).lower()),
context, code=codes.REDUNDANT_EXPR)
def impossible_intersection(self,
formatted_base_class_list: str,
reason: str,
context: Context,
) -> None:
template = "Subclass of {} cannot exist: would have {}"
self.fail(template.format(formatted_base_class_list, reason), context,
code=codes.UNREACHABLE)
def report_protocol_problems(self,
subtype: Union[Instance, TupleType, TypedDictType],
supertype: Instance,
context: Context,
*,
code: Optional[ErrorCode]) -> None:
"""Report possible protocol conflicts between 'subtype' and 'supertype'.
This includes missing members, incompatible types, and incompatible
attribute flags, such as settable vs read-only or class variable vs
instance variable.
"""
OFFSET = 4 # Four spaces, so that notes will look like this:
# note: 'Cls' is missing following 'Proto' members:
# note: method, attr
MAX_ITEMS = 2 # Maximum number of conflicts, missing members, and overloads shown
# List of special situations where we don't want to report additional problems
exclusions = {TypedDictType: ['typing.Mapping'],
TupleType: ['typing.Iterable', 'typing.Sequence'],
Instance: []} # type: Dict[type, List[str]]
if supertype.type.fullname in exclusions[type(subtype)]:
return
if any(isinstance(tp, UninhabitedType) for tp in get_proper_types(supertype.args)):
# We don't want to add notes for failed inference (e.g. Iterable[<nothing>]).
            # This would only confuse the user even more.
return
if isinstance(subtype, TupleType):
if not isinstance(subtype.partial_fallback, Instance):
return
subtype = subtype.partial_fallback
elif isinstance(subtype, TypedDictType):
if not isinstance(subtype.fallback, Instance):
return
subtype = subtype.fallback
# Report missing members
missing = get_missing_protocol_members(subtype, supertype)
if (missing and len(missing) < len(supertype.type.protocol_members) and
len(missing) <= MAX_ITEMS):
self.note('"{}" is missing following "{}" protocol member{}:'
.format(subtype.type.name, supertype.type.name, plural_s(missing)),
context,
code=code)
self.note(', '.join(missing), context, offset=OFFSET, code=code)
elif len(missing) > MAX_ITEMS or len(missing) == len(supertype.type.protocol_members):
# This is an obviously wrong type: too many missing members
return
# Report member type conflicts
conflict_types = get_conflict_protocol_types(subtype, supertype)
if conflict_types and (not is_subtype(subtype, erase_type(supertype)) or
not subtype.type.defn.type_vars or
not supertype.type.defn.type_vars):
self.note('Following member(s) of {} have '
'conflicts:'.format(format_type(subtype)),
context,
code=code)
for name, got, exp in conflict_types[:MAX_ITEMS]:
exp = get_proper_type(exp)
got = get_proper_type(got)
if (not isinstance(exp, (CallableType, Overloaded)) or
not isinstance(got, (CallableType, Overloaded))):
self.note('{}: expected {}, got {}'.format(name,
*format_type_distinctly(exp, got)),
context,
offset=OFFSET,
code=code)
else:
self.note('Expected:', context, offset=OFFSET, code=code)
if isinstance(exp, CallableType):
self.note(pretty_callable(exp), context, offset=2 * OFFSET, code=code)
else:
assert isinstance(exp, Overloaded)
self.pretty_overload(exp, context, OFFSET, MAX_ITEMS, code=code)
self.note('Got:', context, offset=OFFSET, code=code)
if isinstance(got, CallableType):
self.note(pretty_callable(got), context, offset=2 * OFFSET, code=code)
else:
assert isinstance(got, Overloaded)
self.pretty_overload(got, context, OFFSET, MAX_ITEMS, code=code)
self.print_more(conflict_types, context, OFFSET, MAX_ITEMS, code=code)
# Report flag conflicts (i.e. settable vs read-only etc.)
conflict_flags = get_bad_protocol_flags(subtype, supertype)
for name, subflags, superflags in conflict_flags[:MAX_ITEMS]:
if IS_CLASSVAR in subflags and IS_CLASSVAR not in superflags:
self.note('Protocol member {}.{} expected instance variable,'
' got class variable'.format(supertype.type.name, name),
context,
code=code)
if IS_CLASSVAR in superflags and IS_CLASSVAR not in subflags:
self.note('Protocol member {}.{} expected class variable,'
' got instance variable'.format(supertype.type.name, name),
context,
code=code)
if IS_SETTABLE in superflags and IS_SETTABLE not in subflags:
self.note('Protocol member {}.{} expected settable variable,'
' got read-only attribute'.format(supertype.type.name, name),
context,
code=code)
if IS_CLASS_OR_STATIC in superflags and IS_CLASS_OR_STATIC not in subflags:
self.note('Protocol member {}.{} expected class or static method'
.format(supertype.type.name, name),
context,
code=code)
self.print_more(conflict_flags, context, OFFSET, MAX_ITEMS, code=code)
def pretty_overload(self,
tp: Overloaded,
context: Context,
offset: int,
max_items: int,
*,
code: Optional[ErrorCode] = None) -> None:
for item in tp.items()[:max_items]:
self.note('@overload', context, offset=2 * offset, code=code)
self.note(pretty_callable(item), context, offset=2 * offset, code=code)
left = len(tp.items()) - max_items
if left > 0:
msg = '<{} more overload{} not shown>'.format(left, plural_s(left))
self.note(msg, context, offset=2 * offset, code=code)
def pretty_overload_matches(self,
targets: List[CallableType],
func: Overloaded,
context: Context,
offset: int,
max_items: int,
code: ErrorCode) -> None:
if not targets:
targets = func.items()
shown = min(max_items, len(targets))
max_matching = len(targets)
max_available = len(func.items())
# If there are 3 matches but max_items == 2, we might as well show
# all three items instead of having the 3rd item be an error message.
if shown + 1 == max_matching:
shown = max_matching
self.note('Possible overload variant{}:'.format(plural_s(shown)), context, code=code)
for item in targets[:shown]:
self.note(pretty_callable(item), context, offset=2 * offset, code=code)
assert shown <= max_matching <= max_available
if shown < max_matching <= max_available:
left = max_matching - shown
msg = '<{} more similar overload{} not shown, out of {} total overloads>'.format(
left, plural_s(left), max_available)
self.note(msg, context, offset=2 * offset, code=code)
elif shown == max_matching < max_available:
left = max_available - shown
msg = '<{} more non-matching overload{} not shown>'.format(left, plural_s(left))
self.note(msg, context, offset=2 * offset, code=code)
else:
assert shown == max_matching == max_available
def print_more(self,
conflicts: Sequence[Any],
context: Context,
offset: int,
max_items: int,
*,
code: Optional[ErrorCode] = None) -> None:
if len(conflicts) > max_items:
self.note('<{} more conflict(s) not shown>'
.format(len(conflicts) - max_items),
context, offset=offset, code=code)
def try_report_long_tuple_assignment_error(self,
subtype: ProperType,
supertype: ProperType,
context: Context,
msg: str = message_registry.INCOMPATIBLE_TYPES,
subtype_label: Optional[str] = None,
supertype_label: Optional[str] = None,
code: Optional[ErrorCode] = None) -> bool:
"""Try to generate meaningful error message for very long tuple assignment
Returns a bool: True when generating long tuple assignment error,
False when no such error reported
"""
if isinstance(subtype, TupleType):
if (len(subtype.items) > 10 and
isinstance(supertype, Instance) and
supertype.type.fullname == 'builtins.tuple'):
lhs_type = supertype.args[0]
lhs_types = [lhs_type] * len(subtype.items)
self.generate_incompatible_tuple_error(lhs_types,
subtype.items, context, msg, code)
return True
elif (isinstance(supertype, TupleType) and
(len(subtype.items) > 10 or len(supertype.items) > 10)):
if len(subtype.items) != len(supertype.items):
if supertype_label is not None and subtype_label is not None:
error_msg = "{} ({} {}, {} {})".format(msg, subtype_label,
self.format_long_tuple_type(subtype), supertype_label,
self.format_long_tuple_type(supertype))
self.fail(error_msg, context, code=code)
return True
self.generate_incompatible_tuple_error(supertype.items,
subtype.items, context, msg, code)
return True
return False
def format_long_tuple_type(self, typ: TupleType) -> str:
"""Format very long tuple type using an ellipsis notation"""
item_cnt = len(typ.items)
if item_cnt > 10:
return 'Tuple[{}, {}, ... <{} more items>]'\
.format(format_type_bare(typ.items[0]),
format_type_bare(typ.items[1]), str(item_cnt - 2))
else:
return format_type_bare(typ)
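    # e.g. a tuple type with 12 int items is shortened to
    # "Tuple[int, int, ... <10 more items>]".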
def generate_incompatible_tuple_error(self,
lhs_types: List[Type],
rhs_types: List[Type],
context: Context,
msg: str = message_registry.INCOMPATIBLE_TYPES,
code: Optional[ErrorCode] = None) -> None:
"""Generate error message for individual incompatible tuple pairs"""
error_cnt = 0
        notes = []  # type: List[str]
for i, (lhs_t, rhs_t) in enumerate(zip(lhs_types, rhs_types)):
if not is_subtype(lhs_t, rhs_t):
if error_cnt < 3:
notes.append('Expression tuple item {} has type "{}"; "{}" expected; '
.format(str(i), format_type_bare(rhs_t), format_type_bare(lhs_t)))
error_cnt += 1
error_msg = msg + ' ({} tuple items are incompatible'.format(str(error_cnt))
if error_cnt - 3 > 0:
error_msg += '; {} items are omitted)'.format(str(error_cnt - 3))
else:
error_msg += ')'
self.fail(error_msg, context, code=code)
for note in notes:
self.note(note, context, code=code)
def add_fixture_note(self, fullname: str, ctx: Context) -> None:
self.note('Maybe your test fixture does not define "{}"?'.format(fullname), ctx)
if fullname in SUGGESTED_TEST_FIXTURES:
self.note(
'Consider adding [builtins fixtures/{}] to your test description'.format(
SUGGESTED_TEST_FIXTURES[fullname]), ctx)
def quote_type_string(type_string: str) -> str:
"""Quotes a type representation for use in messages."""
no_quote_regex = r'^<(tuple|union): \d+ items>$'
if (type_string in ['Module', 'overloaded function', '<nothing>', '<deleted>']
or re.match(no_quote_regex, type_string) is not None or type_string.endswith('?')):
# Messages are easier to read if these aren't quoted. We use a
# regex to match strings with variable contents.
return type_string
return '"{}"'.format(type_string)
def format_type_inner(typ: Type,
verbosity: int,
fullnames: Optional[Set[str]]) -> str:
"""
Convert a type to a relatively short string suitable for error messages.
Args:
verbosity: a coarse grained control on the verbosity of the type
fullnames: a set of names that should be printed in full
"""
def format(typ: Type) -> str:
return format_type_inner(typ, verbosity, fullnames)
# TODO: show type alias names in errors.
typ = get_proper_type(typ)
if isinstance(typ, Instance):
itype = typ
# Get the short name of the type.
if itype.type.fullname in ('types.ModuleType', '_importlib_modulespec.ModuleType'):
# Make some common error messages simpler and tidier.
return 'Module'
if verbosity >= 2 or (fullnames and itype.type.fullname in fullnames):
base_str = itype.type.fullname
else:
base_str = itype.type.name
if not itype.args:
# No type arguments, just return the type name
return base_str
elif itype.type.fullname == 'builtins.tuple':
item_type_str = format(itype.args[0])
return 'Tuple[{}, ...]'.format(item_type_str)
elif itype.type.fullname in reverse_builtin_aliases:
alias = reverse_builtin_aliases[itype.type.fullname]
alias = alias.split('.')[-1]
items = [format(arg) for arg in itype.args]
return '{}[{}]'.format(alias, ', '.join(items))
else:
# There are type arguments. Convert the arguments to strings.
a = [] # type: List[str]
for arg in itype.args:
a.append(format(arg))
s = ', '.join(a)
return '{}[{}]'.format(base_str, s)
elif isinstance(typ, TypeVarType):
# This is similar to non-generic instance types.
return typ.name
elif isinstance(typ, TupleType):
# Prefer the name of the fallback class (if not tuple), as it's more informative.
if typ.partial_fallback.type.fullname != 'builtins.tuple':
return format(typ.partial_fallback)
items = []
for t in typ.items:
items.append(format(t))
s = 'Tuple[{}]'.format(', '.join(items))
return s
elif isinstance(typ, TypedDictType):
# If the TypedDictType is named, return the name
if not typ.is_anonymous():
return format(typ.fallback)
items = []
for (item_name, item_type) in typ.items.items():
modifier = '' if item_name in typ.required_keys else '?'
items.append('{!r}{}: {}'.format(item_name,
modifier,
format(item_type)))
s = 'TypedDict({{{}}})'.format(', '.join(items))
return s
elif isinstance(typ, LiteralType):
if typ.is_enum_literal():
underlying_type = format(typ.fallback)
return 'Literal[{}.{}]'.format(underlying_type, typ.value)
else:
return str(typ)
elif isinstance(typ, UnionType):
# Only print Unions as Optionals if the Optional wouldn't have to contain another Union
print_as_optional = (len(typ.items) -
sum(isinstance(get_proper_type(t), NoneType)
for t in typ.items) == 1)
if print_as_optional:
rest = [t for t in typ.items if not isinstance(get_proper_type(t), NoneType)]
return 'Optional[{}]'.format(format(rest[0]))
else:
items = []
for t in typ.items:
items.append(format(t))
s = 'Union[{}]'.format(', '.join(items))
return s
elif isinstance(typ, NoneType):
return 'None'
elif isinstance(typ, AnyType):
return 'Any'
elif isinstance(typ, DeletedType):
return '<deleted>'
elif isinstance(typ, UninhabitedType):
if typ.is_noreturn:
return 'NoReturn'
else:
return '<nothing>'
elif isinstance(typ, TypeType):
return 'Type[{}]'.format(format(typ.item))
elif isinstance(typ, FunctionLike):
func = typ
if func.is_type_obj():
# The type of a type object type can be derived from the
# return type (this always works).
return format(TypeType.make_normalized(erase_type(func.items()[0].ret_type)))
elif isinstance(func, CallableType):
return_type = format(func.ret_type)
if func.is_ellipsis_args:
return 'Callable[..., {}]'.format(return_type)
arg_strings = []
for arg_name, arg_type, arg_kind in zip(
func.arg_names, func.arg_types, func.arg_kinds):
if (arg_kind == ARG_POS and arg_name is None
or verbosity == 0 and arg_kind in (ARG_POS, ARG_OPT)):
arg_strings.append(format(arg_type))
else:
constructor = ARG_CONSTRUCTOR_NAMES[arg_kind]
if arg_kind in (ARG_STAR, ARG_STAR2) or arg_name is None:
arg_strings.append("{}({})".format(
constructor,
format(arg_type)))
else:
arg_strings.append("{}({}, {})".format(
constructor,
format(arg_type),
repr(arg_name)))
return 'Callable[[{}], {}]'.format(", ".join(arg_strings), return_type)
else:
# Use a simple representation for function types; proper
# function types may result in long and difficult-to-read
# error messages.
return 'overloaded function'
elif isinstance(typ, UnboundType):
return str(typ)
elif typ is None:
raise RuntimeError('Type is None')
else:
# Default case; we simply have to return something meaningful here.
return 'object'
def collect_all_instances(t: Type) -> List[Instance]:
"""Return all instances that `t` contains (including `t`).
This is similar to collect_all_inner_types from typeanal but only
returns instances and will recurse into fallbacks.
"""
visitor = CollectAllInstancesQuery()
t.accept(visitor)
return visitor.instances
class CollectAllInstancesQuery(TypeTraverserVisitor):
def __init__(self) -> None:
self.instances = [] # type: List[Instance]
def visit_instance(self, t: Instance) -> None:
self.instances.append(t)
super().visit_instance(t)
def find_type_overlaps(*types: Type) -> Set[str]:
"""Return a set of fullnames that share a short name and appear in either type.
This is used to ensure that distinct types with the same short name are printed
with their fullname.
"""
d = {} # type: Dict[str, Set[str]]
for type in types:
for inst in collect_all_instances(type):
d.setdefault(inst.type.name, set()).add(inst.type.fullname)
for shortname in d.keys():
if 'typing.{}'.format(shortname) in TYPES_FOR_UNIMPORTED_HINTS:
d[shortname].add('typing.{}'.format(shortname))
overlaps = set() # type: Set[str]
for fullnames in d.values():
if len(fullnames) > 1:
overlaps.update(fullnames)
return overlaps
def format_type(typ: Type, verbosity: int = 0) -> str:
"""
Convert a type to a relatively short string suitable for error messages.
`verbosity` is a coarse grained control on the verbosity of the type
This function returns a string appropriate for unmodified use in error
messages; this means that it will be quoted in most cases. If
modification of the formatted string is required, callers should use
format_type_bare.
"""
return quote_type_string(format_type_bare(typ, verbosity))
def format_type_bare(typ: Type,
verbosity: int = 0,
fullnames: Optional[Set[str]] = None) -> str:
"""
Convert a type to a relatively short string suitable for error messages.
`verbosity` is a coarse grained control on the verbosity of the type
`fullnames` specifies a set of names that should be printed in full
This function will return an unquoted string. If a caller doesn't need to
perform post-processing on the string output, format_type should be used
instead. (The caller may want to use quote_type_string after
processing has happened, to maintain consistent quoting in messages.)
"""
return format_type_inner(typ, verbosity, find_type_overlaps(typ))
def format_type_distinctly(*types: Type, bare: bool = False) -> Tuple[str, ...]:
"""Jointly format types to distinct strings.
Increase the verbosity of the type strings until they become distinct
while also requiring that distinct types with the same short name are
formatted distinctly.
By default, the returned strings are created using format_type() and will be
quoted accordingly. If ``bare`` is True, the returned strings will not
be quoted; callers who need to do post-processing of the strings before
quoting them (such as prepending * or **) should use this.
"""
overlapping = find_type_overlaps(*types)
for verbosity in range(2):
strs = [
format_type_inner(type, verbosity=verbosity, fullnames=overlapping)
for type in types
]
if len(set(strs)) == len(strs):
break
if bare:
return tuple(strs)
else:
return tuple(quote_type_string(s) for s in strs)
def pretty_callable(tp: CallableType) -> str:
"""Return a nice easily-readable representation of a callable type.
For example:
def [T <: int] f(self, x: int, y: T) -> None
"""
s = ''
asterisk = False
for i in range(len(tp.arg_types)):
if s:
s += ', '
if tp.arg_kinds[i] in (ARG_NAMED, ARG_NAMED_OPT) and not asterisk:
s += '*, '
asterisk = True
if tp.arg_kinds[i] == ARG_STAR:
s += '*'
asterisk = True
if tp.arg_kinds[i] == ARG_STAR2:
s += '**'
name = tp.arg_names[i]
if name:
s += name + ': '
s += format_type_bare(tp.arg_types[i])
if tp.arg_kinds[i] in (ARG_OPT, ARG_NAMED_OPT):
s += ' = ...'
    # If we got a "special arg" (i.e. self, cls, etc.), prepend it to the arg list
if isinstance(tp.definition, FuncDef) and tp.definition.name is not None:
definition_args = tp.definition.arg_names
if definition_args and tp.arg_names != definition_args \
and len(definition_args) > 0:
if s:
s = ', ' + s
s = definition_args[0] + s
s = '{}({})'.format(tp.definition.name, s)
elif tp.name:
first_arg = tp.def_extras.get('first_arg')
if first_arg:
if s:
s = ', ' + s
s = first_arg + s
s = '{}({})'.format(tp.name.split()[0], s) # skip "of Class" part
else:
s = '({})'.format(s)
s += ' -> ' + format_type_bare(tp.ret_type)
if tp.variables:
tvars = []
for tvar in tp.variables:
if isinstance(tvar, TypeVarDef):
upper_bound = get_proper_type(tvar.upper_bound)
if (isinstance(upper_bound, Instance) and
upper_bound.type.fullname != 'builtins.object'):
tvars.append('{} <: {}'.format(tvar.name, format_type_bare(upper_bound)))
elif tvar.values:
tvars.append('{} in ({})'
.format(tvar.name, ', '.join([format_type_bare(tp)
for tp in tvar.values])))
else:
tvars.append(tvar.name)
else:
# For other TypeVarLikeDefs, just use the repr
tvars.append(repr(tvar))
s = '[{}] {}'.format(', '.join(tvars), s)
return 'def {}'.format(s)
def variance_string(variance: int) -> str:
if variance == COVARIANT:
return 'covariant'
elif variance == CONTRAVARIANT:
return 'contravariant'
else:
return 'invariant'
def get_missing_protocol_members(left: Instance, right: Instance) -> List[str]:
"""Find all protocol members of 'right' that are not implemented
(i.e. completely missing) in 'left'.
"""
assert right.type.is_protocol
missing = [] # type: List[str]
for member in right.type.protocol_members:
if not find_member(member, left, left):
missing.append(member)
return missing
def get_conflict_protocol_types(left: Instance, right: Instance) -> List[Tuple[str, Type, Type]]:
"""Find members that are defined in 'left' but have incompatible types.
Return them as a list of ('member', 'got', 'expected').
"""
assert right.type.is_protocol
conflicts = [] # type: List[Tuple[str, Type, Type]]
for member in right.type.protocol_members:
if member in ('__init__', '__new__'):
continue
supertype = find_member(member, right, left)
assert supertype is not None
subtype = find_member(member, left, left)
if not subtype:
continue
is_compat = is_subtype(subtype, supertype, ignore_pos_arg_names=True)
if IS_SETTABLE in get_member_flags(member, right.type):
is_compat = is_compat and is_subtype(supertype, subtype)
if not is_compat:
conflicts.append((member, subtype, supertype))
return conflicts
def get_bad_protocol_flags(left: Instance, right: Instance
) -> List[Tuple[str, Set[int], Set[int]]]:
"""Return all incompatible attribute flags for members that are present in both
'left' and 'right'.
"""
assert right.type.is_protocol
all_flags = [] # type: List[Tuple[str, Set[int], Set[int]]]
for member in right.type.protocol_members:
if find_member(member, left, left):
item = (member,
get_member_flags(member, left.type),
get_member_flags(member, right.type))
all_flags.append(item)
bad_flags = []
for name, subflags, superflags in all_flags:
if (IS_CLASSVAR in subflags and IS_CLASSVAR not in superflags or
IS_CLASSVAR in superflags and IS_CLASSVAR not in subflags or
IS_SETTABLE in superflags and IS_SETTABLE not in subflags or
IS_CLASS_OR_STATIC in superflags and IS_CLASS_OR_STATIC not in subflags):
bad_flags.append((name, subflags, superflags))
return bad_flags
def capitalize(s: str) -> str:
"""Capitalize the first character of a string."""
if s == '':
return ''
else:
return s[0].upper() + s[1:]
def extract_type(name: str) -> str:
"""If the argument is the name of a method (of form C.m), return
    the type portion in quotes (e.g. "C"). Otherwise, return the string
unmodified.
"""
name = re.sub('^"[a-zA-Z0-9_]+" of ', '', name)
return name
def strip_quotes(s: str) -> str:
"""Strip a double quote at the beginning and end of the string, if any."""
s = re.sub('^"', '', s)
s = re.sub('"$', '', s)
return s
def plural_s(s: Union[int, Sequence[Any]]) -> str:
count = s if isinstance(s, int) else len(s)
if count > 1:
return 's'
else:
return ''
def format_string_list(lst: List[str]) -> str:
assert len(lst) > 0
if len(lst) == 1:
return lst[0]
elif len(lst) <= 5:
return '%s and %s' % (', '.join(lst[:-1]), lst[-1])
else:
return '%s, ... and %s (%i methods suppressed)' % (
', '.join(lst[:2]), lst[-1], len(lst) - 3)
def format_item_name_list(s: Iterable[str]) -> str:
lst = list(s)
if len(lst) <= 5:
return '(' + ', '.join(['"%s"' % name for name in lst]) + ')'
else:
return '(' + ', '.join(['"%s"' % name for name in lst[:5]]) + ', ...)'
def callable_name(type: FunctionLike) -> Optional[str]:
name = type.get_name()
if name is not None and name[0] != '<':
return '"{}"'.format(name).replace(' of ', '" of "')
return name
def for_function(callee: CallableType) -> str:
name = callable_name(callee)
if name is not None:
return ' for {}'.format(name)
return ''
def find_defining_module(modules: Dict[str, MypyFile], typ: CallableType) -> Optional[MypyFile]:
if not typ.definition:
return None
fullname = typ.definition.fullname
if fullname is not None and '.' in fullname:
for i in range(fullname.count('.')):
module_name = fullname.rsplit('.', i + 1)[0]
try:
return modules[module_name]
except KeyError:
pass
assert False, "Couldn't determine module from CallableType"
return None
def temp_message_builder() -> MessageBuilder:
"""Return a message builder usable for throwaway errors (which may not format properly)."""
return MessageBuilder(Errors(), {})
# For hard-coding suggested missing member alternatives.
COMMON_MISTAKES = {
'add': ('append', 'extend'),
} # type: Final[Dict[str, Sequence[str]]]
def best_matches(current: str, options: Iterable[str]) -> List[str]:
ratios = {v: difflib.SequenceMatcher(a=current, b=v).ratio() for v in options}
return sorted((o for o in options if ratios[o] > 0.75),
reverse=True, key=lambda v: (ratios[v], v))
def pretty_seq(args: Sequence[str], conjunction: str) -> str:
quoted = ['"' + a + '"' for a in args]
if len(quoted) == 1:
return quoted[0]
if len(quoted) == 2:
return "{} {} {}".format(quoted[0], conjunction, quoted[1])
last_sep = ", " + conjunction + " "
return ", ".join(quoted[:-1]) + last_sep + quoted[-1]
def append_invariance_notes(notes: List[str], arg_type: Instance,
expected_type: Instance) -> List[str]:
"""Explain that the type is invariant and give notes for how to solve the issue."""
invariant_type = ''
covariant_suggestion = ''
if (arg_type.type.fullname == 'builtins.list' and
expected_type.type.fullname == 'builtins.list' and
is_subtype(arg_type.args[0], expected_type.args[0])):
invariant_type = 'List'
covariant_suggestion = 'Consider using "Sequence" instead, which is covariant'
elif (arg_type.type.fullname == 'builtins.dict' and
expected_type.type.fullname == 'builtins.dict' and
is_same_type(arg_type.args[0], expected_type.args[0]) and
is_subtype(arg_type.args[1], expected_type.args[1])):
invariant_type = 'Dict'
covariant_suggestion = ('Consider using "Mapping" instead, '
'which is covariant in the value type')
if invariant_type and covariant_suggestion:
notes.append(
'"{}" is invariant -- see '.format(invariant_type) +
"https://mypy.readthedocs.io/en/stable/common_issues.html#variance")
notes.append(covariant_suggestion)
return notes
def make_inferred_type_note(context: Context,
subtype: Type,
supertype: Type,
supertype_str: str) -> str:
"""Explain that the user may have forgotten to type a variable.
The user does not expect an error if the inferred container type is the same as the return
type of a function and the argument type(s) are a subtype of the argument type(s) of the
return type. This note suggests that they add a type annotation with the return type instead
of relying on the inferred type.
"""
subtype = get_proper_type(subtype)
supertype = get_proper_type(supertype)
if (isinstance(subtype, Instance) and
isinstance(supertype, Instance) and
subtype.type.fullname == supertype.type.fullname and
subtype.args and
supertype.args and
isinstance(context, ReturnStmt) and
isinstance(context.expr, NameExpr) and
isinstance(context.expr.node, Var) and
context.expr.node.is_inferred):
for subtype_arg, supertype_arg in zip(subtype.args, supertype.args):
if not is_subtype(subtype_arg, supertype_arg):
return ''
var_name = context.expr.name
return 'Perhaps you need a type annotation for "{}"? Suggestion: {}'.format(
var_name, supertype_str)
return ''
def format_key_list(keys: List[str], *, short: bool = False) -> str:
formatted_keys = ['"{}"'.format(key) for key in keys]
td = '' if short else 'TypedDict '
if len(keys) == 0:
return 'no {}keys'.format(td)
elif len(keys) == 1:
return '{}key {}'.format(td, formatted_keys[0])
else:
return '{}keys ({})'.format(td, ', '.join(formatted_keys))
| 46.257356
| 99
| 0.578168
|
510506e2cd8de866cf9ec95b9fec4f828cc6c7b4
| 16,147
|
py
|
Python
|
sdk/python/pulumi_azure_nextgen/network/v20180101/express_route_circuit_peering.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_nextgen/network/v20180101/express_route_circuit_peering.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_nextgen/network/v20180101/express_route_circuit_peering.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['ExpressRouteCircuitPeering']
class ExpressRouteCircuitPeering(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
azure_asn: Optional[pulumi.Input[int]] = None,
circuit_name: Optional[pulumi.Input[str]] = None,
gateway_manager_etag: Optional[pulumi.Input[str]] = None,
id: Optional[pulumi.Input[str]] = None,
ipv6_peering_config: Optional[pulumi.Input[pulumi.InputType['Ipv6ExpressRouteCircuitPeeringConfigArgs']]] = None,
last_modified_by: Optional[pulumi.Input[str]] = None,
microsoft_peering_config: Optional[pulumi.Input[pulumi.InputType['ExpressRouteCircuitPeeringConfigArgs']]] = None,
name: Optional[pulumi.Input[str]] = None,
peer_asn: Optional[pulumi.Input[int]] = None,
peering_name: Optional[pulumi.Input[str]] = None,
peering_type: Optional[pulumi.Input[str]] = None,
primary_azure_port: Optional[pulumi.Input[str]] = None,
primary_peer_address_prefix: Optional[pulumi.Input[str]] = None,
provisioning_state: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
route_filter: Optional[pulumi.Input[pulumi.InputType['RouteFilterArgs']]] = None,
secondary_azure_port: Optional[pulumi.Input[str]] = None,
secondary_peer_address_prefix: Optional[pulumi.Input[str]] = None,
shared_key: Optional[pulumi.Input[str]] = None,
state: Optional[pulumi.Input[str]] = None,
stats: Optional[pulumi.Input[pulumi.InputType['ExpressRouteCircuitStatsArgs']]] = None,
vlan_id: Optional[pulumi.Input[int]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Peering in an ExpressRouteCircuit resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[int] azure_asn: The Azure ASN.
:param pulumi.Input[str] circuit_name: The name of the express route circuit.
:param pulumi.Input[str] gateway_manager_etag: The GatewayManager Etag.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[pulumi.InputType['Ipv6ExpressRouteCircuitPeeringConfigArgs']] ipv6_peering_config: The IPv6 peering configuration.
:param pulumi.Input[str] last_modified_by: Gets whether the provider or the customer last modified the peering.
:param pulumi.Input[pulumi.InputType['ExpressRouteCircuitPeeringConfigArgs']] microsoft_peering_config: The Microsoft peering configuration.
:param pulumi.Input[str] name: Gets name of the resource that is unique within a resource group. This name can be used to access the resource.
:param pulumi.Input[int] peer_asn: The peer ASN.
:param pulumi.Input[str] peering_name: The name of the peering.
:param pulumi.Input[str] peering_type: The PeeringType. Possible values are: 'AzurePublicPeering', 'AzurePrivatePeering', and 'MicrosoftPeering'.
:param pulumi.Input[str] primary_azure_port: The primary port.
:param pulumi.Input[str] primary_peer_address_prefix: The primary address prefix.
:param pulumi.Input[str] provisioning_state: Gets the provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[pulumi.InputType['RouteFilterArgs']] route_filter: The reference of the RouteFilter resource.
:param pulumi.Input[str] secondary_azure_port: The secondary port.
:param pulumi.Input[str] secondary_peer_address_prefix: The secondary address prefix.
:param pulumi.Input[str] shared_key: The shared key.
:param pulumi.Input[str] state: The state of peering. Possible values are: 'Disabled' and 'Enabled'
:param pulumi.Input[pulumi.InputType['ExpressRouteCircuitStatsArgs']] stats: Gets peering stats.
:param pulumi.Input[int] vlan_id: The VLAN ID.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['azure_asn'] = azure_asn
if circuit_name is None:
raise TypeError("Missing required property 'circuit_name'")
__props__['circuit_name'] = circuit_name
__props__['gateway_manager_etag'] = gateway_manager_etag
__props__['id'] = id
__props__['ipv6_peering_config'] = ipv6_peering_config
__props__['last_modified_by'] = last_modified_by
__props__['microsoft_peering_config'] = microsoft_peering_config
__props__['name'] = name
__props__['peer_asn'] = peer_asn
if peering_name is None:
raise TypeError("Missing required property 'peering_name'")
__props__['peering_name'] = peering_name
__props__['peering_type'] = peering_type
__props__['primary_azure_port'] = primary_azure_port
__props__['primary_peer_address_prefix'] = primary_peer_address_prefix
__props__['provisioning_state'] = provisioning_state
if resource_group_name is None:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['route_filter'] = route_filter
__props__['secondary_azure_port'] = secondary_azure_port
__props__['secondary_peer_address_prefix'] = secondary_peer_address_prefix
__props__['shared_key'] = shared_key
__props__['state'] = state
__props__['stats'] = stats
__props__['vlan_id'] = vlan_id
__props__['etag'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network/latest:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20150501preview:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20150615:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20160330:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20160601:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20160901:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20161201:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20170301:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20170601:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20170801:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20170901:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20171001:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20171101:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20180201:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20180401:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20180601:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20180701:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20180801:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20181001:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20181101:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20181201:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20190201:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20190401:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20190601:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20190701:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20190801:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20190901:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20191101:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20191201:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20200301:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20200401:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20200501:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20200601:ExpressRouteCircuitPeering"), pulumi.Alias(type_="azure-nextgen:network/v20200701:ExpressRouteCircuitPeering")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(ExpressRouteCircuitPeering, __self__).__init__(
'azure-nextgen:network/v20180101:ExpressRouteCircuitPeering',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'ExpressRouteCircuitPeering':
"""
Get an existing ExpressRouteCircuitPeering resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return ExpressRouteCircuitPeering(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="azureASN")
def azure_asn(self) -> pulumi.Output[Optional[int]]:
"""
The Azure ASN.
"""
return pulumi.get(self, "azure_asn")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter(name="gatewayManagerEtag")
def gateway_manager_etag(self) -> pulumi.Output[Optional[str]]:
"""
The GatewayManager Etag.
"""
return pulumi.get(self, "gateway_manager_etag")
@property
@pulumi.getter(name="ipv6PeeringConfig")
def ipv6_peering_config(self) -> pulumi.Output[Optional['outputs.Ipv6ExpressRouteCircuitPeeringConfigResponse']]:
"""
The IPv6 peering configuration.
"""
return pulumi.get(self, "ipv6_peering_config")
@property
@pulumi.getter(name="lastModifiedBy")
def last_modified_by(self) -> pulumi.Output[Optional[str]]:
"""
Gets whether the provider or the customer last modified the peering.
"""
return pulumi.get(self, "last_modified_by")
@property
@pulumi.getter(name="microsoftPeeringConfig")
def microsoft_peering_config(self) -> pulumi.Output[Optional['outputs.ExpressRouteCircuitPeeringConfigResponse']]:
"""
The Microsoft peering configuration.
"""
return pulumi.get(self, "microsoft_peering_config")
@property
@pulumi.getter
def name(self) -> pulumi.Output[Optional[str]]:
"""
Gets name of the resource that is unique within a resource group. This name can be used to access the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="peerASN")
def peer_asn(self) -> pulumi.Output[Optional[int]]:
"""
The peer ASN.
"""
return pulumi.get(self, "peer_asn")
@property
@pulumi.getter(name="peeringType")
def peering_type(self) -> pulumi.Output[Optional[str]]:
"""
The PeeringType. Possible values are: 'AzurePublicPeering', 'AzurePrivatePeering', and 'MicrosoftPeering'.
"""
return pulumi.get(self, "peering_type")
@property
@pulumi.getter(name="primaryAzurePort")
def primary_azure_port(self) -> pulumi.Output[Optional[str]]:
"""
The primary port.
"""
return pulumi.get(self, "primary_azure_port")
@property
@pulumi.getter(name="primaryPeerAddressPrefix")
def primary_peer_address_prefix(self) -> pulumi.Output[Optional[str]]:
"""
The primary address prefix.
"""
return pulumi.get(self, "primary_peer_address_prefix")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[Optional[str]]:
"""
Gets the provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="routeFilter")
def route_filter(self) -> pulumi.Output[Optional['outputs.RouteFilterResponse']]:
"""
The reference of the RouteFilter resource.
"""
return pulumi.get(self, "route_filter")
@property
@pulumi.getter(name="secondaryAzurePort")
def secondary_azure_port(self) -> pulumi.Output[Optional[str]]:
"""
The secondary port.
"""
return pulumi.get(self, "secondary_azure_port")
@property
@pulumi.getter(name="secondaryPeerAddressPrefix")
def secondary_peer_address_prefix(self) -> pulumi.Output[Optional[str]]:
"""
The secondary address prefix.
"""
return pulumi.get(self, "secondary_peer_address_prefix")
@property
@pulumi.getter(name="sharedKey")
def shared_key(self) -> pulumi.Output[Optional[str]]:
"""
The shared key.
"""
return pulumi.get(self, "shared_key")
@property
@pulumi.getter
def state(self) -> pulumi.Output[Optional[str]]:
"""
The state of peering. Possible values are: 'Disabled' and 'Enabled'
"""
return pulumi.get(self, "state")
@property
@pulumi.getter
def stats(self) -> pulumi.Output[Optional['outputs.ExpressRouteCircuitStatsResponse']]:
"""
Gets peering stats.
"""
return pulumi.get(self, "stats")
@property
@pulumi.getter(name="vlanId")
def vlan_id(self) -> pulumi.Output[Optional[int]]:
"""
The VLAN ID.
"""
return pulumi.get(self, "vlan_id")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 53.290429
| 2,845
| 0.687682
|
b4a594e40efdd0ec7a573a58cf3f31399d23ba05
| 5,624
|
py
|
Python
|
google-cloud-sdk/lib/third_party/pyu2f/hardware.py
|
bopopescu/searchparty
|
afdc2805cb1b77bd5ac9fdd1a76217f4841f0ea6
|
[
"Apache-2.0"
] | 1
|
2017-11-29T18:52:27.000Z
|
2017-11-29T18:52:27.000Z
|
google-cloud-sdk/lib/third_party/pyu2f/hardware.py
|
bopopescu/searchparty
|
afdc2805cb1b77bd5ac9fdd1a76217f4841f0ea6
|
[
"Apache-2.0"
] | null | null | null |
google-cloud-sdk/lib/third_party/pyu2f/hardware.py
|
bopopescu/searchparty
|
afdc2805cb1b77bd5ac9fdd1a76217f4841f0ea6
|
[
"Apache-2.0"
] | 3
|
2017-07-27T18:44:13.000Z
|
2020-07-25T17:48:53.000Z
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module implements the low level device API.
This module exposes a low level SecurityKey class,
representing the physical security key device.
"""
import logging
from pyu2f import apdu
from pyu2f import errors
class SecurityKey(object):
"""Low level api for talking to a security key.
This class implements the low level api specified in FIDO
U2F for talking to a security key.
"""
def __init__(self, transport):
self.transport = transport
self.use_legacy_format = False
self.logger = logging.getLogger('pyu2f.hardware')
def CmdRegister(self, challenge_param, app_param):
"""Register security key.
Ask the security key to register with a particular origin & client.
Args:
challenge_param: Arbitrary 32 byte challenge string.
      app_param: Arbitrary 32 byte application parameter.
Returns:
A binary structure containing the key handle, attestation, and a
signature over that by the attestation key. The precise format
is dictated by the FIDO U2F specs.
Raises:
      TUPRequiredError: A Test of User Presence is required to proceed.
ApduError: Something went wrong on the device.
"""
self.logger.debug('CmdRegister')
if len(challenge_param) != 32 or len(app_param) != 32:
raise errors.InvalidRequestError()
body = bytearray(challenge_param + app_param)
response = self.InternalSendApdu(apdu.CommandApdu(
0,
apdu.CMD_REGISTER,
0x03, # Per the U2F reference code tests
0x00,
body))
response.CheckSuccessOrRaise()
return response.body
def CmdAuthenticate(self,
challenge_param,
app_param,
key_handle,
check_only=False):
"""Attempt to obtain an authentication signature.
Ask the security key to sign a challenge for a particular key handle
in order to authenticate the user.
Args:
challenge_param: SHA-256 hash of client_data object.
app_param: SHA-256 hash of the app id.
key_handle: The key handle to use to issue the signature
check_only: If true, only check if key_handle is valid.
Returns:
A binary structure containing the key handle, attestation, and a
signature over that by the attestation key. The precise format
is dictated by the FIDO U2F specs.
Raises:
      TUPRequiredError: If check_only is False, a Test of User Presence
is required to proceed. If check_only is True, this means
the key_handle is valid.
InvalidKeyHandleError: The key_handle is not valid for this device.
ApduError: Something else went wrong on the device.
"""
self.logger.debug('CmdAuthenticate')
if len(challenge_param) != 32 or len(app_param) != 32:
raise errors.InvalidRequestError()
control = 0x07 if check_only else 0x03
body = bytearray(challenge_param + app_param + bytearray([len(key_handle)])
+ key_handle)
response = self.InternalSendApdu(apdu.CommandApdu(
0, apdu.CMD_AUTH, control, 0x00, body))
response.CheckSuccessOrRaise()
return response.body
def CmdVersion(self):
"""Obtain the version of the device and test transport format.
Obtains the version of the device and determines whether to use ISO
    7816-4 or the legacy U2F variant. This function should be called at least once
before CmdAuthenticate or CmdRegister to make sure the object is using the
proper transport for the device.
Returns:
The version of the U2F protocol in use.
"""
self.logger.debug('CmdVersion')
response = self.InternalSendApdu(apdu.CommandApdu(
0, apdu.CMD_VERSION, 0x00, 0x00))
if not response.IsSuccess():
raise errors.ApduError(response.sw1, response.sw2)
return response.body
def CmdBlink(self, time):
self.logger.debug('CmdBlink')
self.transport.SendBlink(time)
def CmdWink(self):
self.logger.debug('CmdWink')
self.transport.SendWink()
def CmdPing(self, data):
self.logger.debug('CmdPing')
return self.transport.SendPing(data)
def InternalSendApdu(self, apdu_to_send):
"""Send an APDU to the device.
Sends an APDU to the device, possibly falling back to the legacy
encoding format that is not ISO7816-4 compatible.
Args:
apdu_to_send: The CommandApdu object to send
Returns:
      The ResponseApdu object constructed out of the device's reply.
"""
response = None
if not self.use_legacy_format:
response = apdu.ResponseApdu(self.transport.SendMsgBytes(
apdu_to_send.ToByteArray()))
if response.sw1 == 0x67 and response.sw2 == 0x00:
# If we failed using the standard format, retry with the
# legacy format.
self.use_legacy_format = True
return self.InternalSendApdu(apdu_to_send)
else:
response = apdu.ResponseApdu(self.transport.SendMsgBytes(
apdu_to_send.ToLegacyU2FByteArray()))
return response
| 33.278107
| 79
| 0.697546
|
dcf4a28fd31e9ff4c3d60810d9a7e05a0697b09a
| 4,296
|
py
|
Python
|
py_algo/stacks_queues/competition/monk_prisoner_azkaban.py
|
Sk0uF/Algorithms
|
236cc5b056ce2637d5d947c5fc1e3367cde886bf
|
[
"MIT"
] | 1
|
2021-07-05T15:39:04.000Z
|
2021-07-05T15:39:04.000Z
|
py_algo/stacks_queues/competition/monk_prisoner_azkaban.py
|
Sk0uF/Algorithms
|
236cc5b056ce2637d5d947c5fc1e3367cde886bf
|
[
"MIT"
] | null | null | null |
py_algo/stacks_queues/competition/monk_prisoner_azkaban.py
|
Sk0uF/Algorithms
|
236cc5b056ce2637d5d947c5fc1e3367cde886bf
|
[
"MIT"
] | 1
|
2021-09-02T21:31:34.000Z
|
2021-09-02T21:31:34.000Z
|
"""
Codemonk link: https://www.hackerearth.com/problem/algorithm/monk-and-prisoner-of-azkaban-0f96c4a2/
Monk's wizard friend Harry Potter is excited to see his Dad fight Dementors and rescue him and his Godfather Sirius
Black. Meanwhile their friend Hermione is stuck on some silly arrays problem. Harry does not have time for all this, so
he asked Monk to solve that problem for Hermione, so that they can go. The problem is: given an array A having N integers,
for each i (1<=i<=N), find x+y, where x is the largest number less than i such that A[x] > A[i] and y is the smallest
number greater than i such that A[y] > A[i]. If there is no x < i such that A[x] > A[i] then x is -1. The same holds for
y.
Input - Output:
First line consists of a single integer denoting N.
Second line consists of N space separated integers denoting the array A.
Print N space separated integers, denoting x+y.
Sample input:
5
5 4 1 3 2
Sample Output:
-2 0 6 1 3
"""
"""
We can split the problem into two independent sub-problems: one for finding x and one for finding y. To find x, we loop
through the array and, for each element, find the closest element to its left that is bigger than the current element.
We can find this solution in linear time, more precisely in O(2*N). To explain the logic, let's walk through an example.
5 4 1 3 2 10
1) Number 5 is the first element and has no number bigger than itself on its left. Ans = -1.
2) We compare 4 with the previous element, in this case 5. It is smaller than 5, so the Ans = 1 (index).
3) We compare 1 with the previous element, in this case 4. It is smaller than 4, so the Ans = 2 (index).
4) We compare 3 with the previous element, in this case 1. It is not smaller than 1, so we go back to the number that
was bigger than 1, which is 4. 3 is smaller than 4, so the Ans = 2 (index).
5) We compare 2 with the previous element, in this case 3. It is smaller than 3, so the Ans = 4 (index).
6) We compare 10 with the previous element, in this case 2. It is not smaller than 2, so we go back to the number that
was bigger than 2 (that is 3), then to the number that was bigger than 3 (that is 4), then to the number that was
bigger than 4 (that is 5). 10 is bigger than 5 as well, so the Ans = -1.
Notice that at the 6th step we skipped number 1: if 10 > 3, there is no way it is smaller than an element that is
itself smaller than 3. If we had a 7th step with the number 11, we would immediately know that there is no element
bigger than 11. At each step we compare with the previous candidate and sometimes pop back several steps, but the
total number of pops across the whole pass is bounded by the length of the array.
If we repeat the same process backwards, finding for each element the closest bigger element to its right, we can find
y as well.
This technique can be implemented in more than one way. See the comments in the code for this particular implementation.
Final complexity: O(2*N + 2*N) => O(N)
"""
n = int(input())
array = list(map(int, input().rstrip().split()))
# Stack that holds the indices of the previous bigger elements for each position.
# The left array holds the answer (x) for each index.
stack = []
left = []
for i in range(0, n):
while stack:
k = stack[-1]
        # While the stack has elements, compare the current element
        # with the previous candidate. If it's smaller, we found the answer.
        # If not, keep popping from the stack until a bigger element
        # is found (if there is one).
if array[i] < array[k]:
left.append(k+1)
stack.append(i)
break
else:
stack.pop()
    # If the stack is empty, there is no bigger
    # element for this particular index. Just push
    # the current index for the next check.
if not stack:
left.append(-1)
stack.append(i)
# Repeat the same process backwards.
right = []
stack = []
for i in range(n-1, -1, -1):
while stack:
k = stack[-1]
if array[i] < array[k]:
right.append(k+1)
stack.append(i)
break
else:
stack.pop()
if not stack:
right.append(-1)
stack.append(i)
final = []
for i in range(len(left)):
final.append(left[i] + right[len(right)-1-i])
print(*final)
| 39.777778
| 120
| 0.687616
|
37706ff22eca1cec9cbbb90b05f14e936b31b672
| 68,727
|
py
|
Python
|
src/olympia/lib/settings_base.py
|
petercpg/addons-server
|
892e442ee61fe2592f19969357ed5d40a9e9fb2e
|
[
"BSD-3-Clause"
] | null | null | null |
src/olympia/lib/settings_base.py
|
petercpg/addons-server
|
892e442ee61fe2592f19969357ed5d40a9e9fb2e
|
[
"BSD-3-Clause"
] | null | null | null |
src/olympia/lib/settings_base.py
|
petercpg/addons-server
|
892e442ee61fe2592f19969357ed5d40a9e9fb2e
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# Django settings for addons-server project.
import environ
import json
import logging
import os
import socket
from datetime import datetime
import raven
from kombu import Queue
import olympia.core.logger
env = environ.Env()
ENVIRON_SETTINGS_FILE_PATH = '/etc/olympia/settings.env'
if os.path.exists(ENVIRON_SETTINGS_FILE_PATH):
env.read_env(env_file=ENVIRON_SETTINGS_FILE_PATH)
ALLOWED_HOSTS = [
'.allizom.org',
'.mozilla.org',
'.mozilla.com',
'.mozilla.net',
'.mozaws.net',
]
# jingo-minify settings
CACHEBUST_IMGS = True
try:
# If we have build ids available, we'll grab them here and add them to our
# CACHE_KEY_PREFIX. This will let us not have to flush memcache during
# updates and it will let us preload data into it before a production push.
from build import BUILD_ID_CSS, BUILD_ID_JS
build_id = "%s%s" % (BUILD_ID_CSS[:2], BUILD_ID_JS[:2])
except ImportError:
build_id = ""
# jingo-minify: Style sheet media attribute default
CSS_MEDIA_DEFAULT = 'all'
# Make filepaths relative to the root of olympia.
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
ROOT = os.path.join(BASE_DIR, '..', '..')
def path(*folders):
return os.path.join(ROOT, *folders)
DEBUG = False
DEBUG_TOOLBAR_CONFIG = {
# Deactivate django debug toolbar by default.
'SHOW_TOOLBAR_CALLBACK': lambda request: DEBUG,
}
# Ensure that exceptions aren't re-raised.
DEBUG_PROPAGATE_EXCEPTIONS = False
SILENCED_SYSTEM_CHECKS = (
# Recommendation to use OneToOneField instead of ForeignKey(unique=True)
# but our translations are the way they are...
'fields.W342',
)
# LESS CSS OPTIONS (Debug only).
LESS_PREPROCESS = True # Compile LESS with Node, rather than client-side JS?
LESS_LIVE_REFRESH = False # Refresh the CSS on save?
LESS_BIN = env(
'LESS_BIN', default='node_modules/less/bin/lessc')
# Path to cleancss (our CSS minifier).
CLEANCSS_BIN = env(
'CLEANCSS_BIN', default='node_modules/less/bin/lessc')
# Path to uglifyjs (our JS minifier).
# Set as None to use YUI instead (at your risk).
UGLIFY_BIN = env(
'UGLIFY_BIN', default='node_modules/uglify-js/bin/uglifyjs')
# rsvg-convert is used to save our svg static theme previews to png
RSVG_CONVERT_BIN = env('RSVG_CONVERT_BIN', default='rsvg-convert')
# Path to pngcrush (to optimize the PNGs uploaded by developers).
PNGCRUSH_BIN = env('PNGCRUSH_BIN', default='pngcrush')
# Path to our addons-linter binary
ADDONS_LINTER_BIN = env(
'ADDONS_LINTER_BIN',
default='node_modules/addons-linter/bin/addons-linter')
DELETION_EMAIL = 'amo-notifications+deletion@mozilla.org'
THEMES_EMAIL = 'theme-reviews@mozilla.org'
DRF_API_VERSIONS = ['auth', 'v3', 'v4', 'v5']
DRF_API_REGEX = r'^/?api/(?:auth|v3|v4|v5)/'
# Add Access-Control-Allow-Origin: * header for the new API with
# django-cors-headers.
CORS_ORIGIN_ALLOW_ALL = True
# Exclude the `accounts/session` endpoint, see:
# https://github.com/mozilla/addons-server/issues/11100
CORS_URLS_REGEX = r'{}(?!accounts/session/)'.format(DRF_API_REGEX)
def get_db_config(environ_var, atomic_requests=True):
values = env.db(
var=environ_var,
default='mysql://root:@localhost/olympia')
values.update({
# Run all views in a transaction unless they are decorated not to.
# `atomic_requests` should be `False` for database replicas where no
# write operations will ever happen.
'ATOMIC_REQUESTS': atomic_requests,
# Pool our database connections up for 300 seconds
'CONN_MAX_AGE': 300,
'ENGINE': 'olympia.core.db.mysql',
'OPTIONS': {
'charset': 'utf8mb4',
'sql_mode': 'STRICT_ALL_TABLES',
'isolation_level': 'read committed'
},
'TEST': {
'CHARSET': 'utf8mb4',
'COLLATION': 'utf8mb4_general_ci'
},
})
return values
DATABASES = {
'default': get_db_config('DATABASES_DEFAULT_URL'),
}
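# Illustrative only: env.db() above parses a database URL from the environment,
# so a deployment would typically set something like
#   DATABASES_DEFAULT_URL=mysql://olympia:secret@db.internal:3306/olympia
# (user, password, host, port and database name here are placeholder values).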
# A database to be used by the services scripts, which do not use Django.
# Please note that this is not a full Django database connection,
# so the set of supported values is limited. By default we use
# the same connection as 'default', but that changes in prod/dev/stage.
SERVICES_DATABASE = get_db_config('DATABASES_DEFAULT_URL')
DATABASE_ROUTERS = ('multidb.PinningReplicaRouter',)
# Put the aliases for your slave databases in this list.
REPLICA_DATABASES = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'UTC'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-US'
# Accepted locales / languages.
from olympia.core.languages import LANGUAGE_MAPPING # noqa
AMO_LANGUAGES = LANGUAGE_MAPPING.keys()
# Bidirectional languages.
# Locales in here *must* be in `AMO_LANGUAGES` too.
LANGUAGES_BIDI = ('ar', 'fa', 'he', 'ur')
# Explicit conversion of a shorter language code into a more specific one.
SHORTER_LANGUAGES = {
'en': 'en-US', 'ga': 'ga-IE', 'pt': 'pt-PT', 'sv': 'sv-SE', 'zh': 'zh-CN'
}
# Override Django's built-in with our native names
LANGUAGES = {
locale.lower(): value['native']
for locale, value in LANGUAGE_MAPPING.items()}
LANGUAGE_URL_MAP = {
locale.lower(): locale
for locale in AMO_LANGUAGES}
LOCALE_PATHS = (
path('locale'),
)
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# The host currently running the site. Only use this in code for good reason;
# the site is designed to run on a cluster and should continue to support that
HOSTNAME = socket.gethostname()
# The front end domain of the site. If you're not running on a cluster this
# might be the same as HOSTNAME but don't depend on that. Use this when you
# need the real domain.
DOMAIN = HOSTNAME
# Full base URL for your main site including protocol. No trailing slash.
# Example: https://addons.mozilla.org
SITE_URL = 'http://%s' % DOMAIN
# The base URL for the external user-facing frontend. Only really useful for
# the internal admin instances of addons-server that don't run addons-frontend.
EXTERNAL_SITE_URL = env('EXTERNAL_SITE_URL', default=SITE_URL)
# Domain of the services site. This is where your API, and in-product pages
# live.
SERVICES_DOMAIN = 'services.%s' % DOMAIN
# Full URL to your API service. No trailing slash.
# Example: https://services.addons.mozilla.org
SERVICES_URL = 'http://%s' % SERVICES_DOMAIN
# URL of the code-manager site, see:
# https://github.com/mozilla/addons-code-manager
CODE_MANAGER_URL = 'https://code.{}'.format(DOMAIN)
# Filter IP addresses of allowed clients that can post email through the API.
ALLOWED_CLIENTS_EMAIL_API = env.list('ALLOWED_CLIENTS_EMAIL_API', default=[])
# Auth token required to authorize inbound email.
INBOUND_EMAIL_SECRET_KEY = env('INBOUND_EMAIL_SECRET_KEY', default='')
# Validation key we need to send in POST response.
INBOUND_EMAIL_VALIDATION_KEY = env('INBOUND_EMAIL_VALIDATION_KEY', default='')
# Domain emails should be sent to.
INBOUND_EMAIL_DOMAIN = env('INBOUND_EMAIL_DOMAIN', default=DOMAIN)
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = '/user-media/'
# Tarballs in DUMPED_APPS_PATH deleted 30 days after they have been written.
DUMPED_APPS_DAYS_DELETE = 3600 * 24 * 30
# Tarballs in DUMPED_USERS_PATH deleted 30 days after they have been written.
DUMPED_USERS_DAYS_DELETE = 3600 * 24 * 30
# path that isn't just one /, and doesn't require any locale or app.
SUPPORTED_NONAPPS_NONLOCALES_REGEX = DRF_API_REGEX
# paths that don't require an app prefix
# This needs to be kept in sync with addons-frontend's
# validClientAppUrlExceptions
# https://github.com/mozilla/addons-frontend/blob/master/config/default-amo.js
SUPPORTED_NONAPPS = (
'about', 'admin', 'apps', 'contribute.json',
'developer_agreement', 'developers', 'editors',
'review_guide', 'google1f3e37b7351799a5.html',
'google231a41e803e464e9.html', 'reviewers', 'robots.txt', 'statistics',
'services', 'static', 'user-media', '__version__',
)
DEFAULT_APP = 'firefox'
# paths that don't require a locale prefix
# This needs to be kept in sync with addons-frontend's validLocaleUrlExceptions
# https://github.com/mozilla/addons-frontend/blob/master/config/default-amo.js
SUPPORTED_NONLOCALES = (
'contribute.json', 'google1f3e37b7351799a5.html',
'google231a41e803e464e9.html', 'robots.txt', 'services', 'downloads',
'static', 'user-media', '__version__',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = env(
'SECRET_KEY',
default='this-is-a-dummy-key-and-its-overridden-for-prod-servers')
# Templates configuration.
# List of path patterns for which we should be using Django Template Language.
JINJA_EXCLUDE_TEMPLATE_PATHS = (
# All emails should be processed with Django for consistency.
r'^.*\/emails\/',
# ^admin\/ covers most django admin templates, since their path should
# follow /admin/<app>/<model>/*
r'^admin\/',
# Exception for admin stuff that doesn't follow admin/<app> convention.
r'^addons\/admin\/',
# Blocklist is also admin-only, but with custom paths & templates.
r'^blocklist\/',
# Third-party apps + django.
r'debug_toolbar',
r'^rangefilter\/',
r'^registration\/',
)
TEMPLATES = [
{
'BACKEND': 'django_jinja.backend.Jinja2',
'NAME': 'jinja2',
'APP_DIRS': True,
'DIRS': (
path('media', 'docs'),
path('src/olympia/templates'),
),
'OPTIONS': {
# http://jinja.pocoo.org/docs/dev/extensions/#newstyle-gettext
'newstyle_gettext': True,
# Match our regular .html and .txt file endings except
# for the admin and a handful of other paths
'match_extension': None,
'match_regex': r'^(?!({paths})).*'.format(
paths='|'.join(JINJA_EXCLUDE_TEMPLATE_PATHS)),
'context_processors': (
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.media',
'django.template.context_processors.request',
'django.contrib.messages.context_processors.messages',
'olympia.amo.context_processors.app',
'olympia.amo.context_processors.i18n',
'olympia.amo.context_processors.global_settings',
'olympia.amo.context_processors.static_url',
'olympia.lib.jingo_minify_helpers.build_ids',
),
'extensions': (
'jinja2.ext.autoescape',
'jinja2.ext.do',
'jinja2.ext.loopcontrols',
'jinja2.ext.with_',
'django_jinja.builtins.extensions.CsrfExtension',
'django_jinja.builtins.extensions.DjangoFiltersExtension',
'django_jinja.builtins.extensions.StaticFilesExtension',
'django_jinja.builtins.extensions.TimezoneExtension',
'django_jinja.builtins.extensions.UrlsExtension',
'puente.ext.i18n',
'waffle.jinja.WaffleExtension',
),
'finalize': lambda x: x if x is not None else '',
'translation_engine': 'django.utils.translation',
'autoescape': True,
'trim_blocks': True,
}
},
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': (
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.media',
'django.template.context_processors.request',
'django.contrib.messages.context_processors.messages',
),
}
},
]
X_FRAME_OPTIONS = 'DENY'
SECURE_BROWSER_XSS_FILTER = True
SECURE_CONTENT_TYPE_NOSNIFF = True
SECURE_HSTS_SECONDS = 31536000
# Prefer using `X-Forwarded-Port` header instead of `Port` header.
# We are behind both, the ELB and nginx which forwards requests to our
# uwsgi app.
# Our current request flow is this:
# Request -> ELB (terminates SSL) -> Nginx -> Uwsgi -> addons-server
#
# The ELB terminates SSL and properly sets `X-Forwarded-Port` header
# as well as `X-Forwarded-Proto` and others.
# Nginx on the other hand runs on port 81 and thus sets `Port` to be
# 81 but to make CSRF detection and other mechanisms work properly
# we need to know that we're running on either port 80 or 443, or do something
# with SECURE_PROXY_SSL_HEADER but we shouldn't if we can avoid that.
# So, let's simply grab the properly set `X-Forwarded-Port` header.
# https://github.com/mozilla/addons-server/issues/8835#issuecomment-405340432
#
# This is also backwards compatible for our local setup since Django falls back
# to using `Port` if `X-Forwarded-Port` isn't set.
USE_X_FORWARDED_PORT = True
MIDDLEWARE = (
# Our middleware to make safe requests non-atomic needs to be at the top.
'olympia.amo.middleware.NonAtomicRequestsForSafeHttpMethodsMiddleware',
# Test if it's an API request first so later middlewares don't need to.
'olympia.api.middleware.IdentifyAPIRequestMiddleware',
# Gzip (for API only) middleware needs to be executed after every
# modification to the response, so it's placed at the top of the list.
'olympia.api.middleware.GZipMiddlewareForAPIOnly',
# Statsd and logging come first to get timings etc. Munging REMOTE_ADDR
# must come before middlewares potentially using REMOTE_ADDR, so it's
# also up there.
'django_statsd.middleware.GraphiteRequestTimingMiddleware',
'django_statsd.middleware.GraphiteMiddleware',
'olympia.amo.middleware.SetRemoteAddrFromForwardedFor',
# AMO URL middleware is as high as possible to get locale/app aware URLs.
'olympia.amo.middleware.LocaleAndAppURLMiddleware',
'olympia.amo.middleware.RemoveSlashMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'multidb.middleware.PinningRouterMiddleware',
'waffle.middleware.WaffleMiddleware',
# CSP and CORS need to come before CommonMiddleware because they might
# need to add headers to 304 responses returned by CommonMiddleware.
'csp.middleware.CSPMiddleware',
'corsheaders.middleware.CorsMiddleware',
# Enable conditional processing, e.g ETags.
'django.middleware.http.ConditionalGetMiddleware',
'olympia.amo.middleware.CommonMiddleware',
'olympia.amo.middleware.NoVarySessionMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'olympia.amo.middleware.AuthenticationMiddlewareWithoutAPI',
# Our middleware that adds additional information for the user
# and API about our read-only status.
'olympia.amo.middleware.ReadOnlyMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
# This should come after AuthenticationMiddlewareWithoutAPI (to get the
# current user) and after SetRemoteAddrFromForwardedFor (to get the correct
# IP).
'olympia.access.middleware.UserAndAddrMiddleware',
'olympia.amo.middleware.ScrubRequestOnException',
'olympia.amo.middleware.RequestIdMiddleware',
)
# Auth
AUTH_USER_MODEL = 'users.UserProfile'
# Override this in the site settings.
ROOT_URLCONF = 'olympia.urls'
INSTALLED_APPS = (
# The translations app *must* be the very first. This isn't necessarily
# relevant for daily business but very important for running initial
# migrations during our tests and local setup.
    # Foreign keys to the `translations` table point to `id`, which isn't
    # unique on its own but has an (id, locale) unique_together index.
    # If `translations` came after `olympia.addons`, for example, Django
    # would first create the translations table, then the addons table,
    # then add the foreign key, and only after that add the unique_together
    # index to `translations`. MySQL needs that index to be created first
    # though, otherwise you'll run into
    # `ERROR 1215 (HY000): Cannot add foreign key constraint` errors.
'olympia.translations',
'olympia.core',
'olympia.amo', # amo comes first so it always takes precedence.
'olympia.abuse',
'olympia.access',
'olympia.accounts',
'olympia.activity',
'olympia.addons',
'olympia.api',
'olympia.applications',
'olympia.bandwagon',
'olympia.blocklist',
'olympia.browse',
'olympia.devhub',
'olympia.discovery',
'olympia.files',
'olympia.git',
'olympia.hero',
'olympia.lib.es',
'olympia.lib.akismet',
'olympia.pages',
'olympia.ratings',
'olympia.reviewers',
'olympia.scanners',
'olympia.search',
'olympia.stats',
'olympia.tags',
'olympia.users',
'olympia.versions',
'olympia.yara',
'olympia.zadmin',
# Third party apps
'csp',
'aesfield',
'django_extensions',
'raven.contrib.django',
'rest_framework',
'waffle',
'django_jinja',
'puente',
'rangefilter',
# Django contrib apps
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.staticfiles',
# Has to load after auth
'django_statsd',
)
# These need to point to prod, because that's where the database lives. You can
# change it locally to test the extraction process, but be careful not to
# accidentally nuke translations when doing that!
DISCOVERY_EDITORIAL_CONTENT_API = (
'https://addons.mozilla.org/api/v4/discovery/editorial/')
SECONDARY_HERO_EDITORIAL_CONTENT_API = (
'https://addons.mozilla.org/api/v4/hero/secondary/?all=true')
# Filename where the strings will be stored. Used in puente config below.
EDITORIAL_CONTENT_FILENAME = 'src/olympia/discovery/strings.jinja2'
# Tells the extract script what files to look for l10n in and what function
# handles the extraction. The puente library expects this.
PUENTE = {
'BASE_DIR': ROOT,
# Tells the extract script what files to look for l10n in and what function
# handles the extraction.
'DOMAIN_METHODS': {
'django': [
('src/olympia/**.py', 'python'),
# Extract the generated file containing editorial content for all
# disco pane recommendations using jinja2 parser. It's not a real
# template, but it uses jinja2 syntax for convenience, hence why
# it's not in templates/ with a .html extension.
(EDITORIAL_CONTENT_FILENAME, 'jinja2'),
# Make sure we're parsing django-admin & email templates with the
# django template extractor. This should match the behavior of
# JINJA_EXCLUDE_TEMPLATE_PATHS
(
'src/olympia/**/templates/**/emails/**.*',
'django_babel.extract.extract_django'
),
(
'**/templates/admin/**.html',
'django_babel.extract.extract_django'
),
(
'**/templates/addons/admin/**.html',
'django_babel.extract.extract_django'
),
(
'**/templates/blocklist/**.html',
'django_babel.extract.extract_django'
),
('src/olympia/**/templates/**.html', 'jinja2'),
],
'djangojs': [
# We can't say **.js because that would dive into mochikit
# and timeplot and all the other baggage we're carrying.
# Timeplot, in particular, crashes the extractor with bad
# unicode data.
('static/js/**-all.js', 'ignore'),
('static/js/**-min.js', 'ignore'),
('static/js/*.js', 'javascript'),
('static/js/amo2009/**.js', 'javascript'),
('static/js/common/**.js', 'javascript'),
('static/js/impala/**.js', 'javascript'),
('static/js/zamboni/**.js', 'javascript'),
],
},
}
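# The extraction itself is normally driven by puente's management commands
# (e.g. `manage.py extract`); the exact command names are an assumption here,
# see the puente docs for the actual workflow.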
# Bundles is a dictionary of two dictionaries, css and js, which list css files
# and js files that can be bundled together by the minify app.
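# Templates typically reference these bundle names through the jingo-minify
# css()/js() helpers, e.g. css('zamboni/css'); the helper names are an
# assumption here, see olympia.lib.jingo_minify_helpers for the actual ones.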
MINIFY_BUNDLES = {
'css': {
'restyle/css': (
'css/restyle/restyle.less',
),
        # CSS files for our DevHub (currently only required for the
        # new landing page)
'devhub/new-landing/css': (
'css/devhub/new-landing/base.less',
),
# Responsive error page styling.
'errors/css': (
'css/errors/base.less',
),
# CSS files common to the entire site.
'zamboni/css': (
'css/legacy/main.css',
'css/legacy/main-mozilla.css',
'css/legacy/jquery-lightbox.css',
'css/zamboni/zamboni.css',
'css/zamboni/tags.css',
'css/zamboni/tabs.css',
'css/impala/buttons.less',
'css/impala/formset.less',
'css/impala/suggestions.less',
'css/impala/header.less',
'css/impala/moz-tab.css',
'css/impala/footer.less',
'css/impala/faux-zamboni.less',
),
'zamboni/impala': (
'css/impala/base.css',
'css/legacy/jquery-lightbox.css',
'css/impala/site.less',
'css/impala/typography.less',
'css/impala/forms.less',
'css/common/invisible-upload.less',
'css/impala/header.less',
'css/impala/footer.less',
'css/impala/moz-tab.css',
'css/impala/hovercards.less',
'css/impala/toplist.less',
'css/impala/carousel.less',
'css/impala/ratings.less',
'css/impala/buttons.less',
'css/impala/promos.less',
'css/impala/addon_details.less',
'css/impala/policy.less',
'css/impala/expando.less',
'css/impala/popups.less',
'css/impala/l10n.less',
'css/impala/lightbox.less',
'css/impala/prose.less',
'css/impala/abuse.less',
'css/impala/paginator.less',
'css/impala/listing.less',
'css/impala/versions.less',
'css/impala/users.less',
'css/impala/tooltips.less',
'css/impala/search.less',
'css/impala/suggestions.less',
'css/node_lib/jquery.minicolors.css',
'css/impala/login.less',
'css/impala/dictionaries.less',
'css/impala/apps.less',
'css/impala/formset.less',
'css/impala/tables.less',
'css/impala/compat.less',
),
'zamboni/stats': (
'css/impala/stats.less',
),
'zamboni/discovery-pane': (
'css/impala/promos.less',
'css/legacy/jquery-lightbox.css',
),
'zamboni/devhub': (
'css/impala/tooltips.less',
'css/zamboni/developers.css',
'css/zamboni/docs.less',
'css/impala/developers.less',
'css/devhub/listing.less',
'css/devhub/popups.less',
'css/devhub/compat.less',
'css/impala/formset.less',
'css/devhub/forms.less',
'css/common/invisible-upload.less',
'css/devhub/submission.less',
'css/devhub/refunds.less',
'css/devhub/buttons.less',
'css/devhub/in-app-config.less',
'css/devhub/static-theme.less',
'css/node_lib/jquery.minicolors.css',
),
'zamboni/devhub_impala': (
'css/impala/developers.less',
'css/devhub/listing.less',
'css/devhub/popups.less',
'css/devhub/compat.less',
'css/devhub/dashboard.less',
'css/devhub/forms.less',
'css/common/invisible-upload.less',
'css/devhub/submission.less',
'css/devhub/search.less',
'css/devhub/refunds.less',
'css/impala/devhub-api.less',
),
'zamboni/reviewers': (
'css/zamboni/reviewers.less',
'css/zamboni/unlisted.less',
),
'zamboni/themes_review': (
'css/zamboni/developers.css',
'css/zamboni/reviewers.less',
'css/zamboni/themes_review.less',
),
'zamboni/files': (
'css/lib/syntaxhighlighter/shCoreDefault.css',
'css/zamboni/files.css',
),
'zamboni/admin': (
'css/zamboni/admin-django.css',
'css/zamboni/admin-mozilla.css',
'css/zamboni/admin_features.css',
),
},
'js': {
# JS files common to the entire site, apart from dev-landing.
'common': (
'js/node_lib/underscore.js',
'js/zamboni/browser.js',
'js/amo2009/addons.js',
'js/zamboni/init.js',
'js/impala/capabilities.js',
'js/lib/format.js',
'js/node_lib/jquery.cookie.js',
'js/zamboni/storage.js',
'js/zamboni/buttons.js',
'js/zamboni/tabs.js',
'js/common/keys.js',
# jQuery UI
'js/node_lib/ui/version.js',
'js/node_lib/ui/data.js',
'js/node_lib/ui/disable-selection.js',
'js/node_lib/ui/ie.js',
'js/node_lib/ui/keycode.js',
'js/node_lib/ui/escape-selector.js',
'js/node_lib/ui/labels.js',
'js/node_lib/ui/jquery-1-7.js',
'js/node_lib/ui/plugin.js',
'js/node_lib/ui/safe-active-element.js',
'js/node_lib/ui/safe-blur.js',
'js/node_lib/ui/scroll-parent.js',
'js/node_lib/ui/focusable.js',
'js/node_lib/ui/tabbable.js',
'js/node_lib/ui/unique-id.js',
'js/node_lib/ui/position.js',
'js/node_lib/ui/widget.js',
'js/node_lib/ui/menu.js',
'js/node_lib/ui/mouse.js',
'js/node_lib/ui/autocomplete.js',
'js/node_lib/ui/datepicker.js',
'js/node_lib/ui/sortable.js',
'js/zamboni/helpers.js',
'js/common/banners.js',
'js/zamboni/global.js',
'js/amo2009/global.js',
'js/common/ratingwidget.js',
'js/node_lib/jqModal.js',
'js/zamboni/l10n.js',
'js/zamboni/debouncer.js',
# Homepage
'js/zamboni/homepage.js',
# Add-ons details page
'js/lib/ui.lightbox.js',
'js/zamboni/addon_details.js',
'js/impala/abuse.js',
'js/zamboni/ratings.js',
'js/lib/jquery.hoverIntent.js',
# Unicode letters for our makeslug function
'js/zamboni/unicode.js',
# Users
'js/zamboni/users.js',
# Search suggestions
'js/impala/forms.js',
'js/impala/ajaxcache.js',
'js/impala/suggestions.js',
'js/impala/site_suggestions.js',
),
# Impala and Legacy: Things to be loaded at the top of the page
'preload': (
'js/node_lib/jquery.js',
'js/node_lib/jquery.browser.js',
'js/impala/preloaded.js',
'js/zamboni/analytics.js',
),
# Impala: Things to be loaded at the bottom
'impala': (
'js/lib/ngettext-overload.js',
'js/node_lib/underscore.js',
'js/impala/carousel.js',
'js/zamboni/browser.js',
'js/amo2009/addons.js',
'js/zamboni/init.js',
'js/impala/capabilities.js',
'js/lib/format.js',
'js/node_lib/jquery.cookie.js',
'js/zamboni/storage.js',
'js/zamboni/buttons.js',
'js/node_lib/jquery.pjax.js',
# jquery.pjax.js is missing a semicolon at the end which breaks
# our wonderful minification process... so add one.
'js/lib/semicolon.js', # It's just a semicolon!
'js/impala/footer.js',
'js/common/keys.js',
# jQuery UI
'js/node_lib/ui/version.js',
'js/node_lib/ui/data.js',
'js/node_lib/ui/disable-selection.js',
'js/node_lib/ui/ie.js',
'js/node_lib/ui/keycode.js',
'js/node_lib/ui/escape-selector.js',
'js/node_lib/ui/labels.js',
'js/node_lib/ui/jquery-1-7.js',
'js/node_lib/ui/plugin.js',
'js/node_lib/ui/safe-active-element.js',
'js/node_lib/ui/safe-blur.js',
'js/node_lib/ui/scroll-parent.js',
'js/node_lib/ui/focusable.js',
'js/node_lib/ui/tabbable.js',
'js/node_lib/ui/unique-id.js',
'js/node_lib/ui/position.js',
'js/node_lib/ui/widget.js',
'js/node_lib/ui/mouse.js',
'js/node_lib/ui/menu.js',
'js/node_lib/ui/autocomplete.js',
'js/node_lib/ui/datepicker.js',
'js/node_lib/ui/sortable.js',
'js/lib/truncate.js',
'js/zamboni/truncation.js',
'js/impala/ajaxcache.js',
'js/zamboni/helpers.js',
'js/common/banners.js',
'js/zamboni/global.js',
'js/impala/global.js',
'js/common/ratingwidget.js',
'js/node_lib/jqModal.js',
'js/zamboni/l10n.js',
'js/impala/forms.js',
# Homepage
'js/impala/homepage.js',
# Add-ons details page
'js/lib/ui.lightbox.js',
'js/impala/addon_details.js',
'js/impala/abuse.js',
'js/impala/ratings.js',
# Browse listing pages
'js/impala/listing.js',
'js/lib/jquery.hoverIntent.js',
'js/common/upload-image.js',
'js/node_lib/jquery.minicolors.js',
# Unicode letters for our makeslug function
'js/zamboni/unicode.js',
# Users
'js/zamboni/users.js',
'js/impala/users.js',
# Search
'js/impala/serializers.js',
'js/impala/search.js',
'js/impala/suggestions.js',
'js/impala/site_suggestions.js',
# Login
'js/impala/login.js',
),
'zamboni/discovery': (
'js/node_lib/jquery.js',
'js/node_lib/jquery.browser.js',
'js/node_lib/underscore.js',
'js/zamboni/browser.js',
'js/zamboni/init.js',
'js/impala/capabilities.js',
'js/lib/format.js',
'js/impala/carousel.js',
'js/zamboni/analytics.js',
# Add-ons details
'js/node_lib/jquery.cookie.js',
'js/zamboni/storage.js',
'js/zamboni/buttons.js',
'js/lib/ui.lightbox.js',
'js/lib/jquery.hoverIntent.js',
'js/zamboni/debouncer.js',
'js/lib/truncate.js',
'js/zamboni/truncation.js',
),
'zamboni/devhub': (
'js/lib/truncate.js',
'js/zamboni/truncation.js',
'js/common/upload-base.js',
'js/common/upload-addon.js',
'js/common/upload-image.js',
'js/impala/formset.js',
'js/zamboni/devhub.js',
'js/zamboni/validator.js',
'js/node_lib/jquery.timeago.js',
'js/zamboni/static_theme.js',
'js/node_lib/jquery.minicolors.js',
'js/node_lib/jszip.js',
),
'devhub/new-landing/js': (
'js/common/lang_switcher.js',
'js/lib/basket-client.js',
),
'zamboni/reviewers': (
'js/lib/highcharts.src.js',
'js/lib/jquery.hoverIntent.js', # Used by jquery.zoomBox.
'js/lib/jquery.zoomBox.js', # Used by themes_review.
'js/zamboni/reviewers.js',
'js/zamboni/themes_review_templates.js',
'js/zamboni/themes_review.js',
),
'zamboni/files': (
'js/lib/diff_match_patch_uncompressed.js',
'js/lib/syntaxhighlighter/shCore.js',
'js/lib/syntaxhighlighter/shLegacy.js',
'js/lib/syntaxhighlighter/shBrushCss.js',
'js/lib/syntaxhighlighter/shBrushJava.js',
'js/lib/syntaxhighlighter/shBrushJScript.js',
'js/lib/syntaxhighlighter/shBrushPlain.js',
'js/lib/syntaxhighlighter/shBrushXml.js',
'js/zamboni/storage.js',
'js/zamboni/files_templates.js',
'js/zamboni/files.js',
),
'zamboni/stats': (
'js/lib/highcharts.src.js',
'js/impala/stats/csv_keys.js',
'js/impala/stats/helpers.js',
'js/impala/stats/dateutils.js',
'js/impala/stats/manager.js',
'js/impala/stats/controls.js',
'js/impala/stats/overview.js',
'js/impala/stats/topchart.js',
'js/impala/stats/chart.js',
'js/impala/stats/table.js',
'js/impala/stats/stats.js',
),
'zamboni/admin': (
'js/zamboni/admin.js',
'js/zamboni/admin_features.js',
'js/zamboni/admin_validation.js',
),
# This is included when DEBUG is True. Bundle in <head>.
'debug': (
'js/debug/less_setup.js',
'js/node_lib/less.js',
'js/debug/less_live.js',
),
}
}
# Prefix for cache keys (will prevent collisions when running parallel copies)
# This value is used by `conf/settings/{dev,stage,prod}.py`.
CACHE_KEY_PREFIX = 'amo:%s:' % build_id
CACHE_MIDDLEWARE_KEY_PREFIX = CACHE_KEY_PREFIX
FETCH_BY_ID = True
# Number of seconds a count() query should be cached. Keep it short because
# it's not possible to invalidate these queries.
CACHE_COUNT_TIMEOUT = 60
# To enable pylibmc compression (in bytes)
PYLIBMC_MIN_COMPRESS_LEN = 0 # disabled
# External tools.
JAVA_BIN = '/usr/bin/java'
# File paths
ADDON_ICONS_DEFAULT_PATH = os.path.join(ROOT, 'static', 'img', 'addon-icons')
# URL paths
# paths for images, e.g. mozcdn.com/amo or '/static'
VAMO_URL = 'https://versioncheck.addons.mozilla.org'
# Outgoing URL bouncer
REDIRECT_URL = 'https://outgoing.prod.mozaws.net/v1/'
REDIRECT_SECRET_KEY = env('REDIRECT_SECRET_KEY', default='')
# Allow URLs from these servers. Use full domain names.
REDIRECT_URL_ALLOW_LIST = ['addons.mozilla.org']
# Default to short expiration; check "remember me" to override
SESSION_ENGINE = 'django.contrib.sessions.backends.signed_cookies'
# See: https://github.com/mozilla/addons-server/issues/1789
SESSION_EXPIRE_AT_BROWSER_CLOSE = False
# This value must be kept in sync with authTokenValidFor from addons-frontend:
# https://github.com/mozilla/addons-frontend/blob/2f480b474fe13a676237fe76a1b2a057e4a2aac7/config/default-amo.js#L111
SESSION_COOKIE_AGE = 2592000 # 30 days
SESSION_COOKIE_SECURE = True
SESSION_COOKIE_HTTPONLY = True
SESSION_COOKIE_DOMAIN = ".%s" % DOMAIN # bug 608797
MESSAGE_STORAGE = 'django.contrib.messages.storage.cookie.CookieStorage'
# These should have app+locale at the start to avoid redirects
LOGIN_URL = '/'
LOGIN_REDIRECT_URL = '/'
LOGOUT_REDIRECT_URL = '/'
# When logging in with browser ID, a username is created automatically.
# In the case of duplicates, the process is recursive up to this number
# of times.
MAX_GEN_USERNAME_TRIES = 50
# Email settings
ADDONS_EMAIL = "Mozilla Add-ons <nobody@mozilla.org>"
DEFAULT_FROM_EMAIL = ADDONS_EMAIL
# Email goes to the console by default. s/console/smtp/ for regular delivery
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# Please use all lowercase for the QA allow list.
EMAIL_QA_ALLOW_LIST = env.list('EMAIL_QA_ALLOW_LIST', default=())
# Please use all lowercase for the deny_list.
EMAIL_DENY_LIST = env.list('EMAIL_DENY_LIST', default=('nobody@mozilla.org',))
# URL for Add-on Validation FAQ.
VALIDATION_FAQ_URL = ('https://wiki.mozilla.org/Add-ons/Reviewers/Guide/'
'AddonReviews#Step_2:_Automatic_validation')
SHIELD_STUDIES_SUPPORT_URL = 'https://support.mozilla.org/kb/shield'
# Celery
CELERY_BROKER_URL = env(
'CELERY_BROKER_URL',
default=os.environ.get(
'CELERY_BROKER_URL', 'amqp://olympia:olympia@localhost:5672/olympia'))
CELERY_BROKER_CONNECTION_TIMEOUT = 0.1
CELERY_BROKER_HEARTBEAT = 60 * 15
CELERY_TASK_DEFAULT_QUEUE = 'default'
CELERY_RESULT_BACKEND = env(
'CELERY_RESULT_BACKEND',
default=os.environ.get(
'CELERY_RESULT_BACKEND', 'redis://localhost:6379/1'))
CELERY_TASK_IGNORE_RESULT = True
CELERY_SEND_TASK_ERROR_EMAILS = True
CELERY_WORKER_HIJACK_ROOT_LOGGER = False
# Testing responsiveness without rate limits.
CELERY_WORKER_DISABLE_RATE_LIMITS = True
# Only serialize celery tasks using JSON.
CELERY_ACCEPT_CONTENT = ['json']
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
# When testing, we always want tasks to raise exceptions. Good for sanity.
CELERY_TASK_EAGER_PROPAGATES = True
# Time in seconds before celery.exceptions.SoftTimeLimitExceeded is raised.
# The task can catch that and recover but should exit ASAP. Note that there is
# a separate, shorter timeout for validation tasks.
CELERY_TASK_SOFT_TIME_LIMIT = 60 * 30
# List of modules that contain tasks and that wouldn't be autodiscovered by
# celery. Typically, it's either `tasks` modules from something not in
# INSTALLED_APPS, or modules not called `tasks`.
CELERY_IMPORTS = (
'olympia.lib.crypto.tasks',
'olympia.lib.es.management.commands.reindex',
'olympia.stats.management.commands.index_stats',
)
CELERY_TASK_QUEUES = (
Queue('addons', routing_key='addons'),
Queue('amo', routing_key='amo'),
Queue('bandwagon', routing_key='bandwagon'),
Queue('cron', routing_key='cron'),
Queue('crypto', routing_key='crypto'),
Queue('default', routing_key='default'),
Queue('devhub', routing_key='devhub'),
Queue('images', routing_key='images'),
Queue('priority', routing_key='priority'),
Queue('ratings', routing_key='ratings'),
Queue('reviewers', routing_key='reviewers'),
Queue('search', routing_key='search'),
Queue('stats', routing_key='stats'),
Queue('tags', routing_key='tags'),
Queue('users', routing_key='users'),
Queue('zadmin', routing_key='zadmin'),
)
# We have separate celeryds for processing devhub & images as fast as possible
# Some notes:
# - always add routes here instead of @task(queue=<name>)
# - when adding a queue, be sure to update deploy.py so that it gets restarted
CELERY_TASK_ROUTES = {
# Priority.
# If your tasks need to be run as soon as possible, add them here so they
# are routed to the priority queue.
'olympia.addons.tasks.index_addons': {'queue': 'priority'},
'olympia.addons.tasks.unindex_addons': {'queue': 'priority'},
'olympia.blocklist.tasks.process_blocklistsubmission': {
'queue': 'priority'
},
'olympia.blocklist.tasks.import_block_from_blocklist': {
'queue': 'priority'
},
'olympia.blocklist.tasks.upload_filter_to_kinto': {
'queue': 'priority'
},
'olympia.versions.tasks.generate_static_theme_preview': {
'queue': 'priority'
},
# Other queues we prioritize below.
# 'Default' queue.
'celery.accumulate': {'queue': 'default'},
'celery.backend_cleanup': {'queue': 'default'},
'celery.chain': {'queue': 'default'},
'celery.chord': {'queue': 'default'},
'celery.chunks': {'queue': 'default'},
'celery.group': {'queue': 'default'},
'celery.map': {'queue': 'default'},
'celery.starmap': {'queue': 'default'},
# AMO Devhub.
'olympia.devhub.tasks.check_for_api_keys_in_file': {'queue': 'devhub'},
'olympia.devhub.tasks.create_initial_validation_results': {
'queue': 'devhub'
},
'olympia.devhub.tasks.forward_linter_results': {'queue': 'devhub'},
'olympia.devhub.tasks.get_preview_sizes': {'queue': 'devhub'},
'olympia.devhub.tasks.handle_file_validation_result': {'queue': 'devhub'},
'olympia.devhub.tasks.handle_upload_validation_result': {
'queue': 'devhub'
},
'olympia.devhub.tasks.revoke_api_key': {'queue': 'devhub'},
'olympia.devhub.tasks.send_welcome_email': {'queue': 'devhub'},
'olympia.devhub.tasks.submit_file': {'queue': 'devhub'},
'olympia.devhub.tasks.validate_file': {'queue': 'devhub'},
'olympia.devhub.tasks.validate_upload': {'queue': 'devhub'},
'olympia.files.tasks.repack_fileupload': {'queue': 'devhub'},
'olympia.scanners.tasks.run_customs': {'queue': 'devhub'},
'olympia.scanners.tasks.run_wat': {'queue': 'devhub'},
'olympia.scanners.tasks.run_yara': {'queue': 'devhub'},
'olympia.scanners.tasks.call_mad_api': {'queue': 'devhub'},
# Activity (goes to devhub queue).
'olympia.activity.tasks.process_email': {'queue': 'devhub'},
# This is currently used only by validation tasks.
# This puts the chord_unlock task on the devhub queue. Which means anything
# that uses chord() or group() must also be running in this queue or must
# be on a worker that listens to the same queue.
'celery.chord_unlock': {'queue': 'devhub'},
# Images.
'olympia.users.tasks.resize_photo': {'queue': 'images'},
'olympia.devhub.tasks.recreate_previews': {'queue': 'images'},
'olympia.devhub.tasks.resize_icon': {'queue': 'images'},
'olympia.devhub.tasks.resize_preview': {'queue': 'images'},
# AMO
'olympia.amo.tasks.delete_anonymous_collections': {'queue': 'amo'},
'olympia.amo.tasks.delete_logs': {'queue': 'amo'},
'olympia.amo.tasks.send_email': {'queue': 'amo'},
'olympia.amo.tasks.set_modified_on_object': {'queue': 'amo'},
'olympia.amo.tasks.sync_object_to_basket': {'queue': 'amo'},
# Addons
'olympia.addons.tasks.add_dynamic_theme_tag': {'queue': 'addons'},
'olympia.addons.tasks.delete_addons': {'queue': 'addons'},
'olympia.addons.tasks.delete_preview_files': {'queue': 'addons'},
'olympia.addons.tasks.migrate_webextensions_to_git_storage': {
'queue': 'addons'
},
'olympia.addons.tasks.version_changed': {'queue': 'addons'},
'olympia.files.tasks.extract_webext_permissions': {'queue': 'addons'},
'olympia.files.tasks.hide_disabled_files': {'queue': 'addons'},
'olympia.versions.tasks.delete_preview_files': {'queue': 'addons'},
'olympia.versions.tasks.extract_version_to_git': {'queue': 'addons'},
'olympia.versions.tasks.extract_version_source_to_git': {
'queue': 'addons'
},
'olympia.git.tasks.extract_versions_to_git': {'queue': 'addons'},
'olympia.git.tasks.on_extraction_error': {'queue': 'addons'},
'olympia.git.tasks.remove_git_extraction_entry': {'queue': 'addons'},
# Additional image processing tasks that aren't as important go in the
# addons queue to leave the 'devhub' queue free to process validations etc.
'olympia.addons.tasks.extract_colors_from_static_themes': {
'queue': 'addons'
},
'olympia.devhub.tasks.pngcrush_existing_preview': {'queue': 'addons'},
'olympia.devhub.tasks.pngcrush_existing_icons': {'queue': 'addons'},
'olympia.addons.tasks.recreate_theme_previews': {'queue': 'addons'},
# Crons
'olympia.addons.tasks.update_addon_average_daily_users': {'queue': 'cron'},
'olympia.addons.tasks.update_addon_download_totals': {'queue': 'cron'},
'olympia.addons.tasks.update_appsupport': {'queue': 'cron'},
# Bandwagon
'olympia.bandwagon.tasks.collection_meta': {'queue': 'bandwagon'},
# Reviewers
'olympia.reviewers.tasks.recalculate_post_review_weight': {
'queue': 'reviewers'
},
# Crypto
'olympia.lib.crypto.tasks.sign_addons': {'queue': 'crypto'},
# Search
'olympia.lib.es.management.commands.reindex.create_new_index': {
'queue': 'search'
},
'olympia.lib.es.management.commands.reindex.delete_indexes': {
'queue': 'search'
},
'olympia.lib.es.management.commands.reindex.flag_database': {
'queue': 'search'
},
'olympia.lib.es.management.commands.reindex.unflag_database': {
'queue': 'search'
},
'olympia.lib.es.management.commands.reindex.update_aliases': {
'queue': 'search'
},
'olympia.addons.tasks.find_inconsistencies_between_es_and_db': {
'queue': 'search'
},
# Ratings
'olympia.ratings.tasks.addon_bayesian_rating': {'queue': 'ratings'},
'olympia.ratings.tasks.addon_rating_aggregates': {'queue': 'ratings'},
'olympia.ratings.tasks.update_denorm': {'queue': 'ratings'},
# Stats
'olympia.stats.tasks.index_download_counts': {'queue': 'stats'},
'olympia.stats.tasks.index_update_counts': {'queue': 'stats'},
# Tags
'olympia.tags.tasks.update_all_tag_stats': {'queue': 'tags'},
'olympia.tags.tasks.update_tag_stat': {'queue': 'tags'},
# Users
'olympia.accounts.tasks.primary_email_change_event': {'queue': 'users'},
'olympia.users.tasks.delete_photo': {'queue': 'users'},
'olympia.users.tasks.update_user_ratings_task': {'queue': 'users'},
# Zadmin
'olympia.scanners.tasks.run_yara_query_rule': {'queue': 'zadmin'},
'olympia.scanners.tasks.run_yara_query_rule_on_versions_chunk': {
'queue': 'zadmin'
},
'olympia.scanners.tasks.mark_yara_query_rule_as_completed_or_aborted': {
'queue': 'zadmin'
},
'olympia.zadmin.tasks.celery_error': {'queue': 'zadmin'},
}
# See PEP 391 for formatting help.
LOGGING = {
'version': 1,
'filters': {},
'formatters': {
'json': {
'()': olympia.core.logger.JsonFormatter,
'logger_name': 'http_app_addons'
},
},
'handlers': {
'mozlog': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'json'
},
'null': {
'class': 'logging.NullHandler',
},
'statsd': {
'level': 'ERROR',
'class': 'django_statsd.loggers.errors.StatsdHandler',
},
},
'root': {'handlers': ['mozlog'], 'level': logging.DEBUG},
'loggers': {
'amo': {
'handlers': ['mozlog'],
'level': logging.DEBUG,
'propagate': False
},
'amqplib': {
'handlers': ['null'],
'level': logging.DEBUG,
'propagate': False
},
'caching': {
'handlers': ['mozlog'],
'level': logging.ERROR,
'propagate': False
},
'caching.invalidation': {
'handlers': ['null'],
'level': logging.DEBUG,
'propagate': False
},
'django': {
'handlers': ['statsd'],
'level': logging.ERROR,
'propagate': True,
},
# Django CSRF related warnings
'django.security.csrf': {
'handlers': ['mozlog'],
'level': logging.WARNING,
'propagate': True
},
'elasticsearch': {
'handlers': ['null'],
'level': logging.DEBUG,
'propagate': False,
},
'filtercascade': {
'handlers': ['mozlog'],
# Ignore INFO or DEBUG from filtercascade, it logs too much.
'level': logging.WARNING,
'propagate': False,
},
'mohawk.util': {
'handlers': ['mozlog'],
# Ignore INFO or DEBUG from mohawk.util, it logs too much.
'level': logging.WARNING,
'propagate': False,
},
'newrelic': {
'handlers': ['mozlog'],
'level': logging.WARNING,
'propagate': False,
},
'parso': {
'handlers': ['null'],
'level': logging.DEBUG,
'propagate': False
},
'post_request_task': {
'handlers': ['mozlog'],
# Ignore INFO or DEBUG from post-request-task, it logs too much.
'level': logging.WARNING,
'propagate': False,
},
'rdflib': {
'handlers': ['null'],
'level': logging.DEBUG,
'propagate': False,
},
'request.summary': {
'handlers': ['mozlog'],
'level': logging.DEBUG,
'propagate': False
},
's.client': {
'handlers': ['mozlog'],
'level': logging.INFO,
'propagate': False
},
'z': {
'handlers': ['mozlog'],
'level': logging.DEBUG,
'propagate': False
},
'z.celery': {
'handlers': ['statsd'],
'level': logging.ERROR,
'propagate': True,
},
'z.es': {
'handlers': ['mozlog'],
'level': logging.INFO,
'propagate': False
},
'z.pool': {
'handlers': ['mozlog'],
'level': logging.ERROR,
'propagate': False
},
'z.task': {
'handlers': ['mozlog'],
'level': logging.DEBUG,
'propagate': False
}
},
}
# CSP Settings
PROD_CDN_HOST = 'https://addons.cdn.mozilla.net'
ANALYTICS_HOST = 'https://ssl.google-analytics.com'
CSP_REPORT_URI = '/__cspreport__'
CSP_REPORT_ONLY = False
CSP_EXCLUDE_URL_PREFIXES = ()
# NOTE: CSP_DEFAULT_SRC MUST be set otherwise things not set
# will default to being open to anything.
CSP_DEFAULT_SRC = (
"'self'",
)
CSP_BASE_URI = (
"'self'",
# Required for the legacy discovery pane.
'https://addons.mozilla.org',
)
CSP_CONNECT_SRC = (
"'self'",
'https://sentry.prod.mozaws.net',
PROD_CDN_HOST,
)
CSP_FORM_ACTION = (
"'self'",
'https://developer.mozilla.org',
)
CSP_FONT_SRC = (
"'self'",
PROD_CDN_HOST,
)
CSP_CHILD_SRC = (
"'self'",
'https://www.google.com/recaptcha/',
'https://www.recaptcha.net/recaptcha/',
)
CSP_FRAME_SRC = CSP_CHILD_SRC
CSP_IMG_SRC = (
"'self'",
'data:', # Used in inlined mobile css.
'blob:', # Needed for image uploads.
ANALYTICS_HOST,
PROD_CDN_HOST,
'https://static.addons.mozilla.net', # CDN origin server.
'https://sentry.prod.mozaws.net',
)
CSP_MEDIA_SRC = (
'https://videos.cdn.mozilla.net',
)
CSP_OBJECT_SRC = ("'none'",)
CSP_SCRIPT_SRC = (
'https://ssl.google-analytics.com/ga.js',
'https://www.google.com/recaptcha/',
'https://www.recaptcha.net/recaptcha/',
'https://www.gstatic.com/recaptcha/',
'https://www.gstatic.cn/recaptcha/',
PROD_CDN_HOST,
)
CSP_STYLE_SRC = (
"'self'",
"'unsafe-inline'",
PROD_CDN_HOST,
)
RESTRICTED_DOWNLOAD_CSP = {
'DEFAULT_SRC': "'none'",
'BASE_URI': "'none'",
'FORM_ACTION': "'none'",
'OBJECT_SRC': "'none'",
'FRAME_ANCESTORS': "'none'",
'REPORT_URI': CSP_REPORT_URI
}
# Should robots.txt deny everything, or only disallow a calculated list of
# URLs we don't want to be crawled? Default is True (allow everything except
# that calculated list); toggled to False (deny everything) on -dev and stage.
# Also see http://www.google.com/support/webmasters/bin/answer.py?answer=93710
ENGAGE_ROBOTS = True
# Read-only mode setup.
READ_ONLY = env.bool('READ_ONLY', default=False)
# Turn on read-only mode in local_settings.py by putting this line
# at the VERY BOTTOM: read_only_mode(globals())
def read_only_mode(env):
env['READ_ONLY'] = True
    # Replace the default db connection with a read replica.
if not env.get('REPLICA_DATABASES'):
raise Exception('We need at least one slave database.')
slave = env['REPLICA_DATABASES'][0]
env['DATABASES']['default'] = env['DATABASES'][slave]
# No sessions without the database, so disable auth.
env['AUTHENTICATION_BACKENDS'] = ('olympia.users.backends.NoAuthForYou',)
# Uploaded file limits
MAX_ICON_UPLOAD_SIZE = 4 * 1024 * 1024
MAX_IMAGE_UPLOAD_SIZE = 4 * 1024 * 1024
MAX_VIDEO_UPLOAD_SIZE = 4 * 1024 * 1024
MAX_PHOTO_UPLOAD_SIZE = MAX_ICON_UPLOAD_SIZE
MAX_STATICTHEME_SIZE = 7 * 1024 * 1024
MAX_ZIP_UNCOMPRESSED_SIZE = 200 * 1024 * 1024
# File uploads should have -rw-r--r-- permissions in order to be served by
# nginx later on. The 0o prefix is intentional; this is an octal value.
FILE_UPLOAD_PERMISSIONS = 0o644
# RECAPTCHA: overload the following key settings in local_settings.py
# with your keys.
# Old recaptcha V1
RECAPTCHA_PUBLIC_KEY = env('RECAPTCHA_PUBLIC_KEY', default='')
RECAPTCHA_PRIVATE_KEY = env('RECAPTCHA_PRIVATE_KEY', default='')
# New Recaptcha V2
NOBOT_RECAPTCHA_PUBLIC_KEY = env('NOBOT_RECAPTCHA_PUBLIC_KEY', default='')
NOBOT_RECAPTCHA_PRIVATE_KEY = env('NOBOT_RECAPTCHA_PRIVATE_KEY', default='')
# Send Django signals asynchronously on a background thread.
ASYNC_SIGNALS = True
# Number of seconds before celery tasks will abort addon validation:
VALIDATOR_TIMEOUT = 360
# Max number of warnings/errors to show from validator. Set to None for no
# limit.
VALIDATOR_MESSAGE_LIMIT = 500
# Feature flags
UNLINK_SITE_STATS = True
# See: https://www.nginx.com/resources/wiki/start/topics/examples/xsendfile/
XSENDFILE_HEADER = 'X-Accel-Redirect'
MOBILE_COOKIE = 'mamo'
# Path to `ps`.
PS_BIN = '/bin/ps'
# The maximum file size that is shown inside the file viewer.
FILE_VIEWER_SIZE_LIMIT = 1048576
# The maximum file size that you can have inside a zip file.
FILE_UNZIP_SIZE_LIMIT = 104857600
# How long to delay tasks relying on file system to cope with NFS lag.
NFS_LAG_DELAY = 3
# Elasticsearch
ES_HOSTS = [os.environ.get('ELASTICSEARCH_LOCATION', '127.0.0.1:9200')]
ES_URLS = ['http://%s' % h for h in ES_HOSTS]
ES_INDEXES = {
'default': 'addons',
'stats': 'addons_stats',
}
ES_TIMEOUT = 30
ES_DEFAULT_NUM_REPLICAS = 2
ES_DEFAULT_NUM_SHARDS = 5
# Maximum result position. ES defaults to 10000 but we'd like more to make sure
# all our extensions can be found if searching without a query and
# paginating through all results.
# NOTE: This setting is applied during reindex; if it needs changing,
# we need to trigger a reindex. It's also hard-coded in amo/pagination.py
# and there's a test verifying its value is 25000 in amo/test_pagination.py.
ES_MAX_RESULT_WINDOW = 25000
# Default AMO user id to use for tasks.
TASK_USER_ID = 4757633
# Special collection that some contributors can modify.
COLLECTION_FEATURED_THEMES_ID = 2143965
# If this is False, tasks and other jobs that send non-critical emails should
# use a fake email backend.
SEND_REAL_EMAIL = False
STATSD_HOST = env('STATSD_HOST', default='localhost')
STATSD_PREFIX = env('STATSD_PREFIX', default='amo')
STATSD_PORT = 8125
# The django statsd client to use, see django-statsd for more.
STATSD_CLIENT = 'django_statsd.clients.normal'
GRAPHITE_HOST = env('GRAPHITE_HOST', default='localhost')
GRAPHITE_PREFIX = env('GRAPHITE_PREFIX', default='amo')
GRAPHITE_PORT = 2003
GRAPHITE_TIMEOUT = 1
# IP addresses of servers we use as proxies.
KNOWN_PROXIES = []
# Blog URL
DEVELOPER_BLOG_URL = 'http://blog.mozilla.com/addons/feed/'
LOGIN_RATELIMIT_USER = 5
LOGIN_RATELIMIT_ALL_USERS = '15/m'
CSRF_FAILURE_VIEW = 'olympia.amo.views.csrf_failure'
CSRF_USE_SESSIONS = True
# Default file storage mechanism that holds media.
DEFAULT_FILE_STORAGE = 'olympia.amo.utils.LocalFileStorage'
# And how long we'll give the server to respond for monitoring.
# We currently do not have any actual timeouts during the signing-process.
SIGNING_SERVER_MONITORING_TIMEOUT = 10
AUTOGRAPH_CONFIG = {
'server_url': env(
'AUTOGRAPH_SERVER_URL',
default='http://autograph:5500'),
'user_id': env(
'AUTOGRAPH_HAWK_USER_ID',
default='alice'),
'key': env(
'AUTOGRAPH_HAWK_KEY',
default='fs5wgcer9qj819kfptdlp8gm227ewxnzvsuj9ztycsx08hfhzu'),
    # This is configurable, but we don't expect it to be set to anything other
    # than `webextensions-rsa` at the moment, because AMO only accepts
    # regular add-ons; system add-ons and similar extensions, for example,
    # are already signed when submitted to AMO.
'signer': env(
'AUTOGRAPH_SIGNER_ID',
default='webextensions-rsa'),
# This signer is only used for add-ons that are recommended.
    # The signer uses its own HAWK auth credentials.
'recommendation_signer': env(
'AUTOGRAPH_RECOMMENDATION_SIGNER_ID',
default='webextensions-rsa-with-recommendation'),
'recommendation_signer_user_id': env(
'AUTOGRAPH_RECOMMENDATION_SIGNER_HAWK_USER_ID',
default='bob'),
'recommendation_signer_key': env(
'AUTOGRAPH_RECOMMENDATION_SIGNER_HAWK_KEY',
default='9vh6bhlc10y63ow2k4zke7k0c3l9hpr8mo96p92jmbfqngs9e7d'),
}
# Enable addon signing. Autograph is configured to something reasonable
# when running locally so there aren't many reasons to deactivate that.
ENABLE_ADDON_SIGNING = True
# True when the Django app is running from the test suite.
IN_TEST_SUITE = False
# Temporary flag to work with navigator.mozPay() on devices that don't
# support it natively.
SIMULATE_NAV_PAY = False
# When the dev. agreement gets updated you need users to re-accept it; if the
# config 'last_dev_agreement_change_date' is not set, this fallback is used.
# You won't want to do this for minor format changes.
# The values are passed straight to datetime(), so please use a valid date
# and time.
DEV_AGREEMENT_CHANGE_FALLBACK = datetime(2019, 12, 2, 12, 00)
# If you want to allow self-reviews for add-ons/apps, then enable this.
# In production we do not want to allow this.
ALLOW_SELF_REVIEWS = False
# Allow URL style format override. eg. "?format=json"
URL_FORMAT_OVERRIDE = 'format'
# Connection to the hive server.
HIVE_CONNECTION = {
'host': 'peach-gw.peach.metrics.scl3.mozilla.com',
'port': 10000,
'user': 'amo_prod',
'password': '',
'auth_mechanism': 'PLAIN',
}
# CDN Host is blank on local installs, overwritten in dev/stage/prod envs.
# Useful to force some dynamic content to be served from the CDN.
CDN_HOST = ''
# Static
STATIC_ROOT = path('site-static')
STATIC_URL = '/static/'
STATICFILES_DIRS = (
path('static'),
)
# Path related settings. In dev/stage/prod `NETAPP_STORAGE_ROOT` environment
# variable will be set and point to our NFS/EFS storage
# Make sure to check overwrites in conftest.py if new settings are added
# or changed.
STORAGE_ROOT = env('NETAPP_STORAGE_ROOT', default=path('storage'))
ADDONS_PATH = os.path.join(STORAGE_ROOT, 'files')
GUARDED_ADDONS_PATH = os.path.join(STORAGE_ROOT, 'guarded-addons')
GIT_FILE_STORAGE_PATH = os.path.join(STORAGE_ROOT, 'git-storage')
MLBF_STORAGE_PATH = os.path.join(STORAGE_ROOT, 'mlbf')
SHARED_STORAGE = os.path.join(STORAGE_ROOT, 'shared_storage')
MEDIA_ROOT = os.path.join(SHARED_STORAGE, 'uploads')
TMP_PATH = os.path.join(SHARED_STORAGE, 'tmp')
# These are key files that must be present on disk to encrypt/decrypt certain
# database fields.
# {'api_key:secret': os.path.join(ROOT, 'path', 'to', 'file.key'),}
AES_KEYS = env.dict('AES_KEYS', default={})
# Time in seconds for how long a JWT auth token created by developers with
# their API key can live. When developers are creating auth tokens they cannot
# set the expiration any longer than this.
MAX_APIKEY_JWT_AUTH_TOKEN_LIFETIME = 5 * 60
# Time in seconds before the email containing the link allowing developers to
# see their api keys the first time they request one is sent. A value of None
# means it's sent instantaneously.
API_KEY_CONFIRMATION_DELAY = None
# django-rest-framework-jwt settings:
JWT_AUTH = {
# Use HMAC using SHA-256 hash algorithm. It should be the default, but we
# want to make sure it does not change behind our backs.
# See https://github.com/jpadilla/pyjwt/blob/master/docs/algorithms.rst
'JWT_ALGORITHM': 'HS256',
# This adds some padding to timestamp validation in case client/server
# clocks are off.
'JWT_LEEWAY': 5,
# We don't allow refreshes.
'JWT_ALLOW_REFRESH': False,
}
DRF_API_GATES = {
'auth': (),
'v3': (
'ratings-rating-shim',
'ratings-title-shim',
'l10n_flat_input_output',
'collections-downloads-shim',
'addons-locale_disambiguation-shim',
'del-addons-created-field',
'del-accounts-fxa-edit-email-url',
'del-version-license-is-custom',
'del-ratings-flags',
'activity-user-shim',
'autocomplete-sort-param',
'is-source-public-shim',
'is-featured-addon-shim',
),
'v4': (
'l10n_flat_input_output',
'addons-search-_score-field',
'ratings-can_reply',
'ratings-score-filter',
),
'v5': (
'addons-search-_score-field',
'ratings-can_reply',
'ratings-score-filter',
),
}
# Change this to deactivate API throttling for views using a throttling class
# depending on the one defined in olympia.api.throttling.
API_THROTTLING = True
REST_FRAMEWORK = {
# Set this because the default is to also include:
# 'rest_framework.renderers.BrowsableAPIRenderer'
# Which it will try to use if the client accepts text/html.
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
),
'DEFAULT_AUTHENTICATION_CLASSES': (
'olympia.api.authentication.WebTokenAuthentication',
),
# Set parser classes to include the fix for
# https://github.com/tomchristie/django-rest-framework/issues/3951
'DEFAULT_PARSER_CLASSES': (
'rest_framework.parsers.JSONParser',
'rest_framework.parsers.FormParser',
'olympia.api.parsers.MultiPartParser',
),
'ALLOWED_VERSIONS': DRF_API_VERSIONS,
'DEFAULT_VERSION': 'v4',
'DEFAULT_VERSIONING_CLASS': (
'rest_framework.versioning.NamespaceVersioning'),
# Add our custom exception handler, that wraps all exceptions into
# Responses and not just the ones that are api-related.
'EXCEPTION_HANDLER': 'olympia.api.exceptions.custom_exception_handler',
# Enable pagination
'PAGE_SIZE': 25,
# Use our pagination class by default, which allows clients to request a
# different page size.
'DEFAULT_PAGINATION_CLASS': (
'olympia.api.pagination.CustomPageNumberPagination'),
# Use json by default when using APIClient.
'TEST_REQUEST_DEFAULT_FORMAT': 'json',
# Use http://ecma-international.org/ecma-262/5.1/#sec-15.9.1.15
# We can't use the default because we don't use django timezone support.
'DATETIME_FORMAT': '%Y-%m-%dT%H:%M:%SZ',
# Set our default ordering parameter
'ORDERING_PARAM': 'sort',
}
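# Work out which release identifier to report to Sentry: prefer the version or
# commit recorded in version.json (written at deploy time) and fall back to
# the current git sha of the checkout.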
def get_raven_release():
version_json = os.path.join(ROOT, 'version.json')
version = None
if os.path.exists(version_json):
try:
with open(version_json, 'r') as fobj:
contents = fobj.read()
data = json.loads(contents)
version = data.get('version') or data.get('commit')
except (IOError, KeyError):
version = None
if not version or version == 'origin/master':
try:
version = raven.fetch_git_sha(ROOT)
except raven.exceptions.InvalidGitRepository:
version = None
return version
# This is the DSN to the Sentry service.
RAVEN_CONFIG = {
'dsn': env('SENTRY_DSN', default=os.environ.get('SENTRY_DSN')),
# Automatically configure the release based on git information.
# This uses our `version.json` file if possible or tries to fetch
# the current git-sha.
'release': get_raven_release(),
}
# Automatically do 'from olympia import amo' when running shell_plus.
SHELL_PLUS_POST_IMPORTS = (
('olympia', 'amo'),
)
FXA_CONTENT_HOST = 'https://accounts.firefox.com'
FXA_OAUTH_HOST = 'https://oauth.accounts.firefox.com/v1'
FXA_PROFILE_HOST = 'https://profile.accounts.firefox.com/v1'
DEFAULT_FXA_CONFIG_NAME = 'default'
ALLOWED_FXA_CONFIGS = ['default']
# List all jobs that should be callable with cron here.
# syntax is: job_and_method_name: full.package.path
CRON_JOBS = {
'update_addon_average_daily_users': 'olympia.addons.cron',
'update_addon_download_totals': 'olympia.addons.cron',
'addon_last_updated': 'olympia.addons.cron',
'update_addon_appsupport': 'olympia.addons.cron',
'hide_disabled_files': 'olympia.addons.cron',
'unhide_disabled_files': 'olympia.addons.cron',
'deliver_hotness': 'olympia.addons.cron',
'gc': 'olympia.amo.cron',
'category_totals': 'olympia.amo.cron',
'weekly_downloads': 'olympia.amo.cron',
'upload_mlbf_to_kinto': 'olympia.blocklist.cron',
'update_blog_posts': 'olympia.devhub.cron',
'cleanup_extracted_file': 'olympia.files.cron',
'cleanup_validation_results': 'olympia.files.cron',
'index_latest_stats': 'olympia.stats.cron',
'update_user_ratings': 'olympia.users.cron',
}
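# These jobs are typically invoked by name through the project's `cron`
# management command (e.g. `manage.py cron gc`); the command name is an
# assumption here, the mapping above only resolves job names to packages.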
RECOMMENDATION_ENGINE_URL = env(
'RECOMMENDATION_ENGINE_URL',
default='https://taar.dev.mozaws.net/v1/api/recommendations/')
TAAR_LITE_RECOMMENDATION_ENGINE_URL = env(
'TAAR_LITE_RECOMMENDATION_ENGINE_URL',
default=('https://taar.dev.mozaws.net/taarlite/api/v1/'
'addon_recommendations/'))
RECOMMENDATION_ENGINE_TIMEOUT = env.float(
'RECOMMENDATION_ENGINE_TIMEOUT', default=1)
# Reputation service is disabled by default, enabled for dev/stage/prod via
# those 3 env variables.
REPUTATION_SERVICE_URL = env('REPUTATION_SERVICE_URL', default=None)
REPUTATION_SERVICE_TOKEN = env('REPUTATION_SERVICE_TOKEN', default=None)
REPUTATION_SERVICE_TIMEOUT = env.float('REPUTATION_SERVICE_TIMEOUT', default=1)
# This is the queue used for addons-dev, so it'll consume events (i.e. process
# then delete) before you can locally. If you really need to test, get ops to
# stop the `monitor_fxa_sqs` command.
FXA_SQS_AWS_QUEUE_URL = (
'https://sqs.us-east-1.amazonaws.com/927034868273/'
'amo-account-change-dev')
FXA_SQS_AWS_WAIT_TIME = 20 # Seconds.
AWS_STATS_S3_BUCKET = env('AWS_STATS_S3_BUCKET', default=None)
AWS_STATS_S3_PREFIX = env('AWS_STATS_S3_PREFIX', default='amo_stats')
MIGRATED_LWT_UPDATES_ENABLED = True
BASKET_URL = env('BASKET_URL', default='https://basket.allizom.org')
BASKET_API_KEY = env('BASKET_API_KEY', default=None)
# Default is 10, the API usually answers in 0.5 - 1.5 seconds.
BASKET_TIMEOUT = 5
MOZILLA_NEWLETTER_URL = env(
'MOZILLA_NEWSLETTER_URL',
default='https://www.mozilla.org/en-US/newsletter/')
GEOIP_PATH = '/usr/local/share/GeoIP/GeoLite2-Country.mmdb'
EXTENSION_WORKSHOP_URL = env(
'EXTENSION_WORKSHOP_URL',
default='https://extensionworkshop-dev.allizom.org')
# Sectools
SCANNER_TIMEOUT = 60 # seconds
CUSTOMS_API_URL = env('CUSTOMS_API_URL', default=None)
CUSTOMS_API_KEY = env('CUSTOMS_API_KEY', default=None)
WAT_API_URL = env('WAT_API_URL', default=None)
WAT_API_KEY = env('WAT_API_KEY', default=None)
MAD_API_URL = env('MAD_API_URL', default=None)
MAD_API_TIMEOUT = 5 # seconds
# Git(Hub) repository names, e.g., `owner/repo-name`
CUSTOMS_GIT_REPOSITORY = env('CUSTOMS_GIT_REPOSITORY', default=None)
YARA_GIT_REPOSITORY = env('YARA_GIT_REPOSITORY', default=None)
# Addon.average_daily_user count that forces dual sign-off for Blocklist Blocks
DUAL_SIGNOFF_AVERAGE_DAILY_USERS_THRESHOLD = 100_000
KINTO_API_URL = 'https://kinto.dev.mozaws.net/v1/'
# The kinto test server needs accounts and setting up before using.
KINTO_API_IS_TEST_SERVER = False
BLOCKLIST_KINTO_USERNAME = env(
'BLOCKLIST_KINTO_USERNAME', default='amo_dev')
BLOCKLIST_KINTO_PASSWORD = env(
'BLOCKLIST_KINTO_PASSWORD', default='amo_dev_password')
# The path to the current Google service account configuration. This is
# used to query Google BigQuery as part of our stats processing.
# If this is `None`, we're going to use service mocks for testing.
GOOGLE_APPLICATION_CREDENTIALS = env(
'GOOGLE_APPLICATION_CREDENTIALS', default=None)
| 35.064796 | 117 | 0.649264 |
0b2951ee607b202125498cda2141c0a480a9c02a | 4,961 | py | Python | ask-smapi-model/ask_smapi_model/v1/skill/nlu/evaluations/evaluation_entity.py | alexa-labs/alexa-apis-for-python | 52838be4f57ee1a2479402ea78b1247b56017942 | ["Apache-2.0"] | 90 | 2018-09-19T21:56:42.000Z | 2022-03-30T11:25:21.000Z | ask-smapi-model/ask_smapi_model/v1/skill/nlu/evaluations/evaluation_entity.py | ishitaojha/alexa-apis-for-python | a68f94b7a0e41f819595d6fe56e800403e8a4194 | ["Apache-2.0"] | 11 | 2018-09-23T12:16:48.000Z | 2021-06-10T19:49:45.000Z | ask-smapi-model/ask_smapi_model/v1/skill/nlu/evaluations/evaluation_entity.py | ishitaojha/alexa-apis-for-python | a68f94b7a0e41f819595d6fe56e800403e8a4194 | ["Apache-2.0"] | 28 | 2018-09-19T22:30:38.000Z | 2022-02-22T22:57:07.000Z |
# coding: utf-8
#
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file
# except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
#
import pprint
import re # noqa: F401
import six
import typing
from enum import Enum
if typing.TYPE_CHECKING:
from typing import Dict, List, Optional, Union, Any
from datetime import datetime
from ask_smapi_model.v1.skill.nlu.evaluations.evaluation_inputs import EvaluationInputs as EvaluationInputs_14ada97b
from ask_smapi_model.v1.skill.nlu.evaluations.status import Status as Status_1263d15a
class EvaluationEntity(object):
"""
:param start_timestamp:
:type start_timestamp: (optional) datetime
:param end_timestamp:
:type end_timestamp: (optional) datetime
:param status:
:type status: (optional) ask_smapi_model.v1.skill.nlu.evaluations.status.Status
:param error_message: Error message when evaluation job fails
:type error_message: (optional) str
:param inputs:
:type inputs: (optional) ask_smapi_model.v1.skill.nlu.evaluations.evaluation_inputs.EvaluationInputs
"""
deserialized_types = {
'start_timestamp': 'datetime',
'end_timestamp': 'datetime',
'status': 'ask_smapi_model.v1.skill.nlu.evaluations.status.Status',
'error_message': 'str',
'inputs': 'ask_smapi_model.v1.skill.nlu.evaluations.evaluation_inputs.EvaluationInputs'
} # type: Dict
attribute_map = {
'start_timestamp': 'startTimestamp',
'end_timestamp': 'endTimestamp',
'status': 'status',
'error_message': 'errorMessage',
'inputs': 'inputs'
} # type: Dict
supports_multiple_types = False
def __init__(self, start_timestamp=None, end_timestamp=None, status=None, error_message=None, inputs=None):
# type: (Optional[datetime], Optional[datetime], Optional[Status_1263d15a], Optional[str], Optional[EvaluationInputs_14ada97b]) -> None
"""
:param start_timestamp:
:type start_timestamp: (optional) datetime
:param end_timestamp:
:type end_timestamp: (optional) datetime
:param status:
:type status: (optional) ask_smapi_model.v1.skill.nlu.evaluations.status.Status
:param error_message: Error message when evaluation job fails
:type error_message: (optional) str
:param inputs:
:type inputs: (optional) ask_smapi_model.v1.skill.nlu.evaluations.evaluation_inputs.EvaluationInputs
"""
self.__discriminator_value = None # type: str
self.start_timestamp = start_timestamp
self.end_timestamp = end_timestamp
self.status = status
self.error_message = error_message
self.inputs = inputs
def to_dict(self):
# type: () -> Dict[str, object]
"""Returns the model properties as a dict"""
result = {} # type: Dict
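        # Walk every declared attribute and serialize it recursively: nested
        # models expose to_dict(), enums collapse to their value, and lists
        # and dicts are handled element by element.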
for attr, _ in six.iteritems(self.deserialized_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else
x.value if isinstance(x, Enum) else x,
value
))
elif isinstance(value, Enum):
result[attr] = value.value
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else
(item[0], item[1].value)
if isinstance(item[1], Enum) else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
# type: () -> str
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
# type: () -> str
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
# type: (object) -> bool
"""Returns true if both objects are equal"""
if not isinstance(other, EvaluationEntity):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
# type: (object) -> bool
"""Returns true if both objects are not equal"""
return not self == other
| 36.211679 | 143 | 0.630518 |
343fb02e74393243b96f7c6392c0a286b30e1732 | 3,095 | py | Python | keras_cnn.py | davidtranit80/keras_cnn | 0d775fb6cf367f2f20064e711c71d0c797094264 | ["Apache-2.0"] | null | null | null | keras_cnn.py | davidtranit80/keras_cnn | 0d775fb6cf367f2f20064e711c71d0c797094264 | ["Apache-2.0"] | null | null | null | keras_cnn.py | davidtranit80/keras_cnn | 0d775fb6cf367f2f20064e711c71d0c797094264 | ["Apache-2.0"] | null | null | null |
import sys
import keras
from keras import backend as K
from keras.callbacks import ModelCheckpoint
from keras.datasets import mnist
from keras.layers import Conv2D, Dense, Dropout, Flatten, MaxPooling2D
from keras.models import Sequential
batch_size = 128
num_classes = 10
epochs = 20
# input image dimensions
image_rows, image_cols = 28, 28
# Load and split data into train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()
if K.image_data_format() == 'channels_first':
x_train = x_train.reshape(x_train.shape[0], 1, image_rows, image_cols)
x_test = x_test.reshape(x_test.shape[0], 1, image_rows, image_cols)
input_shape = (1, image_rows, image_cols)
else:
x_train = x_train.reshape(x_train.shape[0], image_rows, image_cols, 1)
x_test = x_test.reshape(x_test.shape[0], image_rows, image_cols, 1)
input_shape = (image_rows, image_cols, 1)
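# Scale pixel values to [0, 1] and one-hot encode the labels.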
def preprocess_data(x_train, y_train, x_test, y_test):
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255.0
x_test /= 255.0
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
return x_train, y_train, x_test, y_test
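# Small convnet: two convolution layers, max-pooling, dropout and two dense
# layers ending in a softmax over the 10 digit classes.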
def create_model():
model = Sequential()
model.add(Conv2D(32, kernel_size=(5, 5),
activation='relu',
input_shape=input_shape))
model.add(Conv2D(64, (5, 5), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
return model
def compile_model(model):
    model.compile(loss=keras.losses.categorical_crossentropy,
                  optimizer=keras.optimizers.Adam(),
                  metrics=['accuracy'])
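# Train with a ModelCheckpoint callback that keeps the weights achieving the
# best validation accuracy.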
def train_model(model, x_train, y_train, x_test, y_test):
filepath = "weights.best.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
callbacks_list = [checkpoint]
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
validation_data=(x_test, y_test), callbacks=callbacks_list)
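# Report loss and accuracy on the held-out test set.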
def test_model(model, x_test, y_test):
score = model.evaluate(x_test, y_test, verbose=1)
print('Test loss = ', score[0])
print('Test accuracy = ', score[1])
def load_model(model, filepath):
model.load_weights(filepath)
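# Train from scratch when run without arguments; otherwise load the weights
# file given as the first argument and only evaluate.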
if __name__ == "__main__":
x_train, y_train, x_test, y_test = preprocess_data(x_train, y_train, x_test, y_test)
model = create_model()
    compile_model(model)
if (len(sys.argv) == 1):
train_model(model, x_train, y_train, x_test, y_test)
else:
filepath = sys.argv[1]
load_model(model, filepath)
test_model(model, x_test, y_test)
| 34.010989 | 105 | 0.687561 |
67f1433d1703402d7f758909ef5f9273478b10f8 | 43,317 | py | Python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_08_01/aio/operations/_public_ip_addresses_operations.py | beltr0n/azure-sdk-for-python | 2f7fb8bee881b0fc0386a0ad5385755ceedd0453 | ["MIT"] | 2 | 2019-08-23T21:14:00.000Z | 2021-09-07T18:32:34.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_08_01/aio/operations/_public_ip_addresses_operations.py | beltr0n/azure-sdk-for-python | 2f7fb8bee881b0fc0386a0ad5385755ceedd0453 | ["MIT"] | 4 | 2019-04-17T17:57:49.000Z | 2020-04-24T21:11:22.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_08_01/aio/operations/_public_ip_addresses_operations.py | beltr0n/azure-sdk-for-python | 2f7fb8bee881b0fc0386a0ad5385755ceedd0453 | ["MIT"] | 2 | 2021-05-23T16:46:31.000Z | 2021-05-26T23:51:09.000Z |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class PublicIPAddressesOperations:
"""PublicIPAddressesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_08_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
public_ip_address_name: str,
**kwargs
) -> None:
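        # Raw DELETE call without long-running-operation handling; the public
        # begin_delete() wrapper below adds polling on top of this.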
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-08-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'publicIpAddressName': self._serialize.url("public_ip_address_name", public_ip_address_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
public_ip_address_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Deletes the specified public IP address.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param public_ip_address_name: The name of the public IP address.
:type public_ip_address_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
public_ip_address_name=public_ip_address_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'publicIpAddressName': self._serialize.url("public_ip_address_name", public_ip_address_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}'} # type: ignore
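# Illustrative sketch, kept in comments so the generated module is unchanged: how the
# begin_delete long-running operation above is typically awaited. Client construction is
# an assumption (azure-identity plus the aio NetworkManagementClient that normally owns
# this operations class); resource names are placeholders.
#
#   async with NetworkManagementClient(credential, "<subscription-id>") as client:
#       poller = await client.public_ip_addresses.begin_delete("my-rg", "my-public-ip")
#       await poller.result()   # resolves to None once the LRO reaches a terminal state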
async def get(
self,
resource_group_name: str,
public_ip_address_name: str,
expand: Optional[str] = None,
**kwargs
) -> "_models.PublicIPAddress":
"""Gets the specified public IP address in a specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param public_ip_address_name: The name of the public IP address.
:type public_ip_address_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PublicIPAddress, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_08_01.models.PublicIPAddress
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PublicIPAddress"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-08-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'publicIpAddressName': self._serialize.url("public_ip_address_name", public_ip_address_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PublicIPAddress', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
public_ip_address_name: str,
parameters: "_models.PublicIPAddress",
**kwargs
) -> "_models.PublicIPAddress":
cls = kwargs.pop('cls', None) # type: ClsType["_models.PublicIPAddress"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-08-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'publicIpAddressName': self._serialize.url("public_ip_address_name", public_ip_address_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'PublicIPAddress')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('PublicIPAddress', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('PublicIPAddress', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
public_ip_address_name: str,
parameters: "_models.PublicIPAddress",
**kwargs
) -> AsyncLROPoller["_models.PublicIPAddress"]:
"""Creates or updates a static or dynamic public IP address.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param public_ip_address_name: The name of the public IP address.
:type public_ip_address_name: str
:param parameters: Parameters supplied to the create or update public IP address operation.
:type parameters: ~azure.mgmt.network.v2019_08_01.models.PublicIPAddress
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either PublicIPAddress or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_08_01.models.PublicIPAddress]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.PublicIPAddress"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
public_ip_address_name=public_ip_address_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('PublicIPAddress', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'publicIpAddressName': self._serialize.url("public_ip_address_name", public_ip_address_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}'} # type: ignore
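# Illustrative sketch, in comments only: creating a basic static address with the
# begin_create_or_update operation above. The model attribute names are assumed from the
# PublicIPAddress model in this package; the location and names are placeholders.
#
#   params = _models.PublicIPAddress(location="eastus", public_ip_allocation_method="Static")
#   poller = await client.public_ip_addresses.begin_create_or_update("my-rg", "my-public-ip", params)
#   public_ip = await poller.result()   # deserialized PublicIPAddress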
async def _update_tags_initial(
self,
resource_group_name: str,
public_ip_address_name: str,
parameters: "_models.TagsObject",
**kwargs
) -> "_models.PublicIPAddress":
cls = kwargs.pop('cls', None) # type: ClsType["_models.PublicIPAddress"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-08-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_tags_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'publicIpAddressName': self._serialize.url("public_ip_address_name", public_ip_address_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PublicIPAddress', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}'} # type: ignore
async def begin_update_tags(
self,
resource_group_name: str,
public_ip_address_name: str,
parameters: "_models.TagsObject",
**kwargs
) -> AsyncLROPoller["_models.PublicIPAddress"]:
"""Updates public IP address tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param public_ip_address_name: The name of the public IP address.
:type public_ip_address_name: str
:param parameters: Parameters supplied to update public IP address tags.
:type parameters: ~azure.mgmt.network.v2019_08_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either PublicIPAddress or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_08_01.models.PublicIPAddress]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.PublicIPAddress"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_tags_initial(
resource_group_name=resource_group_name,
public_ip_address_name=public_ip_address_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('PublicIPAddress', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'publicIpAddressName': self._serialize.url("public_ip_address_name", public_ip_address_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}'} # type: ignore
def list_all(
self,
**kwargs
) -> AsyncIterable["_models.PublicIPAddressListResult"]:
"""Gets all the public IP addresses in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PublicIPAddressListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_08_01.models.PublicIPAddressListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PublicIPAddressListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-08-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('PublicIPAddressListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/publicIPAddresses'} # type: ignore
def list(
self,
resource_group_name: str,
**kwargs
) -> AsyncIterable["_models.PublicIPAddressListResult"]:
"""Gets all public IP addresses in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PublicIPAddressListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_08_01.models.PublicIPAddressListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PublicIPAddressListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-08-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('PublicIPAddressListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses'} # type: ignore
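# Illustrative sketch, in comments only: list() and list_all() above return AsyncItemPaged,
# so results are consumed with `async for`; next_link handling happens inside
# prepare_request/get_next. `client` is the assumed wrapper client from the earlier sketches.
#
#   async for public_ip in client.public_ip_addresses.list("my-rg"):
#       print(public_ip.name, public_ip.ip_address)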
def list_virtual_machine_scale_set_public_ip_addresses(
self,
resource_group_name: str,
virtual_machine_scale_set_name: str,
**kwargs
) -> AsyncIterable["_models.PublicIPAddressListResult"]:
"""Gets information about all public IP addresses on a virtual machine scale set level.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_machine_scale_set_name: The name of the virtual machine scale set.
:type virtual_machine_scale_set_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PublicIPAddressListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_08_01.models.PublicIPAddressListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PublicIPAddressListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-10-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_virtual_machine_scale_set_public_ip_addresses.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('PublicIPAddressListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_virtual_machine_scale_set_public_ip_addresses.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/publicipaddresses'} # type: ignore
def list_virtual_machine_scale_set_vm_public_ip_addresses(
self,
resource_group_name: str,
virtual_machine_scale_set_name: str,
virtualmachine_index: str,
network_interface_name: str,
ip_configuration_name: str,
**kwargs
) -> AsyncIterable["_models.PublicIPAddressListResult"]:
"""Gets information about all public IP addresses in a virtual machine IP configuration in a
virtual machine scale set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_machine_scale_set_name: The name of the virtual machine scale set.
:type virtual_machine_scale_set_name: str
:param virtualmachine_index: The virtual machine index.
:type virtualmachine_index: str
:param network_interface_name: The network interface name.
:type network_interface_name: str
:param ip_configuration_name: The IP configuration name.
:type ip_configuration_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PublicIPAddressListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_08_01.models.PublicIPAddressListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PublicIPAddressListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-10-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_virtual_machine_scale_set_vm_public_ip_addresses.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
'virtualmachineIndex': self._serialize.url("virtualmachine_index", virtualmachine_index, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'ipConfigurationName': self._serialize.url("ip_configuration_name", ip_configuration_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('PublicIPAddressListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_virtual_machine_scale_set_vm_public_ip_addresses.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces/{networkInterfaceName}/ipconfigurations/{ipConfigurationName}/publicipaddresses'} # type: ignore
async def get_virtual_machine_scale_set_public_ip_address(
self,
resource_group_name: str,
virtual_machine_scale_set_name: str,
virtualmachine_index: str,
network_interface_name: str,
ip_configuration_name: str,
public_ip_address_name: str,
expand: Optional[str] = None,
**kwargs
) -> "_models.PublicIPAddress":
"""Get the specified public IP address in a virtual machine scale set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_machine_scale_set_name: The name of the virtual machine scale set.
:type virtual_machine_scale_set_name: str
:param virtualmachine_index: The virtual machine index.
:type virtualmachine_index: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:param ip_configuration_name: The name of the IP configuration.
:type ip_configuration_name: str
:param public_ip_address_name: The name of the public IP Address.
:type public_ip_address_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PublicIPAddress, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_08_01.models.PublicIPAddress
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PublicIPAddress"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-10-01"
accept = "application/json"
# Construct URL
url = self.get_virtual_machine_scale_set_public_ip_address.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
'virtualmachineIndex': self._serialize.url("virtualmachine_index", virtualmachine_index, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'ipConfigurationName': self._serialize.url("ip_configuration_name", ip_configuration_name, 'str'),
'publicIpAddressName': self._serialize.url("public_ip_address_name", public_ip_address_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PublicIPAddress', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_virtual_machine_scale_set_public_ip_address.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces/{networkInterfaceName}/ipconfigurations/{ipConfigurationName}/publicipaddresses/{publicIpAddressName}'} # type: ignore
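# A minimal end-to-end sketch appended for illustration only; it is not part of the
# generated file. It assumes azure-identity and the aio NetworkManagementClient wrapper
# that normally instantiates this operations class; subscription, resource group, and
# address names are placeholders.
if __name__ == "__main__":
    import asyncio
    from azure.identity.aio import DefaultAzureCredential
    from azure.mgmt.network.aio import NetworkManagementClient

    async def _demo():
        async with DefaultAzureCredential() as credential:
            async with NetworkManagementClient(credential, "<subscription-id>") as client:
                # Read one address, then enumerate the resource group.
                ip = await client.public_ip_addresses.get("my-rg", "my-public-ip")
                print(ip.name, ip.ip_address)
                async for addr in client.public_ip_addresses.list("my-rg"):
                    print(addr.name)

    asyncio.run(_demo())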
| avg_line_length = 51.445368 | max_line_length = 395 | alphanum_fraction = 0.674585 |

hexsha = 90d96a7d0bda2da4420cec0f693b0606a71eacd2 | size = 1,268 | ext = py | lang = Python
max_stars_repo: accounts/urls.py | asandeep/pseudo-electronics | 2ce9f03a43c917448bfc340f8011eaf4bac159da | ["MIT"] | count = null | dates = null
max_issues_repo: accounts/urls.py | asandeep/pseudo-electronics | 2ce9f03a43c917448bfc340f8011eaf4bac159da | ["MIT"] | count = null | dates = null
max_forks_repo: accounts/urls.py | asandeep/pseudo-electronics | 2ce9f03a43c917448bfc340f8011eaf4bac159da | ["MIT"] | count = null | dates = null
from django.contrib.auth import views as auth_views
from django.contrib.auth.decorators import login_required
from django.urls import path
from accounts import forms, views
app_name = "accounts"
urlpatterns = [
path(
"<int:pk>/",
login_required(views.AccountDetails.as_view()),
name="details",
),
path(
"login/",
auth_views.LoginView.as_view(
template_name="accounts/login.html",
authentication_form=forms.UserAuthenticationForm,
),
name="login",
),
path(
"logout/",
login_required(
auth_views.LogoutView.as_view(template_name="home.html")
),
name="logout",
),
path("list/", login_required(views.ListAccounts.as_view()), name="list"),
path(
"create/", login_required(views.CreateAccount.as_view()), name="create"
),
path(
"update/<int:id>/",
login_required(views.UpdateAccount.as_view()),
name="update",
),
path(
"lock/<int:id>/",
login_required(views.LockAccount.as_view()),
name="lock",
),
path(
"reset-password/<int:id>/",
login_required(views.ResetPassword.as_view()),
name="reset-password",
),
]
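# A hedged wiring sketch: because app_name = "accounts" is set above, these routes are
# typically mounted from the project-level URLconf. The project module name and the
# "accounts/" prefix are assumptions, not taken from this repository.
#
#   # <project>/urls.py
#   from django.urls import include, path
#   urlpatterns = [
#       path("accounts/", include("accounts.urls")),
#   ]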
| avg_line_length = 25.36 | max_line_length = 79 | alphanum_fraction = 0.590694 |

hexsha = 864178dc28b5648ca2bea4bd4854bf13de0f79a6 | size = 25,168 | ext = py | lang = Python
max_stars_repo: transformers/modeling_ctrl.py | Tarpelite/UniNLP | 176c2a0f88c8054bf69e1f92693d353737367c34 | ["MIT"] | count = null | dates = null
max_issues_repo: transformers/modeling_ctrl.py | Tarpelite/UniNLP | 176c2a0f88c8054bf69e1f92693d353737367c34 | ["MIT"] | count = 3 | 2021-06-02T00:41:41.000Z to 2022-02-10T01:07:59.000Z
max_forks_repo: transformers/modeling_ctrl.py | Tarpelite/UniNLP | 176c2a0f88c8054bf69e1f92693d353737367c34 | ["MIT"] | count = 1 | 2020-01-27T03:02:19.000Z to 2020-01-27T03:02:19.000Z
# coding=utf-8
# Copyright 2018 Salesforce and HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch CTRL model."""
from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import json
import logging
import math
import os
import sys
from io import open
import numpy as np
import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss
from torch.nn.parameter import Parameter
from .modeling_utils import PreTrainedModel, Conv1D, prune_conv1d_layer, SequenceSummary
from .configuration_ctrl import CTRLConfig
from .file_utils import add_start_docstrings
logger = logging.getLogger(__name__)
CTRL_PRETRAINED_MODEL_ARCHIVE_MAP = {"ctrl": "https://storage.googleapis.com/sf-ctrl/pytorch/seqlen256_v1.bin"}
def angle_defn(pos, i, d_model_size):
angle_rates = 1 / torch.pow(10000, (2 * (i//2)) / d_model_size)
return pos * angle_rates
def positional_encoding(position, d_model_size, dtype):
# create the sinusoidal pattern for the positional encoding
angle_rads = (angle_defn(torch.arange(position, dtype=dtype).unsqueeze(1),
torch.arange(d_model_size, dtype=dtype).unsqueeze(0),
d_model_size))
sines = torch.sin(angle_rads[:, 0::2])
cosines = torch.cos(angle_rads[:, 1::2])
pos_encoding = torch.cat([sines, cosines], dim=-1)
return pos_encoding
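# Shape sketch, in comments only, for a hypothetical call:
#   pe = positional_encoding(512, 1280, torch.float)
# pe has shape (512, 1280) -- one row per position, with sines in the first half of the
# feature dimension and cosines in the second, matching the concatenation above.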
def scaled_dot_product_attention(q, k, v, mask, attention_mask=None, head_mask=None):
# calculate attention
matmul_qk = torch.matmul(q, k.permute(0,1,3,2))
dk = k.shape[-1]
scaled_attention_logits = matmul_qk / np.sqrt(dk)
if mask is not None:
scaled_attention_logits += (mask * -1e4)
if attention_mask is not None:
# Apply the attention mask
scaled_attention_logits = scaled_attention_logits + attention_mask
attention_weights = torch.softmax(scaled_attention_logits, dim=-1)
# Mask heads if we want to
if head_mask is not None:
attention_weights = attention_weights * head_mask
output = torch.matmul(attention_weights, v)
return output, attention_weights
class MultiHeadAttention(torch.nn.Module):
def __init__(self, d_model_size, num_heads, output_attentions=False):
super(MultiHeadAttention, self).__init__()
self.output_attentions = output_attentions
self.num_heads = num_heads
self.d_model_size = d_model_size
self.depth = int(d_model_size / self.num_heads)
self.Wq = torch.nn.Linear(d_model_size, d_model_size)
self.Wk = torch.nn.Linear(d_model_size, d_model_size)
self.Wv = torch.nn.Linear(d_model_size, d_model_size)
self.dense = torch.nn.Linear(d_model_size, d_model_size)
def split_into_heads(self, x, batch_size):
x = x.reshape(batch_size, -1, self.num_heads, self.depth)
return x.permute([0, 2, 1, 3])
def forward(self, v, k, q, mask, layer_past=None, attention_mask=None, head_mask=None):
batch_size = q.shape[0]
q = self.Wq(q)
k = self.Wk(k)
v = self.Wv(v)
q = self.split_into_heads(q, batch_size)
k = self.split_into_heads(k, batch_size)
v = self.split_into_heads(v, batch_size)
if layer_past is not None:
past_key, past_value = layer_past[0], layer_past[1]
k = torch.cat((past_key, k), dim=-2)
v = torch.cat((past_value, v), dim=-2)
present = torch.stack((k, v))
output = scaled_dot_product_attention(q, k, v, mask, attention_mask, head_mask)
scaled_attention = output[0].permute([0, 2, 1, 3])
attn = output[1]
original_size_attention = scaled_attention.reshape(batch_size, -1, self.d_model_size)
output = self.dense(original_size_attention)
outputs = (output, present)
if self.output_attentions:
outputs = outputs + (attn,)
return outputs
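# Illustrative sketch, in comments only, with made-up sizes: self-attention over a batch
# of 2 sequences of length 5 with d_model_size=16 and 4 heads (so depth=4 per head).
#
#   mha = MultiHeadAttention(16, 4)
#   x = torch.rand(2, 5, 16)
#   out, present = mha(x, x, x, mask=None)
#   # out: (2, 5, 16); present stacks the cached k/v tensors as (2, 2, 4, 5, 4)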
def point_wise_feed_forward_network(d_model_size, dff):
return torch.nn.Sequential(torch.nn.Linear(d_model_size, dff),
torch.nn.ReLU(),
torch.nn.Linear(dff, d_model_size))
class EncoderLayer(torch.nn.Module):
def __init__(self, d_model_size, num_heads, dff, rate=0.1, output_attentions=False):
super(EncoderLayer, self).__init__()
self.multi_head_attention = MultiHeadAttention(d_model_size, num_heads, output_attentions)
self.ffn = point_wise_feed_forward_network(d_model_size, dff)
self.layernorm1 = torch.nn.LayerNorm(d_model_size, eps=1e-6)
self.layernorm2 = torch.nn.LayerNorm(d_model_size, eps=1e-6)
self.dropout1 = torch.nn.Dropout(rate)
self.dropout2 = torch.nn.Dropout(rate)
def forward(self, x, mask, layer_past=None, attention_mask=None, head_mask=None):
normed = self.layernorm1(x)
attn_outputs = self.multi_head_attention(normed, normed, normed, mask,
layer_past=layer_past,
attention_mask=attention_mask,
head_mask=head_mask)
attn_output = attn_outputs[0]
attn_output = self.dropout1(attn_output)
out1 = x + attn_output
out2 = self.layernorm2(out1)
ffn_output = self.ffn(out2)
ffn_output = self.dropout2(ffn_output)
out2 = out1 + ffn_output
outputs = (out2,) + attn_outputs[1:]
return outputs
class CTRLPreTrainedModel(PreTrainedModel):
""" An abstract class to handle weights initialization and
a simple interface for downloading and loading pretrained models.
"""
config_class = CTRLConfig
pretrained_model_archive_map = CTRL_PRETRAINED_MODEL_ARCHIVE_MAP
base_model_prefix = "transformer"
def _init_weights(self, module):
""" Initialize the weights.
"""
if isinstance(module, (nn.Linear, nn.Embedding, Conv1D)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if isinstance(module, (nn.Linear, Conv1D)) and module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
CTRL_START_DOCSTRING = r""" CTRL model was proposed in
`CTRL: A Conditional Transformer Language Model for Controllable Generation`_
by Nitish Shirish Keskar*, Bryan McCann*, Lav R. Varshney, Caiming Xiong and Richard Socher.
It's a causal (unidirectional) transformer pre-trained using language modeling on a very large
corpus of ~140 GB of text data with the first token reserved as a control code (such as Links, Books, Wikipedia etc.).
This model is a PyTorch `torch.nn.Module`_ sub-class. Use it as a regular PyTorch Module and
refer to the PyTorch documentation for all matter related to general usage and behavior.
.. _`CTRL: A Conditional Transformer Language Model for Controllable Generation`:
https://www.github.com/salesforce/ctrl
.. _`torch.nn.Module`:
https://pytorch.org/docs/stable/nn.html#module
Parameters:
config (:class:`~transformers.CTRLConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
CTRL_INPUTS_DOCSTRING = r""" Inputs:
**input_ids**: ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Indices of input sequence tokens in the vocabulary.
CTRL is a model with absolute position embeddings so it's usually advised to pad the inputs on
the right rather than the left.
Indices can be obtained using :class:`transformers.CTRLTokenizer`.
See :func:`transformers.PreTrainedTokenizer.encode` and
:func:`transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details.
**past**:
list of ``torch.FloatTensor`` (one for each layer):
that contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model
(see `past` output below). Can be used to speed up sequential decoding. The token ids which have their past given to this model
should not be passed as input ids as they have already been computed.
**attention_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``:
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
**token_type_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
A parallel sequence of tokens (can be used to indicate various portions of the inputs).
The embeddings from these tokens will be summed with the respective token embeddings.
Indices are selected in the vocabulary (unlike BERT which has a specific vocabulary for segment indices).
**position_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Indices of positions of each input sequence tokens in the position embeddings.
Selected in the range ``[0, config.max_position_embeddings - 1]``.
**head_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``:
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**.
**inputs_embeds**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, embedding_dim)``:
Optionally, instead of passing ``input_ids`` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
"""
@add_start_docstrings("The bare CTRL Model transformer outputting raw hidden-states without any specific head on top.",
CTRL_START_DOCSTRING, CTRL_INPUTS_DOCSTRING)
class CTRLModel(CTRLPreTrainedModel):
r"""
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)``
Sequence of hidden-states at the last layer of the model.
**past**:
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
that contains pre-computed hidden-states (key and values in the attention blocks).
Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model
should not be passed as input ids as they have already been computed.
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
tokenizer = CTRLTokenizer.from_pretrained('ctrl')
model = CTRLModel.from_pretrained('ctrl')
input_ids = torch.tensor(tokenizer.encode("Links Hello, my dog is cute")).unsqueeze(0) # Batch size 1
outputs = model(input_ids)
last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple
"""
def __init__(self, config):
super(CTRLModel, self).__init__(config)
self.output_hidden_states = config.output_hidden_states
self.output_attentions = config.output_attentions
self.output_past = config.output_past
self.d_model_size = config.n_embd
self.num_layers = config.n_layer
self.pos_encoding = positional_encoding(config.n_positions, self.d_model_size, torch.float)
self.w = nn.Embedding(config.vocab_size, config.n_embd)
self.dropout = nn.Dropout(config.embd_pdrop)
self.h = nn.ModuleList([EncoderLayer(config.n_embd,
config.n_head,
config.dff,
config.resid_pdrop,
config.output_attentions) for _ in range(config.n_layer)])
self.layernorm = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
self.init_weights()
def get_input_embeddings(self):
return self.w
def set_input_embeddings(self, new_embeddings):
self.w = new_embeddings
def _prune_heads(self, heads_to_prune):
""" Prunes heads of the model.
heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
"""
for layer, heads in heads_to_prune.items():
self.h[layer].attn.prune_heads(heads)
def forward(self, input_ids=None, past=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None):
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if past is None:
past_length = 0
past = [None] * len(self.h)
else:
past_length = past[0][0].size(-2)
if position_ids is None:
device = input_ids.device if input_ids is not None else inputs_embeds.device
position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)
position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1])
# Attention mask.
if attention_mask is not None:
attention_mask = attention_mask.view(-1, input_shape[-1])
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
attention_mask = attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
attention_mask = (1.0 - attention_mask) * -10000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# head_mask has shape n_layer x batch x n_heads x N x N
if head_mask is not None:
if head_mask.dim() == 1:
head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
head_mask = head_mask.expand(self.config.n_layer, -1, -1, -1, -1)
elif head_mask.dim() == 2:
head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1) # We can specify head_mask for each layer
head_mask = head_mask.to(dtype=next(self.parameters()).dtype) # switch to float if needed + fp16 compatibility
else:
head_mask = [None] * self.config.n_layer
if token_type_ids is not None:
token_type_ids = token_type_ids.view(-1, input_shape[-1])
token_type_embeds = self.w(token_type_ids)
token_type_embeds *= np.sqrt(self.d_model_size)
else:
token_type_embeds = 0
position_ids = position_ids.view(-1, input_shape[-1])
if inputs_embeds is None:
inputs_embeds = self.w(input_ids)
# inputs_embeds = embedded.unsqueeze(0) if len(input_ids.shape)<2 else embedded
seq_len = input_shape[-1]
mask = torch.triu(torch.ones(seq_len, seq_len), 1).to(inputs_embeds.device)
inputs_embeds *= np.sqrt(self.d_model_size)
pos_embeds = self.pos_encoding[position_ids, :].to(inputs_embeds.device)
hidden_states = inputs_embeds + pos_embeds + token_type_embeds
hidden_states = self.dropout(hidden_states)
output_shape = input_shape + (inputs_embeds.size(-1),)
presents = ()
all_hidden_states = ()
all_attentions = []
for i, (h, layer_past) in enumerate(zip(self.h, past)):
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states.view(*output_shape),)
outputs = h(hidden_states,
mask,
layer_past=layer_past,
attention_mask=attention_mask,
head_mask=head_mask[i])
hidden_states, present = outputs[:2]
if self.output_past:
presents = presents + (present,)
if self.output_attentions:
all_attentions.append(outputs[2])
hidden_states = self.layernorm(hidden_states)
hidden_states = hidden_states.view(*output_shape)
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
outputs = (hidden_states,)
if self.output_past:
outputs = outputs + (presents,)
if self.output_hidden_states:
outputs = outputs + (all_hidden_states,)
if self.output_attentions:
# let the number of heads free (-1) so we can extract attention even after head pruning
attention_output_shape = input_shape[:-1] + (-1,) + all_attentions[0].shape[-2:]
all_attentions = tuple(t.view(*attention_output_shape) for t in all_attentions)
outputs = outputs + (all_attentions,)
return outputs
@add_start_docstrings("""The CTRL Model transformer with a language modeling head on top
(linear layer with weights tied to the input embeddings). """, CTRL_START_DOCSTRING, CTRL_INPUTS_DOCSTRING)
class CTRLLMHeadModel(CTRLPreTrainedModel):
r"""
**labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Labels for language modeling.
Note that the labels **are shifted** inside the model, i.e. you can set ``lm_labels = input_ids``
Indices are selected in ``[-1, 0, ..., config.vocab_size]``
All labels set to ``-1`` are ignored (masked), the loss is only
computed for labels in ``[0, ..., config.vocab_size]``
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
Language modeling loss.
**prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)``
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
**past**:
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
that contains pre-computed hidden-states (key and values in the attention blocks).
Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model
should not be passed as input ids as they have already been computed.
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
import torch
from transformers import CTRLTokenizer, CTRLLMHeadModel
tokenizer = CTRLTokenizer.from_pretrained('ctrl')
model = CTRLLMHeadModel.from_pretrained('ctrl')
input_ids = torch.tensor(tokenizer.encode("Links Hello, my dog is cute")).unsqueeze(0) # Batch size 1
outputs = model(input_ids, labels=input_ids)
loss, logits = outputs[:2]
"""
def __init__(self, config):
super(CTRLLMHeadModel, self).__init__(config)
self.transformer = CTRLModel(config)
self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=True)
self.init_weights()
def get_output_embeddings(self):
return self.lm_head
def forward(self, input_ids=None, past=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None,
labels=None):
transformer_outputs = self.transformer(input_ids,
past=past,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds)
hidden_states = transformer_outputs[0]
lm_logits = self.lm_head(hidden_states)
outputs = (lm_logits,) + transformer_outputs[1:]
if labels is not None:
# Shift so that tokens < n predict n
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
# Flatten the tokens
loss_fct = CrossEntropyLoss(ignore_index=-1)
loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)),
shift_labels.view(-1))
outputs = (loss,) + outputs
return outputs # (loss), lm_logits, presents, (all hidden_states), (attentions)
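# A minimal sketch of the label shifting used above, with made-up sizes, appended for
# illustration only: position t's logits are scored against the token at position t+1,
# and labels set to -1 are ignored by the loss.
if __name__ == "__main__":
    demo_logits = torch.randn(1, 5, 10)             # (batch, seq, vocab)
    demo_labels = torch.tensor([[3, 1, 4, 1, -1]])  # last position masked out
    shift_logits = demo_logits[..., :-1, :].contiguous()
    shift_labels = demo_labels[..., 1:].contiguous()
    demo_loss = CrossEntropyLoss(ignore_index=-1)(
        shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
    print(demo_loss.item())  # averaged over the three non-ignored positions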
| avg_line_length = 50.135458 | max_line_length = 147 | alphanum_fraction = 0.641052 |

hexsha = f0cfc5c428b925a5dd64fc029452e2f7801a3ab2 | size = 756 | ext = py | lang = Python
max_stars_repo: creator/extract_configs/templates/family_trio_config.py | kids-first/kf-api-study-creator | 93a79b108b6474f9b4135ace06c89ddcf63dd257 | ["Apache-2.0"] | count = 3 | 2019-05-04T02:07:28.000Z to 2020-10-16T17:47:44.000Z
max_issues_repo: creator/extract_configs/templates/family_trio_config.py | kids-first/kf-api-study-creator | 93a79b108b6474f9b4135ace06c89ddcf63dd257 | ["Apache-2.0"] | count = 604 | 2019-02-21T18:14:51.000Z to 2022-02-10T08:13:54.000Z
max_forks_repo: creator/extract_configs/templates/family_trio_config.py | kids-first/kf-api-study-creator | 93a79b108b6474f9b4135ace06c89ddcf63dd257 | ["Apache-2.0"] | count = null | dates = null
"""
This is an extract configuration for the Family Trios template
See template definitions here:
https://docs.google.com/spreadsheets/d/1ugcw1Rh3e7vXnc7OWlR4J7bafiBjGnfd4-rEThI-BNI
"""
from kf_lib_data_ingest.common.concept_schema import CONCEPT
from kf_lib_data_ingest.etl.extract.operations import keep_map
source_data_url = "{{ download_url }}"
operations = [
keep_map(in_col="Participant ID", out_col=CONCEPT.PARTICIPANT.ID),
keep_map(
in_col="Mother Participant ID", out_col=CONCEPT.PARTICIPANT.MOTHER_ID
),
keep_map(
in_col="Father Participant ID", out_col=CONCEPT.PARTICIPANT.FATHER_ID
),
keep_map(
in_col="Proband",
out_col=CONCEPT.PARTICIPANT.IS_PROBAND,
optional=True
),
]
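# Not part of this extract config: conceptually, each keep_map above selects a
# single source column and re-labels it with a standard concept attribute. The
# pandas snippet below only illustrates that mapping; the target labels are
# illustrative stand-ins, not how kf_lib_data_ingest actually applies the
# operations list.
def _example_keep_map_effect():
    import pandas as pd
    source = pd.DataFrame({
        "Participant ID": ["P1", "P2"],
        "Mother Participant ID": ["M1", "M2"],
        "Father Participant ID": ["F1", "F2"],
        "Proband": [True, False],
    })
    return source.rename(columns={
        "Participant ID": "CONCEPT.PARTICIPANT.ID",
        "Mother Participant ID": "CONCEPT.PARTICIPANT.MOTHER_ID",
        "Father Participant ID": "CONCEPT.PARTICIPANT.FATHER_ID",
        "Proband": "CONCEPT.PARTICIPANT.IS_PROBAND",
    })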
| 28 | 83 | 0.73545 |
8ab47b215dd213a094ad1c94dce6a5f882e00bd7 | 695 | py | Python | examples/tellurium-files/linearChain.py | ShaikAsifullah/distributed-tellurium | 007e9b3842b614edd34908c001119c6da1d41897 | ["Apache-2.0"] | 1 | 2019-06-19T04:40:33.000Z | 2019-06-19T04:40:33.000Z | examples/tellurium-files/linearChain.py | ShaikAsifullah/distributed-tellurium | 007e9b3842b614edd34908c001119c6da1d41897 | ["Apache-2.0"] | null | null | null | examples/tellurium-files/linearChain.py | ShaikAsifullah/distributed-tellurium | 007e9b3842b614edd34908c001119c6da1d41897 | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
"""
Linear chain of reactions.
"""
from __future__ import print_function, division
import tellurium as te
model = '''
model feedback()
// Reactions:
J0: $X0 -> S1; (VM1 * (X0 - S1/Keq1))/(1 + X0 + S1 + S4^h);
J1: S1 -> S2; (10 * S1 - 2 * S2) / (1 + S1 + S2);
J2: S2 -> S3; (10 * S2 - 2 * S3) / (1 + S2 + S3);
J3: S3 -> S4; (10 * S3 - 2 * S4) / (1 + S3 + S4);
J4: S4 -> $X1; (V4 * S4) / (KS4 + S4);
// Species initializations:
S1 = 0; S2 = 0; S3 = 0;
S4 = 0; X0 = 10; X1 = 0;
// Variable initialization:
VM1 = 10; Keq1 = 10; h = 10; V4 = 2.5; KS4 = 0.5;
end'''
r = te.loada(model)
result = r.simulate(0, 40, 500)
r.plotWithLegend(result)
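# Not part of the original example: assuming the default selections, `result`
# is a 2-D array whose first column is time and whose remaining columns are the
# floating species, so it can also be inspected or exported with plain NumPy.
import numpy as np
print(result[:5, :])  # first few time points
np.savetxt('linearChain_result.csv', result, delimiter=',')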
| 24.821429 | 64 | 0.515108 |
1559202112638de2c014e3cfc18e011073f70308 | 816 | py | Python | tests/test_transpose.py | timwuu/deep-learning-from-scratch-3 | 6f18dee8c1d764e16275ed68f90966bc85f0ae66 | ["MIT"] | 539 | 2019-11-01T04:09:42.000Z | 2022-03-26T06:25:44.000Z | tests/test_transpose.py | timwuu/deep-learning-from-scratch-3 | 6f18dee8c1d764e16275ed68f90966bc85f0ae66 | ["MIT"] | 32 | 2019-11-21T07:50:16.000Z | 2022-01-26T14:01:55.000Z | tests/test_transpose.py | timwuu/deep-learning-from-scratch-3 | 6f18dee8c1d764e16275ed68f90966bc85f0ae66 | ["MIT"] | 157 | 2019-11-17T22:20:03.000Z | 2022-03-23T02:50:51.000Z |
import unittest
import numpy as np
from dezero import Variable
import dezero.functions as F
from dezero.utils import gradient_check
class TestTranspose(unittest.TestCase):
def test_forward1(self):
x = Variable(np.array([[1, 2, 3], [4, 5, 6]]))
y = F.transpose(x)
self.assertEqual(y.shape, (3, 2))
def test_backward1(self):
x = np.array([[1, 2, 3], [4, 5, 6]])
self.assertTrue(gradient_check(F.transpose, x))
def test_backward2(self):
x = np.array([1, 2, 3])
self.assertTrue(gradient_check(F.transpose, x))
def test_backward3(self):
x = np.random.randn(10, 5)
self.assertTrue(gradient_check(F.transpose, x))
def test_backward4(self):
x = np.array([1, 2])
self.assertTrue(gradient_check(F.transpose, x))
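# Not part of the test file above: a plain-NumPy sketch of the idea behind a
# numerical gradient check such as dezero.utils.gradient_check (dezero's own
# helper has its own signature, tolerances and Variable handling). A central
# finite difference is compared against a known analytic gradient.
def _numerical_gradient_check_sketch():
    def numerical_grad(f, x, eps=1e-4):
        grad = np.zeros_like(x)
        it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
        while not it.finished:
            idx = it.multi_index
            orig = x[idx]
            x[idx] = orig + eps
            y_plus = f(x)
            x[idx] = orig - eps
            y_minus = f(x)
            grad[idx] = (y_plus - y_minus) / (2 * eps)
            x[idx] = orig
            it.iternext()
        return grad
    x = np.random.randn(2, 3)
    analytic = 2 * x  # gradient of sum(x ** 2)
    numeric = numerical_grad(lambda v: np.sum(v ** 2), x)
    assert np.allclose(analytic, numeric, atol=1e-3)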
| 28.137931 | 55 | 0.631127 |
bb4cba446151fd5d7e90f910de602ebb6a82238a | 5,221 | py | Python | simulator/simple_planning_simulator/launch/simple_planning_simulator.launch.py | TakahiroNISHIOKA/autoware.universe | 67459763bd61a57e044132174dd1c403550c1f00 | ["Apache-2.0"] | null | null | null | simulator/simple_planning_simulator/launch/simple_planning_simulator.launch.py | TakahiroNISHIOKA/autoware.universe | 67459763bd61a57e044132174dd1c403550c1f00 | ["Apache-2.0"] | 11 | 2022-01-24T10:26:37.000Z | 2022-03-22T08:19:01.000Z | simulator/simple_planning_simulator/launch/simple_planning_simulator.launch.py | KeisukeShima/autoware.universe | 21d5453dfa2bf75716b8737fb7b58f3b45483e29 | ["Apache-2.0"] | null | null | null |
# Copyright 2021 The Autoware Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from ament_index_python.packages import get_package_share_directory
import launch
from launch.actions import DeclareLaunchArgument
from launch.actions import GroupAction
from launch.actions import IncludeLaunchDescription
from launch.actions import OpaqueFunction
from launch.actions import SetLaunchConfiguration
from launch.conditions import IfCondition
from launch.conditions import UnlessCondition
from launch.substitutions import LaunchConfiguration
from launch_ros.actions import ComposableNodeContainer
from launch_ros.actions import LoadComposableNodes
from launch_ros.actions import Node
from launch_ros.descriptions import ComposableNode
from launch_ros.substitutions import FindPackageShare
import yaml
def launch_setup(context, *args, **kwargs):
# vehicle information param path
vehicle_info_param_path = LaunchConfiguration("vehicle_info_param_file").perform(context)
with open(vehicle_info_param_path, "r") as f:
vehicle_info_param = yaml.safe_load(f)["/**"]["ros__parameters"]
vehicle_characteristics_param_path = LaunchConfiguration("vehicle_characteristics_param_file").perform(context)
with open(vehicle_characteristics_param_path, "r") as f:
vehicle_characteristics_param = yaml.safe_load(f)["/**"]["ros__parameters"]
simulator_model_param_path = LaunchConfiguration("simulator_model_param_file").perform(context)
with open(simulator_model_param_path, "r") as f:
simulator_model_param = yaml.safe_load(f)["/**"]["ros__parameters"]
simple_planning_simulator_node = Node(
package='simple_planning_simulator',
executable='simple_planning_simulator_exe',
name='simple_planning_simulator',
namespace='simulation',
output='screen',
parameters=[
vehicle_info_param,
vehicle_characteristics_param,
simulator_model_param,
{
"initial_engage_state": LaunchConfiguration("initial_engage_state"),
},
],
remappings=[
('input/ackermann_control_command', '/control/command/control_cmd'),
('input/gear_command', '/control/command/gear_cmd'),
('input/turn_indicators_command', '/control/command/turn_indicators_cmd'),
('input/hazard_lights_command', '/control/command/hazard_lights_cmd'),
('input/trajectory', '/planning/scenario_planning/trajectory'),
('input/engage', '/vehicle/engage'),
('output/twist', '/vehicle/status/velocity_status'),
('output/odometry', '/localization/kinematic_state'),
('output/steering', '/vehicle/status/steering_status'),
('output/gear_report', '/vehicle/status/gear_status'),
('output/turn_indicators_report', '/vehicle/status/turn_indicators_status'),
('output/hazard_lights_report', '/vehicle/status/hazard_lights_status'),
('output/control_mode_report', '/vehicle/status/control_mode'),
('/initialpose', '/initialpose'),
]
)
map_to_odom_tf_publisher = Node(
package='tf2_ros',
executable='static_transform_publisher',
name='static_map_to_odom_tf_publisher',
output='screen',
arguments=['0.0', '0.0', '0.0', '0', '0', '0', 'map', 'odom'])
group = GroupAction(
[
simple_planning_simulator_node,
map_to_odom_tf_publisher
]
)
return [group]
def generate_launch_description():
launch_arguments = []
def add_launch_arg(name: str, default_value=None, description=None):
launch_arguments.append(
DeclareLaunchArgument(name, default_value=default_value, description=description)
)
add_launch_arg(
"vehicle_info_param_file",
[
FindPackageShare("vehicle_info_util"),
"/config/vehicle_info.param.yaml",
],
"path to the parameter file of vehicle information",
)
add_launch_arg(
"vehicle_characteristics_param_file",
[
FindPackageShare("simple_planning_simulator"),
"/param/vehicle_characteristics.param.yaml",
],
"path to config file for vehicle characteristics",
)
add_launch_arg(
"simulator_model_param_file",
[
FindPackageShare("simple_planning_simulator"),
"/param/simple_planning_simulator_default.param.yaml",
],
"path to config file for simulator_model",
)
return launch.LaunchDescription(
launch_arguments
+ [OpaqueFunction(function=launch_setup)]
)
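# Not part of the original launch file: launch_setup() above reads each
# *.param.yaml file via yaml.safe_load(f)["/**"]["ros__parameters"], i.e. the
# usual ROS 2 parameter-file layout sketched below. The parameter names are
# hypothetical placeholders used only to illustrate that structure.
_example_param_file = {
    "/**": {
        "ros__parameters": {
            "wheel_base": 2.79,       # hypothetical value
            "max_steer_angle": 0.70,  # hypothetical value
        }
    }
}
# yaml.safe_dump(_example_param_file) would produce a file in that layout.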
| 37.833333 | 115 | 0.694503 |
c2c865151166a0154b63948749c916ff5f056581 | 186 | py | Python | src/my_module1.py | t2y/jep-samples | bdc893cc93fce484305a70a16e780b73fbcb0ee4 | ["Apache-2.0"] | null | null | null | src/my_module1.py | t2y/jep-samples | bdc893cc93fce484305a70a16e780b73fbcb0ee4 | ["Apache-2.0"] | null | null | null | src/my_module1.py | t2y/jep-samples | bdc893cc93fce484305a70a16e780b73fbcb0ee4 | ["Apache-2.0"] | null | null | null |
from my_module2 import sub
def add(x, y):
"""
>>> add(2, 3)
5
"""
return x + y
def calc(nums):
"""
>>> calc(range(10))
45
"""
return sum(nums)
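# Not part of the original module: the docstrings above are doctests, so they
# can be executed directly with the standard-library doctest runner.
if __name__ == '__main__':
    import doctest
    doctest.testmod(verbose=True)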
| 10.941176 | 26 | 0.44086 |
ac401988c25449b6f5e68fc8e290d2d04a5d860e | 616 | py | Python | setup.py | kyzi007/firepy | d3333db9d14c31d133d5f73a4594d3084255eca1 | ["MIT"] | 1 | 2017-02-27T05:08:23.000Z | 2017-02-27T05:08:23.000Z | setup.py | kyzi007/firepy | d3333db9d14c31d133d5f73a4594d3084255eca1 | ["MIT"] | null | null | null | setup.py | kyzi007/firepy | d3333db9d14c31d133d5f73a4594d3084255eca1 | ["MIT"] | null | null | null |
from setuptools import setup, find_packages
setup(
name='firepy',
version='0.1.5',
description='FirePHP for Python',
long_description=('This is a python server library for FirePHP '
'supporting python built-in logging facility '
'and Django.'),
author='Sung-jin Hong',
author_email='serialx@serialx.net',
license='MIT',
url='http://code.google.com/p/firepy/',
download_url='http://code.google.com/p/firepy/downloads/list',
packages=find_packages(),
zip_safe=False,
)
| 34.222222 | 72 | 0.576299 |
71bf84d9bd849548838dd2da9b61928f3c0900c5 | 6,247 | py | Python | ctripref_stats_gai.py | Fatman13/gta_swarm | 1c4603f39cd7831f5907fd619594452b3320f75f | ["MIT"] | null | null | null | ctripref_stats_gai.py | Fatman13/gta_swarm | 1c4603f39cd7831f5907fd619594452b3320f75f | ["MIT"] | null | null | null | ctripref_stats_gai.py | Fatman13/gta_swarm | 1c4603f39cd7831f5907fd619594452b3320f75f | ["MIT"] | null | null | null |
#!/usr/bin/env python
# coding=utf-8
import pprint
# import csv
import click
# import requests
# import datetime as datetime
# from datetime import date
# from xml.etree import ElementTree as ET
import os
# from random import sample
# import random
# import json
# import logging
import subprocess
import glob
import time
import sys
import datetime
import re
import csv
def getcdate(filename):
return datetime.datetime.fromtimestamp(os.path.getctime(filename)).date()
def find_ent(res, strftime):
for ent in res:
if ent['date'] == strftime:
return ent
return None
CONFIRMED = 'Confirmed or Completed'
@click.command()
@click.option('--days', default=-7, type=int)
# @click.option('--days', default=1, type=int)
def ctripref_stats_gai(days):
# list1 = [ ent for ent in glob.iglob('output_Search_booking_id_*.csv') if from_date <= getcdate(ent)]
# print('List1: ' + str(list1))
# list2 = [ ent for ent in glob.iglob('output_ctrip_update_res_no_*.csv') if from_date <= getcdate(ent)]
# print('List2: ' + str(list2))
# filename2_dict = {}
# for filename2 in list2:
# try:
# filename2_date = re.search('output_ctrip_update_res_no_(\d+)', filename2).group(1)
# except AttributeError:
# filename2_date = ''
# filename2_dict[filename2_date] = filename2
res = []
dates_lookup = []
target_date = datetime.datetime.today().date() + datetime.timedelta(days=days)
for i in range(abs(days)):
entry = {}
# 2017-05-03
entry['date'] = (target_date + datetime.timedelta(days=i)).strftime('%Y-%m-%d')
entry['#_of_bookings'] = 0
entry['#_of_hotel_ref'] = 0
        entry['comment'] = ''
        # initialize 'coverage' so every row has the same columns for csv.DictWriter
        entry['coverage'] = ''
dates_lookup.append(entry['date'])
res.append(entry)
with open('output_ctrip_search_booking_store.csv', encoding='utf-8-sig') as csvfile:
reader = csv.DictReader(csvfile)
ids = set()
for row in reader:
if row['gta_api_booking_id'] not in ids:
if row['booking_status'] == CONFIRMED:
if row['booking_departure_date'] in dates_lookup:
ent = find_ent(res, row['booking_departure_date'])
                        if ent is not None:
ent['#_of_bookings'] = ent['#_of_bookings'] + 1
ids.add(row['gta_api_booking_id'])
with open('output_ctrip_booking_store.csv', encoding='utf-8-sig') as csvfile:
reader = csv.DictReader(csvfile)
ids = set()
for row in reader:
if row['gta_api_booking_id'] not in ids:
if row['booking_status'] == CONFIRMED:
if row['booking_departure_date'] in dates_lookup:
ent = find_ent(res, row['booking_departure_date'])
                        if ent is not None:
ent['#_of_hotel_ref'] = ent['#_of_hotel_ref'] + 1
ids.add(row['gta_api_booking_id'])
# for filename1 in list1:
# entry = {}
# try:
# filename1_date = re.search('output_Search_booking_id_(\d+)', filename1).group(1)
# except AttributeError:
# filename1_date = ''
# if filename1_date != '':
# entry['date'] = filename1_date
# entry['booking_file'] = filename1
# try:
# print(filename2_dict[filename1_date])
# except KeyError:
# print('Warning: expected date is not in the dictionary..')
# continue
# entry['ctrip_api_file'] = filename2_dict[filename1_date]
# res.append(entry)
# for ent in res:
# total_booking_num = 0
# ctrip_booking_num = 0
# with open(ent['booking_file'], encoding='utf-8-sig') as csvfile:
# reader = csv.DictReader(csvfile)
# ids = set()
# for row in reader:
# if row['gta_api_booking_id'] not in ids:
# if row['booking_status'] == CONFIRMED:
# total_booking_num = total_booking_num + 1
# ids.add(row['gta_api_booking_id'])
# with open(ent['ctrip_api_file'], encoding='utf-8-sig') as csvfile:
# reader = csv.DictReader(csvfile)
# for row in reader:
# ctrip_booking_num = ctrip_booking_num + 1
for ent in res:
# ent['booking_hotel_ref_percentage'] = '{0:.3f}'.format(float( ctrip_booking_num / total_booking_num ))
if ent['#_of_bookings'] != 0:
ent['coverage'] = '{0:.3f}'.format(float( ent['#_of_hotel_ref'] / ent['#_of_bookings'] ))
target_filename = '_'.join(['output_hotel_ref_stats', datetime.datetime.now().strftime('%y%m%d_%H%M')]) + \
'.csv'
keys = res[0].keys()
with open(target_filename, 'w', newline='', encoding='utf-8') as output_file:
dict_writer = csv.DictWriter(output_file, keys)
dict_writer.writeheader()
dict_writer.writerows(res)
# # python booking_id.py --days 0 --duration 0 --client ctrip --d_type departure
# subprocess.call(['python', 'booking_id.py', '--days', str(days), '--duration', str(duration), '--client', 'ctrip', '--d_type', 'departure'])
# for i in range(3):
# print('sleeping..')
# time.sleep(1)
# newest = max(glob.iglob('output_Search_booking_id_*.csv'), key=os.path.getctime)
# subprocess.call(['python', 'search_item_hr.py', '--filename', newest])
# for i in range(3):
# print('sleeping..')
# time.sleep(1)
# newest = max(glob.iglob('output_Search_item_hr_*.csv'), key=os.path.getctime)
# subprocess.call(['python', 'hc.py', '--filename', newest])
# for i in range(3):
# print('sleeping..')
# time.sleep(1)
# # newest = max(glob.iglob('output_Search_item_hr_*.csv'), key=os.path.getctime)
# # subprocess.call(['python', 'sendmail.py', '--filename', 'output_hotel_ref_*.csv', '--title', 'Ctrip_hotel_ref'])
# newest = max(glob.iglob('output_hotel_ref_*.csv'), key=os.path.getctime)
# today_date = datetime.datetime.now().strftime('%y%m%d')
# try:
# newest_date = re.search('output_hotel_ref_(\d+)', newest).group(1)
# except AttributeError:
# newest_date = ''
# if newest_date != today_date:
# print('Error: newest date != today date.. mannual intervention needed..')
# return
# print('newest date: ' + newest_date)
# # while True:
# # sys.stdout.write("Would you like to proceed to call Ctrip's update hotel res no API? " + newest + " [Y/N]")
# # choice = input().lower()
# # if choice == 'y' or choice == 'yes':
# # break
# # if choice == 'n' or choice == 'no':
# # return
# subprocess.call(['python', 'ctrip_update_res_no.py', '--filename', newest])
if __name__ == '__main__':
ctripref_stats_gai()
| 33.406417 | 144 | 0.651353 |
997c4ab5989f7e50be7d9ab75039fa822252cafd | 45,905 | py | Python | tests/asyncio/test_asyncio_server.py | SeeringPhil/python-engineio | e6fb095e9dfe31b710620801563a3688002b4fe0 | ["MIT"] | null | null | null | tests/asyncio/test_asyncio_server.py | SeeringPhil/python-engineio | e6fb095e9dfe31b710620801563a3688002b4fe0 | ["MIT"] | null | null | null | tests/asyncio/test_asyncio_server.py | SeeringPhil/python-engineio | e6fb095e9dfe31b710620801563a3688002b4fe0 | ["MIT"] | null | null | null |
import asyncio
import gzip
import json
import logging
import sys
import unittest
import zlib
import six
if six.PY3:
from unittest import mock
else:
import mock
from engineio import asyncio_server
from engineio.async_drivers import aiohttp as async_aiohttp
from engineio import exceptions
from engineio import packet
from engineio import payload
import pytest
def AsyncMock(*args, **kwargs):
"""Return a mock asynchronous function."""
m = mock.MagicMock(*args, **kwargs)
async def mock_coro(*args, **kwargs):
return m(*args, **kwargs)
mock_coro.mock = m
return mock_coro
def _run(coro):
"""Run the given coroutine."""
return asyncio.get_event_loop().run_until_complete(coro)
@unittest.skipIf(sys.version_info < (3, 5), 'only for Python 3.5+')
class TestAsyncServer(unittest.TestCase):
@staticmethod
def get_async_mock(environ={'REQUEST_METHOD': 'GET', 'QUERY_STRING': ''}):
a = mock.MagicMock()
a._async = {
'asyncio': True,
'create_route': mock.MagicMock(),
'translate_request': mock.MagicMock(),
'make_response': mock.MagicMock(),
'websocket': 'w',
}
a._async['translate_request'].return_value = environ
a._async['make_response'].return_value = 'response'
return a
def _get_mock_socket(self):
mock_socket = mock.MagicMock()
mock_socket.connected = False
mock_socket.closed = False
mock_socket.closing = False
mock_socket.upgraded = False
mock_socket.send = AsyncMock()
mock_socket.handle_get_request = AsyncMock()
mock_socket.handle_post_request = AsyncMock()
mock_socket.check_ping_timeout = AsyncMock()
mock_socket.close = AsyncMock()
mock_socket.session = {}
return mock_socket
@classmethod
def setUpClass(cls):
asyncio_server.AsyncServer._default_monitor_clients = False
@classmethod
def tearDownClass(cls):
asyncio_server.AsyncServer._default_monitor_clients = True
def setUp(self):
logging.getLogger('engineio').setLevel(logging.NOTSET)
def tearDown(self):
# restore JSON encoder, in case a test changed it
packet.Packet.json = json
def test_is_asyncio_based(self):
s = asyncio_server.AsyncServer()
assert s.is_asyncio_based()
def test_async_modes(self):
s = asyncio_server.AsyncServer()
assert s.async_modes() == ['aiohttp', 'sanic', 'tornado', 'asgi']
def test_async_mode_aiohttp(self):
s = asyncio_server.AsyncServer(async_mode='aiohttp')
assert s.async_mode == 'aiohttp'
assert s._async['asyncio']
assert s._async['create_route'] == async_aiohttp.create_route
assert s._async['translate_request'] == async_aiohttp.translate_request
assert s._async['make_response'] == async_aiohttp.make_response
assert s._async['websocket'].__name__ == 'WebSocket'
@mock.patch('importlib.import_module')
def test_async_mode_auto_aiohttp(self, import_module):
import_module.side_effect = [self.get_async_mock()]
s = asyncio_server.AsyncServer()
assert s.async_mode == 'aiohttp'
def test_async_modes_wsgi(self):
with pytest.raises(ValueError):
asyncio_server.AsyncServer(async_mode='eventlet')
with pytest.raises(ValueError):
asyncio_server.AsyncServer(async_mode='gevent')
with pytest.raises(ValueError):
asyncio_server.AsyncServer(async_mode='gevent_uwsgi')
with pytest.raises(ValueError):
asyncio_server.AsyncServer(async_mode='threading')
@mock.patch('importlib.import_module')
def test_attach(self, import_module):
a = self.get_async_mock()
import_module.side_effect = [a]
s = asyncio_server.AsyncServer()
s.attach('app', engineio_path='abc')
a._async['create_route'].assert_called_with('app', s, '/abc/')
s.attach('app', engineio_path='/def/')
a._async['create_route'].assert_called_with('app', s, '/def/')
s.attach('app', engineio_path='/ghi')
a._async['create_route'].assert_called_with('app', s, '/ghi/')
s.attach('app', engineio_path='jkl/')
a._async['create_route'].assert_called_with('app', s, '/jkl/')
def test_session(self):
s = asyncio_server.AsyncServer()
s.sockets['foo'] = self._get_mock_socket()
async def _func():
async with s.session('foo') as session:
await s.sleep(0)
session['username'] = 'bar'
assert await s.get_session('foo') == {'username': 'bar'}
_run(_func())
def test_disconnect(self):
s = asyncio_server.AsyncServer()
s.sockets['foo'] = mock_socket = self._get_mock_socket()
_run(s.disconnect('foo'))
assert mock_socket.close.mock.call_count == 1
mock_socket.close.mock.assert_called_once_with()
assert 'foo' not in s.sockets
def test_disconnect_all(self):
s = asyncio_server.AsyncServer()
s.sockets['foo'] = mock_foo = self._get_mock_socket()
s.sockets['bar'] = mock_bar = self._get_mock_socket()
_run(s.disconnect())
assert mock_foo.close.mock.call_count == 1
assert mock_bar.close.mock.call_count == 1
mock_foo.close.mock.assert_called_once_with()
mock_bar.close.mock.assert_called_once_with()
assert 'foo' not in s.sockets
assert 'bar' not in s.sockets
@mock.patch('importlib.import_module')
def test_jsonp_not_supported(self, import_module):
a = self.get_async_mock(
{'REQUEST_METHOD': 'GET', 'QUERY_STRING': 'j=abc'}
)
import_module.side_effect = [a]
s = asyncio_server.AsyncServer()
response = _run(s.handle_request('request'))
assert response == 'response'
a._async['translate_request'].assert_called_once_with('request')
assert a._async['make_response'].call_count == 1
assert a._async['make_response'].call_args[0][0] == '400 BAD REQUEST'
@mock.patch('importlib.import_module')
def test_jsonp_index(self, import_module):
a = self.get_async_mock(
{'REQUEST_METHOD': 'GET', 'QUERY_STRING': 'j=233'}
)
import_module.side_effect = [a]
s = asyncio_server.AsyncServer()
response = _run(s.handle_request('request'))
assert response == 'response'
a._async['translate_request'].assert_called_once_with('request')
assert a._async['make_response'].call_count == 1
assert a._async['make_response'].call_args[0][0] == '200 OK'
assert (
a._async['make_response']
.call_args[0][2]
.startswith(b'___eio[233]("')
)
assert a._async['make_response'].call_args[0][2].endswith(b'");')
@mock.patch('importlib.import_module')
def test_connect(self, import_module):
a = self.get_async_mock()
import_module.side_effect = [a]
s = asyncio_server.AsyncServer()
_run(s.handle_request('request'))
assert len(s.sockets) == 1
assert a._async['make_response'].call_count == 1
assert a._async['make_response'].call_args[0][0] == '200 OK'
assert ('Content-Type', 'application/octet-stream') in a._async[
'make_response'
].call_args[0][1]
packets = payload.Payload(
encoded_payload=a._async['make_response'].call_args[0][2]
).packets
assert len(packets) == 1
assert packets[0].packet_type == packet.OPEN
assert 'upgrades' in packets[0].data
assert packets[0].data['upgrades'] == ['websocket']
assert 'sid' in packets[0].data
@mock.patch('importlib.import_module')
def test_connect_async_request_response_handlers(self, import_module):
a = self.get_async_mock()
a._async['translate_request'] = AsyncMock(
return_value=a._async['translate_request'].return_value
)
a._async['make_response'] = AsyncMock(
return_value=a._async['make_response'].return_value
)
import_module.side_effect = [a]
s = asyncio_server.AsyncServer()
_run(s.handle_request('request'))
assert len(s.sockets) == 1
assert a._async['make_response'].mock.call_count == 1
assert a._async['make_response'].mock.call_args[0][0] == '200 OK'
assert ('Content-Type', 'application/octet-stream') in a._async[
'make_response'
].mock.call_args[0][1]
packets = payload.Payload(
encoded_payload=a._async['make_response'].mock.call_args[0][2]
).packets
assert len(packets) == 1
assert packets[0].packet_type == packet.OPEN
assert 'upgrades' in packets[0].data
assert packets[0].data['upgrades'] == ['websocket']
assert 'sid' in packets[0].data
@mock.patch('importlib.import_module')
def test_connect_no_upgrades(self, import_module):
a = self.get_async_mock()
import_module.side_effect = [a]
s = asyncio_server.AsyncServer(allow_upgrades=False)
_run(s.handle_request('request'))
packets = payload.Payload(
encoded_payload=a._async['make_response'].call_args[0][2]
).packets
assert packets[0].data['upgrades'] == []
@mock.patch('importlib.import_module')
def test_connect_b64_with_1(self, import_module):
a = self.get_async_mock(
{'REQUEST_METHOD': 'GET', 'QUERY_STRING': 'b64=1'}
)
import_module.side_effect = [a]
s = asyncio_server.AsyncServer(allow_upgrades=False)
s._generate_id = mock.MagicMock(return_value='1')
_run(s.handle_request('request'))
assert a._async['make_response'].call_count == 1
assert a._async['make_response'].call_args[0][0] == '200 OK'
assert ('Content-Type', 'text/plain; charset=UTF-8') in a._async[
'make_response'
].call_args[0][1]
_run(s.send('1', b'\x00\x01\x02', binary=True))
a._async['translate_request'].return_value = {
'REQUEST_METHOD': 'GET',
'QUERY_STRING': 'sid=1&b64=1',
}
_run(s.handle_request('request'))
assert a._async['make_response'].call_args[0][2] == b'6:b4AAEC'
@mock.patch('importlib.import_module')
def test_connect_b64_with_true(self, import_module):
a = self.get_async_mock(
{'REQUEST_METHOD': 'GET', 'QUERY_STRING': 'b64=true'}
)
import_module.side_effect = [a]
s = asyncio_server.AsyncServer(allow_upgrades=False)
s._generate_id = mock.MagicMock(return_value='1')
_run(s.handle_request('request'))
assert a._async['make_response'].call_count == 1
assert a._async['make_response'].call_args[0][0] == '200 OK'
assert ('Content-Type', 'text/plain; charset=UTF-8') in a._async[
'make_response'
].call_args[0][1]
_run(s.send('1', b'\x00\x01\x02', binary=True))
a._async['translate_request'].return_value = {
'REQUEST_METHOD': 'GET',
'QUERY_STRING': 'sid=1&b64=true',
}
_run(s.handle_request('request'))
assert a._async['make_response'].call_args[0][2] == b'6:b4AAEC'
@mock.patch('importlib.import_module')
def test_connect_b64_with_0(self, import_module):
a = self.get_async_mock(
{'REQUEST_METHOD': 'GET', 'QUERY_STRING': 'b64=0'}
)
import_module.side_effect = [a]
s = asyncio_server.AsyncServer(allow_upgrades=False)
s._generate_id = mock.MagicMock(return_value='1')
_run(s.handle_request('request'))
assert a._async['make_response'].call_count == 1
assert a._async['make_response'].call_args[0][0] == '200 OK'
assert ('Content-Type', 'application/octet-stream') in a._async[
'make_response'
].call_args[0][1]
_run(s.send('1', b'\x00\x01\x02', binary=True))
a._async['translate_request'].return_value = {
'REQUEST_METHOD': 'GET',
'QUERY_STRING': 'sid=1&b64=0',
}
_run(s.handle_request('request'))
assert (
a._async['make_response'].call_args[0][2]
== b'\x01\x04\xff\x04\x00\x01\x02'
)
@mock.patch('importlib.import_module')
def test_connect_b64_with_false(self, import_module):
a = self.get_async_mock(
{'REQUEST_METHOD': 'GET', 'QUERY_STRING': 'b64=false'}
)
import_module.side_effect = [a]
s = asyncio_server.AsyncServer(allow_upgrades=False)
s._generate_id = mock.MagicMock(return_value='1')
_run(s.handle_request('request'))
assert a._async['make_response'].call_count == 1
assert a._async['make_response'].call_args[0][0] == '200 OK'
assert ('Content-Type', 'application/octet-stream') in a._async[
'make_response'
].call_args[0][1]
_run(s.send('1', b'\x00\x01\x02', binary=True))
a._async['translate_request'].return_value = {
'REQUEST_METHOD': 'GET',
'QUERY_STRING': 'sid=1&b64=false',
}
_run(s.handle_request('request'))
assert (
a._async['make_response'].call_args[0][2]
== b'\x01\x04\xff\x04\x00\x01\x02'
)
@mock.patch('importlib.import_module')
def test_connect_custom_ping_times(self, import_module):
a = self.get_async_mock()
import_module.side_effect = [a]
s = asyncio_server.AsyncServer(ping_timeout=123, ping_interval=456)
_run(s.handle_request('request'))
packets = payload.Payload(
encoded_payload=a._async['make_response'].call_args[0][2]
).packets
assert packets[0].data['pingTimeout'] == 123000
assert packets[0].data['pingInterval'] == 456000
@mock.patch('engineio.asyncio_socket.AsyncSocket')
@mock.patch('importlib.import_module')
def test_connect_bad_poll(self, import_module, AsyncSocket):
a = self.get_async_mock()
import_module.side_effect = [a]
AsyncSocket.return_value = self._get_mock_socket()
AsyncSocket.return_value.poll.side_effect = [exceptions.QueueEmpty]
s = asyncio_server.AsyncServer()
_run(s.handle_request('request'))
assert a._async['make_response'].call_count == 1
assert a._async['make_response'].call_args[0][0] == '400 BAD REQUEST'
@mock.patch('engineio.asyncio_socket.AsyncSocket')
@mock.patch('importlib.import_module')
def test_connect_transport_websocket(self, import_module, AsyncSocket):
a = self.get_async_mock(
{'REQUEST_METHOD': 'GET', 'QUERY_STRING': 'transport=websocket'}
)
import_module.side_effect = [a]
AsyncSocket.return_value = self._get_mock_socket()
s = asyncio_server.AsyncServer()
s._generate_id = mock.MagicMock(return_value='123')
# force socket to stay open, so that we can check it later
AsyncSocket().closed = False
_run(s.handle_request('request'))
assert (
s.sockets['123'].send.mock.call_args[0][0].packet_type
== packet.OPEN
)
@mock.patch('engineio.asyncio_socket.AsyncSocket')
@mock.patch('importlib.import_module')
def test_connect_transport_websocket_closed(
self, import_module, AsyncSocket
):
a = self.get_async_mock(
{'REQUEST_METHOD': 'GET', 'QUERY_STRING': 'transport=websocket'}
)
import_module.side_effect = [a]
AsyncSocket.return_value = self._get_mock_socket()
s = asyncio_server.AsyncServer()
s._generate_id = mock.MagicMock(return_value='123')
# this mock handler just closes the socket, as it would happen on a
# real websocket exchange
async def mock_handle(environ):
s.sockets['123'].closed = True
AsyncSocket().handle_get_request = mock_handle
_run(s.handle_request('request'))
assert '123' not in s.sockets # socket should close on its own
@mock.patch('importlib.import_module')
def test_connect_transport_invalid(self, import_module):
a = self.get_async_mock(
{'REQUEST_METHOD': 'GET', 'QUERY_STRING': 'transport=foo'}
)
import_module.side_effect = [a]
s = asyncio_server.AsyncServer()
_run(s.handle_request('request'))
assert a._async['make_response'].call_count == 1
assert a._async['make_response'].call_args[0][0] == '400 BAD REQUEST'
@mock.patch('importlib.import_module')
def test_connect_cors_headers(self, import_module):
a = self.get_async_mock()
import_module.side_effect = [a]
s = asyncio_server.AsyncServer()
_run(s.handle_request('request'))
assert a._async['make_response'].call_args[0][0] == '200 OK'
headers = a._async['make_response'].call_args[0][1]
assert ('Access-Control-Allow-Credentials', 'true') in headers
@mock.patch('importlib.import_module')
def test_connect_cors_allowed_origin(self, import_module):
a = self.get_async_mock(
{'REQUEST_METHOD': 'GET', 'QUERY_STRING': '', 'HTTP_ORIGIN': 'b'}
)
import_module.side_effect = [a]
s = asyncio_server.AsyncServer(cors_allowed_origins=['a', 'b'])
_run(s.handle_request('request'))
assert a._async['make_response'].call_args[0][0] == '200 OK'
headers = a._async['make_response'].call_args[0][1]
assert ('Access-Control-Allow-Origin', 'b') in headers
@mock.patch('importlib.import_module')
def test_connect_cors_not_allowed_origin(self, import_module):
a = self.get_async_mock(
{'REQUEST_METHOD': 'GET', 'QUERY_STRING': '', 'HTTP_ORIGIN': 'c'}
)
import_module.side_effect = [a]
s = asyncio_server.AsyncServer(cors_allowed_origins=['a', 'b'])
_run(s.handle_request('request'))
assert a._async['make_response'].call_args[0][0] == '400 BAD REQUEST'
headers = a._async['make_response'].call_args[0][1]
assert ('Access-Control-Allow-Origin', 'c') not in headers
assert ('Access-Control-Allow-Origin', '*') not in headers
@mock.patch('importlib.import_module')
def test_connect_cors_not_allowed_origin_async_response(
self, import_module
):
a = self.get_async_mock(
{'REQUEST_METHOD': 'GET', 'QUERY_STRING': '', 'HTTP_ORIGIN': 'c'}
)
a._async['make_response'] = AsyncMock(
return_value=a._async['make_response'].return_value
)
import_module.side_effect = [a]
s = asyncio_server.AsyncServer(cors_allowed_origins=['a', 'b'])
_run(s.handle_request('request'))
assert (
a._async['make_response'].mock.call_args[0][0] == '400 BAD REQUEST'
)
headers = a._async['make_response'].mock.call_args[0][1]
assert ('Access-Control-Allow-Origin', 'c') not in headers
assert ('Access-Control-Allow-Origin', '*') not in headers
@mock.patch('importlib.import_module')
def test_connect_cors_all_origins(self, import_module):
a = self.get_async_mock(
{'REQUEST_METHOD': 'GET', 'QUERY_STRING': '', 'HTTP_ORIGIN': 'foo'}
)
import_module.side_effect = [a]
s = asyncio_server.AsyncServer(cors_allowed_origins='*')
_run(s.handle_request('request'))
assert a._async['make_response'].call_args[0][0] == '200 OK'
headers = a._async['make_response'].call_args[0][1]
assert ('Access-Control-Allow-Origin', 'foo') in headers
assert ('Access-Control-Allow-Credentials', 'true') in headers
@mock.patch('importlib.import_module')
def test_connect_cors_one_origin(self, import_module):
a = self.get_async_mock(
{'REQUEST_METHOD': 'GET', 'QUERY_STRING': '', 'HTTP_ORIGIN': 'a'}
)
import_module.side_effect = [a]
s = asyncio_server.AsyncServer(cors_allowed_origins='a')
_run(s.handle_request('request'))
assert a._async['make_response'].call_args[0][0] == '200 OK'
headers = a._async['make_response'].call_args[0][1]
assert ('Access-Control-Allow-Origin', 'a') in headers
assert ('Access-Control-Allow-Credentials', 'true') in headers
@mock.patch('importlib.import_module')
def test_connect_cors_one_origin_not_allowed(self, import_module):
a = self.get_async_mock(
{'REQUEST_METHOD': 'GET', 'QUERY_STRING': '', 'HTTP_ORIGIN': 'b'}
)
import_module.side_effect = [a]
s = asyncio_server.AsyncServer(cors_allowed_origins='a')
_run(s.handle_request('request'))
assert a._async['make_response'].call_args[0][0] == '400 BAD REQUEST'
headers = a._async['make_response'].call_args[0][1]
assert ('Access-Control-Allow-Origin', 'b') not in headers
assert ('Access-Control-Allow-Origin', '*') not in headers
@mock.patch('importlib.import_module')
def test_connect_cors_headers_default_origin(self, import_module):
a = self.get_async_mock(
{
'REQUEST_METHOD': 'GET',
'QUERY_STRING': '',
'wsgi.url_scheme': 'http',
'HTTP_HOST': 'foo',
'HTTP_ORIGIN': 'http://foo',
}
)
import_module.side_effect = [a]
s = asyncio_server.AsyncServer()
_run(s.handle_request('request'))
assert a._async['make_response'].call_args[0][0] == '200 OK'
headers = a._async['make_response'].call_args[0][1]
assert ('Access-Control-Allow-Origin', 'http://foo') in headers
@mock.patch('importlib.import_module')
def test_connect_cors_no_credentials(self, import_module):
a = self.get_async_mock()
import_module.side_effect = [a]
s = asyncio_server.AsyncServer(cors_credentials=False)
_run(s.handle_request('request'))
assert a._async['make_response'].call_args[0][0] == '200 OK'
headers = a._async['make_response'].call_args[0][1]
assert ('Access-Control-Allow-Credentials', 'true') not in headers
@mock.patch('importlib.import_module')
def test_connect_cors_options(self, import_module):
a = self.get_async_mock(
{'REQUEST_METHOD': 'OPTIONS', 'QUERY_STRING': ''}
)
import_module.side_effect = [a]
s = asyncio_server.AsyncServer(cors_credentials=False)
_run(s.handle_request('request'))
assert a._async['make_response'].call_args[0][0] == '200 OK'
headers = a._async['make_response'].call_args[0][1]
assert (
'Access-Control-Allow-Methods',
'OPTIONS, GET, POST',
) in headers
@mock.patch('importlib.import_module')
def test_connect_cors_disabled(self, import_module):
a = self.get_async_mock(
{
'REQUEST_METHOD': 'GET',
'QUERY_STRING': '',
'HTTP_ORIGIN': 'http://foo',
}
)
import_module.side_effect = [a]
s = asyncio_server.AsyncServer(cors_allowed_origins=[])
_run(s.handle_request('request'))
assert a._async['make_response'].call_args[0][0] == '200 OK'
headers = a._async['make_response'].call_args[0][1]
for header in headers:
assert not header[0].startswith('Access-Control-')
@mock.patch('importlib.import_module')
def test_connect_cors_default_no_origin(self, import_module):
a = self.get_async_mock({'REQUEST_METHOD': 'GET', 'QUERY_STRING': ''})
import_module.side_effect = [a]
s = asyncio_server.AsyncServer(cors_allowed_origins=[])
_run(s.handle_request('request'))
assert a._async['make_response'].call_args[0][0] == '200 OK'
headers = a._async['make_response'].call_args[0][1]
for header in headers:
assert header[0] != 'Access-Control-Allow-Origin'
@mock.patch('importlib.import_module')
def test_connect_cors_all_no_origin(self, import_module):
a = self.get_async_mock({'REQUEST_METHOD': 'GET', 'QUERY_STRING': ''})
import_module.side_effect = [a]
s = asyncio_server.AsyncServer(cors_allowed_origins='*')
_run(s.handle_request('request'))
assert a._async['make_response'].call_args[0][0] == '200 OK'
headers = a._async['make_response'].call_args[0][1]
for header in headers:
assert header[0] != 'Access-Control-Allow-Origin'
@mock.patch('importlib.import_module')
def test_connect_cors_disabled_no_origin(self, import_module):
a = self.get_async_mock({'REQUEST_METHOD': 'GET', 'QUERY_STRING': ''})
import_module.side_effect = [a]
s = asyncio_server.AsyncServer(cors_allowed_origins=[])
_run(s.handle_request('request'))
assert a._async['make_response'].call_args[0][0] == '200 OK'
headers = a._async['make_response'].call_args[0][1]
for header in headers:
assert header[0] != 'Access-Control-Allow-Origin'
@mock.patch('importlib.import_module')
def test_connect_event(self, import_module):
a = self.get_async_mock()
import_module.side_effect = [a]
s = asyncio_server.AsyncServer()
s._generate_id = mock.MagicMock(return_value='123')
def mock_connect(sid, environ):
return True
s.on('connect', handler=mock_connect)
_run(s.handle_request('request'))
assert len(s.sockets) == 1
@mock.patch('importlib.import_module')
def test_connect_event_rejects(self, import_module):
a = self.get_async_mock()
import_module.side_effect = [a]
s = asyncio_server.AsyncServer()
s._generate_id = mock.MagicMock(return_value='123')
def mock_connect(sid, environ):
return False
s.on('connect')(mock_connect)
_run(s.handle_request('request'))
assert len(s.sockets) == 0
assert a._async['make_response'].call_args[0][0] == '401 UNAUTHORIZED'
assert a._async['make_response'].call_args[0][2] == b'"Unauthorized"'
@mock.patch('importlib.import_module')
def test_connect_event_rejects_with_message(self, import_module):
a = self.get_async_mock()
import_module.side_effect = [a]
s = asyncio_server.AsyncServer()
s._generate_id = mock.MagicMock(return_value='123')
def mock_connect(sid, environ):
return {'not': 'allowed'}
s.on('connect')(mock_connect)
_run(s.handle_request('request'))
assert len(s.sockets) == 0
assert a._async['make_response'].call_args[0][0] == '401 UNAUTHORIZED'
assert (
a._async['make_response'].call_args[0][2] == b'{"not": "allowed"}'
)
@mock.patch('importlib.import_module')
def test_method_not_found(self, import_module):
a = self.get_async_mock({'REQUEST_METHOD': 'PUT', 'QUERY_STRING': ''})
import_module.side_effect = [a]
s = asyncio_server.AsyncServer()
_run(s.handle_request('request'))
assert len(s.sockets) == 0
assert (
a._async['make_response'].call_args[0][0] == '405 METHOD NOT FOUND'
)
@mock.patch('importlib.import_module')
def test_get_request_with_bad_sid(self, import_module):
a = self.get_async_mock(
{'REQUEST_METHOD': 'GET', 'QUERY_STRING': 'sid=foo'}
)
import_module.side_effect = [a]
s = asyncio_server.AsyncServer()
_run(s.handle_request('request'))
assert len(s.sockets) == 0
assert a._async['make_response'].call_args[0][0] == '400 BAD REQUEST'
@mock.patch('importlib.import_module')
def test_post_request_with_bad_sid(self, import_module):
a = self.get_async_mock(
{'REQUEST_METHOD': 'POST', 'QUERY_STRING': 'sid=foo'}
)
import_module.side_effect = [a]
s = asyncio_server.AsyncServer()
_run(s.handle_request('request'))
assert len(s.sockets) == 0
assert a._async['make_response'].call_args[0][0] == '400 BAD REQUEST'
@mock.patch('importlib.import_module')
def test_send(self, import_module):
a = self.get_async_mock()
import_module.side_effect = [a]
s = asyncio_server.AsyncServer()
s.sockets['foo'] = mock_socket = self._get_mock_socket()
_run(s.send('foo', 'hello'))
assert mock_socket.send.mock.call_count == 1
assert (
mock_socket.send.mock.call_args[0][0].packet_type == packet.MESSAGE
)
assert mock_socket.send.mock.call_args[0][0].data == 'hello'
@mock.patch('importlib.import_module')
def test_send_unknown_socket(self, import_module):
a = self.get_async_mock()
import_module.side_effect = [a]
s = asyncio_server.AsyncServer()
# just ensure no exceptions are raised
_run(s.send('foo', 'hello'))
@mock.patch('importlib.import_module')
def test_get_request(self, import_module):
a = self.get_async_mock(
{'REQUEST_METHOD': 'GET', 'QUERY_STRING': 'sid=foo'}
)
import_module.side_effect = [a]
s = asyncio_server.AsyncServer()
s.sockets['foo'] = mock_socket = self._get_mock_socket()
mock_socket.handle_get_request.mock.return_value = [
packet.Packet(packet.MESSAGE, data='hello')
]
_run(s.handle_request('request'))
assert a._async['make_response'].call_args[0][0] == '200 OK'
packets = payload.Payload(
encoded_payload=a._async['make_response'].call_args[0][2]
).packets
assert len(packets) == 1
assert packets[0].packet_type == packet.MESSAGE
@mock.patch('importlib.import_module')
def test_get_request_custom_response(self, import_module):
a = self.get_async_mock(
{'REQUEST_METHOD': 'GET', 'QUERY_STRING': 'sid=foo'}
)
import_module.side_effect = [a]
s = asyncio_server.AsyncServer()
s.sockets['foo'] = mock_socket = self._get_mock_socket()
mock_socket.handle_get_request.mock.return_value = 'resp'
r = _run(s.handle_request('request'))
assert r == 'resp'
@mock.patch('importlib.import_module')
def test_get_request_closes_socket(self, import_module):
a = self.get_async_mock(
{'REQUEST_METHOD': 'GET', 'QUERY_STRING': 'sid=foo'}
)
import_module.side_effect = [a]
s = asyncio_server.AsyncServer()
s.sockets['foo'] = mock_socket = self._get_mock_socket()
async def mock_get_request(*args, **kwargs):
mock_socket.closed = True
return 'resp'
mock_socket.handle_get_request = mock_get_request
r = _run(s.handle_request('request'))
assert r == 'resp'
assert 'foo' not in s.sockets
@mock.patch('importlib.import_module')
def test_get_request_error(self, import_module):
a = self.get_async_mock(
{'REQUEST_METHOD': 'GET', 'QUERY_STRING': 'sid=foo'}
)
import_module.side_effect = [a]
s = asyncio_server.AsyncServer()
s.sockets['foo'] = mock_socket = self._get_mock_socket()
async def mock_get_request(*args, **kwargs):
raise exceptions.QueueEmpty()
mock_socket.handle_get_request = mock_get_request
_run(s.handle_request('request'))
assert a._async['make_response'].call_args[0][0] == '400 BAD REQUEST'
assert len(s.sockets) == 0
@mock.patch('importlib.import_module')
def test_post_request(self, import_module):
a = self.get_async_mock(
{'REQUEST_METHOD': 'POST', 'QUERY_STRING': 'sid=foo'}
)
import_module.side_effect = [a]
s = asyncio_server.AsyncServer()
s.sockets['foo'] = self._get_mock_socket()
_run(s.handle_request('request'))
assert a._async['make_response'].call_args[0][0] == '200 OK'
@mock.patch('importlib.import_module')
def test_post_request_error(self, import_module):
a = self.get_async_mock(
{'REQUEST_METHOD': 'POST', 'QUERY_STRING': 'sid=foo'}
)
import_module.side_effect = [a]
s = asyncio_server.AsyncServer()
s.sockets['foo'] = mock_socket = self._get_mock_socket()
async def mock_post_request(*args, **kwargs):
raise exceptions.ContentTooLongError()
mock_socket.handle_post_request = mock_post_request
_run(s.handle_request('request'))
assert a._async['make_response'].call_args[0][0] == '400 BAD REQUEST'
@staticmethod
def _gzip_decompress(b):
bytesio = six.BytesIO(b)
with gzip.GzipFile(fileobj=bytesio, mode='r') as gz:
return gz.read()
@mock.patch('importlib.import_module')
def test_gzip_compression(self, import_module):
a = self.get_async_mock(
{
'REQUEST_METHOD': 'GET',
'QUERY_STRING': 'sid=foo',
'HTTP_ACCEPT_ENCODING': 'gzip,deflate',
}
)
import_module.side_effect = [a]
s = asyncio_server.AsyncServer(compression_threshold=0)
s.sockets['foo'] = mock_socket = self._get_mock_socket()
mock_socket.handle_get_request.mock.return_value = [
packet.Packet(packet.MESSAGE, data='hello')
]
_run(s.handle_request('request'))
headers = a._async['make_response'].call_args[0][1]
assert ('Content-Encoding', 'gzip') in headers
self._gzip_decompress(a._async['make_response'].call_args[0][2])
@mock.patch('importlib.import_module')
def test_deflate_compression(self, import_module):
a = self.get_async_mock(
{
'REQUEST_METHOD': 'GET',
'QUERY_STRING': 'sid=foo',
'HTTP_ACCEPT_ENCODING': 'deflate;q=1,gzip',
}
)
import_module.side_effect = [a]
s = asyncio_server.AsyncServer(compression_threshold=0)
s.sockets['foo'] = mock_socket = self._get_mock_socket()
mock_socket.handle_get_request.mock.return_value = [
packet.Packet(packet.MESSAGE, data='hello')
]
_run(s.handle_request('request'))
headers = a._async['make_response'].call_args[0][1]
assert ('Content-Encoding', 'deflate') in headers
zlib.decompress(a._async['make_response'].call_args[0][2])
@mock.patch('importlib.import_module')
def test_gzip_compression_threshold(self, import_module):
a = self.get_async_mock(
{
'REQUEST_METHOD': 'GET',
'QUERY_STRING': 'sid=foo',
'HTTP_ACCEPT_ENCODING': 'gzip',
}
)
import_module.side_effect = [a]
s = asyncio_server.AsyncServer(compression_threshold=1000)
s.sockets['foo'] = mock_socket = self._get_mock_socket()
mock_socket.handle_get_request.mock.return_value = [
packet.Packet(packet.MESSAGE, data='hello')
]
_run(s.handle_request('request'))
headers = a._async['make_response'].call_args[0][1]
for header, value in headers:
assert header != 'Content-Encoding'
with pytest.raises(IOError):
self._gzip_decompress(a._async['make_response'].call_args[0][2])
@mock.patch('importlib.import_module')
def test_compression_disabled(self, import_module):
a = self.get_async_mock(
{
'REQUEST_METHOD': 'GET',
'QUERY_STRING': 'sid=foo',
'HTTP_ACCEPT_ENCODING': 'gzip',
}
)
import_module.side_effect = [a]
s = asyncio_server.AsyncServer(
http_compression=False, compression_threshold=0
)
s.sockets['foo'] = mock_socket = self._get_mock_socket()
mock_socket.handle_get_request.mock.return_value = [
packet.Packet(packet.MESSAGE, data='hello')
]
_run(s.handle_request('request'))
headers = a._async['make_response'].call_args[0][1]
for header, value in headers:
assert header != 'Content-Encoding'
with pytest.raises(IOError):
self._gzip_decompress(a._async['make_response'].call_args[0][2])
@mock.patch('importlib.import_module')
def test_compression_unknown(self, import_module):
a = self.get_async_mock(
{
'REQUEST_METHOD': 'GET',
'QUERY_STRING': 'sid=foo',
'HTTP_ACCEPT_ENCODING': 'rar',
}
)
import_module.side_effect = [a]
s = asyncio_server.AsyncServer(compression_threshold=0)
s.sockets['foo'] = mock_socket = self._get_mock_socket()
mock_socket.handle_get_request.mock.return_value = [
packet.Packet(packet.MESSAGE, data='hello')
]
_run(s.handle_request('request'))
headers = a._async['make_response'].call_args[0][1]
for header, value in headers:
assert header != 'Content-Encoding'
with pytest.raises(IOError):
self._gzip_decompress(a._async['make_response'].call_args[0][2])
@mock.patch('importlib.import_module')
def test_compression_no_encoding(self, import_module):
a = self.get_async_mock(
{
'REQUEST_METHOD': 'GET',
'QUERY_STRING': 'sid=foo',
'HTTP_ACCEPT_ENCODING': '',
}
)
import_module.side_effect = [a]
s = asyncio_server.AsyncServer(compression_threshold=0)
s.sockets['foo'] = mock_socket = self._get_mock_socket()
mock_socket.handle_get_request.mock.return_value = [
packet.Packet(packet.MESSAGE, data='hello')
]
_run(s.handle_request('request'))
headers = a._async['make_response'].call_args[0][1]
for header, value in headers:
assert header != 'Content-Encoding'
with pytest.raises(IOError):
self._gzip_decompress(a._async['make_response'].call_args[0][2])
@mock.patch('importlib.import_module')
def test_cookie(self, import_module):
a = self.get_async_mock()
import_module.side_effect = [a]
s = asyncio_server.AsyncServer(cookie='sid')
s._generate_id = mock.MagicMock(return_value='123')
_run(s.handle_request('request'))
headers = a._async['make_response'].call_args[0][1]
assert ('Set-Cookie', 'sid=123; path=/; SameSite=Lax') in headers
@mock.patch('importlib.import_module')
def test_cookie_dict(self, import_module):
def get_path():
return '/a'
a = self.get_async_mock()
import_module.side_effect = [a]
s = asyncio_server.AsyncServer(cookie={
'name': 'test',
'path': get_path,
'SameSite': 'None',
'Secure': True,
'HttpOnly': True
})
s._generate_id = mock.MagicMock(return_value='123')
_run(s.handle_request('request'))
headers = a._async['make_response'].call_args[0][1]
assert ('Set-Cookie', 'test=123; path=/a; SameSite=None; Secure; '
'HttpOnly') in headers
@mock.patch('importlib.import_module')
def test_no_cookie(self, import_module):
a = self.get_async_mock()
import_module.side_effect = [a]
s = asyncio_server.AsyncServer(cookie=None)
s._generate_id = mock.MagicMock(return_value='123')
_run(s.handle_request('request'))
headers = a._async['make_response'].call_args[0][1]
for header, value in headers:
assert header != 'Set-Cookie'
def test_logger(self):
s = asyncio_server.AsyncServer(logger=False)
assert s.logger.getEffectiveLevel() == logging.ERROR
s.logger.setLevel(logging.NOTSET)
s = asyncio_server.AsyncServer(logger=True)
assert s.logger.getEffectiveLevel() == logging.INFO
s.logger.setLevel(logging.WARNING)
s = asyncio_server.AsyncServer(logger=True)
assert s.logger.getEffectiveLevel() == logging.WARNING
s.logger.setLevel(logging.NOTSET)
my_logger = logging.Logger('foo')
s = asyncio_server.AsyncServer(logger=my_logger)
assert s.logger == my_logger
def test_custom_json(self):
# Warning: this test cannot run in parallel with other tests, as it
# changes the JSON encoding/decoding functions
class CustomJSON(object):
@staticmethod
def dumps(*args, **kwargs):
return '*** encoded ***'
@staticmethod
def loads(*args, **kwargs):
return '+++ decoded +++'
asyncio_server.AsyncServer(json=CustomJSON)
pkt = packet.Packet(packet.MESSAGE, data={'foo': 'bar'})
assert pkt.encode() == b'4*** encoded ***'
pkt2 = packet.Packet(encoded_packet=pkt.encode())
assert pkt2.data == '+++ decoded +++'
# restore the default JSON module
packet.Packet.json = json
def test_background_tasks(self):
r = []
async def foo(arg):
r.append(arg)
s = asyncio_server.AsyncServer()
s.start_background_task(foo, 'bar')
pending = asyncio.Task.all_tasks()
asyncio.get_event_loop().run_until_complete(asyncio.wait(pending))
assert r == ['bar']
def test_sleep(self):
s = asyncio_server.AsyncServer()
_run(s.sleep(0))
def test_trigger_event_function(self):
result = []
def foo_handler(arg):
result.append('ok')
result.append(arg)
s = asyncio_server.AsyncServer()
s.on('message', handler=foo_handler)
_run(s._trigger_event('message', 'bar'))
assert result == ['ok', 'bar']
def test_trigger_event_coroutine(self):
result = []
async def foo_handler(arg):
result.append('ok')
result.append(arg)
s = asyncio_server.AsyncServer()
s.on('message', handler=foo_handler)
_run(s._trigger_event('message', 'bar'))
assert result == ['ok', 'bar']
def test_trigger_event_function_error(self):
def connect_handler(arg):
return 1 / 0
def foo_handler(arg):
return 1 / 0
s = asyncio_server.AsyncServer()
s.on('connect', handler=connect_handler)
s.on('message', handler=foo_handler)
assert not _run(s._trigger_event('connect', '123'))
assert _run(s._trigger_event('message', 'bar')) is None
def test_trigger_event_coroutine_error(self):
async def connect_handler(arg):
return 1 / 0
async def foo_handler(arg):
return 1 / 0
s = asyncio_server.AsyncServer()
s.on('connect', handler=connect_handler)
s.on('message', handler=foo_handler)
assert not _run(s._trigger_event('connect', '123'))
assert _run(s._trigger_event('message', 'bar')) is None
def test_trigger_event_function_async(self):
result = []
def foo_handler(arg):
result.append('ok')
result.append(arg)
s = asyncio_server.AsyncServer()
s.on('message', handler=foo_handler)
fut = _run(s._trigger_event('message', 'bar', run_async=True))
asyncio.get_event_loop().run_until_complete(fut)
assert result == ['ok', 'bar']
def test_trigger_event_coroutine_async(self):
result = []
async def foo_handler(arg):
result.append('ok')
result.append(arg)
s = asyncio_server.AsyncServer()
s.on('message', handler=foo_handler)
fut = _run(s._trigger_event('message', 'bar', run_async=True))
asyncio.get_event_loop().run_until_complete(fut)
assert result == ['ok', 'bar']
def test_trigger_event_function_async_error(self):
result = []
def foo_handler(arg):
result.append(arg)
return 1 / 0
s = asyncio_server.AsyncServer()
s.on('message', handler=foo_handler)
fut = _run(s._trigger_event('message', 'bar', run_async=True))
with pytest.raises(ZeroDivisionError):
asyncio.get_event_loop().run_until_complete(fut)
assert result == ['bar']
def test_trigger_event_coroutine_async_error(self):
result = []
async def foo_handler(arg):
result.append(arg)
return 1 / 0
s = asyncio_server.AsyncServer()
s.on('message', handler=foo_handler)
fut = _run(s._trigger_event('message', 'bar', run_async=True))
with pytest.raises(ZeroDivisionError):
asyncio.get_event_loop().run_until_complete(fut)
assert result == ['bar']
def test_create_queue(self):
s = asyncio_server.AsyncServer()
q = s.create_queue()
empty = s.get_queue_empty_exception()
with pytest.raises(empty):
q.get_nowait()
def test_create_event(self):
s = asyncio_server.AsyncServer()
e = s.create_event()
assert not e.is_set()
e.set()
assert e.is_set()
@mock.patch('importlib.import_module')
def test_service_task_started(self, import_module):
a = self.get_async_mock()
import_module.side_effect = [a]
s = asyncio_server.AsyncServer(monitor_clients=True)
s._service_task = AsyncMock()
_run(s.handle_request('request'))
s._service_task.mock.assert_called_once_with()
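# Not part of the original test module: a small standalone illustration of the
# AsyncMock/_run helpers defined at the top of this file. AsyncMock wraps a
# MagicMock in a coroutine, so assertions are made against its .mock attribute
# after the coroutine has been awaited via _run().
def _example_asyncmock_usage():
    fake_send = AsyncMock(return_value='ok')
    async def call_it():
        return await fake_send('hello')
    assert _run(call_it()) == 'ok'
    fake_send.mock.assert_called_once_with('hello')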
| 39.53919 | 79 | 0.621305 |
16366464da2d5e09edf971dbe0784e2fa8950b3a | 11,690 | py | Python | api_commands.py | LordAmit/mobile-monkey | 7cd022330414efefe162573a28ea88ee224afeaa | ["MIT"] | 7 | 2017-08-15T17:44:37.000Z | 2020-07-02T01:09:20.000Z | api_commands.py | LordAmit/mobile-monkey | 7cd022330414efefe162573a28ea88ee224afeaa | ["MIT"] | 12 | 2017-10-18T10:24:19.000Z | 2022-03-11T23:19:27.000Z | api_commands.py | LordAmit/mobile-monkey | 7cd022330414efefe162573a28ea88ee224afeaa | ["MIT"] | 3 | 2017-10-18T10:16:55.000Z | 2020-07-21T10:44:41.000Z |
'''
api commands from python to gain information about android avds
'''
import xml.etree.ElementTree as ET
import subprocess
import shlex
from typing import List, Union
import config_reader as config
import util
from emulator import Emulator
from apk import Apk
ADB = config.adb
PRINT_FLAG = True
def decode_apk(apk: Apk):
'''
decodes provided apk to a folder
'''
util.check_file_directory_exists(apk.apk_path, True)
try:
result = subprocess.check_output(
['apktool', 'if', apk.apk_path]).decode()
util.debug_print(result, flag=PRINT_FLAG)
result = subprocess.check_output(
['apktool', 'd', apk.apk_path, '-o',
config.APK_FULL_PATH.split('.apk')[0], '-f']).decode()
util.debug_print(result, flag=PRINT_FLAG)
except subprocess.SubprocessError as error:
print(error)
raise ValueError("error decoding.")
def overwrite_android_manifest():
'''
adds android:exported="true" to AndroidManifest.xml
'''
file_address = config.APK_FULL_PATH.split(
'.apk')[0] + '/AndroidManifest.xml'
tree = ET.parse(file_address)
root = tree.getroot()
for activity in root.iter('activity'):
activity.set('android:exported', 'true')
tree.write(file_address)
    # ElementTree re-serializes the android XML namespace under an "ns0" prefix,
    # so restore the expected "android" prefix in the written manifest.
    with open(file_address, 'r') as f:
        filedata = f.read()
    newdata = filedata.replace("ns0", "android")
    with open(file_address, 'w') as f:
        f.write(newdata)
def build_apk(apk: Apk):
'''
builds modified apk
'''
result = subprocess.check_output(
['apktool', 'b', config.APK_FULL_PATH.split('.apk')[0], '-o',
apk.apk_path]).decode()
util.debug_print(result, flag=PRINT_FLAG)
def sign_apk(apk: Apk):
result = subprocess.check_output(
[config.JARSIGNER, '-keystore',
config.KEY, '-verbose', apk.apk_path,
config.ALIAS], input=config.PASSWORD.encode()).decode()
util.debug_print(result, flag=PRINT_FLAG)
def adb_install_apk(emulator: Emulator, apk: Apk):
'''
installs provided apk to specified emulator
'''
util.check_file_directory_exists(apk.apk_path, True)
try:
result = subprocess.check_output(
[config.adb, '-s', 'emulator-' + str(emulator.port), 'install',
apk.apk_path]).decode()
util.debug_print(result, flag=PRINT_FLAG)
except subprocess.SubprocessError as error:
print(error)
raise ValueError("error installing.")
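# Editor-added, hedged sketch (not part of the original module): the functions
# above and below are typically chained into a repackage-and-install flow.
# `emu` and `apk` stand for already-constructed Emulator/Apk instances.
#
#   decode_apk(apk)                  # unpack with apktool
#   overwrite_android_manifest()     # mark every activity as exported
#   build_apk(apk)                   # rebuild the modified apk
#   sign_apk(apk)                    # re-sign with the configured keystore
#   adb_install_apk(emu, apk)        # push it onto the emulator
#   adb_start_launcher_of_apk(emu, apk)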
def adb_get_activity_list(emulator: Emulator, apk: Apk):
'''
    returns the aapt dump of AndroidManifest.xml, from which the activity list
    can be read
'''
command = "{} dump xmltree {} AndroidManifest.xml".format(
config.AAPT, apk.apk_path)
result = subprocess.check_output(shlex.split(command)).decode()
return result
def adb_stop_activity_of_apk(emulator: Emulator, apk: Apk):
'''
    force-stops the app identified by the apk's package name
'''
# adb shell am force-stop com.my.app.package
emulator_name = 'emulator-' + str(emulator.port)
subprocess.check_output(
[config.adb, '-s', emulator_name, 'shell', 'am', 'force-stop',
apk.package_name])
print("package_name: " + apk.package_name + " is stopped")
def adb_start_launcher_of_apk(emulator: Emulator, apk: Apk):
'''
starts the specified apk.
'''
# adb shell monkey -p com.android.chrome -c
# android.intent.category.LAUNCHER 1
emulator_name = 'emulator-' + str(emulator.port)
subprocess.check_output(
[config.adb, '-s', emulator_name, 'shell', 'monkey', '-p',
apk.package_name, '-c', 'android.intent.category.LAUNCHER', '1'])
print("package_name: " + apk.package_name + " is started")
def adb_start_activity(emulator: Emulator, apk: Apk, activity: str):
'''
starts the specified activity.
'''
subprocess.check_output(
[config.adb, 'shell', 'am', 'start', '-n',
apk.package_name+"/"+activity])
print(activity+" is started")
def adb_display_properties():
result = subprocess.check_output(
[config.adb, 'shell', 'dumpsys', 'display'])
return result
def adb_display_scroll(height: str):
subprocess.check_output(
[config.adb, 'shell', 'input', 'swipe 0 '+height+' 0 0'])
print("scrolling up")
def adb_is_package_present(emulator: Emulator, app_package_name: str) -> int:
'''
returns 1 if the specified package is present.
returns 0 if the specified package is not present.
    returns 2 if multiple entries for the specified package are present.
'''
output = subprocess.check_output(
[config.adb, '-s', 'emulator-' + str(emulator.port), 'shell', 'pm',
'list', 'packages', '|', 'grep',
app_package_name]).decode().strip().split('\r\n')
# util.debug_print(output, len(output), flag=PRINT_FLAG)
if len(output) < 1:
return 0
elif len(output) > 1:
return 2
return 1
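def _example_presence_label(emulator: Emulator, app_package_name: str) -> str:
    '''
    editor-added, hedged sketch (not part of the original API): maps the
    integer codes returned by adb_is_package_present to readable labels.
    '''
    presence = adb_is_package_present(emulator, app_package_name)
    return {0: 'not installed',
            1: 'installed once',
            2: 'multiple matches (name too generic)'}.get(presence, 'unknown')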
def adb_uninstall_package(emulator: Emulator, package: str):
'''
    attempts to uninstall the provided package from the specified emulator
'''
# if adb_is_package_present(emulator, package) is not 1:
# raise ValueError("Package not found / Too generic.")
try:
subprocess.check_output(
[config.adb, '-s', 'emulator-' + str(emulator.port),
'uninstall', package])
print("uninstalled " + package)
    except subprocess.SubprocessError:
        print("package not found or uninstalled already")
def adb_uninstall_apk(emulator: Emulator, apk: Apk):
'''
uninstalls the provided apk if installed.
'''
adb_uninstall_package(emulator, apk.package_name)
def adb_start_server_safe():
'''
checks if `adb server` is running. if not, starts it.
'''
try:
status = subprocess.check_output(['pidof', ADB])
util.debug_print('adb already running in PID: ' +
status.decode(), flag=PRINT_FLAG)
return True
except subprocess.CalledProcessError as exception:
print('adb is not running, returned status: ' +
str(exception.returncode))
print('adb was not started. starting...')
try:
subprocess.check_output([ADB, 'start-server'])
return True
        except subprocess.SubprocessError:
            print(
                'failed to start the adb server. maybe ' + ADB +
                ' was not found')
return False
def adb_list_avd_devices() -> List:
'''
returns list of running adb_devices after formatting as a list.
returns:
List of adb_devices
'''
adb_devices = subprocess.check_output([ADB, 'devices'])
adb_devices = adb_devices.decode().strip().split('\n')[1:]
adb_devices_without_attached = []
for device in adb_devices:
adb_devices_without_attached.append(device.split('\t')[0])
return adb_devices_without_attached
def adb_input_tap(emulator: Emulator, xpos: Union[int, float],
ypos: Union[int, float]):
'''
    sends a tap event to the specified `emulator` via adb at the `xpos`, `ypos`
    coordinates
'''
command = "{} -s emulator-{} shell input tap {} {}".format(config.adb,
emulator.port,
xpos, ypos)
subprocess.check_output(shlex.split(command))
def adb_uiautomator_dump(emulator: Emulator):
'''
dumps the current uiautomator view to the default dump directory in
YYYYMMDDHHmm format
'''
command = "{} -s emulator-{} shell uiautomator dump".format(
config.adb, emulator.port)
subprocess.check_output(shlex.split(command))
dump_file_address = config.DUMP_ADDRESS + \
util.return_current_time() + "_dump.xml"
command_cat = "{} -s emulator-{} shell cat /sdcard/window_dump.xml "
command_cat = command_cat.format(config.adb, emulator.port)
dump_content = subprocess.check_output(shlex.split(command_cat)).decode()
dump_file = open(dump_file_address, mode='w')
dump_file.write(dump_content)
dump_file.close()
return dump_file_address
def adb_list_avd_ports() -> List[str]:
'''
returns:
        List of ports of running avd devices
'''
emulator_ports = []
adb_devices = adb_list_avd_devices()
for adb_device in adb_devices:
emulator_port = adb_device.split('-')[1]
if len(emulator_port) > 3:
emulator_ports.append(emulator_port)
return emulator_ports
def avd_model_from_pid(pid: int) -> str:
'''
returns:
avd_model from `pid`
'''
device_details = util.ps_details_of_pid(pid)
# print(output_raw)
"""
PID TTY STAT TIME COMMAND
15522 tty2 Rl+ 128:13 /home/amit/Android/Sdk/tools/emulator64-x86 -port 5557 -avd nexus_s # noqa
"""
device_detail = device_details.split('\n')[1:][:1][0]
print(device_detail)
"""
15521 tty2 Rl+ 134:48 /home/amit/Android/Sdk/tools/emulator64-x86 -port 5555 -avd nexus_4 # noqa
or
11803 ? Sl 9:56 /home/amit/Android/Sdk/emulator/qemu/linux-x86_64/qemu-system-i386 # noqa
-port 5555 -avd Nexus6 -use-system-libs
"""
index_of_avd = device_detail.index('-avd')
device_model = device_detail[index_of_avd + 5:].split(' ')[0]
"""
nexus_s
"""
return device_model
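def _example_parse_avd_model(ps_line: str) -> str:
    '''
    editor-added, hedged sketch (not part of the original API): extracts the
    avd name from a single ps line exactly the way avd_model_from_pid does;
    for the sample line quoted above it returns 'nexus_s'.
    '''
    index_of_avd = ps_line.index('-avd')
    return ps_line[index_of_avd + 5:].split(' ')[0]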
def adb_pidof_app(emulator: Emulator, apk: Apk):
'''
returns PID of running apk
'''
try:
result = subprocess.check_output(
[config.adb, '-s', 'emulator-' + str(emulator.port),
'shell', 'pidof',
apk.package_name])
result = result.decode().split('\n')[0]
util.debug_print(result, flag=PRINT_FLAG)
return result
except subprocess.SubprocessError:
        print("no process found; the app may not be installed or running")
def emulator_list_of_avds():
'''
returns the list of possible avds by executing `emulator -list-avds`
'''
list_avds = subprocess.check_output([config.EMULATOR, '-list-avds'])
return list_avds.decode().strip().split('\n')
def gradle_install(gradlew_path: str, project_path: str):
'''
`gradlew_path` is the full path of the gradlew inside the project folder
'''
util.check_file_directory_exists(gradlew_path, True)
util.check_file_directory_exists(project_path, True)
util.change_file_permission(gradlew_path, 555)
print(gradlew_path, project_path)
try:
subprocess.check_output(
[gradlew_path, '-p', project_path, 'tasks', 'installDebug',
'--info', '--debug',
'--stacktrace'])
except subprocess.CalledProcessError:
print('error: gradle problem executing: ' + gradlew_path)
def gradle_test(gradlew_path: str, project_path: str):
'''
`gradlew_path` is the full path of the gradlew inside the project folder
'''
util.check_file_directory_exists(gradlew_path, True)
util.check_file_directory_exists(project_path, True)
util.change_file_permission(gradlew_path, 555)
print(gradlew_path, project_path)
try:
subprocess.check_output(
[gradlew_path, '-p', project_path, 'tasks', 'connectedAndroidTest',
'--info', '--debug',
'--stacktrace'])
except subprocess.CalledProcessError:
print('error: gradle problem executing: ' + gradlew_path)
def main():
'''
main function
'''
devices = adb_list_avd_ports()
print(devices)
if __name__ == '__main__':
main()
| 30.363636
| 105
| 0.634046
|
81a061264792fc626f17fc021babd315c39e521d
| 1,870
|
py
|
Python
|
mapreduce/setup.py
|
brentm5/integrations-core
|
5cac8788c95d8820435ef9c5d32d6a5463cf491d
|
[
"BSD-3-Clause"
] | 4
|
2021-06-21T19:21:49.000Z
|
2021-06-23T21:21:55.000Z
|
mapreduce/setup.py
|
brentm5/integrations-core
|
5cac8788c95d8820435ef9c5d32d6a5463cf491d
|
[
"BSD-3-Clause"
] | null | null | null |
mapreduce/setup.py
|
brentm5/integrations-core
|
5cac8788c95d8820435ef9c5d32d6a5463cf491d
|
[
"BSD-3-Clause"
] | 1
|
2021-06-21T19:21:51.000Z
|
2021-06-21T19:21:51.000Z
|
# (C) Datadog, Inc. 2018
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
# Always prefer setuptools over distutils
from setuptools import setup
# To use a consistent encoding
from codecs import open
from os import path
HERE = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(HERE, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
# Get version info
ABOUT = {}
with open(path.join(HERE, "datadog_checks", "mapreduce", "__about__.py")) as f:
exec(f.read(), ABOUT)
def get_requirements(fpath):
with open(path.join(HERE, fpath), encoding='utf-8') as f:
return f.readlines()
CHECKS_BASE_REQ = 'datadog_checks_base'
setup(
name='datadog-mapreduce',
version=ABOUT['__version__'],
description='The MapReduce check',
long_description=long_description,
long_description_content_type='text/markdown',
keywords='datadog agent mapreduce check',
# The project's main homepage.
url='https://github.com/DataDog/integrations-core',
# Author details
author='Datadog',
author_email='packages@datadoghq.com',
# License
license='BSD',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Topic :: System :: Monitoring',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
],
# The package we're going to ship
packages=['datadog_checks.mapreduce'],
# Run-time dependencies
install_requires=[CHECKS_BASE_REQ],
# Extra files to ship with the wheel package
include_package_data=True,
)
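# Editor-added, hedged note (not part of the original file): with the metadata
# above, the package can typically be built and installed locally for testing,
# e.g. `pip install .` or `python setup.py bdist_wheel` run from this
# directory; the repository's own tooling may wrap these steps differently.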
| 27.5
| 79
| 0.685561
|
0d4c2e11f9e97d06cc04298e683bd1090213beb3
| 10,544
|
py
|
Python
|
anthill/framework/auth/social/core/storage.py
|
0x55AAh/anthill_gaming
|
475af798bd08d85fc0fbfce9d2ba710f73252c15
|
[
"MIT"
] | 1
|
2018-11-30T21:56:14.000Z
|
2018-11-30T21:56:14.000Z
|
anthill/framework/auth/social/core/storage.py
|
0x55AAh/anthill_gaming
|
475af798bd08d85fc0fbfce9d2ba710f73252c15
|
[
"MIT"
] | null | null | null |
anthill/framework/auth/social/core/storage.py
|
0x55AAh/anthill_gaming
|
475af798bd08d85fc0fbfce9d2ba710f73252c15
|
[
"MIT"
] | null | null | null |
"""Models mixins for Social Auth."""
import re
import time
import base64
import uuid
import warnings
from datetime import datetime, timedelta
import six
from openid.association import Association as OpenIdAssociation
from .exceptions import MissingBackend
from .backends.utils import get_backend
NO_ASCII_REGEX = re.compile(r'[^\x00-\x7F]+')
NO_SPECIAL_REGEX = re.compile(r'[^\w.@+_-]+', re.UNICODE)
class UserMixin:
# Consider tokens that expire in 5 seconds as already expired
ACCESS_TOKEN_EXPIRED_THRESHOLD = 5
user = ''
provider = ''
uid = None
extra_data = None
def get_backend(self, strategy):
return get_backend(strategy.get_backends(), self.provider)
def get_backend_instance(self, strategy):
try:
backend_class = self.get_backend(strategy)
except MissingBackend:
return None
else:
return backend_class(strategy=strategy)
@property
def access_token(self):
"""Return access_token stored in extra_data or None."""
return self.extra_data.get('access_token')
@property
def tokens(self):
warnings.warn('tokens is deprecated, use access_token instead')
return self.access_token
def refresh_token(self, strategy, *args, **kwargs):
token = self.extra_data.get('refresh_token') or \
self.extra_data.get('access_token')
backend = self.get_backend(strategy)
if token and backend and hasattr(backend, 'refresh_token'):
backend = backend(strategy=strategy)
response = backend.refresh_token(token, *args, **kwargs)
extra_data = backend.extra_data(self,
self.uid,
response,
self.extra_data)
if self.set_extra_data(extra_data):
self.save()
def expiration_timedelta(self):
"""
        Return the provider session lifetime as a timedelta, ready to
        use with session.set_expiry().
If provider returns a timestamp instead of session seconds to live, the
timedelta is inferred from current time (using UTC timezone). None is
returned if there's no value stored or it's invalid.
"""
if self.extra_data and 'expires' in self.extra_data:
try:
expires = int(self.extra_data.get('expires'))
except (ValueError, TypeError):
return None
now = datetime.utcnow()
# Detect if expires is a timestamp
if expires > time.mktime(now.timetuple()):
# expires is a datetime, return the remaining difference
return datetime.utcfromtimestamp(expires) - now
else:
# expires is the time to live seconds since creation,
# check against auth_time if present, otherwise return
# the value
auth_time = self.extra_data.get('auth_time')
if auth_time:
reference = datetime.utcfromtimestamp(auth_time)
return (reference + timedelta(seconds=expires)) - now
else:
return timedelta(seconds=expires)
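    # Editor-added, hedged illustration of the two branches above; the literal
    # values are made up and only the shape of extra_data matters:
    #   self.extra_data = {'expires': 3600}
    #       -> timedelta(seconds=3600)
    #   self.extra_data = {'expires': int(time.time()) + 60}
    #       -> roughly timedelta(seconds=60), measured from utcnow()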
def expiration_datetime(self):
# backward compatible alias
return self.expiration_timedelta()
def access_token_expired(self):
"""Return true / false if access token is already expired."""
expiration = self.expiration_timedelta()
return expiration and \
expiration.total_seconds() <= self.ACCESS_TOKEN_EXPIRED_THRESHOLD
def get_access_token(self, strategy):
"""Returns a valid access token."""
if self.access_token_expired():
self.refresh_token(strategy)
return self.access_token
def set_extra_data(self, extra_data=None):
if extra_data and self.extra_data != extra_data:
if self.extra_data and not isinstance(
self.extra_data, six.string_types):
self.extra_data.update(extra_data)
else:
self.extra_data = extra_data
return True
@classmethod
def clean_username(cls, value):
"""Clean username removing any unsupported character."""
value = NO_ASCII_REGEX.sub('', value)
value = NO_SPECIAL_REGEX.sub('', value)
return value
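    # Editor-added illustration (not in the original source): the two regexes
    # strip non-ASCII characters first and then any remaining unsupported
    # ones, e.g. clean_username('josé smith') -> 'jossmith'.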
@classmethod
def changed(cls, user):
"""The given user instance is ready to be saved."""
raise NotImplementedError('Implement in subclass')
@classmethod
def get_username(cls, user):
"""Return the username for given user."""
raise NotImplementedError('Implement in subclass')
@classmethod
def user_model(cls):
"""Return the user model."""
raise NotImplementedError('Implement in subclass')
@classmethod
def username_max_length(cls):
"""Return the max length for username."""
raise NotImplementedError('Implement in subclass')
@classmethod
def allowed_to_disconnect(cls, user, backend_name, association_id=None):
"""
Return if it's safe to disconnect the social account for the given user.
"""
raise NotImplementedError('Implement in subclass')
@classmethod
def disconnect(cls, entry):
"""Disconnect the social account for the given user."""
raise NotImplementedError('Implement in subclass')
@classmethod
def user_exists(cls, *args, **kwargs):
"""
Return True/False if a User instance exists with the given arguments.
Arguments are directly passed to filter() manager method.
"""
raise NotImplementedError('Implement in subclass')
@classmethod
def create_user(cls, *args, **kwargs):
"""Create a user instance."""
raise NotImplementedError('Implement in subclass')
@classmethod
def get_user(cls, pk):
"""Return user instance for given id."""
raise NotImplementedError('Implement in subclass')
@classmethod
def get_users_by_email(cls, email):
"""Return users instances for given email address."""
raise NotImplementedError('Implement in subclass')
@classmethod
def get_social_auth(cls, provider, uid):
"""Return UserSocialAuth for given provider and uid."""
raise NotImplementedError('Implement in subclass')
@classmethod
def get_social_auth_for_user(cls, user, provider=None, id=None):
"""Return all the UserSocialAuth instances for given user."""
raise NotImplementedError('Implement in subclass')
@classmethod
def create_social_auth(cls, user, uid, provider):
"""Create a UserSocialAuth instance for given user."""
raise NotImplementedError('Implement in subclass')
class NonceMixin:
"""One use numbers."""
server_url = ''
timestamp = 0
salt = ''
@classmethod
def use(cls, server_url, timestamp, salt):
"""Create a Nonce instance."""
raise NotImplementedError('Implement in subclass')
class AssociationMixin:
"""OpenId account association."""
server_url = ''
handle = ''
secret = ''
issued = 0
lifetime = 0
assoc_type = ''
@classmethod
def oids(cls, server_url, handle=None):
kwargs = {'server_url': server_url}
if handle is not None:
kwargs['handle'] = handle
return sorted([
(assoc.id, cls.openid_association(assoc))
for assoc in cls.get(**kwargs)
], key=lambda x: x[1].issued, reverse=True)
@classmethod
def openid_association(cls, assoc):
secret = assoc.secret
if not isinstance(secret, six.binary_type):
secret = secret.encode()
        return OpenIdAssociation(assoc.handle, base64.decodebytes(secret),
assoc.issued, assoc.lifetime,
assoc.assoc_type)
@classmethod
def store(cls, server_url, association):
"""Create an Association instance."""
raise NotImplementedError('Implement in subclass')
@classmethod
def get(cls, *args, **kwargs):
"""Get an Association instance."""
raise NotImplementedError('Implement in subclass')
@classmethod
def remove(cls, ids_to_delete):
"""Remove an Association instance."""
raise NotImplementedError('Implement in subclass')
class CodeMixin:
email = ''
code = ''
verified = False
def verify(self):
self.verified = True
self.save()
@classmethod
def generate_code(cls):
return uuid.uuid4().hex
@classmethod
def make_code(cls, email):
code = cls()
code.email = email
code.code = cls.generate_code()
code.verified = False
code.save()
return code
@classmethod
def get_code(cls, code):
raise NotImplementedError('Implement in subclass')
class PartialMixin:
token = ''
data = ''
next_step = ''
backend = ''
@property
def args(self):
return self.data.get('args', [])
@args.setter
def args(self, value):
self.data['args'] = value
@property
def kwargs(self):
return self.data.get('kwargs', {})
@kwargs.setter
def kwargs(self, value):
self.data['kwargs'] = value
def extend_kwargs(self, values):
self.data['kwargs'].update(values)
@classmethod
def generate_token(cls):
return uuid.uuid4().hex
@classmethod
def load(cls, token):
raise NotImplementedError('Implement in subclass')
@classmethod
def destroy(cls, token):
raise NotImplementedError('Implement in subclass')
@classmethod
def prepare(cls, backend, next_step, data):
partial = cls()
partial.backend = backend
partial.next_step = next_step
partial.data = data
partial.token = cls.generate_token()
return partial
@classmethod
def store(cls, partial):
partial.save()
return partial
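# Editor-added, hedged sketch of how a concrete PartialMixin subclass (here
# called Partial, assumed to implement load/destroy and a save() method) is
# driven by the pipeline helpers above:
#
#   partial = Partial.prepare(backend='google-oauth2', next_step=3,
#                             data={'args': [], 'kwargs': {'user_id': 42}})
#   Partial.store(partial)            # persists it, keyed by partial.token
#   resumed = Partial.load(partial.token)
#   Partial.destroy(partial.token)    # clean up once the pipeline finishes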
class BaseStorage:
user = UserMixin
nonce = NonceMixin
association = AssociationMixin
code = CodeMixin
partial = PartialMixin
@classmethod
def is_integrity_error(cls, exception):
"""Check if given exception flags an integrity error in the DB."""
raise NotImplementedError('Implement in subclass')
| 30.473988
| 80
| 0.621396
|
b0e64d619d39c2425211a034a22937f028a50ba8
| 231
|
py
|
Python
|
Source/Chapter5/Linear.py
|
irmoralesb/MLForDevsBook
|
4e990d720ef5888525d09d2e27e37a4db21a75db
|
[
"Unlicense"
] | null | null | null |
Source/Chapter5/Linear.py
|
irmoralesb/MLForDevsBook
|
4e990d720ef5888525d09d2e27e37a4db21a75db
|
[
"Unlicense"
] | null | null | null |
Source/Chapter5/Linear.py
|
irmoralesb/MLForDevsBook
|
4e990d720ef5888525d09d2e27e37a4db21a75db
|
[
"Unlicense"
] | null | null | null |
from Chapter5.TransferFunction import TransferFunction
import numpy as np
class Linear(TransferFunction):
    @staticmethod
    def getTransferFunction(x):
        return x
    @staticmethod
    def getTransferFunctionDerivative(x):
        return np.ones(len(x))
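if __name__ == '__main__':
    # Editor-added, hedged demo (not part of the original file): the identity
    # transfer function and its derivative evaluated on a small vector.
    demo = np.array([-1.0, 0.0, 2.5])
    print(Linear.getTransferFunction(demo))             # [-1.   0.   2.5]
    print(Linear.getTransferFunctionDerivative(demo))   # [1. 1. 1.]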
| 21
| 54
| 0.74026
|
4584a7ed471a4d367fb4dd4b8eb11da9762676d9
| 37,003
|
py
|
Python
|
keras/layers/rnn/gru_test.py
|
itsraina/keras
|
5e9376b5b94b6fb445dd52dbfafbc4e95bff5e35
|
[
"Apache-2.0"
] | null | null | null |
keras/layers/rnn/gru_test.py
|
itsraina/keras
|
5e9376b5b94b6fb445dd52dbfafbc4e95bff5e35
|
[
"Apache-2.0"
] | null | null | null |
keras/layers/rnn/gru_test.py
|
itsraina/keras
|
5e9376b5b94b6fb445dd52dbfafbc4e95bff5e35
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for GRU layer."""
import copy
import os
import shutil
import numpy as np
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import keras
from keras.layers.rnn import gru_lstm_utils
from keras.testing_infra import test_combinations
from keras.testing_infra import test_utils
from keras.utils import np_utils
# isort: off
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.framework import (
test_util as tf_test_util,
)
# Global config for grappler setting that is used for graph mode test.
_rewrites = rewriter_config_pb2.RewriterConfig()
_rewrites.implementation_selector = rewriter_config_pb2.RewriterConfig.ON
_rewrites.min_graph_nodes = -1
_graph_options = tf.compat.v1.GraphOptions(rewrite_options=_rewrites)
_config = tf.compat.v1.ConfigProto(graph_options=_graph_options)
@test_utils.run_all_without_tensor_float_32("RNN GRU can use TF32 on GPU")
@test_combinations.run_all_keras_modes(config=_config)
class GRUGraphRewriteTest(test_combinations.TestCase):
input_shape = 10
output_shape = 8
rnn_state_size = 8
timestep = 4
batch = 100
epoch = 1
@parameterized.named_parameters(
("non_tan_activation", "relu", "sigmoid", 0, False, True, True),
("non_sigmoid_recur_activation", "tanh", "relu", 0, False, True, True),
("use_recurrent_dropout", "tanh", "sigmoid", 0.1, False, True, True),
("unroll", "tanh", "sigmoid", 0, True, True, True),
("not_use_bias", "tanh", "sigmoid", 0, False, False, True),
("not_reset_after", "tanh", "sigmoid", 0, False, True, False),
)
@test_utils.run_v2_only
def test_could_use_defun_backend(
self,
activation,
recurrent_activation,
recurrent_dropout,
unroll,
use_bias,
reset_after,
):
layer = keras.layers.GRU(
1,
activation=activation,
recurrent_activation=recurrent_activation,
recurrent_dropout=recurrent_dropout,
unroll=unroll,
use_bias=use_bias,
reset_after=reset_after,
)
self.assertFalse(layer._could_use_gpu_kernel)
@test_utils.run_v2_only
def test_use_on_default_activation_with_gpu_kernel(self):
layer = keras.layers.GRU(1, activation=tf.tanh)
self.assertTrue(layer._could_use_gpu_kernel)
layer = keras.layers.GRU(1, recurrent_activation=tf.sigmoid)
self.assertTrue(layer._could_use_gpu_kernel)
def test_keras_model_with_gru(self):
epoch = 10
(x_train, y_train), _ = test_utils.get_test_data(
train_samples=self.batch,
test_samples=0,
input_shape=(self.timestep, self.input_shape),
num_classes=self.output_shape,
)
y_train = np_utils.to_categorical(y_train, self.output_shape)
layer = keras.layers.GRU(self.rnn_state_size)
inputs = keras.layers.Input(
shape=[self.timestep, self.input_shape], dtype=tf.float32
)
outputs = layer(inputs)
model = keras.models.Model(inputs, outputs)
model.compile("rmsprop", loss="mse")
model.fit(x_train, y_train, epochs=epoch)
model.evaluate(x_train, y_train)
model.predict(x_train)
def test_dynamic_behavior_GRU(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
layer = keras.layers.GRU(units, input_shape=(None, embedding_dim))
model = keras.models.Sequential()
model.add(layer)
model.compile(tf.compat.v1.train.GradientDescentOptimizer(0.001), "mse")
x = np.random.random((num_samples, timesteps, embedding_dim))
y = np.random.random((num_samples, units))
model.train_on_batch(x, y)
def test_stacking_GRU(self):
inputs = np.random.random((2, 3, 4))
targets = np.abs(np.random.random((2, 3, 5)))
targets /= targets.sum(axis=-1, keepdims=True)
model = keras.models.Sequential()
model.add(keras.layers.GRU(10, return_sequences=True, unroll=False))
model.add(keras.layers.GRU(5, return_sequences=True, unroll=False))
model.compile(
loss="categorical_crossentropy",
optimizer=tf.compat.v1.train.GradientDescentOptimizer(0.01),
)
model.fit(inputs, targets, epochs=1, batch_size=2, verbose=1)
def test_from_config_GRU(self):
layer_class = keras.layers.GRU
for stateful in (False, True):
l1 = layer_class(units=1, stateful=stateful)
l2 = layer_class.from_config(l1.get_config())
assert l1.get_config() == l2.get_config()
@parameterized.named_parameters(
# test_name, use_bias, bias_initializer, activation
("normal", True, "zeros"),
("no_bias", False, "zeros"),
("random_bias", True, "random_uniform"),
)
def test_gru_v2_model_save_load(self, use_bias, bias_initializer):
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir)
h5_path = os.path.join(temp_dir, "test.h5")
batch = 10
timestep = 3
input_dim = 5
units = 2
x = np.random.random((batch, timestep, input_dim))
def build_model():
inputs = keras.layers.Input(
shape=[timestep, input_dim], dtype=tf.float32
)
layer = keras.layers.GRU(
units, use_bias=use_bias, bias_initializer=bias_initializer
)
output = layer(inputs)
return keras.models.Model(inputs, output), layer
model, layer = build_model()
y_ref = model.predict(x)
model.save_weights(h5_path)
cloned_model, new_layer = build_model()
cloned_model.load_weights(h5_path)
y = cloned_model.predict(x)
self.assertAllClose(y, y_ref)
self.assertAllClose(layer.get_weights(), new_layer.get_weights())
def test_gru_v2_output_on_multiple_kernel(self):
x_train = np.random.random(
(self.batch, self.timestep, self.input_shape)
)
inputs = keras.layers.Input(
shape=[self.timestep, self.input_shape], dtype=tf.float32
)
with test_utils.device(should_use_gpu=False):
layer = keras.layers.GRU(self.rnn_state_size)
output = layer(inputs)
cpu_model = keras.models.Model(inputs, output)
weights = cpu_model.get_weights()
y_1 = cpu_model.predict(x_train)
with test_utils.device(should_use_gpu=True):
layer = keras.layers.GRU(self.rnn_state_size)
output = layer(inputs)
gpu_model = keras.models.Model(inputs, output)
gpu_model.set_weights(weights)
y_2 = gpu_model.predict(x_train)
self.assertAllClose(y_1, y_2, rtol=1e-5, atol=1e-5)
@tf.test.disable_with_predicate(
pred=tf.test.is_built_with_rocm,
skip_message="Skipping as ROCm MIOpen does not support padded "
"input yet.",
)
def test_with_masking_layer_GRU(self):
layer_class = keras.layers.GRU
inputs = np.random.random((2, 3, 4))
targets = np.abs(np.random.random((2, 3, 5)))
targets /= targets.sum(axis=-1, keepdims=True)
model = keras.models.Sequential()
model.add(keras.layers.Masking(input_shape=(3, 4)))
model.add(layer_class(units=5, return_sequences=True, unroll=False))
model.compile(
loss="categorical_crossentropy",
optimizer=tf.compat.v1.train.GradientDescentOptimizer(0.001),
)
model.fit(inputs, targets, epochs=1, batch_size=2, verbose=1)
@tf.test.disable_with_predicate(
pred=tf.test.is_built_with_rocm,
skip_message="Skipping as ROCm MIOpen does not support padded "
"input yet.",
)
def test_masking_with_stacking_GRU(self):
inputs = np.random.random((2, 3, 4))
targets = np.abs(np.random.random((2, 3, 5)))
targets /= targets.sum(axis=-1, keepdims=True)
model = keras.models.Sequential()
model.add(keras.layers.Masking(input_shape=(3, 4)))
model.add(keras.layers.GRU(10, return_sequences=True, unroll=False))
model.add(keras.layers.GRU(5, return_sequences=True, unroll=False))
model.compile(
loss="categorical_crossentropy",
optimizer=tf.compat.v1.train.GradientDescentOptimizer(0.01),
)
model.fit(inputs, targets, epochs=1, batch_size=2, verbose=1)
def test_return_sequences_GRU(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
test_utils.layer_test(
keras.layers.GRU,
kwargs={"units": units, "return_sequences": True},
input_shape=(num_samples, timesteps, embedding_dim),
)
@tf.test.disable_with_predicate(
pred=tf.test.is_built_with_rocm,
skip_message="Double type is not yet supported in ROCm",
)
@test_utils.run_v2_only
def test_float64_GRU(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
test_utils.layer_test(
keras.layers.GRU,
kwargs={
"units": units,
"return_sequences": True,
"dtype": "float64",
},
input_shape=(num_samples, timesteps, embedding_dim),
input_dtype="float64",
)
@tf.test.disable_with_predicate(
pred=tf.test.is_built_with_rocm,
skip_message="Skipping as ROCm MIOpen does not support padded "
"input yet.",
)
def test_return_states_GRU(self):
layer_class = keras.layers.GRU
x = np.random.random((2, 3, 4))
y = np.abs(np.random.random((2, 5)))
s = np.abs(np.random.random((2, 5)))
inputs = keras.layers.Input(shape=[3, 4], dtype=tf.float32)
masked = keras.layers.Masking()(inputs)
outputs, states = layer_class(units=5, return_state=True)(masked)
model = keras.models.Model(inputs, [outputs, states])
model.compile(
loss="categorical_crossentropy",
optimizer=tf.compat.v1.train.GradientDescentOptimizer(0.001),
)
model.fit(x, [y, s], epochs=1, batch_size=2, verbose=1)
def test_dropout_GRU(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
test_utils.layer_test(
keras.layers.GRU,
kwargs={"units": units, "dropout": 0.1, "recurrent_dropout": 0.1},
input_shape=(num_samples, timesteps, embedding_dim),
)
def test_constraints_GRU(self):
embedding_dim = 4
layer_class = keras.layers.GRU
k_constraint = keras.constraints.max_norm(0.01)
r_constraint = keras.constraints.max_norm(0.01)
b_constraint = keras.constraints.max_norm(0.01)
layer = layer_class(
5,
return_sequences=False,
weights=None,
input_shape=(None, embedding_dim),
kernel_constraint=k_constraint,
recurrent_constraint=r_constraint,
bias_constraint=b_constraint,
)
layer.build((None, None, embedding_dim))
self.assertEqual(layer.cell.kernel.constraint, k_constraint)
self.assertEqual(layer.cell.recurrent_kernel.constraint, r_constraint)
self.assertEqual(layer.cell.bias.constraint, b_constraint)
@parameterized.parameters([0, 1, 2])
def test_implementation_mode_GRU(self, implementation_mode):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
test_utils.layer_test(
keras.layers.GRU,
kwargs={"units": units, "implementation": implementation_mode},
input_shape=(num_samples, timesteps, embedding_dim),
)
def test_regularizers_GRU(self):
embedding_dim = 4
layer_class = keras.layers.GRU
layer = layer_class(
5,
return_sequences=False,
weights=None,
input_shape=(None, embedding_dim),
kernel_regularizer=keras.regularizers.l1(0.01),
recurrent_regularizer=keras.regularizers.l1(0.01),
bias_regularizer="l2",
activity_regularizer="l1",
)
layer.build((None, None, 2))
self.assertEqual(len(layer.losses), 3)
x = keras.backend.variable(np.ones((2, 3, 2)))
layer(x)
if tf.executing_eagerly():
self.assertEqual(len(layer.losses), 4)
else:
self.assertEqual(len(layer.get_losses_for(x)), 1)
@tf.test.disable_with_predicate(
pred=tf.test.is_built_with_rocm,
skip_message="Skipping as ROCm MIOpen does not support padded "
"input yet.",
)
def test_statefulness_GRU(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
layer_class = keras.layers.GRU
model = keras.models.Sequential()
model.add(
keras.layers.Embedding(
4,
embedding_dim,
mask_zero=True,
input_length=timesteps,
batch_input_shape=(num_samples, timesteps),
)
)
layer = layer_class(
units, return_sequences=False, stateful=True, weights=None
)
model.add(layer)
model.compile(
optimizer=tf.compat.v1.train.GradientDescentOptimizer(0.01),
loss="mse",
run_eagerly=test_utils.should_run_eagerly(),
)
out1 = model.predict(np.ones((num_samples, timesteps)))
self.assertEqual(out1.shape, (num_samples, units))
# train once so that the states change
model.train_on_batch(
np.ones((num_samples, timesteps)), np.ones((num_samples, units))
)
out2 = model.predict(np.ones((num_samples, timesteps)))
# if the state is not reset, output should be different
self.assertNotEqual(out1.max(), out2.max())
# check that output changes after states are reset
# (even though the model itself didn't change)
layer.reset_states()
out3 = model.predict(np.ones((num_samples, timesteps)))
self.assertNotEqual(out2.max(), out3.max())
# check that container-level reset_states() works
model.reset_states()
out4 = model.predict(np.ones((num_samples, timesteps)))
np.testing.assert_allclose(out3, out4, atol=1e-5)
# check that the call to `predict` updated the states
out5 = model.predict(np.ones((num_samples, timesteps)))
self.assertNotEqual(out4.max(), out5.max())
# Check masking
layer.reset_states()
left_padded_input = np.ones((num_samples, timesteps))
left_padded_input[0, :1] = 0
left_padded_input[1, :2] = 0
out6 = model.predict(left_padded_input)
layer.reset_states()
right_padded_input = np.ones((num_samples, timesteps))
right_padded_input[0, -1:] = 0
right_padded_input[1, -2:] = 0
out7 = model.predict(right_padded_input)
layer.reset_states()
mix_padded_input = np.ones((num_samples, timesteps))
mix_padded_input[0, 1] = 0
mix_padded_input[1, 0] = 0
mix_padded_input[1, 2] = 0
out8 = model.predict(mix_padded_input)
self.assertAllClose(out7, out6, atol=1e-5)
self.assertAllClose(out8, out7, atol=1e-5)
def test_stateful_GRU_training(self):
# See b/123587692 for more context.
vocab_size = 20
embedding_dim = 10
batch_size = 8
timestep = 12
units = 5
x = np.random.randint(0, vocab_size, size=(batch_size, timestep))
y = np.random.randint(0, vocab_size, size=(batch_size, timestep))
model = keras.Sequential(
[
keras.layers.Embedding(
vocab_size,
embedding_dim,
batch_input_shape=[batch_size, timestep],
),
keras.layers.GRU(units, return_sequences=True, stateful=True),
keras.layers.Dense(vocab_size),
]
)
model.compile(
optimizer="adam",
loss="sparse_categorical_crossentropy",
run_eagerly=test_utils.should_run_eagerly(),
)
model.fit(x, y, epochs=1, shuffle=False)
@tf.test.disable_with_predicate(
pred=tf.test.is_built_with_rocm,
skip_message="Skipping as ROCm MIOpen does not support padded "
"input yet.",
)
@test_utils.run_v2_only
def test_explicit_device_with_go_backward_and_mask(self):
batch_size = 8
timestep = 7
masksteps = 5
units = 4
inputs = np.random.randn(batch_size, timestep, units).astype(np.float32)
        mask = np.ones((batch_size, timestep)).astype(bool)
mask[:, masksteps:] = 0
gru_layer = keras.layers.GRU(
units, return_sequences=True, go_backwards=True
)
with test_utils.device(should_use_gpu=True):
outputs_masked = gru_layer(inputs, mask=tf.constant(mask))
outputs_trimmed = gru_layer(inputs[:, :masksteps])
self.assertAllClose(outputs_masked[:, -masksteps:], outputs_trimmed)
@tf_test_util.enable_output_all_intermediates
def test_v1_session_behavior(self):
with tf.compat.v1.get_default_graph().as_default():
# See b/139132348 for more details.
x = np.random.uniform(size=(100, 4, 8))
y = np.random.uniform(size=(100, 1))
dataset = (
tf.data.Dataset.from_tensor_slices((x, y))
.shuffle(100)
.batch(32)
)
inp = keras.layers.Input(shape=(4, 8))
layer = keras.layers.GRU(1)(inp)
layer = keras.layers.Dense(1)(layer)
model = keras.models.Model(inp, layer)
model.compile(loss="mse", optimizer="sgd")
model.fit(dataset)
def test_with_fully_masked_inputs(self):
num_samples = 8
timestep = 5
embedding_dim = 4
vocab_size = 20
units = 2
inputs = np.random.randint(0, vocab_size, size=(num_samples, timestep))
# Set the first inputs to be fully zero.
inputs[0, :] = 0.0
model = keras.models.Sequential()
model.add(
keras.layers.Embedding(
vocab_size,
embedding_dim,
mask_zero=True,
input_length=timestep,
batch_input_shape=(num_samples, timestep),
)
)
layer = keras.layers.GRU(units)
model.add(layer)
model.compile(
optimizer=tf.compat.v1.train.GradientDescentOptimizer(0.01),
loss="mse",
run_eagerly=test_utils.should_run_eagerly(),
)
# Make sure it doesn't crash with cudnn kernel.
model.predict(inputs)
# TODO (b/169895267): test with xla_gpu is disabled.
def test_deepcopy(self):
if not tf.executing_eagerly():
self.skipTest("v2-only test")
original_layer = keras.layers.GRU(5)
copied_layer = copy.deepcopy(original_layer)
self.assertEqual(copied_layer.units, 5)
self.assertEqual(
original_layer.get_config(), original_layer.get_config()
)
# Copy layer before layer call on inputs without weight initialization.
inputs = np.random.normal(size=[32, 10, 8]).astype(np.float32)
original_layer = keras.layers.GRU(4)
copied_layer = copy.deepcopy(original_layer)
outputs = original_layer(inputs)
copied_outputs = copied_layer(inputs)
self.assertNotAllClose(
self.evaluate(outputs), self.evaluate(copied_outputs)
)
# Copy layer after layer call on inputs with weight initialization.
original_layer = keras.layers.GRU(4)
outputs = original_layer(inputs)
copied_layer = copy.deepcopy(original_layer)
copied_outputs = copied_layer(inputs)
self.assertAllClose(
self.evaluate(outputs), self.evaluate(copied_outputs)
)
def _test_runtime_with_model(self, model):
(x_train, y_train), _ = test_utils.get_test_data(
train_samples=self.batch,
test_samples=0,
input_shape=(self.timestep, self.input_shape),
num_classes=self.output_shape,
)
y_train = np_utils.to_categorical(y_train, self.output_shape)
model.compile(optimizer="sgd", loss=["categorical_crossentropy", None])
existing_loss = 0
for _ in range(self.epoch):
history = model.fit(x_train, y_train)
loss_value = history.history["loss"][0]
self.assertNotEqual(existing_loss, loss_value)
existing_loss = loss_value
_, runtime_value = model.predict(x_train)
if tf.test.is_gpu_available():
self.assertEqual(runtime_value[0], gru_lstm_utils.RUNTIME_GPU)
else:
self.assertEqual(runtime_value[0], gru_lstm_utils.RUNTIME_CPU)
@test_utils.run_v2_only
def test_GRU_runtime(self):
layer = keras.layers.GRU(self.rnn_state_size, return_runtime=True)
inputs = keras.layers.Input(
shape=[self.timestep, self.input_shape], dtype=tf.float32
)
outputs, runtime = layer(inputs)
# Expand the runtime so that it is a 1D tensor instead of scalar.
        # TF model does not work with scalar model output, especially during
# aggregation.
runtime = keras.layers.Lambda(lambda x: tf.expand_dims(x, axis=-1))(
runtime
)
model = keras.models.Model(inputs=inputs, outputs=[outputs, runtime])
self._test_runtime_with_model(model)
@tf.test.disable_with_predicate(
pred=tf.test.is_built_with_rocm,
skip_message="Skipping as ROCm MIOpen does not support padded "
"input yet.",
)
@test_utils.run_v2_only
def test_GRU_runtime_with_mask(self):
# Masking will affect which backend is selected based on whether the
# mask is strictly right padded.
layer = keras.layers.GRU(self.rnn_state_size, return_runtime=True)
inputs = keras.layers.Input(
shape=[self.timestep, self.input_shape], dtype=tf.float32
)
masked_inputs = keras.layers.Masking()(inputs)
outputs, runtime = layer(masked_inputs)
# Expand the runtime so that it is a 1D tensor instead of scalar.
        # TF model does not work with scalar model output, especially during
# aggregation.
runtime = keras.layers.Lambda(lambda x: tf.expand_dims(x, axis=-1))(
runtime
)
model = keras.models.Model(inputs=inputs, outputs=[outputs, runtime])
(x_train, y_train), _ = test_utils.get_test_data(
train_samples=self.batch,
test_samples=0,
input_shape=(self.timestep, self.input_shape),
num_classes=self.output_shape,
)
y_train = np_utils.to_categorical(y_train, self.output_shape)
model.compile(
optimizer="sgd",
loss=["categorical_crossentropy", None],
run_eagerly=test_utils.should_run_eagerly(),
)
model.fit(x_train, y_train)
# Verify unpadded data.
_, runtime_value = model.predict(x_train)
if tf.test.is_gpu_available():
self.assertEqual(runtime_value[0], gru_lstm_utils.RUNTIME_GPU)
else:
self.assertEqual(runtime_value[0], gru_lstm_utils.RUNTIME_CPU)
# Update x/y to be right padded by setting the last timestep to 0
x_train[:, -1, :] = 0
y_train[:, -1] = 0
_, runtime_value = model.predict(x_train)
if tf.test.is_gpu_available():
self.assertEqual(runtime_value[0], gru_lstm_utils.RUNTIME_GPU)
else:
self.assertEqual(runtime_value[0], gru_lstm_utils.RUNTIME_CPU)
# Further update x/y to be mix padded (masks in the middle), and verify
# only cpu kernel can be selected.
x_train[:, -3, :] = 0
y_train[:, -3] = 0
_, runtime_value = model.predict(x_train)
self.assertEqual(runtime_value[0], gru_lstm_utils.RUNTIME_CPU)
@test_utils.run_v2_only
def test_GRU_runtime_with_cond(self):
        # This test demonstrates the grappler plugin's graph rewrite under the
        # condition that the function returns a different number of internal
        # states.
layer = keras.layers.GRU(self.rnn_state_size, return_runtime=True)
inputs = keras.layers.Input(
shape=[self.timestep, self.input_shape], dtype=tf.float32
)
zeros = tf.zeros([self.batch, self.output_shape])
dummy_runtime = gru_lstm_utils.runtime(gru_lstm_utils.RUNTIME_UNKNOWN)
a = tf.constant(0)
b = tf.constant(1)
# Will always run the GRU layer.
outputs, runtime = tf.cond(
tf.less(a, b), lambda: layer(inputs), lambda: (zeros, dummy_runtime)
)
# Expand the runtime so that it is a 1D tensor instead of scalar.
        # TF model does not work with scalar model output, especially during
# aggregation.
runtime = keras.layers.Lambda(lambda x: tf.expand_dims(x, axis=-1))(
runtime
)
model = keras.models.Model(inputs=inputs, outputs=[outputs, runtime])
self._test_runtime_with_model(model)
@test_utils.run_all_without_tensor_float_32("RNN GRU can use TF32 on GPU")
class GRULayerGradientTapeTest(test_combinations.TestCase):
@test_combinations.generate(test_combinations.combine(mode=["eager"]))
def test_in_tape(self):
with self.test_session(config=_config):
time_steps = 10
embedding_size = 11
gru_unit_size = 12
gru_layer = keras.layers.GRU(
gru_unit_size,
return_sequences=True,
return_state=True,
recurrent_activation="sigmoid",
recurrent_initializer="glorot_uniform",
)
x = tf.random.uniform([1, time_steps, embedding_size])
y = tf.random.uniform([1, gru_unit_size])
with tf.GradientTape() as tape:
hidden_state = tf.zeros([1, gru_unit_size], dtype=tf.float32)
_, state = gru_layer(x, initial_state=hidden_state)
loss = tf.reduce_mean(tf.square(state - y))
tape.gradient(loss, gru_layer.variables)
@test_combinations.run_all_keras_modes
class GRULayerTest(test_combinations.TestCase):
def test_return_sequences_gru(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
test_utils.layer_test(
keras.layers.GRU,
kwargs={"units": units, "return_sequences": True},
input_shape=(num_samples, timesteps, embedding_dim),
)
@tf.test.disable_with_predicate(
pred=tf.test.is_built_with_rocm,
skip_message="Double type is not yet supported in ROCm",
)
@test_utils.run_v2_only
def test_float64_gru(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
test_utils.layer_test(
keras.layers.GRU,
kwargs={
"units": units,
"return_sequences": True,
"dtype": "float64",
},
input_shape=(num_samples, timesteps, embedding_dim),
input_dtype="float64",
)
def test_dynamic_behavior_gru(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
layer = keras.layers.GRU(units, input_shape=(None, embedding_dim))
model = keras.models.Sequential()
model.add(layer)
model.compile(
"rmsprop", "mse", run_eagerly=test_utils.should_run_eagerly()
)
x = np.random.random((num_samples, timesteps, embedding_dim))
y = np.random.random((num_samples, units))
model.train_on_batch(x, y)
def test_dropout_gru(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
test_utils.layer_test(
keras.layers.GRU,
kwargs={"units": units, "dropout": 0.1, "recurrent_dropout": 0.1},
input_shape=(num_samples, timesteps, embedding_dim),
)
def test_recurrent_dropout_with_implementation_restriction(self):
layer = keras.layers.GRU(2, recurrent_dropout=0.1, implementation=2)
        # The implementation is forced to 1 due to the limitation of
        # recurrent_dropout.
self.assertEqual(layer.implementation, 1)
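        # Editor-added, hedged illustration (not an original assertion): the
        # same coercion can be observed on any fresh layer, e.g.
        #   keras.layers.GRU(4, recurrent_dropout=0.2, implementation=2)
        # reports implementation == 1 right after construction.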
@parameterized.parameters([0, 1, 2])
def test_implementation_mode_gru(self, implementation_mode):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
test_utils.layer_test(
keras.layers.GRU,
kwargs={"units": units, "implementation": implementation_mode},
input_shape=(num_samples, timesteps, embedding_dim),
)
def test_reset_after_gru(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
(x_train, y_train), _ = test_utils.get_test_data(
train_samples=num_samples,
test_samples=0,
input_shape=(timesteps, embedding_dim),
num_classes=units,
)
y_train = np_utils.to_categorical(y_train, units)
inputs = keras.layers.Input(shape=[timesteps, embedding_dim])
gru_layer = keras.layers.GRU(units, reset_after=True)
output = gru_layer(inputs)
gru_model = keras.models.Model(inputs, output)
gru_model.compile(
"rmsprop", "mse", run_eagerly=test_utils.should_run_eagerly()
)
gru_model.fit(x_train, y_train)
gru_model.predict(x_train)
@tf.test.disable_with_predicate(
pred=tf.test.is_built_with_rocm,
skip_message="MIOpen only supports packed input output",
)
def test_with_masking_layer_gru(self):
layer_class = keras.layers.GRU
inputs = np.random.random((2, 3, 4))
targets = np.abs(np.random.random((2, 3, 5)))
targets /= targets.sum(axis=-1, keepdims=True)
model = keras.models.Sequential()
model.add(keras.layers.Masking(input_shape=(3, 4)))
model.add(layer_class(units=5, return_sequences=True, unroll=False))
model.compile(
loss="categorical_crossentropy",
optimizer="rmsprop",
run_eagerly=test_utils.should_run_eagerly(),
)
model.fit(inputs, targets, epochs=1, batch_size=2, verbose=1)
@tf.test.disable_with_predicate(
pred=tf.test.is_built_with_rocm,
skip_message="MIOpen only supports packed input output",
)
def test_statefulness_gru(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
layer_class = keras.layers.GRU
model = keras.models.Sequential()
model.add(
keras.layers.Embedding(
4,
embedding_dim,
mask_zero=True,
input_length=timesteps,
batch_input_shape=(num_samples, timesteps),
)
)
layer = layer_class(
units, return_sequences=False, stateful=True, weights=None
)
model.add(layer)
model.compile(
optimizer="sgd",
loss="mse",
run_eagerly=test_utils.should_run_eagerly(),
)
out1 = model.predict(np.ones((num_samples, timesteps)))
self.assertEqual(out1.shape, (num_samples, units))
# train once so that the states change
model.train_on_batch(
np.ones((num_samples, timesteps)), np.ones((num_samples, units))
)
out2 = model.predict(np.ones((num_samples, timesteps)))
# if the state is not reset, output should be different
self.assertNotEqual(out1.max(), out2.max())
# check that output changes after states are reset
# (even though the model itself didn't change)
layer.reset_states()
out3 = model.predict(np.ones((num_samples, timesteps)))
self.assertNotEqual(out2.max(), out3.max())
# check that container-level reset_states() works
model.reset_states()
out4 = model.predict(np.ones((num_samples, timesteps)))
np.testing.assert_allclose(out3, out4, atol=1e-5)
# check that the call to `predict` updated the states
out5 = model.predict(np.ones((num_samples, timesteps)))
self.assertNotEqual(out4.max(), out5.max())
# Check masking
layer.reset_states()
left_padded_input = np.ones((num_samples, timesteps))
left_padded_input[0, :1] = 0
left_padded_input[1, :2] = 0
out6 = model.predict(left_padded_input)
layer.reset_states()
right_padded_input = np.ones((num_samples, timesteps))
right_padded_input[0, -1:] = 0
right_padded_input[1, -2:] = 0
out7 = model.predict(right_padded_input)
np.testing.assert_allclose(out7, out6, atol=1e-5)
def test_get_initial_states(self):
batch_size = 4
cell = keras.layers.GRUCell(20)
initial_state = cell.get_initial_state(
batch_size=batch_size, dtype=tf.float32
)
_, state = cell(
np.ones((batch_size, 20), dtype=np.float32), initial_state
)
self.assertEqual(state.shape, initial_state.shape)
@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
class GRULayerGenericTest(tf.test.TestCase):
def test_constraints_gru(self):
embedding_dim = 4
layer_class = keras.layers.GRU
k_constraint = keras.constraints.max_norm(0.01)
r_constraint = keras.constraints.max_norm(0.01)
b_constraint = keras.constraints.max_norm(0.01)
layer = layer_class(
5,
return_sequences=False,
weights=None,
input_shape=(None, embedding_dim),
kernel_constraint=k_constraint,
recurrent_constraint=r_constraint,
bias_constraint=b_constraint,
)
layer.build((None, None, embedding_dim))
self.assertEqual(layer.cell.kernel.constraint, k_constraint)
self.assertEqual(layer.cell.recurrent_kernel.constraint, r_constraint)
self.assertEqual(layer.cell.bias.constraint, b_constraint)
def test_from_config_gru(self):
layer_class = keras.layers.GRU
for stateful in (False, True):
l1 = layer_class(units=1, stateful=stateful)
l2 = layer_class.from_config(l1.get_config())
assert l1.get_config() == l2.get_config()
def test_deep_copy_gru(self):
cell = keras.layers.GRUCell(5)
copied_cell = copy.deepcopy(cell)
self.assertEqual(copied_cell.units, 5)
self.assertEqual(cell.get_config(), copied_cell.get_config())
def test_regularizers_gru(self):
embedding_dim = 4
layer_class = keras.layers.GRU
layer = layer_class(
5,
return_sequences=False,
weights=None,
input_shape=(None, embedding_dim),
kernel_regularizer=keras.regularizers.l1(0.01),
recurrent_regularizer=keras.regularizers.l1(0.01),
bias_regularizer="l2",
activity_regularizer="l1",
)
layer.build((None, None, 2))
self.assertLen(layer.losses, 3)
x = keras.backend.variable(np.ones((2, 3, 2)))
layer(x)
if tf.executing_eagerly():
self.assertLen(layer.losses, 4)
else:
self.assertLen(layer.get_losses_for(x), 1)
if __name__ == "__main__":
tf.test.main()
| 36.206458
| 80
| 0.619409
|
139d6f1536274d1174ed1f3c34a1906174794564
| 5,722
|
py
|
Python
|
framework/SupervisedLearning/ScikitLearn/MultiClass/OneVsRestClassifier.py
|
FlanFlanagan/raven
|
bd7fca18af94376a28e2144ba1da72c01c8d343c
|
[
"Apache-2.0"
] | 1
|
2022-03-10T18:54:09.000Z
|
2022-03-10T18:54:09.000Z
|
framework/SupervisedLearning/ScikitLearn/MultiClass/OneVsRestClassifier.py
|
FlanFlanagan/raven
|
bd7fca18af94376a28e2144ba1da72c01c8d343c
|
[
"Apache-2.0"
] | null | null | null |
framework/SupervisedLearning/ScikitLearn/MultiClass/OneVsRestClassifier.py
|
FlanFlanagan/raven
|
bd7fca18af94376a28e2144ba1da72c01c8d343c
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on Jun 30, 2021
@author: wangc
  One-vs-the-rest (OvR) multiclass strategy classifier
"""
#Internal Modules (Lazy Importer)--------------------------------------------------------------------
#Internal Modules (Lazy Importer) End----------------------------------------------------------------
#External Modules------------------------------------------------------------------------------------
#External Modules End--------------------------------------------------------------------------------
#Internal Modules------------------------------------------------------------------------------------
from SupervisedLearning.ScikitLearn import ScikitLearnBase
from utils import InputData, InputTypes
#Internal Modules End--------------------------------------------------------------------------------
class OneVsRestClassifier(ScikitLearnBase):
"""
    One-vs-the-rest (OvR) multiclass strategy classifier
"""
info = {'problemtype':'classification', 'normalize':False}
def __init__(self):
"""
Constructor that will appropriately initialize a supervised learning object
@ In, None
@ Out, None
"""
super().__init__()
import sklearn
import sklearn.multiclass
self.model = sklearn.multiclass.OneVsRestClassifier
@classmethod
def getInputSpecification(cls):
"""
Method to get a reference to a class that specifies the input data for
class cls.
@ In, cls, the class for which we are retrieving the specification
@ Out, inputSpecification, InputData.ParameterInput, class to use for
specifying input of cls.
"""
specs = super().getInputSpecification()
specs.description = r"""The \xmlNode{OneVsRestClassifier} (\textit{One-vs-the-rest (OvR) multiclass strategy})
Also known as one-vs-all, this strategy consists in fitting one classifier per class. For each
classifier, the class is fitted against all the other classes. In addition to its computational
efficiency (only n\_classes classifiers are needed), one advantage of this approach is its
interpretability. Since each class is represented by one and one classifier only, it is
possible to gain knowledge about the class by inspecting its corresponding classifier.
This is the most commonly used strategy for multiclass classification and is a fair default choice.
\zNormalizationNotPerformed{OneVsRestClassifier}
"""
estimatorInput = InputData.assemblyInputFactory("estimator", contentType=InputTypes.StringType,
descr=r"""name of a ROM that can be used as an estimator""", default='no-default')
#TODO: Add more inputspecs for estimator
specs.addSub(estimatorInput)
specs.addSub(InputData.parameterInputFactory("n_jobs", contentType=InputTypes.IntegerType,
                                                 descr=r"""The number of jobs to use for the computation: the n\_classes one-vs-rest
problems are computed in parallel. None means 1 unless in a joblib.parallel\_backend
context. -1 means using all processors.""", default=None))
return specs
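  # Editor-added, hedged illustration of the underlying scikit-learn strategy
  # that this ROM wraps (plain sklearn, independent of the RAVEN input specs;
  # X_train/y_train/X_test are placeholders):
  #
  #   from sklearn.multiclass import OneVsRestClassifier
  #   from sklearn.svm import LinearSVC
  #   clf = OneVsRestClassifier(LinearSVC(), n_jobs=-1).fit(X_train, y_train)
  #   predictions = clf.predict(X_test)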
def _handleInput(self, paramInput):
"""
Function to handle the common parts of the distribution parameter input.
@ In, paramInput, ParameterInput, the already parsed input.
@ Out, None
"""
super()._handleInput(paramInput)
settings, notFound = paramInput.findNodesAndExtractValues(['n_jobs'])
# notFound must be empty
assert(not notFound)
self.settings = settings
def setEstimator(self, estimatorList):
"""
Initialization method
@ In, estimatorList, list of ROM instances/estimators used by ROM
@ Out, None
"""
if len(estimatorList) != 1:
self.raiseAWarning('ROM', self.name, 'can only accept one estimator, but multiple estimators are provided!',
                         'Only the first one will be used, i.e.,', estimatorList[0].name)
estimator = estimatorList[0]
if estimator._interfaceROM.multioutputWrapper:
sklEstimator = estimator._interfaceROM.model.get_params()['estimator']
else:
sklEstimator = estimator._interfaceROM.model
if not callable(getattr(sklEstimator, "fit", None)):
self.raiseAnError(IOError, 'estimator:', estimator.name, 'can not be used! Please change to a different estimator')
###FIXME: We may need to check 'predict_proba' inside ROM class
# elif not callable(getattr(sklEstimator, "predict_proba", None)) and not callable(getattr(sklEstimator, "decision_function", None)):
# self.raiseAnError(IOError, 'estimator:', estimator.name, 'can not be used! Please change to a different estimator')
else:
self.raiseADebug('A valid estimator', estimator.name, 'is provided!')
settings = {'estimator':sklEstimator}
self.settings.update(settings)
self.initializeModel(self.settings)
| 49.756522
| 137
| 0.626704
|
490d472ced6d0f3bceb06e524bd4b3077eb011bd
| 658
|
py
|
Python
|
src/robust_deid/sequence_tagging/models/hf/crf/crf_token_classifier_output.py
|
obi-ml-public/ehr_deidentification
|
c9deaf30b8317689d28a4267d15ec13baa9791cd
|
[
"MIT"
] | null | null | null |
src/robust_deid/sequence_tagging/models/hf/crf/crf_token_classifier_output.py
|
obi-ml-public/ehr_deidentification
|
c9deaf30b8317689d28a4267d15ec13baa9791cd
|
[
"MIT"
] | null | null | null |
src/robust_deid/sequence_tagging/models/hf/crf/crf_token_classifier_output.py
|
obi-ml-public/ehr_deidentification
|
c9deaf30b8317689d28a4267d15ec13baa9791cd
|
[
"MIT"
] | null | null | null |
import torch
from dataclasses import dataclass
from transformers.modeling_outputs import TokenClassifierOutput
@dataclass
class CRFTokenClassifierOutput(TokenClassifierOutput):
"""
    The default TokenClassifierOutput returns logits, loss, hidden_states and attentions.
    When we use the CRF module, we want the model.forward function to return the predicted
    sequence from the CRF module, so we introduce this class, which subclasses
    TokenClassifierOutput and additionally returns the predictions tensor, which contains
    the predicted sequences for the training examples.
"""
predictions: torch.LongTensor = None
scores: torch.LongTensor = None
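A hedged usage sketch of the dataclass above; the tensor shapes and values are invented for illustration, and the import path simply mirrors the repository layout shown in this record.

# Hedged sketch: populating CRFTokenClassifierOutput (shapes and values are made up).
import torch
from robust_deid.sequence_tagging.models.hf.crf.crf_token_classifier_output import (
    CRFTokenClassifierOutput,
)

batch_size, seq_len, num_labels = 2, 5, 3
logits = torch.randn(batch_size, seq_len, num_labels)
# In the real model these label indices would come from the CRF decode step.
predictions = torch.randint(0, num_labels, (batch_size, seq_len))

output = CRFTokenClassifierOutput(
    loss=torch.tensor(0.42),    # inherited from TokenClassifierOutput
    logits=logits,              # inherited from TokenClassifierOutput
    predictions=predictions,    # added by the subclass above
)
print(output.predictions.shape)  # torch.Size([2, 5])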
| 38.705882
| 99
| 0.794833
|
3b4c7b80d4dc90d5ad0f2c786bd5e12d2c919597
| 284
|
py
|
Python
|
automation/tincrepo/main/pxf/features/hive/errors/incorrectProfile/runTest.py
|
lchx1010/pxf
|
f6e11f91fb8c01ed27fc829beb3800f3b253c209
|
[
"Apache-2.0"
] | 46
|
2018-10-22T23:34:03.000Z
|
2022-03-31T09:31:34.000Z
|
automation/tincrepo/main/pxf/features/hive/errors/incorrectProfile/runTest.py
|
lchx1010/pxf
|
f6e11f91fb8c01ed27fc829beb3800f3b253c209
|
[
"Apache-2.0"
] | 317
|
2018-10-05T23:51:48.000Z
|
2022-03-22T17:38:52.000Z
|
automation/tincrepo/main/pxf/features/hive/errors/incorrectProfile/runTest.py
|
lchx1010/pxf
|
f6e11f91fb8c01ed27fc829beb3800f3b253c209
|
[
"Apache-2.0"
] | 46
|
2018-10-10T18:55:00.000Z
|
2022-03-28T07:27:04.000Z
|
from mpp.models import SQLConcurrencyTestCase
from mpp.models import SQLTestCase
class PxfHiveIncorrectProfile(SQLConcurrencyTestCase):
"""
@db_name pxfautomation
@concurrency 1
@gpdiff True
"""
sql_dir = 'sql'
ans_dir = 'expected'
out_dir = 'output'
| 21.846154
| 54
| 0.707746
|
4990eac824c6065a206b776206dc22831faff913
| 8,285
|
py
|
Python
|
eve_glue/notification_type.py
|
ccpgames/eve-glue
|
8d070ea612b93ac3c82d07d4af14d9eda60cdd13
|
[
"MIT"
] | 13
|
2017-11-27T16:42:59.000Z
|
2018-04-18T18:04:53.000Z
|
eve_glue/notification_type.py
|
ccpgames/eve-glue
|
8d070ea612b93ac3c82d07d4af14d9eda60cdd13
|
[
"MIT"
] | 7
|
2017-11-27T16:42:35.000Z
|
2018-05-15T11:06:03.000Z
|
eve_glue/notification_type.py
|
ccpgames/eve-glue
|
8d070ea612b93ac3c82d07d4af14d9eda60cdd13
|
[
"MIT"
] | 4
|
2017-12-20T10:03:59.000Z
|
2018-05-11T20:03:45.000Z
|
"""Helpers for character notifications."""
import enum
from eve_glue.enums import new_from_enum
class NotificationTypeEnumV2(enum.Enum):
"""Maps notification type IDs to names."""
OldLscMessages = 1
CharTerminationMsg = 2
CharMedalMsg = 3
AllMaintenanceBillMsg = 4
AllWarDeclaredMsg = 5
AllWarSurrenderMsg = 6
AllWarRetractedMsg = 7
AllWarInvalidatedMsg = 8
CorpAllBillMsg = 10
BillOutOfMoneyMsg = 11
BillPaidCorpAllMsg = 13
BountyClaimMsg = 14
CloneActivationMsg = 15
CorpAppNewMsg = 16
CorpAppRejectMsg = 17
CorpAppAcceptMsg = 18
CorpTaxChangeMsg = 19
CorpNewsMsg = 20
CharLeftCorpMsg = 21
CorpNewCEOMsg = 22
CorpDividendMsg = 23
CorpVoteMsg = 25
CorpVoteCEORevokedMsg = 26
CorpWarDeclaredMsg = 27
CorpWarFightingLegalMsg = 28
CorpWarSurrenderMsg = 29
CorpWarRetractedMsg = 30
CorpWarInvalidatedMsg = 31
ContainerPasswordMsg = 32
CustomsMsg = 33
InsuranceFirstShipMsg = 34
InsurancePayoutMsg = 35
InsuranceInvalidatedMsg = 36
SovCorpClaimFailMsg = 38
SovCorpBillLateMsg = 40
SovAllClaimLostMsg = 41
SovAllClaimAquiredMsg = 43
AllAnchoringMsg = 45
AllStructVulnerableMsg = 46
AllStrucInvulnerableMsg = 47
SovDisruptorMsg = 48
CorpStructLostMsg = 49
CorpOfficeExpirationMsg = 50
CloneRevokedMsg1 = 51
CloneMovedMsg = 52
CloneRevokedMsg2 = 53
InsuranceExpirationMsg = 54
InsuranceIssuedMsg = 55
JumpCloneDeletedMsg1 = 56
JumpCloneDeletedMsg2 = 57
FWCorpJoinMsg = 58
FWCorpLeaveMsg = 59
FWCorpKickMsg = 60
FWCharKickMsg = 61
FWCorpWarningMsg = 62
FWCharWarningMsg = 63
FWCharRankLossMsg = 64
FWCharRankGainMsg = 65
TransactionReversalMsg = 67
ReimbursementMsg = 68
LocateCharMsg = 69
ResearchMissionAvailableMsg = 70
MissionOfferExpirationMsg = 71
MissionTimeoutMsg = 72
StoryLineMissionAvailableMsg = 73
TutorialMsg = 74
TowerAlertMsg = 75
TowerResourceAlertMsg = 76
StationAggressionMsg1 = 77
StationStateChangeMsg = 78
StationConquerMsg = 79
StationAggressionMsg2 = 80
FacWarCorpJoinRequestMsg = 81
FacWarCorpLeaveRequestMsg = 82
FacWarCorpJoinWithdrawMsg = 83
FacWarCorpLeaveWithdrawMsg = 84
CorpLiquidationMsg = 85
SovereigntyTCUDamageMsg = 86
SovereigntySBUDamageMsg = 87
SovereigntyIHDamageMsg = 88
ContactAdd = 89
ContactEdit = 90
IncursionCompletedMsg = 91
CorpKicked = 92
OrbitalAttacked = 93
OrbitalReinforced = 94
OwnershipTransferred = 95
FWAllianceWarningMsg = 96
FWAllianceKickMsg = 97
AllWarCorpJoinedAllianceMsg = 98
AllyJoinedWarDefenderMsg = 99
AllyJoinedWarAggressorMsg = 100
AllyJoinedWarAllyMsg = 101
MercOfferedNegotiationMsg = 102
WarSurrenderOfferMsg = 103
WarSurrenderDeclinedMsg = 104
FacWarLPPayoutKill = 105
FacWarLPPayoutEvent = 106
FacWarLPDisqualifiedEvent = 107
FacWarLPDisqualifiedKill = 108
AllyContractCancelled = 109
WarAllyOfferDeclinedMsg = 110
BountyYourBountyClaimed = 111
BountyPlacedChar = 112
BountyPlacedCorp = 113
BountyPlacedAlliance = 114
KillRightAvailable = 115
KillRightAvailableOpen = 116
KillRightEarned = 117
KillRightUsed = 118
KillRightUnavailable = 119
KillRightUnavailableOpen = 120
DeclareWar = 121
OfferedSurrender = 122
AcceptedSurrender = 123
MadeWarMutual = 124
RetractsWar = 125
OfferedToAlly = 126
AcceptedAlly = 127
CharAppAcceptMsg = 128
CharAppRejectMsg = 129
CharAppWithdrawMsg = 130
DustAppAcceptedMsg = 131
DistrictAttacked = 132
BattlePunishFriendlyFire = 133
BountyESSTaken = 134
BountyESSShared = 135
IndustryTeamAuctionWon = 136
IndustryTeamAuctionLost = 137
CloneActivationMsg2 = 138
CorpAppInvitedMsg = 139
KillReportVictim = 140
KillReportFinalBlow = 141
CorpAppRejectCustomMsg = 142
CorpFriendlyFireEnableTimerStarted = 143 # pylint: disable=invalid-name
CorpFriendlyFireDisableTimerStarted = 144 # pylint: disable=invalid-name
CorpFriendlyFireEnableTimerCompleted = 145 # pylint: disable=invalid-name
CorpFriendlyFireDisableTimerCompleted = 146 # pylint: disable=invalid-name
EntosisCaptureStarted = 147
StationServiceEnabled = 148
StationServiceDisabled = 149
InfrastructureHubBillAboutToExpire = 152 # pylint: disable=invalid-name
SovStructureReinforced = 160
SovCommandNodeEventStarted = 161
SovStructureDestroyed = 162
SovStationEnteredFreeport = 163
IHubDestroyedByBillFailure = 164
AllianceCapitalChanged = 165
BuddyConnectContactAdd = 166
SovStructureSelfDestructRequested = 167 # pylint: disable=invalid-name
SovStructureSelfDestructCancel = 168
SovStructureSelfDestructFinished = 169 # pylint: disable=invalid-name
StructureFuelAlert = 181
StructureAnchoring = 182
StructureUnanchoring = 183
StructureUnderAttack = 184
StructureOnline = 185
StructureLostShields = 186
StructureLostArmor = 187
StructureDestroyed = 188
StructureItemsMovedToSafety = 190
StructureServicesOffline = 198
StructureItemsDelivered = 199
SeasonalChallengeCompleted = 200
StructureCourierContractChanged = 201
OperationFinished = 1012
GiftReceived = 1022
GameTimeReceived = 1030
GameTimeSent = 1031
GameTimeAdded = 1032
NPCStandingsLost = 3001
NPCStandingsGained = 3002
MoonminingExtractionStarted = 202
MoonminingExtractionCancelled = 203
MoonminingExtractionFinished = 204
MoonminingLaserFired = 205
MoonminingAutomaticFracture = 206
StructureWentLowPower = 207
StructureWentHighPower = 208
StructuresReinforcementChanged = 209
NotificationTypeEnumV3 = new_from_enum( # pylint: disable=invalid-name
"NotificationTypeEnumV3",
NotificationTypeEnumV2,
add={
"StructuresJobsPaused": 210,
"StructuresJobsCancelled": 211,
})
NotificationTypeEnumV4 = new_from_enum( # pylint: disable=invalid-name
"NotificationTypeEnumV4",
NotificationTypeEnumV3,
add={
"CombatOperationFinished": 1013,
"IndustryOperationFinished": 1014,
"ESSMainBankLink": 1015,
})
NotificationTypeEnumV5 = new_from_enum( # pylint: disable=invalid-name
"NotificationTypeEnumV5",
NotificationTypeEnumV4,
add={
"CorpBecameWarEligible": 221,
"CorpNoLongerWarEligible": 222,
"WarHQRemovedFromSpace": 223,
"CorpWarDeclaredV2": 224,
"AllianceWarDeclaredV2": 225,
}
)
NotificationTypeEnumV6 = new_from_enum( # pylint: disable=invalid-name
"NotificationTypeEnumV6",
NotificationTypeEnumV5,
add={
"InvasionSystemLogin": 226,
"MutualWarInviteSent": 229,
"MutualWarInviteRejected": 230,
"MutualWarInviteAccepted": 231,
"WarDeclared": 232,
"WarAdopted ": 233,
"MutualWarExpired": 234,
"WarInherited": 235,
"WarAllyInherited": 236,
"WarConcordInvalidates": 237,
"WarRetracted": 238,
"WarRetractedByConcord": 239,
"WarInvalid": 240,
}
)
NotificationTypeEnumV7 = new_from_enum( # pylint: disable=invalid-name
"NotificationTypeEnumV7",
NotificationTypeEnumV6,
add={
"MercOfferRetractedMsg": 241,
"OfferToAllyRetracted": 242,
})
NotificationTypeEnumV8 = new_from_enum( # pylint: disable=invalid-name
"NotificationTypeEnumV8",
NotificationTypeEnumV7,
add={
"InvasionSystemStart": 227,
"InvasionCompletedMsg": 228,
"RaffleCreated": 243,
"RaffleExpired": 244,
"RaffleFinished": 245,
"WarEndedHqSecurityDrop": 246,
"MissionCanceledTriglavian": 247,
"AgentRetiredTrigravian": 248,
"StructureImpendingAbandonmentAssetsAtRisk": 249,
"OfficeLeaseCanceledInsufficientStandings": 250,
"ContractRegionChangedToPochven": 251,
}
)
NotificationTypeEnumV9 = new_from_enum( # pylint: disable=invalid-name
"NotificationTypeEnumV9",
NotificationTypeEnumV8,
add={
"ExpertSystemExpiryImminent": 252,
"ExpertSystemExpired": 253,
}
)
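As a usage note, each versioned enum supports the standard enum lookups, so a notification type ID can be resolved to a name. The sketch below assumes that new_from_enum produces an ordinary enum.Enum containing both the inherited and the added members, which is what the construction above implies.

# Hedged sketch: resolving notification type IDs to names.
from eve_glue.notification_type import NotificationTypeEnumV2, NotificationTypeEnumV9

print(NotificationTypeEnumV2(74).name)                 # "TutorialMsg"
print(NotificationTypeEnumV2["TowerAlertMsg"].value)   # 75
# Members added in later versions are assumed to be present only on those versions.
print(NotificationTypeEnumV9(253).name)                # "ExpertSystemExpired"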
| 30.018116
| 79
| 0.713458
|
bfe296881c3d952c3388294ff7f928b941fbd97b
| 13,256
|
py
|
Python
|
rest_email_auth/serializers.py
|
cdriehuys/django-rest-email-auth
|
9310b68cf24b6bf16cd0177d6b19ab0470382244
|
[
"MIT"
] | 15
|
2018-11-06T03:32:24.000Z
|
2022-01-11T14:08:28.000Z
|
rest_email_auth/serializers.py
|
cdriehuys/django-rest-email-auth
|
9310b68cf24b6bf16cd0177d6b19ab0470382244
|
[
"MIT"
] | 98
|
2017-09-01T05:42:09.000Z
|
2022-02-10T07:05:35.000Z
|
rest_email_auth/serializers.py
|
cdriehuys/django-rest-email-auth
|
9310b68cf24b6bf16cd0177d6b19ab0470382244
|
[
"MIT"
] | 6
|
2018-12-24T06:41:54.000Z
|
2022-02-25T10:39:36.000Z
|
"""Serializers for the ``rest_email_auth`` app.
The serializers handle the conversion of data between the JSON or form
data the API receives and native Python datatypes.
"""
import logging
from django.contrib.auth import get_user_model, password_validation
from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers
from rest_email_auth import models, signals, app_settings
logger = logging.getLogger(__name__)
class EmailSerializer(serializers.ModelSerializer):
"""
Serializer for email addresses.
"""
class Meta(object):
extra_kwargs = {
"email": {
# We remove the autogenerated 'unique' validator to
# avoid leaking email addresses.
"validators": []
}
}
fields = ("id", "created_at", "email", "is_primary", "is_verified")
model = models.EmailAddress
read_only_fields = ("is_verified",)
def create(self, validated_data):
"""
Create a new email and send a confirmation to it.
Returns:
The newly creating ``EmailAddress`` instance.
"""
email_query = models.EmailAddress.objects.filter(
email=self.validated_data["email"]
)
if email_query.exists():
email = email_query.get()
email.send_duplicate_notification()
else:
email = super(EmailSerializer, self).create(validated_data)
email.send_confirmation()
user = validated_data.get("user")
query = models.EmailAddress.objects.filter(
is_primary=True, user=user
)
if not query.exists():
email.set_primary()
return email
def update(self, instance, validated_data):
"""
Update the instance the serializer is bound to.
Args:
instance:
The instance the serializer is bound to.
validated_data:
The data to update the serializer with.
Returns:
The updated instance.
"""
is_primary = validated_data.pop("is_primary", False)
instance = super(EmailSerializer, self).update(
instance, validated_data
)
if is_primary:
instance.set_primary()
return instance
def validate_email(self, email):
"""
Validate the provided email address.
The email address is first modified to match the RFC spec.
Namely, the domain portion of the email is lowercased.
Returns:
The validated email address.
Raises:
serializers.ValidationError:
If the serializer is bound and the provided email
doesn't match the existing address.
"""
user, domain = email.rsplit("@", 1)
email = "@".join([user, domain.lower()])
if self.instance and email and self.instance.email != email:
raise serializers.ValidationError(
_(
"Existing emails may not be edited. Create a new one "
"instead."
)
)
return email
def validate_is_primary(self, is_primary):
"""
Validate the provided 'is_primary' parameter.
Returns:
The validated 'is_primary' value.
Raises:
serializers.ValidationError:
If the user attempted to mark an unverified email as
their primary email address.
"""
# TODO: Setting 'is_primary' to 'False' should probably not be
# allowed.
if is_primary and not (self.instance and self.instance.is_verified):
raise serializers.ValidationError(
_(
"Unverified email addresses may not be used as the "
"primary address."
)
)
return is_primary
class EmailVerificationSerializer(serializers.Serializer):
"""
Serializer for verifying an email address.
"""
email = serializers.EmailField(read_only=True)
key = serializers.CharField(write_only=True)
password = serializers.CharField(
style={"input_type": "password"}, write_only=True
)
def __init__(self, *args, **kwargs):
"""
Conditionally remove the password field based on if a password
is required to verify an email address.
"""
super().__init__(*args, **kwargs)
self._confirmation = None
if not app_settings.EMAIL_VERIFICATION_PASSWORD_REQUIRED:
self.fields.pop("password")
def save(self):
"""
Confirm the email address matching the confirmation key.
"""
self._confirmation.confirm()
self._confirmation.delete()
def validate(self, data):
"""
Validate the provided data.
Returns:
dict:
The validated data.
Raises:
serializers.ValidationError:
If the provided password is invalid.
"""
user = self._confirmation.email.user
if (
app_settings.EMAIL_VERIFICATION_PASSWORD_REQUIRED
and not user.check_password(data["password"])
):
raise serializers.ValidationError(
_("The provided password is invalid.")
)
# Add email to returned data
data["email"] = self._confirmation.email.email
return data
def validate_key(self, key):
"""
Validate the provided confirmation key.
Returns:
str:
The validated confirmation key.
Raises:
serializers.ValidationError:
If there is no email confirmation with the given key or
the confirmation has expired.
"""
try:
confirmation = models.EmailConfirmation.objects.select_related(
"email__user"
).get(key=key)
except models.EmailConfirmation.DoesNotExist:
raise serializers.ValidationError(
_("The provided verification key is invalid.")
)
if confirmation.is_expired:
raise serializers.ValidationError(
_("That verification code has expired.")
)
# Cache confirmation instance
self._confirmation = confirmation
return key
class PasswordResetRequestSerializer(serializers.Serializer):
"""
Serializer for requesting a password reset.
"""
email = serializers.EmailField(
help_text=_("The email address to send the password reset to.")
)
def save(self):
"""
Send out a password reset if the provided data is valid.
If the provided email address exists and is verified, a reset
email is sent to the address.
Returns:
The password reset token if it was returned and ``None``
otherwise.
"""
try:
email = models.EmailAddress.objects.get(
email=self.validated_data["email"], is_verified=True
)
except models.EmailAddress.DoesNotExist:
return None
token = models.PasswordResetToken.objects.create(email=email)
token.send()
return token
class PasswordResetSerializer(serializers.Serializer):
"""
Serializer for reseting a user's password.
"""
key = serializers.UUIDField(
help_text=_(
"The key received by the user in the password reset " "email."
),
write_only=True,
)
password = serializers.CharField(
help_text=_("The user's new password."),
style={"input_type": "password"},
write_only=True,
)
def save(self):
"""
Reset the user's password if the provided information is valid.
"""
token = models.PasswordResetToken.objects.select_related(
"email__user"
).get(key=self.validated_data["key"])
token.email.user.set_password(self.validated_data["password"])
token.email.user.save()
logger.info("Reset password for %s", token.email.user)
token.delete()
def validate_key(self, key):
"""
Validate the provided reset key.
Returns:
The validated key.
Raises:
serializers.ValidationError:
If the provided key does not exist.
"""
if not models.PasswordResetToken.valid_tokens.filter(key=key).exists():
raise serializers.ValidationError(
_("The provided reset token does not exist, or is expired.")
)
return key
def validate_password(self, password):
"""
Validate the provided password by running it through Django's
password validation system.
Returns:
The validated password.
Raises:
ValidationError:
If the provided password does not pass the configured
password validators.
"""
password_validation.validate_password(password)
return password
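# --- Editorial sketch (not part of the original module) ----------------------
# A hedged illustration of how the two password-reset serializers above chain
# together; the email address and password below are made-up values.
def _example_password_reset_flow():
    request_serializer = PasswordResetRequestSerializer(
        data={"email": "alice@example.com"})
    request_serializer.is_valid(raise_exception=True)
    # Emails a reset token only if the address exists and is verified.
    token = request_serializer.save()
    if token is not None:
        reset_serializer = PasswordResetSerializer(
            data={"key": str(token.key), "password": "a-new-strong-password"})
        reset_serializer.is_valid(raise_exception=True)
        reset_serializer.save()  # sets the new password and deletes the token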
class RegistrationSerializer(serializers.ModelSerializer):
"""
Serializer for registering new users.
"""
email = serializers.EmailField()
class Meta(object):
extra_kwargs = {
"password": {
"style": {"input_type": "password"},
"write_only": True,
}
}
fields = (get_user_model().USERNAME_FIELD, "email", "password")
model = get_user_model()
def create(self, validated_data):
"""
Create a new user from the data passed to the serializer.
If the provided email has not been verified yet, the user is
created and a verification email is sent to the address.
Otherwise we send a notification to the email address that
someone attempted to register with an email that's already been
verified.
Args:
validated_data (dict):
The data passed to the serializer after it has been
validated.
Returns:
A new user created from the provided data.
"""
email = validated_data.pop("email")
password = validated_data.pop("password")
# We don't save the user instance yet in case the provided email
# address already exists.
user = get_user_model()(**validated_data)
user.set_password(password)
# We set an ephemeral email property so that it is included in
# the data returned by the serializer.
user.email = email
email_query = models.EmailAddress.objects.filter(email=email)
if email_query.exists():
existing_email = email_query.get()
existing_email.send_duplicate_notification()
else:
user.save()
email_instance = models.EmailAddress.objects.create(
email=email, is_primary=True, user=user
)
email_instance.send_confirmation()
signals.user_registered.send(sender=self.__class__, user=user)
return user
def validate_email(self, email):
"""
Validate the provided email address.
Args:
email:
The email address to validate.
Returns:
The provided email address, transformed to match the RFC
spec. Namely, the domain portion of the email must be
lowercase.
"""
user, domain = email.rsplit("@", 1)
return "@".join([user, domain.lower()])
def validate_password(self, password):
"""
Validate the provided password.
Args:
password (str):
The password provided by the user.
Returns:
str:
The validated password.
Raises:
ValidationError:
If the provided password doesn't pass Django's provided
password validation.
"""
password_validation.validate_password(password)
return password
class ResendVerificationSerializer(serializers.Serializer):
"""
Serializer for resending a verification email.
"""
email = serializers.EmailField()
def save(self):
"""
Resend a verification email to the provided address.
If the provided email is already verified no action is taken.
"""
try:
email = models.EmailAddress.objects.get(
email=self.validated_data["email"], is_verified=False
)
logger.debug(
"Resending verification email to %s",
self.validated_data["email"],
)
email.send_confirmation()
except models.EmailAddress.DoesNotExist:
logger.debug(
"Not resending verification email to %s because the address "
"doesn't exist in the database.",
self.validated_data["email"],
)
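To make the registration flow concrete, a hedged sketch of driving RegistrationSerializer directly; it assumes a configured Django project with this app installed and a user model whose USERNAME_FIELD is "username", and the field values are invented.

# Hedged sketch: registering a user through the serializer (assumed settings and values).
from rest_email_auth.serializers import RegistrationSerializer

serializer = RegistrationSerializer(
    data={
        "username": "alice",
        "email": "Alice@Example.COM",  # the domain is lowercased by validate_email
        "password": "a-sufficiently-strong-password",
    }
)
serializer.is_valid(raise_exception=True)
user = serializer.save()  # creates the user and sends a confirmation email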
| 28.446352
| 79
| 0.583283
|
c2958aa125af9557d0b71268e372438dc2cfa826
| 12,808
|
py
|
Python
|
versions/2.0.0/analysis_predictor_info/generate_artifacts.py
|
openvax/mhcflurry-motifs
|
37000dd5c0896cd021e4973ea310a2e2be94a8ef
|
[
"Apache-2.0"
] | null | null | null |
versions/2.0.0/analysis_predictor_info/generate_artifacts.py
|
openvax/mhcflurry-motifs
|
37000dd5c0896cd021e4973ea310a2e2be94a8ef
|
[
"Apache-2.0"
] | 1
|
2020-07-24T16:40:17.000Z
|
2020-07-24T16:40:17.000Z
|
versions/2.0.0/analysis_predictor_info/generate_artifacts.py
|
openvax/mhcflurry-motifs
|
37000dd5c0896cd021e4973ea310a2e2be94a8ef
|
[
"Apache-2.0"
] | null | null | null |
"""
Generate images for MHC binding motifs.
Note: a shared filesystem is assumed even when running on an HPC cluster.
The --out directory should be on an NFS filesystem and available to the workers.
"""
import sys
import argparse
import os
import numpy
import time
import collections
from functools import partial
import pandas
import tqdm
tqdm.monitor_interval = 0 # see https://github.com/tqdm/tqdm/issues/481
from mhcflurry.common import configure_logging
from mhcflurry.downloads import get_path
from mhcflurry.local_parallelism import (
add_local_parallelism_args,
worker_pool_with_gpu_assignments_from_args,
call_wrapped_kwargs)
from mhcflurry.cluster_parallelism import (
add_cluster_parallelism_args,
cluster_results_from_args)
# To avoid pickling large matrices to send to child processes when running in
# parallel, we use this global variable as a place to store data. Data that is
# stored here before creating the thread pool will be inherited to the child
# processes upon fork() call, allowing us to share large data with the workers
# via shared memory.
GLOBAL_DATA = {}
parser = argparse.ArgumentParser(usage=__doc__)
parser.add_argument(
"--affinity-predictor",
metavar="DIR",
help="Pan-allele class I affinity predictor")
parser.add_argument(
"--frequency-matrices",
metavar="CSV",
help="Frequency matrices")
parser.add_argument(
"--length-distributions",
metavar="CSV",
help="Length distributions")
parser.add_argument(
"--train-data",
metavar="CSV",
help="Training data")
parser.add_argument(
"--alleles",
nargs="+",
help="Alleles to process. If not specified all alleles are used")
parser.add_argument(
"--max-alleles",
type=int,
help="Max number of allelels to process. For debugging.")
parser.add_argument(
"--chunk-size",
type=int,
default=100,
help="Number of alleles per job")
parser.add_argument(
"--logo-lengths",
type=int,
nargs="+",
default=[8, 9, 10, 11],
help="Peptide lengths for motif logos")
parser.add_argument(
"--length-distribution-lengths",
nargs="+",
default=[8, 9, 10, 11, 12, 13, 14, 15],
type=int,
help="Peptide lengths for length distribution plots",
)
parser.add_argument(
"--logo-cutoff",
default=0.01,
type=float,
help="Fraction of top to use for motifs",
)
parser.add_argument(
"--length-cutoff",
default=0.01,
type=float,
help="Fraction of top to use for length distribution",
)
parser.add_argument(
"--out",
metavar="DIR",
required=True,
help="Directory to write results to")
add_local_parallelism_args(parser)
add_cluster_parallelism_args(parser)
def run():
from mhcflurry.amino_acid import COMMON_AMINO_ACIDS
args = parser.parse_args(sys.argv[1:])
configure_logging()
serial_run = not args.cluster_parallelism and args.num_jobs == 0
if not args.affinity_predictor:
args.affinity_predictor = get_path(
"models_class1_pan", "models.combined")
print("Using downloaded affinity predictor: ", args.affinity_predictor)
if not args.frequency_matrices:
args.frequency_matrices = os.path.join(
args.affinity_predictor, "frequency_matrices.csv.bz2")
if not args.length_distributions:
args.length_distributions = os.path.join(args.affinity_predictor,
"length_distributions.csv.bz2")
if not args.train_data:
args.train_data = os.path.join(args.affinity_predictor,
"train_data.csv.bz2")
frequency_matrices_df = pandas.read_csv(args.frequency_matrices)
length_distributions = pandas.read_csv(args.length_distributions)
train_data = pandas.read_csv(args.train_data)
alleles = args.alleles
if alleles:
print("Using specified alleles, ", *alleles)
else:
alleles = frequency_matrices_df.allele.unique()
if args.max_alleles:
alleles = alleles[:args.max_alleles]
print("Using %d alleles" % len(alleles), alleles)
amino_acids = sorted(COMMON_AMINO_ACIDS)
distribution = frequency_matrices_df.loc[
(frequency_matrices_df.cutoff_fraction == 1.0), amino_acids
].mean(0)
normalized_frequency_matrices = frequency_matrices_df.copy()
normalized_frequency_matrices.loc[:, amino_acids] = (
normalized_frequency_matrices[amino_acids] / distribution)
GLOBAL_DATA["args"] = args
GLOBAL_DATA["normalized_frequency_matrices"] = normalized_frequency_matrices
GLOBAL_DATA["length_distributions"] = length_distributions
GLOBAL_DATA["train_data"] = train_data
artifacts_out = os.path.join(args.out, "artifacts")
if not os.path.exists(args.out):
os.mkdir(args.out)
if not os.path.exists(artifacts_out):
os.mkdir(artifacts_out)
tasks = [
{
"task_num": i,
"allele": allele,
"out_dir": artifacts_out,
}
for (i, allele) in enumerate(alleles)
]
jobs = []
for task in tasks:
if not jobs or len(jobs[-1]['tasks']) >= args.chunk_size:
jobs.append({'tasks': []})
jobs[-1]['tasks'].append(task)
print("Generated %d tasks, packed into %d jobs" % (len(tasks), len(jobs)))
worker_pool = None
start = time.time()
if serial_run:
# Serial run
print("Running in serial.")
results = (
do_job(**job) for job in jobs)
elif args.cluster_parallelism:
# Run using separate processes HPC cluster.
print("Running on cluster.")
results = cluster_results_from_args(
args,
work_function=do_job,
work_items=jobs,
constant_data=GLOBAL_DATA,
input_serialization_method="dill",
result_serialization_method="pickle",
clear_constant_data=False)
else:
worker_pool = worker_pool_with_gpu_assignments_from_args(args)
print("Worker pool", worker_pool)
assert worker_pool is not None
for task in tasks:
task['constant_data'] = GLOBAL_DATA
results = worker_pool.imap_unordered(
partial(call_wrapped_kwargs, do_job),
jobs,
chunksize=1)
print("Reading results")
task_results = {}
for job_result in tqdm.tqdm(results, total=len(jobs)):
for task_result in job_result:
task_results[task_result['task_num']] = task_result
print("Received all results in %0.2f sec" % (time.time() - start))
artifacts_df = pandas.DataFrame(task_results).T.set_index("task_num")
normalized_frequency_matrices_out = os.path.join(
args.out, "normalized_frequency_matrices.csv")
normalized_frequency_matrices.to_csv(
normalized_frequency_matrices_out, index=False)
print("Wrote: ", normalized_frequency_matrices_out)
length_distributions_out = os.path.join(args.out,
"length_distributions.csv")
length_distributions.to_csv(length_distributions_out,
index=False)
print("Wrote: ", length_distributions_out)
artifacts_summary_out = os.path.join(args.out, "artifacts.csv")
artifacts_df.to_csv(artifacts_summary_out)
print("Wrote: ", artifacts_summary_out)
if worker_pool:
worker_pool.close()
worker_pool.join()
def do_job(tasks, constant_data=GLOBAL_DATA):
# Nested functions are so that the do_job function can be pickled for
# running on an HPC cluster.
GLOBAL_DATA = constant_data
def do_task(task_num, allele, out_dir, constant_data=GLOBAL_DATA):
args = constant_data['args']
normalized_frequency_matrices = constant_data[
'normalized_frequency_matrices'
]
length_distributions = constant_data[
'length_distributions'
]
train_data = constant_data[
'train_data'
]
logo_filename = write_logo(
normalized_frequency_matrices,
allele=allele,
lengths=args.logo_lengths,
cutoff=args.logo_cutoff,
models_label="standard",
out_dir=out_dir,
)
length_distribution_filename = write_length_distribution(
length_distributions,
allele=allele,
lengths=args.length_distribution_lengths,
cutoff=args.length_cutoff,
models_label="standard",
out_dir=out_dir)
(train_data_filename, num_train_points) = write_train_data(
train_data,
allele=allele,
models_label="standard",
out_dir=out_dir)
return {
'task_num': task_num,
'allele': allele,
'logo_filename': logo_filename,
'length_distribution_filename': length_distribution_filename,
'train_data_filename': train_data_filename,
'num_train_points': num_train_points,
}
def write_logo(
normalized_frequency_matrices,
allele,
lengths,
cutoff,
models_label,
out_dir):
import seaborn
from matplotlib import pyplot
import logomaker
import os
from mhcflurry.amino_acid import COMMON_AMINO_ACIDS
amino_acids = sorted(COMMON_AMINO_ACIDS)
fig = pyplot.figure(figsize=(8,10))
for (i, length) in enumerate(lengths):
ax = pyplot.subplot(len(lengths), 1, i + 1)
matrix = normalized_frequency_matrices.loc[
(normalized_frequency_matrices.allele == allele) &
(normalized_frequency_matrices.length == length) &
(normalized_frequency_matrices.cutoff_fraction == cutoff)
].set_index("position")[amino_acids]
if matrix.shape[0] == 0:
return None
matrix = (matrix.T / matrix.sum(1)).T # row normalize
ss_logo = logomaker.Logo(
matrix,
color_scheme="NajafabadiEtAl2017",
font_name="Arial",
width=.8,
vpad=.05,
fade_probabilities=True,
stack_order='small_on_top',
ax=ax,
)
pyplot.title(
"%s %d-mer" % (allele, length), y=0.85)
pyplot.xticks(matrix.index.values)
seaborn.despine()
pyplot.tight_layout()
name = "%s.motifs.%s.png" % (
allele.replace("*", "-").replace(":", "-"), models_label)
filename = os.path.abspath(os.path.join(out_dir, name))
pyplot.savefig(filename)
print("Wrote: ", filename)
fig.clear()
pyplot.close(fig)
return name
def write_length_distribution(
length_distributions_df, allele, lengths, cutoff, models_label, out_dir):
from matplotlib import pyplot
import seaborn
import os
length_distribution = length_distributions_df.loc[
(length_distributions_df.allele == allele) &
(length_distributions_df.cutoff_fraction == cutoff)
]
if length_distribution.shape[0] == 0:
return None
length_distribution = length_distribution.set_index(
"length").reindex(lengths).fillna(0.0).reset_index()
length_distribution.plot(
x="length", y="fraction", kind="bar", figsize=(5, 3))
fig = pyplot.gcf()
pyplot.title("%s" % allele, fontsize=10)
pyplot.xlabel("Peptide length", fontsize=10)
pyplot.xticks(rotation=0)
pyplot.ylim(ymin=0, ymax=1.0)
pyplot.ylabel("Fraction of top %0.1f%%" % (cutoff * 100.0), fontsize=10)
pyplot.gca().get_legend().remove()
pyplot.tight_layout()
seaborn.despine()
name = "%s.lengths.%s.png" % (
allele.replace("*", "-").replace(":", "-"), models_label)
filename = os.path.abspath(os.path.join(out_dir, name))
pyplot.savefig(filename)
print("Wrote: ", filename)
fig.clear()
pyplot.close(fig)
return name
def write_train_data(train_data, allele, models_label, out_dir):
import os
sub_train = train_data.loc[
train_data.allele == allele
]
name = None
if sub_train.shape[0] > 0:
name = "%s.train_data.%s.csv" % (
allele.replace("*", "-").replace(":", "-"), models_label)
filename = os.path.abspath(os.path.join(out_dir, name))
sub_train.to_csv(filename, index=False)
print("Wrote: ", filename)
return (name, len(sub_train))
return [do_task(constant_data=constant_data, **task) for task in tasks]
if __name__ == '__main__':
run()
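The GLOBAL_DATA pattern above relies on fork() copy-on-write semantics to hand large read-only objects to worker processes without pickling them per task. Below is a minimal hedged sketch of that pattern in isolation; the payload and worker function are invented, and on platforms that spawn rather than fork the inheritance does not happen.

# Hedged sketch: sharing large read-only data with forked workers (illustrative payload).
import multiprocessing

import numpy

GLOBAL_DATA = {}

def worker(row_index):
    # Child processes inherit GLOBAL_DATA via fork() copy-on-write, so the
    # matrix is not serialized and shipped with every task.
    matrix = GLOBAL_DATA["matrix"]
    return float(matrix[row_index].sum())

if __name__ == "__main__":
    GLOBAL_DATA["matrix"] = numpy.ones((10000, 1000))
    with multiprocessing.get_context("fork").Pool(4) as pool:
        print(pool.map(worker, range(8)))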
| 30.714628
| 85
| 0.636711
|
1e06f3fee95b6bc07ef5304fec3b45191afcb8e5
| 5,762
|
py
|
Python
|
torch/storage.py
|
Hacky-DH/pytorch
|
80dc4be615854570aa39a7e36495897d8a040ecc
|
[
"Intel"
] | 24
|
2020-11-02T21:25:12.000Z
|
2022-03-17T07:20:33.000Z
|
torch/storage.py
|
Hacky-DH/pytorch
|
80dc4be615854570aa39a7e36495897d8a040ecc
|
[
"Intel"
] | 1
|
2022-01-18T12:17:29.000Z
|
2022-01-18T12:17:29.000Z
|
torch/storage.py
|
Hacky-DH/pytorch
|
80dc4be615854570aa39a7e36495897d8a040ecc
|
[
"Intel"
] | 12
|
2020-11-06T05:00:37.000Z
|
2022-01-30T19:17:36.000Z
|
import io
import torch
from ._utils import _type, _cuda
from typing import Any, TypeVar, Type
T = TypeVar('T', bound='_StorageBase')
class _StorageBase(object):
_cdata: Any
is_cuda: bool = False
is_sparse: bool = False
def __init__(self, *args, **kwargs): ... # noqa: E704
def __len__(self) -> int: ... # noqa: E704
def __getitem__(self, idx): ... # noqa: E704
def copy_(self, source: T) -> T: ... # noqa: E704
def size(self) -> int: ... # noqa: E704
def type(self, dtype: str = None, non_blocking: bool = False) -> T: ... # noqa: E704
def cuda(self, device=None, non_blocking=False, **kwargs) -> T: ... # noqa: E704
def element_size(self) -> int: ... # noqa: E704
def get_device(self) -> int: ... # noqa: E704
# Defined in torch/csrc/generic/StorageSharing.cpp
def _share_filename_(self): ... # noqa: E704
def _share_fd_(self): ... # noqa: E704
@classmethod
def _new_using_filename(cls: Type[T], size: int) -> T: ... # noqa: E704
@classmethod
def _new_using_fd(cls: Type[T], size: int) -> T: ... # noqa: E704
def __str__(self):
content = ' ' + '\n '.join(str(self[i]) for i in range(len(self)))
return content + f'\n[{torch.typename(self)} of size {len(self)}]'
def __repr__(self):
return str(self)
def __iter__(self):
return iter(map(lambda i: self[i], range(self.size())))
def __copy__(self):
return self.clone()
def __deepcopy__(self, memo):
memo = memo.setdefault('torch', {})
if self._cdata in memo:
return memo[self._cdata]
new_storage = self.clone()
memo[self._cdata] = new_storage
return new_storage
def __reduce__(self):
b = io.BytesIO()
torch.save(self, b, _use_new_zipfile_serialization=False)
return (_load_from_bytes, (b.getvalue(),))
def __sizeof__(self):
return super(_StorageBase, self).__sizeof__() + self.element_size() * self.size()
def clone(self):
"""Returns a copy of this storage"""
device = self.get_device() if self.is_cuda else -1
with torch.cuda.device(device):
return type(self)(self.size()).copy_(self)
def tolist(self):
"""Returns a list containing the elements of this storage"""
return list(self)
def cpu(self):
"""Returns a CPU copy of this storage if it's not already on the CPU"""
return self.type(getattr(torch, self.__class__.__name__))
def double(self):
"""Casts this storage to double type"""
return self.type(type(self).__module__ + '.DoubleStorage')
def float(self):
"""Casts this storage to float type"""
return self.type(type(self).__module__ + '.FloatStorage')
def half(self):
"""Casts this storage to half type"""
return self.type(type(self).__module__ + '.HalfStorage')
def long(self):
"""Casts this storage to long type"""
return self.type(type(self).__module__ + '.LongStorage')
def int(self):
"""Casts this storage to int type"""
return self.type(type(self).__module__ + '.IntStorage')
def short(self):
"""Casts this storage to short type"""
return self.type(type(self).__module__ + '.ShortStorage')
def char(self):
"""Casts this storage to char type"""
return self.type(type(self).__module__ + '.CharStorage')
def byte(self):
"""Casts this storage to byte type"""
return self.type(type(self).__module__ + '.ByteStorage')
def bool(self):
"""Casts this storage to bool type"""
return self.type(type(self).__module__ + '.BoolStorage')
def bfloat16(self):
"""Casts this storage to bfloat16 type"""
return self.type(type(self).__module__ + '.BFloat16Storage')
def complex_double(self):
"""Casts this storage to complex double type"""
return self.type(type(self).__module__ + '.ComplexDoubleStorage')
def complex_float(self):
"""Casts this storage to complex float type"""
return self.type(type(self).__module__ + '.ComplexFloatStorage')
def pin_memory(self):
"""Copies the storage to pinned memory, if it's not already pinned."""
if self.is_cuda:
raise TypeError(f"cannot pin '{self.type()}' only CPU memory can be pinned")
import torch.cuda
allocator = torch.cuda._host_allocator() # type: ignore[attr-defined]
return type(self)(self.size(), allocator=allocator).copy_(self)
def share_memory_(self):
"""Moves the storage to shared memory.
This is a no-op for storages already in shared memory and for CUDA
storages, which do not need to be moved for sharing across processes.
Storages in shared memory cannot be resized.
Returns: self
"""
from torch.multiprocessing import get_sharing_strategy
if self.is_cuda:
pass # CUDA doesn't use POSIX shared memory
elif get_sharing_strategy() == 'file_system':
self._share_filename_()
else:
self._share_fd_()
return self
@classmethod
def _new_shared(cls, size):
"""Creates a new storage in shared memory with the same data type"""
from torch.multiprocessing import get_sharing_strategy
if cls.is_cuda:
return cls(size)
elif get_sharing_strategy() == 'file_system':
return cls._new_using_filename(size)
else:
return cls._new_using_fd(size)
def _load_from_bytes(b):
return torch.load(io.BytesIO(b))
_StorageBase.type = _type # type: ignore[assignment]
_StorageBase.cuda = _cuda # type: ignore[assignment]
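For readers new to storages, a brief hedged sketch of how the methods above are exercised from user code; the tensor values are arbitrary and the behaviour is sketched against the PyTorch version this file belongs to.

# Hedged usage sketch for the storage API above (arbitrary values).
import torch

t = torch.tensor([1.0, 2.0, 3.0])
s = t.storage()            # the FloatStorage backing the tensor
print(len(s), s.tolist())  # 3 [1.0, 2.0, 3.0]

d = s.double()             # cast: returns a new DoubleStorage
c = s.clone()              # independent copy of the same data

s.share_memory_()          # move to shared memory; no-op if already shared
print(type(s).__name__)    # FloatStorage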
| 34.710843
| 89
| 0.621659
|
64eab13c9bd7930b1335d3ff7e0602c0507eb25d
| 977
|
py
|
Python
|
Day37_Habit_Tracker/main.py
|
syt1209/PythonProjects
|
0409dbd3c0b0ddf00debc38875059c828eb31dec
|
[
"MIT"
] | 1
|
2021-02-16T00:59:29.000Z
|
2021-02-16T00:59:29.000Z
|
Day37_Habit_Tracker/main.py
|
syt1209/PythonProjects
|
0409dbd3c0b0ddf00debc38875059c828eb31dec
|
[
"MIT"
] | null | null | null |
Day37_Habit_Tracker/main.py
|
syt1209/PythonProjects
|
0409dbd3c0b0ddf00debc38875059c828eb31dec
|
[
"MIT"
] | null | null | null |
import requests
from datetime import datetime
USERNAME = "ys2021"
TOKEN = "abcdefghij"
pixela_endpoint = "https://pixe.la/v1/users"
user_params = {
"token": TOKEN,
"username": USERNAME,
"agreeTermsOfService": "yes",
"notMinor": "yes",
}
# response = requests.post(url=pixela_endpoint, json=user_params)
# print(response.text)
graph_endpoint = f"{pixela_endpoint}/{USERNAME}/graphs"
GRAPHID = "graph1"
graph_config = {
"id": GRAPHID,
"name": "Python Coding",
"unit": "min",
"type": "int",
"color": "sora",
}
headers = {
"X-USER-TOKEN": TOKEN,
}
# response_graph = requests.post(url=graph_endpoint, json=graph_config, headers=headers)
graph_pixel_endpoint = f"{pixela_endpoint}/{USERNAME}/graphs/{GRAPHID}"
today = datetime.now()
pixel_config = {
"date": today.strftime("%Y%m%d"),
"quantity": "30",
}
response_pixel = requests.post(url=graph_pixel_endpoint, json=pixel_config, headers=headers)
print(response_pixel.text)
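For completeness, updating or deleting a pixel follows the same endpoint pattern with PUT and DELETE. The sketch below assumes the Pixela convention of addressing a pixel by appending its yyyyMMdd date to the graph endpoint; check the Pixela docs before relying on it.

# Hedged sketch: updating (and optionally deleting) today's pixel.
# The date-suffixed endpoint is an assumption based on the Pixela API convention.
update_endpoint = f"{graph_pixel_endpoint}/{today.strftime('%Y%m%d')}"

new_pixel_data = {
    "quantity": "45",
}
response_update = requests.put(url=update_endpoint, json=new_pixel_data, headers=headers)
print(response_update.text)

# response_delete = requests.delete(url=update_endpoint, headers=headers)
# print(response_delete.text)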
| 21.23913
| 92
| 0.686796
|
ac62518898d88428270dafd18225f3e8514e6ef0
| 139,669
|
py
|
Python
|
tensorflow/python/framework/ops_test.py
|
neochristou/tensorflow
|
50b55bfc5c9132c3bd82505181380bffbb47a5ff
|
[
"Apache-2.0"
] | 4
|
2015-11-12T20:37:23.000Z
|
2021-05-25T20:06:32.000Z
|
tensorflow/python/framework/ops_test.py
|
donny-stacks/tensorflow
|
1fb338b1c42930c0eef4d0b4d8d5fdf24a678654
|
[
"Apache-2.0"
] | 1
|
2021-08-28T00:32:59.000Z
|
2021-08-28T00:34:24.000Z
|
tensorflow/python/framework/ops_test.py
|
donny-stacks/tensorflow
|
1fb338b1c42930c0eef4d0b4d8d5fdf24a678654
|
[
"Apache-2.0"
] | 4
|
2022-01-13T11:23:44.000Z
|
2022-03-02T11:11:42.000Z
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.framework.ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gc
import os
import threading
import weakref
from absl.testing import parameterized
import numpy as np
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import tensor_shape_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.autograph.core import ag_ctx
from tensorflow.python.client import session
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function as eager_function
from tensorflow.python.eager import wrap_function
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import function
from tensorflow.python.framework import indexed_slices
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_ops
from tensorflow.python.framework import test_util
from tensorflow.python.framework import type_spec
from tensorflow.python.framework import versions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import resources
from tensorflow.python.ops import special_math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
import tensorflow.python.ops.gradients # pylint: disable=unused-import
from tensorflow.python.platform import googletest
from tensorflow.python.util import compat
class ResourceTest(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
def testBuildGraph(self):
with self.cached_session():
pt = test_ops.stub_resource_handle_op(container="a", shared_name="b")
test_ops.resource_create_op(pt).run()
@test_util.run_deprecated_v1
def testInitialize(self):
with self.cached_session():
handle = test_ops.stub_resource_handle_op(container="a", shared_name="b")
resources.register_resource(
handle=handle,
create_op=test_ops.resource_create_op(handle),
is_initialized_op=test_ops.resource_initialized_op(handle))
self.assertEqual(
len(
resources.report_uninitialized_resources(
resources.shared_resources()).eval()), 1)
resources.initialize_resources(resources.shared_resources()).run()
self.assertEqual(
len(
resources.report_uninitialized_resources(
resources.shared_resources()).eval()), 0)
class TensorAndShapeTest(test_util.TensorFlowTestCase):
def testShape(self):
op = ops.Operation(
ops._NodeDef("FloatOutput", "myop"), ops.Graph(), [], [dtypes.float32])
t = op.outputs[0]
self.assertEqual(tensor_shape.unknown_shape(), t.get_shape())
t.set_shape([1, 2, 3])
self.assertEqual([1, 2, 3], t.get_shape())
def testIterable(self):
if not context.executing_eagerly():
self.skipTest("Eager-mode test")
op = ops.Operation(
ops._NodeDef("FloatOutput", "myop"), ops.Graph(), [], [dtypes.float32])
t = op.outputs[0]
with self.assertRaisesRegex(TypeError, "Cannot iterate"):
iter(t)
def testIterableGraph(self):
if context.executing_eagerly():
self.skipTest("Graph-mode test")
op = ops.Operation(
ops._NodeDef("FloatOutput", "myop"), ops.Graph(), [], [dtypes.float32])
t = op.outputs[0]
with self.assertRaisesRegex(TypeError, "iterating.*not allowed in Graph"):
next(iter(t))
with self.assertRaisesRegex(TypeError, "iterating.*AutoGraph did convert"):
with ag_ctx.ControlStatusCtx(ag_ctx.Status.ENABLED):
next(iter(t))
with self.assertRaisesRegex(TypeError, "iterating.*AutoGraph is disabled"):
with ag_ctx.ControlStatusCtx(ag_ctx.Status.DISABLED):
next(iter(t))
def testImplicitBool(self):
op = ops.Operation(
ops._NodeDef("FloatOutput", "myop"), ops.Graph(), [], [dtypes.bool])
t = op.outputs[0]
with self.assertRaisesRegex(TypeError,
"using.*as a.*bool.*not allowed in Graph"):
bool(t)
with self.assertRaisesRegex(TypeError,
"using.*as a.*bool.*AutoGraph did convert"):
with ag_ctx.ControlStatusCtx(ag_ctx.Status.ENABLED):
bool(t)
with self.assertRaisesRegex(TypeError,
"using.*as a.*bool.*AutoGraph is disabled"):
with ag_ctx.ControlStatusCtx(ag_ctx.Status.DISABLED):
bool(t)
def testAddShape(self):
with self.cached_session():
a = array_ops.zeros([2, 3])
b = array_ops.ones([1, 3])
c = a + b
self.assertEqual([2, 3], c.shape)
@test_util.run_deprecated_v1
def testUnknownDim(self):
with self.cached_session():
a = array_ops.placeholder(dtype=dtypes.float32, shape=[2, None, 3])
b = array_ops.placeholder(dtype=dtypes.float32, shape=[2, None, 3])
c = a + b
self.assertEqual([2, None, 3], c.shape.as_list())
@test_util.run_deprecated_v1
def testUnknownShape(self):
with self.cached_session():
a = array_ops.placeholder(dtype=dtypes.float32, shape=None)
b = array_ops.ones([1, 3])
c = a + b
self.assertEqual(tensor_shape.unknown_shape(), c.shape)
@test_util.run_deprecated_v1
def testScalarShape(self):
with self.cached_session():
a = array_ops.placeholder(dtype=dtypes.float32, shape=[])
b = array_ops.ones([])
c = a + b
self.assertEqual(tensor_shape.TensorShape([]), c.shape)
@test_util.run_deprecated_v1
def testShapeFunctionError(self):
with self.cached_session():
a = array_ops.ones([1, 2, 3])
b = array_ops.ones([4, 5, 6])
with self.assertRaisesRegex(
ValueError, r"Dimensions must be equal, but are 2 and 5 for .*add"
r".*Add(V2)?.* with input shapes: \[1,2,3\], \[4,5,6\]."):
_ = a + b
def testNumpyArray(self):
with ops.Graph().as_default():
x = array_ops.ones((3, 4), name="test_ones")
with self.assertRaisesRegex(NotImplementedError,
r"Cannot convert a symbolic.+test_ones"):
np.array(x)
with self.assertRaisesRegex(TypeError, "not well defined.+test_ones"):
len(x)
# EagerTensors should still behave as numpy arrays.
with context.eager_mode():
x = array_ops.ones((3, 4))
self.assertAllEqual(x, np.ones((3, 4)))
self.assertAllEqual(np.array(x), np.ones((3, 4)))
self.assertLen(x, 3)
def testConstructor(self):
a = array_ops.ones([])
for name in ["T", "astype", "ravel", "transpose", "reshape", "clip", "size",
"tolist", "data"]:
with self.assertRaisesRegex(
AttributeError, r"If you are looking for numpy-related methods"):
getattr(a, name)
with self.assertRaisesRegex(
AttributeError, r"object has no attribute"):
a.foo_bar()
def testRef(self):
x1 = constant_op.constant(3)
x2 = x1
y = constant_op.constant(3)
z = constant_op.constant([6, 10])
w = variables.Variable(5)
self.assertEqual(x1.ref(), x1.ref())
self.assertEqual(x2.ref(), x2.ref())
self.assertEqual(x1.ref(), x2.ref())
self.assertEqual(y.ref(), y.ref())
self.assertEqual(z.ref(), z.ref())
self.assertEqual(w.ref(), w.ref())
self.assertNotEqual(x1.ref(), y.ref())
self.assertNotEqual(x1.ref(), z.ref())
self.assertNotEqual(x1.ref(), w.ref())
self.assertNotEqual(y.ref(), z.ref())
self.assertNotEqual(y.ref(), w.ref())
self.assertNotEqual(z.ref(), w.ref())
def testRefDeref(self):
x1 = constant_op.constant(3)
x2 = x1
y = constant_op.constant(3)
z = constant_op.constant([6, 10])
w = variables.Variable(5)
self.assertIs(x1, x1.ref().deref())
self.assertIs(x2, x2.ref().deref())
self.assertIs(x1, x2.ref().deref())
self.assertIs(x2, x1.ref().deref())
self.assertIs(y, y.ref().deref())
self.assertIs(z, z.ref().deref())
self.assertIsNot(x1, y.ref().deref())
self.assertIsNot(x1, z.ref().deref())
self.assertIsNot(x1, w.ref().deref())
self.assertIsNot(y, z.ref().deref())
self.assertIsNot(y, w.ref().deref())
self.assertIsNot(z, w.ref().deref())
def testRefInSet(self):
x1 = constant_op.constant(3)
x2 = x1
y = constant_op.constant(3)
z = constant_op.constant([6, 10])
w = variables.Variable(5)
self.assertEqual(x1.ref(), x2.ref())
tensor_set = {
x1.ref(),
x2.ref(),
y.ref(),
z.ref(),
w.ref(),
}
self.assertLen(tensor_set, 4)
self.assertIn(x1.ref(), tensor_set)
self.assertIn(x2.ref(), tensor_set)
self.assertIn(y.ref(), tensor_set)
self.assertIn(z.ref(), tensor_set)
self.assertIn(w.ref(), tensor_set)
def testRefInDict(self):
x1 = constant_op.constant(3)
x2 = x1
y = constant_op.constant(3)
z = constant_op.constant([6, 10])
w = variables.Variable(5)
self.assertEqual(x1.ref(), x2.ref())
tensor_dict = {
x1.ref(): "x1",
y.ref(): "y",
z.ref(): "z",
w.ref(): "w",
}
self.assertLen(tensor_dict, 4)
# Overwriting x1
tensor_dict[x2.ref()] = "x2"
self.assertLen(tensor_dict, 4)
self.assertEqual(tensor_dict[x1.ref()], "x2")
self.assertEqual(tensor_dict[x2.ref()], "x2")
self.assertEqual(tensor_dict[y.ref()], "y")
self.assertEqual(tensor_dict[z.ref()], "z")
self.assertEqual(tensor_dict[w.ref()], "w")
def testTensorRefStrong(self):
x = constant_op.constant(1.)
x_ref = x.ref()
del x
self.assertIsNotNone(x_ref.deref())
def testVariableRefStrong(self):
x = variables.Variable(1.)
x_ref = x.ref()
del x
self.assertIsNotNone(x_ref.deref())
@test_util.run_in_graph_and_eager_modes
def testBitwiseAndNumeric(self):
x = constant_op.constant([0, 1, 3])
y = constant_op.constant([1, 1, 1])
z = x & y
self.assertAllEqual(z, [0, 1, 1])
@test_util.run_in_graph_and_eager_modes
def testBitwiseAndBool(self):
x = constant_op.constant([False, False, True, True])
y = constant_op.constant([False, True, False, True])
z = x & y
self.assertAllEqual(z, [False, False, False, True])
@test_util.run_in_graph_and_eager_modes
def testBitwiseAndErrors(self):
x_int = constant_op.constant(0)
x_bool = constant_op.constant(True)
if context.executing_eagerly(): # :(
expected_errtype = errors.InvalidArgumentError
else:
expected_errtype = TypeError
with self.assertRaises(expected_errtype):
_ = x_int & x_bool
with self.assertRaises(expected_errtype):
_ = x_int & constant_op.constant("a")
with self.assertRaises(expected_errtype):
_ = x_bool & x_int
with self.assertRaises(expected_errtype):
_ = x_bool & constant_op.constant("a")
with self.assertRaises(expected_errtype):
_ = constant_op.constant("a") & constant_op.constant("b")
@test_util.run_in_graph_and_eager_modes
def testBitwiseOrNumeric(self):
x = constant_op.constant([0, 1, 2])
y = constant_op.constant([1, 1, 1])
z = x | y
self.assertAllEqual(z, [1, 1, 3])
@test_util.run_in_graph_and_eager_modes
def testBitwiseOrBool(self):
x = constant_op.constant([False, False, True, True])
y = constant_op.constant([False, True, False, True])
z = x | y
self.assertAllEqual(z, [False, True, True, True])
@test_util.run_in_graph_and_eager_modes
def testBitwiseOrErrors(self):
x_int = constant_op.constant(0)
x_bool = constant_op.constant(True)
if context.executing_eagerly(): # :(
expected_errtype = errors.InvalidArgumentError
else:
expected_errtype = TypeError
with self.assertRaises(expected_errtype):
_ = x_int | x_bool
with self.assertRaises(expected_errtype):
_ = x_int | constant_op.constant("a")
with self.assertRaises(expected_errtype):
_ = x_bool | x_int
with self.assertRaises(expected_errtype):
_ = x_bool | constant_op.constant("a")
with self.assertRaises(expected_errtype):
_ = constant_op.constant("a") | constant_op.constant("b")
@test_util.run_in_graph_and_eager_modes
def testBitwiseXorNumeric(self):
x = constant_op.constant([0, 1, 3])
y = constant_op.constant([1, 1, 1])
z = x ^ y
self.assertAllEqual(z, [1, 0, 2])
@test_util.run_in_graph_and_eager_modes
def testBitwiseXorBool(self):
x = constant_op.constant([False, False, True, True])
y = constant_op.constant([False, True, False, True])
z = x ^ y
self.assertAllEqual(z, [False, True, True, False])
@test_util.run_in_graph_and_eager_modes
def testBitwiseXorErrors(self):
x_int = constant_op.constant(0)
x_bool = constant_op.constant(True)
if context.executing_eagerly(): # :(
expected_errtype = errors.InvalidArgumentError
else:
expected_errtype = TypeError
with self.assertRaises(expected_errtype):
_ = x_int ^ x_bool
with self.assertRaises(expected_errtype):
_ = x_int ^ constant_op.constant("a")
with self.assertRaises(expected_errtype):
_ = x_bool ^ x_int
with self.assertRaises(expected_errtype):
_ = x_bool ^ constant_op.constant("a")
with self.assertRaises(expected_errtype):
_ = constant_op.constant("a") ^ constant_op.constant("b")
@test_util.run_in_graph_and_eager_modes
def testBitwiseNotNumeric(self):
x = constant_op.constant([0, dtypes.int32.min, 1])
# pylint: disable=invalid-unary-operand-type
y = ~x
self.assertAllEqual(y, [-1, dtypes.int32.max, -2])
@test_util.run_in_graph_and_eager_modes
def testBitwiseNotBool(self):
x = constant_op.constant([False, True])
# pylint: disable=invalid-unary-operand-type
y = ~x
self.assertAllEqual(y, [True, False])
@test_util.run_in_graph_and_eager_modes
def testBitwiseNotErrors(self):
if context.executing_eagerly(): # :(
expected_errtype = errors.InvalidArgumentError
else:
expected_errtype = TypeError
# pylint: disable=invalid-unary-operand-type
with self.assertRaises(expected_errtype):
_ = ~constant_op.constant("a")
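# --- Editorial sketch (not part of the original test file) -------------------
# The ref()/deref() tests above rely on Tensor.ref() returning a hashable,
# stable reference object; a minimal hedged illustration of that pattern:
def _example_tensor_ref_as_key():
  x = constant_op.constant(3)
  y = constant_op.constant(3)
  # ref() provides a hashable dict key regardless of execution mode.
  per_tensor_names = {x.ref(): "x", y.ref(): "y"}
  assert per_tensor_names[x.ref()] == "x"
  assert per_tensor_names[y.ref()] == "y"
  assert x.ref().deref() is x  # deref() recovers the original tensor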
@test_util.run_all_in_graph_and_eager_modes
class IndexedSlicesTest(test_util.TensorFlowTestCase):
def testToTensor(self):
values = constant_op.constant([2, 3, 5, 7], shape=[2, 2])
indices = constant_op.constant([0, 2])
x = ops.IndexedSlices(values, indices)
with self.assertRaises(ValueError):
tensor = ops.convert_to_tensor(x, name="tensor")
self.assertEqual(tensor_shape.TensorShape(None), x.shape)
dense_shape = constant_op.constant([3, 2])
y = ops.IndexedSlices(values, indices, dense_shape)
tensor = ops.convert_to_tensor(y, name="tensor")
self.assertAllEqual(tensor.shape, y.shape)
self.assertAllEqual(self.evaluate(tensor), [[2, 3], [0, 0], [5, 7]])
@test_util.run_gpu_only
def testEagerCopy(self):
with context.eager_mode():
var = variables.Variable([[0.0], [0.0], [0.0], [0.0]], name="tensor")
with backprop.GradientTape() as tape:
a = array_ops.gather(array_ops.gather(var, [0, 1]), [0, 1])
b = array_ops.gather(array_ops.gather(var, [2, 3]), [0, 1])
r = special_math_ops.einsum("ij,ij->i", a, b)
g = tape.gradient(r, [var])[0]
values = g.values if isinstance(g, ops.IndexedSlices) else g
self.assertAllEqual(values.get_shape(), [4, 1])
def testNegation(self):
values = constant_op.constant([2, 3, 5, 7], shape=[2, 2])
indices = constant_op.constant([0, 2])
x = -ops.IndexedSlices(values, indices)
self.assertAllEqual(x.values, [[-2, -3], [-5, -7]])
self.assertAllEqual(x.indices, [0, 2])
def testScalarMul(self):
values = constant_op.constant([2, 3, 5, 7], shape=[2, 2])
indices = constant_op.constant([0, 2])
x = math_ops.scalar_mul(-2, ops.IndexedSlices(values, indices))
self.assertAllEqual(x.values, [[-4, -6], [-10, -14]])
self.assertAllEqual(x.indices, [0, 2])
@test_util.run_all_in_graph_and_eager_modes
class IndexedSlicesSpecTest(test_util.TensorFlowTestCase,
parameterized.TestCase):
def assertAllTensorsEqual(self, list1, list2):
self.assertLen(list1, len(list2))
for (t1, t2) in zip(list1, list2):
self.assertAllEqual(t1, t2)
def testConstruction(self):
spec1 = indexed_slices.IndexedSlicesSpec()
self.assertIsNone(spec1._shape.rank)
self.assertEqual(spec1._values_dtype, dtypes.float32)
self.assertEqual(spec1._indices_dtype, dtypes.int64)
self.assertIsNone(spec1._dense_shape_dtype)
self.assertEqual(spec1._indices_shape.as_list(), [None])
spec2 = indexed_slices.IndexedSlicesSpec([None, None], dtypes.string,
dtypes.int32, dtypes.int64, [10])
self.assertEqual(spec2._shape.as_list(), [None, None])
self.assertEqual(spec2._values_dtype, dtypes.string)
self.assertEqual(spec2._indices_dtype, dtypes.int32)
self.assertEqual(spec2._dense_shape_dtype, dtypes.int64)
self.assertEqual(spec2._indices_shape.as_list(), [10])
def testValueType(self):
spec1 = indexed_slices.IndexedSlicesSpec()
self.assertEqual(spec1.value_type, ops.IndexedSlices)
@parameterized.parameters([
(indexed_slices.IndexedSlicesSpec(),
(tensor_shape.TensorShape(None), dtypes.float32, dtypes.int64, None,
tensor_shape.TensorShape([None]))),
(indexed_slices.IndexedSlicesSpec(shape=[5, None, None]),
(tensor_shape.TensorShape([5, None, None]), dtypes.float32,
dtypes.int64, None, tensor_shape.TensorShape([None]))),
(indexed_slices.IndexedSlicesSpec(
dtype=dtypes.int32, dense_shape_dtype=dtypes.int64),
(tensor_shape.TensorShape(None), dtypes.int32, dtypes.int64,
dtypes.int64, tensor_shape.TensorShape([None]))),
(indexed_slices.IndexedSlicesSpec(indices_shape=[100]),
(tensor_shape.TensorShape(None), dtypes.float32, dtypes.int64, None,
tensor_shape.TensorShape([100]))),
]) # pyformat: disable
def testSerialize(self, spec, expected):
serialization = spec._serialize()
# TensorShape has an unconventional definition of equality, so we can't use
# assertEqual directly here. But repr() is deterministic and lossless for
# the expected values, so we can use that instead.
self.assertEqual(repr(serialization), repr(expected))
@parameterized.parameters([
(indexed_slices.IndexedSlicesSpec(dtype=dtypes.string), (
tensor_spec.TensorSpec(None, dtypes.string),
tensor_spec.TensorSpec([None], dtypes.int64),
)),
(indexed_slices.IndexedSlicesSpec(
dtype=dtypes.string, dense_shape_dtype=dtypes.int32), (
tensor_spec.TensorSpec(None, dtypes.string),
tensor_spec.TensorSpec([None], dtypes.int64),
tensor_spec.TensorSpec([None], dtypes.int32),
)),
(indexed_slices.IndexedSlicesSpec(
shape=[5, 10, 15], dense_shape_dtype=dtypes.int32), (
tensor_spec.TensorSpec([None, 10, 15], dtypes.float32),
tensor_spec.TensorSpec([None], dtypes.int64),
tensor_spec.TensorSpec([3], dtypes.int32),
)),
(indexed_slices.IndexedSlicesSpec(
shape=[5, 10, 15], dense_shape_dtype=dtypes.int32,
indices_shape=[20]), (
tensor_spec.TensorSpec([20, 10, 15], dtypes.float32),
tensor_spec.TensorSpec([20], dtypes.int64),
tensor_spec.TensorSpec([3], dtypes.int32),
)),
])
def testComponentSpecs(self, spec, expected):
self.assertEqual(spec._component_specs, expected)
@parameterized.parameters([
{
"spec": indexed_slices.IndexedSlicesSpec(),
"values": [3.0, 5.0],
"indices": [5, 10]
},
{
"spec":
indexed_slices.IndexedSlicesSpec(dense_shape_dtype=dtypes.int32),
"values": [3.0, 5.0],
"indices": [5, 10],
"dense_shape": [100]
},
])
def testToFromComponents(self, spec, indices, values, dense_shape=None):
x = ops.IndexedSlices(indices, values, dense_shape)
actual_components = spec._to_components(x)
if dense_shape is None:
self.assertAllTensorsEqual(actual_components, [indices, values])
else:
self.assertAllTensorsEqual(actual_components,
[indices, values, dense_shape])
st_reconstructed = spec._from_components(actual_components)
self.assertAllEqual(x.indices, st_reconstructed.indices)
self.assertAllEqual(x.values, st_reconstructed.values)
if dense_shape is None:
self.assertIsNone(st_reconstructed.dense_shape)
else:
self.assertAllEqual(x.dense_shape, st_reconstructed.dense_shape)
@test_util.run_v1_only("IndexedSlicesValue is deprecated in v2")
def testFromNumpyComponents(self):
indices = np.array([3, 8])
values = np.array([1.0, 9.0])
dense_shape = np.array([100])
spec1 = indexed_slices.IndexedSlicesSpec(dense_shape_dtype=dtypes.int32)
st1 = spec1._from_components((values, indices, dense_shape))
self.assertIsInstance(st1, indexed_slices.IndexedSlicesValue)
self.assertAllEqual(st1.indices, indices)
self.assertAllEqual(st1.values, values)
self.assertAllEqual(st1.dense_shape, dense_shape)
spec2 = indexed_slices.IndexedSlicesSpec()
st2 = spec2._from_components((values, indices))
self.assertIsInstance(st2, indexed_slices.IndexedSlicesValue)
self.assertAllEqual(st2.indices, indices)
self.assertAllEqual(st2.values, values)
self.assertIsNone(st2.dense_shape)
class NodeDefConstructorTest(test_util.TensorFlowTestCase):
def testNoArgs(self):
nodedef = ops._NodeDef("None", "bar")
self.assertProtoEquals("op: 'None' name: 'bar'", nodedef)
def _apply_op(g, *args, **kwargs):
op = g.create_op(*args, **kwargs)
if len(op.outputs) == 1:
return op.outputs[0]
else:
return op.outputs
class OperationTest(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
def testNoInputs(self):
op = test_ops.float_output_string_output(name="myop").a.op
self.assertEqual(2, len(op.values()))
self.assertEqual(0, len(op.inputs))
self.assertEqual("myop", op.name)
float_t, label_str_t = op.values()
self.assertEqual(dtypes.float32, float_t.dtype)
self.assertEqual(op, float_t.op)
self.assertEqual(0, float_t._value_index)
self.assertEqual(0, len(float_t.consumers()))
self.assertEqual("myop", float_t._as_node_def_input())
self.assertEqual(dtypes.string, label_str_t.dtype)
self.assertEqual(op, label_str_t.op)
self.assertEqual(1, label_str_t._value_index)
self.assertEqual(0, len(label_str_t.consumers()))
self.assertEqual("myop:1", label_str_t._as_node_def_input())
self.assertProtoEquals("op:'FloatOutputStringOutput' name:'myop'",
op.node_def)
@test_util.run_deprecated_v1
def testNoOutputs(self):
op1 = test_ops.float_output(name="myop1").op
float_t, = op1.values()
op2 = test_ops.float_input(float_t, name="myop2")
self.assertEqual(0, len(op2.values()))
self.assertEqual(1, len(op2.inputs))
self.assertIs(float_t, op2.inputs[0])
self.assertEqual(1, len(float_t.consumers()))
self.assertEqual(op2, float_t.consumers()[0])
self.assertProtoEquals("op:'FloatOutput' name:'myop1'", op1.node_def)
self.assertProtoEquals("op:'FloatInput' name:'myop2' input:'myop1'",
op2.node_def)
@test_util.run_deprecated_v1
def testInputsAndOutputs(self):
op1 = test_ops.float_output(name="myop1").op
self.assertEqual(1, len(op1.values()))
float1_t, = op1.values()
op2 = test_ops.float_output_string_output(name="myop2").a.op
self.assertEqual(2, len(op2.values()))
float2_t, label2_str_t = op2.values()
# Note that we consume label2_str_t twice here.
op3 = test_ops.foo2(float1_t, label2_str_t, label2_str_t, name="myop3").d.op
self.assertEqual(2, len(op3.values()))
self.assertEqual(1, len(float1_t.consumers()))
self.assertEqual(op3, float1_t.consumers()[0])
self.assertEqual(0, len(float2_t.consumers()))
self.assertEqual(2, len(label2_str_t.consumers()))
self.assertEqual(op3, label2_str_t.consumers()[0])
self.assertEqual(op3, label2_str_t.consumers()[1])
self.assertProtoEquals("""
op:'Foo2' name:'myop3'
input:'myop1' input:'myop2:1' input:'myop2:1'
""", op3.node_def)
def testDeviceObject(self):
op = ops.Operation(ops._NodeDef("None", "myop"), ops.Graph(), [], [])
op._set_device("/job:goo/device:GPU:0")
self.assertProtoEquals(
"op:'None' name:'myop' device:'/job:goo/device:GPU:0' ", op.node_def)
op = ops.Operation(ops._NodeDef("None", "op2"), ops.Graph(), [], [])
op._set_device(
pydev.DeviceSpec(
job="muu", device_type="CPU", device_index=0))
self.assertProtoEquals(
"op:'None' name:'op2' device:'/job:muu/device:CPU:0'", op.node_def)
def testReferenceInput(self):
g = ops.Graph()
op1 = ops.Operation(
ops._NodeDef("RefOutputFloatOutput", "op1"), g, [],
[dtypes.float32_ref, dtypes.float32])
self.assertProtoEquals("op:'RefOutputFloatOutput' name:'op1'", op1.node_def)
self.assertEqual([], list(op1.inputs))
ref_t, nonref_t = op1.values()
# NOTE(mrry): Must specify input_types to preserve ref-typed input.
op2 = ops.Operation(
ops._NodeDef("RefInputFloatInput", "op2"),
g, [ref_t, nonref_t], [],
input_types=[dtypes.float32_ref, dtypes.float32])
self.assertProtoEquals(
"op:'RefInputFloatInput' name:'op2' input:'op1' input:'op1:1'",
op2.node_def)
self.assertEqual([ref_t, nonref_t], list(op2.inputs))
op3 = ops.Operation(
ops._NodeDef("TwoFloatInputs", "op3"), g, [ref_t, nonref_t], [])
self.assertProtoEquals(
"op:'TwoFloatInputs' name:'op3' input:'op1' input:'op1:1'",
op3.node_def)
def testInvalidNames(self):
g = ops.Graph()
with self.assertRaises(ValueError):
ops.Operation(ops._NodeDef("op", ""), g)
with self.assertRaises(ValueError):
ops.Operation(ops._NodeDef("op", "_invalid"), g)
with self.assertRaises(ValueError):
ops.Operation(ops._NodeDef("op", "-invalid"), g)
with self.assertRaises(ValueError):
ops.Operation(ops._NodeDef("op", "/invalid"), g)
with self.assertRaises(ValueError):
ops.Operation(ops._NodeDef("op", "invalid:0"), g)
@test_util.run_deprecated_v1
def testNoShapeFunction(self):
op = test_ops.a()
self.assertEqual(tensor_shape.unknown_shape(), op.get_shape())
@test_util.run_in_graph_and_eager_modes
def testConvertToTensorNestedArray(self):
values = [[2], [3], [5], [7]]
tensor = ops.convert_to_tensor(values)
self.assertAllEqual((4, 1), tensor.get_shape().as_list())
self.assertAllEqual(values, self.evaluate(tensor))
def testShapeTuple(self):
with self.cached_session():
c = constant_op.constant(1)
self.assertEqual(c._shape_tuple(), ()) # pylint: disable=protected-access
def testConvertToTensorEager(self):
with context.eager_mode():
t = constant_op.constant(1)
self.assertTrue(isinstance(t, ops.EagerTensor))
converted = ops.convert_to_tensor(t)
self.assertTrue(isinstance(converted, ops.EagerTensor))
converted = ops.convert_to_tensor(1)
self.assertTrue(isinstance(converted, ops.EagerTensor))
@test_util.run_in_graph_and_eager_modes
def testConvertToTensorNestedTuple(self):
values = ((2,), (3,), (5,), (7,))
tensor = ops.convert_to_tensor(values)
self.assertAllEqual((4, 1), tensor.get_shape().as_list())
self.assertAllEqual(values, self.evaluate(ops.convert_to_tensor(values)))
@test_util.run_in_graph_and_eager_modes
def testConvertToTensorNestedTensors(self):
values = ((2,), (3,), (5,), (7,))
tensor = ops.convert_to_tensor(
[constant_op.constant(row) for row in values])
self.assertAllEqual((4, 1), tensor.get_shape().as_list())
self.assertAllEqual(values, self.evaluate(tensor))
tensor = ops.convert_to_tensor(
[[constant_op.constant(v) for v in row] for row in values])
self.assertAllEqual((4, 1), tensor.get_shape().as_list())
self.assertAllEqual(values, self.evaluate(tensor))
@test_util.run_in_graph_and_eager_modes
def testConvertToTensorNestedMix(self):
values = ([2], (3,), [constant_op.constant(5)], constant_op.constant([7]))
tensor = ops.convert_to_tensor(values)
self.assertAllEqual((4, 1), tensor.get_shape().as_list())
self.assertAllEqual(((2,), (3,), (5,), (7,)), self.evaluate(tensor))
@test_util.run_in_graph_and_eager_modes
def testConvertToTensorPreferred(self):
values = [2, 3, 5, 7]
tensor = ops.convert_to_tensor(values, preferred_dtype=dtypes.float32)
self.assertEqual(dtypes.float32, tensor.dtype)
    # An empty value list can be converted to any preferred dtype.
values = []
tensor = ops.convert_to_tensor(values, preferred_dtype=dtypes.int64)
self.assertEqual(dtypes.int64, tensor.dtype)
    # The preferred dtype (int64) cannot represent 1.23 without loss, so the
    # conversion falls back to float32 instead.
values = [1.23]
tensor = ops.convert_to_tensor(values, preferred_dtype=dtypes.int64)
self.assertEqual(dtypes.float32, tensor.dtype)
@test_util.run_in_graph_and_eager_modes
def testConvertToInvalidTensorType(self):
with self.assertRaises(TypeError):
# Forcing an invalid dtype should fail with a type error.
values = [1.23]
ops.convert_to_tensor(values, dtype=dtypes.int64)
@test_util.run_in_graph_and_eager_modes
def testConvertToLongLongTensorType(self):
tensor = ops.convert_to_tensor(
# Get a numpy array of dtype NPY_LONGLONG
np.prod(constant_op.constant([1])._shape_tuple()),
dtype=dtypes.int64)
self.assertEqual(dtypes.int64, tensor.dtype)
@test_util.run_in_graph_and_eager_modes
def testConvertToTensorFromInvalidTensor(self):
tensor = constant_op.constant(42.0, dtype=dtypes.float32)
with self.assertRaises(ValueError):
ops.convert_to_tensor(tensor, dtype=dtypes.int32)
@test_util.run_in_graph_and_eager_modes
def testConvertToTensorProtocol(self):
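    # Objects can opt in to tensor conversion by implementing __tf_tensor__.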
class TensorCompatible:
def __tf_tensor__(self, dtype=None, name=None):
return constant_op.constant((1, 2, 3), dtype=dtype, name=name)
tc = TensorCompatible()
tensor = ops.convert_to_tensor(tc, dtype=dtypes.int32)
self.assertEqual(tensor.dtype, dtypes.int32)
self.assertAllEqual((1, 2, 3), self.evaluate(tensor))
@test_util.run_deprecated_v1
def testNoConvert(self):
# Operation cannot be converted to Tensor.
op = control_flow_ops.no_op()
with self.assertRaisesRegex(TypeError,
"can't convert Operation '.+' to Tensor"):
ops.convert_to_tensor(op)
def testStr(self):
node_def = ops._NodeDef("None", "op1")
op = ops.Operation(node_def, ops.Graph(), [], [dtypes.float32])
self.assertEqual(str(node_def), str(op))
def testRepr(self):
op = ops.Operation(
ops._NodeDef("None", "op1"), ops.Graph(), [], [dtypes.float32])
self.assertEqual("<tf.Operation 'op1' type=None>", repr(op))
@test_util.run_deprecated_v1
def testGetAttr(self):
op = test_ops.default_attrs()
self.assertEqual(op.get_attr("string_val"), b"abc")
self.assertEqual(op.get_attr("string_list_val"), [b"abc", b""])
self.assertEqual(op.get_attr("int_val"), 123)
self.assertEqual(op.get_attr("int_list_val"), [1, 2, 3])
self.assertEqual(op.get_attr("float_val"), 10.0)
self.assertEqual(op.get_attr("float_list_val"), [10.0])
self.assertEqual(op.get_attr("bool_val"), True)
self.assertEqual(op.get_attr("bool_list_val"), [True, False])
self.assertEqual(op.get_attr("shape_val"),
tensor_shape.as_shape([2, 1]).as_proto())
self.assertEqual(op.get_attr("shape_list_val"),
[tensor_shape.as_shape([]).as_proto(),
tensor_shape.as_shape([1]).as_proto()])
self.assertEqual(op.get_attr("tensor_val"),
tensor_util.make_tensor_proto(1, dtypes.int32))
self.assertEqual(op.get_attr("tensor_list_val"),
[tensor_util.make_tensor_proto(1, dtypes.int32)])
type_val = op.get_attr("type_val")
# First check that type_val is a DType, because the assertEqual will work
# no matter what since DType overrides __eq__
self.assertIsInstance(type_val, dtypes.DType)
self.assertEqual(type_val, dtypes.int32)
type_list_val = op.get_attr("type_list_val")
self.assertTrue(all(isinstance(x, dtypes.DType) for x in type_list_val))
self.assertEqual(type_list_val, [dtypes.int32, dtypes.float32])
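    # Function-valued attrs come back as NameAttrList protos naming the
    # function, not as the original Python callable.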
@function.Defun(dtypes.float32, func_name="MyFunc")
def func(x):
return x
op = test_ops.func_attr(func)
self.assertEqual(op.get_attr("f"),
attr_value_pb2.NameAttrList(name="MyFunc"))
# Try fetching missing attr
with self.assertRaisesRegex(
ValueError, "Operation 'FuncAttr' has no attr named 'FakeAttr'."):
op.get_attr("FakeAttr")
# TODO(b/65162920): remove this test when users who are directly mutating the
# node_def have been updated to proper usage.
@test_util.run_deprecated_v1
def testSetAttr(self):
op = test_ops.int_attr().op
op._set_attr("foo", attr_value_pb2.AttrValue(i=2))
# TODO(skyewm): add node_def check
self.assertEqual(op.get_attr("foo"), 2)
# TODO(nolivia): test all error cases
def testAddControlInput(self):
with ops.Graph().as_default():
x = constant_op.constant(1).op
y = constant_op.constant(2).op
z = constant_op.constant(3).op
z._add_control_input(x) # pylint: disable=protected-access
self.assertEqual(z.control_inputs, [x])
z._add_control_input(x) # pylint: disable=protected-access
self.assertEqual(z.control_inputs, [x])
z._add_control_inputs([x, y, y]) # pylint: disable=protected-access
self.assertEqual(z.control_inputs, [x, y])
self.assertEqual(x._control_outputs, [z])
@test_util.run_deprecated_v1
def testRemoveAllControlInputs(self):
a = constant_op.constant(1)
with ops.control_dependencies([a]):
b = constant_op.constant(2)
c = constant_op.constant(3)
d = constant_op.constant(4)
e = constant_op.constant(5)
with ops.control_dependencies([a, c]):
f = d + e
self.assertEqual(a.op.control_inputs, [])
self.assertEqual(b.op.control_inputs, [a.op])
self.assertEqual(f.op.control_inputs, [a.op, c.op])
a.op._remove_all_control_inputs() # pylint: disable=protected-access
self.assertEqual(a.op.control_inputs, [])
b.op._remove_all_control_inputs() # pylint: disable=protected-access
self.assertEqual(b.op.control_inputs, [])
f.op._remove_all_control_inputs() # pylint: disable=protected-access
self.assertEqual(f.op.control_inputs, [])
self.assertEqual(list(f.op.inputs), [d, e])
@test_util.run_deprecated_v1
def testControlInputCycle(self):
graph = ops.Graph()
with graph.as_default():
z = constant_op.constant(0)
x = constant_op.constant(1)
y = constant_op.constant(2)
y.op._add_control_input(z.op) # pylint: disable=protected-access
y.op._add_control_input(x.op) # pylint: disable=protected-access
x.op._add_control_input(y.op) # pylint: disable=protected-access
with self.session(graph=graph) as sess:
with self.assertRaisesRegex(
errors.InvalidArgumentError,
"Graph is invalid, contains a cycle with 2 nodes"):
self.evaluate(x)
def testUpdateInput(self):
g = ops.Graph()
with g.as_default():
x = constant_op.constant(1)
y = constant_op.constant(2)
z = x + y
z.op._update_input(0, y) # pylint: disable=protected-access
self.assertEqual(list(z.op.inputs), [y, y])
self.assertEqual(x.consumers(), [])
self.assertEqual(y.consumers(), [z.op, z.op])
with session.Session(graph=g) as sess:
self.assertEqual(self.evaluate(z), 4)
z.op._update_input(0, x) # pylint: disable=protected-access
self.assertEqual(list(z.op.inputs), [x, y])
self.assertEqual(x.consumers(), [z.op])
self.assertEqual(y.consumers(), [z.op])
with session.Session(graph=g) as sess:
self.assertEqual(self.evaluate(z), 3)
z.op._update_input(1, y) # pylint: disable=protected-access
self.assertEqual(list(z.op.inputs), [x, y])
self.assertEqual(x.consumers(), [z.op])
self.assertEqual(y.consumers(), [z.op])
with session.Session(graph=g) as sess:
self.assertEqual(self.evaluate(z), 3)
def testUpdateInputGraphError(self):
g_0 = ops.Graph()
g_1 = ops.Graph()
with g_0.as_default():
x = constant_op.constant(1)
with g_1.as_default():
y = constant_op.constant(2)
z = y * 2
with self.assertRaisesRegex(ValueError, "must be from the same graph"):
z.op._update_input(0, x) # pylint: disable=protected-access
def testUpdateInputTypeError(self):
g = ops.Graph()
with g.as_default():
w = constant_op.constant(0)
x = constant_op.constant("")
y = constant_op.constant(1)
z = y + w
z.op._update_input(0, x) # pylint: disable=protected-access
with session.Session(graph=g) as sess:
with self.assertRaisesRegex(
errors.InvalidArgumentError,
"Input 0 of node add was passed string from Const_1:0 incompatible "
"with expected int32"):
self.evaluate(z)
def testUpdateInputShapeError(self):
g = ops.Graph()
with g.as_default():
w = constant_op.constant(2, shape=[3, 1])
x = constant_op.constant(0, shape=[3, 1])
y = constant_op.constant(1, shape=[2, 2])
z = w + x
with self.assertRaisesRegex(
errors.InvalidArgumentError,
r"Cannot update edge, incompatible shapes: \[2,2\] and \[3,1\]"):
z.op._update_input(0, y) # pylint: disable=protected-access
def testUpdateInputOutOfRange(self):
g = ops.Graph()
with g.as_default():
x = constant_op.constant(1)
with self.assertRaisesRegex(
errors.OutOfRangeError,
r"Cannot update edge. Input index \[1\] is greater than the number of "
r"total inputs \[0\]."):
x.op._update_input(1, x) # pylint: disable=protected-access
@test_util.enable_control_flow_v2
@test_util.run_v1_only("b/120545219")
def testAddWhileInput(self):
@eager_function.defun
def test():
output = control_flow_ops.while_loop(lambda x: x < 3, lambda x: x + 1,
[1])
while_op = output.op
self.assertEqual(while_op.type, "StatelessWhile")
orig_num_inputs = len(while_op.inputs)
# Make sure we can handle the while op having a control input.
while_op._add_control_input(constant_op.constant(0).op)
new_input1 = constant_op.constant(1.0)
new_input2 = constant_op.constant(True)
# Clear output shapes to bypass shape checking.
while_op._set_shape_list_attr("output_shapes", [])
while_op._set_type_list_attr("T", [t.dtype for t in while_op.inputs] +
[new_input1.dtype, new_input2.dtype])
while_op._add_while_inputs([new_input1, new_input2])
# Can't add an edge beyond what's specified by "T"
with self.assertRaises(errors.OutOfRangeError):
while_op._add_while_inputs([new_input2])
self.assertLen(while_op.inputs, orig_num_inputs + 2) # pylint: disable=g-deprecated-assert
test()
@test_util.run_deprecated_v1
def testOpDef(self):
x = constant_op.constant(0)
y = constant_op.constant(1)
z = x + y
self.assertEqual(x.op.op_def.name, "Const")
self.assertLen(x.op.op_def.input_arg, 0)
self.assertLen(x.op.op_def.output_arg, 1)
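    # "+" may lower to either Add or AddV2 depending on the TF version, so
    # accept both op names.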
self.assertRegex(z.op.op_def.name, "Add(V2)?")
self.assertLen(z.op.op_def.input_arg, 2)
self.assertLen(z.op.op_def.output_arg, 1)
def testInputFromDifferentGraphError(self):
g_0 = ops.Graph()
g_1 = ops.Graph()
with g_0.as_default():
x = constant_op.constant(1)
with g_1.as_default():
y = constant_op.constant(2)
with self.assertRaisesRegex(ValueError, "must be from the same graph"):
y * x # pylint: disable=pointless-statement
def testInputsAreImmutable(self):
g = ops.Graph()
with g.as_default():
x = test_ops.int_output()
op = test_ops.int_input_int_output(x, name="myop").op
with self.assertRaisesRegex(AttributeError,
"'tuple' object has no attribute 'append'"):
op.inputs.append(None)
class CreateOpTest(test_util.TensorFlowTestCase):
def testNodeDefArgs(self):
g = ops.Graph()
op1 = g.create_op("FloatOutput", [], [dtypes.float32], None, name="myop1")
with g.device("/device:GPU:0"):
op2 = g.create_op(
"FloatOutputStringOutput", [], [dtypes.float32, dtypes.string], None,
name="myop2")
op3 = g.create_op(
"Foo3",
[list(op1.values())[0], list(op2.values())[1], list(op2.values())[0]],
[dtypes.float32, dtypes.int32],
None,
name="myop3")
self.assertDeviceEqual(None, op1.device)
self.assertDeviceEqual("/device:GPU:0", op2.device)
self.assertDeviceEqual(None, op3.device)
self.assertProtoEquals("name:'myop1' op:'FloatOutput'", op1.node_def)
self.assertProtoEquals(
"name:'myop2' op:'FloatOutputStringOutput' device:'/device:GPU:0'",
op2.node_def)
self.assertProtoEquals(
"name:'myop3' input:'myop1' input:'myop2:1' input:'myop2' op:'Foo3'",
op3.node_def)
def testReferenceInput(self):
g = ops.Graph()
op1 = g.create_op(
"RefOutputFloatOutput", [], [dtypes.float32_ref, dtypes.float32],
name="op1")
self.assertProtoEquals("op:'RefOutputFloatOutput' name:'op1'", op1.node_def)
ref_t, nonref_t = op1.values()
# NOTE(mrry): Must specify input_types to preserve ref-typed input.
op2 = g.create_op(
"RefInputFloatInput", [ref_t, nonref_t], [],
input_types=[dtypes.float32_ref, dtypes.float32],
name="op2")
self.assertProtoEquals(
"op:'RefInputFloatInput' name:'op2' input:'op1' input:'op1:1'",
op2.node_def)
op3 = g.create_op("TwoFloatInputs", [ref_t, nonref_t], [], name="op3")
self.assertProtoEquals(
"op:'TwoFloatInputs' name:'op3' input:'op1' input:'op1:1'",
op3.node_def)
def testFinalized(self):
g = ops.Graph()
g.finalize()
with self.assertRaises(RuntimeError):
g.create_op("FloatOutput", [], [dtypes.float32], None, name="myop1")
# Test unfinalize.
g._unsafe_unfinalize()
g.create_op("FloatOutput", [], [dtypes.float32], None, name="myop1")
# NOTE(skyewm): these cases test the private Graph._create_op_from_tf_operation
# method. Arguably we should only test the public APIs that depend on this
# method. However, this logic is complex and tricky, and it can be difficult to
# ascertain if we have adequate coverage (e.g. a graph may run successfully if
# the control flow context isn't set properly, but a more complicated use case
# that might not be obvious to test will fail). Thus we instead explicitly test
# the low-level behavior.
class CreateOpFromTFOperationTest(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
def testBasic(self):
g = ops.Graph()
with g.as_default():
x = test_ops.int_output()
c_op = ops._create_c_op(
g, ops._NodeDef("IntInputIntOutput", "myop"), [x], [])
op = g._create_op_from_tf_operation(c_op)
self.assertEqual(op.name, "myop")
self.assertEqual(op.type, "IntInputIntOutput")
self.assertLen(op.outputs, 1)
self.assertEqual(op.outputs[0].shape, tensor_shape.unknown_shape())
self.assertEqual(list(op.inputs), [x])
self.assertEqual(op.control_inputs, [])
self.assertEqual(op.graph, g)
self.assertEqual(x.consumers(), [op])
self.assertIsNotNone(op.traceback)
self.assertEqual(g.get_operation_by_name("myop"), op)
self.assertEqual(g.get_tensor_by_name("myop:0"), op.outputs[0])
def testShape(self):
g = ops.Graph()
with g.as_default():
x = constant_op.constant([[1, 2, 3], [4, 5, 6]])
c_op = ops._create_c_op(g, ops._NodeDef("Identity", "myop"), [x], [])
op = g._create_op_from_tf_operation(c_op)
self.assertEqual(op.name, "myop")
self.assertEqual(op.type, "Identity")
self.assertLen(op.outputs, 1)
self.assertEqual(op.outputs[0].shape, tensor_shape.TensorShape([2, 3]))
def testUniqueName(self):
g = ops.Graph()
with g.as_default():
c_op = ops._create_c_op(g, ops._NodeDef("IntOutput", "myop"), [], [])
c_op2 = ops._create_c_op(g, ops._NodeDef("IntOutput", "myop_1"), [], [])
op = g._create_op_from_tf_operation(c_op)
op2 = g._create_op_from_tf_operation(c_op2)
# Create ops with same names as op1 and op2. We expect the new names to be
# uniquified.
op3 = test_ops.int_output(name="myop").op
op4 = test_ops.int_output(name="myop_1").op
self.assertEqual(op.name, "myop")
self.assertEqual(op2.name, "myop_1")
self.assertEqual(op3.name, "myop_2")
self.assertEqual(op4.name, "myop_1_1")
@test_util.run_v1_only("b/120545219")
def testCond(self):
g = ops.Graph()
with g.as_default():
x = test_ops.int_output()
def true_fn():
ops._create_c_op(ops.get_default_graph(),
ops._NodeDef("IntInput", "cond/myop"), [x], [])
new_ops = g._add_new_tf_operations()
self.assertLen(new_ops, 1)
return x
control_flow_ops.cond(x < 10, true_fn, lambda: x)
op = g.get_operation_by_name("cond/myop")
self.assertIsNotNone(op)
self.assertEqual(op.name, "cond/myop")
self.assertEqual(op.type, "IntInput")
self.assertEqual(op.outputs, [])
op_input = op.inputs[0].op
self.assertEqual(op_input.type, "Switch")
self.assertEqual(op_input.inputs[0], x)
self.assertEqual(op.graph, g)
# pylint: disable=protected-access
self.assertIsNotNone(op._get_control_flow_context())
self.assertEqual(op._get_control_flow_context().name,
"cond/cond_text")
# pylint: enable=protected-access
@test_util.run_v1_only("b/120545219")
def testWhileLoop(self):
g = ops.Graph()
with g.as_default():
x = test_ops.int_output()
def body(i):
ops._create_c_op(ops.get_default_graph(),
ops._NodeDef("IntInput", "myloop/myop"), [x], [])
new_ops = g._add_new_tf_operations()
self.assertLen(new_ops, 1)
return i
control_flow_ops.while_loop(lambda i: i < 10, body, [0], name="myloop")
op = g.get_operation_by_name("myloop/myop")
self.assertIsNotNone(op)
self.assertEqual(op.name, "myloop/myop")
self.assertEqual(op.type, "IntInput")
self.assertEqual(op.outputs, [])
op_input = op.inputs[0].op
self.assertEqual(op_input.type, "Enter")
self.assertEqual(list(op_input.inputs), [x])
self.assertEqual(op.graph, g)
# pylint: disable=protected-access
self.assertIsNotNone(op._get_control_flow_context())
self.assertEqual(op._get_control_flow_context().name,
"myloop/while_context")
# pylint: enable=protected-access
@test_util.run_v1_only("b/120545219")
def testWhileLoopWithInternalControlDep(self):
g = ops.Graph()
with g.as_default():
x = test_ops.int_output()
def body(i):
c = constant_op.constant(1.0, name="c")
ops._create_c_op(ops.get_default_graph(),
ops._NodeDef("IntInput", "myloop/myop"), [x], [])
with ops.control_dependencies([c]):
new_ops = g._add_new_tf_operations()
self.assertLen(new_ops, 1)
return i
control_flow_ops.while_loop(lambda i: i < 10, body, [0], name="myloop")
op = g.get_operation_by_name("myloop/myop")
self.assertIsNotNone(op)
c = g.get_operation_by_name("myloop/c")
self.assertIsNotNone(c)
# Internal control dep is preserved
self.assertEqual(op.control_inputs, [c])
@test_util.run_v1_only("b/120545219")
def testWhileLoopWithExternalControlDep(self):
g = ops.Graph()
with g.as_default():
x = test_ops.int_output()
c = constant_op.constant(1.0)
def body(i):
ops._create_c_op(ops.get_default_graph(),
ops._NodeDef("IntInput", "myloop/myop"), [x], [])
with ops.control_dependencies([c]):
new_ops = g._add_new_tf_operations()
self.assertLen(new_ops, 1)
return i
control_flow_ops.while_loop(lambda i: i < 10, body, [0], name="myloop")
op = g.get_operation_by_name("myloop/myop")
self.assertIsNotNone(op)
# External control dep is removed and replaced with internal control dep
self.assertNotEqual(op.control_inputs[0], c.op)
self.assertIsNotNone(op.control_inputs[0]._get_control_flow_context())
class ApplyOpTest(test_util.TensorFlowTestCase):
def testNodeDefArgs(self):
g = ops.Graph()
t1 = _apply_op(g, "FloatOutput", [], [dtypes.float32], name="myop1")
with g.device("/device:GPU:0"):
t2 = _apply_op(
g, "TwoIntOutputs", [], [dtypes.int32, dtypes.int32], name="myop2")
t3 = _apply_op(
g,
"Foo1", [t1, t2[1], t2[0]], [dtypes.float32, dtypes.int32],
name="myop3")
self.assertTrue(isinstance(t1, ops.Tensor))
self.assertTrue(isinstance(t2, list))
self.assertTrue(isinstance(t3, list))
self.assertTrue(isinstance(t3[0], ops.Tensor))
self.assertEqual("myop1", t1._as_node_def_input())
self.assertEqual("myop2", t2[0]._as_node_def_input())
self.assertEqual("myop2:1", t2[1]._as_node_def_input())
self.assertEqual("myop3", t3[0]._as_node_def_input())
# Validate that we got the right ops as well
self.assertProtoEquals("name:'myop1' op:'FloatOutput'", t1.op.node_def)
self.assertProtoEquals(
"name:'myop2' op:'TwoIntOutputs' device:'/device:GPU:0'",
t2[0].op.node_def)
self.assertProtoEquals(
"name:'myop3' input:'myop1' input:'myop2:1' input:'myop2' op:'Foo1'",
t3[0].op.node_def)
def testReferenceInput(self):
g = ops.Graph()
ref_t, nonref_t = _apply_op(
g, "RefOutputFloatOutput", [], [dtypes.float32_ref, dtypes.float32],
name="op1")
self.assertProtoEquals("op:'RefOutputFloatOutput' name:'op1'",
ref_t.op.node_def)
# NOTE(mrry): Must specify input_types to preserve ref-typed input.
out_2 = _apply_op(
g,
"RefInputFloatInputIntOutput", [ref_t, nonref_t], [dtypes.int32],
input_types=[dtypes.float32_ref, dtypes.float32],
name="op2")
self.assertProtoEquals(
"op:'RefInputFloatInputIntOutput' name:'op2' input:'op1' input:'op1:1'",
out_2.op.node_def)
out_3 = _apply_op(
g, "TwoFloatInputsIntOutput", [ref_t, nonref_t], [dtypes.int32],
name="op3")
self.assertProtoEquals(
"op:'TwoFloatInputsIntOutput' name:'op3' input:'op1' input:'op1:1'",
out_3.op.node_def)
class NameStackTest(test_util.TensorFlowTestCase):
def testBasics(self):
g = ops.Graph()
self.assertEqual("foo", g.unique_name("foo", mark_as_used=False))
self.assertEqual("foo", g.unique_name("foo", mark_as_used=False))
self.assertEqual("foo", g.unique_name("foo"))
self.assertEqual("foo_1", g.unique_name("foo", mark_as_used=False))
self.assertEqual("foo_1", g.unique_name("foo"))
self.assertEqual("foo_2", g.unique_name("foo", mark_as_used=False))
self.assertEqual("foo_2", g.unique_name("foo"))
self.assertEqual("foo_1_1", g.unique_name("foo_1", mark_as_used=False))
self.assertEqual("foo_1_1", g.unique_name("foo_1"))
self.assertEqual("foo_1_2", g.unique_name("foo_1", mark_as_used=False))
self.assertEqual("foo_1_2", g.unique_name("foo_1"))
self.assertEqual("foo_1_2_1", g.unique_name("foo_1_2", mark_as_used=False))
self.assertEqual("foo_1_2_1", g.unique_name("foo_1_2"))
with g.name_scope("bar"):
self.assertEqual("bar/foo", g.unique_name("foo", mark_as_used=False))
self.assertEqual("bar/foo", g.unique_name("foo"))
self.assertEqual("bar/foo_1", g.unique_name("foo", mark_as_used=False))
self.assertEqual("bar/foo_1", g.unique_name("foo"))
with g.name_scope(None):
self.assertEqual("foo_3", g.unique_name("foo", mark_as_used=False))
self.assertEqual("foo_3", g.unique_name("foo"))
with g.name_scope("baz"):
self.assertEqual(
"bar/baz/foo", g.unique_name(
"foo", mark_as_used=False))
self.assertEqual("bar/baz/foo", g.unique_name("foo"))
self.assertEqual(
"bar/baz/foo_1", g.unique_name(
"foo", mark_as_used=False))
self.assertEqual("bar/baz/foo_1", g.unique_name("foo"))
with g.name_scope("baz"):
self.assertEqual(
"bar/baz_1/foo", g.unique_name(
"foo", mark_as_used=False))
self.assertEqual("bar/baz_1/foo", g.unique_name("foo"))
self.assertEqual(
"bar/baz_1/foo_1", g.unique_name(
"foo", mark_as_used=False))
self.assertEqual("bar/baz_1/foo_1", g.unique_name("foo"))
with g.name_scope("quux"):
self.assertEqual("quux/foo", g.unique_name("foo", mark_as_used=False))
self.assertEqual("quux/foo", g.unique_name("foo"))
with g.name_scope("bar"):
with g.name_scope("baz"):
self.assertEqual(
"bar_1/baz/foo", g.unique_name(
"foo", mark_as_used=False))
self.assertEqual("bar_1/baz/foo", g.unique_name("foo"))
self.assertEqual("foo_4", g.unique_name("foo", mark_as_used=False))
self.assertEqual("foo_4", g.unique_name("foo"))
self.assertEqual("bar_2", g.unique_name("bar", mark_as_used=False))
self.assertEqual("bar_2", g.unique_name("bar"))
def testBackslashAndDashRegex(self):
    # Regression test for GitHub issue 39019: all of these scope names should
    # be accepted without raising.
g = ops.Graph()
with g.name_scope("n_CatCntc-campaign\\c_campaign"):
pass
with g.name_scope("foo"):
with g.name_scope("n_CatCntc-campaign\\c_campaign"):
pass
with g.name_scope("n_CatCntc-campaign\\c_campaign"):
with g.name_scope("foo"):
pass
@test_util.run_deprecated_v1
def testNameAndVariableScope(self):
with self.cached_session() as sess:
with sess.graph.name_scope("l0"):
with variable_scope.variable_scope("l1"):
with sess.graph.name_scope("l1") as scope:
self.assertEqual("l0/l1/l1/", scope)
self.assertEqual(
"l0/l1/l1/foo",
sess.graph.unique_name(
"foo", mark_as_used=False))
self.assertEqual("l0/l1/l1/foo", sess.graph.unique_name("foo"))
with sess.graph.name_scope("l2") as scope:
self.assertEqual("l0/l1/l2/", scope)
self.assertEqual(
"l0/l1/l2/foo",
sess.graph.unique_name(
"foo", mark_as_used=False))
self.assertEqual("l0/l1/l2/foo", sess.graph.unique_name("foo"))
def testOutOfOrderUniqueName(self):
g = ops.Graph()
self.assertEqual("foo_2", g.unique_name("foo_2"))
self.assertEqual("foo", g.unique_name("foo"))
self.assertEqual("foo_1", g.unique_name("foo"))
self.assertEqual("foo_3", g.unique_name("foo"))
def testUniqueNameCaseInsensitivity(self):
g = ops.Graph()
self.assertEqual("foo", g.unique_name("foo"))
self.assertEqual("Foo_1", g.unique_name("Foo"))
with g.name_scope("bar"):
self.assertEqual("bar/foo", g.unique_name("foo"))
with g.name_scope("Bar"):
self.assertEqual("Bar_1/foo", g.unique_name("foo"))
def testInvalidNameRaisesError(self):
g = ops.Graph()
with g.name_scope(""): # Should not raise
pass
with g.name_scope("foo/"): # Should not raise
with g.name_scope("_bar"): # Should not raise
pass
with self.assertRaises(ValueError):
with g.name_scope("foo:0"):
pass
with self.assertRaises(ValueError):
with g.name_scope("_bar"):
pass
def testEmptyScopeEdgeCases(self):
g = ops.Graph()
self.assertEqual("", g.get_name_scope())
with g.name_scope("") as scope:
self.assertEqual("", scope)
self.assertEqual("", g.get_name_scope())
with g.name_scope(None) as scope:
self.assertEqual("", scope)
self.assertEqual("", g.get_name_scope())
with g.name_scope("foo") as scope:
self.assertEqual("foo/", scope)
self.assertEqual("foo", g.get_name_scope())
with g.name_scope("") as scope:
self.assertEqual("", scope)
self.assertEqual("", g.get_name_scope())
with g.name_scope(None) as scope:
self.assertEqual("", scope)
self.assertEqual("", g.get_name_scope())
class NameTest(test_util.TensorFlowTestCase):
def testGenerateName(self):
g = ops.Graph()
op0 = g.create_op("TwoFloatOutputs", [], [dtypes.float32, dtypes.float32])
self.assertEqual("TwoFloatOutputs", op0.name)
self.assertEqual("TwoFloatOutputs:0", op0.outputs[0].name)
self.assertEqual("TwoFloatOutputs:1", op0.outputs[1].name)
op1 = g.create_op("FloatOutput", [], [dtypes.float32])
self.assertEqual("FloatOutput", op1.name)
self.assertEqual("FloatOutput:0", op1.outputs[0].name)
op2 = g.create_op("FloatOutput", [], [dtypes.float32])
self.assertEqual("FloatOutput_1", op2.name)
self.assertEqual("FloatOutput_1:0", op2.outputs[0].name)
op3 = g.create_op("FloatOutput", [], [dtypes.float32], name="my_op")
self.assertEqual("my_op", op3.name)
self.assertEqual("my_op:0", op3.outputs[0].name)
def testNameScope(self):
g = ops.Graph()
with g.name_scope("foo") as foo:
self.assertEqual("foo/", foo)
with g.name_scope("foo2") as foo2:
self.assertEqual("foo/foo2/", foo2)
with g.name_scope(None) as empty1:
self.assertEqual("", empty1)
with g.name_scope("foo3") as foo3:
self.assertEqual("foo3/", foo3)
with g.name_scope("") as empty2:
self.assertEqual("", empty2)
self.assertEqual("FloatOutput",
g.create_op("FloatOutput", [], [dtypes.float32]).name)
with g.name_scope("bar") as scope:
self.assertEqual("bar/FloatOutput",
g.create_op("FloatOutput", [], [dtypes.float32]).name)
self.assertEqual("bar/FloatOutput_1",
g.create_op("FloatOutput", [], [dtypes.float32]).name)
# If you use the value from "with .. as", that values is used as-is.
self.assertEqual(
"bar", g.create_op(
"FloatOutput", [], [dtypes.float32], name=scope).name)
with g.name_scope("baz") as scope:
with g.name_scope("quux"):
self.assertEqual("baz/quux/FloatOutput",
g.create_op("FloatOutput", [], [dtypes.float32]).name)
# If you use the value from the enclosing "with .. as", nothing is pushed.
with g.name_scope(scope):
self.assertEqual("baz/FloatOutput",
g.create_op("FloatOutput", [], [dtypes.float32]).name)
self.assertEqual(
"baz", g.create_op(
"FloatOutput", [], [dtypes.float32], name=scope).name)
self.assertEqual(
"trailing",
g.create_op(
"FloatOutput", [], [dtypes.float32], name="trailing/").name)
with g.name_scope("bar"):
self.assertEqual("bar_1/FloatOutput",
g.create_op("FloatOutput", [], [dtypes.float32]).name)
with g.name_scope("bar/"):
self.assertEqual("bar/FloatOutput_2",
g.create_op("FloatOutput", [], [dtypes.float32]).name)
class DeviceTest(test_util.TensorFlowTestCase):
def testNoDevice(self):
g = ops.Graph()
op = g.create_op("FloatOutput", [], [dtypes.float32])
self.assertDeviceEqual(None, op.device)
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "FloatOutput" op: "FloatOutput" }
""", gd)
def testEagerBackingDevice(self):
with context.eager_mode():
with ops.device("/device:CPU:0"):
t = constant_op.constant(1.0)
self.assertRegex(t.device, "/device:CPU:0")
self.assertRegex(t.backing_device, "/device:CPU:0")
def testDevicePartialString(self):
g = ops.Graph()
with g.device("/job:worker/replica:2"):
g.create_op("FloatOutput", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "FloatOutput" op: "FloatOutput"
device: "/job:worker/replica:2" }
""", gd)
def testDeviceFull(self):
g = ops.Graph()
with g.device(
pydev.DeviceSpec(
job="worker", replica=2, task=0, device_type="CPU",
device_index=3)):
g.create_op("FloatOutput", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "FloatOutput" op: "FloatOutput"
device: "/job:worker/replica:2/task:0/device:CPU:3" }
""", gd)
def testNesting(self):
g = ops.Graph()
with g.device("/job:worker/replica:2"):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device("/job:worker/replica:3/task:0"):
g.create_op("FloatOutput", [], [dtypes.float32])
g.create_op("FloatOutput", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "FloatOutput" op: "FloatOutput"
device: "/job:worker/replica:2" }
node { name: "FloatOutput_1" op: "FloatOutput"
device: "/job:worker/replica:3/task:0" }
node { name: "FloatOutput_2" op: "FloatOutput"
device: "/job:worker/replica:2" }
""", gd)
def testNestingString(self):
g = ops.Graph()
with g.device("/job:worker/replica:2"):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device("/job:worker/replica:3/task:0"):
g.create_op("FloatOutput", [], [dtypes.float32])
g.create_op("FloatOutput", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "FloatOutput" op: "FloatOutput"
device: "/job:worker/replica:2" }
node { name: "FloatOutput_1" op: "FloatOutput"
device: "/job:worker/replica:3/task:0" }
node { name: "FloatOutput_2" op: "FloatOutput"
device: "/job:worker/replica:2" }
""", gd)
def testNestingOverrideGpuCpu(self):
g = ops.Graph()
with g.device("/job:worker/replica:2/device:CPU:1"):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device("/job:worker/replica:2/device:GPU:2"):
g.create_op("FloatOutput", [], [dtypes.float32])
g.create_op("FloatOutput", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "FloatOutput" op: "FloatOutput"
device: "/job:worker/replica:2/device:CPU:1" }
node { name: "FloatOutput_1" op: "FloatOutput"
device: "/job:worker/replica:2/device:GPU:2" }
node { name: "FloatOutput_2" op: "FloatOutput"
device: "/job:worker/replica:2/device:CPU:1" }
""", gd)
def testNestingWithMergeDeviceFunction(self):
g = ops.Graph()
with g.device(pydev.merge_device("/device:GPU:0")):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device(pydev.merge_device("/job:worker")):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device(pydev.merge_device("/device:CPU:0")):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device(pydev.merge_device("/job:ps")):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device(pydev.merge_device(None)):
g.create_op("FloatOutput", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "FloatOutput" op: "FloatOutput"
device: "/device:GPU:0" }
node { name: "FloatOutput_1" op: "FloatOutput"
device: "/job:worker/device:GPU:0" }
node { name: "FloatOutput_2" op: "FloatOutput"
device: "/job:worker/device:CPU:0" }
node { name: "FloatOutput_3" op: "FloatOutput"
device: "/job:ps/device:CPU:0" }
node { name: "FloatOutput_4" op: "FloatOutput"
device: "/job:ps/device:CPU:0" }
""", gd)
def testNestingWithDeviceStrings(self):
g = ops.Graph()
with g.device("/device:GPU:0"):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device("/job:worker"):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device("/device:CPU:0"):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device("/job:ps"):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device(""):
g.create_op("FloatOutput", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "FloatOutput" op: "FloatOutput"
device: "/device:GPU:0" }
node { name: "FloatOutput_1" op: "FloatOutput"
device: "/job:worker/device:GPU:0" }
node { name: "FloatOutput_2" op: "FloatOutput"
device: "/job:worker/device:CPU:0" }
node { name: "FloatOutput_3" op: "FloatOutput"
device: "/job:ps/device:CPU:0" }
node { name: "FloatOutput_4" op: "FloatOutput"
device: "/job:ps/device:CPU:0" }
""", gd)
def testNestingWithDeviceStringWildcard(self):
g = ops.Graph()
with g.device("/device:GPU:7"):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device("/device:GPU:*"):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device("/device:CPU:*"):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device("/device:CPU:5"):
g.create_op("FloatOutput", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "FloatOutput" op: "FloatOutput"
device: "/device:GPU:7" }
node { name: "FloatOutput_1" op: "FloatOutput"
device: "/device:GPU:7" }
node { name: "FloatOutput_2" op: "FloatOutput"
device: "/device:CPU:*" }
node { name: "FloatOutput_3" op: "FloatOutput"
device: "/device:CPU:5" }
""", gd)
def testNestingErrorGraph(self):
g = ops.Graph()
scope = g.device("/device:GPU:8")
scope.__enter__()
with g.device("/device:GPU:9"):
with self.assertRaises(RuntimeError):
scope.__exit__(None, None, None)
def testNestingErrorEager(self):
with context.eager_mode():
scope = ops.device("/device:CPU:0")
scope.__enter__()
with ops.device(None):
with self.assertRaises(RuntimeError):
scope.__exit__(None, None, None)
def testNoneClearsDefault(self):
g = ops.Graph()
with g.device("/job:worker/replica:2/device:CPU:1"):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device(None):
g.create_op("FloatOutput", [], [dtypes.float32])
g.create_op("FloatOutput", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "FloatOutput" op: "FloatOutput"
device: "/job:worker/replica:2/device:CPU:1" }
node { name: "FloatOutput_1" op: "FloatOutput" }
node { name: "FloatOutput_2" op: "FloatOutput"
device: "/job:worker/replica:2/device:CPU:1" }
""", gd)
def testNoneIgnoresOuterDeviceFunction(self):
g = ops.Graph()
with g.device(lambda op: "/job:worker/replica:2/device:CPU:1"):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device(None):
g.create_op("FloatOutput", [], [dtypes.float32])
g.create_op("FloatOutput", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "FloatOutput" op: "FloatOutput"
device: "/job:worker/replica:2/device:CPU:1" }
node { name: "FloatOutput_1" op: "FloatOutput" }
node { name: "FloatOutput_2" op: "FloatOutput"
device: "/job:worker/replica:2/device:CPU:1" }
""", gd)
def _overwritingDeviceFunction(self, unused_op):
# This device function unconditionally overwrites the device of ops.
#
# NOTE(mrry): Writing device functions like this is not
# recommended. Instead, in most cases you should use
# `pydev.merge_device("/job:ps")` or simply `"/job:ps"` as the
# argument to `tf.device()` and the device component will be merged in.
return "/job:overwrite"
def testOverwritingBehavior(self):
g = ops.Graph()
with g.device(self._overwritingDeviceFunction):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device("/job:ps"): # Will be overwritten.
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device(pydev.merge_device("/job:ps")): # Will be overwritten.
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device(None): # Disables overwriting device function
with g.device("/job:ps"):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device(None): # Disables overwriting device function
with g.device(pydev.merge_device("/job:ps")):
g.create_op("FloatOutput", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "FloatOutput" op: "FloatOutput"
device: "/job:overwrite" }
node { name: "FloatOutput_1" op: "FloatOutput"
device: "/job:overwrite" }
node { name: "FloatOutput_2" op: "FloatOutput"
device: "/job:overwrite" }
node { name: "FloatOutput_3" op: "FloatOutput"
device: "/job:ps" }
node { name: "FloatOutput_4" op: "FloatOutput"
device: "/job:ps" }
""", gd)
class MultithreadedGraphStateTest(test_util.TensorFlowTestCase):
class TestThread(threading.Thread):
def __init__(self, graph, replica_id):
super(MultithreadedGraphStateTest.TestThread, self).__init__()
self._graph = graph
self._replica_id = replica_id
      # This thread sets this event once it has mutated the graph. The caller
      # can wait for that.
self.has_mutated_graph = threading.Event()
      # This thread waits on this event before continuing. The caller sets it
      # when the thread should proceed.
self.should_continue = threading.Event()
def run(self):
# Mutate a graph's stack, then set `has_mutated_graph`, then wait for
# `should_continue`, then add an op to the graph affected by the graph's
# stack.
raise NotImplementedError("must be implemented in descendants")
def testDeviceFunctionStack(self):
class DeviceSettingThread(self.TestThread):
def run(self):
with g.device("/job:worker/replica:{}".format(self._replica_id)):
self.has_mutated_graph.set()
self.should_continue.wait()
self.should_continue.clear()
g.create_op(
"FloatOutput", [], [dtypes.float32],
name="FloatOutput_{}".format(self._replica_id))
g = ops.Graph()
    # If `switch_to_thread_local` isn't called, then device placement of the
    # ops below is not deterministic.
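    # switch_to_thread_local() makes the graph's scoping stacks (device,
    # colocation, control dependencies, etc.) thread-local, so each thread's
    # `with` blocks only affect ops created on that same thread.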
g.switch_to_thread_local()
threads = [DeviceSettingThread(g, i) for i in range(3)]
for t in threads:
t.start()
t.has_mutated_graph.wait()
t.has_mutated_graph.clear()
for t in threads:
t.should_continue.set()
t.join()
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "FloatOutput_0" op: "FloatOutput"
device: "/job:worker/replica:0" }
node { name: "FloatOutput_1" op: "FloatOutput"
device: "/job:worker/replica:1" }
node { name: "FloatOutput_2" op: "FloatOutput"
device: "/job:worker/replica:2" }
""", gd)
def testColocateWith(self):
class ColocatingThread(self.TestThread):
def __init__(self, graph, replica_id, op_to_colocate_with):
super(ColocatingThread, self).__init__(graph, replica_id)
self._op_to_colocate_with = op_to_colocate_with
def run(self):
with g.colocate_with(self._op_to_colocate_with):
self.has_mutated_graph.set()
self.should_continue.wait()
self.should_continue.clear()
g.create_op(
"FloatOutput", [], [dtypes.float32],
name="FloatOutput_{}".format(self._replica_id))
g = ops.Graph()
ops_to_colocate_with = []
for i in range(3):
with g.device("/job:worker/replica:{}".format(i)):
ops_to_colocate_with.append(
g.create_op(
"FloatOutput", [], [dtypes.float32],
name="ColocateWithMe_{}".format(i)))
    # If `switch_to_thread_local` isn't called, then `device` and `attr`
    # values for the ops below are not deterministic.
g.switch_to_thread_local()
threads = [
ColocatingThread(g, i, ops_to_colocate_with[i]) for i in range(3)
]
for t in threads:
t.start()
t.has_mutated_graph.wait()
t.has_mutated_graph.clear()
for t in threads:
t.should_continue.set()
t.join()
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "ColocateWithMe_0" op: "FloatOutput"
device: "/job:worker/replica:0" }
node { name: "ColocateWithMe_1" op: "FloatOutput"
device: "/job:worker/replica:1" }
node { name: "ColocateWithMe_2" op: "FloatOutput"
device: "/job:worker/replica:2" }
node { name: "FloatOutput_0" op: "FloatOutput"
device: "/job:worker/replica:0"
attr { key: "_class"
value { list {
s: "loc:@ColocateWithMe_0"}}}}
node { name: "FloatOutput_1" op: "FloatOutput"
device: "/job:worker/replica:1"
attr { key: "_class"
value { list {
s: "loc:@ColocateWithMe_1"}}}}
node { name: "FloatOutput_2" op: "FloatOutput"
device: "/job:worker/replica:2"
attr { key: "_class"
value { list {
s: "loc:@ColocateWithMe_2"}}}}
""", gd)
def testControlDependencies(self):
class DependingThread(self.TestThread):
def __init__(self, graph, replica_id, dependency_op):
super(DependingThread, self).__init__(graph, replica_id)
self._dependency_op = dependency_op
def run(self):
with g.control_dependencies([self._dependency_op]):
self.has_mutated_graph.set()
self.should_continue.wait()
self.should_continue.clear()
g.create_op(
"FloatOutput", [], [dtypes.float32],
name="FloatOutput_{}".format(self._replica_id))
g = ops.Graph()
dependency_ops = []
for i in range(3):
dependency_ops.append(
g.create_op(
"FloatOutput", [], [dtypes.float32],
name="ColocateWithMe_{}".format(i)))
    # If `switch_to_thread_local` isn't called, then `input` values for the
    # ops below are not deterministic.
g.switch_to_thread_local()
threads = [DependingThread(g, i, dependency_ops[i]) for i in range(3)]
for t in threads:
t.start()
t.has_mutated_graph.wait()
t.has_mutated_graph.clear()
for t in threads:
t.should_continue.set()
t.join()
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "ColocateWithMe_0" op: "FloatOutput" }
node { name: "ColocateWithMe_1" op: "FloatOutput" }
node { name: "ColocateWithMe_2" op: "FloatOutput" }
node { name: "FloatOutput_0" op: "FloatOutput"
input: "^ColocateWithMe_0" }
node { name: "FloatOutput_1" op: "FloatOutput"
input: "^ColocateWithMe_1" }
node { name: "FloatOutput_2" op: "FloatOutput"
input: "^ColocateWithMe_2" }
""", gd)
def testNameStack(self):
class NameSettingThread(self.TestThread):
def run(self):
with g.name_scope("foo"):
op1 = g.create_op("FloatOutput", [], [dtypes.float32])
self.has_mutated_graph.set()
self.should_continue.wait()
self.should_continue.clear()
op2 = g.create_op("FloatOutput", [], [dtypes.float32])
self.result = (op1, op2)
g = ops.Graph()
threads = [NameSettingThread(g, i) for i in range(3)]
for t in threads:
t.start()
t.has_mutated_graph.wait()
t.has_mutated_graph.clear()
for t in threads:
t.should_continue.set()
t.join()
suffixes = ["", "_1", "_2"]
for t, s in zip(threads, suffixes):
self.assertEqual("foo" + s + "/FloatOutput", t.result[0].name)
self.assertEqual("foo" + s + "/FloatOutput_1", t.result[1].name)
class ObjectWithName(object):
def __init__(self, name):
self._name = name
@property
def name(self):
return self._name
class CollectionTest(test_util.TensorFlowTestCase):
def test_get_collections(self):
g = ops.Graph()
self.assertSequenceEqual(g.collections, [])
g.add_to_collection("key", 12)
g.add_to_collection("key", 15)
self.assertSequenceEqual(g.collections, ["key"])
g.add_to_collection("other", "foo")
self.assertSequenceEqual(sorted(g.collections), ["key", "other"])
self.assertSequenceEqual(
sorted(g.get_all_collection_keys()), ["key", "other"])
def test_add_to_collection(self):
g = ops.Graph()
g.add_to_collection("key", 12)
g.add_to_collection("other", "foo")
g.add_to_collection("key", 34)
    # Note that only blank1 is returned by the scope-filtered
    # get_collection("blah", ...) queries below.
g.add_to_collection("blah", 27)
blank1 = ObjectWithName("prefix/foo")
g.add_to_collection("blah", blank1)
blank2 = ObjectWithName("junk/foo")
g.add_to_collection("blah", blank2)
self.assertEqual([12, 34], g.get_collection("key"))
self.assertEqual([], g.get_collection("nothing"))
self.assertEqual([27, blank1, blank2], g.get_collection("blah"))
self.assertEqual([blank1], g.get_collection("blah", "prefix"))
self.assertEqual([blank1], g.get_collection("blah", ".*x"))
# Make sure that get_collection() returns a first-level
# copy of the collection, while get_collection_ref() returns
# the original list.
other_collection_snapshot = g.get_collection("other")
other_collection_ref = g.get_collection_ref("other")
self.assertEqual(["foo"], other_collection_snapshot)
self.assertEqual(["foo"], other_collection_ref)
g.add_to_collection("other", "bar")
self.assertEqual(["foo"], other_collection_snapshot)
self.assertEqual(["foo", "bar"], other_collection_ref)
self.assertEqual(["foo", "bar"], g.get_collection("other"))
self.assertTrue(other_collection_ref is g.get_collection_ref("other"))
# Verify that getting an empty collection ref returns a modifiable list.
empty_coll_ref = g.get_collection_ref("empty")
self.assertEqual([], empty_coll_ref)
empty_coll = g.get_collection("empty")
self.assertEqual([], empty_coll)
self.assertFalse(empty_coll is empty_coll_ref)
empty_coll_ref2 = g.get_collection_ref("empty")
self.assertTrue(empty_coll_ref2 is empty_coll_ref)
# Add to the collection.
empty_coll_ref.append("something")
self.assertEqual(["something"], empty_coll_ref)
self.assertEqual(["something"], empty_coll_ref2)
self.assertEqual([], empty_coll)
self.assertEqual(["something"], g.get_collection("empty"))
empty_coll_ref3 = g.get_collection_ref("empty")
self.assertTrue(empty_coll_ref3 is empty_coll_ref)
def test_add_to_collections_uniquify(self):
g = ops.Graph()
g.add_to_collections([1, 2, 1], "key")
# Make sure "key" is not added twice
self.assertEqual(["key"], g.get_collection(1))
def test_add_to_collections_from_list(self):
g = ops.Graph()
g.add_to_collections(["abc", "123"], "key")
self.assertEqual(["key"], g.get_collection("abc"))
self.assertEqual(["key"], g.get_collection("123"))
def test_add_to_collections_from_tuple(self):
g = ops.Graph()
g.add_to_collections(("abc", "123"), "key")
self.assertEqual(["key"], g.get_collection("abc"))
self.assertEqual(["key"], g.get_collection("123"))
def test_add_to_collections_from_generator(self):
g = ops.Graph()
def generator():
yield "abc"
yield "123"
g.add_to_collections(generator(), "key")
self.assertEqual(["key"], g.get_collection("abc"))
self.assertEqual(["key"], g.get_collection("123"))
def test_add_to_collections_from_set(self):
g = ops.Graph()
g.add_to_collections(set(["abc", "123"]), "key")
self.assertEqual(["key"], g.get_collection("abc"))
self.assertEqual(["key"], g.get_collection("123"))
def test_add_to_collections_from_string(self):
g = ops.Graph()
g.add_to_collections("abc", "key")
self.assertEqual(["key"], g.get_collection("abc"))
def test_default_graph(self):
with ops.Graph().as_default():
ops.add_to_collection("key", 90)
ops.add_to_collection("key", 100)
# Collections are ordered.
self.assertEqual([90, 100], ops.get_collection("key"))
def test_defun(self):
with context.eager_mode():
@eager_function.defun
def defun():
ops.add_to_collection("int", 1)
ops.add_to_collection("tensor", constant_op.constant(2))
@eager_function.defun
def inner_defun():
self.assertEqual(ops.get_collection("int"), [1])
three = ops.get_collection("tensor")[0] + ops.get_collection("int")[0]
ops.add_to_collection("int", 2)
self.assertEqual(ops.get_collection("int"), [1, 2])
ops.add_to_collection("foo", "bar")
self.assertEqual(ops.get_collection("foo"), ["bar"])
return three
self.assertEqual(ops.get_collection("int"), [1])
three = inner_defun()
self.assertEqual(ops.get_collection("int"), [1])
self.assertEqual(ops.get_collection("foo"), [])
return three
three = defun()
self.assertEqual(three.numpy(), 3)
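# Module-level gradient registrations used by RegistrationTest below.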
ops.NotDifferentiable("FloatOutput")
@ops.RegisterGradient("CopyOp")
def _CopyGrad(op, x_grad): # pylint: disable=invalid-name
_ = op
return x_grad
@ops.RegisterGradient("copy_override")
def _CopyOverrideGrad(op, x_grad): # pylint: disable=invalid-name
_ = op
return x_grad
class RegistrationTest(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
def testRegisterGradients(self):
x = test_ops.float_output()
y = test_ops.copy_op(x)
fn = ops.get_gradient_function(y.op)
self.assertEqual(_CopyGrad, fn)
def testOverrideGradients(self):
g = ops.Graph()
with g.as_default():
x = test_ops.float_output()
with g.gradient_override_map({"CopyOp": "copy_override"}):
y = test_ops.copy_op(x)
fn = ops.get_gradient_function(y.op)
self.assertEqual(_CopyOverrideGrad, fn)
def testNonExistentOverride(self):
g = ops.Graph()
with g.as_default():
x = test_ops.float_output()
with g.gradient_override_map({"CopyOp": "unknown_override"}):
y = test_ops.copy_op(x)
with self.assertRaisesRegex(LookupError, "unknown_override"):
ops.get_gradient_function(y.op)
class ComparisonTest(test_util.TensorFlowTestCase):
def testMembershipAllowed(self):
g = ops.Graph()
t1 = _apply_op(g, "FloatOutput", [], [dtypes.float32], name="myop1")
t2 = _apply_op(g, "FloatOutput", [], [dtypes.float32], name="myop2")
self.assertTrue(isinstance(t1, ops.Tensor))
self.assertTrue(isinstance(t2, ops.Tensor))
self.assertTrue(t1 in [t1])
self.assertTrue(t1 not in [t2])
class ControlDependenciesTest(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
def testBasic(self):
g = ops.Graph()
with g.as_default():
# Creating unregistered ops with _apply_op() doesn't work with the C API
# TODO(skyewm): address this more consistently. Possible solutions are
# to use registered ops in all tests, create a way to register ops in
# Python tests, or conditionally disable the op registration check in
# the C API.
a = constant_op.constant(1.0)
b = constant_op.constant(1.0)
with g.control_dependencies([a]):
c = constant_op.constant(1.0)
d = array_ops.identity(b)
e = array_ops.identity(c)
self.assertEqual(c.op.control_inputs, [a.op])
self.assertEqual(d.op.control_inputs, [a.op])
      # e is dominated by c: its data edge to c already carries the control
      # dependency on a, so no redundant control input is added to e.
self.assertEqual(e.op.control_inputs, [])
@test_util.run_in_graph_and_eager_modes
def testEager(self):
def future():
future.calls += 1
return constant_op.constant(2.0)
future.calls = 0
if context.executing_eagerly():
a = constant_op.constant(1.0)
b = future
with ops.control_dependencies([a, b]):
c = constant_op.constant(3.0)
self.assertEqual(future.calls, 1)
else:
g = ops.Graph()
with g.as_default():
a = constant_op.constant(1.0)
b = future()
with g.control_dependencies([a, b]):
c = constant_op.constant(3.0)
self.assertEqual(c.op.control_inputs, [a.op, b.op])
self.assertEqual(future.calls, 1)
def testBasicWithConversion(self):
g = ops.Graph()
a = _apply_op(g, "FloatOutput", [], [dtypes.float32])
class ConvertibleObj(object):
def _as_graph_element(self):
return a
with g.control_dependencies([ConvertibleObj()]):
c = _apply_op(g, "FloatOutput", [], [dtypes.float32])
self.assertEqual(c.op.control_inputs, [a.op])
def testNested(self):
g = ops.Graph()
a_1 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
a_2 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
a_3 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
a_4 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
with g.control_dependencies([a_1, a_2, a_3, a_4]):
b_1 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
with g.control_dependencies([a_1]):
with g.control_dependencies([a_2]):
with g.control_dependencies([a_3]):
with g.control_dependencies([a_4]):
b_2 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
self.assertItemsEqual([a_1.op, a_2.op, a_3.op, a_4.op],
b_1.op.control_inputs)
self.assertItemsEqual(b_1.op.control_inputs, b_2.op.control_inputs)
def testClear(self):
g = ops.Graph()
a_1 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
a_2 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
a_3 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
a_4 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
with g.control_dependencies([a_1]):
with g.control_dependencies([a_2]):
with g.control_dependencies(None):
with g.control_dependencies([a_3]):
with g.control_dependencies([a_4]):
# deps [a_3, a_4]
b_3_4 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
# deps = [a_3]
b_3 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
# deps back to None
b_none = _apply_op(g, "FloatOutput", [], [dtypes.float32])
# deps back to [a_1, a_2]
b_1_2 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
# deps back to [a_1]
b_1 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
with g.control_dependencies(None):
# deps are None again
b_none2 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
self.assertItemsEqual([a_3.op, a_4.op], b_3_4.op.control_inputs)
self.assertItemsEqual([a_3.op], b_3.op.control_inputs)
self.assertItemsEqual([], b_none.op.control_inputs)
self.assertItemsEqual([a_1.op, a_2.op], b_1_2.op.control_inputs)
self.assertItemsEqual([a_1.op], b_1.op.control_inputs)
self.assertItemsEqual([], b_none2.op.control_inputs)
def testComplex(self):
g = ops.Graph()
# Usage pattern:
# * Nodes a_i are constants defined at the outermost scope, and are used
# as control inputs for the ith nested scope.
# * Nodes b_i are defined as Mul(a_3, a_4) at each scope.
# * Nodes c_i are defined as Mul(a_1, b_1) at each scope.
# * Nodes d_i are defined as Mul(b_i, c_i) at each scope.
# * Nodes e_i are defined as Mul(e_i-1, e_i-1) at each scope i > 1.
a_1 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
a_2 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
a_3 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
a_4 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
with g.control_dependencies([a_1]):
b_1 = _apply_op(g, "TwoFloatInputsFloatOutput", [a_3, a_4],
[dtypes.float32])
c_1 = _apply_op(g, "TwoFloatInputsFloatOutput", [a_1, b_1],
[dtypes.float32])
d_1 = _apply_op(g, "TwoFloatInputsFloatOutput", [b_1, c_1],
[dtypes.float32])
e_1 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
with g.control_dependencies([a_2]):
b_2 = _apply_op(g, "TwoFloatInputsFloatOutput", [a_3, a_4],
[dtypes.float32])
c_2 = _apply_op(g, "TwoFloatInputsFloatOutput", [a_1, b_1],
[dtypes.float32])
d_2 = _apply_op(g, "TwoFloatInputsFloatOutput", [b_2, c_2],
[dtypes.float32])
e_2 = _apply_op(g, "TwoFloatInputsFloatOutput", [e_1, e_1],
[dtypes.float32])
with g.control_dependencies([a_3]):
b_3 = _apply_op(g, "TwoFloatInputsFloatOutput", [a_3, a_4],
[dtypes.float32])
c_3 = _apply_op(g, "TwoFloatInputsFloatOutput", [a_1, b_1],
[dtypes.float32])
d_3 = _apply_op(g, "TwoFloatInputsFloatOutput", [b_3, c_3],
[dtypes.float32])
e_3 = _apply_op(g, "TwoFloatInputsFloatOutput", [e_2, e_2],
[dtypes.float32])
with g.control_dependencies([a_4]):
b_4 = _apply_op(g, "TwoFloatInputsFloatOutput", [a_3, a_4],
[dtypes.float32])
c_4 = _apply_op(g, "TwoFloatInputsFloatOutput", [a_1, b_1],
[dtypes.float32])
d_4 = _apply_op(g, "TwoFloatInputsFloatOutput", [b_4, c_4],
[dtypes.float32])
e_4 = _apply_op(g, "TwoFloatInputsFloatOutput", [e_3, e_3],
[dtypes.float32])
self.assertItemsEqual([a_1.op], b_1.op.control_inputs)
self.assertItemsEqual([a_1.op, a_2.op], b_2.op.control_inputs)
self.assertItemsEqual([a_1.op, a_2.op], b_3.op.control_inputs)
self.assertItemsEqual([a_1.op, a_2.op], b_4.op.control_inputs)
self.assertItemsEqual([], c_1.op.control_inputs)
self.assertItemsEqual([a_2.op], c_2.op.control_inputs)
self.assertItemsEqual([a_2.op, a_3.op], c_3.op.control_inputs)
self.assertItemsEqual([a_2.op, a_3.op, a_4.op], c_4.op.control_inputs)
self.assertItemsEqual([], d_1.op.control_inputs)
self.assertItemsEqual([], d_2.op.control_inputs)
self.assertItemsEqual([], d_3.op.control_inputs)
self.assertItemsEqual([], d_4.op.control_inputs)
self.assertItemsEqual([a_1.op], e_1.op.control_inputs)
self.assertItemsEqual([a_2.op], e_2.op.control_inputs)
self.assertItemsEqual([a_3.op], e_3.op.control_inputs)
self.assertItemsEqual([a_4.op], e_4.op.control_inputs)
def testRepeatedDependency(self):
g = ops.Graph()
a = g.create_op("TwoFloatOutputs", [], [dtypes.float32, dtypes.float32])
a_0, a_1 = a.outputs
with g.control_dependencies([a_0]):
b = _apply_op(g, "FloatOutput", [], [dtypes.float32])
with g.control_dependencies([a_1]):
c = _apply_op(g, "FloatOutput", [], [dtypes.float32])
self.assertEqual(b.op.control_inputs, [a])
self.assertEqual(c.op.control_inputs, [a])
def testNoControlDependencyWithDataDependency(self):
g = ops.Graph()
a = _apply_op(g, "FloatOutput", [], [dtypes.float32])
with g.control_dependencies([a]):
b = _apply_op(g, "Identity", [a], [dtypes.float32])
self.assertEqual(b.op.control_inputs, [])
class OpScopeTest(test_util.TensorFlowTestCase):
@test_util.run_in_graph_and_eager_modes
def testNames(self):
with ops.name_scope("foo", skip_on_eager=False) as foo:
self.assertEqual("foo/", foo)
with ops.name_scope("foo2", skip_on_eager=False) as foo2:
self.assertEqual("foo/foo2/", foo2)
with ops.name_scope(None, skip_on_eager=False) as empty1:
self.assertEqual("", empty1)
with ops.name_scope("foo3", skip_on_eager=False) as foo3:
self.assertEqual("foo3/", foo3)
with ops.name_scope("", skip_on_eager=False) as empty2:
self.assertEqual("", empty2)
with ops.name_scope("foo/", skip_on_eager=False) as outer_foo:
self.assertEqual("foo/", outer_foo)
with ops.name_scope("", skip_on_eager=False) as empty3:
self.assertEqual("", empty3)
with ops.name_scope("foo4", skip_on_eager=False) as foo4:
self.assertEqual("foo/foo4/", foo4)
with ops.name_scope("foo5//", skip_on_eager=False) as foo5:
self.assertEqual("foo5//", foo5)
with ops.name_scope("foo6", skip_on_eager=False) as foo6:
self.assertEqual("foo5//foo6/", foo6)
with ops.name_scope("/", skip_on_eager=False) as foo7:
self.assertEqual("/", foo7)
with ops.name_scope("//", skip_on_eager=False) as foo8:
self.assertEqual("//", foo8)
with ops.name_scope("a//b/c", skip_on_eager=False) as foo9:
self.assertEqual("foo/a//b/c/", foo9)
with ops.name_scope("a//b/c", skip_on_eager=False) as foo10:
self.assertEqual("a//b/c/", foo10)
@test_util.run_in_graph_and_eager_modes
def testEagerDefaultScopeName(self):
with ops.name_scope(None, "default", skip_on_eager=False) as scope:
self.assertEqual(scope, "default/")
with ops.name_scope(None, "default2", skip_on_eager=False) as scope2:
self.assertEqual(scope2, "default/default2/")
@test_util.run_in_graph_and_eager_modes
def testNameScopeV2IsReEntrant(self):
foo = ops.name_scope_v2("foo")
bar = ops.name_scope_v2("bar")
with foo as scope_name:
self.assertEqual("foo/", scope_name)
with foo as scope_name:
self.assertEqual("foo/foo/", scope_name)
with bar as scope_name:
self.assertEqual("foo/bar/", scope_name)
with foo as scope_name:
self.assertEqual("foo/bar/foo/", scope_name)
with bar as scope_name:
self.assertEqual("bar/", scope_name)
@test_util.run_deprecated_v1
def testNoScopeName(self):
g0 = ops.Graph()
values = [
g0.create_op("A", [], [dtypes.float32]),
g0.create_op("B", [], [dtypes.float32])
]
with self.assertRaises(ValueError):
with ops.name_scope(None, values=values):
pass
with self.assertRaises(ValueError):
with ops.name_scope(None, None, values):
pass
@test_util.run_deprecated_v1
def testEmptyScopeName(self):
g0 = ops.Graph()
a = g0.create_op("A", [], [dtypes.float32])
b = g0.create_op("B", [], [dtypes.float32])
with ops.name_scope("", values=[a, b]) as scope:
self.assertEqual("", scope)
self.assertEqual(g0, ops.get_default_graph())
with ops.name_scope("", "my_default_scope", [a, b]) as scope:
self.assertEqual("", scope)
self.assertEqual(g0, ops.get_default_graph())
@test_util.run_deprecated_v1
def testDefaultScopeName(self):
g0 = ops.Graph()
a = g0.create_op("A", [], [dtypes.float32])
b = g0.create_op("B", [], [dtypes.float32])
scope_name = "my_scope"
default_scope_name = "my_default_scope"
with ops.name_scope(scope_name, default_scope_name, [a, b]) as scope:
self.assertEqual("%s/" % scope_name, scope)
self.assertEqual(g0, ops.get_default_graph())
with ops.name_scope(None, default_scope_name, [a, b]) as scope:
self.assertEqual("%s/" % default_scope_name, scope)
self.assertEqual(g0, ops.get_default_graph())
with self.assertRaises(TypeError):
with ops.name_scope(scope_name, [a, b]):
pass
def _testGraphElements(self, graph_elements):
scope_name = "my_scope"
with ops.name_scope(scope_name, values=graph_elements) as scope:
self.assertEqual("%s/" % scope_name, scope)
self.assertEqual(graph_elements[0].graph, ops.get_default_graph())
g1 = ops.Graph()
a = g1.create_op("A", [], [dtypes.float32])
with self.assertRaises(ValueError):
with ops.name_scope(scope_name, values=graph_elements + [a]):
pass
@test_util.run_in_graph_and_eager_modes
def testGetCurrentNameScope(self):
self.assertEqual(ops.get_current_name_scope(), "")
with ops.name_scope_v2("aaa"):
self.assertEqual(ops.get_current_name_scope(), "aaa")
with ops.name_scope_v2("bbb"):
self.assertEqual(ops.get_current_name_scope(), "aaa/bbb")
self.assertEqual(ops.get_current_name_scope(), "aaa")
self.assertEqual(ops.get_current_name_scope(), "")
@test_util.run_deprecated_v1
def testTensor(self):
g0 = ops.Graph()
a = g0.create_op("A", [], [dtypes.float32])
b = g0.create_op("B", [], [dtypes.float32])
self._testGraphElements([a, b])
@test_util.run_deprecated_v1
def testSparseTensor(self):
g0 = ops.Graph()
a = g0.create_op("A", [], [dtypes.float32])
b = g0.create_op("B", [], [dtypes.float32])
sparse = sparse_tensor.SparseTensor(
_apply_op(g0, "Int64Output", [], [dtypes.int64]),
_apply_op(g0, "FloatOutput", [], [dtypes.float32]),
_apply_op(g0, "Int64Output", [], [dtypes.int64]))
self._testGraphElements([a, sparse, b])
@test_util.run_deprecated_v1
def testVariable(self):
g0 = ops.Graph()
with g0.as_default():
variable = variables.Variable([1.0])
a = g0.create_op("A", [], [dtypes.float32])
b = g0.create_op("B", [], [dtypes.float32])
self._testGraphElements([a, variable, b])
class InitScopeTest(test_util.TensorFlowTestCase):
def testClearsControlDependencies(self):
g = ops.Graph()
a_1 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
a_2 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
a_3 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
a_4 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
with g.as_default():
with g.control_dependencies([a_1]):
with g.control_dependencies([a_2]):
with ops.init_scope():
with g.control_dependencies([a_3]):
with g.control_dependencies([a_4]):
# deps [a_3, a_4]
b_3_4 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
# deps = [a_3]
b_3 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
# deps back to None
b_none = _apply_op(g, "FloatOutput", [], [dtypes.float32])
# deps back to [a_1, a_2]
b_1_2 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
# deps back to [a_1]
b_1 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
with ops.init_scope():
# deps are None again
b_none2 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
self.assertItemsEqual([a_3.op, a_4.op], b_3_4.op.control_inputs)
self.assertItemsEqual([a_3.op], b_3.op.control_inputs)
self.assertItemsEqual([], b_none.op.control_inputs)
self.assertItemsEqual([a_1.op, a_2.op], b_1_2.op.control_inputs)
self.assertItemsEqual([a_1.op], b_1.op.control_inputs)
self.assertItemsEqual([], b_none2.op.control_inputs)
def testLiftsOpsFromFunctions(self):
g0 = ops.Graph()
g1 = ops.Graph()
g1._building_function = True # pylint: disable=protected-access
g2 = ops.Graph()
g2._building_function = True # pylint: disable=protected-access
with g0.as_default():
with g1.as_default():
with g2.as_default():
with ops.init_scope():
_ = constant_op.constant(1.0)
self.assertLen(g2.get_operations(), 0)
self.assertLen(g1.get_operations(), 0)
self.assertLen(g0.get_operations(), 1)
def testPreservesDevices(self):
g0 = ops.Graph()
with g0.as_default(), ops.device("CPU:0"):
g1 = ops.Graph()
g1._building_function = True # pylint: disable=protected-access
with g1.as_default():
with ops.device("GPU:0"):
with ops.init_scope():
# init_scope should preserve device set under `g1`.
on_gpu = constant_op.constant(1.0)
self.assertEqual(on_gpu.device, "/device:GPU:0")
still_on_gpu = constant_op.constant(1.0)
self.assertEqual(still_on_gpu.device, "/device:GPU:0")
blank = constant_op.constant(1.0)
self.assertEqual(blank.device, "")
with ops.init_scope():
now_on_cpu = constant_op.constant(1.0)
self.assertEqual(now_on_cpu.device, "/device:CPU:0")
on_cpu = constant_op.constant(1.0)
self.assertEqual(on_cpu.device, "/device:CPU:0")
def testComposes(self):
g0 = ops.Graph()
g1 = ops.Graph()
g1._building_function = True # pylint: disable=protected-access
g2 = ops.Graph()
g2._building_function = True # pylint: disable=protected-access
g3 = ops.Graph()
g3._building_function = False # pylint: disable=protected-access
with g0.as_default():
with g1.as_default():
with ops.init_scope():
# This op should be lifted into g0.
_ = constant_op.constant(1.0)
self.assertIs(g0, ops.get_default_graph())
self.assertLen(g2.get_operations(), 0)
self.assertLen(g1.get_operations(), 0)
self.assertLen(g0.get_operations(), 1)
with g2.as_default():
with ops.init_scope():
# This op should be lifted into g0.
_ = constant_op.constant(1.0)
self.assertIs(g0, ops.get_default_graph())
with g3.as_default():
with ops.init_scope():
# This op should be lifted into g3, because g3 is not building a
# function.
_ = constant_op.constant(1.0)
self.assertIs(g3, ops.get_default_graph())
self.assertLen(g3.get_operations(), 1)
self.assertLen(g2.get_operations(), 0)
self.assertLen(g1.get_operations(), 0)
self.assertLen(g0.get_operations(), 2)
def testEscapesToEagerContext(self):
g = ops.Graph()
g._building_function = True # pylint: disable=protected-access
with context.eager_mode():
with context.graph_mode():
with g.as_default():
with ops.init_scope():
# Because g is building a function, init_scope should
# escape out to the eager context.
self.assertTrue(context.executing_eagerly())
# g should be reinstated as the default graph, and the
# graph context should be re-entered.
self.assertIs(g, ops.get_default_graph())
self.assertFalse(context.executing_eagerly())
def testStaysInEagerWhenOnlyEagerContextActive(self):
with context.eager_mode():
with ops.init_scope():
self.assertTrue(context.eager_mode())
self.assertTrue(context.eager_mode())
def testEscapesDefunWhenInEagerMode(self):
def function_with_variables():
with ops.init_scope():
self.v = resource_variable_ops.ResourceVariable(3)
return self.v.assign_add(1)
with context.eager_mode():
# Each invocation of function_with_variables recreates a variable.
self.assertEqual(4, int(function_with_variables()))
self.assertEqual(4, int(function_with_variables()))
compiled = eager_function.defun(function_with_variables)
# The init_scope in function_with_variables lifts the variable out
# of the graph function constructed by defun; hence,
# compiled now appears to be stateful.
self.assertEqual(4, int(compiled()))
self.assertEqual(5, int(compiled()))
def testEscapesDefunWhenInGraphMode(self):
def function_with_variables(name):
with ops.init_scope():
_ = variable_scope.get_variable(name, shape=(1,))
g = ops.Graph()
with g.as_default():
with self.cached_session():
# First ensure that graphs that are not building functions are
# not escaped.
function_with_variables("foo")
with self.assertRaisesRegex(ValueError,
r"Variable foo already exists.*"):
# This will fail because reuse is not set to True.
function_with_variables("foo")
compiled = eager_function.defun(function_with_variables)
compiled("bar")
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)), 2)
# The second call to `compiled` should not create variables: the
# init_scope has lifted the variable creation code out of the defun.
compiled("bar")
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)), 2)
def testEscapesNestedDefun(self):
def inner_function():
with ops.init_scope():
self.v = resource_variable_ops.ResourceVariable(1)
return self.v.assign_add(2)
def outer_function(inner=None):
with ops.init_scope():
self.v0 = resource_variable_ops.ResourceVariable(0)
return self.v0.assign_add(1) + inner()
with context.eager_mode():
# Each invocation of outer_function recreates variables.
self.assertEqual(4, int(outer_function(inner=inner_function)))
self.assertEqual(4, int(outer_function(inner=inner_function)))
compiled_inner = eager_function.defun(inner_function)
compiled_outer = eager_function.defun(outer_function)
# The init_scope lifts variables out of the graph functions
# constructed by defun; hence, compiled_outer should now appear to be
# stateful.
self.assertEqual(4, int(compiled_outer(inner=compiled_inner)))
self.assertEqual(7, int(compiled_outer(inner=compiled_inner)))
@test_util.run_v1_only("b/120545219")
def testFallsBackToGlobalGraphWhenAllGraphsAreBuildingFunctions(self):
with context.graph_mode():
ops.reset_default_graph()
# This doesn't push anything onto the graph stack, but it does
# set the stack's global graph.
global_graph = ops.get_default_graph()
fn_graph = ops.Graph()
# pylint: disable=protected-access
fn_graph._building_function = True
self.assertLen(ops._default_graph_stack.stack, 0)
with fn_graph.as_default():
self.assertLen(ops._default_graph_stack.stack, 1)
with ops.init_scope():
self.assertGreater(len(ops._default_graph_stack.stack), 1)
dummy = constant_op.constant(1.0)
self.assertLen(ops._default_graph_stack.stack, 1)
# Note that the global graph is _not_ on the graph stack.
self.assertLen(ops._default_graph_stack.stack, 0)
# Ensure that `dummy` was added to the global graph.
self.assertEqual(global_graph, dummy.graph)
# pylint: enable=protected-access
def testInstallsDefaultGraphWhenGraphStackIsEmptyInGraphMode(self):
with context.graph_mode():
# pylint: disable=protected-access
self.assertLen(ops._default_graph_stack.stack, 0)
with ops.init_scope():
self.assertGreater(len(ops._default_graph_stack.stack), 0)
self.assertLen(ops._default_graph_stack.stack, 0)
# pylint: enable=protected-access
def testPreservesNameScopeInGraphConstruction(self):
with ops.Graph().as_default():
function_graph = ops.Graph()
with function_graph.as_default():
with ops.name_scope("inner", skip_on_eager=False), ops.init_scope():
self.assertEqual(ops.get_name_scope(), "inner")
self.assertEqual(ops.get_name_scope(), "")
def testEnteringGraphFromEagerIsSticky(self):
with context.eager_mode():
g = ops.Graph()
with g.as_default():
with ops.init_scope():
self.assertFalse(context.executing_eagerly())
self.assertEqual(g, ops.get_default_graph())
def testMixGraphEager(self):
with context.eager_mode():
c = constant_op.constant(1.0)
with ops.Graph().as_default():
with self.assertRaisesRegex(RuntimeError,
"Attempting to capture an EagerTensor"):
math_ops.add(c, c)
c2 = constant_op.constant(2.0)
with self.assertRaisesRegex(TypeError, "Graph tensors"):
math_ops.add(c2, c2)
def testPreservesNameScopeInEagerExecution(self):
with context.eager_mode():
def foo():
with ops.name_scope("inner", skip_on_eager=False), ops.init_scope():
if context.executing_eagerly():
# A trailing slash is always appended when eager execution is
# enabled.
self.assertEqual(context.context().scope_name, "inner/")
else:
self.assertEqual(ops.get_name_scope(), "inner")
foo()
self.assertEqual(ops.get_name_scope(), "")
foo_compiled = eager_function.defun(foo)
foo_compiled()
self.assertEqual(ops.get_name_scope(), "")
def testExecutingEagerlyOutsideFunctions(self):
@def_function.function
def f():
return ops.executing_eagerly_outside_functions()
with context.graph_mode():
self.assertFalse(ops.executing_eagerly_outside_functions())
with session.Session():
# Need self.evaluate for these as the return type of functions is
# tensors.
self.assertFalse(self.evaluate(f()))
with context.eager_mode():
self.assertTrue(ops.executing_eagerly_outside_functions())
self.assertTrue(f())
with ops.Graph().as_default():
self.assertFalse(ops.executing_eagerly_outside_functions())
with session.Session():
self.assertFalse(self.evaluate(f()))
class GraphTest(test_util.TensorFlowTestCase):
def setUp(self):
ops.reset_default_graph()
def _AssertDefault(self, expected):
self.assertIs(expected, ops.get_default_graph())
def testResetDefaultGraphNesting(self):
g0 = ops.Graph()
with self.assertRaises(AssertionError):
with g0.as_default():
ops.reset_default_graph()
def testGraphContextManagerCancelsEager(self):
with context.eager_mode():
with ops.Graph().as_default():
self.assertFalse(context.executing_eagerly())
def testGraphContextManager(self):
g0 = ops.Graph()
with g0.as_default() as g1:
self.assertIs(g0, g1)
def testDefaultGraph(self):
orig = ops.get_default_graph()
self.assertFalse(ops.has_default_graph())
self._AssertDefault(orig)
g0 = ops.Graph()
self.assertFalse(ops.has_default_graph())
self._AssertDefault(orig)
context_manager_0 = g0.as_default()
self.assertFalse(ops.has_default_graph())
self._AssertDefault(orig)
with context_manager_0 as g0:
self._AssertDefault(g0)
with ops.Graph().as_default() as g1:
self.assertTrue(ops.has_default_graph())
self._AssertDefault(g1)
self._AssertDefault(g0)
self._AssertDefault(orig)
self.assertFalse(ops.has_default_graph())
def testPreventFeeding(self):
g = ops.Graph()
a = constant_op.constant(2.0)
self.assertTrue(g.is_feedable(a))
g.prevent_feeding(a)
self.assertFalse(g.is_feedable(a))
@test_util.run_deprecated_v1
def testPreventFetching(self):
g = ops.Graph()
a = constant_op.constant(2.0)
self.assertTrue(g.is_fetchable(a))
g.prevent_fetching(a.op)
self.assertFalse(g.is_fetchable(a))
def testAsGraphElementConversions(self):
class ConvertibleObj(object):
def _as_graph_element(self):
return "FloatOutput:0"
class NonConvertibleObj(object):
pass
g = ops.Graph()
a = _apply_op(g, "FloatOutput", [], [dtypes.float32])
self.assertEqual(a, g.as_graph_element(ConvertibleObj()))
with self.assertRaises(TypeError):
g.as_graph_element(NonConvertibleObj())
# Regression test against creating custom __del__ functions in classes
# involved in cyclic references, e.g. Graph and Operation. (Python won't gc
# cycles that require calling a __del__ method, because the __del__ method can
# theoretically increase the object's refcount to "save" it from gc, and any
  # already-deleted objects in the cycle would have to be restored.)
def testGarbageCollected(self):
# Create a graph we can delete and a weak reference to monitor if it's gc'd
g = ops.Graph()
g_ref = weakref.ref(g)
# Create some ops
with g.as_default():
a = constant_op.constant(2.0)
b = constant_op.constant(3.0)
c = math_ops.add(a, b)
# Create a session we can delete
with session.Session(graph=g) as sess:
self.evaluate(c)
# Delete all references and trigger gc
del g
del a
del b
del c
del sess
gc.collect()
self.assertIsNone(g_ref())
def testRunnableAfterInvalidShape(self):
with ops.Graph().as_default():
with self.assertRaises(ValueError):
math_ops.add([1, 2], [1, 2, 3])
a = constant_op.constant(1)
with session.Session() as sess:
self.evaluate(a)
def testRunnableAfterInvalidShapeWithKernelLabelMap(self):
g = ops.Graph()
with g.as_default():
with g._kernel_label_map({"KernelLabelRequired": "overload_1"}):
with self.assertRaises(ValueError):
test_ops.kernel_label_required(1)
a = constant_op.constant(1)
with session.Session() as sess:
self.evaluate(a)
class AttrScopeTest(test_util.TensorFlowTestCase):
def _get_test_attrs(self):
x = control_flow_ops.no_op()
try:
a = compat.as_text(x.get_attr("_A"))
except ValueError:
a = None
try:
b = compat.as_text(x.get_attr("_B"))
except ValueError:
b = None
return (a, b)
@test_util.run_deprecated_v1
def testNoLabel(self):
with self.cached_session():
self.assertAllEqual((None, None), self._get_test_attrs())
@test_util.run_deprecated_v1
def testLabelMap(self):
with self.cached_session() as sess:
a1 = self._get_test_attrs()
with sess.graph._attr_scope({
"_A": attr_value_pb2.AttrValue(s=compat.as_bytes("foo"))
}):
a2 = self._get_test_attrs()
with sess.graph._attr_scope({
"_A": None,
"_B": attr_value_pb2.AttrValue(s=compat.as_bytes("bar"))
}):
a3 = self._get_test_attrs()
with sess.graph._attr_scope({
"_A": attr_value_pb2.AttrValue(s=compat.as_bytes("baz"))
}):
a4 = self._get_test_attrs()
a5 = self._get_test_attrs()
a6 = self._get_test_attrs()
a7 = self._get_test_attrs()
self.assertAllEqual((None, None), a1)
self.assertAllEqual(("foo", None), a2)
self.assertAllEqual((None, "bar"), a3)
self.assertAllEqual(("baz", "bar"), a4)
self.assertAllEqual((None, "bar"), a5)
self.assertAllEqual(("foo", None), a6)
self.assertAllEqual((None, None), a7)
class KernelLabelTest(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
def testNoLabel(self):
with self.cached_session():
self.assertAllEqual(b"My label is: default",
test_ops.kernel_label().eval())
@test_util.run_deprecated_v1
def testLabelMap(self):
with self.cached_session() as sess:
default_1 = test_ops.kernel_label()
# pylint: disable=protected-access
with sess.graph._kernel_label_map({"KernelLabel": "overload_1"}):
overload_1_1 = test_ops.kernel_label()
with sess.graph._kernel_label_map({"KernelLabel": "overload_2"}):
overload_2 = test_ops.kernel_label()
with sess.graph._kernel_label_map({"KernelLabel": ""}):
default_2 = test_ops.kernel_label()
overload_1_2 = test_ops.kernel_label()
# pylint: enable=protected-access
default_3 = test_ops.kernel_label()
self.assertAllEqual(b"My label is: default", self.evaluate(default_1))
self.assertAllEqual(b"My label is: default", self.evaluate(default_2))
self.assertAllEqual(b"My label is: default", self.evaluate(default_3))
self.assertAllEqual(b"My label is: overload_1",
self.evaluate(overload_1_1))
self.assertAllEqual(b"My label is: overload_1",
self.evaluate(overload_1_2))
self.assertAllEqual(b"My label is: overload_2", self.evaluate(overload_2))
class AsGraphDefTest(test_util.TensorFlowTestCase):
def testGraphDefVersion(self):
"""Test that the graphdef version is plumbed through to kernels."""
with ops.Graph().as_default() as g:
version = g.graph_def_versions.producer
with self.session(graph=g):
v = test_ops.graph_def_version().eval()
self.assertEqual(version, v)
def testAddShapes(self):
with ops.Graph().as_default() as g:
t1, t2, t3, t4, t5 = _apply_op(g, "FiveFloatOutputs", [],
[dtypes.float32] * 5)
t1.set_shape(None)
t2.set_shape([])
t3.set_shape([None])
t4.set_shape([43, 37])
t5.set_shape([43, None])
b = constant_op.constant(1.0) # pylint: disable=unused-variable
gd = g.as_graph_def(add_shapes=True)
self.assertProtoEqualsVersion("""
node { name: "FiveFloatOutputs" op: "FiveFloatOutputs"
attr {
key: "_output_shapes"
value {
list {
shape { unknown_rank: true }
shape { }
shape { dim { size: -1 } }
shape { dim { size: 43 } dim { size: 37 } }
shape { dim { size: 43 } dim { size: -1 } }
}
}
}
}
node { name: "Const" op: "Const"
attr {
key: "_output_shapes"
value {
list {
shape { }
}
}
}
attr {
key: "dtype"
value { type: DT_FLOAT }
}
attr {
key: "value"
value {
tensor {
dtype: DT_FLOAT
tensor_shape { }
float_val: 1.0 } } } }
""", gd)
@ops.RegisterStatistics("a", "flops")
def _calc_a_forward_flops(unused_graph, unused_node):
return ops.OpStats("flops", 20)
class StatisticsTest(test_util.TensorFlowTestCase):
def testRegisteredNode(self):
graph = ops.Graph()
node = ops._NodeDef("a", "an_a")
flops = ops.get_stats_for_node_def(graph, node, "flops")
self.assertEqual(20, flops.value)
missing_stat = ops.get_stats_for_node_def(graph, node, "missing_stat")
self.assertEqual(None, missing_stat.value)
def testUnregisteredNode(self):
graph = ops.Graph()
node = ops._NodeDef("b", "a_b")
weight_params = ops.get_stats_for_node_def(graph, node, "weight_params")
self.assertEqual(None, weight_params.value)
def testAccumulateStatistics(self):
flops_total = ops.OpStats("flops")
self.assertEqual(None, flops_total.value)
second_flops = ops.OpStats("flops", 3)
flops_total += second_flops
self.assertEqual(3, flops_total.value)
class DeviceStackTest(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
def testBasicDeviceAssignmentMetadata(self):
def device_func(unused_op):
return "/cpu:*"
const_zero = constant_op.constant([0.0], name="zero")
with ops.device("/cpu"):
const_one = constant_op.constant([1.0], name="one")
with ops.device("/cpu:0"):
const_two = constant_op.constant([2.0], name="two")
with ops.device(device_func):
const_three = constant_op.constant(3.0, name="three")
self.assertEqual(0, len(const_zero.op._device_assignments))
one_list = const_one.op._device_assignments
self.assertEqual(1, len(one_list))
self.assertEqual("/cpu", one_list[0].obj)
self.assertEqual("ops_test.py", os.path.basename(one_list[0].filename))
two_list = const_two.op._device_assignments
self.assertEqual(2, len(two_list))
devices = [t.obj for t in two_list]
self.assertEqual(set(["/cpu", "/cpu:0"]), set(devices))
three_list = const_three.op._device_assignments
self.assertEqual(1, len(three_list))
func_description = three_list[0].obj
expected_regex = r"device_func<.*ops_test.py, [0-9]+"
self.assertRegex(func_description, expected_regex)
@test_util.run_deprecated_v1
def testDeviceAssignmentMetadataForGraphDeviceAndTfDeviceFunctions(self):
with ops.device("/cpu"):
const_one = constant_op.constant([1.0], name="one")
with ops.get_default_graph().device("/cpu"):
const_two = constant_op.constant([2.0], name="two")
one_metadata = const_one.op._device_assignments[0]
two_metadata = const_two.op._device_assignments[0]
# Verify both types of device assignment return the right stack info.
self.assertRegex("ops_test.py", os.path.basename(one_metadata.filename))
self.assertEqual(one_metadata.filename, two_metadata.filename)
self.assertEqual(one_metadata.lineno + 2, two_metadata.lineno)
class ColocationGroupTest(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
def testBasic(self):
a = constant_op.constant([2.0], name="a")
with ops.colocate_with(a.op):
b = constant_op.constant(3.0)
c = constant_op.constant(4.0)
self.assertEqual([b"loc:@a"], a.op.colocation_groups())
self.assertEqual([b"loc:@a"], b.op.colocation_groups())
with self.assertRaises(ValueError):
c.op.get_attr("_class")
@test_util.run_deprecated_v1
def testBasicColocationMetadata(self):
const_two = constant_op.constant([2.0], name="two")
with ops.colocate_with(const_two.op):
const_three = constant_op.constant(3.0, name="three")
locations_dict = const_three.op._colocation_dict
self.assertIn("two", locations_dict)
metadata = locations_dict["two"]
self.assertIsNone(metadata.obj)
# Check that this test's filename is recorded as the file containing the
# colocation statement.
self.assertEqual("ops_test.py", os.path.basename(metadata.filename))
@test_util.run_deprecated_v1
def testColocationDeviceInteraction(self):
with ops.device("/cpu:0"):
with ops.device("/device:GPU:0"):
a = constant_op.constant([2.0], name="a")
with ops.colocate_with(a.op):
# 'b' is created in the scope of /cpu:0, but it is
# colocated with 'a', which is on '/device:GPU:0'. colocate_with
# overrides devices because it is a stronger constraint.
b = constant_op.constant(3.0)
self.assertEqual([b"loc:@a"], b.op.colocation_groups())
self.assertEqual(a.op.device, b.op.device)
@test_util.run_deprecated_v1
def testColocationCanonicalization(self):
with ops.device("/device:GPU:0"):
_ = constant_op.constant(2.0)
with ops.device(lambda op: "/device:GPU:0"):
b = constant_op.constant(3.0)
with ops.get_default_graph().colocate_with(b):
with ops.device("/device:GPU:0"):
c = constant_op.constant(4.0)
# A's device will be /device:GPU:0
# B's device will be /device:GPU:0
# C's device will be /device:GPU:0 because it
# inherits B's device name, after canonicalizing the names.
self.assertEqual(b.op.device, c.op.device)
@test_util.run_deprecated_v1
def testLocationOverrides(self):
with ops.device("/cpu:0"):
with ops.device("/device:GPU:0"):
a = constant_op.constant([2.0], name="a")
# Note that this colocation is "redundant", since we are
# within the scope of "/device:GPU:0". However, we would like to
# preserve in the GraphDef that these two ops should be
# colocated in a portable way.
with ops.colocate_with(a.op):
b = constant_op.constant(3.0)
c = constant_op.constant(4.0)
d = constant_op.constant(5.0)
self.assertEqual([b"loc:@a"], b.op.colocation_groups())
self.assertEqual("/device:GPU:0", a.op.device)
self.assertEqual(a.op.device, b.op.device)
# Test that device function stack is restored.
self.assertEqual("/device:GPU:0", c.op.device)
self.assertEqual("/device:CPU:0", d.op.device)
@test_util.run_deprecated_v1
def testNestedColocateWith(self):
a = constant_op.constant([2.0], name="a")
with ops.colocate_with(a.op):
b = constant_op.constant(3.0)
with ops.colocate_with(b.op):
c = constant_op.constant(4.0)
self.assertEqual([b"loc:@a"], b.op.colocation_groups())
self.assertEqual([b"loc:@a"], c.op.colocation_groups())
@test_util.run_deprecated_v1
def testMultiColocationGroups(self):
a = constant_op.constant([2.0], name="a")
b = constant_op.constant(3.0, name="b")
with ops.colocate_with(a.op):
with ops.colocate_with(b.op):
c = constant_op.constant(4.0)
self.assertEqual(set([b"loc:@a", b"loc:@b"]), set(c.op.colocation_groups()))
@test_util.run_deprecated_v1
def testColocationIgnoreStack(self):
a = constant_op.constant([2.0], name="a")
b = constant_op.constant(3.0, name="b")
with ops.colocate_with(a.op):
with ops.colocate_with(b.op, ignore_existing=True):
c = constant_op.constant(4.0)
self.assertEqual(set([b"loc:@b"]), set(c.op.colocation_groups()))
@test_util.run_deprecated_v1
def testColocateWithReset(self):
a = constant_op.constant([2.0], name="a")
with ops.colocate_with(a.op):
b = constant_op.constant(3.0, name="b")
with ops.colocate_with(None, ignore_existing=True):
c = constant_op.constant(4.0, name="c")
self.assertEqual([b"loc:@a"], b.op.colocation_groups())
self.assertEqual([b"loc:@c"], c.op.colocation_groups())
@test_util.run_deprecated_v1
def testColocateWithInitialNoneThenNested(self):
a = constant_op.constant([2.0], name="a")
with ops.colocate_with(a.op):
with ops.colocate_with(None, ignore_existing=True):
b = constant_op.constant(3.0, name="b")
with ops.colocate_with(b.op):
c = constant_op.constant(4.0, name="c")
self.assertEqual([b"loc:@b"], b.op.colocation_groups())
self.assertEqual([b"loc:@b"], c.op.colocation_groups())
@test_util.run_deprecated_v1
def testColocateVariables(self):
a = variables.Variable([2.0], name="a")
with ops.colocate_with(a.op):
b = variables.Variable([3.0], name="b")
self.assertEqual([b"loc:@a"], b.op.colocation_groups())
@test_util.run_deprecated_v1
def testColocateResourceVariablesInFunction(self):
with ops.device("/device:CPU:0"):
a = resource_variable_ops.ResourceVariable(1.0)
@def_function.function
def f():
with ops.colocate_with(a):
b = array_ops.ones([], name="output")
self.assertEqual("/device:CPU:0", b.op.device)
f()
def testColocateWithVariableInFunction(self):
v = variables.Variable(1.)
@def_function.function
def f():
with ops.colocate_with(v):
return array_ops.ones([], name="output")
f()
graph_def = f.get_concrete_function().graph.as_graph_def()
wrap_function.function_from_graph_def(graph_def, [], ["output"])
class DeadlineTest(test_util.TensorFlowTestCase):
def testNoDeadlineSet(self):
with ops.Graph().as_default() as g:
get_deadline = test_ops.get_deadline()
with self.session(graph=g) as sess:
run_options = config_pb2.RunOptions()
with self.assertRaises(errors.InvalidArgumentError):
sess.run(get_deadline, options=run_options)
def testDeadlineSetTimesOut(self):
with ops.Graph().as_default() as g:
sleep_op = test_ops.sleep_op(10)
with self.session(graph=g) as sess:
run_options = config_pb2.RunOptions(timeout_in_ms=3_000)
with self.assertRaises(errors.DeadlineExceededError):
sess.run(sleep_op, options=run_options)
class DeprecatedTest(test_util.TensorFlowTestCase):
def testSuccess(self):
with ops.Graph().as_default() as g:
test_util.set_producer_version(g, 7)
old = test_ops.old()
with self.session(graph=g):
old.run()
def _error(self):
return ((r"Op Old is not available in GraphDef version %d\. "
r"It has been removed in version 8\. For reasons\.") %
versions.GRAPH_DEF_VERSION)
def testGraphConstructionFail(self):
with ops.Graph().as_default():
with self.assertRaisesRegex(NotImplementedError, self._error()):
test_ops.old()
class NameScopeTest(test_util.TensorFlowTestCase):
def testStripAndPrependScope(self):
strs = [
"hidden1/hidden1/weights", # Same prefix. Should strip.
"hidden1///hidden1/weights", # Extra "/". Should strip.
"^hidden1/hidden1/weights", # Same prefix. Should strip.
"loc:@hidden1/hidden1/weights", # Same prefix. Should strip.
"hhidden1/hidden1/weights", # Different prefix. Should keep.
"hidden1"
] # Not a prefix. Should keep.
expected_striped = [
"hidden1/weights", "hidden1/weights", "^hidden1/weights",
"loc:@hidden1/weights", "hhidden1/hidden1/weights", "hidden1"
]
expected_prepended = [
"hidden2/hidden1/weights", "hidden2/hidden1/weights",
"^hidden2/hidden1/weights", "loc:@hidden2/hidden1/weights",
"hidden2/hhidden1/hidden1/weights", "hidden2/hidden1"
]
name_scope_to_strip = "hidden1"
name_scope_to_add = "hidden2"
for es, ep, s in zip(expected_striped, expected_prepended, strs):
striped = ops.strip_name_scope(s, name_scope_to_strip)
self.assertEqual(es, striped)
self.assertEqual(ep, ops.prepend_name_scope(striped, name_scope_to_add))
def testGetNameScope(self):
with ops.Graph().as_default() as g:
with ops.name_scope("scope1"):
with ops.name_scope("scope2"):
with ops.name_scope("scope3"):
self.assertEqual("scope1/scope2/scope3", g.get_name_scope())
self.assertEqual("scope1/scope2", g.get_name_scope())
self.assertEqual("scope1", g.get_name_scope())
self.assertEqual("", g.get_name_scope())
def testTwoGraphs(self):
def f():
g1 = ops.Graph()
g2 = ops.Graph()
with g1.as_default():
with g2.as_default():
with ops.name_scope("_"):
pass
self.assertRaisesRegex(ValueError,
"'_' is not a valid (?:root )?scope name", f)
class EnableEagerExecutionTest(test_util.TensorFlowTestCase):
@test_util.run_v1_only("b/120545219")
def testBadArgumentsToEnableEagerExecution(self):
with self.assertRaisesRegex(TypeError, "config must be a tf.ConfigProto"):
ops.enable_eager_execution(context.DEVICE_PLACEMENT_SILENT)
with self.assertRaisesRegex(ValueError, "device_policy must be one of"):
c = config_pb2.ConfigProto()
ops.enable_eager_execution(c, c)
with self.assertRaisesRegex(ValueError, "execution_mode must be one of"):
c = config_pb2.ConfigProto()
ops.enable_eager_execution(c, execution_mode=c)
class _TupleTensor(composite_tensor.CompositeTensor):
"""`Tensor`-like `tuple`-like for custom `Tensor` conversion masquerading."""
def __init__(self, components):
super(_TupleTensor, self).__init__()
self._components = tuple(ops.convert_to_tensor(c) for c in components)
@property
def _type_spec(self):
return _TupleTensorSpec(type_spec.from_value(c) for c in self._components)
def __getitem__(self, key):
return self._components[key]
def __len__(self):
return len(self._components)
def __iter__(self):
return iter(self._components)
class _TupleTensorSpec(type_spec.TypeSpec):
def __init__(self, specs):
self._specs = specs
value_type = property(lambda self: _TupleTensor)
_component_specs = property(lambda self: self._specs)
def _to_components(self, value):
return value._components
def _from_components(self, components):
return _TupleTensor(*components)
def _serialize(self):
return (self._specs,)
class _MyTuple(object):
"""Pretend user-side class for `ConvertToCompositeTensorTest ."""
def __init__(self, components):
super(_MyTuple, self).__init__()
self._components = tuple(components)
def __getitem__(self, key):
return self._components[key]
def __len__(self):
return len(self._components)
def __iter__(self):
return iter(self._components)
ops.register_tensor_conversion_function(
_MyTuple, conversion_func=lambda x, *_, **__: _TupleTensor(x))
class CustomConvertToCompositeTensorTest(test_util.TensorFlowTestCase):
@test_util.disable_tfrt("TODO(kkb): This makes Kokoro tests fail.")
def testCompositeTensorConversion(self):
"""Tests that a user can register a CompositeTensor converter."""
x = _MyTuple((1, [2., 3.], [[4, 5], [6, 7]]))
y = ops.convert_to_tensor_or_composite(x)
self.assertFalse(tensor_util.is_tf_type(y))
self.assertIsInstance(y, _TupleTensor)
self.assertLen(y, len(x))
for x_, y_ in zip(x, y):
self.assertIsInstance(y_, ops.Tensor)
self.assertTrue(tensor_util.is_tf_type(y_))
self.assertAllEqual(x_, tensor_util.constant_value(y_))
@test_util.disable_tfrt("Packing EagerTensors is not supported yet.")
class PackEagerTensorTest(test_util.TensorFlowTestCase):
def setUp(self):
super(PackEagerTensorTest, self).setUp()
context._reset_context()
cpus = config.list_physical_devices("CPU")
# Set 2 virtual CPUs
config.set_logical_device_configuration(cpus[0], [
context.LogicalDeviceConfiguration(),
context.LogicalDeviceConfiguration(),
])
def testPack(self):
with context.eager_mode():
with ops.device("CPU:0"):
var0 = resource_variable_ops.ResourceVariable(1.0)
c0 = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
with ops.device("CPU:1"):
var1 = resource_variable_ops.ResourceVariable(2.0)
var2 = resource_variable_ops.ResourceVariable([3.0])
c1 = constant_op.constant([9.0])
packed_var0 = ops.pack_eager_tensors([var0.handle, var1.handle])
self.assertTrue(packed_var0.is_packed)
self.assertEqual(packed_var0.dtype, var0.handle.dtype)
self.assertEqual(packed_var0.shape, var0.handle.shape)
self.assertEqual(packed_var0._handle_data, var0.handle._handle_data)
self.assertIn("COMPOSITE:0", packed_var0.device)
self.assertIn("COMPOSITE:0", packed_var0.backing_device)
with self.assertRaises(errors.InvalidArgumentError):
packed_var0.numpy()
# Different dtypes
with self.assertRaises(ValueError):
ops.pack_eager_tensors([var0.handle, c1])
# Different shapes
with self.assertRaises(ValueError):
ops.pack_eager_tensors([c0, c1])
# Different handle data
with self.assertRaises(ValueError):
ops.pack_eager_tensors([var0.handle, var2.handle])
class GraphDefInputShapesTest(test_util.TensorFlowTestCase):
def setUpInputShapes(self, pre_add_input_shapes):
test_tensor_shape = [None, 1, 1, 1]
@def_function.function(input_signature=[
tensor_spec.TensorSpec(shape=test_tensor_shape, dtype=dtypes.float32)
])
def f(x):
return array_ops.identity(x, name="output")
x = array_ops.ones([2, 1, 1, 1], dtype=dtypes.float32)
f(x)
tensor_shape_proto = tensor_shape_pb2.TensorShapeProto(dim=[
tensor_shape_pb2.TensorShapeProto.Dim(size=-1 if d is None else d)
for d in test_tensor_shape
])
list_proto = attr_value_pb2.AttrValue.ListValue(shape=[tensor_shape_proto])
concrete_function = f.get_concrete_function()
if pre_add_input_shapes:
attr_value = attr_value_pb2.AttrValue(list=list_proto)
concrete_function = eager_function.ConcreteFunction(
concrete_function.graph,
attrs={"_input_shapes": attr_value},
function_spec=concrete_function._pre_initialized_function_spec)
test_graph = ops.Graph()
with test_graph.as_default():
concrete_function.add_to_graph(g=test_graph)
graph_def = test_graph.as_graph_def(add_shapes=True)
self.assertLen(graph_def.library.function, 1)
function_def = graph_def.library.function[0]
input_shapes = function_def.attr["_input_shapes"]
return input_shapes
def testGraphDefInputShapes(self):
pre_added_input_shapes = self.setUpInputShapes(pre_add_input_shapes=True)
post_added_input_shapes = self.setUpInputShapes(pre_add_input_shapes=False)
self.assertProtoEquals(pre_added_input_shapes, post_added_input_shapes)
class TensorTest(test_util.TensorFlowTestCase):
def testToArrayEagerMode(self):
with context.eager_mode():
a = np.array(constant_op.constant(32), dtype=np.float32)
b = np.array(constant_op.constant(32, dtype=dtypes.int64))
self.assertEqual(a.dtype, np.dtype(np.float32))
self.assertEqual(b.dtype, np.dtype(np.int64))
def testToArrayFunctionMode(self):
@def_function.function
def f():
# Raises during trace compilation.
return np.array(constant_op.constant(32), dtype=np.int32)
@def_function.function
def g():
# Raises during trace compilation.
return np.array(constant_op.constant(32))
with self.assertRaisesRegex(NotImplementedError,
"Cannot convert a symbolic Tensor"):
f()
with self.assertRaisesRegex(NotImplementedError,
"Cannot convert a symbolic Tensor"):
g()
if __name__ == "__main__":
googletest.main()
| 37.146011
| 97
| 0.657268
|
a41d55917919d538688886b9d0ed2128d85d13ba
| 1,909
|
py
|
Python
|
benchmark/startPyquil2829.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/startPyquil2829.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/startPyquil2829.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
# qubit number=4
# total number=43
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit() -> Program:
prog = Program() # circuit begin
prog += CNOT(0,3) # number=13
prog += CNOT(0,3) # number=17
prog += X(3) # number=18
prog += CNOT(0,3) # number=19
prog += CNOT(0,3) # number=15
prog += H(1) # number=2
prog += H(2) # number=3
prog += H(3) # number=4
prog += Y(3) # number=12
prog += H(0) # number=5
prog += H(1) # number=6
prog += H(2) # number=7
prog += H(3) # number=37
prog += CZ(0,3) # number=38
prog += H(3) # number=39
prog += X(3) # number=28
prog += H(3) # number=30
prog += CZ(0,3) # number=31
prog += H(3) # number=32
prog += H(0) # number=33
prog += CZ(3,0) # number=34
prog += RX(0.33300882128051834,2) # number=36
prog += H(0) # number=35
prog += H(0) # number=40
prog += CZ(3,0) # number=41
prog += H(0) # number=42
prog += Z(3) # number=24
prog += CNOT(3,0) # number=25
prog += CNOT(3,0) # number=22
prog += H(3) # number=8
prog += H(0) # number=9
prog += Y(2) # number=10
prog += Y(2) # number=11
# circuit end
return prog
def summarise_results(bitstrings) -> dict:
d = {}
for l in bitstrings:
if d.get(l) is None:
d[l] = 1
else:
d[l] = d[l] + 1
return d
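# For reference, the same tally can be produced with the standard library's
# collections.Counter; this is only an illustrative sketch, the script itself
# keeps the explicit loop above:
#
#   from collections import Counter
#   def summarise_results(bitstrings) -> dict:
#       return dict(Counter(bitstrings))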
if __name__ == '__main__':
prog = make_circuit()
qvm = get_qc('4q-qvm')
    results = qvm.run_and_measure(prog, 1024)
bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
bitstrings = [''.join(map(str, l)) for l in bitstrings]
    writefile = open("../data/startPyquil2829.csv", "w")
    print(summarise_results(bitstrings), file=writefile)
writefile.close()
| 25.453333
| 64
| 0.56417
|
35b962c50af0c62f6c706782f0540fe9d70d4bae
| 1,010
|
py
|
Python
|
api/utils.py
|
OppoZeit/OppoZeit
|
e14fe39e4dd0638f31407992a5e63a8b0932ccaf
|
[
"MIT"
] | null | null | null |
api/utils.py
|
OppoZeit/OppoZeit
|
e14fe39e4dd0638f31407992a5e63a8b0932ccaf
|
[
"MIT"
] | 3
|
2020-10-22T20:42:53.000Z
|
2020-10-22T20:42:57.000Z
|
api/utils.py
|
OppoZeit/OppoZeit
|
e14fe39e4dd0638f31407992a5e63a8b0932ccaf
|
[
"MIT"
] | null | null | null |
from datetime import datetime
from itertools import chain, filterfalse
# http://stackoverflow.com/a/10734224/396967
def datetime_parser(dct):
DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
for k, v in dct.items():
        if isinstance(v, str):
            try:
                dct[k] = datetime.strptime(v, DATE_FORMAT)
            except ValueError:
                # Not a timestamp in the expected format; leave it unchanged.
                pass
return dct
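# Illustrative usage (per the Stack Overflow answer linked above): the parser
# is meant to be passed as the object_hook of json.loads so that ISO-8601
# timestamp strings become datetime objects. The payload below is made up:
#
#   import json
#   doc = json.loads('{"created_at": "2020-10-22T20:42:53Z"}',
#                    object_hook=datetime_parser)
#   # doc["created_at"] is now a datetime.datetime instance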
def flatten(nested):
return chain.from_iterable(nested)
def unique(iterable, key=None):
"List unique elements, preserving order. Remember all elements ever seen."
# unique_everseen('AAAABBBCCDAABBB') --> A B C D
# unique_everseen('ABBCcAD', str.lower) --> A B C D
seen = set()
seen_add = seen.add
if key is None:
        for element in filterfalse(seen.__contains__, iterable):
seen_add(element)
yield element
else:
for element in iterable:
k = key(element)
if k not in seen:
seen_add(k)
yield element
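if __name__ == "__main__":
    # Minimal usage sketch for the helpers above (illustrative only): flatten
    # chains nested iterables, unique drops duplicates while preserving order.
    assert list(flatten([[1, 2], [3], [4, 5]])) == [1, 2, 3, 4, 5]
    assert list(unique("AAAABBBCCDAABBB")) == ["A", "B", "C", "D"]
    assert list(unique("ABBCcAD", str.lower)) == ["A", "B", "C", "D"]
    print("utils self-check passed")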
| 27.297297
| 78
| 0.59703
|
e33db667e2348895fdfb669d61ef12352caffcfb
| 3,592
|
py
|
Python
|
models/config/coinbase_pro_parser.py
|
ronaldderksen/pycryptobot
|
01a11e3fe121360b1a4138ed193fdc3d05e5857a
|
[
"Apache-2.0"
] | null | null | null |
models/config/coinbase_pro_parser.py
|
ronaldderksen/pycryptobot
|
01a11e3fe121360b1a4138ed193fdc3d05e5857a
|
[
"Apache-2.0"
] | 1
|
2021-04-29T20:44:33.000Z
|
2021-04-29T20:44:33.000Z
|
models/config/coinbase_pro_parser.py
|
letsautom8/pycryptobot
|
f5b8eebf66114890e43c569539f984b520c5a8da
|
[
"Apache-2.0"
] | null | null | null |
import re, logging
from .default_parser import isCurrencyValid, defaultConfigParse, merge_config_and_args
def isMarketValid(market) -> bool:
p = re.compile(r"^[1-9A-Z]{2,5}\-[1-9A-Z]{2,5}$")
return p.match(market) is not None
def parseMarket(market):
if not isMarketValid(market):
raise ValueError('Coinbase Pro market invalid: ' + market)
base_currency, quote_currency = market.split('-', 2)
return market, base_currency, quote_currency
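# Illustrative behaviour of the two helpers above (example values only):
#
#   parseMarket('BTC-GBP')   # -> ('BTC-GBP', 'BTC', 'GBP')
#   parseMarket('btc-gbp')   # raises ValueError, lowercase fails isMarketValid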
def parser(app, coinbase_config, args={}):
logging.info('CoinbasePro Configuration parse')
if not coinbase_config:
raise Exception('There is an error in your config dictionary')
if not app:
raise Exception('No app is passed')
if 'api_key' in coinbase_config and 'api_secret' in coinbase_config and \
'api_passphrase' in coinbase_config and 'api_url' in coinbase_config:
# validates the api key is syntactically correct
p = re.compile(r"^[a-f0-9]{32}$")
if not p.match(coinbase_config['api_key']):
raise TypeError('Coinbase Pro API key is invalid')
app.api_key = coinbase_config['api_key']
# validates the api secret is syntactically correct
p = re.compile(r"^[A-z0-9+\/]+==$")
if not p.match(coinbase_config['api_secret']):
raise TypeError('Coinbase Pro API secret is invalid')
app.api_secret = coinbase_config['api_secret']
# validates the api passphrase is syntactically correct
p = re.compile(r"^[a-z0-9]{10,11}$")
if not p.match(coinbase_config['api_passphrase']):
raise TypeError('Coinbase Pro API passphrase is invalid')
app.api_passphrase = coinbase_config['api_passphrase']
valid_urls = [
'https://api.pro.coinbase.com/',
'https://api.pro.coinbase.com'
]
# validate Coinbase Pro API
if coinbase_config['api_url'] not in valid_urls:
raise ValueError('Coinbase Pro API URL is invalid')
app.api_url = coinbase_config['api_url']
config = merge_config_and_args(coinbase_config, args)
defaultConfigParse(app, config)
if 'base_currency' in config and config['base_currency'] is not None:
if not isCurrencyValid(config['base_currency']):
raise TypeError('Base currency is invalid.')
app.base_currency = config['base_currency']
if 'quote_currency' in config and config['quote_currency'] is not None:
if not isCurrencyValid(config['quote_currency']):
raise TypeError('Quote currency is invalid.')
app.quote_currency = config['quote_currency']
if 'market' in config and config['market'] is not None:
app.market, app.base_currency, app.quote_currency = parseMarket(config['market'])
if app.base_currency != '' and app.quote_currency != '':
app.market = app.base_currency + '-' + app.quote_currency
if 'granularity' in config and config['granularity'] is not None:
granularity = 0
if isinstance(config['granularity'], str) and config['granularity'].isnumeric() is True:
granularity = int(config['granularity'])
elif isinstance(config['granularity'], int):
granularity = config['granularity']
if granularity in [60, 300, 900, 3600, 21600, 86400]:
app.granularity = granularity
app.smart_switch = 0
else:
raise Exception('There is an error in your config dictionary')
| 37.416667
| 100
| 0.639477
|
a6f37f86d8ff35ef14a0bf4d876efae96fc10d1f
| 53,723
|
py
|
Python
|
lib/googlecloudsdk/third_party/apis/datafusion/v1beta1/datafusion_v1beta1_messages.py
|
kustodian/google-cloud-sdk
|
b6bae4137d4b58030adb3dcb1271216dfb19f96d
|
[
"Apache-2.0"
] | null | null | null |
lib/googlecloudsdk/third_party/apis/datafusion/v1beta1/datafusion_v1beta1_messages.py
|
kustodian/google-cloud-sdk
|
b6bae4137d4b58030adb3dcb1271216dfb19f96d
|
[
"Apache-2.0"
] | null | null | null |
lib/googlecloudsdk/third_party/apis/datafusion/v1beta1/datafusion_v1beta1_messages.py
|
kustodian/google-cloud-sdk
|
b6bae4137d4b58030adb3dcb1271216dfb19f96d
|
[
"Apache-2.0"
] | null | null | null |
"""Generated message classes for datafusion version v1beta1.
Cloud Data Fusion is a fully-managed, cloud native, enterprise data
integration service for quickly building and managing data pipelines. It
provides a graphical interface to increase time efficiency and reduce
complexity, and allows business users, developers, and data scientists to
easily and reliably build scalable data integration solutions to cleanse,
prepare, blend, transfer and transform data without having to wrestle with
infrastructure.
"""
# NOTE: This file is autogenerated and should not be edited by hand.
from apitools.base.protorpclite import messages as _messages
from apitools.base.py import encoding
from apitools.base.py import extra_types
package = 'datafusion'
class AuditConfig(_messages.Message):
r"""Specifies the audit configuration for a service. The configuration
determines which permission types are logged, and what identities, if any,
are exempted from logging. An AuditConfig must have one or more
AuditLogConfigs. If there are AuditConfigs for both `allServices` and a
specific service, the union of the two AuditConfigs is used for that
service: the log_types specified in each AuditConfig are enabled, and the
exempted_members in each AuditLogConfig are exempted. Example Policy with
multiple AuditConfigs: { "audit_configs": [ {
"service": "allServices" "audit_log_configs": [ {
"log_type": "DATA_READ", "exempted_members": [
"user:foo@gmail.com" ] }, {
"log_type": "DATA_WRITE", }, {
"log_type": "ADMIN_READ", } ] }, {
"service": "fooservice.googleapis.com" "audit_log_configs": [
{ "log_type": "DATA_READ", }, {
"log_type": "DATA_WRITE", "exempted_members": [
"user:bar@gmail.com" ] } ] }
] } For fooservice, this policy enables DATA_READ, DATA_WRITE and
ADMIN_READ logging. It also exempts foo@gmail.com from DATA_READ logging,
and bar@gmail.com from DATA_WRITE logging.
Fields:
auditLogConfigs: The configuration for logging of each type of permission.
exemptedMembers: A string attribute.
service: Specifies a service that will be enabled for audit logging. For
example, `storage.googleapis.com`, `cloudsql.googleapis.com`.
`allServices` is a special value that covers all services.
"""
auditLogConfigs = _messages.MessageField('AuditLogConfig', 1, repeated=True)
exemptedMembers = _messages.StringField(2, repeated=True)
service = _messages.StringField(3)
class AuditLogConfig(_messages.Message):
r"""Provides the configuration for logging a type of permissions. Example:
{ "audit_log_configs": [ { "log_type": "DATA_READ",
"exempted_members": [ "user:foo@gmail.com" ]
}, { "log_type": "DATA_WRITE", } ] }
This enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting
foo@gmail.com from DATA_READ logging.
Enums:
LogTypeValueValuesEnum: The log type that this config enables.
Fields:
exemptedMembers: Specifies the identities that do not cause logging for
this type of permission. Follows the same format of Binding.members.
logType: The log type that this config enables.
"""
class LogTypeValueValuesEnum(_messages.Enum):
r"""The log type that this config enables.
Values:
LOG_TYPE_UNSPECIFIED: Default case. Should never be this.
ADMIN_READ: Admin reads. Example: CloudIAM getIamPolicy
DATA_WRITE: Data writes. Example: CloudSQL Users create
DATA_READ: Data reads. Example: CloudSQL Users list
"""
LOG_TYPE_UNSPECIFIED = 0
ADMIN_READ = 1
DATA_WRITE = 2
DATA_READ = 3
exemptedMembers = _messages.StringField(1, repeated=True)
logType = _messages.EnumField('LogTypeValueValuesEnum', 2)
class AuthorizationLoggingOptions(_messages.Message):
r"""Authorization-related information used by Cloud Audit Logging.
Enums:
PermissionTypeValueValuesEnum: The type of the permission that was
checked.
Fields:
permissionType: The type of the permission that was checked.
"""
class PermissionTypeValueValuesEnum(_messages.Enum):
r"""The type of the permission that was checked.
Values:
PERMISSION_TYPE_UNSPECIFIED: Default. Should not be used.
ADMIN_READ: A read of admin (meta) data.
ADMIN_WRITE: A write of admin (meta) data.
DATA_READ: A read of standard data.
DATA_WRITE: A write of standard data.
"""
PERMISSION_TYPE_UNSPECIFIED = 0
ADMIN_READ = 1
ADMIN_WRITE = 2
DATA_READ = 3
DATA_WRITE = 4
permissionType = _messages.EnumField('PermissionTypeValueValuesEnum', 1)
class Binding(_messages.Message):
r"""Associates `members` with a `role`.
Fields:
condition: The condition that is associated with this binding. NOTE: An
unsatisfied condition will not allow user access via current binding.
Different bindings, including their conditions, are examined
independently.
members: Specifies the identities requesting access for a Cloud Platform
resource. `members` can have the following values: * `allUsers`: A
special identifier that represents anyone who is on the internet;
with or without a Google account. * `allAuthenticatedUsers`: A special
identifier that represents anyone who is authenticated with a Google
account or a service account. * `user:{emailid}`: An email address that
represents a specific Google account. For example, `alice@gmail.com`
. * `serviceAccount:{emailid}`: An email address that represents a
service account. For example, `my-other-
app@appspot.gserviceaccount.com`. * `group:{emailid}`: An email address
that represents a Google group. For example, `admins@example.com`.
* `domain:{domain}`: The G Suite domain (primary) that represents all
the users of that domain. For example, `google.com` or `example.com`.
role: Role that is assigned to `members`. For example, `roles/viewer`,
`roles/editor`, or `roles/owner`.
"""
condition = _messages.MessageField('Expr', 1)
members = _messages.StringField(2, repeated=True)
role = _messages.StringField(3)
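# Editor's illustrative sketch (not part of the generated bindings): a Binding
# equivalent to one entry of the Policy examples further below; the role and
# member values are placeholders.
def _example_binding():
  return Binding(
      role='roles/viewer',
      members=['user:sean@example.com'])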
class CancelOperationRequest(_messages.Message):
r"""The request message for Operations.CancelOperation."""
class CloudAuditOptions(_messages.Message):
r"""Write a Cloud Audit log
Enums:
LogNameValueValuesEnum: The log_name to populate in the Cloud Audit
Record.
Fields:
authorizationLoggingOptions: Information used by the Cloud Audit Logging
pipeline.
logName: The log_name to populate in the Cloud Audit Record.
"""
class LogNameValueValuesEnum(_messages.Enum):
r"""The log_name to populate in the Cloud Audit Record.
Values:
UNSPECIFIED_LOG_NAME: Default. Should not be used.
ADMIN_ACTIVITY: Corresponds to "cloudaudit.googleapis.com/activity"
DATA_ACCESS: Corresponds to "cloudaudit.googleapis.com/data_access"
"""
UNSPECIFIED_LOG_NAME = 0
ADMIN_ACTIVITY = 1
DATA_ACCESS = 2
authorizationLoggingOptions = _messages.MessageField('AuthorizationLoggingOptions', 1)
logName = _messages.EnumField('LogNameValueValuesEnum', 2)
class Condition(_messages.Message):
r"""A condition to be met.
Enums:
IamValueValuesEnum: Trusted attributes supplied by the IAM system.
OpValueValuesEnum: An operator to apply the subject with.
SysValueValuesEnum: Trusted attributes supplied by any service that owns
resources and uses the IAM system for access control.
Fields:
iam: Trusted attributes supplied by the IAM system.
op: An operator to apply the subject with.
svc: Trusted attributes discharged by the service.
sys: Trusted attributes supplied by any service that owns resources and
uses the IAM system for access control.
values: The objects of the condition.
"""
class IamValueValuesEnum(_messages.Enum):
r"""Trusted attributes supplied by the IAM system.
Values:
NO_ATTR: Default non-attribute.
AUTHORITY: Either principal or (if present) authority selector.
ATTRIBUTION: The principal (even if an authority selector is present),
which must only be used for attribution, not authorization.
SECURITY_REALM: Any of the security realms in the IAMContext (go
/security-realms). When used with IN, the condition indicates "any of
        the request's realms match one of the given values"; with NOT_IN, "none
of the realms match any of the given values". Note that a value can
be: - 'self' (i.e., allow connections from clients that are in the
same security realm) - a realm (e.g., 'campus-abc') - a realm group
(e.g., 'realms-for-borg-cell-xx', see: go/realm-groups) A match is
determined by a realm group membership check performed by a
RealmAclRep object (go/realm-acl-howto). It is not permitted to grant
access based on the *absence* of a realm, so realm conditions can only
be used in a "positive" context (e.g., ALLOW/IN or DENY/NOT_IN).
APPROVER: An approver (distinct from the requester) that has authorized
this request. When used with IN, the condition indicates that one of
the approvers associated with the request matches the specified
principal, or is a member of the specified group. Approvers can only
grant additional access, and are thus only used in a strictly positive
context (e.g. ALLOW/IN or DENY/NOT_IN).
JUSTIFICATION_TYPE: What types of justifications have been supplied with
this request. String values should match enum names from
tech.iam.JustificationType, e.g. "MANUAL_STRING". It is not permitted
to grant access based on the *absence* of a justification, so
justification conditions can only be used in a "positive" context
(e.g., ALLOW/IN or DENY/NOT_IN). Multiple justifications, e.g., a
Buganizer ID and a manually-entered reason, are normal and supported.
CREDENTIALS_TYPE: What type of credentials have been supplied with this
request. String values should match enum names from
security_loas_l2.CredentialsType - currently, only
CREDS_TYPE_EMERGENCY is supported. It is not permitted to grant access
based on the *absence* of a credentials type, so the conditions can
only be used in a "positive" context (e.g., ALLOW/IN or DENY/NOT_IN).
"""
NO_ATTR = 0
AUTHORITY = 1
ATTRIBUTION = 2
SECURITY_REALM = 3
APPROVER = 4
JUSTIFICATION_TYPE = 5
CREDENTIALS_TYPE = 6
class OpValueValuesEnum(_messages.Enum):
r"""An operator to apply the subject with.
Values:
NO_OP: Default no-op.
EQUALS: DEPRECATED. Use IN instead.
NOT_EQUALS: DEPRECATED. Use NOT_IN instead.
IN: The condition is true if the subject (or any element of it if it is
a set) matches any of the supplied values.
NOT_IN: The condition is true if the subject (or every element of it if
it is a set) matches none of the supplied values.
DISCHARGED: Subject is discharged
"""
NO_OP = 0
EQUALS = 1
NOT_EQUALS = 2
IN = 3
NOT_IN = 4
DISCHARGED = 5
class SysValueValuesEnum(_messages.Enum):
r"""Trusted attributes supplied by any service that owns resources and
uses the IAM system for access control.
Values:
NO_ATTR: Default non-attribute type
REGION: Region of the resource
SERVICE: Service name
NAME: Resource name
IP: IP address of the caller
"""
NO_ATTR = 0
REGION = 1
SERVICE = 2
NAME = 3
IP = 4
iam = _messages.EnumField('IamValueValuesEnum', 1)
op = _messages.EnumField('OpValueValuesEnum', 2)
svc = _messages.StringField(3)
sys = _messages.EnumField('SysValueValuesEnum', 4)
values = _messages.StringField(5, repeated=True)
class CounterOptions(_messages.Message):
r"""Increment a streamz counter with the specified metric and field names.
Metric names should start with a '/', generally be lowercase-only, and end
in "_count". Field names should not contain an initial slash. The actual
exported metric names will have "/iam/policy" prepended. Field names
correspond to IAM request parameters and field values are their respective
values. Supported field names: - "authority", which is "[token]" if
IAMContext.token is present, otherwise the value of
IAMContext.authority_selector if present, and otherwise a
representation of IAMContext.principal; or - "iam_principal", a
representation of IAMContext.principal even if a token or authority
selector is present; or - "" (empty string), resulting in a counter with
no fields. Examples: counter { metric: "/debug_access_count" field:
"iam_principal" } ==> increment counter
/iam/policy/backend_debug_access_count
{iam_principal=[value of IAMContext.principal]} At this time we do not
support multiple field names (though this may be supported in the future).
Fields:
field: The field value to attribute.
metric: The metric to update.
"""
field = _messages.StringField(1)
metric = _messages.StringField(2)
class DataAccessOptions(_messages.Message):
r"""Write a Data Access (Gin) log
Enums:
LogModeValueValuesEnum: Whether Gin logging should happen in a fail-closed
manner at the caller. This is relevant only in the LocalIAM
implementation, for now.
Fields:
logMode: Whether Gin logging should happen in a fail-closed manner at the
caller. This is relevant only in the LocalIAM implementation, for now.
"""
class LogModeValueValuesEnum(_messages.Enum):
r"""Whether Gin logging should happen in a fail-closed manner at the
caller. This is relevant only in the LocalIAM implementation, for now.
Values:
LOG_MODE_UNSPECIFIED: Client is not required to write a partial Gin log
immediately after the authorization check. If client chooses to write
one and it fails, client may either fail open (allow the operation to
continue) or fail closed (handle as a DENY outcome).
LOG_FAIL_CLOSED: The application's operation in the context of which
this authorization check is being made may only be performed if it is
successfully logged to Gin. For instance, the authorization library
may satisfy this obligation by emitting a partial log entry at
authorization check time and only returning ALLOW to the application
if it succeeds. If a matching Rule has this directive, but the client
has not indicated that it will honor such requirements, then the IAM
check will result in authorization failure by setting
CheckPolicyResponse.success=false.
"""
LOG_MODE_UNSPECIFIED = 0
LOG_FAIL_CLOSED = 1
logMode = _messages.EnumField('LogModeValueValuesEnum', 1)
class DatafusionProjectsLocationsGetRequest(_messages.Message):
r"""A DatafusionProjectsLocationsGetRequest object.
Fields:
name: Resource name for the location.
"""
name = _messages.StringField(1, required=True)
class DatafusionProjectsLocationsInstancesCreateRequest(_messages.Message):
r"""A DatafusionProjectsLocationsInstancesCreateRequest object.
Fields:
    instance: An Instance resource to be passed as the request body.
instanceId: The name of the instance to create.
parent: The instance's project and location in the format
projects/{project}/locations/{location}.
"""
instance = _messages.MessageField('Instance', 1)
instanceId = _messages.StringField(2)
parent = _messages.StringField(3, required=True)
class DatafusionProjectsLocationsInstancesDeleteRequest(_messages.Message):
r"""A DatafusionProjectsLocationsInstancesDeleteRequest object.
Fields:
name: The instance resource name in the format
projects/{project}/locations/{location}/instances/{instance}
"""
name = _messages.StringField(1, required=True)
class DatafusionProjectsLocationsInstancesGetIamPolicyRequest(_messages.Message):
r"""A DatafusionProjectsLocationsInstancesGetIamPolicyRequest object.
Fields:
resource: REQUIRED: The resource for which the policy is being requested.
See the operation documentation for the appropriate value for this
field.
"""
resource = _messages.StringField(1, required=True)
class DatafusionProjectsLocationsInstancesGetRequest(_messages.Message):
r"""A DatafusionProjectsLocationsInstancesGetRequest object.
Fields:
name: The instance resource name in the format
projects/{project}/locations/{location}/instances/{instance}.
"""
name = _messages.StringField(1, required=True)
class DatafusionProjectsLocationsInstancesListRequest(_messages.Message):
r"""A DatafusionProjectsLocationsInstancesListRequest object.
Fields:
filter: List filter.
orderBy: Sort results. Supported values are "name", "name desc", or ""
(unsorted).
pageSize: The maximum number of items to return.
pageToken: The next_page_token value to use if there are additional
results to retrieve for this list request.
parent: The project and location for which to retrieve instance
information in the format projects/{project}/locations/{location}. If
the location is specified as '-' (wildcard), then all regions available
to the project are queried, and the results are aggregated.
"""
filter = _messages.StringField(1)
orderBy = _messages.StringField(2)
pageSize = _messages.IntegerField(3, variant=_messages.Variant.INT32)
pageToken = _messages.StringField(4)
parent = _messages.StringField(5, required=True)
class DatafusionProjectsLocationsInstancesPatchRequest(_messages.Message):
r"""A DatafusionProjectsLocationsInstancesPatchRequest object.
Fields:
    instance: An Instance resource to be passed as the request body.
name: Output only. The name of this instance is in the form of
projects/{project}/locations/{location}/instances/{instance}.
updateMask: Field mask is used to specify the fields that the update will
overwrite in an instance resource. The fields specified in the
update_mask are relative to the resource, not the full request. A field
will be overwritten if it is in the mask. If the user does not provide a
mask, all the supported fields (labels and options currently) will be
overwritten.
"""
instance = _messages.MessageField('Instance', 1)
name = _messages.StringField(2, required=True)
updateMask = _messages.StringField(3)
class DatafusionProjectsLocationsInstancesRestartRequest(_messages.Message):
r"""A DatafusionProjectsLocationsInstancesRestartRequest object.
Fields:
    name: Name of the Data Fusion instance which needs to be restarted, in
      the form of projects/{project}/locations/{location}/instances/{instance}
restartInstanceRequest: A RestartInstanceRequest resource to be passed as
the request body.
"""
name = _messages.StringField(1, required=True)
restartInstanceRequest = _messages.MessageField('RestartInstanceRequest', 2)
class DatafusionProjectsLocationsInstancesSetIamPolicyRequest(_messages.Message):
r"""A DatafusionProjectsLocationsInstancesSetIamPolicyRequest object.
Fields:
resource: REQUIRED: The resource for which the policy is being specified.
See the operation documentation for the appropriate value for this
field.
setIamPolicyRequest: A SetIamPolicyRequest resource to be passed as the
request body.
"""
resource = _messages.StringField(1, required=True)
setIamPolicyRequest = _messages.MessageField('SetIamPolicyRequest', 2)
class DatafusionProjectsLocationsInstancesTestIamPermissionsRequest(_messages.Message):
r"""A DatafusionProjectsLocationsInstancesTestIamPermissionsRequest object.
Fields:
resource: REQUIRED: The resource for which the policy detail is being
requested. See the operation documentation for the appropriate value for
this field.
testIamPermissionsRequest: A TestIamPermissionsRequest resource to be
passed as the request body.
"""
resource = _messages.StringField(1, required=True)
testIamPermissionsRequest = _messages.MessageField('TestIamPermissionsRequest', 2)
class DatafusionProjectsLocationsInstancesUpgradeRequest(_messages.Message):
r"""A DatafusionProjectsLocationsInstancesUpgradeRequest object.
Fields:
name: Name of the Data Fusion instance which need to be upgraded in the
form of projects/{project}/locations/{location}/instances/{instance}
Instance will be upgraded with the latest stable version of the Data
Fusion.
upgradeInstanceRequest: A UpgradeInstanceRequest resource to be passed as
the request body.
"""
name = _messages.StringField(1, required=True)
upgradeInstanceRequest = _messages.MessageField('UpgradeInstanceRequest', 2)
class DatafusionProjectsLocationsListRequest(_messages.Message):
r"""A DatafusionProjectsLocationsListRequest object.
Fields:
filter: The standard list filter.
name: The resource that owns the locations collection, if applicable.
pageSize: The standard list page size.
pageToken: The standard list page token.
"""
filter = _messages.StringField(1)
name = _messages.StringField(2, required=True)
pageSize = _messages.IntegerField(3, variant=_messages.Variant.INT32)
pageToken = _messages.StringField(4)
class DatafusionProjectsLocationsOperationsCancelRequest(_messages.Message):
r"""A DatafusionProjectsLocationsOperationsCancelRequest object.
Fields:
cancelOperationRequest: A CancelOperationRequest resource to be passed as
the request body.
name: The name of the operation resource to be cancelled.
"""
cancelOperationRequest = _messages.MessageField('CancelOperationRequest', 1)
name = _messages.StringField(2, required=True)
class DatafusionProjectsLocationsOperationsDeleteRequest(_messages.Message):
r"""A DatafusionProjectsLocationsOperationsDeleteRequest object.
Fields:
name: The name of the operation resource to be deleted.
"""
name = _messages.StringField(1, required=True)
class DatafusionProjectsLocationsOperationsGetRequest(_messages.Message):
r"""A DatafusionProjectsLocationsOperationsGetRequest object.
Fields:
name: The name of the operation resource.
"""
name = _messages.StringField(1, required=True)
class DatafusionProjectsLocationsOperationsListRequest(_messages.Message):
r"""A DatafusionProjectsLocationsOperationsListRequest object.
Fields:
filter: The standard list filter.
name: The name of the operation's parent resource.
pageSize: The standard list page size.
pageToken: The standard list page token.
"""
filter = _messages.StringField(1)
name = _messages.StringField(2, required=True)
pageSize = _messages.IntegerField(3, variant=_messages.Variant.INT32)
pageToken = _messages.StringField(4)
class Empty(_messages.Message):
r"""A generic empty message that you can re-use to avoid defining duplicated
empty messages in your APIs. A typical example is to use it as the request
or the response type of an API method. For instance: service Foo {
rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The
JSON representation for `Empty` is empty JSON object `{}`.
"""
class Expr(_messages.Message):
r"""Represents an expression text. Example: title: "User account
presence" description: "Determines whether the request has a user
account" expression: "size(request.user) > 0"
Fields:
description: An optional description of the expression. This is a longer
text which describes the expression, e.g. when hovered over it in a UI.
expression: Textual representation of an expression in Common Expression
Language syntax. The application context of the containing message
determines which well-known feature set of CEL is supported.
location: An optional string indicating the location of the expression for
error reporting, e.g. a file name and a position in the file.
title: An optional title for the expression, i.e. a short string
describing its purpose. This can be used e.g. in UIs which allow to
enter the expression.
"""
description = _messages.StringField(1)
expression = _messages.StringField(2)
location = _messages.StringField(3)
title = _messages.StringField(4)
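# Editor's illustrative sketch (not part of the generated bindings): an Expr
# carrying the CEL expression from the docstring example above.
def _example_expr():
  return Expr(
      title='User account presence',
      description='Determines whether the request has a user account',
      expression='size(request.user) > 0')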
class Instance(_messages.Message):
r"""Represents a Data Fusion instance.
Enums:
StateValueValuesEnum: Output only. The current state of this Data Fusion
instance.
TypeValueValuesEnum: Required. Instance type.
Messages:
LabelsValue: The resource labels for instance to use to annotate any
related underlying resources such as GCE VMs. The character '=' is not
allowed to be used within the labels.
OptionsValue: Map of additional options used to configure the behavior of
Data Fusion instance.
Fields:
createTime: Output only. The time the instance was created.
description: An optional description of this instance.
displayName: Display name for an instance.
enableStackdriverLogging: Option to enable Stackdriver Logging.
enableStackdriverMonitoring: Option to enable Stackdriver Monitoring.
labels: The resource labels for instance to use to annotate any related
underlying resources such as GCE VMs. The character '=' is not allowed
to be used within the labels.
name: Output only. The name of this instance is in the form of
projects/{project}/locations/{location}/instances/{instance}.
networkConfig: Network configuration options. These are required when a
private Data Fusion instance is to be created.
options: Map of additional options used to configure the behavior of Data
Fusion instance.
privateInstance: Specifies whether the Data Fusion instance should be
private. If set to true, all Data Fusion nodes will have private IP
addresses and will not be able to access the public internet.
serviceAccount: Output only. Service account which will be used to access
      resources in the customer project.
serviceEndpoint: Output only. Endpoint on which the Data Fusion UI and
REST APIs are accessible.
state: Output only. The current state of this Data Fusion instance.
stateMessage: Output only. Additional information about the current state
of this Data Fusion instance if available.
type: Required. Instance type.
updateTime: Output only. The time the instance was last updated.
version: Output only. Current version of the Data Fusion.
zone: Name of the zone in which the Data Fusion instance will be created.
"""
class StateValueValuesEnum(_messages.Enum):
r"""Output only. The current state of this Data Fusion instance.
Values:
STATE_UNSPECIFIED: Instance does not have a state yet
CREATING: Instance is being created
RUNNING: Instance is running and ready for requests
FAILED: Instance creation failed
DELETING: Instance is being deleted
UPGRADING: Instance is being upgraded
RESTARTING: Instance is being restarted
UPDATING: Instance is being updated
"""
STATE_UNSPECIFIED = 0
CREATING = 1
RUNNING = 2
FAILED = 3
DELETING = 4
UPGRADING = 5
RESTARTING = 6
UPDATING = 7
class TypeValueValuesEnum(_messages.Enum):
r"""Required. Instance type.
Values:
TYPE_UNSPECIFIED: No type specified. The instance creation will fail.
      BASIC: Basic Data Fusion instance. In Basic type, the user will be able
        to create data pipelines using a point-and-click UI. However, there
        are certain limitations, such as a lower limit on concurrent
        pipelines and no support for streaming pipelines.
      ENTERPRISE: Enterprise Data Fusion instance. In Enterprise type, the
        user has more features available, such as support for streaming
        pipelines and a higher limit on concurrent pipelines.
"""
TYPE_UNSPECIFIED = 0
BASIC = 1
ENTERPRISE = 2
@encoding.MapUnrecognizedFields('additionalProperties')
class LabelsValue(_messages.Message):
r"""The resource labels for instance to use to annotate any related
underlying resources such as GCE VMs. The character '=' is not allowed to
be used within the labels.
Messages:
AdditionalProperty: An additional property for a LabelsValue object.
Fields:
additionalProperties: Additional properties of type LabelsValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a LabelsValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
@encoding.MapUnrecognizedFields('additionalProperties')
class OptionsValue(_messages.Message):
r"""Map of additional options used to configure the behavior of Data
Fusion instance.
Messages:
AdditionalProperty: An additional property for a OptionsValue object.
Fields:
additionalProperties: Additional properties of type OptionsValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a OptionsValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
createTime = _messages.StringField(1)
description = _messages.StringField(2)
displayName = _messages.StringField(3)
enableStackdriverLogging = _messages.BooleanField(4)
enableStackdriverMonitoring = _messages.BooleanField(5)
labels = _messages.MessageField('LabelsValue', 6)
name = _messages.StringField(7)
networkConfig = _messages.MessageField('NetworkConfig', 8)
options = _messages.MessageField('OptionsValue', 9)
privateInstance = _messages.BooleanField(10)
serviceAccount = _messages.StringField(11)
serviceEndpoint = _messages.StringField(12)
state = _messages.EnumField('StateValueValuesEnum', 13)
stateMessage = _messages.StringField(14)
type = _messages.EnumField('TypeValueValuesEnum', 15)
updateTime = _messages.StringField(16)
version = _messages.StringField(17)
zone = _messages.StringField(18)
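# Editor's illustrative sketch (not part of the generated bindings): a minimal
# ENTERPRISE Instance wrapped in the corresponding create request. Every value
# here (project, location, instance id, zone) is a placeholder assumption.
def _example_create_request():
  instance = Instance(
      type=Instance.TypeValueValuesEnum.ENTERPRISE,
      description='Example Data Fusion instance',
      enableStackdriverLogging=True,
      zone='us-west1-a')
  return DatafusionProjectsLocationsInstancesCreateRequest(
      instance=instance,
      instanceId='example-instance',
      parent='projects/example-project/locations/us-west1')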
class ListInstancesResponse(_messages.Message):
r"""Response message for the list instance request.
Fields:
instances: Represents a list of Data Fusion instances.
nextPageToken: Token to retrieve the next page of results or empty if
there are no more results in the list.
unreachable: Locations that could not be reached.
"""
instances = _messages.MessageField('Instance', 1, repeated=True)
nextPageToken = _messages.StringField(2)
unreachable = _messages.StringField(3, repeated=True)
class ListLocationsResponse(_messages.Message):
r"""The response message for Locations.ListLocations.
Fields:
locations: A list of locations that matches the specified filter in the
request.
nextPageToken: The standard List next-page token.
"""
locations = _messages.MessageField('Location', 1, repeated=True)
nextPageToken = _messages.StringField(2)
class ListOperationsResponse(_messages.Message):
r"""The response message for Operations.ListOperations.
Fields:
nextPageToken: The standard List next-page token.
operations: A list of operations that matches the specified filter in the
request.
"""
nextPageToken = _messages.StringField(1)
operations = _messages.MessageField('Operation', 2, repeated=True)
class Location(_messages.Message):
r"""A resource that represents Google Cloud Platform location.
Messages:
LabelsValue: Cross-service attributes for the location. For example
{"cloud.googleapis.com/region": "us-east1"}
MetadataValue: Service-specific metadata. For example the available
capacity at the given location.
Fields:
displayName: The friendly name for this location, typically a nearby city
name. For example, "Tokyo".
labels: Cross-service attributes for the location. For example
{"cloud.googleapis.com/region": "us-east1"}
locationId: The canonical id for this location. For example: `"us-east1"`.
metadata: Service-specific metadata. For example the available capacity at
the given location.
name: Resource name for the location, which may vary between
implementations. For example: `"projects/example-project/locations/us-
east1"`
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class LabelsValue(_messages.Message):
r"""Cross-service attributes for the location. For example
{"cloud.googleapis.com/region": "us-east1"}
Messages:
AdditionalProperty: An additional property for a LabelsValue object.
Fields:
additionalProperties: Additional properties of type LabelsValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a LabelsValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
@encoding.MapUnrecognizedFields('additionalProperties')
class MetadataValue(_messages.Message):
r"""Service-specific metadata. For example the available capacity at the
given location.
Messages:
AdditionalProperty: An additional property for a MetadataValue object.
Fields:
additionalProperties: Properties of the object. Contains field @type
with type URL.
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a MetadataValue object.
Fields:
key: Name of the additional property.
value: A extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
displayName = _messages.StringField(1)
labels = _messages.MessageField('LabelsValue', 2)
locationId = _messages.StringField(3)
metadata = _messages.MessageField('MetadataValue', 4)
name = _messages.StringField(5)
class LogConfig(_messages.Message):
r"""Specifies what kind of log the caller must write
Fields:
cloudAudit: Cloud audit options.
counter: Counter options.
dataAccess: Data access options.
"""
cloudAudit = _messages.MessageField('CloudAuditOptions', 1)
counter = _messages.MessageField('CounterOptions', 2)
dataAccess = _messages.MessageField('DataAccessOptions', 3)
class NetworkConfig(_messages.Message):
r"""Network configuration for a Data Fusion instance. These configurations
are used for peering with the customer network. Configurations are optional
when a public Data Fusion instance is to be created. However, providing
these configurations allows several benefits, such as reduced network
latency while accessing the customer resources from managed Data Fusion
instance nodes, as well as access to the customer on-prem resources.
Fields:
ipAllocation: The IP range in CIDR notation to use for the managed Data
Fusion instance nodes. This range must not overlap with any other ranges
used in the Data Fusion instance network.
network: Name of the network in the customer project with which the Tenant
Project will be peered for executing pipelines.
"""
ipAllocation = _messages.StringField(1)
network = _messages.StringField(2)
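# Editor's illustrative sketch (not part of the generated bindings): a
# NetworkConfig for a private instance; the network name and the /22 IP range
# are placeholder assumptions.
def _example_network_config():
  return NetworkConfig(
      network='default',
      ipAllocation='10.89.48.0/22')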
class Operation(_messages.Message):
r"""This resource represents a long-running operation that is the result of
a network API call.
Messages:
MetadataValue: Service-specific metadata associated with the operation.
It typically contains progress information and common metadata such as
create time. Some services might not provide such metadata. Any method
that returns a long-running operation should document the metadata type,
if any.
ResponseValue: The normal response of the operation in case of success.
If the original method returns no data on success, such as `Delete`, the
response is `google.protobuf.Empty`. If the original method is standard
`Get`/`Create`/`Update`, the response should be the resource. For other
methods, the response should have the type `XxxResponse`, where `Xxx` is
the original method name. For example, if the original method name is
`TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
Fields:
done: If the value is `false`, it means the operation is still in
progress. If `true`, the operation is completed, and either `error` or
`response` is available.
error: The error result of the operation in case of failure or
cancellation.
metadata: Service-specific metadata associated with the operation. It
typically contains progress information and common metadata such as
create time. Some services might not provide such metadata. Any method
that returns a long-running operation should document the metadata type,
if any.
name: The server-assigned name, which is only unique within the same
service that originally returns it. If you use the default HTTP mapping,
the `name` should be a resource name ending with
`operations/{unique_id}`.
response: The normal response of the operation in case of success. If the
original method returns no data on success, such as `Delete`, the
response is `google.protobuf.Empty`. If the original method is standard
`Get`/`Create`/`Update`, the response should be the resource. For other
methods, the response should have the type `XxxResponse`, where `Xxx` is
the original method name. For example, if the original method name is
`TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class MetadataValue(_messages.Message):
r"""Service-specific metadata associated with the operation. It typically
contains progress information and common metadata such as create time.
Some services might not provide such metadata. Any method that returns a
long-running operation should document the metadata type, if any.
Messages:
AdditionalProperty: An additional property for a MetadataValue object.
Fields:
additionalProperties: Properties of the object. Contains field @type
with type URL.
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a MetadataValue object.
Fields:
key: Name of the additional property.
value: A extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
@encoding.MapUnrecognizedFields('additionalProperties')
class ResponseValue(_messages.Message):
r"""The normal response of the operation in case of success. If the
original method returns no data on success, such as `Delete`, the response
is `google.protobuf.Empty`. If the original method is standard
`Get`/`Create`/`Update`, the response should be the resource. For other
methods, the response should have the type `XxxResponse`, where `Xxx` is
the original method name. For example, if the original method name is
`TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
Messages:
AdditionalProperty: An additional property for a ResponseValue object.
Fields:
additionalProperties: Properties of the object. Contains field @type
with type URL.
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a ResponseValue object.
Fields:
key: Name of the additional property.
value: A extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
done = _messages.BooleanField(1)
error = _messages.MessageField('Status', 2)
metadata = _messages.MessageField('MetadataValue', 3)
name = _messages.StringField(4)
response = _messages.MessageField('ResponseValue', 5)
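# Editor's illustrative helper (not part of the generated bindings): how a
# caller might interpret a returned long-running Operation, following the
# done/error/response contract documented above.
def _operation_outcome(operation):
  if not operation.done:
    return 'pending'
  if operation.error is not None:
    return 'failed: %s' % operation.error.message
  return 'succeeded'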
class OperationMetadata(_messages.Message):
r"""Represents the metadata of a long-running operation.
Fields:
apiVersion: API version used to start the operation.
createTime: The time the operation was created.
endTime: The time the operation finished running.
requestedCancellation: Identifies whether the user has requested
cancellation of the operation. Operations that have successfully been
cancelled have Operation.error value with a google.rpc.Status.code of 1,
corresponding to `Code.CANCELLED`.
statusDetail: Human-readable status of the operation if any.
target: Server-defined resource path for the target of the operation.
verb: Name of the verb executed by the operation.
"""
apiVersion = _messages.StringField(1)
createTime = _messages.StringField(2)
endTime = _messages.StringField(3)
requestedCancellation = _messages.BooleanField(4)
statusDetail = _messages.StringField(5)
target = _messages.StringField(6)
verb = _messages.StringField(7)
class Policy(_messages.Message):
r"""Defines an Identity and Access Management (IAM) policy. It is used to
specify access control policies for Cloud Platform resources. A `Policy`
consists of a list of `bindings`. A `binding` binds a list of `members` to a
`role`, where the members can be user accounts, Google groups, Google
domains, and service accounts. A `role` is a named list of permissions
  defined by IAM.

  **JSON Example**

      {
        "bindings": [
          {
            "role": "roles/owner",
            "members": [
              "user:mike@example.com",
              "group:admins@example.com",
              "domain:google.com",
              "serviceAccount:my-other-app@appspot.gserviceaccount.com"
            ]
          },
          {
            "role": "roles/viewer",
            "members": ["user:sean@example.com"]
          }
        ]
      }

  **YAML Example**

      bindings:
      - members:
        - user:mike@example.com
        - group:admins@example.com
        - domain:google.com
        - serviceAccount:my-other-app@appspot.gserviceaccount.com
        role: roles/owner
      - members:
        - user:sean@example.com
        role: roles/viewer

  For a description of IAM and its features, see the
  [IAM developer's guide](https://cloud.google.com/iam/docs).
Fields:
auditConfigs: Specifies cloud audit logging configuration for this policy.
bindings: Associates a list of `members` to a `role`. `bindings` with no
members will result in an error.
etag: `etag` is used for optimistic concurrency control as a way to help
prevent simultaneous updates of a policy from overwriting each other. It
is strongly suggested that systems make use of the `etag` in the read-
modify-write cycle to perform policy updates in order to avoid race
conditions: An `etag` is returned in the response to `getIamPolicy`, and
systems are expected to put that etag in the request to `setIamPolicy`
to ensure that their change will be applied to the same version of the
policy. If no `etag` is provided in the call to `setIamPolicy`, then
the existing policy is overwritten blindly.
iamOwned: A boolean attribute.
rules: If more than one rule is specified, the rules are applied in the
following manner: - All matching LOG rules are always applied. - If any
DENY/DENY_WITH_LOG rule matches, permission is denied. Logging will be
applied if one or more matching rule requires logging. - Otherwise, if
any ALLOW/ALLOW_WITH_LOG rule matches, permission is granted.
Logging will be applied if one or more matching rule requires logging. -
Otherwise, if no rule applies, permission is denied.
version: Deprecated.
"""
auditConfigs = _messages.MessageField('AuditConfig', 1, repeated=True)
bindings = _messages.MessageField('Binding', 2, repeated=True)
etag = _messages.BytesField(3)
iamOwned = _messages.BooleanField(4)
rules = _messages.MessageField('Rule', 5, repeated=True)
version = _messages.IntegerField(6, variant=_messages.Variant.INT32)
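# Editor's illustrative sketch (not part of the generated bindings): the
# read-modify-write pattern suggested by the `etag` description above: append
# a Binding to a Policy fetched via getIamPolicy and keep its etag so that
# setIamPolicy can detect concurrent updates. Role and member are placeholders.
def _add_viewer(policy, member='user:sean@example.com'):
  policy.bindings.append(Binding(role='roles/viewer', members=[member]))
  return policy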
class RestartInstanceRequest(_messages.Message):
r"""Request message for restarting a Data Fusion instance."""
class Rule(_messages.Message):
r"""A rule to be applied in a Policy.
Enums:
ActionValueValuesEnum: Required
Fields:
action: Required
conditions: Additional restrictions that must be met. All conditions must
pass for the rule to match.
description: Human-readable description of the rule.
in_: If one or more 'in' clauses are specified, the rule matches if the
PRINCIPAL/AUTHORITY_SELECTOR is in at least one of these entries.
logConfig: The config returned to callers of tech.iam.IAM.CheckPolicy for
any entries that match the LOG action.
notIn: If one or more 'not_in' clauses are specified, the rule matches if
the PRINCIPAL/AUTHORITY_SELECTOR is in none of the entries. The format
      for in and not_in entries can be found in the Local IAM documentation
(see go/local-iam#features).
permissions: A permission is a string of form '<service>.<resource
type>.<verb>' (e.g., 'storage.buckets.list'). A value of '*' matches all
permissions, and a verb part of '*' (e.g., 'storage.buckets.*') matches
all verbs.
"""
class ActionValueValuesEnum(_messages.Enum):
r"""Required
Values:
NO_ACTION: Default no action.
ALLOW: Matching 'Entries' grant access.
ALLOW_WITH_LOG: Matching 'Entries' grant access and the caller promises
to log the request per the returned log_configs.
DENY: Matching 'Entries' deny access.
DENY_WITH_LOG: Matching 'Entries' deny access and the caller promises to
log the request per the returned log_configs.
LOG: Matching 'Entries' tell IAM.Check callers to generate logs.
"""
NO_ACTION = 0
ALLOW = 1
ALLOW_WITH_LOG = 2
DENY = 3
DENY_WITH_LOG = 4
LOG = 5
action = _messages.EnumField('ActionValueValuesEnum', 1)
conditions = _messages.MessageField('Condition', 2, repeated=True)
description = _messages.StringField(3)
in_ = _messages.StringField(4, repeated=True)
logConfig = _messages.MessageField('LogConfig', 5, repeated=True)
notIn = _messages.StringField(6, repeated=True)
permissions = _messages.StringField(7, repeated=True)
class SetIamPolicyRequest(_messages.Message):
r"""Request message for `SetIamPolicy` method.
Fields:
policy: REQUIRED: The complete policy to be applied to the `resource`. The
      size of the policy is limited to a few tens of KB. An empty policy is a
valid policy but certain Cloud Platform services (such as Projects)
might reject them.
updateMask: OPTIONAL: A FieldMask specifying which fields of the policy to
modify. Only the fields in the mask will be modified. If no mask is
provided, the following default mask is used: paths: "bindings, etag"
This field is only used by Cloud IAM.
"""
policy = _messages.MessageField('Policy', 1)
updateMask = _messages.StringField(2)
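# Editor's illustrative sketch (not part of the generated bindings): wrapping
# an updated Policy in a SetIamPolicyRequest; the updateMask value is a
# placeholder assumption.
def _example_set_iam_policy_request(policy):
  return SetIamPolicyRequest(policy=policy, updateMask='bindings,etag')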
class StandardQueryParameters(_messages.Message):
r"""Query parameters accepted by all methods.
Enums:
FXgafvValueValuesEnum: V1 error format.
AltValueValuesEnum: Data format for response.
Fields:
f__xgafv: V1 error format.
access_token: OAuth access token.
alt: Data format for response.
callback: JSONP
fields: Selector specifying which fields to include in a partial response.
key: API key. Your API key identifies your project and provides you with
API access, quota, and reports. Required unless you provide an OAuth 2.0
token.
oauth_token: OAuth 2.0 token for the current user.
prettyPrint: Returns response with indentations and line breaks.
quotaUser: Available to use for quota purposes for server-side
applications. Can be any arbitrary string assigned to a user, but should
not exceed 40 characters.
trace: A tracing token of the form "token:<tokenid>" to include in api
requests.
uploadType: Legacy upload protocol for media (e.g. "media", "multipart").
upload_protocol: Upload protocol for media (e.g. "raw", "multipart").
"""
class AltValueValuesEnum(_messages.Enum):
r"""Data format for response.
Values:
json: Responses with Content-Type of application/json
media: Media download with context-dependent Content-Type
proto: Responses with Content-Type of application/x-protobuf
"""
json = 0
media = 1
proto = 2
class FXgafvValueValuesEnum(_messages.Enum):
r"""V1 error format.
Values:
_1: v1 error format
_2: v2 error format
"""
_1 = 0
_2 = 1
f__xgafv = _messages.EnumField('FXgafvValueValuesEnum', 1)
access_token = _messages.StringField(2)
alt = _messages.EnumField('AltValueValuesEnum', 3, default=u'json')
callback = _messages.StringField(4)
fields = _messages.StringField(5)
key = _messages.StringField(6)
oauth_token = _messages.StringField(7)
prettyPrint = _messages.BooleanField(8, default=True)
quotaUser = _messages.StringField(9)
trace = _messages.StringField(10)
uploadType = _messages.StringField(11)
upload_protocol = _messages.StringField(12)
class Status(_messages.Message):
r"""The `Status` type defines a logical error model that is suitable for
different programming environments, including REST APIs and RPC APIs. It is
used by [gRPC](https://github.com/grpc). Each `Status` message contains
three pieces of data: error code, error message, and error details. You can
find out more about this error model and how to work with it in the [API
Design Guide](https://cloud.google.com/apis/design/errors).
Messages:
DetailsValueListEntry: A DetailsValueListEntry object.
Fields:
code: The status code, which should be an enum value of google.rpc.Code.
details: A list of messages that carry the error details. There is a
common set of message types for APIs to use.
message: A developer-facing error message, which should be in English. Any
user-facing error message should be localized and sent in the
google.rpc.Status.details field, or localized by the client.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class DetailsValueListEntry(_messages.Message):
r"""A DetailsValueListEntry object.
Messages:
AdditionalProperty: An additional property for a DetailsValueListEntry
object.
Fields:
additionalProperties: Properties of the object. Contains field @type
with type URL.
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a DetailsValueListEntry object.
Fields:
key: Name of the additional property.
value: A extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
code = _messages.IntegerField(1, variant=_messages.Variant.INT32)
details = _messages.MessageField('DetailsValueListEntry', 2, repeated=True)
message = _messages.StringField(3)
class TestIamPermissionsRequest(_messages.Message):
r"""Request message for `TestIamPermissions` method.
Fields:
permissions: The set of permissions to check for the `resource`.
Permissions with wildcards (such as '*' or 'storage.*') are not allowed.
For more information see [IAM
Overview](https://cloud.google.com/iam/docs/overview#permissions).
"""
permissions = _messages.StringField(1, repeated=True)
class TestIamPermissionsResponse(_messages.Message):
r"""Response message for `TestIamPermissions` method.
Fields:
permissions: A subset of `TestPermissionsRequest.permissions` that the
caller is allowed.
"""
permissions = _messages.StringField(1, repeated=True)
class UpgradeInstanceRequest(_messages.Message):
r"""Request message for upgrading a Data Fusion instance. To change the
instance properties, instance update should be used.
"""
encoding.AddCustomJsonFieldMapping(
Rule, 'in_', 'in')
encoding.AddCustomJsonFieldMapping(
StandardQueryParameters, 'f__xgafv', '$.xgafv')
encoding.AddCustomJsonEnumMapping(
StandardQueryParameters.FXgafvValueValuesEnum, '_1', '1')
encoding.AddCustomJsonEnumMapping(
StandardQueryParameters.FXgafvValueValuesEnum, '_2', '2')
| 39.97247
| 89
| 0.727752
|
732ab0e18335d872208baaa154f16e292ca1b8d1
| 7,502
|
py
|
Python
|
Bio/SearchIO/InterproscanIO/interproscan_xml.py
|
erpeg/biopython
|
296b6b451ce7161fdace2fd36d0817722491d733
|
[
"BSD-3-Clause"
] | 2
|
2020-06-25T12:52:03.000Z
|
2020-07-11T09:47:34.000Z
|
Bio/SearchIO/InterproscanIO/interproscan_xml.py
|
cosign070128/biopython
|
2f02e34ba76306e9c27eec9e051809bec2cece9b
|
[
"BSD-3-Clause"
] | 9
|
2020-05-05T00:54:23.000Z
|
2020-06-09T17:10:45.000Z
|
Bio/SearchIO/InterproscanIO/interproscan_xml.py
|
cosign070128/biopython
|
2f02e34ba76306e9c27eec9e051809bec2cece9b
|
[
"BSD-3-Clause"
] | 3
|
2020-06-29T13:07:46.000Z
|
2021-06-14T20:11:55.000Z
|
# Copyright 2018 by Adhemar Zerlotini. All rights reserved.
#
# This file is part of the Biopython distribution and governed by your
# choice of the "Biopython License Agreement" or the "BSD 3-Clause License".
# Please see the LICENSE file that should have been included as part of this
# package.
"""Bio.SearchIO parser for InterProScan XML output formats."""
# for more info: https://github.com/ebi-pf-team/interproscan/wiki/OutputFormats
import re
from xml.etree import ElementTree
from Bio.Alphabet import generic_protein
from Bio.SearchIO._model import QueryResult, Hit, HSP, HSPFragment
# element - hit attribute name mapping
_ELEM_HIT = {
"name": ("accession", str),
"ac": ("id", str),
"desc": ("description", str),
}
# element - hsp attribute name mapping
_ELEM_HSP = {"score": ("bitscore", float), "evalue": ("evalue", float)}
# element - fragment attribute name mapping
_ELEM_FRAG = {
"start": ("query_start", int),
"end": ("query_end", int),
"hmm-start": ("hit_start", int),
"hmm-end": ("hit_end", int),
}
class InterproscanXmlParser:
"""Parser for the InterProScan XML format."""
def __init__(self, handle):
"""Initialize the class."""
self.xml_iter = iter(ElementTree.iterparse(handle, events=("start", "end")))
self._meta = self._parse_header()
def __iter__(self):
"""Iterate qresults."""
yield from self._parse_qresult()
def _parse_header(self):
"""Parse the header for the InterProScan version (PRIVATE)."""
event, elem = next(self.xml_iter)
meta = {}
meta["target"] = "InterPro"
meta["program"] = "InterProScan"
meta["version"] = elem.attrib["interproscan-version"]
# store the namespace value
self.NS = re.sub("protein-matches", "", elem.tag)
return meta
def _parse_qresult(self):
"""Parse query results (PRIVATE)."""
for event, elem in self.xml_iter:
if event == "end" and elem.tag == self.NS + "protein":
# store the query sequence
seq = elem.find(self.NS + "sequence")
query_seq = seq.text
# store the query id and description
xref = elem.find(self.NS + "xref")
query_id = xref.attrib["id"]
query_desc = xref.attrib["name"]
# parse each hit
hit_list = []
for hit_new in self._parse_hit(
elem.find(self.NS + "matches"), query_id, query_seq
):
# interproscan results contain duplicate hits rather than
# a single hit with multiple hsps. In this case the hsps
# of a duplicate hit will be appended to the already
# existing hit
for hit in hit_list:
if hit.id == hit_new.id:
for hsp in hit_new.hsps:
hit.hsps.append(hsp)
break
else:
hit_list.append(hit_new)
                # create qresult and assign attributes
qresult = QueryResult(hit_list, query_id)
setattr(qresult, "description", query_desc)
for key, value in self._meta.items():
setattr(qresult, key, value)
yield qresult
def _parse_hit(self, root_hit_elem, query_id, query_seq=None):
"""Parse hit (PRIVATE)."""
# feed the loop below an empty list so iteration still works
if root_hit_elem is None:
root_hit_elem = []
for hit_elem in root_hit_elem:
# store the match/location type
hit_type = re.sub(r"%s(\w+)-match" % self.NS, r"\1", hit_elem.find(".").tag)
# store the hit id
signature = hit_elem.find(self.NS + "signature")
hit_id = signature.attrib["ac"]
# store xrefs and alt_descs
xrefs = self._parse_xrefs(signature.find(self.NS + "entry"))
# parse each hsp
hsps = list(
self._parse_hsp(
hit_elem.find(self.NS + "locations"), query_id, hit_id, query_seq
)
)
# create hit and assign attributes
hit = Hit(hsps, hit_id)
setattr(hit, "dbxrefs", xrefs)
for key, (attr, caster) in _ELEM_HIT.items():
value = signature.attrib.get(key)
if value is not None:
setattr(hit, attr, caster(value))
# format specific attributes
hit.attributes["Hit type"] = str(hit_type)
signature_lib = signature.find(self.NS + "signature-library-release")
hit.attributes["Target"] = str(signature_lib.attrib.get("library"))
hit.attributes["Target version"] = str(signature_lib.attrib.get("version"))
yield hit
def _parse_hsp(self, root_hsp_elem, query_id, hit_id, query_seq=None):
"""Parse hsp (PRIVATE)."""
# feed the loop below an empty list so iteration still works
if root_hsp_elem is None:
root_hsp_elem = []
for hsp_elem in root_hsp_elem:
# create frag and assign attributes
frag = HSPFragment(hit_id, query_id)
setattr(frag, "alphabet", generic_protein)
if query_seq is not None:
setattr(frag, "query", query_seq)
for key, (attr, caster) in _ELEM_FRAG.items():
value = hsp_elem.attrib.get(key)
if value is not None:
# start should be 0-based
if attr.endswith("start"):
value = caster(value) - 1
# store query start and end to calculate aln_span
if attr == "query_start":
start = int(value)
if attr == "query_end":
end = int(value)
setattr(frag, attr, caster(value))
# calculate aln_span and store
setattr(frag, "aln_span", end - start)
# create hsp and assign attributes
hsp = HSP([frag])
setattr(hsp, "query_id", query_id)
setattr(hsp, "hit_id", hit_id)
for key, (attr, caster) in _ELEM_HSP.items():
value = hsp_elem.attrib.get(key)
if value is not None:
setattr(hsp, attr, caster(value))
yield hsp
def _parse_xrefs(self, root_entry_elem):
"""Parse xrefs (PRIVATE)."""
xrefs = []
# store entry id and description
if root_entry_elem is not None:
xrefs.append("IPR:" + root_entry_elem.attrib["ac"])
# store go-xrefs and pathway-refs id and description
if root_entry_elem is not None:
xref_elems = []
xref_elems = xref_elems + root_entry_elem.findall(self.NS + "go-xref")
xref_elems = xref_elems + root_entry_elem.findall(self.NS + "pathway-xref")
for entry in xref_elems:
xref = entry.attrib["id"]
if ":" not in xref:
xref = entry.attrib["db"] + ":" + xref
xrefs.append(xref)
return xrefs
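# Usage sketch added by the editor (not part of the original module): the
# parser above is normally reached through the Bio.SearchIO front end. The
# file name below is a placeholder and the "interproscan-xml" format key is
# assumed to be the name under which this parser is registered.
def _example_usage(path="result.xml"):
    """Iterate InterProScan query results via Bio.SearchIO (sketch only)."""
    from Bio import SearchIO
    for qresult in SearchIO.parse(path, "interproscan-xml"):
        for hit in qresult:
            print(qresult.id, hit.id, [hsp.evalue for hsp in hit.hsps])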
# if not used as a module, run the doctest
if __name__ == "__main__":
from Bio._utils import run_doctest
run_doctest()
| 38.27551
| 88
| 0.553852
|
66eb56da7830565fb8589af2c8afe15d4bad08af
| 379
|
py
|
Python
|
Client.py
|
SammedDoshi03/Files-Transfer-using-UDP
|
47fa5c48473d07c8ec24a22b627dba263e470233
|
[
"MIT"
] | null | null | null |
Client.py
|
SammedDoshi03/Files-Transfer-using-UDP
|
47fa5c48473d07c8ec24a22b627dba263e470233
|
[
"MIT"
] | null | null | null |
Client.py
|
SammedDoshi03/Files-Transfer-using-UDP
|
47fa5c48473d07c8ec24a22b627dba263e470233
|
[
"MIT"
] | null | null | null |
import socket
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
addr = ('localhost', 55558)
fname = input("Enter File Name For transmission: ")
f = open(fname, "rb")
# read the first 1 KiB chunk before announcing the transfer
info = f.read(1024)
# send the file name first so the receiver knows what file to create
s.sendto(str.encode(fname, "utf-8"), addr)
# stream the file in 1024-byte datagrams until EOF
while info:
    if s.sendto(info, addr):
        info = f.read(1024)
s.close()
f.close()
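# Editor's sketch of a matching receiver (an assumption, not part of this
# repository): the first datagram carries the file name, each following
# datagram carries up to 1024 bytes of content. Since the sender emits no
# end-of-file marker, this sketch stops after a receive timeout.
def example_receiver(host='localhost', port=55558, timeout=5):
    r = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    r.bind((host, port))
    name, _ = r.recvfrom(1024)
    r.settimeout(timeout)
    with open(name.decode("utf-8"), "wb") as out:
        try:
            while True:
                chunk, _ = r.recvfrom(1024)
                out.write(chunk)
        except socket.timeout:
            pass
    r.close()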
| 19.947368
| 52
| 0.649077
|
68c8e24802956e634f89500ccc5b11d64bf94dea
| 1,189
|
py
|
Python
|
ml/test.py
|
DmiitriyJarosh/MTSHack
|
f4009f71f2c3d5c06019327196bd7f09c2c4d983
|
[
"MIT"
] | null | null | null |
ml/test.py
|
DmiitriyJarosh/MTSHack
|
f4009f71f2c3d5c06019327196bd7f09c2c4d983
|
[
"MIT"
] | null | null | null |
ml/test.py
|
DmiitriyJarosh/MTSHack
|
f4009f71f2c3d5c06019327196bd7f09c2c4d983
|
[
"MIT"
] | null | null | null |
import pickle
import pandas as pd
import sys
import sklearn.feature_extraction
from sklearn.feature_extraction.text import CountVectorizer
import xgboost
from xgboost.sklearn import XGBClassifier
from sklearn.naive_bayes import MultinomialNB
def load_model(path):
return pickle.load(open(path, "rb"))
def load_vectorizer():
return pickle.load(open("ml/vector.pickle", "rb"))
def predict(host, path):
data = pd.DataFrame({'url': [host]})
    # handcrafted URL features: length, dot/hyphen counts, 'www' flag, digit count
data['len'] = list(map(len, data['url'].values))
data['dots'] = list(map(lambda x: x.count('.'), data['url'].values))
data['hyphen'] = list(map(lambda x: x.count('-'), data['url'].values))
data['www'] = list(map(lambda x: int('www' in x), data['url'].values))
data['numbers'] = list(map(lambda x: sum(c.isdigit() for c in x), data['url'].values))
corpus = data['url'].values
vectorizer = load_vectorizer()
X = vectorizer.transform(corpus).toarray()
data = pd.concat([data, pd.DataFrame(X)], axis=1).drop(['url'], axis=1)
model = load_model(path)
return model.predict(data)[0]
if __name__ == "__main__":
host = sys.argv[1]
print(predict(host, sys.argv[2]))
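# Example invocation added by the editor (paths are placeholders; the pickled
# model and ml/vector.pickle must come from the matching training run):
#
#     python ml/test.py example-phishing-site.com ml/model.pickle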
| 29.725
| 90
| 0.66947
|
770ad2efa3eee75372f5bdbde48cc52f19ef957b
| 20,365
|
py
|
Python
|
scripts/upload.py
|
hperala/kontuwikibot
|
f409e6fb45adf4e553dc326d9fb3c0d29eda6373
|
[
"MIT"
] | null | null | null |
scripts/upload.py
|
hperala/kontuwikibot
|
f409e6fb45adf4e553dc326d9fb3c0d29eda6373
|
[
"MIT"
] | null | null | null |
scripts/upload.py
|
hperala/kontuwikibot
|
f409e6fb45adf4e553dc326d9fb3c0d29eda6373
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Script to upload images to wikipedia.
Arguments:
-keep Keep the filename as is
-filename Target filename without the namespace prefix
-noverify Do not ask for verification of the upload description if one
is given
-abortonwarn: Abort upload on the specified warning type. If no warning type
is specified, aborts on any warning.
-ignorewarn: Ignores specified upload warnings. If no warning type is
specified, ignores all warnings. Use with caution
-chunked: Upload the file in chunks (more overhead, but restartable). If
no value is specified the chunk size is 1 MiB. The value must
be a number which can be preceded by a suffix. The units are:
No suffix: Bytes
'k': Kilobytes (1000 B)
'M': Megabytes (1000000 B)
'Ki': Kibibytes (1024 B)
'Mi': Mebibytes (1024x1024 B)
The suffixes are case insensitive.
If any other arguments are given, the first is either a URL, filename or directory
to upload, and the rest is a proposed description to go with the upload. If none
of these are given, the user is asked for the directory, file or URL to upload.
The bot will then upload the image to the wiki.
The script will ask for the location of one or more images, if not given as a
parameter, and for a description.
"""
#
# (C) Rob W.W. Hooft, Andre Engels 2003-2004
# (C) Pywikibot team, 2003-2015
#
# Distributed under the terms of the MIT license.
#
from __future__ import unicode_literals
__version__ = '$Id: 95c3a7718c2627e1ca1bd38d4b8786a441fd0443 $'
#
import os
import time
import tempfile
import re
import math
import sys
import pywikibot
import pywikibot.data.api
from pywikibot import config
from pywikibot.tools import (
deprecated
)
if sys.version_info[0] > 2:
from urllib.parse import urlparse
from urllib.request import URLopener
basestring = (str,)
else:
from urlparse import urlparse
from urllib import URLopener
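# Editor's illustrative sketch (not part of this script): how the "-chunked"
# suffixes described in the module docstring could be converted to a byte
# count. The helper name and regex are assumptions; the script's own argument
# handling lives elsewhere.
#
#   _SUFFIXES = {'': 1, 'k': 1000, 'm': 1000 ** 2, 'ki': 1024, 'mi': 1024 ** 2}
#
#   def chunk_size_to_bytes(value):
#       match = re.match(r'(\d+) *([a-zA-Z]*)$', value.strip())
#       return int(match.group(1)) * _SUFFIXES[match.group(2).lower()]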
class UploadRobot:
"""Upload bot."""
def __init__(self, url, urlEncoding=None, description=u'',
useFilename=None, keepFilename=False,
verifyDescription=True, ignoreWarning=False,
targetSite=None, uploadByUrl=False, aborts=[], chunk_size=0):
"""
Constructor.
@param url: path to url or local file (deprecated), or list of urls or
paths to local files.
@type url: string (deprecated) or list
@param description: Description of file for its page. If multiple files
are uploading the same description is used for every file.
@type description: string
@param useFilename: Specify title of the file's page. If multiple
files are uploading it asks to change the name for second, third,
etc. files, otherwise the last file will overwrite the other.
@type useFilename: string
@param keepFilename: Set to True to keep original names of urls and
files, otherwise it will ask to enter a name for each file.
@type keepFilename: bool
@param verifyDescription: Set to False to not proofread the description.
@type verifyDescription: bool
@param ignoreWarning: Set this to True to upload even if another file
would be overwritten or another mistake would be risked. Set it to
an array of warning codes to selectively ignore specific warnings.
@type ignoreWarning: bool or list
@param targetSite: Set the site to upload to. If target site is not
given it's taken from user-config.py.
@type targetSite: object
@param aborts: List of the warning types to abort upload on. Set to True
to abort on any warning.
@type aborts: bool or list
@param chunk_size: Upload the file in chunks (more overhead, but
restartable) specified in bytes. If no value is specified the file
will be uploaded as whole.
@type chunk_size: integer
@deprecated: Using upload_image() is deprecated, use upload_file() with
file_url param instead
"""
self.url = url
if isinstance(self.url, basestring):
pywikibot.warning("url as string is deprecated. "
"Use an iterable instead.")
self.urlEncoding = urlEncoding
self.description = description
self.useFilename = useFilename
self.keepFilename = keepFilename
self.verifyDescription = verifyDescription
self.ignoreWarning = ignoreWarning
self.aborts = aborts
self.chunk_size = chunk_size
if config.upload_to_commons:
self.targetSite = targetSite or pywikibot.Site('commons',
'commons')
else:
self.targetSite = targetSite or pywikibot.Site()
self.targetSite.login()
self.uploadByUrl = uploadByUrl
@deprecated()
def urlOK(self):
"""Return True if self.url is a URL or an existing local file."""
return "://" in self.url or os.path.exists(self.url)
def read_file_content(self, file_url=None):
"""Return name of temp file in which remote file is saved."""
if not file_url:
file_url = self.url
pywikibot.warning("file_url is not given. "
"Set to self.url by default.")
pywikibot.output(u'Reading file %s' % file_url)
resume = False
rlen = 0
_contents = None
dt = 15
uo = URLopener()
retrieved = False
while not retrieved:
if resume:
pywikibot.output(u"Resume download...")
uo.addheader('Range', 'bytes=%s-' % rlen)
infile = uo.open(file_url)
if 'text/html' in infile.info().getheader('Content-Type'):
pywikibot.output(u"Couldn't download the image: "
"the requested URL was not found on server.")
return
content_len = infile.info().getheader('Content-Length')
accept_ranges = infile.info().getheader('Accept-Ranges') == 'bytes'
if resume:
_contents += infile.read()
else:
_contents = infile.read()
infile.close()
retrieved = True
if content_len:
rlen = len(_contents)
content_len = int(content_len)
if rlen < content_len:
retrieved = False
pywikibot.output(
u"Connection closed at byte %s (%s left)"
% (rlen, content_len))
if accept_ranges and rlen > 0:
resume = True
pywikibot.output(u"Sleeping for %d seconds..." % dt)
time.sleep(dt)
if dt <= 60:
dt += 15
elif dt < 360:
dt += 60
else:
pywikibot.log(
u"WARNING: length check of retrieved data not possible.")
handle, tempname = tempfile.mkstemp()
with os.fdopen(handle, "wb") as t:
t.write(_contents)
t.close()
return tempname
def process_filename(self, file_url=None):
"""Return base filename portion of file_url."""
if not file_url:
file_url = self.url
pywikibot.warning("file_url is not given. "
"Set to self.url by default.")
# Isolate the pure name
filename = file_url
# Filename may be either a URL or a local file path
if "://" in filename:
# extract the path portion of the URL
filename = urlparse(filename).path
filename = os.path.basename(filename)
if self.useFilename:
filename = self.useFilename
if not self.keepFilename:
pywikibot.output(
u"The filename on the target wiki will default to: %s"
% filename)
newfn = pywikibot.input(
u'Enter a better name, or press enter to accept:')
if newfn != "":
filename = newfn
# FIXME: these 2 belong somewhere else, presumably in family
# forbidden characters are handled by pywikibot/page.py
forbidden = ':*?/\\' # to be extended
allowed_formats = (u'gif', u'jpg', u'jpeg', u'mid', u'midi',
u'ogg', u'png', u'svg', u'xcf', u'djvu',
u'ogv', u'oga', u'tif', u'tiff', u'webm',
u'flac', u'wav')
# ask until it's valid
first_check = True
while True:
if not first_check:
filename = pywikibot.input('Enter a better name, or press '
'enter to skip the file:')
if not filename:
return None
first_check = False
ext = os.path.splitext(filename)[1].lower().strip('.')
# are any chars in forbidden also in filename?
invalid = set(forbidden) & set(filename)
if invalid:
c = "".join(invalid)
pywikibot.output(
'Invalid character(s): %s. Please try again' % c)
continue
if ext not in allowed_formats:
if not pywikibot.input_yn(
u"File format is not one of [%s], but %s. Continue?"
% (u' '.join(allowed_formats), ext),
default=False, automatic_quit=False):
continue
potential_file_page = pywikibot.FilePage(self.targetSite, filename)
if potential_file_page.exists():
if self.aborts is True:
pywikibot.output("File exists and you asked to abort. Skipping.")
return None
if potential_file_page.canBeEdited():
if pywikibot.input_yn(u"File with name %s already exists. "
"Would you like to change the name? "
"(Otherwise file will be overwritten.)"
% filename, default=True,
automatic_quit=False):
continue
else:
break
else:
pywikibot.output(u"File with name %s already exists and "
"cannot be overwritten." % filename)
continue
else:
try:
if potential_file_page.fileIsShared():
pywikibot.output(u"File with name %s already exists in shared "
"repository and cannot be overwritten."
% filename)
continue
else:
break
except pywikibot.NoPage:
break
# A proper description for the submission.
# Empty descriptions are not accepted.
pywikibot.output(u'The suggested description is:\n%s'
% self.description)
# Description must be set and verified
if not self.description:
self.verifyDescription = True
while not self.description or self.verifyDescription:
if not self.description:
pywikibot.output(
u'\03{lightred}It is not possible to upload a file '
'without a summary/description.\03{default}')
# if no description, default is 'yes'
if pywikibot.input_yn(
u'Do you want to change this description?',
default=not self.description):
from pywikibot import editor as editarticle
editor = editarticle.TextEditor()
try:
newDescription = editor.edit(self.description)
except Exception as e:
pywikibot.error(e)
continue
# if user saved / didn't press Cancel
if newDescription:
self.description = newDescription
self.verifyDescription = False
return filename
def abort_on_warn(self, warn_code):
"""Determine if the warning message should cause an abort."""
if self.aborts is True:
return True
else:
return warn_code in self.aborts
def ignore_on_warn(self, warn_code):
"""Determine if the warning message should be ignored."""
if self.ignoreWarning is True:
return True
else:
return warn_code in self.ignoreWarning
@deprecated('UploadRobot.upload_file()')
def upload_image(self, debug=False):
"""Upload image."""
self.upload_file(self.url, debug)
def upload_file(self, file_url, debug=False):
"""Upload the image at file_url to the target wiki.
Return the filename that was used to upload the image.
If the upload fails, ask the user whether to try again or not.
If the user chooses not to retry, return None.
"""
filename = self.process_filename(file_url)
if not filename:
return None
site = self.targetSite
imagepage = pywikibot.FilePage(site, filename) # normalizes filename
imagepage.text = self.description
pywikibot.output(u'Uploading file to %s via API...' % site)
try:
apiIgnoreWarnings = False
if self.ignoreWarning is True:
apiIgnoreWarnings = True
if self.uploadByUrl:
site.upload(imagepage, source_url=file_url,
ignore_warnings=apiIgnoreWarnings)
else:
if "://" in file_url:
temp = self.read_file_content(file_url)
else:
temp = file_url
site.upload(imagepage, source_filename=temp,
ignore_warnings=apiIgnoreWarnings,
chunk_size=self.chunk_size)
except pywikibot.data.api.UploadWarning as warn:
pywikibot.output(
u'We got a warning message: {0} - {1}'.format(warn.code, warn.message))
if self.abort_on_warn(warn.code):
answer = False
elif self.ignore_on_warn(warn.code):
answer = True
else:
answer = pywikibot.input_yn(u"Do you want to ignore?",
default=False, automatic_quit=False)
if answer:
self.ignoreWarning = True
self.keepFilename = True
return self.upload_file(file_url, debug)
else:
pywikibot.output(u"Upload aborted.")
return
except pywikibot.data.api.APIError as error:
if error.code == u'uploaddisabled':
pywikibot.error("Upload error: Local file uploads are disabled on %s."
% site)
else:
pywikibot.error("Upload error: ", exc_info=True)
except Exception:
pywikibot.error("Upload error: ", exc_info=True)
else:
# No warning, upload complete.
pywikibot.output(u"Upload of %s successful." % filename)
return filename # data['filename']
def run(self):
"""Run bot."""
# early check that upload is enabled
if self.targetSite.is_uploaddisabled():
pywikibot.error(
"Upload error: Local file uploads are disabled on %s."
% self.targetSite)
return
# early check that user has proper rights to upload
if "upload" not in self.targetSite.userinfo["rights"]:
pywikibot.error(
"User '%s' does not have upload rights on site %s."
% (self.targetSite.user(), self.targetSite))
return
if isinstance(self.url, basestring):
return self.upload_file(self.url)
for file_url in self.url:
self.upload_file(file_url)
def main(*args):
"""
Process command line arguments and invoke bot.
If args is an empty list, sys.argv is used.
@param args: command line arguments
@type args: list of unicode
"""
url = u''
description = []
keepFilename = False
useFilename = None
verifyDescription = True
aborts = set()
ignorewarn = set()
chunk_size = 0
chunk_size_regex = r'^-chunked(?::(\d+(?:\.\d+)?)[ \t]*(k|ki|m|mi)?b?)?$'
chunk_size_regex = re.compile(chunk_size_regex, re.I)
# process all global bot args
# returns a list of non-global args, i.e. args for upload.py
for arg in pywikibot.handle_args(args):
if arg:
if arg.startswith('-keep'):
keepFilename = True
elif arg.startswith('-filename:'):
useFilename = arg[10:]
elif arg.startswith('-noverify'):
verifyDescription = False
elif arg.startswith('-abortonwarn'):
if len(arg) > len('-abortonwarn:') and aborts is not True:
aborts.add(arg[len('-abortonwarn:'):])
else:
aborts = True
elif arg.startswith('-ignorewarn'):
if len(arg) > len('-ignorewarn:') and ignorewarn is not True:
ignorewarn.add(arg[len('-ignorewarn:'):])
else:
ignorewarn = True
elif arg.startswith('-chunked'):
match = chunk_size_regex.match(arg)
if match:
if match.group(1): # number was in there
base = float(match.group(1))
if match.group(2): # suffix too
suffix = match.group(2).lower()
if suffix == "k":
suffix = 1000
elif suffix == "m":
suffix = 1000000
elif suffix == "ki":
suffix = 1 << 10
elif suffix == "mi":
suffix = 1 << 20
else:
pass # huh?
else:
suffix = 1
chunk_size = math.trunc(base * suffix)
else:
chunk_size = 1 << 20 # default to 1 MiB
else:
pywikibot.error('Chunk size parameter is not valid.')
elif url == u'':
url = arg
else:
description.append(arg)
while not ("://" in url or os.path.exists(url)):
if not url:
pywikibot.output(u'No input filename given.')
else:
pywikibot.output(u'Invalid input filename given. Try again.')
url = pywikibot.input(u'URL, file or directory where files are now:')
if os.path.isdir(url):
file_list = []
for directory_info in os.walk(url):
for dir_file in directory_info[2]:
file_list.append(os.path.join(directory_info[0], dir_file))
url = file_list
else:
url = [url]
description = u' '.join(description)
bot = UploadRobot(url, description=description, useFilename=useFilename,
keepFilename=keepFilename,
verifyDescription=verifyDescription,
aborts=aborts, ignoreWarning=ignorewarn,
chunk_size=chunk_size)
bot.run()
if __name__ == "__main__":
main()
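# Illustration only: the "-chunked" suffix convention documented in the module
# docstring and implemented in main() above maps a size string to a number of
# bytes. The helper below is a hypothetical, self-contained sketch of that
# conversion and is not used anywhere in this script.
import math
import re
_CHUNK_SIZE_RE = re.compile(r'^(\d+(?:\.\d+)?)[ \t]*(k|ki|m|mi)?b?$', re.I)
_CHUNK_FACTORS = {'k': 1000, 'm': 1000000, 'ki': 1 << 10, 'mi': 1 << 20}
def _parse_chunk_size(text):
    """Return the chunk size in bytes, e.g. '1.5Mi' -> 1572864 (demo only)."""
    match = _CHUNK_SIZE_RE.match(text)
    if not match:
        raise ValueError('invalid chunk size: %r' % text)
    base = float(match.group(1))
    suffix = (match.group(2) or '').lower()
    return math.trunc(base * _CHUNK_FACTORS.get(suffix, 1))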
| 39.620623
| 87
| 0.537933
|
dcd4d58c6f52880f7969a8080d179af46cf9138b
| 678
|
py
|
Python
|
tests/test_runtime.py
|
spirali/orco
|
32c839b4d691a3eb83cfa379a1ec429adcf7f1b0
|
[
"MIT"
] | 3
|
2019-08-15T08:06:59.000Z
|
2020-06-14T13:13:09.000Z
|
tests/test_runtime.py
|
spirali/orco
|
32c839b4d691a3eb83cfa379a1ec429adcf7f1b0
|
[
"MIT"
] | 8
|
2019-08-06T11:38:08.000Z
|
2020-03-01T21:44:12.000Z
|
tests/test_runtime.py
|
spirali/xstore
|
32c839b4d691a3eb83cfa379a1ec429adcf7f1b0
|
[
"MIT"
] | 2
|
2019-07-29T18:33:13.000Z
|
2019-08-30T07:54:43.000Z
|
from test_database import announce
import time
import pytest
from orco import Builder
def test_wait_for_others(env):
r = env.test_runtime()
c = r.register_builder(Builder(lambda x: x, "col1"))
assert announce(r, [c(x="test1"), c(x="test2")])
start = time.time()
with pytest.raises(Exception, match="claimed by another"):
r.compute(c(x="test1"))
end = time.time()
assert end - start < 0.5
start = time.time()
with pytest.raises(Exception, match="claimed by another"):
r.compute(c(x="test1"), wait_for_others=4)
end = time.time()
assert 3.9 < end - start < 6
r.drop_unfinished_jobs()
r.compute(c(x="test1"))
| 26.076923
| 62
| 0.644543
|
7f84d2bdcdbfae742369cc947b0d7a45045ce182
| 1,860
|
py
|
Python
|
.history/my_classes/ScopesClosuresAndDecorators/Closures_20210711183348.py
|
minefarmer/deep-Dive-1
|
b0675b853180c5b5781888266ea63a3793b8d855
|
[
"Unlicense"
] | null | null | null |
.history/my_classes/ScopesClosuresAndDecorators/Closures_20210711183348.py
|
minefarmer/deep-Dive-1
|
b0675b853180c5b5781888266ea63a3793b8d855
|
[
"Unlicense"
] | null | null | null |
.history/my_classes/ScopesClosuresAndDecorators/Closures_20210711183348.py
|
minefarmer/deep-Dive-1
|
b0675b853180c5b5781888266ea63a3793b8d855
|
[
"Unlicense"
] | null | null | null |
""" Closuers
Free variables and closures
Remember: Functions defined inside another function can access the outer (nonLocal) variables
"""
def outer():
x = 'python'
/ this x refers to the one in outer's scope', this nonlocal variable x is called a free variable
def inner(): /
print("{0} rocks!".format(x)) when we consider inner, we are really looking at:
The function inner
the free variable x (with the current value python)
This is called a closure, # x thru the print statement
inner()
outer() # python rocks!
""" Returning the inner function
What happens if, instead of calling(running) inner from inside outer, we rune it?
def outer():
x = 'python' # x is a free variable in inner, it is bound to the variable x in outer, this happens when outer runs
def inner():
print("{0} rocks!".format(x))
return inner # when returning inner, we are actually 'returning' the closure
We can assign that return value to a variable name: fn = outer()
fn() # python rocks!
When we called fn
at that time Python determined the value of x in the extended scope
But notice that outer had finished running before we called fn - it's scope was gone
Python cells and Multi-Scopped Variables
def outer(): # Here the value of x is shared between two scoped
x = 'python' # outer
def inner(): # inner
print(x)
return inner # The label x is in two different scopes
Python does this by creating a cell as an intermediary object
outer,x ---- # cell 0xA500 / str 0xFF100
OxFF199 / python
"""
| 34.444444
| 132
| 0.58871
|
bfb36e7f1fab0fb40680437c6d9b58e63df9dddd
| 374
|
py
|
Python
|
myWeb/myWeb/model/User.py
|
aousum/pyMysqlLink
|
4bafe631e8a9286c8781d3cfd053d44e93084189
|
[
"Apache-2.0"
] | 3
|
2018-10-09T06:39:38.000Z
|
2018-10-09T06:39:45.000Z
|
myWeb/myWeb/model/User.py
|
aousum/zqyw
|
4bafe631e8a9286c8781d3cfd053d44e93084189
|
[
"Apache-2.0"
] | null | null | null |
myWeb/myWeb/model/User.py
|
aousum/zqyw
|
4bafe631e8a9286c8781d3cfd053d44e93084189
|
[
"Apache-2.0"
] | null | null | null |
from myWeb import db
class User(db.Model):
__tablename__='b_user'
id=db.Column(db.Integer,primary_key=True)
username=db.Column(db.String(10),unique=True)
password=db.Column(db.String(16))
def __init__(self,username,password):
self.username=username
self.password=password
def __repr__(self):
return '<user %r>' % self.username
| 34
| 49
| 0.687166
|
7eaf97f5055f2d2a403e699bc1b4aa49f5e8fe70
| 404
|
py
|
Python
|
users/migrations/0002_profile_video.py
|
iamshane10/django-fitness-centre
|
bfac2bd2a9919fabd832f6581223aa9a0c757b0c
|
[
"MIT"
] | null | null | null |
users/migrations/0002_profile_video.py
|
iamshane10/django-fitness-centre
|
bfac2bd2a9919fabd832f6581223aa9a0c757b0c
|
[
"MIT"
] | null | null | null |
users/migrations/0002_profile_video.py
|
iamshane10/django-fitness-centre
|
bfac2bd2a9919fabd832f6581223aa9a0c757b0c
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.3 on 2021-11-09 09:43
from django.db import migrations
import embed_video.fields
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='profile',
name='video',
field=embed_video.fields.EmbedVideoField(blank=True),
),
]
| 20.2
| 65
| 0.611386
|
0a696d03970af5acacb1ef24e2a173491d228acf
| 822
|
py
|
Python
|
tests/spot/wallet/test_transfer_dust.py
|
Banging12/binance-connector-python
|
dc6fbbd0bb64fb08d73ad8b31e0b81d776efa30b
|
[
"MIT"
] | 512
|
2021-06-15T08:52:44.000Z
|
2022-03-31T09:49:53.000Z
|
tests/spot/wallet/test_transfer_dust.py
|
Banging12/binance-connector-python
|
dc6fbbd0bb64fb08d73ad8b31e0b81d776efa30b
|
[
"MIT"
] | 75
|
2021-06-20T13:49:50.000Z
|
2022-03-30T02:45:31.000Z
|
tests/spot/wallet/test_transfer_dust.py
|
Banging12/binance-connector-python
|
dc6fbbd0bb64fb08d73ad8b31e0b81d776efa30b
|
[
"MIT"
] | 156
|
2021-06-18T11:56:36.000Z
|
2022-03-29T16:34:22.000Z
|
import responses
from binance.spot import Spot as Client
from tests.util import mock_http_response
from tests.util import random_str
from binance.error import ParameterRequiredError
mock_item = {"key_1": "value_1", "key_2": "value_2"}
key = random_str()
secret = random_str()
params = {"asset": ["LTC", "EOS"]}
def test_withdraw_without_coin():
"""Tests the API endpoint to transfer dust without coin"""
client = Client(key, secret)
client.transfer_dust.when.called_with("").should.throw(ParameterRequiredError)
@mock_http_response(
responses.POST, "/sapi/v1/asset/dust\\?asset=LTC&asset=EOS", mock_item, 200
)
def test_withdraw():
"""Tests the API endpoint to transfer dust"""
client = Client(key, secret)
response = client.transfer_dust(**params)
response.should.equal(mock_item)
| 27.4
| 82
| 0.733577
|
e1e2e96d2a5ccab3be8ea61aae0ff2c76934ea1f
| 2,542
|
py
|
Python
|
daily_coding_challenge/october_2020/Bag_of_Tokens_948.py
|
anjaligopi/leetcode
|
26fa64525c92e01dfbcdd7851f5b3a91f6ec203b
|
[
"Apache-2.0"
] | null | null | null |
daily_coding_challenge/october_2020/Bag_of_Tokens_948.py
|
anjaligopi/leetcode
|
26fa64525c92e01dfbcdd7851f5b3a91f6ec203b
|
[
"Apache-2.0"
] | null | null | null |
daily_coding_challenge/october_2020/Bag_of_Tokens_948.py
|
anjaligopi/leetcode
|
26fa64525c92e01dfbcdd7851f5b3a91f6ec203b
|
[
"Apache-2.0"
] | null | null | null |
"""
Question:
You have an initial power of P, an initial score of 0, and a
bag of tokens where tokens[i] is the value of the ith token (0-indexed).
Your goal is to maximize your total score by potentially playing each token in one of two ways:
If your current power is at least tokens[i], you may play the ith token
face up, losing tokens[i] power and gaining 1 score.
If your current score is at least 1, you may play the ith token
face down, gaining tokens[i] power and losing 1 score.
Each token may be played at most once and in any order.
You do not have to play all the tokens.
Return the largest possible score you can achieve
after playing any number of tokens.
Example 1:
Input: tokens = [100], P = 50
Output: 0
Explanation: Playing the only token in the bag is impossible
because you either have too little power or too little score.
Example 2:
Input: tokens = [100,200], P = 150
Output: 1
Explanation: Play the 0th token (100) face up,
your power becomes 50 and score becomes 1.
There is no need to play the 1st token since
you cannot play it face up to add to your score.
Example 3:
Input: tokens = [100,200,300,400], P = 200
Output: 2
Explanation: Play the tokens in this order to get a score of 2:
1. Play the 0th token (100) face up,
your power becomes 100 and score becomes 1.
2. Play the 3rd token (400) face down,
your power becomes 500 and score becomes 0.
3. Play the 1st token (200) face up,
your power becomes 300 and score becomes 1.
4. Play the 2nd token (300) face up,
your power becomes 0 and score becomes 2.
"""
import pytest
from typing import List
class Solution:
def bag_of_tokens_score(self, tokens: List[int], P: int) -> int:
tokens.sort()
l, r = 0, len(tokens)-1
point, maxpoints = 0, 0
while l<=r:
if P>= tokens[l]:
P -= tokens[l]
point += 1
maxpoints = max(maxpoints, point)
l += 1
elif point > 0:
P += tokens[r]
r -= 1
point -= 1
else:
break
return maxpoints
@pytest.mark.timeout(3)
@pytest.mark.parametrize("tokens, P, ans", [ ([100,200,300,400], 200, 2) , ([100], 50, 0) ])
def test_bag_of_tokens_score(tokens, P, ans):
sol1 = Solution()
assert sol1.bag_of_tokens_score(tokens, P) == ans
# pytest daily_coding_challenge/october_2020/Bag_of_Tokens_948.py --maxfail=4
| 30.261905
| 95
| 0.6369
|
ef6aff6f0b13e432a1590ef042a76d79a2142c93
| 744
|
py
|
Python
|
scripts/generate_dataset.py
|
ChaoYan/drl4rec
|
81aebc1598cc913a9512920422b3bb463762e003
|
[
"MIT"
] | 92
|
2020-10-26T11:19:32.000Z
|
2022-03-18T02:14:59.000Z
|
scripts/generate_dataset.py
|
ChaoYan/drl4rec
|
81aebc1598cc913a9512920422b3bb463762e003
|
[
"MIT"
] | 3
|
2021-01-29T13:42:26.000Z
|
2022-02-15T09:29:12.000Z
|
scripts/generate_dataset.py
|
ChaoYan/drl4rec
|
81aebc1598cc913a9512920422b3bb463762e003
|
[
"MIT"
] | 12
|
2020-10-26T19:06:10.000Z
|
2022-03-18T04:14:17.000Z
|
import argparse
import logging
import os
from ml_skeleton_py import etl
from ml_skeleton_py import settings as s
logger = logging.getLogger(__name__)
logging.getLogger().setLevel(logging.INFO)
def generate() -> None:
"""
Load the dataset, remove outliers and store in data directory.
"""
parser = argparse.ArgumentParser()
parser.add_argument(
"--dataset",
default="creditcard.csv",
help="raw dataset to generate train and test data",
)
args = parser.parse_args()
input_location = os.path.join(s.DATA_RAW, args.dataset)
output_location = os.path.join(s.DATA_TRANSFORMED, args.dataset)
etl.generate(input_location, output_location)
if __name__ == "__main__":
generate()
| 24
| 68
| 0.704301
|
7c2213488cba646ab166b565e0aebab5768fae01
| 1,441
|
py
|
Python
|
web3/_utils/module_testing/fallback_contract.py
|
bhardwajRahul/web3.py
|
efecadcdea64f9481fcace558a8ea103462e2923
|
[
"MIT"
] | 3,041
|
2017-11-22T16:23:46.000Z
|
2022-03-31T15:19:39.000Z
|
web3/_utils/module_testing/fallback_contract.py
|
bhardwajRahul/web3.py
|
efecadcdea64f9481fcace558a8ea103462e2923
|
[
"MIT"
] | 1,506
|
2017-11-22T15:44:34.000Z
|
2022-03-31T18:40:05.000Z
|
web3/_utils/module_testing/fallback_contract.py
|
bhardwajRahul/web3.py
|
efecadcdea64f9481fcace558a8ea103462e2923
|
[
"MIT"
] | 1,095
|
2017-11-22T18:20:22.000Z
|
2022-03-31T13:05:31.000Z
|
CONTRACT_FALLBACK_FUNCTION_SOURCE = """
contract A {
uint data;
function A() public payable { data = 0; }
function getData() returns (uint r) { return data; }
function() { data = 1; }
}
"""
CONTRACT_FALLBACK_FUNCTION_CODE = "60606040526000808190555060ae806100196000396000f300606060405260043610603f576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff1680633bc5de30146053575b3415604957600080fd5b6001600081905550005b3415605d57600080fd5b60636079565b6040518082815260200191505060405180910390f35b600080549050905600a165627a7a72305820045439389e4742569ec078687e6a0c81997709778a0097adbe07ccfd9f7b1a330029" # noqa: E501
CONTRACT_FALLBACK_FUNCTION_RUNTIME = "606060405260043610603f576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff1680633bc5de30146053575b3415604957600080fd5b6001600081905550005b3415605d57600080fd5b60636079565b6040518082815260200191505060405180910390f35b600080549050905600a165627a7a72305820045439389e4742569ec078687e6a0c81997709778a0097adbe07ccfd9f7b1a330029" # noqa: E501
CONTRACT_FALLBACK_FUNCTION_ABI = '[{"constant": false, "inputs": [], "name": "getData", "outputs": [{"name": "r", "type": "uint256"}], "payable": false, "stateMutability": "nonpayable", "type": "function"}, {"inputs": [], "payable": true, "stateMutability": "payable", "type": "constructor"}, {"payable": false, "stateMutability": "nonpayable", "type": "fallback"}]' # noqa: E501
| 96.066667
| 448
| 0.830673
|
d806493c9fbcb002e9a88ccc09ee3071ff10bab0
| 38
|
py
|
Python
|
course1/hello.py
|
plbenpeterson/dash-and-jupyter-notebook-with-gitpod
|
79d8f3319ec4f10565efe6343ce85e17e47788e5
|
[
"MIT"
] | null | null | null |
course1/hello.py
|
plbenpeterson/dash-and-jupyter-notebook-with-gitpod
|
79d8f3319ec4f10565efe6343ce85e17e47788e5
|
[
"MIT"
] | null | null | null |
course1/hello.py
|
plbenpeterson/dash-and-jupyter-notebook-with-gitpod
|
79d8f3319ec4f10565efe6343ce85e17e47788e5
|
[
"MIT"
] | null | null | null |
message = 'Hello Jane and Jim and Kim'
| 38
| 38
| 0.736842
|
f5f0c56c762336ac2e41bb30adf78a87de25dc26
| 5,057
|
py
|
Python
|
Sea/actions/factory.py
|
FRidh/Sea
|
b474e93a449570a9ba3b915c4d80f814feee2545
|
[
"BSD-3-Clause"
] | 2
|
2015-07-02T13:34:09.000Z
|
2015-09-28T09:07:52.000Z
|
Sea/actions/factory.py
|
FRidh/Sea
|
b474e93a449570a9ba3b915c4d80f814feee2545
|
[
"BSD-3-Clause"
] | null | null | null |
Sea/actions/factory.py
|
FRidh/Sea
|
b474e93a449570a9ba3b915c4d80f814feee2545
|
[
"BSD-3-Clause"
] | 1
|
2022-01-22T03:01:54.000Z
|
2022-01-22T03:01:54.000Z
|
"""
The factory contains functions for creating SEA objects in FreeCAD. These functions should not be called directly.
"""
from Sea.adapter.object_maps import *
import Sea
import FreeCAD as App
import logging
#def makeComponent(system, sort, material, part):
#"""
#Add a component from :mod:`Sea.adapter.components` to an SEA model.
#:param system: a instance of :class:`Sea.adapter.system.System` to which the component will be added.
#:param sort: type of component as specified in :class:`Sea.adapter.components.components_map`
#:param material: an instance of a child of :class:`Sea.adapter.baseclasses.Material` that the component is made of.
#:param part: an instance of :class:`Freecad.Part` that the component is based on
#"""
#obj = system.ComponentsGroup.newObject("App::DocumentObjectGroupPython", 'Component')
#components_map[sort](obj, system, material, part)
#logging.info("Sea: Created %s.", obj.Name)
#obj.Document.recompute()
#return obj
#def makeComponentCavity(system, sort, material, position):
#"""
#Add a component from :mod:`Sea.adapter.components` to an SEA model.
#:param system: :class:`Sea.adapter.system.System` to which the component will be added
#:param position: a :class:`FreeCAD.Vector` describing the position in the cavity.
#:param sort: Type of component specified in :class:`Sea.adapter.components.components_map`
#"""
#obj = system.ComponentsGroup.newObject("App::DocumentObjectGroupPython", 'Component')
#components_map[sort](obj, system, material, position)
#logging.info("Sea: Created %s.", obj.Name)
#obj.Document.recompute()
#return obj
#def makeSubsystem(component, sort, model):
#"""
#Add a subsystem to a component.
#:param component: an instance of a child of :class:`Sea.adapter.baseclasses.Component`.
#:param sort: type of subsystem.
#:param model: model of the subsysten belonging to :attr:`component` and specified in :mod:`Sea.model.components`
#"""
#obj = component.newObject("App::FeaturePython", "Subsystem")
#subsystems_map[sort](obj, component, model)
#logging.info("Sea: Created %s.", obj.Name)
#obj.Document.recompute()
#return obj
#def makeConnection(system, sort, components):
#"""
#Add a connection to system.
#:param system: :class:`Sea.adapter.system.System` to which the connection will be added
#:param sort: sort
#:param components: list of components
#"""
#obj = system.ConnectionsGroup.newObject("App::DocumentObjectGroupPython", "Connection")
#connections_map[sort](obj, system, components)
#logging.info("Sea: Created %s.", obj.Name)
#obj.Document.recompute()
#return obj
#def makeCoupling(connection, component_from, subsystem_from, component_to, subsystem_to, sort):
#"""
#Add a coupling to system.
#:param connection: an instance of :class:`Sea.adapter.baseclasses.Connection`
#:param component_from: an instance of a child of :class:`Sea.adapter.baseclasses.Component`
#:param subsystem_from: string representing the type of subsystem
#:param component_to: an instance of a child of :class:`Sea.adapter.baseclasses.Component`
#:param subsystem_to: string representing the type of subsystem
#:param sort: sort of coupling as specified in :class:`Sea.adapter.couplings.couplings_map`
#"""
##if connection.System == component_from.System == component_to.System:
#obj = connection.newObject("App::FeaturePython", 'Coupling')
#couplings_map[sort](obj, connection, component_from, subsystem_from, component_to, subsystem_to)
#logging.info("Sea: Created %s.", obj.Name)
#obj.Document.recompute()
#return obj
#def makeExcitation(system, component, subsystem, sort):
#"""
#Add an excitation from :mod:`Sea.adapter.excitations` to the subsystem of component.
#:param component: an instance of a child of :class:`Sea.adapter.baseclasses.Component`
#:param subsystem: Subsystem that is excited
#:param sort: Type of excitation specified in :class:`Sea.adapter.excitations.excitations_map`
#"""
#obj = system.ExcitationsGroup.newObject("App::FeaturePython", 'Excitation')
##obj.Label = sort.capitalize()
#excitations_map[sort](obj, component, subsystem)
#logging.info("Sea: Created %s.", obj.Name)
#obj.Document.recompute()
#return obj
#def makeMaterial(system, sort):
#"""
#Add a material from :mod:`Sea.adapter.materials` to SEA system.
#:param system: :class:`Sea.adapter.system.System` to which the component will be added
#:param sort: Type of material specified in :class:`Sea.adapter.materials.materials_map`
#"""
#obj = system.MaterialsGroup.newObject("App::FeaturePython", 'Material')
##obj.Label = sort
#materials_map[sort](obj, system)
#logging.info("Sea: Created %s.", obj.Name)
#obj.Document.recompute()
#return obj
| 42.495798
| 120
| 0.692308
|
af2f7af9a084d657f0708c33446396f7cd5e4687
| 21,937
|
py
|
Python
|
pyvaspflow/utils.py
|
ChangChunHe/pyflow
|
c991c4bf04d33d1350dd0e75576a0aec3ffd2df5
|
[
"MIT"
] | 2
|
2019-04-21T12:40:08.000Z
|
2019-04-25T00:13:24.000Z
|
pyvaspflow/utils.py
|
ChangChunHe/VASP-calculation
|
c991c4bf04d33d1350dd0e75576a0aec3ffd2df5
|
[
"MIT"
] | 2
|
2019-03-12T10:51:15.000Z
|
2019-03-14T02:18:18.000Z
|
pyvaspflow/utils.py
|
ChangChunHe/VASP-calculation
|
c991c4bf04d33d1350dd0e75576a0aec3ffd2df5
|
[
"MIT"
] | 7
|
2019-03-12T10:23:12.000Z
|
2019-04-29T06:39:43.000Z
|
import numpy as np
from sagar.toolkit.mathtool import refine_positions
from sagar.molecule.structure import Molecule
from sagar.crystal.utils import non_dup_hnfs, snf
from sagar.io.vasp import write_vasp, read_vasp
from sagar.toolkit.derivetool import remove_redundant
from sagar.crystal.structure import Cell
from sagar.element.base import periodic_table_dict as ptd
from sagar.toolkit.mathtool import is_int_np_array
from sagar.crystal.derive import PermutationGroup as PG
from pyvaspflow.io.vasp_out import ExtractValue
from sagar.element.base import periodic_table_dict as ptd
from itertools import combinations
from os import path
import configparser
def refine_points(tetra,extend_S,C,min_d=1):
n = np.shape(tetra)[0]
tetra_cen = np.zeros((n,3))
for ii in range(n):
tetra_cen[ii] = np.mean(extend_S[tetra[ii]],axis=0)
tetra_cen = [cen for cen in tetra_cen if min(np.linalg.norm(cen-extend_S,axis=1))>1.5]
final_res = []
for cen in tetra_cen:
d = np.linalg.norm(cen-tetra_cen,axis=1)
d = d[d>0]
if min(d) > min_d:
final_res.append(cen)
if len(final_res) == 0:
return np.array([])
final_res = np.dot(final_res,np.linalg.inv(C))
final_res = np.unique(np.round(final_res,decimals=3),axis=0)
final_res[final_res>0.99] = 0
final_res[final_res<0.01] = 0
return np.unique(final_res,axis=0)
def write_poscar(cell,folder='.',idx=0,comment=""):
filename = '{:s}{:s}'.format('POSCAR'+comment, str(idx))
file = path.join(folder, filename)
write_vasp(cell,file,suffix='')
def get_delete_atom_num(no_defect_poscar,one_defect_poscar):
no_defect = read_vasp(no_defect_poscar)
one_defect = read_vasp(one_defect_poscar)
no_def_pos = no_defect.positions
one_def_pos = one_defect.positions
no_def_pos[abs(no_def_pos-1) < 0.01] = 0
one_def_pos[abs(one_def_pos-1) < 0.01] = 0
if len(no_defect.atoms)-1 == len(one_defect.atoms):
num = no_def_pos.shape[0]
for ii in range(num):
d = np.linalg.norm(no_def_pos[ii] - one_def_pos,axis=1)
if min(d) > 0.1:
break
_no_def_pos = np.unique(np.delete(no_def_pos,ii,0),axis=0)
_one_def_pos = np.unique(one_def_pos,axis=0)
d = 0
for i in _no_def_pos:
d = d + min(np.linalg.norm(i - _one_def_pos,axis=1))
for key,val in ptd.items():
if val == no_defect.atoms[ii]:
rm_atom = key
break
print('This is a vacancy defect','atom: \n',
rm_atom,ii+1,'in the defect-free POSCAR has been removed')
with open('element-in-out','w') as f:
f.writelines(str(rm_atom)+'='+str(1)+'\n')
f.writelines('Vacc=-1 \n')
return ii,d
elif len(no_defect.atoms) == len(one_defect.atoms):
no_def_atoms,def_atoms = np.unique(no_defect.atoms),np.unique(one_defect.atoms)
purity_atom = np.setdiff1d(def_atoms,no_def_atoms)
if len(purity_atom) != 0: # introduce a new atom purity
idx = np.where(one_defect.atoms==purity_atom)[0]
d = np.linalg.norm(one_def_pos[idx]-no_def_pos,axis=1)
ii,d = np.argmin(d), np.min(d)
for key,val in ptd.items():
if val == no_defect.atoms[ii]:
rm_atom = key
if val == purity_atom:
in_atom = key
print('This is a purity defect','atom: \n',
rm_atom, ii+1,'in the defect-free POSCAR has been doped by', in_atom)
with open('element-in-out','w') as f:
f.writelines(str(rm_atom)+'='+str(1)+'\n')
f.writelines(str(in_atom)+'='+str(-1)+'\n')
return ii,d
else:
purity_atom = []
for _atom in no_def_atoms:
idx_num_1,idx_num_2 = len(np.where(_atom==no_defect.atoms)[0]),len(np.where(_atom==one_defect.atoms)[0])
if abs(idx_num_2-idx_num_1) == 1:
purity_atom.append(_atom)
elif abs(idx_num_1-idx_num_2) > 1:
raise ValueError("The POSCAR has two or more defect atoms")
if len(purity_atom) > 2:
raise ValueError("The POSCAR has two or more defect atoms")
no_def_pos_0 = no_def_pos[no_defect.atoms==purity_atom[0]]
no_def_pos_1 = no_def_pos[no_defect.atoms==purity_atom[1]]
one_def_pos_0 = one_def_pos[one_defect.atoms==purity_atom[0]]
one_def_pos_1 = one_def_pos[one_defect.atoms==purity_atom[1]]
if no_def_pos_0.shape[0]- one_def_pos_0.shape[0] == 1:
purity_in = purity_atom[1]
purity_out = purity_atom[0]
d = [min(np.linalg.norm(pos-one_def_pos_0,axis=1)) for pos in no_def_pos_0]
ahead_num = np.where(no_defect.atoms==purity_out)[0][0]
idx = np.argmax(d)
for key,val in ptd.items():
if val == purity_out:
rm_atom = key
if val == purity_in:
in_atom = key
print('This is a purity defect','atom: \n',
rm_atom, ahead_num+idx+1,'in the defect-free POSCAR has been doped by', in_atom)
with open('element-in-out','w') as f:
f.writelines(str(rm_atom)+'='+str(1)+'\n')
f.writelines(str(in_atom)+'='+str(-1)+'\n')
return ahead_num+idx,d[idx]
else:
purity_in = purity_atom[0]
purity_out = purity_atom[1]
d = [min(np.linalg.norm(pos-one_def_pos_1,axis=1)) for pos in no_def_pos_1]
idx = np.argmax(d)
ahead_num = np.where(no_defect.atoms==purity_out)[0][0]
# import pdb; pdb.set_trace()
for key,val in ptd.items():
if val == purity_out:
rm_atom = key
if val == purity_in:
in_atom = key
print('This is a purity defect','atom: \n',
rm_atom, ahead_num+idx+1,'in the defect-free POSCAR has been doped by', in_atom)
with open('element-in-out','w') as f:
f.writelines(str(rm_atom)+'='+str(1)+'\n')
f.writelines(str(in_atom)+'='+str(-1)+'\n')
return ahead_num+idx,d[idx]
else:
print('This kind of defect is not supported here right now')
def generate_all_basis(N1,N2,N3):
n1,n2,n3 = 2*N1+1, 2*N2+1, 2*N3+1
x = np.tile(np.arange(-N3,N3+1),n1*n2)
y = np.tile(np.repeat(np.arange(-N2,N2+1),n3),n1)
z = np.repeat(np.arange(-N1,N1+1),n2*n3)
x,y,z = np.reshape(x,(-1,1)),np.reshape(y,(-1,1)),np.reshape(z,(-1,1))
tmp = np.hstack((z,y))
return np.hstack((tmp,x))
def get_farther_atom_num(no_defect_poscar, one_defect_poscar):
'''
Return:
1: atom number of the farthest atom in the defect system
2: atom number of the farthest atom in the defect-free system
'''
all_basis = generate_all_basis(1,1,1)
no_defect = read_vasp(no_defect_poscar)
one_defect = read_vasp(one_defect_poscar)
no_def_pos = no_defect.positions
one_def_pos = one_defect.positions
c = no_defect.lattice
no_def_pos = np.dot(no_def_pos,c)
one_def_pos = np.dot(one_def_pos,c)
ii,d = get_delete_atom_num(no_defect_poscar,one_defect_poscar)
defect_atom = no_def_pos[ii]
extend_S = []
d,idx = np.zeros((no_def_pos.shape[0],27)),0
for basis in all_basis:
i,j,k = basis
d[:,idx] = np.linalg.norm(defect_atom-(no_def_pos+i*c[0]+j*c[1]+k*c[2]),axis=1)
idx += 1
max_idx_no_def = np.argmax(np.min(d,axis=1))#no-defect-farther-atom-number
# import pdb;pdb.set_trace()
d,idx = np.zeros((one_def_pos.shape[0],27)),0
for basis in all_basis:
i,j,k = basis
d[:,idx] = np.linalg.norm(defect_atom-(one_def_pos+i*c[0]+j*c[1]+k*c[2]),axis=1)
idx += 1
max_idx_one_def = np.argmax(np.min(d,axis=1))
return max_idx_one_def+1,max_idx_no_def+1
def str_delimited(results, header=None, delimiter="\t"):
"""
Given a tuple of tuples, generate a delimited string form.
>>> results = [["a","b","c"],["d","e","f"],[1,2,3]]
>>> print(str_delimited(results,delimiter=","))
a,b,c
d,e,f
1,2,3
Args:
result: 2d sequence of arbitrary types.
header: optional header
Returns:
Aligned string output in a table-like format.
"""
returnstr = ""
if header is not None:
returnstr += delimiter.join(header) + "\n"
return returnstr + "\n".join([delimiter.join([str(m) for m in result])
for result in results])
def clean_lines(string_list, remove_empty_lines=True):
for s in string_list:
clean_s = s
if '#' in s:
ind = s.index('#')
clean_s = s[:ind]
clean_s = clean_s.strip()
if (not remove_empty_lines) or clean_s != '':
yield clean_s
def zread(filename):
name, ext = path.splitext(filename)
ext = ext.upper()
if ext in (".GZ", ".Z"):
with open(filename,'rb') as f:
data = f.read()
return unlzw(data).decode('utf-8')
else:
with open(filename,'r') as f:
data = f.read()
return data
def unlzw(data):
"""
This function was adapted for Python from Mark Adler's C implementation
https://github.com/umeat/unlzw
Decompress compressed data generated by the Unix compress utility (LZW
compression, files with .Z suffix). Input can be given as any type which
can be 'converted' to a bytearray (e.g. string, or bytearray). Returns
decompressed data as bytes, or raises an error.
Written by Brandon Owen, May 2016, brandon.owen@hotmail.com
Adapted from original work by Mark Adler - original copyright notice below
Copyright (C) 2014, 2015 Mark Adler
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
Mark Adler
madler@alumni.caltech.edu
"""
# Convert input data stream to byte array, and get length of that array
try:
ba_in = bytearray(data)
except ValueError:
raise TypeError("Unable to convert inputted data to bytearray")
inlen = len(ba_in)
prefix = [None] * 65536 # index to LZW prefix string
suffix = [None] * 65536 # one-character LZW suffix
# Process header
if inlen < 3:
raise ValueError(
"Invalid Input: Length of input too short for processing")
if (ba_in[0] != 0x1f) or (ba_in[1] != 0x9d):
raise ValueError(
"Invalid Header Flags Byte: Incorrect magic bytes")
flags = ba_in[2]
if flags & 0x60:
raise ValueError(
"Invalid Header Flags Byte: Flag byte contains invalid data")
max_ = flags & 0x1f
if (max_ < 9) or (max_ > 16):
raise ValueError(
"Invalid Header Flags Byte: Max code size bits out of range")
if (max_ == 9):
max_ = 10 # 9 doesn't really mean 9
flags &= 0x80 # true if block compressed
# Clear table, start at nine bits per symbol
bits = 9
mask = 0x1ff
end = 256 if flags else 255
# Ensure stream is initially valid
if inlen == 3:
return 0 # zero-length input is permitted
if inlen == 4: # a partial code is not okay
raise ValueError("Invalid Data: Stream ended in the middle of a code")
# Set up: get the first 9-bit code, which is the first decompressed byte,
# but don't create a table entry until the next code
buf = ba_in[3]
buf += ba_in[4] << 8
final = prev = buf & mask # code
buf >>= bits
left = 16 - bits
if prev > 255:
raise ValueError("Invalid Data: First code must be a literal")
# We have output - allocate and set up an output buffer with first byte
put = [final]
# Decode codes
mark = 3 # start of compressed data
nxt = 5 # consumed five bytes so far
while nxt < inlen:
# If the table will be full after this, increment the code size
if (end >= mask) and (bits < max_):
# Flush unused input bits and bytes to next 8*bits bit boundary
# (this is a vestigial aspect of the compressed data format
# derived from an implementation that made use of a special VAX
# machine instruction!)
rem = (nxt - mark) % bits
if (rem):
rem = bits - rem
if rem >= inlen - nxt:
break
nxt += rem
buf = 0
left = 0
# mark this new location for computing the next flush
mark = nxt
# increment the number of bits per symbol
bits += 1
mask <<= 1
mask += 1
# Get a code of bits bits
buf += ba_in[nxt] << left
nxt += 1
left += 8
if left < bits:
if nxt == inlen:
raise ValueError(
"Invalid Data: Stream ended in the middle of a code")
buf += ba_in[nxt] << left
nxt += 1
left += 8
code = buf & mask
buf >>= bits
left -= bits
# process clear code (256)
if (code == 256) and flags:
# Flush unused input bits and bytes to next 8*bits bit boundary
rem = (nxt - mark) % bits
if rem:
rem = bits - rem
if rem > inlen - nxt:
break
nxt += rem
buf = 0
left = 0
# Mark this location for computing the next flush
mark = nxt
# Go back to nine bits per symbol
bits = 9 # initialize bits and mask
mask = 0x1ff
end = 255 # empty table
continue # get next code
# Process LZW code
temp = code # save the current code
stack = [] # buffer for reversed match - empty stack
# Special code to reuse last match
if code > end:
# Be picky on the allowed code here, and make sure that the
# code we drop through (prev) will be a valid index so that
# random input does not cause an exception
if (code != end + 1) or (prev > end):
raise ValueError("Invalid Data: Invalid code detected")
stack.append(final)
code = prev
# Walk through linked list to generate output in reverse order
while code >= 256:
stack.append(suffix[code])
code = prefix[code]
stack.append(code)
final = code
# Link new table entry
if end < mask:
end += 1
prefix[end] = prev
suffix[end] = final
# Set previous code for next iteration
prev = temp
# Write stack to output in forward order
put += stack[::-1]
# Return the decompressed data as string
return bytes(bytearray(put))
def get_kw(attribute):
kw = {}
if attribute:
if attribute[-1] == ",":
attribute = attribute[:-1]
attribute = attribute.split('=')
n = len(attribute)
if len(attribute[1].split(',')) == 2 :
kw[attribute[0]] = attribute[1].split(',')[0]
else:
kw[attribute[0]] = attribute[1].split(',')[:-1]
for ii in range(1,n-1):
if len(attribute[ii+1].split(',')) == 2:
kw[attribute[ii].split(',')[-1]] = attribute[ii+1].split(',')[0]
else:
kw[attribute[ii].split(',')[-1]] = attribute[ii+1].split(',')[:-1]
if len(attribute[-1].split(',')) > 1:
kw[attribute[-2].split(',')[-1]] = attribute[-1].split(',')
else:
kw[attribute[-2].split(',')[-1]] = attribute[-1]
if 'kpts' in kw:
kw['kpts'] = tuple(int(i) for i in kw['kpts'])
if 'shift' in kw:
kw['shift'] = tuple(float(i) for i in kw['shift'])
return kw
def get_idx_in_pri_pos(pri_pos,pos):
return [np.argmin(np.linalg.norm(p-pri_pos,axis=1)) for p in pos]
def _get_min_serial(perms,serial):
return np.unique(np.sort(perms[:,serial],axis=1),axis=0)[0]
def is_2d_structure(cell):
pos = cell.positions
pos_std = np.std(pos,axis=0)
if min(pos_std) < 0.1*max(pos_std):
idx = np.argmin(pos_std)
return True,idx
return False
def get_grd_state(job_name,start_job_num,end_job_num):
energy = []
for ii in range(start_job_num,end_job_num+1):
EV = ExtractValue(data_folder=job_name+str(ii))
energy.append(EV.get_energy())
return np.argmin(energy)
def get_perms(cell,str_type='crystal',symprec=1e-3):
latt = cell.lattice
pos = cell.positions
pos = np.dot(pos,latt)
if str_type == "crystal":
symm = cell.get_symmetry()
trans,rots = symm['translations'],symm['rotations']
perms = np.zeros((np.shape(trans)[0],len(cell.atoms)))
origin_positions = refine_positions(cell.positions)
for ix, rot in enumerate(rots):
for iy,o_pos in enumerate(origin_positions):
new_pos = np.dot(rot,o_pos.T) + trans[ix]
new_pos = np.mod(new_pos,1)
new_pos = refine_positions(new_pos)
idx = np.argmin(np.linalg.norm(new_pos-origin_positions,axis=1))
perms[ix,iy] = idx
perms_table = np.unique(perms,axis=0)
else:
mol = Molecule(pos,cell.atoms)
perms_table = mol.get_symmetry_permutation(symprec)
return perms_table
def add_log_shell_file(shell_file,log_dir,main_pid):
with open(shell_file,"r") as f:
lines = f.readlines()
new_lines = []
for line in lines:
if 'pyvasp run_' in line:
line = line.rstrip()
line += " -d "+log_dir + " -m " + str(main_pid)+"\n"
new_lines.append(line)
else:
new_lines.append(line)
return new_lines
def get_max_volume(pcell, sites, max_volume, min_volume=1, dimension=3, symprec=1e-5):
for volume in range(min_volume, max_volume + 1):
hnfs = non_dup_hnfs(pcell, volume, dimension, symprec)
        dict_trans = {}  # record the SNFs already generated; HNFs with the same SNF share the same translation operations
for h in hnfs:
hfpg = PG(pcell, h)
perms = hfpg.get_symmetry_perms(symprec)
if dimension == 2:
supercell = pcell.extend(h)._get_niggli_2D()
else:
supercell = pcell.extend(h)._get_niggli_3D()
_sites = np.repeat(sites, volume, axis=0)
for mol, _ in remove_redundant(supercell.positions, _sites, perms):
c = Cell(supercell.lattice, mol[0], mol[1])
if c.is_primitive(symprec):
yield c
def get_identity_atoms(cell,symprec,style="crystal"):
atom_number = cell.atoms
if style == "crystal":
equ_atom = cell.get_symmetry(symprec)['equivalent_atoms']
atom_uniq_type = np.unique(equ_atom)
atom_type = np.zeros(np.shape(equ_atom))
for idx,ea in enumerate(equ_atom):
atom_type[idx] = np.where(atom_uniq_type==ea)[0]
return atom_type
def read_config():
    from os import pardir, getcwd, mkdir
home = path.expanduser("~")
wd = getcwd()
if path.isfile(path.join(wd,'config.ini')):
conf_file_path = path.join(wd,'config.ini')
elif path.isfile(path.join(home,'.config','pyvaspflow','config.ini')):
conf_file_path = path.join(home,'.config','pyvaspflow','config.ini')
else:
config_ini = '''
[RUN_VASP]
prepend = module load vasp/5.4.4-impi-mkl
exec = mpirun -n ${SLURM_NPROCS} vasp_std
append = exit
[POTCAR_PATH]
paw_pbe = /opt/ohpc/pub/apps/vasp/pps/paw_PBE
paw_lda = /opt/ohpc/pub/apps/vasp/pps/paw_LDA
paw_pw91 = /opt/ohpc/pub/apps/vasp/pps/paw_PW91
uspp_lda = /opt/ohpc/pub/apps/vasp/pps/USPP_LDA
uspp_pw91 = /opt/ohpc/pub/apps/vasp/pps/USPP_PW91
default_type = paw_pbe
[Task_Schedule]
default_node_name = short_q
default_cpu_num = 24
default_schedule = SLURM
[SLURM]
submission = sbatch ./job.sh
job_queue = squeue
node_state = sinfo
[LSF]
submission = bsub < ./job.lsf
job_queue = bjobs
node_state = bhost\n
'''
home = path.expanduser("~")
if not path.isdir(path.join(home,'.config')):
mkdir(path.join(home,'.config'))
if not path.isdir(path.join(home,'.config','pyvaspflow')):
mkdir(path.join(home,'.config','pyvaspflow'))
if not path.isfile(path.join(home,'.config','pyvaspflow','config.ini')):
with open(path.join(home,'.config','pyvaspflow','config.ini'),'w') as outfile:
outfile.write(config_ini)
conf_file_path = path.join(home,'.config','pyvaspflow','config.ini')
config = configparser.ConfigParser()
config.read(conf_file_path)
return config
def clean_parse(kw,key,def_val):
val = kw.get(key,def_val)
kw.pop(key,None)
return val,kw
| 36.622705
| 120
| 0.590783
|
b79c88fea6f5abb47389cd1c66743caadbe5ee7b
| 4,116
|
py
|
Python
|
main_tau.py
|
pqrs6/clee-fast
|
e08a2e1c88024f640f14def2618dcac18b2dfc99
|
[
"MIT"
] | null | null | null |
main_tau.py
|
pqrs6/clee-fast
|
e08a2e1c88024f640f14def2618dcac18b2dfc99
|
[
"MIT"
] | 1
|
2016-10-26T12:49:09.000Z
|
2016-10-26T12:49:09.000Z
|
main_tau.py
|
pqrs6/clee_fast
|
e08a2e1c88024f640f14def2618dcac18b2dfc99
|
[
"MIT"
] | null | null | null |
'''
This assumes that we have already gotten the E and B spectra computed from CAMB.
This should be very fast.
'''
# degree of the multivariate polynomial to be fit
degree = 9
# determines whether or not to use E- or B-mode power spectrum.
consider = 'EE' #'BB'
import numpy as np
import matplotlib.pyplot as plt
import os
DATA_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), 'data/'))
# PolynomialFeatures essentially creates a Vandermonde matrix in the parameters
# you're interested in, making all possible variable combinations up to order
# degree, i.e. x^5, x^4..., x^4y, x^3y,... 1.
from sklearn.preprocessing import PolynomialFeatures
# LinearRegression creates the pipeline that fits the model that we're
# interested in. Here, the model is a linear one, where the coefficients of the
# polynomial are the parameters that are fit.
from sklearn.linear_model import LinearRegression
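# Illustration only (hypothetical helper, not used below): for a 1-D input and
# degree=2, PolynomialFeatures expands each sample x into the columns
# [1, x, x**2]; that expanded design matrix is exactly what LinearRegression
# fits in get_cl() further down.
def _poly_features_demo():
    """Return the expanded design matrix for x = [2, 3] with degree=2."""
    x = np.array([[2.0], [3.0]])
    demo_poly = PolynomialFeatures(degree=2)
    # Rows come back as [1, x, x**2]: [[1., 2., 4.], [1., 3., 9.]]
    return demo_poly.fit_transform(x)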
# Training data.
global values_EE, values_BB, points
# These are pre-computed by the trs_regression.py file. The more points
# computed, the more precisely the regression fits the true solution, but the
# time goes up as O(C^2*N), where N is the number of training samples and C is
# the degree of the polynomial.
values_EE = np.loadtxt(DATA_DIR+'/training_data_tau_EE.txt')
values_BB = np.loadtxt(DATA_DIR+'/training_data_tau_BB.txt')
points = np.loadtxt(DATA_DIR+'/training_params_tau.txt')
def get_cl(tau, consider='EE', degree=5):
if consider == 'EE':
values = values_EE
else:
values = values_BB
v = values#[:100]
p = points#[:100]
poly = PolynomialFeatures(degree=degree)
# Vandermonde matrix of pre-computed parameter values.
X_ = poly.fit_transform(p.reshape(-1,1))
predict = np.array([tau]).reshape(1,-1)
# Creates matrix of values you want to estimate from the existing
# measurements. Computation speed scales very slowly when you ask for
# estimates of many sets of parameters.
predict_ = poly.fit_transform(predict)
clf = LinearRegression()
estimate = []
for l in range(2, v.shape[1]):
values_l = v[:,l]
clf.fit(X_, values_l)
estimate_l = clf.predict(predict_)
estimate.append(estimate_l)
estimate = np.array(estimate)
ell = np.arange(2, l+1)
Z = 2*np.pi/(ell*(ell+1))
return ell, Z*estimate[:,0]
from scipy.interpolate import griddata
def get_cl_grid(tau, consider='EE'):
    data = values_EE if consider == 'EE' else values_BB
pars = points
out = griddata(pars, data[:,2:], (tau))
ell = np.arange(2, len(out)+2)
Z = 2*np.pi/(ell*(ell+1))
return ell, Z*out
if __name__ == '__main__':
# Sample computation.
color_idx = np.linspace(0,1, 10)
taus = np.linspace(0.05, 0.2, 10)
times = []
import time
for ind, tau in zip(color_idx, taus):
t0 = time.time()
ell, Cl = get_cl(tau, consider=consider)
times.append(time.time()-t0)
plt.loglog(ell, Cl, color=plt.cm.viridis(ind), alpha=0.8, lw=5)
plt.xlim([2, 200])
plt.xlabel(r'$\ell$', size=20)
plt.ylabel(r'$C_\ell^\mathrm{{ {0} }}\ (\mathrm{{\mu K_{{CMB}}^2}})$'.format(consider), size=20)
sm = plt.cm.ScalarMappable(cmap=plt.cm.viridis,
norm=plt.Normalize(vmin=taus.min(), vmax=taus.max()))
sm._A = []
plt.colorbar(sm, label=r'$\tau$')
plt.savefig('plots/tau_example.png')
print('Takes ~{0} seconds'.format(round(np.mean(times),2)))
plt.figure()
for ind, tau in zip(color_idx, taus):
t0 = time.time()
ell, Cl = get_cl_grid(tau, consider=consider)
times.append(time.time()-t0)
plt.loglog(ell, Cl, color=plt.cm.viridis(ind), alpha=0.8, lw=5)
plt.xlim([2, 200])
plt.xlabel(r'$\ell$', size=20)
plt.ylabel(r'$C_\ell^\mathrm{{ {0} }}\ (\mathrm{{\mu K_{{CMB}}^2}})$'.format(consider), size=20)
sm = plt.cm.ScalarMappable(cmap=plt.cm.viridis,
norm=plt.Normalize(vmin=taus.min(), vmax=taus.max()))
sm._A = []
plt.colorbar(sm, label=r'$\tau$')
#plt.savefig('plots/tau_example.png')
print('Takes ~{0} seconds'.format(round(np.mean(times),2)))
plt.show()
| 36.105263
| 100
| 0.660593
|
7ed241bdc8a02c16e6ea477e0032b989c2862ec7
| 14,833
|
py
|
Python
|
decofre/infer.py
|
LoicGrobol/decofre
|
68e12c8da4a6c032bb5ea3edff9e8484344e94e2
|
[
"MIT"
] | 9
|
2021-01-15T10:34:02.000Z
|
2021-12-24T13:58:36.000Z
|
decofre/infer.py
|
LoicGrobol/decofre
|
68e12c8da4a6c032bb5ea3edff9e8484344e94e2
|
[
"MIT"
] | 8
|
2020-03-13T10:52:48.000Z
|
2022-02-06T22:15:28.000Z
|
decofre/infer.py
|
LoicGrobol/decofre
|
68e12c8da4a6c032bb5ea3edff9e8484344e94e2
|
[
"MIT"
] | null | null | null |
import contextlib
import pathlib
import shutil
import sys
import tempfile
import typing as ty
import click
import click_pathlib
import jsonlines
import numpy as np
import spacy
import ujson as json
from typing import Any, Dict, List, Literal, Optional, TextIO
from typing_extensions import TypedDict
from decofre.formats import formats
from decofre import detmentions, score, clusterize
spacy.tokens.Doc.set_extension("clusters", default=None)
spacy.tokens.Span.set_extension("cluster", default=None)
spacy.tokens.Span.set_extension("singleton", default=True)
@contextlib.contextmanager
def smart_open(
filename: str, mode: str = "r", *args, **kwargs
) -> ty.Generator[ty.IO, None, None]:
"""Open files and i/o streams transparently."""
if filename == "-":
if "r" in mode:
stream = sys.stdin
else:
stream = sys.stdout
if "b" in mode:
fh = stream.buffer # type: ty.IO
else:
fh = stream
close = False
else:
fh = open(filename, mode, *args, **kwargs)
close = True
try:
yield fh
finally:
if close:
try:
fh.close()
except AttributeError:
pass
@contextlib.contextmanager
def dir_manager(
path: ty.Optional[ty.Union[pathlib.Path, str]] = None, cleanup=None
) -> ty.Generator[pathlib.Path, None, None]:
"""A context manager to deal with a directory, default to a self-destruct temp one."""
if path is None:
d_path = pathlib.Path(tempfile.mkdtemp())
if cleanup is None:
cleanup = True
else:
d_path = pathlib.Path(path).resolve()
d_path.mkdir(parents=True, exist_ok=True)
if cleanup is None:
cleanup = False
elif cleanup:
if d_path.glob("*"):
raise ValueError(f"{d_path} is not empty.")
try:
yield d_path
finally:
if cleanup:
shutil.rmtree(d_path)
class AntecedentFeaturesDict(TypedDict):
w_distance: int
u_distance: int
m_distance: int
spk_agreement: bool
overlap: bool
token_incl: int
token_com: int
def antecedents_from_mentions(
mentions: ty.Iterable[ty.Dict[str, ty.Any]],
max_candidates: int = 128,
distance_buckets: ty.Sequence[int] = (1, 2, 3, 4, 5, 7, 15, 32, 63),
) -> ty.Dict[str, ty.Dict[str, AntecedentFeaturesDict]]:
"""Extract an antecedent dataset from a list of detected mentions."""
sorted_mentions = sorted(mentions, key=lambda m: (m["start"], m["end"]))
if len(sorted_mentions) < 2:
return dict()
# The first mention in a document has no antecedent candidates
res = dict()
for i, mention in enumerate(sorted_mentions[1:], start=1):
mention_content_set = set(mention["content"])
antecedent_candidates = sorted_mentions[max(0, i - max_candidates) : i]
antecedents: ty.Dict[str, AntecedentFeaturesDict] = dict()
for j, candidate in enumerate(antecedent_candidates):
candidate_content_set = set(candidate["content"])
w_distance = int(
np.digitize(
mention["start"] - candidate["end"],
bins=distance_buckets,
right=True,
)
)
u_distance = int(
np.digitize(
mention["sentence"] - candidate["sentence"],
bins=distance_buckets,
)
)
m_distance: int = int(
np.digitize(
len(antecedent_candidates) - j,
bins=distance_buckets,
right=True,
)
)
spk_agreement = mention.get("speaker") == candidate.get("speaker")
intersect = len(mention_content_set.intersection(candidate_content_set))
token_incl_ratio = int(
10
* intersect
/ min(len(mention_content_set), len(candidate_content_set))
)
token_com_ratio = int(
10 * intersect / len(mention_content_set.union(candidate_content_set))
)
overlap = mention["start"] < candidate["end"]
antecedents[candidate["span_id"]] = {
"w_distance": w_distance,
"u_distance": u_distance,
"m_distance": m_distance,
"spk_agreement": spk_agreement,
"overlap": overlap,
"token_incl": token_incl_ratio,
"token_com": token_com_ratio,
}
res[mention["span_id"]] = antecedents
return res
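# Illustration only (hypothetical helper, never called): the distance features
# above rely on np.digitize to map a raw distance onto an index into
# distance_buckets, so small distances get their own buckets while everything
# beyond the last edge collapses into the final bucket.
def _distance_bucket_demo() -> ty.List[int]:
    buckets = (1, 2, 3, 4, 5, 7, 15, 32, 63)
    distances = [0, 1, 6, 10, 100]
    # right=True matches the w_distance computation above: returns [0, 0, 5, 6, 9]
    return [int(np.digitize(d, bins=buckets, right=True)) for d in distances]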
def text_out(doc: spacy.tokens.Doc, latex: bool = False) -> str:
mentions_spans = sorted(
(m for i, c in doc._.clusters.items() for m in c),
key=lambda m: (m.start_char, -m.end_char),
)
text = doc.text
res = []
open_spans: ty.List[spacy.tokens.Span] = []
current_char = 0
for m in mentions_spans:
while open_spans and open_spans[-1].end_char <= m.start_char:
span_to_close = open_spans.pop()
res.append(text[current_char : span_to_close.end_char])
if span_to_close._.singleton:
if latex:
res.append("}")
else:
res.append("]")
else:
if latex:
res.append("}")
else:
res.append(f"][{span_to_close._.cluster}]")
current_char = span_to_close.end_char
if current_char < m.start_char:
res.append(text[current_char : m.start_char])
current_char = m.start_char
if latex:
if m._.singleton:
res.append(r"\mention{")
else:
res.append(f"\\mention[{m._.cluster}]{{")
else:
res.append("[")
open_spans.append(m)
while open_spans:
span_to_close = open_spans.pop()
res.append(text[current_char : span_to_close.end_char])
if span_to_close._.singleton:
if latex:
res.append("}")
else:
res.append("]")
else:
if latex:
res.append("}")
else:
res.append(f"][{span_to_close._.cluster}]")
current_char = span_to_close.end_char
res.append(text[current_char:])
return "".join(res)
def mention_to_json(mention: spacy.tokens.Span) -> Dict[str, Any]:
return {
"text": mention.text,
"start": mention.start_char,
"token_start": mention.start,
"token_end": mention.end,
"end": mention.end_char,
"type": "pattern",
"label": "mention",
}
def token_to_json(token: spacy.tokens.Token) -> Dict[str, Any]:
return {
"text": token.text,
"start": token.idx,
"end": token.idx + len(token),
"id": token.i,
"ws": bool(token.whitespace_),
"disabled": False,
}
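# Note (added comment): `mention_to_json` and `token_to_json` emit dicts in the
# span/token layout expected by the annotation tool targeted by `prodigy_out`;
# character offsets are document-absolute and `ws` records whether the token is
# followed by whitespace.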
def prodigy_out(doc: spacy.tokens.Doc) -> Dict[str, Any]:
res = {
"text": doc.text,
"tokens": [token_to_json(t) for t in doc],
"spans": [],
"relations": [],
}
processed: List[spacy.tokens.Span] = []
for c in doc._.clusters.values():
antecedent: Optional[spacy.tokens.Span] = None
for m in sorted(c, key=lambda m: (m.end, m.start)):
            # This is because Prodigy doesn't allow nested spans
if any(
o.start <= m.start <= o.end or o.start <= m.end <= o.end
for o in processed
):
continue
res["spans"].append(mention_to_json(m))
if antecedent is not None:
res["relations"].append(
{
"head": m.start,
"child": antecedent.start,
"head_span": mention_to_json(m),
"child_span": mention_to_json(antecedent),
"label": "COREF",
}
)
antecedent = m
processed.append(m)
return res
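# Note (added comment): `prodigy_out` produces a single JSON-serialisable dict
# (text, tokens, spans, relations), linking each mention to the previous
# mention of its cluster with a "COREF" relation. Mentions overlapping an
# already emitted span are skipped, as noted above, since the target
# annotation UI does not support nested spans.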
def sacr_out(doc: spacy.tokens.Doc) -> str:
res = []
sents = doc.spans.get("utterances", doc.sents)
for sentence in sents:
sentence_res = []
# FIXME: this relies on having imported avp, which sets these extensions in the global space
# we need a better mechanism
if sentence._.speaker is not None:
sentence_res.append(f"#speaker: {sentence._.speaker}\n\n")
if sentence._.uid is not None:
sentence_res.append(f"#uid: {sentence._.uid}\n\n")
mentions_spans = sorted(
(
m
for i, c in doc._.clusters.items()
for m in c
if sentence.start_char <= m.start_char < m.end_char <= sentence.end_char
),
key=lambda m: (m.start_char, -m.end_char),
)
text = sentence.text
current_char = 0
open_spans: ty.List[spacy.tokens.Span] = []
for m in mentions_spans:
# TODO: stop fiddling with char indices ffs
while open_spans and open_spans[-1].end_char <= m.start_char:
span_to_close = open_spans.pop()
sentence_res.append(
text[current_char : span_to_close.end_char - sentence.start_char]
)
sentence_res.append("}")
current_char = span_to_close.end_char - sentence.start_char
            if current_char < m.start_char - sentence.start_char:
sentence_res.append(
text[current_char : m.start_char - sentence.start_char]
)
current_char = m.start_char - sentence.start_char
sentence_res.append(f"{{{m._.cluster} ")
open_spans.append(m)
while open_spans:
span_to_close = open_spans.pop()
sentence_res.append(
text[current_char : span_to_close.end_char - sentence.start_char]
)
sentence_res.append("}")
current_char = span_to_close.end_char - sentence.start_char
sentence_res.append(text[current_char:])
res.append("".join(sentence_res).strip())
return "\n\n".join((s for s in res if s and not s.isspace()))
@click.command(help="End-to-end coreference resolution")
@click.argument(
"detect-model",
type=click_pathlib.Path(exists=True, dir_okay=False),
)
@click.argument(
"coref-model",
type=click_pathlib.Path(exists=True, dir_okay=False),
)
@click.argument(
"input_file",
type=click.File("r"),
)
@click.argument(
"output_file",
type=click.File("w", atomic=True),
default="-",
)
@click.option(
"--from",
"input_format",
type=click.Choice(formats.keys()),
default="raw_text",
help="The input format",
show_default=True,
)
@click.option(
"--intermediary-dir",
"intermediary_dir_path",
type=click_pathlib.Path(resolve_path=True, file_okay=False),
help="A path to a directory to use for intermediary files, defaults to a self-destructing temp dir",
)
@click.option(
"--lang",
default="fr_core_news_lg",
help="A spaCy model handle for the document.",
show_default=True,
)
@click.option(
"--to",
"output_format",
type=click.Choice(["latex", "prodigy", "sacr", "text"]),
default="text",
help="Output formats (experimental)",
)
def main_entry_point(
coref_model: pathlib.Path,
detect_model: pathlib.Path,
input_format: str,
input_file: TextIO,
intermediary_dir_path: Optional[pathlib.Path],
lang: str,
output_file: TextIO,
output_format: Literal["latex", "prodigy", "sacr", "text"],
):
with dir_manager(intermediary_dir_path) as intermediary_dir:
doc, spans = formats[input_format].get_doc_and_spans(input_file, lang)
initial_doc_path = intermediary_dir / "initial_doc.spacy.json"
with open(initial_doc_path, "w") as out_stream:
json.dump(doc.to_json(), out_stream, ensure_ascii=False)
spans_path = intermediary_dir / "spans.json"
with open(spans_path, "w") as out_stream:
json.dump(spans, out_stream, ensure_ascii=False)
mentions_path = intermediary_dir / "mentions.json"
detmentions.main_entry_point(
[
"--mentions",
"--no-overlap",
str(detect_model),
str(spans_path),
str(mentions_path),
]
)
with open(mentions_path, "r") as in_stream:
mentions_lst = json.load(in_stream)
antecedents = antecedents_from_mentions(mentions_lst)
mention_dict = {m["span_id"]: m for m in mentions_lst}
antecedents_path = intermediary_dir / "antecedents.json"
with open(antecedents_path, "w") as out_stream:
json.dump(
{"mentions": mention_dict, "antecedents": antecedents},
out_stream,
ensure_ascii=False,
)
coref_scores_path = intermediary_dir / "coref_scores.json"
score.main_entry_point(
[str(coref_model), str(antecedents_path), str(coref_scores_path)]
)
clusters_path = intermediary_dir / "clusters.json"
clusterize.main_entry_point([str(coref_scores_path), str(clusters_path)])
with open(clusters_path, "r") as in_stream:
clusters = json.load(in_stream)["clusters"]
doc._.clusters = dict()
for i, c in clusters.items():
doc._.clusters[i] = []
for m_id in c:
mention = mention_dict[m_id]
mention_span = doc[mention["start"] : mention["end"] + 1]
mention_span._.cluster = i
if len(c) > 1:
mention_span._.singleton = False
doc._.clusters[i].append(mention_span)
augmented_doc_path = intermediary_dir / "coref_doc.spacy.json"
with open(augmented_doc_path, "w") as out_stream:
json.dump(doc.to_json(), out_stream, ensure_ascii=False)
if output_format == "latex":
output_file.write(text_out(doc, latex=True))
output_file.write("\n")
elif output_format == "prodigy":
output_dict = prodigy_out(doc)
writer = jsonlines.Writer(output_file)
writer.write(output_dict)
writer.close()
elif output_format == "sacr":
output_file.write(sacr_out(doc))
else:
output_file.write(text_out(doc))
output_file.write("\n")
if __name__ == "__main__":
main_entry_point()
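# Example invocation (added comment, script and file names are hypothetical):
#
#     python end_to_end.py detect.model coref.model input.txt output.txt \
#         --from raw_text --to sacr --intermediary-dir work/
#
# The two positional model paths must exist; intermediate artefacts (spans,
# mentions, antecedent features, coreference scores, clusters) are written to
# the intermediary directory, or to a self-destructing temp dir if none is
# given.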
avg_line_length: 32.6 | max_line_length: 104 | alphanum_fraction: 0.569945