| column | dtype | range |
|---|---|---|
| hexsha | stringlengths | 40 – 40 |
| size | int64 | 3 – 1.03M |
| ext | stringclasses | 10 values |
| lang | stringclasses | 1 value |
| max_stars_repo_path | stringlengths | 3 – 972 |
| max_stars_repo_name | stringlengths | 6 – 130 |
| max_stars_repo_head_hexsha | stringlengths | 40 – 78 |
| max_stars_repo_licenses | listlengths | 1 – 10 |
| max_stars_count | int64 ⌀ | 1 – 191k |
| max_stars_repo_stars_event_min_datetime | stringlengths ⌀ | 24 – 24 |
| max_stars_repo_stars_event_max_datetime | stringlengths ⌀ | 24 – 24 |
| max_issues_repo_path | stringlengths | 3 – 972 |
| max_issues_repo_name | stringlengths | 6 – 130 |
| max_issues_repo_head_hexsha | stringlengths | 40 – 78 |
| max_issues_repo_licenses | listlengths | 1 – 10 |
| max_issues_count | int64 ⌀ | 1 – 116k |
| max_issues_repo_issues_event_min_datetime | stringlengths ⌀ | 24 – 24 |
| max_issues_repo_issues_event_max_datetime | stringlengths ⌀ | 24 – 24 |
| max_forks_repo_path | stringlengths | 3 – 972 |
| max_forks_repo_name | stringlengths | 6 – 130 |
| max_forks_repo_head_hexsha | stringlengths | 40 – 78 |
| max_forks_repo_licenses | listlengths | 1 – 10 |
| max_forks_count | int64 ⌀ | 1 – 105k |
| max_forks_repo_forks_event_min_datetime | stringlengths ⌀ | 24 – 24 |
| max_forks_repo_forks_event_max_datetime | stringlengths ⌀ | 24 – 24 |
| content | stringlengths | 3 – 1.03M |
| avg_line_length | float64 | 1.13 – 941k |
| max_line_length | int64 | 2 – 941k |
| alphanum_fraction | float64 | 0 – 1 |
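Each row below carries the full file `content` plus the repository metadata described by this schema. As a minimal sketch of how a table with this schema is typically consumed, assuming it comes from a Hugging Face dataset (the `user/dataset` path below is a placeholder; the dump does not name its source):

```python
# Minimal sketch: stream rows of a dataset with the schema above and inspect them.
# "user/dataset" is a placeholder; substitute the real dataset identifier.
from itertools import islice

from datasets import load_dataset  # pip install datasets

ds = load_dataset("user/dataset", split="train", streaming=True)

for row in islice(ds, 3):
    # Each row exposes the columns listed in the schema table above.
    print(row["max_stars_repo_name"], row["max_stars_repo_path"],
          row["size"], row["alphanum_fraction"])
```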
hexsha: a26f99f8d854ce3c7fb2a966113557a4382f5ef8 | size: 4,109 | ext: py | lang: Python
max_stars: src/libnrl/classify.py | Tadelaide/OpenNE @ 53a558826e3ab38de96bd4be1f3f0f5f3cd0f377 | ["MIT"] | count: 2 | 2018-09-01T17:35:11.000Z → 2020-10-22T13:52:07.000Z
max_issues: src/libnrl/classify.py | Tadelaide/OpenNE @ 53a558826e3ab38de96bd4be1f3f0f5f3cd0f377 | ["MIT"] | count: null
max_forks: src/libnrl/classify.py | Tadelaide/OpenNE @ 53a558826e3ab38de96bd4be1f3f0f5f3cd0f377 | ["MIT"] | count: null
from __future__ import print_function
import numpy
from sklearn.multiclass import OneVsRestClassifier
from sklearn.metrics import f1_score
from sklearn.preprocessing import MultiLabelBinarizer
from time import time
from sklearn.preprocessing import LabelEncoder
class TopKRanker(OneVsRestClassifier):
def predict(self, X, top_k_list):
probs = numpy.asarray(super(TopKRanker, self).predict_proba(X))
all_labels = []
for i, k in enumerate(top_k_list):
probs_ = probs[i, :]
labels = self.classes_[probs_.argsort()[-k:]].tolist()
probs_[:] = 0
probs_[labels] = 1
all_labels.append(probs_)
return numpy.asarray(all_labels)
class Classifier(object):
def __init__(self, vectors, clf):
self.embeddings = vectors
self.clf = TopKRanker(clf)
self.binarizer = MultiLabelBinarizer(sparse_output=True)
        # self.labelEncoder = LabelEncoder()
def train(self, X, Y, Y_all):
self.binarizer.fit(Y_all)
X_train = [self.embeddings[x] for x in X]
Y = self.binarizer.transform(Y)
self.clf.fit(X_train, Y)
def evaluate(self, X, Y):
top_k_list = [len(l) for l in Y]
Y_ = self.predict(X, top_k_list)
Y = self.binarizer.transform(Y)
averages = ["micro", "macro", "samples", "weighted"]
results = {}
for average in averages:
results[average] = f1_score(Y, Y_, average=average)
# print('Results, using embeddings of dimensionality', len(self.embeddings[X[0]]))
# print('-------------------')
print(results)
return results
# print('-------------------')
def predict(self, X, top_k_list):
X_ = numpy.asarray([self.embeddings[x] for x in X])
Y = self.clf.predict(X_, top_k_list=top_k_list)
return Y
    def split_train_evaluate(self, X, Y, train_percent, seed=0):
        """Per-label split: train on a train_percent fraction of each label's nodes, evaluate on the rest."""
        state = numpy.random.get_state()
        numpy.random.seed(seed)
K = {}
for i, item in enumerate(Y):
if item[0] not in list(K.keys()):
K[item[0]] = [X[i]]
else:
K[item[0]].append(X[i])
X_train = []
Y_train = []
X_test = []
Y_test = []
        for label in K.keys():
            train_size_label = int(train_percent * len(K[label]))
            shuffle_indices = numpy.random.permutation(numpy.arange(len(K[label])))
            X_train.extend([K[label][shuffle_indices[i]] for i in range(train_size_label)])
            X_test.extend([K[label][shuffle_indices[i]] for i in range(train_size_label, len(K[label]))])
            Y_train.extend([[label] for i in range(train_size_label)])
            Y_test.extend([[label] for i in range(train_size_label, len(K[label]))])
self.train(X_train, Y_train, Y)
numpy.random.set_state(state)
return self.evaluate(X_test, Y_test)
def load_embeddings(filename):
    """Load embeddings in word2vec text format: a 'node_num dim' header, then one 'node v1 ... vd' line per node."""
    with open(filename, 'r') as fin:
        node_num, size = [int(x) for x in fin.readline().strip().split()]
        vectors = {}
        for l in fin:
            vec = l.strip().split(' ')
            assert len(vec) == size + 1
            vectors[vec[0]] = [float(x) for x in vec[1:]]
    assert len(vectors) == node_num
    return vectors
def read_node_label(filename):
    """Read node labels from a space-separated file: 'node label1 [label2 ...]' per line."""
    X = []
    Y = []
    with open(filename, 'r') as fin:
        for l in fin:
            vec = l.strip().split(' ')
            X.append(vec[0])
            Y.append(vec[1:])
    return X, Y
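A minimal usage sketch for the classifier above, assuming an embeddings file in the format `load_embeddings` expects and a space-separated node-label file; the file names and the `LogisticRegression` base estimator are illustrative, not part of the module:

```python
# Illustrative usage; file names are hypothetical.
from sklearn.linear_model import LogisticRegression

vectors = load_embeddings("embeddings.txt")   # {node: [v1, ..., vd]}
X, Y = read_node_label("labels.txt")          # node ids and their label lists

clf = Classifier(vectors=vectors, clf=LogisticRegression())
# Per-label 80/20 train/test split, then micro/macro/samples/weighted F1.
results = clf.split_train_evaluate(X, Y, 0.8, seed=0)
```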
avg_line_length: 34.241667 | max_line_length: 106 | alphanum_fraction: 0.583597
hexsha: b071a015e2847597b09e83f26850ab5c50dc96bd | size: 7,040 | ext: py | lang: Python
max_stars: lib/ziming_yang_1ConfigFilterApplicationDemo/ziming_yang_1ConfigFilterApplicationDemoImpl.py | Yzm1234/ConfigFilterDemoRepo @ d40353a9c2d32d0b7471f80d717e3bd192cd3984 | ["MIT"] | count: null
max_issues: lib/ziming_yang_1ConfigFilterApplicationDemo/ziming_yang_1ConfigFilterApplicationDemoImpl.py | Yzm1234/ConfigFilterDemoRepo @ d40353a9c2d32d0b7471f80d717e3bd192cd3984 | ["MIT"] | count: null
max_forks: lib/ziming_yang_1ConfigFilterApplicationDemo/ziming_yang_1ConfigFilterApplicationDemoImpl.py | Yzm1234/ConfigFilterDemoRepo @ d40353a9c2d32d0b7471f80d717e3bd192cd3984 | ["MIT"] | count: null
# -*- coding: utf-8 -*-
#BEGIN_HEADER
# The header block is where all import statements should live
import logging
import os
from pprint import pformat
from Bio import SeqIO
from installed_clients.AssemblyUtilClient import AssemblyUtil
from installed_clients.KBaseReportClient import KBaseReport
#END_HEADER
class ziming_yang_1ConfigFilterApplicationDemo:
'''
Module Name:
ziming_yang_1ConfigFilterApplicationDemo
Module Description:
A KBase module: ziming_yang_1ConfigFilterApplicationDemo
This sample module contains one small method that filters contigs.
'''
######## WARNING FOR GEVENT USERS ####### noqa
# Since asynchronous IO can lead to methods - even the same method -
# interrupting each other, you must be *very* careful when using global
# state. A method could easily clobber the state set by another while
# the latter method is running.
######################################### noqa
VERSION = "0.0.1"
GIT_URL = ""
GIT_COMMIT_HASH = ""
#BEGIN_CLASS_HEADER
# Class variables and functions can be defined in this block
#END_CLASS_HEADER
# config contains contents of config file in a hash or None if it couldn't
# be found
def __init__(self, config):
#BEGIN_CONSTRUCTOR
# Any configuration parameters that are important should be parsed and
# saved in the constructor.
self.callback_url = os.environ['SDK_CALLBACK_URL']
self.shared_folder = config['scratch']
logging.basicConfig(format='%(created)s %(levelname)s: %(message)s',
level=logging.INFO)
#END_CONSTRUCTOR
pass
def run_ziming_yang_1ConfigFilterApplicationDemo(self, ctx, params):
"""
This example function accepts any number of parameters and returns results in a KBaseReport
:param params: instance of mapping from String to unspecified object
:returns: instance of type "ReportResults" -> structure: parameter
"report_name" of String, parameter "report_ref" of String
"""
# ctx is the context object
# return variables are: output
#BEGIN run_ziming_yang_1ConfigFilterApplicationDemo
# Print statements to stdout/stderr are captured and available as the App log
logging.info('Starting run_ziming_yang_1ConfigFilterApplicationDemo function. Params=' + pformat(params))
# Step 1 - Parse/examine the parameters and catch any errors
# It is important to check that parameters exist and are defined, and that nice error
# messages are returned to users. Parameter values go through basic validation when
# defined in a Narrative App, but advanced users or other SDK developers can call
# this function directly, so validation is still important.
logging.info('Validating parameters.')
if 'workspace_name' not in params:
raise ValueError('Parameter workspace_name is not set in input arguments')
workspace_name = params['workspace_name']
if 'assembly_input_ref' not in params:
raise ValueError('Parameter assembly_input_ref is not set in input arguments')
assembly_input_ref = params['assembly_input_ref']
if 'min_length' not in params:
raise ValueError('Parameter min_length is not set in input arguments')
min_length_orig = params['min_length']
min_length = None
try:
min_length = int(min_length_orig)
except ValueError:
raise ValueError('Cannot parse integer from min_length parameter (' + str(min_length_orig) + ')')
if min_length < 0:
raise ValueError('min_length parameter cannot be negative (' + str(min_length) + ')')
        # Step 2 - Download the input data as a FASTA file
# We can use the AssemblyUtils module to download a FASTA file from our Assembly data object.
# The return object gives us the path to the file that was created.
logging.info('Downloading Assembly data as a Fasta file.')
assemblyUtil = AssemblyUtil(self.callback_url)
fasta_file = assemblyUtil.get_assembly_as_fasta({'ref': assembly_input_ref})
# Step 3 - Actually perform the filter operation, saving the good contigs to a new fasta file.
# We can use BioPython to parse the Fasta file and build and save the output to a file.
good_contigs = []
n_total = 0
n_remaining = 0
for record in SeqIO.parse(fasta_file['path'], 'fasta'):
n_total += 1
if len(record.seq) >= min_length:
good_contigs.append(record)
n_remaining += 1
logging.info('Filtered Assembly to ' + str(n_remaining) + ' contigs out of ' + str(n_total))
filtered_fasta_file = os.path.join(self.shared_folder, 'filtered.fasta')
SeqIO.write(good_contigs, filtered_fasta_file, 'fasta')
# Step 4 - Save the new Assembly back to the system
logging.info('Uploading filtered Assembly data.')
new_assembly = assemblyUtil.save_assembly_from_fasta({'file': {'path': filtered_fasta_file},
'workspace_name': workspace_name,
'assembly_name': fasta_file['assembly_name']
})
# Step 5 - Build a Report and return
reportObj = {
'objects_created': [{'ref': new_assembly, 'description': 'Filtered contigs'}],
'text_message': 'Filtered Assembly to ' + str(n_remaining) + ' contigs out of ' + str(n_total)
}
report = KBaseReport(self.callback_url)
report_info = report.create({'report': reportObj, 'workspace_name': params['workspace_name']})
        # STEP 6: construct the output to send back
output = {'report_name': report_info['name'],
'report_ref': report_info['ref'],
'assembly_output': new_assembly,
'n_initial_contigs': n_total,
'n_contigs_removed': n_total - n_remaining,
'n_contigs_remaining': n_remaining
}
logging.info('returning:' + pformat(output))
#END run_ziming_yang_1ConfigFilterApplicationDemo
# At some point might do deeper type checking...
if not isinstance(output, dict):
raise ValueError('Method run_ziming_yang_1ConfigFilterApplicationDemo return value ' +
'output is not type dict as required.')
# return the results
return [output]
def status(self, ctx):
#BEGIN_STATUS
returnVal = {'state': "OK",
'message': "",
'version': self.VERSION,
'git_url': self.GIT_URL,
'git_commit_hash': self.GIT_COMMIT_HASH}
#END_STATUS
return [returnVal]
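The core of the method above is the length filter in Step 3. A standalone sketch of that step, runnable without the KBase runtime (the input path and threshold are hypothetical):

```python
# Standalone sketch of the contig-filtering step; no KBase services required.
from Bio import SeqIO

min_length = 1000  # illustrative threshold
good_contigs = [rec for rec in SeqIO.parse("assembly.fasta", "fasta")
                if len(rec.seq) >= min_length]
SeqIO.write(good_contigs, "filtered.fasta", "fasta")
```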
avg_line_length: 44 | max_line_length: 113 | alphanum_fraction: 0.634091
hexsha: bbef751a1b46c9c1bbd9350a0de56593e6b73331 | size: 2,010 | ext: py | lang: Python
max_stars: NBApredict/br_web_scraper/http_client.py | anshu7919/NBApredict @ 59f4a128c1c964a5f78860227d3309edbc57766e | ["MIT"] | count: 10 | 2019-08-23T07:49:52.000Z → 2022-02-22T14:46:01.000Z
max_issues: NBApredict/br_web_scraper/http_client.py | anshu7919/NBApredict @ 59f4a128c1c964a5f78860227d3309edbc57766e | ["MIT"] | count: 9 | 2018-11-16T23:11:03.000Z → 2019-08-07T21:17:22.000Z
max_forks: NBApredict/br_web_scraper/http_client.py | Spencer-Weston/NBA_bet @ e84867b18e2dbaf58340c30c061f172d62f52699 | ["MIT"] | count: 8 | 2020-02-01T07:47:41.000Z → 2021-06-14T15:09:55.000Z
import requests
from nbapredict.br_web_scraper.errors import InvalidDate
from nbapredict.br_web_scraper.parsers.box_scores import parse_player_box_scores
from nbapredict.br_web_scraper.parsers.schedule import parse_schedule, parse_schedule_for_month_url_paths
from nbapredict.br_web_scraper.parsers.players_season_totals import parse_players_season_totals
BASE_URL = 'https://www.basketball-reference.com'
def player_box_scores(day, month, year):
url = '{BASE_URL}/friv/dailyleaders.cgi?month={month}&day={day}&year={year}'.format(
BASE_URL=BASE_URL,
day=day,
month=month,
year=year
)
response = requests.get(url=url, allow_redirects=False)
if 200 <= response.status_code < 300:
return parse_player_box_scores(response.content)
raise InvalidDate(day=day, month=month, year=year)
def schedule_for_month(url):
response = requests.get(url=url)
response.raise_for_status()
return parse_schedule(response.content)
def season_schedule(season_end_year):
url = '{BASE_URL}/leagues/NBA_{season_end_year}_games.html'.format(
BASE_URL=BASE_URL,
season_end_year=season_end_year
)
response = requests.get(url=url)
response.raise_for_status()
season_schedule_values = parse_schedule(response.content)
other_month_url_paths = parse_schedule_for_month_url_paths(response.content)
for month_url_path in other_month_url_paths:
url = '{BASE_URL}{month_url_path}'.format(BASE_URL=BASE_URL, month_url_path=month_url_path)
monthly_schedule = schedule_for_month(url=url)
season_schedule_values.extend(monthly_schedule)
return season_schedule_values
def players_season_totals(season_end_year):
url = '{BASE_URL}/leagues/NBA_{season_end_year}_totals.html'.format(
BASE_URL=BASE_URL,
season_end_year=season_end_year,
)
response = requests.get(url=url)
response.raise_for_status()
return parse_players_season_totals(response.content)
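A short usage sketch; the date and season values are arbitrary examples, and each call performs a live HTTP request to basketball-reference.com:

```python
# Illustrative calls; the dates and season are arbitrary.
games = season_schedule(season_end_year=2019)           # full 2018-19 schedule
totals = players_season_totals(season_end_year=2019)    # per-player season totals
boxes = player_box_scores(day=25, month=12, year=2018)  # raises InvalidDate on bad dates
print(len(games), len(totals), len(boxes))
```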
avg_line_length: 30 | max_line_length: 105 | alphanum_fraction: 0.758209
hexsha: 78a0517921044bf782731ec1d4d381b865e1821b | size: 6,873 | ext: py | lang: Python
max_stars: homeassistant/components/mqtt/sensor.py | rr326/core @ 479ff92acbc9ec4db09b7a300d2beb67f00ab746 | ["Apache-2.0"] | count: 1 | 2021-02-22T11:57:01.000Z → 2021-02-22T11:57:01.000Z
max_issues: homeassistant/components/mqtt/sensor.py | hartley94ad/core @ 7148071be89b0ecbb5cfb33140b3f4702db7035f | ["Apache-2.0"] | count: null
max_forks: homeassistant/components/mqtt/sensor.py | hartley94ad/core @ 7148071be89b0ecbb5cfb33140b3f4702db7035f | ["Apache-2.0"] | count: null
"""Support for MQTT sensors."""
from datetime import timedelta
import functools
import logging
from typing import Optional
import voluptuous as vol
from homeassistant.components import sensor
from homeassistant.components.sensor import DEVICE_CLASSES_SCHEMA
from homeassistant.const import (
CONF_DEVICE,
CONF_DEVICE_CLASS,
CONF_FORCE_UPDATE,
CONF_ICON,
CONF_NAME,
CONF_UNIQUE_ID,
CONF_UNIT_OF_MEASUREMENT,
CONF_VALUE_TEMPLATE,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_track_point_in_utc_time
from homeassistant.helpers.reload import async_setup_reload_service
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
from homeassistant.util import dt as dt_util
from . import CONF_QOS, CONF_STATE_TOPIC, DOMAIN, PLATFORMS, subscription
from .. import mqtt
from .debug_info import log_messages
from .mixins import (
MQTT_AVAILABILITY_SCHEMA,
MQTT_ENTITY_DEVICE_INFO_SCHEMA,
MQTT_JSON_ATTRS_SCHEMA,
MqttAvailability,
MqttEntity,
async_setup_entry_helper,
)
_LOGGER = logging.getLogger(__name__)
CONF_EXPIRE_AFTER = "expire_after"
DEFAULT_NAME = "MQTT Sensor"
DEFAULT_FORCE_UPDATE = False
PLATFORM_SCHEMA = (
mqtt.MQTT_RO_PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_DEVICE): MQTT_ENTITY_DEVICE_INFO_SCHEMA,
vol.Optional(CONF_DEVICE_CLASS): DEVICE_CLASSES_SCHEMA,
vol.Optional(CONF_EXPIRE_AFTER): cv.positive_int,
vol.Optional(CONF_FORCE_UPDATE, default=DEFAULT_FORCE_UPDATE): cv.boolean,
vol.Optional(CONF_ICON): cv.icon,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_UNIQUE_ID): cv.string,
vol.Optional(CONF_UNIT_OF_MEASUREMENT): cv.string,
}
)
.extend(MQTT_AVAILABILITY_SCHEMA.schema)
.extend(MQTT_JSON_ATTRS_SCHEMA.schema)
)
async def async_setup_platform(
hass: HomeAssistantType, config: ConfigType, async_add_entities, discovery_info=None
):
"""Set up MQTT sensors through configuration.yaml."""
await async_setup_reload_service(hass, DOMAIN, PLATFORMS)
await _async_setup_entity(hass, async_add_entities, config)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up MQTT sensors dynamically through MQTT discovery."""
setup = functools.partial(
_async_setup_entity, hass, async_add_entities, config_entry=config_entry
)
await async_setup_entry_helper(hass, sensor.DOMAIN, setup, PLATFORM_SCHEMA)
async def _async_setup_entity(
hass, async_add_entities, config: ConfigType, config_entry=None, discovery_data=None
):
"""Set up MQTT sensor."""
async_add_entities([MqttSensor(hass, config, config_entry, discovery_data)])
class MqttSensor(MqttEntity, Entity):
"""Representation of a sensor that can be updated using MQTT."""
def __init__(self, hass, config, config_entry, discovery_data):
"""Initialize the sensor."""
self._state = None
self._expiration_trigger = None
expire_after = config.get(CONF_EXPIRE_AFTER)
if expire_after is not None and expire_after > 0:
self._expired = True
else:
self._expired = None
MqttEntity.__init__(self, hass, config, config_entry, discovery_data)
@staticmethod
def config_schema():
"""Return the config schema."""
return PLATFORM_SCHEMA
def _setup_from_config(self, config):
"""(Re)Setup the entity."""
self._config = config
template = self._config.get(CONF_VALUE_TEMPLATE)
if template is not None:
template.hass = self.hass
async def _subscribe_topics(self):
"""(Re)Subscribe to topics."""
@callback
@log_messages(self.hass, self.entity_id)
def message_received(msg):
"""Handle new MQTT messages."""
payload = msg.payload
# auto-expire enabled?
expire_after = self._config.get(CONF_EXPIRE_AFTER)
if expire_after is not None and expire_after > 0:
                # A fresh message means the value is current again, so clear the expired flag
self._expired = False
# Reset old trigger
if self._expiration_trigger:
self._expiration_trigger()
self._expiration_trigger = None
# Set new trigger
expiration_at = dt_util.utcnow() + timedelta(seconds=expire_after)
self._expiration_trigger = async_track_point_in_utc_time(
self.hass, self._value_is_expired, expiration_at
)
template = self._config.get(CONF_VALUE_TEMPLATE)
if template is not None:
payload = template.async_render_with_possible_json_value(
payload, self._state
)
self._state = payload
self.async_write_ha_state()
self._sub_state = await subscription.async_subscribe_topics(
self.hass,
self._sub_state,
{
"state_topic": {
"topic": self._config[CONF_STATE_TOPIC],
"msg_callback": message_received,
"qos": self._config[CONF_QOS],
}
},
)
@callback
def _value_is_expired(self, *_):
"""Triggered when value is expired."""
self._expiration_trigger = None
self._expired = True
self.async_write_ha_state()
@property
def name(self):
"""Return the name of the sensor."""
return self._config[CONF_NAME]
@property
def unit_of_measurement(self):
"""Return the unit this state is expressed in."""
return self._config.get(CONF_UNIT_OF_MEASUREMENT)
@property
def force_update(self):
"""Force update."""
return self._config[CONF_FORCE_UPDATE]
@property
def state(self):
"""Return the state of the entity."""
return self._state
@property
def icon(self):
"""Return the icon."""
return self._config.get(CONF_ICON)
@property
def device_class(self) -> Optional[str]:
"""Return the device class of the sensor."""
return self._config.get(CONF_DEVICE_CLASS)
@property
def available(self) -> bool:
"""Return true if the device is available and value has not expired."""
expire_after = self._config.get(CONF_EXPIRE_AFTER)
# pylint: disable=no-member
return MqttAvailability.available.fget(self) and (
expire_after is None or not self._expired
)
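The `expire_after` handling above follows a reset-on-message timer pattern: each incoming message cancels the pending expiration trigger and schedules a new one, so the value only expires after a quiet period. A framework-free sketch of the same idea (illustrative, not Home Assistant API):

```python
# Standalone sketch of the expire_after pattern (not Home Assistant code).
import threading


class ExpiringValue:
    def __init__(self, expire_after: float):
        self._expire_after = expire_after
        self._timer = None
        self.value = None
        self.expired = True

    def _on_expire(self):
        self._timer = None
        self.expired = True

    def update(self, value):
        """Store a fresh value and restart the expiration countdown."""
        self.value = value
        self.expired = False
        if self._timer is not None:  # reset the old trigger, as above
            self._timer.cancel()
        self._timer = threading.Timer(self._expire_after, self._on_expire)
        self._timer.start()
```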
avg_line_length: 33.202899 | max_line_length: 140 | alphanum_fraction: 0.667103
hexsha: f97bd414c6fcb2b89bc39e9d97e24f21cc4601ff | size: 4,493 | ext: py | lang: Python
max_stars: src/malkit/utils.py | xymy/malkit @ 8e0d6f5e1c993c706f0f76e10fa8095b16f4208f | ["MIT"] | count: null
max_issues: src/malkit/utils.py | xymy/malkit @ 8e0d6f5e1c993c706f0f76e10fa8095b16f4208f | ["MIT"] | count: null
max_forks: src/malkit/utils.py | xymy/malkit @ 8e0d6f5e1c993c706f0f76e10fa8095b16f4208f | ["MIT"] | count: null
import functools
import shutil
from pathlib import Path
from typing import Any, Iterable, List, Optional, Tuple
import numpy as np
import pandas as pd
from .parallel import execute_parallel
from .typing import FilePath
__all__ = [
"categorize_folders",
"split_labels",
"build_srcs_dsts",
"convert_bytes_to_binary",
"convert_bytes_to_binary_parallel",
]
def categorize_folders(root: FilePath, labels: pd.DataFrame, *, suffix: Optional[str] = None) -> None:
"""Categorize samples and move them into class name folders."""
root = Path(root)
sample_names = [str(item) for item in labels.iloc[:, 0]]
target_names = [str(item) for item in labels.iloc[:, 1]]
for klass in set(target_names):
cat_dir = root / klass
if cat_dir.is_dir():
return
cat_dir.mkdir(parents=True, exist_ok=True)
for sample_name, target_name in zip(sample_names, target_names):
src = root / sample_name
if suffix is not None:
src = src.with_suffix(suffix)
shutil.move(src, root / target_name)
def split_labels(
labels: pd.DataFrame,
*,
test_size: Optional[float] = None,
train_size: Optional[float] = None,
random_state: Optional[int] = None,
shuffle: bool = True,
stratified: bool = True,
) -> Tuple[pd.DataFrame, pd.DataFrame]:
"""Split labels into two parts."""
from sklearn.model_selection import train_test_split
stratify = None
if shuffle and stratified:
stratify = labels.iloc[:, 1].to_numpy()
indices = np.arange(len(labels))
idx1, idx2 = train_test_split(
indices,
test_size=test_size,
train_size=train_size,
random_state=random_state,
shuffle=shuffle,
stratify=stratify,
)
return labels.iloc[idx1], labels.iloc[idx2]
def _build_srcs_dsts(
src_dir: Path, dst_dir: Path, *, skip_exist: bool = True, suffix: Optional[str] = None
) -> Tuple[List[Path], List[Path]]:
srcs = []
dsts = []
dst_dir.mkdir(parents=True, exist_ok=True)
for src in src_dir.iterdir():
if suffix is None:
name = src.name
else:
name = src.with_suffix(suffix).name
dst = dst_dir / name
if skip_exist and dst.exists():
continue
srcs.append(src)
dsts.append(dst)
return srcs, dsts
def _build_srcs_dsts_cat(
src_dir: Path, dst_dir: Path, *, skip_exist: bool = True, suffix: Optional[str] = None
) -> Tuple[List[Path], List[Path]]:
srcs = []
dsts = []
dst_dir.mkdir(parents=True, exist_ok=True)
for src_cat_dir in src_dir.iterdir():
dst_cat_dir = dst_dir / src_cat_dir.name
dst_cat_dir.mkdir(parents=True, exist_ok=True)
for src in src_cat_dir.iterdir():
if suffix is None:
name = src.name
else:
name = src.with_suffix(suffix).name
dst = dst_cat_dir / name
if skip_exist and dst.exists():
continue
srcs.append(src)
dsts.append(dst)
return srcs, dsts
def build_srcs_dsts(
src_dir: FilePath, dst_dir: FilePath, *, cat: bool = True, skip_exist: bool = True, suffix: Optional[str] = None
) -> Tuple[List[Path], List[Path]]:
"""Build source paths and destination paths."""
src_dir = Path(src_dir)
dst_dir = Path(dst_dir)
if cat:
return _build_srcs_dsts_cat(src_dir, dst_dir, suffix=suffix, skip_exist=skip_exist)
else:
return _build_srcs_dsts(src_dir, dst_dir, suffix=suffix, skip_exist=skip_exist)
def convert_bytes_to_binary(bytes_file: FilePath, binary_file: FilePath, *, qq: str = "00") -> None:
"""Convert bytes file to binary file."""
with open(bytes_file, "r", encoding="ascii") as src, open(binary_file, "wb") as dst:
for line in src:
i = line.find(" ")
if i < 0:
raise ValueError(f"invalid bytes file {bytes_file!r}")
data = line[i + 1 :].replace("??", qq)
dst.write(bytes.fromhex(data))
def convert_bytes_to_binary_parallel(
bytes_files: Iterable[FilePath],
binary_files: Iterable[FilePath],
*,
qq: str = "00",
n_jobs: Optional[int] = None,
**kwargs: Any,
) -> None:
"""Convert bytes file to binary file in parallel."""
function = functools.partial(convert_bytes_to_binary, qq=qq)
execute_parallel(function, bytes_files, binary_files, n_jobs=n_jobs, **kwargs)
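A brief usage sketch; the directory layout and file names are hypothetical, and `labels.csv` is assumed to have the two columns (sample name, class name) that `split_labels` reads positionally:

```python
# Illustrative usage; paths and file names are hypothetical.
import pandas as pd

labels = pd.read_csv("labels.csv")
train_labels, test_labels = split_labels(labels, test_size=0.2, random_state=42)

# Pair each .bytes source with a .bin destination, skipping files already
# converted, then run the conversion across 4 worker processes.
srcs, dsts = build_srcs_dsts("bytes/", "binary/", cat=False, suffix=".bin")
convert_bytes_to_binary_parallel(srcs, dsts, n_jobs=4)
```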
avg_line_length: 30.358108 | max_line_length: 116 | alphanum_fraction: 0.634543
hexsha: e4d688c91f9aa54c27331b80dccb5f2074edd6de | size: 9,020 | ext: py | lang: Python
max_stars: boot/rpi/tools/binman/entry.py | yodaos-project/yodaos @ d0d7bbc277c0fc1c64e2e0a1c82fe6e63f6eb954 | ["Apache-2.0"] | count: 1,144 | 2018-12-18T09:46:47.000Z → 2022-03-07T14:51:46.000Z
max_issues: boot/rpi/tools/binman/entry.py | Rokid/YodaOS @ d0d7bbc277c0fc1c64e2e0a1c82fe6e63f6eb954 | ["Apache-2.0"] | count: 16 | 2019-01-28T06:08:40.000Z → 2019-12-04T10:26:41.000Z
max_forks: boot/rpi/tools/binman/entry.py | Rokid/YodaOS @ d0d7bbc277c0fc1c64e2e0a1c82fe6e63f6eb954 | ["Apache-2.0"] | count: 129 | 2018-12-18T09:46:50.000Z → 2022-03-30T07:30:13.000Z
# SPDX-License-Identifier: GPL-2.0+
# Copyright (c) 2016 Google, Inc
#
# Base class for all entries
#
from __future__ import print_function
# importlib was introduced in Python 2.7 but there was a report of it not
# working in 2.7.12, so we work around this:
# http://lists.denx.de/pipermail/u-boot/2016-October/269729.html
try:
import importlib
have_importlib = True
except ImportError:
have_importlib = False
import fdt_util
import os
import sys
import tools
modules = {}
our_path = os.path.dirname(os.path.realpath(__file__))
class Entry(object):
"""An Entry in the section
An entry corresponds to a single node in the device-tree description
of the section. Each entry ends up being a part of the final section.
Entries can be placed either right next to each other, or with padding
between them. The type of the entry determines the data that is in it.
This class is not used by itself. All entry objects are subclasses of
Entry.
Attributes:
section: The section containing this entry
node: The node that created this entry
pos: Absolute position of entry within the section, None if not known
size: Entry size in bytes, None if not known
contents_size: Size of contents in bytes, 0 by default
align: Entry start position alignment, or None
align_size: Entry size alignment, or None
align_end: Entry end position alignment, or None
pad_before: Number of pad bytes before the contents, 0 if none
pad_after: Number of pad bytes after the contents, 0 if none
data: Contents of entry (string of bytes)
"""
def __init__(self, section, etype, node, read_node=True, name_prefix=''):
self.section = section
self.etype = etype
self._node = node
self.name = node and (name_prefix + node.name) or 'none'
self.pos = None
self.size = None
self.contents_size = 0
self.align = None
self.align_size = None
self.align_end = None
self.pad_before = 0
self.pad_after = 0
self.pos_unset = False
if read_node:
self.ReadNode()
@staticmethod
def Create(section, node, etype=None):
"""Create a new entry for a node.
Args:
section: Image object containing this node
node: Node object containing information about the entry to create
etype: Entry type to use, or None to work it out (used for tests)
Returns:
A new Entry object of the correct type (a subclass of Entry)
"""
if not etype:
etype = fdt_util.GetString(node, 'type', node.name)
# Convert something like 'u-boot@0' to 'u_boot' since we are only
# interested in the type.
module_name = etype.replace('-', '_')
if '@' in module_name:
module_name = module_name.split('@')[0]
module = modules.get(module_name)
# Also allow entry-type modules to be brought in from the etype directory.
# Import the module if we have not already done so.
if not module:
old_path = sys.path
sys.path.insert(0, os.path.join(our_path, 'etype'))
try:
if have_importlib:
module = importlib.import_module(module_name)
else:
module = __import__(module_name)
except ImportError:
raise ValueError("Unknown entry type '%s' in node '%s'" %
(etype, node.path))
finally:
sys.path = old_path
modules[module_name] = module
# Call its constructor to get the object we want.
obj = getattr(module, 'Entry_%s' % module_name)
return obj(section, etype, node)
def ReadNode(self):
"""Read entry information from the node
This reads all the fields we recognise from the node, ready for use.
"""
self.pos = fdt_util.GetInt(self._node, 'pos')
self.size = fdt_util.GetInt(self._node, 'size')
self.align = fdt_util.GetInt(self._node, 'align')
if tools.NotPowerOfTwo(self.align):
raise ValueError("Node '%s': Alignment %s must be a power of two" %
(self._node.path, self.align))
self.pad_before = fdt_util.GetInt(self._node, 'pad-before', 0)
self.pad_after = fdt_util.GetInt(self._node, 'pad-after', 0)
self.align_size = fdt_util.GetInt(self._node, 'align-size')
if tools.NotPowerOfTwo(self.align_size):
raise ValueError("Node '%s': Alignment size %s must be a power "
"of two" % (self._node.path, self.align_size))
self.align_end = fdt_util.GetInt(self._node, 'align-end')
self.pos_unset = fdt_util.GetBool(self._node, 'pos-unset')
def SetPrefix(self, prefix):
"""Set the name prefix for a node
Args:
prefix: Prefix to set, or '' to not use a prefix
"""
if prefix:
self.name = prefix + self.name
def ObtainContents(self):
"""Figure out the contents of an entry.
Returns:
True if the contents were found, False if another call is needed
after the other entries are processed.
"""
# No contents by default: subclasses can implement this
return True
def Pack(self, pos):
"""Figure out how to pack the entry into the section
Most of the time the entries are not fully specified. There may be
an alignment but no size. In that case we take the size from the
contents of the entry.
If an entry has no hard-coded position, it will be placed at @pos.
Once this function is complete, both the position and size of the
        entry will be known.
Args:
            pos: Current section position pointer
Returns:
New section position pointer (after this entry)
"""
if self.pos is None:
if self.pos_unset:
self.Raise('No position set with pos-unset: should another '
'entry provide this correct position?')
self.pos = tools.Align(pos, self.align)
needed = self.pad_before + self.contents_size + self.pad_after
needed = tools.Align(needed, self.align_size)
size = self.size
if not size:
size = needed
new_pos = self.pos + size
aligned_pos = tools.Align(new_pos, self.align_end)
if aligned_pos != new_pos:
size = aligned_pos - self.pos
new_pos = aligned_pos
if not self.size:
self.size = size
if self.size < needed:
self.Raise("Entry contents size is %#x (%d) but entry size is "
"%#x (%d)" % (needed, needed, self.size, self.size))
        # Check that the alignment is correct. It could be wrong if the
        # pos or size values were provided (i.e. not calculated) but
        # conflict with the provided alignment values
if self.size != tools.Align(self.size, self.align_size):
self.Raise("Size %#x (%d) does not match align-size %#x (%d)" %
(self.size, self.size, self.align_size, self.align_size))
if self.pos != tools.Align(self.pos, self.align):
self.Raise("Position %#x (%d) does not match align %#x (%d)" %
(self.pos, self.pos, self.align, self.align))
return new_pos
def Raise(self, msg):
"""Convenience function to raise an error referencing a node"""
raise ValueError("Node '%s': %s" % (self._node.path, msg))
def GetPath(self):
"""Get the path of a node
Returns:
Full path of the node for this entry
"""
return self._node.path
def GetData(self):
return self.data
def GetPositions(self):
return {}
def SetPositionSize(self, pos, size):
self.pos = pos
self.size = size
def ProcessContents(self):
pass
def WriteSymbols(self, section):
"""Write symbol values into binary files for access at run time
Args:
section: Section containing the entry
"""
pass
def CheckPosition(self):
"""Check that the entry positions are correct
This is used for entries which have extra position requirements (other
than having to be fully inside their section). Sub-classes can implement
this function and raise if there is a problem.
"""
pass
def WriteMap(self, fd, indent):
"""Write a map of the entry to a .map file
Args:
fd: File to write the map to
            indent: Current indent level of map (0=none, 1=one level, etc.)
"""
print('%s%08x %08x %s' % (' ' * indent, self.pos, self.size,
self.name), file=fd)
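The packing arithmetic in `Pack()` hinges on `tools.Align()` rounding a value up to the next multiple of a power-of-two alignment (with `None` meaning no alignment). A small worked example under that assumption:

```python
# Worked example of the Pack() arithmetic, assuming Align rounds up to a multiple.
def align_up(value, align):
    if not align:
        return value
    return (value + align - 1) & ~(align - 1)

# Entry with align=16, pad_before=4, contents_size=10, pad_after=0, align_size=8,
# packed at a current section pointer of 0x21:
pos = align_up(0x21, 16)      # 0x30 - start position aligned up
needed = 4 + 10 + 0           # pad_before + contents_size + pad_after
size = align_up(needed, 8)    # 16   - entry size rounded up to align-size
new_pos = pos + size          # 0x40 - where the next entry starts
print(hex(pos), size, hex(new_pos))
```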
avg_line_length: 35.511811 | max_line_length: 82 | alphanum_fraction: 0.599446
hexsha: f9b6cd01403202827461bdd7252577f1402274ca | size: 546 | ext: py | lang: Python
max_stars: tests/test_csmotormanager.py | BenBradnick/csmotormanager @ 80c287ae57bcad53eb01f0b791378c8af394dc73 | ["Apache-2.0"] | count: null
max_issues: tests/test_csmotormanager.py | BenBradnick/csmotormanager @ 80c287ae57bcad53eb01f0b791378c8af394dc73 | ["Apache-2.0"] | count: null
max_forks: tests/test_csmotormanager.py | BenBradnick/csmotormanager @ 80c287ae57bcad53eb01f0b791378c8af394dc73 | ["Apache-2.0"] | count: null
from csmotormanager import cli, hello
def test_hello_class_formats_greeting() -> None:
inst = hello.HelloClass("person")
assert inst.format_greeting() == "Hello person"
def test_hello_lots_defaults(capsys) -> None:
hello.say_hello_lots()
captured = capsys.readouterr()
assert captured.out == "Hello me\n" * 5
assert captured.err == ""
def test_cli(capsys) -> None:
cli.main(["person", "--times=2"])
captured = capsys.readouterr()
assert captured.out == "Hello person\n" * 2
assert captured.err == ""
avg_line_length: 26 | max_line_length: 51 | alphanum_fraction: 0.672161
hexsha: 78f780cf252c01f861bc9aeffc02073479c7bd6d | size: 752 | ext: py | lang: Python
max_stars: Multiple roi.py | guyfromthesky/OCR-Project @ c126c9844ddecbeefd1de6ae49074d3b56062df3 | ["MIT"] | count: null
max_issues: Multiple roi.py | guyfromthesky/OCR-Project @ c126c9844ddecbeefd1de6ae49074d3b56062df3 | ["MIT"] | count: null
max_forks: Multiple roi.py | guyfromthesky/OCR-Project @ c126c9844ddecbeefd1de6ae49074d3b56062df3 | ["MIT"] | count: null
import cv2
import numpy as np
#image_path
img_path= r"C:\Users\evan\Documents\GitHub\OCR-Project\sample.jpg"
#read image
img_raw = cv2.imread(img_path)
#select ROIs function
ROIs = cv2.selectROIs("Select Rois",img_raw)
#print rectangle points of selected roi
print(ROIs)
#Crop selected roi from raw image
#counter to save image with different name
crop_number=0
#loop over every bounding box saved in the array "ROIs"
for rect in ROIs:
    #selectROIs returns each rectangle as (x, y, w, h)
    x = rect[0]
    y = rect[1]
    w = rect[2]
    h = rect[3]
    #crop roi from original image
    img_crop = img_raw[y:y+h, x:x+w]
#show cropped image
cv2.imshow("crop"+str(crop_number),img_crop)
#save cropped image
cv2.imwrite("crop"+str(crop_number)+".jpeg",img_crop)
crop_number+=1
#hold windows until a key is pressed, then clean up
cv2.waitKey(0)
cv2.destroyAllWindows()
avg_line_length: 18.341463 | max_line_length: 66 | alphanum_fraction: 0.744681
hexsha: c6d46fbc30b376e9845ffc8e8a8a764fe0774139 | size: 4,436 | ext: py | lang: Python
max_stars: molmodmt/rmsd.py | LMMV/MolModMT @ 5725d6d5627b07edcbbd5e55318345a136b28c35 | ["MIT"] | count: null
max_issues: molmodmt/rmsd.py | LMMV/MolModMT @ 5725d6d5627b07edcbbd5e55318345a136b28c35 | ["MIT"] | count: null
max_forks: molmodmt/rmsd.py | LMMV/MolModMT @ 5725d6d5627b07edcbbd5e55318345a136b28c35 | ["MIT"] | count: null
import numpy as _np
from copy import deepcopy as _deepcopy
from .lib import rmsd as _librmsd
from .multitool import get_form as _get_form, select as _select, convert as _convert
from .utils.digest_inputs import _comparison_two_systems as _digest_comparison_two_systems
def rmsd(ref_item=None, ref_selection=None, ref_frame=0, item=None, selection='backbone',
parallel=False, precentered=True, syntaxis='mdtraj', engine='molmodmt'):
if item is None:
in_form=_get_form(ref_item)
output_item=ref_item
else:
in_form=_get_form(item)
output_item=item
if engine=='molmodmt':
x_form='molmodmt.Trajectory'
elif engine=='mdtraj':
x_form='mdtraj.Trajectory'
tmp_ref_item, ref_atom_indices, ref_frame_indices, \
tmp_item, atom_indices, frame_indices,\
single_item, diff_selection = _digest_comparison_two_systems(ref_item, ref_selection, ref_frame,
item, selection, 'all',
form=x_form, syntaxis=syntaxis)
if engine=='molmodmt':
tmp_coordinates=_np.asfortranarray(tmp_item.coordinates,dtype='float64')
tmp_ref_coordinates=_np.asfortranarray(tmp_ref_item.coordinates[ref_frame,:,:],dtype='float64')
rmsd_val=_librmsd.rmsd(tmp_ref_coordinates, ref_atom_indices,
tmp_coordinates, atom_indices,
tmp_ref_item.n_atoms, ref_atom_indices.shape[0],
tmp_item.n_frames, tmp_item.n_atoms, atom_indices.shape[0])
return rmsd_val
elif engine=='mdtraj':
from mdtraj import rmsd as _mdtraj_rmsd
rmsd_val = _mdtraj_rmsd(tmp_item, tmp_ref_item, frame=ref_frame_indices,
ref_atom_indices=ref_atom_indices, atom_indices=atom_indices,
parallel=parallel, precentered=precentered)
return rmsd_val
else:
raise NotImplementedError
def least_rmsd_fit(ref_item=None, item=None,
ref_selection=None, selection='backbone',
ref_frame_index=0, frame_indices='all',
engine='molmodmt',
parallel=True, syntaxis='mdtraj'):
if item is None:
in_form=_get_form(ref_item)
item=ref_item
else:
in_form=_get_form(item)
if engine=='molmodmt':
x_form='molmodmt.MolMod'
elif engine=='mdtraj':
x_form='mdtraj.Trajectory'
tmp_ref_item, ref_atom_indices, ref_frame_indices, \
tmp_item, atom_indices, frame_indices,\
single_item, diff_selection = _digest_comparison_two_systems(ref_item, ref_selection,
ref_frame_index,
item, selection, frame_indices,
form=x_form, syntaxis=syntaxis)
if engine=='molmodmt':
tmp_coordinates=_np.asfortranarray(tmp_item.coordinates,dtype='float64')
        tmp_ref_coordinates=_np.asfortranarray(tmp_ref_item.coordinates[ref_frame_index,:,:],dtype='float64')
_librmsd.least_rmsd_fit(tmp_ref_coordinates, ref_atom_indices,
tmp_coordinates, atom_indices,
tmp_ref_item.n_atoms, ref_atom_indices.shape[0],
tmp_item.n_frames, tmp_item.n_atoms, atom_indices.shape[0])
if in_form==x_form:
tmp_item.coordinates=_np.ascontiguousarray(tmp_coordinates)
elif in_form=='molmodmt.MolMod':
tmp_item.trajectory.coordinates=_np.ascontiguousarray(tmp_coordinates)
else:
tmp_item.coordinates=_np.ascontiguousarray(tmp_coordinates)
tmp_item=_convert(tmp_item,in_form)
elif engine=='mdtraj':
tmp_item.superpose(tmp_ref_item,frame=ref_frame_indices,atom_indices=atom_indices,ref_atom_indices=ref_atom_indices,parallel=parallel)
if in_form==x_form:
item=tmp_item
elif in_form=='molmodmt.Trajectory':
item._import_mdtraj_data(tmp_item)
elif in_form=='molmodmt.MolMod':
item.trajectory._import_mdtraj_data(tmp_item)
else:
item=_convert(tmp_item,in_form)
else:
raise NotImplementedError
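An illustrative call, assuming `traj` is already an object molmodmt can interpret (for the `mdtraj` engine, an `mdtraj.Trajectory`); with `item=None` both functions compare a trajectory against its own reference frame:

```python
# Illustrative only; `traj` is assumed to be a loaded trajectory object.
values = rmsd(ref_item=traj, selection='backbone', engine='mdtraj')   # per-frame RMSD vs frame 0
least_rmsd_fit(ref_item=traj, selection='backbone', engine='mdtraj')  # superpose frames in place
```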
avg_line_length: 41.457944 | max_line_length: 142 | alphanum_fraction: 0.624662
hexsha: c8ad1502b7cd9fe8f1efb985dc24bb7f7d3b8eda | size: 11,552 | ext: py | lang: Python
max_stars: mdstudio_gromacs/gromacs_topology.py | MD-Studio/lie_md @ fe85dbe164ffb119a8d1e4ca02e13b26d305fd38 | ["Apache-2.0"] | count: null
max_issues: mdstudio_gromacs/gromacs_topology.py | MD-Studio/lie_md @ fe85dbe164ffb119a8d1e4ca02e13b26d305fd38 | ["Apache-2.0"] | count: 12 | 2018-08-13T09:45:51.000Z → 2019-03-14T13:12:42.000Z
max_forks: mdstudio_gromacs/gromacs_topology.py | MD-Studio/lie_md @ fe85dbe164ffb119a8d1e4ca02e13b26d305fd38 | ["Apache-2.0"] | count: 1 | 2021-05-18T07:41:43.000Z → 2021-05-18T07:41:43.000Z
# -*- coding: utf-8 -*-
"""
file: gromacs_setup.py
Function for preparing input definitions for a GROMACS
Linear Interaction Energy MD calculation
"""
import os
import logging
import re
def correct_itp(topfile, topOutFn, posre=True, outitp={}, removeMols=[], replaceMols=[], excludePosre=[], excludeHH=[],
miscMols=[]):
"""
Correct hydrogen and heavy atom masses in the .itp file
makes position restraint file for the ligand
outitp={'atomtypes': {'outfile':'attype.itp', 'overwrite':True}}
"""
print("CORRECT ITP")
if posre:
posreNm = "%s-posre.itp"%os.path.splitext(os.path.basename(topOutFn))[0]
else:
posreNm = None
#read itp
print("READ TOP")
blocks, listBlocks, listMols = readCard(topfile)
print("REMOVE MOLS")
# remove mols; eg. WAT to be substituted with SOL in amber to gromacs conversion
blocks, listBlocks, listMols = topRmMols(blocks, listBlocks, removeMols)
print("REPLACE MOLS")
blocks, listBlocks=topReplaceMols(blocks, listBlocks, replaceMols)
print("HH")
#apply heavy hydrogens(HH)
newBlocks=heavyH(listBlocks, blocks, listMols, excludeList=excludeHH)
print("POSRES")
#create positional restraints file
if posre:
posreNm=outPosre(blocks, listBlocks, listMols, excludePosre)
else:
posreNm={}
print("ADD MOLS")
#add additional moleculetypes (e.g. solvent and ions)
miscBlocks, miscListBlocks, miscListMols=([], [], [])
for mol in miscMols:
b, lb, lm=readCard(mol)
miscBlocks+=b
miscListBlocks+=lb
miscListMols+=lm
fixNewBlocks, fixListBlocks=itpAddMols(blocks, listBlocks, miscBlocks, miscListBlocks)
# replace mols in system definition
print("OUT ITP")
#write corrected itp (with HH and no atomtype section
topOut, extItps=itpOut(fixNewBlocks, fixListBlocks, topOutFn, posre=posreNm, excludeList=outitp)
results={
'top':topOut,
'posre':[ posreNm[i] for i in posreNm],
'externalItps':extItps
}
return results
def readCard(filetop):
logging.debug('read topology')
blockNames=[]
listBlocks=[]
title=False
read=False
with open(filetop, 'r') as itp:
block=[]
for line in itp:
atom=[]
if line.startswith('#'):
if block!=[]:
listBlocks.append(block)
block=[]
listBlocks.append(line)
blockNames.append(None)
else:
                line_sp=re.split(r'\s+', line[:-1])
for item in line_sp:
if re.match(";", item):
break
elif item == "[":
if block!=[]:
listBlocks.append(block)
block=[]
title=True
read=False
elif item=="]":
title=False
read=True
elif (title==True) and (item!=''):
blockNames.append(item)
elif (read==True) and (item!=''):
atom.append(item)
if (atom!=[]):
block.append(atom)
if block!=[]:
listBlocks.append(block)
# for molecule get:
# name
# index of the block with atoms
# index of block with bonds
listMols=[]
mol={}
for nbl, blockNm in enumerate(blockNames):
if blockNm == 'moleculetype':
if len(mol)>0:
listMols.append(mol)
mol={}
mol['name']=listBlocks[nbl][0][0]
elif blockNm == 'atoms':
mol['atoms']=nbl
elif blockNm == 'bonds':
mol['bonds']=nbl
if len(mol)>0:
listMols.append(mol)
return (listBlocks, blockNames, listMols)
def topRmMols(blocks, blockNames, mols2Del):
print("TOP RM MOLS")
popOut=False
listOut=[]
for nbl, blName in enumerate(blockNames):
if blName=='moleculetype':
if blocks[nbl][0][0] in mols2Del:
popOut=True
else:
popOut=False
if blName=='system':
popOut=False
if popOut:
listOut.append(nbl)
listOut.sort(reverse=True)
print("EXCLUDE", listOut)
for nbl in listOut:
blocks.pop(nbl)
blockNames.pop(nbl)
print("CREATE LISTMOLS")
listMols=[]
mol={}
for nbl, blockNm in enumerate(blockNames):
if blockNm == 'moleculetype':
if len(mol)>0:
listMols.append(mol)
mol={}
mol['name']=blocks[nbl][0][0]
elif blockNm == 'atoms':
mol['atoms']=nbl
elif blockNm == 'bonds':
mol['bonds']=nbl
if len(mol)>0:
listMols.append(mol)
print("LISTMOLS ", listMols)
return (blocks, blockNames, listMols)
def topReplaceMols(blocks, blockNames, mols2Rep):
    # mols2Rep: [{'in':'WAT', 'out':'SOL'}, ..]
print('TOPREPLACE')
listin=[x['in'] for x in mols2Rep]
for nbl, blName in enumerate(blockNames):
if blName=='molecules':
for mol in blocks[nbl]:
if mol[0] in listin:
mol[0]=mols2Rep[listin.index(mol[0])]['out']
return (blocks, blockNames)
def heavyH(blockNames, blocks, listMols, excludeList=['WAT']):
'''Adjust the weights of hydrogens, and their heavy atom partner'''
for mol in listMols:
if mol['name'] not in excludeList:
for bond in blocks[mol['bonds']]:
for hI in [0, 1]:
if re.match("^h|^H", blocks[mol['atoms']][int(bond[hI])-1] [1]):
if hI==0:
hJ=1
elif hI==1:
hJ=0
## Change heavy atom (heavy -3*H)
blocks[mol['atoms']][int(bond[hJ])-1][7]=("%.5f" % ( float(blocks[mol['atoms']][int(bond[hJ])-1][7]) \
- float(blocks[mol['atoms']][int(bond[hI])-1][7])*3 ) )
## Change hydrogen (4*H)
blocks[mol['atoms']][int(bond[hI])-1][7]=("%.5f" % ( float(blocks[mol['atoms']][int(bond[hI])-1][7])*4) )
return(blocks)
def outPosre(blocks, listBlocks, listMols, excludeList):
outposre={}
for mol in listMols:
if mol['name'] not in excludeList:
oitp='%s-posre.itp'%mol['name']
outposre[mol['name']]=oitp
with open(oitp, "w") as outFile:
outFile.write(\
'#ifndef 1POSCOS\n\
#define 1POSCOS 10000\n\
#endif\n\
#ifndef 2POSCOS\n\
#define 2POSCOS 5000\n\
#endif\n\
#ifndef 3POSCOS\n\
#define 3POSCOS 2000\n\
#endif\n\
#ifndef 4POSCOS\n\
#define 4POSCOS 1000\n\
#endif\n\
[ position_restraints ]\n')
for atom in blocks[mol['atoms']]:
if not atom[4].startswith('H'):
if atom[3] == 'HEM':
outFile.write("%-4s 1 1POSCOS 1POSCOS 1POSCOS\n" % atom[0])
elif atom[4] in ['CA', 'N', 'O', 'C']:
outFile.write("%-4s 1 1POSCOS 1POSCOS 1POSCOS\n" % atom[0])
elif atom[4] in ['CB']:
outFile.write("%-4s 1 2POSCOS 2POSCOS 2POSCOS\n" % atom[0])
elif atom[4] in ['CG']:
outFile.write("%-4s 1 3POSCOS 3POSCOS 3POSCOS\n" % atom[0])
else:
outFile.write("%-4s 1 4POSCOS 4POSCOS 4POSCOS\n" % atom[0])
return outposre
def itpAddMols(blocks, nameBlocks, miscBlocks, miscNameBlocks):
##FIX ATOMTYPES
idxTypes=nameBlocks.index('atomtypes')
idxNewTypes=[ i for i, x in enumerate(miscNameBlocks) if x=='atomtypes']
for AttypeBlock in idxNewTypes:
for newAtm in miscBlocks[AttypeBlock]:
addAtm=True
for atm in blocks[idxTypes]:
if newAtm[0]==atm[0]:
addAtm=False
break
if addAtm:
blocks[idxTypes].append(newAtm)
## ADD MOLECULETYPE
# new molecules are added before the system statement
idxSystem=nameBlocks.index('system')
blNoAty=0
for bl in range(len(miscNameBlocks)):
if bl not in idxNewTypes:
insIdx=idxSystem+blNoAty
blocks.insert(insIdx, miscBlocks[bl])
nameBlocks.insert(insIdx, miscNameBlocks[bl])
blNoAty+=1
return blocks, nameBlocks
def itpOut(blocks, nameBlocks, oitp, posre, excludeList={}):
'''write new top. blocks defined in excludeList are removed and saved in the file 'outfile'. e.g atomtypes'''
def outPosre(posreFN):
outFile.write('#ifdef POSRES\n#include "%s"\n#endif\n\n'%posreFN)
def outBlock(blockName, block, output):
output.write("[ %s ]\n"%blockName)
outFormat=defineFMTblock(block)
for item in block:
output.write(outFormat.format(d=item))
extItps=[]
with open(oitp, "w") as outFile:
molWithPosre=False
molName=None
for nbl, blockName in enumerate(nameBlocks):
if blockName is None: # preprocessing instructions
outFile.write(blocks[nbl])
elif blockName in excludeList: # specific itp
#WRITE EXTERNAL ITP TO INCLUDE IF REQUIRED
if excludeList[blockName]['overwrite']:
openMode='w'
else:
openMode='a'
with open(excludeList[blockName]['outfile'], openMode) as outItp:
outBlock(blockName, blocks[nbl], outItp)
extItps.append(excludeList[blockName]['outfile'])
outFile.write('#include "%s"\n\n'%excludeList[blockName]['outfile'])
# outitp
else:
# WRITE INCLUDE POSRE IF REQUIRED
if blockName=='moleculetype':
if molWithPosre:
outPosre(posre[molName])
molName=blocks[nbl][0][0]
if molName in posre:
molWithPosre=True
else:
molWithPosre=False
if blockName=='system':
if molWithPosre:
outPosre(posre[molName])
# PRINT OUT BLOCK
outBlock(blockName, blocks[nbl], outFile)
outFile.write("\n")
return oitp, extItps
def defineFMTblock(block):
listFmt=[]
for atom in block:
for i, item in enumerate(atom):
try:
listFmt[i].append(len(item))
except IndexError:
listFmt.append([len(item)])
nchars=[max(x)+2 for x in listFmt]
fmtOut=""
for n, col in enumerate(nchars):
fmtOut=fmtOut+"{d[%d]:>%ds}"%(n, col)
fmtOut=fmtOut+"\n"
return fmtOut
def correctAttype(itp, newtypes):
oldtypes=[x[0] for x in itp['atomtypes']]
for attype in newtypes:
if not attype[0] in oldtypes:
itp['atomtypes'].append(attype)
return itp
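An illustrative invocation (all file names are hypothetical): convert an AMBER-derived topology, replace `WAT` with `SOL`, apply heavy hydrogens everywhere except the solvent, split the atomtypes section into its own include file, and generate position-restraint files:

```python
# Illustrative call; file names are hypothetical.
results = correct_itp(
    "system.top", "system_HH.top",
    posre=True,
    replaceMols=[{"in": "WAT", "out": "SOL"}],
    excludeHH=["SOL"],
    excludePosre=["SOL"],
    outitp={"atomtypes": {"outfile": "attype.itp", "overwrite": True}},
)
print(results["top"], results["posre"], results["externalItps"])
```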
avg_line_length: 31.562842 | max_line_length: 135 | alphanum_fraction: 0.522247
hexsha: 100b6b7eda9d5ca9f1a23a3e57137855b2b312c2 | size: 88 | ext: py | lang: Python
max_stars: passwords/data.py | k3170makan/PyMLProjects @ 0676bb89119d509c9c44d7af8820aa8d620d0e4a | ["MIT"] | count: 156 | 2017-07-26T17:33:24.000Z → 2021-11-17T16:52:20.000Z
max_issues: passwords/data.py | k3170makan/PyMLProjects @ 0676bb89119d509c9c44d7af8820aa8d620d0e4a | ["MIT"] | count: 1 | 2017-09-01T01:34:35.000Z → 2017-09-01T01:34:35.000Z
max_forks: passwords/data.py | k3170makan/PyMLProjects @ 0676bb89119d509c9c44d7af8820aa8d620d0e4a | ["MIT"] | count: 29 | 2017-07-30T13:39:45.000Z → 2021-06-01T06:17:51.000Z
DATA_LIB="../data/"
DATA_SOURCE="downloads.skullsecurity.org/passwords/rockyou.txt.bz2"
avg_line_length: 29.333333 | max_line_length: 67 | alphanum_fraction: 0.795455
hexsha: 514e7815b08f51c9304a5eac01b0287489c95e78 | size: 8,245 | ext: py | lang: Python
max_stars: themes/default/base16-synth-midnight-dark.config.py | dgmulf/base16-qutebrowser @ 3d71ea89adfb3ede9eee2f9764d4a59d26fe4f9b | ["MIT"] | count: null
max_issues: themes/default/base16-synth-midnight-dark.config.py | dgmulf/base16-qutebrowser @ 3d71ea89adfb3ede9eee2f9764d4a59d26fe4f9b | ["MIT"] | count: null
max_forks: themes/default/base16-synth-midnight-dark.config.py | dgmulf/base16-qutebrowser @ 3d71ea89adfb3ede9eee2f9764d4a59d26fe4f9b | ["MIT"] | count: null
# base16-qutebrowser (https://github.com/theova/base16-qutebrowser)
# Base16 qutebrowser template by theova
# Synth Midnight Dark scheme by Michaël Ball (http://github.com/michael-ball/)
base00 = "#040404"
base01 = "#141414"
base02 = "#242424"
base03 = "#61507A"
base04 = "#BFBBBF"
base05 = "#DFDBDF"
base06 = "#EFEBEF"
base07 = "#FFFBFF"
base08 = "#B53B50"
base09 = "#E4600E"
base0A = "#DAE84D"
base0B = "#06EA61"
base0C = "#7CEDE9"
base0D = "#03AEFF"
base0E = "#EA5CE2"
base0F = "#9D4D0E"
# set qutebrowser colors
# Text color of the completion widget. May be a single color to use for
# all columns or a list of three colors, one for each column.
c.colors.completion.fg = base05
# Background color of the completion widget for odd rows.
c.colors.completion.odd.bg = base03
# Background color of the completion widget for even rows.
c.colors.completion.even.bg = base00
# Foreground color of completion widget category headers.
c.colors.completion.category.fg = base0A
# Background color of the completion widget category headers.
c.colors.completion.category.bg = base00
# Top border color of the completion widget category headers.
c.colors.completion.category.border.top = base00
# Bottom border color of the completion widget category headers.
c.colors.completion.category.border.bottom = base00
# Foreground color of the selected completion item.
c.colors.completion.item.selected.fg = base01
# Background color of the selected completion item.
c.colors.completion.item.selected.bg = base0A
# Top border color of the selected completion item.
c.colors.completion.item.selected.border.top = base0A
# Bottom border color of the selected completion item.
c.colors.completion.item.selected.border.bottom = base0A
# Foreground color of the matched text in the selected completion item.
c.colors.completion.item.selected.match.fg = base08
# Foreground color of the matched text in the completion.
c.colors.completion.match.fg = base0B
# Color of the scrollbar handle in the completion view.
c.colors.completion.scrollbar.fg = base05
# Color of the scrollbar in the completion view.
c.colors.completion.scrollbar.bg = base00
# Background color for the download bar.
c.colors.downloads.bar.bg = base00
# Color gradient start for download text.
c.colors.downloads.start.fg = base00
# Color gradient start for download backgrounds.
c.colors.downloads.start.bg = base0D
# Color gradient end for download text.
c.colors.downloads.stop.fg = base00
# Color gradient stop for download backgrounds.
c.colors.downloads.stop.bg = base0C
# Foreground color for downloads with errors.
c.colors.downloads.error.fg = base08
# Font color for hints.
c.colors.hints.fg = base00
# Background color for hints. Note that you can use a `rgba(...)` value
# for transparency.
c.colors.hints.bg = base0A
# Font color for the matched part of hints.
c.colors.hints.match.fg = base05
# Text color for the keyhint widget.
c.colors.keyhint.fg = base05
# Highlight color for keys to complete the current keychain.
c.colors.keyhint.suffix.fg = base05
# Background color of the keyhint widget.
c.colors.keyhint.bg = base00
# Foreground color of an error message.
c.colors.messages.error.fg = base00
# Background color of an error message.
c.colors.messages.error.bg = base08
# Border color of an error message.
c.colors.messages.error.border = base08
# Foreground color of a warning message.
c.colors.messages.warning.fg = base00
# Background color of a warning message.
c.colors.messages.warning.bg = base0E
# Border color of a warning message.
c.colors.messages.warning.border = base0E
# Foreground color of an info message.
c.colors.messages.info.fg = base05
# Background color of an info message.
c.colors.messages.info.bg = base00
# Border color of an info message.
c.colors.messages.info.border = base00
# Foreground color for prompts.
c.colors.prompts.fg = base05
# Border used around UI elements in prompts.
c.colors.prompts.border = base00
# Background color for prompts.
c.colors.prompts.bg = base00
# Background color for the selected item in filename prompts.
c.colors.prompts.selected.bg = base0A
# Foreground color of the statusbar.
c.colors.statusbar.normal.fg = base0B
# Background color of the statusbar.
c.colors.statusbar.normal.bg = base00
# Foreground color of the statusbar in insert mode.
c.colors.statusbar.insert.fg = base00
# Background color of the statusbar in insert mode.
c.colors.statusbar.insert.bg = base0D
# Foreground color of the statusbar in passthrough mode.
c.colors.statusbar.passthrough.fg = base00
# Background color of the statusbar in passthrough mode.
c.colors.statusbar.passthrough.bg = base0C
# Foreground color of the statusbar in private browsing mode.
c.colors.statusbar.private.fg = base00
# Background color of the statusbar in private browsing mode.
c.colors.statusbar.private.bg = base03
# Foreground color of the statusbar in command mode.
c.colors.statusbar.command.fg = base05
# Background color of the statusbar in command mode.
c.colors.statusbar.command.bg = base00
# Foreground color of the statusbar in private browsing + command mode.
c.colors.statusbar.command.private.fg = base05
# Background color of the statusbar in private browsing + command mode.
c.colors.statusbar.command.private.bg = base00
# Foreground color of the statusbar in caret mode.
c.colors.statusbar.caret.fg = base00
# Background color of the statusbar in caret mode.
c.colors.statusbar.caret.bg = base0E
# Foreground color of the statusbar in caret mode with a selection.
c.colors.statusbar.caret.selection.fg = base00
# Background color of the statusbar in caret mode with a selection.
c.colors.statusbar.caret.selection.bg = base0D
# Background color of the progress bar.
c.colors.statusbar.progress.bg = base0D
# Default foreground color of the URL in the statusbar.
c.colors.statusbar.url.fg = base05
# Foreground color of the URL in the statusbar on error.
c.colors.statusbar.url.error.fg = base08
# Foreground color of the URL in the statusbar for hovered links.
c.colors.statusbar.url.hover.fg = base05
# Foreground color of the URL in the statusbar on successful load
# (http).
c.colors.statusbar.url.success.http.fg = base0C
# Foreground color of the URL in the statusbar on successful load
# (https).
c.colors.statusbar.url.success.https.fg = base0B
# Foreground color of the URL in the statusbar when there's a warning.
c.colors.statusbar.url.warn.fg = base0E
# Background color of the tab bar.
c.colors.tabs.bar.bg = base00
# Color gradient start for the tab indicator.
c.colors.tabs.indicator.start = base0D
# Color gradient end for the tab indicator.
c.colors.tabs.indicator.stop = base0C
# Color for the tab indicator on errors.
c.colors.tabs.indicator.error = base08
# Foreground color of unselected odd tabs.
c.colors.tabs.odd.fg = base05
# Background color of unselected odd tabs.
c.colors.tabs.odd.bg = base03
# Foreground color of unselected even tabs.
c.colors.tabs.even.fg = base05
# Background color of unselected even tabs.
c.colors.tabs.even.bg = base00
# Background color of pinned unselected even tabs.
c.colors.tabs.pinned.even.bg = base0C
# Foreground color of pinned unselected even tabs.
c.colors.tabs.pinned.even.fg = base07
# Background color of pinned unselected odd tabs.
c.colors.tabs.pinned.odd.bg = base0B
# Foreground color of pinned unselected odd tabs.
c.colors.tabs.pinned.odd.fg = base07
# Background color of pinned selected even tabs.
c.colors.tabs.pinned.selected.even.bg = base05
# Foreground color of pinned selected even tabs.
c.colors.tabs.pinned.selected.even.fg = base00
# Background color of pinned selected odd tabs.
c.colors.tabs.pinned.selected.odd.bg = base05
# Foreground color of pinned selected odd tabs.
c.colors.tabs.pinned.selected.odd.fg = base0E
# Foreground color of selected odd tabs.
c.colors.tabs.selected.odd.fg = base00
# Background color of selected odd tabs.
c.colors.tabs.selected.odd.bg = base05
# Foreground color of selected even tabs.
c.colors.tabs.selected.even.fg = base00
# Background color of selected even tabs.
c.colors.tabs.selected.even.bg = base05
# Background color for webpages if unset (or empty to use the theme's
# color).
# c.colors.webpage.bg = base00
avg_line_length: 29.446429 | max_line_length: 78 | alphanum_fraction: 0.771377
hexsha: 01a3c43317c4c7cd220324d22c1117e705d108e7 | size: 6,861 | ext: py | lang: Python
max_stars: tests/scenario_tests_async/test_authorize.py | stevengill/bolt-python @ e29e061928fc62c80eb162fab54dd09108e45d05 | ["MIT"] | count: null
max_issues: tests/scenario_tests_async/test_authorize.py | stevengill/bolt-python @ e29e061928fc62c80eb162fab54dd09108e45d05 | ["MIT"] | count: null
max_forks: tests/scenario_tests_async/test_authorize.py | stevengill/bolt-python @ e29e061928fc62c80eb162fab54dd09108e45d05 | ["MIT"] | count: 1 | 2022-03-26T12:13:53.000Z → 2022-03-26T12:13:53.000Z
import asyncio
import json
from time import time
from urllib.parse import quote
import pytest
from slack_sdk.signature import SignatureVerifier
from slack_sdk.web.async_client import AsyncWebClient
from slack_bolt.app.async_app import AsyncApp
from slack_bolt.authorization import AuthorizeResult
from slack_bolt.request.async_request import AsyncBoltRequest
from tests.mock_web_api_server import (
setup_mock_web_api_server,
cleanup_mock_web_api_server,
)
from tests.utils import remove_os_env_temporarily, restore_os_env
valid_token = "xoxb-valid"
valid_user_token = "xoxp-valid"
async def authorize(enterprise_id, team_id, user_id, client: AsyncWebClient):
assert enterprise_id == "E111"
assert team_id == "T111"
assert user_id == "W99999"
auth_test = await client.auth_test(token=valid_token)
return AuthorizeResult.from_auth_test_response(
auth_test_response=auth_test,
bot_token=valid_token,
)
async def user_authorize(enterprise_id, team_id, user_id, client: AsyncWebClient):
assert enterprise_id == "E111"
assert team_id == "T111"
assert user_id == "W99999"
auth_test = await client.auth_test(token=valid_user_token)
return AuthorizeResult.from_auth_test_response(
auth_test_response=auth_test,
user_token=valid_user_token,
)
async def error_authorize(enterprise_id, team_id, user_id):
assert enterprise_id == "E111"
assert team_id == "T111"
assert user_id == "W99999"
return None
class TestAsyncAuthorize:
signing_secret = "secret"
mock_api_server_base_url = "http://localhost:8888"
signature_verifier = SignatureVerifier(signing_secret)
web_client = AsyncWebClient(
token=valid_token,
base_url=mock_api_server_base_url,
)
@pytest.fixture
def event_loop(self):
old_os_env = remove_os_env_temporarily()
try:
setup_mock_web_api_server(self)
loop = asyncio.get_event_loop()
yield loop
loop.close()
cleanup_mock_web_api_server(self)
finally:
restore_os_env(old_os_env)
def generate_signature(self, body: str, timestamp: str):
return self.signature_verifier.generate_signature(
body=body,
timestamp=timestamp,
)
def build_headers(self, timestamp: str, body: str):
return {
"content-type": ["application/x-www-form-urlencoded"],
"x-slack-signature": [self.generate_signature(body, timestamp)],
"x-slack-request-timestamp": [timestamp],
}
def build_valid_request(self) -> AsyncBoltRequest:
timestamp = str(int(time()))
return AsyncBoltRequest(
body=raw_body, headers=self.build_headers(timestamp, raw_body)
)
@pytest.mark.asyncio
async def test_success(self):
app = AsyncApp(
client=self.web_client,
authorize=authorize,
signing_secret=self.signing_secret,
)
app.action("a")(simple_listener)
request = self.build_valid_request()
response = await app.async_dispatch(request)
assert response.status == 200
assert response.body == ""
assert self.mock_received_requests["/auth.test"] == 1
@pytest.mark.asyncio
async def test_failure(self):
app = AsyncApp(
client=self.web_client,
authorize=error_authorize,
signing_secret=self.signing_secret,
)
app.block_action("a")(simple_listener)
request = self.build_valid_request()
response = await app.async_dispatch(request)
assert response.status == 200
assert response.body == ":x: Please install this app into the workspace :bow:"
        assert self.mock_received_requests.get("/auth.test") is None
@pytest.mark.asyncio
async def test_bot_context_attributes(self):
app = AsyncApp(
client=self.web_client,
authorize=authorize,
signing_secret=self.signing_secret,
)
app.action("a")(assert_bot_context_attributes)
request = self.build_valid_request()
response = await app.async_dispatch(request)
assert response.status == 200
assert response.body == ""
assert self.mock_received_requests["/auth.test"] == 1
@pytest.mark.asyncio
async def test_user_context_attributes(self):
app = AsyncApp(
client=self.web_client,
authorize=user_authorize,
signing_secret=self.signing_secret,
)
app.action("a")(assert_user_context_attributes)
request = self.build_valid_request()
response = await app.async_dispatch(request)
assert response.status == 200
assert response.body == ""
assert self.mock_received_requests["/auth.test"] == 1
body = {
"type": "block_actions",
"user": {
"id": "W99999",
"username": "primary-owner",
"name": "primary-owner",
"team_id": "T111",
},
"api_app_id": "A111",
"token": "verification_token",
"container": {
"type": "message",
"message_ts": "111.222",
"channel_id": "C111",
"is_ephemeral": True,
},
"trigger_id": "111.222.valid",
"team": {
"id": "T111",
"domain": "workspace-domain",
"enterprise_id": "E111",
"enterprise_name": "Sandbox Org",
},
"channel": {"id": "C111", "name": "test-channel"},
"response_url": "https://hooks.slack.com/actions/T111/111/random-value",
"actions": [
{
"action_id": "a",
"block_id": "b",
"text": {"type": "plain_text", "text": "Button", "emoji": True},
"value": "click_me_123",
"type": "button",
"action_ts": "1596530385.194939",
}
],
}
raw_body = f"payload={quote(json.dumps(body))}"
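# Context note (not in the original file): Slack delivers block_actions
# interactions form-encoded as payload=<URL-encoded JSON>, which is exactly
# what raw_body reconstructs from the dict above.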
async def simple_listener(ack, body, payload, action):
assert body["trigger_id"] == "111.222.valid"
assert body["actions"][0] == payload
assert payload == action
assert action["action_id"] == "a"
await ack()
async def assert_bot_context_attributes(ack, context):
assert context["bot_id"] == "BZYBOTHED"
assert context["bot_user_id"] == "W23456789"
assert context["bot_token"] == "xoxb-valid"
assert context["token"] == "xoxb-valid"
assert context["user_id"] == "W99999"
assert context.get("user_token") is None
await ack()
async def assert_user_context_attributes(ack, context):
assert context.get("bot_id") is None
assert context.get("bot_user_id") is None
assert context.get("bot_token") is None
assert context["token"] == "xoxp-valid"
assert context["user_id"] == "W99999"
assert context["user_token"] == "xoxp-valid"
await ack()
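# Background sketch (not part of the original test module): Slack's v0 request
# signature, which SignatureVerifier.generate_signature() computes above, is an
# HMAC-SHA256 over "v0:{timestamp}:{body}" keyed with the signing secret.
# A minimal hand-rolled equivalent using only the standard library:
#
#     import hashlib
#     import hmac
#
#     def sign_v0(signing_secret: str, timestamp: str, body: str) -> str:
#         base = "v0:{}:{}".format(timestamp, body).encode("utf-8")
#         mac = hmac.new(signing_secret.encode("utf-8"), base, hashlib.sha256)
#         return "v0=" + mac.hexdigest()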
| 31.328767
| 86
| 0.643201
|
b8f45c6bb46b395c11c430d97789b5450c17a774
| 725
|
py
|
Python
|
setup.py
|
Spajderix/tinyserializable
|
bc2896681a762dccaeb8877d189c002983a48225
|
[
"MIT"
] | 1
|
2021-08-01T11:24:57.000Z
|
2021-08-01T11:24:57.000Z
|
setup.py
|
Spajderix/tinyserializable
|
bc2896681a762dccaeb8877d189c002983a48225
|
[
"MIT"
] | null | null | null |
setup.py
|
Spajderix/tinyserializable
|
bc2896681a762dccaeb8877d189c002983a48225
|
[
"MIT"
] | null | null | null |
import setuptools
import tinyserializable
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="tinyserializable",
version=tinyserializable.__version__,
author="Spajderix",
author_email="spajderix@gmail.com",
description="Library to allow of creation of serializable/deserializable class-based structures",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/Spajderix/tinyserializable",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
| 31.521739
| 101
| 0.711724
|
333e35548fbb4ff9796752b99877e4bd0c4b57e3
| 513
|
py
|
Python
|
test/test_message.py
|
bilmyers/pyflock
|
b440ffbcd6a18c0d81b81dcdcbae7ae16c025d39
|
[
"Apache-2.0"
] | 14
|
2017-02-14T07:02:59.000Z
|
2022-03-30T13:59:59.000Z
|
test/test_message.py
|
bilmyers/pyflock
|
b440ffbcd6a18c0d81b81dcdcbae7ae16c025d39
|
[
"Apache-2.0"
] | 10
|
2016-10-22T20:52:00.000Z
|
2021-05-10T10:40:30.000Z
|
test/test_message.py
|
bilmyers/pyflock
|
b440ffbcd6a18c0d81b81dcdcbae7ae16c025d39
|
[
"Apache-2.0"
] | 8
|
2017-03-03T13:16:34.000Z
|
2020-07-23T17:59:54.000Z
|
# coding: utf-8
from __future__ import absolute_import
import os
import sys
import unittest
import flockos
from flockos.rest import ApiException
from flockos.models.message import Message
class TestMessage(unittest.TestCase):
""" Message unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testMessage(self):
"""
Test Message
"""
model = flockos.models.message.Message()
if __name__ == '__main__':
unittest.main()
| 15.088235
| 48
| 0.65692
|
b35de01b56e43138403ec288a1265af1154ee55a
| 94
|
py
|
Python
|
calculator/__init__.py
|
chlemagne/python-oop-calculator
|
0259ce0f7a72faab60b058588a6838fe107e88eb
|
[
"MIT"
] | null | null | null |
calculator/__init__.py
|
chlemagne/python-oop-calculator
|
0259ce0f7a72faab60b058588a6838fe107e88eb
|
[
"MIT"
] | null | null | null |
calculator/__init__.py
|
chlemagne/python-oop-calculator
|
0259ce0f7a72faab60b058588a6838fe107e88eb
|
[
"MIT"
] | null | null | null |
""" Global constants.
"""
import os
BASE_DIR = os.path.dirname(os.path.realpath(__file__))
| 11.75
| 54
| 0.702128
|
68f67d6a02e4b2b265964442709b5f58f6b32756
| 23,233
|
py
|
Python
|
vyper/parser/parser_utils.py
|
mkeen/vyper
|
0d92d86752bcfca875e042cec6048488db3d479c
|
[
"MIT"
] | 1
|
2021-01-06T21:26:16.000Z
|
2021-01-06T21:26:16.000Z
|
vyper/parser/parser_utils.py
|
mkeen/vyper
|
0d92d86752bcfca875e042cec6048488db3d479c
|
[
"MIT"
] | null | null | null |
vyper/parser/parser_utils.py
|
mkeen/vyper
|
0d92d86752bcfca875e042cec6048488db3d479c
|
[
"MIT"
] | null | null | null |
import re
from evm.constants import GAS_IDENTITY, GAS_IDENTITYWORD
from vyper.exceptions import TypeMismatchException
from vyper.opcodes import comb_opcodes
from vyper.types import (
BaseType,
ByteArrayType,
NodeType,
NullType,
StructType,
MappingType,
TupleType,
ListType,
)
from vyper.types import (
is_base_type,
are_units_compatible,
get_size_of_type,
ceil32
)
from vyper.utils import MemoryPositions, DECIMAL_DIVISOR
class NullAttractor():
def __add__(self, other):
return NullAttractor()
def __repr__(self):
return 'None'
__radd__ = __add__
__mul__ = __add__
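# NullAttractor absorbs arithmetic: adding to it (or multiplying it) always
# yields another NullAttractor, which is how the 'lll' case below opts a whole
# subtree out of gas accounting without special-casing every sum.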
# Data structure for LLL parse tree
class LLLnode():
repr_show_gas = False
def __init__(self, value, args=None, typ=None, location=None, pos=None, annotation='', mutable=True, add_gas_estimate=0):
if args is None:
args = []
self.value = value
self.args = args
self.typ = typ
assert isinstance(self.typ, NodeType) or self.typ is None, repr(self.typ)
self.location = location
self.pos = pos
self.annotation = annotation
self.mutable = mutable
self.add_gas_estimate = add_gas_estimate
# Determine this node's valency (1 if it pushes a value on the stack,
# 0 otherwise) and checks to make sure the number and valencies of
# children are correct. Also, find an upper bound on gas consumption
# Numbers
if isinstance(self.value, int):
self.valency = 1
self.gas = 5
elif isinstance(self.value, str):
# Opcodes and pseudo-opcodes (e.g. clamp)
if self.value.upper() in comb_opcodes:
_, ins, outs, gas = comb_opcodes[self.value.upper()]
self.valency = outs
if len(self.args) != ins:
raise Exception("Number of arguments mismatched: %r %r" % (self.value, self.args))
# We add 2 per stack height at push time and take it back
# at pop time; this makes `break` easier to handle
self.gas = gas + 2 * (outs - ins)
for arg in self.args:
if arg.valency == 0:
raise Exception("Can't have a zerovalent argument to an opcode or a pseudo-opcode! %r" % arg)
self.gas += arg.gas
# Dynamic gas cost: 8 gas for each byte of logging data
if self.value.upper()[0:3] == 'LOG' and isinstance(self.args[1].value, int):
self.gas += self.args[1].value * 8
# Dynamic gas cost: non-zero-valued call
if self.value.upper() == 'CALL' and self.args[2].value != 0:
self.gas += 34000
# Dynamic gas cost: filling sstore (ie. not clearing)
elif self.value.upper() == 'SSTORE' and self.args[1].value != 0:
self.gas += 15000
# Dynamic gas cost: calldatacopy
elif self.value.upper() in ('CALLDATACOPY', 'CODECOPY'):
size = 34000
if isinstance(self.args[2].value, int):
size = self.args[2].value
elif isinstance(self.args[2], LLLnode) and len(self.args[2].args) > 0:
                        size = self.args[2].args[-1].value
self.gas += ceil32(size) // 32 * 3
# Gas limits in call
if self.value.upper() == 'CALL' and isinstance(self.args[0].value, int):
self.gas += self.args[0].value
# If statements
elif self.value == 'if':
if len(self.args) == 3:
self.gas = self.args[0].gas + max(self.args[1].gas, self.args[2].gas) + 3
if self.args[1].valency != self.args[2].valency:
raise Exception("Valency mismatch between then and else clause: %r %r" % (self.args[1], self.args[2]))
if len(self.args) == 2:
self.gas = self.args[0].gas + self.args[1].gas + 17
if self.args[1].valency:
raise Exception("2-clause if statement must have a zerovalent body: %r" % self.args[1])
if not self.args[0].valency:
raise Exception("Can't have a zerovalent argument as a test to an if statement! %r" % self.args[0])
if len(self.args) not in (2, 3):
raise Exception("If can only have 2 or 3 arguments")
self.valency = self.args[1].valency
# With statements: with <var> <initial> <statement>
elif self.value == 'with':
if len(self.args) != 3:
raise Exception("With statement must have 3 arguments")
if len(self.args[0].args) or not isinstance(self.args[0].value, str):
raise Exception("First argument to with statement must be a variable")
if not self.args[1].valency:
raise Exception("Second argument to with statement (initial value) cannot be zerovalent: %r" % self.args[1])
self.valency = self.args[2].valency
self.gas = sum([arg.gas for arg in self.args]) + 5
# Repeat statements: repeat <index_memloc> <startval> <rounds> <body>
elif self.value == 'repeat':
if len(self.args[2].args) or not isinstance(self.args[2].value, int) or self.args[2].value <= 0:
raise Exception("Number of times repeated must be a constant nonzero positive integer: %r" % self.args[2])
if not self.args[0].valency:
raise Exception("First argument to repeat (memory location) cannot be zerovalent: %r" % self.args[0])
if not self.args[1].valency:
raise Exception("Second argument to repeat (start value) cannot be zerovalent: %r" % self.args[1])
if self.args[3].valency:
raise Exception("Third argument to repeat (clause to be repeated) must be zerovalent: %r" % self.args[3])
self.valency = 0
if self.args[1].value == 'mload' or self.args[1].value == 'sload':
rounds = self.args[2].value
else:
rounds = abs(self.args[2].value - self.args[1].value)
self.gas = rounds * (self.args[3].gas + 50) + 30
# Seq statements: seq <statement> <statement> ...
elif self.value == 'seq':
self.valency = self.args[-1].valency if self.args else 0
self.gas = sum([arg.gas for arg in self.args]) + 30
# Multi statements: multi <expr> <expr> ...
elif self.value == 'multi':
for arg in self.args:
if not arg.valency:
raise Exception("Multi expects all children to not be zerovalent: %r" % arg)
self.valency = sum([arg.valency for arg in self.args])
self.gas = sum([arg.gas for arg in self.args])
# LLL brackets (don't bother gas counting)
elif self.value == 'lll':
self.valency = 1
self.gas = NullAttractor()
# Stack variables
else:
self.valency = 1
self.gas = 5
elif self.value is None and isinstance(self.typ, NullType):
self.valency = 1
self.gas = 5
else:
raise Exception("Invalid value for LLL AST node: %r" % self.value)
assert isinstance(self.args, list)
self.gas += self.add_gas_estimate
def to_list(self):
return [self.value] + [a.to_list() for a in self.args]
def repr(self):
if not len(self.args):
if self.annotation:
return '%r <%s>' % (self.value, self.annotation)
else:
return str(self.value)
# x = repr(self.to_list())
# if len(x) < 80:
# return x
o = ''
if self.annotation:
o += '/* %s */ \n' % self.annotation
if self.repr_show_gas and self.gas:
OKBLUE = '\033[94m'
ENDC = '\033[0m'
o += OKBLUE + "{" + ENDC + str(self.gas) + OKBLUE + "} " + ENDC # add gas for info.
o += '[' + str(self.value)
prev_lineno = self.pos[0] if self.pos else None
arg_lineno = None
annotated = False
has_inner_newlines = False
for arg in self.args:
o += ',\n '
arg_lineno = arg.pos[0] if arg.pos else None
if arg_lineno is not None and arg_lineno != prev_lineno and self.value in ('seq', 'if'):
o += '# Line %d\n ' % (arg_lineno)
prev_lineno = arg_lineno
annotated = True
arg_repr = arg.repr()
if '\n' in arg_repr:
has_inner_newlines = True
sub = arg_repr.replace('\n', '\n ').strip(' ')
o += sub
output = o.rstrip(' ') + ']'
output_on_one_line = re.sub(r',\n *', ', ', output).replace('\n', '')
if (len(output_on_one_line) < 80 or len(self.args) == 1) and not annotated and not has_inner_newlines:
return output_on_one_line
else:
return output
def __repr__(self):
return self.repr()
@classmethod
def from_list(cls, obj, typ=None, location=None, pos=None, annotation=None, mutable=True, add_gas_estimate=0):
if isinstance(typ, str):
typ = BaseType(typ)
if isinstance(obj, LLLnode):
if obj.pos is None:
obj.pos = pos
if obj.location is None:
obj.location = location
return obj
elif not isinstance(obj, list):
return cls(obj, [], typ, location, pos, annotation, mutable, add_gas_estimate=add_gas_estimate)
else:
return cls(obj[0], [cls.from_list(o, pos=pos) for o in obj[1:]], typ, location, pos, annotation, mutable, add_gas_estimate=add_gas_estimate)
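# Illustration (hypothetical values, not from the original source): from_list
# turns nested Python lists into an LLL parse tree, e.g.
#
#     node = LLLnode.from_list(['add', 3, ['mul', 4, 5]])
#     node.to_list()  # -> ['add', 3, ['mul', 4, 5]]
#     node.valency    # -> 1, since ADD leaves one value on the stack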
# Get a decimal number as a fraction with denominator multiple of 10
def get_number_as_fraction(expr, context):
context_slice = context.origcode.splitlines()[expr.lineno - 1][expr.col_offset:]
t = 0
while t < len(context_slice) and context_slice[t] in '0123456789.':
t += 1
top = int(context_slice[:t].replace('.', ''))
bottom = 1 if '.' not in context_slice[:t] else 10**(t - context_slice[:t].index('.') - 1)
if expr.n < 0:
top *= -1
return context_slice[:t], top, bottom
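# Worked example (assumed source text): for the literal `2.5`,
# context_slice[:t] == '2.5', so the function returns ('2.5', 25, 10) --
# the raw token, its digits with the decimal point removed, and a
# power-of-ten denominator.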
# Is a number of decimal form (e.g. 65281) or 0x form (e.g. 0xff01)
def get_original_if_0x_prefixed(expr, context):
context_slice = context.origcode.splitlines()[expr.lineno - 1][expr.col_offset:]
if context_slice[:2] != '0x':
return None
t = 0
while t + 2 < len(context_slice) and context_slice[t + 2] in '0123456789abcdefABCDEF':
t += 1
return context_slice[:t + 2]
# Copies byte array
def make_byte_array_copier(destination, source):
if not isinstance(source.typ, (ByteArrayType, NullType)):
raise TypeMismatchException("Can only set a byte array to another byte array")
if isinstance(source.typ, ByteArrayType) and source.typ.maxlen > destination.typ.maxlen:
raise TypeMismatchException("Cannot cast from greater max-length %d to shorter max-length %d" % (source.typ.maxlen, destination.typ.maxlen))
# Special case: memory to memory
if source.location == "memory" and destination.location == "memory":
gas_calculation = GAS_IDENTITY + GAS_IDENTITYWORD * (ceil32(source.typ.maxlen) // 32)
o = LLLnode.from_list(
['with', '_source', source,
['with', '_sz', ['add', 32, ['mload', '_source']],
['assert', ['call', ['add', 18, ['div', '_sz', 10]], 4, 0, '_source', '_sz', destination, '_sz']]]],
typ=None, add_gas_estimate=gas_calculation, annotation='Memory copy'
)
return o
pos_node = LLLnode.from_list('_pos', typ=source.typ, location=source.location)
# Get the length
if isinstance(source.typ, NullType):
length = 1
elif source.location == "memory":
length = ['add', ['mload', '_pos'], 32]
elif source.location == "storage":
length = ['add', ['sload', '_pos'], 32]
pos_node = LLLnode.from_list(['sha3_32', pos_node], typ=source.typ, location=source.location)
else:
raise Exception("Unsupported location:" + source.location)
if destination.location == "storage":
destination = LLLnode.from_list(['sha3_32', destination], typ=destination.typ, location=destination.location)
# Maximum theoretical length
max_length = 32 if isinstance(source.typ, NullType) else source.typ.maxlen + 32
return LLLnode.from_list(['with', '_pos', 0 if isinstance(source.typ, NullType) else source,
make_byte_slice_copier(destination, pos_node, length, max_length)], typ=None)
# Copy bytes
# Accepts 4 arguments:
# (i) an LLL node for the start position of the source
# (ii) an LLL node for the start position of the destination
# (iii) an LLL node for the length
# (iv) a constant for the max length
def make_byte_slice_copier(destination, source, length, max_length):
# Special case: memory to memory
if source.location == "memory" and destination.location == "memory":
return LLLnode.from_list(['with', '_l', max_length,
['pop', ['call', 18 + max_length // 10, 4, 0, source,
'_l', destination, '_l']]], typ=None, annotation='copy byte slice')
# Copy over data
if isinstance(source.typ, NullType):
loader = 0
elif source.location == "memory":
loader = ['mload', ['add', '_pos', ['mul', 32, ['mload', MemoryPositions.FREE_LOOP_INDEX]]]]
elif source.location == "storage":
loader = ['sload', ['add', '_pos', ['mload', MemoryPositions.FREE_LOOP_INDEX]]]
else:
raise Exception("Unsupported location:" + source.location)
# Where to paste it?
if destination.location == "memory":
setter = ['mstore', ['add', '_opos', ['mul', 32, ['mload', MemoryPositions.FREE_LOOP_INDEX]]], loader]
elif destination.location == "storage":
setter = ['sstore', ['add', '_opos', ['mload', MemoryPositions.FREE_LOOP_INDEX]], loader]
else:
raise Exception("Unsupported location:" + destination.location)
# Check to see if we hit the length
checker = ['if', ['gt', ['mul', 32, ['mload', MemoryPositions.FREE_LOOP_INDEX]], '_actual_len'], 'break']
# Make a loop to do the copying
o = ['with', '_pos', source,
['with', '_opos', destination,
['with', '_actual_len', length,
['repeat', MemoryPositions.FREE_LOOP_INDEX, 0, (max_length + 31) // 32,
['seq', checker, setter]]]]]
return LLLnode.from_list(o, typ=None, annotation='copy byte slice src: %s dst: %s' % (source, destination))
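# Reading the loop above: FREE_LOOP_INDEX counts 32-byte words, 'repeat' runs
# ceil(max_length / 32) rounds, and the checker breaks out early once the word
# offset passes '_actual_len', so only the live bytes are copied.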
# Takes a <32 byte array as input, and outputs a number.
def byte_array_to_num(arg, expr, out_type, offset=32,):
if arg.location == "memory":
lengetter = LLLnode.from_list(['mload', '_sub'], typ=BaseType('int128'))
first_el_getter = LLLnode.from_list(['mload', ['add', 32, '_sub']], typ=BaseType('int128'))
elif arg.location == "storage":
lengetter = LLLnode.from_list(['sload', ['sha3_32', '_sub']], typ=BaseType('int128'))
first_el_getter = LLLnode.from_list(['sload', ['add', 1, ['sha3_32', '_sub']]], typ=BaseType('int128'))
if out_type == 'int128':
result = ['clamp',
['mload', MemoryPositions.MINNUM],
['div', '_el1', ['exp', 256, ['sub', 32, '_len']]],
['mload', MemoryPositions.MAXNUM]]
elif out_type == 'uint256':
result = ['div', '_el1', ['exp', 256, ['sub', offset, '_len']]]
return LLLnode.from_list(['with', '_sub', arg,
['with', '_el1', first_el_getter,
['with', '_len', ['clamp', 0, lengetter, 32],
result
]]],
typ=BaseType(out_type), annotation='bytearray to number (%s)' % out_type)
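# Reading the expression above: '_el1' holds the first 32-byte word of the
# array, and dividing by 256**(32 - _len) (256**(offset - _len) in the
# uint256 case) shifts away the unused low-order bytes, leaving the
# big-endian integer value of the _len-byte payload.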
def get_length(arg):
if arg.location == "memory":
return LLLnode.from_list(['mload', arg], typ=BaseType('int128'))
elif arg.location == "storage":
return LLLnode.from_list(['sload', ['sha3_32', arg]], typ=BaseType('int128'))
def getpos(node):
return (node.lineno, node.col_offset)
# Take a value representing a memory or storage location, and descend down to an element or member variable
def add_variable_offset(parent, key, pos):
typ, location = parent.typ, parent.location
if isinstance(typ, (StructType, TupleType)):
if isinstance(typ, StructType):
if not isinstance(key, str):
raise TypeMismatchException("Expecting a member variable access; cannot access element %r" % key, pos)
if key not in typ.members:
raise TypeMismatchException("Object does not have member variable %s" % key, pos)
subtype = typ.members[key]
attrs = sorted(typ.members.keys())
if key not in attrs:
raise TypeMismatchException("Member %s not found. Only the following available: %s" % (key, " ".join(attrs)), pos)
index = attrs.index(key)
annotation = key
else:
if not isinstance(key, int):
raise TypeMismatchException("Expecting a static index; cannot access element %r" % key, pos)
attrs = list(range(len(typ.members)))
index = key
annotation = None
if location == 'storage':
return LLLnode.from_list(['add', ['sha3_32', parent], LLLnode.from_list(index, annotation=annotation)],
typ=subtype,
location='storage')
elif location == 'storage_prehashed':
return LLLnode.from_list(['add', parent, LLLnode.from_list(index, annotation=annotation)],
typ=subtype,
location='storage')
elif location == 'memory':
offset = 0
for i in range(index):
offset += 32 * get_size_of_type(typ.members[attrs[i]])
return LLLnode.from_list(['add', offset, parent],
typ=typ.members[key],
location='memory',
annotation=annotation)
else:
raise TypeMismatchException("Not expecting a member variable access")
elif isinstance(typ, MappingType):
if isinstance(key.typ, ByteArrayType):
if not isinstance(typ.keytype, ByteArrayType) or (typ.keytype.maxlen < key.typ.maxlen):
raise TypeMismatchException(
'Mapping keys of bytes cannot be cast, use exact same bytes type of: %s' % str(typ.keytype), pos
)
subtype = typ.valuetype
if len(key.args[0].args) >= 3: # handle bytes literal.
sub = LLLnode.from_list([
'seq',
key,
['sha3', ['add', key.args[0].args[-1], 32], ['mload', key.args[0].args[-1]]]
])
else:
sub = LLLnode.from_list(['sha3', ['add', key.args[0].value, 32], ['mload', key.args[0].value]])
else:
subtype = typ.valuetype
sub = base_type_conversion(key, key.typ, typ.keytype, pos=pos)
if location == 'storage':
return LLLnode.from_list(['sha3_64', parent, sub],
typ=subtype,
location='storage')
elif location == 'memory':
raise TypeMismatchException("Can only have fixed-side arrays in memory, not mappings", pos)
elif isinstance(typ, ListType):
subtype = typ.subtype
sub = ['uclamplt', base_type_conversion(key, key.typ, BaseType('int128'), pos=pos), typ.count]
if location == 'storage':
return LLLnode.from_list(['add', ['sha3_32', parent], sub],
typ=subtype,
location='storage')
elif location == 'storage_prehashed':
return LLLnode.from_list(['add', parent, sub],
typ=subtype,
location='storage')
elif location == 'memory':
offset = 32 * get_size_of_type(subtype)
return LLLnode.from_list(['add', ['mul', offset, sub], parent],
typ=subtype,
location='memory')
else:
raise TypeMismatchException("Not expecting an array access ", pos)
else:
raise TypeMismatchException("Cannot access the child of a constant variable! %r" % typ, pos)
# Convert from one base type to another
def base_type_conversion(orig, frm, to, pos):
orig = unwrap_location(orig)
if not isinstance(frm, (BaseType, NullType)) or not isinstance(to, BaseType):
raise TypeMismatchException("Base type conversion from or to non-base type: %r %r" % (frm, to), pos)
elif is_base_type(frm, to.typ) and are_units_compatible(frm, to):
return LLLnode(orig.value, orig.args, typ=to, add_gas_estimate=orig.add_gas_estimate)
elif is_base_type(frm, 'int128') and is_base_type(to, 'decimal') and are_units_compatible(frm, to):
return LLLnode.from_list(['mul', orig, DECIMAL_DIVISOR], typ=BaseType('decimal', to.unit, to.positional))
elif is_base_type(frm, 'uint256') and is_base_type(to, 'int128') and are_units_compatible(frm, to):
return LLLnode.from_list(['uclample', orig, ['mload', MemoryPositions.MAXNUM]], typ=BaseType('int128'))
elif isinstance(frm, NullType):
if to.typ not in ('int128', 'bool', 'uint256', 'address', 'bytes32', 'decimal'):
# This is only to future proof the use of base_type_conversion.
raise TypeMismatchException("Cannot convert null-type object to type %r" % to, pos) # pragma: no cover
return LLLnode.from_list(0, typ=to)
# Integer literal conversion.
elif (frm.typ, to.typ, frm.is_literal) == ('int128', 'uint256', True):
return LLLnode(orig.value, orig.args, typ=to, add_gas_estimate=orig.add_gas_estimate)
else:
raise TypeMismatchException("Typecasting from base type %r to %r unavailable" % (frm, to), pos)
# Unwrap location
def unwrap_location(orig):
if orig.location == 'memory':
return LLLnode.from_list(['mload', orig], typ=orig.typ)
elif orig.location == 'storage':
return LLLnode.from_list(['sload', orig], typ=orig.typ)
else:
return orig
| 48.002066
| 152
| 0.56734
|
3254e885db5aad86d563e900fb5df486c5c4baf1
| 605
|
py
|
Python
|
python/crab/Moriond17/TTJets_PowhegPythia8_isrup.py
|
EmyrClement/NTupleProduction
|
4c1b67ac8826656e804912512f4c4dc6695c7674
|
[
"Apache-2.0"
] | 1
|
2018-02-20T21:23:21.000Z
|
2018-02-20T21:23:21.000Z
|
python/crab/Moriond17/TTJets_PowhegPythia8_isrup.py
|
RickeyEstes/NTupleProduction
|
1319183de0ce00749c8f5841fa925479b9024b48
|
[
"Apache-2.0"
] | 116
|
2015-01-09T22:38:07.000Z
|
2017-05-24T08:12:48.000Z
|
python/crab/Moriond17/TTJets_PowhegPythia8_isrup.py
|
RickeyEstes/NTupleProduction
|
1319183de0ce00749c8f5841fa925479b9024b48
|
[
"Apache-2.0"
] | 1
|
2020-11-01T00:01:17.000Z
|
2020-11-01T00:01:17.000Z
|
import crab.base
from copy import deepcopy
NAME = __file__.split('/')[-1].replace('.pyc', '')
NAME = NAME.split('/')[-1].replace('.py', '')
CAMPAIGN = __file__.split('/')[-2]
config = deepcopy(crab.base.config)
config.General.requestName = NAME
config.Data.outputDatasetTag = NAME
config.Data.outLFNDirBase += '/' + CAMPAIGN
config.Data.inputDataset = '/TT_TuneCUETP8M2T4_13TeV-powheg-isrup-pythia8/RunIISummer16MiniAODv2-PUMoriond17_80X_mcRun2_asymptotic_2016_TrancheIV_v6_ext1-v1/MINIAODSIM'
config.Data.splitting = 'FileBased'
config.Data.unitsPerJob = 5
config.JobType.pyCfgParams = ['isTTbarMC=1']
| 37.8125
| 168
| 0.765289
|
62a9d5e16ff2cb5d44222f2ae5ab69b895521038
| 38,475
|
py
|
Python
|
rl/core.py
|
NunoEdgarGFlowHub/keras-rl
|
544796e89a342b6a512aabacb3625cf1d215f11d
|
[
"MIT"
] | 3
|
2018-10-22T11:13:06.000Z
|
2022-02-16T20:40:20.000Z
|
rl/core.py
|
NunoEdgarGFlowHub/keras-rl
|
544796e89a342b6a512aabacb3625cf1d215f11d
|
[
"MIT"
] | null | null | null |
rl/core.py
|
NunoEdgarGFlowHub/keras-rl
|
544796e89a342b6a512aabacb3625cf1d215f11d
|
[
"MIT"
] | 5
|
2018-10-22T11:13:08.000Z
|
2020-12-03T00:57:11.000Z
|
# -*- coding: utf-8 -*-
import warnings
from copy import deepcopy
from collections import deque
import os
import numpy as np
from keras.callbacks import History
import pickle
from rl.callbacks import (
CallbackList,
TestLogger,
TrainEpisodeLogger,
TrainIntervalLogger,
Visualizer,
FileLogger
)
class Agent(object):
"""Abstract base class for all implemented agents.
Each agent interacts with the environment (as defined by the `Env` class) by first observing the
state of the environment. Based on this observation the agent changes the environment by performing
an action.
Do not use this abstract base class directly but instead use one of the concrete agents implemented.
Each agent realizes a reinforcement learning algorithm. Since all agents conform to the same
interface, you can use them interchangeably.
To implement your own agent, you have to implement the following methods:
- `forward`
- `backward`
- `compile`
- `load_weights`
- `save_weights`
- `layers`
# Arguments
processor (`Processor` instance): See [Processor](#processor) for details.
"""
def __init__(self, processor=None):
self.processor = processor
self.training = False
self.step = 0
def get_config(self):
"""Configuration of the agent for serialization.
# Returns
            Dictionary with agent configuration
"""
return {}
def fit(self, env, nb_steps, action_repetition=1, callbacks=None, verbose=1,
visualize=False, nb_max_start_steps=0, start_step_policy=None, log_interval=10000,
nb_max_episode_steps=None, episode_averaging_length=10, success_threshold=None,
stopping_patience=None, min_nb_steps=500, single_cycle=True):
"""Trains the agent on the given environment.
# Arguments
env: (`Env` instance): Environment that the agent interacts with. See [Env](#env) for details.
nb_steps (integer): Number of training steps to be performed.
action_repetition (integer): Number of times the agent repeats the same action without
observing the environment again. Setting this to a value > 1 can be useful
if a single action only has a very small effect on the environment.
callbacks (list of `keras.callbacks.Callback` or `rl.callbacks.Callback` instances):
List of callbacks to apply during training. See [callbacks](/callbacks) for details.
verbose (integer): 0 for no logging, 1 for interval logging (compare `log_interval`), 2 for episode logging
visualize (boolean): If `True`, the environment is visualized during training. However,
this is likely going to slow down training significantly and is thus intended to be
a debugging instrument.
nb_max_start_steps (integer): Number of maximum steps that the agent performs at the beginning
of each episode using `start_step_policy`. Notice that this is an upper limit since
the exact number of steps to be performed is sampled uniformly from [0, max_start_steps]
at the beginning of each episode.
start_step_policy (`lambda observation: action`): The policy
to follow if `nb_max_start_steps` > 0. If set to `None`, a random action is performed.
log_interval (integer): If `verbose` = 1, the number of steps that are considered to be an interval.
nb_max_episode_steps (integer): Number of steps per episode that the agent performs before
automatically resetting the environment. Set to `None` if each episode should run
(potentially indefinitely) until the environment signals a terminal state.
# Returns
A `keras.callbacks.History` instance that recorded the entire training process.
"""
if not self.compiled:
            raise RuntimeError('You tried to fit your agent but it hasn\'t been compiled yet. Please call `compile()` before `fit()`.')
if action_repetition < 1:
raise ValueError('action_repetition must be >= 1, is {}'.format(action_repetition))
self.training = True
callbacks = [] if not callbacks else callbacks[:]
for cb in callbacks:
if isinstance(cb, FileLogger):
save_path = cb.filepath
folder_index = save_path.index("training_history.json")
weights_file = os.path.join(save_path[:folder_index],"dqn_weights.h5f")
if verbose == 1:
callbacks += [TrainIntervalLogger(interval=log_interval)]
elif verbose > 1:
callbacks += [TrainEpisodeLogger(interval=log_interval)]
if visualize:
callbacks += [Visualizer()]
history = History()
callbacks += [history]
callbacks = CallbackList(callbacks)
if hasattr(callbacks, 'set_model'):
callbacks.set_model(self)
else:
callbacks._set_model(self)
callbacks._set_env(env)
params = {
'nb_steps': nb_steps,
}
if hasattr(callbacks, 'set_params'):
callbacks.set_params(params)
else:
callbacks._set_params(params)
self._on_train_begin()
callbacks.on_train_begin()
episode = np.int16(0)
self.step = np.int16(0)
observation = None
episode_reward = None
episode_step = None
episode_num_errors = None
did_abort = False
# ------ Early stopping and reporting averages ------------------
#
        # It would be ideal to do this via a callback, but returning flags from
        # callbacks is awkward, so early stopping is built directly into this
        # fit method. NB: some of the values below are hardcoded and should
        # eventually be made configurable.
#
# --------------------------------------------------------------
if not single_cycle:
recent_episode_lifetimes = deque([], episode_averaging_length)
episode_lifetimes_rolling_avg = 0
best_rolling_avg = 0
best_episode = 0
time_since_best = 0
elif single_cycle:
recent_episode_wins = deque([], episode_averaging_length)
best_rolling_avg = 0
best_episode = 0
time_since_best = 0
rolling_win_fraction = 0
stop_training = False
has_succeeded = False
stopped_improving = False
try:
while self.step < nb_steps and not stop_training:
if observation is None: # start of a new episode
callbacks.on_episode_begin(episode)
episode_step = np.int16(0)
episode_reward = np.float32(0)
# Obtain the initial observation by resetting the environment.
self.reset_states()
observation = deepcopy(env.reset())
# print("Episode Step:", episode_step)
# print("hidden state: ")
# print(env.hidden_state)
# print("Board State: ")
# print(observation)
if self.processor is not None:
observation = self.processor.process_observation(observation)
assert observation is not None
# Perform random starts at beginning of episode and do not record them into the experience.
# This slightly changes the start position between games.
nb_random_start_steps = 0 if nb_max_start_steps == 0 else np.random.randint(nb_max_start_steps)
for _ in range(nb_random_start_steps):
if start_step_policy is None:
action = env.action_space.sample()
else:
action = start_step_policy(observation)
if self.processor is not None:
action = self.processor.process_action(action)
callbacks.on_action_begin(action)
observation, reward, done, info = env.step(action)
observation = deepcopy(observation)
if self.processor is not None:
observation, reward, done, info = self.processor.process_step(observation, reward, done, info)
callbacks.on_action_end(action)
if done:
warnings.warn('Env ended before {} random steps could be performed at the start. You should probably lower the `nb_max_start_steps` parameter.'.format(nb_random_start_steps))
observation = deepcopy(env.reset())
if self.processor is not None:
observation = self.processor.process_observation(observation)
break
# At this point, we expect to be fully initialized.
assert episode_reward is not None
assert episode_step is not None
assert observation is not None
# print("Episode Step:", episode_step)
# Run a single step.
callbacks.on_step_begin(episode_step)
# This is were all of the work happens. We first perceive and compute the action
# (forward step) and then use the reward to improve (backward step).
if hasattr(env, "legal_actions"):
legal_actions = list(env.legal_actions)
action = self.forward(observation, legal_actions)
# print("legal actions: ", legal_actions)
# print("chosen action: ", action)
else:
action = self.forward(observation)
if self.processor is not None:
action = self.processor.process_action(action)
reward = np.float32(0)
accumulated_info = {}
done = False
for _ in range(action_repetition):
callbacks.on_action_begin(action)
observation, r, done, info = env.step(action)
observation = deepcopy(observation)
if self.processor is not None:
observation, r, done, info = self.processor.process_step(observation, r, done, info)
for key, value in info.items():
if not np.isreal(value):
continue
if key not in accumulated_info:
accumulated_info[key] = np.zeros_like(value)
accumulated_info[key] += value
callbacks.on_action_end(action)
reward += r
if done:
break
if nb_max_episode_steps and episode_step >= nb_max_episode_steps - 1:
# Force a terminal state.
done = True
metrics = self.backward(reward, terminal=done)
episode_reward += reward
# print("new hidden state: ")
# print(env.hidden_state)
# print("new board state: ")
# print(observation)
# print("reward: ", r, "episode reward: ", episode_reward)
# print("done: ", done)
step_logs = {
'action': action,
'observation': observation,
'reward': reward,
'metrics': metrics,
'episode': episode,
'info': accumulated_info,
}
callbacks.on_step_end(episode_step, step_logs)
episode_step += 1
self.step += 1
if done:
# We are in a terminal state but the agent hasn't yet seen it. We therefore
# perform one more forward-backward call and simply ignore the action before
# resetting the environment. We need to pass in `terminal=False` here since
# the *next* state, that is the state of the newly reset environment, is
# always non-terminal by convention.
action = self.forward(observation)
self.backward(0., terminal=False)
# Now we want to work out the recent averages, this will go into early stopping
if not single_cycle:
recent_episode_lifetimes.append(env.lifetime)
episode_lifetimes_rolling_avg = np.mean(recent_episode_lifetimes)
if episode_lifetimes_rolling_avg > best_rolling_avg:
best_rolling_avg = episode_lifetimes_rolling_avg
best_episode = episode
time_since_best = 0
else:
time_since_best = episode - best_episode
if episode_lifetimes_rolling_avg > success_threshold:
stop_training = True
has_succeeded = True
if self.step > min_nb_steps and time_since_best > stopping_patience:
stop_training = True
stopped_improving = True
else:
if episode_reward == 1:
recent_episode_wins.append(1)
else:
recent_episode_wins.append(0)
num_wins = np.sum(recent_episode_wins)
rolling_win_fraction = num_wins/episode_averaging_length
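                        # Note: this divides by the window size, not the current
                        # deque length, so the fraction is biased low until the
                        # window fills (test() below divides by len() instead).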
if rolling_win_fraction > best_rolling_avg:
best_rolling_avg = rolling_win_fraction
best_episode = episode
time_since_best = 0
                            # Save the network whenever the rolling win fraction
                            # improves. Note: early in training this saves on every
                            # improvement, which can slow things down considerably.
if self.step > min_nb_steps:
self.save_weights(weights_file, overwrite=True)
else:
time_since_best = episode - best_episode
if rolling_win_fraction > success_threshold:
stop_training = True
has_succeeded = True
if self.step > min_nb_steps and time_since_best > stopping_patience:
stop_training = True
stopped_improving = True
# This episode is finished, report and reset.
if not single_cycle:
episode_logs = {
'episode_reward': episode_reward,
'nb_episode_steps': episode_step,
'nb_steps': self.step,
'episode_lifetimes_rolling_avg': episode_lifetimes_rolling_avg,
'best_rolling_avg': best_rolling_avg,
'best_episode': best_episode,
'time_since_best': time_since_best,
'has_succeeded': has_succeeded,
'stopped_improving': stopped_improving
}
else:
episode_logs = {
'episode_reward': episode_reward,
'nb_episode_steps': episode_step,
'nb_steps': self.step,
'rolling_win_fraction': rolling_win_fraction,
'best_rolling_fraction': best_rolling_avg,
'best_episode': best_episode,
'time_since_best': time_since_best,
'has_succeeded': has_succeeded,
'stopped_improving': stopped_improving
}
callbacks.on_episode_end(episode, episode_logs, single_cycle)
episode += 1
observation = None
episode_step = None
episode_reward = None
except KeyboardInterrupt:
# We catch keyboard interrupts here so that training can be be safely aborted.
# This is so common that we've built this right into this function, which ensures that
# the `on_train_end` method is properly called.
did_abort = True
if not single_cycle:
callbacks.on_train_end(logs={'did_abort': did_abort,
'has_succeeded': has_succeeded,
'stopped_improving': stopped_improving,
'episode_lifetimes_rolling_avg': episode_lifetimes_rolling_avg,
'step': self.step
}, single_cycle=single_cycle)
else:
callbacks.on_train_end(logs={'did_abort': did_abort,
'has_succeeded': has_succeeded,
'stopped_improving': stopped_improving,
'rolling_win_fraction': rolling_win_fraction,
'step': self.step
}, single_cycle=single_cycle)
self._on_train_end()
return history
def test(self, env, nb_episodes=1, action_repetition=1, callbacks=None, visualize=True,
nb_max_episode_steps=None, nb_max_start_steps=0, start_step_policy=None, verbose=1,
episode_averaging_length=200, interval = 100, single_cycle=True):
"""Callback that is called before training begins.
# Arguments
env: (`Env` instance): Environment that the agent interacts with. See [Env](#env) for details.
nb_episodes (integer): Number of episodes to perform.
action_repetition (integer): Number of times the agent repeats the same action without
observing the environment again. Setting this to a value > 1 can be useful
if a single action only has a very small effect on the environment.
callbacks (list of `keras.callbacks.Callback` or `rl.callbacks.Callback` instances):
List of callbacks to apply during training. See [callbacks](/callbacks) for details.
verbose (integer): 0 for no logging, 1 for interval logging (compare `log_interval`), 2 for episode logging
visualize (boolean): If `True`, the environment is visualized during training. However,
this is likely going to slow down training significantly and is thus intended to be
a debugging instrument.
nb_max_start_steps (integer): Number of maximum steps that the agent performs at the beginning
of each episode using `start_step_policy`. Notice that this is an upper limit since
the exact number of steps to be performed is sampled uniformly from [0, max_start_steps]
at the beginning of each episode.
start_step_policy (`lambda observation: action`): The policy
to follow if `nb_max_start_steps` > 0. If set to `None`, a random action is performed.
log_interval (integer): If `verbose` = 1, the number of steps that are considered to be an interval.
nb_max_episode_steps (integer): Number of steps per episode that the agent performs before
automatically resetting the environment. Set to `None` if each episode should run
(potentially indefinitely) until the environment signals a terminal state.
# Returns
            A `keras.callbacks.History` instance that recorded the entire testing process.
"""
if not self.compiled:
            raise RuntimeError('You tried to test your agent but it hasn\'t been compiled yet. Please call `compile()` before `test()`.')
if action_repetition < 1:
raise ValueError('action_repetition must be >= 1, is {}'.format(action_repetition))
self.training = False
self.step = 0
callbacks = [] if not callbacks else callbacks[:]
if verbose >= 1:
callbacks += [TestLogger(interval=interval)]
if visualize:
callbacks += [Visualizer()]
history = History()
callbacks += [history]
callbacks = CallbackList(callbacks)
if hasattr(callbacks, 'set_model'):
callbacks.set_model(self)
else:
callbacks._set_model(self)
callbacks._set_env(env)
params = {
'nb_episodes': nb_episodes,
}
if hasattr(callbacks, 'set_params'):
callbacks.set_params(params)
else:
callbacks._set_params(params)
self._on_test_begin()
callbacks.on_train_begin()
if not single_cycle:
recent_episode_lifetimes = []
episode_lifetimes_rolling_avg = 0
best_rolling_avg = 0
best_episode = 0
time_since_best = 0
else:
recent_episode_wins = []
best_rolling_avg = 0
best_episode = 0
time_since_best = 0
rolling_win_fraction = 0
stop_training = False
has_succeeded = False
stopped_improving = False
for episode in range(nb_episodes):
callbacks.on_episode_begin(episode)
episode_reward = 0.
episode_step = 0
# Obtain the initial observation by resetting the environment.
self.reset_states()
observation = deepcopy(env.reset())
if self.processor is not None:
observation = self.processor.process_observation(observation)
assert observation is not None
# Perform random starts at beginning of episode and do not record them into the experience.
# This slightly changes the start position between games.
nb_random_start_steps = 0 if nb_max_start_steps == 0 else np.random.randint(nb_max_start_steps)
for _ in range(nb_random_start_steps):
if start_step_policy is None:
action = env.action_space.sample()
else:
action = start_step_policy(observation)
if self.processor is not None:
action = self.processor.process_action(action)
callbacks.on_action_begin(action)
observation, r, done, info = env.step(action)
observation = deepcopy(observation)
if self.processor is not None:
observation, r, done, info = self.processor.process_step(observation, r, done, info)
callbacks.on_action_end(action)
if done:
warnings.warn('Env ended before {} random steps could be performed at the start. You should probably lower the `nb_max_start_steps` parameter.'.format(nb_random_start_steps))
observation = deepcopy(env.reset())
if self.processor is not None:
observation = self.processor.process_observation(observation)
break
# Run the episode until we're done.
done = False
while not done:
callbacks.on_step_begin(episode_step)
if hasattr(env, "legal_actions"):
legal_actions = list(env.legal_actions)
action = self.forward(observation, legal_actions)
else:
action = self.forward(observation)
if self.processor is not None:
action = self.processor.process_action(action)
reward = 0.
accumulated_info = {}
for _ in range(action_repetition):
callbacks.on_action_begin(action)
observation, r, d, info = env.step(action)
observation = deepcopy(observation)
if self.processor is not None:
observation, r, d, info = self.processor.process_step(observation, r, d, info)
callbacks.on_action_end(action)
reward += r
for key, value in info.items():
if not np.isreal(value):
continue
if key not in accumulated_info:
accumulated_info[key] = np.zeros_like(value)
accumulated_info[key] += value
if d:
done = True
break
if nb_max_episode_steps and episode_step >= nb_max_episode_steps - 1:
done = True
self.backward(reward, terminal=done)
episode_reward += reward
step_logs = {
'action': action,
'observation': observation,
'reward': reward,
'episode': episode,
'info': accumulated_info,
}
callbacks.on_step_end(episode_step, step_logs)
episode_step += 1
self.step += 1
# We are in a terminal state but the agent hasn't yet seen it. We therefore
# perform one more forward-backward call and simply ignore the action before
# resetting the environment. We need to pass in `terminal=False` here since
# the *next* state, that is the state of the newly reset environment, is
# always non-terminal by convention.
self.forward(observation)
self.backward(0., terminal=False)
if not single_cycle:
recent_episode_lifetimes.append(env.lifetime)
episode_lifetimes_rolling_avg = np.mean(recent_episode_lifetimes)
else:
if episode_reward == 1:
recent_episode_wins.append(1)
else:
recent_episode_wins.append(0)
num_wins = np.sum(recent_episode_wins)
rolling_win_fraction = num_wins/len(recent_episode_wins)
if not single_cycle:
episode_logs = {
'episode_reward': episode_reward,
'nb_episode_steps': episode_step,
'episode_lifetime': env.lifetime,
'episode_lifetimes_rolling_avg': episode_lifetimes_rolling_avg}
else:
episode_logs = {
'episode_reward': episode_reward,
'nb_episode_steps': episode_step,
'rolling_win_fraction': rolling_win_fraction}
callbacks.on_episode_end(episode, episode_logs, single_cycle)
callbacks.on_train_end()
self._on_test_end()
return history
def reset_states(self):
"""Resets all internally kept states after an episode is completed.
"""
pass
def forward(self, observation):
"""Takes the an observation from the environment and returns the action to be taken next.
If the policy is implemented by a neural network, this corresponds to a forward (inference) pass.
# Argument
observation (object): The current observation from the environment.
# Returns
The next action to be executed in the environment.
"""
raise NotImplementedError()
def backward(self, reward, terminal):
"""Updates the agent after having executed the action returned by `forward`.
If the policy is implemented by a neural network, this corresponds to a weight update using back-prop.
# Argument
reward (float): The observed reward after executing the action returned by `forward`.
terminal (boolean): `True` if the new state of the environment is terminal.
# Returns
List of metrics values
"""
raise NotImplementedError()
def compile(self, optimizer, metrics=[]):
"""Compiles an agent and the underlaying models to be used for training and testing.
# Arguments
optimizer (`keras.optimizers.Optimizer` instance): The optimizer to be used during training.
metrics (list of functions `lambda y_true, y_pred: metric`): The metrics to run during training.
"""
raise NotImplementedError()
def load_weights(self, filepath):
"""Loads the weights of an agent from an HDF5 file.
# Arguments
filepath (str): The path to the HDF5 file.
"""
raise NotImplementedError()
def save_weights(self, filepath, overwrite=False):
"""Saves the weights of an agent as an HDF5 file.
# Arguments
filepath (str): The path to where the weights should be saved.
overwrite (boolean): If `False` and `filepath` already exists, raises an error.
"""
raise NotImplementedError()
@property
def layers(self):
"""Returns all layers of the underlying model(s).
If the concrete implementation uses multiple internal models,
this method returns them in a concatenated list.
# Returns
A list of the model's layers
"""
raise NotImplementedError()
@property
def metrics_names(self):
"""The human-readable names of the agent's metrics. Must return as many names as there
are metrics (see also `compile`).
# Returns
A list of metric's names (string)
"""
return []
def _on_train_begin(self):
"""Callback that is called before training begins."
"""
pass
def _on_train_end(self):
"""Callback that is called after training ends."
"""
pass
def _on_test_begin(self):
"""Callback that is called before testing begins."
"""
pass
def _on_test_end(self):
"""Callback that is called after testing ends."
"""
pass
class Processor(object):
"""Abstract base class for implementing processors.
A processor acts as a coupling mechanism between an `Agent` and its `Env`. This can
be necessary if your agent has different requirements with respect to the form of the
observations, actions, and rewards of the environment. By implementing a custom processor,
    you can effectively translate between the two without having to change the underlying
implementation of the agent or environment.
Do not use this abstract base class directly but instead use one of the concrete implementations
or write your own.
"""
def process_step(self, observation, reward, done, info):
"""Processes an entire step by applying the processor to the observation, reward, and info arguments.
# Arguments
observation (object): An observation as obtained by the environment.
reward (float): A reward as obtained by the environment.
done (boolean): `True` if the environment is in a terminal state, `False` otherwise.
info (dict): The debug info dictionary as obtained by the environment.
# Returns
            The tuple (observation, reward, done, info) with all elements after being processed.
"""
observation = self.process_observation(observation)
reward = self.process_reward(reward)
info = self.process_info(info)
return observation, reward, done, info
def process_observation(self, observation):
"""Processes the observation as obtained from the environment for use in an agent and
returns it.
# Arguments
observation (object): An observation as obtained by the environment
# Returns
            The processed observation
"""
return observation
def process_reward(self, reward):
"""Processes the reward as obtained from the environment for use in an agent and
returns it.
# Arguments
reward (float): A reward as obtained by the environment
# Returns
            The processed reward
"""
return reward
def process_info(self, info):
"""Processes the info as obtained from the environment for use in an agent and
returns it.
# Arguments
info (dict): An info as obtained by the environment
# Returns
            The processed info
"""
return info
def process_action(self, action):
"""Processes an action predicted by an agent but before execution in an environment.
# Arguments
action (int): Action given to the environment
# Returns
Processed action given to the environment
"""
return action
def process_state_batch(self, batch):
"""Processes an entire batch of states and returns it.
# Arguments
batch (list): List of states
# Returns
Processed list of states
"""
return batch
@property
def metrics(self):
"""The metrics of the processor, which will be reported during training.
# Returns
List of `lambda y_true, y_pred: metric` functions.
"""
return []
@property
def metrics_names(self):
"""The human-readable names of the agent's metrics. Must return as many names as there
are metrics (see also `compile`).
"""
return []
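# Illustrative subclass (a minimal sketch, not part of the library): a
# processor that clips rewards into [-1, 1], a common trick for stabilizing
# value-based training:
#
#     class ClippedRewardProcessor(Processor):
#         def process_reward(self, reward):
#             return np.clip(reward, -1., 1.)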
# Note: the API of the `Env` and `Space` classes are taken from the OpenAI Gym implementation.
# https://github.com/openai/gym/blob/master/gym/core.py
class Env(object):
"""The abstract environment class that is used by all agents. This class has the exact
same API that OpenAI Gym uses so that integrating with it is trivial. In contrast to the
OpenAI Gym implementation, this class only defines the abstract methods without any actual
implementation.
To implement your own environment, you need to define the following methods:
- `step`
- `reset`
- `render`
- `close`
Refer to the [Gym documentation](https://gym.openai.com/docs/#environments).
"""
reward_range = (-np.inf, np.inf)
action_space = None
observation_space = None
def step(self, action):
"""Run one timestep of the environment's dynamics.
Accepts an action and returns a tuple (observation, reward, done, info).
# Arguments
            action (object): An action provided by the agent.
# Returns
observation (object): Agent's observation of the current environment.
reward (float) : Amount of reward returned after previous action.
done (boolean): Whether the episode has ended, in which case further step() calls will return undefined results.
info (dict): Contains auxiliary diagnostic information (helpful for debugging, and sometimes learning).
"""
raise NotImplementedError()
def reset(self):
"""
Resets the state of the environment and returns an initial observation.
# Returns
observation (object): The initial observation of the space. Initial reward is assumed to be 0.
"""
raise NotImplementedError()
def render(self, mode='human', close=False):
"""Renders the environment.
The set of supported modes varies per environment. (And some
environments do not support rendering at all.)
# Arguments
mode (str): The mode to render with.
close (bool): Close all open renderings.
"""
raise NotImplementedError()
def close(self):
"""Override in your subclass to perform any necessary cleanup.
Environments will automatically close() themselves when
garbage collected or when the program exits.
"""
raise NotImplementedError()
def seed(self, seed=None):
"""Sets the seed for this env's random number generator(s).
# Returns
Returns the list of seeds used in this env's random number generators
"""
raise NotImplementedError()
def configure(self, *args, **kwargs):
"""Provides runtime configuration to the environment.
This configuration should consist of data that tells your
environment how to run (such as an address of a remote server,
or path to your ImageNet data). It should not affect the
semantics of the environment.
"""
raise NotImplementedError()
def __del__(self):
self.close()
def __str__(self):
return '<{} instance>'.format(type(self).__name__)
class Space(object):
"""Abstract model for a space that is used for the state and action spaces. This class has the
exact same API that OpenAI Gym uses so that integrating with it is trivial.
Please refer to [Gym Documentation](https://gym.openai.com/docs/#spaces)
"""
def sample(self, seed=None):
"""Uniformly randomly sample a random element of this space.
"""
raise NotImplementedError()
def contains(self, x):
"""Return boolean specifying if x is a valid member of this space
"""
raise NotImplementedError()
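# --- Illustrative sketch, not part of the original file ---
# A toy Env subclass exercising the step/reset/render/close contract
# documented above; the counter dynamics and target value are assumptions
# chosen purely for illustration.
class CounterEnv(Env):
    """Counts upwards; the episode ends with reward 1.0 at the target."""
    def __init__(self, target=10):
        self.target = target
        self.state = 0

    def step(self, action):
        # action 1 increments the counter; any other action is a no-op
        if action == 1:
            self.state += 1
        done = self.state >= self.target
        reward = 1.0 if done else 0.0
        return self.state, reward, done, {}

    def reset(self):
        self.state = 0
        return self.state

    def render(self, mode='human', close=False):
        print('state = {}'.format(self.state))

    def close(self):
        pass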
| 41.775244
| 202
| 0.579571
|
ce4067335f5903b91b780f66765b894afd158761
| 1,476
|
py
|
Python
|
208. Implement Trie (Prefix Tree).py
|
joshlyman/Josh-LeetCode
|
cc9e2cc406d2cbd5a90ee579efbcaeffb842c5ed
|
[
"MIT"
] | null | null | null |
208. Implement Trie (Prefix Tree).py
|
joshlyman/Josh-LeetCode
|
cc9e2cc406d2cbd5a90ee579efbcaeffb842c5ed
|
[
"MIT"
] | null | null | null |
208. Implement Trie (Prefix Tree).py
|
joshlyman/Josh-LeetCode
|
cc9e2cc406d2cbd5a90ee579efbcaeffb842c5ed
|
[
"MIT"
] | null | null | null |
class Trie:
def __init__(self):
"""
Initialize your data structure here.
"""
self.trie = {}
def insert(self, word: str) -> None:
"""
Inserts a word into the trie.
"""
t = self.trie
for w in word:
if w not in t:
t[w] = {}
t = t[w]
# end of word
t['#'] = '#'
def search(self, word: str) -> bool:
"""
Returns if the word is in the trie.
"""
t = self.trie
for w in word:
if w not in t:
return False
            # descend to the next character's node
t = t[w]
        # check for the end-of-word marker
if '#' in t:
return True
        # no '#' marker: the word exists only as a prefix, not as a complete word
return False
def startsWith(self, prefix: str) -> bool:
"""
Returns if there is any word in the trie that starts with the given prefix.
"""
t = self.trie
for w in prefix:
if w not in t:
return False
t = t[w]
        # no need to check for the '#' end-of-word marker here
return True
# Your Trie object will be instantiated and called as such:
# obj = Trie()
# obj.insert(word)
# param_2 = obj.search(word)
# param_3 = obj.startsWith(prefix)
# Time: O(L) for insert, search and startsWith, assuming each word has length L
# Space: O(N*L) to store all N words
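# Illustrative usage, not part of the original solution -- a quick sanity
# check of the three operations:
# trie = Trie()
# trie.insert("apple")
# assert trie.search("apple")        # full word present
# assert not trie.search("app")      # "app" is only a prefix so far
# assert trie.startsWith("app")      # but it is a valid prefix
# trie.insert("app")
# assert trie.search("app")          # now "app" is a complete word too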
| 24.196721
| 83
| 0.472222
|
99e01f364d03b761ee0f7d263c462720ba315ab3
| 3,525
|
py
|
Python
|
tests/sentry/web/frontend/test_js_sdk_loader.py
|
AlexWayfer/sentry
|
ef935cda2b2e960bd602fda590540882d1b0712d
|
[
"BSD-3-Clause"
] | null | null | null |
tests/sentry/web/frontend/test_js_sdk_loader.py
|
AlexWayfer/sentry
|
ef935cda2b2e960bd602fda590540882d1b0712d
|
[
"BSD-3-Clause"
] | null | null | null |
tests/sentry/web/frontend/test_js_sdk_loader.py
|
AlexWayfer/sentry
|
ef935cda2b2e960bd602fda590540882d1b0712d
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import absolute_import
from exam import fixture
from django.conf import settings
from django.core.urlresolvers import reverse
from sentry.testutils import TestCase
class JavaScriptSdkLoaderTest(TestCase):
@fixture
def path(self):
settings.JS_SDK_LOADER_SDK_VERSION = '0.5.2'
settings.JS_SDK_LOADER_DEFAULT_SDK_URL = 'https://s3.amazonaws.com/getsentry-cdn/@sentry/browser/%s/bundle.min.js'
return reverse('sentry-js-sdk-loader', args=[self.projectkey.public_key])
def test_404(self):
resp = self.client.get(reverse('sentry-js-sdk-loader', args=['abc']))
assert resp.status_code == 404
def test_noop(self):
settings.JS_SDK_LOADER_DEFAULT_SDK_URL = ''
resp = self.client.get(reverse('sentry-js-sdk-loader', args=[self.projectkey.public_key]))
assert resp.status_code == 200
self.assertTemplateUsed(resp, 'sentry/js-sdk-loader-noop.js.tmpl')
def test_no_replace(self):
settings.JS_SDK_LOADER_SDK_VERSION = '0.5.2'
settings.JS_SDK_LOADER_DEFAULT_SDK_URL = 'https://s3.amazonaws.com/getsentry-cdn/@sentry/browser/0.0.0/bundle.min.js'
resp = self.client.get(reverse('sentry-js-sdk-loader', args=[self.projectkey.public_key]))
assert resp.status_code == 200
self.assertIn(settings.JS_SDK_LOADER_DEFAULT_SDK_URL, resp.content)
self.assertTemplateUsed(resp, 'sentry/js-sdk-loader.js.tmpl')
def test_renders_js_loader(self):
resp = self.client.get(self.path)
assert resp.status_code == 200
self.assertTemplateUsed(resp, 'sentry/js-sdk-loader.js.tmpl')
self.assertIn(self.projectkey.public_key, resp.content)
self.assertIn('bundle.min.js', resp.content)
def test_minified(self):
resp = self.client.get(self.path)
assert resp.status_code == 200
min_resp = self.client.get(
reverse(
'sentry-js-sdk-loader',
args=[
self.projectkey.public_key,
'.min']))
assert min_resp.status_code == 200
self.assertTemplateUsed(min_resp, 'sentry/js-sdk-loader.min.js.tmpl')
self.assertIn(self.projectkey.public_key, min_resp.content)
self.assertIn('bundle.min.js', min_resp.content)
assert len(resp.content) > len(min_resp.content)
def test_headers(self):
resp = self.client.get(self.path)
assert resp.status_code == 200, resp
self.assertIn('stale-if-error', resp['Cache-Control'])
self.assertIn('stale-while-revalidate', resp['Cache-Control'])
self.assertIn('s-maxage', resp['Cache-Control'])
self.assertIn('max-age', resp['Cache-Control'])
self.assertIn('project/%s' % self.projectkey.project_id, resp['Surrogate-Key'])
self.assertIn('sdk/%s' % settings.JS_SDK_LOADER_SDK_VERSION, resp['Surrogate-Key'])
self.assertIn('sdk-loader', resp['Surrogate-Key'])
assert 'Content-Encoding' not in resp
assert 'Set-Cookie' not in resp
assert 'Vary' not in resp
def test_absolute_url(self):
assert reverse(
'sentry-js-sdk-loader',
args=[
self.projectkey.public_key,
'.min']) in self.projectkey.js_sdk_loader_cdn_url
settings.JS_SDK_LOADER_CDN_URL = 'https://js.sentry-cdn.com/'
        assert 'https://js.sentry-cdn.com/%s.min.js' % (
            self.projectkey.public_key
        ) == self.projectkey.js_sdk_loader_cdn_url
| 43.518519
| 125
| 0.659858
|
b510f85cd27cbc041dcf6a0100533881e930a71d
| 692
|
py
|
Python
|
flask_start/flask_start/extensions.py
|
kostekci/flask_start
|
fa279fc8907aff9868e2596f4ed9c4d9428d2f75
|
[
"MIT"
] | null | null | null |
flask_start/flask_start/extensions.py
|
kostekci/flask_start
|
fa279fc8907aff9868e2596f4ed9c4d9428d2f75
|
[
"MIT"
] | 95
|
2021-09-13T21:23:12.000Z
|
2022-03-31T21:22:32.000Z
|
flask_start/flask_start/extensions.py
|
kostekci/flask_start
|
fa279fc8907aff9868e2596f4ed9c4d9428d2f75
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Extensions module. Each extension is initialized in the app factory located in app.py."""
from flask_bcrypt import Bcrypt
from flask_caching import Cache
from flask_debugtoolbar import DebugToolbarExtension
from flask_login import LoginManager
from flask_migrate import Migrate
from flask_sqlalchemy import SQLAlchemy
from flask_static_digest import FlaskStaticDigest
from flask_wtf.csrf import CSRFProtect
from flask_mail import Mail
bcrypt = Bcrypt()
csrf_protect = CSRFProtect()
login_manager = LoginManager()
db = SQLAlchemy()
migrate = Migrate()
cache = Cache()
debug_toolbar = DebugToolbarExtension()
flask_static_digest = FlaskStaticDigest()
mail = Mail()
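# Illustrative sketch, not part of the original module: the docstring above
# says each extension is initialized in the app factory in app.py; a minimal
# version of that factory might look like the following (the settings module
# name is an assumption):
#
# from flask import Flask
# from .extensions import bcrypt, csrf_protect, db, login_manager, migrate
#
# def create_app(config_object="flask_start.settings"):
#     app = Flask(__name__)
#     app.config.from_object(config_object)
#     bcrypt.init_app(app)
#     csrf_protect.init_app(app)
#     db.init_app(app)
#     login_manager.init_app(app)
#     migrate.init_app(app, db)
#     return app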
| 30.086957
| 92
| 0.813584
|
8ec54cf83c738134fd852f24479ab8566b38c641
| 3,031
|
py
|
Python
|
projects/CodeThorn/src/regressiondata/validate.py
|
maurizioabba/rose
|
7597292cf14da292bdb9a4ef573001b6c5b9b6c0
|
[
"BSD-3-Clause"
] | 488
|
2015-01-09T08:54:48.000Z
|
2022-03-30T07:15:46.000Z
|
projects/CodeThorn/src/regressiondata/validate.py
|
sujankh/rose-matlab
|
7435d4fa1941826c784ba97296c0ec55fa7d7c7e
|
[
"BSD-3-Clause"
] | 174
|
2015-01-28T18:41:32.000Z
|
2022-03-31T16:51:05.000Z
|
projects/CodeThorn/src/regressiondata/validate.py
|
sujankh/rose-matlab
|
7435d4fa1941826c784ba97296c0ec55fa7d7c7e
|
[
"BSD-3-Clause"
] | 146
|
2015-04-27T02:48:34.000Z
|
2022-03-04T07:32:53.000Z
|
#!/usr/bin/python
def boundscheck(n):
if n < 1 or n > 100:
print "** PARSE ERROR"
exit(1)
# needs python 2.7?
import argparse
if __name__ == '__main__':
# Command line argument handling
cmdline = argparse.ArgumentParser(
description='compare counterexample output with rers csv output',
epilog='Please report bugs to <adrian@llnl.gov>.')
cmdline.add_argument('--csv', metavar='<ltl.csv>', type=file, help='LTL CSV data')
cmdline.add_argument('--log', metavar='<counterexamples.log>', type=file, help='qc log output')
cmdline.add_argument('--verbose', action='store_true', help='verbose mode')
args = cmdline.parse_args()
if not args.csv: print 'no csv input specified!'; exit(1)
if not args.log: print 'no log input specified!'; exit(1)
filename = args.log.name
next_counterexample = -1
correct = 0
inconsistent = 0
unverified = 0
unknown = 0
for line in args.csv.readlines(): # OLD FORMAT [1:]:
# OLD FORMAT idx, formula, result, confidence = line.split(';')
idx, result, confidence = line.split(',')
n = int(idx)-60
boundscheck(n)
if next_counterexample < n:
# skip to next counterexample
while 1:
qc_line = args.log.readline()
if qc_line == "": # EOF
# no more counterexamples in the log file
next_counterexample = -1
break
                if qc_line.find("FALSE, found counterexample") != -1:
next_counterexample = int(qc_line.split(' ')[0])
boundscheck(next_counterexample)
break
if n == next_counterexample:
if result == "yes":
print "** INCONSISTENCY"
print " ", line
print " ", qc_line
inconsistent += 1
elif result == "no":
if (args.verbose): print "%d consistent"%n
correct += 1
elif result == "unknown":
print "%d UNKNOWN, but counterexample exists"%n
unknown += 1
else:
if (args.verbose): print "%d consistent, but unverified"%n
if result == "unknown":
unknown += 1
else:
unverified += 1
print correct,inconsistent,unverified,unknown,n
assert(correct+inconsistent+unverified+unknown == n)
red = '\033[31m' # MS: changed color code to standard 31-38 (instead of non-standard aixterm codes)
green = '\033[32m'
reset = '\033[39m'
print
print "Statistics"
print "=========="
if inconsistent > 0:
inconsistent_color=red
else:
inconsistent_color=green
print "%d/%d Consistent, %s%d/%d Inconsistent%s, %d/%d Unverified, %d/%d Unknown" % (
correct, n, inconsistent_color, inconsistent, n, reset, unverified, n, unknown, n)
if inconsistent > 0:
exit(1)
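# Example invocation (hypothetical file names):
#   ./validate.py --csv problem1-ltl.csv --log counterexamples.log --verbose
# The script exits with status 1 on parse errors, missing inputs, or when
# any inconsistency between the CSV results and the counterexamples is found.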
| 34.83908
| 103
| 0.555262
|
e9c8ef5396db976c29bce78563205f85774c15f5
| 330
|
py
|
Python
|
p2d/tests/test_baseconfig.py
|
cubercsl/polygon2domjudge
|
7760fb6fb45e3024f85d72d64b41d44db58eb4d3
|
[
"MIT"
] | 8
|
2021-04-18T14:03:38.000Z
|
2021-11-18T11:54:34.000Z
|
p2d/tests/test_baseconfig.py
|
cubercsl/polygon2domjudge
|
7760fb6fb45e3024f85d72d64b41d44db58eb4d3
|
[
"MIT"
] | null | null | null |
p2d/tests/test_baseconfig.py
|
cubercsl/polygon2domjudge
|
7760fb6fb45e3024f85d72d64b41d44db58eb4d3
|
[
"MIT"
] | null | null | null |
import pytest
from p2d import checkers
from p2d import problems
from p2d import results
from p2d import misc
def test_checkers():
checkers.load_checker_config()
def test_problems():
problems.load_problem_config('')
def test_results():
results.load_result_config()
def test_misc():
misc.load_misc_config()
| 14.347826
| 36
| 0.754545
|
f84e47b15ff04990e3fd5a93c65ec0b11850cf97
| 9,792
|
py
|
Python
|
tests/test_cdb_maker.py
|
willmaclean/MedCAT
|
528a4765acbdf04acf8a8bd90a4f19bc1e3e33c3
|
[
"MIT"
] | 4
|
2019-03-18T11:54:58.000Z
|
2019-06-26T02:53:38.000Z
|
tests/test_cdb_maker.py
|
willmaclean/MedCAT
|
528a4765acbdf04acf8a8bd90a4f19bc1e3e33c3
|
[
"MIT"
] | null | null | null |
tests/test_cdb_maker.py
|
willmaclean/MedCAT
|
528a4765acbdf04acf8a8bd90a4f19bc1e3e33c3
|
[
"MIT"
] | null | null | null |
import unittest
import logging
import os
import numpy as np
from medcat.cdb_maker import CDBMaker
from medcat.cdb import CDB
from medcat.config import Config
from medcat.preprocessing.cleaners import prepare_name
#cdb.csv
#cui name ontologies name_status type_ids description
#C0000039 Virus MSH P T109|T123 Synthetic phospholipid used in liposomes and lipid bilayers to study biological membranes. It is also a major constituent of PULMONARY SURFACTANTS.
#C0000039 Virus M T234
#C0000039 Virus M |Virus K|Virus Z
#C0000139 Virus M|Virus K|Virus Z P
#C0000139 Virus A
#cdb2.csv
#cui name ontologies name_status type_ids description
#C0000239 Second csv
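#The normalised names asserted below (e.g. 'virus~z') suggest that '~' is
#the token separator produced by prepare_name; this reading is inferred
#from the expected results, not from MedCAT documentation.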
#TESTS RUN IN ALPHABETICAL ORDER - ordering is controlled by the '[class_letter]...Tests' class names and the 'test_[class_letter][test_letter]_...' method names
class A_CDBMakerLoadTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
print("Load test database csvs for load tests")
config = Config()
config.general['log_level'] = logging.DEBUG
config.general["spacy_model"] = "en_core_web_md"
cls.maker = CDBMaker(config)
csvs = [
os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'examples', 'cdb.csv'),
os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'examples', 'cdb_2.csv')
]
cls.cdb = cls.maker.prepare_csvs(csvs, full_build=True)
@classmethod
def tearDownClass(cls) -> None:
cls.maker.destroy_pipe()
def test_aa_cdb_names_length(self):
self.assertEqual(len(self.cdb.cui2names), 3, "Should equal 3")
def test_ab_cdb_names_output(self):
target_result = {'C0000039': {'virus~k', 'virus', 'virus~m', 'virus~z'}, 'C0000139': {'virus~k', 'virus', 'virus~m', 'virus~z'}, 'C0000239': {'second~csv'}}
self.assertEqual(self.cdb.cui2names, target_result)
def test_ac_cdb_snames_length(self):
self.assertEqual(len(self.cdb.cui2snames), 3, "Should equal 3")
def test_ad_cdb_snames_output(self):
target_result = {'C0000039': {'virus~k', 'virus', 'virus~m', 'virus~z'}, 'C0000139': {'virus~k', 'virus', 'virus~m', 'virus~z'}, 'C0000239': {'second', 'second~csv'}}
self.assertEqual(self.cdb.cui2snames, target_result)
def test_ae_cdb_name_to_cuis_length(self):
self.assertEqual(len(self.cdb.name2cuis), 5, "Should equal 5")
def test_af_cdb_name_to_cuis_output(self):
target_result = {'virus': ['C0000039', 'C0000139'], 'virus~m': ['C0000039', 'C0000139'], 'virus~k': ['C0000039', 'C0000139'], 'virus~z': ['C0000039', 'C0000139'], 'second~csv': ['C0000239']}
self.assertEqual(self.cdb.name2cuis, target_result)
def test_ag_cdb_cuis_to_tags_length(self):
self.assertEqual(len(self.cdb.cui2tags), 0, "Should equal 0")
def test_ah_cdb_cuis_to_tags_output(self):
target_result = {}
self.assertEqual(self.cdb.cui2tags, target_result)
def test_ai_cdb_cui_to_preferred_name_length(self):
self.assertEqual(len(self.cdb.cui2preferred_name), 2, "Should equal 2")
def test_aj_cdb_cui_to_preferred_name_output(self):
target_result = {'C0000039': 'Virus', 'C0000139': 'Virus Z'}
self.assertEqual(self.cdb.cui2preferred_name, target_result)
def test_ak_cdb_cui_to_context_vectors_length(self):
self.assertEqual(len(self.cdb.cui2context_vectors), 0, "Should equal 0")
def test_al_cdb_cui_to_context_vectors_output(self):
target_result = {}
self.assertEqual(self.cdb.cui2context_vectors, target_result)
def test_am_cdb_cui_to_count_train_length(self):
self.assertEqual(len(self.cdb.cui2count_train), 0, "Should equal 0")
def test_an_cdb_cui_to_count_train_output(self):
target_result = {}
self.assertEqual(self.cdb.cui2count_train, target_result)
def test_ao_cdb_name_to_cui_to_status_length(self):
self.assertEqual(len(self.cdb.name2cuis2status), 5, "Should equal 5")
def test_ap_cdb_name_to_cui_to_status_output(self):
target_result = {'virus': {'C0000039': 'P', 'C0000139': 'A'}, 'virus~m': {'C0000039': 'A', 'C0000139': 'P'}, 'virus~k': {'C0000039': 'A', 'C0000139': 'P'}, 'virus~z': {'C0000039': 'A', 'C0000139': 'P'}, 'second~csv': {'C0000239': 'A'}}
self.assertEqual(self.cdb.name2cuis2status, target_result)
def test_aq_cdb_cui_to_type_ids_length(self):
self.assertEqual(len(self.cdb.cui2type_ids), 3, "Should equal 3")
def test_ar_cdb_cui_to_type_ids_output(self):
target_result = {'C0000039': {'T234', 'T109', 'T123'}, 'C0000139': set(), 'C0000239': set()}
self.assertEqual(self.cdb.cui2type_ids, target_result)
def test_as_cdb_additional_info_length(self):
self.assertEqual(len(self.cdb.addl_info), 8, "Should equal 8")
def test_at_cdb_additional_info_output(self):
target_result = {'cui2icd10': {}, 'cui2opcs4': {}, 'cui2ontologies': {'C0000039': {'MSH'}}, 'cui2original_names': {'C0000039': {'Virus K', 'Virus M', 'Virus', 'Virus Z'}, 'C0000139': {'Virus K', 'Virus M', 'Virus', 'Virus Z'}, 'C0000239': {'Second csv'}}, 'cui2description': {'C0000039': 'Synthetic phospholipid used in liposomes and lipid bilayers to study biological membranes. It is also a major constituent of PULMONARY SURFACTANTS.'}, 'type_id2name': {}, 'type_id2cuis': {'T109': {'C0000039'}, 'T123': {'C0000039'}, 'T234': {'C0000039'}}, 'cui2group': {}}
self.assertEqual(self.cdb.addl_info, target_result)
class B_CDBMakerEditTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
print("Load test database csvs for edit tests")
cls.config = Config()
cls.config.general['log_level'] = logging.DEBUG
cls.config.general["spacy_model"] = "en_core_web_md"
cls.maker = CDBMaker(cls.config)
csvs = [
os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'examples', 'cdb.csv'),
os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'examples', 'cdb_2.csv')
]
cls.cdb = cls.maker.prepare_csvs(csvs, full_build=True)
cls.cdb2 = CDB(cls.config)
@classmethod
def tearDownClass(cls) -> None:
cls.maker.destroy_pipe()
def test_ba_addition_of_new_name(self):
self.cdb.add_names(cui='C0000239', names=prepare_name('MY: new,-_! Name.', self.maker.pipe.spacy_nlp, {}, self.config), name_status='P', full_build=True)
self.assertEqual(len(self.cdb.name2cuis), 6, "Should equal 6")
target_result = {'MY: new,-_! Name.', 'Second csv'}
self.assertEqual(self.cdb.addl_info['cui2original_names']['C0000239'], target_result)
self.assertIn('my~:~new~name~.', self.cdb.name2cuis)
self.assertIn('my~:~new', self.cdb.snames)
self.assertIn('my~:~new~name~.', self.cdb.name2cuis2status)
def test_bb_removal_of_name(self):
self.cdb.remove_names(cui='C0000239', names=prepare_name('MY: new,-_! Name.', self.maker.pipe.spacy_nlp, {}, self.config))
self.assertEqual(len(self.cdb.name2cuis), 5, "Should equal 5")
self.assertNotIn('my:newname.', self.cdb.name2cuis2status)
def test_bc_filter_by_cui(self):
cuis_to_keep = {'C0000039'}
self.cdb.filter_by_cui(cuis_to_keep=cuis_to_keep)
self.assertEqual(len(self.cdb.cui2names), 2, "Should equal 2")
self.assertEqual(len(self.cdb.name2cuis), 4, "Should equal 4")
self.assertEqual(len(self.cdb.snames), 4, "Should equal 4")
def test_bd_addition_of_context_vector_positive(self):
np.random.seed(11)
cuis = list(self.cdb.cui2names.keys())
for i in range(2):
for cui in cuis:
vectors = {}
for cntx_type in self.config.linking['context_vector_sizes']:
vectors[cntx_type] = np.random.rand(300)
self.cdb.update_context_vector(cui, vectors, negative=False)
self.assertEqual(self.cdb.cui2count_train['C0000139'], 2, "Count should equal 2")
self.assertEqual(self.cdb.cui2context_vectors['C0000139']['long'].shape[0], 300, "Dimensions should equal 300")
def test_be_addition_of_context_vector_negative(self):
np.random.seed(11)
cuis = list(self.cdb.cui2names.keys())
for i in range(2):
for cui in cuis:
vectors = {}
for cntx_type in self.config.linking['context_vector_sizes']:
vectors[cntx_type] = np.random.rand(300)
self.cdb.update_context_vector(cui, vectors, negative=True)
self.assertEqual(self.cdb.cui2count_train['C0000139'], 2, "Count should equal 2")
self.assertEqual(self.cdb.cui2context_vectors['C0000139']['long'].shape[0], 300, "Dimensions should equal 300")
def test_bf_import_training(self):
self.cdb2.import_training(cdb=self.cdb, overwrite=True)
self.assertEqual(self.cdb.cui2count_train['C0000139'], 2, "Count should equal 2")
self.assertEqual(self.cdb.cui2context_vectors['C0000139']['long'].shape[0], 300, "Dimensions should equal 300")
def test_bg_save_and_load_model_context_vectors(self):
self.cdb.save("./tmp_cdb.dat")
self.cdb2 = CDB.load('./tmp_cdb.dat')
self.assertEqual(self.cdb.cui2count_train['C0000139'], 2, "Count should equal 2")
self.assertEqual(self.cdb.cui2context_vectors['C0000139']['long'].shape[0], 300, "Dimensions should equal 300")
def test_bh_reset_training(self):
self.cdb.reset_training()
target_result = {}
self.assertEqual(self.cdb.cui2context_vectors, target_result)
if __name__ == '__main__':
unittest.main()
| 48.236453
| 568
| 0.668811
|
9c5dfacf62a22cd7d4a89d5e8ab459f85ceb7e82
| 29,107
|
py
|
Python
|
tests/python/relay/test_op_level3.py
|
ravikumarvc/incubator-tvm
|
9826947ffce0ed40e9d47a0db2abb033e394279e
|
[
"Apache-2.0"
] | 3
|
2021-02-23T22:06:01.000Z
|
2021-09-30T09:59:17.000Z
|
tests/python/relay/test_op_level3.py
|
ravikumarvc/incubator-tvm
|
9826947ffce0ed40e9d47a0db2abb033e394279e
|
[
"Apache-2.0"
] | 4
|
2021-03-30T11:59:59.000Z
|
2022-03-12T00:40:23.000Z
|
tests/python/relay/test_op_level3.py
|
ravikumarvc/incubator-tvm
|
9826947ffce0ed40e9d47a0db2abb033e394279e
|
[
"Apache-2.0"
] | 3
|
2021-07-20T07:40:15.000Z
|
2021-08-03T08:39:17.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Support level3 operator test cases.
"""
import numpy as np
import pytest
import tvm
from tvm import relay
from tvm.relay import create_executor, transform
from tvm.relay.testing import ctx_list, check_grad, run_infer_type
def test_zeros_ones():
for op, ref in [(relay.zeros, np.zeros), (relay.ones, np.ones)]:
y = op(shape=(124, 50), dtype="float64")
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((124, 50), "float64")
intrp = create_executor()
intrp_res = intrp.evaluate(y).asnumpy()
np.testing.assert_allclose(intrp_res, ref((124, 50), 'float64'))
def test_unary_identity():
for op, ref in [(relay.zeros_like, np.zeros_like),
(relay.ones_like, np.ones_like),
(relay.ceil, np.ceil),
(relay.floor, np.floor),
(relay.trunc, np.trunc),
(relay.round, np.round),
(relay.abs, np.abs),
(relay.copy, None), # np.copy
(relay.negative, np.negative),
(relay.sign, np.sign)]:
shape = (8, 9, 4)
x = relay.var("x", relay.TensorType(shape, "float32"))
y = op(x)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(shape, "float32")
if ref is not None:
data = np.random.rand(*shape).astype('float32')
intrp = create_executor()
op_res = intrp.evaluate(y, { x: relay.const(data) })
ref_res = ref(data)
np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=0.01)
def test_cast():
x = relay.var("x", relay.TensorType((8, 9, 4), "float32"))
y = x.astype("int32")
yy = run_infer_type(y)
assert "dtype=" in yy.astext()
assert yy.checked_type == relay.TensorType((8, 9, 4), "int32")
x = relay.var("x", relay.TensorType((8, 9, 4), "float32"))
y = relay.cast(x, "int32")
yy = run_infer_type(y)
assert "dtype=" in yy.astext()
assert yy.checked_type == relay.TensorType((8, 9, 4), "int32")
def test_clip():
a = relay.var("a", relay.TensorType((10, 4), "float32"))
y = relay.clip(a, 1., 4.)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((10, 4), "float32")
data = np.random.rand(10, 4).astype('float32')
intrp = create_executor()
op_res = intrp.evaluate(y, { a: relay.const(data) })
ref_res = np.clip(data, 1., 4.)
np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=0.01)
def test_reinterpret():
a = relay.var("a", relay.TensorType((1000, 4), "float32"))
y = relay.reinterpret(a, "int32")
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((1000, 4), "int32")
data = np.random.randn(1000, 4).astype('float32') * 1000
intrp = create_executor()
op_res = intrp.evaluate(y, {a: relay.const(data)})
ref_res = data.view("int32")
np.testing.assert_equal(op_res.asnumpy(), ref_res)
def test_approximate_transcendental():
def C(x):
return relay.expr.const(x, "float32")
def approx_exp(x):
# An approximation derived from Opus,
# https://github.com/xiph/opus/blob/c1c247/celt/mathops.h#L147-L165
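        # How the trick works: exp(x) = 2^(x*log2(e)). After rescaling by
        # log2(e) ~= 1.44269504 and adding the float32 exponent bias (127),
        # the integer part i is shifted into the exponent bits and
        # reinterpreted as a float, which yields 2^(i-127); the remaining
        # fractional part f in [0, 1) is covered by the cubic polynomial
        # Y ~= 2^f, so the product reconstructs exp(x).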
x = relay.minimum(relay.maximum(x, C(-88.0)), C(88.0))
x = C(127.0) + x * C(1.44269504)
xf = relay.floor(x)
i = relay.cast(xf, "int32")
x = x - xf
Y = C(0.99992522) + x * (C(0.69583354) + x * (C(0.22606716) + x * C(0.078024523)))
exponent = relay.left_shift(i, relay.expr.const(23, "int32"))
exponent = relay.reinterpret(exponent, "float32")
return exponent * Y
def approximate_sigmoid(x):
y = approx_exp(x)
return y / (y + C(1.0))
def approximate_tanh(x):
x = x * C(2.0)
y = approx_exp(x)
return (y - C(1.0)) / (y + C(1.0))
a = relay.var("a", relay.TensorType((1000,), "float32"))
y = approximate_sigmoid(a)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((1000,), "float32")
data = np.linspace(-5, 5, 1000).astype("float32")
intrp = create_executor()
op_res = intrp.evaluate(y, {a: relay.const(data)})
def reference_sigmoid(x):
return np.exp(-np.logaddexp(0, -x))
np.testing.assert_allclose(op_res.asnumpy(), reference_sigmoid(data), atol=2e-5, rtol=1e-9)
y = approximate_tanh(a)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((1000,), "float32")
data = np.linspace(-5, 5, 1000).astype("float32")
intrp = create_executor()
op_res = intrp.evaluate(y, {a: relay.const(data)})
def reference_tanh(x):
return np.tanh(x)
np.testing.assert_allclose(op_res.asnumpy(), reference_tanh(data), atol=4e-5, rtol=1e-9)
def test_squeeze():
def verify_squeeze(shape, dtype, axis):
x = relay.var("x", relay.TensorType(shape, dtype))
squeeze = relay.squeeze(x, axis=axis)
np_axis = tuple(axis) if axis is not None else None
data = np.random.random_sample(shape).astype(dtype)
intrp = create_executor()
op_res = intrp.evaluate(squeeze, { x : relay.const(data) })
ref_res = np.squeeze(data, axis=np_axis)
np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=0.01)
verify_squeeze((1, 3, 2, 5), "float32", None)
verify_squeeze((1, 3, 1), "float32", [0])
verify_squeeze((1, 2, 1, 2, 1), "float32", [0, 2])
def test_transpose_infer_type():
n, t, d = tvm.size_var("n"), tvm.size_var("t"), 100
x = relay.var("x", relay.TensorType((n, t, d), "float32"))
y = relay.transpose(x, axes=(1, 0, 2))
assert "axes=" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(
(t, n, 100), "float32")
y = relay.transpose(x)
assert "axes=" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(
(100, t, n), "float32")
def test_transpose():
def verify_transpose(dshape, axes):
x = relay.var("x", relay.TensorType(dshape, "float32"))
z = relay.transpose(x, axes=axes)
func = relay.Function([x], z)
x_data = np.random.uniform(low=-1, high=1, size=dshape).astype("float32")
ref_res = np.transpose(x_data, axes=axes)
for target, ctx in ctx_list():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
op_res = intrp.evaluate(func)(x_data)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)
verify_transpose((2, 3, 4), (0, 2, 1))
def test_squeeze_infer_type():
n, t, d = 1, 4, 1
x = relay.var("x", relay.TensorType((n, t, d), "float32"))
y = relay.squeeze(x, axis=(2,))
assert "axis=" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(
(1, 4), "float32")
n, t, d = 1, 4, 1
x = relay.var("x", relay.TensorType((n, t, d), "float32"))
y = relay.squeeze(x)
assert "axis=" not in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(
(4,), "float32")
@pytest.mark.xfail(raises=tvm._ffi.base.TVMError)
def test_squeeze_bad_axes_infer_type():
n, t, d = 1, 4, 1
x = relay.var("x", relay.TensorType((n, t, d), "float32"))
y = relay.squeeze(x, axis=(1,))
yy = run_infer_type(y)
def test_reshape_infer_type():
n, t, d1, d2 = 10, 20, 100, 20
x = relay.var("x", relay.TensorType((n, t, d1, d2), "float32"))
y = relay.reshape(x, newshape=(n, t, 2000))
assert "newshape=" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(
(n, t, 2000), "float32")
def test_reshape():
def verify_reshape(shape, newshape, oshape):
x = relay.var("x", relay.TensorType(shape, "float32"))
z = relay.reshape(x, newshape=newshape)
zz = run_infer_type(z)
assert "newshape=" in z.astext()
assert zz.checked_type == relay.ty.TensorType(oshape, "float32")
func = relay.Function([x], z)
check_grad(func)
x_data = np.random.uniform(low=-1, high=1, size=shape).astype("float32")
ref_res = np.reshape(x_data, oshape)
for target, ctx in ctx_list():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
op_res = intrp.evaluate(func)(x_data)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)
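    # The special values in `newshape` below follow MXNet-style reshape
    # codes (a reading inferred from the expected output shapes, not stated
    # in this file): 0 copies the matching input dim, -1 infers one dim from
    # the remaining elements, -2 copies all remaining input dims, -3 merges
    # two consecutive input dims, and -4 splits one input dim into the two
    # values that follow it.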
verify_reshape((2, 3, 4), (8, 3), (8, 3))
verify_reshape((4, 7), (2, 7, 2), (2, 7, 2))
verify_reshape((2, 3, 4), (4, 0, 2), (4, 3, 2))
verify_reshape((2, 3, 4), (2, 0, 0), (2, 3, 4))
verify_reshape((2, 3, 4), (0, -1), (2, 12))
verify_reshape((2, 3, 4), (-1, 0), (8, 3))
verify_reshape((2, 3, 4), (2, -2), (2, 3, 4))
verify_reshape((2, 3, 4), (-2, 1, 1), (2, 3, 4, 1, 1))
verify_reshape((2, 3, 4), (-3, 4), (6, 4))
verify_reshape((2, 3, 4, 5), (-3, -3), (6, 20))
verify_reshape((2, 3, 4), (0, -3), (2, 12))
verify_reshape((2, 3, 4), (-3, -2), (6, 4))
verify_reshape((2, 3, 4), (-4, 1, 2, -2), (1, 2, 3, 4))
verify_reshape((2, 3, 4), (2, -4, -1, 3, -2), (2, 1, 3, 4))
def test_reshape_like_infer_type():
# concrete shape
x = relay.var("x", relay.TensorType((1, 2, 3), "float32"))
y = relay.var("y", relay.TensorType((1,6), "float32"))
z = relay.reshape_like(x, y)
zz = run_infer_type(z)
assert zz.checked_type == relay.TensorType((1, 6), "float32")
# symbolic shape
n, c, h, w = tvm.size_var("n"), 2, 3, tvm.size_var("w")
x = relay.var("x", relay.TensorType((n, c, h, w), "float32"))
y = relay.var("y", relay.TensorType((1, 8, 8), "float32"))
z = relay.reshape_like(x, y)
zz = run_infer_type(z)
assert zz.checked_type == relay.TensorType((1, 8, 8), "float32")
def test_reshape_like():
def verify_reshape_like(shape, oshape):
x_data = np.random.uniform(low=-1, high=1, size=shape).astype("float32")
y_data = np.random.uniform(low=-1, high=1, size=oshape).astype("float32")
ref_res = np.reshape(x_data, y_data.shape)
x = relay.var("x", relay.TensorType(shape, "float32"))
y = relay.var("x", relay.TensorType(oshape, "float32"))
z = relay.reshape_like(x, y)
zz = run_infer_type(z)
assert zz.checked_type == relay.ty.TensorType(ref_res.shape, "float32")
func = relay.Function([x, y], z)
for target, ctx in ctx_list():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
op_res = intrp.evaluate(func)(x_data, y_data)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)
verify_reshape_like((2, 3, 4), (1, 8, 3))
verify_reshape_like((4, 7), (2, 7, 2))
def test_take_infer_type():
def verify_take(dshape, indices_shape, oshape, axis=None):
x = relay.var("x", relay.TensorType(dshape, "float32"))
indices = relay.var("indices", relay.TensorType(indices_shape, "int32"))
y = relay.take(x, indices, axis=axis)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType(oshape, "float32")
d1, d2, d3 = tvm.var("d1"), tvm.var("d2"), tvm.var("d3")
d4, d5, d6 = tvm.var("d4"), tvm.var("d5"), tvm.var("d6")
verify_take((d1,), (1,), (1,), 0)
verify_take((4,), (d1, d2), (d1, d2))
verify_take((3, 3, 3), (1, d2), (1, d2))
verify_take((d1, d2), (d3, d4, d5), (d3, d4, d5, d2), 0)
verify_take((d1, d2), (d3, d4, d5), (d1, d3, d4, d5), 1)
verify_take((d1, d2, d3, d4), (d5, d6), (d1, d2, d5, d6, d4), -2)
def test_take():
def verify_take(src_shape, indices_src, axis=None, mode="clip"):
src_dtype = "float32"
indices_dtype = "int32"
indices_src = np.array(indices_src, dtype=indices_dtype)
x = relay.var("x", relay.TensorType(src_shape, src_dtype))
indices = relay.var("indices", relay.TensorType(indices_src.shape, indices_dtype))
z = relay.take(x, indices, axis=axis, mode=mode)
func = relay.Function([x, indices], z)
x_data = np.random.uniform(low=-1, high=1, size=src_shape).astype(src_dtype)
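        # "fast" mode assumes in-bounds indices, so numpy's "raise" mode is
        # the closest reference behaviour for generating expected results.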
np_mode = "raise" if mode == "fast" else mode
ref_res = np.take(x_data, indices=indices_src, axis=axis, mode=np_mode)
for target, ctx in ctx_list():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
op_res = intrp.evaluate(func)(x_data, indices_src)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)
verify_take((4,), [1])
verify_take((4,), [[0,1,2,3]])
verify_take((3,3,3), [[11,25]])
verify_take((4,), [[0,1],[2,3]])
verify_take((4,), [1], 0)
verify_take((2,2), [[[1,0],[0,1]]], 0)
verify_take((2,2), [[[1,0],[0,1]]], 1)
verify_take((4,3,5,6), [[2,1,0,0]], -2)
verify_take((3,4), [-5, 20])
verify_take((3,4), [-5, 20], mode="wrap")
verify_take((3,4), [-1, 2], axis=0)
verify_take((3,4), [-1, 2], axis=0, mode="wrap")
verify_take((3,4), [-1, 2], axis=1)
verify_take((3,4), [-1, 2], axis=1, mode="wrap")
verify_take((3,3,3), [[11,25]], mode="fast")
verify_take((3,4), [0, 2], axis=0, mode="fast")
verify_take((3,4), [0, 2], axis=1, mode="fast")
def test_split_infer_type():
def verify_split(dshape, indices_or_sections, ret_type, axis=None):
x = relay.var("x", relay.ty.TensorType(dshape, "float32"))
y = relay.split(x, indices_or_sections, axis=axis)
yy = run_infer_type(y.astuple())
assert yy.checked_type == ret_type
idxd = tvm.indexdiv
d1, d2, d3, d4 = tvm.var("d1"), tvm.var("d2"), tvm.var("d3"), tvm.var("d4")
axis = tvm.var("axis")
verify_split((5, 5, 2, 2), 5,
relay.ty.TupleType(tvm.convert([
relay.ty.TensorType((5, 1, 2, 2), "float32"),
relay.ty.TensorType((5, 1, 2, 2), "float32"),
relay.ty.TensorType((5, 1, 2, 2), "float32"),
relay.ty.TensorType((5, 1, 2, 2), "float32"),
relay.ty.TensorType((5, 1, 2, 2), "float32")])),
axis=1)
verify_split((5, 5, 2, 2), 5,
relay.ty.TupleType(tvm.convert([
relay.ty.TensorType((1, 5, 2, 2), "float32"),
relay.ty.TensorType((1, 5, 2, 2), "float32"),
relay.ty.TensorType((1, 5, 2, 2), "float32"),
relay.ty.TensorType((1, 5, 2, 2), "float32"),
relay.ty.TensorType((1, 5, 2, 2), "float32")])),
axis=0)
verify_split((d1, d2, d3, d4), 4,
relay.ty.TupleType(tvm.convert([
relay.ty.TensorType((d1, d2, idxd(d3, 4), d4), "float32"),
relay.ty.TensorType((d1, d2, idxd(d3, 4), d4), "float32"),
relay.ty.TensorType((d1, d2, idxd(d3, 4), d4), "float32"),
relay.ty.TensorType((d1, d2, idxd(d3, 4), d4), "float32")])),
axis=2)
verify_split((d1, d2, d3, d4), 2,
relay.ty.TupleType(tvm.convert([
relay.ty.TensorType((idxd(d1, 2), d2, d3, d4), "float32"),
relay.ty.TensorType((idxd(d1, 2), d2, d3, d4), "float32")])),
axis=0)
verify_split((d1, d2, d3, d4), (2, 4, 7),
relay.ty.TupleType(tvm.convert([
relay.ty.TensorType((d1, 2, d3, d4), "float32"),
relay.ty.TensorType((d1, 2, d3, d4), "float32"),
relay.ty.TensorType((d1, 3, d3, d4), "float32"),
relay.ty.TensorType((d1, (d2-7), d3, d4), "float32")])),
axis=1)
def test_full_infer_type():
# default settings: match input dtype
x = relay.var("x", relay.TensorType((), "int8"))
y = relay.full(x, ())
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((), "int8")
# change the shape and dtype
x = relay.var("x", relay.TensorType((), "float32"))
y = relay.full(x, (1, 2), "int8")
"shape=" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((1, 2), "int8")
def test_full():
def verify_full(fill_value, src_shape, dtype):
x = relay.var("x", relay.scalar_type(dtype))
z = relay.full(x, src_shape, dtype)
func = relay.Function([x], z)
ref_res = np.full(src_shape, fill_value)
for target, ctx in ctx_list():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
op_res = intrp.evaluate(func)(np.array(fill_value, dtype))
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)
verify_full(4, (1, 3, 4, 4), "int32")
verify_full(4.0, (1, 4), "float32")
def test_full_like_infer_type():
# concrete shape
base = relay.var("base", relay.TensorType((1, 2, 3), "float32"))
fill = relay.var("fill", relay.TensorType((), "float32"))
y = relay.full_like(base, fill)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((1, 2, 3), "float32")
# symbolic shape
n, c, h, w = tvm.size_var("n"), 2, 3, tvm.size_var("w")
base = relay.var("base", relay.TensorType((n, c, h, w), "float32"))
fill = relay.var("fill", relay.TensorType((), "float32"))
y = relay.full_like(base, fill)
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, c, h, w), "float32")
def test_full_like():
def verify_full_like(base, fill_value, dtype):
x_data = np.random.uniform(low=-1, high=1, size=base).astype(dtype)
x = relay.var("x", relay.TensorType(base, dtype))
y = relay.var("y", relay.scalar_type(dtype))
z = relay.full_like(x, y)
func = relay.Function([x, y], z)
ref_res = np.full_like(x_data, fill_value)
for target, ctx in ctx_list():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
op_res = intrp.evaluate(func)(x_data, np.array(fill_value, dtype))
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)
verify_full_like((1, 3, 4, 4), 4, "int32")
verify_full_like((1, 1), 44.0, "float32")
def test_infer_type_leaky_relu():
n, c , h, w = tvm.size_var("n"), tvm.size_var("c"), tvm.size_var("h"), tvm.size_var("w")
x = relay.var("x", relay.TensorType((n, c, h, w), "float32"))
y = relay.nn.leaky_relu(x, alpha=0.1)
"alpha=0.1" in y.astext()
yy = run_infer_type(y)
assert yy.checked_type == relay.TensorType((n, c, h, w), "float32")
shape = (1, 5, 10, 10)
dtype = "float32"
x = relay.var("x", relay.TensorType(shape, dtype))
z = relay.nn.leaky_relu(x, alpha=0.1)
assert "alpha=0.1" in z.astext()
zz = run_infer_type(z)
assert zz.checked_type == relay.TensorType(shape, dtype)
func = relay.Function([x], z)
x_data = np.random.uniform(low=-1, high=1, size=shape).astype(dtype)
ref_res = np.where(x_data > 0, x_data, x_data * 0.1)
for target, ctx in ctx_list():
intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
intrp2 = relay.create_executor("debug", ctx=ctx, target=target)
op_res1 = intrp1.evaluate(func)(x_data)
tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5)
op_res2 = intrp2.evaluate(func)(x_data)
tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5)
def verify_infer_type_prelu(data, alpha, axis, output, dtype="float32"):
x = relay.var("data", relay.TensorType(data, dtype))
if alpha:
y = relay.var("alpha", relay.TensorType(alpha, dtype))
else:
y = relay.var("alpha", relay.IncompleteType())
z = relay.nn.prelu(x, y, axis=axis)
zz = run_infer_type(z)
if axis != 1:
assert "axis" in z.astext()
assert zz.checked_type == relay.ty.TensorType(output, dtype)
if not alpha:
axis = axis if axis else 1
alpha_shape = (data[axis],)
assert zz.args[1].checked_type == relay.TensorType(alpha_shape, "float32")
    if all(isinstance(v, tvm.expr.Var) for v in data) or not alpha:
return
func = relay.Function([x, y], z)
x_data = np.random.uniform(low=-1, high=1, size=data).astype(dtype)
a_data = np.random.uniform(low=-1, high=1, size=alpha).astype(dtype)
if axis == 1:
ref_res = (x_data < 0) * (x_data * a_data.reshape(3, 1, 1)) + (x_data>=0) * x_data
else:
ref_res = (x_data < 0) * (x_data * a_data.reshape(1, 1, 3)) + (x_data>=0) * x_data
for target, ctx in ctx_list():
intrp1 = relay.create_executor("graph", ctx=ctx, target=target)
intrp2 = relay.create_executor("debug", ctx=ctx, target=target)
op_res1 = intrp1.evaluate(func)(x_data, a_data)
tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5)
op_res2 = intrp2.evaluate(func)(x_data, a_data)
tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5)
def test_infer_type_prelu():
n, c , h, w = tvm.size_var("n"), tvm.size_var("c"), tvm.size_var("h"), tvm.size_var("w")
verify_infer_type_prelu((n, c, h, w), (c,), 1, (n, c, h, w))
verify_infer_type_prelu((n, h, w, c), (c,), 3, (n, h, w, c))
verify_infer_type_prelu((n, c, h, w), None, 1, (n, c, h, w))
verify_infer_type_prelu((n, h, w, c), None, 3, (n, h, w, c))
verify_infer_type_prelu((1, 3, 2, 2), (3,), 1, (1, 3, 2, 2))
verify_infer_type_prelu((1, 2, 2, 3), (3,), 3, (1, 2, 2, 3))
verify_infer_type_prelu((1, 3, 2, 2), None, 1, (1, 3, 2, 2))
verify_infer_type_prelu((1, 2, 2, 3), None, 3, (1, 2, 2, 3))
def test_arange():
def verify_arange(start, stop, step):
dtype = "float32"
if start is None and step is None:
x = relay.arange(relay.const(stop, dtype=dtype))
ref_res = np.arange(stop).astype(dtype)
elif start is None:
x = relay.arange(relay.const(stop, dtype=dtype), step=relay.const(step, dtype=dtype))
ref_res = np.arange(stop, step=step).astype(dtype)
elif step is None:
x = relay.arange(relay.const(start, dtype=dtype), relay.const(stop, dtype=dtype))
ref_res = np.arange(start, stop).astype(dtype)
else:
x = relay.arange(
relay.const(start, dtype=dtype),
relay.const(stop, dtype=dtype),
relay.const(step, dtype=dtype))
ref_res = np.arange(start, stop, step).astype(dtype)
func = relay.Function([], x)
for target, ctx in ctx_list():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
op_res = intrp.evaluate(func)()
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)
verify_arange(None, 20, None)
verify_arange(None, 20, 2)
verify_arange(1, 20, None)
verify_arange(1, 20, 2)
    # arange doesn't support floating point right now, see type relation
# verify_arange(1, 20, 1.5)
verify_arange(1, 20.5, None)
verify_arange(1, 20, 3)
verify_arange(20, 1, -1)
    # arange doesn't support floating point right now, see type relation
# verify_arange(20, 1, -1.5)
def test_tile():
def verify_tile(dshape, reps):
x = relay.var("x", relay.TensorType(dshape, "float32"))
z = relay.tile(x, reps=reps)
func = relay.Function([x], z)
x_data = np.random.uniform(low=-1, high=1, size=dshape).astype("float32")
ref_res = np.tile(x_data, reps=reps)
for target, ctx in ctx_list():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
op_res = intrp.evaluate(func)(x_data)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)
verify_tile((2, 3, 4), (3, 2, 1))
verify_tile((2, 3, 4), (1, 2))
verify_tile((2, 3), (3, 2, 1))
def test_repeat():
def verify_repeat(dshape, repeats, axis):
x = relay.Var("x", relay.TensorType(dshape, "float32"))
func = relay.Function([x], relay.repeat(x, repeats, axis))
data = np.random.uniform(size=dshape).astype("float32")
ref_res = np.repeat(data, repeats, axis)
for target, ctx in ctx_list():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
op_res = intrp.evaluate(func)(data)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)
verify_repeat((3,), 2, 0)
verify_repeat((3, 10), 2, -1)
verify_repeat((3, 2, 4), 3, 1)
def test_stack():
def verify_stack(dshapes, axis):
y = []
for shape in dshapes:
y.append(relay.var("input", relay.TensorType(shape, "float32")))
x = relay.Tuple(y)
z = relay.stack(x, axis=axis)
func = relay.Function(y, z)
x_data = [np.random.normal(size=shape).astype("float32") for shape in dshapes]
ref_res = np.stack(x_data, axis=axis)
for target, ctx in ctx_list():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
op_res = intrp.evaluate(func)(*x_data)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)
verify_stack([(2,), (2,), (2,)], -1)
verify_stack([(2,), (2,), (2,)], 0)
verify_stack([(2, 2, 4), (2, 2, 4), (2, 2, 4)], 1)
verify_stack([(2, 2, 3, 4), (2, 2, 3, 4), (2, 2, 3, 4), (2, 2, 3, 4)], -1)
def test_reverse():
def verify_reverse(dshape, axis):
x = relay.var("x", relay.TensorType(dshape, "float32"))
z = relay.reverse(x, axis=axis)
zz = run_infer_type(z)
func = relay.Function([x], z)
x_data = np.random.uniform(low=-1, high=1, size=dshape).astype("float32")
ref_res = np.flip(x_data, axis)
for target, ctx in ctx_list():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
op_res = intrp.evaluate(func)(x_data)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)
verify_reverse((2, 3, 4), 1)
verify_reverse((4, 7), 0)
verify_reverse((2, 3, 4), -1)
def test_gather_nd():
def verify_gather_nd(xshape, yshape, y_data):
x = relay.var("x", relay.TensorType(xshape, "float32"))
y = relay.var("y", relay.TensorType(yshape, "int32"))
z = relay.gather_nd(x, y)
func = relay.Function([x, y], z)
x_data = np.random.uniform(size=xshape).astype("float32")
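        # gather_nd interprets the leading axis of y as a tuple of
        # coordinates into the leading axes of x, so numpy's advanced
        # indexing x_data[tuple(y_data)] serves as the reference result.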
ref_res = x_data[tuple(y_data)]
for target, ctx in ctx_list():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, ctx=ctx, target=target)
op_res = intrp.evaluate(func)(x_data, y_data)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)
verify_gather_nd((2, 2), (2, 3), [[1, 1, 0], [0, 1, 0]])
verify_gather_nd((2, 2, 2), (2, 2), [[0, 1], [1, 0]])
verify_gather_nd((3, 2, 2), (2, 2), [[0, 1], [1, 0]])
verify_gather_nd((3, 2), (2, 2, 3), [[[0, 1, 2], [2, 0, 1]], [[0, 0, 0], [1, 1, 1]]])
if __name__ == "__main__":
test_arange()
test_cast()
test_zeros_ones()
test_unary_identity()
test_clip()
test_transpose_infer_type()
test_transpose()
test_reshape_infer_type()
test_reshape()
test_reshape_like_infer_type()
test_reshape_like()
test_take_infer_type()
test_take()
test_full_infer_type()
test_full()
test_full_like_infer_type()
test_full_like()
test_infer_type_leaky_relu()
test_infer_type_prelu()
test_squeeze()
test_squeeze_infer_type()
test_squeeze_bad_axes_infer_type()
test_split_infer_type()
test_reverse()
test_stack()
test_tile()
test_repeat()
test_gather_nd()
| 40.709091
| 97
| 0.586526
|
fba3ceac83f123e4217592febba595c7fd8ddb5d
| 12,698
|
py
|
Python
|
Lib/test/test_urllib2net.py
|
bendmorris/static-python
|
2e0f8c4d7ed5b359dc7d8a75b6fb37e6b6c5c473
|
[
"PSF-2.0"
] | 164
|
2015-01-04T07:04:07.000Z
|
2022-01-06T03:18:56.000Z
|
Lib/test/test_urllib2net.py
|
idobatter/cpython
|
c7b03e7b57cedccb77e37f65f9bbcb82050c2bb5
|
[
"PSF-2.0"
] | 3
|
2015-08-27T07:35:26.000Z
|
2016-04-07T16:35:39.000Z
|
Lib/test/test_urllib2net.py
|
bendmorris/static-python
|
2e0f8c4d7ed5b359dc7d8a75b6fb37e6b6c5c473
|
[
"PSF-2.0"
] | 35
|
2015-06-11T05:35:55.000Z
|
2022-01-11T19:32:00.000Z
|
#!/usr/bin/env python3
import unittest
from test import support
from test.test_urllib2 import sanepathname2url
import os
import socket
import urllib.error
import urllib.request
import sys
try:
import ssl
except ImportError:
ssl = None
TIMEOUT = 60 # seconds
def _retry_thrice(func, exc, *args, **kwargs):
for i in range(3):
try:
return func(*args, **kwargs)
except exc as e:
last_exc = e
continue
except:
raise
raise last_exc
def _wrap_with_retry_thrice(func, exc):
def wrapped(*args, **kwargs):
return _retry_thrice(func, exc, *args, **kwargs)
return wrapped
# Connecting to remote hosts is flaky. Make it more robust by retrying
# the connection several times.
_urlopen_with_retry = _wrap_with_retry_thrice(urllib.request.urlopen,
urllib.error.URLError)
class AuthTests(unittest.TestCase):
"""Tests urllib2 authentication features."""
## Disabled at the moment since there is no page under python.org which
## could be used to HTTP authentication.
#
# def test_basic_auth(self):
# import http.client
#
# test_url = "http://www.python.org/test/test_urllib2/basic_auth"
# test_hostport = "www.python.org"
# test_realm = 'Test Realm'
# test_user = 'test.test_urllib2net'
# test_password = 'blah'
#
# # failure
# try:
# _urlopen_with_retry(test_url)
# except urllib2.HTTPError, exc:
# self.assertEqual(exc.code, 401)
# else:
# self.fail("urlopen() should have failed with 401")
#
# # success
# auth_handler = urllib2.HTTPBasicAuthHandler()
# auth_handler.add_password(test_realm, test_hostport,
# test_user, test_password)
# opener = urllib2.build_opener(auth_handler)
# f = opener.open('http://localhost/')
# response = _urlopen_with_retry("http://www.python.org/")
#
# # The 'userinfo' URL component is deprecated by RFC 3986 for security
# # reasons, let's not implement it! (it's already implemented for proxy
# # specification strings (that is, URLs or authorities specifying a
# # proxy), so we must keep that)
# self.assertRaises(http.client.InvalidURL,
# urllib2.urlopen, "http://evil:thing@example.com")
class CloseSocketTest(unittest.TestCase):
def test_close(self):
# calling .close() on urllib2's response objects should close the
# underlying socket
url = "http://www.python.org/"
with support.transient_internet(url):
response = _urlopen_with_retry(url)
sock = response.fp
self.assertTrue(not sock.closed)
response.close()
self.assertTrue(sock.closed)
class OtherNetworkTests(unittest.TestCase):
def setUp(self):
if 0: # for debugging
import logging
logger = logging.getLogger("test_urllib2net")
logger.addHandler(logging.StreamHandler())
# XXX The rest of these tests aren't very good -- they don't check much.
# They do sometimes catch some major disasters, though.
def test_ftp(self):
urls = [
'ftp://ftp.kernel.org/pub/linux/kernel/README',
'ftp://ftp.kernel.org/pub/linux/kernel/non-existent-file',
#'ftp://ftp.kernel.org/pub/leenox/kernel/test',
'ftp://gatekeeper.research.compaq.com/pub/DEC/SRC'
'/research-reports/00README-Legal-Rules-Regs',
]
self._test_urls(urls, self._extra_handlers())
def test_file(self):
TESTFN = support.TESTFN
f = open(TESTFN, 'w')
try:
f.write('hi there\n')
f.close()
urls = [
'file:' + sanepathname2url(os.path.abspath(TESTFN)),
('file:///nonsensename/etc/passwd', None,
urllib.error.URLError),
]
self._test_urls(urls, self._extra_handlers(), retry=True)
finally:
os.remove(TESTFN)
self.assertRaises(ValueError, urllib.request.urlopen,'./relative_path/to/file')
# XXX Following test depends on machine configurations that are internal
# to CNRI. Need to set up a public server with the right authentication
# configuration for test purposes.
## def test_cnri(self):
## if socket.gethostname() == 'bitdiddle':
## localhost = 'bitdiddle.cnri.reston.va.us'
## elif socket.gethostname() == 'bitdiddle.concentric.net':
## localhost = 'localhost'
## else:
## localhost = None
## if localhost is not None:
## urls = [
## 'file://%s/etc/passwd' % localhost,
## 'http://%s/simple/' % localhost,
## 'http://%s/digest/' % localhost,
## 'http://%s/not/found.h' % localhost,
## ]
## bauth = HTTPBasicAuthHandler()
## bauth.add_password('basic_test_realm', localhost, 'jhylton',
## 'password')
## dauth = HTTPDigestAuthHandler()
## dauth.add_password('digest_test_realm', localhost, 'jhylton',
## 'password')
## self._test_urls(urls, self._extra_handlers()+[bauth, dauth])
def test_urlwithfrag(self):
urlwith_frag = "http://docs.python.org/2/glossary.html#glossary"
with support.transient_internet(urlwith_frag):
req = urllib.request.Request(urlwith_frag)
res = urllib.request.urlopen(req)
self.assertEqual(res.geturl(),
"http://docs.python.org/2/glossary.html#glossary")
def test_redirect_url_withfrag(self):
redirect_url_with_frag = "http://bitly.com/urllibredirecttest"
with support.transient_internet(redirect_url_with_frag):
req = urllib.request.Request(redirect_url_with_frag)
res = urllib.request.urlopen(req)
self.assertEqual(res.geturl(),
"http://docs.python.org/3.4/glossary.html#term-global-interpreter-lock")
def test_custom_headers(self):
url = "http://www.example.com"
with support.transient_internet(url):
opener = urllib.request.build_opener()
request = urllib.request.Request(url)
self.assertFalse(request.header_items())
opener.open(request)
self.assertTrue(request.header_items())
self.assertTrue(request.has_header('User-agent'))
request.add_header('User-Agent','Test-Agent')
opener.open(request)
self.assertEqual(request.get_header('User-agent'),'Test-Agent')
def test_sites_no_connection_close(self):
# Some sites do not send Connection: close header.
# Verify that those work properly. (#issue12576)
URL = 'http://www.imdb.com' # mangles Connection:close
with support.transient_internet(URL):
try:
with urllib.request.urlopen(URL) as res:
pass
except ValueError as e:
                self.fail("urlopen failed for site not sending "
                          "Connection: close")
else:
self.assertTrue(res)
req = urllib.request.urlopen(URL)
res = req.read()
self.assertTrue(res)
def _test_urls(self, urls, handlers, retry=True):
import time
import logging
debug = logging.getLogger("test_urllib2").debug
urlopen = urllib.request.build_opener(*handlers).open
if retry:
urlopen = _wrap_with_retry_thrice(urlopen, urllib.error.URLError)
for url in urls:
if isinstance(url, tuple):
url, req, expected_err = url
else:
req = expected_err = None
with support.transient_internet(url):
debug(url)
try:
f = urlopen(url, req, TIMEOUT)
except OSError as err:
debug(err)
if expected_err:
msg = ("Didn't get expected error(s) %s for %s %s, got %s: %s" %
(expected_err, url, req, type(err), err))
self.assertIsInstance(err, expected_err, msg)
except urllib.error.URLError as err:
                    if isinstance(err.reason, socket.timeout):
print("<timeout: %s>" % url, file=sys.stderr)
continue
else:
raise
else:
try:
with support.time_out, \
support.socket_peer_reset, \
support.ioerror_peer_reset:
buf = f.read()
debug("read %d bytes" % len(buf))
except socket.timeout:
print("<timeout: %s>" % url, file=sys.stderr)
f.close()
debug("******** next url coming up...")
time.sleep(0.1)
def _extra_handlers(self):
handlers = []
cfh = urllib.request.CacheFTPHandler()
self.addCleanup(cfh.clear_cache)
cfh.setTimeout(1)
handlers.append(cfh)
return handlers
class TimeoutTest(unittest.TestCase):
def test_http_basic(self):
self.assertTrue(socket.getdefaulttimeout() is None)
url = "http://www.python.org"
with support.transient_internet(url, timeout=None):
u = _urlopen_with_retry(url)
self.addCleanup(u.close)
self.assertTrue(u.fp.raw._sock.gettimeout() is None)
def test_http_default_timeout(self):
self.assertTrue(socket.getdefaulttimeout() is None)
url = "http://www.python.org"
with support.transient_internet(url):
socket.setdefaulttimeout(60)
try:
u = _urlopen_with_retry(url)
self.addCleanup(u.close)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(u.fp.raw._sock.gettimeout(), 60)
def test_http_no_timeout(self):
self.assertTrue(socket.getdefaulttimeout() is None)
url = "http://www.python.org"
with support.transient_internet(url):
socket.setdefaulttimeout(60)
try:
u = _urlopen_with_retry(url, timeout=None)
self.addCleanup(u.close)
finally:
socket.setdefaulttimeout(None)
self.assertTrue(u.fp.raw._sock.gettimeout() is None)
def test_http_timeout(self):
url = "http://www.python.org"
with support.transient_internet(url):
u = _urlopen_with_retry(url, timeout=120)
self.addCleanup(u.close)
self.assertEqual(u.fp.raw._sock.gettimeout(), 120)
FTP_HOST = "ftp://ftp.mirror.nl/pub/gnu/"
def test_ftp_basic(self):
self.assertTrue(socket.getdefaulttimeout() is None)
with support.transient_internet(self.FTP_HOST, timeout=None):
u = _urlopen_with_retry(self.FTP_HOST)
self.addCleanup(u.close)
self.assertTrue(u.fp.fp.raw._sock.gettimeout() is None)
def test_ftp_default_timeout(self):
self.assertTrue(socket.getdefaulttimeout() is None)
with support.transient_internet(self.FTP_HOST):
socket.setdefaulttimeout(60)
try:
u = _urlopen_with_retry(self.FTP_HOST)
self.addCleanup(u.close)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(u.fp.fp.raw._sock.gettimeout(), 60)
def test_ftp_no_timeout(self):
self.assertTrue(socket.getdefaulttimeout() is None)
with support.transient_internet(self.FTP_HOST):
socket.setdefaulttimeout(60)
try:
u = _urlopen_with_retry(self.FTP_HOST, timeout=None)
self.addCleanup(u.close)
finally:
socket.setdefaulttimeout(None)
self.assertTrue(u.fp.fp.raw._sock.gettimeout() is None)
def test_ftp_timeout(self):
with support.transient_internet(self.FTP_HOST):
u = _urlopen_with_retry(self.FTP_HOST, timeout=60)
self.addCleanup(u.close)
self.assertEqual(u.fp.fp.raw._sock.gettimeout(), 60)
if __name__ == "__main__":
support.requires("network")
unittest.main()
| 36.912791
| 92
| 0.578674
|
29aa0bad70406f0778ecec681799bfc9f7d60b46
| 18,715
|
py
|
Python
|
messaging/migrations/0004_auto__del_emailbox__del_field_messagestream_email_outgoing__del_field_.py
|
nuwainfo/treeio
|
f57bf9114d9774c11468a1b0e44614b04631beb1
|
[
"MIT"
] | 242
|
2015-01-01T15:08:23.000Z
|
2022-01-19T21:14:24.000Z
|
messaging/migrations/0004_auto__del_emailbox__del_field_messagestream_email_outgoing__del_field_.py
|
nuwainfo/treeio
|
f57bf9114d9774c11468a1b0e44614b04631beb1
|
[
"MIT"
] | 52
|
2015-01-05T09:13:17.000Z
|
2018-12-26T14:52:43.000Z
|
messaging/migrations/0004_auto__del_emailbox__del_field_messagestream_email_outgoing__del_field_.py
|
nuwainfo/treeio
|
f57bf9114d9774c11468a1b0e44614b04631beb1
|
[
"MIT"
] | 99
|
2015-01-09T23:28:14.000Z
|
2021-12-30T09:19:51.000Z
|
# encoding: utf-8
# Copyright 2011 Tree.io Limited
# This file is part of Treeio.
# License www.tree.io/license
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting model 'EmailBox'
db.delete_table('messaging_emailbox')
# Deleting field 'MessageStream.email_outgoing'
db.delete_column('messaging_messagestream', 'email_outgoing_id')
# Deleting field 'MessageStream.email_incoming'
db.delete_column('messaging_messagestream', 'email_incoming_id')
def backwards(self, orm):
# Adding model 'EmailBox'
db.create_table('messaging_emailbox', (
('server_password', self.gf(
'django.db.models.fields.CharField')(max_length=255)),
('email_type', self.gf(
'django.db.models.fields.CharField')(max_length=255)),
('last_checked', self.gf('django.db.models.fields.DateTimeField')
(null=True, blank=True)),
('server_type', self.gf(
'django.db.models.fields.CharField')(max_length=255)),
('email_name', self.gf(
'django.db.models.fields.CharField')(max_length=255)),
('server_name', self.gf(
'django.db.models.fields.CharField')(max_length=255)),
('object_ptr', self.gf('django.db.models.fields.related.OneToOneField')(
to=orm['core.Object'], unique=True, primary_key=True)),
('server_username', self.gf(
'django.db.models.fields.CharField')(max_length=255)),
))
db.send_create_signal('messaging', ['EmailBox'])
# Adding field 'MessageStream.email_outgoing'
db.add_column('messaging_messagestream', 'email_outgoing', self.gf('django.db.models.fields.related.ForeignKey')(
related_name='outgoing', null=True, to=orm['messaging.EmailBox'], blank=True), keep_default=False)
# Adding field 'MessageStream.email_incoming'
db.add_column('messaging_messagestream', 'email_incoming', self.gf('django.db.models.fields.related.ForeignKey')(
related_name='incoming', null=True, to=orm['messaging.EmailBox'], blank=True), keep_default=False)
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'core.accessentity': {
'Meta': {'object_name': 'AccessEntity'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'core.comment': {
'Meta': {'object_name': 'Comment'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.User']", 'null': 'True', 'blank': 'True'}),
'body': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'dislikes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'comments_disliked'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'likes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'comments_liked'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.User']"})
},
'core.group': {
'Meta': {'ordering': "['name']", 'object_name': 'Group', '_ormbases': ['core.AccessEntity']},
'accessentity_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.AccessEntity']", 'unique': 'True', 'primary_key': 'True'}),
'details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'child_set'", 'null': 'True', 'to': "orm['core.Group']"})
},
'core.object': {
'Meta': {'object_name': 'Object'},
'comments': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'comments'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.Comment']"}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'objects_created'", 'null': 'True', 'to': "orm['core.User']"}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'dislikes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'objects_disliked'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.User']"}),
'full_access': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'objects_full_access'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.AccessEntity']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'likes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'objects_liked'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.User']"}),
'links': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'links_rel_+'", 'null': 'True', 'to': "orm['core.Object']"}),
'nuvius_resource': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'object_name': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'object_type': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'read_access': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'objects_read_access'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.AccessEntity']"}),
'subscribers': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'subscriptions'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.User']"}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['core.Tag']", 'null': 'True', 'blank': 'True'}),
'trash': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'core.tag': {
'Meta': {'ordering': "['name']", 'object_name': 'Tag'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '512'})
},
'core.user': {
'Meta': {'ordering': "['name']", 'object_name': 'User', '_ormbases': ['core.AccessEntity']},
'accessentity_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.AccessEntity']", 'unique': 'True', 'primary_key': 'True'}),
'default_group': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'default_user_set'", 'null': 'True', 'to': "orm['core.Group']"}),
'disabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_access': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'other_groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['core.Group']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'identities.contact': {
'Meta': {'ordering': "['name']", 'object_name': 'Contact', '_ormbases': ['core.Object']},
'contact_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['identities.ContactType']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'child_set'", 'null': 'True', 'to': "orm['identities.Contact']"}),
'related_user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.AccessEntity']", 'null': 'True', 'blank': 'True'})
},
'identities.contactfield': {
'Meta': {'ordering': "['name']", 'object_name': 'ContactField', '_ormbases': ['core.Object']},
'allowed_values': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'field_type': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'identities.contacttype': {
'Meta': {'ordering': "['name']", 'object_name': 'ContactType', '_ormbases': ['core.Object']},
'details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'fields': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['identities.ContactField']", 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'messaging.mailinglist': {
'Meta': {'ordering': "['-date_created']", 'object_name': 'MailingList', '_ormbases': ['core.Object']},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'from_contact': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'from_contact_set'", 'to': "orm['identities.Contact']"}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'members_set'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['identities.Contact']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
'opt_in': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['messaging.Template']", 'null': 'True', 'blank': 'True'})
},
'messaging.message': {
'Meta': {'ordering': "['-date_created']", 'object_name': 'Message', '_ormbases': ['core.Object']},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['identities.Contact']"}),
'body': ('django.db.models.fields.TextField', [], {}),
'mlist': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'mlist'", 'null': 'True', 'to': "orm['messaging.MailingList']"}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
'read_by': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'read_by_user'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.User']"}),
'recipients': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'message_recipients'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['identities.Contact']"}),
'reply_to': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'child_set'", 'null': 'True', 'to': "orm['messaging.Message']"}),
'stream': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'stream'", 'null': 'True', 'to': "orm['messaging.MessageStream']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'messaging.messagestream': {
'Meta': {'ordering': "['name', 'last_updated']", 'object_name': 'MessageStream', '_ormbases': ['core.Object']},
'faulty': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'incoming_password': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'incoming_server_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'incoming_server_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'incoming_server_username': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'last_checked': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
'outgoing_email': ('django.db.models.fields.EmailField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'outgoing_password': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'outgoing_server_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'outgoing_server_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'outgoing_server_username': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'messaging.template': {
'Meta': {'object_name': 'Template', '_ormbases': ['core.Object']},
'body': ('django.db.models.fields.TextField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
'subject': ('django.db.models.fields.CharField', [], {'max_length': '255'})
}
}
complete_apps = ['messaging']
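# Added illustration (not part of the migration): South's self.gf() resolves
# a dotted path to a field class so migrations stay declarative. A minimal
# sketch of the assumed behaviour:
def _gf_sketch(dotted_path):
    from importlib import import_module
    module_path, cls_name = dotted_path.rsplit('.', 1)
    return getattr(import_module(module_path), cls_name)
# _gf_sketch('django.db.models.fields.CharField')(max_length=255) builds the
# same field instance as models.CharField(max_length=255).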
| 83.923767
| 217
| 0.577184
|
7fbaefdc55521e8475a9f5b97332209e3551265d
| 2,998
|
py
|
Python
|
predict_ML/ML/create_model.py
|
KeThien/3D-houses
|
2ebde67734889c77cbe101fcc8e9fdd4f566a245
|
[
"MIT",
"Unlicense"
] | null | null | null |
predict_ML/ML/create_model.py
|
KeThien/3D-houses
|
2ebde67734889c77cbe101fcc8e9fdd4f566a245
|
[
"MIT",
"Unlicense"
] | null | null | null |
predict_ML/ML/create_model.py
|
KeThien/3D-houses
|
2ebde67734889c77cbe101fcc8e9fdd4f566a245
|
[
"MIT",
"Unlicense"
] | null | null | null |
import os
import numpy as np
import pandas as pd
import xgboost as xgb
from sklearn.impute import KNNImputer
from sklearn.model_selection import train_test_split
import time
import pickle
from .preprocess import preprocess_data
data_path = os.path.join(os.path.dirname(__file__), "Data", "database.csv")
path_model: str = "Eliza_XGB_Model.pkl"
def knn(data_input: pd.DataFrame) -> np.ndarray:
start = time.perf_counter()
print("start knn")
imputer = KNNImputer(n_neighbors=3)
df_knn = imputer.fit_transform(data_input)
finish = time.perf_counter()
print(f"KNN fini en {round(finish-start, 2)} secondes")
return df_knn
def run():
df: pd.DataFrame = pd.read_csv(data_path, index_col=0)
model = train(df)
with open(path_model, 'wb') as file:
pickle.dump(model, file)
def train(df: pd.DataFrame) -> xgb.XGBRegressor:
    # Drop rows without the target (Y = Price); they cannot be used for training or testing
df = df[df['Price'].notna()]
with open("df_empty_pre.pkl", 'wb') as file:
df_empty = df.iloc[:0]
pickle.dump(df_empty, file)
df = preprocess_data(df)
with open("df_empty_post.pkl", 'wb') as file:
df_empty = df.iloc[:0]
pickle.dump(df_empty, file)
y = df.Price
X = df.drop(columns="Price")
# Splitting data into train and test split
X_train, X_test, y_train, y_test = train_test_split(
X, y, random_state=41, test_size=0.2)
model = xgb.XGBRegressor(random_state=0, n_jobs=6, max_depth=8, grow_policy='lossguide', max_leaves=100,
max_bin=512, reg_alpha=0, reg_lambda=0, n_estimators=1000, learning_rate=0.1,
tree_method='gpu_hist')
start = time.perf_counter()
print("start training")
model.fit(X_train, y_train)
finish = time.perf_counter()
print(f"Training fini en {round(finish-start, 2)} secondes")
print("score train")
print(model.score(X_train, y_train))
print("score test")
print(model.score(X_test, y_test))
print("MSE : ", np.sqrt(((y_test - model.predict(X_test)) ** 2).mean()))
return model
"""
df_train: pd.DataFrame = preprocess_data(df_train)
df_test: pd.DataFrame = preprocess_data(df_test)
for col in df_train.columns:
if not col in df_test.columns:
print("train to test, ", col)
df_test[col] = 0
for col in df_test.columns:
if not col in df_train.columns:
print("test to train", col)
df_train[col] = 0"""
"""
df_knn: np.ndarray = knn(df_train)
np.save("fichierKNN.data", df_knn)
# df_knn = np.load("fichierKNN.data.npy")
y_train: np.ndarray = df_knn[:, 0]
X_train: np.ndarray = df_knn[:, 1:]
#df_knn = knn(df)
#np.save("fichierKNN.data", df_knn)
df_knn = np.load("fichierKNN.data.npy")
y = df_knn[:, 0] # .reshape(-1,1)
X = df_knn[:, 1:]
# y = y.T
print(y.shape)
"""
| 26.298246
| 108
| 0.630754
|
1e33d9428ac76afea5c55cd614fa3106acb04f19
| 9,282
|
py
|
Python
|
cfgov/v1/util/ref.py
|
alexandersirris/consumerfinance.gov
|
611bd5d88188177759faa1fbc63ae57deb88cfbd
|
[
"CC0-1.0"
] | null | null | null |
cfgov/v1/util/ref.py
|
alexandersirris/consumerfinance.gov
|
611bd5d88188177759faa1fbc63ae57deb88cfbd
|
[
"CC0-1.0"
] | null | null | null |
cfgov/v1/util/ref.py
|
alexandersirris/consumerfinance.gov
|
611bd5d88188177759faa1fbc63ae57deb88cfbd
|
[
"CC0-1.0"
] | null | null | null |
import itertools
limited_categories = [
('speech-bubble', 'Blog'),
('newspaper', 'Newsroom'),
('document', 'Report'),
('pencil', "Director's notebook"),
('date', 'Events'),
('microphone', 'Speech'),
('bullhorn', 'Press release'),
('contract', 'Op-ed'),
('double-quote', 'Testimony'),
]
related_posts_categories = [
('Blog', (
('At the CFPB', 'At the CFPB'),
("Director's notebook", "Director's notebook"),
('Policy & Compliance', 'Policy and compliance'),
('Data, Research & Reports', 'Data, research, and reports'),
('Info for Consumers', 'Info for consumers'),
)),
('Newsroom', (
('Op-Ed', 'Op-ed'),
('Press Release', 'Press release'),
('Speech', 'Speech'),
('Testimony', 'Testimony'),
)),
]
page_types = [
('activity-log', 'Recent updates'),
('administrative-adjudication', 'Administrative adjudication'),
('amicus-brief', 'Amicus Brief'),
('blog', 'Blog'),
('consumer-reporting', 'Consumer Reporting Companies'),
('enforcement', 'Enforcement Action'),
('final-rule', 'Final rule'),
('foia-freq-req-record', 'FOIA Frequently Requested Record'),
('impl-resource', 'Implementation Resource'),
('leadership-calendar', 'Leadership Calendar'),
('newsroom', 'Newsroom'),
('notice-opportunity-comment', 'Notice and Opportunity for Comment'),
('research-reports', 'Research Report'),
('rule-under-dev', 'Rule Under Development'),
('story', 'Story'),
('ask', 'Ask CFPB'),
('cfpb-researchers', 'CFPB Researchers'),
]
categories = [
('Administrative adjudication docket', (
('administrative-adjudication', 'Administrative adjudication'),
('stipulation-and-constent-order', 'Stipulation and consent order'),
)),
('Amicus Brief', (
('us-supreme-court', 'U.S. Supreme Court'),
('fed-circuit-court', 'Federal Circuit Court'),
('fed-district-court', 'Federal District Court'),
('state-court', 'State Court'),
)),
('Blog', (
('at-the-cfpb', 'At the CFPB'),
('directors-notebook', "Director's notebook"),
('policy_compliance', 'Policy and compliance'),
('data-research-reports', 'Data, research, and reports'),
('info-for-consumers', 'Info for consumers'),
)),
('Consumer Reporting Companies', (
('nationwide', 'Nationwide'),
('employment-screening', 'Employment screening'),
('tenant-screening', 'Tenant screening'),
('check-bank-screening', 'Check and bank screening'),
('personal-property-insurance', 'Personal property insurance'),
('medical', 'Medical'),
('low-income-and-subprime', 'Low-income and subprime'),
('supplementary-reports', 'Supplementary reports'),
('utilities', 'Utilities'),
('retail', 'Retail'),
('gaming', 'Gaming'),
)),
('Enforcement Action', (
('administrative-proceeding', 'Administrative Proceeding'),
('civil-action', 'Civil Action'),
)),
('Final rule', (
('interim-final-rule', 'Interim final rule'),
('final-rule', 'Final rule'),
)),
('FOIA Frequently Requested Record', (
('report', 'Report'),
('log', 'Log'),
('record', 'Record'),
)),
('Implementation Resource', (
('compliance-aid', 'Compliance aid'),
('official-guidance', 'Official guidance'),
)),
('Newsroom', (
('op-ed', 'Op-ed'),
('press-release', 'Press release'),
('speech', 'Speech'),
('testimony', 'Testimony'),
)),
('Notice and Opportunity for Comment', (
('notice-proposed-rule', 'Advance notice of proposed rulemaking'),
('proposed-rule', 'Proposed rule'),
('interim-final-rule-2', 'Interim final rule'),
('request-comment-info', 'Request for comment or information'),
('proposed-policy', 'Proposed policy'),
('intent-preempt-determ', 'Intent to make preemption determination'),
('info-collect-activity', 'Information collection activities'),
('notice-privacy-act', 'Notice related to Privacy Act'),
)),
('Research Report', (
('consumer-complaint', 'Consumer complaint'),
('super-highlight', 'Supervisory Highlights'),
('data-point', 'Data point'),
('industry-markets', 'Industry and markets'),
('consumer-edu-empower', 'Consumer education and empowerment'),
('to-congress', 'To Congress'),
)),
('Rule Under Development', (
('notice-proposed-rule-2', 'Advance notice of proposed rulemaking'),
('proposed-rule-2', 'Proposed rule'),
)),
('Story', (
('auto-loans', 'Auto loans'),
('bank-accts-services', 'Bank accounts and services'),
('credit-cards', 'Credit cards'),
('credit-reports-scores', 'Credit reports and scores'),
('debt-collection', 'Debt collection'),
('money-transfers', 'Money transfers'),
('mortgages', 'Mortgages'),
('payday-loans', 'Payday loans'),
('prepaid-cards', 'Prepaid cards'),
('student-loans', 'Student loans'),
)),
]
supported_languagues = [
('en', 'English'),
('es', 'Spanish'),
('zh', 'Chinese'),
('vi', 'Vietnamese'),
('ko', 'Korean'),
('tl', 'Tagalog'),
('ru', 'Russian'),
('ar', 'Arabic'),
('ht', 'Haitian Creole'),
]
def get_appropriate_categories(specific_categories, page_type):
""" An array of specific categories is provided from whatever
is selected in the admin for related posts, however they each
correspond to a page type, e.g. newsroom or blog. This function returns
only the categories that belong to the page type in question
"""
# Convert the provided categories to their slugs
category_slugs = related_posts_category_lookup(specific_categories)
# Look up the available categories for the page type in question
options = [c[0] for c in choices_for_page_type(page_type)]
return [c for c in category_slugs if c in options]
def related_posts_category_lookup(related_categories):
related = []
for category in related_categories:
for name, related_posts_cats in related_posts_categories:
for cat in related_posts_cats:
if category == cat[0]:
related.append(cat[1])
results = []
for r in related:
for name, cats in categories:
for c in cats:
if r == c[1]:
results.append(c[0])
return results
def page_type_choices():
new_choices = [
('Recent updates', (
('blog', 'Blog'),
('op-ed', 'Op-ed'),
('press-release', 'Press release'),
('research-reports', 'Report'),
('speech', 'Speech'),
('testimony', 'Testimony'))),
('Administrative adjudication', (
('administrative-adjudication', 'Administrative adjudication'),
(
'stipulation-and-constent-order',
'Stipulation and consent order'
)
)),
('Leadership Calendar', (
('richard-cordray', 'Richard Cordray'),
('david-silberman', 'David Silberman'),
('meredith-fuchs', 'Meredith Fuchs'),
('steve-antonakes', 'Steve Antonakes'),
('raj-date', 'Raj Date'),
('elizabeth-warren', 'Elizabeth Warren'))),
('Newsroom', (
('op-ed', 'Op-ed'),
('press-release', 'Press release'),
('speech', 'Speech'),
('testimony', 'Testimony'))),
]
    # Drop base categories that are overridden by new_choices, then merge.
    # (Filtering avoids mutating the list while iterating, and the merged
    # result must use the filtered copy, not the original `categories`.)
    overridden = {choice[0] for choice in new_choices}
    categories_copy = [c for c in categories if c[0] not in overridden]
    return sorted(categories_copy + new_choices)
def choices_for_page_type(page_type):
for slug, name in page_types:
if page_type == slug:
for cat_slug, cat_tuples in page_type_choices():
if name == cat_slug:
return list(cat_tuples)
return []
def category_label(category):
for parent, children in page_type_choices():
for slug, name in children:
if category == slug:
return name
def is_blog(page):
for category in page.categories.all():
for choice in choices_for_page_type('blog'):
if category.name == choice[0]:
return True
if 'Blog' in page.specific_class.__name__:
return True
def is_event(page):
if 'Event' in page.specific_class.__name__:
return True
def is_report(page):
for category in page.categories.all():
for choice in choices_for_page_type('research-reports'):
if category.name == choice[0]:
return True
def filterable_list_page_types():
return page_types
def get_category_children(category_names):
"""Return a list of page category slugs for given category names."""
categories_dict = dict(categories)
return sorted(itertools.chain(*(
dict(categories_dict[category]).keys()
for category in category_names
)))
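# Worked example (derived from the `categories` data above): the Blog
# subcategory slugs come back sorted alphabetically.
assert get_category_children(['Blog']) == [
    'at-the-cfpb', 'data-research-reports', 'directors-notebook',
    'info-for-consumers', 'policy_compliance']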
| 34.377778
| 77
| 0.582956
|
dc4f8bd9d78e6bd46e1a82b4458f73db8958b3b8
| 359
|
py
|
Python
|
serializers.py
|
shiv27395/Smart-Cam-2017
|
aab7b1275050239a55dfc6d1f13e3806e29f1cee
|
[
"MIT"
] | null | null | null |
serializers.py
|
shiv27395/Smart-Cam-2017
|
aab7b1275050239a55dfc6d1f13e3806e29f1cee
|
[
"MIT"
] | null | null | null |
serializers.py
|
shiv27395/Smart-Cam-2017
|
aab7b1275050239a55dfc6d1f13e3806e29f1cee
|
[
"MIT"
] | null | null | null |
from myapp.models import Mode, Action
from rest_framework import serializers
class ModeSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Mode
fields = ('url', 'name')
class ActionSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Action
fields = ('url', 'name')
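# Assumed wiring (not part of this file): these serializers are typically
# exposed through DRF viewsets registered on a router, e.g.:
from rest_framework import routers, viewsets
class ModeViewSet(viewsets.ModelViewSet):
    queryset = Mode.objects.all()
    serializer_class = ModeSerializer
router = routers.DefaultRouter()
router.register(r'modes', ModeViewSet)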
| 27.615385
| 64
| 0.679666
|
701eaf3a4874223d93b2460f4dbc60eab5bf96d6
| 3,445
|
py
|
Python
|
tests/test_localscope.py
|
tillahoffmann/localscope
|
3f757edadfa042cbbb90e98cddc2c242be74428b
|
[
"MIT"
] | 2
|
2020-11-01T20:18:47.000Z
|
2021-05-15T21:08:33.000Z
|
tests/test_localscope.py
|
tillahoffmann/localscope
|
3f757edadfa042cbbb90e98cddc2c242be74428b
|
[
"MIT"
] | 3
|
2020-10-17T17:51:19.000Z
|
2020-10-23T10:37:03.000Z
|
tests/test_localscope.py
|
tillahoffmann/localscope
|
3f757edadfa042cbbb90e98cddc2c242be74428b
|
[
"MIT"
] | null | null | null |
from localscope import localscope
import uuid
import pytest
allowed_global = uuid.uuid4()
forbidden_global = uuid.uuid4()
integer_global = 17
def test_vanilla_function():
@localscope
def add(a, b):
return a + b
assert add(1, 2) == 3
def test_missing_global():
with pytest.raises(NameError):
@localscope
def func():
return never_ever_declared # noqa: F821
def test_forbidden_global():
with pytest.raises(ValueError):
@localscope
def return_forbidden_global():
return forbidden_global
def test_builtin():
@localscope
def transpose(a, b):
return list(zip(a, b))
assert transpose([1, 2], [3, 4]) == [(1, 3), (2, 4)]
def test_allowed():
@localscope(allowed=['allowed_global'])
def return_allowed_global():
return allowed_global
assert return_allowed_global() == allowed_global
def test_closure():
def wrapper():
forbidden_closure = uuid.uuid4()
@localscope
def return_forbidden_closure():
return forbidden_closure
return return_forbidden_closure()
with pytest.raises(ValueError):
wrapper()
def test_allow_any_closure():
forbidden_closure = uuid.uuid4()
def wrapper():
@localscope(allow_closure=True)
def return_forbidden_closure():
return forbidden_closure
return return_forbidden_closure()
assert wrapper() == forbidden_closure
def test_allow_custom_predicate():
decorator = localscope(predicate=lambda x: isinstance(x, int))
with pytest.raises(ValueError):
@decorator
def return_forbidden_global():
return forbidden_global
@decorator
def return_integer_global():
return integer_global
assert return_integer_global() == integer_global
def test_comprehension():
with pytest.raises(ValueError):
@localscope
def evaluate_mse(xs, ys): # missing argument integer_global
return sum(((x - y) / integer_global) ** 2 for x, y in zip(xs, ys))
def test_recursive():
with pytest.raises(ValueError):
@localscope
def wrapper():
def return_forbidden_global():
return forbidden_global
return return_forbidden_global()
def test_recursive_local_closure():
@localscope
def wrapper():
a = 'hello world'
def child():
return a
def test_mfc():
import sys
x = lambda: 0 # noqa: E731
class MyClass:
pass
# Check we can access modules, functions, and classes
@localscope.mfc
def doit():
sys.version
x()
MyClass()
x = 1
with pytest.raises(ValueError):
@localscope.mfc
def breakit():
x + 1
def test_comprehension_with_argument():
@localscope
def f(n):
return [n for i in range(n)]
assert f(2) == [2, 2]
def test_comprehension_with_closure():
@localscope
def f():
n = 3
return [n for i in range(n)]
assert f() == [3, 3, 3]
def test_argument():
@localscope
def add(a):
return a + 1
assert add(3) == 4
def test_argument_with_closure():
@localscope
def add(a):
return a + 1
lambda: a
assert add(3) == 4
def test_local_deref():
@localscope
def identity(x):
return x
lambda: x
assert identity(42) == 42
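# Rough sketch (an assumption, not the library's actual implementation) of
# the idea these tests exercise: scan a function's bytecode and reject any
# LOAD_GLOBAL that does not resolve to a builtin.
import builtins
import dis
def no_globals(func):
    for instr in dis.get_instructions(func):
        if instr.opname == 'LOAD_GLOBAL' and not hasattr(builtins, instr.argval):
            raise ValueError('forbidden global: %s' % instr.argval)
    return func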
| 20.753012
| 79
| 0.615675
|
c7d5326dc3e7d820d745b5836d018681385b625a
| 3,308
|
py
|
Python
|
TrivialImageGen/TrivialImageGen/settings.py
|
memoriasIT/TriviaCardGenerator
|
e33178fbad37050c7477c8d79729c47e40a4d6e7
|
[
"WTFPL"
] | 2
|
2021-04-04T12:33:32.000Z
|
2022-03-27T01:35:59.000Z
|
TrivialImageGen/TrivialImageGen/settings.py
|
memoriasIT/TriviaCardGenerator
|
e33178fbad37050c7477c8d79729c47e40a4d6e7
|
[
"WTFPL"
] | 3
|
2020-02-13T12:42:49.000Z
|
2022-03-12T00:15:18.000Z
|
TrivialImageGen/TrivialImageGen/settings.py
|
memoriasIT/TriviaCardGenerator
|
e33178fbad37050c7477c8d79729c47e40a4d6e7
|
[
"WTFPL"
] | null | null | null |
"""
Django settings for TrivialImageGen project.
Generated by 'django-admin startproject' using Django 3.0.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '02fr&g@l4=zu+i$1_etu^x*123@r^n2ur(wcnnvi+!h1it%z37'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'ImageGenerator.apps.ImageGeneratorConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'generatorModule'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'TrivialImageGen.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'TrivialImageGen.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
LOGIN_REDIRECT_URL = '/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
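# Hardening sketch (not in the generated file): production settings usually
# pull secrets and the debug flag from the environment instead, e.g.:
# SECRET_KEY = os.environ["DJANGO_SECRET_KEY"]
# DEBUG = os.environ.get("DJANGO_DEBUG", "") == "1"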
| 25.643411
| 91
| 0.699214
|
d5e7f4ab0dfee0372e34b0f68d087b50d0cc1d0c
| 798
|
py
|
Python
|
eventpy/lock.py
|
wqking/eventpy
|
3922afa47843523f4d68f30c755101472f8f1aca
|
[
"Apache-2.0"
] | 19
|
2020-02-16T02:00:49.000Z
|
2022-03-31T20:28:51.000Z
|
eventpy/lock.py
|
wqking/eventpy
|
3922afa47843523f4d68f30c755101472f8f1aca
|
[
"Apache-2.0"
] | 1
|
2020-05-06T09:11:02.000Z
|
2020-05-09T12:47:40.000Z
|
eventpy/lock.py
|
wqking/eventpy
|
3922afa47843523f4d68f30c755101472f8f1aca
|
[
"Apache-2.0"
] | 5
|
2020-07-23T01:36:18.000Z
|
2021-09-02T05:35:28.000Z
|
# eventpy library
# Copyright (C) 2020 Wang Qi (wqking)
# Github: https://github.com/wqking/eventpy
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
Lock = threading.RLock
class NullLock:
    def acquire(self, blocking=1):
        return self
    def release(self):
        pass
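# Usage sketch (assumed): callers pick Lock for thread-safe dispatch and
# NullLock to skip synchronization overhead in single-threaded code.
if __name__ == "__main__":
    lock = NullLock()
    lock.acquire()
    try:
        pass  # critical section; NullLock makes synchronization a no-op
    finally:
        lock.release()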
| 31.92
| 74
| 0.734336
|
b69a25e8f9a1c024885fa59efcc6b83cc65c43cd
| 6,551
|
py
|
Python
|
wargame/migrations/0001_initial.py
|
radl97/wargame-web
|
4062b05717bacb5c1a2c178f3a8bd07af7a18b10
|
[
"MIT"
] | 2
|
2020-10-06T17:07:32.000Z
|
2020-10-15T09:25:42.000Z
|
wargame/migrations/0001_initial.py
|
radl97/wargame-web
|
4062b05717bacb5c1a2c178f3a8bd07af7a18b10
|
[
"MIT"
] | 67
|
2018-06-22T09:12:44.000Z
|
2022-03-11T23:34:39.000Z
|
wargame/migrations/0001_initial.py
|
radl97/wargame-web
|
4062b05717bacb5c1a2c178f3a8bd07af7a18b10
|
[
"MIT"
] | 2
|
2020-10-05T21:13:48.000Z
|
2020-10-10T13:46:20.000Z
|
# Generated by Django 2.0.3 on 2018-03-18 22:09
from django.conf import settings
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [("auth", "0009_alter_user_last_name_max_length")]
operations = [
migrations.CreateModel(
name="User",
fields=[
("id", models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
("password", models.CharField(max_length=128, verbose_name="password")),
("last_login", models.DateTimeField(blank=True, null=True, verbose_name="last login")),
(
"is_superuser",
models.BooleanField(
default=False,
help_text="Designates that this user has all permissions without explicitly assigning them.",
verbose_name="superuser status",
),
),
(
"username",
models.CharField(
error_messages={"unique": "A user with that username already exists."},
help_text="Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.",
max_length=150,
unique=True,
validators=[django.contrib.auth.validators.UnicodeUsernameValidator()],
verbose_name="username",
),
),
("first_name", models.CharField(blank=True, max_length=30, verbose_name="first name")),
("last_name", models.CharField(blank=True, max_length=150, verbose_name="last name")),
("email", models.EmailField(blank=True, max_length=254, verbose_name="email address")),
(
"is_staff",
models.BooleanField(
default=False,
help_text="Designates whether the user can log into this admin site.",
verbose_name="staff status",
),
),
(
"is_active",
models.BooleanField(
default=True,
help_text="Designates whether this user should be treated as active. Unselect this instead of deleting accounts.",
verbose_name="active",
),
),
("date_joined", models.DateTimeField(default=django.utils.timezone.now, verbose_name="date joined")),
(
"groups",
models.ManyToManyField(
blank=True,
help_text="The groups this user belongs to. A user will get all permissions granted to each of their groups.",
related_name="user_set",
related_query_name="user",
to="auth.Group",
verbose_name="groups",
),
),
(
"user_permissions",
models.ManyToManyField(
blank=True,
help_text="Specific permissions for this user.",
related_name="user_set",
related_query_name="user",
to="auth.Permission",
verbose_name="user permissions",
),
),
],
options={"verbose_name": "user", "verbose_name_plural": "users", "abstract": False},
managers=[("objects", django.contrib.auth.models.UserManager())],
),
migrations.CreateModel(
name="Challenge",
fields=[
("id", models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
("title", models.CharField(max_length=256)),
("creation_dt", models.DateTimeField(auto_now_add=True)),
("description", models.CharField(max_length=8192)),
("level", models.IntegerField()),
("flag_qpa", models.CharField(max_length=256, null=True)),
("flag_hacktivity", models.CharField(max_length=256, null=True)),
("points", models.IntegerField()),
("hint", models.CharField(max_length=8192, null=True)),
],
),
migrations.CreateModel(
name="File",
fields=[
("id", models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
("path", models.CharField(max_length=512)),
("private", models.BooleanField(default=False)),
],
),
migrations.CreateModel(
name="Submission",
fields=[
("id", models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
("creation_dt", models.DateTimeField(auto_now_add=True)),
("value", models.CharField(max_length=256)),
],
),
migrations.CreateModel(
name="Tag", fields=[("name", models.CharField(max_length=64, primary_key=True, serialize=False))]
),
migrations.CreateModel(
name="UserChallenge",
fields=[
("id", models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
("hint_used", models.BooleanField(default=False)),
("challenge", models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="wargame.Challenge")),
("user", models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name="submission",
name="user_challenge",
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="wargame.UserChallenge"),
),
migrations.AddField(model_name="challenge", name="tags", field=models.ManyToManyField(to="wargame.Tag")),
migrations.AlterUniqueTogether(name="userchallenge", unique_together={("user", "challenge")}),
]
| 47.129496
| 138
| 0.528622
|
0c87a70b6bc1d4ececf437cfbf013fa0602b705f
| 429
|
py
|
Python
|
plotly/validators/contour/_selectedpoints.py
|
faezs/plotly.py
|
6009b5b9c746e5d2a2849ad255a4eb234b551ed7
|
[
"MIT"
] | 2
|
2020-03-24T11:41:14.000Z
|
2021-01-14T07:59:43.000Z
|
plotly/validators/contour/_selectedpoints.py
|
faezs/plotly.py
|
6009b5b9c746e5d2a2849ad255a4eb234b551ed7
|
[
"MIT"
] | null | null | null |
plotly/validators/contour/_selectedpoints.py
|
faezs/plotly.py
|
6009b5b9c746e5d2a2849ad255a4eb234b551ed7
|
[
"MIT"
] | 4
|
2019-06-03T14:49:12.000Z
|
2022-01-06T01:05:12.000Z
|
import _plotly_utils.basevalidators
class SelectedpointsValidator(_plotly_utils.basevalidators.AnyValidator):
def __init__(
self, plotly_name='selectedpoints', parent_name='contour', **kwargs
):
super(SelectedpointsValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type='calc',
role='info',
**kwargs
)
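# Hypothetical illustration (not part of this module): the trace attribute
# this validator guards accepts any value, e.g.:
# import plotly.graph_objs as go
# trace = go.Contour(z=[[1, 2], [3, 4]], selectedpoints=[0, 3])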
| 26.8125
| 75
| 0.641026
|
0db5b8c26fe7389933b88ea5ae3284256c6eb8c1
| 196
|
py
|
Python
|
PYTHON/Python-Estudos/cars_01.py
|
sourcery-ai-bot/Estudos
|
de88ba326cdd9c17a456161cdb2f9ca69f7da65e
|
[
"MIT"
] | null | null | null |
PYTHON/Python-Estudos/cars_01.py
|
sourcery-ai-bot/Estudos
|
de88ba326cdd9c17a456161cdb2f9ca69f7da65e
|
[
"MIT"
] | 1
|
2021-03-02T07:45:49.000Z
|
2021-03-02T07:45:49.000Z
|
PYTHON/Python-Estudos/cars_01.py
|
angrycaptain19/Estudos
|
bbdc6a7399635312da272a62639157132bcff4a0
|
[
"MIT"
] | 2
|
2021-03-02T07:31:47.000Z
|
2021-03-03T08:12:05.000Z
|
cars = ['audi', 'bmw', 'subaru', 'toyota']
for car in cars:
    if car == 'bmw': # IF THE VARIABLE'S VALUE == THE VALUE OF INTEREST:
print(car.upper())
else:
print(car.title())
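# Expected output:
#   Audi
#   BMW
#   Subaru
#   Toyota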
| 28
| 71
| 0.545918
|
e6bf52b60a67294a1386715320b25ea6f628fd77
| 304
|
py
|
Python
|
affo/wipe_and_create.py
|
Djcoldcrown/unfinished-natsim-code
|
a0e9bd2bcffd90bffb01aeaf5aa6e5d4d6b6847b
|
[
"BSD-2-Clause"
] | 2
|
2020-07-28T05:50:39.000Z
|
2020-08-17T20:01:43.000Z
|
affo/wipe_and_create.py
|
Djcoldcrown/unfinished-natsim-code
|
a0e9bd2bcffd90bffb01aeaf5aa6e5d4d6b6847b
|
[
"BSD-2-Clause"
] | 1
|
2020-07-28T14:53:30.000Z
|
2020-07-28T15:36:56.000Z
|
affo/wipe_and_create.py
|
Djcoldcrown/unfinished-natsim-code
|
a0e9bd2bcffd90bffb01aeaf5aa6e5d4d6b6847b
|
[
"BSD-2-Clause"
] | null | null | null |
import sqlite3
conn = sqlite3.connect('aao.db')
db = conn.cursor()
tables = ["air", "coalitions", "colNames", "ground", "provinces",
          "requests", "special", "stats", "users", "water"]
for i in tables:
    db.execute(f"DROP TABLE {i}")
    # Recreate the table from the CREATE TABLE statement stored in raw/<name>.txt
    with open(f"raw/{i}.txt", "r") as txt:
        db.execute(txt.read())
conn.commit()  # persist the wipe; without this the changes may be lost
conn.close()
| 23.384615
| 65
| 0.605263
|
abab57ae15bef2e978bb8d63b8b67cdae9636f4e
| 6,185
|
py
|
Python
|
HostAgent/agentFreFrp.py
|
pupeng/hone
|
8fb2618a51478049c73158f1d54e7165a37dffcf
|
[
"BSD-3-Clause"
] | 5
|
2017-02-18T12:39:13.000Z
|
2021-03-29T09:21:58.000Z
|
HostAgent/agentFreFrp.py
|
pupeng/hone
|
8fb2618a51478049c73158f1d54e7165a37dffcf
|
[
"BSD-3-Clause"
] | null | null | null |
HostAgent/agentFreFrp.py
|
pupeng/hone
|
8fb2618a51478049c73158f1d54e7165a37dffcf
|
[
"BSD-3-Clause"
] | 7
|
2015-08-12T10:08:21.000Z
|
2018-08-30T12:55:25.000Z
|
################################################################################
# The Frenetic Project #
# frenetic@frenetic-lang.org #
################################################################################
# Licensed to the Frenetic Project by one or more contributors. See the #
# NOTICE file distributed with this work for additional information #
# regarding copyright and ownership. The Frenetic Project licenses this #
# file to you under the following license. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided the following conditions are met: #
# - Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# - Redistributions in binary form must reproduce the above copyright #
# notice, this list of conditions and the following disclaimer in #
# the documentation or other materials provided with the distribution. #
#    - The names of the copyright holders and contributors may not be used #
# endorse or promote products derived from this work without specific #
# prior written permission. #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT #
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the #
# LICENSE file distributed with this work for specific language governing #
# permissions and limitations under the License. #
################################################################################
# /src/frenetic_frp.py #
# Frenetic FRP representation #
# $Id$ #
################################################################################
import threading
import agentFreUtil as util
def mk_add_listener(self):
def f(l):
x = self.next()
self.listeners[x] = l
def d():
del self.listeners[x]
return d
return f
def mk_prepare(self):
def f():
for l in self.listeners.itervalues():
l.prepare()
return f
def mk_push(self):
def f(x):
for l in self.listeners.itervalues():
l.push(x)
return f
def mk_finish(self):
def f():
for l in self.listeners.itervalues():
l.finish()
return f
def mk_terminate(self):
def f():
for l in self.listeners.itervalues():
l.terminate()
return f
def mk_next(self):
def f():
self.lid = self.lid + 1
return self.lid
return f
# Composition operators
def ComposeEFEF(ef1,ef2):
ef1.add_listener(ef2)
ef = FEventFun(ef2.add_listener,ef1.prepare,ef1.push,ef1.finish,ef1.terminate)
return ef
def ComposeEEF(e1,ef2):
    e = None  # assigned below; the inner closures only read it at call time
def prepare():
e.fresh = False
ef2.prepare()
def push(x):
ef2.push(x)
def finish():
ef2.finish()
e.fresh = True
e1.add_listener(FListener(prepare,push,finish,ef2.terminate))
e = FEvent(ef2.add_listener)
return e
def ComposeEL(e,l):
e.add_listener(l)
# Events
class FEvent:
def __init__(self,add_listener=None,type=None):
self.fresh = True
self.lid = 0
self.listeners = {}
self.next = mk_next(self)
self.add_listener = mk_add_listener(self) if add_listener is None else add_listener
self.type = type
def __rshift__(self,other):
other_name = other.__class__.__name__
if other_name == "FEventFun":
return ComposeEEF(self,other)
elif other_name == "FListener":
return ComposeEL(self,other)
else:
raise util.IllegalArgument("Cannot compose FEvent and %s" % other_name)
# Event functions
class FEventFun:
def __init__(self,add_listener=None,prepare=None,push=None,finish=None,terminate=None,type_fun=None):
self.listeners = {}
self.lid = 0
self.next = mk_next(self)
self.add_listener = mk_add_listener(self) if add_listener is None else add_listener
self.prepare = mk_prepare(self) if prepare is None else prepare
self.push = mk_push(self) if push is None else push
self.finish = mk_finish(self) if finish is None else finish
self.terminate = mk_terminate(self) if terminate is None else terminate
self.type_fun = (lambda x : None) if type_fun is None else type_fun
def __rshift__(self,other):
other_name = other.__class__.__name__
if other_name == "FEventFun":
return ComposeEFEF(self,other)
else:
raise util.IllegalArgument("Cannot compose FEvent and %s" % other_name)
# Listeners
class FListener:
def __init__(self,prepare=(lambda:()),push=lambda x:(),finish=lambda:(),terminate=(lambda:())):
self.prepare = prepare
self.push = push
self.finish = finish
self.terminate = terminate
# Behaviors
class FBehavior:
def __init__(self,pull,type=None):
self.pull = pull
self.type = type
# event_lock: a global lock ensuring atomic propagation of events.
event_lock = threading.Lock()
# RawEvent: generates an event stream and a "go" function to propagate a value
def RawEvent():
e = FEvent()
def go(x):
event_lock.acquire()
e.fresh = False
for l in e.listeners.itervalues():
l.prepare()
for l in e.listeners.itervalues():
l.push(x)
for l in e.listeners.itervalues():
l.finish()
event_lock.release()
e.fresh = True
return (e,go)
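# Usage sketch (assumed; exercises the composition operators defined above):
if __name__ == "__main__":
    import sys
    def _show(x):
        sys.stdout.write("got %s\n" % x)
    (e, go) = RawEvent()
    e >> FListener(push=_show)   # ComposeEL attaches the listener
    go(42)                       # propagates 42 to the listener under event_lock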
| 37.035928
| 105
| 0.55861
|
024b5dc8ebefd3220dd6af1c910af965b0e0f593
| 509
|
py
|
Python
|
user_admin.py
|
heidingew/enable-disable-network-adapter
|
1cbcd6f526098160630399be13ac9e2bf6e4a8f8
|
[
"MIT"
] | null | null | null |
user_admin.py
|
heidingew/enable-disable-network-adapter
|
1cbcd6f526098160630399be13ac9e2bf6e4a8f8
|
[
"MIT"
] | null | null | null |
user_admin.py
|
heidingew/enable-disable-network-adapter
|
1cbcd6f526098160630399be13ac9e2bf6e4a8f8
|
[
"MIT"
] | null | null | null |
import os
import ctypes
class AdminStateUnknownError(Exception):
"""Cannot determine whether the user is an admin."""
pass
# Function checks if application is running as Administrator
# type: () -> bool
def is_user_admin():
"""Return True if user has admin privileges.
Raises:
AdminStateUnknownError if user privileges cannot be determined.
"""
try:
return ctypes.windll.shell32.IsUserAnAdmin() == 1
except AttributeError:
raise AdminStateUnknownError
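# Usage sketch (Windows-only, since it relies on ctypes.windll):
if __name__ == "__main__":
    try:
        print("Running as admin:", is_user_admin())
    except AdminStateUnknownError:
        print("Admin state unknown (likely a non-Windows platform).")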
| 23.136364
| 71
| 0.70334
|
64593f42021c21469f3da0051093061a61a13e16
| 73,612
|
py
|
Python
|
gpMgmt/bin/gppylib/programs/clsSystemState.py
|
iyerr3/gpdb
|
19f36828368e407a883de79134b34c3e33e1865a
|
[
"PostgreSQL",
"Apache-2.0"
] | 2
|
2019-01-29T06:59:26.000Z
|
2019-05-17T08:59:11.000Z
|
gpMgmt/bin/gppylib/programs/clsSystemState.py
|
iyerr3/gpdb
|
19f36828368e407a883de79134b34c3e33e1865a
|
[
"PostgreSQL",
"Apache-2.0"
] | null | null | null |
gpMgmt/bin/gppylib/programs/clsSystemState.py
|
iyerr3/gpdb
|
19f36828368e407a883de79134b34c3e33e1865a
|
[
"PostgreSQL",
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
#
# Copyright (c) Greenplum Inc 2010. All Rights Reserved.
#
# import mainUtils FIRST to get python version check
# THIS IMPORT SHOULD COME FIRST
from gppylib.mainUtils import *
from optparse import Option, OptionGroup, OptionParser, OptionValueError, SUPPRESS_USAGE
import os, sys, getopt, socket, StringIO, signal
import datetime
from gppylib import gparray, gplog, pgconf, userinput, utils
from gppylib.commands import base, gp, pg, unix
from gppylib.db import catalog, dbconn
from gppylib.gpparseopts import OptParser, OptChecker
from gppylib.operations.startSegments import *
from gppylib.operations.buildMirrorSegments import *
from gppylib.programs import programIoUtils
from gppylib.system import configurationInterface as configInterface
from gppylib.system.environment import GpMasterEnvironment
from gppylib.utils import toNonNoneString, checkNotNone, readAllLinesFromFile, writeLinesToFile, TableLogger
logger = gplog.get_default_logger()
class FieldDefinition:
"""
Represent a field of our data. Note that we could infer columnName from name, but we would like
for columnName to be more stable than "name"
"""
def __init__(self, name, columnName, columnType, shortLabel=None):
self.__name = name
self.__columnName = columnName
self.__columnType = columnType
self.__shortLabel = shortLabel if shortLabel is not None else name
def getName(self): return self.__name
def getColumnName(self): return self.__columnName
def getColumnType(self): return self.__columnType
def getShortLabel(self): return self.__shortLabel
#
    # __str__ needs to return name -- we use this for output in some cases right now
#
def __str__(self): return self.__name
CATEGORY__SEGMENT_INFO = "Segment Info"
VALUE__HOSTNAME = FieldDefinition("Hostname", "hostname", "text")
VALUE__ADDRESS = FieldDefinition("Address", "address", "text")
VALUE__DATADIR = FieldDefinition("Datadir", "datadir", "text")
VALUE__PORT = FieldDefinition("Port", "port", "int")
CATEGORY__MIRRORING_INFO = "Mirroring Info"
VALUE__CURRENT_ROLE = FieldDefinition("Current role", "role", "text") # can't use current_role as name -- it's a reserved word
VALUE__PREFERRED_ROLE = FieldDefinition("Preferred role", "preferred_role", "text")
VALUE__MIRROR_STATUS = FieldDefinition("Mirror status", "mirror_status", "text")
CATEGORY__ERROR_GETTING_SEGMENT_STATUS = "Error Getting Segment Status"
VALUE__ERROR_GETTING_SEGMENT_STATUS = FieldDefinition("Error Getting Segment Status", "error_getting_status", "text")
CATEGORY__CHANGE_TRACKING_INFO = "Change Tracking Info"
VALUE__CHANGE_TRACKING_DATA_SIZE = FieldDefinition("Change tracking data size", "change_tracking_data_size", "text", "Change tracking size")
CATEGORY__RESYNCHRONIZATION_INFO = "Resynchronization Info"
VALUE__RESYNC_MODE = FieldDefinition("Resynchronization mode", "resync_mode", "text", "Resync mode")
VALUE__RESYNC_DATA_SYNCHRONIZED = FieldDefinition("Data synchronized", "data_synced_str", "text", "Data synced")
VALUE__RESYNC_EST_TOTAL_DATA = FieldDefinition("Estimated total data to synchronize", "est_total_bytes_to_sync_str", "text", "Est. total to sync")
VALUE__RESYNC_EST_PROGRESS_WITH_MIRROR = FieldDefinition("Estimated resync progress with mirror", "est_resync_progress_str", "text", "Est. resync progress")
VALUE__TOTAL_RESYNC_OBJECT_COUNT = FieldDefinition("Total resync objects", "totalResyncObjectCount", "text", "Total resync objects")
VALUE__RESYNC_OBJECT_COUNT = FieldDefinition("Objects to resync", "curResyncObjectCount", "text", "Objects to resync")
VALUE__RESYNC_EST_COMPLETION_TIME = FieldDefinition("Estimated resync end time", "est_resync_end_time_str", "text", "Est. resync end time")
CATEGORY__STATUS = "Status"
VALUE__MASTER_REPORTS_STATUS = FieldDefinition("Configuration reports status as", "status_in_config", "text", "Config status")
VALUE__MIRROR_SEGMENT_STATUS = FieldDefinition("Segment status", "segment_status", "text") # must not be same name as VALUE__SEGMENT_STATUS
VALUE__NONMIRROR_DATABASE_STATUS = FieldDefinition("Database status", "database_status", "text")
VALUE__ACTIVE_PID = FieldDefinition("PID", "active_pid", "text") # int would be better, but we print error messages here sometimes
# these are not in a category, used for other logging
VALUE__SEGMENT_STATUS = FieldDefinition("Instance status", "instance_status", "text", "Status")
VALUE__DBID = FieldDefinition("dbid", "dbid", "int")
VALUE__CONTENTID = FieldDefinition("contentid", "contentid", "int")
VALUE__RESYNC_DATA_SYNCHRONIZED_BYTES = FieldDefinition("Data synchronized (bytes)", "bytes_synced", "int8")
VALUE__RESYNC_EST_TOTAL_DATA_BYTES = FieldDefinition("Estimated total data to synchronize (bytes)", "est_total_bytes_to_sync", "int8")
VALUE__RESYNC_EST_PROGRESS_WITH_MIRROR_NUMERIC = FieldDefinition("Estimated resync progress with mirror (numeric)", "est_resync_progress_pct", "float")
VALUE__TOTAL_RESYNC_OBJECT_COUNT_INT = FieldDefinition("Total Resync Object Count (int)", "totalResyncObjCount", "int")
VALUE__RESYNC_OBJECT_COUNT_INT = FieldDefinition("Resync Object Count (int)", "curResyncObjCount", "int")
VALUE__RESYNC_EST_COMPLETION_TIME_TIMESTAMP = FieldDefinition("Estimated resync end time (timestamp)", "est_resync_end_time", "timestamp")
VALUE__HAS_DATABASE_STATUS_WARNING = FieldDefinition("Has database status warning", "has_status_warning", "bool")
VALUE__VERSION_STRING = FieldDefinition("Version", "version", "text")
VALUE__POSTMASTER_PID_FILE_EXISTS = FieldDefinition("File postmaster.pid (boolean)", "postmaster_pid_file_exists", "bool")
VALUE__POSTMASTER_PID_VALUE_INT = FieldDefinition("PID from postmaster.pid file (int)", "postmaster_pid", "int", "pid file PID")
VALUE__LOCK_FILES_EXIST = FieldDefinition("Lock files in /tmp (boolean)", "lock_files_exist", "bool", "local files exist")
VALUE__ACTIVE_PID_INT = FieldDefinition("Active PID (int)", "active_pid", "int")
VALUE__CHANGE_TRACKING_DATA_SIZE_BYTES = FieldDefinition("Change tracking data size (bytes)", "change_tracking_bytes", "int8")
VALUE__POSTMASTER_PID_FILE = FieldDefinition("File postmaster.pid", "postmaster_pid_file_exists", "text", "pid file exists") # boolean would be nice
VALUE__POSTMASTER_PID_VALUE = FieldDefinition("PID from postmaster.pid file", "postmaster_pid", "text", "pid file PID") # int would be better, but we print error messages here sometimes
VALUE__LOCK_FILES = FieldDefinition("Lock files in /tmp", "lock_files_exist", "text", "lock files exist") # boolean would be nice
class GpStateData:
"""
Store key-value pairs of unpacked data for each segment in the cluster
Also provides categories on top of this
To add new values:
1) add CATEGORY_* and VALUE* constants as appropriate
2) update self.__categories and self.__entriesByCategories below
3) call .addValue from the code that loads the values (search for it down below)
"""
    def __init__(self):
self.__segmentData = []
self.__segmentDbIdToSegmentData = {}
self.__dbIdIsProbablyDown = {}
self.__contentsWithUpSegments = {}
self.__currentSegmentData = None
self.__categories = [
CATEGORY__SEGMENT_INFO,
CATEGORY__MIRRORING_INFO,
CATEGORY__ERROR_GETTING_SEGMENT_STATUS,
CATEGORY__CHANGE_TRACKING_INFO,
CATEGORY__RESYNCHRONIZATION_INFO,
CATEGORY__STATUS]
self.__entriesByCategory = {}
self.__entriesByCategory[CATEGORY__SEGMENT_INFO] = \
[VALUE__HOSTNAME,
VALUE__ADDRESS,
VALUE__DATADIR,
VALUE__PORT]
self.__entriesByCategory[CATEGORY__MIRRORING_INFO] = \
[VALUE__CURRENT_ROLE,
VALUE__PREFERRED_ROLE,
VALUE__MIRROR_STATUS]
self.__entriesByCategory[CATEGORY__ERROR_GETTING_SEGMENT_STATUS] = \
[VALUE__ERROR_GETTING_SEGMENT_STATUS]
self.__entriesByCategory[CATEGORY__CHANGE_TRACKING_INFO] = \
[VALUE__CHANGE_TRACKING_DATA_SIZE]
self.__entriesByCategory[CATEGORY__RESYNCHRONIZATION_INFO] = \
[VALUE__RESYNC_MODE,
VALUE__RESYNC_DATA_SYNCHRONIZED,
VALUE__RESYNC_EST_TOTAL_DATA,
VALUE__RESYNC_EST_PROGRESS_WITH_MIRROR,
VALUE__TOTAL_RESYNC_OBJECT_COUNT,
VALUE__RESYNC_OBJECT_COUNT,
VALUE__RESYNC_EST_COMPLETION_TIME]
self.__entriesByCategory[CATEGORY__STATUS] = \
[VALUE__ACTIVE_PID,
VALUE__MASTER_REPORTS_STATUS,
VALUE__MIRROR_SEGMENT_STATUS,
VALUE__NONMIRROR_DATABASE_STATUS]
self.__allValues = {}
for k in [VALUE__SEGMENT_STATUS, VALUE__DBID, VALUE__CONTENTID,
VALUE__RESYNC_DATA_SYNCHRONIZED_BYTES, VALUE__RESYNC_EST_TOTAL_DATA_BYTES,
VALUE__RESYNC_EST_PROGRESS_WITH_MIRROR_NUMERIC, VALUE__TOTAL_RESYNC_OBJECT_COUNT_INT,
VALUE__RESYNC_OBJECT_COUNT_INT, VALUE__RESYNC_EST_COMPLETION_TIME_TIMESTAMP, VALUE__HAS_DATABASE_STATUS_WARNING,
VALUE__VERSION_STRING, VALUE__POSTMASTER_PID_FILE_EXISTS, VALUE__LOCK_FILES_EXIST,
VALUE__ACTIVE_PID_INT, VALUE__POSTMASTER_PID_VALUE_INT,
VALUE__CHANGE_TRACKING_DATA_SIZE_BYTES,
VALUE__POSTMASTER_PID_FILE, VALUE__POSTMASTER_PID_VALUE, VALUE__LOCK_FILES
]:
self.__allValues[k] = True
for values in self.__entriesByCategory.values():
for v in values:
self.__allValues[v] = True
def beginSegment(self, segment):
self.__currentSegmentData = {}
self.__currentSegmentData["values"] = {}
self.__currentSegmentData["isWarning"] = {}
self.__segmentData.append(self.__currentSegmentData)
self.__segmentDbIdToSegmentData[segment.getSegmentDbId()] = self.__currentSegmentData
def addValue(self, key, value, isWarning=False):
        assert key in self.__allValues
        self.__currentSegmentData["values"][key] = value
        self.__currentSegmentData["isWarning"][key] = isWarning
def isClusterProbablyDown(self, gpArray):
"""
        Approximate whether the cluster has a problem and needs review.
        This could be beefed up -- for example, a mirror is only useful
        if we are in resync mode.
"""
for seg in gpArray.getSegDbList():
if seg.getSegmentContentId() not in self.__contentsWithUpSegments:
return True
return False
def setSegmentProbablyDown(self, seg, peerPrimary, isThisSegmentProbablyDown):
"""
Mark whether this segment is probably down (based on isThisSegmentProbablyDown)
@param peerPrimary: if this is a mirror in file replication mode, this will be its primary
"""
if isThisSegmentProbablyDown:
self.__dbIdIsProbablyDown[seg.getSegmentDbId()] = True
else:
#
# a segment is "good to use" for the cluster only if it's a primary, or a mirror whose
# primary says that they are in sync (primary not in changetracking or resync)
#
isGoodToUse = seg.isSegmentPrimary(current_role=True) or peerPrimary.isSegmentModeSynchronized()
if isGoodToUse:
self.__contentsWithUpSegments[seg.getSegmentContentId()] = True
def isSegmentProbablyDown(self, seg):
return seg.getSegmentDbId() in self.__dbIdIsProbablyDown
def addSegmentToTableLogger(self, tabLog, segment, suppressCategories={}):
"""
@param suppressCategories map from [categoryName-> true value] for category names that should be suppressed
"""
for cat in self.__categories:
if not suppressCategories.get(cat):
keys = self.__entriesByCategory[cat]
self.addSectionToTableLogger(tabLog, segment, cat, keys)
def getStrValue(self, segment, key, defaultIfNone=""):
data = self.__segmentDbIdToSegmentData[segment.getSegmentDbId()]
valuesMap = data["values"]
val = valuesMap.get(key)
if val is None:
val = defaultIfNone
else:
val = str(val)
return val
def addSectionToTableLogger(self, tabLog, segment, sectionHeader, keys, categoryIndent="", indent=" "):
data = self.__segmentDbIdToSegmentData[segment.getSegmentDbId()]
valuesMap = data["values"]
isWarningMap = data["isWarning"]
hasValue = False
for k in keys:
if k in valuesMap:
hasValue = True
break
if not hasValue:
#
# skip sections for which we have no values!
#
return
tabLog.info([categoryIndent + sectionHeader])
for k in keys:
if k in valuesMap:
val = valuesMap[k]
if val is None:
val = ""
else:
val = str(val)
tabLog.infoOrWarn(isWarningMap[k], ["%s%s" %(indent, k), "= %s" % val])
#-------------------------------------------------------------------------
class GpSystemStateProgram:
#
# Constructor:
#
# @param options the options as returned by the options parser
#
def __init__(self, options):
self.__options = options
self.__pool = None
def __appendSegmentTripletToArray(self, segment, line):
"""
returns line
@param the line to which to append the triplet of address/datadir/port
"""
line.append(segment.getSegmentAddress())
line.append(segment.getSegmentDataDirectory())
line.append(str(segment.getSegmentPort()))
return line
def __getMirrorType(self, gpArray):
if gpArray.hasMirrors:
if gpArray.guessIsSpreadMirror():
return "Spread"
else:
return "Group"
else:
return "No Mirror"
def __showClusterConfig(self, gpEnv, gpArray):
"""
Returns the exitCode
"""
if gpArray.hasMirrors:
logger.info("-------------------------------------------------------------" )
logger.info("-Current GPDB mirror list and status" )
logger.info("-Type = %s" % self.__getMirrorType(gpArray) )
logger.info("-------------------------------------------------------------" )
primarySegments = [ seg for seg in gpArray.getSegDbList() if seg.isSegmentPrimary(False) ]
mirrorSegments = [ seg for seg in gpArray.getSegDbList() if seg.isSegmentMirror(False) ]
contentIdToMirror = GpArray.getSegmentsByContentId(mirrorSegments)
tabLog = TableLogger().setWarnWithArrows(True)
tabLog.info(["Status", "Data State", "Primary", "Datadir", "Port", "Mirror", "Datadir", "Port"])
numInChangeTracking = 0
numMirrorsActingAsPrimaries = 0
for primary in primarySegments:
mirror = contentIdToMirror[primary.getSegmentContentId()][0]
doWarn = False
status = ""
if primary.isSegmentMirror(True):
actingPrimary = mirror
actingMirror = primary
actMirrorStatus = "Available" if actingMirror.isSegmentUp() else "Failed"
status = "Mirror Active, Primary %s" % (actMirrorStatus)
numMirrorsActingAsPrimaries += 1
else:
actingPrimary = primary
actingMirror = mirror
actMirrorStatus = "Available" if actingMirror.isSegmentUp() else "Failed"
status = "Primary Active, Mirror %s" % (actMirrorStatus)
if actingPrimary.isSegmentModeInChangeLogging():
doWarn = True
numInChangeTracking += 1
dataStatus = gparray.getDataModeLabel(actingPrimary.getSegmentMode())
line = [status, dataStatus]
self.__appendSegmentTripletToArray(primary, line)
self.__appendSegmentTripletToArray(mirror, line)
tabLog.infoOrWarn(doWarn, line)
tabLog.outputTable()
logger.info("-------------------------------------------------------------" )
if numMirrorsActingAsPrimaries > 0:
logger.warn( "%s segment(s) configured as mirror(s) are acting as primaries" % numMirrorsActingAsPrimaries )
if numInChangeTracking > 0:
logger.warn( "%s segment(s) are in change tracking" % numInChangeTracking)
else:
logger.info("-------------------------------------------------------------" )
logger.info("-Primary list [Mirror not used]")
logger.info("-------------------------------------------------------------" )
tabLog = TableLogger().setWarnWithArrows(True)
tabLog.info(["Primary", "Datadir", "Port"])
            for seg in gpArray.getSegDbList():
tabLog.info(self.__appendSegmentTripletToArray(seg, []))
tabLog.outputTable()
logger.info("-------------------------------------------------------------" )
return 0
    def _showMirrorList(self, gpEnv, gpArray):
"""
Returns the exitCode
"""
exitCode = 0
if gpArray.hasMirrors:
tabLog = TableLogger().setWarnWithArrows(True)
tabLog.info(["Mirror","Datadir", "Port", "Status", "Data Status", ""])
# based off the bash version of -m "mirror list" option,
# the mirror list prints information about defined mirrors only
mirrorSegments = [ seg for seg in gpArray.getSegDbList() if seg.isSegmentMirror(False) ]
numMirrorsActingAsPrimaries = 0
numFailedMirrors = 0
numChangeTrackingMirrors = 0
for seg in mirrorSegments:
doWarn = False
status = ""
dataStatus = gparray.getDataModeLabel(seg.getSegmentMode())
if seg.isSegmentPrimary(True):
status = "Acting as Primary"
if seg.isSegmentModeInChangeLogging():
numChangeTrackingMirrors += 1
numMirrorsActingAsPrimaries += 1
elif seg.isSegmentUp():
status = "Passive"
else:
status = "Failed"
dataStatus = ""
doWarn = True
numFailedMirrors += 1
if doWarn:
exitCode = 1
line = self.__appendSegmentTripletToArray(seg, [])
line.extend([status, dataStatus])
tabLog.infoOrWarn(doWarn, line)
logger.info("-------------------------------------------------------------" )
logger.info("-Current GPDB mirror list and status" )
logger.info("-Type = %s" % self.__getMirrorType(gpArray) )
logger.info("-------------------------------------------------------------" )
tabLog.outputTable()
logger.info("-------------------------------------------------------------" )
if numMirrorsActingAsPrimaries > 0:
logger.warn( "%s segment(s) configured as mirror(s) are acting as primaries" % numMirrorsActingAsPrimaries )
if numFailedMirrors > 0:
logger.warn( "%s segment(s) configured as mirror(s) have failed" % numFailedMirrors )
if numChangeTrackingMirrors > 0:
logger.warn( "%s mirror segment(s) acting as primaries are in change tracking" % numChangeTrackingMirrors)
else:
logger.warn("-------------------------------------------------------------" )
logger.warn( "Mirror not used")
logger.warn("-------------------------------------------------------------" )
return exitCode
def __appendStandbySummary(self, hostNameToResults, standby, tabLog):
"""
Log information about the configured standby and its current status
"""
if standby is None:
tabLog.info(["Master standby", "= No master standby configured"])
else:
tabLog.info(["Master standby", "= %s" % standby.getSegmentHostName()])
(standbyStatusFetchWarning, outputFromStandbyCmd) = hostNameToResults[standby.getSegmentHostName()]
standbyData = outputFromStandbyCmd[standby.getSegmentDbId()] if standbyStatusFetchWarning is None else None
if standbyStatusFetchWarning is not None:
tabLog.warn(["Standby master state", "= Status could not be determined: %s" % standbyStatusFetchWarning])
elif standbyData[gp.SEGMENT_STATUS__HAS_POSTMASTER_PID_FILE] and \
standbyData[gp.SEGMENT_STATUS__GET_PID]['pid'] > 0 and \
standbyData[gp.SEGMENT_STATUS__GET_PID]['error'] is None:
tabLog.info(["Standby master state", "= Standby host passive"])
else:
tabLog.warn(["Standby master state", "= Standby host DOWN"])
def __showStatusStatistics(self, gpEnv, gpArray):
"""
Print high-level numeric stats about the cluster
returns the exit code
"""
hostNameToResults = self.__fetchAllSegmentData(gpArray)
logger.info("Greenplum instance status summary")
# master summary info
tabLog = TableLogger().setWarnWithArrows(True)
tabLog.addSeparator()
tabLog.info(["Master instance", "= Active"])
self.__appendStandbySummary(hostNameToResults, gpArray.standbyMaster, tabLog)
tabLog.info(["Total segment instance count from metadata", "= %s" % len(gpArray.getSegDbList())])
tabLog.addSeparator()
# primary and mirror segment info
for whichType in ["Primary", "Mirror"]:
tabLog.info(["%s Segment Status" % whichType])
tabLog.addSeparator()
if whichType == "Primary":
segs = [seg for seg in gpArray.getSegDbList() if seg.isSegmentPrimary(current_role=False)]
else:
segs = [seg for seg in gpArray.getSegDbList() if seg.isSegmentMirror(current_role=False)]
if not segs:
tabLog.info(["Mirrors not configured on this array"])
tabLog.addSeparator()
continue
numPostmasterPidFilesMissing = 0
numPostmasterProcessesMissing = 0
numLockFilesMissing = 0
numPostmasterPidsMissing = 0
for seg in segs:
(statusFetchWarning, outputFromCmd) = hostNameToResults[seg.getSegmentHostName()]
if statusFetchWarning is not None:
                    # If we can't contact the segment, count everything as missing;
                    # alternatively we could add a dedicated error row to account for this.
numPostmasterPidFilesMissing += 1
numLockFilesMissing += 1
numPostmasterProcessesMissing += 1
numPostmasterPidsMissing += 1
else:
segmentData = outputFromCmd[seg.getSegmentDbId()]
if not segmentData[gp.SEGMENT_STATUS__HAS_LOCKFILE]:
numLockFilesMissing += 1
if not segmentData[gp.SEGMENT_STATUS__HAS_POSTMASTER_PID_FILE]:
numPostmasterPidFilesMissing += 1
                    # note: this (which matches the old behavior fairly closely) does not seem
                    # entirely correct -- we check whether netstat reports a PID, but do not
                    # really check that the process is running on that port
if segmentData[gp.SEGMENT_STATUS__GET_PID] is None or \
segmentData[gp.SEGMENT_STATUS__GET_PID]['pid'] == 0:
numPostmasterPidsMissing += 1
numPostmasterProcessesMissing += 1
elif segmentData[gp.SEGMENT_STATUS__GET_PID]['error'] is not None:
numPostmasterProcessesMissing += 1
numSegments = len(segs)
numValidAtMaster = len([seg for seg in segs if seg.isSegmentUp()])
numFailuresAtMaster = len([seg for seg in segs if seg.isSegmentDown()])
numPostmasterPidFilesFound = numSegments - numPostmasterPidFilesMissing
numLockFilesFound = numSegments - numLockFilesMissing
numPostmasterPidsFound = numSegments - numPostmasterPidsMissing
numPostmasterProcessesFound = numSegments - numPostmasterProcessesMissing
# print stuff
tabLog.info(["Total %s segments" % whichType.lower(), "= %d" % numSegments])
tabLog.info(["Total %s segment valid (at master)" % whichType.lower(), "= %d" % numValidAtMaster])
tabLog.infoOrWarn(numFailuresAtMaster > 0,
["Total %s segment failures (at master)" % whichType.lower(), "= %d" % numFailuresAtMaster])
tabLog.infoOrWarn(numPostmasterPidFilesMissing > 0,
["Total number of postmaster.pid files missing", "= %d" % numPostmasterPidFilesMissing])
tabLog.info( ["Total number of postmaster.pid files found", "= %d" % numPostmasterPidFilesFound])
tabLog.infoOrWarn(numPostmasterPidsMissing > 0,
["Total number of postmaster.pid PIDs missing", "= %d" % numPostmasterPidsMissing])
tabLog.info( ["Total number of postmaster.pid PIDs found", "= %d" % numPostmasterPidsFound])
tabLog.infoOrWarn(numLockFilesMissing > 0,
["Total number of /tmp lock files missing", "= %d" % numLockFilesMissing])
tabLog.info( ["Total number of /tmp lock files found", "= %d" % numLockFilesFound])
tabLog.infoOrWarn(numPostmasterProcessesMissing > 0,
["Total number postmaster processes missing", "= %d" % numPostmasterProcessesMissing])
tabLog.info( ["Total number postmaster processes found", "= %d" % numPostmasterProcessesFound])
if whichType == "Mirror":
numMirrorsActive = len([seg for seg in segs if seg.isSegmentPrimary(current_role=True)])
numMirrorsPassive = numSegments - numMirrorsActive
tabLog.infoOrWarn(numMirrorsActive > 0,
["Total number mirror segments acting as primary segments", "= %d" % numMirrorsActive])
tabLog.info( ["Total number mirror segments acting as mirror segments", "= %d" % numMirrorsPassive])
tabLog.addSeparator()
tabLog.outputTable()
def __fetchAllSegmentData(self, gpArray):
"""
        returns a dict mapping hostName to the GpGetSegmentStatusValues decoded result
"""
logger.info("Gathering data from segments...")
segmentsByHost = GpArray.getSegmentsByHostName(gpArray.getDbList())
segmentData = {}
dispatchCount = 0
hostNameToCmd = {}
for hostName, segments in segmentsByHost.iteritems():
cmd = gp.GpGetSegmentStatusValues("get segment version status", segments,
[gp.SEGMENT_STATUS__GET_VERSION,
gp.SEGMENT_STATUS__GET_PID,
gp.SEGMENT_STATUS__HAS_LOCKFILE,
gp.SEGMENT_STATUS__HAS_POSTMASTER_PID_FILE,
gp.SEGMENT_STATUS__GET_MIRROR_STATUS
],
verbose=logging_is_verbose(),
ctxt=base.REMOTE,
remoteHost=segments[0].getSegmentAddress())
hostNameToCmd[hostName] = cmd
self.__pool.addCommand(cmd)
            dispatchCount += 1
self.__poolWait(dispatchCount)
hostNameToResults = {}
for hostName, cmd in hostNameToCmd.iteritems():
hostNameToResults[hostName] = cmd.decodeResults()
return hostNameToResults
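    # Illustrative shape of the returned map (the host name and dbid values below
    # are examples, not real data):
    #   hostNameToResults["sdw1"] == (fetchWarningOrNone,
    #       {2: {gp.SEGMENT_STATUS__GET_PID: {"pid": 1234, "error": None},
    #            gp.SEGMENT_STATUS__GET_VERSION: "...", ...}})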
def __showSummaryOfSegmentsWhichRequireAttention(self, gpEnv, gpArray):
"""
        Prints a summary of the segments in the cluster which require attention.
@param gpEnv the GpMasterEnvironment object
@param gpArray the array to display
returns the exit code
"""
exitCode = 0
if not gpArray.hasMirrors:
logger.info("Physical mirroring is not configured")
return 1
primarySegments = [ seg for seg in gpArray.getSegDbList() if seg.isSegmentPrimary(current_role=True) ]
mirrorSegments = [ seg for seg in gpArray.getSegDbList() if seg.isSegmentMirror(current_role=True) ]
contentIdToMirror = GpArray.getSegmentsByContentId(mirrorSegments)
hasWarnings = False
hostNameToResults = self.__fetchAllSegmentData(gpArray)
data = self.__buildGpStateData(gpArray, hostNameToResults)
def logSegments(segments, logAsPairs, additionalFieldsToLog=[]):
"""
helper function for logging a list of primaries, with their mirrors
@param logAsPairs if True, then segments should be primaries only, and we will log corresponding mirror datadir/port
@param additionalFieldsToLog should be a list of FieldDefinition objects
"""
tabLog = TableLogger().setWarnWithArrows(True)
for segment in segments:
if tabLog.getNumLines() == 0:
header = ["Current Primary" if logAsPairs else "Segment", "Port"]
header.extend([f.getShortLabel() for f in additionalFieldsToLog])
if logAsPairs:
header.extend(["Mirror", "Port"])
tabLog.info(header)
line = []
line.extend([segment.getSegmentAddress(), str(segment.getSegmentPort())])
for key in additionalFieldsToLog:
line.append(data.getStrValue(segment, key))
if logAsPairs:
mirror = contentIdToMirror[segment.getSegmentContentId()][0]
line.extend([mirror.getSegmentAddress(), str(mirror.getSegmentPort())])
tabLog.info(line)
tabLog.outputTable()
logger.info("----------------------------------------------------")
logger.info("Segment Mirroring Status Report")
# segment pairs that are in wrong roles
primariesInWrongRole = [s for s in gpArray.getSegDbList() if s.isSegmentPrimary(current_role=True) and \
not s.isSegmentPrimary(current_role=False)]
if primariesInWrongRole:
logger.info("----------------------------------------------------")
logger.info("Segments with Primary and Mirror Roles Switched")
logSegments(primariesInWrongRole, logAsPairs=True)
exitCode = 1
else:
pass # logger.info( "No segment pairs with switched roles")
# segment pairs that are in changetracking
primariesInChangeTracking = [s for s in gpArray.getSegDbList() if s.isSegmentPrimary(current_role=True) and \
s.isSegmentModeInChangeLogging()]
if primariesInChangeTracking:
logger.info("----------------------------------------------------")
logger.info("Primaries in Change Tracking")
logSegments(primariesInChangeTracking, logAsPairs=True, additionalFieldsToLog=[VALUE__CHANGE_TRACKING_DATA_SIZE])
exitCode = 1
else:
pass # logger.info( "No segment pairs are in change tracking")
# segments that are in resync
primariesInResync = [s for s in gpArray.getSegDbList() if s.isSegmentPrimary(current_role=True) and \
s.isSegmentModeInResynchronization()]
if primariesInResync:
logger.info("----------------------------------------------------")
logger.info("Segment Pairs in Resynchronization")
logSegments(primariesInResync, logAsPairs=True, additionalFieldsToLog=[VALUE__RESYNC_MODE, \
VALUE__RESYNC_EST_PROGRESS_WITH_MIRROR, VALUE__TOTAL_RESYNC_OBJECT_COUNT, VALUE__RESYNC_OBJECT_COUNT, VALUE__RESYNC_DATA_SYNCHRONIZED, \
VALUE__RESYNC_EST_TOTAL_DATA, VALUE__RESYNC_EST_COMPLETION_TIME, VALUE__CHANGE_TRACKING_DATA_SIZE])
exitCode = 1
else:
pass # logger.info( "No segment pairs are in resynchronization")
# segments that are down (excluding those that are part of changetracking)
changeTrackingMirrors = [contentIdToMirror[s.getSegmentContentId()][0] for s in primariesInChangeTracking]
changeTrackingMirrorsByDbId = GpArray.getSegmentsGroupedByValue(changeTrackingMirrors, gparray.Segment.getSegmentDbId)
segmentsThatAreDown = [s for s in gpArray.getSegDbList() if \
not s.getSegmentDbId() in changeTrackingMirrorsByDbId and \
data.isSegmentProbablyDown(s)]
if segmentsThatAreDown:
logger.info("----------------------------------------------------")
logger.info("Downed Segments (this excludes mirrors whose primaries are in change tracking" )
logger.info(" -- these, if any, are reported separately above")
logger.info(" also, this may include segments where status could not be retrieved)")
logSegments(segmentsThatAreDown, False, [VALUE__MASTER_REPORTS_STATUS, VALUE__SEGMENT_STATUS])
exitCode = 1
else:
pass # logger.info( "No segments are down")
self.__addClusterDownWarning(gpArray, data)
# final output -- no errors, then log this message
if exitCode == 0:
logger.info("----------------------------------------------------")
logger.info("All segments are running normally")
return exitCode
def __addClusterDownWarning(self, gpArray, gpStateData):
if gpStateData.isClusterProbablyDown(gpArray):
logger.warn("*****************************************************" )
logger.warn("DATABASE IS PROBABLY UNAVAILABLE" )
logger.warn("Review Instance Status in log file or screen output for more information" )
logger.warn("*****************************************************" )
def __getSegmentStatusColumns(self):
return [
VALUE__DBID,
VALUE__CONTENTID,
VALUE__HOSTNAME,
VALUE__ADDRESS,
VALUE__DATADIR,
VALUE__PORT,
VALUE__CURRENT_ROLE,
VALUE__PREFERRED_ROLE,
VALUE__MIRROR_STATUS,
VALUE__MASTER_REPORTS_STATUS,
VALUE__SEGMENT_STATUS,
VALUE__HAS_DATABASE_STATUS_WARNING,
VALUE__ERROR_GETTING_SEGMENT_STATUS,
VALUE__CHANGE_TRACKING_DATA_SIZE_BYTES,
VALUE__RESYNC_MODE,
VALUE__RESYNC_DATA_SYNCHRONIZED_BYTES,
VALUE__RESYNC_EST_TOTAL_DATA_BYTES,
VALUE__RESYNC_EST_PROGRESS_WITH_MIRROR_NUMERIC,
VALUE__RESYNC_EST_COMPLETION_TIME_TIMESTAMP,
VALUE__POSTMASTER_PID_FILE_EXISTS,
VALUE__POSTMASTER_PID_VALUE_INT,
VALUE__LOCK_FILES_EXIST,
VALUE__ACTIVE_PID_INT,
VALUE__RESYNC_EST_TOTAL_DATA,
VALUE__VERSION_STRING
]
def __segmentStatusPipeSeparatedForTableUse(self, gpEnv, gpArray):
"""
        Print out the current status of the cluster (not including master+standby) as a pipe-separated list
@param gpEnv the GpMasterEnvironment object
@param gpArray the array to display
returns the exit code
"""
hostNameToResults = self.__fetchAllSegmentData(gpArray)
data = self.__buildGpStateData(gpArray, hostNameToResults)
fields = self.__getSegmentStatusColumns()
rows = [] # [[f.getName() for f in fields]]
for seg in gpArray.getSegDbList():
row = []
for key in fields:
row.append(data.getStrValue(seg, key, ""))
rows.append(row)
# output rows and fieldNames!
self.__writePipeSeparated(rows, printToLogger=False)
return 0
def __printSampleExternalTableSqlForSegmentStatus(self, gpEnv):
scriptName = "%s/gpstate --segmentStatusPipeSeparatedForTableUse -q -d %s" % \
(sys.path[0], gpEnv.getMasterDataDir()) # todo: ideally, would escape here
columns = ["%s %s" % (f.getColumnName(), f.getColumnType()) for f in self.__getSegmentStatusColumns()]
sql = "\nDROP EXTERNAL TABLE IF EXISTS gpstate_segment_status;\n\n\nCREATE EXTERNAL WEB TABLE gpstate_segment_status\n" \
"(%s)\nEXECUTE '%s' ON MASTER\nFORMAT 'TEXT' (DELIMITER '|' NULL AS '');\n" % \
(", ".join(columns), scriptName )
print sql
return 0
def __writePipeSeparated(self, rows, printToLogger=True):
for row in rows:
escapedRow = [s.replace("|", "_") for s in row] # todo: can we escape it better?
str = "|".join(escapedRow)
if printToLogger:
logger.info(str)
else:
print str
def __showStatus(self, gpEnv, gpArray):
"""
Prints out the current status of the cluster.
@param gpEnv the GpMasterEnvironment object
@param gpArray the array to display
returns the exit code
"""
hasWarnings = False
hostNameToResults = self.__fetchAllSegmentData(gpArray)
#
# fetch data about master
#
master = gpArray.master
dbUrl = dbconn.DbURL(port=gpEnv.getMasterPort(), dbname='template1' )
conn = dbconn.connect(dbUrl, utility=True)
initDbVersion = dbconn.execSQLForSingletonRow(conn, "select productversion from gp_version_at_initdb limit 1;")[0]
pgVersion = dbconn.execSQLForSingletonRow(conn, "show server_version;")[0]
conn.close()
try:
            # note: this is how old gpstate did this, but ... can we do it without requiring a
            # non-utility-mode connection? non-utility-mode connections can take a long time to
            # quit out if there are segment failures and you need to wait for the prober (and
            # this would print the role as "utility" even though it's really a failed dispatcher)
#
# for now, we use Verbose=True so we don't try any statements on the connection during connect
conn = dbconn.connect(dbUrl, utility=False, verbose=True)
conn.close()
qdRole = "dispatch"
except Exception:
qdRole = "utility" # unable to connect in non-utility, but we've been able to connect in utility so...
#
# print output about master
#
(statusFetchWarning, outputFromMasterCmd) = hostNameToResults[master.getSegmentHostName()]
masterData = outputFromMasterCmd[master.getSegmentDbId()] if statusFetchWarning is None else None
data = self.__buildGpStateData(gpArray, hostNameToResults)
logger.info( "----------------------------------------------------" )
logger.info("-Master Configuration & Status")
logger.info( "----------------------------------------------------" )
self.__addClusterDownWarning(gpArray, data)
tabLog = TableLogger().setWarnWithArrows(True)
tabLog.info(["Master host", "= %s" % master.getSegmentHostName()])
if statusFetchWarning is None:
pidData = masterData[gp.SEGMENT_STATUS__GET_PID]
tabLog.info(["Master postgres process ID", "= %s" % pidData['pid']])
else:
tabLog.warn(["Master port", "= Error fetching data: %s" % statusFetchWarning])
tabLog.info(["Master data directory", "= %s" % master.getSegmentDataDirectory()])
tabLog.info(["Master port", "= %d" % master.getSegmentPort()])
tabLog.info(["Master current role", "= %s" % qdRole])
tabLog.info(["Greenplum initsystem version", "= %s" % initDbVersion])
if statusFetchWarning is None:
if masterData[gp.SEGMENT_STATUS__GET_VERSION] is None:
tabLog.warn(["Greenplum current version", "= Unknown"])
else:
tabLog.info(["Greenplum current version", "= %s" % masterData[gp.SEGMENT_STATUS__GET_VERSION]])
else:
tabLog.warn(["Greenplum current version", "= Error fetching data: %s" % statusFetchWarning])
tabLog.info(["Postgres version", "= %s" % pgVersion])
self.__appendStandbySummary(hostNameToResults, gpArray.standbyMaster, tabLog)
tabLog.outputTable()
hasWarnings = hasWarnings or tabLog.hasWarnings()
#
# Output about segments
#
logger.info("----------------------------------------------------")
logger.info("Segment Instance Status Report")
tabLog = TableLogger().setWarnWithArrows(True)
categoriesToIgnoreOnMirror = {CATEGORY__CHANGE_TRACKING_INFO:True, CATEGORY__RESYNCHRONIZATION_INFO:True}
categoriesToIgnoreWithoutMirroring = {CATEGORY__CHANGE_TRACKING_INFO:True, CATEGORY__MIRRORING_INFO:True,
CATEGORY__RESYNCHRONIZATION_INFO:True}
for seg in gpArray.getSegDbList():
tabLog.addSeparator()
if gpArray.hasMirrors:
toSuppress = categoriesToIgnoreOnMirror if seg.isSegmentMirror(current_role=True) else {}
            else:
                toSuppress = categoriesToIgnoreWithoutMirroring
data.addSegmentToTableLogger(tabLog, seg, toSuppress)
tabLog.outputTable()
hasWarnings = hasWarnings or tabLog.hasWarnings()
self.__addClusterDownWarning(gpArray, data)
if hasWarnings:
logger.warn("*****************************************************" )
logger.warn("Warnings have been generated during status processing" )
logger.warn("Check log file or review screen output" )
logger.warn("*****************************************************" )
return 1 if hasWarnings else 0
def __addResyncProgressFields(self, data, primary, primarySegmentData, isMirror):
"""
Add progress fields to the current segment in data, using the primary information provided.
        @param isMirror True if the current segment is a mirror, False otherwise. Not all fields
               from the primary data should be inserted: for example, change tracking size is
               considered to apply only to the primary (not to the pair), so it is not inserted
               for the mirror.
"""
mirrorData = primarySegmentData[gp.SEGMENT_STATUS__GET_MIRROR_STATUS]
#
# populate change tracking fields
#
if not isMirror: # we don't populate CHANGE_TRACKING values for the mirror
if primary.getSegmentMode() == gparray.MODE_RESYNCHRONIZATION or \
primary.getSegmentMode() == gparray.MODE_CHANGELOGGING:
if mirrorData is None or mirrorData["changeTrackingBytesUsed"] < 0:
# server returns <0 if there was an error calculating size
data.addValue(VALUE__CHANGE_TRACKING_DATA_SIZE, "unable to retrieve data size", isWarning=True)
data.addValue(VALUE__CHANGE_TRACKING_DATA_SIZE_BYTES, "", isWarning=True)
else:
data.addValue(VALUE__CHANGE_TRACKING_DATA_SIZE,
self.__abbreviateBytes(mirrorData["changeTrackingBytesUsed"]))
data.addValue(VALUE__CHANGE_TRACKING_DATA_SIZE_BYTES, mirrorData["changeTrackingBytesUsed"])
if mirrorData is None:
# MPP-14054
pass
#
# populate resync modes on primary and mirror
#
if primary.getSegmentMode() == gparray.MODE_RESYNCHRONIZATION:
if mirrorData is None:
data.addValue(VALUE__RESYNC_EST_PROGRESS_WITH_MIRROR, "unable to retrieve progress", isWarning=True)
else:
totalResyncObjectCount = mirrorData['totalResyncObjectCount']
if totalResyncObjectCount == -1:
totalResyncObjectCountStr = "Not Available"
else:
totalResyncObjectCountStr = str(totalResyncObjectCount)
resyncObjectCount = mirrorData['curResyncObjectCount']
if resyncObjectCount == -1:
resyncObjectCountStr = "Not Available"
else:
resyncObjectCountStr = str(resyncObjectCount)
dataSynchronizedBytes = mirrorData["resyncNumCompleted"] * 32L * 1024
dataSynchronizedStr = self.__abbreviateBytes( dataSynchronizedBytes )
resyncDataBytes = None
resyncProgressNumeric = None
totalDataToSynchronizeBytes = None
estimatedEndTimeTimestamp = None
if mirrorData["dataState"] == "InSync":
totalDataToSynchronizeStr = "Sync complete; awaiting config change"
resyncProgressNumeric = 1
resyncProgressStr = "100%"
estimatedEndTimeStr = ""
elif mirrorData["estimatedCompletionTimeSecondsSinceEpoch"] == 0:
totalDataToSynchronizeStr = "Not Available"
resyncProgressStr = "Not Available"
estimatedEndTimeStr = "Not Available"
else:
if mirrorData["resyncTotalToComplete"] == 0:
resyncProgressStr = "Not Available"
else:
resyncProgressNumeric = mirrorData["resyncNumCompleted"] / float(mirrorData["resyncTotalToComplete"])
percentComplete = 100 * resyncProgressNumeric
resyncProgressStr = "%.2f%%" % percentComplete
totalDataToSynchronizeBytes = mirrorData["resyncTotalToComplete"] * 32L * 1024
totalDataToSynchronizeStr = self.__abbreviateBytes( totalDataToSynchronizeBytes )
endTime = datetime.datetime.fromtimestamp(mirrorData["estimatedCompletionTimeSecondsSinceEpoch"])
estimatedEndTimeStr = str(endTime)
estimatedEndTimeTimestamp = endTime.isoformat()
data.addValue(VALUE__RESYNC_MODE, "Full" if mirrorData['isFullResync'] else "Incremental")
data.addValue(VALUE__RESYNC_DATA_SYNCHRONIZED, dataSynchronizedStr)
data.addValue(VALUE__RESYNC_DATA_SYNCHRONIZED_BYTES, dataSynchronizedBytes)
data.addValue(VALUE__RESYNC_EST_TOTAL_DATA, totalDataToSynchronizeStr)
data.addValue(VALUE__RESYNC_EST_TOTAL_DATA_BYTES, totalDataToSynchronizeBytes)
data.addValue(VALUE__RESYNC_EST_PROGRESS_WITH_MIRROR, resyncProgressStr)
data.addValue(VALUE__RESYNC_EST_PROGRESS_WITH_MIRROR_NUMERIC, resyncProgressNumeric)
data.addValue(VALUE__TOTAL_RESYNC_OBJECT_COUNT, totalResyncObjectCountStr)
data.addValue(VALUE__TOTAL_RESYNC_OBJECT_COUNT_INT, totalResyncObjectCount)
data.addValue(VALUE__RESYNC_OBJECT_COUNT, resyncObjectCountStr)
data.addValue(VALUE__RESYNC_OBJECT_COUNT_INT, resyncObjectCount)
data.addValue(VALUE__RESYNC_EST_COMPLETION_TIME, estimatedEndTimeStr)
data.addValue(VALUE__RESYNC_EST_COMPLETION_TIME_TIMESTAMP, estimatedEndTimeTimestamp)
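    # Worked example of the resync math above (numbers are illustrative, not from
    # a real cluster): with resyncNumCompleted = 1000 and resyncTotalToComplete =
    # 4000 32kB pages, resyncProgressNumeric = 1000/4000 = 0.25 ("25.00%") and
    # dataSynchronizedBytes = 1000 * 32 * 1024 = 32768000 (about 31.2 MB).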
def __buildGpStateData(self, gpArray, hostNameToResults):
data = GpStateData()
primaryByContentId = GpArray.getSegmentsByContentId(\
[s for s in gpArray.getSegDbList() if s.isSegmentPrimary(current_role=True)])
for seg in gpArray.getSegDbList():
(statusFetchWarning, outputFromCmd) = hostNameToResults[seg.getSegmentHostName()]
data.beginSegment(seg)
data.addValue(VALUE__DBID, seg.getSegmentDbId())
data.addValue(VALUE__CONTENTID, seg.getSegmentContentId())
data.addValue(VALUE__HOSTNAME, seg.getSegmentHostName())
data.addValue(VALUE__ADDRESS, seg.getSegmentAddress())
data.addValue(VALUE__DATADIR, seg.getSegmentDataDirectory())
data.addValue(VALUE__PORT, seg.getSegmentPort())
peerPrimary = None
data.addValue(VALUE__CURRENT_ROLE, "Primary" if seg.isSegmentPrimary(current_role=True) else "Mirror")
data.addValue(VALUE__PREFERRED_ROLE, "Primary" if seg.isSegmentPrimary(current_role=False) else "Mirror")
if gpArray.hasMirrors:
if seg.isSegmentPrimary(current_role=True):
data.addValue(VALUE__MIRROR_STATUS, gparray.getDataModeLabel(seg.getSegmentMode()))
else:
peerPrimary = primaryByContentId[seg.getSegmentContentId()][0]
if peerPrimary.isSegmentModeInChangeLogging():
data.addValue(VALUE__MIRROR_STATUS, "Out of Sync", isWarning=True)
else:
data.addValue(VALUE__MIRROR_STATUS, gparray.getDataModeLabel(seg.getSegmentMode()))
else:
data.addValue(VALUE__MIRROR_STATUS, "Physical replication not configured")
if statusFetchWarning is not None:
segmentData = None
data.addValue(VALUE__ERROR_GETTING_SEGMENT_STATUS, statusFetchWarning)
else:
segmentData = outputFromCmd[seg.getSegmentDbId()]
#
# Able to fetch from that segment, proceed
#
#
# mirror info
#
if gpArray.hasMirrors:
# print out mirroring state from the segment itself
if seg.isSegmentPrimary(current_role=True):
self.__addResyncProgressFields(data, seg, segmentData, False)
else:
(primaryStatusFetchWarning, primaryOutputFromCmd) = hostNameToResults[peerPrimary.getSegmentHostName()]
if primaryStatusFetchWarning is not None:
data.addValue(VALUE__ERROR_GETTING_SEGMENT_STATUS, "Primary resync status error:" + str(primaryStatusFetchWarning))
else:
self.__addResyncProgressFields(data, peerPrimary, primaryOutputFromCmd[peerPrimary.getSegmentDbId()], True)
#
# Now PID status
#
pidData = segmentData[gp.SEGMENT_STATUS__GET_PID]
found = segmentData[gp.SEGMENT_STATUS__HAS_POSTMASTER_PID_FILE]
data.addValue(VALUE__POSTMASTER_PID_FILE, "Found" if found else "Missing", isWarning=not found)
data.addValue(VALUE__POSTMASTER_PID_FILE_EXISTS, "t" if found else "f", isWarning=not found)
# PID from postmaster.pid
pidValueForSql = "" if pidData["pid"] == 0 else str(pidData["pid"])
data.addValue(VALUE__POSTMASTER_PID_VALUE, pidData["pid"], pidData['pid'] == 0)
data.addValue(VALUE__POSTMASTER_PID_VALUE_INT, pidValueForSql, pidData['pid'] == 0)
# has lock file
found = segmentData[gp.SEGMENT_STATUS__HAS_LOCKFILE]
data.addValue(VALUE__LOCK_FILES, "Found" if found else "Missing", isWarning=not found)
data.addValue(VALUE__LOCK_FILES_EXIST, "t" if found else "f", isWarning=not found)
if pidData['error'] is None:
data.addValue(VALUE__ACTIVE_PID, pidData["pid"])
data.addValue(VALUE__ACTIVE_PID_INT, pidValueForSql)
else:
data.addValue(VALUE__ACTIVE_PID, "Not found", True)
data.addValue(VALUE__ACTIVE_PID_INT, "", True)
data.addValue(VALUE__VERSION_STRING, segmentData[gp.SEGMENT_STATUS__GET_VERSION])
data.addValue(VALUE__MASTER_REPORTS_STATUS, "Up" if seg.isSegmentUp() else "Down", seg.isSegmentDown())
databaseStatus = None
databaseStatusIsWarning = False
if seg.isSegmentDown():
databaseStatus = "Down in configuration"
databaseStatusIsWarning = True
elif segmentData is None:
databaseStatus = "Unknown -- unable to load segment status"
databaseStatusIsWarning = True
elif segmentData[gp.SEGMENT_STATUS__GET_PID]['error'] is not None:
databaseStatus = "Process error -- database process may be down"
databaseStatusIsWarning = True
elif segmentData[gp.SEGMENT_STATUS__GET_MIRROR_STATUS] is None:
databaseStatus = "Unknown -- unable to load segment status"
databaseStatusIsWarning = True
else:
databaseStatus = segmentData[gp.SEGMENT_STATUS__GET_MIRROR_STATUS]["databaseStatus"]
databaseStatusIsWarning = databaseStatus == "Uninitialized" or databaseStatus == "Down"
if seg.isSegmentMirror(current_role=True):
data.addValue(VALUE__MIRROR_SEGMENT_STATUS, databaseStatus, databaseStatusIsWarning)
else:
data.addValue(VALUE__NONMIRROR_DATABASE_STATUS, databaseStatus, databaseStatusIsWarning)
data.addValue(VALUE__SEGMENT_STATUS, databaseStatus, databaseStatusIsWarning)
data.addValue(VALUE__HAS_DATABASE_STATUS_WARNING, "t" if databaseStatusIsWarning else "f", databaseStatusIsWarning)
data.setSegmentProbablyDown(seg, peerPrimary, databaseStatusIsWarning)
return data
def __abbreviateBytes(self, numBytes):
"""
        Abbreviate bytes with 3 significant digits of precision (so 1.45 GB but also 12.3 GB), except for numBytes < 1024
SAMPLE TEST:
def testAbbreviateBytes(bytes, expected=""):
# logger.info(" %s abbreviates to %s" % (bytes, self.__abbreviateBytes(bytes)))
if expected != self.__abbreviateBytes(bytes):
raise Exception("Invalid abbreviation for %s : %s" % (bytes, self.__abbreviateBytes(bytes)))
testAbbreviateBytes(0, "0 bytes")
testAbbreviateBytes(1, "1 byte")
testAbbreviateBytes(2, "2 bytes")
testAbbreviateBytes(13, "13 bytes")
testAbbreviateBytes(656, "656 bytes")
testAbbreviateBytes(999, "999 bytes")
testAbbreviateBytes(1000, "1000 bytes")
testAbbreviateBytes(1001, "1001 bytes")
testAbbreviateBytes(1024, "1.00 kB")
testAbbreviateBytes(1301, "1.27 kB")
testAbbreviateBytes(13501, "13.2 kB")
testAbbreviateBytes(135401, "132 kB")
testAbbreviateBytes(1354015, "1.29 MB")
testAbbreviateBytes(13544015, "12.9 MB")
testAbbreviateBytes(135440154, "129 MB")
testAbbreviateBytes(1354401574, "1.26 GB")
testAbbreviateBytes(13544015776, "12.6 GB")
testAbbreviateBytes(135440157769, "126 GB")
testAbbreviateBytes(1354401577609, "1.23 TB")
testAbbreviateBytes(13544015776094, "12.3 TB")
testAbbreviateBytes(135440157760944, "123 TB")
testAbbreviateBytes(1754401577609464, "1.56 PB")
testAbbreviateBytes(17544015776094646, "15.6 PB")
testAbbreviateBytes(175440157760946475, "156 PB")
testAbbreviateBytes(175440157760945555564, "155822 PB")
"""
abbreviations = [
(1024L*1024*1024*1024*1024, "PB"),
(1024L*1024*1024*1024, "TB"),
(1024L*1024*1024, "GB"),
(1024L*1024, "MB"),
(1024L, "kB"),
(1, "bytes")]
if numBytes == 1:
return "1 byte"
for factor, suffix in abbreviations:
if numBytes >= factor:
break
precision = 3
precisionForDisplay = precision - len('%d' % int(numBytes/factor))
if precisionForDisplay < 0 or numBytes < 1024:
precisionForDisplay = 0
return '%.*f %s' % (precisionForDisplay, float(numBytes) / factor, suffix)
def __showQuickStatus(self, gpEnv, gpArray):
exitCode = 0
logger.info("-Quick Greenplum database status from Master instance only")
logger.info( "----------------------------------------------------------")
segments = [seg for seg in gpArray.getDbList() if seg.isSegmentQE()]
upSegments = [seg for seg in segments if seg.isSegmentUp()]
downSegments = [seg for seg in segments if seg.isSegmentDown()]
logger.info("# of up segments, from configuration table = %s" % (len(upSegments)))
if len(downSegments) > 0:
exitCode = 1
logger.info("# of down segments, from configuration table = %s" % (len(downSegments)))
tabLog = TableLogger().setWarnWithArrows(True)
tabLog.info(["Down Segment", "Datadir", "Port"])
for seg in downSegments:
tabLog.info(self.__appendSegmentTripletToArray(seg, []))
tabLog.outputTable()
logger.info( "----------------------------------------------------------")
return exitCode
def __showPortInfo(self, gpEnv, gpArray):
logger.info("-Master segment instance %s port = %d" % (gpEnv.getMasterDataDir(), gpEnv.getMasterPort()))
logger.info("-Segment instance port assignments")
logger.info("----------------------------------")
tabLog = TableLogger().setWarnWithArrows(True)
tabLog.info([ "Host", "Datadir", "Port"])
for seg in gpArray.getSegDbList():
tabLog.info(self.__appendSegmentTripletToArray(seg, []))
tabLog.outputTable()
def __showStandbyMasterInformation(self, gpEnv, gpArray):
standby = gpArray.standbyMaster
#
# print standby configuration/status
#
if standby is None:
logger.info("Standby master instance not configured")
else:
cmd = gp.GpGetSegmentStatusValues("get standby segment version status", [standby],
[gp.SEGMENT_STATUS__GET_PID], verbose=logging_is_verbose(), ctxt=base.REMOTE,
remoteHost=standby.getSegmentAddress())
cmd.run()
# fetch standby pid
(standbyPidFetchWarning, outputFromCmd) = cmd.decodeResults()
if standbyPidFetchWarning is None:
pidData = outputFromCmd[standby.getSegmentDbId()][gp.SEGMENT_STATUS__GET_PID]
else:
pidData = {}
pidData['pid'] = 0
pidData['error'] = None
# Print output!
logger.info("Standby master details" )
logger.info("----------------------" )
tabLog = TableLogger().setWarnWithArrows(True)
tabLog.info(["Standby address", "= %s" % standby.getSegmentAddress()])
tabLog.info(["Standby data directory", "= %s" % standby.getSegmentDataDirectory()])
tabLog.info(["Standby port", "= %s" % standby.getSegmentPort()])
if standbyPidFetchWarning is not None:
tabLog.warn(["Standby PID", "= %s" % standbyPidFetchWarning ])
tabLog.warn(["Standby status", "= Status could not be determined"])
elif pidData['pid'] == 0:
tabLog.warn(["Standby PID", "= 0"])
tabLog.warn(["Standby status", "= Standby process not running"])
else:
if pidData['error'] is not None:
#
# we got a pid value but had some kind of error -- so possibly the PID
# is not actually active on its port. Print the error
#
tabLog.warn(["Standby PID", "= %s" % pidData['pid'], "%s" % pidData['error']])
tabLog.warn(["Standby status", "= Status could not be determined" ])
else:
tabLog.info(["Standby PID", "= %s" % pidData['pid']])
tabLog.info(["Standby status", "= Standby host passive" ])
tabLog.outputTable()
#
# now print pg_stat_replication
#
logger.info("-------------------------------------------------------------" )
logger.info("-pg_stat_replication" )
logger.info("-------------------------------------------------------------" )
dbUrl = dbconn.DbURL(port=gpEnv.getMasterPort(), dbname='template1')
conn = dbconn.connect(dbUrl, utility=True)
sql = "SELECT state, sync_state, sent_location, flush_location, replay_location FROM pg_stat_replication"
cur = dbconn.execSQL(conn, sql)
if cur.rowcount == 1:
row = cur.fetchall()[0]
logger.info("-WAL Sender State: %s" % row[0])
logger.info("-Sync state: %s" % row[1])
logger.info("-Sent Location: %s" % row[2])
logger.info("-Flush Location: %s" % row[3])
logger.info("-Replay Location: %s" % row[4])
elif cur.rowcount > 1:
logger.warning("pg_stat_replication shows more than 1 row.")
else:
logger.info("No entries found.")
logger.info("-------------------------------------------------------------" )
# done printing pg_stat_replication table
def __poolWait(self, dispatchCount):
self.__pool.wait_and_printdots(dispatchCount, self.__options.quiet)
def __showVersionInfo(self, gpEnv, gpArray):
exitCode = 0
logger.info("Loading version information")
        segmentsAndMaster = list(gpArray.getDbList())
upSegmentsAndMaster = [seg for seg in segmentsAndMaster if seg.isSegmentUp()]
# fetch from hosts
segmentsByHost = GpArray.getSegmentsByHostName(upSegmentsAndMaster)
dispatchCount = 0
for hostName, segments in segmentsByHost.iteritems():
cmd = gp.GpGetSegmentStatusValues("get segment version status", segments,
[gp.SEGMENT_STATUS__GET_VERSION],
verbose=logging_is_verbose(),
ctxt=base.REMOTE,
remoteHost=segments[0].getSegmentAddress())
self.__pool.addCommand(cmd)
            dispatchCount += 1
self.__poolWait(dispatchCount)
# group output
dbIdToVersion = {}
uniqueVersions = {}
for cmd in self.__pool.getCompletedItems():
(warning, outputFromCmd) = cmd.decodeResults()
if warning is None:
for seg in cmd.dblist:
version = outputFromCmd[seg.getSegmentDbId()][gp.SEGMENT_STATUS__GET_VERSION]
if version is not None:
dbIdToVersion[seg.getSegmentDbId()] = version
uniqueVersions[version] = True
else:
logger.warn(warning)
# print the list of all segments and warnings about trouble
tabLog = TableLogger().setWarnWithArrows(True)
tabLog.info(["Host","Datadir", "Port", "Version", ""])
for seg in segmentsAndMaster:
line = self.__appendSegmentTripletToArray(seg, [])
version = dbIdToVersion.get(seg.getSegmentDbId())
if version is None:
line.append("unable to retrieve version")
tabLog.warn(line)
else:
line.append(version)
tabLog.info(line)
tabLog.outputTable()
if len(uniqueVersions) > 1:
logger.warn("Versions for some segments do not match. Review table above for details.")
hadFailures = len(dbIdToVersion) != len(segmentsAndMaster)
if hadFailures:
logger.warn("Unable to retrieve version data from all segments. Review table above for details.")
if len(uniqueVersions) == 1 and not hadFailures:
# if we got data from all segments then we are confident they are all the same version
logger.info("All segments are running the same software version")
self.__pool.empty_completed_items()
return exitCode
def run(self):
# check that only one option is set
numSet = (1 if self.__options.showMirrorList else 0) + \
(1 if self.__options.showClusterConfig else 0) + \
(1 if self.__options.showQuickStatus else 0) + \
(1 if self.__options.showStatus else 0) + \
(1 if self.__options.showStatusStatistics else 0) + \
(1 if self.__options.segmentStatusPipeSeparatedForTableUse else 0) + \
(1 if self.__options.printSampleExternalTableSqlForSegmentStatus else 0) + \
(1 if self.__options.showPortInformation else 0) + \
(1 if self.__options.showStandbyMasterInformation else 0) + \
(1 if self.__options.showSummaryOfSegmentsWhichRequireAttention else 0) + \
(1 if self.__options.showVersionInfo else 0)
if numSet > 1:
raise ProgramArgumentValidationException("Too many output options specified")
if self.__options.parallelDegree < 1 or self.__options.parallelDegree > 64:
raise ProgramArgumentValidationException("Invalid parallelDegree provided with -B argument: %d" % self.__options.parallelDegree)
self.__pool = base.WorkerPool(self.__options.parallelDegree)
# load config
gpEnv = GpMasterEnvironment(self.__options.masterDataDirectory, True, self.__options.timeout, self.__options.retries)
confProvider = configInterface.getConfigurationProvider().initializeProvider(gpEnv.getMasterPort())
gpArray = confProvider.loadSystemConfig(useUtilityMode=True)
# do it!
if self.__options.showMirrorList:
exitCode = self._showMirrorList(gpEnv, gpArray)
elif self.__options.showClusterConfig:
exitCode = self.__showClusterConfig(gpEnv, gpArray)
elif self.__options.showQuickStatus:
exitCode = self.__showQuickStatus(gpEnv, gpArray)
elif self.__options.showStatus:
exitCode = self.__showStatus(gpEnv, gpArray)
elif self.__options.showVersionInfo:
exitCode = self.__showVersionInfo(gpEnv, gpArray)
elif self.__options.showSummaryOfSegmentsWhichRequireAttention:
exitCode = self.__showSummaryOfSegmentsWhichRequireAttention(gpEnv, gpArray)
elif self.__options.printSampleExternalTableSqlForSegmentStatus:
exitCode = self.__printSampleExternalTableSqlForSegmentStatus(gpEnv)
elif self.__options.showStandbyMasterInformation:
exitCode = self.__showStandbyMasterInformation(gpEnv, gpArray)
elif self.__options.showPortInformation:
exitCode = self.__showPortInfo(gpEnv, gpArray)
elif self.__options.segmentStatusPipeSeparatedForTableUse:
exitCode = self.__segmentStatusPipeSeparatedForTableUse(gpEnv, gpArray)
else:
# self.__options.showStatusStatistics OR default:
exitCode = self.__showStatusStatistics(gpEnv, gpArray)
return exitCode
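    # Mapping of output options to handlers in run() above (illustrative
    # invocations; see createParser below for the full option list):
    #   gpstate -m -> _showMirrorList         gpstate -c -> __showClusterConfig
    #   gpstate -s -> __showStatus            gpstate -e -> __showSummaryOfSegmentsWhichRequireAttention
    #   gpstate -b (or no output option) -> __showStatusStatistics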
def cleanup(self):
if self.__pool:
self.__pool.haltWork()
#-------------------------------------------------------------------------
@staticmethod
def createParser():
description = ("Display system state")
help = [""]
parser = OptParser(option_class=OptChecker,
description=' '.join(description.split()),
version='%prog version $Revision$')
parser.setHelp(help)
addStandardLoggingAndHelpOptions(parser, True)
addTo = OptionGroup(parser, "Connection Options")
parser.add_option_group(addTo)
addMasterDirectoryOptionForSingleClusterProgram(addTo)
addTo = OptionGroup(parser, "Output Options")
parser.add_option_group(addTo)
addTo.add_option('-m', None, default=False, action='store_true',
dest="showMirrorList",
metavar="<showMirrorList>",
help="Show mirror list from configuration")
addTo.add_option('-c', None, default=False, action='store_true',
dest="showClusterConfig",
metavar="<showClusterConfig>",
help="Show cluster configuration")
addTo.add_option("-Q", None, default=False, action="store_true",
dest="showQuickStatus",
metavar="<showQuickStatus>",
help="Show quick status")
addTo.add_option("-s", None, default=False, action="store_true",
dest="showStatus",
metavar="<showStatus>",
help="Show status")
addTo.add_option("-i", None, default=False, action="store_true",
dest="showVersionInfo",
metavar="<showVersionInfo>",
help="Show version information")
addTo.add_option("-p", None, default=False, action="store_true",
dest="showPortInformation",
metavar="<showPortInformation>",
help="Show port information")
addTo.add_option("-f", None, default=False, action="store_true",
dest="showStandbyMasterInformation",
metavar="<showStandbyMasterInformation>",
help="Show standby master information")
addTo.add_option("-b", None, default=False, action="store_true",
dest="showStatusStatistics",
metavar="<showStatusStatistics>",
help="Show status statistics")
addTo.add_option("-e", None, default=False, action="store_true",
dest="showSummaryOfSegmentsWhichRequireAttention",
metavar="<showSummaryOfSegmentsWhichRequireAttention>",
help="Show summary of segments needing attention")
#
# two experimental options for exposing segment status as a queryable web table
#
addTo.add_option("--segmentStatusPipeSeparatedForTableUse", None, default=False, action="store_true",
dest="segmentStatusPipeSeparatedForTableUse",
metavar="<segmentStatusPipeSeparatedForTableUse>",
help="Show status as pipe separated output")
addTo.add_option("--printSampleExternalTableSql", None, default=False, action="store_true",
dest="printSampleExternalTableSqlForSegmentStatus",
metavar="<printSampleExternalTableSqlForSegmentStatus>",
help="Print sample sql that can be run to create an external table on stop of gpstate --segmentStatusPipeSeparatedForTableUse")
addTo = OptionGroup(parser, "Other Options")
parser.add_option_group(addTo)
addTo.add_option("-B", None, type="int", default=16,
dest="parallelDegree",
metavar="<parallelDegree>",
help="Max # of workers to use querying segments for status. [default: %default]")
addTo.add_option("--timeout", None, type="int", default=None,
dest="timeout",
metavar="<timeout>",
help="Database connection timeout. [default: %default]")
addTo.add_option("--retries", None, type="int", default=None,
dest="retries",
metavar="<retries>",
help="Database connection retries. [default: %default]")
parser.set_defaults()
return parser
@staticmethod
def createProgram(options, args):
if len(args) > 0 :
raise ProgramArgumentValidationException(\
"too many arguments: only options may be specified", True)
return GpSystemStateProgram(options)
avg_line_length: 48.52472 | max_line_length: 185 | alphanum_fraction: 0.607116

hexsha: 5216ae1404d6d3a007f004d61dd88e641a14726b | size: 1229 | ext: py | lang: Python
path: app/bookingapp/models.py | repo: kamleshmj07/movie-ticketing-app-api | head_hexsha: 8b6d4ba71202485e0589bbc1bfc5c9aef4c035ce
licenses: ["MIT"] | stars: null | issues: null | forks: null
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, \
PermissionsMixin
# Create your models here.
class UserManager(BaseUserManager):
def create_user(self, email, password=None, **args):
"""Creates and saves a new user"""
if not email:
raise ValueError("Please enter a valid email address.")
user = self.model(email=self.normalize_email(email), **args)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, password):
"""Creates and saves a new super user"""
user = self.create_user(email, password)
user.is_staff = True
user.is_superuser = True
user.save(using=self._db)
return user
class User(AbstractBaseUser, PermissionsMixin):
"""Custom user model that supports using email instead of username"""
email = models.EmailField(max_length=255, unique=True)
name = models.CharField(max_length=255)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
objects = UserManager()
USERNAME_FIELD = 'email'
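# Minimal usage sketch (assumes this app's User is configured as Django's
# AUTH_USER_MODEL; the emails/passwords below are placeholders):
#   from django.contrib.auth import get_user_model
#   User = get_user_model()
#   user = User.objects.create_user("user@example.com", "s3cret")
#   admin = User.objects.create_superuser("admin@example.com", "s3cret")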
avg_line_length: 31.512821 | max_line_length: 75 | alphanum_fraction: 0.664768

hexsha: cd324b7b415da4c91e78c6a42d7df3b8527364c3 | size: 853 | ext: py | lang: Python
path: main.py | repo: rakesh160/azureTest | head_hexsha: 73e01853be5313ca6be7864b10dce48989a8f167
licenses: ["MIT"] | stars: null | issues: null | forks: null
from flask import Flask, redirect, url_for, render_template
from sqlalchemy import *
app = Flask(__name__)
@app.route('/hello/<name>')
def hello_name(name):
hello_name_default()
return 'Hello %s!' % name
@app.route('/hello')
def hello_name_default():
return 'Hello default'
@app.route('/helloall/<word>')
def hello_all(word):
if (word == 'admin'):
return redirect(url_for('hello_name', name=word))
else:
return redirect(url_for('hello_name_default'))
@app.route('/select')
def select():
engine = create_engine('mysql+pymysql://datavirt:D@t@Virt@usilapphad05/_Stage_GoogleAnalytics?charset=utf8')
connection = engine.connect()
result = connection.execute("select username from GA_KB_Activity")
connection.close()
return render_template('select.html', data=result)
if __name__ == '__main__':
app.run()
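# Quick sanity check of the routes above (hypothetical local run):
#   GET /hello          -> "Hello default"
#   GET /hello/Bob      -> "Hello Bob!"
#   GET /helloall/admin -> 302 redirect to /hello/admin
#   GET /helloall/x     -> 302 redirect to /hello
# Note: the '@' characters in the connection password in select() would need to
# be percent-encoded (%40) for SQLAlchemy's URL parser to read the DSN correctly.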
avg_line_length: 23.694444 | max_line_length: 112 | alphanum_fraction: 0.692849

hexsha: d201f4c6bfc3502b7fe21791170aac64f16eeed8 | size: 1635 | ext: py | lang: Python
path: rowcolremove.py | repo: dnaneet/numcode | head_hexsha: 7ec9345f65367a2690f4b9815d476e241edc2d52
licenses: ["MIT"] | stars: null | issues: null | forks: null
# Code to remove multiple rows and columns from a numpy array
import numpy as np
from scipy.linalg import eigh
import math
from matplotlib import pyplot as plt
import time
import os
os.system('clear') #Clear screen on linux. For Win, use os.system('cls')
## FUNCTION DECLARATION ##
#SpringElementStiffness: takes linear spring stiffness value as argument
def SpringElementStiffness(k):
print 'stiffness matrix:\n', np.array([[k,-k],[-k,k]])
return np.array([[k,-k],[-k,k]])
#Spring Assemble: takes K_global, k_local, nodes of connectivity (m,n) as arguments
def SpringAssemble(K_global,k_local,m,n):
K_global[m,m]+=k_local[0,0]
K_global[m,n]+=k_local[0,1]
K_global[n,m]+=k_local[1,0]
K_global[n,n]+=k_local[1,1]
return K_global
## INITIALIZATION ##
nNodes=3 #Number of nodes in structure
restrained_dofs = [0,2] #Fixed boundary condition
force=np.array([0,0,15000])
k_1=SpringElementStiffness(100000) #Creates local spring element 2x2 stiffness matrix
k_2=SpringElementStiffness(200000) #Creates local spring element 2x2 stiffness matrix
print('\n****************************\n')
K_global = np.zeros((nNodes,nNodes))
rows=K_global.shape[0]
cols=K_global.shape[1]
print 'Init K_global:\n', K_global
print('\n****************************\n')
#Calling spring assemble
SpringAssemble(K_global,k_1,0,1)
print 'K_global:\n', K_global
print('\n****************************\n')
SpringAssemble(K_global,k_2,1,2)
print 'K_global:\n', K_global
print('\n****************************\n')
print 'K_global latest:\n', np.delete(np.delete(K_global,0,0),0,1)
print('\n****************************\n')
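# The np.delete call above removes a single row and column; to remove multiple
# rows and columns in one call (as the file header promises), pass a list of
# indices -- a sketch using the restrained_dofs defined above:
#   reduced = np.delete(np.delete(K_global, restrained_dofs, 0), restrained_dofs, 1)
#   # drops rows 0 and 2, then columns 0 and 2, applying the fixed boundary conditions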
avg_line_length: 30.849057 | max_line_length: 87 | alphanum_fraction: 0.66422

hexsha: d43086756769a16dc63114c6b743d2ed4fe41903 | size: 14163 | ext: py | lang: Python
path: scripts/validate_docstrings.py | repo: Quansight/pandas | head_hexsha: 511fd46e68b12317eb925d4bf7405c2d33daba6c
licenses: ["PSF-2.0", "Apache-2.0", "BSD-3-Clause-No-Nuclear-License-2014", "MIT", "ECL-2.0", "BSD-3-Clause"]
stars: 1 (2020-04-06T16:50:48.000Z to 2020-04-06T16:50:48.000Z) | issues: null | forks: 1 (2020-07-04T10:32:38.000Z to 2020-07-04T10:32:38.000Z)
#!/usr/bin/env python3
"""
Analyze docstrings to detect errors.
If no argument is provided, it does a quick check of docstrings and returns
a csv with all API functions and results of basic checks.
If a function or method is provided in the form "pandas.function",
"pandas.module.class.method", etc., it reports a list of all errors in the
docstring of the specified function or method.
Usage::
$ ./validate_docstrings.py
$ ./validate_docstrings.py pandas.DataFrame.head
"""
import argparse
import doctest
import glob
import importlib
import json
import os
import sys
import tempfile
from typing import List, Optional
import flake8.main.application
try:
from io import StringIO
except ImportError:
from cStringIO import StringIO
# Template backend makes matplotlib to not plot anything. This is useful
# to avoid that plot windows are open from the doctests while running the
# script. Setting here before matplotlib is loaded.
# We don't warn for the number of open plots, as none is actually being opened
os.environ["MPLBACKEND"] = "Template"
import matplotlib # noqa: E402 isort:skip
matplotlib.rc("figure", max_open_warning=10000)
import numpy # noqa: E402 isort:skip
BASE_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, os.path.join(BASE_PATH))
import pandas # noqa: E402 isort:skip
sys.path.insert(1, os.path.join(BASE_PATH, "doc", "sphinxext"))
from numpydoc.validate import validate, Docstring # noqa: E402 isort:skip
PRIVATE_CLASSES = ["NDFrame", "IndexOpsMixin"]
ERROR_MSGS = {
"GL04": "Private classes ({mentioned_private_classes}) should not be "
"mentioned in public docstrings",
"SA05": "{reference_name} in `See Also` section does not need `pandas` "
"prefix, use {right_reference} instead.",
"EX02": "Examples do not pass tests:\n{doctest_log}",
"EX03": "flake8 error: {error_code} {error_message}{times_happening}",
"EX04": "Do not import {imported_library}, as it is imported "
"automatically for the examples (numpy as np, pandas as pd)",
}
def pandas_error(code, **kwargs):
"""
Copy of the numpydoc error function, since ERROR_MSGS can't be updated
with our custom errors yet.
"""
return (code, ERROR_MSGS[code].format(**kwargs))
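# Illustrative call (not part of the original script):
#   >>> pandas_error("EX04", imported_library="numpy")
#   ('EX04', 'Do not import numpy, as it is imported automatically for the examples (numpy as np, pandas as pd)')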
def get_api_items(api_doc_fd):
"""
Yield information about all public API items.
Parse api.rst file from the documentation, and extract all the functions,
methods, classes, attributes... This should include all pandas public API.
Parameters
----------
api_doc_fd : file descriptor
A file descriptor of the API documentation page, containing the table
of contents with all the public API.
Yields
------
name : str
        The name of the object (e.g. 'pandas.Series.str.upper').
func : function
The object itself. In most cases this will be a function or method,
but it can also be classes, properties, cython objects...
section : str
The name of the section in the API page where the object item is
located.
subsection : str
The name of the subsection in the API page where the object item is
located.
"""
current_module = "pandas"
previous_line = current_section = current_subsection = ""
position = None
for line in api_doc_fd:
line = line.strip()
if len(line) == len(previous_line):
if set(line) == set("-"):
current_section = previous_line
continue
if set(line) == set("~"):
current_subsection = previous_line
continue
if line.startswith(".. currentmodule::"):
current_module = line.replace(".. currentmodule::", "").strip()
continue
if line == ".. autosummary::":
position = "autosummary"
continue
if position == "autosummary":
if line == "":
position = "items"
continue
if position == "items":
if line == "":
position = None
continue
item = line.strip()
func = importlib.import_module(current_module)
for part in item.split("."):
func = getattr(func, part)
yield (
".".join([current_module, item]),
func,
current_section,
current_subsection,
)
previous_line = line
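# The parser above walks reST shaped roughly like the following (illustrative
# snippet; section underlines must match the title length):
#
#   .. currentmodule:: pandas
#
#   Series
#   ------
#
#   .. autosummary::
#
#      Series.head
#      Series.tail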
class PandasDocstring(Docstring):
@property
def mentioned_private_classes(self):
return [klass for klass in PRIVATE_CLASSES if klass in self.raw_doc]
@property
def examples_errors(self):
flags = doctest.NORMALIZE_WHITESPACE | doctest.IGNORE_EXCEPTION_DETAIL
finder = doctest.DocTestFinder()
runner = doctest.DocTestRunner(optionflags=flags)
context = {"np": numpy, "pd": pandas}
error_msgs = ""
for test in finder.find(self.raw_doc, self.name, globs=context):
f = StringIO()
runner.run(test, out=f.write)
error_msgs += f.getvalue()
return error_msgs
@property
def examples_source_code(self):
lines = doctest.DocTestParser().get_examples(self.raw_doc)
return [line.source for line in lines]
def validate_pep8(self):
if not self.examples:
return
# F401 is needed to not generate flake8 errors in examples
        # that do not use numpy or pandas
content = "".join(
(
"import numpy as np # noqa: F401\n",
"import pandas as pd # noqa: F401\n",
*self.examples_source_code,
)
)
application = flake8.main.application.Application()
application.initialize(["--quiet"])
with tempfile.NamedTemporaryFile(mode="w", encoding="utf-8") as file:
file.write(content)
file.flush()
application.run_checks([file.name])
# We need this to avoid flake8 printing the names of the files to
# the standard output
application.formatter.write = lambda line, source: None
application.report()
yield from application.guide.stats.statistics_for("")
def pandas_validate(func_name: str):
"""
Call the numpydoc validation, and add the errors specific to pandas.
Parameters
----------
func_name : str
Name of the object of the docstring to validate.
Returns
-------
dict
Information about the docstring and the errors found.
"""
doc = PandasDocstring(func_name)
result = validate(func_name)
mentioned_errs = doc.mentioned_private_classes
if mentioned_errs:
result["errors"].append(
pandas_error("GL04", mentioned_private_classes=", ".join(mentioned_errs))
)
if doc.see_also:
for rel_name, rel_desc in doc.see_also.items():
if rel_name.startswith("pandas."):
result["errors"].append(
pandas_error(
"SA05",
reference_name=rel_name,
right_reference=rel_name[len("pandas.") :],
)
)
result["examples_errs"] = ""
if doc.examples:
result["examples_errs"] = doc.examples_errors
if result["examples_errs"]:
result["errors"].append(
pandas_error("EX02", doctest_log=result["examples_errs"])
)
for err in doc.validate_pep8():
result["errors"].append(
pandas_error(
"EX03",
error_code=err.error_code,
error_message=err.message,
times_happening=" ({} times)".format(err.count)
if err.count > 1
else "",
)
)
examples_source_code = "".join(doc.examples_source_code)
for wrong_import in ("numpy", "pandas"):
if "import {}".format(wrong_import) in examples_source_code:
result["errors"].append(
pandas_error("EX04", imported_library=wrong_import)
)
return result
def validate_all(prefix, ignore_deprecated=False):
"""
Execute the validation of all docstrings, and return a dict with the
results.
Parameters
----------
prefix : str or None
If provided, only the docstrings that start with this pattern will be
validated. If None, all docstrings will be validated.
ignore_deprecated: bool, default False
If True, deprecated objects are ignored when validating docstrings.
Returns
-------
dict
A dictionary with an item for every function/method... containing
all the validation information.
"""
result = {}
seen = {}
api_doc_fnames = os.path.join(BASE_PATH, "doc", "source", "reference", "*.rst")
api_items = []
for api_doc_fname in glob.glob(api_doc_fnames):
with open(api_doc_fname) as f:
api_items += list(get_api_items(f))
for func_name, func_obj, section, subsection in api_items:
if prefix and not func_name.startswith(prefix):
continue
doc_info = pandas_validate(func_name)
if ignore_deprecated and doc_info["deprecated"]:
continue
result[func_name] = doc_info
shared_code_key = doc_info["file"], doc_info["file_line"]
shared_code = seen.get(shared_code_key, "")
result[func_name].update(
{
"in_api": True,
"section": section,
"subsection": subsection,
"shared_code_with": shared_code,
}
)
seen[shared_code_key] = func_name
return result
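# The mapping returned by validate_all looks roughly like (illustrative):
#   {"pandas.Series.head": {"errors": [...], "examples_errs": "",
#                           "in_api": True, "section": ..., "subsection": ...,
#                           "shared_code_with": ...}, ...}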
def print_validate_all_results(
prefix: str,
errors: Optional[List[str]],
output_format: str,
ignore_deprecated: bool,
):
if output_format not in ("default", "json", "actions"):
raise ValueError(f'Unknown output_format "{output_format}"')
result = validate_all(prefix, ignore_deprecated)
if output_format == "json":
sys.stdout.write(json.dumps(result))
return 0
prefix = "##[error]" if output_format == "actions" else ""
exit_status = 0
for name, res in result.items():
for err_code, err_desc in res["errors"]:
if errors and err_code not in errors:
continue
sys.stdout.write(
f'{prefix}{res["file"]}:{res["file_line"]}:'
f"{err_code}:{name}:{err_desc}\n"
)
exit_status += 1
return exit_status
def print_validate_one_results(func_name: str):
def header(title, width=80, char="#"):
full_line = char * width
side_len = (width - len(title) - 2) // 2
adj = "" if len(title) % 2 == 0 else " "
title_line = "{side} {title}{adj} {side}".format(
side=char * side_len, title=title, adj=adj
)
return f"\n{full_line}\n{title_line}\n{full_line}\n\n"
result = pandas_validate(func_name)
sys.stderr.write(header(f"Docstring ({func_name})"))
sys.stderr.write(f"{result['docstring']}\n")
sys.stderr.write(header("Validation"))
if result["errors"]:
sys.stderr.write(f'{len(result["errors"])} Errors found:\n')
for err_code, err_desc in result["errors"]:
if err_code == "EX02": # Failing examples are printed at the end
sys.stderr.write("\tExamples do not pass tests\n")
continue
sys.stderr.write(f"\t{err_desc}\n")
    else:
sys.stderr.write(f'Docstring for "{func_name}" correct. :)\n')
if result["examples_errs"]:
sys.stderr.write(header("Doctests"))
sys.stderr.write(result["examples_errs"])
def main(func_name, prefix, errors, output_format, ignore_deprecated):
"""
Main entry point. Call the validation for one or for all docstrings.
"""
if func_name is None:
return print_validate_all_results(
prefix, errors, output_format, ignore_deprecated
)
else:
print_validate_one_results(func_name)
return 0
if __name__ == "__main__":
format_opts = "default", "json", "actions"
func_help = (
"function or method to validate (e.g. pandas.DataFrame.head) "
"if not provided, all docstrings are validated and returned "
"as JSON"
)
argparser = argparse.ArgumentParser(description="validate pandas docstrings")
argparser.add_argument("function", nargs="?", default=None, help=func_help)
argparser.add_argument(
"--format",
default="default",
choices=format_opts,
help="format of the output when validating "
"multiple docstrings (ignored when validating one). "
"It can be {str(format_opts)[1:-1]}",
)
argparser.add_argument(
"--prefix",
default=None,
help="pattern for the "
"docstring names, in order to decide which ones "
'will be validated. A prefix "pandas.Series.str."'
"will make the script validate all the docstrings "
"of methods starting by this pattern. It is "
"ignored if parameter function is provided",
)
argparser.add_argument(
"--errors",
default=None,
help="comma separated "
"list of error codes to validate. By default it "
"validates all errors (ignored when validating "
"a single docstring)",
)
argparser.add_argument(
"--ignore_deprecated",
default=False,
action="store_true",
help="if this flag is set, "
"deprecated objects are ignored when validating "
"all docstrings",
)
args = argparser.parse_args()
sys.exit(
main(
args.function,
args.prefix,
args.errors.split(",") if args.errors else None,
args.format,
args.ignore_deprecated,
)
)
| 32.042986
| 85
| 0.606863
|
b3e408bbebd20d3e642859a41fcb44f2afd63fcb
| 577
|
py
|
Python
|
mint/wallet/puzzles/p2_m_of_n_delegate_direct.py
|
sai-genesis/rc1-test
|
56e565952b283450c8589296f87c31b1c67b8502
|
[
"Apache-2.0"
] | 12
|
2021-08-18T20:53:31.000Z
|
2022-03-15T21:45:13.000Z
|
mint/wallet/puzzles/p2_m_of_n_delegate_direct.py
|
sai-genesis/rc1-test
|
56e565952b283450c8589296f87c31b1c67b8502
|
[
"Apache-2.0"
] | 34
|
2021-08-18T19:12:11.000Z
|
2022-01-06T17:15:34.000Z
|
mint/wallet/puzzles/p2_m_of_n_delegate_direct.py
|
sai-genesis/rc1-test
|
56e565952b283450c8589296f87c31b1c67b8502
|
[
"Apache-2.0"
] | 7
|
2021-08-18T20:53:34.000Z
|
2022-03-15T08:37:40.000Z
|
"""
Pay to m of n direct
This puzzle program is like p2_delegated_puzzle except instead of one public key,
it includes N public keys, any M of which needs to sign the delegated puzzle.
"""
from mint.types.blockchain_format.program import Program
from .load_clvm import load_clvm
MOD = load_clvm("p2_m_of_n_delegate_direct.clvm")
def puzzle_for_m_of_public_key_list(m, public_key_list) -> Program:
return MOD.curry(m, public_key_list)
def solution_for_delegated_puzzle(m, selectors, puzzle, solution) -> Program:
return Program.to([selectors, puzzle, solution])
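# Illustrative use (hypothetical values; `selectors` is assumed to mark which
# of the N public keys contribute signatures):
#   puzzle = puzzle_for_m_of_public_key_list(2, [pk_a, pk_b, pk_c])
#   solution = solution_for_delegated_puzzle(2, [1, 1, 0], delegated_puzzle, delegated_solution)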
| 27.47619
| 81
| 0.783362
|
9228071ce953d6320fcdd1333aec36a94a39b57b
| 51
|
py
|
Python
|
src/adresses/__init__.py
|
marquesds/zipnator
|
fe6102d3e69e9adfa343e2ead5099817a1cf742f
|
[
"Unlicense"
] | null | null | null |
src/adresses/__init__.py
|
marquesds/zipnator
|
fe6102d3e69e9adfa343e2ead5099817a1cf742f
|
[
"Unlicense"
] | null | null | null |
src/adresses/__init__.py
|
marquesds/zipnator
|
fe6102d3e69e9adfa343e2ead5099817a1cf742f
|
[
"Unlicense"
] | null | null | null |
class InvalidZipcodeException(Exception):
    """Raised when a given zipcode is not valid."""
| 17
| 41
| 0.803922
|
71f788f23823c285b749036f2a8b36bce2e20def
| 10,790
|
py
|
Python
|
interaction_manager/view/ui_spotify_dialog.py
|
ES-TUDelft/interaction-design-tool-ir
|
d6fffa8d76c9e3df4ed1f505ee9427e5af5b8082
|
[
"MIT"
] | 1
|
2021-03-07T12:36:13.000Z
|
2021-03-07T12:36:13.000Z
|
interaction_manager/view/ui_spotify_dialog.py
|
ES-TUDelft/interaction-design-tool-ir
|
d6fffa8d76c9e3df4ed1f505ee9427e5af5b8082
|
[
"MIT"
] | null | null | null |
interaction_manager/view/ui_spotify_dialog.py
|
ES-TUDelft/interaction-design-tool-ir
|
d6fffa8d76c9e3df4ed1f505ee9427e5af5b8082
|
[
"MIT"
] | 1
|
2021-02-20T15:10:37.000Z
|
2021-02-20T15:10:37.000Z
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'interaction_manager/ui/spotifydialog.ui'
#
# Created by: PyQt5 UI code generator 5.12.2
#
# WARNING! All changes made in this file will be lost!
from es_common.utils.qt import QtCore, QtGui, QtWidgets
class Ui_SpotifyDialog(object):
def setupUi(self, SpotifyDialog):
SpotifyDialog.setObjectName("SpotifyDialog")
SpotifyDialog.resize(424, 564)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(SpotifyDialog.sizePolicy().hasHeightForWidth())
SpotifyDialog.setSizePolicy(sizePolicy)
SpotifyDialog.setMinimumSize(QtCore.QSize(0, 0))
SpotifyDialog.setMaximumSize(QtCore.QSize(750, 1000))
self.gridLayout_5 = QtWidgets.QGridLayout(SpotifyDialog)
self.gridLayout_5.setObjectName("gridLayout_5")
self.gridLayout_3 = QtWidgets.QGridLayout()
self.gridLayout_3.setObjectName("gridLayout_3")
spacerItem = QtWidgets.QSpacerItem(20, 10, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.gridLayout_3.addItem(spacerItem, 6, 0, 1, 1)
self.defaultSettingsCheckBox = QtWidgets.QCheckBox(SpotifyDialog)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.defaultSettingsCheckBox.sizePolicy().hasHeightForWidth())
self.defaultSettingsCheckBox.setSizePolicy(sizePolicy)
self.defaultSettingsCheckBox.setLayoutDirection(QtCore.Qt.LeftToRight)
self.defaultSettingsCheckBox.setChecked(True)
self.defaultSettingsCheckBox.setObjectName("defaultSettingsCheckBox")
self.gridLayout_3.addWidget(self.defaultSettingsCheckBox, 1, 0, 1, 1)
self.connectButton = QtWidgets.QPushButton(SpotifyDialog)
self.connectButton.setObjectName("connectButton")
self.gridLayout_3.addWidget(self.connectButton, 1, 1, 1, 1)
self.settingsGroupBox = QtWidgets.QGroupBox(SpotifyDialog)
self.settingsGroupBox.setEnabled(False)
font = QtGui.QFont()
font.setFamily("Courier New")
font.setBold(True)
font.setWeight(75)
self.settingsGroupBox.setFont(font)
self.settingsGroupBox.setObjectName("settingsGroupBox")
self.gridLayout = QtWidgets.QGridLayout(self.settingsGroupBox)
self.gridLayout.setObjectName("gridLayout")
self.gridLayout_12 = QtWidgets.QGridLayout()
self.gridLayout_12.setObjectName("gridLayout_12")
self.label_3 = QtWidgets.QLabel(self.settingsGroupBox)
self.label_3.setObjectName("label_3")
self.gridLayout_12.addWidget(self.label_3, 1, 0, 1, 1)
self.usernameLineEdit = QtWidgets.QLineEdit(self.settingsGroupBox)
self.usernameLineEdit.setObjectName("usernameLineEdit")
self.gridLayout_12.addWidget(self.usernameLineEdit, 0, 1, 1, 2)
self.label_4 = QtWidgets.QLabel(self.settingsGroupBox)
self.label_4.setObjectName("label_4")
self.gridLayout_12.addWidget(self.label_4, 0, 0, 1, 1)
self.redirectURILineEdit = QtWidgets.QLineEdit(self.settingsGroupBox)
self.redirectURILineEdit.setObjectName("redirectURILineEdit")
self.gridLayout_12.addWidget(self.redirectURILineEdit, 3, 1, 1, 2)
self.clientSecretLineEdit = QtWidgets.QLineEdit(self.settingsGroupBox)
self.clientSecretLineEdit.setObjectName("clientSecretLineEdit")
self.gridLayout_12.addWidget(self.clientSecretLineEdit, 2, 1, 1, 2)
self.label_2 = QtWidgets.QLabel(self.settingsGroupBox)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_2.sizePolicy().hasHeightForWidth())
self.label_2.setSizePolicy(sizePolicy)
self.label_2.setObjectName("label_2")
self.gridLayout_12.addWidget(self.label_2, 2, 0, 1, 1)
self.label = QtWidgets.QLabel(self.settingsGroupBox)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label.sizePolicy().hasHeightForWidth())
self.label.setSizePolicy(sizePolicy)
self.label.setObjectName("label")
self.gridLayout_12.addWidget(self.label, 3, 0, 1, 1)
self.clientIDLineEdit = QtWidgets.QLineEdit(self.settingsGroupBox)
self.clientIDLineEdit.setObjectName("clientIDLineEdit")
self.gridLayout_12.addWidget(self.clientIDLineEdit, 1, 1, 1, 2)
self.gridLayout.addLayout(self.gridLayout_12, 0, 1, 1, 1)
self.gridLayout_3.addWidget(self.settingsGroupBox, 0, 0, 1, 2)
self.gridLayout_4 = QtWidgets.QGridLayout()
self.gridLayout_4.setObjectName("gridLayout_4")
self.playButton = QtWidgets.QPushButton(SpotifyDialog)
self.playButton.setEnabled(False)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.playButton.sizePolicy().hasHeightForWidth())
self.playButton.setSizePolicy(sizePolicy)
self.playButton.setObjectName("playButton")
self.gridLayout_4.addWidget(self.playButton, 3, 2, 1, 1)
self.playlistComboBox = QtWidgets.QComboBox(SpotifyDialog)
self.playlistComboBox.setObjectName("playlistComboBox")
self.gridLayout_4.addWidget(self.playlistComboBox, 1, 1, 1, 2)
self.line_2 = QtWidgets.QFrame(SpotifyDialog)
self.line_2.setFrameShape(QtWidgets.QFrame.HLine)
self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_2.setObjectName("line_2")
self.gridLayout_4.addWidget(self.line_2, 0, 0, 1, 3)
self.label_5 = QtWidgets.QLabel(SpotifyDialog)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_5.sizePolicy().hasHeightForWidth())
self.label_5.setSizePolicy(sizePolicy)
self.label_5.setObjectName("label_5")
self.gridLayout_4.addWidget(self.label_5, 1, 0, 1, 1)
self.trackComboBox = QtWidgets.QComboBox(SpotifyDialog)
self.trackComboBox.setObjectName("trackComboBox")
self.gridLayout_4.addWidget(self.trackComboBox, 2, 1, 1, 2)
self.label_6 = QtWidgets.QLabel(SpotifyDialog)
self.label_6.setObjectName("label_6")
self.gridLayout_4.addWidget(self.label_6, 2, 0, 1, 1)
self.gridLayout_3.addLayout(self.gridLayout_4, 2, 0, 1, 2)
self.groupBox_2 = QtWidgets.QGroupBox(SpotifyDialog)
self.groupBox_2.setObjectName("groupBox_2")
self.gridLayout_2 = QtWidgets.QGridLayout(self.groupBox_2)
self.gridLayout_2.setObjectName("gridLayout_2")
self.messageTextEdit = QtWidgets.QTextEdit(self.groupBox_2)
self.messageTextEdit.setAutoFillBackground(True)
self.messageTextEdit.setStyleSheet("background: rgb(76, 76, 76)")
self.messageTextEdit.setReadOnly(True)
self.messageTextEdit.setObjectName("messageTextEdit")
self.gridLayout_2.addWidget(self.messageTextEdit, 0, 0, 1, 1)
self.gridLayout_3.addWidget(self.groupBox_2, 4, 0, 1, 2)
self.buttonBox = QtWidgets.QDialogButtonBox(SpotifyDialog)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok)
self.buttonBox.setObjectName("buttonBox")
self.gridLayout_3.addWidget(self.buttonBox, 5, 0, 1, 2)
self.line = QtWidgets.QFrame(SpotifyDialog)
self.line.setFrameShape(QtWidgets.QFrame.HLine)
self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line.setObjectName("line")
self.gridLayout_3.addWidget(self.line, 3, 0, 1, 2)
self.gridLayout_5.addLayout(self.gridLayout_3, 0, 0, 1, 1)
self.retranslateUi(SpotifyDialog)
self.buttonBox.accepted.connect(SpotifyDialog.accept)
self.buttonBox.rejected.connect(SpotifyDialog.reject)
QtCore.QMetaObject.connectSlotsByName(SpotifyDialog)
SpotifyDialog.setTabOrder(self.usernameLineEdit, self.clientIDLineEdit)
SpotifyDialog.setTabOrder(self.clientIDLineEdit, self.clientSecretLineEdit)
SpotifyDialog.setTabOrder(self.clientSecretLineEdit, self.redirectURILineEdit)
SpotifyDialog.setTabOrder(self.redirectURILineEdit, self.defaultSettingsCheckBox)
SpotifyDialog.setTabOrder(self.defaultSettingsCheckBox, self.connectButton)
SpotifyDialog.setTabOrder(self.connectButton, self.playlistComboBox)
SpotifyDialog.setTabOrder(self.playlistComboBox, self.trackComboBox)
SpotifyDialog.setTabOrder(self.trackComboBox, self.playButton)
SpotifyDialog.setTabOrder(self.playButton, self.messageTextEdit)
def retranslateUi(self, SpotifyDialog):
_translate = QtCore.QCoreApplication.translate
SpotifyDialog.setWindowTitle(_translate("SpotifyDialog", "Spotify"))
self.defaultSettingsCheckBox.setText(_translate("SpotifyDialog", "Default Settings"))
self.connectButton.setText(_translate("SpotifyDialog", "Connect"))
self.settingsGroupBox.setTitle(_translate("SpotifyDialog", "Spotify Settings"))
self.label_3.setText(_translate("SpotifyDialog", "Client ID"))
self.label_4.setText(_translate("SpotifyDialog", "Username"))
self.redirectURILineEdit.setPlaceholderText(_translate("SpotifyDialog", "http://localhost/8080/"))
self.label_2.setText(_translate("SpotifyDialog", "Client Secret"))
self.label.setText(_translate("SpotifyDialog", "Redirect URI"))
self.playButton.setText(_translate("SpotifyDialog", "Test"))
self.label_5.setText(_translate("SpotifyDialog", "Playlists"))
self.label_6.setText(_translate("SpotifyDialog", "Tracks"))
self.groupBox_2.setTitle(_translate("SpotifyDialog", "Log"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
SpotifyDialog = QtWidgets.QDialog()
ui = Ui_SpotifyDialog()
ui.setupUi(SpotifyDialog)
SpotifyDialog.show()
sys.exit(app.exec_())
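# As the header warns, manual edits to this file are lost on regeneration; the
# usual way to rebuild it is something like:
#   pyuic5 interaction_manager/ui/spotifydialog.ui -o interaction_manager/view/ui_spotify_dialog.py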
| 57.393617
| 114
| 0.732808
|
5784d8b7874a78f4fc4277061290f3063b7a902b
| 2,295
|
py
|
Python
|
Section 4/section_4.py
|
PacktPublishing/Hands-On-Computer-Vision-with-PyTorch-1.x
|
bad073f7489792d3c4bc860a2d56fa133ba63617
|
[
"MIT"
] | 6
|
2020-03-11T23:39:12.000Z
|
2021-10-04T04:53:02.000Z
|
Section 4/section_4.py
|
PacktPublishing/Hands-On-Computer-Vision-with-PyTorch-1.x
|
bad073f7489792d3c4bc860a2d56fa133ba63617
|
[
"MIT"
] | null | null | null |
Section 4/section_4.py
|
PacktPublishing/Hands-On-Computer-Vision-with-PyTorch-1.x
|
bad073f7489792d3c4bc860a2d56fa133ba63617
|
[
"MIT"
] | 2
|
2020-05-02T14:50:05.000Z
|
2020-09-19T06:01:54.000Z
|
import torch
import torch.nn as nn
import torch.nn.functional as f
import torch.optim as optim
from torchvision import datasets, transforms
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
        # single input channel, 6 output channels, 5x5 kernel
self.conv1 = nn.Conv2d(1, 6, 5)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc2 = nn.Linear(16 * 20 * 20, 10)
def forward(self, x):
x = f.relu(self.conv1(x))
x = f.relu(self.conv2(x))
x = self.fc2(x.view(-1, 16 * 20 * 20))
return x
net = Net()
train_loader = torch.utils.data.DataLoader(
datasets.MNIST('./data', train=True, download=True, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=64, shuffle=True)
loss_fn = nn.CrossEntropyLoss()
optimizer = optim.Adam(net.parameters(), lr=0.001)
# put on gpu if available
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
net.to(device)
for epoch in range(1):
for batch_idx, (input_batch, label) in enumerate(train_loader, 0):
input_batch = input_batch.to(device)
label = label.to(device)
optimizer.zero_grad()
output = net(input_batch)
loss = loss_fn(output, label)
loss.backward()
optimizer.step()
# print statistics
if batch_idx % 50 == 0:
print('[%d, %5d] loss: %.3f' % (epoch + 1, batch_idx + 1, loss.item()))
test_loader = torch.utils.data.DataLoader(
    datasets.MNIST('./data', train=False, download=True, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=64, shuffle=True)
net.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for _, (input_batch, label) in enumerate(test_loader, 0):
input_batch = input_batch.to(device)
label = label.to(device)
        output = net(input_batch)
        test_loss += loss_fn(output, label).item()  # accumulate batch loss for reporting below
        pred = output.argmax(dim=1, keepdim=True)
        correct += pred.eq(label.view_as(pred)).sum().item()
print('Correct: %d / %d' % (correct, len(test_loader.dataset)))
print('Average test loss: %.3f' % (test_loss / len(test_loader)))
print('Accuracy: %.3f' % (correct / len(test_loader.dataset)))
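# A minimal single-batch inference sketch (illustrative, not in the original):
#   input_batch, label = next(iter(test_loader))
#   pred = net(input_batch.to(device)).argmax(dim=1)
#   print('predicted:', pred[0].item(), 'actual:', label[0].item())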
| 30.6
| 89
| 0.599564
|
ef4b6ac7f7467242a434ca02473f1eecaba2e2e1
| 19,576
|
py
|
Python
|
ParlAI/projects/image_chat/transresnet_multimodal/modules.py
|
UmaTaru/run
|
be29e4d41a4de3dee27cd6796801bfe51382d294
|
[
"MIT"
] | 163
|
2019-06-23T14:07:57.000Z
|
2022-02-25T23:06:07.000Z
|
ParlAI/projects/image_chat/transresnet_multimodal/modules.py
|
UmaTaru/run
|
be29e4d41a4de3dee27cd6796801bfe51382d294
|
[
"MIT"
] | 8
|
2019-07-24T12:41:31.000Z
|
2022-02-10T00:17:20.000Z
|
ParlAI/projects/image_chat/transresnet_multimodal/modules.py
|
UmaTaru/run
|
be29e4d41a4de3dee27cd6796801bfe51382d294
|
[
"MIT"
] | 31
|
2019-06-26T01:21:07.000Z
|
2021-09-06T17:23:24.000Z
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Modules for TransresnetMultimodalAgent."""
import torch
from torch import nn
from parlai.agents.transformer.modules import (
TransformerEncoder,
create_position_codes,
TransformerEncoderLayer,
)
from projects.personality_captions.transresnet.modules import (
TransresnetModel,
load_fasttext_embeddings,
)
class TransresnetMultimodalModel(TransresnetModel):
"""Extension of Transresnet to incorporate dialogue history and multimodality."""
@staticmethod
def add_cmdline_args(argparser):
"""Override to include model-specific args."""
TransresnetModel.add_cmdline_args(argparser)
agent = argparser.add_argument_group("TransresnetMultimodal task arguments")
agent.add_argument(
"--context-encoder-embedding-type",
type=str,
default=None,
choices=[None, "fasttext_cc"],
help="Specify if using pretrained embeddings",
)
agent.add_argument(
"--load-context-encoder-from",
type=str,
default=None,
help="Specify if using a pretrained transformer encoder",
)
agent.add_argument(
"--share-encoder",
type="bool",
default=False,
help="Whether to share the text encoder for the "
"labels and the dialogue history",
)
agent.add_argument("--num-layers-multimodal-encoder", type=int, default=1)
agent.add_argument(
"--multimodal",
type="bool",
default=False,
help="If true, feed a query term into a separate "
"transformer prior to computing final rank "
"scores",
)
agent.add_argument(
"--multimodal-combo",
type=str,
choices=["concat", "sum"],
default="sum",
help="How to combine the encoding for the " "multi-modal transformer",
)
agent.add_argument(
"--encode-image",
type="bool",
default=True,
help="Whether to include the image encoding when "
"retrieving a candidate response",
)
agent.add_argument(
"--encode-dialogue-history",
type="bool",
default=True,
help="Whether to include the dialogue history "
"encoding when retrieving a candidate response",
)
agent.add_argument(
"--encode-personality",
type="bool",
default=True,
help="Whether to include the personality encoding "
"when retrieving a candidate response",
)
def __init__(self, opt, personalities_list, dictionary):
super().__init__(opt, personalities_list, dictionary)
self.hidden_dim = self.opt["hidden_dim"]
self.share_encoder = opt.get("share_encoder")
nlayers_mm = (
opt["num_layers_all"]
if opt["num_layers_all"] != -1
else opt["num_layers_multimodal_encoder"]
)
# blank encoding (for concat)
self.blank_encoding = torch.Tensor(opt["hidden_dim"]).fill_(0).detach_()
if self.use_cuda:
self.blank_encoding = self.blank_encoding.cuda()
# Encoders
self.encode_image = opt.get("encode_image", True)
self.encode_personality = opt.get("encode_personality", True)
self.encode_dialogue_history = opt.get("encode_dialogue_history", True)
assert any(
[self.encode_dialogue_history, self.encode_image, self.encode_personality]
)
# Transformer 2
self._build_multimodal_encoder(nlayers_mm)
# Label Encoder
self.label_encoder = self.text_encoder
# Context encoder
self._build_context_encoder()
def _build_multimodal_encoder(self, n_layers_mm):
"""
Build the multimodal encoder.
:param n_layers_mm:
number of layers for the transformer
"""
self.multimodal = self.opt.get("multimodal")
if self.multimodal:
self.multimodal_combo = self.opt.get("multimodal_combo", "sum")
            self.multimodal_encoder = MultimodalCombiner(
                n_heads=self.opt["n_heads"],
                n_layers=n_layers_mm,
hidden_dim=self.opt["hidden_dim"],
ffn_size=self.opt["embedding_size"] * 4,
attention_dropout=self.opt["attention_dropout"],
relu_dropout=self.opt["relu_dropout"],
learn_positional_embeddings=self.opt.get(
"learn_positional_embeddings", False
),
reduction=True,
)
def _build_context_encoder(self):
"""Build the context (i.e. dialogue history) encoder."""
if self.opt.get("share_encoder"):
self.context_encoder = self.label_encoder
else:
if (
self.opt["load_context_encoder_from"] is None
and self.opt["context_encoder_embedding_type"] == "fasttext_cc"
):
embeddings = load_fasttext_embeddings(
self.dictionary, self.opt["embedding_size"], self.opt["datapath"]
)
else:
embeddings = nn.Embedding(
len(self.dictionary), self.opt["embedding_size"]
)
self.context_encoder = TransformerEncoder(
n_heads=self.opt["n_heads"],
n_layers=self.opt["n_layers"],
embedding_size=self.opt["embedding_size"],
ffn_size=self.opt["ffn_size"],
vocabulary_size=len(self.dictionary),
embedding=embeddings,
dropout=self.opt["dropout"],
attention_dropout=self.opt["attention_dropout"],
relu_dropout=self.opt["relu_dropout"],
padding_idx=self.dictionary.tok2ind[self.dictionary.null_token],
learn_positional_embeddings=self.opt["learn_positional_embeddings"],
embeddings_scale=False,
n_positions=self.opt["n_positions"],
activation=self.opt["activation"],
variant=self.opt["variant"],
n_segments=self.opt["n_segments"],
)
if self.opt.get("load_context_encoder_from") is not None:
self._load_context_encoder_state()
def forward(
self,
image_features,
personalities,
dialogue_histories,
labels,
batchsize=None,
personalities_tensor=None,
):
"""
Model forward pass.
:param image_features:
list of tensors of image features, one per example
:param personalities:
list of personalities, one per example
:param dialogue_histories:
list of dialogue histories, one per example
:param labels:
list of response labels, one per example
:param personalities_tensor:
(optional) list of personality representations, usually a one-hot
vector if specified
:return:
the encoded context and the encoded captions.
"""
# labels
labels_encoded = self.forward_text_encoder(labels)
# dialog history
d_hist_encoded = self.forward_text_encoder(
dialogue_histories, dialogue_history=True, batchsize=batchsize
)
# images
img_encoded = self.forward_image(image_features)
# personalities
pers_encoded = self.forward_personality(personalities, personalities_tensor)
total_encoded = self.get_rep(
[img_encoded, d_hist_encoded, pers_encoded], batchsize=batchsize
)
loss, nb_ok = self.get_loss(total_encoded, labels_encoded)
return loss, nb_ok, total_encoded
def forward_personality(self, personalities, personalities_tensor):
"""
Encode personalities.
:param personalities:
list of personalities, one per example
:param personalities_tensor:
(optional) list of personality representations, usually a one-hot
vector if specified
:return:
encoded representation of the personalities
"""
pers_encoded = None
if not self.encode_personality:
if self.multimodal and self.multimodal_combo == "concat":
pers_encoded = self.blank_encoding
else:
pers_encoded = super().forward_personality(
personalities, personalities_tensor
)
return pers_encoded
def forward_text_encoder(self, texts, dialogue_history=False, batchsize=None):
"""
Forward pass for a text encoder.
:param texts:
text to encode
:param dialogue_history:
flag that indicates whether the text is dialogue history; if False,
text is a response candidate
:param batchsize:
size of the batch
:return:
encoded representation of the `texts`
"""
texts_encoded = None
if texts is None or (dialogue_history and not self.encode_dialogue_history):
if (
self.multimodal
and self.multimodal_combo == "concat"
and dialogue_history
):
texts_encoded = torch.stack(
[self.blank_encoding for _ in range(batchsize)]
)
else:
encoder = self.context_encoder if dialogue_history else self.label_encoder
indexes, mask = self.captions_to_tensor(texts)
texts_encoded = encoder(indexes)
if self.text_encoder_frozen:
texts_encoded = texts_encoded.detach()
texts_encoded = self.additional_layer(texts_encoded)
return texts_encoded
def forward_image(self, image_features):
"""
Encode image features.
:param image_features:
list of image features
:return:
encoded representation of the image features
"""
img_encoded = None
if image_features is None or not self.encode_image:
if self.multimodal and self.multimodal_combo == "concat":
img_encoded = self.blank_encoding
else:
img_encoded = super().forward_image(image_features)
return img_encoded
def get_rep(self, encodings, batchsize=None):
"""
Get the multimodal representation of the encodings.
:param encodings:
list of encodings
:param batchsize:
size of batch
:return:
final multimodal representations
"""
if not self.multimodal:
rep = self.sum_encodings(encodings)
else:
if self.multimodal_combo == "sum":
encodings = self.sum_encodings(encodings).unsqueeze(1)
elif self.multimodal_combo == "concat":
encodings = self.cat_encodings(encodings)
all_one_mask = torch.ones(encodings.size()[:2])
if self.use_cuda:
all_one_mask = all_one_mask.cuda()
rep = self.multimodal_encoder(encodings, all_one_mask)
if rep is None:
rep = torch.stack([self.blank_encoding for _ in range(batchsize)])
return rep
def choose_best_response(
self,
image_features,
personalities,
dialogue_histories,
candidates,
candidates_encoded=None,
k=1,
batchsize=None,
):
"""
Choose the best response for each example.
:param image_features:
list of tensors of image features
:param personalities:
list of personalities
:param dialogue_histories:
list of dialogue histories, one per example
:param candidates:
list of candidates, one set per example
:param candidates_encoded:
optional; if specified, a fixed set of encoded candidates that is
used for each example
:param k:
number of ranked candidates to return. if < 1, we return the ranks
of all candidates in the set.
:return:
a set of ranked candidates for each example
"""
self.eval()
_, _, encoded = self.forward(
image_features, personalities, dialogue_histories, None, batchsize=batchsize
)
encoded = encoded.detach()
one_cand_set = True
if candidates_encoded is None:
one_cand_set = False
candidates_encoded = [
self.forward_text_encoder(c).detach() for c in candidates
]
chosen = [
self.choose_topk(
idx if not one_cand_set else 0,
encoded,
candidates,
candidates_encoded,
one_cand_set,
k,
)
for idx in range(len(encoded))
]
return chosen
def choose_topk(
self, idx, encoded, candidates, candidates_encoded, one_cand_set, k
):
"""
Choose top k best responses for a single example.
:param idx:
idx of example in encoded
:param encoded:
full matrix of encoded representations (for the whole batch)
:param candidates:
list of candidates
:param candidates_encoded:
encoding of the candidates
:param one_cand_set:
true if there is one set of candidates for each example
:param k:
how many ranked responses to return
:return:
ranked list of k responses
"""
encoding = encoded[idx : idx + 1, :]
scores = torch.mm(
candidates_encoded[idx] if not one_cand_set else candidates_encoded,
encoding.transpose(0, 1),
)
if k >= 1:
_, index_top = torch.topk(scores, k, dim=0)
else:
_, index_top = torch.topk(scores, scores.size(0), dim=0)
return [
candidates[idx][idx2] if not one_cand_set else candidates[idx2]
for idx2 in index_top.unsqueeze(1)
]
def get_loss(self, total_encoded, labels_encoded):
"""
Compute loss over batch.
:param total_encoded:
encoding of the examples
:param labels_encoded:
encoding of the labels
:return:
total batch loss, and number of correct examples
"""
loss = None
num_correct = None
if labels_encoded is not None:
dot_products = total_encoded.mm(
labels_encoded.t()
) # batch_size * batch_size
log_prob = torch.nn.functional.log_softmax(dot_products, dim=1)
targets = torch.arange(0, len(total_encoded), dtype=torch.long)
if self.use_cuda:
targets = targets.cuda()
loss = torch.nn.functional.nll_loss(log_prob, targets)
num_correct = (log_prob.max(dim=1)[1] == targets).float().sum()
return loss, num_correct
def cat_encodings(self, tensors):
"""
Concatenate non-`None` encodings.
:param tensors:
list tensors to concatenate
:return:
concatenated tensors
"""
tensors = [t for t in tensors if t is not None]
return torch.cat([t.unsqueeze(1) for t in tensors], dim=1)
def _load_text_encoder_state(self):
try:
state_file = self.opt.get("load_encoder_from")
model = torch.load(state_file)
states = model["model"]
self.text_encoder.load_state_dict(states)
except Exception as e:
print(
"WARNING: Cannot load transformer state; please make sure "
"specified file is a dictionary with the states in `model`. "
"Additionally, make sure that the appropriate options are "
"specified. Error: {}".format(e)
)
def _load_context_encoder_state(self):
try:
state_file = self.opt.get("load_context_encoder_from")
model = torch.load(state_file)
states = model["model"]
self.context_encoder.load_state_dict(states)
except Exception as e:
print(
"WARNING: Cannot load transformer state; please make sure "
"specified file is a dictionary with the states in `model`. "
"Additionally, make sure that the appropriate options are "
"specified. Error: {}".format(e)
)
class MultimodalCombiner(nn.Module):
"""Multimodal Combination module."""
def __init__(
self,
n_heads,
n_layers,
hidden_dim,
ffn_size,
reduction=True,
attention_dropout=0.0,
relu_dropout=0.0,
learn_positional_embeddings=False,
):
super().__init__()
self.ffn_size = ffn_size
self.n_layers = n_layers
self.n_heads = n_heads
self.out_dim = hidden_dim
self.dim = hidden_dim
self.reduction = reduction
assert hidden_dim % n_heads == 0, "MM-Combiner dim must be multiple of n_heads"
n_positions = 1024
self.position_embeddings = nn.Embedding(n_positions, hidden_dim)
if not learn_positional_embeddings:
create_position_codes(
n_positions, hidden_dim, out=self.position_embeddings.weight
)
else:
nn.init.normal_(self.position_embeddings.weight, 0, hidden_dim ** -0.5)
self.layers = nn.ModuleList()
for _ in range(self.n_layers):
self.layers.append(
TransformerEncoderLayer(
n_heads, hidden_dim, ffn_size, attention_dropout, relu_dropout
)
)
def forward(self, tensor, mask):
"""
Forward pass.
:param tensor:
a [bsz, seq_len, hidden_dim] FloatTensor
:param mask:
a [bsz, seq_len] ByteTensor filled with 1 when inside the sequence and 0 outside.
:return:
output: a [bsz, hidden_dim] FloatTensor of encodings
mask: the same as before
"""
seq_len = tensor.size(1)
positions = tensor.new(seq_len).long()
positions = torch.arange(seq_len, out=positions).unsqueeze(0)
tensor = tensor + self.position_embeddings(positions).expand_as(tensor)
tensor *= mask.unsqueeze(-1).float()
for i in range(self.n_layers):
tensor = self.layers[i](tensor, mask)
if self.reduction:
divisor = mask.float().sum(dim=1).unsqueeze(-1).clamp(min=1e-20)
output = tensor.sum(dim=1) / divisor
return output
else:
output = tensor
return output, mask
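# Shape sketch for MultimodalCombiner (illustrative, with made-up sizes):
#   combiner = MultimodalCombiner(n_heads=2, n_layers=1, hidden_dim=300, ffn_size=1200)
#   out = combiner(torch.randn(4, 3, 300), torch.ones(4, 3))  # -> (4, 300) since reduction=True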
| 35.019678
| 93
| 0.581784
|
8d1a5dbfeba30249b4aab70c8af9d7b18f89590d
| 39,372
|
py
|
Python
|
code/qa_model.py
|
akshaynavalakha/cs224n-win18-squad
|
2ac0b63321468819f2d0bc4f1ddb3946d53fbed8
|
[
"Apache-2.0"
] | 6
|
2018-10-13T15:51:49.000Z
|
2022-03-25T00:53:57.000Z
|
code/qa_model.py
|
orkuntemiz/cs224n-win18-squad
|
2ac0b63321468819f2d0bc4f1ddb3946d53fbed8
|
[
"Apache-2.0"
] | null | null | null |
code/qa_model.py
|
orkuntemiz/cs224n-win18-squad
|
2ac0b63321468819f2d0bc4f1ddb3946d53fbed8
|
[
"Apache-2.0"
] | 2
|
2018-10-13T15:51:50.000Z
|
2020-03-20T21:34:36.000Z
|
# Copyright 2018 Stanford University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file defines the top-level model"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import logging
import os
import sys
import numpy as np
import tensorflow as tf
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import embedding_ops
from evaluate import exact_match_score, f1_score
from data_batcher import get_batch_generator
from pretty_print import print_example
from modules import RNNEncoder, SimpleSoftmaxLayer, BasicAttn, BiDafAttn, RNNEncoder_LSTM, MODEL_LAYER_BIDAF, END_WORD_LAYER, ANSWER_DECODER, masked_softmax, SelfAttn
from dcan import LSTMEncoder, CoAttention
logging.basicConfig(level=logging.INFO)
class QAModel(object):
"""Top-level Question Answering module"""
def __init__(self, FLAGS, id2word, word2id, emb_matrix):
"""
Initializes the QA model.
Inputs:
FLAGS: the flags passed in from main.py
id2word: dictionary mapping word idx (int) to word (string)
word2id: dictionary mapping word (string) to word idx (int)
          emb_matrix: numpy array shape (400002, embedding_size) containing pre-trained GloVe embeddings
"""
print "Initializing the QAModel..."
self.FLAGS = FLAGS
self.id2word = id2word
self.word2id = word2id
# Add all parts of the graph
with tf.variable_scope("QAModel", initializer=tf.contrib.layers.variance_scaling_initializer(factor=1.0, uniform=True)):
self.add_placeholders()
self.add_embedding_layer(emb_matrix)
self.build_graph()
self.add_loss()
# Define trainable parameters, gradient, gradient norm, and clip by gradient norm
params = tf.trainable_variables()
gradients = tf.gradients(self.loss, params)
self.gradient_norm = tf.global_norm(gradients)
clipped_gradients, _ = tf.clip_by_global_norm(gradients, FLAGS.max_gradient_norm)
self.param_norm = tf.global_norm(params)
# Define optimizer and updates
# (updates is what you need to fetch in session.run to do a gradient update)
self.global_step = tf.Variable(0, name="global_step", trainable=False)
        opt = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate) # you can try other optimizers
self.updates = opt.apply_gradients(zip(clipped_gradients, params), global_step=self.global_step)
# Define savers (for checkpointing) and summaries (for tensorboard)
self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=FLAGS.keep)
self.bestmodel_saver = tf.train.Saver(tf.global_variables(), max_to_keep=1)
self.summaries = tf.summary.merge_all()
def add_placeholders(self):
"""
Add placeholders to the graph. Placeholders are used to feed in inputs.
"""
# Add placeholders for inputs.
# These are all batch-first: the None corresponds to batch_size and
# allows you to run the same model with variable batch_size
self.context_ids = tf.placeholder(tf.int32, shape=[None, self.FLAGS.context_len])
self.context_mask = tf.placeholder(tf.int32, shape=[None, self.FLAGS.context_len])
self.qn_ids = tf.placeholder(tf.int32, shape=[None, self.FLAGS.question_len])
self.qn_mask = tf.placeholder(tf.int32, shape=[None, self.FLAGS.question_len])
self.ans_span = tf.placeholder(tf.int32, shape=[None, 2])
# Add a placeholder to feed in the keep probability (for dropout).
# This is necessary so that we can instruct the model to use dropout when training, but not when testing
self.keep_prob = tf.placeholder_with_default(1.0, shape=())
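        # Illustrative feed (a sketch; real batches come from data_batcher):
        #   feed = {self.context_ids: c_ids, self.context_mask: c_mask,
        #           self.qn_ids: q_ids, self.qn_mask: q_mask,
        #           self.ans_span: spans, self.keep_prob: keep_probability}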
def add_embedding_layer(self, emb_matrix):
"""
Adds word embedding layer to the graph.
Inputs:
emb_matrix: shape (400002, embedding_size).
The GloVe vectors, plus vectors for PAD and UNK.
"""
with vs.variable_scope("embeddings"):
# Note: the embedding matrix is a tf.constant which means it's not a trainable parameter
embedding_matrix = tf.constant(emb_matrix, dtype=tf.float32, name="emb_matrix") # shape (400002, embedding_size)
# Get the word embeddings for the context and question,
# using the placeholders self.context_ids and self.qn_ids
self.context_embs = embedding_ops.embedding_lookup(embedding_matrix, self.context_ids) # shape (batch_size, context_len, embedding_size)
self.qn_embs = embedding_ops.embedding_lookup(embedding_matrix, self.qn_ids) # shape (batch_size, question_len, embedding_size)
def build_graph(self):
"""Builds the main part of the graph for the model, starting from the input embeddings to the final distributions for the answer span.
Defines:
self.logits_start, self.logits_end: Both tensors shape (batch_size, context_len).
These are the logits (i.e. values that are fed into the softmax function) for the start and end distribution.
Important: these are -large in the pad locations. Necessary for when we feed into the cross entropy function.
self.probdist_start, self.probdist_end: Both shape (batch_size, context_len). Each row sums to 1.
These are the result of taking (masked) softmax of logits_start and logits_end.
"""
# Use a RNN to get hidden states for the context and the question
# Note: here the RNNEncoder is shared (i.e. the weights are the same)
# between the context and the question.
if self.FLAGS.model == "baseline" :
encoder = RNNEncoder(self.FLAGS.hidden_size, self.keep_prob)
elif self.FLAGS.model == "bidaf" or self.FLAGS.model == "bidaf_dynamic" or self.FLAGS.model=="bidaf_self_attn" or self.FLAGS.model=="bidaf_dynamic_self_attn":
print "INSIDE the BIDAF model"
encoder = RNNEncoder_LSTM(self.FLAGS.hidden_size, self.keep_prob)
elif self.FLAGS.model == "coatt" or self.FLAGS.model == "coatt_dynamic" or self.FLAGS.model=="coatt_dynamic_self_attn":
encoder = LSTMEncoder(self.FLAGS.hidden_size, self.keep_prob)
if self.FLAGS.model != "coatt" and self.FLAGS.model != "coatt_dynamic" and self.FLAGS.model!="coatt_dynamic_self_attn":
context_hiddens = encoder.build_graph(self.context_embs, self.context_mask) # (batch_size, context_len, hidden_size*2)
question_hiddens = encoder.build_graph(self.qn_embs, self.qn_mask) # (batch_size, question_len, hidden_size*2)
# Attention model
# Use context hidden states to attend to question hidden states
if self.FLAGS.model == "baseline" :
attn_layer = BasicAttn(self.keep_prob, self.FLAGS.hidden_size * 2, self.FLAGS.hidden_size * 2)
_,attn_output = attn_layer.build_graph(question_hiddens, self.qn_mask, context_hiddens) # attn_output is shape (batch_size, context_len, hidden_size*2)
# Concat attn_output to context_hiddens to get blended_reps
blended_reps = tf.concat([context_hiddens, attn_output], axis=2) # (batch_size, context_len, hidden_size*4)
# Apply fully connected layer to each blended representation
# Note, blended_reps_final corresponds to b' in the handout
            # Note, tf.contrib.layers.fully_connected applies a ReLU non-linearity here by default
blended_reps_final = tf.contrib.layers.fully_connected(blended_reps, num_outputs=self.FLAGS.hidden_size) # blended_reps_final is shape (batch_size, context_len, hidden_size)
# Use softmax layer to compute probability distribution for start location
# Note this produces self.logits_start and self.probdist_start, both of which have shape (batch_size, context_len)
with vs.variable_scope("StartDist"):
softmax_layer_start = SimpleSoftmaxLayer()
self.logits_start, self.probdist_start = softmax_layer_start.build_graph(blended_reps_final,self.context_mask)
# Use softmax layer to compute probability distribution for end location
# Note this produces self.logits_end and self.probdist_end, both of which have shape (batch_size, context_len)
with vs.variable_scope("EndDist"):
softmax_layer_end = SimpleSoftmaxLayer()
self.logits_end, self.probdist_end = softmax_layer_end.build_graph(blended_reps_final,self.context_mask)
# Attention model
# Use context hidden states to attend to question hidden states
if self.FLAGS.model == "coatt" :
#context_hiddens = encoder.build_graph(self.context_embs, self.context_mask, "context") # (batch_size, context_len, hidden_size*2)
#question_hiddens = encoder.build_graph(self.qn_embs, self.qn_mask, "question") # (batch_size, question_len, hidden_size*2)
context_hiddens, question_hiddens = encoder.build_graph1(self.context_embs, self.qn_embs, self.context_mask, self.qn_mask)
attn_layer = CoAttention(self.keep_prob, self.FLAGS.hidden_size*2, self.FLAGS.hidden_size*2)
attn_output = attn_layer.build_graph(question_hiddens, self.qn_mask, context_hiddens, self.context_mask)
blended_reps_final = attn_output
#blended_reps = tf.concat([context_hiddens, attn_output], axis=2)
# Apply fully connected layer to each blended representation
# Note, blended_reps_final corresponds to b' in the handout
            # Note, tf.contrib.layers.fully_connected applies a ReLU non-linearity here by default
#blended_reps_final = tf.contrib.layers.fully_connected(blended_reps, num_outputs=self.FLAGS.hidden_size) # blended_reps_final is shape (batch_size, context_len, hidden_size)
# Use softmax layer to compute probability distribution for start location
# Note this produces self.logits_start and self.probdist_start, both of which have shape (batch_size, context_len)
with vs.variable_scope("StartDist"):
softmax_layer_start = SimpleSoftmaxLayer()
self.logits_start, self.probdist_start = softmax_layer_start.build_graph(blended_reps_final,self.context_mask)
# Use softmax layer to compute probability distribution for end location
# Note this produces self.logits_end and self.probdist_end, both of which have shape (batch_size, context_len)
with vs.variable_scope("EndDist"):
contextLen = tf.reduce_sum(self.context_mask, axis=1)
cell = tf.contrib.rnn.LSTMBlockCell(2 * self.FLAGS.hidden_size)
(fw_out, bw_out), _ = tf.nn.bidirectional_dynamic_rnn(cell, cell, attn_output, contextLen, dtype = tf.float32)
U_1 = tf.concat([fw_out, bw_out], axis=2)
out = tf.nn.dropout(U_1, self.keep_prob)
softmax_layer_end = SimpleSoftmaxLayer()
self.logits_end, self.probdist_end = softmax_layer_end.build_graph(out,self.context_mask)
elif self.FLAGS.model =="bidaf" or self.FLAGS.model=="bidaf_self_attn":
attn_layer = BiDafAttn(self.keep_prob, self.FLAGS.hidden_size*2, self.FLAGS.hidden_size*2)
attn_output_tmp = attn_layer.build_graph(question_hiddens, self.qn_mask, context_hiddens, self.context_mask) # attn_output is shape (batch_size, context_len, hidden_size*8)
# Set of vectors which produces a set of query aware feature vectors for each word in the context
#blended_reps = attn_output #(batch_size, num_keys, 4*value_vec_size)
if self.FLAGS.model == "bidaf_self_attn":
self_attn_layer = SelfAttn(self.keep_prob, self.FLAGS.hidden_size * 8, self.FLAGS.hidden_size * 8)
                _,self_attn_output = self_attn_layer.build_graph(attn_output_tmp, self.context_mask) #(batch_size, context_len, 8*hidden_size)
attn_output = tf.concat([attn_output_tmp, self_attn_output], axis=2) #(batch_size, context_len, 16*hidden_size)
else:
attn_output = attn_output_tmp
            # In BiDAF the attention output is fed to a modeling layer
            # The modeling layer is a 2-layer LSTM
mod_layer = MODEL_LAYER_BIDAF(self.FLAGS.hidden_size, self.keep_prob)
mod_layer_out = mod_layer.build_graph(attn_output, self.context_mask) # (batch_size, context_len, hidden_size*2)
blended_reps_start = tf.concat([attn_output,mod_layer_out], axis=2) # (batch_size, context_len, hidden_size*10)
# Use softmax layer to compute probability distribution for start location
# Note this produces self.logits_start and self.probdist_start, both of which have shape (batch_size, context_len)
with vs.variable_scope("StartDist"):
softmax_layer_start = SimpleSoftmaxLayer()
self.logits_start, self.probdist_start = softmax_layer_start.build_graph(blended_reps_start, self.context_mask)
# Use softmax layer to compute probability distribution for end location
# Note this produces self.logits_end and self.probdist_end, both of which have shape (batch_size, context_len)
with vs.variable_scope("EndDist"):
# Concatenate the start logits with the modelling layer output to get the input to the
# end word lstm
#self.logits_start has a shape of #(batch_size, context_len)
logits_start_expand = tf.expand_dims(self.logits_start, axis=2) #(batch_size, context_len, 1)
end_lstm_input = tf.concat([logits_start_expand, mod_layer_out], axis=2) #(batch_size, context_len, 1 + hidden_size*2)
# LSTM
end_layer = END_WORD_LAYER(self.FLAGS.hidden_size, self.keep_prob)
blended_reps_end = end_layer.build_graph(end_lstm_input, self.context_mask)
blended_reps_end_final = tf.concat([attn_output, blended_reps_end], axis=2)
softmax_layer_end = SimpleSoftmaxLayer()
self.logits_end, self.probdist_end = softmax_layer_end.build_graph(blended_reps_end_final, self.context_mask)
elif self.FLAGS.model =="bidaf_dynamic" or self.FLAGS.model =="bidaf_dynamic_self_attn":
attn_layer = BiDafAttn(self.keep_prob, self.FLAGS.hidden_size*2, self.FLAGS.hidden_size*2)
attn_output_tmp = attn_layer.build_graph(question_hiddens, self.qn_mask, context_hiddens, self.context_mask) # attn_output is shape (batch_size, context_len, hidden_size*8)
if self.FLAGS.model == "bidaf_dynamic_self_attn":
self_attn_layer = SelfAttn(self.keep_prob, self.FLAGS.hidden_size * 8, self.FLAGS.hidden_size * 8)
                _,self_attn_output = self_attn_layer.build_graph(attn_output_tmp,self.context_mask) # (batch_size, context_len, 8*hidden_size)
attn_output = tf.concat([attn_output_tmp, self_attn_output], axis=2) #(batch_size, context_len, 16*hidden_size)
else:
attn_output = attn_output_tmp
# Set of vectors which produces a set of query aware feature vectors for each word in the context
#blended_reps = attn_output #(batch_size, num_keys, 4*value_vec_size)
            # In BiDAF the attention output is fed to a modeling layer
            # The modeling layer is a 2-layer LSTM
mod_layer = MODEL_LAYER_BIDAF(self.FLAGS.hidden_size, self.keep_prob)
mod_layer_out = mod_layer.build_graph(attn_output, self.context_mask) # (batch_size, context_len, hidden_size*2)
blended_reps_start = tf.concat([attn_output,mod_layer_out], axis=2) # (batch_size, context_len, hidden_size*10)
# We now feed this to dynamic decoder module coded in Answer decoder
# the output of the decoder are start, end, alpha_logits and beta_logits
# start and end have a shape of (batch_size, num_iterations)
            #alpha_logits and beta_logits have a shape of (batch_size, num_iterations, input_dim)
decoder = ANSWER_DECODER(self.FLAGS.hidden_size, self.keep_prob, self.FLAGS.num_iterations, self.FLAGS.max_pool, self.FLAGS.batch_size)
u_s_init = mod_layer_out[:,0,:]
u_e_init = mod_layer_out[:,0,:]
start_location, end_location, alpha_logits, beta_logits = decoder.build_graph(mod_layer_out, self.context_mask, u_s_init, u_e_init)
# Use softmax layer to compute probability distribution for start location
# Note this produces self.logits_start and self.probdist_start, both of which have shape (batch_size, context_len)
with vs.variable_scope("StartDist"):
#softmax_layer_start = SimpleSoftmaxLayer()
logits_start_tmp = [masked_softmax(logits, self.context_mask, 1) for logits in alpha_logits]
self.alpha_logits, alpha_logits_probs = zip(*logits_start_tmp)
self.logits_start, self.probdist_start = self.alpha_logits[self.FLAGS.num_iterations - 1], alpha_logits_probs[self.FLAGS.num_iterations - 1]
# Use softmax layer to compute probability distribution for end location
# Note this produces self.logits_end and self.probdist_end, both of which have shape (batch_size, context_len)
with vs.variable_scope("EndDist"):
logits_end_tmp = [masked_softmax(logits, self.context_mask, 1) for logits in beta_logits]
self.beta_logits, beta_logits_probs = zip(*logits_end_tmp)
self.logits_end, self.probdist_end = self.beta_logits[self.FLAGS.num_iterations - 1], beta_logits_probs[self.FLAGS.num_iterations - 1]
elif self.FLAGS.model =="coatt_dynamic" or self.FLAGS.model == "coatt_dynamic_self_attn":
context_hiddens, question_hiddens = encoder.build_graph1(self.context_embs, self.qn_embs, self.context_mask, self.qn_mask)
attn_layer = CoAttention(self.keep_prob, self.FLAGS.hidden_size*2, self.FLAGS.hidden_size*2)
if self.FLAGS.model == "coatt_dynamic_self_attn":
CoATT = attn_layer.build_graph1(question_hiddens, self.qn_mask, context_hiddens, self.context_mask)
self_attn_layer = SelfAttn(self.keep_prob, self.FLAGS.hidden_size * 8, self.FLAGS.hidden_size * 8)
_, self_attn_output = self_attn_layer.build_graph(CoATT, self.context_mask) # (batch_size, context_len, 8*hidden_size)
attn_output = tf.concat([CoATT, self_attn_output], axis=2) #(batch_size, context_len, 16*hidden_size)
else:
U = attn_layer.build_graph(question_hiddens, self.qn_mask, context_hiddens, self.context_mask)
attn_output = U
#blended_reps = tf.concat([context_hiddens, attn_output], axis=2)
# We now feed the attention output to the dynamic decoder module (ANSWER_DECODER),
# as in the bidaf_dynamic branch above
decoder = ANSWER_DECODER(self.FLAGS.hidden_size, self.keep_prob, self.FLAGS.num_iterations, self.FLAGS.max_pool, self.FLAGS.batch_size)
u_s_init = attn_output[:,0,:]
u_e_init = attn_output[:,0,:]
start_location, end_location, alpha_logits, beta_logits = decoder.build_graph(attn_output, self.context_mask, u_s_init, u_e_init)
# Use softmax layer to compute probability distribution for start location
# Note this produces self.logits_start and self.probdist_start, both of which have shape (batch_size, context_len)
with vs.variable_scope("StartDist"):
#softmax_layer_start = SimpleSoftmaxLayer()
logits_start_tmp = [masked_softmax(logits, self.context_mask, 1) for logits in alpha_logits]
self.alpha_logits, alpha_logits_probs = zip(*logits_start_tmp)
self.logits_start, self.probdist_start = self.alpha_logits[self.FLAGS.num_iterations - 1], alpha_logits_probs[self.FLAGS.num_iterations - 1]
# Use softmax layer to compute probability distribution for end location
# Note this produces self.logits_end and self.probdist_end, both of which have shape (batch_size, context_len)
with vs.variable_scope("EndDist"):
logits_end_tmp = [masked_softmax(logits, self.context_mask, 1) for logits in beta_logits]
self.beta_logits, beta_logits_probs = zip(*logits_end_tmp)
self.logits_end, self.probdist_end = self.beta_logits[self.FLAGS.num_iterations - 1], beta_logits_probs[self.FLAGS.num_iterations - 1]
def add_loss(self):
"""
Add loss computation to the graph.
Uses:
self.logits_start: shape (batch_size, context_len)
IMPORTANT: Assumes that self.logits_start is masked (i.e. has -large in masked locations).
That's because the tf.nn.sparse_softmax_cross_entropy_with_logits
function applies softmax and then computes cross-entropy loss.
So you need to apply masking to the logits (by subtracting large
number in the padding location) BEFORE you pass to the
sparse_softmax_cross_entropy_with_logits function.
self.ans_span: shape (batch_size, 2)
Contains the gold start and end locations
Defines:
self.loss_start, self.loss_end, self.loss: all scalar tensors
"""
with vs.variable_scope("loss"):
if self.FLAGS.model == "bidaf_dynamic" or self.FLAGS.model == "bidaf_dynamic_self_attn" or self.FLAGS.model == "coatt_dynamic_self_attn" or self.FLAGS.model =="coatt_dynamic":
loss_start = [tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=self.ans_span[:, 0]) for logits in self.alpha_logits]
loss_end = [tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=self.ans_span[:, 1]) for logits in self.beta_logits]
self.loss_start = tf.reduce_mean(loss_start)
tf.summary.scalar('loss_start', self.loss_start) # log to tensorboard
self.loss_end = tf.reduce_mean(loss_end)
tf.summary.scalar('loss_end', self.loss_end)
weights = tf.trainable_variables()
weights_loss = tf.add_n([tf.nn.l2_loss(v) for v in weights if 'bias' not in v.name])
# Add the two losses
self.loss = self.loss_start + self.loss_end + self.FLAGS.regularization * weights_loss
tf.summary.scalar('loss', self.loss)
else:
# Calculate loss for prediction of start position
loss_start = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.logits_start, labels=self.ans_span[:, 0]) # loss_start has shape (batch_size)
self.loss_start = tf.reduce_mean(loss_start) # scalar. avg across batch
tf.summary.scalar('loss_start', self.loss_start) # log to tensorboard
# Calculate loss for prediction of end position
loss_end = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.logits_end, labels=self.ans_span[:, 1])
self.loss_end = tf.reduce_mean(loss_end)
tf.summary.scalar('loss_end', self.loss_end)
weights = tf.trainable_variables()
weights_loss = tf.add_n([tf.nn.l2_loss(v) for v in weights if 'bias' not in v.name])
# Add the two losses
self.loss = self.loss_start + self.loss_end + self.FLAGS.regularization * weights_loss
tf.summary.scalar('loss', self.loss)
def run_train_iter(self, session, batch, summary_writer):
"""
This performs a single training iteration (forward pass, loss computation, backprop, parameter update)
Inputs:
session: TensorFlow session
batch: a Batch object
summary_writer: for Tensorboard
Returns:
loss: The loss (averaged across the batch) for this batch.
global_step: The current number of training iterations we've done
param_norm: Global norm of the parameters
gradient_norm: Global norm of the gradients
"""
# Match up our input data with the placeholders
input_feed = {}
input_feed[self.context_ids] = batch.context_ids
input_feed[self.context_mask] = batch.context_mask
input_feed[self.qn_ids] = batch.qn_ids
input_feed[self.qn_mask] = batch.qn_mask
input_feed[self.ans_span] = batch.ans_span
input_feed[self.keep_prob] = 1.0 - self.FLAGS.dropout # apply dropout
# output_feed contains the things we want to fetch.
output_feed = [self.updates, self.summaries, self.loss, self.global_step, self.param_norm, self.gradient_norm]
# Run the model
[_, summaries, loss, global_step, param_norm, gradient_norm] = session.run(output_feed, input_feed)
# All summaries in the graph are added to Tensorboard
summary_writer.add_summary(summaries, global_step)
return loss, global_step, param_norm, gradient_norm
def get_loss(self, session, batch):
"""
Run forward-pass only; get loss.
Inputs:
session: TensorFlow session
batch: a Batch object
Returns:
loss: The loss (averaged across the batch) for this batch
"""
input_feed = {}
input_feed[self.context_ids] = batch.context_ids
input_feed[self.context_mask] = batch.context_mask
input_feed[self.qn_ids] = batch.qn_ids
input_feed[self.qn_mask] = batch.qn_mask
input_feed[self.ans_span] = batch.ans_span
# note you don't supply keep_prob here, so it will default to 1 i.e. no dropout
output_feed = [self.loss]
[loss] = session.run(output_feed, input_feed)
return loss
def get_prob_dists(self, session, batch):
"""
Run forward-pass only; get probability distributions for start and end positions.
Inputs:
session: TensorFlow session
batch: Batch object
Returns:
probdist_start and probdist_end: both shape (batch_size, context_len)
"""
input_feed = {}
input_feed[self.context_ids] = batch.context_ids
input_feed[self.context_mask] = batch.context_mask
input_feed[self.qn_ids] = batch.qn_ids
input_feed[self.qn_mask] = batch.qn_mask
# note you don't supply keep_prob here, so it will default to 1 i.e. no dropout
output_feed = [self.probdist_start, self.probdist_end]
[probdist_start, probdist_end] = session.run(output_feed, input_feed)
return probdist_start, probdist_end
def get_start_end_pos(self, session, batch):
"""
Run forward-pass only; get the most likely answer span.
Inputs:
session: TensorFlow session
batch: Batch object
Returns:
start_pos, end_pos: both numpy arrays shape (batch_size).
The most likely start and end positions for each example in the batch.
"""
# Get start_dist and end_dist, both shape (batch_size, context_len)
start_dist, end_dist = self.get_prob_dists(session, batch)
# Take argmax to get start_pos and end_pos, both shape (batch_size)
start_pos = np.argmax(start_dist, axis=1)
end_pos = np.argmax(end_dist, axis=1)
return start_pos, end_pos
def get_dev_loss(self, session, dev_context_path, dev_qn_path, dev_ans_path):
"""
Get loss for entire dev set.
Inputs:
session: TensorFlow session
dev_qn_path, dev_context_path, dev_ans_path: paths to the dev.{context/question/answer} data files
Outputs:
dev_loss: float. Average loss across the dev set.
"""
logging.info("Calculating dev loss...")
tic = time.time()
loss_per_batch, batch_lengths = [], []
# Iterate over dev set batches
# Note: here we set discard_long=True, meaning we discard any examples
# which are longer than our context_len or question_len.
# We need to do this because if, for example, the true answer is cut
# off the context, then the loss function is undefined.
for batch in get_batch_generator(self.word2id, dev_context_path, dev_qn_path, dev_ans_path, self.FLAGS.batch_size, context_len=self.FLAGS.context_len, question_len=self.FLAGS.question_len, discard_long=True):
# Get loss for this batch
loss = self.get_loss(session, batch)
curr_batch_size = batch.batch_size
loss_per_batch.append(loss * curr_batch_size)
batch_lengths.append(curr_batch_size)
# Calculate average loss
total_num_examples = sum(batch_lengths)
toc = time.time()
print "Computed dev loss over %i examples in %.2f seconds" % (total_num_examples, toc-tic)
# Overall loss is total loss divided by total number of examples
dev_loss = sum(loss_per_batch) / float(total_num_examples)
return dev_loss
def check_f1_em(self, session, context_path, qn_path, ans_path, dataset, num_samples=100, print_to_screen=False):
"""
Sample from the provided (train/dev) set.
For each sample, calculate F1 and EM score.
Return average F1 and EM score for all samples.
Optionally pretty-print examples.
Note: This function is not quite the same as the F1/EM numbers you get from "official_eval" mode.
This function uses the pre-processed version of the e.g. dev set for speed,
whereas "official_eval" mode uses the original JSON. Therefore:
1. official_eval takes your max F1/EM score w.r.t. the three reference answers,
whereas this function compares to just the first answer (which is what's saved in the preprocessed data)
2. Our preprocessed version of the dev set is missing some examples
due to tokenization issues (see squad_preprocess.py).
"official_eval" includes all examples.
Inputs:
session: TensorFlow session
qn_path, context_path, ans_path: paths to {dev/train}.{question/context/answer} data files.
dataset: string. Either "train" or "dev". Just for logging purposes.
num_samples: int. How many samples to use. If num_samples=0 then do whole dataset.
print_to_screen: if True, pretty-prints each example to screen
Returns:
F1 and EM: Scalars. The average across the sampled examples.
"""
logging.info("Calculating F1/EM for %s examples in %s set..." % (str(num_samples) if num_samples != 0 else "all", dataset))
f1_total = 0.
em_total = 0.
example_num = 0
tic = time.time()
# Note here we select discard_long=False because we want to sample from the entire dataset
# That means we're truncating, rather than discarding, examples with too-long context or questions
for batch in get_batch_generator(self.word2id, context_path, qn_path, ans_path, self.FLAGS.batch_size, context_len=self.FLAGS.context_len, question_len=self.FLAGS.question_len, discard_long=False):
pred_start_pos, pred_end_pos = self.get_start_end_pos(session, batch)
# Convert the start and end positions to lists length batch_size
pred_start_pos = pred_start_pos.tolist() # list length batch_size
pred_end_pos = pred_end_pos.tolist() # list length batch_size
for ex_idx, (pred_ans_start, pred_ans_end, true_ans_tokens) in enumerate(zip(pred_start_pos, pred_end_pos, batch.ans_tokens)):
example_num += 1
# Get the predicted answer
# Important: batch.context_tokens contains the original words (no UNKs)
# You need to use the original no-UNK version when measuring F1/EM
pred_ans_tokens = batch.context_tokens[ex_idx][pred_ans_start : pred_ans_end + 1]
pred_answer = " ".join(pred_ans_tokens)
# Get true answer (no UNKs)
true_answer = " ".join(true_ans_tokens)
# Calc F1/EM
f1 = f1_score(pred_answer, true_answer)
em = exact_match_score(pred_answer, true_answer)
f1_total += f1
em_total += em
# Optionally pretty-print
if print_to_screen:
print_example(self.word2id, batch.context_tokens[ex_idx], batch.qn_tokens[ex_idx], batch.ans_span[ex_idx, 0], batch.ans_span[ex_idx, 1], pred_ans_start, pred_ans_end, true_answer, pred_answer, f1, em)
if num_samples != 0 and example_num >= num_samples:
break
if num_samples != 0 and example_num >= num_samples:
break
f1_total /= example_num
em_total /= example_num
toc = time.time()
logging.info("Calculating F1/EM for %i examples in %s set took %.2f seconds" % (example_num, dataset, toc-tic))
return f1_total, em_total
def train(self, session, train_context_path, train_qn_path, train_ans_path, dev_qn_path, dev_context_path, dev_ans_path):
"""
Main training loop.
Inputs:
session: TensorFlow session
{train/dev}_{qn/context/ans}_path: paths to {train/dev}.{context/question/answer} data files
"""
# Print number of model parameters
tic = time.time()
params = tf.trainable_variables()
num_params = sum(map(lambda t: np.prod(tf.shape(t.value()).eval()), params))
toc = time.time()
logging.info("Number of params: %d (retrieval took %f secs)" % (num_params, toc - tic))
# We will keep track of exponentially-smoothed loss
exp_loss = None
# Checkpoint management.
# We keep one latest checkpoint, and one best checkpoint (early stopping)
checkpoint_path = os.path.join(self.FLAGS.train_dir, "qa.ckpt")
bestmodel_dir = os.path.join(self.FLAGS.train_dir, "best_checkpoint")
bestmodel_ckpt_path = os.path.join(bestmodel_dir, "qa_best.ckpt")
best_dev_f1 = None
best_dev_em = None
# for TensorBoard
summary_writer = tf.summary.FileWriter(self.FLAGS.train_dir, session.graph)
epoch = 0
logging.info("Beginning training loop...")
while self.FLAGS.num_epochs == 0 or epoch < self.FLAGS.num_epochs:
epoch += 1
epoch_tic = time.time()
# Loop over batches
for batch in get_batch_generator(self.word2id, train_context_path, train_qn_path, train_ans_path, self.FLAGS.batch_size, context_len=self.FLAGS.context_len, question_len=self.FLAGS.question_len, discard_long=True):
# Run training iteration
iter_tic = time.time()
loss, global_step, param_norm, grad_norm = self.run_train_iter(session, batch, summary_writer)
iter_toc = time.time()
iter_time = iter_toc - iter_tic
# Update exponentially-smoothed loss
if not exp_loss: # first iter
exp_loss = loss
else:
exp_loss = 0.99 * exp_loss + 0.01 * loss
# Sometimes print info to screen
if global_step % self.FLAGS.print_every == 0:
logging.info(
'epoch %d, iter %d, loss %.5f, smoothed loss %.5f, grad norm %.5f, param norm %.5f, batch time %.3f' %
(epoch, global_step, loss, exp_loss, grad_norm, param_norm, iter_time))
# Sometimes save model
if global_step % self.FLAGS.save_every == 0:
logging.info("Saving to %s..." % checkpoint_path)
self.saver.save(session, checkpoint_path, global_step=global_step)
# Sometimes evaluate model on dev loss, train F1/EM and dev F1/EM
if global_step % self.FLAGS.eval_every == 0:
# Get loss for entire dev set and log to tensorboard
dev_loss = self.get_dev_loss(session, dev_context_path, dev_qn_path, dev_ans_path)
logging.info("Epoch %d, Iter %d, dev loss: %f" % (epoch, global_step, dev_loss))
write_summary(dev_loss, "dev/loss", summary_writer, global_step)
# Get F1/EM on train set and log to tensorboard
train_f1, train_em = self.check_f1_em(session, train_context_path, train_qn_path, train_ans_path, "train", num_samples=1000)
logging.info("Epoch %d, Iter %d, Train F1 score: %f, Train EM score: %f" % (epoch, global_step, train_f1, train_em))
write_summary(train_f1, "train/F1", summary_writer, global_step)
write_summary(train_em, "train/EM", summary_writer, global_step)
# Get F1/EM on dev set and log to tensorboard
dev_f1, dev_em = self.check_f1_em(session, dev_context_path, dev_qn_path, dev_ans_path, "dev", num_samples=0)
logging.info("Epoch %d, Iter %d, Dev F1 score: %f, Dev EM score: %f" % (epoch, global_step, dev_f1, dev_em))
write_summary(dev_f1, "dev/F1", summary_writer, global_step)
write_summary(dev_em, "dev/EM", summary_writer, global_step)
# Early stopping based on dev EM. You could switch this to use F1 instead.
if best_dev_em is None or dev_em > best_dev_em:
best_dev_em = dev_em
logging.info("Saving to %s..." % bestmodel_ckpt_path)
self.bestmodel_saver.save(session, bestmodel_ckpt_path, global_step=global_step)
epoch_toc = time.time()
logging.info("End of epoch %i. Time for epoch: %f" % (epoch, epoch_toc-epoch_tic))
sys.stdout.flush()
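# The sketch below is illustrative only (not part of the original model): it
# reproduces, with plain Python floats and made-up per-batch losses, the
# exponentially-smoothed loss update used in the training loop above
# (exp_loss = 0.99 * exp_loss + 0.01 * loss).
def _demo_exp_smoothed_loss():
    losses = [5.0, 4.0, 6.0, 3.0]  # made-up per-batch losses
    exp_loss = None
    smoothed = []
    for loss in losses:
        exp_loss = loss if exp_loss is None else 0.99 * exp_loss + 0.01 * loss
        smoothed.append(exp_loss)
    return smoothed  # [5.0, 4.99, 5.0001, 4.980099]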
def write_summary(value, tag, summary_writer, global_step):
"""Write a single summary value to tensorboard"""
summary = tf.Summary()
summary.value.add(tag=tag, simple_value=value)
summary_writer.add_summary(summary, global_step)
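# Illustrative sketch (not part of the original graph): the masking trick the
# add_loss docstring describes, done with numpy. Padding positions get a large
# negative value added to their logits before softmax, so they receive ~zero
# probability, and the argmax-based decoding mirrors get_start_end_pos.
# Assumes numpy is available as `np`, as elsewhere in this file.
def _demo_masked_softmax_and_argmax():
    logits = np.array([[2.0, 1.0, 0.5, 3.0]])
    mask = np.array([[1, 1, 1, 0]])          # last position is padding
    masked = logits + (1 - mask) * (-1e30)   # padding logits become ~ -inf
    exp = np.exp(masked - masked.max(axis=1, keepdims=True))
    probs = exp / exp.sum(axis=1, keepdims=True)
    return np.argmax(probs, axis=1)          # -> array([0]); padding never wins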
| 53.494565
| 226
| 0.670476
|
6e81057949c6cf17a5995ea905ad6faeab30cfab
| 121
|
py
|
Python
|
cosycar-runner.py
|
eragnms/cosy-car
|
73290ee01cf79b75d42fdd4b0fdb9188af797113
|
[
"MIT"
] | null | null | null |
cosycar-runner.py
|
eragnms/cosy-car
|
73290ee01cf79b75d42fdd4b0fdb9188af797113
|
[
"MIT"
] | null | null | null |
cosycar-runner.py
|
eragnms/cosy-car
|
73290ee01cf79b75d42fdd4b0fdb9188af797113
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from cosycar.cosycar import main
if __name__ == '__main__':
main()
| 13.444444
| 32
| 0.628099
|
e19dc60de952da108e3017025ebb908820135231
| 315
|
py
|
Python
|
deeppages/tests.py
|
ricardofalasca/deep-pages
|
d1b2a48f62c31e20d767df5c6345e07e4d05290d
|
[
"MIT"
] | null | null | null |
deeppages/tests.py
|
ricardofalasca/deep-pages
|
d1b2a48f62c31e20d767df5c6345e07e4d05290d
|
[
"MIT"
] | null | null | null |
deeppages/tests.py
|
ricardofalasca/deep-pages
|
d1b2a48f62c31e20d767df5c6345e07e4d05290d
|
[
"MIT"
] | null | null | null |
from django.test import TestCase
# from model_mommy import mommy
class PageTestCase(TestCase):
def test_deep_page_settings(self):
pass
def test_create_deep_page(self):
pass
def test_create_duplicated_path_pages(self):
pass
def test_request_deep_page(self):
pass
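# A sketch of how one of the stubs above might be filled in. The `Page` model
# and its `name`/`path` fields are assumptions for illustration, not taken
# from the deeppages source, so the example is left commented out:
#
# from deeppages.models import Page
#
# class PageCreationTestCase(TestCase):
#     def test_create_deep_page(self):
#         Page.objects.create(name='Home', path='/home/')
#         self.assertEqual(Page.objects.count(), 1)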
| 18.529412
| 48
| 0.704762
|
aa72d3332eed5898f83208037eb6a19a4b6e70fe
| 2,309
|
py
|
Python
|
tools/install_venv.py
|
gokarslan/networking-odl2
|
6a6967832b2c02dfcff6a9f0ab6e36472b849ce8
|
[
"Apache-2.0"
] | null | null | null |
tools/install_venv.py
|
gokarslan/networking-odl2
|
6a6967832b2c02dfcff6a9f0ab6e36472b849ce8
|
[
"Apache-2.0"
] | null | null | null |
tools/install_venv.py
|
gokarslan/networking-odl2
|
6a6967832b2c02dfcff6a9f0ab6e36472b849ce8
|
[
"Apache-2.0"
] | 1
|
2019-10-25T08:12:03.000Z
|
2019-10-25T08:12:03.000Z
|
#!/usr/bin/env python
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2010 OpenStack Foundation.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Installation script for Neutron's development virtualenv
"""
from __future__ import print_function
import os
import sys
import install_venv_common as install_venv
def print_help():
help = """
Neutron development environment setup is complete.
Neutron development uses virtualenv to track and manage Python dependencies
while in development and testing.
To activate the Neutron virtualenv for the extent of your current shell
session you can run:
$ . .venv/bin/activate
Or, if you prefer, you can run commands in the virtualenv on a case by case
basis by running:
$ tools/with_venv.sh <your command>
Also, make test will automatically use the virtualenv.
"""
print(help)
def main(argv):
root = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
venv = os.path.join(root, '.venv')
pip_requires = os.path.join(root, 'requirements.txt')
test_requires = os.path.join(root, 'test-requirements.txt')
py_version = "python%s.%s" % (sys.version_info[0], sys.version_info[1])
project = 'Neutron'
install = install_venv.InstallVenv(root, venv, pip_requires, test_requires,
py_version, project)
options = install.parse_args(argv)
install.check_python_version()
install.check_dependencies()
install.create_virtualenv(no_site_packages=options.no_site_packages)
install.install_dependencies()
print_help()
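# Illustrative sketch only (not used by the installer): the same path
# derivation main() performs, shown for an assumed (not real) checkout
# location.
def _demo_derived_paths():
    root = '/opt/networking-odl'  # assumed checkout path
    return (os.path.join(root, '.venv'),
            os.path.join(root, 'requirements.txt'),
            os.path.join(root, 'test-requirements.txt'))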
if __name__ == '__main__':
sys.exit(main(sys.argv))
| 31.630137
| 79
| 0.728887
|
a05a5d66eccd65badfda70360c19d617fb1ea6ab
| 9,678
|
py
|
Python
|
app/main/views.py
|
finndai/flasky
|
75e7ba6d561a63b8ec3f58aca4732d4aa0f51d74
|
[
"MIT"
] | null | null | null |
app/main/views.py
|
finndai/flasky
|
75e7ba6d561a63b8ec3f58aca4732d4aa0f51d74
|
[
"MIT"
] | null | null | null |
app/main/views.py
|
finndai/flasky
|
75e7ba6d561a63b8ec3f58aca4732d4aa0f51d74
|
[
"MIT"
] | null | null | null |
from flask import render_template,abort,redirect,flash,url_for,request,\
current_app,make_response
from flask_login import login_required,current_user
from flask_sqlalchemy import get_debug_queries
from . import main
from .forms import EditProfileForm,EditProfileAdminForm,PostForm,CommentForm
from ..import db
from ..models import Permission,User,Role,Post,Comment
from ..decorators import admin_required,permission_required
@main.after_app_request
def after_request(response):
for query in get_debug_queries():
if query.duration >= current_app.config['FLASKY_SLOW_DB_QUERY_TIME']:
current_app.logger.warning(
'Slow query: %s\nParameters: %s\nDuration: %fs\nContext: %s\n'
%(query.statement,query.parameters,query.duration,query.context))
return response
@main.route('/shutdown')
def server_shutdown():
if not current_app.testing:
abort(404)
shutdown = request.environ.get('werkzeug.server.shutdown')
if not shutdown:
abort(500)
shutdown()
return 'Shutting down...'
@main.route('/',methods=['GET','POST'])
def index():
form = PostForm()
if current_user.can(Permission.WRITE_ARTICLES) and \
form.validate_on_submit():
post = Post(body=form.body.data,author=current_user._get_current_object())
db.session.add(post)
db.session.commit()
return redirect(url_for('.index'))
page = request.args.get('page', 1, type=int)
show_followed = False
if current_user.is_authenticated:
show_followed = bool(request.cookies.get('show_followed',''))
if show_followed:
query = current_user.followed_posts
else:
query = Post.query
pagination = query.order_by(Post.timestamp.desc()).paginate(page,per_page=current_app.config['FLASKY_POSTS_PER_PAGE'],
error_out=False)
posts = pagination.items
return render_template('index.html',form=form,posts=posts,show_followed=show_followed,pagination=pagination)
@main.route('/user/<username>')
def user(username):
user = User.query.filter_by(username=username).first()
if user is None:
abort(404)
page = request.args.get('page', 1, type=int)
pagination = user.posts.order_by(Post.timestamp.desc()).paginate(page, per_page=current_app.config[
'FLASKY_POSTS_PER_PAGE'],error_out=False)
posts = pagination.items
return render_template('user.html',user=user,posts=posts,pagination=pagination)
@main.route('/edit-profile',methods=['GET','POST'])
@login_required
def edit_profile():
form = EditProfileForm()
if form.validate_on_submit():
current_user.name = form.name.data
current_user.location = form.location.data
current_user.about_me = form.about_me.data
db.session.add(current_user)
db.session.commit()
flash('Your profile has been updated')
return redirect(url_for('.user',username=current_user.username))
form.name.data = current_user.name
form.location.data = current_user.location
form.about_me.data = current_user.about_me
return render_template('edit_profile.html',form=form)
@main.route('/edit-profile/<int:id>',methods=['GET','POST'])
@login_required
@admin_required
def edit_profile_admin(id):
user = User.query.get_or_404(id)
form = EditProfileAdminForm(user=user)
if form.validate_on_submit():
user.email = form.email.data
user.username = form.username.data
user.confirmed = form.confirmed.data
user.role = Role.query.get(form.role.data)
user.name = form.name.data
user.location = form.location.data
user.about_me = form.about_me.data
db.session.add(user)
db.session.commit()
flash('The profile has been updated')
return redirect(url_for('.user',username=user.username))
form.email.data = user.email
form.username.data = user.username
form.confirmed.data = user.confirmed
form.role.data = user.role_id
form.name.data = user.name
form.location.data = user.location
form.about_me.data = user.about_me
return render_template('edit_profile.html',form=form,user=user)
@main.route('/post/<int:id>',methods=['GET','POST'])
def post(id):
post = Post.query.get_or_404(id)
form = CommentForm()
if form.validate_on_submit():
comment = Comment(body=form.body.data,post=post,author=current_user._get_current_object())
db.session.add(comment)
flash('Your comment has been published')
return redirect(url_for('.post',id=post.id,page=-1))
page = request.args.get('page',1,type=int)
if page == -1:
page = (post.comments.count() - 1) // current_app.config['FLASKY_COMMENTS_PER_PAGE'] + 1
pagination = post.comments.order_by(Comment.timestamp.asc()).paginate(page,
per_page=current_app.config['FLASKY_COMMENTS_PER_PAGE'],error_out=False)
comments = pagination.items
return render_template('post.html',posts=[post],form=form,comments=comments,pagination=pagination)
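# Illustrative sketch (not a view): the "page = -1 means last page" arithmetic
# used in post() above, with made-up counts. Floor division keeps the result
# an int under both Python 2 and 3.
def _demo_last_comment_page():
    per_page = 30  # e.g. FLASKY_COMMENTS_PER_PAGE
    for count in (1, 30, 31, 61):
        last_page = (count - 1) // per_page + 1
        assert last_page == {1: 1, 30: 1, 31: 2, 61: 3}[count]
    return True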
@main.route('/edit/<int:id>',methods=['GET','POST'])
@login_required
def edit(id):
post = Post.query.get_or_404(id)
if current_user != post.author and \
not current_user.can(Permission.ADMINISTER):
abort(403)
form = PostForm()
if form.validate_on_submit():
post.body = form.body.data
db.session.add(post)
db.session.commit()
flash('The post has been updated.')
return redirect(url_for('.post',id=post.id))
form.body.data = post.body
return render_template('edit_post.html',form=form)
@main.route('/follow/<username>')
@login_required
@permission_required(Permission.FOLLOW)
def follow(username):
user = User.query.filter_by(username=username).first()
if user is None:
flash('Invalid user.')
return redirect(url_for('.index'))
if current_user.is_following(user):
flash('You are already following this user.')
return redirect(url_for('.user', username=username))
current_user.follow(user)
flash('You are now following %s.' % username)
return redirect(url_for('.user', username=username))
@main.route('/unfollow/<username>')
@login_required
@permission_required(Permission.FOLLOW)
def unfollow(username):
user = User.query.filter_by(username=username).first()
if user is None:
flash('Invalid user.')
return redirect(url_for('.index'))
if not current_user.is_following(user):
flash('You are not following this user.')
return redirect(url_for('.user', username=username))
current_user.unfollow(user)
flash('You are not following %s anymore.' % username)
return redirect(url_for('.user', username=username))
@main.route('/followers/<username>')
def followers(username):
user = User.query.filter_by(username=username).first()
if user is None:
flash('Invalid user.')
return redirect(url_for('.index'))
page = request.args.get('page', 1, type=int)
pagination = user.followers.paginate(
page, per_page=current_app.config['FLASKY_FOLLOWERS_PER_PAGE'],
error_out=False)
follows = [{'user': item.follower, 'timestamp': item.timestamp}
for item in pagination.items]
return render_template('followers.html', user=user, title="Followers of",
endpoint='.followers', pagination=pagination,
follows=follows)
@main.route('/followed-by/<username>')
def followed_by(username):
user = User.query.filter_by(username=username).first()
if user is None:
flash('Invalid user.')
return redirect(url_for('.index'))
page = request.args.get('page', 1, type=int)
pagination = user.followed.paginate(
page, per_page=current_app.config['FLASKY_FOLLOWERS_PER_PAGE'],
error_out=False)
follows = [{'user': item.followed, 'timestamp': item.timestamp}
for item in pagination.items]
return render_template('followers.html', user=user, title="Followed by",
endpoint='.followed_by', pagination=pagination,
follows=follows)
@main.route('/all')
@login_required
def show_all():
resp = make_response(redirect(url_for('.index')))
resp.set_cookie('show_followed','',max_age=30*24*60*60)
return resp
@main.route('/followed')
@login_required
def show_followed():
resp = make_response(redirect(url_for('.index')))
resp.set_cookie('show_followed','1',max_age=30*24*60*60)
return resp
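# Illustrative sketch (not a view): how the 'show_followed' cookie set above
# drives the feed choice in index(). An absent or empty cookie is falsy, so
# the default feed shows all posts; the value '1' switches to followed posts.
def _demo_show_followed_cookie():
    results = {}
    for raw in (None, '', '1'):
        cookie_value = raw if raw is not None else ''  # request.cookies.get default
        results[raw] = bool(cookie_value)
    return results  # {None: False, '': False, '1': True}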
@main.route('/moderate')
@login_required
@permission_required(Permission.MODERATE_COMMENTS)
def moderate():
page = request.args.get('page',1,type=int)
pagination = Comment.query.order_by(Comment.timestamp.desc()).paginate(
page,per_page=current_app.config['FLASKY_COMMENTS_PER_PAGE'],error_out=False)
comments = pagination.items
return render_template('moderate.html',comments=comments,pagination=pagination,page=page)
@main.route('/moderate/enable/<int:id>')
@login_required
@permission_required(Permission.MODERATE_COMMENTS)
def moderate_enable(id):
comment=Comment.query.get_or_404(id)
comment.disabled = False
db.session.add(comment)
return redirect(url_for('.moderate',page=request.args.get('page',1,type=int)))
@main.route('/moderate/disable/<int:id>')
@login_required
@permission_required(Permission.MODERATE_COMMENTS)
def moderate_disable(id):
comment = Comment.query.get_or_404(id)
comment.disabled = True
db.session.add(comment)
return redirect(url_for('.moderate',page=request.args.get('page',1,type=int)))
| 38.557769
| 122
| 0.687332
|
f1ce91416d3d6d8b6f04fc5d269dcdddb192ec52
| 77,595
|
py
|
Python
|
translators/apis.py
|
duandaxei/translators
|
af661ebb7b797e0e9493f1a1c8d30a1ea2edef90
|
[
"MIT"
] | null | null | null |
translators/apis.py
|
duandaxei/translators
|
af661ebb7b797e0e9493f1a1c8d30a1ea2edef90
|
[
"MIT"
] | null | null | null |
translators/apis.py
|
duandaxei/translators
|
af661ebb7b797e0e9493f1a1c8d30a1ea2edef90
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# author=UlionTse
"""MIT License
Copyright (c) 2022 UlionTse
Warning: Prohibition of commercial use!
This module is designed to help students and individuals with translation services.
For commercial use, please purchase API services from translation suppliers.
Don't make high-frequency requests!
These enterprises provide free services; we should be grateful instead of abusing them.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software. You may obtain a copy of the
License at
https://github.com/uliontse/translators/blob/master/LICENSE
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import os
import re
import sys
import time
import json
import base64
import random
import urllib.parse
import hashlib
import functools
import warnings
from typing import Union, Callable
import pathos.multiprocessing
import lxml.etree
import execjs
import requests
import loguru
loguru.logger.remove()
loguru.logger.add(sys.stdout, format='[{time:HH:mm:ss}] <lvl>{message}</lvl>', level='INFO')
loguru.logger.opt(colors=True)
class Tse:
def __init__(self):
self.author = 'Ulion.Tse'
@staticmethod
def time_stat(func):
@functools.wraps(func)
def _wrapper(*args, **kwargs):
t1 = time.time()
r = func(*args, **kwargs)
t2 = time.time()
loguru.logger.success('CostTime(fn: {}): {}s'.format(func.__name__, round((t2 - t1), 1)), style='braces')
return r
return _wrapper
@staticmethod
def get_headers(host_url, if_api=False, if_referer_for_host=True, if_ajax_for_api=True, if_json_for_api=False):
url_path = urllib.parse.urlparse(host_url).path
user_agent = "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) " \
"Chrome/55.0.2883.87 Safari/537.36"
host_headers = {
'Referer' if if_referer_for_host else 'Host': host_url,
"User-Agent": user_agent,
}
api_headers = {
'Origin': host_url.split(url_path)[0] if url_path else host_url,
'Referer': host_url,
'X-Requested-With': 'XMLHttpRequest',
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
"User-Agent": user_agent,
}
if if_api and not if_ajax_for_api:
api_headers.pop('X-Requested-With')
api_headers.update({'Content-Type': 'text/plain'})
if if_api and if_json_for_api:
api_headers.update({'Content-Type': 'application/json'})
return host_headers if not if_api else api_headers
@staticmethod
def check_language(from_language, to_language, language_map, output_zh=None, output_auto='auto'):
auto_pool = ('auto', 'auto-detect')
zh_pool = ('zh', 'zh-CN', 'zh-CHS', 'zh-Hans')
from_language = output_auto if from_language in auto_pool else from_language
from_language = output_zh if output_zh and from_language in zh_pool else from_language
to_language = output_zh if output_zh and to_language in zh_pool else to_language
if from_language != output_auto and from_language not in language_map:
raise TranslatorError('Unsupported from_language[{}] in {}.'.format(from_language, sorted(language_map.keys())))
elif to_language not in language_map:
raise TranslatorError('Unsupported to_language[{}] in {}.'.format(to_language, sorted(language_map.keys())))
elif from_language != output_auto and to_language not in language_map[from_language]:
loguru.logger.exception('language_map: {}'.format(language_map))
raise TranslatorError('Unsupported translation: from [{0}] to [{1}]!'.format(from_language, to_language))
return from_language, to_language
@staticmethod
def make_temp_language_map(from_language, to_language):
warnings.warn('Did not get a complete language map. And do not use `from_language="auto"`.')
assert from_language != 'auto' and to_language != 'auto' and from_language != to_language
lang_list = [from_language, to_language]
return {}.fromkeys(lang_list, lang_list)
@staticmethod
def check_query_text(query_text, if_ignore_limit_of_length=False, limit_of_length=5000):
if not isinstance(query_text, str):
raise TranslatorError('query_text is not string.')
query_text = query_text.strip()
if not query_text:
return ''
length = len(query_text)
if length > limit_of_length and not if_ignore_limit_of_length:
raise TranslatorError('The length of the text to be translated exceeds the limit.')
else:
if length > limit_of_length:
warnings.warn(f'The translation ignored the excess[above 5000]. Length of `query_text` is {length}.')
return query_text[:limit_of_length]
return query_text
class TranslatorSeverRegion:
@property
def request_server_region_info(self):
try:
ip_address = requests.get('https://httpbin.org/ip').json()['origin']
try:
data = requests.get(f'http://ip-api.com/json/{ip_address}', timeout=10).json() # http # limit 45/min.
country = data.get("country")
assert country
sys.stderr.write(f'Using {country} server backend.\n')
return data
except requests.exceptions.Timeout:
data = requests.post(
url='https://ip.taobao.com/outGetIpInfo',
data={'ip': ip_address, 'accessKey': 'alibaba-inc'}
).json().get('data')
data.update({'countryCode': data.get('country_id')})
return data
except requests.exceptions.ConnectionError:
raise TranslatorError('Unable to connect the Internet.\n')
except:
warnings.warn('Unable to find server backend.\n')
country = input('Please input your server region need to visit:\neg: [England, China, ...]\n')
sys.stderr.write(f'Using {country} server backend.\n')
return {'country': country, 'countryCode': 'CN' if country == 'China' else 'EN'}
class TranslatorError(Exception):
pass
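# Illustrative sketch (not used by any translator below): feeding a toy
# language map through Tse.check_language to show the normalization it does.
# The two-language map here is an assumption for the demo only.
def _demo_check_language_sketch():
    demo_map = {'en': ['zh'], 'zh': ['en']}
    # 'auto' passes through as the detected-source placeholder; 'zh-CN' is
    # normalized to the engine's own Chinese code via output_zh.
    return Tse.check_language('auto', 'zh-CN', demo_map, output_zh='zh')  # -> ('auto', 'zh')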
class GoogleV1(Tse):
def __init__(self):
super().__init__()
self.host_url = None
self.cn_host_url = 'https://translate.google.cn'
self.en_host_url = 'https://translate.google.com'
self.request_server_region_info = REQUEST_SERVER_REGION_INFO
self.host_headers = None
self.language_map = None
self.api_url = None
self.tkk = None
self.query_count = 0
self.output_zh = 'zh-CN'
def _xr(self, a, b):
size_b = len(b)
c = 0
while c < size_b - 2:
d = b[c + 2]
d = ord(d[0]) - 87 if 'a' <= d else int(d)
d = (a % 2**32) >> d if '+' == b[c + 1] else a << d
a = a + d & (2**32-1) if '+' == b[c] else a ^ d
c += 3
return a
def _ints(self, text):
ints = []
for v in text:
int_v = ord(v)
if int_v < 2**16:
ints.append(int_v)
else:
# unicode, emoji
ints.append(int((int_v - 2**16) / 2**10 + 55296))
ints.append(int((int_v - 2**16) % 2**10 + 56320))
return ints
def acquire(self, text, tkk):
ints = self._ints(text)
size = len(ints)
e = []
g = 0
while g < size:
l = ints[g]
if l < 2**7: # 128(ascii)
e.append(l)
else:
if l < 2**11: # 2048
e.append(l >> 6 | 192)
else:
if (l & 64512) == 55296 and g + 1 < size and ints[g + 1] & 64512 == 56320:
g += 1
l = 65536 + ((l & 1023) << 10) + (ints[g] & 1023)
e.append(l >> 18 | 240)
e.append(l >> 12 & 63 | 128)
else:
e.append(l >> 12 | 224)
e.append(l >> 6 & 63 | 128)
e.append(l & 63 | 128)
g += 1
b = tkk if tkk != '0' else ''
d = b.split('.')
b = int(d[0]) if len(d) > 1 else 0
a = b
for value in e:
a += value
a = self._xr(a, '+-a^+6')
a = self._xr(a, '+-3^+b+-f')
a ^= int(d[1]) if len(d) > 1 else 0
if a < 0:
a = (a & (2**31-1)) + 2**31
a %= int(1E6)
return '{}.{}'.format(a, a ^ b)
def get_language_map(self, host_html, ss, timeout, proxies):
while 'source_code_name:' not in host_html:
host_html = ss.get(self.host_url, headers=self.host_headers, timeout=timeout, proxies=proxies).text
time.sleep(0.01)
lang_list_str = re.compile(r"source_code_name:\[(.*?)\],").findall(host_html)[0]
lang_list_str = ('[' + lang_list_str + ']').replace('code', '"code"').replace('name', '"name"')
lang_list = [x['code'] for x in eval(lang_list_str) if x['code'] != 'auto']
return {}.fromkeys(lang_list, lang_list)
def get_tkk(self, host_html, ss, timeout, proxies):
while 'tkk:' not in host_html:
host_html = ss.get(self.host_url, headers=self.host_headers, timeout=timeout, proxies=proxies).text
time.sleep(0.01)
return re.compile("tkk:'(.*?)'").findall(host_html)[0]
# @Tse.time_stat
def google_api(self, query_text:str, from_language:str='auto', to_language:str='en', **kwargs) -> Union[str,list]:
"""
https://translate.google.com, https://translate.google.cn.
:param query_text: str, must.
:param from_language: str, default 'auto'.
:param to_language: str, default 'en'.
:param **kwargs:
:param if_use_cn_host: boolean, default None.
:param if_ignore_limit_of_length: boolean, default False.
:param is_detail_result: boolean, default False.
:param timeout: float, default None.
:param proxies: dict, default None.
:param sleep_seconds: float, default `random.random()`.
:return: str or list
"""
use_cn_condition = kwargs.get('if_use_cn_host', None) or self.request_server_region_info.get('countryCode')=='CN'
self.host_url = self.cn_host_url if use_cn_condition else self.en_host_url
self.host_headers = self.get_headers(self.cn_host_url, if_api=False)
is_detail_result = kwargs.get('is_detail_result', False)
timeout = kwargs.get('timeout', None)
proxies = kwargs.get('proxies', None)
sleep_seconds = kwargs.get('sleep_seconds', random.random())
if_ignore_limit_of_length = kwargs.get('if_ignore_limit_of_length', False)
query_text = self.check_query_text(query_text, if_ignore_limit_of_length)
with requests.Session() as ss:
host_html = ss.get(self.host_url, headers=self.host_headers, timeout=timeout, proxies=proxies).text
if not self.language_map:
self.language_map = self.get_language_map(host_html, ss, timeout, proxies)
from_language,to_language = self.check_language(from_language,to_language,self.language_map,output_zh=self.output_zh)
if not self.tkk:
self.tkk = self.get_tkk(host_html, ss, timeout, proxies)
tk = self.acquire(query_text, self.tkk)
self.api_url = (self.host_url + '/translate_a/single?client={0}&sl={1}&tl={2}&hl=zh-CN&dt=at&dt=bd&dt=ex'
+ '&dt=ld&dt=md&dt=qca&dt=rw&dt=rm&dt=ss&dt=t&ie=UTF-8&oe=UTF-8&source=bh&ssel=0&tsel=0&kc=1&tk='
+ str(tk) + '&q=' + urllib.parse.quote(query_text)).format('webapp', from_language,to_language) # [t,webapp]
r = ss.get(self.api_url, headers=self.host_headers, timeout=timeout, proxies=proxies)
r.raise_for_status()
data = r.json()
time.sleep(sleep_seconds)
self.query_count += 1
return data if is_detail_result else ''.join([item[0] for item in data[0] if isinstance(item[0],str)])
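# Illustrative sketch (standalone, no GoogleV1 instance needed): the UTF-16
# surrogate-pair arithmetic that GoogleV1._ints applies to characters above
# the BMP (e.g. emoji) before the token calculation.
def _demo_surrogate_pair_sketch():
    code_point = ord('\U0001F600')                    # 128512
    high = int((code_point - 2**16) / 2**10 + 55296)  # 55357 (0xD83D)
    low = int((code_point - 2**16) % 2**10 + 56320)   # 56832 (0xDE00)
    return high, low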
class GoogleV2(Tse):
def __init__(self):
super().__init__()
self.host_url = None
self.cn_host_url = 'https://translate.google.cn'
self.en_host_url = 'https://translate.google.com'
self.api_url = None
self.request_server_region_info = REQUEST_SERVER_REGION_INFO
self.host_headers = None
self.api_headers = None
self.language_map = None
self.rpcid = 'MkEWBc'
self.query_count = 0
self.output_zh = 'zh-CN'
def get_rpc(self, query_text, from_language, to_language):
param = json.dumps([[query_text, from_language, to_language, True], [1]])
rpc = json.dumps([[[self.rpcid, param, None, "generic"]]])
return {'f.req': rpc}
def get_language_map(self, host_html):
et = lxml.etree.HTML(host_html)
lang_list = sorted(list(set(et.xpath('//*/@data-language-code'))))
return {}.fromkeys(lang_list, lang_list)
def get_info(self, host_html):
data_str = re.compile(r'window.WIZ_global_data = (.*?);</script>').findall(host_html)[0]
data = execjs.get().eval(data_str)
return {'bl': data['cfb2h'], 'f.sid': data['FdrFJe']}
def get_consent_cookie(self, consent_html): # by mercuree. merged but not verified.
et = lxml.etree.HTML(consent_html)
input_element = et.xpath('.//input[@type="hidden"][@name="v"]')
if input_element:
cookie_value = input_element[0].attrib.get('value')
else:
cookie_value = 'cb' # cookie CONSENT=YES+cb works for now
return f'CONSENT=YES+{cookie_value}'
# @Tse.time_stat
def google_api(self, query_text: str, from_language: str = 'auto', to_language: str = 'en', **kwargs) -> Union[str, list]:
"""
https://translate.google.com, https://translate.google.cn.
:param query_text: str, must.
:param from_language: str, default 'auto'.
:param to_language: str, default 'en'.
:param **kwargs:
:param reset_host_url: str, default None. eg: 'https://translate.google.fr'
:param if_use_cn_host: boolean, default None. affected by `reset_host_url`.
:param if_ignore_limit_of_length: boolean, default False.
:param is_detail_result: boolean, default False.
:param timeout: float, default None.
:param proxies: dict, default None.
:param sleep_seconds: float, default `random.random()`.
:return: str or list
"""
reset_host_url = kwargs.get('reset_host_url', None)
if reset_host_url:
assert reset_host_url[:25] == 'https://translate.google.'
self.host_url = reset_host_url
else:
use_cn_condition = kwargs.get('if_use_cn_host', None) or self.request_server_region_info.get('countryCode') == 'CN'
self.host_url = self.cn_host_url if use_cn_condition else self.en_host_url
self.api_url = f'{self.host_url}/_/TranslateWebserverUi/data/batchexecute'
self.host_headers = self.host_headers or self.get_headers(self.cn_host_url, if_api=False) # reuse cookie header
self.api_headers = self.get_headers(self.cn_host_url, if_api=True, if_referer_for_host=True, if_ajax_for_api=True)
is_detail_result = kwargs.get('is_detail_result', False)
timeout = kwargs.get('timeout', None)
proxies = kwargs.get('proxies', None)
sleep_seconds = kwargs.get('sleep_seconds', random.random())
if_ignore_limit_of_length = kwargs.get('if_ignore_limit_of_length', False)
query_text = self.check_query_text(query_text, if_ignore_limit_of_length)
delete_temp_language_map_label = 0
with requests.Session() as ss:
r = ss.get(self.host_url, headers=self.host_headers, timeout=timeout, proxies=proxies)
if 'consent.google.com' == urllib.parse.urlparse(r.url).hostname:
self.host_headers.update({'cookie': self.get_consent_cookie(r.text)})
host_html = ss.get(self.host_url, headers=self.host_headers, timeout=timeout, proxies=proxies).text
else:
host_html = r.text
if not self.language_map:
self.language_map = self.get_language_map(host_html)
if not self.language_map:
delete_temp_language_map_label += 1
self.language_map = self.make_temp_language_map(from_language, to_language)
from_language, to_language = self.check_language(from_language, to_language, self.language_map, output_zh=self.output_zh)
rpc_data = self.get_rpc(query_text, from_language, to_language)
rpc_data = urllib.parse.urlencode(rpc_data)
r = ss.post(self.api_url, headers=self.api_headers, data=rpc_data, timeout=timeout, proxies=proxies)
r.raise_for_status()
json_data = json.loads(r.text[6:])
data = json.loads(json_data[0][2])
if delete_temp_language_map_label != 0:
self.language_map = None
time.sleep(sleep_seconds)
self.query_count += 1
return data if is_detail_result else ' '.join([x[0] for x in data[1][0][0][5]])
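# Illustrative sketch: the batchexecute payload shape that GoogleV2.get_rpc
# builds (same structure, sample text; relies on the json import at the top
# of this file).
def _demo_rpc_payload_sketch():
    param = json.dumps([['hello', 'en', 'zh-CN', True], [1]])
    rpc = json.dumps([[['MkEWBc', param, None, 'generic']]])
    return {'f.req': rpc}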
class Baidu(Tse):
def __init__(self):
super().__init__()
self.host_url = 'https://fanyi.baidu.com'
self.api_url = 'https://fanyi.baidu.com/v2transapi'
self.langdetect_url = 'https://fanyi.baidu.com/langdetect'
self.get_sign_old_url = 'https://fanyi-cdn.cdn.bcebos.com/static/translation/pkg/index_bd36cef.js'
self.get_sign_url = None
self.get_sign_pattern = 'https://fanyi-cdn.cdn.bcebos.com/static/translation/pkg/index_(.*?).js'
self.host_headers = self.get_headers(self.host_url, if_api=False)
self.api_headers = self.get_headers(self.host_url, if_api=True)
self.bdtk_pool = [
{"baidu_id": "F215FBBB82CAF048A24B86785E193475:FG=1", "token": "4e6d918b00ada40933d3e63fd2f2c009"},
{"baidu_id": "97AD065BAC1491494A8D48510DABE382:FG=1", "token": "9d893922f8ea987de2f2adc81a81fbe7"},
{"baidu_id": "A6D0C58DDED7B75B744EDE8A26054BF3:FG=1", "token": "4a1edb47b0528aad49d622db98c7c750"},
]
self.bdtk = random.choice(self.bdtk_pool)
self.new_bdtk = None
self.host_info = None
self.language_map = None
self.query_count = 0
self.output_zh = 'zh'
def get_sign_html(self, ss, host_html, timeout, proxies):
try:
if not self.get_sign_url:
self.get_sign_url = re.compile(self.get_sign_pattern).search(host_html).group(0)
r = ss.get(self.get_sign_url, headers=self.host_headers, timeout=timeout, proxies=proxies)
r.raise_for_status()
except:
r = ss.get(self.get_sign_old_url, headers=self.host_headers, timeout=timeout, proxies=proxies)
r.raise_for_status()
self.get_sign_url = self.get_sign_old_url
return r.text
def get_sign(self, sign_html, ts_text, gtk):
begin_label = 'define("translation:widget/translate/input/pGrab",function(r,o,t){'
end_label = 'var i=null;t.exports=e});'
sign_js = sign_html[sign_html.find(begin_label) + len(begin_label):sign_html.find(end_label)]
sign_js = sign_js.replace('function e(r)', 'function e(r,i)')
return execjs.compile(sign_js).call('e', ts_text, gtk)
def get_host_info(self, host_html, sign_html, ts_text):
gtk = re.compile("window.gtk = '(.*?)';").findall(host_html)[0]
sign = self.get_sign(sign_html, ts_text, gtk)
et = lxml.etree.HTML(host_html)
js_txt = ''
for i in range(6):
js_re_list = et.xpath(f"/html/body/script[{i}]/text()")
if js_re_list:
if 'langMap' in js_re_list[0]:
js_txt = js_re_list[0][20:-111]
break
js_data = execjs.get().eval(js_txt)
js_data.update({'gtk': gtk, 'sign': sign})
return js_data
# @Tse.time_stat
def baidu_api(self, query_text:str, from_language:str='auto', to_language:str='en', **kwargs) -> Union[str,dict]:
"""
https://fanyi.baidu.com
:param query_text: str, must. # attention emoji
:param from_language: str, default 'auto'.
:param to_language: str, default 'en'.
:param **kwargs:
:param professional_field: str, default 'common'. Choose from ('common', 'medicine', 'electronics', 'mechanics')
:param if_ignore_limit_of_length: boolean, default False.
:param is_detail_result: boolean, default False.
:param timeout: float, default None.
:param proxies: dict, default None.
:param sleep_seconds: float, default `random.random()`.
:return: str or dict
"""
use_domain = kwargs.get('professional_field', 'common')
assert use_domain in ('common', 'medicine', 'electronics', 'mechanics')
is_detail_result = kwargs.get('is_detail_result', False)
timeout = kwargs.get('timeout', None)
proxies = kwargs.get('proxies', None)
sleep_seconds = kwargs.get('sleep_seconds', random.random())
if_ignore_limit_of_length = kwargs.get('if_ignore_limit_of_length', False)
query_text = self.check_query_text(query_text, if_ignore_limit_of_length)
with requests.Session() as ss:
host_html = ss.get(self.host_url, headers=self.host_headers, timeout=timeout, proxies=proxies).text
sign_html = self.get_sign_html(ss, host_html, timeout, proxies)
self.host_info = self.get_host_info(host_html, sign_html, query_text)
self.new_bdtk = {"baidu_id": ss.cookies.get("BAIDUID"), "token": self.host_info.get("token")}
self.language_map = self.host_info['langMap']
from_language,to_language = self.check_language(from_language,to_language,self.language_map,output_zh=self.output_zh)
self.api_headers.update({"cookie": "BAIDUID={};".format(self.bdtk['baidu_id'])})
if from_language == 'auto':
res = ss.post(self.langdetect_url, headers=self.api_headers, data={"query": query_text}, timeout=timeout, proxies=proxies)
from_language = res.json()['lan']
# param_data = {"from": from_language, "to": to_language}
form_data = {
"from": from_language,
"to": to_language,
"query": query_text, # from urllib.parse import quote_plus
"transtype": "translang", # ["translang","realtime"]
"simple_means_flag": "3",
"sign": self.host_info.get('sign'),
"token": self.bdtk['token'], # self.host_info.get('token'),
"domain": use_domain,
}
form_data = urllib.parse.urlencode(form_data).encode('utf-8')
r = ss.post(self.api_url, headers=self.api_headers, data=form_data, timeout=timeout, proxies=proxies)
r.raise_for_status()
data = r.json()
time.sleep(sleep_seconds)
self.query_count += 1
return data if is_detail_result else '\n'.join([x['dst'] for x in data['trans_result']['data']])
class Youdao(Tse):
def __init__(self):
super().__init__()
self.host_url = 'https://fanyi.youdao.com'
self.api_url = 'https://fanyi.youdao.com/translate_o?smartresult=dict&smartresult=rule'
self.get_old_sign_url = 'https://shared.ydstatic.com/fanyi/newweb/v1.0.29/scripts/newweb/fanyi.min.js'
self.get_new_sign_url = None
self.get_sign_pattern = 'https://shared.ydstatic.com/fanyi/newweb/(.*?)/scripts/newweb/fanyi.min.js'
self.host_headers = self.get_headers(self.host_url, if_api=False)
self.api_headers = self.get_headers(self.host_url, if_api=True)
self.language_map = None
self.query_count = 0
self.output_zh = 'zh-CHS'
def get_language_map(self, host_html):
et = lxml.etree.HTML(host_html)
lang_list = et.xpath('//*[@id="languageSelect"]/li/@data-value')
lang_list = [(x.split('2')[0], [x.split('2')[1]]) for x in lang_list if '2' in x]
lang_map = dict(lang_list)
lang_map.pop('zh-CHS')
lang_map.update({'zh-CHS': list(lang_map.keys())})
return lang_map
def get_sign_key(self, ss, host_html, timeout, proxies):
try:
if not self.get_new_sign_url:
self.get_new_sign_url = re.compile(self.get_sign_pattern).search(host_html).group(0)
r = ss.get(self.get_new_sign_url, headers=self.host_headers, timeout=timeout, proxies=proxies)
r.raise_for_status()
except:
r = ss.get(self.get_old_sign_url, headers=self.host_headers, timeout=timeout, proxies=proxies)
r.raise_for_status()
sign = re.compile(r'n.md5\("fanyideskweb"\+e\+i\+"(.*?)"\)').findall(r.text)
return sign[0] if sign and sign != [''] else "Tbh5E8=q6U3EXe+&L[4c@"  # fallback key, v1.0.31
def get_form(self, query_text, from_language, to_language, sign_key):
ts = str(int(time.time()*1000))
salt = str(ts) + str(random.randrange(0, 10))
sign_text = ''.join(['fanyideskweb', query_text, salt, sign_key])
sign = hashlib.md5(sign_text.encode()).hexdigest()
bv = hashlib.md5(self.api_headers['User-Agent'][8:].encode()).hexdigest()
form = {
'i': query_text,
'from': from_language,
'to': to_language,
'lts': ts, # r = "" + (new Date).getTime()
'salt': salt, # i = r + parseInt(10 * Math.random(), 10)
'sign': sign, # n.md5("fanyideskweb" + e + i + "n%A-rKaT5fb[Gy?;N5@Tj"),e=text
'bv': bv, # n.md5(navigator.appVersion)
'smartresult': 'dict',
'client': 'fanyideskweb',
'doctype': 'json',
'version': '2.1',
'keyfrom': 'fanyi.web',
'action': 'FY_BY_DEFAULT', # options: ["FY_BY_REALTlME", "FY_BY_DEFAULT"]
# 'typoResult': 'false'
}
return form
# @Tse.time_stat
def youdao_api(self, query_text:str, from_language:str='auto', to_language:str='en', **kwargs) -> Union[str,dict]:
"""
https://fanyi.youdao.com
:param query_text: str, must.
:param from_language: str, default 'auto'.
:param to_language: str, default 'en'.
:param **kwargs:
:param if_ignore_limit_of_length: boolean, default False.
:param is_detail_result: boolean, default False.
:param timeout: float, default None.
:param proxies: dict, default None.
:param sleep_seconds: float, default `random.random()`.
:return: str or dict
"""
is_detail_result = kwargs.get('is_detail_result', False)
timeout = kwargs.get('timeout', None)
proxies = kwargs.get('proxies', None)
sleep_seconds = kwargs.get('sleep_seconds', random.random())
if_ignore_limit_of_length = kwargs.get('if_ignore_limit_of_length', False)
query_text = self.check_query_text(query_text, if_ignore_limit_of_length)
with requests.Session() as ss:
host_html = ss.get(self.host_url, headers=self.host_headers, timeout=timeout, proxies=proxies).text
if not self.language_map:
self.language_map = self.get_language_map(host_html)
sign_key = self.get_sign_key(ss, host_html, timeout, proxies)
from_language, to_language = self.check_language(from_language, to_language, self.language_map,output_zh=self.output_zh)
from_language, to_language = ('auto', 'auto') if from_language == 'auto' else (from_language, to_language)
form = self.get_form(query_text, from_language, to_language, sign_key)
r = ss.post(self.api_url, data=form, headers=self.api_headers, timeout=timeout, proxies=proxies)
r.raise_for_status()
data = r.json()
if data['errorCode'] == 40:
raise TranslatorError('Invalid translation of `from_language[auto]`, '
'please specify parameters of `from_language` or `to_language`.')
time.sleep(sleep_seconds)
self.query_count += 1
return data if is_detail_result else ' '.join(item['tgt'] if item['tgt'] else '\n' for result in data['translateResult'] for item in result)
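# Illustrative sketch: the salt/sign computation from Youdao.get_form, run on
# a sample string with the fallback sign key from get_sign_key. Uses the
# time/random/hashlib imports at the top of this file.
def _demo_youdao_sign_sketch(query_text='hello'):
    ts = str(int(time.time() * 1000))
    salt = ts + str(random.randrange(0, 10))
    sign_key = 'Tbh5E8=q6U3EXe+&L[4c@'  # fallback key (v1.0.31)
    sign = hashlib.md5(('fanyideskweb' + query_text + salt + sign_key).encode()).hexdigest()
    return salt, sign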
class Tencent(Tse):
def __init__(self):
super().__init__()
self.host_url = 'https://fanyi.qq.com'
self.api_url = 'https://fanyi.qq.com/api/translate'
self.get_language_url = 'https://fanyi.qq.com/js/index.js'
self.get_qt_url = 'https://fanyi.qq.com/api/reauth12f'
self.host_headers = self.get_headers(self.host_url, if_api=False)
self.api_headers = self.get_headers(self.host_url, if_api=True)
self.qt_headers = self.get_headers(self.host_url, if_api=True, if_json_for_api=True)
self.language_map = None
self.qtv_qtk = None
self.query_count = 0
self.output_zh = 'zh'
def get_language_map(self, ss, language_url, timeout, proxies):
r = ss.get(language_url, headers=self.host_headers, timeout=timeout, proxies=proxies)
r.raise_for_status()
        lang_map_str = re.compile(pattern='C={(.*?)}|languagePair = {(.*?)}', flags=re.S).search(r.text).group(0)  # matches either the 'C={...}' or the 'languagePair = {...}' blob
return execjs.get().eval(lang_map_str)
def get_qt(self, ss, timeout, proxies, if_session=False):
if if_session:
return ss.post(self.get_qt_url, headers=self.qt_headers, json=self.qtv_qtk, timeout=timeout, proxies=proxies).json()
return requests.post(self.get_qt_url, headers=self.qt_headers, json=self.qtv_qtk, timeout=timeout, proxies=proxies).json()
# @Tse.time_stat
def tencent_api(self, query_text:str, from_language:str='auto', to_language:str='en', **kwargs) -> Union[str,dict]:
"""
https://fanyi.qq.com
:param query_text: str, must.
:param from_language: str, default 'auto'.
:param to_language: str, default 'en'.
:param **kwargs:
:param if_ignore_limit_of_length: boolean, default False.
:param is_detail_result: boolean, default False.
:param timeout: float, default None.
:param proxies: dict, default None.
:param sleep_seconds: float, default `random.random()`.
:return: str or dict
"""
is_detail_result = kwargs.get('is_detail_result', False)
timeout = kwargs.get('timeout', None)
proxies = kwargs.get('proxies', None)
sleep_seconds = kwargs.get('sleep_seconds', random.random())
if_ignore_limit_of_length = kwargs.get('if_ignore_limit_of_length', False)
query_text = self.check_query_text(query_text, if_ignore_limit_of_length)
with requests.Session() as ss:
_ = ss.get(self.host_url, headers=self.host_headers, timeout=timeout, proxies=proxies).text
if not self.language_map:
self.language_map = self.get_language_map(ss, self.get_language_url, timeout, proxies)
from_language, to_language = self.check_language(from_language, to_language, self.language_map, output_zh=self.output_zh)
self.qtv_qtk = self.get_qt(ss, timeout, proxies, if_session=False)
form_data = {
'source': from_language,
'target': to_language,
'sourceText': query_text,
'qtv': self.qtv_qtk.get('qtv', ''),
'qtk': self.qtv_qtk.get('qtk', ''),
'ticket': '',
'randstr': '',
'sessionUuid': 'translate_uuid' + str(int(time.time()*1000)),
}
r = ss.post(self.api_url, headers=self.api_headers, data=form_data, timeout=timeout, proxies=proxies)
r.raise_for_status()
data = r.json()
time.sleep(sleep_seconds)
self.query_count += 1
return data if is_detail_result else ''.join(item['targetText'] for item in data['translate']['records']) # auto whitespace
class Alibaba(Tse):
def __init__(self):
super().__init__()
self.host_url = 'https://translate.alibaba.com'
self.api_url = 'https://translate.alibaba.com/translationopenseviceapp/trans/TranslateTextAddAlignment.do'
self.get_language_old_url = 'https://translate.alibaba.com/trans/acquireSupportLanguage.do'
self.get_language_new_url = 'https://translate.alibaba.com/translationopenseviceapp/trans/acquire_supportLanguage.do'
self.host_headers = self.get_headers(self.host_url, if_api=False)
self.api_headers = self.get_headers(self.host_url, if_api=True)
self.language_map = None
self.query_count = 0
self.output_zh = 'zh'
    def get_dmtrack_pageid(self, host_response):
        try:
            e = re.compile(r"dmtrack_pageid='(\w+)';").findall(host_response.text)[0]
        except:
            e = ''
        if not e:
            e = host_response.cookies.get_dict().get("cna", "001")
            e = re.compile(pattern=r'[^a-z\d]').sub(repl='', string=e.lower())[:16]
        else:
            n, r = e[0:16], e[16:26]
            i = hex(int(r, 10))[2:] if re.compile(r'^[\-+]?[0-9]+$').match(r) else r
            e = n + i
        s = int(time.time() * 1000)
        o = ''.join([e, hex(s)[2:]])
        for _ in range(1, 10):
            a = hex(int(random.random() * 1e10))[2:]  # random hex chunk; [2:] drops the '0x' prefix
            o += a
        return o[:42]
def get_language_map(self, ss, biz_type, dmtrack_pageid, timeout, proxies):
def _get_lang(language_url, params=None):
language_dict = ss.get(language_url, params=params, headers=self.host_headers, timeout=timeout, proxies=proxies).json()
            # 'sourceLuange' is the (misspelled) field name returned by the upstream API.
            language_map = dict((x['sourceLuange'], x['targetLanguages']) for x in language_dict['languageMap'])
return language_map
params = {'dmtrack_pageid': dmtrack_pageid, 'biz_type': biz_type}
try:
return _get_lang(self.get_language_new_url, params=None)
except:
return _get_lang(self.get_language_old_url, params=params)
# @Tse.time_stat
def alibaba_api(self, query_text:str, from_language:str='auto', to_language:str='en', **kwargs) -> Union[str,dict]:
"""
https://translate.alibaba.com
:param query_text: str, must.
:param from_language: str, default 'auto'.
:param to_language: str, default 'en'.
:param **kwargs:
:param professional_field: str, default 'message', choose from ("general","message","offer")
:param if_ignore_limit_of_length: boolean, default False.
:param is_detail_result: boolean, default False.
:param timeout: float, default None.
:param proxies: dict, default None.
:param sleep_seconds: float, default `random.random()`.
:return: str or dict
"""
use_domain = kwargs.get('professional_field', 'message')
assert use_domain in ("general", "message", "offer")
is_detail_result = kwargs.get('is_detail_result', False)
timeout = kwargs.get('timeout', None)
proxies = kwargs.get('proxies', None)
sleep_seconds = kwargs.get('sleep_seconds', random.random())
if_ignore_limit_of_length = kwargs.get('if_ignore_limit_of_length', False)
query_text = self.check_query_text(query_text, if_ignore_limit_of_length)
with requests.Session() as ss:
host_response = ss.get(self.host_url, headers=self.host_headers, timeout=timeout, proxies=proxies)
dmtrack_pageid = self.get_dmtrack_pageid(host_response)
if not self.language_map:
self.language_map = self.get_language_map(ss, use_domain, dmtrack_pageid, timeout, proxies)
from_language, to_language = self.check_language(from_language, to_language, self.language_map, output_zh=self.output_zh)
form_data = {
"srcLanguage": from_language,
"tgtLanguage": to_language,
"srcText": query_text,
"viewType": "",
"source": "",
"bizType": use_domain,
}
params = {"dmtrack_pageid":dmtrack_pageid}
r = ss.post(self.api_url, headers=self.api_headers, params=params, data=form_data, timeout=timeout, proxies=proxies)
r.raise_for_status()
data = r.json()
time.sleep(sleep_seconds)
self.query_count += 1
return data if is_detail_result else data['listTargetText'][0]
class Bing(Tse):
def __init__(self):
super().__init__()
self.host_url = None
self.cn_host_url = 'https://cn.bing.com/Translator'
self.en_host_url = 'https://www.bing.com/Translator'
self.request_server_region_info = REQUEST_SERVER_REGION_INFO
self.api_url = None
self.host_headers = None
self.api_headers = None
self.host_info = None
self.tk = None
self.first_time = int(time.time())
self.language_map = None
self.query_count = 0
self.output_auto = 'auto-detect'
self.output_zh = 'zh-Hans'
def get_host_info(self, host_html):
et = lxml.etree.HTML(host_html)
lang_list = et.xpath('//*[@id="tta_srcsl"]/option/@value') or et.xpath('//*[@id="t_srcAllLang"]/option/@value')
lang_list = list(set(lang_list))
language_map = {}.fromkeys(lang_list, lang_list)
iid = et.xpath('//*[@id="rich_tta"]/@data-iid')[0] + '.' + str(self.query_count + 1)
ig = re.compile('IG:"(.*?)"').findall(host_html)[0]
return {'iid': iid, 'ig': ig, 'language_map': language_map}
def get_tk(self, host_html):
result_str = re.compile('var params_RichTranslateHelper = (.*?);').findall(host_html)[0]
result = execjs.get().eval(result_str)
return {'key': result[0], 'token': result[1]}
# @Tse.time_stat
def bing_api(self, query_text:str, from_language:str='auto', to_language:str='en', **kwargs) -> Union[str,list]:
"""
https://bing.com/Translator, https://cn.bing.com/Translator.
:param query_text: str, must.
:param from_language: str, default 'auto'.
:param to_language: str, default 'en'.
:param **kwargs:
:param if_use_cn_host: boolean, default None.
:param if_ignore_limit_of_length: boolean, default False.
:param is_detail_result: boolean, default False.
:param timeout: float, default None.
:param proxies: dict, default None.
:param sleep_seconds: float, default `random.random()`.
:return: str or list
"""
use_cn_condition = kwargs.get('if_use_cn_host', None) or self.request_server_region_info.get('countryCode') == 'CN'
self.host_url = self.cn_host_url if use_cn_condition else self.en_host_url
self.api_url = self.host_url.replace('Translator', 'ttranslatev3')
self.host_headers = self.get_headers(self.host_url, if_api=False)
self.api_headers = self.get_headers(self.host_url, if_api=True)
is_detail_result = kwargs.get('is_detail_result', False)
timeout = kwargs.get('timeout', None)
proxies = kwargs.get('proxies', None)
sleep_seconds = kwargs.get('sleep_seconds', random.random())
if_ignore_limit_of_length = kwargs.get('if_ignore_limit_of_length', False)
query_text = self.check_query_text(query_text, if_ignore_limit_of_length)
with requests.Session() as ss:
host_html = ss.get(self.host_url, headers=self.host_headers, timeout=timeout, proxies=proxies).text
self.host_info = self.get_host_info(host_html)
if not self.language_map:
self.language_map = self.host_info.get('language_map')
from_language, to_language = self.check_language(from_language, to_language, self.language_map,
output_zh=self.output_zh,output_auto=self.output_auto)
# params = {'isVertical': '1', '': '', 'IG': self.host_info['ig'], 'IID': self.host_info['iid']}
self.api_url = self.api_url + '?isVertical=1&&IG={}&IID={}'.format(self.host_info['ig'],self.host_info['iid'])
            if not self.tk or time.time() - self.first_time > 3500:  # refresh shortly before the ~3600 s expiry
self.tk = self.get_tk(host_html)
form_data = {
'text': query_text,
'fromLang': from_language,
'to': to_language,
}
form_data.update(self.tk)
r = ss.post(self.api_url, headers=self.host_headers, data=form_data, timeout=timeout, proxies=proxies)
r.raise_for_status()
data = r.json()
time.sleep(sleep_seconds)
self.query_count += 1
return data if is_detail_result else data[0]['translations'][0]['text']
class Sogou(Tse):
def __init__(self):
super().__init__()
self.host_url = 'https://fanyi.sogou.com'
# self.old_api_url = 'https://fanyi.sogou.com/reventondc/translateV3'
self.api_url = 'https://fanyi.sogou.com/api/transpc/text/result'
self.get_language_url = 'https://dlweb.sogoucdn.com/translate/pc/static/js/app.7016e0df.js'
# self.get_language_pattern = '//dlweb.sogoucdn.com/translate/pc/static/js/app.(.*?).js'
# self.get_language_url = None
self.host_headers = self.get_headers(self.host_url, if_api=False)
self.api_headers = self.get_headers(self.host_url, if_api=True)
self.language_map = None
self.form_data = None
self.query_count = 0
self.output_zh = 'zh-CHS'
def get_language_map(self, ss, get_language_url, timeout, proxies):
lang_html = ss.get(get_language_url, headers=self.host_headers, timeout=timeout, proxies=proxies).text
lang_list_str = re.compile('"ALL":\[(.*?)\]').findall(lang_html)[0]
lang_list = execjs.get().eval('[' + lang_list_str + ']')
lang_list = [x['lang'] for x in lang_list]
return {}.fromkeys(lang_list,lang_list)
def get_form(self, query_text, from_language, to_language):
        uuid = ''
        # JS-style guid: eight 4-hex-char groups with dashes after groups 2-5,
        # i.e. 'xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx'.
        for i in range(8):
            uuid += hex(int(65536 * (1 + random.random())))[2:][1:]
            if i in range(1, 5):
                uuid += '-'
sign_text = "" + from_language + to_language + query_text + '109984457' # window.__INITIAL_STATE__.common.CONFIG.secretCode
sign = hashlib.md5(sign_text.encode()).hexdigest()
form = {
"from": from_language,
"to": to_language,
"text": query_text,
"uuid": uuid,
"s": sign,
"client": "pc", #wap
"fr": "browser_pc", #browser_wap
"needQc": "1",
}
return form
# @Tse.time_stat
def sogou_api(self, query_text:str, from_language:str='auto', to_language:str='en', **kwargs) -> Union[str,dict]:
"""
https://fanyi.sogou.com
:param query_text: str, must.
:param from_language: str, default 'auto'.
:param to_language: str, default 'en'.
:param **kwargs:
:param if_ignore_limit_of_length: boolean, default False.
:param is_detail_result: boolean, default False.
:param timeout: float, default None.
:param proxies: dict, default None.
:param sleep_seconds: float, default `random.random()`.
:return: str or dict
"""
is_detail_result = kwargs.get('is_detail_result', False)
timeout = kwargs.get('timeout', None)
proxies = kwargs.get('proxies', None)
sleep_seconds = kwargs.get('sleep_seconds', random.random())
if_ignore_limit_of_length = kwargs.get('if_ignore_limit_of_length', False)
query_text = self.check_query_text(query_text, if_ignore_limit_of_length)
with requests.Session() as ss:
_ = ss.get(self.host_url, headers=self.host_headers, timeout=timeout, proxies=proxies).text
# if not self.get_language_url:
# self.get_language_url = 'https:' + re.compile(self.get_language_pattern).search(host_html).group() # TODO
if not self.language_map:
self.language_map = self.get_language_map(ss, self.get_language_url, timeout, proxies)
from_language, to_language = self.check_language(from_language, to_language, self.language_map, output_zh=self.output_zh)
self.form_data = self.get_form(query_text, from_language, to_language)
r = ss.post(self.api_url, headers=self.api_headers, data=self.form_data, timeout=timeout, proxies=proxies)
r.raise_for_status()
data = r.json()
time.sleep(sleep_seconds)
self.query_count += 1
return data if is_detail_result else data['data']['translate']['dit']
class Caiyun(Tse):
def __init__(self):
super().__init__()
self.host_url = 'https://fanyi.caiyunapp.com'
self.api_url = 'https://api.interpreter.caiyunai.com/v1/translator'
# self.old_get_tk_url = 'https://fanyi.caiyunapp.com/static/js/app.1312348c1a3d00422dd1.js'
self.get_tk_pattern = '/static/js/app.(.*?).js'
self.get_tk_url = None
self.get_jwt_url = 'https://api.interpreter.caiyunai.com/v1/user/jwt/generate'
self.host_headers = self.get_headers(self.host_url, if_api=False, if_referer_for_host=True)
self.api_headers = self.get_headers(self.host_url, if_api=True, if_ajax_for_api=False, if_json_for_api=True)
self.language_map = None
self.browser_pool = [
'd8bab270cec5dc600525d424be1da0bb',
'2c011fd3dbab6f3f763c5e7406317fdf',
'74231a3a95c91c2fa8eba3082a8cc4d6'
]
self.browser_id = random.choice(self.browser_pool)
self.tk = None
self.jwt = None
self.decrypt_dictionary = self.crypt(if_de=True)
self.query_count = 0
self.output_zh = 'zh'
def get_language_map(self, ss, timeout, proxies):
js_html = ss.get(self.get_tk_url, headers=self.host_headers, timeout=timeout, proxies=proxies).text
# lang_str = re.compile('Ai={(.*?)},').search(js_html).group()[3:-1]
lang_str = re.compile('lang:{(.*?)},').search(js_html).group()[5:-1]
lang_list = list(execjs.eval(lang_str).keys())
return {}.fromkeys(lang_list, lang_list)
def get_tk(self, ss, timeout, proxies):
js_html = ss.get(self.get_tk_url, headers=self.host_headers, timeout=timeout, proxies=proxies).text
        return re.compile(r't.headers\["X-Authorization"\]="(.*?)",').findall(js_html)[0]
def get_jwt(self, browser_id, api_headers, ss, timeout, proxies):
data = {"browser_id": browser_id}
_ = ss.options(self.get_jwt_url, headers=self.host_headers, timeout=timeout, proxies=proxies)
return ss.post(self.get_jwt_url, headers=api_headers, json=data, timeout=timeout, proxies=proxies).json()['jwt']
def crypt(self, if_de=True):
normal_key = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz' + '0123456789' + '=.+-_/'
cipher_key = 'NOPQRSTUVWXYZABCDEFGHIJKLMnopqrstuvwxyzabcdefghijklm' + '0123456789' + '=.+-_/'
if if_de:
return {k: v for k, v in zip(cipher_key, normal_key)}
return {v: k for k, v in zip(cipher_key, normal_key)}
def encrypt(self, plain_text):
encrypt_dictionary = self.crypt(if_de=False)
_cipher_text = base64.b64encode(plain_text.encode()).decode()
return ''.join(list(map(lambda k: encrypt_dictionary[k], _cipher_text)))
def decrypt(self, cipher_text):
_ciphertext = ''.join(list(map(lambda k: self.decrypt_dictionary[k], cipher_text)))
return base64.b64decode(_ciphertext).decode()
# @Tse.time_stat
def caiyun_api(self, query_text:str, from_language:str='auto', to_language:str='en', **kwargs) -> Union[str, dict]:
"""
https://fanyi.caiyunapp.com
:param query_text: str, must.
:param from_language: str, default 'auto'.
:param to_language: str, default 'en'.
:param **kwargs:
:param professional_field: str, default None, choose from ("medicine","law","machinery")
:param if_ignore_limit_of_length: boolean, default False.
:param is_detail_result: boolean, default False.
:param timeout: float, default None.
:param proxies: dict, default None.
:param sleep_seconds: float, default `random.random()`.
:return: str or dict
"""
use_domain = kwargs.get('professional_field', None)
if use_domain:
assert use_domain in ("medicine", "law", "machinery")
is_detail_result = kwargs.get('is_detail_result', False)
timeout = kwargs.get('timeout', None)
proxies = kwargs.get('proxies', None)
sleep_seconds = kwargs.get('sleep_seconds', random.random())
if_ignore_limit_of_length = kwargs.get('if_ignore_limit_of_length', False)
query_text = self.check_query_text(query_text, if_ignore_limit_of_length)
with requests.Session() as ss:
host_html = ss.get(self.host_url, headers=self.host_headers, timeout=timeout, proxies=proxies).text
if not self.get_tk_url:
self.get_tk_url = self.host_url + re.compile(self.get_tk_pattern).search(host_html).group()
if not self.language_map:
self.language_map = self.get_language_map(ss, timeout, proxies)
from_language, to_language = self.check_language(from_language, to_language, self.language_map, output_zh=self.output_zh)
self.tk = self.get_tk(ss, timeout, proxies)
self.api_headers.update({
"app-name": "xy",
"device-id": "",
"os-type": "web",
"os-version": "",
"version": "1.8.0",
"X-Authorization": self.tk,
})
self.jwt = self.get_jwt(self.browser_id, self.api_headers, ss, timeout, proxies)
self.api_headers.update({"T-Authorization": self.jwt})
form_data = {
"browser_id": self.browser_id,
"cached": "true",
"dict": "true",
"media": "text",
"os_type": "web",
"replaced": "true",
"request_id": "web_fanyi",
"source": query_text,
"trans_type": f"{from_language}2{to_language}",
}
if from_language == 'auto':
form_data.update({'detect': 'true'})
if use_domain:
form_data.update({"dict_name": use_domain, "use_common_dict": "true"})
_ = ss.options(self.api_url, headers=self.host_headers, timeout=timeout, proxies=proxies)
r = ss.post(self.api_url, headers=self.api_headers, json=form_data, timeout=timeout, proxies=proxies)
r.raise_for_status()
data = r.json()
time.sleep(sleep_seconds)
self.query_count += 1
self.api_headers.pop('T-Authorization')
data['target'] = self.decrypt(data['target'])
return data if is_detail_result else data['target']
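# Hedged illustration (added, not part of the original library): the Caiyun
# 'crypt'/'encrypt'/'decrypt' trio above is a fixed monoalphabetic substitution
# over a base64 alphabet. A self-contained sketch of the same round trip, using
# the key strings from the class so the transform is visible in isolation:
def _caiyun_cipher_demo(plain_text='hello'):
    normal_key = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz' + '0123456789' + '=.+-_/'
    cipher_key = 'NOPQRSTUVWXYZABCDEFGHIJKLMnopqrstuvwxyzabcdefghijklm' + '0123456789' + '=.+-_/'
    enc = {n: c for n, c in zip(normal_key, cipher_key)}  # encrypt: normal -> cipher
    dec = {c: n for n, c in zip(normal_key, cipher_key)}  # decrypt: cipher -> normal
    cipher_text = ''.join(enc[ch] for ch in base64.b64encode(plain_text.encode()).decode())
    recovered = base64.b64decode(''.join(dec[ch] for ch in cipher_text)).decode()
    assert recovered == plain_text
    return cipher_text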
class Deepl(Tse):
def __init__(self):
super().__init__()
self.host_url = 'https://www.deepl.com/translator'
self.api_url = 'https://www2.deepl.com/jsonrpc'
self.host_headers = self.get_headers(self.host_url, if_api=False)
self.api_headers = self.get_headers(self.host_url, if_api=True, if_ajax_for_api=False, if_json_for_api=True)
self.api_headers.update({'TE': 'trailers'})
self.request_id = random.randrange(100,10000) * 10000 + 5
self.language_map = None
self.query_count = 0
self.output_zh = 'zh'
def get_language_map(self, host_html):
pattern = '//*[@dl-test="language-selector"]//option[@value]/@value'
lang_list = lxml.etree.HTML(host_html).xpath(pattern)
lang_list = list(set([x.split('/')[1] for x in lang_list if 'auto' not in x]))
return {}.fromkeys(lang_list, lang_list)
def split_sentences_param(self, query_text, from_language):
params = {'method': 'LMT_split_into_sentences'}
data = {
'id': self.request_id + 0,
'jsonrpc': '2.0',
'params': {
'texts': [query_text],
'lang': {
'lang_user_selected': from_language,
'preference':{
'weight': {},
'default': 'default',
},
},
},
}
data.update(params)
return params, data
def context_sentences_param(self, sentences, from_language, to_language):
        # pad with empty sentinels so every real sentence has a before/after context window
        sentences = [''] + sentences + ['']
params = {'method': 'LMT_handle_jobs'}
data = {
'id': self.request_id + 1,
            'jsonrpc': '2.0',
'params': {
'priority': 1, #-1 if 'quality': 'fast'
'commonJobParams': {
# 'regionalVariant': 'en-US',
'browserType': 1,
'formality': None,
},
'timestamp': int(time.time()*1000),
'jobs': [
{
'kind': 'default',
# 'quality': 'fast', # -1
'raw_en_sentence': sentences[i],
'raw_en_context_before': [sentences[i-1]] if sentences[i-1] else [],
'raw_en_context_after': [sentences[i+1]] if sentences[i+1] else [],
                    'preferred_num_beams': 1 if len(sentences) >= 4 else 4,  # 1 beam when there are 2+ real sentences, 4 beams for a single one (len counts the two '' sentinels)
} for i in range(1,len(sentences)-1)
],
'lang': {
'preference': {
'weight': {},
'default': 'default',
},
'source_lang_computed': from_language,
'target_lang': to_language,
},
},
}
data.update(params)
return params, data
# @Tse.time_stat
def deepl_api(self, query_text:str, from_language:str='auto', to_language:str='en', **kwargs) -> Union[str,dict]:
"""
https://www.deepl.com
:param query_text: str, must.
:param from_language: str, default 'auto'.
:param to_language: str, default 'en'.
:param **kwargs:
:param if_ignore_limit_of_length: boolean, default False.
:param is_detail_result: boolean, default False.
:param timeout: float, default None.
:param proxies: dict, default None.
:param sleep_seconds: float, default `random.random()`.
:return: str or dict
"""
is_detail_result = kwargs.get('is_detail_result', False)
timeout = kwargs.get('timeout', None)
proxies = kwargs.get('proxies', None)
sleep_seconds = kwargs.get('sleep_seconds', random.random())
if_ignore_limit_of_length = kwargs.get('if_ignore_limit_of_length', False)
query_text = self.check_query_text(query_text, if_ignore_limit_of_length)
delete_temp_language_map_label = 0
with requests.Session() as ss:
host_html = ss.get(self.host_url, headers=self.host_headers, timeout=timeout, proxies=proxies).text
if not self.language_map:
self.language_map = self.get_language_map(host_html)
if not self.language_map:
delete_temp_language_map_label += 1
self.language_map = self.make_temp_language_map(from_language, to_language)
from_language, to_language = self.check_language(from_language, to_language, language_map=self.language_map,
output_zh=self.output_zh, output_auto='auto')
from_language = from_language.upper() if from_language != 'auto' else from_language
to_language = to_language.upper() if to_language != 'auto' else to_language
ss_params, ss_data = self.split_sentences_param(query_text, from_language)
_ = ss.options(self.api_url, params=ss_params, headers=self.api_headers, timeout=timeout, proxies=proxies)
r_ss = ss.post(self.api_url, params=ss_params, json=ss_data, headers=self.api_headers, timeout=timeout, proxies=proxies)
r_ss.raise_for_status()
ss_data = r_ss.json()
ss_sentences = ss_data['result']['splitted_texts'][0]
cs_params, cs_data = self.context_sentences_param(ss_sentences, from_language, to_language)
_ = ss.options(self.api_url, params=cs_params, headers=self.api_headers, timeout=timeout, proxies=proxies)
r_cs = ss.post(self.api_url, params=cs_params, json=cs_data, headers=self.api_headers, timeout=timeout, proxies=proxies)
r_cs.raise_for_status()
data = r_cs.json()
if delete_temp_language_map_label != 0:
self.language_map = None
time.sleep(sleep_seconds)
self.query_count += 1
return data if is_detail_result else ' '.join(item['beams'][0]['postprocessed_sentence'] for item in data['result']['translations'])
class Yandex(Tse):
def __init__(self):
super().__init__()
self.host_url = 'https://translate.yandex.com'
self.api_url = 'https://translate.yandex.net/api/v1/tr.json/translate'
self.detect_language_url = 'https://translate.yandex.net/api/v1/tr.json/detect'
self.host_headers = self.get_headers(self.host_url, if_api=False, if_ajax_for_api=False)
self.api_headers = self.get_headers(self.host_url, if_api=True, if_ajax_for_api=True)
self.language_map = None
self.sid = None
self.query_count = 0
self.output_zh = 'zh'
def get_language_map(self, host_html):
lang_str = re.compile(pattern='TRANSLATOR_LANGS: {(.*?)},', flags=re.S).findall(host_html)
if not lang_str:
return {}
lang_dict = eval('{' + lang_str[0] + '}')
return {}.fromkeys(lang_dict.keys(), lang_dict.keys())
def detect_language(self, ss, query_text, sid, timeout, proxies):
params = {'sid': sid, 'srv': 'tr-text', 'text': query_text, 'hint': 'zh,en', 'options': 1,}
r = ss.get(self.detect_language_url, params=params, headers=self.host_headers, timeout=timeout, proxies=proxies)
r.raise_for_status()
return r.json().get('lang')
# @Tse.time_stat
def yandex_api(self, query_text:str, from_language:str='auto', to_language:str='en', **kwargs) -> Union[str,dict]:
"""
https://translate.yandex.com
:param query_text: str, must.
:param from_language: str, default 'auto'.
:param to_language: str, default 'en'.
:param **kwargs:
:param if_ignore_limit_of_length: boolean, default False.
:param is_detail_result: boolean, default False.
:param timeout: float, default None.
:param proxies: dict, default None.
:param sleep_seconds: float, default `random.random()`.
:return: str or dict
"""
is_detail_result = kwargs.get('is_detail_result', False)
timeout = kwargs.get('timeout', None)
proxies = kwargs.get('proxies', None)
sleep_seconds = kwargs.get('sleep_seconds', random.random())
if_ignore_limit_of_length = kwargs.get('if_ignore_limit_of_length', False)
query_text = self.check_query_text(query_text, if_ignore_limit_of_length)
with requests.Session() as ss:
host_html = ss.get(self.host_url, headers=self.host_headers, timeout=timeout, proxies=proxies).text
if not self.sid:
sid_find = re.compile("SID: '(.*?)',").findall(host_html)
self.sid = sid_find[0] if sid_find else '3d58bd71.5f49c293.93b157d0.74722d74657874'
if not self.language_map:
self.language_map = self.get_language_map(host_html)
from_language, to_language = self.check_language(from_language, to_language, self.language_map, output_zh=self.output_zh)
from_language = self.detect_language(ss, query_text, self.sid, timeout, proxies) if from_language == 'auto' else from_language
params = {
'id': f'{self.sid}-{self.query_count}-0',
'lang': f'{from_language}-{to_language}',
'srv': 'tr-text',
'reason': 'auto',
'format': 'text'
}
form_data = {'text': query_text, 'options': 4}
r = ss.post(self.api_url, headers=self.api_headers, params=params, data=form_data, timeout=timeout, proxies=proxies)
r.raise_for_status()
data = r.json()
time.sleep(sleep_seconds)
self.query_count += 1
return data if is_detail_result else data['text'][0]
class Argos(Tse):
def __init__(self):
super().__init__()
self.host_url = 'https://translate.argosopentech.com'
self.api_url = f'{self.host_url}/translate'
self.language_url = f'{self.host_url}/languages'
self.host_headers = self.get_headers(self.host_url, if_api=False, if_ajax_for_api=False)
self.api_headers = self.get_headers(self.host_url, if_api=True, if_ajax_for_api=False, if_json_for_api=True)
self.language_headers = self.get_headers(self.host_url, if_api=False, if_json_for_api=True)
self.host_pool = ['https://translate.argosopentech.com', 'https://libretranslate.de',
'https://translate.astian.org', 'https://translate.mentality.rip',
'https://translate.api.skitzen.com', 'https://trans.zillyhuhn.com']
self.language_map = None
self.query_count = 0
self.output_zh = 'zh'
def get_language_map(self, lang_url, ss, headers, timeout, proxies):
# et = lxml.etree.HTML(host_html)
# lang_list = sorted(list(set(et.xpath('//*/select/option/@value'))))
lang_list = ss.get(lang_url, headers=headers, timeout=timeout, proxies=proxies).json()
lang_list = sorted([lang['code'] for lang in lang_list])
return {}.fromkeys(lang_list, lang_list)
def argos_api(self, query_text:str, from_language:str='auto', to_language:str='en', **kwargs) -> Union[str,dict]:
"""
https://translate.argosopentech.com
:param query_text: str, must.
:param from_language: str, default 'auto'.
:param to_language: str, default 'en'.
:param **kwargs:
:param reset_host_url: str, default None.
:param if_ignore_limit_of_length: boolean, default False.
:param is_detail_result: boolean, default False.
:param timeout: float, default None.
:param proxies: dict, default None.
:param sleep_seconds: float, default `random.random()`.
:return: str or dict
"""
reset_host_url = kwargs.get('reset_host_url', None)
if reset_host_url and reset_host_url != self.host_url:
assert reset_host_url in self.host_pool, f'`reset_host_url` not in `host_pool`: {self.host_pool}'
self.host_url = reset_host_url
self.api_url = f'{self.host_url}/translate'
self.language_url = f'{self.host_url}/languages'
is_detail_result = kwargs.get('is_detail_result', False)
timeout = kwargs.get('timeout', None)
proxies = kwargs.get('proxies', None)
sleep_seconds = kwargs.get('sleep_seconds', random.random())
if_ignore_limit_of_length = kwargs.get('if_ignore_limit_of_length', False)
query_text = self.check_query_text(query_text, if_ignore_limit_of_length)
delete_temp_language_map_label = 0
with requests.Session() as ss:
_ = ss.get(self.host_url, headers=self.host_headers, timeout=timeout, proxies=proxies).text
if not self.language_map:
self.language_map = self.get_language_map(self.language_url, ss, self.language_headers, timeout, proxies)
if not self.language_map:
delete_temp_language_map_label += 1
self.language_map = self.make_temp_language_map(from_language, to_language)
from_language, to_language = self.check_language(from_language, to_language, self.language_map, output_zh=self.output_zh)
form_data = {'q': query_text, 'source': from_language, 'target': to_language, 'format': 'text'}
r = ss.post(self.api_url, headers=self.api_headers, json=form_data, timeout=timeout, proxies=proxies)
r.raise_for_status()
data = r.json()
if delete_temp_language_map_label != 0:
self.language_map = None
time.sleep(sleep_seconds)
self.query_count += 1
return data if is_detail_result else data['translatedText']
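# Added note (not in the original): the endpoint above follows the public
# LibreTranslate JSON API - POST {host}/translate with
# {'q': ..., 'source': ..., 'target': ..., 'format': 'text'} - which is why
# switching `reset_host_url` between the mirrors in `host_pool` needs no other changes.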
class Iciba(Tse):
def __init__(self):
super().__init__()
self.host_url = 'https://www.iciba.com/fy'
self.api_url = 'https://ifanyi.iciba.com/index.php'
self.host_headers = self.get_headers(self.host_url, if_api=False, if_ajax_for_api=False)
self.api_headers = self.get_headers(self.host_url, if_api=True, if_ajax_for_api=True, if_json_for_api=False)
self.language_headers = self.get_headers(self.host_url, if_api=False, if_json_for_api=True)
self.language_map = None
self.query_count = 0
self.output_zh = 'zh'
def get_language_map(self, api_url, ss, headers, timeout, proxies):
params = {'c': 'trans', 'm': 'getLanguage', 'q': 0, 'type': 'en', 'str': ''}
dd = ss.get(api_url, params=params, headers=headers, timeout=timeout, proxies=proxies).json()
lang_list = sorted(list(set([lang for d in dd for lang in dd[d]])))
return {}.fromkeys(lang_list, lang_list)
def iciba_api(self, query_text:str, from_language:str='auto', to_language:str='en', **kwargs) -> Union[str,dict]:
"""
https://www.iciba.com/fy
:param query_text: str, must.
:param from_language: str, default 'auto'.
:param to_language: str, default 'en'.
:param **kwargs:
:param if_ignore_limit_of_length: boolean, default False.
:param is_detail_result: boolean, default False.
:param timeout: float, default None.
:param proxies: dict, default None.
:param sleep_seconds: float, default `random.random()`.
:return: str or dict
"""
is_detail_result = kwargs.get('is_detail_result', False)
timeout = kwargs.get('timeout', None)
proxies = kwargs.get('proxies', None)
sleep_seconds = kwargs.get('sleep_seconds', random.random())
if_ignore_limit_of_length = kwargs.get('if_ignore_limit_of_length', False)
query_text = self.check_query_text(query_text, if_ignore_limit_of_length)
delete_temp_language_map_label = 0
with requests.Session() as ss:
_ = ss.get(self.host_url, headers=self.host_headers, timeout=timeout, proxies=proxies)
if not self.language_map:
self.language_map = self.get_language_map(self.api_url, ss, self.language_headers, timeout, proxies)
if not self.language_map:
delete_temp_language_map_label += 1
self.language_map = self.make_temp_language_map(from_language, to_language)
from_language, to_language = self.check_language(from_language, to_language, self.language_map, output_zh=self.output_zh)
sign = hashlib.md5(f"6key_cibaifanyicjbysdlove1{query_text}".encode()).hexdigest()[:16]
params = {'c': 'trans', 'm': 'fy', 'client': 6, 'auth_user': 'key_ciba', 'sign': sign}
form_data = {'from': from_language, 'to': to_language, 'q': query_text}
r = ss.post(self.api_url, headers=self.api_headers, params=params, data=form_data, timeout=timeout, proxies=proxies)
r.raise_for_status()
data = r.json()
if delete_temp_language_map_label != 0:
self.language_map = None
time.sleep(sleep_seconds)
self.query_count += 1
if data.get('isSensitive') == 1:
warnings.warn('Iciba warning: Sorry, your translation file contains sensitive words and cannot provide translation.')
content_out = data['content']
else:
content_out = data['content']['out']
return data if is_detail_result else content_out
class Iflytek(Tse):
def __init__(self):
super().__init__()
self.host_url = 'https://saas.xfyun.cn/translate?tabKey=text'
self.api_url = 'https://saas.xfyun.cn/ai-application/trans/its'
self.old_language_url = 'https://saas.xfyun.cn/_next/static/4bzLSGCWUNl67Xal-AfIl/pages/translate.js'
        self.language_url_pattern = r'/_next/static/(\w+([-?]\w+))/pages/translate.js'  # brittle: the build hash changes between deployments
self.language_url = None
self.cookies_url = 'https://sso.xfyun.cn//SSOService/login/getcookies'
self.info_url = 'https://saas.xfyun.cn/ai-application/user/info'
self.host_headers = self.get_headers(self.host_url, if_api=False)
self.api_headers = self.get_headers(self.host_url, if_api=True)
self.language_map = None
self.query_count = 0
self.output_zh = 'cn'
def get_language_map(self, host_html, ss, headers, timeout, proxies):
try:
if not self.language_url:
url_path = re.compile(self.language_url_pattern).search(host_html).group(0)
                self.language_url = f'{self.host_url[:21]}{url_path}'  # self.host_url[:21] == 'https://saas.xfyun.cn'
r = ss.get(self.language_url, headers=headers, timeout=timeout, proxies=proxies)
r.raise_for_status()
except:
r = ss.get(self.old_language_url, headers=headers, timeout=timeout, proxies=proxies)
r.raise_for_status()
if not self.language_url:
self.language_url = self.old_language_url
js_html = r.text
lang_str = re.compile('languageList:{(.*?)}').search(js_html).group()[13:]
lang_list = sorted(list(execjs.eval(lang_str).keys()))
return {}.fromkeys(lang_list, lang_list)
def iflytek_api(self, query_text:str, from_language:str='zh', to_language:str='en', **kwargs) -> Union[str,dict]:
"""
https://saas.xfyun.cn/translate?tabKey=text
:param query_text: str, must.
:param from_language: str, default 'zh', unsupported 'auto'.
:param to_language: str, default 'en'.
:param **kwargs:
:param if_ignore_limit_of_length: boolean, default False.
:param is_detail_result: boolean, default False.
:param timeout: float, default None.
:param proxies: dict, default None.
:param sleep_seconds: float, default `random.random()`.
:return: str or dict
"""
is_detail_result = kwargs.get('is_detail_result', False)
timeout = kwargs.get('timeout', None)
proxies = kwargs.get('proxies', None)
sleep_seconds = kwargs.get('sleep_seconds', random.random())
if_ignore_limit_of_length = kwargs.get('if_ignore_limit_of_length', False)
query_text = self.check_query_text(query_text, if_ignore_limit_of_length)
delete_temp_language_map_label = 0
assert from_language != 'auto', 'unsupported [from_language=auto] with [iflytek] !'
with requests.Session() as ss:
host_html = ss.get(self.host_url, headers=self.host_headers, timeout=timeout, proxies=proxies).text
_ = ss.get(self.cookies_url, headers=self.host_headers, timeout=timeout, proxies=proxies)
_ = ss.get(self.info_url, headers=self.host_headers, timeout=timeout, proxies=proxies)
if not self.language_map:
self.language_map = self.get_language_map(host_html, ss, self.host_headers, timeout, proxies)
if not self.language_map:
delete_temp_language_map_label += 1
self.language_map = self.make_temp_language_map(from_language, to_language)
from_language, to_language = self.check_language(from_language, to_language, self.language_map, output_zh=self.output_zh)
cookie_dict = ss.cookies.get_dict()
self.api_headers.update({'Cookie': f'_wafuid={cookie_dict["_wafuid"]}; di_c_mti={cookie_dict["SESSION"]}'})
cipher_query_text = base64.b64encode(query_text.encode()).decode()
form_data = {'from': from_language, 'to': to_language, 'text': cipher_query_text}
r = ss.post(self.api_url, headers=self.api_headers, data=form_data, timeout=timeout, proxies=proxies)
r.raise_for_status()
data = r.json()
if delete_temp_language_map_label != 0:
self.language_map = None
time.sleep(sleep_seconds)
self.query_count += 1
return data if is_detail_result else data['data']['result']['trans_result']['dst']
REQUEST_SERVER_REGION_INFO = TranslatorSeverRegion().request_server_region_info
_alibaba = Alibaba()
alibaba = _alibaba.alibaba_api
_argos = Argos()
argos = _argos.argos_api
_baidu = Baidu()
baidu = _baidu.baidu_api
_bing = Bing()
bing = _bing.bing_api
_caiyun = Caiyun()
caiyun = _caiyun.caiyun_api
_deepl = Deepl()
deepl = _deepl.deepl_api
# _google = GoogleV1()
_google = GoogleV2()
google = _google.google_api
_iciba = Iciba()
iciba = _iciba.iciba_api
_iflytek = Iflytek()
iflytek = _iflytek.iflytek_api
_sogou = Sogou()
sogou = _sogou.sogou_api
_tencent = Tencent()
tencent = _tencent.tencent_api
_yandex = Yandex()
yandex = _yandex.yandex_api
_youdao = Youdao()
youdao = _youdao.youdao_api
@Tse.time_stat
def translate_html(html_text:str, to_language:str='en', translator:Union[str,Callable]='auto', n_jobs:int=-1, **kwargs) -> str:
"""
Translate the displayed content of html without changing the html structure.
:param html_text: str, html format.
:param to_language: str, default: 'en'.
    :param translator: translator function, default 'auto', which means ts.bing.
    :param n_jobs: int, default -1, which means os.cpu_count().
:param **kwargs:
:param if_ignore_limit_of_length: boolean, default False.
:param timeout: float, default None.
:param proxies: dict, default None.
:return: str, html format.
"""
if kwargs:
for param in ('query_text', 'to_language', 'is_detail_result'):
assert param not in kwargs, f'{param} should not be in `**kwargs`.'
kwargs.update({'sleep_seconds': 0})
n_jobs = os.cpu_count() if n_jobs <= 0 else n_jobs
translator = bing if translator == 'auto' else translator
pattern = re.compile(r"(?:^|(?<=>))([\s\S]*?)(?:(?=<)|$)") #TODO: <code></code> <div class="codetext notranslate">
sentence_list = set(pattern.findall(html_text))
_map_translate_func = lambda sentence: (sentence,translator(query_text=sentence, to_language=to_language, **kwargs))
result_list = pathos.multiprocessing.ProcessPool(n_jobs).map(_map_translate_func, sentence_list)
result_dict = {text: ts_text for text,ts_text in result_list}
_get_result_func = lambda k: result_dict.get(k.group(1), '')
return pattern.sub(repl=_get_result_func, string=html_text)
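# Hedged usage sketch (added, not part of the original file). The module-level
# aliases above are plain callables against live web services, so typical calls
# would look like:
#   >>> bing('Hello, world!', to_language='zh-Hans')
#   >>> alibaba('Hello, world!', professional_field='general')
#   >>> translate_html('<div><p>Hello</p></div>', to_language='de', translator=bing, n_jobs=2)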
| avg_line_length: 48.740578 | max_line_length: 148 | alphanum_fraction: 0.620336 |
hexsha: 498cce0a4be4259615164d7ce5a0ff83df2fec10 | size: 1584 | ext: py | lang: Python
repo_path: tests/testapp/settings.py | repo_name: jghyllebert/django-translated-fields | repo_head_hexsha: 5a812331b11342faafc99a9e707c16d33edb3fa2 | licenses: ["BSD-3-Clause"] | stars: null | issues: null | forks: null
import os
DATABASES = {"default": {"ENGINE": "django.db.backends.sqlite3", "NAME": ":memory:"}}
INSTALLED_APPS = [
"django.contrib.auth",
"django.contrib.admin",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.staticfiles",
"django.contrib.messages",
"testapp",
]
MEDIA_ROOT = "/media/"
STATIC_URL = "/static/"
BASEDIR = os.path.dirname(__file__)
MEDIA_ROOT = os.path.join(BASEDIR, "media/")
STATIC_ROOT = os.path.join(BASEDIR, "static/")
SECRET_KEY = "supersikret"
LOGIN_REDIRECT_URL = "/?login=1"
ROOT_URLCONF = "testapp.urls"
LANGUAGES = (("en", "English"), ("de", "German"))
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
]
},
}
]
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.locale.LocaleMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
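# Added usage note (not in the original settings file): a test-only settings
# module like this is usually selected through the environment, e.g.
#   DJANGO_SETTINGS_MODULE=testapp.settings python -m django test
# assuming the directory containing `testapp/` is on PYTHONPATH.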
| avg_line_length: 29.886792 | max_line_length: 85 | alphanum_fraction: 0.666035 |
hexsha: 50b3729a1e4415a3c05a47aea62a2bf1c3809c85 | size: 2886 | ext: py | lang: Python
repo_path: vta/python/vta/top/bitpack.py | repo_name: XiaoSong9905/tvm | repo_head_hexsha: 48940f697e15d5b50fa1f032003e6c700ae1e423 | licenses: ["Apache-2.0"]
stars: 4640 (2017-08-17T19:22:15.000Z to 2019-11-04T15:29:46.000Z) | issues: 3022 (2020-11-24T14:02:31.000Z to 2022-03-31T23:55:31.000Z) | forks: 1352 (2017-08-17T19:30:38.000Z to 2019-11-04T16:09:29.000Z)
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=ungrouped-imports
"""Bit packing operators"""
from __future__ import absolute_import as _abs
import tvm
from tvm import te
from tvm.topi import utils
from tvm.relay.op.op import register_compute, register_injective_schedule
from tvm.relay.op.op import register_pattern, OpPattern
def bitpack(data, bits, pack_type="int8", name="bitpack"):
    """Packs the lowest dimension into the format needed by VTA
    Parameters
    ----------
    data : tvm.te.Tensor
        The input tensor whose lowest dimension is packed.
    bits : int
        Number of bits each element occupies after packing.
    pack_type : str
        Storage dtype holding the packed lanes ("int8", "int16" or "int32").
    name : str
        Name of the resulting compute op.
    Returns
    -------
    packed : Tensor
        The packed tensor.
    """
shape_vec = list(data.shape)
if pack_type == "int8":
data_width = 8
elif pack_type == "int16":
data_width = 16
elif pack_type == "int32":
data_width = 32
else:
raise RuntimeError("Unknown pack type %s" % pack_type)
assert data_width % bits == 0
lanes = data_width // bits
# Data must be in multiples of the data_width
assert utils.get_const_int(shape_vec[-1]) % lanes == 0, "Not a multiple of word size"
shape_vec[-1] = shape_vec[-1] // lanes
oshape = tuple(shape_vec)
def _bitpack(*indices):
ret = None
mask = tvm.tir.const((1 << bits) - 1, pack_type)
for k in range(lanes):
idx = list(indices)
idx[-1] = idx[-1] * lanes + k
elem = data(*idx).astype(pack_type)
if k == 0:
ret = elem & mask
else:
val = (elem & mask) << tvm.tir.const(k * bits, pack_type)
ret = ret | val
return ret
return te.compute(oshape, _bitpack, name=name, tag="bitpack")
@register_compute("bitpack", level=15)
def compute_bitpack(attrs, inputs):
lanes = attrs.lanes
dtype = inputs[0].dtype
assert dtype == "int8"
width = 8
assert width % lanes == 0
bits = 8 // lanes
return bitpack(inputs[0], bits, dtype)
register_injective_schedule("bitpack")
register_pattern("bitpack", OpPattern.INJECTIVE)
| avg_line_length: 31.369565 | max_line_length: 89 | alphanum_fraction: 0.658697 |
hexsha: fe8cbcf7fa12b6363415fce6cc06772e7cbf3b0e | size: 23679 | ext: py | lang: Python
stars: repo_path: ontology/FlatOntologyManager.py | repo_name: Dagu9/Reinforcement-learning-SGD | repo_head_hexsha: eb4a2546d6c99917b33e8cc4c210709e7d4cc15e | licenses: ["Apache-2.0"] | count: 2 (2020-01-20T14:43:27.000Z to 2021-04-29T12:21:05.000Z)
issues: repo_path: ontology/FlatOntologyManager.py | repo_name: vmishra04/Pydial | repo_head_hexsha: a689fa1177cd34f32dd4d30a5a6140fb721855bf | licenses: ["Apache-2.0"] | count: null
forks: repo_path: ontology/FlatOntologyManager.py | repo_name: vmishra04/Pydial | repo_head_hexsha: a689fa1177cd34f32dd4d30a5a6140fb721855bf | licenses: ["Apache-2.0"] | count: null
###############################################################################
# PyDial: Multi-domain Statistical Spoken Dialogue System Software
###############################################################################
#
# Copyright 2015 - 2019
# Cambridge University Engineering Department Dialogue Systems Group
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###############################################################################
'''
FlatOntologyManager.py - Domain class and Multidomain API
==========================================================
Copyright CUED Dialogue Systems Group 2015 - 2017
Controls Access to the ontology files.
.. seealso:: CUED Imports/Dependencies:
import :mod:`utils.Settings` |.|
import :mod:`utils.ContextLogger` |.|
import :mod:`ontology.OntologyUtils`
************************
'''
__author__ = "cued_dialogue_systems_group"
import copy, math, json
import numpy as np
import os
import DataBaseSQLite
from ontology import OntologyUtils
from utils import Settings, ContextLogger
logger = ContextLogger.getLogger('')
#------------------------------------------------------------------------------------------------------------
# ONTOLOGY FOR A SINGLE DOMAIN
#------------------------------------------------------------------------------------------------------------
class FlatDomainOntology(object):
'''Utilities for ontology queries
'''
def __init__(self, domainString, rootIn=None):
        '''Should also be treated as a singleton - otherwise the point of FlatOntologyManager() being a
        singleton is circumvented: if the ontology is to be dynamic at all, everything should refer to the one single source.
:param domainString: tag such as 'SFHotels' or 'CamHotels'
:type domainString: str
:param rootIn: path of repository - default None -
:type rootIn: str
'''
# For conditional goal generation:
self.PROB_MASS_OVER_CONDITIONALS = 0.9 # must be less than 1.0
if rootIn is not None:
Settings.load_root(rootIn) # for use with semi file parser - where no config is given to set repository root by
self.domainString = domainString
self._set_ontology() # sets self.ontology
self._set_db() # sets self.db
self._set_domains_informable_and_requestable_slots()
def _set_domains_informable_and_requestable_slots(self):
'''
'''
self.sorted_system_requestable_slots = self.ontology["system_requestable"]
self.sorted_system_requestable_slots.sort()
self.informable_slots = self.ontology["informable"].keys()
return
def _set_ontology(self):
"""Just loads json file -- No class for ontology representation at present.
"""
ontology_fname = OntologyUtils.get_ontology_path(self.domainString)
logger.info('Loading ontology: '+ontology_fname)
try:
with open(ontology_fname) as ontofile:
self.ontology = json.load(ontofile)
except IOError:
#print IOError
logger.error("No such file or directory: "+ontology_fname+". Probably <Settings.root> is not set/set wrong by config.")
return
def _set_db(self):
"""Sets self.db to instance of choosen Data base accessing class.
.. note:: It is currently hardcoded to use the sqlite method. But this can be config based - data base classes share interface
so only need to change class here, nothing else in code will need adjusting.
"""
db_fname = OntologyUtils.get_database_path(self.domainString)
logger.info('Loading database: '+db_fname+'db')
try:
#self.db = DataBase.DataBase(db_fname+'txt')
dbprefix = None
if Settings.config.has_option("exec_config", "dbprefix"):
dbprefix = Settings.config.get("exec_config", "dbprefix")
if dbprefix.lower() == 'none':
dbprefix = None
if dbprefix:
db_fname = os.path.join(dbprefix, db_fname.split('/')[-1])
self.db = DataBaseSQLite.DataBase_SQLite(dbfile=db_fname+'db', dstring=self.domainString)
        except IOError as e:
            print e
            logger.error("No such file or directory: "+db_fname+". Probably <Settings.root> is not set/set wrong by config.")
return
# methods:
def getRandomValueForSlot(self, slot, nodontcare=False, notthese=[], conditional_values=[]):
'''
:param slot: None
:type slot: str
:param nodontcare: None
:type nodontcare: bool
:param notthese: None
:type notthese: list
'''
if slot == 'type':
#TODO - still think need to think about how "type" slot is used - rather pointless just now.
return self.ontology['type']
if slot not in self.ontology['informable']:
return None
candidate = copy.deepcopy(self.ontology['informable'][slot])
if len(candidate) == 0:
logger.warning("candidates for slot "+slot+" should not be empty")
if not nodontcare:
candidate += ['dontcare']
candidate = list(set(candidate) - set(notthese))
# TODO - think should end up doing something like if candidate is empty - return 'dontcare'
if len(candidate) == 0:
return 'dontcare'
# Conditionally sample a goal based on already generated goals in other domains
conditional_sample_prob = self.get_sample_prob(candidate,conditional_values)
return Settings.random.choice(candidate, p=conditional_sample_prob)
def get_sample_prob(self, candidate, conditional_values):
"""Sets a prob distribution over the values in *candidate* (which could be slots, or values with a slot)
- assigns larger probabilities to things within the *conditional_values* list
:param candidate: of strings
:type candidate: list
:param conditional_values: of strings
:type conditional_values: list
:returns: numpy vector with prob distribution
"""
conditional_sample_prob = None
if len(conditional_values):
prob_mass_per_cond = self.PROB_MASS_OVER_CONDITIONALS/float(len(conditional_values))
conditional_sample_prob = np.zeros(len(candidate))
for cond in conditional_values:
conditional_sample_prob[candidate.index(cond)] += prob_mass_per_cond
# and normalise (ie fix zero elements)
prob_mass_per_non_cond = (1.0-self.PROB_MASS_OVER_CONDITIONALS)/\
float(len(conditional_sample_prob)-len(conditional_sample_prob[np.nonzero(conditional_sample_prob)]))
conditional_sample_prob[conditional_sample_prob==0] = prob_mass_per_non_cond
if not np.isclose(1.0, math.fsum(conditional_sample_prob)):
logger.warning("Sampling prob distrib not normalised.sums to: "+str(math.fsum(conditional_sample_prob)))
return None
return conditional_sample_prob
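    # Worked example (added comment, not in the original): with
    # candidate=['a', 'b', 'c'], conditional_values=['a'] and
    # PROB_MASS_OVER_CONDITIONALS=0.9, 'a' receives 0.9 and the remaining
    # 0.1 is split evenly over 'b' and 'c', giving [0.9, 0.05, 0.05].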
def getValidSlotsForTask(self):
'''
:param None:
:returns: (list) with goal slot strings
'''
goalslots = self.ontology['system_requestable']
if len(goalslots) < 1:
logger.error('Number of goal constraints == 0')
return goalslots
def getValidRequestSlotsForTask(self):
'''
:param None:
:returns: (list) with user requestable goal slot strings
.. todo::
should be extended to cover arbitrary domains and ontologies
'''
A = self.ontology['requestable']
B = self.ontology['system_requestable']
request_slots = list(set(A)-set(B))
return request_slots
def getSlotsToExpress(self, slot, value):
'''
:param slot:
:param value:
:returns: List of slot names that should be conveyed for
the given abstract slot-value pair.
'''
#
#
# NOTE THAT THIS FUNCTION IS NOT IMPLEMENTED ... see below
#
#
logger.debug('DomainUtils/FlatDomainOntology: not completely implemented')
return [slot]
# result = []
# if value == '':
# result.append(slot)
#
# rules = ruletable.findClassByTerm(slot)
#
# if not rules:
# return result
#
# keytype = getKeyTypeForSlot(slot, rules[0].subclass)
# if keytype == 'structKey':
# argrules = ruletable.findClassBInst(slot)
# if argrules and argrules[0].args and value != 'dontcare':
# result = getSlotsForSubclass(value)
#
# if not result:
# result.append(slot)
#
# return result
def is_valid_request(self, request_type, slot):
# TODO
#logger.warning('Currently not implemented: always return True.')
return True
def is_implied(self, slot, value):
# TODO
#logger.warning('Currently not implemented: always return False.')
return False
def constraintsCanBeDiscriminated(self, constraints):
'''
Checks if the given constraints list returns a list of values which can be
discriminated between - i.e. there is a question which we could ask which
would give differences between the values.
'''
real_constraints = {}
dontcare_slots = []
for slot, value, belief in constraints:
if value != 'dontcare':
real_constraints[slot] = value
else:
dontcare_slots.append(slot)
entries = self.db.entity_by_features(constraints=real_constraints)
discriminable = False
if len(entries) < 2:
return discriminable
else:
discriminating_slots = list(self.informable_slots)
discriminating_slots.remove('name')
if 'price' in discriminating_slots: #TODO: ic340 why is price in informable slots (SFR)?
discriminating_slots.remove('price')
for slot in discriminating_slots:
if slot not in dontcare_slots:
values = []
for ent in entries:
values.append(ent[slot])
if len(set(values)) > 1:
discriminable = True
return discriminable
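    # Worked example (added comment, not in the original): if the real
    # constraints match two venues that differ only in 'area', and 'area' is
    # not among the dontcare slots, a question can still separate them, so the
    # method returns True; with fewer than two matching entities it returns False.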
def get_length_entity_by_features(self, constraints):
return self.db.get_length_entity_by_features(constraints=constraints)
class FlatOntologyManager(object):
"""
A singleton class that is used system wide (single instance created in Ontology.py-->globalOntology)
Provides access to all available domains ontologies and databases.
"""
instances = 0
# TODO - think about Cambridge Tourist System (other multi domain systems) -- can mention this under domains -->
def __init__(self):
self._ensure_singleton()
self.ontologyManagers = dict.fromkeys(OntologyUtils.available_domains)
self._config_bootup()
self.SPECIAL_DOMAINS = ['topicmanager','wikipedia','ood']
def _ensure_singleton(self):
FlatOntologyManager.instances += 1
if FlatOntologyManager.instances != 1:
msg = "Should not be trying to instantiate FlatOntologyManager(). This class is to be used as a singleton."
msg += " Only 1 global instance across system, accessed via ontology.Ontology module."
logger.error(msg)
return
def _config_bootup(self):
'''Boot up all domains given under [GENERAL] in config as domains = A,B,C,D
Settings.config must have first been set.
'''
if not Settings.config.has_option("GENERAL","domains"):
logger.error("You must specify the domains (a domain) under the GENERAL section of the config")
domains = Settings.config.get("GENERAL",'domains')
if domains in ['camtourist','sftourist','electronics','all']:
self.possible_domains = OntologyUtils.get_domains_group(domains)
else:
self.possible_domains = domains.split(',')
# self.possible_domains is used by simulated user --> won't act outside these domains
for dstring in self.possible_domains:
self._checkDomainString(dstring)
self._bootup(dstring)
def _bootup(self, dstring):
self.ontologyManagers[dstring] = self._load_domains_ontology(dstring)
def _checkDomainString(self, dstring):
if dstring not in self.ontologyManagers:
logger.error("Sorry, "+dstring+" is not an available domain string. See OntologyUtils.available_domains")
def ensure_domain_ontology_loaded(self, domainString):
'''
'''
if domainString is None or domainString in self.SPECIAL_DOMAINS:
return
else:
try:
if self.ontologyManagers[domainString] is None:
self._bootup(domainString)
else:
return # already loaded
except AttributeError as e:
print e
logger.error("Domain string {} is not valid".format(domainString))
except KeyError as e:
print e
logger.error("Domain string {} is not valid".format(domainString))
except Exception as e:
print e # some other error
def updateBinarySlots(self, dstring):
if 'binary' in self.ontologyManagers[dstring].ontology:
OntologyUtils.BINARY_SLOTS[dstring] = self.ontologyManagers[dstring].ontology['binary']
else:
OntologyUtils.BINARY_SLOTS[dstring] = []
#------------------------------------------------------------------------------------------------------------
# Wrappers for domain access to ontologies/database methods and info. NB: No checks on valid domain strings
#------------------------------------------------------------------------------------------------------------
def entity_by_features(self, dstring, constraints):
if dstring in self.ontologyManagers and self.ontologyManagers[dstring] is not None:
return self.ontologyManagers[dstring].db.entity_by_features(constraints=constraints)
return {}
def get_length_entity_by_features(self, dstring, constraints):
if dstring in self.ontologyManagers and self.ontologyManagers[dstring] is not None:
return self.ontologyManagers[dstring].get_length_entity_by_features(constraints=constraints)
return 0
def getSlotsToExpress(self, dstring, slot, value):
return self.ontologyManagers[dstring].getSlotsToExpress(slot=slot, value=value)
def getValidSlotsForTask(self, dstring):
return self.ontologyManagers[dstring].getValidSlotsForTask()
def getRandomValueForSlot(self, dstring, slot, nodontcare=False, notthese=[], conditional_values=[]):
'''
Randomly select a value for the given slot.
'''
return self.ontologyManagers[dstring].getRandomValueForSlot(slot=slot,
nodontcare=nodontcare,
notthese=notthese,
conditional_values=conditional_values)
def getValidRequestSlotsForTask(self, dstring):
return self.ontologyManagers[dstring].getValidRequestSlotsForTask()
def is_value_in_slot(self, dstring, value, slot):
'''Check whether value is one of the informable values for slot in the given domain.
'''
try:
if value in self.ontologyManagers[dstring].ontology['informable'][slot]:
return True
else:
return False
except:
return False
def get_sample_prob(self, dstring, candidate, conditional_values):
return self.ontologyManagers[dstring].get_sample_prob(candidate=candidate, conditional_values=conditional_values)
def is_only_user_requestable(self, dstring, slot):
try:
logic1 = slot in self.ontologyManagers[dstring].ontology['requestable']
logic2 = slot not in self.ontologyManagers[dstring].ontology['system_requestable']
if logic1 and logic2:
return True
else:
return False
except:
return False
def is_system_requestable(self, dstring, slot):
try:
if slot in self.ontologyManagers[dstring].ontology['system_requestable']:
return True
else:
return False
except:
return False
def is_valid_request(self, dstring, request_type, slot):
return self.ontologyManagers[dstring].is_valid_request(request_type=request_type, slot=slot)
def is_implied(self, dstring, slot, value):
return self.ontologyManagers[dstring].is_implied(slot, value)
# GET LENGTHS:
#------------------------------------------------------------------------------------
def get_len_informable_slot(self, dstring, slot):
return len(self.ontologyManagers[dstring].ontology['informable'][slot])
def get_length_system_requestable_slots(self, dstring):
return len(self.ontologyManagers[dstring].ontology['system_requestable'])
# for things subsequently manipulated - use copy.copy() with gets
#------------------------------------------------------------------------------------
def get_requestable_slots(self, dstring):
requestable = []
if dstring in self.ontologyManagers and self.ontologyManagers[dstring] is not None:
requestable = copy.copy(self.ontologyManagers[dstring].ontology['requestable'])
return requestable
def get_system_requestable_slots(self, dstring):
requestable = []
if dstring in self.ontologyManagers and self.ontologyManagers[dstring] is not None:
requestable = copy.copy(self.ontologyManagers[dstring].ontology['system_requestable'])
return requestable
def get_type(self, dstring):
return self.ontologyManagers[dstring].ontology["type"] #can return a string - no problem
def get_informable_slot_values(self, dstring, slot):
try:
return copy.copy(self.ontologyManagers[dstring].ontology["informable"][slot])
except:
logger.error("Likely due to slot being invalid")
def get_informable_slots_and_values(self, dstring):
'''NOTE: not using copy.copy() since when used, it is only looped over, not modified
'''
slotsValues = {}
if dstring in self.ontologyManagers and self.ontologyManagers[dstring] is not None:
slotsValues = self.ontologyManagers[dstring].ontology["informable"]
return slotsValues
def get_informable_slots(self, dstring):
'''NB no copy
'''
informable = []
if dstring in self.ontologyManagers and self.ontologyManagers[dstring] is not None:
informable = self.ontologyManagers[dstring].informable_slots
return informable
def get_random_slot_name(self, dstring):
return Settings.random.choice(self.ontologyManagers[dstring].ontology['requestable'])
def get_ontology(self, dstring):
'''Note: not using copy.copy() -- object assumed not to change
'''
if dstring in self.ontologyManagers and self.ontologyManagers[dstring] is not None:
return self.ontologyManagers[dstring].ontology
return None
def get_method(self, dstring):
'''NB no copy
'''
method = []
if dstring in self.ontologyManagers and self.ontologyManagers[dstring] is not None:
method = self.ontologyManagers[dstring].ontology['method']
return method
def get_discourseAct(self, dstring):
'''NB no copy
'''
acts = []
if dstring in self.ontologyManagers and self.ontologyManagers[dstring] is not None:
acts = self.ontologyManagers[dstring].ontology['discourseAct']
if 'none' not in acts:
acts.append('none')
if 'bye' not in acts:
acts.append('bye')
return acts
def get_sorted_system_requestable_slots(self, dstring, mode='entity'):
'''NB no copy
'''
if mode not in ['entity']: # SAFETY CHECK
logger.warning('Mode %s is not valid ' % mode)
mode = 'entity'
if mode == 'entity':
return self.ontologyManagers[dstring].sorted_system_requestable_slots
else:
logger.error('Mode %s is not valid' % mode)
def constraintsCanBeDiscriminated(self, domainString, constraints):
'''
Checks if the given constraints list returns a list of values which can be
discriminated between - i.e. there is a question which we could ask which
would give differences between the values.
'''
if self.ontologyManagers[domainString] is not None:
return self.ontologyManagers[domainString].constraintsCanBeDiscriminated(constraints=constraints)
return False
def _load_domains_ontology(self, domainString):
'''
Loads and instantiates the respective ontology object as configured in the config file. The caller
(_bootup) adds the returned object to the internal dictionary.
Default is FlatDomainOntology.
.. Note:
To dynamically load a class, the __init__() must take one argument: domainString.
:param domainString: the domain the ontology will be loaded for.
:type domainString: str
:returns: the ontology object for the domain
'''
ontologyClass = None
if Settings.config.has_option('ontology_' + domainString, 'handler'):
ontologyClass = Settings.config.get('ontology_' + domainString, 'handler')
if ontologyClass is None:
return FlatDomainOntology(domainString)
else:
try:
# try to view the config string as a complete module path to the class to be instantiated
components = ontologyClass.split('.')
packageString = '.'.join(components[:-1])
classString = components[-1]
mod = __import__(packageString, fromlist=[classString])
klass = getattr(mod, classString)
return klass(domainString)
except ImportError:
logger.error('Unknown domain ontology class "{}" for domain "{}"'.format(ontologyClass, domainString))
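# A minimal config sketch for the dynamic handler loading above (assumption:
# standard ConfigParser syntax as read by Settings.config; the module, class
# and domain names here are illustrative, not part of this file):
#
#     [ontology_CamRestaurants]
#     handler = mypackage.myontology.MyDomainOntology
#
# MyDomainOntology.__init__() must accept a single domainString argument, as
# noted in the docstring above; without a handler option, FlatDomainOntology
# is used.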
#END OF FILE
| 41.615114
| 134
| 0.604586
|
b1b4fa2c292ca7c2a903b8f350591e9911ac3c60
| 3,038
|
py
|
Python
|
packages/hagrid/hagrid/launch.py
|
vishalbelsare/PySyft
|
fb04404fcfbef82fad1fb47407b35a24e9afb599
|
[
"Apache-1.1"
] | 8,428
|
2017-08-10T09:17:49.000Z
|
2022-03-31T08:20:14.000Z
|
packages/hagrid/hagrid/launch.py
|
vishalbelsare/PySyft
|
fb04404fcfbef82fad1fb47407b35a24e9afb599
|
[
"Apache-1.1"
] | 4,779
|
2017-08-09T23:19:00.000Z
|
2022-03-29T11:49:36.000Z
|
packages/hagrid/hagrid/launch.py
|
vishalbelsare/PySyft
|
fb04404fcfbef82fad1fb47407b35a24e9afb599
|
[
"Apache-1.1"
] | 2,307
|
2017-08-10T08:52:12.000Z
|
2022-03-30T05:36:07.000Z
|
# stdlib
from typing import Dict as TypeDict
from typing import List as TypeList
from typing import Optional
# relative
from .cache import DEFAULT_BRANCH
from .grammar import GrammarTerm
from .grammar import GrammarVerb
from .grammar import HostGrammarTerm
from .grammar import SourceGrammarTerm
from .names import random_name
def get_launch_verb() -> GrammarVerb:
full_sentence = [
{
"name": "node_name",
"type": "adjective",
"klass": GrammarTerm,
"default": random_name,
"example": "'my_domain'",
},
{
"name": "node_type",
"type": "object",
"klass": GrammarTerm,
"default": "domain",
"options": ["domain", "network"],
},
{
"name": "preposition",
"type": "preposition",
"klass": GrammarTerm,
"default": "to",
"options": ["to"],
},
{
"name": "host",
"type": "propernoun",
"klass": HostGrammarTerm,
"default": "docker",
"example": "docker:8081+",
},
{
"name": "preposition",
"type": "preposition",
"klass": GrammarTerm,
"default": "from",
"options": ["from"],
},
{
"name": "source",
"type": "propernoun",
"klass": SourceGrammarTerm,
"default": f"github.com/OpenMined/PySyft/tree/{DEFAULT_BRANCH}",
},
]
abbreviations: TypeDict[int, TypeList[Optional[str]]] = {
6: [
"adjective", # name
"object", # node_type
"preposition", # to
"propernoun", # host
"preposition", # from
"propernoun", # source
],
5: [
None, # name
"object", # node_type
"preposition", # to
"propernoun", # host
"preposition", # from
"propernoun", # source
],
4: [
"adjective", # name
"object", # node_type
"preposition", # to
"propernoun", # host
None, # ignore
None, # ignore
],
3: [
None, # ignore
"object", # node_type
"preposition", # to
"propernoun", # host
None, # ignore
None, # ignore
],
2: [
"adjective", # name
"object", # node_type
None, # ignore
None, # ignore
None, # ignore
None, # ignore
],
1: [
None, # ignore
"object", # node_type
None, # ignore
None, # ignore
None, # ignore
None, # ignore
],
}
return GrammarVerb(
command="launch",
full_sentence=full_sentence,
abbreviations=abbreviations,
)
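# Illustrative expansions of the grammar above (names are examples, not
# shipped defaults): with the 2-term abbreviation,
#
#     "launch my_domain domain"
#
# fills node_name="my_domain" and node_type="domain", while host falls back
# to "docker" and source to github.com/OpenMined/PySyft/tree/{DEFAULT_BRANCH};
# the 1-term form "launch domain" additionally draws node_name from
# random_name().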
| 26.189655
| 76
| 0.442067
|
974f9dc9c84b365919eb91c004ef46719babe476
| 7,035
|
py
|
Python
|
src/ui/Complaint.py
|
madhav-datt/kgp-hms
|
c948d1d3580cb23baccf24cc59fe116e0ce8e700
|
[
"MIT"
] | 3
|
2016-04-10T17:48:10.000Z
|
2016-04-24T04:44:39.000Z
|
src/ui/Complaint.py
|
madhav-datt/kgp-hms
|
c948d1d3580cb23baccf24cc59fe116e0ce8e700
|
[
"MIT"
] | null | null | null |
src/ui/Complaint.py
|
madhav-datt/kgp-hms
|
c948d1d3580cb23baccf24cc59fe116e0ce8e700
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'Complaint.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_complaintWindow(object):
def setupUi(self, complaintWindow):
complaintWindow.setObjectName(_fromUtf8("complaintWindow"))
complaintWindow.setWindowModality(QtCore.Qt.ApplicationModal)
complaintWindow.resize(384, 441)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(_fromUtf8("../../Desktop/logo2.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
complaintWindow.setWindowIcon(icon)
self.formLayout = QtGui.QFormLayout(complaintWindow)
self.formLayout.setFieldGrowthPolicy(QtGui.QFormLayout.AllNonFixedFieldsGrow)
self.formLayout.setVerticalSpacing(12)
self.formLayout.setObjectName(_fromUtf8("formLayout"))
self.verticalLayout = QtGui.QVBoxLayout()
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.formLayout.setLayout(0, QtGui.QFormLayout.LabelRole, self.verticalLayout)
self.label = QtGui.QLabel(complaintWindow)
font = QtGui.QFont()
font.setFamily(_fromUtf8("Cambria"))
font.setPointSize(14)
font.setBold(True)
font.setItalic(True)
font.setWeight(75)
self.label.setFont(font)
self.label.setObjectName(_fromUtf8("label"))
self.formLayout.setWidget(1, QtGui.QFormLayout.SpanningRole, self.label)
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.formLayout.setLayout(2, QtGui.QFormLayout.LabelRole, self.horizontalLayout)
self.label_2 = QtGui.QLabel(complaintWindow)
self.label_2.setObjectName(_fromUtf8("label_2"))
self.formLayout.setWidget(3, QtGui.QFormLayout.LabelRole, self.label_2)
self.lineEdit = QtGui.QLineEdit(complaintWindow)
self.lineEdit.setReadOnly(True)
self.lineEdit.setObjectName(_fromUtf8("lineEdit"))
self.formLayout.setWidget(3, QtGui.QFormLayout.FieldRole, self.lineEdit)
self.label_6 = QtGui.QLabel(complaintWindow)
self.label_6.setObjectName(_fromUtf8("label_6"))
self.formLayout.setWidget(4, QtGui.QFormLayout.LabelRole, self.label_6)
self.lineEdit_2 = QtGui.QLineEdit(complaintWindow)
self.lineEdit_2.setReadOnly(True)
self.lineEdit_2.setObjectName(_fromUtf8("lineEdit_2"))
self.formLayout.setWidget(4, QtGui.QFormLayout.FieldRole, self.lineEdit_2)
self.label_7 = QtGui.QLabel(complaintWindow)
self.label_7.setObjectName(_fromUtf8("label_7"))
self.formLayout.setWidget(5, QtGui.QFormLayout.LabelRole, self.label_7)
self.lineEdit_3 = QtGui.QLineEdit(complaintWindow)
self.lineEdit_3.setReadOnly(True)
self.lineEdit_3.setObjectName(_fromUtf8("lineEdit_3"))
self.formLayout.setWidget(5, QtGui.QFormLayout.FieldRole, self.lineEdit_3)
self.label_3 = QtGui.QLabel(complaintWindow)
self.label_3.setObjectName(_fromUtf8("label_3"))
self.formLayout.setWidget(6, QtGui.QFormLayout.LabelRole, self.label_3)
self.lineEdit_4 = QtGui.QLineEdit(complaintWindow)
self.lineEdit_4.setObjectName(_fromUtf8("lineEdit_4"))
self.formLayout.setWidget(6, QtGui.QFormLayout.FieldRole, self.lineEdit_4)
self.label_4 = QtGui.QLabel(complaintWindow)
self.label_4.setObjectName(_fromUtf8("label_4"))
self.formLayout.setWidget(7, QtGui.QFormLayout.LabelRole, self.label_4)
self.comboBox = QtGui.QComboBox(complaintWindow)
self.comboBox.setObjectName(_fromUtf8("comboBox"))
self.comboBox.addItem(_fromUtf8(""))
self.comboBox.addItem(_fromUtf8(""))
self.comboBox.addItem(_fromUtf8(""))
self.formLayout.setWidget(7, QtGui.QFormLayout.FieldRole, self.comboBox)
self.label_5 = QtGui.QLabel(complaintWindow)
self.label_5.setObjectName(_fromUtf8("label_5"))
self.formLayout.setWidget(8, QtGui.QFormLayout.LabelRole, self.label_5)
self.label_8 = QtGui.QLabel(complaintWindow)
self.label_8.setObjectName(_fromUtf8("label_8"))
self.formLayout.setWidget(9, QtGui.QFormLayout.LabelRole, self.label_8)
spacerItem = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.formLayout.setItem(10, QtGui.QFormLayout.LabelRole, spacerItem)
self.buttonBox = QtGui.QDialogButtonBox(complaintWindow)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Save)
self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
self.formLayout.setWidget(11, QtGui.QFormLayout.LabelRole, self.buttonBox)
self.plainTextEdit = QtGui.QPlainTextEdit(complaintWindow)
self.plainTextEdit.setObjectName(_fromUtf8("plainTextEdit"))
self.formLayout.setWidget(8, QtGui.QFormLayout.FieldRole, self.plainTextEdit)
self.plainTextEdit_2 = QtGui.QPlainTextEdit(complaintWindow)
self.plainTextEdit_2.setReadOnly(True)
self.plainTextEdit_2.setObjectName(_fromUtf8("plainTextEdit_2"))
self.formLayout.setWidget(9, QtGui.QFormLayout.FieldRole, self.plainTextEdit_2)
self.retranslateUi(complaintWindow)
QtCore.QMetaObject.connectSlotsByName(complaintWindow)
def retranslateUi(self, complaintWindow):
complaintWindow.setWindowTitle(_translate("complaintWindow", "HMS - Student Complaint", None))
self.label.setText(_translate("complaintWindow", "Student Complaint", None))
self.label_2.setText(_translate("complaintWindow", "Complaint ID ", None))
self.label_6.setText(_translate("complaintWindow", "Student ID", None))
self.label_7.setText(_translate("complaintWindow", "Concerned Hall", None))
self.label_3.setText(_translate("complaintWindow", "Complaint Subject", None))
self.label_4.setText(_translate("complaintWindow", "Complaint Type", None))
self.comboBox.setItemText(0, _translate("complaintWindow", "Repair", None))
self.comboBox.setItemText(1, _translate("complaintWindow", "Worker Complaint", None))
self.comboBox.setItemText(2, _translate("complaintWindow", "Other", None))
self.label_5.setText(_translate("complaintWindow", "Complaint Description", None))
self.label_8.setText(_translate("complaintWindow", "Action Taken Report (ATR)", None))
| 55.393701
| 112
| 0.725942
|
f982eb3e510966f66a5ff9cdd6ee970cbd9d8170
| 978
|
py
|
Python
|
jabba/test/test_include_graph/test_include_graph.py
|
puppetlabs/jabba
|
71c1d008ab497020fba6ffa12a600721eb3f5ef7
|
[
"MIT"
] | 1
|
2017-05-10T19:07:57.000Z
|
2017-05-10T19:07:57.000Z
|
jabba/test/test_include_graph/test_include_graph.py
|
OSLL/jenkins_job_builder_visualization
|
71c1d008ab497020fba6ffa12a600721eb3f5ef7
|
[
"MIT"
] | 39
|
2017-02-23T10:29:13.000Z
|
2017-04-18T16:19:38.000Z
|
jabba/test/test_include_graph/test_include_graph.py
|
isabella232/jabba
|
71c1d008ab497020fba6ffa12a600721eb3f5ef7
|
[
"MIT"
] | 1
|
2021-03-17T17:12:44.000Z
|
2021-03-17T17:12:44.000Z
|
import sys
import os
import unittest
sys.path.append("../")
sys.path.append("../../")
from jabba.yaml_unfolder import YamlUnfolder
from test.graph_test import GraphTest
class TestIncludeGraph(GraphTest):
def setUpYamlUnfolder(self, main_file):
export_name = self.yaml_root + main_file + self.ext
self.yaml_unfolder.include_graph.unfold_file(self.test_data + main_file)
self.yaml_unfolder.include_graph.render(export_name)
def setUp(self):
self.test_data = 'test/test_include_graph/test_data/'
self.test_refs = 'test/test_include_graph/test_refs/'
self.yaml_root = self.test_data
self.yaml_unfolder = YamlUnfolder(root=self.yaml_root)
self.yaml_unfolder.include_graph.active = True
self.ext = '_include'
def testExample1(self):
self.run_test_for_file('test.yml')
def testExample2(self):
self.run_test_for_file('example2-test.yml')
if __name__ == "__main__":
unittest.main()
| 25.736842
| 80
| 0.703476
|
5895bd602f559669b88fbc2f2287b007205819f1
| 2,076
|
py
|
Python
|
setup.py
|
thops/cue-sdk-python
|
ee14846958163b1c18268e44d0bf0a852514e564
|
[
"MIT"
] | null | null | null |
setup.py
|
thops/cue-sdk-python
|
ee14846958163b1c18268e44d0bf0a852514e564
|
[
"MIT"
] | null | null | null |
setup.py
|
thops/cue-sdk-python
|
ee14846958163b1c18268e44d0bf0a852514e564
|
[
"MIT"
] | null | null | null |
import os
import platform
import re
import sys
from setuptools import setup
def read_version(filename='cuesdk/version.py'):
"""Parse a __version__ number from a source file"""
with open(filename) as source:
text = source.read()
match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", text)
if not match:
msg = "Unable to find version number in {}".format(filename)
raise RuntimeError(msg)
version = match.group(1)
return version
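# Sketch of the expected input (illustrative): a cuesdk/version.py containing
# a line such as
#
#     __version__ = "1.0.0"
#
# makes read_version() return "1.0.0"; a file without that pattern raises
# RuntimeError.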
arch, exetype = platform.architecture()
system = platform.system().lower()
if system not in ['windows', 'darwin']:
msg = "{} system is not supported".format(system)
raise RuntimeError(msg)
def package_files(directory):
return [
os.path.join('..', path, filename)
for (path, directories, filenames) in os.walk(directory)
for filename in filenames
]
setup(
name="cuesdk",
version=read_version(),
packages=['cuesdk'],
package_data={
'cuesdk': package_files('cuesdk/bin'),
},
zip_safe=False,
author="Corsair Memory, Inc.",
license='MIT',
url="https://github.com/CorsairOfficial/cue-sdk-python",
description="Ctypes-based CUE SDK binding for Python",
long_description=open("README.md").read(),
long_description_content_type="text/markdown",
install_requires=[],
platforms=['win'],
python_requires='>=3.5',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: MIT License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: Implementation :: CPython',
'Topic :: Games/Entertainment',
'Topic :: System :: Hardware'
]
)
| 30.086957
| 72
| 0.615607
|
ab33cce512c0598dce7be18c8e4f24e750a1bdca
| 7,097
|
py
|
Python
|
django-olwidget/olwidget/forms.py
|
RobertoMaurizzi/olwidget
|
105738ad140f06a2a50407e9d309255a5e420aaf
|
[
"BSD-3-Clause"
] | null | null | null |
django-olwidget/olwidget/forms.py
|
RobertoMaurizzi/olwidget
|
105738ad140f06a2a50407e9d309255a5e420aaf
|
[
"BSD-3-Clause"
] | null | null | null |
django-olwidget/olwidget/forms.py
|
RobertoMaurizzi/olwidget
|
105738ad140f06a2a50407e9d309255a5e420aaf
|
[
"BSD-3-Clause"
] | null | null | null |
from django import forms
from django.contrib.gis.forms.fields import GeometryField
from olwidget.widgets import Map, BaseVectorLayer, EditableLayer
from olwidget.fields import MapField
from olwidget import utils
__all__ = ('MapModelForm', )
class BaseMapModelForm(forms.models.BaseModelForm):
"""
ModelForm type that uses olwidget maps for geometry fields. Multiple
fields can be edited in a single map -- to do this, specify a property
"maps" of the inner Meta class which lists fields and map options:
class MyMapModelForm(MapModelForm):
class Meta:
model = MyModel
maps = (
(('geom1', 'geom2'), {'layers': ['google.streets']}),
(('geom3',), None),
...
)
"""
def __init__(self, *args, **kwargs):
super(BaseMapModelForm, self).__init__(*args, **kwargs)
fix_initial_data(self.initial, self.initial_data_keymap)
def clean(self):
super(BaseMapModelForm, self).clean()
fix_cleaned_data(self.cleaned_data, self.initial_data_keymap)
return self.cleaned_data
class MapModelFormOptions(forms.models.ModelFormOptions):
def __init__(self, options=None):
super(MapModelFormOptions, self).__init__(options)
self.maps = getattr(options, 'maps', None)
if not self.maps:
self.maps = getattr(options, 'options', None)
self.default_field_class = getattr(options, 'default_field_class', None)
self.template = getattr(options, 'template', None)
class MapModelFormMetaclass(type):
"""
Metaclass for map-containing ModelForm widgets. The implementation is
mostly copied from django's ModelFormMetaclass, but we change the
hard-coded parent class name and add our map field processing parts.
"""
def __new__(mcs, name, bases, attrs):
formfield_callback = attrs.pop('formfield_callback',
lambda f, **kwargs: f.formfield(**kwargs))
try:
parents = [b for b in bases if issubclass(b, MapModelForm)]
except NameError:
# We are defining MapModelForm itself.
parents = None
declared_fields = forms.forms.get_declared_fields(bases, attrs, False)
new_class = super(MapModelFormMetaclass, mcs).__new__(mcs, name, bases,
attrs)
if not parents:
return new_class
if 'media' not in attrs:
new_class.media = forms.widgets.media_property(new_class)
opts = new_class._meta = MapModelFormOptions(
getattr(new_class, 'Meta', None))
if opts.model:
# If a model is defined, extract form fields from it.
fields = forms.models.fields_for_model(opts.model, opts.fields,
opts.exclude, opts.widgets,
formfield_callback)
# Override default model fields with any custom declared ones
# (plus, include all the other declared fields).
fields.update(declared_fields)
else:
fields = declared_fields
# Transform base fields by extracting types mentioned in 'maps'
initial_data_keymap = apply_maps_to_modelform_fields(
fields, opts.maps, default_field_class=opts.default_field_class,
default_template=opts.template)
new_class.initial_data_keymap = initial_data_keymap
new_class.declared_fields = declared_fields
new_class.base_fields = fields
return new_class
class MapModelForm(BaseMapModelForm):
__metaclass__ = MapModelFormMetaclass
def fix_initial_data(initial, initial_data_keymap):
"""
Take a dict like this as `initial`:
{ 'key1': 'val1', 'key2': 'val2', 'key3': 'val3'}
and a dict like this as `initial_data_keymap`:
{ 'newkey1': ['key1', 'key2'], 'newkey2': ['key3']}
and remap the initial dict to have this form:
{ 'newkey1': ['val1', 'val2'], 'newkey2': ['val3']}
Used for rearranging initial data in fields to match declared maps.
"""
if initial:
for dest, sources in initial_data_keymap.iteritems():
data = [initial.pop(s, None) for s in sources]
initial[dest] = data
return initial
def fix_cleaned_data(cleaned_data, initial_data_keymap):
for group, keys in initial_data_keymap.iteritems():
if cleaned_data.has_key(group):
vals = cleaned_data.pop(group)
if isinstance(vals, (list, tuple)):
for key, val in zip(keys, vals):
cleaned_data[key] = val
else:
cleaned_data[keys[0]] = vals
return cleaned_data
def apply_maps_to_modelform_fields(fields, maps, default_options=None,
default_template=None, default_field_class=None):
"""
Rearranges fields to match those defined in ``maps``. ``maps`` is a list
of [field_list, options_dict] pairs. For each pair, a new map field is
created that contains all the fields in ``field_list``.
"""
if default_field_class is None:
default_field_class = MapField
map_field_names = (name for name,field in fields.iteritems() if isinstance(field, (MapField, GeometryField)))
if not maps:
maps = [((name,),) for name in map_field_names]
elif isinstance(maps, dict):
maps = [[tuple(map_field_names), maps]]
default_options = utils.get_options(default_options)
initial_data_keymap = {}
for map_definition in maps:
field_list = map_definition[0]
if len(map_definition) > 1:
options = map_definition[1]
else:
options = {}
if len(map_definition) > 2:
template = map_definition[2]
else:
template = default_template
map_name = "_".join(field_list)
layer_fields = []
names = []
min_pos = 65535 # arbitrarily high number for field ordering
initial = []
for field_name in field_list:
min_pos = min(min_pos, fields.keys().index(field_name))
field = fields.pop(field_name)
initial.append(field_name)
if not isinstance(field.widget, (Map, BaseVectorLayer)):
field.widget = EditableLayer(
options=utils.options_for_field(field))
layer_fields.append(field)
names.append(field_name)
if isinstance(field, MapField):
map_field = field
else:
map_opts = {}
map_opts.update(default_options)
map_opts.update(options or {})
map_field = default_field_class(layer_fields, map_opts, layer_names=names,
label=", ".join(forms.forms.pretty_name(f) for f in field_list),
template=template)
#fields.insert(min_pos, map_name, map_field)
utils.OrderedDict_insertat(fields, min_pos, map_name, map_field)
initial_data_keymap[map_name] = initial
return initial_data_keymap
| 39.870787
| 113
| 0.629844
|
41ffdbade65f34def77b1e3a78d762dc407d0eaa
| 514
|
py
|
Python
|
countries/migrations/0003_auto_20210219_1422.py
|
baikov/dj-cc-dep
|
5ae66b1a638c040e6497d56c2a9299fac335d608
|
[
"MIT"
] | null | null | null |
countries/migrations/0003_auto_20210219_1422.py
|
baikov/dj-cc-dep
|
5ae66b1a638c040e6497d56c2a9299fac335d608
|
[
"MIT"
] | 3
|
2021-11-02T21:08:31.000Z
|
2022-02-04T21:07:13.000Z
|
countries/migrations/0003_auto_20210219_1422.py
|
baikov/dj-cc-dep
|
5ae66b1a638c040e6497d56c2a9299fac335d608
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.12 on 2021-02-19 11:22
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('countries', '0002_auto_20210217_2124'),
]
operations = [
migrations.AlterField(
model_name='flag',
name='colors',
field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=200), blank=True, null=True, size=None),
),
]
| 25.7
| 139
| 0.657588
|
7d40e74792ed336875b7ff44ba1a92e1d573684a
| 9,232
|
py
|
Python
|
lark/visitors.py
|
timofurrer/lark
|
f3714a572f047c5857a2b3ab8d8a161e142f20bf
|
[
"MIT"
] | 3
|
2021-08-12T15:29:35.000Z
|
2022-02-24T21:59:10.000Z
|
lark/visitors.py
|
timofurrer/lark
|
f3714a572f047c5857a2b3ab8d8a161e142f20bf
|
[
"MIT"
] | null | null | null |
lark/visitors.py
|
timofurrer/lark
|
f3714a572f047c5857a2b3ab8d8a161e142f20bf
|
[
"MIT"
] | 1
|
2021-03-14T18:30:11.000Z
|
2021-03-14T18:30:11.000Z
|
from functools import wraps
from .utils import smart_decorator
from .tree import Tree
from .exceptions import VisitError, GrammarError
from .lexer import Token
###{standalone
from inspect import getmembers, getmro
class Discard(Exception):
pass
# Transformers
class Transformer:
"""Visits the tree recursively, starting with the leaves and finally the root (bottom-up)
Calls its methods (provided by user via inheritance) according to tree.data
The returned value replaces the old one in the structure.
Can be used to implement map or reduce.
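A usage sketch (the grammar rule name ``add`` and the parse tree are
illustrative, not defined in this module)::
    class CalcTransformer(Transformer):
        def add(self, children):          # called once per ``add`` node
            return children[0] + children[1]
    result = CalcTransformer().transform(parse_tree)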
"""
__visit_tokens__ = False # For backwards compatibility
def __init__(self, visit_tokens=False):
self.__visit_tokens__ = visit_tokens
def _call_userfunc(self, tree, new_children=None):
# Assumes tree is already transformed
children = new_children if new_children is not None else tree.children
try:
f = getattr(self, tree.data)
except AttributeError:
return self.__default__(tree.data, children, tree.meta)
else:
try:
if getattr(f, 'meta', False):
return f(children, tree.meta)
elif getattr(f, 'inline', False):
return f(*children)
elif getattr(f, 'whole_tree', False):
if new_children is not None:
tree.children = new_children
return f(tree)
else:
return f(children)
except (GrammarError, Discard):
raise
except Exception as e:
raise VisitError(tree, e)
def _call_userfunc_token(self, token):
try:
f = getattr(self, token.type)
except AttributeError:
return self.__default_token__(token)
else:
try:
return f(token)
except (GrammarError, Discard):
raise
except Exception as e:
raise VisitError(token, e)
def _transform_children(self, children):
for c in children:
try:
if isinstance(c, Tree):
yield self._transform_tree(c)
elif self.__visit_tokens__ and isinstance(c, Token):
yield self._call_userfunc_token(c)
else:
yield c
except Discard:
pass
def _transform_tree(self, tree):
children = list(self._transform_children(tree.children))
return self._call_userfunc(tree, children)
def transform(self, tree):
return self._transform_tree(tree)
def __mul__(self, other):
return TransformerChain(self, other)
def __default__(self, data, children, meta):
"Default operation on tree (for override)"
return Tree(data, children, meta)
def __default_token__(self, token):
"Default operation on token (for override)"
return token
@classmethod
def _apply_decorator(cls, decorator, **kwargs):
mro = getmro(cls)
assert mro[0] is cls
libmembers = {name for _cls in mro[1:] for name, _ in getmembers(_cls)}
for name, value in getmembers(cls):
# Make sure the function isn't inherited (unless it's overwritten)
if name.startswith('_') or (name in libmembers and name not in cls.__dict__):
continue
if not callable(cls.__dict__[name]):
continue
# Skip if v_args already applied (at the function level)
if hasattr(cls.__dict__[name], 'vargs_applied'):
continue
static = isinstance(cls.__dict__[name], (staticmethod, classmethod))
setattr(cls, name, decorator(value, static=static, **kwargs))
return cls
class InlineTransformer(Transformer): # XXX Deprecated
def _call_userfunc(self, tree, new_children=None):
# Assumes tree is already transformed
children = new_children if new_children is not None else tree.children
try:
f = getattr(self, tree.data)
except AttributeError:
return self.__default__(tree.data, children, tree.meta)
else:
return f(*children)
class TransformerChain(object):
def __init__(self, *transformers):
self.transformers = transformers
def transform(self, tree):
for t in self.transformers:
tree = t.transform(tree)
return tree
def __mul__(self, other):
return TransformerChain(*self.transformers + (other,))
class Transformer_InPlace(Transformer):
"Non-recursive. Changes the tree in-place instead of returning new instances"
def _transform_tree(self, tree): # Cancel recursion
return self._call_userfunc(tree)
def transform(self, tree):
for subtree in tree.iter_subtrees():
subtree.children = list(self._transform_children(subtree.children))
return self._transform_tree(tree)
class Transformer_InPlaceRecursive(Transformer):
"Recursive. Changes the tree in-place instead of returning new instances"
def _transform_tree(self, tree):
tree.children = list(self._transform_children(tree.children))
return self._call_userfunc(tree)
# Visitors
class VisitorBase:
def _call_userfunc(self, tree):
return getattr(self, tree.data, self.__default__)(tree)
def __default__(self, tree):
"Default operation on tree (for override)"
return tree
class Visitor(VisitorBase):
"""Bottom-up visitor, non-recursive
Visits the tree, starting with the leaves and finally the root (bottom-up)
Calls its methods (provided by user via inheritance) according to tree.data
"""
def visit(self, tree):
for subtree in tree.iter_subtrees():
self._call_userfunc(subtree)
return tree
class Visitor_Recursive(VisitorBase):
"""Bottom-up visitor, recursive
Visits the tree, starting with the leaves and finally the root (bottom-up)
Calls its methods (provided by user via inheritance) according to tree.data
"""
def visit(self, tree):
for child in tree.children:
if isinstance(child, Tree):
self.visit(child)
f = getattr(self, tree.data, self.__default__)
f(tree)
return tree
def visit_children_decor(func):
"See Interpreter"
@wraps(func)
def inner(cls, tree):
values = cls.visit_children(tree)
return func(cls, values)
return inner
class Interpreter:
"""Top-down visitor, recursive
Visits the tree, starting with the root and finally the leaves (top-down)
Calls its methods (provided by user via inheritance) according to tree.data
Unlike Transformer and Visitor, the Interpreter doesn't automatically visit its sub-branches.
The user has to explicitly call visit_children, or use the @visit_children_decor
"""
def visit(self, tree):
return getattr(self, tree.data)(tree)
def visit_children(self, tree):
return [self.visit(child) if isinstance(child, Tree) else child
for child in tree.children]
def __getattr__(self, name):
return self.__default__
def __default__(self, tree):
return self.visit_children(tree)
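# Usage sketch for Interpreter (rule name illustrative): sub-branches are
# visited only when explicitly requested, which allows short-circuiting.
#
#     class IfInterpreter(Interpreter):
#         def if_stmt(self, tree):
#             cond, body = tree.children
#             if self.visit(cond):          # body is skipped when cond is falsy
#                 return self.visit(body)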
# Decorators
def _apply_decorator(obj, decorator, **kwargs):
try:
_apply = obj._apply_decorator
except AttributeError:
return decorator(obj, **kwargs)
else:
return _apply(decorator, **kwargs)
def _inline_args__func(func):
@wraps(func)
def create_decorator(_f, with_self):
if with_self:
def f(self, children):
return _f(self, *children)
else:
def f(self, children):
return _f(*children)
return f
return smart_decorator(func, create_decorator)
def inline_args(obj): # XXX Deprecated
return _apply_decorator(obj, _inline_args__func)
def _visitor_args_func_dec(func, inline=False, meta=False, whole_tree=False, static=False):
assert [whole_tree, meta, inline].count(True) <= 1
def create_decorator(_f, with_self):
if with_self:
def f(self, *args, **kwargs):
return _f(self, *args, **kwargs)
else:
def f(self, *args, **kwargs):
return _f(*args, **kwargs)
return f
if static:
f = wraps(func)(create_decorator(func, False))
else:
f = smart_decorator(func, create_decorator)
f.vargs_applied = True
f.inline = inline
f.meta = meta
f.whole_tree = whole_tree
return f
def v_args(inline=False, meta=False, tree=False):
"A convenience decorator factory, for modifying the behavior of user-supplied visitor methods"
if [tree, meta, inline].count(True) > 1:
raise ValueError("Visitor functions can either accept tree, or meta, or be inlined. These cannot be combined.")
def _visitor_args_dec(obj):
return _apply_decorator(obj, _visitor_args_func_dec, inline=inline, meta=meta, whole_tree=tree)
return _visitor_args_dec
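# Usage sketch for v_args (class and rule names illustrative): inline=True
# passes a node's children as positional arguments to the user methods.
#
#     @v_args(inline=True)
#     class Calc(Transformer):
#         def add(self, a, b):
#             return a + b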
###}
| 30.468647
| 119
| 0.633124
|
535094af573812332a50dd39792aa0b452af27d5
| 2,106
|
py
|
Python
|
Week 2/src/blosum.py
|
klxu03/SEAP2020
|
0bcebd4c972be5e6fafcada5eb1f19e8bee671ae
|
[
"MIT"
] | null | null | null |
Week 2/src/blosum.py
|
klxu03/SEAP2020
|
0bcebd4c972be5e6fafcada5eb1f19e8bee671ae
|
[
"MIT"
] | 1
|
2021-08-23T20:48:21.000Z
|
2021-08-23T20:48:21.000Z
|
Week 2/src/blosum.py
|
klxu03/SEAP2020
|
0bcebd4c972be5e6fafcada5eb1f19e8bee671ae
|
[
"MIT"
] | 1
|
2020-09-15T01:26:09.000Z
|
2020-09-15T01:26:09.000Z
|
import numpy as np
from pathlib import Path
import pandas as pd
class BLOSUM:
def __init__(self, filepath):
# Import the file path (have to prepend a '.' because this module runs from /~/src instead of /~)
self.filepath = '.' + filepath
""" Read in the BLOSUM scoring matrix file """
base_path = Path(__file__).parent
file_path = (base_path / self.filepath).resolve()
letters = "A R N D C Q E G H I L K M F P S T W Y V B Z X *".split()
# letter_compression[index] == letter
# letter_compression[0] == 'A'
letter_compression = {}
for num, letter in enumerate(letters, start=0):
letter_compression[num] = letter
# Matrix is the Panda Dataframe Showing Blosum
numpy_matrix = np.zeros((24, 24))
# blosum_dict[x_letter, y_letter] == Value
# For example, blosum_dict['A', 'R'] will be -1
blosum_dict = {}
with open(file_path, 'r') as f:
for x, line in enumerate(f):
input = line.split()
# print('input', input)
y = 0
if x != 0 and x < 25:
x_letter = letter_compression[x - 1]
for character in input:
if y != 0:
y_letter = letter_compression[y - 1]
el = int(character)
numpy_matrix[x - 1][y - 1] = el
blosum_dict[x_letter, y_letter] = el
y += 1
panda_matrix = pd.DataFrame(data=numpy_matrix[0:, 0:],
index = [i for i in letters],
columns = [i for i in letters])
print(panda_matrix)
# Setting everything to self so that other functions can access these variables
self.blosum_dict = blosum_dict
self.panda_matrix = panda_matrix
def get_value(self, letter1, letter2):
return self.blosum_dict[letter1, letter2]
def get_panda_matrix(self):
return self.panda_matrix
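# Minimal usage sketch (assumptions: a standard BLOSUM62 text matrix lives at
# the given path relative to this module's parent; the path and the score
# shown are illustrative):
#
#     blosum = BLOSUM('/data/blosum62.txt')
#     blosum.get_value('A', 'R')                  # -1 in BLOSUM62
#     blosum.get_panda_matrix().loc['A', 'R']     # same score via the DataFrame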
| 36.310345
| 98
| 0.525641
|
2facdc7117e82bfdc67162f67e208c859353369b
| 391
|
py
|
Python
|
desafio/wsgi.py
|
felipemaia02/desafio4linux
|
d4ebcfbd767a506b7babba3206b4a370bef35449
|
[
"MIT"
] | null | null | null |
desafio/wsgi.py
|
felipemaia02/desafio4linux
|
d4ebcfbd767a506b7babba3206b4a370bef35449
|
[
"MIT"
] | null | null | null |
desafio/wsgi.py
|
felipemaia02/desafio4linux
|
d4ebcfbd767a506b7babba3206b4a370bef35449
|
[
"MIT"
] | null | null | null |
"""
WSGI config for desafio project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'desafio.settings')
application = get_wsgi_application()
| 23
| 78
| 0.785166
|
a660b4592169f0d0487670525afbcec727ee5522
| 921
|
py
|
Python
|
ggde/patches.py
|
gg-de/backend
|
42208526333c4b31925b03a90600a9005fad3087
|
[
"Apache-2.0"
] | 1
|
2021-01-20T21:12:25.000Z
|
2021-01-20T21:12:25.000Z
|
ggde/patches.py
|
gg-de/backend
|
42208526333c4b31925b03a90600a9005fad3087
|
[
"Apache-2.0"
] | null | null | null |
ggde/patches.py
|
gg-de/backend
|
42208526333c4b31925b03a90600a9005fad3087
|
[
"Apache-2.0"
] | null | null | null |
import logging
from functools import wraps
from django.http.response import JsonResponse
logger = logging.getLogger(__name__)
def patch_jsonresponse_disable_ensure_ascii():
if getattr(JsonResponse, '_utf8_patched', False):
# Already patched. Add warning in logs with stack to see what location
# is trying to patch this a second time.
logger.warning("JSONResponse UTF8 patch already applied", stack_info=True)
return
logger.debug("Patching JSONResponse to disable ensure_ascii")
orig_init = JsonResponse.__init__
@wraps(orig_init)
def utf8_init(self, *args, json_dumps_params=None, **kwargs):
json_dumps_params = {"ensure_ascii": False, **(json_dumps_params or {})}
orig_init(self, *args, json_dumps_params=json_dumps_params, **kwargs)
JsonResponse.__init__ = utf8_init
JsonResponse._utf8_patched = True # to prevent accidental re-patching
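# Usage sketch (assumption: called once at startup, e.g. from an
# AppConfig.ready() hook; the payload is illustrative):
#
#     patch_jsonresponse_disable_ensure_ascii()
#     JsonResponse({"msg": "héllo"}).content      # raw UTF-8 bytes, not \uXXXX escapes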
| 40.043478
| 82
| 0.739414
|
3c5827db92b2b627399e889d0279b31d80fcec30
| 1,506
|
py
|
Python
|
pcat2py/class/22243176-5cc5-11e4-af55-00155d01fe08.py
|
phnomcobra/PCAT2PY
|
937c3b365cdc5ac69b78f59070be0a21bdb53db0
|
[
"MIT"
] | null | null | null |
pcat2py/class/22243176-5cc5-11e4-af55-00155d01fe08.py
|
phnomcobra/PCAT2PY
|
937c3b365cdc5ac69b78f59070be0a21bdb53db0
|
[
"MIT"
] | null | null | null |
pcat2py/class/22243176-5cc5-11e4-af55-00155d01fe08.py
|
phnomcobra/PCAT2PY
|
937c3b365cdc5ac69b78f59070be0a21bdb53db0
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
################################################################################
# 22243176-5cc5-11e4-af55-00155d01fe08
#
# Justin Dierking
# justindierking@hardbitsolutions.com
# phnomcobra@gmail.com
#
# 10/24/2014 Original Construction
################################################################################
class Finding:
def __init__(self):
self.output = []
self.is_compliant = False
self.uuid = "22243176-5cc5-11e4-af55-00155d01fe08"
def check(self, cli):
# Initialize Compliance
self.is_compliant = False
# Get Registry DWORD
dword = cli.get_reg_dword(r'HKCU:\Software\Policies\Microsoft\Office\15.0\excel\security\fileblock', 'XL4Workbooks')
# Output Lines
self.output = [r'HKCU:\Software\Policies\Microsoft\Office\15.0\excel\security\fileblock', ('XL4Workbooks=' + str(dword))]
if dword == 2:
self.is_compliant = True
return self.is_compliant
def fix(self, cli):
cli.powershell(r"New-Item -path 'HKCU:\Software\Policies\Microsoft\Office\15.0\excel'")
cli.powershell(r"New-Item -path 'HKCU:\Software\Policies\Microsoft\Office\15.0\excel\security'")
cli.powershell(r"New-Item -path 'HKCU:\Software\Policies\Microsoft\Office\15.0\excel\security\fileblock'")
cli.powershell(r"Set-ItemProperty -path 'HKCU:\Software\Policies\Microsoft\Office\15.0\excel\security\fileblock' -name 'XL4Workbooks' -value 2 -Type DWord")
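# Hypothetical driver (assumption: `cli` is the session object these methods
# expect, exposing get_reg_dword() and powershell() as used above):
#
#     finding = Finding()
#     if not finding.check(cli):
#         finding.fix(cli)
#         finding.check(cli)    # re-check after remediation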
| 39.631579
| 164
| 0.610226
|
504550b7434360814c7d42c60552044d49d64bdd
| 9,052
|
py
|
Python
|
tests/xls2xform_tests.py
|
ukanga/pyxform
|
f4ce2ec7f90d3e197b9b5b58fecccabe31d213f8
|
[
"BSD-2-Clause"
] | 67
|
2015-02-02T17:49:25.000Z
|
2022-02-18T06:31:52.000Z
|
tests/xls2xform_tests.py
|
ukanga/pyxform
|
f4ce2ec7f90d3e197b9b5b58fecccabe31d213f8
|
[
"BSD-2-Clause"
] | 477
|
2015-01-14T15:48:44.000Z
|
2022-03-29T16:37:22.000Z
|
tests/xls2xform_tests.py
|
ukanga/pyxform
|
f4ce2ec7f90d3e197b9b5b58fecccabe31d213f8
|
[
"BSD-2-Clause"
] | 86
|
2015-01-26T13:16:26.000Z
|
2022-01-20T21:40:54.000Z
|
# -*- coding: utf-8 -*-
"""
Test xls2xform module.
"""
# The Django application xls2xform uses the function
# pyxform.create_survey. We have a test here to make sure no one
# breaks that function.
import argparse
import logging
from unittest import TestCase
import pyxform
from pyxform.xls2xform import (
_create_parser,
_validator_args_logic,
get_xml_path,
main_cli,
)
from tests.utils import path_to_text_fixture
try:
from unittest import mock
except ImportError:
import mock
class XLS2XFormTests(TestCase):
survey_package = {
"id_string": "test_2011_08_29b",
"name_of_main_section": "gps",
"sections": {
"gps": {
"children": [{"name": "location", "type": "gps"}],
"name": "gps",
"type": "survey",
}
},
"title": "test",
}
survey = pyxform.create_survey(**survey_package)
def test_create_parser_without_args(self):
"""Should exit when no args provided."""
with self.assertRaises(SystemExit):
_create_parser().parse_args([])
def test_create_parser_optional_output_path(self):
"""
Should run fine with a single argument, i.e. the path to the
xlsx file, while the output path is left out
"""
try:
_create_parser().parse_args(["/some/path/tofile.xlsx"])
except SystemExit:
self.fail()
def test_create_parser_with_args(self):
"""Should parse the provided arguments."""
arg_xlsform = "xlsform.xlsx"
arg_output = "."
arg_list = [
"--json",
"--skip_validate",
"--pretty_print",
arg_xlsform,
arg_output,
]
args = _create_parser().parse_args(arg_list)
self.assertEqual(arg_xlsform, args.path_to_XLSForm)
self.assertEqual(arg_output, args.output_path)
self.assertEqual(True, args.json)
self.assertEqual(False, args.skip_validate)
self.assertEqual(True, args.pretty_print)
def test_create_parser_file_name_with_space(self):
"""Should interpret the path correctly."""
arg_xlsform = "some/path/my xlsform.xlsx"
arg_output = "."
arg_list = [arg_xlsform, arg_output]
args = _create_parser().parse_args(arg_list)
self.assertEqual(arg_xlsform, args.path_to_XLSForm)
def test_create_parser_json_default_false(self):
"""Should have json=False if not specified."""
arg_xlsform = "xlsform.xlsx"
arg_output = "."
arg_list = [arg_xlsform, arg_output]
args = _create_parser().parse_args(arg_list)
self.assertEqual(False, args.json)
def test_create_parser_skip_validate_default_true(self):
"""Should have skip_validate=True if not specified."""
arg_xlsform = "xlsform.xlsx"
arg_output = "."
arg_list = [arg_xlsform, arg_output]
args = _create_parser().parse_args(arg_list)
self.assertEqual(True, args.skip_validate)
def test_create_parser_no_enketo_default_false(self):
"""Should have enketo_validate=False if not specified."""
arg_xlsform = "xlsform.xlsx"
arg_output = "."
arg_list = [arg_xlsform, arg_output]
args = _create_parser().parse_args(arg_list)
self.assertEqual(False, args.enketo_validate)
def test_create_parser_pretty_print_default_False(self):
"""Should have pretty_print=False if not specified."""
args = _create_parser().parse_args(["xlsform.xlsx", "."])
self.assertFalse(args.pretty_print)
def test_validator_args_logic_skip_validate_alone(self):
"""Should deactivate both validators."""
raw_args = _create_parser().parse_args(["xlsform.xlsx", ".", "--skip_validate"])
args = _validator_args_logic(args=raw_args)
self.assertEqual(False, args.odk_validate)
self.assertEqual(False, args.enketo_validate)
def test_validator_args_logic_odk_default(self):
"""Should activate ODK only."""
raw_args = _create_parser().parse_args(["xlsform.xlsx", "."])
args = _validator_args_logic(args=raw_args)
self.assertEqual(True, args.odk_validate)
self.assertEqual(False, args.enketo_validate)
def test_validator_args_logic_enketo_only(self):
"""Should activate Enketo only."""
raw_args = _create_parser().parse_args(["xlsform.xlsx", ".", "--enketo_validate"])
args = _validator_args_logic(args=raw_args)
self.assertEqual(False, args.odk_validate)
self.assertEqual(True, args.enketo_validate)
def test_validator_args_logic_odk_only(self):
"""Should activate ODK only."""
raw_args = _create_parser().parse_args(["xlsform.xlsx", ".", "--odk_validate"])
args = _validator_args_logic(args=raw_args)
self.assertEqual(True, args.odk_validate)
self.assertEqual(False, args.enketo_validate)
def test_validator_args_logic_odk_and_enketo(self):
"""Should activate ODK and Enketo."""
raw_args = _create_parser().parse_args(
["xlsform.xlsx", ".", "--odk_validate", "--enketo_validate"]
)
args = _validator_args_logic(args=raw_args)
self.assertEqual(True, args.odk_validate)
self.assertEqual(True, args.enketo_validate)
def test_validator_args_logic_skip_validate_override(self):
"""Should deactivate both validators"""
raw_args = _create_parser().parse_args(
[
"xlsform.xlsx",
".",
"--skip_validate",
"--odk_validate",
"--enketo_validate",
]
)
args = _validator_args_logic(args=raw_args)
self.assertEqual(False, args.odk_validate)
self.assertEqual(False, args.enketo_validate)
@mock.patch(
"argparse.ArgumentParser.parse_args",
return_value=argparse.Namespace(
path_to_XLSForm="xlsform.xlsx",
output_path=None,
json=False,
skip_validate=False,
odk_validate=False,
enketo_validate=False,
pretty_print=False,
),
)
@mock.patch("pyxform.xls2xform.xls2xform_convert")
def test_xls2form_convert_parameters(self, converter_mock, parser_mock_args):
"""
Checks that xls2xform_convert is given the right arguments, when the
output-path is not given
"""
converter_mock.return_value = "{}"
main_cli()
converter_mock.assert_called_once_with(
xlsform_path="xlsform.xlsx",
xform_path="xlsform.xml",
validate=False,
pretty_print=False,
enketo=False,
)
@mock.patch(
"argparse.ArgumentParser.parse_args",
return_value=argparse.Namespace(
path_to_XLSForm="xlsform.xlsx",
output_path=None,
json=True,
skip_validate=False,
odk_validate=False,
enketo_validate=False,
pretty_print=False,
),
)
@mock.patch("pyxform.xls2xform.xls2xform_convert")
def test_xls2xform_convert_params_with_flags(self, converter_mock, parser_mock_args):
"""
Should call xls2xform_convert with the correct output path when only
the xlsform input path and the json flag are provided; the conversion
is invoked the same way whether or not the json flag is set
"""
converter_mock.return_value = "{}"
main_cli()
converter_mock.assert_called_once_with(
xlsform_path="xlsform.xlsx",
xform_path="xlsform.xml",
validate=False,
pretty_print=False,
enketo=False,
)
@mock.patch(
"argparse.ArgumentParser.parse_args",
return_value=argparse.Namespace(
path_to_XLSForm=path_to_text_fixture("bad_calc.xlsx"),
output_path=None,
json=False,
skip_validate=True,
odk_validate=True,
enketo_validate=True,
pretty_print=True,
),
)
def test_xls2xform_convert_throwing_odk_error(self, parser_mock_args):
"""
Parse and validate bad_calc.xlsx
"""
logger = logging.getLogger("pyxform.xls2xform")
with mock.patch.object(logger, "error") as mock_debug:
main_cli()
self.assertEqual(mock_debug.call_count, 1)
def test_get_xml_path_function(self):
"""Should return an xml path in the same directory as the xlsx file"""
xlsx_path = "/home/user/Desktop/xlsform.xlsx"
expected = "/home/user/Desktop/xlsform.xml"
assert expected == get_xml_path(xlsx_path)
# check that it also handles spaced routes
xlsx_path = "/home/user/Desktop/my xlsform.xlsx"
expected = "/home/user/Desktop/my xlsform.xml"
assert expected == get_xml_path(xlsx_path)
| 35.920635
| 90
| 0.63091
|
16aea8e6fe520c5091cf1315e9f7406fd158e8fb
| 538
|
py
|
Python
|
vnpy/api/t2sdk/py_t2sdk/setup.py
|
jerryhe26/vnpy
|
d7607da780c57a2ab182688f4e52bc3a300acfda
|
[
"MIT"
] | 2
|
2020-04-17T03:10:17.000Z
|
2020-04-17T03:15:10.000Z
|
vnpy/api/t2sdk/py_t2sdk/setup.py
|
jerryhe26/vnpy
|
d7607da780c57a2ab182688f4e52bc3a300acfda
|
[
"MIT"
] | 1
|
2020-04-21T02:42:32.000Z
|
2020-04-21T02:42:32.000Z
|
vnpy/api/t2sdk/py_t2sdk/setup.py
|
jerryhe26/vnpy
|
d7607da780c57a2ab182688f4e52bc3a300acfda
|
[
"MIT"
] | 1
|
2021-02-19T07:25:22.000Z
|
2021-02-19T07:25:22.000Z
|
#from distutils.core import setup, Extension
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
from Cython.Build import cythonize
setup(
name='py_t2sdk',
version='1.0.0',
author='rdc@hundsun',
author_email='',
url='',
ext_modules = cythonize(Extension(
'py_t2sdk',
sources=['py_t2sdk.pyx','ufx_interface.cpp'],
language='c++',
include_dirs=[],
library_dirs=[],
libraries=['t2sdk'],
extra_compile_args=[],
extra_link_args=[]
)))
| 24.454545
| 49
| 0.697026
|
31478e79ea3305a4fb5de4a63763a9d240b66bcd
| 18,408
|
py
|
Python
|
domainbed/hparams_registry.py
|
alexrame/domainbedresearch
|
6255da9aedf4584115324f8cf3a45be6e9004602
|
[
"MIT"
] | 1
|
2022-03-15T16:30:14.000Z
|
2022-03-15T16:30:14.000Z
|
domainbed/hparams_registry.py
|
alexrame/domainbedresearch
|
6255da9aedf4584115324f8cf3a45be6e9004602
|
[
"MIT"
] | null | null | null |
domainbed/hparams_registry.py
|
alexrame/domainbedresearch
|
6255da9aedf4584115324f8cf3a45be6e9004602
|
[
"MIT"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import os
import numpy as np
from domainbed.lib import misc
def _define_hparam(hparams, hparam_name, default_val, random_val_fn):
hparams[hparam_name] = (hparams, hparam_name, default_val, random_val_fn)
def _hparams(algorithm, dataset, random_seed):
"""
Global registry of hyperparams. Each entry is a (default, random) tuple.
New algorithms / networks / etc. should add entries here.
"""
SMALL_IMAGES = [
'Debug28', 'RotatedMNIST', 'ColoredMNIST', 'CustomColoredMNIST', 'CustomGrayColoredMNIST'
]
MAX_EPOCH_5000 = dataset != 'CelebA_Blond'
hparams = {}
def _hparam(name, default_val, random_val_fn):
"""Define a hyperparameter. random_val_fn takes a RandomState and
returns a random hyperparameter value."""
# assert(name not in hparams)
if name in hparams:
print(
f"Warning: parameter {name} was overridden from {hparams[name]} to {default_val, random_val_fn}."
)
random_state = np.random.RandomState(misc.seed_hash(random_seed, name))
hparams[name] = (default_val, random_val_fn(random_state))
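# Illustrative call (a hypothetical entry, not registered below): running
#
#     _hparam('example_lr', 1e-3, lambda r: 10**r.uniform(-5, -3.5))
#
# stores hparams['example_lr'] = (1e-3, <draw from the per-name seeded
# RandomState>), i.e. the (default, random) tuple described in the docstring.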
# Unconditional hparam definitions.
_hparam('model', None, lambda r: None)
_hparam('data_augmentation', True, lambda r: True)
_hparam('resnet18', False, lambda r: False)
if os.environ.get("HP") == "D":
_hparam('resnet_dropout', 0., lambda r: 0.)
else:
_hparam('resnet_dropout', 0., lambda r: r.choice([0., 0.1, 0.5]))
_hparam('class_balanced', False, lambda r: False)
_hparam('unfreeze_resnet_bn', False, lambda r: False)
# TODO: nonlinear classifiers disabled
_hparam('nonlinear_classifier', False, lambda r: bool(r.choice([False])))
# Algorithm-specific hparam definitions. Each block of code below
# corresponds to exactly one algorithm.
if algorithm in ['DANN', 'CDANN']:
_hparam('beta1_d', 0.5, lambda r: r.choice([0., 0.5]))
_hparam('mlp_width', 256, lambda r: int(2**r.uniform(6, 10)))
_hparam('mlp_depth', 3, lambda r: int(r.choice([3, 4, 5])))
_hparam('mlp_dropout', 0., lambda r: r.choice([0., 0.1, 0.5]))
_hparam('weight_decay_d', 0., lambda r: 10**r.uniform(-6, -2))
elif algorithm == "Ensembling":
_hparam("num_members", 2, lambda r: 2)
_hparam("lambda_ib_firstorder", 0, lambda r: 0)
# for domain matching
_hparam('lambda_domain_matcher', 0, lambda r: 10**r.uniform(1, 4))
_hparam("similarity_loss", "none", lambda r: "none")
# for fishr
_hparam('ema', 0.95, lambda r: r.uniform(0.9, 0.99))
_hparam('method', "weight", lambda r: r.choice([""]))
elif algorithm in ["Fishr", "COREL"]:
if os.environ.get("LAMBDA") == "v15":
print("Lambda v15")
_hparam('lambda', 1000, lambda r: 10**r.uniform(1, 5))
else:
print("Lambda v14")
_hparam('lambda', 1000, lambda r: 10**r.uniform(1, 4))
if os.environ.get("MEAN"):
_hparam('lambdamean', 1000, lambda r: 10**r.uniform(1, 4))
else:
_hparam('lambdamean', 0, lambda r: 0)
if os.environ.get("WARMUP"):
_hparam(
'penalty_anneal_iters', 500,
lambda r: int(10**r.uniform(0, 4 if MAX_EPOCH_5000 else 3.5))
)
else:
_hparam(
'penalty_anneal_iters', 1500,
lambda r: int(r.uniform(0., 5000. if MAX_EPOCH_5000 else 2000))
)
_hparam('ema', 0.95, lambda r: r.uniform(0.9, 0.99))
_hparam('method', "", lambda r: r.choice([""]))
# elif algorithm in ['IRMAdv']:
# _hparam(
# 'mmd_lambda', 1000., lambda r: 10**r.uniform(1., 4.)
# ) # between 10 and \approx 10000
# if os.environ.get("COV") in ["v15"]:
# raise ValueError(os.environ.get("COV"))
# _hparam('mmd_lambda', 1000., lambda r: 10**r.uniform(1., 5.))
# if os.environ.get("MEAN") == "1":
# _hparam('mean_lambda', 1.0, lambda r: 10**r.uniform(1., 4.))
# else:
# _hparam('mean_lambda', 0.0, lambda r: 0.)
# _hparam(
# 'penalty_anneal_iters', 1500,
# lambda r: int(r.uniform(0., 5000. if MAX_EPOCH_5000 else 2000))
# )
# # to fix
# _hparam('beta1', 0.9, lambda r: r.choice([0.9])) # 0.5,
# _hparam('ema', 0.95, lambda r: r.uniform(0.90, 0.99)) # 0.9
# _hparam('strategy', 92., lambda r: r.choice([92.]))
# _hparam('penalty_method', 2., lambda r: r.choice([2.])) # 2
# _hparam('msd_lambda', 0.0, lambda r: 0)
# _hparam('strategy_mean', "l2mean", lambda r: "l2mean")
# _hparam('strategy_cov', "l2mean", lambda r: "l2mean")
# _hparam('grad_wrt', "loss", lambda r: "loss")
# # yet to be optimized
# _hparam('pd_lambda', 0.0, lambda r: 0.)
# _hparam('pd_penalty_anneal_iters', 0, lambda r: 0.)
# _hparam('beta1_d', 0.5, lambda r: 0.5)
# _hparam('mlp_width', 256, lambda r: 256)
# # _hparam('mlp_depth', 3, lambda r: 3)
# # _hparam('mlp_dropout', 0., lambda r: 0)
# _hparam('weight_decay_d', 0., lambda r: 0)
# _hparam('disc_lambda', 0.0, lambda r: 0.)
# _hparam('lr_d', 0.00, lambda r: 0)
elif algorithm == "VRExema":
_hparam('vrex_lambda', 1e1, lambda r: 10**r.uniform(-1, 5))
_hparam(
'vrex_penalty_anneal_iters', 500,
lambda r: int(10**r.uniform(0, 4. if MAX_EPOCH_5000 else 3.5))
)
_hparam('ema', 0.95, lambda r: r.uniform(0.90, 0.99))
if algorithm in ['DANN', 'CDANN']:
_hparam('lambda', 1.0, lambda r: 10**r.uniform(-2, 2))
_hparam('d_steps_per_g_step', 1, lambda r: int(2**r.uniform(0, 3)))
_hparam('grad_penalty', 0., lambda r: 10**r.uniform(-2, 1))
elif algorithm == 'Fish':
_hparam('meta_lr', 0.5, lambda r: r.choice([0.05, 0.1, 0.5]))
elif algorithm == "RSC":
_hparam('rsc_f_drop_factor', 1 / 3, lambda r: r.uniform(0, 0.5))
_hparam('rsc_b_drop_factor', 1 / 3, lambda r: r.uniform(0, 0.5))
elif algorithm == "SagNet":
_hparam('sag_w_adv', 0.1, lambda r: 10**r.uniform(-2, 1))
elif algorithm == "IRM":
_hparam('irm_lambda', 1e2, lambda r: 10**r.uniform(-1, 5))
_hparam(
'irm_penalty_anneal_iters', 500,
lambda r: int(10**r.uniform(0, 4. if MAX_EPOCH_5000 else 3.5))
)
elif algorithm == "Mixup":
_hparam('mixup_alpha', 0.2, lambda r: 10**r.uniform(-1, -1))
elif algorithm == "GroupDRO":
_hparam('groupdro_eta', 1e-2, lambda r: 10**r.uniform(-3, -1))
elif algorithm == "MMD" or algorithm == "CORAL":
_hparam('mmd_lambda', 1., lambda r: 10**r.uniform(-1, 1))
elif algorithm == "MLDG":
_hparam('mldg_beta', 1., lambda r: 10**r.uniform(-1, 1))
elif algorithm == "MTL":
_hparam('mtl_ema', .99, lambda r: r.choice([0.5, 0.9, 0.99, 1.]))
elif algorithm == "VREx":
_hparam('vrex_lambda', 1e1, lambda r: 10**r.uniform(-1, 5))
_hparam(
'vrex_penalty_anneal_iters', 500,
lambda r: int(10**r.uniform(0, 4. if MAX_EPOCH_5000 else 3.5))
)
elif algorithm == "SD":
_hparam('sd_reg', 0.1, lambda r: 10**r.uniform(-5, -1))
elif algorithm == "ANDMask":
_hparam('tau', 1, lambda r: r.uniform(0.5, 1.))
elif algorithm == "SANDMask":
_hparam('tau', 1.0, lambda r: r.uniform(0.0, 1.))
_hparam('k', 1e+1, lambda r: int(10**r.uniform(-3, 5)))
elif algorithm == "IGA":
_hparam('penalty', 1000, lambda r: 10**r.uniform(1, 5))
# elif algorithm == "LFF":
# _hparam("weight_q", 0.7, lambda r: r.choice([0.1, 0.5, 0.7, 1.0, 5.0]))
# _hparam("weak_gce", True, lambda r: bool(r.choice([True, False])))
# _hparam("reweighting", True, lambda r: True)
# elif algorithm == "KernelDiversity":
# _hparam("kernel", "ntk", lambda r: r.choice(["ntk", "teney"]))
# _hparam("kernel_on", "classifier", lambda r: "classifier")
# _hparam("similarity", "cos", lambda r: r.choice(["cos", "dot", "center-cos", "center-dot"]))
# _hparam(
# "similarity_result", "square", lambda r: r.choice(["square", "relu", "abs", "none"])
# )
# _hparam("similarity_weight", 1.0, lambda r: 10**r.uniform(-4, 4))
# _hparam("weight_q", 0.7, lambda r: r.choice([0.01, 0.1, 0.5, 0.7, 1.0, 10.0]))
# _hparam("detach_weak", False, lambda r: bool(r.choice([True, False])))
# _hparam("weak_gce", True, lambda r: bool(r.choice([True, False])))
# _hparam("reweighting", False, lambda r: False)
# elif algorithm == "EnsembleKernelDiversity":
# _hparam("num_classifiers", 3, lambda r: 3)
# _hparam("kernel", "ntk", lambda r: "ntk")
# _hparam("kernel_on", "classifier", lambda r: "classifier")
# _hparam(
# "similarity", "cos",
# lambda r: r.choice(["cos", "dot", "dot", "center-cos", "center-dot"])
# )
# _hparam(
# "similarity_result", "square", lambda r: r.choice(["square", "relu", "abs", "none"])
# )
# _hparam("similarity_weight", 1.0, lambda r: 10**r.uniform(-4, 4))
# _hparam("freeze_featurizer", False, lambda r: False)
# _hparam("loss", "cross-entropy", lambda r: "cross-entropy")
# _hparam("ntk_loss", "cross-entropy", lambda r: "cross-entropy")
# _hparam("similarity_schedule", "none", lambda r: "none")
# _hparam("similarity_schedule_param1", 0.0, lambda r: "none")
# _hparam("similarity_schedule_start_at", 0, lambda r: "none")
# _hparam("no_diversity_first_model", False, lambda r: False)
# _hparam("center_gradients", "none", lambda r: "none") # none, all, classes
# _hparam("normalize_gradients", False, lambda r: False) # none, all, classes
# _hparam("difference_gt_kernel", False, lambda r: False) # none, all, classes
# _hparam("spectral_decoupling", 0.0, lambda r: 0.0)
# elif algorithm == "TwoModelsCMNIST":
# _hparam("detach_shape_features", False, lambda r: True)
# _hparam("supervise_logits", True, lambda r: True)
# _hparam("weight_regular_loss", 1.0, lambda r: 1.0)
# _hparam("classifier1", "original", lambda r: "original")
# _hparam("classifier2", "shape", lambda r: "original")
# _hparam("supervise_kernels", False, lambda r: False)
# _hparam("supervise_kernel1", "original", lambda r: "original")
# _hparam("supervise_kernel2", "shape", lambda r: "shape")
# _hparam("weight_kernel_loss", 1.0, lambda r: 1.0)
# _hparam("normalize_gradients", True, lambda r: True)
# _hparam("center_gradients", "none", lambda r: "none") # none, all, classes
# _hparam("kernel_loss", "cos", lambda r: "cos") # none, all, classes
if algorithm in ['Fishr', 'ERM']:
_hparam('sam', 0, lambda r: r.choice([0]))
_hparam('samadapt', 0, lambda r: r.choice([0]))
# _hparam('phosam', 0.05, lambda r: r.choice([0.005, 0.01, 0.02, 0.05, 0.1]))
_hparam('phosam', 0.001, lambda r: r.choice([0.001, 0.002, 0.005, 0.01, 0.02, 0.05]))
_hparam('mavsamcoeff', 1., lambda r: 10**r.uniform(-1, 2))
if algorithm in ['Fishr', 'ERM', "Fish", "Ensembling"]:
_hparam('mav', 0, lambda r: r.choice([0]))
if algorithm in ["SWA"]:
_hparam('mav', 0, lambda r: r.choice([1]))
if algorithm in ["Ensembling", "SWA"]:
if os.environ.get("HP") == "D":
_hparam(
'penalty_anneal_iters', 1500, lambda r: 1500)
else:
_hparam(
'penalty_anneal_iters', 1500, lambda r: int(r.uniform(0., 5000. if MAX_EPOCH_5000 else 2000))
)
_hparam("diversity_loss", "none", lambda r: "none")
# for sampling diversity
if os.environ.get("DIV") == "1":
_hparam('div_eta', 0, lambda r: 10**r.uniform(-5, -2))
elif os.environ.get("DIV") == "2":
_hparam('div_eta', 0., lambda r: 10**r.uniform(-5, 0.))
else:
# for features diversity
_hparam("conditional_d", False, lambda r: r.choice([False]))
_hparam('clamping_value', 10, lambda r: r.choice([10]))
_hparam('hidden_size', 64, lambda r: 64) # 2**int(r.uniform(5., 7.)))
_hparam('num_hidden_layers', 2., lambda r: r.choice([2]))
_hparam('ib_space', "features", lambda r: r.choice(["features"]))
_hparam('sampling_negative', "", lambda r: r.choice([""])) # "domain"
_hparam("lambda_diversity_loss", 0.0, lambda r: 10**r.uniform(-3, -1))
_hparam('weight_decay_d', 0.0005, lambda r: 0.0005)
_hparam('reparameterization_var', 0.1, lambda r: 10**r.uniform(-3, 0))
if dataset in SMALL_IMAGES:
_hparam('lr_d', 0.0005, lambda r: 10**r.uniform(-4.5, -2.5))
else:
_hparam('lr_d', 0.0005, lambda r: 10**r.uniform(-4.5, -3.))
# Dataset-and-algorithm-specific hparam definitions. Each block of code
# below corresponds to exactly one hparam. Avoid nested conditionals.
# learning rate
if os.environ.get("HP") == "D":
_hparam('lr', 5e-5, lambda r: 5e-5)
elif os.environ.get("HP") == "1":
_hparam('lr', 5e-5, lambda r: r.choice([1e-5, 3e-5, 5e-5]))
elif dataset == "Spirals":
_hparam('lr', 0.01, lambda r: 10**r.uniform(-3.5, -1.5))
elif dataset in SMALL_IMAGES:
_hparam('lr', 1e-3, lambda r: 10**r.uniform(-4.5, -2.5))
elif algorithm == "LFF" and dataset == "ColoredMNISTLFF":
# if algorithm in ['IRMAdv', "FisherMMD"]:
# _hparam('lr', 1e-3, lambda r: 10**r.uniform(-3.5, -2.))
# else:
_hparam('lr', 1e-3, lambda r: 10**r.uniform(-4.5, -2.5))
elif dataset == "ColoredMNISTLFF":
_hparam('lr', 1e-3, lambda r: 10**r.uniform(-4.5, -2.5))
# elif dataset == "ColoredMNISTLFF":
# _hparam('lr', 1e-3, lambda r: 10**r.uniform(-5, -2))
elif dataset == "BAR":
_hparam("lr", 0.0001, lambda r: 0.0001)
elif dataset == "Collage":
_hparam("lr", 0.001, lambda r: 0.001)
elif dataset == "TwoDirections2D":
_hparam('lr', 1e-3, lambda r: 10**r.uniform(-4.5, -2.5))
else:
_hparam('lr', 5e-5, lambda r: 10**r.uniform(-5, -3.5))
if os.environ.get("LRD"):
_hparam('lrdecay', 0.999, lambda r: 1. - 10**r.uniform(-5, -2))
else:
_hparam('lrdecay', 0, lambda r: 0)
if os.environ.get("HP") == "D":
_hparam('weight_decay', 0., lambda r: 0)
elif os.environ.get("HP") == "1":
_hparam('weight_decay', 0., lambda r: r.choice([1e-4, 1e-6]))
elif dataset == "Spirals":
_hparam('weight_decay', 0.001, lambda r: 10**r.uniform(-6, -2))
elif dataset in SMALL_IMAGES:
_hparam('weight_decay', 0., lambda r: 0.)
else:
_hparam('weight_decay', 0., lambda r: 10**r.uniform(-6, -2))
# batch size
if os.environ.get("HP") in ["1", "D"]:
_hparam('batch_size', 32, lambda r: 32)
elif dataset == "Spirals":
_hparam('batch_size', 512, lambda r: int(2**r.uniform(3, 9)))
elif dataset == "ColoredMNISTLFF":
_hparam('batch_size', 256, lambda r: 256)
elif dataset == "BAR":
_hparam('batch_size', 256, lambda r: 256)
elif dataset == "Collage":
_hparam('batch_size', 256, lambda r: 256)
elif dataset in SMALL_IMAGES:
_hparam('batch_size', 64, lambda r: int(2**r.uniform(3, 9)))
elif algorithm == 'ARM':
_hparam('batch_size', 8, lambda r: 8)
elif dataset == 'DomainNet':
_hparam('batch_size', 32, lambda r: int(2**r.uniform(3, 5)))
elif dataset == 'CelebA_Blond':
_hparam('batch_size', 48, lambda r: int(2**r.uniform(4.5, 6)))
elif dataset == "TwoDirections2D":
_hparam('batch_size', 512, lambda r: 256)
else:
_hparam('batch_size', 32, lambda r: int(2**r.uniform(3, 5.5)))
# if dataset == "Spirals":
# _hparam('mlp_width', 256, lambda r: int(2**r.uniform(6, 10)))
# _hparam('mlp_depth', 3, lambda r: int(r.choice([3, 4, 5]))) # because linear classifier
# _hparam('mlp_dropout', 0., lambda r: r.choice([0., 0.1, 0.5]))
if algorithm in ['DANN', 'CDANN']:
if dataset in SMALL_IMAGES:
_hparam('lr_g', 1e-3, lambda r: 10**r.uniform(-4.5, -2.5))
_hparam('lr_d', 1e-3, lambda r: 10**r.uniform(-4.5, -2.5))
_hparam('weight_decay_g', 0., lambda r: 0.)
else:
_hparam('lr_g', 5e-5, lambda r: 10**r.uniform(-5, -3.5))
_hparam('lr_d', 5e-5, lambda r: 10**r.uniform(-5, -3.5))
_hparam('weight_decay_g', 0., lambda r: 10**r.uniform(-6, -2))
# if dataset == "ColoredMNISTLFF":
# _hparam("flatten_cmnist_lff", False, lambda r: False)
# _hparam('mlp_width', 100, lambda r: 100)
# _hparam('mlp_depth', 2, lambda r: 2) # because linear classifier
# _hparam('mlp_dropout', 0., lambda r: 0)
# if dataset == "Collage":
# _hparam("model", "flatten", lambda r: "flatten")
# _hparam("classifier", "mlp2", lambda r: "mlp2")
# _hparam('mlp_width', 16, lambda r: 16)
# _hparam('mlp_depth', 2, lambda r: 2) # because linear classifier
# _hparam('mlp_dropout', 0., lambda r: 0)
# _hparam('mlp_activation', "leaky-relu", lambda r: "leaky-relu")
# _hparam('mlp_leaky_relu_slope', 0.01, lambda r: 0.01)
# if dataset == "TwoDirections2D":
# _hparam('mlp_width', 4, lambda r: 4)
# _hparam('mlp_depth', 2, lambda r: 2)
# _hparam('mlp_dropout', 0., lambda r: 0)
# _hparam('mlp_activation', "relu", lambda r: "relu")
# model
if dataset == "BAR":
_hparam('model', "pretrained-resnet-18", lambda r: "pretrained-resnet-18")
return hparams
def default_hparams(algorithm, dataset):
return {a: b for a, (b, c) in _hparams(algorithm, dataset, 0).items()}
def random_hparams(algorithm, dataset, seed):
return {a: c for a, (b, c) in _hparams(algorithm, dataset, seed).items()}
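if __name__ == "__main__":
    # Illustrative usage sketch, not part of the original file. Assumes 'ERM'
    # and 'ColoredMNIST' are names the registry above recognizes, and that
    # _hparam (defined earlier in the file) records (default, sampled) value
    # pairs, as in DomainBed.
    defaults = default_hparams('ERM', 'ColoredMNIST')
    print(defaults['lr'], defaults['batch_size'])   # fixed default values
    sampled = random_hparams('ERM', 'ColoredMNIST', seed=0)
    print(sampled['lr'])                            # drawn from the search space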
| avg_line_length: 43.620853 | max_line_length: 112 | alphanum_fraction: 0.570567 |

hexsha: 9f3e141d00a9372377ebf767423de29e8d93e267 | size: 3,535 | ext: py | lang: Python
max_stars:  tests/test/generic/export_alias.py | mace84/script-languages @ d586cbe212bbb4efbfb39e095183729c65489360 | ["MIT"] | count: 6 | 2019-01-09T11:55:15.000Z .. 2021-06-25T19:52:42.000Z
max_issues: tests/test/generic/export_alias.py | mace84/script-languages @ d586cbe212bbb4efbfb39e095183729c65489360 | ["MIT"] | count: 65 | 2018-12-12T08:40:38.000Z .. 2022-02-28T09:19:45.000Z
max_forks:  tests/test/generic/export_alias.py | mace84/script-languages @ d586cbe212bbb4efbfb39e095183729c65489360 | ["MIT"] | count: 9 | 2018-11-23T08:59:09.000Z .. 2020-02-04T12:56:35.000Z
#!/usr/bin/env python2.7
import os
import sys
sys.path.append(os.path.realpath(__file__ + '/../../../lib'))
import udf
from udf import requires
import exatest
# ATTENTION!
# The logic for the tests had to be put in the export_alias.sql files for each language.
# This was required because EXPORT INTO SCRIPT can only return a single integer.
class ExportAliasTest(udf.TestCase):
result_unknown = 0
result_ok = 1
result_failed = 2
result_test_error = 3
def setUp(self):
self.query('DROP SCHEMA FN2 CASCADE', ignore_errors=True)
self.query('CREATE SCHEMA FN2')
self.query('create or replace table t(a int, z varchar(3000))')
self.query("insert into t values (1, 'x')")
self.query('create or replace table "tl"(a int, "z" varchar(3000))')
self.query("insert into \"tl\" values (1, 'x')")
self.query("create connection FOOCONN to 'a' user 'b' identified by 'c'", ignore_errors=True)
@requires('EXPAL_TEST_PASS_FAIL')
@requires('EXPAL_USE_PARAM_FOO_BAR')
def test_export_use_params(self):
rows = self.executeStatement("EXPORT fn2.t INTO SCRIPT fn1.expal_use_param_foo_bar with foo='bar' bar='foo'")
self.assertEqual(self.result_ok, rows)
@requires('EXPAL_TEST_PASS_FAIL')
@requires('EXPAL_USE_CONNECTION_NAME')
def test_export_use_connection_name(self):
rows = self.executeStatement("EXPORT fn2.t INTO SCRIPT fn1.expal_use_connection_name AT FOOCONN with foo='bar' bar='foo'")
self.assertEqual(self.result_ok, rows)
@requires('EXPAL_TEST_PASS_FAIL')
@requires('EXPAL_USE_CONNECTION_INFO')
def test_export_use_connection_info(self):
rows = self.executeStatement("EXPORT fn2.t INTO SCRIPT fn1.expal_use_connection_info AT 'a' USER 'b' IDENTIFIED BY 'c' with foo='bar' bar='foo'")
self.assertEqual(self.result_ok, rows)
@requires('EXPAL_TEST_PASS_FAIL')
@requires('EXPAL_USE_HAS_TRUNCATE')
def test_export_use_has_truncate(self):
rows = self.executeStatement("EXPORT fn2.t INTO SCRIPT fn1.expal_use_has_truncate with foo='bar' bar='foo' truncate")
self.assertEqual(self.result_ok, rows)
@requires('EXPAL_TEST_PASS_FAIL')
@requires('EXPAL_USE_REPLACE_CREATED_BY')
def test_export_use_replace_created_by(self):
rows = self.executeStatement("EXPORT fn2.t INTO SCRIPT fn1.expal_use_replace_created_by with foo='bar' bar='foo' replace created by 'create table t(a int, z varchar(3000))'")
self.assertEqual(self.result_ok, rows)
@requires('EXPAL_TEST_PASS_FAIL')
@requires('EXPAL_USE_COLUMN_NAME_LOWER_CASE')
def test_export_use_column_name_lower_case(self):
rows = self.executeStatement("EXPORT fn2.\"tl\" INTO SCRIPT fn1.expal_use_column_name_lower_case with foo='bar' bar='foo'")
self.assertEqual(self.result_ok, rows)
@requires('EXPAL_TEST_PASS_FAIL')
@requires('EXPAL_USE_COLUMN_SELECTION')
def test_export_use_column_selection(self):
rows = self.executeStatement("EXPORT fn2.\"tl\"(a, \"z\") INTO SCRIPT fn1.expal_use_column_selection with foo='bar' bar='foo'")
self.assertEqual(self.result_ok, rows)
@requires('EXPAL_TEST_PASS_FAIL')
@requires('EXPAL_USE_QUERY')
def test_export_use_query(self):
rows = self.executeStatement("EXPORT (select a as 'col1', \"z\" as 'col2' from fn2.\"tl\") INTO SCRIPT fn1.expal_use_query with foo='bar' bar='foo'")
self.assertEqual(self.result_ok, rows)
if __name__ == '__main__':
udf.main()
| avg_line_length: 43.641975 | max_line_length: 182 | alphanum_fraction: 0.712023 |

hexsha: 19ef00ba83c578439c0fe515b521888fddfff1db | size: 2,473 | ext: py | lang: Python
max_stars:  python/paddle/fluid/tests/unittests/test_sequence_unpad_op.py | frankwhzhang/Paddle @ 131b1dc3240e53ea295cc49323bb2a7e7dcc717f | ["Apache-2.0"] | count: 1 | 2019-10-10T05:58:26.000Z .. 2019-10-10T05:58:26.000Z
max_issues: python/paddle/fluid/tests/unittests/test_sequence_unpad_op.py | frankwhzhang/Paddle @ 131b1dc3240e53ea295cc49323bb2a7e7dcc717f | ["Apache-2.0"] | count: null | null .. null
max_forks:  python/paddle/fluid/tests/unittests/test_sequence_unpad_op.py | frankwhzhang/Paddle @ 131b1dc3240e53ea295cc49323bb2a7e7dcc717f | ["Apache-2.0"] | count: 4 | 2019-09-30T02:15:34.000Z .. 2019-09-30T02:41:30.000Z
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import six
import numpy as np
from op_test import OpTest
class TestSequenceUnpadOp(OpTest):
def init(self):
self.length = [2, 3, 4]
self.x_shape = (3, 5)
self.dtype = "float32"
def compute(self):
assert len(self.length) == self.x_shape[0]
x = np.random.random(self.x_shape).astype(self.dtype)
out_lod = [self.length]
out = x[0, 0:self.length[0]]
for i in six.moves.xrange(1, x.shape[0]):
out = np.append(out, x[i, 0:self.length[i]], axis=0)
out_shape = (sum(self.length), )
if len(self.x_shape) == 2:
out_shape = out_shape + (1, )
else:
out_shape = out_shape + self.x_shape[2:]
self.inputs = {'X': x, 'Length': np.array(self.length).astype('int64')}
self.outputs = {'Out': (out.reshape(out_shape), out_lod)}
def setUp(self):
self.op_type = 'sequence_unpad'
self.init()
self.compute()
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(["X"], "Out")
class TestSequenceUnpadOp2(TestSequenceUnpadOp):
def init(self):
self.length = [2, 3, 4]
self.x_shape = (3, 5, 4, 3)
self.dtype = "float32"
class TestSequenceUnpadOp3(TestSequenceUnpadOp):
def init(self):
self.length = [5, 2, 3, 4]
self.x_shape = (4, 5, 3, 3, 6)
self.dtype = "float64"
class TestSequenceUnpadOp4(TestSequenceUnpadOp):
def init(self):
self.length = [5, 0, 0, 4]
self.x_shape = (4, 5, 3, 3, 6)
self.dtype = "float64"
# Was a second "TestSequenceUnpadOp4"; the duplicate name shadowed the class
# above, so its cases never ran. Renamed so both run.
class TestSequenceUnpadOp5(TestSequenceUnpadOp):
def init(self):
self.length = [0, 4, 3, 0]
self.x_shape = (4, 5, 3, 3, 6)
self.dtype = "float64"
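# Illustrative only (not part of the original tests): the same unpad rule in
# plain numpy. Keep the first lengths[i] entries of each padded row and
# concatenate them, mirroring TestSequenceUnpadOp.compute() above.
def _unpad_reference_example():
    x = np.arange(15).reshape(3, 5)
    lengths = [2, 3, 4]
    out = np.concatenate([x[i, :l] for i, l in enumerate(lengths)])
    assert out.shape == (sum(lengths), )  # (9,)
    return out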
if __name__ == '__main__':
unittest.main()
| avg_line_length: 28.425287 | max_line_length: 79 | alphanum_fraction: 0.621512 |

hexsha: b397292066c0233bb8d66ca308e5b02ba556d76c | size: 197 | ext: py | lang: Python
max_stars:  CrownstoneYodiwo/lib/ports/OutputPorts.py | crownstone/yodiwo-crownstone-node @ ba5afc6d92f70f8c795256621dd47fab754cbd35 | ["MIT"] | count: null | null .. null
max_issues: CrownstoneYodiwo/lib/ports/OutputPorts.py | crownstone/yodiwo-crownstone-node @ ba5afc6d92f70f8c795256621dd47fab754cbd35 | ["MIT"] | count: null | null .. null
max_forks:  CrownstoneYodiwo/lib/ports/OutputPorts.py | crownstone/yodiwo-crownstone-node @ ba5afc6d92f70f8c795256621dd47fab754cbd35 | ["MIT"] | count: null | null .. null
from enum import Enum
class OutputPorts(Enum):
powerUsage = "PowerUsage"
personEnter = "PersonEnter"
personExit = "PersonExit"
occupancyChange = "occupancyChanged"
| avg_line_length: 19.7 | max_line_length: 40 | alphanum_fraction: 0.664975 |

hexsha: 0f45cf81f223f1b588713da29b2b09f1a8e7ea9e | size: 639 | ext: py | lang: Python
max_stars:  UnitTest.py | Himself65/chenzheBot @ 034f63910700878a79afbbf313ed412910b6c367 | ["MIT"] | count: 10 | 2018-06-19T11:11:06.000Z .. 2018-10-30T13:03:27.000Z
max_issues: UnitTest.py | Himself65/chenzheBot @ 034f63910700878a79afbbf313ed412910b6c367 | ["MIT"] | count: 2 | 2018-06-20T04:56:09.000Z .. 2018-06-20T15:34:29.000Z
max_forks:  UnitTest.py | Himself65/chenzheBot @ 034f63910700878a79afbbf313ed412910b6c367 | ["MIT"] | count: 1 | 2018-10-04T14:53:47.000Z .. 2018-10-04T14:53:47.000Z
import json
def CZBotWordTest():
from bot.CZBotWord import CZBotWord
def display(s):
print(bot.getSentence(s))
bot = CZBotWord()
CZBotWord.initRedis()
display("chenzhe好强啊")
display("chenzhe为什么这么强")
display("will爷")
display("chenzhe强")
display("基础知识")
display("人类的本质是什么")
display("啥?")
# display("...")
def QQBotTest():
from qqbot import _bot as bot
    qqID = '761282619'  # QQ account number
    group_name = 'hxr粉丝群'  # name of the group(s) to monitor
bot.Login(['-q', qqID])
group = bot.List('group', group_name)[0]
bot.SendTo(group, '测试一下Bot')
if __name__ == '__main__':
CZBotWordTest()
| avg_line_length: 19.96875 | max_line_length: 44 | alphanum_fraction: 0.608764 |

hexsha: 5d3de0cdc73a7cccb5c852a2d7e68bfc28aef0a0 | size: 261 | ext: gyp | lang: Python
max_stars:  deps/libgdal/gyp-formats/aaigrid.gyp | jimgambale/node-gdal @ dc5c89fb23f1004732106250c8b7d57f380f9b61 | ["Apache-2.0"] | count: 462 | 2015-01-07T23:09:18.000Z .. 2022-03-30T03:58:09.000Z
max_issues: deps/libgdal/gyp-formats/aaigrid.gyp | jimgambale/node-gdal @ dc5c89fb23f1004732106250c8b7d57f380f9b61 | ["Apache-2.0"] | count: 196 | 2015-01-07T11:10:35.000Z .. 2022-03-29T08:50:30.000Z
max_forks:  deps/libgdal/gyp-formats/aaigrid.gyp | jimgambale/node-gdal @ dc5c89fb23f1004732106250c8b7d57f380f9b61 | ["Apache-2.0"] | count: 113 | 2015-01-15T02:24:18.000Z .. 2021-11-22T06:05:52.000Z
{
"includes": [
"../common.gypi"
],
"targets": [
{
"target_name": "libgdal_aaigrid_frmt",
"type": "static_library",
"sources": [
"../gdal/frmts/aaigrid/aaigriddataset.cpp"
],
"include_dirs": [
"../gdal/frmts/aaigrid"
]
}
]
}
| avg_line_length: 14.5 | max_line_length: 46 | alphanum_fraction: 0.544061 |

hexsha: 0d88d3e87f088fdab1eefb71ff98baa547afc344 | size: 1,467 | ext: py | lang: Python
max_stars:  the-pattern-bart-summary/experiments/tokenizer_gears_summary.py | redis-developer/the-pattern @ faa629b8152f405f92987c1436565938fa302932 | ["MIT"] | count: 3 | 2021-09-22T15:24:24.000Z .. 2022-02-14T02:40:42.000Z
max_issues: the-pattern-bart-summary/experiments/tokenizer_gears_summary.py | redis-developer/the-pattern @ faa629b8152f405f92987c1436565938fa302932 | ["MIT"] | count: null | null .. null
max_forks:  the-pattern-bart-summary/experiments/tokenizer_gears_summary.py | redis-developer/the-pattern @ faa629b8152f405f92987c1436565938fa302932 | ["MIT"] | count: 2 | 2021-09-22T15:24:27.000Z .. 2022-02-14T01:00:34.000Z
# Note: `GB` and `execute` used below are builtins injected by the RedisGears
# runtime; they are intentionally not imported here.
tokenizer = None
model = None
def loadTokeniser():
global tokenizer
global model
import torch
from transformers import AutoTokenizer, T5ForConditionalGeneration
tokenizer = AutoTokenizer.from_pretrained("t5-small")
# Try RobertaTokenizerFast and BART
# tokenizer = AutoTokenizer.from_pretrained("emilyalsentzer/Bio_ClinicalBERT")
model = T5ForConditionalGeneration.from_pretrained("t5-small")
return tokenizer, model
def remove_prefix(text, prefix):
    # If text starts with prefix, skip len(prefix) characters; otherwise skip 0.
    return text[text.startswith(prefix) and len(prefix):]
def parse_sentence(record):
global tokenizer
global model
if not tokenizer:
tokenizer, model=loadTokeniser()
article_text=[]
for _, value in sorted(record['value'].items(), key=lambda item: int(item[0])):
article_text.append(value)
full_text=" ".join(article_text[0:512])
inputs = tokenizer.encode("summarize: " + full_text, return_tensors="pt", max_length=512, truncation=True)
outputs = model.generate(inputs, max_length=150, min_length=40, length_penalty=2.0, num_beams=4, early_stopping=True)
output = tokenizer.decode(outputs[0], skip_special_tokens=True)
key_prefix='sentence:'
article_key=remove_prefix(record['key'],key_prefix)
summary_key = f"summary:T5:{article_key}"
execute('SET', summary_key, output)
execute('SADD','processed_docs_stage3_sum', summary_key)
gb = GB()
gb.foreach(parse_sentence)
gb.count()
gb.run('sentence:*')
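# Illustrative only: a local smoke test of parse_sentence outside the
# RedisGears runtime. The `execute` stub and the record layout below are
# assumptions for the sketch, not part of the original script:
#
#     def execute(*args):          # stand-in for the Gears builtin
#         print(args)
#
#     record = {'key': 'sentence:article42',
#               'value': {'0': 'First sentence.', '1': 'Second sentence.'}}
#     parse_sentence(record)       # prints the SET/SADD calls it would issue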
| avg_line_length: 33.340909 | max_line_length: 121 | alphanum_fraction: 0.728698 |

hexsha: 0266b850f8d9ebfaa66a3de14c657f413f24369f | size: 2,156 | ext: py | lang: Python
max_stars:  5_deep_learning/solution/01_DL_framework/GradientDescentOptimizer.py | karanchawla/ai_for_robotics @ 03bb66bae99bac3acd79bc1ec6d3b9c0eeabcdf8 | ["BSD-3-Clause"] | count: 65 | 2017-03-03T07:30:28.000Z .. 2021-08-19T01:12:47.000Z
max_issues: 5_deep_learning/solution/01_DL_framework/GradientDescentOptimizer.py | karanchawla/ai_for_robotics @ 03bb66bae99bac3acd79bc1ec6d3b9c0eeabcdf8 | ["BSD-3-Clause"] | count: 4 | 2017-03-02T13:51:40.000Z .. 2017-11-01T16:49:22.000Z
max_forks:  5_deep_learning/solution/01_DL_framework/GradientDescentOptimizer.py | ethz-asl/ai_for_robotics @ 03bb66bae99bac3acd79bc1ec6d3b9c0eeabcdf8 | ["BSD-3-Clause"] | count: 43 | 2017-03-02T11:31:21.000Z .. 2020-10-30T07:10:59.000Z
# Copyright 2017 Mark Pfeiffer, ASL, ETH Zurich, Switzerland
# Copyright 2017 Fadri Furrer, ASL, ETH Zurich, Switzerland
# Copyright 2017 Renaud Dubé, ASL, ETH Zurich, Switzerland
import Support as sup
import numpy as np
class GradientDescentOptimizer():
"""Gradient descent optimization for neural network parameters."""
learning_rate = 0
def __init__(self, learning_rate=0.01):
self.learning_rate = learning_rate
def getUpdatedParameters(self, nn, gradients):
"""Update parameters of the network and return them."""
p = nn.getParameters()
new_p = p - gradients * self.learning_rate
return new_p
def computeBatchGradient(self, gradient_list):
"""Compute the gradient for a whole data batch from a provided gradient list.
Input:
Gradient list contains the gradient for each sample in the data batch. The structure is a list of variables (provided data structure support.Variable()).
The weights and biases members both contain the gradients of all the layers for one data sample.
Return value:
One fused gradient including all data sample gradients.
"""
batch_gradient = gradient_list[0]
for g in gradient_list[1:]:
batch_gradient = batch_gradient + g
return batch_gradient
def updateStep(self, nn, loss_function, x_batch, y_target_batch):
"""
Update the NN model parameters given the loss function and a data
batch.
"""
gradients = []
avg_batch_loss = 0
batch_size = x_batch.shape[0]
for i in range(x_batch.shape[0]):
x = np.array([x_batch[i, :]])
y_target = np.array([y_target_batch[i, :]])
y = nn.output(x)
avg_batch_loss += loss_function.evaluate(y, y_target)
nn_gradient = nn.gradients(x, loss_function, y_target)
gradients.append(nn_gradient)
batch_gradient = self.computeBatchGradient(gradients)
new_p = self.getUpdatedParameters(nn, batch_gradient)
nn.setParameters(new_p)
return avg_batch_loss / batch_size
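# Illustrative only (not part of the original file): the update rule above on
# concrete numbers. _StubNN is a minimal stand-in for the network interface
# (only getParameters() is needed here), an assumption made for this sketch.
class _StubNN:
    def getParameters(self):
        return np.array([1.0, -2.0])

def _gd_example():
    opt = GradientDescentOptimizer(learning_rate=0.1)
    gradients = np.array([0.5, -0.5])
    # new_p = p - lr * grad  ->  [1.0 - 0.05, -2.0 + 0.05] = [0.95, -1.95]
    new_p = opt.getUpdatedParameters(_StubNN(), gradients)
    # computeBatchGradient simply sums the per-sample gradients:
    batch_gradient = opt.computeBatchGradient([gradients, gradients])
    return new_p, batch_gradient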
| avg_line_length: 37.824561 | max_line_length: 163 | alphanum_fraction: 0.662338 |

hexsha: 7656c3f4e7cdd23a81b46e615cbda44ce43d1554 | size: 12,637 | ext: py | lang: Python
max_stars:  test/test_sixrd.py | tsyesika/VPP @ 5d28c7afbc0abd172d0053768b2ebe37b7a6c348 | ["Apache-2.0"] | count: 1 | 2020-05-21T16:26:02.000Z .. 2020-05-21T16:26:02.000Z
max_issues: test/test_sixrd.py | tsyesika/VPP @ 5d28c7afbc0abd172d0053768b2ebe37b7a6c348 | ["Apache-2.0"] | count: 2 | 2018-09-10T21:43:09.000Z .. 2021-06-01T22:36:51.000Z
max_forks:  test/test_sixrd.py | tsyesika/VPP @ 5d28c7afbc0abd172d0053768b2ebe37b7a6c348 | ["Apache-2.0"] | count: null | null .. null
#!/usr/bin/env python
""" 6RD RFC5969 functional tests """
import unittest
from scapy.layers.inet import IP, UDP, Ether
from scapy.layers.inet6 import IPv6
from scapy.packet import Raw
from framework import VppTestCase, VppTestRunner
from vpp_ip_route import VppIpRoute, VppRoutePath, DpoProto, VppIpTable
from socket import AF_INET, AF_INET6, inet_pton
""" Test6rd is a subclass of VPPTestCase classes.
6RD tests.
"""
class Test6RD(VppTestCase):
""" 6RD Test Case """
@classmethod
def setUpClass(cls):
super(Test6RD, cls).setUpClass()
cls.create_pg_interfaces(range(4))
cls.interfaces = list(cls.pg_interfaces)
def setUp(self):
super(Test6RD, self).setUp()
t4 = VppIpTable(self, 10)
t6 = VppIpTable(self, 20, True)
t4.add_vpp_config()
t6.add_vpp_config()
for n in range(4):
i = self.pg_interfaces[n]
i.admin_up()
if (n > 1):
i.set_table_ip4(10)
i.set_table_ip6(20)
i.config_ip4()
i.config_ip6()
i.disable_ipv6_ra()
i.resolve_arp()
i.resolve_ndp()
def tearDown(self):
for i in self.pg_interfaces:
i.unconfig_ip4()
i.unconfig_ip6()
i.set_table_ip4(0)
i.set_table_ip6(0)
super(Test6RD, self).tearDown()
def validate_6in4(self, rx, expected):
if IP not in rx:
self.fail()
if IPv6 not in rx:
self.fail()
self.assertEqual(rx[IP].src, expected[IP].src)
self.assertEqual(rx[IP].dst, expected[IP].dst)
self.assertEqual(rx[IP].proto, expected[IP].proto)
self.assertEqual(rx[IPv6].src, expected[IPv6].src)
self.assertEqual(rx[IPv6].dst, expected[IPv6].dst)
def validate_4in6(self, rx, expected):
if IPv6 not in rx:
self.fail()
if IP in rx:
self.fail()
self.assertTrue(rx[IPv6].src == expected[IPv6].src)
self.assertTrue(rx[IPv6].dst == expected[IPv6].dst)
self.assertTrue(rx[IPv6].nh == expected[IPv6].nh)
    def payload(self, length):
        return 'x' * length
def test_6rd_ip6_to_ip4(self):
""" ip6 -> ip4 (encap) 6rd test """
p_ether = Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac)
p_ip6 = IPv6(src="1::1", dst="2002:AC10:0202::1", nh='UDP')
rv = self.vapi.ipip_6rd_add_tunnel(
0, inet_pton(AF_INET6, '2002::'), 16,
0, inet_pton(AF_INET, '0.0.0.0'), 0,
self.pg0.local_ip4n, True)
self.tunnel_index = rv.sw_if_index
self.vapi.cli("show ip6 fib")
p_payload = UDP(sport=1234, dport=1234)
p = (p_ether / p_ip6 / p_payload)
p_reply = (IP(src=self.pg0.local_ip4, dst=self.pg1.remote_ip4,
proto='ipv6') / p_ip6)
rx = self.send_and_expect(self.pg0, p*10, self.pg1)
for p in rx:
self.validate_6in4(p, p_reply)
# MTU tests (default is 1480)
plen = 1481 - 40 - 8
p_ip6 = IPv6(src="1::1", dst="2002:AC10:0202::1")
p_payload = UDP(sport=1234, dport=1234) / Raw(self.payload(plen))
p = (p_ether / p_ip6 / p_payload)
p_reply = (IP(src=self.pg0.local_ip4, dst=self.pg1.remote_ip4,
proto='ipv6') / p_ip6)
rx = self.send_and_assert_no_replies(self.pg0, p*10)
self.vapi.ipip_6rd_del_tunnel(self.tunnel_index)
def test_6rd_ip6_to_ip4_vrf(self):
""" ip6 -> ip4 (encap) 6rd VRF test """
p_ether = Ether(src=self.pg2.remote_mac, dst=self.pg2.local_mac)
p_ip6 = IPv6(src="1::1", dst="2002:AC10:0402::1", nh='UDP')
rv = self.vapi.ipip_6rd_add_tunnel(
20, inet_pton(AF_INET6, '2002::'), 16,
10, inet_pton(AF_INET, '0.0.0.0'), 0,
self.pg2.local_ip4n, True)
self.tunnel_index = rv.sw_if_index
self.vapi.cli("show ip6 fib")
p_payload = UDP(sport=1234, dport=1234)
p = (p_ether / p_ip6 / p_payload)
p_reply = (IP(src=self.pg2.local_ip4, dst=self.pg3.remote_ip4,
proto='ipv6') / p_ip6)
rx = self.send_and_expect(self.pg2, p*10, self.pg3)
for p in rx:
self.validate_6in4(p, p_reply)
# MTU tests (default is 1480)
plen = 1481 - 40 - 8
p_ip6 = IPv6(src="1::1", dst="2002:AC10:0402::1")
p_payload = UDP(sport=1234, dport=1234) / Raw(self.payload(plen))
p = (p_ether / p_ip6 / p_payload)
p_reply = (IP(src=self.pg2.local_ip4, dst=self.pg3.remote_ip4,
proto='ipv6') / p_ip6)
rx = self.send_and_assert_no_replies(self.pg0, p*10)
self.vapi.ipip_6rd_del_tunnel(self.tunnel_index)
def test_6rd_ip4_to_ip6(self):
""" ip4 -> ip6 (decap) 6rd test """
rv = self.vapi.ipip_6rd_add_tunnel(
0, inet_pton(AF_INET6, '2002::'), 16,
0, inet_pton(AF_INET, '0.0.0.0'), 0,
self.pg0.local_ip4n, True)
self.tunnel_index = rv.sw_if_index
rv = self.vapi.ipip_6rd_del_tunnel(rv.sw_if_index)
rv = self.vapi.ipip_6rd_add_tunnel(
0, inet_pton(AF_INET6, '2002::'), 16,
0, inet_pton(AF_INET, '0.0.0.0'), 0,
self.pg0.local_ip4n, True)
self.tunnel_index = rv.sw_if_index
p_ip6 = (IPv6(src="2002:AC10:0202::1", dst=self.pg1.remote_ip6) /
UDP(sport=1234, dport=1234))
p = (Ether(src=self.pg0.remote_mac,
dst=self.pg0.local_mac) /
IP(src=self.pg1.remote_ip4, dst=self.pg0.local_ip4) /
p_ip6)
p_reply = p_ip6
rx = self.send_and_expect(self.pg0, p*10, self.pg1)
for p in rx:
self.validate_4in6(p, p_reply)
self.vapi.ipip_6rd_del_tunnel(self.tunnel_index)
def test_6rd_ip4_to_ip6_vrf(self):
""" ip4 -> ip6 (decap) 6rd VRF test """
rv = self.vapi.ipip_6rd_add_tunnel(
20, inet_pton(AF_INET6, '2002::'), 16,
10, inet_pton(AF_INET, '0.0.0.0'), 0,
self.pg2.local_ip4n, True)
self.tunnel_index = rv.sw_if_index
rv = self.vapi.ipip_6rd_del_tunnel(rv.sw_if_index)
rv = self.vapi.ipip_6rd_add_tunnel(
20, inet_pton(AF_INET6, '2002::'), 16,
10, inet_pton(AF_INET, '0.0.0.0'), 0,
self.pg2.local_ip4n, True)
self.tunnel_index = rv.sw_if_index
self.vapi.sw_interface_set_table(self.tunnel_index, 1, 20)
p_ip6 = (IPv6(src="2002:AC10:0402::1", dst=self.pg3.remote_ip6) /
UDP(sport=1234, dport=1234))
p = (Ether(src=self.pg2.remote_mac,
dst=self.pg2.local_mac) /
IP(src=self.pg3.remote_ip4, dst=self.pg2.local_ip4) /
p_ip6)
p_reply = p_ip6
rx = self.send_and_expect(self.pg2, p*10, self.pg3)
for p in rx:
self.validate_4in6(p, p_reply)
self.vapi.sw_interface_set_table(self.tunnel_index, 1, 0)
self.vapi.ipip_6rd_del_tunnel(self.tunnel_index)
def test_6rd_ip4_to_ip6_multiple(self):
""" ip4 -> ip6 (decap) 6rd test """
self.tunnel_index = []
rv = self.vapi.ipip_6rd_add_tunnel(
0, inet_pton(AF_INET6, '2002::'), 16,
0, inet_pton(AF_INET, '0.0.0.0'), 0,
self.pg0.local_ip4n, True)
self.tunnel_index.append(rv.sw_if_index)
rv = self.vapi.ipip_6rd_add_tunnel(
0, inet_pton(AF_INET6, '2003::'), 16,
0, inet_pton(AF_INET, '0.0.0.0'), 0,
self.pg1.local_ip4n, True)
self.tunnel_index.append(rv.sw_if_index)
self.vapi.cli("show ip6 fib")
p_ether = Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac)
p_ip4 = IP(src=self.pg1.remote_ip4, dst=self.pg0.local_ip4)
p_ip6_1 = (IPv6(src="2002:AC10:0202::1", dst=self.pg1.remote_ip6) /
UDP(sport=1234, dport=1234))
p_ip6_2 = (IPv6(src="2003:AC10:0202::1", dst=self.pg1.remote_ip6) /
UDP(sport=1234, dport=1234))
p = (p_ether / p_ip4 / p_ip6_1)
rx = self.send_and_expect(self.pg0, p*10, self.pg1)
for p in rx:
self.validate_4in6(p, p_ip6_1)
p = (p_ether / p_ip4 / p_ip6_2)
rx = self.send_and_expect(self.pg0, p*10, self.pg1)
for p in rx:
self.validate_4in6(p, p_ip6_2)
for i in self.tunnel_index:
self.vapi.ipip_6rd_del_tunnel(i)
def test_6rd_ip4_to_ip6_suffix(self):
""" ip4 -> ip6 (decap) 6rd test """
rv = self.vapi.ipip_6rd_add_tunnel(
0, inet_pton(AF_INET6, '2002::'), 16,
0, inet_pton(AF_INET, '172.0.0.0'), 8,
self.pg0.local_ip4n, True)
self.tunnel_index = rv.sw_if_index
self.vapi.cli("show ip6 fib")
p_ether = Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac)
p_ip4 = IP(src=self.pg1.remote_ip4, dst=self.pg0.local_ip4)
p_ip6 = (IPv6(src="2002:1002:0200::1", dst=self.pg1.remote_ip6) /
UDP(sport=1234, dport=1234))
p = (p_ether / p_ip4 / p_ip6)
rx = self.send_and_expect(self.pg0, p*10, self.pg1)
for p in rx:
self.validate_4in6(p, p_ip6)
self.vapi.ipip_6rd_del_tunnel(self.tunnel_index)
def test_6rd_ip4_to_ip6_sec_check(self):
""" ip4 -> ip6 (decap) security check 6rd test """
rv = self.vapi.ipip_6rd_add_tunnel(
0, inet_pton(AF_INET6, '2002::'), 16,
0, inet_pton(AF_INET, '0.0.0.0'), 0,
self.pg0.local_ip4n, True)
self.tunnel_index = rv.sw_if_index
self.vapi.cli("show ip6 fib")
p_ip6 = (IPv6(src="2002:AC10:0202::1", dst=self.pg1.remote_ip6) /
UDP(sport=1234, dport=1234))
p_ip6_fail = (IPv6(src="2002:DEAD:0202::1", dst=self.pg1.remote_ip6) /
UDP(sport=1234, dport=1234))
p = (Ether(src=self.pg0.remote_mac,
dst=self.pg0.local_mac) /
IP(src=self.pg1.remote_ip4, dst=self.pg0.local_ip4) /
p_ip6)
p_reply = p_ip6
rx = self.send_and_expect(self.pg0, p*10, self.pg1)
for p in rx:
self.validate_4in6(p, p_reply)
p = (Ether(src=self.pg0.remote_mac,
dst=self.pg0.local_mac) /
IP(src=self.pg1.remote_ip4, dst=self.pg0.local_ip4) /
p_ip6_fail)
rx = self.send_and_assert_no_replies(self.pg0, p*10)
self.vapi.ipip_6rd_del_tunnel(self.tunnel_index)
def test_6rd_bgp_tunnel(self):
""" 6rd BGP tunnel """
rv = self.vapi.ipip_6rd_add_tunnel(
0, inet_pton(AF_INET6, '2002::'), 16,
0, inet_pton(AF_INET, '0.0.0.0'), 0,
self.pg0.local_ip4n, False)
self.tunnel_index = rv.sw_if_index
default_route = VppIpRoute(
self, "DEAD::", 16, [VppRoutePath("2002:0808:0808::",
self.tunnel_index,
proto=DpoProto.DPO_PROTO_IP6)],
is_ip6=1)
default_route.add_vpp_config()
ip4_route = VppIpRoute(self, "8.0.0.0", 8,
[VppRoutePath(self.pg1.remote_ip4, 0xFFFFFFFF)])
ip4_route.add_vpp_config()
# Via recursive route 6 -> 4
p = (Ether(src=self.pg0.remote_mac,
dst=self.pg0.local_mac) /
IPv6(src="1::1", dst="DEAD:BEEF::1") /
UDP(sport=1234, dport=1234))
p_reply = (IP(src=self.pg0.local_ip4, dst="8.8.8.8",
proto='ipv6') /
IPv6(src='1::1', dst='DEAD:BEEF::1', nh='UDP'))
rx = self.send_and_expect(self.pg0, p*10, self.pg1)
for p in rx:
self.validate_6in4(p, p_reply)
# Via recursive route 4 -> 6 (Security check must be disabled)
p_ip6 = (IPv6(src="DEAD:BEEF::1", dst=self.pg1.remote_ip6) /
UDP(sport=1234, dport=1234))
p = (Ether(src=self.pg0.remote_mac,
dst=self.pg0.local_mac) /
IP(src="8.8.8.8", dst=self.pg0.local_ip4) /
p_ip6)
p_reply = p_ip6
rx = self.send_and_expect(self.pg0, p*10, self.pg1)
for p in rx:
self.validate_4in6(p, p_reply)
ip4_route.remove_vpp_config()
default_route.remove_vpp_config()
self.vapi.ipip_6rd_del_tunnel(self.tunnel_index)
if __name__ == '__main__':
unittest.main(testRunner=VppTestRunner)
| avg_line_length: 35.497191 | max_line_length: 79 | alphanum_fraction: 0.570547 |

hexsha: d7e3dca1b638a5c010d45fb228ac03b2c1fdbf0e | size: 205,377 | ext: py | lang: Python
max_stars:  Lib/test/test_email/test_email.py | pelotoncycle/cpython-fork @ 1ab99a0e912aac9c3f16555f23284d7e381f2f69 | ["PSF-2.0"] | count: 70 | 2015-06-20T17:59:24.000Z .. 2021-05-03T02:01:49.000Z
max_issues: Lib/test/test_email/test_email.py | sky-skynet/Python3 @ b816507f56ee14b730b7ab52a61eb17f9eb9d815 | ["PSF-2.0"] | count: 16 | 2015-06-11T14:57:43.000Z .. 2016-12-03T00:22:13.000Z
max_forks:  Lib/test/test_email/test_email.py | sky-skynet/Python3 @ b816507f56ee14b730b7ab52a61eb17f9eb9d815 | ["PSF-2.0"] | count: 36 | 2015-05-15T20:30:44.000Z .. 2020-11-14T19:31:40.000Z
# Copyright (C) 2001-2010 Python Software Foundation
# Contact: email-sig@python.org
# email package unit tests
import re
import time
import base64
import unittest
import textwrap
from io import StringIO, BytesIO
from itertools import chain
from random import choice
try:
from threading import Thread
except ImportError:
from dummy_threading import Thread
import email
import email.policy
from email.charset import Charset
from email.header import Header, decode_header, make_header
from email.parser import Parser, HeaderParser
from email.generator import Generator, DecodedGenerator, BytesGenerator
from email.message import Message
from email.mime.application import MIMEApplication
from email.mime.audio import MIMEAudio
from email.mime.text import MIMEText
from email.mime.image import MIMEImage
from email.mime.base import MIMEBase
from email.mime.message import MIMEMessage
from email.mime.multipart import MIMEMultipart
from email import utils
from email import errors
from email import encoders
from email import iterators
from email import base64mime
from email import quoprimime
from test.support import unlink, start_threads
from test.test_email import openfile, TestEmailBase
# These imports are documented to work, but we are testing them using a
# different path, so we import them here just to make sure they are importable.
from email.parser import FeedParser, BytesFeedParser
NL = '\n'
EMPTYSTRING = ''
SPACE = ' '
# Test various aspects of the Message class's API
class TestMessageAPI(TestEmailBase):
def test_get_all(self):
eq = self.assertEqual
msg = self._msgobj('msg_20.txt')
eq(msg.get_all('cc'), ['ccc@zzz.org', 'ddd@zzz.org', 'eee@zzz.org'])
eq(msg.get_all('xx', 'n/a'), 'n/a')
def test_getset_charset(self):
eq = self.assertEqual
msg = Message()
eq(msg.get_charset(), None)
charset = Charset('iso-8859-1')
msg.set_charset(charset)
eq(msg['mime-version'], '1.0')
eq(msg.get_content_type(), 'text/plain')
eq(msg['content-type'], 'text/plain; charset="iso-8859-1"')
eq(msg.get_param('charset'), 'iso-8859-1')
eq(msg['content-transfer-encoding'], 'quoted-printable')
eq(msg.get_charset().input_charset, 'iso-8859-1')
# Remove the charset
msg.set_charset(None)
eq(msg.get_charset(), None)
eq(msg['content-type'], 'text/plain')
# Try adding a charset when there's already MIME headers present
msg = Message()
msg['MIME-Version'] = '2.0'
msg['Content-Type'] = 'text/x-weird'
msg['Content-Transfer-Encoding'] = 'quinted-puntable'
msg.set_charset(charset)
eq(msg['mime-version'], '2.0')
eq(msg['content-type'], 'text/x-weird; charset="iso-8859-1"')
eq(msg['content-transfer-encoding'], 'quinted-puntable')
def test_set_charset_from_string(self):
eq = self.assertEqual
msg = Message()
msg.set_charset('us-ascii')
eq(msg.get_charset().input_charset, 'us-ascii')
eq(msg['content-type'], 'text/plain; charset="us-ascii"')
def test_set_payload_with_charset(self):
msg = Message()
charset = Charset('iso-8859-1')
msg.set_payload('This is a string payload', charset)
self.assertEqual(msg.get_charset().input_charset, 'iso-8859-1')
def test_set_payload_with_8bit_data_and_charset(self):
data = b'\xd0\x90\xd0\x91\xd0\x92'
charset = Charset('utf-8')
msg = Message()
msg.set_payload(data, charset)
self.assertEqual(msg['content-transfer-encoding'], 'base64')
self.assertEqual(msg.get_payload(decode=True), data)
self.assertEqual(msg.get_payload(), '0JDQkdCS\n')
def test_set_payload_with_non_ascii_and_charset_body_encoding_none(self):
data = b'\xd0\x90\xd0\x91\xd0\x92'
charset = Charset('utf-8')
charset.body_encoding = None # Disable base64 encoding
msg = Message()
msg.set_payload(data.decode('utf-8'), charset)
self.assertEqual(msg['content-transfer-encoding'], '8bit')
self.assertEqual(msg.get_payload(decode=True), data)
def test_set_payload_with_8bit_data_and_charset_body_encoding_none(self):
data = b'\xd0\x90\xd0\x91\xd0\x92'
charset = Charset('utf-8')
charset.body_encoding = None # Disable base64 encoding
msg = Message()
msg.set_payload(data, charset)
self.assertEqual(msg['content-transfer-encoding'], '8bit')
self.assertEqual(msg.get_payload(decode=True), data)
def test_set_payload_to_list(self):
msg = Message()
msg.set_payload([])
self.assertEqual(msg.get_payload(), [])
def test_attach_when_payload_is_string(self):
msg = Message()
msg['Content-Type'] = 'multipart/mixed'
msg.set_payload('string payload')
sub_msg = MIMEMessage(Message())
self.assertRaisesRegex(TypeError, "[Aa]ttach.*non-multipart",
msg.attach, sub_msg)
def test_get_charsets(self):
eq = self.assertEqual
msg = self._msgobj('msg_08.txt')
charsets = msg.get_charsets()
eq(charsets, [None, 'us-ascii', 'iso-8859-1', 'iso-8859-2', 'koi8-r'])
msg = self._msgobj('msg_09.txt')
charsets = msg.get_charsets('dingbat')
eq(charsets, ['dingbat', 'us-ascii', 'iso-8859-1', 'dingbat',
'koi8-r'])
msg = self._msgobj('msg_12.txt')
charsets = msg.get_charsets()
eq(charsets, [None, 'us-ascii', 'iso-8859-1', None, 'iso-8859-2',
'iso-8859-3', 'us-ascii', 'koi8-r'])
def test_get_filename(self):
eq = self.assertEqual
msg = self._msgobj('msg_04.txt')
filenames = [p.get_filename() for p in msg.get_payload()]
eq(filenames, ['msg.txt', 'msg.txt'])
msg = self._msgobj('msg_07.txt')
subpart = msg.get_payload(1)
eq(subpart.get_filename(), 'dingusfish.gif')
def test_get_filename_with_name_parameter(self):
eq = self.assertEqual
msg = self._msgobj('msg_44.txt')
filenames = [p.get_filename() for p in msg.get_payload()]
eq(filenames, ['msg.txt', 'msg.txt'])
def test_get_boundary(self):
eq = self.assertEqual
msg = self._msgobj('msg_07.txt')
# No quotes!
eq(msg.get_boundary(), 'BOUNDARY')
def test_set_boundary(self):
eq = self.assertEqual
# This one has no existing boundary parameter, but the Content-Type:
# header appears fifth.
msg = self._msgobj('msg_01.txt')
msg.set_boundary('BOUNDARY')
header, value = msg.items()[4]
eq(header.lower(), 'content-type')
eq(value, 'text/plain; charset="us-ascii"; boundary="BOUNDARY"')
# This one has a Content-Type: header, with a boundary, stuck in the
# middle of its headers. Make sure the order is preserved; it should
# be fifth.
msg = self._msgobj('msg_04.txt')
msg.set_boundary('BOUNDARY')
header, value = msg.items()[4]
eq(header.lower(), 'content-type')
eq(value, 'multipart/mixed; boundary="BOUNDARY"')
# And this one has no Content-Type: header at all.
msg = self._msgobj('msg_03.txt')
self.assertRaises(errors.HeaderParseError,
msg.set_boundary, 'BOUNDARY')
def test_make_boundary(self):
msg = MIMEMultipart('form-data')
# Note that when the boundary gets created is an implementation
# detail and might change.
self.assertEqual(msg.items()[0][1], 'multipart/form-data')
# Trigger creation of boundary
msg.as_string()
self.assertEqual(msg.items()[0][1][:33],
'multipart/form-data; boundary="==')
# XXX: there ought to be tests of the uniqueness of the boundary, too.
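    # Illustrative sketch of the missing uniqueness check mentioned above; it
    # is not part of the original suite. Two fresh multiparts should receive
    # distinct auto-generated boundaries (collisions are only probabilistically
    # excluded, so this is a sanity check, not a proof).
    def test_boundary_uniqueness_sketch(self):
        first, second = MIMEMultipart(), MIMEMultipart()
        first.as_string()           # trigger boundary creation
        second.as_string()
        self.assertNotEqual(first.get_boundary(), second.get_boundary())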
def test_message_rfc822_only(self):
# Issue 7970: message/rfc822 not in multipart parsed by
# HeaderParser caused an exception when flattened.
with openfile('msg_46.txt') as fp:
msgdata = fp.read()
parser = HeaderParser()
msg = parser.parsestr(msgdata)
out = StringIO()
gen = Generator(out, True, 0)
gen.flatten(msg, False)
self.assertEqual(out.getvalue(), msgdata)
def test_byte_message_rfc822_only(self):
# Make sure new bytes header parser also passes this.
with openfile('msg_46.txt') as fp:
msgdata = fp.read().encode('ascii')
parser = email.parser.BytesHeaderParser()
msg = parser.parsebytes(msgdata)
out = BytesIO()
gen = email.generator.BytesGenerator(out)
gen.flatten(msg)
self.assertEqual(out.getvalue(), msgdata)
def test_get_decoded_payload(self):
eq = self.assertEqual
msg = self._msgobj('msg_10.txt')
# The outer message is a multipart
eq(msg.get_payload(decode=True), None)
# Subpart 1 is 7bit encoded
eq(msg.get_payload(0).get_payload(decode=True),
b'This is a 7bit encoded message.\n')
# Subpart 2 is quopri
eq(msg.get_payload(1).get_payload(decode=True),
b'\xa1This is a Quoted Printable encoded message!\n')
# Subpart 3 is base64
eq(msg.get_payload(2).get_payload(decode=True),
b'This is a Base64 encoded message.')
# Subpart 4 is base64 with a trailing newline, which
# used to be stripped (issue 7143).
eq(msg.get_payload(3).get_payload(decode=True),
b'This is a Base64 encoded message.\n')
# Subpart 5 has no Content-Transfer-Encoding: header.
eq(msg.get_payload(4).get_payload(decode=True),
b'This has no Content-Transfer-Encoding: header.\n')
def test_get_decoded_uu_payload(self):
eq = self.assertEqual
msg = Message()
msg.set_payload('begin 666 -\n+:&5L;&\\@=V]R;&0 \n \nend\n')
for cte in ('x-uuencode', 'uuencode', 'uue', 'x-uue'):
msg['content-transfer-encoding'] = cte
eq(msg.get_payload(decode=True), b'hello world')
# Now try some bogus data
msg.set_payload('foo')
eq(msg.get_payload(decode=True), b'foo')
def test_get_payload_n_raises_on_non_multipart(self):
msg = Message()
self.assertRaises(TypeError, msg.get_payload, 1)
def test_decoded_generator(self):
eq = self.assertEqual
msg = self._msgobj('msg_07.txt')
with openfile('msg_17.txt') as fp:
text = fp.read()
s = StringIO()
g = DecodedGenerator(s)
g.flatten(msg)
eq(s.getvalue(), text)
def test__contains__(self):
msg = Message()
msg['From'] = 'Me'
msg['to'] = 'You'
# Check for case insensitivity
self.assertIn('from', msg)
self.assertIn('From', msg)
self.assertIn('FROM', msg)
self.assertIn('to', msg)
self.assertIn('To', msg)
self.assertIn('TO', msg)
def test_as_string(self):
msg = self._msgobj('msg_01.txt')
with openfile('msg_01.txt') as fp:
text = fp.read()
self.assertEqual(text, str(msg))
fullrepr = msg.as_string(unixfrom=True)
lines = fullrepr.split('\n')
self.assertTrue(lines[0].startswith('From '))
self.assertEqual(text, NL.join(lines[1:]))
def test_as_string_policy(self):
msg = self._msgobj('msg_01.txt')
newpolicy = msg.policy.clone(linesep='\r\n')
fullrepr = msg.as_string(policy=newpolicy)
s = StringIO()
g = Generator(s, policy=newpolicy)
g.flatten(msg)
self.assertEqual(fullrepr, s.getvalue())
def test_as_bytes(self):
msg = self._msgobj('msg_01.txt')
with openfile('msg_01.txt') as fp:
data = fp.read().encode('ascii')
self.assertEqual(data, bytes(msg))
fullrepr = msg.as_bytes(unixfrom=True)
lines = fullrepr.split(b'\n')
self.assertTrue(lines[0].startswith(b'From '))
self.assertEqual(data, b'\n'.join(lines[1:]))
def test_as_bytes_policy(self):
msg = self._msgobj('msg_01.txt')
newpolicy = msg.policy.clone(linesep='\r\n')
fullrepr = msg.as_bytes(policy=newpolicy)
s = BytesIO()
        g = BytesGenerator(s, policy=newpolicy)
g.flatten(msg)
self.assertEqual(fullrepr, s.getvalue())
# test_headerregistry.TestContentTypeHeader.bad_params
def test_bad_param(self):
msg = email.message_from_string("Content-Type: blarg; baz; boo\n")
self.assertEqual(msg.get_param('baz'), '')
def test_missing_filename(self):
msg = email.message_from_string("From: foo\n")
self.assertEqual(msg.get_filename(), None)
def test_bogus_filename(self):
msg = email.message_from_string(
"Content-Disposition: blarg; filename\n")
self.assertEqual(msg.get_filename(), '')
def test_missing_boundary(self):
msg = email.message_from_string("From: foo\n")
self.assertEqual(msg.get_boundary(), None)
def test_get_params(self):
eq = self.assertEqual
msg = email.message_from_string(
'X-Header: foo=one; bar=two; baz=three\n')
eq(msg.get_params(header='x-header'),
[('foo', 'one'), ('bar', 'two'), ('baz', 'three')])
msg = email.message_from_string(
'X-Header: foo; bar=one; baz=two\n')
eq(msg.get_params(header='x-header'),
[('foo', ''), ('bar', 'one'), ('baz', 'two')])
eq(msg.get_params(), None)
msg = email.message_from_string(
'X-Header: foo; bar="one"; baz=two\n')
eq(msg.get_params(header='x-header'),
[('foo', ''), ('bar', 'one'), ('baz', 'two')])
# test_headerregistry.TestContentTypeHeader.spaces_around_param_equals
def test_get_param_liberal(self):
msg = Message()
msg['Content-Type'] = 'Content-Type: Multipart/mixed; boundary = "CPIMSSMTPC06p5f3tG"'
self.assertEqual(msg.get_param('boundary'), 'CPIMSSMTPC06p5f3tG')
def test_get_param(self):
eq = self.assertEqual
msg = email.message_from_string(
"X-Header: foo=one; bar=two; baz=three\n")
eq(msg.get_param('bar', header='x-header'), 'two')
eq(msg.get_param('quuz', header='x-header'), None)
eq(msg.get_param('quuz'), None)
msg = email.message_from_string(
'X-Header: foo; bar="one"; baz=two\n')
eq(msg.get_param('foo', header='x-header'), '')
eq(msg.get_param('bar', header='x-header'), 'one')
eq(msg.get_param('baz', header='x-header'), 'two')
# XXX: We are not RFC-2045 compliant! We cannot parse:
# msg["Content-Type"] = 'text/plain; weird="hey; dolly? [you] @ <\\"home\\">?"'
# msg.get_param("weird")
# yet.
# test_headerregistry.TestContentTypeHeader.spaces_around_semis
def test_get_param_funky_continuation_lines(self):
msg = self._msgobj('msg_22.txt')
self.assertEqual(msg.get_payload(1).get_param('name'), 'wibble.JPG')
# test_headerregistry.TestContentTypeHeader.semis_inside_quotes
def test_get_param_with_semis_in_quotes(self):
msg = email.message_from_string(
'Content-Type: image/pjpeg; name="Jim&&Jill"\n')
self.assertEqual(msg.get_param('name'), 'Jim&&Jill')
self.assertEqual(msg.get_param('name', unquote=False),
'"Jim&&Jill"')
# test_headerregistry.TestContentTypeHeader.quotes_inside_rfc2231_value
def test_get_param_with_quotes(self):
msg = email.message_from_string(
'Content-Type: foo; bar*0="baz\\"foobar"; bar*1="\\"baz"')
self.assertEqual(msg.get_param('bar'), 'baz"foobar"baz')
msg = email.message_from_string(
"Content-Type: foo; bar*0=\"baz\\\"foobar\"; bar*1=\"\\\"baz\"")
self.assertEqual(msg.get_param('bar'), 'baz"foobar"baz')
def test_field_containment(self):
msg = email.message_from_string('Header: exists')
self.assertIn('header', msg)
self.assertIn('Header', msg)
self.assertIn('HEADER', msg)
self.assertNotIn('headerx', msg)
def test_set_param(self):
eq = self.assertEqual
msg = Message()
msg.set_param('charset', 'iso-2022-jp')
eq(msg.get_param('charset'), 'iso-2022-jp')
msg.set_param('importance', 'high value')
eq(msg.get_param('importance'), 'high value')
eq(msg.get_param('importance', unquote=False), '"high value"')
eq(msg.get_params(), [('text/plain', ''),
('charset', 'iso-2022-jp'),
('importance', 'high value')])
eq(msg.get_params(unquote=False), [('text/plain', ''),
('charset', '"iso-2022-jp"'),
('importance', '"high value"')])
msg.set_param('charset', 'iso-9999-xx', header='X-Jimmy')
eq(msg.get_param('charset', header='X-Jimmy'), 'iso-9999-xx')
def test_del_param(self):
eq = self.assertEqual
msg = self._msgobj('msg_05.txt')
eq(msg.get_params(),
[('multipart/report', ''), ('report-type', 'delivery-status'),
('boundary', 'D1690A7AC1.996856090/mail.example.com')])
old_val = msg.get_param("report-type")
msg.del_param("report-type")
eq(msg.get_params(),
[('multipart/report', ''),
('boundary', 'D1690A7AC1.996856090/mail.example.com')])
msg.set_param("report-type", old_val)
eq(msg.get_params(),
[('multipart/report', ''),
('boundary', 'D1690A7AC1.996856090/mail.example.com'),
('report-type', old_val)])
def test_del_param_on_other_header(self):
msg = Message()
msg.add_header('Content-Disposition', 'attachment', filename='bud.gif')
msg.del_param('filename', 'content-disposition')
self.assertEqual(msg['content-disposition'], 'attachment')
def test_del_param_on_nonexistent_header(self):
msg = Message()
# Deleting param on empty msg should not raise exception.
msg.del_param('filename', 'content-disposition')
def test_del_nonexistent_param(self):
msg = Message()
msg.add_header('Content-Type', 'text/plain', charset='utf-8')
existing_header = msg['Content-Type']
msg.del_param('foobar', header='Content-Type')
self.assertEqual(msg['Content-Type'], existing_header)
def test_set_type(self):
eq = self.assertEqual
msg = Message()
self.assertRaises(ValueError, msg.set_type, 'text')
msg.set_type('text/plain')
eq(msg['content-type'], 'text/plain')
msg.set_param('charset', 'us-ascii')
eq(msg['content-type'], 'text/plain; charset="us-ascii"')
msg.set_type('text/html')
eq(msg['content-type'], 'text/html; charset="us-ascii"')
def test_set_type_on_other_header(self):
msg = Message()
msg['X-Content-Type'] = 'text/plain'
msg.set_type('application/octet-stream', 'X-Content-Type')
self.assertEqual(msg['x-content-type'], 'application/octet-stream')
def test_get_content_type_missing(self):
msg = Message()
self.assertEqual(msg.get_content_type(), 'text/plain')
def test_get_content_type_missing_with_default_type(self):
msg = Message()
msg.set_default_type('message/rfc822')
self.assertEqual(msg.get_content_type(), 'message/rfc822')
def test_get_content_type_from_message_implicit(self):
msg = self._msgobj('msg_30.txt')
self.assertEqual(msg.get_payload(0).get_content_type(),
'message/rfc822')
def test_get_content_type_from_message_explicit(self):
msg = self._msgobj('msg_28.txt')
self.assertEqual(msg.get_payload(0).get_content_type(),
'message/rfc822')
def test_get_content_type_from_message_text_plain_implicit(self):
msg = self._msgobj('msg_03.txt')
self.assertEqual(msg.get_content_type(), 'text/plain')
def test_get_content_type_from_message_text_plain_explicit(self):
msg = self._msgobj('msg_01.txt')
self.assertEqual(msg.get_content_type(), 'text/plain')
def test_get_content_maintype_missing(self):
msg = Message()
self.assertEqual(msg.get_content_maintype(), 'text')
def test_get_content_maintype_missing_with_default_type(self):
msg = Message()
msg.set_default_type('message/rfc822')
self.assertEqual(msg.get_content_maintype(), 'message')
def test_get_content_maintype_from_message_implicit(self):
msg = self._msgobj('msg_30.txt')
self.assertEqual(msg.get_payload(0).get_content_maintype(), 'message')
def test_get_content_maintype_from_message_explicit(self):
msg = self._msgobj('msg_28.txt')
self.assertEqual(msg.get_payload(0).get_content_maintype(), 'message')
def test_get_content_maintype_from_message_text_plain_implicit(self):
msg = self._msgobj('msg_03.txt')
self.assertEqual(msg.get_content_maintype(), 'text')
def test_get_content_maintype_from_message_text_plain_explicit(self):
msg = self._msgobj('msg_01.txt')
self.assertEqual(msg.get_content_maintype(), 'text')
def test_get_content_subtype_missing(self):
msg = Message()
self.assertEqual(msg.get_content_subtype(), 'plain')
def test_get_content_subtype_missing_with_default_type(self):
msg = Message()
msg.set_default_type('message/rfc822')
self.assertEqual(msg.get_content_subtype(), 'rfc822')
def test_get_content_subtype_from_message_implicit(self):
msg = self._msgobj('msg_30.txt')
self.assertEqual(msg.get_payload(0).get_content_subtype(), 'rfc822')
def test_get_content_subtype_from_message_explicit(self):
msg = self._msgobj('msg_28.txt')
self.assertEqual(msg.get_payload(0).get_content_subtype(), 'rfc822')
def test_get_content_subtype_from_message_text_plain_implicit(self):
msg = self._msgobj('msg_03.txt')
self.assertEqual(msg.get_content_subtype(), 'plain')
def test_get_content_subtype_from_message_text_plain_explicit(self):
msg = self._msgobj('msg_01.txt')
self.assertEqual(msg.get_content_subtype(), 'plain')
def test_get_content_maintype_error(self):
msg = Message()
msg['Content-Type'] = 'no-slash-in-this-string'
self.assertEqual(msg.get_content_maintype(), 'text')
def test_get_content_subtype_error(self):
msg = Message()
msg['Content-Type'] = 'no-slash-in-this-string'
self.assertEqual(msg.get_content_subtype(), 'plain')
def test_replace_header(self):
eq = self.assertEqual
msg = Message()
msg.add_header('First', 'One')
msg.add_header('Second', 'Two')
msg.add_header('Third', 'Three')
eq(msg.keys(), ['First', 'Second', 'Third'])
eq(msg.values(), ['One', 'Two', 'Three'])
msg.replace_header('Second', 'Twenty')
eq(msg.keys(), ['First', 'Second', 'Third'])
eq(msg.values(), ['One', 'Twenty', 'Three'])
msg.add_header('First', 'Eleven')
msg.replace_header('First', 'One Hundred')
eq(msg.keys(), ['First', 'Second', 'Third', 'First'])
eq(msg.values(), ['One Hundred', 'Twenty', 'Three', 'Eleven'])
self.assertRaises(KeyError, msg.replace_header, 'Fourth', 'Missing')
def test_get_content_disposition(self):
msg = Message()
self.assertIsNone(msg.get_content_disposition())
msg.add_header('Content-Disposition', 'attachment',
filename='random.avi')
self.assertEqual(msg.get_content_disposition(), 'attachment')
msg.replace_header('Content-Disposition', 'inline')
self.assertEqual(msg.get_content_disposition(), 'inline')
msg.replace_header('Content-Disposition', 'InlinE')
self.assertEqual(msg.get_content_disposition(), 'inline')
# test_defect_handling:test_invalid_chars_in_base64_payload
def test_broken_base64_payload(self):
x = 'AwDp0P7//y6LwKEAcPa/6Q=9'
msg = Message()
msg['content-type'] = 'audio/x-midi'
msg['content-transfer-encoding'] = 'base64'
msg.set_payload(x)
self.assertEqual(msg.get_payload(decode=True),
(b'\x03\x00\xe9\xd0\xfe\xff\xff.\x8b\xc0'
b'\xa1\x00p\xf6\xbf\xe9\x0f'))
self.assertIsInstance(msg.defects[0],
errors.InvalidBase64CharactersDefect)
def test_broken_unicode_payload(self):
# This test improves coverage but is not a compliance test.
# The behavior in this situation is currently undefined by the API.
x = 'this is a br\xf6ken thing to do'
msg = Message()
msg['content-type'] = 'text/plain'
msg['content-transfer-encoding'] = '8bit'
msg.set_payload(x)
self.assertEqual(msg.get_payload(decode=True),
bytes(x, 'raw-unicode-escape'))
def test_questionable_bytes_payload(self):
# This test improves coverage but is not a compliance test,
# since it involves poking inside the black box.
x = 'this is a quéstionable thing to do'.encode('utf-8')
msg = Message()
msg['content-type'] = 'text/plain; charset="utf-8"'
msg['content-transfer-encoding'] = '8bit'
msg._payload = x
self.assertEqual(msg.get_payload(decode=True), x)
# Issue 1078919
def test_ascii_add_header(self):
msg = Message()
msg.add_header('Content-Disposition', 'attachment',
filename='bud.gif')
self.assertEqual('attachment; filename="bud.gif"',
msg['Content-Disposition'])
def test_noascii_add_header(self):
msg = Message()
msg.add_header('Content-Disposition', 'attachment',
filename="Fußballer.ppt")
self.assertEqual(
'attachment; filename*=utf-8\'\'Fu%C3%9Fballer.ppt',
msg['Content-Disposition'])
def test_nonascii_add_header_via_triple(self):
msg = Message()
msg.add_header('Content-Disposition', 'attachment',
filename=('iso-8859-1', '', 'Fußballer.ppt'))
self.assertEqual(
'attachment; filename*=iso-8859-1\'\'Fu%DFballer.ppt',
msg['Content-Disposition'])
def test_ascii_add_header_with_tspecial(self):
msg = Message()
msg.add_header('Content-Disposition', 'attachment',
filename="windows [filename].ppt")
self.assertEqual(
'attachment; filename="windows [filename].ppt"',
msg['Content-Disposition'])
def test_nonascii_add_header_with_tspecial(self):
msg = Message()
msg.add_header('Content-Disposition', 'attachment',
filename="Fußballer [filename].ppt")
self.assertEqual(
"attachment; filename*=utf-8''Fu%C3%9Fballer%20%5Bfilename%5D.ppt",
msg['Content-Disposition'])
def test_binary_quopri_payload(self):
for charset in ('latin-1', 'ascii'):
msg = Message()
msg['content-type'] = 'text/plain; charset=%s' % charset
msg['content-transfer-encoding'] = 'quoted-printable'
msg.set_payload(b'foo=e6=96=87bar')
self.assertEqual(
msg.get_payload(decode=True),
b'foo\xe6\x96\x87bar',
'get_payload returns wrong result with charset %s.' % charset)
def test_binary_base64_payload(self):
for charset in ('latin-1', 'ascii'):
msg = Message()
msg['content-type'] = 'text/plain; charset=%s' % charset
msg['content-transfer-encoding'] = 'base64'
msg.set_payload(b'Zm9v5paHYmFy')
self.assertEqual(
msg.get_payload(decode=True),
b'foo\xe6\x96\x87bar',
'get_payload returns wrong result with charset %s.' % charset)
def test_binary_uuencode_payload(self):
for charset in ('latin-1', 'ascii'):
for encoding in ('x-uuencode', 'uuencode', 'uue', 'x-uue'):
msg = Message()
msg['content-type'] = 'text/plain; charset=%s' % charset
msg['content-transfer-encoding'] = encoding
msg.set_payload(b"begin 666 -\n)9F]OYI:'8F%R\n \nend\n")
self.assertEqual(
msg.get_payload(decode=True),
b'foo\xe6\x96\x87bar',
str(('get_payload returns wrong result ',
'with charset {0} and encoding {1}.')).\
format(charset, encoding))
def test_add_header_with_name_only_param(self):
msg = Message()
msg.add_header('Content-Disposition', 'inline', foo_bar=None)
self.assertEqual("inline; foo-bar", msg['Content-Disposition'])
def test_add_header_with_no_value(self):
msg = Message()
msg.add_header('X-Status', None)
self.assertEqual('', msg['X-Status'])
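    # Hedged summary of the two cases above: a parameter whose value is
    # None is emitted name-only (with underscores mapped to dashes), and a
    # header added with a None value is emitted with an empty value.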
# Issue 5871: reject an attempt to embed a header inside a header value
# (header injection attack).
def test_embeded_header_via_Header_rejected(self):
msg = Message()
msg['Dummy'] = Header('dummy\nX-Injected-Header: test')
self.assertRaises(errors.HeaderParseError, msg.as_string)
def test_embeded_header_via_string_rejected(self):
msg = Message()
msg['Dummy'] = 'dummy\nX-Injected-Header: test'
self.assertRaises(errors.HeaderParseError, msg.as_string)
def test_unicode_header_defaults_to_utf8_encoding(self):
# Issue 14291
m = MIMEText('abc\n')
m['Subject'] = 'É test'
        self.assertEqual(str(m), textwrap.dedent("""\
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
Subject: =?utf-8?q?=C3=89_test?=
abc
"""))
def test_unicode_body_defaults_to_utf8_encoding(self):
# Issue 14291
m = MIMEText('É testabc\n')
        self.assertEqual(str(m), textwrap.dedent("""\
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: base64
w4kgdGVzdGFiYwo=
"""))
# Test the email.encoders module
class TestEncoders(unittest.TestCase):
def test_EncodersEncode_base64(self):
with openfile('PyBanner048.gif', 'rb') as fp:
bindata = fp.read()
mimed = email.mime.image.MIMEImage(bindata)
base64ed = mimed.get_payload()
        # The transfer-encoded body lines should all be <= 76 characters long.
        lines = base64ed.split('\n')
        self.assertLessEqual(max(len(x) for x in lines), 76)
def test_encode_empty_payload(self):
eq = self.assertEqual
msg = Message()
msg.set_charset('us-ascii')
eq(msg['content-transfer-encoding'], '7bit')
def test_default_cte(self):
eq = self.assertEqual
# 7bit data and the default us-ascii _charset
msg = MIMEText('hello world')
eq(msg['content-transfer-encoding'], '7bit')
# Similar, but with 8bit data
msg = MIMEText('hello \xf8 world')
eq(msg['content-transfer-encoding'], 'base64')
# And now with a different charset
msg = MIMEText('hello \xf8 world', _charset='iso-8859-1')
eq(msg['content-transfer-encoding'], 'quoted-printable')
def test_encode7or8bit(self):
# Make sure a charset whose input character set is 8bit but
# whose output character set is 7bit gets a transfer-encoding
# of 7bit.
eq = self.assertEqual
msg = MIMEText('文\n', _charset='euc-jp')
eq(msg['content-transfer-encoding'], '7bit')
eq(msg.as_string(), textwrap.dedent("""\
MIME-Version: 1.0
Content-Type: text/plain; charset="iso-2022-jp"
Content-Transfer-Encoding: 7bit
\x1b$BJ8\x1b(B
"""))
def test_qp_encode_latin1(self):
msg = MIMEText('\xe1\xf6\n', 'text', 'ISO-8859-1')
self.assertEqual(str(msg), textwrap.dedent("""\
MIME-Version: 1.0
Content-Type: text/text; charset="iso-8859-1"
Content-Transfer-Encoding: quoted-printable
=E1=F6
"""))
def test_qp_encode_non_latin1(self):
# Issue 16948
msg = MIMEText('\u017c\n', 'text', 'ISO-8859-2')
self.assertEqual(str(msg), textwrap.dedent("""\
MIME-Version: 1.0
Content-Type: text/text; charset="iso-8859-2"
Content-Transfer-Encoding: quoted-printable
=BF
"""))
# Test long header wrapping
class TestLongHeaders(TestEmailBase):
maxDiff = None
def test_split_long_continuation(self):
eq = self.ndiffAssertEqual
msg = email.message_from_string("""\
Subject: bug demonstration
\t12345678911234567892123456789312345678941234567895123456789612345678971234567898112345678911234567892123456789112345678911234567892123456789
\tmore text
test
""")
sfp = StringIO()
g = Generator(sfp)
g.flatten(msg)
eq(sfp.getvalue(), """\
Subject: bug demonstration
\t12345678911234567892123456789312345678941234567895123456789612345678971234567898112345678911234567892123456789112345678911234567892123456789
\tmore text
test
""")
def test_another_long_almost_unsplittable_header(self):
eq = self.ndiffAssertEqual
hstr = """\
bug demonstration
\t12345678911234567892123456789312345678941234567895123456789612345678971234567898112345678911234567892123456789112345678911234567892123456789
\tmore text"""
h = Header(hstr, continuation_ws='\t')
eq(h.encode(), """\
bug demonstration
\t12345678911234567892123456789312345678941234567895123456789612345678971234567898112345678911234567892123456789112345678911234567892123456789
\tmore text""")
h = Header(hstr.replace('\t', ' '))
eq(h.encode(), """\
bug demonstration
12345678911234567892123456789312345678941234567895123456789612345678971234567898112345678911234567892123456789112345678911234567892123456789
more text""")
def test_long_nonstring(self):
eq = self.ndiffAssertEqual
g = Charset("iso-8859-1")
cz = Charset("iso-8859-2")
utf8 = Charset("utf-8")
g_head = (b'Die Mieter treten hier ein werden mit einem Foerderband '
b'komfortabel den Korridor entlang, an s\xfcdl\xfcndischen '
b'Wandgem\xe4lden vorbei, gegen die rotierenden Klingen '
b'bef\xf6rdert. ')
cz_head = (b'Finan\xe8ni metropole se hroutily pod tlakem jejich '
b'd\xf9vtipu.. ')
utf8_head = ('\u6b63\u78ba\u306b\u8a00\u3046\u3068\u7ffb\u8a33\u306f'
'\u3055\u308c\u3066\u3044\u307e\u305b\u3093\u3002\u4e00'
'\u90e8\u306f\u30c9\u30a4\u30c4\u8a9e\u3067\u3059\u304c'
'\u3001\u3042\u3068\u306f\u3067\u305f\u3089\u3081\u3067'
'\u3059\u3002\u5b9f\u969b\u306b\u306f\u300cWenn ist das '
'Nunstuck git und Slotermeyer? Ja! Beiherhund das Oder '
'die Flipperwaldt gersput.\u300d\u3068\u8a00\u3063\u3066'
'\u3044\u307e\u3059\u3002')
h = Header(g_head, g, header_name='Subject')
h.append(cz_head, cz)
h.append(utf8_head, utf8)
msg = Message()
msg['Subject'] = h
sfp = StringIO()
g = Generator(sfp)
g.flatten(msg)
eq(sfp.getvalue(), """\
Subject: =?iso-8859-1?q?Die_Mieter_treten_hier_ein_werden_mit_einem_Foerderb?=
=?iso-8859-1?q?and_komfortabel_den_Korridor_entlang=2C_an_s=FCdl=FCndischen?=
=?iso-8859-1?q?_Wandgem=E4lden_vorbei=2C_gegen_die_rotierenden_Klingen_bef?=
=?iso-8859-1?q?=F6rdert=2E_?= =?iso-8859-2?q?Finan=E8ni_metropole_se_hrouti?=
=?iso-8859-2?q?ly_pod_tlakem_jejich_d=F9vtipu=2E=2E_?= =?utf-8?b?5q2j56K6?=
=?utf-8?b?44Gr6KiA44GG44Go57+76Kiz44Gv44GV44KM44Gm44GE44G+44Gb44KT44CC5LiA?=
=?utf-8?b?6YOo44Gv44OJ44Kk44OE6Kqe44Gn44GZ44GM44CB44GC44Go44Gv44Gn44Gf44KJ?=
=?utf-8?b?44KB44Gn44GZ44CC5a6f6Zqb44Gr44Gv44CMV2VubiBpc3QgZGFzIE51bnN0dWNr?=
=?utf-8?b?IGdpdCB1bmQgU2xvdGVybWV5ZXI/IEphISBCZWloZXJodW5kIGRhcyBPZGVyIGRp?=
=?utf-8?b?ZSBGbGlwcGVyd2FsZHQgZ2Vyc3B1dC7jgI3jgajoqIDjgaPjgabjgYTjgb7jgZk=?=
=?utf-8?b?44CC?=
""")
eq(h.encode(maxlinelen=76), """\
=?iso-8859-1?q?Die_Mieter_treten_hier_ein_werden_mit_einem_Foerde?=
=?iso-8859-1?q?rband_komfortabel_den_Korridor_entlang=2C_an_s=FCdl=FCndis?=
=?iso-8859-1?q?chen_Wandgem=E4lden_vorbei=2C_gegen_die_rotierenden_Klinge?=
=?iso-8859-1?q?n_bef=F6rdert=2E_?= =?iso-8859-2?q?Finan=E8ni_metropole_se?=
=?iso-8859-2?q?_hroutily_pod_tlakem_jejich_d=F9vtipu=2E=2E_?=
=?utf-8?b?5q2j56K644Gr6KiA44GG44Go57+76Kiz44Gv44GV44KM44Gm44GE44G+44Gb?=
=?utf-8?b?44KT44CC5LiA6YOo44Gv44OJ44Kk44OE6Kqe44Gn44GZ44GM44CB44GC44Go?=
=?utf-8?b?44Gv44Gn44Gf44KJ44KB44Gn44GZ44CC5a6f6Zqb44Gr44Gv44CMV2VubiBp?=
=?utf-8?b?c3QgZGFzIE51bnN0dWNrIGdpdCB1bmQgU2xvdGVybWV5ZXI/IEphISBCZWlo?=
=?utf-8?b?ZXJodW5kIGRhcyBPZGVyIGRpZSBGbGlwcGVyd2FsZHQgZ2Vyc3B1dC7jgI0=?=
=?utf-8?b?44Go6KiA44Gj44Gm44GE44G+44GZ44CC?=""")
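    # For orientation (a hedged summary, not an assertion): each folded
    # line above is one RFC 2047 encoded-word of the form
    #     =?charset?encoding?encoded-text?=
    # with encoding 'q' (quoted-printable) or 'b' (base64); encode()
    # packs as much encoded-text onto each line as maxlinelen permits.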
def test_long_header_encode(self):
eq = self.ndiffAssertEqual
h = Header('wasnipoop; giraffes="very-long-necked-animals"; '
'spooge="yummy"; hippos="gargantuan"; marshmallows="gooey"',
header_name='X-Foobar-Spoink-Defrobnit')
eq(h.encode(), '''\
wasnipoop; giraffes="very-long-necked-animals";
spooge="yummy"; hippos="gargantuan"; marshmallows="gooey"''')
def test_long_header_encode_with_tab_continuation_is_just_a_hint(self):
eq = self.ndiffAssertEqual
h = Header('wasnipoop; giraffes="very-long-necked-animals"; '
'spooge="yummy"; hippos="gargantuan"; marshmallows="gooey"',
header_name='X-Foobar-Spoink-Defrobnit',
continuation_ws='\t')
eq(h.encode(), '''\
wasnipoop; giraffes="very-long-necked-animals";
spooge="yummy"; hippos="gargantuan"; marshmallows="gooey"''')
def test_long_header_encode_with_tab_continuation(self):
eq = self.ndiffAssertEqual
h = Header('wasnipoop; giraffes="very-long-necked-animals";\t'
'spooge="yummy"; hippos="gargantuan"; marshmallows="gooey"',
header_name='X-Foobar-Spoink-Defrobnit',
continuation_ws='\t')
eq(h.encode(), '''\
wasnipoop; giraffes="very-long-necked-animals";
\tspooge="yummy"; hippos="gargantuan"; marshmallows="gooey"''')
def test_header_encode_with_different_output_charset(self):
h = Header('文', 'euc-jp')
self.assertEqual(h.encode(), "=?iso-2022-jp?b?GyRCSjgbKEI=?=")
def test_long_header_encode_with_different_output_charset(self):
h = Header(b'test-ja \xa4\xd8\xc5\xea\xb9\xc6\xa4\xb5\xa4\xec\xa4'
b'\xbf\xa5\xe1\xa1\xbc\xa5\xeb\xa4\xcf\xbb\xca\xb2\xf1\xbc\xd4'
b'\xa4\xce\xbe\xb5\xc7\xa7\xa4\xf2\xc2\xd4\xa4\xc3\xa4\xc6\xa4'
b'\xa4\xa4\xde\xa4\xb9'.decode('euc-jp'), 'euc-jp')
res = """\
=?iso-2022-jp?b?dGVzdC1qYSAbJEIkWEVqOUYkNSRsJD8lYSE8JWskTztKMnE8VCROPjUbKEI=?=
=?iso-2022-jp?b?GyRCRyckckJUJEMkRiQkJF4kORsoQg==?="""
self.assertEqual(h.encode(), res)
def test_header_splitter(self):
eq = self.ndiffAssertEqual
msg = MIMEText('')
        # It'd be great if we could use add_header() here, but that doesn't
        # guarantee the order of the parameters.
msg['X-Foobar-Spoink-Defrobnit'] = (
'wasnipoop; giraffes="very-long-necked-animals"; '
'spooge="yummy"; hippos="gargantuan"; marshmallows="gooey"')
sfp = StringIO()
g = Generator(sfp)
g.flatten(msg)
eq(sfp.getvalue(), '''\
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Foobar-Spoink-Defrobnit: wasnipoop; giraffes="very-long-necked-animals";
spooge="yummy"; hippos="gargantuan"; marshmallows="gooey"
''')
def test_no_semis_header_splitter(self):
eq = self.ndiffAssertEqual
msg = Message()
msg['From'] = 'test@dom.ain'
msg['References'] = SPACE.join('<%d@dom.ain>' % i for i in range(10))
msg.set_payload('Test')
sfp = StringIO()
g = Generator(sfp)
g.flatten(msg)
eq(sfp.getvalue(), """\
From: test@dom.ain
References: <0@dom.ain> <1@dom.ain> <2@dom.ain> <3@dom.ain> <4@dom.ain>
<5@dom.ain> <6@dom.ain> <7@dom.ain> <8@dom.ain> <9@dom.ain>
Test""")
def test_last_split_chunk_does_not_fit(self):
eq = self.ndiffAssertEqual
h = Header('Subject: the first part of this is short, but_the_second'
'_part_does_not_fit_within_maxlinelen_and_thus_should_be_on_a_line'
'_all_by_itself')
eq(h.encode(), """\
Subject: the first part of this is short,
but_the_second_part_does_not_fit_within_maxlinelen_and_thus_should_be_on_a_line_all_by_itself""")
def test_splittable_leading_char_followed_by_overlong_unsplitable(self):
eq = self.ndiffAssertEqual
h = Header(', but_the_second'
'_part_does_not_fit_within_maxlinelen_and_thus_should_be_on_a_line'
'_all_by_itself')
eq(h.encode(), """\
,
but_the_second_part_does_not_fit_within_maxlinelen_and_thus_should_be_on_a_line_all_by_itself""")
def test_multiple_splittable_leading_char_followed_by_overlong_unsplitable(self):
eq = self.ndiffAssertEqual
h = Header(', , but_the_second'
'_part_does_not_fit_within_maxlinelen_and_thus_should_be_on_a_line'
'_all_by_itself')
eq(h.encode(), """\
, ,
but_the_second_part_does_not_fit_within_maxlinelen_and_thus_should_be_on_a_line_all_by_itself""")
def test_trailing_splitable_on_overlong_unsplitable(self):
eq = self.ndiffAssertEqual
h = Header('this_part_does_not_fit_within_maxlinelen_and_thus_should_'
'be_on_a_line_all_by_itself;')
eq(h.encode(), "this_part_does_not_fit_within_maxlinelen_and_thus_should_"
"be_on_a_line_all_by_itself;")
def test_trailing_splitable_on_overlong_unsplitable_with_leading_splitable(self):
eq = self.ndiffAssertEqual
h = Header('; '
'this_part_does_not_fit_within_maxlinelen_and_thus_should_'
'be_on_a_line_all_by_itself; ')
eq(h.encode(), """\
;
this_part_does_not_fit_within_maxlinelen_and_thus_should_be_on_a_line_all_by_itself; """)
def test_long_header_with_multiple_sequential_split_chars(self):
eq = self.ndiffAssertEqual
h = Header('This is a long line that has two whitespaces in a row. '
'This used to cause truncation of the header when folded')
eq(h.encode(), """\
This is a long line that has two whitespaces in a row. This used to cause
truncation of the header when folded""")
def test_splitter_split_on_punctuation_only_if_fws_with_header(self):
eq = self.ndiffAssertEqual
h = Header('thisverylongheaderhas;semicolons;and,commas,but'
'they;arenotlegal;fold,points')
eq(h.encode(), "thisverylongheaderhas;semicolons;and,commas,butthey;"
"arenotlegal;fold,points")
def test_leading_splittable_in_the_middle_just_before_overlong_last_part(self):
eq = self.ndiffAssertEqual
h = Header('this is a test where we need to have more than one line '
'before; our final line that is just too big to fit;; '
'this_part_does_not_fit_within_maxlinelen_and_thus_should_'
'be_on_a_line_all_by_itself;')
eq(h.encode(), """\
this is a test where we need to have more than one line before;
our final line that is just too big to fit;;
this_part_does_not_fit_within_maxlinelen_and_thus_should_be_on_a_line_all_by_itself;""")
def test_overlong_last_part_followed_by_split_point(self):
eq = self.ndiffAssertEqual
h = Header('this_part_does_not_fit_within_maxlinelen_and_thus_should_'
'be_on_a_line_all_by_itself ')
eq(h.encode(), "this_part_does_not_fit_within_maxlinelen_and_thus_"
"should_be_on_a_line_all_by_itself ")
def test_multiline_with_overlong_parts_separated_by_two_split_points(self):
eq = self.ndiffAssertEqual
h = Header('this_is_a__test_where_we_need_to_have_more_than_one_line_'
'before_our_final_line_; ; '
'this_part_does_not_fit_within_maxlinelen_and_thus_should_'
'be_on_a_line_all_by_itself; ')
eq(h.encode(), """\
this_is_a__test_where_we_need_to_have_more_than_one_line_before_our_final_line_;
;
this_part_does_not_fit_within_maxlinelen_and_thus_should_be_on_a_line_all_by_itself; """)
def test_multiline_with_overlong_last_part_followed_by_split_point(self):
eq = self.ndiffAssertEqual
h = Header('this is a test where we need to have more than one line '
'before our final line; ; '
'this_part_does_not_fit_within_maxlinelen_and_thus_should_'
'be_on_a_line_all_by_itself; ')
eq(h.encode(), """\
this is a test where we need to have more than one line before our final line;
;
this_part_does_not_fit_within_maxlinelen_and_thus_should_be_on_a_line_all_by_itself; """)
def test_long_header_with_whitespace_runs(self):
eq = self.ndiffAssertEqual
msg = Message()
msg['From'] = 'test@dom.ain'
msg['References'] = SPACE.join(['<foo@dom.ain> '] * 10)
msg.set_payload('Test')
sfp = StringIO()
g = Generator(sfp)
g.flatten(msg)
eq(sfp.getvalue(), """\
From: test@dom.ain
References: <foo@dom.ain> <foo@dom.ain> <foo@dom.ain> <foo@dom.ain>
<foo@dom.ain> <foo@dom.ain> <foo@dom.ain> <foo@dom.ain>
<foo@dom.ain> <foo@dom.ain>\x20\x20
Test""")
def test_long_run_with_semi_header_splitter(self):
eq = self.ndiffAssertEqual
msg = Message()
msg['From'] = 'test@dom.ain'
msg['References'] = SPACE.join(['<foo@dom.ain>'] * 10) + '; abc'
msg.set_payload('Test')
sfp = StringIO()
g = Generator(sfp)
g.flatten(msg)
eq(sfp.getvalue(), """\
From: test@dom.ain
References: <foo@dom.ain> <foo@dom.ain> <foo@dom.ain> <foo@dom.ain>
<foo@dom.ain> <foo@dom.ain> <foo@dom.ain> <foo@dom.ain> <foo@dom.ain>
<foo@dom.ain>; abc
Test""")
def test_splitter_split_on_punctuation_only_if_fws(self):
eq = self.ndiffAssertEqual
msg = Message()
msg['From'] = 'test@dom.ain'
msg['References'] = ('thisverylongheaderhas;semicolons;and,commas,but'
'they;arenotlegal;fold,points')
msg.set_payload('Test')
sfp = StringIO()
g = Generator(sfp)
g.flatten(msg)
# XXX the space after the header should not be there.
eq(sfp.getvalue(), """\
From: test@dom.ain
References:\x20
thisverylongheaderhas;semicolons;and,commas,butthey;arenotlegal;fold,points
Test""")
def test_no_split_long_header(self):
eq = self.ndiffAssertEqual
hstr = 'References: ' + 'x' * 80
h = Header(hstr)
# These come on two lines because Headers are really field value
# classes and don't really know about their field names.
eq(h.encode(), """\
References:
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx""")
h = Header('x' * 80)
eq(h.encode(), 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx')
def test_splitting_multiple_long_lines(self):
eq = self.ndiffAssertEqual
hstr = """\
from babylon.socal-raves.org (localhost [127.0.0.1]); by babylon.socal-raves.org (Postfix) with ESMTP id B570E51B81; for <mailman-admin@babylon.socal-raves.org>; Sat, 2 Feb 2002 17:00:06 -0800 (PST)
\tfrom babylon.socal-raves.org (localhost [127.0.0.1]); by babylon.socal-raves.org (Postfix) with ESMTP id B570E51B81; for <mailman-admin@babylon.socal-raves.org>; Sat, 2 Feb 2002 17:00:06 -0800 (PST)
\tfrom babylon.socal-raves.org (localhost [127.0.0.1]); by babylon.socal-raves.org (Postfix) with ESMTP id B570E51B81; for <mailman-admin@babylon.socal-raves.org>; Sat, 2 Feb 2002 17:00:06 -0800 (PST)
"""
h = Header(hstr, continuation_ws='\t')
eq(h.encode(), """\
from babylon.socal-raves.org (localhost [127.0.0.1]);
by babylon.socal-raves.org (Postfix) with ESMTP id B570E51B81;
for <mailman-admin@babylon.socal-raves.org>;
Sat, 2 Feb 2002 17:00:06 -0800 (PST)
\tfrom babylon.socal-raves.org (localhost [127.0.0.1]);
by babylon.socal-raves.org (Postfix) with ESMTP id B570E51B81;
for <mailman-admin@babylon.socal-raves.org>;
Sat, 2 Feb 2002 17:00:06 -0800 (PST)
\tfrom babylon.socal-raves.org (localhost [127.0.0.1]);
by babylon.socal-raves.org (Postfix) with ESMTP id B570E51B81;
for <mailman-admin@babylon.socal-raves.org>;
Sat, 2 Feb 2002 17:00:06 -0800 (PST)""")
def test_splitting_first_line_only_is_long(self):
eq = self.ndiffAssertEqual
hstr = """\
from modemcable093.139-201-24.que.mc.videotron.ca ([24.201.139.93] helo=cthulhu.gerg.ca)
\tby kronos.mems-exchange.org with esmtp (Exim 4.05)
\tid 17k4h5-00034i-00
\tfor test@mems-exchange.org; Wed, 28 Aug 2002 11:25:20 -0400"""
h = Header(hstr, maxlinelen=78, header_name='Received',
continuation_ws='\t')
eq(h.encode(), """\
from modemcable093.139-201-24.que.mc.videotron.ca ([24.201.139.93]
helo=cthulhu.gerg.ca)
\tby kronos.mems-exchange.org with esmtp (Exim 4.05)
\tid 17k4h5-00034i-00
\tfor test@mems-exchange.org; Wed, 28 Aug 2002 11:25:20 -0400""")
def test_long_8bit_header(self):
eq = self.ndiffAssertEqual
msg = Message()
h = Header('Britische Regierung gibt', 'iso-8859-1',
header_name='Subject')
h.append('gr\xfcnes Licht f\xfcr Offshore-Windkraftprojekte')
eq(h.encode(maxlinelen=76), """\
=?iso-8859-1?q?Britische_Regierung_gibt_gr=FCnes_Licht_f=FCr_Offs?=
=?iso-8859-1?q?hore-Windkraftprojekte?=""")
msg['Subject'] = h
eq(msg.as_string(maxheaderlen=76), """\
Subject: =?iso-8859-1?q?Britische_Regierung_gibt_gr=FCnes_Licht_f=FCr_Offs?=
=?iso-8859-1?q?hore-Windkraftprojekte?=
""")
eq(msg.as_string(maxheaderlen=0), """\
Subject: =?iso-8859-1?q?Britische_Regierung_gibt_gr=FCnes_Licht_f=FCr_Offshore-Windkraftprojekte?=
""")
def test_long_8bit_header_no_charset(self):
eq = self.ndiffAssertEqual
msg = Message()
header_string = ('Britische Regierung gibt gr\xfcnes Licht '
'f\xfcr Offshore-Windkraftprojekte '
'<a-very-long-address@example.com>')
msg['Reply-To'] = header_string
eq(msg.as_string(maxheaderlen=78), """\
Reply-To: =?utf-8?q?Britische_Regierung_gibt_gr=C3=BCnes_Licht_f=C3=BCr_Offs?=
=?utf-8?q?hore-Windkraftprojekte_=3Ca-very-long-address=40example=2Ecom=3E?=
""")
msg = Message()
msg['Reply-To'] = Header(header_string,
header_name='Reply-To')
eq(msg.as_string(maxheaderlen=78), """\
Reply-To: =?utf-8?q?Britische_Regierung_gibt_gr=C3=BCnes_Licht_f=C3=BCr_Offs?=
=?utf-8?q?hore-Windkraftprojekte_=3Ca-very-long-address=40example=2Ecom=3E?=
""")
def test_long_to_header(self):
eq = self.ndiffAssertEqual
to = ('"Someone Test #A" <someone@eecs.umich.edu>,'
'<someone@eecs.umich.edu>, '
'"Someone Test #B" <someone@umich.edu>, '
'"Someone Test #C" <someone@eecs.umich.edu>, '
'"Someone Test #D" <someone@eecs.umich.edu>')
msg = Message()
msg['To'] = to
eq(msg.as_string(maxheaderlen=78), '''\
To: "Someone Test #A" <someone@eecs.umich.edu>,<someone@eecs.umich.edu>,
"Someone Test #B" <someone@umich.edu>,
"Someone Test #C" <someone@eecs.umich.edu>,
"Someone Test #D" <someone@eecs.umich.edu>
''')
def test_long_line_after_append(self):
eq = self.ndiffAssertEqual
s = 'This is an example of string which has almost the limit of header length.'
h = Header(s)
h.append('Add another line.')
eq(h.encode(maxlinelen=76), """\
This is an example of string which has almost the limit of header length.
Add another line.""")
def test_shorter_line_with_append(self):
eq = self.ndiffAssertEqual
s = 'This is a shorter line.'
h = Header(s)
h.append('Add another sentence. (Surprise?)')
eq(h.encode(),
'This is a shorter line. Add another sentence. (Surprise?)')
def test_long_field_name(self):
eq = self.ndiffAssertEqual
fn = 'X-Very-Very-Very-Long-Header-Name'
gs = ('Die Mieter treten hier ein werden mit einem Foerderband '
'komfortabel den Korridor entlang, an s\xfcdl\xfcndischen '
'Wandgem\xe4lden vorbei, gegen die rotierenden Klingen '
'bef\xf6rdert. ')
h = Header(gs, 'iso-8859-1', header_name=fn)
# BAW: this seems broken because the first line is too long
eq(h.encode(maxlinelen=76), """\
=?iso-8859-1?q?Die_Mieter_treten_hier_e?=
=?iso-8859-1?q?in_werden_mit_einem_Foerderband_komfortabel_den_Korridor_e?=
=?iso-8859-1?q?ntlang=2C_an_s=FCdl=FCndischen_Wandgem=E4lden_vorbei=2C_ge?=
=?iso-8859-1?q?gen_die_rotierenden_Klingen_bef=F6rdert=2E_?=""")
def test_long_received_header(self):
h = ('from FOO.TLD (vizworld.acl.foo.tld [123.452.678.9]) '
'by hrothgar.la.mastaler.com (tmda-ofmipd) with ESMTP; '
'Wed, 05 Mar 2003 18:10:18 -0700')
msg = Message()
msg['Received-1'] = Header(h, continuation_ws='\t')
msg['Received-2'] = h
        # This should be splitting on spaces, not semicolons.
self.ndiffAssertEqual(msg.as_string(maxheaderlen=78), """\
Received-1: from FOO.TLD (vizworld.acl.foo.tld [123.452.678.9]) by
hrothgar.la.mastaler.com (tmda-ofmipd) with ESMTP;
Wed, 05 Mar 2003 18:10:18 -0700
Received-2: from FOO.TLD (vizworld.acl.foo.tld [123.452.678.9]) by
hrothgar.la.mastaler.com (tmda-ofmipd) with ESMTP;
Wed, 05 Mar 2003 18:10:18 -0700
""")
def test_string_headerinst_eq(self):
h = ('<15975.17901.207240.414604@sgigritzmann1.mathematik.'
'tu-muenchen.de> (David Bremner\'s message of '
'"Thu, 6 Mar 2003 13:58:21 +0100")')
msg = Message()
msg['Received-1'] = Header(h, header_name='Received-1',
continuation_ws='\t')
msg['Received-2'] = h
# XXX The space after the ':' should not be there.
self.ndiffAssertEqual(msg.as_string(maxheaderlen=78), """\
Received-1:\x20
<15975.17901.207240.414604@sgigritzmann1.mathematik.tu-muenchen.de> (David
Bremner's message of \"Thu, 6 Mar 2003 13:58:21 +0100\")
Received-2:\x20
<15975.17901.207240.414604@sgigritzmann1.mathematik.tu-muenchen.de> (David
Bremner's message of \"Thu, 6 Mar 2003 13:58:21 +0100\")
""")
def test_long_unbreakable_lines_with_continuation(self):
eq = self.ndiffAssertEqual
msg = Message()
t = """\
iVBORw0KGgoAAAANSUhEUgAAADAAAAAwBAMAAAClLOS0AAAAGFBMVEUAAAAkHiJeRUIcGBi9
locQDQ4zJykFBAXJfWDjAAACYUlEQVR4nF2TQY/jIAyFc6lydlG5x8Nyp1Y69wj1PN2I5gzp"""
msg['Face-1'] = t
msg['Face-2'] = Header(t, header_name='Face-2')
msg['Face-3'] = ' ' + t
        # XXX This splitting is all wrong.  Either the first value line should
        # be snug against the field name, or the space after the header should
        # not be there.
eq(msg.as_string(maxheaderlen=78), """\
Face-1:\x20
iVBORw0KGgoAAAANSUhEUgAAADAAAAAwBAMAAAClLOS0AAAAGFBMVEUAAAAkHiJeRUIcGBi9
locQDQ4zJykFBAXJfWDjAAACYUlEQVR4nF2TQY/jIAyFc6lydlG5x8Nyp1Y69wj1PN2I5gzp
Face-2:\x20
iVBORw0KGgoAAAANSUhEUgAAADAAAAAwBAMAAAClLOS0AAAAGFBMVEUAAAAkHiJeRUIcGBi9
locQDQ4zJykFBAXJfWDjAAACYUlEQVR4nF2TQY/jIAyFc6lydlG5x8Nyp1Y69wj1PN2I5gzp
Face-3:\x20
iVBORw0KGgoAAAANSUhEUgAAADAAAAAwBAMAAAClLOS0AAAAGFBMVEUAAAAkHiJeRUIcGBi9
locQDQ4zJykFBAXJfWDjAAACYUlEQVR4nF2TQY/jIAyFc6lydlG5x8Nyp1Y69wj1PN2I5gzp
""")
def test_another_long_multiline_header(self):
eq = self.ndiffAssertEqual
m = ('Received: from siimage.com '
'([172.25.1.3]) by zima.siliconimage.com with '
'Microsoft SMTPSVC(5.0.2195.4905); '
'Wed, 16 Oct 2002 07:41:11 -0700')
msg = email.message_from_string(m)
eq(msg.as_string(maxheaderlen=78), '''\
Received: from siimage.com ([172.25.1.3]) by zima.siliconimage.com with
Microsoft SMTPSVC(5.0.2195.4905); Wed, 16 Oct 2002 07:41:11 -0700
''')
def test_long_lines_with_different_header(self):
eq = self.ndiffAssertEqual
h = ('List-Unsubscribe: '
'<http://lists.sourceforge.net/lists/listinfo/spamassassin-talk>,'
' <mailto:spamassassin-talk-request@lists.sourceforge.net'
'?subject=unsubscribe>')
msg = Message()
msg['List'] = h
msg['List'] = Header(h, header_name='List')
eq(msg.as_string(maxheaderlen=78), """\
List: List-Unsubscribe:
<http://lists.sourceforge.net/lists/listinfo/spamassassin-talk>,
<mailto:spamassassin-talk-request@lists.sourceforge.net?subject=unsubscribe>
List: List-Unsubscribe:
<http://lists.sourceforge.net/lists/listinfo/spamassassin-talk>,
<mailto:spamassassin-talk-request@lists.sourceforge.net?subject=unsubscribe>
""")
def test_long_rfc2047_header_with_embedded_fws(self):
h = Header(textwrap.dedent("""\
We're going to pretend this header is in a non-ascii character set
\tto see if line wrapping with encoded words and embedded
folding white space works"""),
charset='utf-8',
header_name='Test')
self.assertEqual(h.encode()+'\n', textwrap.dedent("""\
=?utf-8?q?We=27re_going_to_pretend_this_header_is_in_a_non-ascii_chara?=
=?utf-8?q?cter_set?=
=?utf-8?q?_to_see_if_line_wrapping_with_encoded_words_and_embedded?=
=?utf-8?q?_folding_white_space_works?=""")+'\n')
# Test mangling of "From " lines in the body of a message
class TestFromMangling(unittest.TestCase):
def setUp(self):
self.msg = Message()
self.msg['From'] = 'aaa@bbb.org'
self.msg.set_payload("""\
From the desk of A.A.A.:
Blah blah blah
""")
def test_mangled_from(self):
s = StringIO()
g = Generator(s, mangle_from_=True)
g.flatten(self.msg)
self.assertEqual(s.getvalue(), """\
From: aaa@bbb.org
>From the desk of A.A.A.:
Blah blah blah
""")
def test_dont_mangle_from(self):
s = StringIO()
g = Generator(s, mangle_from_=False)
g.flatten(self.msg)
self.assertEqual(s.getvalue(), """\
From: aaa@bbb.org
From the desk of A.A.A.:
Blah blah blah
""")
def test_mangle_from_in_preamble_and_epilog(self):
s = StringIO()
g = Generator(s, mangle_from_=True)
msg = email.message_from_string(textwrap.dedent("""\
From: foo@bar.com
Mime-Version: 1.0
Content-Type: multipart/mixed; boundary=XXX
From somewhere unknown
--XXX
Content-Type: text/plain
foo
--XXX--
From somewhere unknowable
"""))
g.flatten(msg)
self.assertEqual(len([1 for x in s.getvalue().split('\n')
if x.startswith('>From ')]), 2)
def test_mangled_from_with_bad_bytes(self):
source = textwrap.dedent("""\
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 8bit
From: aaa@bbb.org
""").encode('utf-8')
msg = email.message_from_bytes(source + b'From R\xc3\xb6lli\n')
b = BytesIO()
g = BytesGenerator(b, mangle_from_=True)
g.flatten(msg)
self.assertEqual(b.getvalue(), source + b'>From R\xc3\xb6lli\n')
# Test the basic MIMEAudio class
class TestMIMEAudio(unittest.TestCase):
def setUp(self):
with openfile('audiotest.au', 'rb') as fp:
self._audiodata = fp.read()
self._au = MIMEAudio(self._audiodata)
def test_guess_minor_type(self):
self.assertEqual(self._au.get_content_type(), 'audio/basic')
def test_encoding(self):
payload = self._au.get_payload()
self.assertEqual(base64.decodebytes(bytes(payload, 'ascii')),
self._audiodata)
def test_checkSetMinor(self):
au = MIMEAudio(self._audiodata, 'fish')
self.assertEqual(au.get_content_type(), 'audio/fish')
def test_add_header(self):
eq = self.assertEqual
self._au.add_header('Content-Disposition', 'attachment',
filename='audiotest.au')
eq(self._au['content-disposition'],
'attachment; filename="audiotest.au"')
eq(self._au.get_params(header='content-disposition'),
[('attachment', ''), ('filename', 'audiotest.au')])
eq(self._au.get_param('filename', header='content-disposition'),
'audiotest.au')
missing = []
eq(self._au.get_param('attachment', header='content-disposition'), '')
self.assertIs(self._au.get_param('foo', failobj=missing,
header='content-disposition'), missing)
# Try some missing stuff
self.assertIs(self._au.get_param('foobar', missing), missing)
self.assertIs(self._au.get_param('attachment', missing,
header='foobar'), missing)
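# Usage sketch for MIMEAudio (hedged; 'clip.aiff' is a hypothetical file):
#
#     with open('clip.aiff', 'rb') as f:
#         audio = MIMEAudio(f.read())    # subtype guessed from the data
#
# When the subtype cannot be guessed, MIMEAudio raises TypeError, so pass
# _subtype explicitly for unusual formats, as test_checkSetMinor does.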
# Test the basic MIMEImage class
class TestMIMEImage(unittest.TestCase):
def setUp(self):
with openfile('PyBanner048.gif', 'rb') as fp:
self._imgdata = fp.read()
self._im = MIMEImage(self._imgdata)
def test_guess_minor_type(self):
self.assertEqual(self._im.get_content_type(), 'image/gif')
def test_encoding(self):
payload = self._im.get_payload()
self.assertEqual(base64.decodebytes(bytes(payload, 'ascii')),
self._imgdata)
def test_checkSetMinor(self):
im = MIMEImage(self._imgdata, 'fish')
self.assertEqual(im.get_content_type(), 'image/fish')
def test_add_header(self):
eq = self.assertEqual
self._im.add_header('Content-Disposition', 'attachment',
filename='dingusfish.gif')
eq(self._im['content-disposition'],
'attachment; filename="dingusfish.gif"')
eq(self._im.get_params(header='content-disposition'),
[('attachment', ''), ('filename', 'dingusfish.gif')])
eq(self._im.get_param('filename', header='content-disposition'),
'dingusfish.gif')
missing = []
eq(self._im.get_param('attachment', header='content-disposition'), '')
self.assertIs(self._im.get_param('foo', failobj=missing,
header='content-disposition'), missing)
# Try some missing stuff
self.assertIs(self._im.get_param('foobar', missing), missing)
self.assertIs(self._im.get_param('attachment', missing,
header='foobar'), missing)
# Test the basic MIMEApplication class
class TestMIMEApplication(unittest.TestCase):
def test_headers(self):
eq = self.assertEqual
msg = MIMEApplication(b'\xfa\xfb\xfc\xfd\xfe\xff')
eq(msg.get_content_type(), 'application/octet-stream')
eq(msg['content-transfer-encoding'], 'base64')
def test_body(self):
eq = self.assertEqual
bytesdata = b'\xfa\xfb\xfc\xfd\xfe\xff'
msg = MIMEApplication(bytesdata)
        # Whitespace in the CTE-encoded block is irrelevant per the RFC.
eq(msg.get_payload().strip(), '+vv8/f7/')
eq(msg.get_payload(decode=True), bytesdata)
def test_binary_body_with_encode_7or8bit(self):
# Issue 17171.
bytesdata = b'\xfa\xfb\xfc\xfd\xfe\xff'
msg = MIMEApplication(bytesdata, _encoder=encoders.encode_7or8bit)
        # Treated as a string, these bytes decode to invalid code points
        # (rendered as U+FFFD replacement characters).
self.assertEqual(msg.get_payload(), '\uFFFD' * len(bytesdata))
self.assertEqual(msg.get_payload(decode=True), bytesdata)
self.assertEqual(msg['Content-Transfer-Encoding'], '8bit')
s = BytesIO()
g = BytesGenerator(s)
g.flatten(msg)
wireform = s.getvalue()
msg2 = email.message_from_bytes(wireform)
self.assertEqual(msg.get_payload(), '\uFFFD' * len(bytesdata))
self.assertEqual(msg2.get_payload(decode=True), bytesdata)
self.assertEqual(msg2['Content-Transfer-Encoding'], '8bit')
def test_binary_body_with_encode_noop(self):
        # Issue 16564: This does not produce an RFC-valid message, since to be
        # valid it should have a CTE of binary.  But the code below works in
        # Python 2, and is documented as working this way.
bytesdata = b'\xfa\xfb\xfc\xfd\xfe\xff'
msg = MIMEApplication(bytesdata, _encoder=encoders.encode_noop)
        # Treated as a string, these bytes decode to invalid code points
        # (rendered as U+FFFD replacement characters).
self.assertEqual(msg.get_payload(), '\uFFFD' * len(bytesdata))
self.assertEqual(msg.get_payload(decode=True), bytesdata)
s = BytesIO()
g = BytesGenerator(s)
g.flatten(msg)
wireform = s.getvalue()
msg2 = email.message_from_bytes(wireform)
self.assertEqual(msg.get_payload(), '\uFFFD' * len(bytesdata))
self.assertEqual(msg2.get_payload(decode=True), bytesdata)
def test_binary_body_with_encode_quopri(self):
# Issue 14360.
bytesdata = b'\xfa\xfb\xfc\xfd\xfe\xff '
msg = MIMEApplication(bytesdata, _encoder=encoders.encode_quopri)
self.assertEqual(msg.get_payload(), '=FA=FB=FC=FD=FE=FF=20')
self.assertEqual(msg.get_payload(decode=True), bytesdata)
self.assertEqual(msg['Content-Transfer-Encoding'], 'quoted-printable')
s = BytesIO()
g = BytesGenerator(s)
g.flatten(msg)
wireform = s.getvalue()
msg2 = email.message_from_bytes(wireform)
self.assertEqual(msg.get_payload(), '=FA=FB=FC=FD=FE=FF=20')
self.assertEqual(msg2.get_payload(decode=True), bytesdata)
self.assertEqual(msg2['Content-Transfer-Encoding'], 'quoted-printable')
def test_binary_body_with_encode_base64(self):
bytesdata = b'\xfa\xfb\xfc\xfd\xfe\xff'
msg = MIMEApplication(bytesdata, _encoder=encoders.encode_base64)
self.assertEqual(msg.get_payload(), '+vv8/f7/\n')
self.assertEqual(msg.get_payload(decode=True), bytesdata)
s = BytesIO()
g = BytesGenerator(s)
g.flatten(msg)
wireform = s.getvalue()
msg2 = email.message_from_bytes(wireform)
self.assertEqual(msg.get_payload(), '+vv8/f7/\n')
self.assertEqual(msg2.get_payload(decode=True), bytesdata)
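# Hedged recap of the encoder trade-offs the four tests above exercise:
#
#     MIMEApplication(data)                                    # base64 (default)
#     MIMEApplication(data, _encoder=encoders.encode_quopri)   # quoted-printable
#     MIMEApplication(data, _encoder=encoders.encode_noop)     # raw bytes, no CTE set
#     MIMEApplication(data, _encoder=encoders.encode_7or8bit)  # CTE 7bit or 8bit
#
# Only the first two produce printable payloads; the noop and 7or8bit
# forms keep raw bytes and need a BytesGenerator to serialize faithfully.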
# Test the basic MIMEText class
class TestMIMEText(unittest.TestCase):
def setUp(self):
self._msg = MIMEText('hello there')
def test_types(self):
eq = self.assertEqual
eq(self._msg.get_content_type(), 'text/plain')
eq(self._msg.get_param('charset'), 'us-ascii')
missing = []
self.assertIs(self._msg.get_param('foobar', missing), missing)
self.assertIs(self._msg.get_param('charset', missing, header='foobar'),
missing)
def test_payload(self):
self.assertEqual(self._msg.get_payload(), 'hello there')
self.assertFalse(self._msg.is_multipart())
def test_charset(self):
eq = self.assertEqual
msg = MIMEText('hello there', _charset='us-ascii')
eq(msg.get_charset().input_charset, 'us-ascii')
eq(msg['content-type'], 'text/plain; charset="us-ascii"')
# Also accept a Charset instance
msg = MIMEText('hello there', _charset=Charset('utf-8'))
eq(msg.get_charset().input_charset, 'utf-8')
eq(msg['content-type'], 'text/plain; charset="utf-8"')
def test_7bit_input(self):
eq = self.assertEqual
msg = MIMEText('hello there', _charset='us-ascii')
eq(msg.get_charset().input_charset, 'us-ascii')
eq(msg['content-type'], 'text/plain; charset="us-ascii"')
def test_7bit_input_no_charset(self):
eq = self.assertEqual
msg = MIMEText('hello there')
eq(msg.get_charset(), 'us-ascii')
eq(msg['content-type'], 'text/plain; charset="us-ascii"')
self.assertIn('hello there', msg.as_string())
def test_utf8_input(self):
teststr = '\u043a\u0438\u0440\u0438\u043b\u0438\u0446\u0430'
eq = self.assertEqual
msg = MIMEText(teststr, _charset='utf-8')
eq(msg.get_charset().output_charset, 'utf-8')
eq(msg['content-type'], 'text/plain; charset="utf-8"')
eq(msg.get_payload(decode=True), teststr.encode('utf-8'))
@unittest.skip("can't fix because of backward compat in email5, "
"will fix in email6")
def test_utf8_input_no_charset(self):
teststr = '\u043a\u0438\u0440\u0438\u043b\u0438\u0446\u0430'
self.assertRaises(UnicodeEncodeError, MIMEText, teststr)
# Test complicated multipart/* messages
class TestMultipart(TestEmailBase):
def setUp(self):
with openfile('PyBanner048.gif', 'rb') as fp:
data = fp.read()
container = MIMEBase('multipart', 'mixed', boundary='BOUNDARY')
image = MIMEImage(data, name='dingusfish.gif')
image.add_header('content-disposition', 'attachment',
filename='dingusfish.gif')
intro = MIMEText('''\
Hi there,
This is the dingus fish.
''')
container.attach(intro)
container.attach(image)
container['From'] = 'Barry <barry@digicool.com>'
container['To'] = 'Dingus Lovers <cravindogs@cravindogs.com>'
container['Subject'] = 'Here is your dingus fish'
now = 987809702.54848599
timetuple = time.localtime(now)
if timetuple[-1] == 0:
tzsecs = time.timezone
else:
tzsecs = time.altzone
if tzsecs > 0:
sign = '-'
else:
sign = '+'
        tzoffset = ' %s%04d' % (sign, tzsecs // 36)
container['Date'] = time.strftime(
'%a, %d %b %Y %H:%M:%S',
time.localtime(now)) + tzoffset
self._msg = container
self._im = image
self._txt = intro
def test_hierarchy(self):
# convenience
eq = self.assertEqual
raises = self.assertRaises
# tests
m = self._msg
self.assertTrue(m.is_multipart())
eq(m.get_content_type(), 'multipart/mixed')
eq(len(m.get_payload()), 2)
raises(IndexError, m.get_payload, 2)
m0 = m.get_payload(0)
m1 = m.get_payload(1)
self.assertIs(m0, self._txt)
self.assertIs(m1, self._im)
eq(m.get_payload(), [m0, m1])
self.assertFalse(m0.is_multipart())
self.assertFalse(m1.is_multipart())
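    # Worth noting (hedged summary): for a multipart message,
    # get_payload() returns the list of subparts, get_payload(i) indexes
    # into it, and an out-of-range index raises IndexError, exactly as
    # with a plain Python list.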
def test_empty_multipart_idempotent(self):
text = """\
Content-Type: multipart/mixed; boundary="BOUNDARY"
MIME-Version: 1.0
Subject: A subject
To: aperson@dom.ain
From: bperson@dom.ain
--BOUNDARY
--BOUNDARY--
"""
msg = Parser().parsestr(text)
self.ndiffAssertEqual(text, msg.as_string())
def test_no_parts_in_a_multipart_with_none_epilogue(self):
outer = MIMEBase('multipart', 'mixed')
outer['Subject'] = 'A subject'
outer['To'] = 'aperson@dom.ain'
outer['From'] = 'bperson@dom.ain'
outer.set_boundary('BOUNDARY')
self.ndiffAssertEqual(outer.as_string(), '''\
Content-Type: multipart/mixed; boundary="BOUNDARY"
MIME-Version: 1.0
Subject: A subject
To: aperson@dom.ain
From: bperson@dom.ain
--BOUNDARY
--BOUNDARY--
''')
def test_no_parts_in_a_multipart_with_empty_epilogue(self):
outer = MIMEBase('multipart', 'mixed')
outer['Subject'] = 'A subject'
outer['To'] = 'aperson@dom.ain'
outer['From'] = 'bperson@dom.ain'
outer.preamble = ''
outer.epilogue = ''
outer.set_boundary('BOUNDARY')
self.ndiffAssertEqual(outer.as_string(), '''\
Content-Type: multipart/mixed; boundary="BOUNDARY"
MIME-Version: 1.0
Subject: A subject
To: aperson@dom.ain
From: bperson@dom.ain
--BOUNDARY
--BOUNDARY--
''')
def test_one_part_in_a_multipart(self):
eq = self.ndiffAssertEqual
outer = MIMEBase('multipart', 'mixed')
outer['Subject'] = 'A subject'
outer['To'] = 'aperson@dom.ain'
outer['From'] = 'bperson@dom.ain'
outer.set_boundary('BOUNDARY')
msg = MIMEText('hello world')
outer.attach(msg)
eq(outer.as_string(), '''\
Content-Type: multipart/mixed; boundary="BOUNDARY"
MIME-Version: 1.0
Subject: A subject
To: aperson@dom.ain
From: bperson@dom.ain
--BOUNDARY
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
hello world
--BOUNDARY--
''')
def test_seq_parts_in_a_multipart_with_empty_preamble(self):
eq = self.ndiffAssertEqual
outer = MIMEBase('multipart', 'mixed')
outer['Subject'] = 'A subject'
outer['To'] = 'aperson@dom.ain'
outer['From'] = 'bperson@dom.ain'
outer.preamble = ''
msg = MIMEText('hello world')
outer.attach(msg)
outer.set_boundary('BOUNDARY')
eq(outer.as_string(), '''\
Content-Type: multipart/mixed; boundary="BOUNDARY"
MIME-Version: 1.0
Subject: A subject
To: aperson@dom.ain
From: bperson@dom.ain
--BOUNDARY
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
hello world
--BOUNDARY--
''')
def test_seq_parts_in_a_multipart_with_none_preamble(self):
eq = self.ndiffAssertEqual
outer = MIMEBase('multipart', 'mixed')
outer['Subject'] = 'A subject'
outer['To'] = 'aperson@dom.ain'
outer['From'] = 'bperson@dom.ain'
outer.preamble = None
msg = MIMEText('hello world')
outer.attach(msg)
outer.set_boundary('BOUNDARY')
eq(outer.as_string(), '''\
Content-Type: multipart/mixed; boundary="BOUNDARY"
MIME-Version: 1.0
Subject: A subject
To: aperson@dom.ain
From: bperson@dom.ain
--BOUNDARY
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
hello world
--BOUNDARY--
''')
def test_seq_parts_in_a_multipart_with_none_epilogue(self):
eq = self.ndiffAssertEqual
outer = MIMEBase('multipart', 'mixed')
outer['Subject'] = 'A subject'
outer['To'] = 'aperson@dom.ain'
outer['From'] = 'bperson@dom.ain'
outer.epilogue = None
msg = MIMEText('hello world')
outer.attach(msg)
outer.set_boundary('BOUNDARY')
eq(outer.as_string(), '''\
Content-Type: multipart/mixed; boundary="BOUNDARY"
MIME-Version: 1.0
Subject: A subject
To: aperson@dom.ain
From: bperson@dom.ain
--BOUNDARY
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
hello world
--BOUNDARY--
''')
def test_seq_parts_in_a_multipart_with_empty_epilogue(self):
eq = self.ndiffAssertEqual
outer = MIMEBase('multipart', 'mixed')
outer['Subject'] = 'A subject'
outer['To'] = 'aperson@dom.ain'
outer['From'] = 'bperson@dom.ain'
outer.epilogue = ''
msg = MIMEText('hello world')
outer.attach(msg)
outer.set_boundary('BOUNDARY')
eq(outer.as_string(), '''\
Content-Type: multipart/mixed; boundary="BOUNDARY"
MIME-Version: 1.0
Subject: A subject
To: aperson@dom.ain
From: bperson@dom.ain
--BOUNDARY
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
hello world
--BOUNDARY--
''')
def test_seq_parts_in_a_multipart_with_nl_epilogue(self):
eq = self.ndiffAssertEqual
outer = MIMEBase('multipart', 'mixed')
outer['Subject'] = 'A subject'
outer['To'] = 'aperson@dom.ain'
outer['From'] = 'bperson@dom.ain'
outer.epilogue = '\n'
msg = MIMEText('hello world')
outer.attach(msg)
outer.set_boundary('BOUNDARY')
eq(outer.as_string(), '''\
Content-Type: multipart/mixed; boundary="BOUNDARY"
MIME-Version: 1.0
Subject: A subject
To: aperson@dom.ain
From: bperson@dom.ain
--BOUNDARY
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
hello world
--BOUNDARY--
''')
def test_message_external_body(self):
eq = self.assertEqual
msg = self._msgobj('msg_36.txt')
eq(len(msg.get_payload()), 2)
msg1 = msg.get_payload(1)
eq(msg1.get_content_type(), 'multipart/alternative')
eq(len(msg1.get_payload()), 2)
for subpart in msg1.get_payload():
eq(subpart.get_content_type(), 'message/external-body')
eq(len(subpart.get_payload()), 1)
subsubpart = subpart.get_payload(0)
eq(subsubpart.get_content_type(), 'text/plain')
def test_double_boundary(self):
        # msg_37.txt is a multipart that contains two dash-boundary lines in a
        # row.  Our interpretation of RFC 2046 calls for ignoring the second
        # and subsequent boundaries.
msg = self._msgobj('msg_37.txt')
self.assertEqual(len(msg.get_payload()), 3)
def test_nested_inner_contains_outer_boundary(self):
eq = self.ndiffAssertEqual
        # msg_38.txt has an inner part that contains outer boundaries.  My
        # interpretation of RFC 2046 (based on sections 5.1 and 5.1.2) says
        # these are illegal and should be interpreted as unterminated inner
        # parts.
msg = self._msgobj('msg_38.txt')
sfp = StringIO()
iterators._structure(msg, sfp)
eq(sfp.getvalue(), """\
multipart/mixed
multipart/mixed
multipart/alternative
text/plain
text/plain
text/plain
text/plain
""")
def test_nested_with_same_boundary(self):
eq = self.ndiffAssertEqual
        # msg_39.txt is similarly evil in that it has inner parts that use
        # the same boundary as outer parts.  Again, I believe the way this is
        # parsed is closest to the spirit of RFC 2046.
msg = self._msgobj('msg_39.txt')
sfp = StringIO()
iterators._structure(msg, sfp)
eq(sfp.getvalue(), """\
multipart/mixed
multipart/mixed
multipart/alternative
application/octet-stream
application/octet-stream
text/plain
""")
def test_boundary_in_non_multipart(self):
msg = self._msgobj('msg_40.txt')
self.assertEqual(msg.as_string(), '''\
MIME-Version: 1.0
Content-Type: text/html; boundary="--961284236552522269"
----961284236552522269
Content-Type: text/html;
Content-Transfer-Encoding: 7Bit
<html></html>
----961284236552522269--
''')
def test_boundary_with_leading_space(self):
eq = self.assertEqual
msg = email.message_from_string('''\
MIME-Version: 1.0
Content-Type: multipart/mixed; boundary=" XXXX"
-- XXXX
Content-Type: text/plain
-- XXXX
Content-Type: text/plain
-- XXXX--
''')
self.assertTrue(msg.is_multipart())
eq(msg.get_boundary(), ' XXXX')
eq(len(msg.get_payload()), 2)
def test_boundary_without_trailing_newline(self):
m = Parser().parsestr("""\
Content-Type: multipart/mixed; boundary="===============0012394164=="
MIME-Version: 1.0
--===============0012394164==
Content-Type: image/file1.jpg
MIME-Version: 1.0
Content-Transfer-Encoding: base64
YXNkZg==
--===============0012394164==--""")
self.assertEqual(m.get_payload(0).get_payload(), 'YXNkZg==')
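# Boundary-handling sketch (hedged): the boundary is just the 'boundary'
# parameter of the Content-Type header, so these are interchangeable ways
# to read or pin it:
#
#     msg.get_boundary()             # -> the boundary string, or None
#     msg.get_param('boundary')      # the same underlying parameter
#     msg.set_boundary('BOUNDARY')   # rewrites Content-Type in place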
# Test some badly formatted messages
class TestNonConformant(TestEmailBase):
def test_parse_missing_minor_type(self):
eq = self.assertEqual
msg = self._msgobj('msg_14.txt')
eq(msg.get_content_type(), 'text/plain')
eq(msg.get_content_maintype(), 'text')
eq(msg.get_content_subtype(), 'plain')
# test_defect_handling
def test_same_boundary_inner_outer(self):
msg = self._msgobj('msg_15.txt')
# XXX We can probably eventually do better
inner = msg.get_payload(0)
self.assertTrue(hasattr(inner, 'defects'))
self.assertEqual(len(inner.defects), 1)
self.assertIsInstance(inner.defects[0],
errors.StartBoundaryNotFoundDefect)
# test_defect_handling
def test_multipart_no_boundary(self):
msg = self._msgobj('msg_25.txt')
self.assertIsInstance(msg.get_payload(), str)
self.assertEqual(len(msg.defects), 2)
self.assertIsInstance(msg.defects[0],
errors.NoBoundaryInMultipartDefect)
self.assertIsInstance(msg.defects[1],
errors.MultipartInvariantViolationDefect)
multipart_msg = textwrap.dedent("""\
Date: Wed, 14 Nov 2007 12:56:23 GMT
From: foo@bar.invalid
To: foo@bar.invalid
Subject: Content-Transfer-Encoding: base64 and multipart
MIME-Version: 1.0
Content-Type: multipart/mixed;
boundary="===============3344438784458119861=="{}
--===============3344438784458119861==
Content-Type: text/plain
Test message
--===============3344438784458119861==
Content-Type: application/octet-stream
Content-Transfer-Encoding: base64
YWJj
--===============3344438784458119861==--
""")
# test_defect_handling
def test_multipart_invalid_cte(self):
msg = self._str_msg(
self.multipart_msg.format("\nContent-Transfer-Encoding: base64"))
self.assertEqual(len(msg.defects), 1)
self.assertIsInstance(msg.defects[0],
errors.InvalidMultipartContentTransferEncodingDefect)
# test_defect_handling
def test_multipart_no_cte_no_defect(self):
msg = self._str_msg(self.multipart_msg.format(''))
self.assertEqual(len(msg.defects), 0)
# test_defect_handling
def test_multipart_valid_cte_no_defect(self):
for cte in ('7bit', '8bit', 'BINary'):
msg = self._str_msg(
self.multipart_msg.format(
"\nContent-Transfer-Encoding: {}".format(cte)))
self.assertEqual(len(msg.defects), 0)
    # test_headerregistry.TestContentTypeHeader invalid_1 and invalid_2.
def test_invalid_content_type(self):
eq = self.assertEqual
neq = self.ndiffAssertEqual
msg = Message()
        # RFC 2045, §5.2 says an invalid Content-Type yields text/plain
msg['Content-Type'] = 'text'
eq(msg.get_content_maintype(), 'text')
eq(msg.get_content_subtype(), 'plain')
eq(msg.get_content_type(), 'text/plain')
# Clear the old value and try something /really/ invalid
del msg['content-type']
msg['Content-Type'] = 'foo'
eq(msg.get_content_maintype(), 'text')
eq(msg.get_content_subtype(), 'plain')
eq(msg.get_content_type(), 'text/plain')
# Still, make sure that the message is idempotently generated
s = StringIO()
g = Generator(s)
g.flatten(msg)
neq(s.getvalue(), 'Content-Type: foo\n\n')
def test_no_start_boundary(self):
eq = self.ndiffAssertEqual
msg = self._msgobj('msg_31.txt')
eq(msg.get_payload(), """\
--BOUNDARY
Content-Type: text/plain
message 1
--BOUNDARY
Content-Type: text/plain
message 2
--BOUNDARY--
""")
def test_no_separating_blank_line(self):
eq = self.ndiffAssertEqual
msg = self._msgobj('msg_35.txt')
eq(msg.as_string(), """\
From: aperson@dom.ain
To: bperson@dom.ain
Subject: here's something interesting
counter to RFC 2822, there's no separating newline here
""")
# test_defect_handling
def test_lying_multipart(self):
msg = self._msgobj('msg_41.txt')
self.assertTrue(hasattr(msg, 'defects'))
self.assertEqual(len(msg.defects), 2)
self.assertIsInstance(msg.defects[0],
errors.NoBoundaryInMultipartDefect)
self.assertIsInstance(msg.defects[1],
errors.MultipartInvariantViolationDefect)
# test_defect_handling
def test_missing_start_boundary(self):
outer = self._msgobj('msg_42.txt')
# The message structure is:
#
# multipart/mixed
# text/plain
# message/rfc822
# multipart/mixed [*]
#
# [*] This message is missing its start boundary
bad = outer.get_payload(1).get_payload(0)
self.assertEqual(len(bad.defects), 1)
self.assertIsInstance(bad.defects[0],
errors.StartBoundaryNotFoundDefect)
# test_defect_handling
def test_first_line_is_continuation_header(self):
eq = self.assertEqual
m = ' Line 1\nSubject: test\n\nbody'
msg = email.message_from_string(m)
eq(msg.keys(), ['Subject'])
eq(msg.get_payload(), 'body')
eq(len(msg.defects), 1)
self.assertDefectsEqual(msg.defects,
[errors.FirstHeaderLineIsContinuationDefect])
eq(msg.defects[0].line, ' Line 1\n')
# test_defect_handling
def test_missing_header_body_separator(self):
        # Our heuristic, if we see a line that doesn't look like a header (no
        # leading whitespace but also no ':'), is to assume that the blank
        # line that separates the headers from the body is missing, and to
        # stop parsing headers and start parsing the body.
msg = self._str_msg('Subject: test\nnot a header\nTo: abc\n\nb\n')
self.assertEqual(msg.keys(), ['Subject'])
self.assertEqual(msg.get_payload(), 'not a header\nTo: abc\n\nb\n')
self.assertDefectsEqual(msg.defects,
[errors.MissingHeaderBodySeparatorDefect])
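# Design note (informational): the parser is designed never to raise on
# malformed input; it returns a Message and records problems as
# email.errors.MessageDefect instances on msg.defects, which is the
# contract every test_defect_handling case above keys on.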
# Test RFC 2047 header encoding and decoding
class TestRFC2047(TestEmailBase):
def test_rfc2047_multiline(self):
eq = self.assertEqual
s = """Re: =?mac-iceland?q?r=8Aksm=9Arg=8Cs?= baz
foo bar =?mac-iceland?q?r=8Aksm=9Arg=8Cs?="""
dh = decode_header(s)
eq(dh, [
(b'Re: ', None),
(b'r\x8aksm\x9arg\x8cs', 'mac-iceland'),
(b' baz foo bar ', None),
(b'r\x8aksm\x9arg\x8cs', 'mac-iceland')])
header = make_header(dh)
eq(str(header),
'Re: r\xe4ksm\xf6rg\xe5s baz foo bar r\xe4ksm\xf6rg\xe5s')
self.ndiffAssertEqual(header.encode(maxlinelen=76), """\
Re: =?mac-iceland?q?r=8Aksm=9Arg=8Cs?= baz foo bar =?mac-iceland?q?r=8Aksm?=
=?mac-iceland?q?=9Arg=8Cs?=""")
def test_whitespace_keeper_unicode(self):
eq = self.assertEqual
s = '=?ISO-8859-1?Q?Andr=E9?= Pirard <pirard@dom.ain>'
dh = decode_header(s)
eq(dh, [(b'Andr\xe9', 'iso-8859-1'),
(b' Pirard <pirard@dom.ain>', None)])
header = str(make_header(dh))
eq(header, 'Andr\xe9 Pirard <pirard@dom.ain>')
def test_whitespace_keeper_unicode_2(self):
eq = self.assertEqual
s = 'The =?iso-8859-1?b?cXVpY2sgYnJvd24gZm94?= jumped over the =?iso-8859-1?b?bGF6eSBkb2c=?='
dh = decode_header(s)
eq(dh, [(b'The ', None), (b'quick brown fox', 'iso-8859-1'),
(b' jumped over the ', None), (b'lazy dog', 'iso-8859-1')])
hu = str(make_header(dh))
eq(hu, 'The quick brown fox jumped over the lazy dog')
def test_rfc2047_missing_whitespace(self):
s = 'Sm=?ISO-8859-1?B?9g==?=rg=?ISO-8859-1?B?5Q==?=sbord'
dh = decode_header(s)
self.assertEqual(dh, [(b'Sm', None), (b'\xf6', 'iso-8859-1'),
(b'rg', None), (b'\xe5', 'iso-8859-1'),
(b'sbord', None)])
def test_rfc2047_with_whitespace(self):
s = 'Sm =?ISO-8859-1?B?9g==?= rg =?ISO-8859-1?B?5Q==?= sbord'
dh = decode_header(s)
self.assertEqual(dh, [(b'Sm ', None), (b'\xf6', 'iso-8859-1'),
(b' rg ', None), (b'\xe5', 'iso-8859-1'),
(b' sbord', None)])
def test_rfc2047_B_bad_padding(self):
s = '=?iso-8859-1?B?%s?='
data = [ # only test complete bytes
('dm==', b'v'), ('dm=', b'v'), ('dm', b'v'),
('dmk=', b'vi'), ('dmk', b'vi')
]
for q, a in data:
dh = decode_header(s % q)
self.assertEqual(dh, [(a, 'iso-8859-1')])
def test_rfc2047_Q_invalid_digits(self):
        # Issue 10004.
s = '=?iso-8659-1?Q?andr=e9=zz?='
self.assertEqual(decode_header(s),
[(b'andr\xe9=zz', 'iso-8659-1')])
def test_rfc2047_rfc2047_1(self):
# 1st testcase at end of rfc2047
s = '(=?ISO-8859-1?Q?a?=)'
self.assertEqual(decode_header(s),
[(b'(', None), (b'a', 'iso-8859-1'), (b')', None)])
def test_rfc2047_rfc2047_2(self):
# 2nd testcase at end of rfc2047
s = '(=?ISO-8859-1?Q?a?= b)'
self.assertEqual(decode_header(s),
[(b'(', None), (b'a', 'iso-8859-1'), (b' b)', None)])
def test_rfc2047_rfc2047_3(self):
# 3rd testcase at end of rfc2047
s = '(=?ISO-8859-1?Q?a?= =?ISO-8859-1?Q?b?=)'
self.assertEqual(decode_header(s),
[(b'(', None), (b'ab', 'iso-8859-1'), (b')', None)])
def test_rfc2047_rfc2047_4(self):
# 4th testcase at end of rfc2047
s = '(=?ISO-8859-1?Q?a?= =?ISO-8859-1?Q?b?=)'
self.assertEqual(decode_header(s),
[(b'(', None), (b'ab', 'iso-8859-1'), (b')', None)])
def test_rfc2047_rfc2047_5a(self):
# 5th testcase at end of rfc2047 newline is \r\n
s = '(=?ISO-8859-1?Q?a?=\r\n =?ISO-8859-1?Q?b?=)'
self.assertEqual(decode_header(s),
[(b'(', None), (b'ab', 'iso-8859-1'), (b')', None)])
def test_rfc2047_rfc2047_5b(self):
# 5th testcase at end of rfc2047 newline is \n
s = '(=?ISO-8859-1?Q?a?=\n =?ISO-8859-1?Q?b?=)'
self.assertEqual(decode_header(s),
[(b'(', None), (b'ab', 'iso-8859-1'), (b')', None)])
def test_rfc2047_rfc2047_6(self):
# 6th testcase at end of rfc2047
s = '(=?ISO-8859-1?Q?a_b?=)'
self.assertEqual(decode_header(s),
[(b'(', None), (b'a b', 'iso-8859-1'), (b')', None)])
def test_rfc2047_rfc2047_7(self):
# 7th testcase at end of rfc2047
s = '(=?ISO-8859-1?Q?a?= =?ISO-8859-2?Q?_b?=)'
self.assertEqual(decode_header(s),
[(b'(', None), (b'a', 'iso-8859-1'), (b' b', 'iso-8859-2'),
(b')', None)])
self.assertEqual(make_header(decode_header(s)).encode(), s.lower())
self.assertEqual(str(make_header(decode_header(s))), '(a b)')
def test_multiline_header(self):
s = '=?windows-1252?q?=22M=FCller_T=22?=\r\n <T.Mueller@xxx.com>'
self.assertEqual(decode_header(s),
[(b'"M\xfcller T"', 'windows-1252'),
(b'<T.Mueller@xxx.com>', None)])
self.assertEqual(make_header(decode_header(s)).encode(),
''.join(s.splitlines()))
self.assertEqual(str(make_header(decode_header(s))),
'"Müller T" <T.Mueller@xxx.com>')
# Test the MIMEMessage class
class TestMIMEMessage(TestEmailBase):
def setUp(self):
with openfile('msg_11.txt') as fp:
self._text = fp.read()
def test_type_error(self):
self.assertRaises(TypeError, MIMEMessage, 'a plain string')
def test_valid_argument(self):
eq = self.assertEqual
subject = 'A sub-message'
m = Message()
m['Subject'] = subject
r = MIMEMessage(m)
eq(r.get_content_type(), 'message/rfc822')
payload = r.get_payload()
self.assertIsInstance(payload, list)
eq(len(payload), 1)
subpart = payload[0]
self.assertIs(subpart, m)
eq(subpart['subject'], subject)
def test_bad_multipart(self):
msg1 = Message()
msg1['Subject'] = 'subpart 1'
msg2 = Message()
msg2['Subject'] = 'subpart 2'
r = MIMEMessage(msg1)
self.assertRaises(errors.MultipartConversionError, r.attach, msg2)
def test_generate(self):
# First craft the message to be encapsulated
m = Message()
m['Subject'] = 'An enclosed message'
m.set_payload('Here is the body of the message.\n')
r = MIMEMessage(m)
r['Subject'] = 'The enclosing message'
s = StringIO()
g = Generator(s)
g.flatten(r)
self.assertEqual(s.getvalue(), """\
Content-Type: message/rfc822
MIME-Version: 1.0
Subject: The enclosing message
Subject: An enclosed message
Here is the body of the message.
""")
def test_parse_message_rfc822(self):
eq = self.assertEqual
msg = self._msgobj('msg_11.txt')
eq(msg.get_content_type(), 'message/rfc822')
payload = msg.get_payload()
self.assertIsInstance(payload, list)
eq(len(payload), 1)
submsg = payload[0]
self.assertIsInstance(submsg, Message)
eq(submsg['subject'], 'An enclosed message')
eq(submsg.get_payload(), 'Here is the body of the message.\n')
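    # Hedged note: for message/rfc822 the payload is a list holding
    # exactly one Message object, so get_payload(0) is the enclosed
    # message itself, not a serialized copy.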
def test_dsn(self):
eq = self.assertEqual
# msg 16 is a Delivery Status Notification, see RFC 1894
msg = self._msgobj('msg_16.txt')
eq(msg.get_content_type(), 'multipart/report')
self.assertTrue(msg.is_multipart())
eq(len(msg.get_payload()), 3)
# Subpart 1 is a text/plain, human readable section
subpart = msg.get_payload(0)
eq(subpart.get_content_type(), 'text/plain')
eq(subpart.get_payload(), """\
This report relates to a message you sent with the following header fields:
Message-id: <002001c144a6$8752e060$56104586@oxy.edu>
Date: Sun, 23 Sep 2001 20:10:55 -0700
From: "Ian T. Henry" <henryi@oxy.edu>
To: SoCal Raves <scr@socal-raves.org>
Subject: [scr] yeah for Ians!!
Your message cannot be delivered to the following recipients:
Recipient address: jangel1@cougar.noc.ucla.edu
Reason: recipient reached disk quota
""")
# Subpart 2 contains the machine parsable DSN information. It
# consists of two blocks of headers, represented by two nested Message
# objects.
subpart = msg.get_payload(1)
eq(subpart.get_content_type(), 'message/delivery-status')
eq(len(subpart.get_payload()), 2)
# message/delivery-status should treat each block as a bunch of
# headers, i.e. a bunch of Message objects.
dsn1 = subpart.get_payload(0)
self.assertIsInstance(dsn1, Message)
eq(dsn1['original-envelope-id'], '0GK500B4HD0888@cougar.noc.ucla.edu')
eq(dsn1.get_param('dns', header='reporting-mta'), '')
# Try a missing one <wink>
eq(dsn1.get_param('nsd', header='reporting-mta'), None)
dsn2 = subpart.get_payload(1)
self.assertIsInstance(dsn2, Message)
eq(dsn2['action'], 'failed')
eq(dsn2.get_params(header='original-recipient'),
[('rfc822', ''), ('jangel1@cougar.noc.ucla.edu', '')])
eq(dsn2.get_param('rfc822', header='final-recipient'), '')
# Subpart 3 is the original message
subpart = msg.get_payload(2)
eq(subpart.get_content_type(), 'message/rfc822')
payload = subpart.get_payload()
self.assertIsInstance(payload, list)
eq(len(payload), 1)
subsubpart = payload[0]
self.assertIsInstance(subsubpart, Message)
eq(subsubpart.get_content_type(), 'text/plain')
eq(subsubpart['message-id'],
'<002001c144a6$8752e060$56104586@oxy.edu>')
def test_epilogue(self):
eq = self.ndiffAssertEqual
with openfile('msg_21.txt') as fp:
text = fp.read()
msg = Message()
msg['From'] = 'aperson@dom.ain'
msg['To'] = 'bperson@dom.ain'
msg['Subject'] = 'Test'
msg.preamble = 'MIME message'
msg.epilogue = 'End of MIME message\n'
msg1 = MIMEText('One')
msg2 = MIMEText('Two')
msg.add_header('Content-Type', 'multipart/mixed', boundary='BOUNDARY')
msg.attach(msg1)
msg.attach(msg2)
sfp = StringIO()
g = Generator(sfp)
g.flatten(msg)
eq(sfp.getvalue(), text)
def test_no_nl_preamble(self):
eq = self.ndiffAssertEqual
msg = Message()
msg['From'] = 'aperson@dom.ain'
msg['To'] = 'bperson@dom.ain'
msg['Subject'] = 'Test'
msg.preamble = 'MIME message'
msg.epilogue = ''
msg1 = MIMEText('One')
msg2 = MIMEText('Two')
msg.add_header('Content-Type', 'multipart/mixed', boundary='BOUNDARY')
msg.attach(msg1)
msg.attach(msg2)
eq(msg.as_string(), """\
From: aperson@dom.ain
To: bperson@dom.ain
Subject: Test
Content-Type: multipart/mixed; boundary="BOUNDARY"
MIME message
--BOUNDARY
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
One
--BOUNDARY
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
Two
--BOUNDARY--
""")
def test_default_type(self):
eq = self.assertEqual
with openfile('msg_30.txt') as fp:
msg = email.message_from_file(fp)
container1 = msg.get_payload(0)
eq(container1.get_default_type(), 'message/rfc822')
eq(container1.get_content_type(), 'message/rfc822')
container2 = msg.get_payload(1)
eq(container2.get_default_type(), 'message/rfc822')
eq(container2.get_content_type(), 'message/rfc822')
container1a = container1.get_payload(0)
eq(container1a.get_default_type(), 'text/plain')
eq(container1a.get_content_type(), 'text/plain')
container2a = container2.get_payload(0)
eq(container2a.get_default_type(), 'text/plain')
eq(container2a.get_content_type(), 'text/plain')
def test_default_type_with_explicit_container_type(self):
eq = self.assertEqual
with openfile('msg_28.txt') as fp:
msg = email.message_from_file(fp)
container1 = msg.get_payload(0)
eq(container1.get_default_type(), 'message/rfc822')
eq(container1.get_content_type(), 'message/rfc822')
container2 = msg.get_payload(1)
eq(container2.get_default_type(), 'message/rfc822')
eq(container2.get_content_type(), 'message/rfc822')
container1a = container1.get_payload(0)
eq(container1a.get_default_type(), 'text/plain')
eq(container1a.get_content_type(), 'text/plain')
container2a = container2.get_payload(0)
eq(container2a.get_default_type(), 'text/plain')
eq(container2a.get_content_type(), 'text/plain')
def test_default_type_non_parsed(self):
eq = self.assertEqual
neq = self.ndiffAssertEqual
# Set up container
container = MIMEMultipart('digest', 'BOUNDARY')
container.epilogue = ''
# Set up subparts
subpart1a = MIMEText('message 1\n')
subpart2a = MIMEText('message 2\n')
subpart1 = MIMEMessage(subpart1a)
subpart2 = MIMEMessage(subpart2a)
container.attach(subpart1)
container.attach(subpart2)
eq(subpart1.get_content_type(), 'message/rfc822')
eq(subpart1.get_default_type(), 'message/rfc822')
eq(subpart2.get_content_type(), 'message/rfc822')
eq(subpart2.get_default_type(), 'message/rfc822')
neq(container.as_string(0), '''\
Content-Type: multipart/digest; boundary="BOUNDARY"
MIME-Version: 1.0
--BOUNDARY
Content-Type: message/rfc822
MIME-Version: 1.0
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
message 1
--BOUNDARY
Content-Type: message/rfc822
MIME-Version: 1.0
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
message 2
--BOUNDARY--
''')
del subpart1['content-type']
del subpart1['mime-version']
del subpart2['content-type']
del subpart2['mime-version']
eq(subpart1.get_content_type(), 'message/rfc822')
eq(subpart1.get_default_type(), 'message/rfc822')
eq(subpart2.get_content_type(), 'message/rfc822')
eq(subpart2.get_default_type(), 'message/rfc822')
neq(container.as_string(0), '''\
Content-Type: multipart/digest; boundary="BOUNDARY"
MIME-Version: 1.0
--BOUNDARY
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
message 1
--BOUNDARY
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
message 2
--BOUNDARY--
''')
def test_mime_attachments_in_constructor(self):
eq = self.assertEqual
text1 = MIMEText('')
text2 = MIMEText('')
msg = MIMEMultipart(_subparts=(text1, text2))
eq(len(msg.get_payload()), 2)
eq(msg.get_payload(0), text1)
eq(msg.get_payload(1), text2)
def test_default_multipart_constructor(self):
msg = MIMEMultipart()
self.assertTrue(msg.is_multipart())
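# A minimal sketch (not part of the suite) of the MIMEMessage wrapping
# pattern the tests above exercise; subject and body are illustrative.
def _demo_mime_message_container():
    from email.message import Message
    from email.mime.message import MIMEMessage
    inner = Message()
    inner['Subject'] = 'An enclosed message'
    inner.set_payload('Hello\n')
    outer = MIMEMessage(inner)  # becomes Content-Type: message/rfc822
    assert outer.get_payload(0) is inner  # payload is a one-element list
    return outer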
# A general test of parser->model->generator idempotency. IOW, read a message
# in, parse it into a message object tree, then without touching the tree,
# regenerate the plain text. The original text and the transformed text
# should be identical. Note that we ignore the Unix-From since that may
# contain a changed date.
class TestIdempotent(TestEmailBase):
linesep = '\n'
def _msgobj(self, filename):
with openfile(filename) as fp:
data = fp.read()
msg = email.message_from_string(data)
return msg, data
def _idempotent(self, msg, text, unixfrom=False):
eq = self.ndiffAssertEqual
s = StringIO()
g = Generator(s, maxheaderlen=0)
g.flatten(msg, unixfrom=unixfrom)
eq(text, s.getvalue())
def test_parse_text_message(self):
eq = self.assertEqual
msg, text = self._msgobj('msg_01.txt')
eq(msg.get_content_type(), 'text/plain')
eq(msg.get_content_maintype(), 'text')
eq(msg.get_content_subtype(), 'plain')
eq(msg.get_params()[1], ('charset', 'us-ascii'))
eq(msg.get_param('charset'), 'us-ascii')
eq(msg.preamble, None)
eq(msg.epilogue, None)
self._idempotent(msg, text)
def test_parse_untyped_message(self):
eq = self.assertEqual
msg, text = self._msgobj('msg_03.txt')
eq(msg.get_content_type(), 'text/plain')
eq(msg.get_params(), None)
eq(msg.get_param('charset'), None)
self._idempotent(msg, text)
def test_simple_multipart(self):
msg, text = self._msgobj('msg_04.txt')
self._idempotent(msg, text)
def test_MIME_digest(self):
msg, text = self._msgobj('msg_02.txt')
self._idempotent(msg, text)
def test_long_header(self):
msg, text = self._msgobj('msg_27.txt')
self._idempotent(msg, text)
def test_MIME_digest_with_part_headers(self):
msg, text = self._msgobj('msg_28.txt')
self._idempotent(msg, text)
def test_mixed_with_image(self):
msg, text = self._msgobj('msg_06.txt')
self._idempotent(msg, text)
def test_multipart_report(self):
msg, text = self._msgobj('msg_05.txt')
self._idempotent(msg, text)
def test_dsn(self):
msg, text = self._msgobj('msg_16.txt')
self._idempotent(msg, text)
def test_preamble_epilogue(self):
msg, text = self._msgobj('msg_21.txt')
self._idempotent(msg, text)
def test_multipart_one_part(self):
msg, text = self._msgobj('msg_23.txt')
self._idempotent(msg, text)
def test_multipart_no_parts(self):
msg, text = self._msgobj('msg_24.txt')
self._idempotent(msg, text)
def test_no_start_boundary(self):
msg, text = self._msgobj('msg_31.txt')
self._idempotent(msg, text)
def test_rfc2231_charset(self):
msg, text = self._msgobj('msg_32.txt')
self._idempotent(msg, text)
def test_more_rfc2231_parameters(self):
msg, text = self._msgobj('msg_33.txt')
self._idempotent(msg, text)
def test_text_plain_in_a_multipart_digest(self):
msg, text = self._msgobj('msg_34.txt')
self._idempotent(msg, text)
def test_nested_multipart_mixeds(self):
msg, text = self._msgobj('msg_12a.txt')
self._idempotent(msg, text)
def test_message_external_body_idempotent(self):
msg, text = self._msgobj('msg_36.txt')
self._idempotent(msg, text)
def test_message_delivery_status(self):
msg, text = self._msgobj('msg_43.txt')
self._idempotent(msg, text, unixfrom=True)
def test_message_signed_idempotent(self):
msg, text = self._msgobj('msg_45.txt')
self._idempotent(msg, text)
def test_content_type(self):
eq = self.assertEqual
# Get a message object and reset the seek pointer for other tests
msg, text = self._msgobj('msg_05.txt')
eq(msg.get_content_type(), 'multipart/report')
# Test the Content-Type: parameters
params = {}
for pk, pv in msg.get_params():
params[pk] = pv
eq(params['report-type'], 'delivery-status')
eq(params['boundary'], 'D1690A7AC1.996856090/mail.example.com')
eq(msg.preamble, 'This is a MIME-encapsulated message.' + self.linesep)
eq(msg.epilogue, self.linesep)
eq(len(msg.get_payload()), 3)
# Make sure the subparts are what we expect
msg1 = msg.get_payload(0)
eq(msg1.get_content_type(), 'text/plain')
eq(msg1.get_payload(), 'Yadda yadda yadda' + self.linesep)
msg2 = msg.get_payload(1)
eq(msg2.get_content_type(), 'text/plain')
eq(msg2.get_payload(), 'Yadda yadda yadda' + self.linesep)
msg3 = msg.get_payload(2)
eq(msg3.get_content_type(), 'message/rfc822')
self.assertIsInstance(msg3, Message)
payload = msg3.get_payload()
self.assertIsInstance(payload, list)
eq(len(payload), 1)
msg4 = payload[0]
self.assertIsInstance(msg4, Message)
eq(msg4.get_payload(), 'Yadda yadda yadda' + self.linesep)
def test_parser(self):
eq = self.assertEqual
msg, text = self._msgobj('msg_06.txt')
# Check some of the outer headers
eq(msg.get_content_type(), 'message/rfc822')
# Make sure the payload is a list of exactly one sub-Message, and that
# that submessage has a type of text/plain
payload = msg.get_payload()
self.assertIsInstance(payload, list)
eq(len(payload), 1)
msg1 = payload[0]
self.assertIsInstance(msg1, Message)
eq(msg1.get_content_type(), 'text/plain')
self.assertIsInstance(msg1.get_payload(), str)
eq(msg1.get_payload(), self.linesep)
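# A standalone sketch of the idempotency check TestIdempotent performs:
# parse, flatten with maxheaderlen=0 (no header refolding), and compare.
# Not part of the suite; `text` is any well-formed message string.
def _demo_idempotent_roundtrip(text):
    from io import StringIO
    from email import message_from_string
    from email.generator import Generator
    msg = message_from_string(text)
    out = StringIO()
    Generator(out, maxheaderlen=0).flatten(msg)
    return out.getvalue() == text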
# Test various other bits of the package's functionality
class TestMiscellaneous(TestEmailBase):
def test_message_from_string(self):
with openfile('msg_01.txt') as fp:
text = fp.read()
msg = email.message_from_string(text)
s = StringIO()
# Don't wrap/continue long headers since we're trying to test
# idempotency.
g = Generator(s, maxheaderlen=0)
g.flatten(msg)
self.assertEqual(text, s.getvalue())
def test_message_from_file(self):
with openfile('msg_01.txt') as fp:
text = fp.read()
fp.seek(0)
msg = email.message_from_file(fp)
s = StringIO()
# Don't wrap/continue long headers since we're trying to test
# idempotency.
g = Generator(s, maxheaderlen=0)
g.flatten(msg)
self.assertEqual(text, s.getvalue())
def test_message_from_string_with_class(self):
with openfile('msg_01.txt') as fp:
text = fp.read()
# Create a subclass
class MyMessage(Message):
pass
msg = email.message_from_string(text, MyMessage)
self.assertIsInstance(msg, MyMessage)
# Try something more complicated
with openfile('msg_02.txt') as fp:
text = fp.read()
msg = email.message_from_string(text, MyMessage)
for subpart in msg.walk():
self.assertIsInstance(subpart, MyMessage)
def test_message_from_file_with_class(self):
# Create a subclass
class MyMessage(Message):
pass
with openfile('msg_01.txt') as fp:
msg = email.message_from_file(fp, MyMessage)
self.assertIsInstance(msg, MyMessage)
# Try something more complicated
with openfile('msg_02.txt') as fp:
msg = email.message_from_file(fp, MyMessage)
for subpart in msg.walk():
self.assertIsInstance(subpart, MyMessage)
def test_custom_message_does_not_require_arguments(self):
class MyMessage(Message):
def __init__(self):
super().__init__()
msg = self._str_msg("Subject: test\n\ntest", MyMessage)
self.assertIsInstance(msg, MyMessage)
def test__all__(self):
module = __import__('email')
self.assertEqual(sorted(module.__all__), [
'base64mime', 'charset', 'encoders', 'errors', 'feedparser',
'generator', 'header', 'iterators', 'message',
'message_from_binary_file', 'message_from_bytes',
'message_from_file', 'message_from_string', 'mime', 'parser',
'quoprimime', 'utils',
])
def test_formatdate(self):
now = time.time()
self.assertEqual(utils.parsedate(utils.formatdate(now))[:6],
time.gmtime(now)[:6])
def test_formatdate_localtime(self):
now = time.time()
self.assertEqual(
utils.parsedate(utils.formatdate(now, localtime=True))[:6],
time.localtime(now)[:6])
def test_formatdate_usegmt(self):
now = time.time()
self.assertEqual(
utils.formatdate(now, localtime=False),
time.strftime('%a, %d %b %Y %H:%M:%S -0000', time.gmtime(now)))
self.assertEqual(
utils.formatdate(now, localtime=False, usegmt=True),
time.strftime('%a, %d %b %Y %H:%M:%S GMT', time.gmtime(now)))
# parsedate and parsedate_tz will become deprecated interfaces someday
def test_parsedate_returns_None_for_invalid_strings(self):
self.assertIsNone(utils.parsedate(''))
self.assertIsNone(utils.parsedate_tz(''))
self.assertIsNone(utils.parsedate('0'))
self.assertIsNone(utils.parsedate_tz('0'))
self.assertIsNone(utils.parsedate('A Complete Waste of Time'))
self.assertIsNone(utils.parsedate_tz('A Complete Waste of Time'))
# Not a part of the spec, but this has historically worked:
self.assertIsNone(utils.parsedate(None))
self.assertIsNone(utils.parsedate_tz(None))
def test_parsedate_compact(self):
# The FWS after the comma is optional
self.assertEqual(utils.parsedate('Wed,3 Apr 2002 14:58:26 +0800'),
utils.parsedate('Wed, 3 Apr 2002 14:58:26 +0800'))
def test_parsedate_no_dayofweek(self):
eq = self.assertEqual
eq(utils.parsedate_tz('25 Feb 2003 13:47:26 -0800'),
(2003, 2, 25, 13, 47, 26, 0, 1, -1, -28800))
def test_parsedate_compact_no_dayofweek(self):
eq = self.assertEqual
eq(utils.parsedate_tz('5 Feb 2003 13:47:26 -0800'),
(2003, 2, 5, 13, 47, 26, 0, 1, -1, -28800))
def test_parsedate_no_space_before_positive_offset(self):
self.assertEqual(utils.parsedate_tz('Wed, 3 Apr 2002 14:58:26+0800'),
(2002, 4, 3, 14, 58, 26, 0, 1, -1, 28800))
def test_parsedate_no_space_before_negative_offset(self):
# Issue 1155362: we already handled '+' for this case.
self.assertEqual(utils.parsedate_tz('Wed, 3 Apr 2002 14:58:26-0800'),
(2002, 4, 3, 14, 58, 26, 0, 1, -1, -28800))
def test_parsedate_accepts_time_with_dots(self):
eq = self.assertEqual
eq(utils.parsedate_tz('5 Feb 2003 13.47.26 -0800'),
(2003, 2, 5, 13, 47, 26, 0, 1, -1, -28800))
eq(utils.parsedate_tz('5 Feb 2003 13.47 -0800'),
(2003, 2, 5, 13, 47, 0, 0, 1, -1, -28800))
def test_parsedate_acceptable_to_time_functions(self):
eq = self.assertEqual
timetup = utils.parsedate('5 Feb 2003 13:47:26 -0800')
t = int(time.mktime(timetup))
eq(time.localtime(t)[:6], timetup[:6])
eq(int(time.strftime('%Y', timetup)), 2003)
timetup = utils.parsedate_tz('5 Feb 2003 13:47:26 -0800')
t = int(time.mktime(timetup[:9]))
eq(time.localtime(t)[:6], timetup[:6])
eq(int(time.strftime('%Y', timetup[:9])), 2003)
def test_mktime_tz(self):
self.assertEqual(utils.mktime_tz((1970, 1, 1, 0, 0, 0,
-1, -1, -1, 0)), 0)
self.assertEqual(utils.mktime_tz((1970, 1, 1, 0, 0, 0,
-1, -1, -1, 1234)), -1234)
def test_parsedate_y2k(self):
"""Test for parsing a date with a two-digit year.
Parsing a date with a two-digit year should return the correct
four-digit year. RFC822 allows two-digit years, but RFC2822 (which
obsoletes RFC822) requires four-digit years.
"""
self.assertEqual(utils.parsedate_tz('25 Feb 03 13:47:26 -0800'),
utils.parsedate_tz('25 Feb 2003 13:47:26 -0800'))
self.assertEqual(utils.parsedate_tz('25 Feb 71 13:47:26 -0800'),
utils.parsedate_tz('25 Feb 1971 13:47:26 -0800'))
def test_parseaddr_empty(self):
self.assertEqual(utils.parseaddr('<>'), ('', ''))
self.assertEqual(utils.formataddr(utils.parseaddr('<>')), '')
def test_noquote_dump(self):
self.assertEqual(
utils.formataddr(('A Silly Person', 'person@dom.ain')),
'A Silly Person <person@dom.ain>')
def test_escape_dump(self):
self.assertEqual(
utils.formataddr(('A (Very) Silly Person', 'person@dom.ain')),
r'"A (Very) Silly Person" <person@dom.ain>')
self.assertEqual(
utils.parseaddr(r'"A \(Very\) Silly Person" <person@dom.ain>'),
('A (Very) Silly Person', 'person@dom.ain'))
a = r'A \(Special\) Person'
b = 'person@dom.ain'
self.assertEqual(utils.parseaddr(utils.formataddr((a, b))), (a, b))
def test_escape_backslashes(self):
self.assertEqual(
utils.formataddr((r'Arthur \Backslash\ Foobar', 'person@dom.ain')),
r'"Arthur \\Backslash\\ Foobar" <person@dom.ain>')
a = r'Arthur \Backslash\ Foobar'
b = 'person@dom.ain'
self.assertEqual(utils.parseaddr(utils.formataddr((a, b))), (a, b))
def test_quotes_unicode_names(self):
# issue 1690608. email.utils.formataddr() should be rfc2047 aware.
name = "H\u00e4ns W\u00fcrst"
addr = 'person@dom.ain'
utf8_base64 = "=?utf-8?b?SMOkbnMgV8O8cnN0?= <person@dom.ain>"
latin1_quopri = "=?iso-8859-1?q?H=E4ns_W=FCrst?= <person@dom.ain>"
self.assertEqual(utils.formataddr((name, addr)), utf8_base64)
self.assertEqual(utils.formataddr((name, addr), 'iso-8859-1'),
latin1_quopri)
def test_accepts_any_charset_like_object(self):
# issue 1690608. email.utils.formataddr() should be rfc2047 aware.
name = "H\u00e4ns W\u00fcrst"
addr = 'person@dom.ain'
utf8_base64 = "=?utf-8?b?SMOkbnMgV8O8cnN0?= <person@dom.ain>"
foobar = "FOOBAR"
class CharsetMock:
def header_encode(self, string):
return foobar
mock = CharsetMock()
mock_expected = "%s <%s>" % (foobar, addr)
self.assertEqual(utils.formataddr((name, addr), mock), mock_expected)
self.assertEqual(utils.formataddr((name, addr), Charset('utf-8')),
utf8_base64)
def test_invalid_charset_like_object_raises_error(self):
# issue 1690608. email.utils.formataddr() should be rfc2047 aware.
name = "H\u00e4ns W\u00fcrst"
addr = 'person@dom.ain'
# A object without a header_encode method:
bad_charset = object()
self.assertRaises(AttributeError, utils.formataddr, (name, addr),
bad_charset)
def test_unicode_address_raises_error(self):
# issue 1690608. email.utils.formataddr() should be rfc2047 aware.
addr = 'pers\u00f6n@dom.in'
self.assertRaises(UnicodeError, utils.formataddr, (None, addr))
self.assertRaises(UnicodeError, utils.formataddr, ("Name", addr))
def test_name_with_dot(self):
x = 'John X. Doe <jxd@example.com>'
y = '"John X. Doe" <jxd@example.com>'
a, b = ('John X. Doe', 'jxd@example.com')
self.assertEqual(utils.parseaddr(x), (a, b))
self.assertEqual(utils.parseaddr(y), (a, b))
# formataddr() quotes the name if there's a dot in it
self.assertEqual(utils.formataddr((a, b)), y)
def test_parseaddr_preserves_quoted_pairs_in_addresses(self):
# issue 10005. Note that in the third test the second pair of
# backslashes is not actually a quoted pair because it is not inside a
# comment or quoted string: the address being parsed has a quoted
# string containing a quoted backslash, followed by 'example' and two
# backslashes, followed by another quoted string containing a space and
# the word 'example'. parseaddr copies those two backslashes
# literally. Per rfc5322 this is not technically correct since a \ may
# not appear in an address outside of a quoted string. It is probably
# a sensible Postel interpretation, though.
eq = self.assertEqual
eq(utils.parseaddr('""example" example"@example.com'),
('', '""example" example"@example.com'))
eq(utils.parseaddr('"\\"example\\" example"@example.com'),
('', '"\\"example\\" example"@example.com'))
eq(utils.parseaddr('"\\\\"example\\\\" example"@example.com'),
('', '"\\\\"example\\\\" example"@example.com'))
def test_parseaddr_preserves_spaces_in_local_part(self):
# issue 9286. A normal RFC5322 local part should not contain any
# folding white space, but legacy local parts can (they are a sequence
# of atoms, not dotatoms). On the other hand we strip whitespace from
# before the @ and around dots, on the assumption that the whitespace
# around the punctuation is a mistake in what would otherwise be
# an RFC5322 local part. Leading whitespace is, as usual, stripped as well.
self.assertEqual(('', "merwok wok@xample.com"),
utils.parseaddr("merwok wok@xample.com"))
self.assertEqual(('', "merwok wok@xample.com"),
utils.parseaddr("merwok wok@xample.com"))
self.assertEqual(('', "merwok wok@xample.com"),
utils.parseaddr(" merwok wok @xample.com"))
self.assertEqual(('', 'merwok"wok" wok@xample.com'),
utils.parseaddr('merwok"wok" wok@xample.com'))
self.assertEqual(('', 'merwok.wok.wok@xample.com'),
utils.parseaddr('merwok. wok . wok@xample.com'))
def test_formataddr_does_not_quote_parens_in_quoted_string(self):
addr = ("'foo@example.com' (foo@example.com)",
'foo@example.com')
addrstr = ('"\'foo@example.com\' '
'(foo@example.com)" <foo@example.com>')
self.assertEqual(utils.parseaddr(addrstr), addr)
self.assertEqual(utils.formataddr(addr), addrstr)
def test_multiline_from_comment(self):
x = """\
Foo
\tBar <foo@example.com>"""
self.assertEqual(utils.parseaddr(x), ('Foo Bar', 'foo@example.com'))
def test_quote_dump(self):
self.assertEqual(
utils.formataddr(('A Silly; Person', 'person@dom.ain')),
r'"A Silly; Person" <person@dom.ain>')
def test_charset_richcomparisons(self):
eq = self.assertEqual
ne = self.assertNotEqual
cset1 = Charset()
cset2 = Charset()
eq(cset1, 'us-ascii')
eq(cset1, 'US-ASCII')
eq(cset1, 'Us-AsCiI')
eq('us-ascii', cset1)
eq('US-ASCII', cset1)
eq('Us-AsCiI', cset1)
ne(cset1, 'usascii')
ne(cset1, 'USASCII')
ne(cset1, 'UsAsCiI')
ne('usascii', cset1)
ne('USASCII', cset1)
ne('UsAsCiI', cset1)
eq(cset1, cset2)
eq(cset2, cset1)
def test_getaddresses(self):
eq = self.assertEqual
eq(utils.getaddresses(['aperson@dom.ain (Al Person)',
'Bud Person <bperson@dom.ain>']),
[('Al Person', 'aperson@dom.ain'),
('Bud Person', 'bperson@dom.ain')])
def test_getaddresses_nasty(self):
eq = self.assertEqual
eq(utils.getaddresses(['foo: ;']), [('', '')])
eq(utils.getaddresses(
['[]*-- =~$']),
[('', ''), ('', ''), ('', '*--')])
eq(utils.getaddresses(
['foo: ;', '"Jason R. Mastaler" <jason@dom.ain>']),
[('', ''), ('Jason R. Mastaler', 'jason@dom.ain')])
def test_getaddresses_embedded_comment(self):
"""Test proper handling of a nested comment"""
eq = self.assertEqual
addrs = utils.getaddresses(['User ((nested comment)) <foo@bar.com>'])
eq(addrs[0][1], 'foo@bar.com')
def test_make_msgid_collisions(self):
# Test make_msgid uniqueness, even with multiple threads
class MsgidsThread(Thread):
def run(self):
# generate msgids for 3 seconds
self.msgids = []
append = self.msgids.append
make_msgid = utils.make_msgid
clock = time.monotonic  # time.clock was removed in Python 3.8; use a monotonic clock
tfin = clock() + 3.0
while clock() < tfin:
append(make_msgid(domain='testdomain-string'))
threads = [MsgidsThread() for i in range(5)]
with start_threads(threads):
pass
all_ids = sum([t.msgids for t in threads], [])
self.assertEqual(len(set(all_ids)), len(all_ids))
def test_utils_quote_unquote(self):
eq = self.assertEqual
msg = Message()
msg.add_header('content-disposition', 'attachment',
filename='foo\\wacky"name')
eq(msg.get_filename(), 'foo\\wacky"name')
def test_get_body_encoding_with_bogus_charset(self):
charset = Charset('not a charset')
self.assertEqual(charset.get_body_encoding(), 'base64')
def test_get_body_encoding_with_uppercase_charset(self):
eq = self.assertEqual
msg = Message()
msg['Content-Type'] = 'text/plain; charset=UTF-8'
eq(msg['content-type'], 'text/plain; charset=UTF-8')
charsets = msg.get_charsets()
eq(len(charsets), 1)
eq(charsets[0], 'utf-8')
charset = Charset(charsets[0])
eq(charset.get_body_encoding(), 'base64')
msg.set_payload(b'hello world', charset=charset)
eq(msg.get_payload(), 'aGVsbG8gd29ybGQ=\n')
eq(msg.get_payload(decode=True), b'hello world')
eq(msg['content-transfer-encoding'], 'base64')
# Try another one
msg = Message()
msg['Content-Type'] = 'text/plain; charset="US-ASCII"'
charsets = msg.get_charsets()
eq(len(charsets), 1)
eq(charsets[0], 'us-ascii')
charset = Charset(charsets[0])
eq(charset.get_body_encoding(), encoders.encode_7or8bit)
msg.set_payload('hello world', charset=charset)
eq(msg.get_payload(), 'hello world')
eq(msg['content-transfer-encoding'], '7bit')
def test_charsets_case_insensitive(self):
lc = Charset('us-ascii')
uc = Charset('US-ASCII')
self.assertEqual(lc.get_body_encoding(), uc.get_body_encoding())
def test_partial_falls_inside_message_delivery_status(self):
eq = self.ndiffAssertEqual
# The Parser interface provides chunks of data to FeedParser in 8192
# byte gulps. SF bug #1076485 found one of those chunks inside a
# message/delivery-status header block, which triggered an
# unreadline() of NeedMoreData.
msg = self._msgobj('msg_43.txt')
sfp = StringIO()
iterators._structure(msg, sfp)
eq(sfp.getvalue(), """\
multipart/report
text/plain
message/delivery-status
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/plain
text/rfc822-headers
""")
def test_make_msgid_domain(self):
self.assertEqual(
email.utils.make_msgid(domain='testdomain-string')[-19:],
'@testdomain-string>')
def test_Generator_linend(self):
# Issue 14645.
with openfile('msg_26.txt', newline='\n') as f:
msgtxt = f.read()
msgtxt_nl = msgtxt.replace('\r\n', '\n')
msg = email.message_from_string(msgtxt)
s = StringIO()
g = email.generator.Generator(s)
g.flatten(msg)
self.assertEqual(s.getvalue(), msgtxt_nl)
def test_BytesGenerator_linend(self):
# Issue 14645.
with openfile('msg_26.txt', newline='\n') as f:
msgtxt = f.read()
msgtxt_nl = msgtxt.replace('\r\n', '\n')
msg = email.message_from_string(msgtxt_nl)
s = BytesIO()
g = email.generator.BytesGenerator(s)
g.flatten(msg, linesep='\r\n')
self.assertEqual(s.getvalue().decode('ascii'), msgtxt)
def test_BytesGenerator_linend_with_non_ascii(self):
# Issue 14645.
with openfile('msg_26.txt', 'rb') as f:
msgtxt = f.read()
msgtxt = msgtxt.replace(b'with attachment', b'fo\xf6')
msgtxt_nl = msgtxt.replace(b'\r\n', b'\n')
msg = email.message_from_bytes(msgtxt_nl)
s = BytesIO()
g = email.generator.BytesGenerator(s)
g.flatten(msg, linesep='\r\n')
self.assertEqual(s.getvalue(), msgtxt)
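# An illustrative sketch (not part of the suite) of the RFC 2047 aware
# formataddr() behavior tested above; name and address are made up.
def _demo_formataddr_rfc2047():
    from email import utils
    name, addr = 'H\u00e4ns W\u00fcrst', 'person@dom.ain'
    utf8 = utils.formataddr((name, addr))                 # utf-8 base64 encoded word
    latin = utils.formataddr((name, addr), 'iso-8859-1')  # quoted-printable encoded word
    return utf8, latin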
# Test the iterator/generators
class TestIterators(TestEmailBase):
def test_body_line_iterator(self):
eq = self.assertEqual
neq = self.ndiffAssertEqual
# First a simple non-multipart message
msg = self._msgobj('msg_01.txt')
it = iterators.body_line_iterator(msg)
lines = list(it)
eq(len(lines), 6)
neq(EMPTYSTRING.join(lines), msg.get_payload())
# Now a more complicated multipart
msg = self._msgobj('msg_02.txt')
it = iterators.body_line_iterator(msg)
lines = list(it)
eq(len(lines), 43)
with openfile('msg_19.txt') as fp:
neq(EMPTYSTRING.join(lines), fp.read())
def test_typed_subpart_iterator(self):
eq = self.assertEqual
msg = self._msgobj('msg_04.txt')
it = iterators.typed_subpart_iterator(msg, 'text')
lines = []
subparts = 0
for subpart in it:
subparts += 1
lines.append(subpart.get_payload())
eq(subparts, 2)
eq(EMPTYSTRING.join(lines), """\
a simple kind of mirror
to reflect upon our own
a simple kind of mirror
to reflect upon our own
""")
def test_typed_subpart_iterator_default_type(self):
eq = self.assertEqual
msg = self._msgobj('msg_03.txt')
it = iterators.typed_subpart_iterator(msg, 'text', 'plain')
lines = []
subparts = 0
for subpart in it:
subparts += 1
lines.append(subpart.get_payload())
eq(subparts, 1)
eq(EMPTYSTRING.join(lines), """\
Hi,
Do you like this message?
-Me
""")
def test_pushCR_LF(self):
'''FeedParser BufferedSubFile.push() assumed it received complete
line endings. A CR ending one push() followed by a LF starting
the next push() added an empty line.
'''
imt = [
("a\r \n", 2),
("b", 0),
("c\n", 1),
("", 0),
("d\r\n", 1),
("e\r", 0),
("\nf", 1),
("\r\n", 1),
]
from email.feedparser import BufferedSubFile, NeedMoreData
bsf = BufferedSubFile()
om = []
nt = 0
for il, n in imt:
bsf.push(il)
nt += n
n1 = 0
for ol in iter(bsf.readline, NeedMoreData):
om.append(ol)
n1 += 1
self.assertEqual(n, n1)
self.assertEqual(len(om), nt)
self.assertEqual(''.join([il for il, n in imt]), ''.join(om))
def test_push_random(self):
from email.feedparser import BufferedSubFile, NeedMoreData
n = 10000
chunksize = 5
chars = 'abcd \t\r\n'
s = ''.join(choice(chars) for i in range(n)) + '\n'
target = s.splitlines(True)
bsf = BufferedSubFile()
lines = []
for i in range(0, len(s), chunksize):
chunk = s[i:i+chunksize]
bsf.push(chunk)
lines.extend(iter(bsf.readline, NeedMoreData))
self.assertEqual(lines, target)
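# A minimal sketch of the BufferedSubFile protocol the two tests above
# drive: push() partial data, then drain complete lines until readline()
# hands back the NeedMoreData sentinel. Not part of the suite.
def _demo_buffered_subfile():
    from email.feedparser import BufferedSubFile, NeedMoreData
    bsf = BufferedSubFile()
    bsf.push('first li')                   # no newline yet
    assert bsf.readline() is NeedMoreData  # nothing complete to hand out
    bsf.push('ne\nsecond line\n')
    return list(iter(bsf.readline, NeedMoreData))  # ['first line\n', 'second line\n']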
class TestFeedParsers(TestEmailBase):
def parse(self, chunks):
from email.feedparser import FeedParser
feedparser = FeedParser()
for chunk in chunks:
feedparser.feed(chunk)
return feedparser.close()
def test_empty_header_name_handled(self):
# Issue 19996
msg = self.parse("First: val\n: bad\nSecond: val")
self.assertEqual(msg['First'], 'val')
self.assertEqual(msg['Second'], 'val')
def test_newlines(self):
m = self.parse(['a:\nb:\rc:\r\nd:\n'])
self.assertEqual(m.keys(), ['a', 'b', 'c', 'd'])
m = self.parse(['a:\nb:\rc:\r\nd:'])
self.assertEqual(m.keys(), ['a', 'b', 'c', 'd'])
m = self.parse(['a:\rb', 'c:\n'])
self.assertEqual(m.keys(), ['a', 'bc'])
m = self.parse(['a:\r', 'b:\n'])
self.assertEqual(m.keys(), ['a', 'b'])
m = self.parse(['a:\r', '\nb:\n'])
self.assertEqual(m.keys(), ['a', 'b'])
m = self.parse(['a:\x85b:\u2028c:\n'])
self.assertEqual(m.items(), [('a', '\x85'), ('b', '\u2028'), ('c', '')])
m = self.parse(['a:\r', 'b:\x85', 'c:\n'])
self.assertEqual(m.items(), [('a', ''), ('b', '\x85'), ('c', '')])
def test_long_lines(self):
# Expected peak memory use on 32-bit platform: 6*N*M bytes.
M, N = 1000, 20000
m = self.parse(['a:b\n\n'] + ['x'*M] * N)
self.assertEqual(m.items(), [('a', 'b')])
self.assertEqual(m.get_payload(), 'x'*M*N)
m = self.parse(['a:b\r\r'] + ['x'*M] * N)
self.assertEqual(m.items(), [('a', 'b')])
self.assertEqual(m.get_payload(), 'x'*M*N)
m = self.parse(['a:b\r\r'] + ['x'*M+'\x85'] * N)
self.assertEqual(m.items(), [('a', 'b')])
self.assertEqual(m.get_payload(), ('x'*M+'\x85')*N)
m = self.parse(['a:\r', 'b: '] + ['x'*M] * N)
self.assertEqual(m.items(), [('a', ''), ('b', 'x'*M*N)])
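# A sketch of the feed()/close() incremental-parsing pattern used by the
# parse() helper above; not part of the suite, and the chunk boundaries
# are arbitrary.
def _demo_feedparser(chunks=('Subject: hi\n', '\nbody\n')):
    from email.feedparser import FeedParser
    fp = FeedParser()
    for chunk in chunks:  # chunk boundaries may fall anywhere, even mid-line
        fp.feed(chunk)
    return fp.close()     # returns the finished Message object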
class TestParsers(TestEmailBase):
def test_header_parser(self):
eq = self.assertEqual
# Parse only the headers of a complex multipart MIME document
with openfile('msg_02.txt') as fp:
msg = HeaderParser().parse(fp)
eq(msg['from'], 'ppp-request@zzz.org')
eq(msg['to'], 'ppp@zzz.org')
eq(msg.get_content_type(), 'multipart/mixed')
self.assertFalse(msg.is_multipart())
self.assertIsInstance(msg.get_payload(), str)
def test_bytes_header_parser(self):
eq = self.assertEqual
# Parse only the headers of a complex multipart MIME document
with openfile('msg_02.txt', 'rb') as fp:
msg = email.parser.BytesHeaderParser().parse(fp)
eq(msg['from'], 'ppp-request@zzz.org')
eq(msg['to'], 'ppp@zzz.org')
eq(msg.get_content_type(), 'multipart/mixed')
self.assertFalse(msg.is_multipart())
self.assertIsInstance(msg.get_payload(), str)
self.assertIsInstance(msg.get_payload(decode=True), bytes)
def test_bytes_parser_does_not_close_file(self):
with openfile('msg_02.txt', 'rb') as fp:
email.parser.BytesParser().parse(fp)
self.assertFalse(fp.closed)
def test_bytes_parser_on_exception_does_not_close_file(self):
with openfile('msg_15.txt', 'rb') as fp:
bytesParser = email.parser.BytesParser
self.assertRaises(email.errors.StartBoundaryNotFoundDefect,
bytesParser(policy=email.policy.strict).parse,
fp)
self.assertFalse(fp.closed)
def test_parser_does_not_close_file(self):
with openfile('msg_02.txt', 'r') as fp:
email.parser.Parser().parse(fp)
self.assertFalse(fp.closed)
def test_parser_on_exception_does_not_close_file(self):
with openfile('msg_15.txt', 'r') as fp:
parser = email.parser.Parser
self.assertRaises(email.errors.StartBoundaryNotFoundDefect,
parser(policy=email.policy.strict).parse, fp)
self.assertFalse(fp.closed)
def test_whitespace_continuation(self):
eq = self.assertEqual
# This message contains a line after the Subject: header that has only
# whitespace, but it is not empty!
msg = email.message_from_string("""\
From: aperson@dom.ain
To: bperson@dom.ain
Subject: the next line has a space on it
\x20
Date: Mon, 8 Apr 2002 15:09:19 -0400
Message-ID: spam
Here's the message body
""")
eq(msg['subject'], 'the next line has a space on it\n ')
eq(msg['message-id'], 'spam')
eq(msg.get_payload(), "Here's the message body\n")
def test_whitespace_continuation_last_header(self):
eq = self.assertEqual
# Like the previous test, but the subject line is the last
# header.
msg = email.message_from_string("""\
From: aperson@dom.ain
To: bperson@dom.ain
Date: Mon, 8 Apr 2002 15:09:19 -0400
Message-ID: spam
Subject: the next line has a space on it
\x20
Here's the message body
""")
eq(msg['subject'], 'the next line has a space on it\n ')
eq(msg['message-id'], 'spam')
eq(msg.get_payload(), "Here's the message body\n")
def test_crlf_separation(self):
eq = self.assertEqual
with openfile('msg_26.txt', newline='\n') as fp:
msg = Parser().parse(fp)
eq(len(msg.get_payload()), 2)
part1 = msg.get_payload(0)
eq(part1.get_content_type(), 'text/plain')
eq(part1.get_payload(), 'Simple email with attachment.\r\n\r\n')
part2 = msg.get_payload(1)
eq(part2.get_content_type(), 'application/riscos')
def test_crlf_flatten(self):
# Using newline='\n' preserves the crlfs in this input file.
with openfile('msg_26.txt', newline='\n') as fp:
text = fp.read()
msg = email.message_from_string(text)
s = StringIO()
g = Generator(s)
g.flatten(msg, linesep='\r\n')
self.assertEqual(s.getvalue(), text)
maxDiff = None
def test_multipart_digest_with_extra_mime_headers(self):
eq = self.assertEqual
neq = self.ndiffAssertEqual
with openfile('msg_28.txt') as fp:
msg = email.message_from_file(fp)
# Structure is:
# multipart/digest
# message/rfc822
# text/plain
# message/rfc822
# text/plain
eq(msg.is_multipart(), 1)
eq(len(msg.get_payload()), 2)
part1 = msg.get_payload(0)
eq(part1.get_content_type(), 'message/rfc822')
eq(part1.is_multipart(), 1)
eq(len(part1.get_payload()), 1)
part1a = part1.get_payload(0)
eq(part1a.is_multipart(), 0)
eq(part1a.get_content_type(), 'text/plain')
neq(part1a.get_payload(), 'message 1\n')
# next message/rfc822
part2 = msg.get_payload(1)
eq(part2.get_content_type(), 'message/rfc822')
eq(part2.is_multipart(), 1)
eq(len(part2.get_payload()), 1)
part2a = part2.get_payload(0)
eq(part2a.is_multipart(), 0)
eq(part2a.get_content_type(), 'text/plain')
neq(part2a.get_payload(), 'message 2\n')
def test_three_lines(self):
# A bug report by Andrew McNamara
lines = ['From: Andrew Person <aperson@dom.ain',
'Subject: Test',
'Date: Tue, 20 Aug 2002 16:43:45 +1000']
msg = email.message_from_string(NL.join(lines))
self.assertEqual(msg['date'], 'Tue, 20 Aug 2002 16:43:45 +1000')
def test_strip_line_feed_and_carriage_return_in_headers(self):
eq = self.assertEqual
# For [ 1002475 ] email message parser doesn't handle \r\n correctly
value1 = 'text'
value2 = 'more text'
m = 'Header: %s\r\nNext-Header: %s\r\n\r\nBody\r\n\r\n' % (
value1, value2)
msg = email.message_from_string(m)
eq(msg.get('Header'), value1)
eq(msg.get('Next-Header'), value2)
def test_rfc2822_header_syntax(self):
eq = self.assertEqual
m = '>From: foo\nFrom: bar\n!"#QUX;~: zoo\n\nbody'
msg = email.message_from_string(m)
eq(len(msg), 3)
eq(sorted(field for field in msg), ['!"#QUX;~', '>From', 'From'])
eq(msg.get_payload(), 'body')
def test_rfc2822_space_not_allowed_in_header(self):
eq = self.assertEqual
m = '>From foo@example.com 11:25:53\nFrom: bar\n!"#QUX;~: zoo\n\nbody'
msg = email.message_from_string(m)
eq(len(msg.keys()), 0)
def test_rfc2822_one_character_header(self):
eq = self.assertEqual
m = 'A: first header\nB: second header\nCC: third header\n\nbody'
msg = email.message_from_string(m)
headers = msg.keys()
headers.sort()
eq(headers, ['A', 'B', 'CC'])
eq(msg.get_payload(), 'body')
def test_CRLFLF_at_end_of_part(self):
# issue 5610: feedparser should not eat two chars from body part ending
# with "\r\n\n".
m = (
"From: foo@bar.com\n"
"To: baz\n"
"Mime-Version: 1.0\n"
"Content-Type: multipart/mixed; boundary=BOUNDARY\n"
"\n"
"--BOUNDARY\n"
"Content-Type: text/plain\n"
"\n"
"body ending with CRLF newline\r\n"
"\n"
"--BOUNDARY--\n"
)
msg = email.message_from_string(m)
self.assertTrue(msg.get_payload(0).get_payload().endswith('\r\n'))
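# An illustrative sketch (not part of the suite) of header-only parsing
# with HeaderParser, as in test_header_parser above: the body is left as
# a single unparsed string payload.
def _demo_header_only_parse(text='From: a@b.c\n\nunparsed body\n'):
    from email.parser import HeaderParser
    msg = HeaderParser().parsestr(text)
    return msg.items(), msg.get_payload()  # ([('From', 'a@b.c')], 'unparsed body\n')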
class Test8BitBytesHandling(TestEmailBase):
# In Python 3 all parser input is str, but that doesn't work if the actual input
# uses an 8bit transfer encoding. To hack around that, in email 5.1 we
# decode byte streams using the surrogateescape error handler, and
# reconvert to binary at appropriate places if we detect surrogates. This
# doesn't allow us to transform headers with 8bit bytes (they get munged),
# but it does allow us to parse and preserve them, and to decode body
# parts that use an 8bit CTE.
bodytest_msg = textwrap.dedent("""\
From: foo@bar.com
To: baz
Mime-Version: 1.0
Content-Type: text/plain; charset={charset}
Content-Transfer-Encoding: {cte}
{bodyline}
""")
def test_known_8bit_CTE(self):
m = self.bodytest_msg.format(charset='utf-8',
cte='8bit',
bodyline='pöstal').encode('utf-8')
msg = email.message_from_bytes(m)
self.assertEqual(msg.get_payload(), "pöstal\n")
self.assertEqual(msg.get_payload(decode=True),
"pöstal\n".encode('utf-8'))
def test_unknown_8bit_CTE(self):
m = self.bodytest_msg.format(charset='notavalidcharset',
cte='8bit',
bodyline='pöstal').encode('utf-8')
msg = email.message_from_bytes(m)
self.assertEqual(msg.get_payload(), "p\uFFFD\uFFFDstal\n")
self.assertEqual(msg.get_payload(decode=True),
"pöstal\n".encode('utf-8'))
def test_8bit_in_quopri_body(self):
# This is non-RFC compliant data...without 'decode' the library code
# decodes the body using the charset from the headers, and because the
# source byte really is utf-8 this works. This is likely to fail
# against real dirty data (ie: produce mojibake), but the data is
# invalid anyway so it is as good a guess as any. But this means that
# this test just confirms the current behavior; that behavior is not
# necessarily the best possible behavior. With 'decode' it is
# returning the raw bytes, so that test should be of correct behavior,
# or at least produce the same result that email4 did.
m = self.bodytest_msg.format(charset='utf-8',
cte='quoted-printable',
bodyline='p=C3=B6stál').encode('utf-8')
msg = email.message_from_bytes(m)
self.assertEqual(msg.get_payload(), 'p=C3=B6stál\n')
self.assertEqual(msg.get_payload(decode=True),
'pöstál\n'.encode('utf-8'))
def test_invalid_8bit_in_non_8bit_cte_uses_replace(self):
# This is similar to the previous test, but proves that if the 8bit
# byte is undecodeable in the specified charset, it gets replaced
# by the unicode 'unknown' character. Again, this may or may not
# be the ideal behavior. Note that if decode=False none of the
# decoders will get involved, so this is the only test we need
# for this behavior.
m = self.bodytest_msg.format(charset='ascii',
cte='quoted-printable',
bodyline='p=C3=B6stál').encode('utf-8')
msg = email.message_from_bytes(m)
self.assertEqual(msg.get_payload(), 'p=C3=B6st\uFFFD\uFFFDl\n')
self.assertEqual(msg.get_payload(decode=True),
'pöstál\n'.encode('utf-8'))
# test_defect_handling:test_invalid_chars_in_base64_payload
def test_8bit_in_base64_body(self):
# If we get 8bit bytes in a base64 body, we can just ignore them
# as being outside the base64 alphabet and decode anyway. But
# we register a defect.
m = self.bodytest_msg.format(charset='utf-8',
cte='base64',
bodyline='cMO2c3RhbAá=').encode('utf-8')
msg = email.message_from_bytes(m)
self.assertEqual(msg.get_payload(decode=True),
'pöstal'.encode('utf-8'))
self.assertIsInstance(msg.defects[0],
errors.InvalidBase64CharactersDefect)
def test_8bit_in_uuencode_body(self):
# Sticking an 8bit byte in a uuencode block makes it undecodable by
# normal means, so the block is returned undecoded, but as bytes.
m = self.bodytest_msg.format(charset='utf-8',
cte='uuencode',
bodyline='<,.V<W1A; á ').encode('utf-8')
msg = email.message_from_bytes(m)
self.assertEqual(msg.get_payload(decode=True),
'<,.V<W1A; á \n'.encode('utf-8'))
headertest_headers = (
('From: foo@bar.com', ('From', 'foo@bar.com')),
('To: báz', ('To', '=?unknown-8bit?q?b=C3=A1z?=')),
('Subject: Maintenant je vous présente mon collègue, le pouf célèbre\n'
'\tJean de Baddie',
('Subject', '=?unknown-8bit?q?Maintenant_je_vous_pr=C3=A9sente_mon_'
'coll=C3=A8gue=2C_le_pouf_c=C3=A9l=C3=A8bre?=\n'
' =?unknown-8bit?q?_Jean_de_Baddie?=')),
('From: göst', ('From', '=?unknown-8bit?b?Z8O2c3Q=?=')),
)
headertest_msg = ('\n'.join([src for (src, _) in headertest_headers]) +
'\nYes, they are flying.\n').encode('utf-8')
def test_get_8bit_header(self):
msg = email.message_from_bytes(self.headertest_msg)
self.assertEqual(str(msg.get('to')), 'b\uFFFD\uFFFDz')
self.assertEqual(str(msg['to']), 'b\uFFFD\uFFFDz')
def test_print_8bit_headers(self):
msg = email.message_from_bytes(self.headertest_msg)
self.assertEqual(str(msg),
textwrap.dedent("""\
From: {}
To: {}
Subject: {}
From: {}
Yes, they are flying.
""").format(*[expected[1] for (_, expected) in
self.headertest_headers]))
def test_values_with_8bit_headers(self):
msg = email.message_from_bytes(self.headertest_msg)
self.assertListEqual([str(x) for x in msg.values()],
['foo@bar.com',
'b\uFFFD\uFFFDz',
'Maintenant je vous pr\uFFFD\uFFFDsente mon '
'coll\uFFFD\uFFFDgue, le pouf '
'c\uFFFD\uFFFDl\uFFFD\uFFFDbre\n'
'\tJean de Baddie',
"g\uFFFD\uFFFDst"])
def test_items_with_8bit_headers(self):
msg = email.message_from_bytes(self.headertest_msg)
self.assertListEqual([(str(x), str(y)) for (x, y) in msg.items()],
[('From', 'foo@bar.com'),
('To', 'b\uFFFD\uFFFDz'),
('Subject', 'Maintenant je vous '
'pr\uFFFD\uFFFDsente '
'mon coll\uFFFD\uFFFDgue, le pouf '
'c\uFFFD\uFFFDl\uFFFD\uFFFDbre\n'
'\tJean de Baddie'),
('From', 'g\uFFFD\uFFFDst')])
def test_get_all_with_8bit_headers(self):
msg = email.message_from_bytes(self.headertest_msg)
self.assertListEqual([str(x) for x in msg.get_all('from')],
['foo@bar.com',
'g\uFFFD\uFFFDst'])
def test_get_content_type_with_8bit(self):
msg = email.message_from_bytes(textwrap.dedent("""\
Content-Type: text/pl\xA7in; charset=utf-8
""").encode('latin-1'))
self.assertEqual(msg.get_content_type(), "text/pl\uFFFDin")
self.assertEqual(msg.get_content_maintype(), "text")
self.assertEqual(msg.get_content_subtype(), "pl\uFFFDin")
# test_headerregistry.TestContentTypeHeader.non_ascii_in_params
def test_get_params_with_8bit(self):
msg = email.message_from_bytes(
'X-Header: foo=\xa7ne; b\xa7r=two; baz=three\n'.encode('latin-1'))
self.assertEqual(msg.get_params(header='x-header'),
[('foo', '\uFFFDne'), ('b\uFFFDr', 'two'), ('baz', 'three')])
self.assertEqual(msg.get_param('Foo', header='x-header'), '\uFFFDne')
# XXX: someday you might be able to get 'b\xa7r', for now you can't.
self.assertEqual(msg.get_param('b\xa7r', header='x-header'), None)
# test_headerregistry.TestContentTypeHeader.non_ascii_in_rfc2231_value
def test_get_rfc2231_params_with_8bit(self):
msg = email.message_from_bytes(textwrap.dedent("""\
Content-Type: text/plain; charset=us-ascii;
title*=us-ascii'en'This%20is%20not%20f\xa7n"""
).encode('latin-1'))
self.assertEqual(msg.get_param('title'),
('us-ascii', 'en', 'This is not f\uFFFDn'))
def test_set_rfc2231_params_with_8bit(self):
msg = email.message_from_bytes(textwrap.dedent("""\
Content-Type: text/plain; charset=us-ascii;
title*=us-ascii'en'This%20is%20not%20f\xa7n"""
).encode('latin-1'))
msg.set_param('title', 'test')
self.assertEqual(msg.get_param('title'), 'test')
def test_del_rfc2231_params_with_8bit(self):
msg = email.message_from_bytes(textwrap.dedent("""\
Content-Type: text/plain; charset=us-ascii;
title*=us-ascii'en'This%20is%20not%20f\xa7n"""
).encode('latin-1'))
msg.del_param('title')
self.assertEqual(msg.get_param('title'), None)
self.assertEqual(msg.get_content_maintype(), 'text')
def test_get_payload_with_8bit_cte_header(self):
msg = email.message_from_bytes(textwrap.dedent("""\
Content-Transfer-Encoding: b\xa7se64
Content-Type: text/plain; charset=latin-1
payload
""").encode('latin-1'))
self.assertEqual(msg.get_payload(), 'payload\n')
self.assertEqual(msg.get_payload(decode=True), b'payload\n')
non_latin_bin_msg = textwrap.dedent("""\
From: foo@bar.com
To: báz
Subject: Maintenant je vous présente mon collègue, le pouf célèbre
\tJean de Baddie
Mime-Version: 1.0
Content-Type: text/plain; charset="utf-8"
Content-Transfer-Encoding: 8bit
Да, они летят.
""").encode('utf-8')
def test_bytes_generator(self):
msg = email.message_from_bytes(self.non_latin_bin_msg)
out = BytesIO()
email.generator.BytesGenerator(out).flatten(msg)
self.assertEqual(out.getvalue(), self.non_latin_bin_msg)
def test_bytes_generator_handles_None_body(self):
# Issue 11019
msg = email.message.Message()
out = BytesIO()
email.generator.BytesGenerator(out).flatten(msg)
self.assertEqual(out.getvalue(), b"\n")
non_latin_bin_msg_as7bit_wrapped = textwrap.dedent("""\
From: foo@bar.com
To: =?unknown-8bit?q?b=C3=A1z?=
Subject: =?unknown-8bit?q?Maintenant_je_vous_pr=C3=A9sente_mon_coll=C3=A8gue?=
=?unknown-8bit?q?=2C_le_pouf_c=C3=A9l=C3=A8bre?=
=?unknown-8bit?q?_Jean_de_Baddie?=
Mime-Version: 1.0
Content-Type: text/plain; charset="utf-8"
Content-Transfer-Encoding: base64
0JTQsCwg0L7QvdC4INC70LXRgtGP0YIuCg==
""")
def test_generator_handles_8bit(self):
msg = email.message_from_bytes(self.non_latin_bin_msg)
out = StringIO()
email.generator.Generator(out).flatten(msg)
self.assertEqual(out.getvalue(), self.non_latin_bin_msg_as7bit_wrapped)
def test_str_generator_should_not_mutate_msg_when_handling_8bit(self):
msg = email.message_from_bytes(self.non_latin_bin_msg)
out = BytesIO()
BytesGenerator(out).flatten(msg)
orig_value = out.getvalue()
Generator(StringIO()).flatten(msg) # Should not mutate msg!
out = BytesIO()
BytesGenerator(out).flatten(msg)
self.assertEqual(out.getvalue(), orig_value)
def test_bytes_generator_with_unix_from(self):
# The unixfrom contains a current date, so we can't check it
# literally. Just make sure the first word is 'From' and the
# rest of the message matches the input.
msg = email.message_from_bytes(self.non_latin_bin_msg)
out = BytesIO()
email.generator.BytesGenerator(out).flatten(msg, unixfrom=True)
lines = out.getvalue().split(b'\n')
self.assertEqual(lines[0].split()[0], b'From')
self.assertEqual(b'\n'.join(lines[1:]), self.non_latin_bin_msg)
non_latin_bin_msg_as7bit = non_latin_bin_msg_as7bit_wrapped.split('\n')
non_latin_bin_msg_as7bit[2:4] = [
'Subject: =?unknown-8bit?q?Maintenant_je_vous_pr=C3=A9sente_mon_'
'coll=C3=A8gue=2C_le_pouf_c=C3=A9l=C3=A8bre?=']
non_latin_bin_msg_as7bit = '\n'.join(non_latin_bin_msg_as7bit)
def test_message_from_binary_file(self):
fn = 'test.msg'
self.addCleanup(unlink, fn)
with open(fn, 'wb') as testfile:
testfile.write(self.non_latin_bin_msg)
with open(fn, 'rb') as testfile:
m = email.parser.BytesParser().parse(testfile)
self.assertEqual(str(m), self.non_latin_bin_msg_as7bit)
latin_bin_msg = textwrap.dedent("""\
From: foo@bar.com
To: Dinsdale
Subject: Nudge nudge, wink, wink
Mime-Version: 1.0
Content-Type: text/plain; charset="latin-1"
Content-Transfer-Encoding: 8bit
oh là là, know what I mean, know what I mean?
""").encode('latin-1')
latin_bin_msg_as7bit = textwrap.dedent("""\
From: foo@bar.com
To: Dinsdale
Subject: Nudge nudge, wink, wink
Mime-Version: 1.0
Content-Type: text/plain; charset="iso-8859-1"
Content-Transfer-Encoding: quoted-printable
oh l=E0 l=E0, know what I mean, know what I mean?
""")
def test_string_generator_reencodes_to_quopri_when_appropriate(self):
m = email.message_from_bytes(self.latin_bin_msg)
self.assertEqual(str(m), self.latin_bin_msg_as7bit)
def test_decoded_generator_emits_unicode_body(self):
m = email.message_from_bytes(self.latin_bin_msg)
out = StringIO()
email.generator.DecodedGenerator(out).flatten(m)
# DecodedGenerator output contains an extra blank line compared
# to the input message. RDM: not sure if this is a bug or not,
# but it is not specific to the 8bit->7bit conversion.
self.assertEqual(out.getvalue(),
self.latin_bin_msg.decode('latin-1')+'\n')
def test_bytes_feedparser(self):
bfp = email.feedparser.BytesFeedParser()
for i in range(0, len(self.latin_bin_msg), 10):
bfp.feed(self.latin_bin_msg[i:i+10])
m = bfp.close()
self.assertEqual(str(m), self.latin_bin_msg_as7bit)
def test_crlf_flatten(self):
with openfile('msg_26.txt', 'rb') as fp:
text = fp.read()
msg = email.message_from_bytes(text)
s = BytesIO()
g = email.generator.BytesGenerator(s)
g.flatten(msg, linesep='\r\n')
self.assertEqual(s.getvalue(), text)
def test_8bit_multipart(self):
# Issue 11605
source = textwrap.dedent("""\
Date: Fri, 18 Mar 2011 17:15:43 +0100
To: foo@example.com
From: foodwatch-Newsletter <bar@example.com>
Subject: Aktuelles zu Japan, Klonfleisch und Smiley-System
Message-ID: <76a486bee62b0d200f33dc2ca08220ad@localhost.localdomain>
MIME-Version: 1.0
Content-Type: multipart/alternative;
boundary="b1_76a486bee62b0d200f33dc2ca08220ad"
--b1_76a486bee62b0d200f33dc2ca08220ad
Content-Type: text/plain; charset="utf-8"
Content-Transfer-Encoding: 8bit
Guten Tag, ,
mit großer Betroffenheit verfolgen auch wir im foodwatch-Team die
Nachrichten aus Japan.
--b1_76a486bee62b0d200f33dc2ca08220ad
Content-Type: text/html; charset="utf-8"
Content-Transfer-Encoding: 8bit
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
"http://www.w3.org/TR/html4/loose.dtd">
<html lang="de">
<head>
<title>foodwatch - Newsletter</title>
</head>
<body>
<p>mit großer Betroffenheit verfolgen auch wir im foodwatch-Team
die Nachrichten aus Japan.</p>
</body>
</html>
--b1_76a486bee62b0d200f33dc2ca08220ad--
""").encode('utf-8')
msg = email.message_from_bytes(source)
s = BytesIO()
g = email.generator.BytesGenerator(s)
g.flatten(msg)
self.assertEqual(s.getvalue(), source)
def test_bytes_generator_b_encoding_linesep(self):
# Issue 14062: b encoding was tacking on an extra \n.
m = Message()
# This has enough non-ascii that it should always end up b encoded.
m['Subject'] = Header('žluťoučký kůň')
s = BytesIO()
g = email.generator.BytesGenerator(s)
g.flatten(m, linesep='\r\n')
self.assertEqual(
s.getvalue(),
b'Subject: =?utf-8?b?xb5sdcWlb3XEjWvDvSBrxa/FiA==?=\r\n\r\n')
def test_generator_b_encoding_linesep(self):
# Since this broke in ByteGenerator, test Generator for completeness.
m = Message()
# This has enough non-ascii that it should always end up b encoded.
m['Subject'] = Header('žluťoučký kůň')
s = StringIO()
g = email.generator.Generator(s)
g.flatten(m, linesep='\r\n')
self.assertEqual(
s.getvalue(),
'Subject: =?utf-8?b?xb5sdcWlb3XEjWvDvSBrxa/FiA==?=\r\n\r\n')
maxDiff = None
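# A minimal sketch of the surrogateescape round trip the class above
# relies on: 8bit header bytes survive message_from_bytes() and are
# restored verbatim by BytesGenerator. Not part of the suite.
def _demo_surrogateescape_roundtrip():
    from io import BytesIO
    import email
    import email.generator
    raw = b'From: foo@bar.com\nSubject: caf\xe9\n\nbody\n'
    msg = email.message_from_bytes(raw)  # \xe9 becomes a surrogate in str
    out = BytesIO()
    email.generator.BytesGenerator(out).flatten(msg)
    return out.getvalue() == raw         # True: the original bytes come back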
class BaseTestBytesGeneratorIdempotent:
maxDiff = None
def _msgobj(self, filename):
with openfile(filename, 'rb') as fp:
data = fp.read()
data = self.normalize_linesep_regex.sub(self.blinesep, data)
msg = email.message_from_bytes(data)
return msg, data
def _idempotent(self, msg, data, unixfrom=False):
b = BytesIO()
g = email.generator.BytesGenerator(b, maxheaderlen=0)
g.flatten(msg, unixfrom=unixfrom, linesep=self.linesep)
self.assertEqual(data, b.getvalue())
class TestBytesGeneratorIdempotentNL(BaseTestBytesGeneratorIdempotent,
TestIdempotent):
linesep = '\n'
blinesep = b'\n'
normalize_linesep_regex = re.compile(br'\r\n')
class TestBytesGeneratorIdempotentCRLF(BaseTestBytesGeneratorIdempotent,
TestIdempotent):
linesep = '\r\n'
blinesep = b'\r\n'
normalize_linesep_regex = re.compile(br'(?<!\r)\n')
class TestBase64(unittest.TestCase):
def test_len(self):
eq = self.assertEqual
eq(base64mime.header_length('hello'),
len(base64mime.body_encode(b'hello', eol='')))
for size in range(15):
if size == 0 : bsize = 0
elif size <= 3 : bsize = 4
elif size <= 6 : bsize = 8
elif size <= 9 : bsize = 12
elif size <= 12: bsize = 16
else : bsize = 20
eq(base64mime.header_length('x' * size), bsize)
def test_decode(self):
eq = self.assertEqual
eq(base64mime.decode(''), b'')
eq(base64mime.decode('aGVsbG8='), b'hello')
def test_encode(self):
eq = self.assertEqual
eq(base64mime.body_encode(b''), b'')
eq(base64mime.body_encode(b'hello'), 'aGVsbG8=\n')
# Test the binary flag
eq(base64mime.body_encode(b'hello\n'), 'aGVsbG8K\n')
# Test the maxlinelen arg
eq(base64mime.body_encode(b'xxxx ' * 20, maxlinelen=40), """\
eHh4eCB4eHh4IHh4eHggeHh4eCB4eHh4IHh4eHgg
eHh4eCB4eHh4IHh4eHggeHh4eCB4eHh4IHh4eHgg
eHh4eCB4eHh4IHh4eHggeHh4eCB4eHh4IHh4eHgg
eHh4eCB4eHh4IA==
""")
# Test the eol argument
eq(base64mime.body_encode(b'xxxx ' * 20, maxlinelen=40, eol='\r\n'),
"""\
eHh4eCB4eHh4IHh4eHggeHh4eCB4eHh4IHh4eHgg\r
eHh4eCB4eHh4IHh4eHggeHh4eCB4eHh4IHh4eHgg\r
eHh4eCB4eHh4IHh4eHggeHh4eCB4eHh4IHh4eHgg\r
eHh4eCB4eHh4IA==\r
""")
def test_header_encode(self):
eq = self.assertEqual
he = base64mime.header_encode
eq(he('hello'), '=?iso-8859-1?b?aGVsbG8=?=')
eq(he('hello\r\nworld'), '=?iso-8859-1?b?aGVsbG8NCndvcmxk?=')
eq(he('hello\nworld'), '=?iso-8859-1?b?aGVsbG8Kd29ybGQ=?=')
# Test the charset option
eq(he('hello', charset='iso-8859-2'), '=?iso-8859-2?b?aGVsbG8=?=')
eq(he('hello\nworld'), '=?iso-8859-1?b?aGVsbG8Kd29ybGQ=?=')
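# An illustrative sketch (not part of the suite) of the two base64mime
# entry points tested above: wrapped body encoding and RFC 2047 headers.
def _demo_base64mime():
    from email import base64mime
    body = base64mime.body_encode(b'x' * 60, maxlinelen=40, eol='\r\n')
    header = base64mime.header_encode('hello', charset='utf-8')  # '=?utf-8?b?aGVsbG8=?='
    return body, header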
class TestQuopri(unittest.TestCase):
def setUp(self):
# Set of characters (as byte integers) that don't need to be encoded
# in headers.
self.hlit = list(chain(
range(ord('a'), ord('z') + 1),
range(ord('A'), ord('Z') + 1),
range(ord('0'), ord('9') + 1),
(c for c in b'!*+-/')))
# Set of characters (as byte integers) that do need to be encoded in
# headers.
self.hnon = [c for c in range(256) if c not in self.hlit]
assert len(self.hlit) + len(self.hnon) == 256
# Set of characters (as byte integers) that don't need to be encoded
# in bodies.
self.blit = list(range(ord(' '), ord('~') + 1))
self.blit.append(ord('\t'))
self.blit.remove(ord('='))
# Set of characters (as byte integers) that do need to be encoded in
# bodies.
self.bnon = [c for c in range(256) if c not in self.blit]
assert len(self.blit) + len(self.bnon) == 256
def test_quopri_header_check(self):
for c in self.hlit:
self.assertFalse(quoprimime.header_check(c),
'Should not be header quopri encoded: %s' % chr(c))
for c in self.hnon:
self.assertTrue(quoprimime.header_check(c),
'Should be header quopri encoded: %s' % chr(c))
def test_quopri_body_check(self):
for c in self.blit:
self.assertFalse(quoprimime.body_check(c),
'Should not be body quopri encoded: %s' % chr(c))
for c in self.bnon:
self.assertTrue(quoprimime.body_check(c),
'Should be body quopri encoded: %s' % chr(c))
def test_header_quopri_len(self):
eq = self.assertEqual
eq(quoprimime.header_length(b'hello'), 5)
# RFC 2047 chrome is not included in header_length().
eq(len(quoprimime.header_encode(b'hello', charset='xxx')),
quoprimime.header_length(b'hello') +
# =?xxx?q?...?= means 10 extra characters
10)
eq(quoprimime.header_length(b'h@e@l@l@o@'), 20)
# RFC 2047 chrome is not included in header_length().
eq(len(quoprimime.header_encode(b'h@e@l@l@o@', charset='xxx')),
quoprimime.header_length(b'h@e@l@l@o@') +
# =?xxx?q?...?= means 10 extra characters
10)
for c in self.hlit:
eq(quoprimime.header_length(bytes([c])), 1,
'expected length 1 for %r' % chr(c))
for c in self.hnon:
# Space is special; it's encoded to _
if c == ord(' '):
continue
eq(quoprimime.header_length(bytes([c])), 3,
'expected length 3 for %r' % chr(c))
eq(quoprimime.header_length(b' '), 1)
def test_body_quopri_len(self):
eq = self.assertEqual
for c in self.blit:
eq(quoprimime.body_length(bytes([c])), 1)
for c in self.bnon:
eq(quoprimime.body_length(bytes([c])), 3)
def test_quote_unquote_idempotent(self):
for x in range(256):
c = chr(x)
self.assertEqual(quoprimime.unquote(quoprimime.quote(c)), c)
def _test_header_encode(self, header, expected_encoded_header, charset=None):
if charset is None:
encoded_header = quoprimime.header_encode(header)
else:
encoded_header = quoprimime.header_encode(header, charset)
self.assertEqual(encoded_header, expected_encoded_header)
def test_header_encode_null(self):
self._test_header_encode(b'', '')
def test_header_encode_one_word(self):
self._test_header_encode(b'hello', '=?iso-8859-1?q?hello?=')
def test_header_encode_two_lines(self):
self._test_header_encode(b'hello\nworld',
'=?iso-8859-1?q?hello=0Aworld?=')
def test_header_encode_non_ascii(self):
self._test_header_encode(b'hello\xc7there',
'=?iso-8859-1?q?hello=C7there?=')
def test_header_encode_alt_charset(self):
self._test_header_encode(b'hello', '=?iso-8859-2?q?hello?=',
charset='iso-8859-2')
def _test_header_decode(self, encoded_header, expected_decoded_header):
decoded_header = quoprimime.header_decode(encoded_header)
self.assertEqual(decoded_header, expected_decoded_header)
def test_header_decode_null(self):
self._test_header_decode('', '')
def test_header_decode_one_word(self):
self._test_header_decode('hello', 'hello')
def test_header_decode_two_lines(self):
self._test_header_decode('hello=0Aworld', 'hello\nworld')
def test_header_decode_non_ascii(self):
self._test_header_decode('hello=C7there', 'hello\xc7there')
def test_header_decode_re_bug_18380(self):
# Issue 18380: header_decode() passed flags to re.sub positionally, where
# re.sub took them as the count argument, capping the substitutions.
self.assertEqual(quoprimime.header_decode('=30' * 257), '0' * 257)
def _test_decode(self, encoded, expected_decoded, eol=None):
if eol is None:
decoded = quoprimime.decode(encoded)
else:
decoded = quoprimime.decode(encoded, eol=eol)
self.assertEqual(decoded, expected_decoded)
def test_decode_null_word(self):
self._test_decode('', '')
def test_decode_null_line_null_word(self):
self._test_decode('\r\n', '\n')
def test_decode_one_word(self):
self._test_decode('hello', 'hello')
def test_decode_one_word_eol(self):
self._test_decode('hello', 'hello', eol='X')
def test_decode_one_line(self):
self._test_decode('hello\r\n', 'hello\n')
def test_decode_one_line_lf(self):
self._test_decode('hello\n', 'hello\n')
def test_decode_one_line_cr(self):
self._test_decode('hello\r', 'hello\n')
def test_decode_one_line_nl(self):
self._test_decode('hello\n', 'helloX', eol='X')
def test_decode_one_line_crnl(self):
self._test_decode('hello\r\n', 'helloX', eol='X')
def test_decode_one_line_one_word(self):
self._test_decode('hello\r\nworld', 'hello\nworld')
def test_decode_one_line_one_word_eol(self):
self._test_decode('hello\r\nworld', 'helloXworld', eol='X')
def test_decode_two_lines(self):
self._test_decode('hello\r\nworld\r\n', 'hello\nworld\n')
def test_decode_two_lines_eol(self):
self._test_decode('hello\r\nworld\r\n', 'helloXworldX', eol='X')
def test_decode_one_long_line(self):
self._test_decode('Spam' * 250, 'Spam' * 250)
def test_decode_one_space(self):
self._test_decode(' ', '')
def test_decode_multiple_spaces(self):
self._test_decode(' ' * 5, '')
def test_decode_one_line_trailing_spaces(self):
self._test_decode('hello \r\n', 'hello\n')
def test_decode_two_lines_trailing_spaces(self):
self._test_decode('hello \r\nworld \r\n', 'hello\nworld\n')
def test_decode_quoted_word(self):
self._test_decode('=22quoted=20words=22', '"quoted words"')
def test_decode_uppercase_quoting(self):
self._test_decode('ab=CD=EF', 'ab\xcd\xef')
def test_decode_lowercase_quoting(self):
self._test_decode('ab=cd=ef', 'ab\xcd\xef')
def test_decode_soft_line_break(self):
self._test_decode('soft line=\r\nbreak', 'soft linebreak')
def test_decode_false_quoting(self):
self._test_decode('A=1,B=A ==> A+B==2', 'A=1,B=A ==> A+B==2')
def _test_encode(self, body, expected_encoded_body, maxlinelen=None, eol=None):
kwargs = {}
if maxlinelen is None:
# Use body_encode's default.
maxlinelen = 76
else:
kwargs['maxlinelen'] = maxlinelen
if eol is None:
# Use body_encode's default.
eol = '\n'
else:
kwargs['eol'] = eol
encoded_body = quoprimime.body_encode(body, **kwargs)
self.assertEqual(encoded_body, expected_encoded_body)
if eol == '\n' or eol == '\r\n':
# We know how to split the result back into lines, so maxlinelen
# can be checked.
for line in encoded_body.splitlines():
self.assertLessEqual(len(line), maxlinelen)
def test_encode_null(self):
self._test_encode('', '')
def test_encode_null_lines(self):
self._test_encode('\n\n', '\n\n')
def test_encode_one_line(self):
self._test_encode('hello\n', 'hello\n')
def test_encode_one_line_crlf(self):
self._test_encode('hello\r\n', 'hello\n')
def test_encode_one_line_eol(self):
self._test_encode('hello\n', 'hello\r\n', eol='\r\n')
def test_encode_one_line_eol_after_non_ascii(self):
# issue 20206; see changeset 0cf700464177 for why the encode/decode.
self._test_encode('hello\u03c5\n'.encode('utf-8').decode('latin1'),
'hello=CF=85\r\n', eol='\r\n')
def test_encode_one_space(self):
self._test_encode(' ', '=20')
def test_encode_one_line_one_space(self):
self._test_encode(' \n', '=20\n')
    # XXX: body_encode() expects strings, but calls ord() on their characters
    # to index into a 256-entry list. For code points above 255, this fails.
    # Should there be a check for 8-bit-only ord() values in body, or at least
    # a comment about the expected input?
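    # A possible guard (hypothetical; not part of the current quoprimime API):
    #     if any(ord(ch) > 255 for ch in body):
    #         raise ValueError('body_encode() only supports 8-bit characters')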
def test_encode_two_lines_one_space(self):
self._test_encode(' \n \n', '=20\n=20\n')
def test_encode_one_word_trailing_spaces(self):
self._test_encode('hello ', 'hello =20')
def test_encode_one_line_trailing_spaces(self):
self._test_encode('hello \n', 'hello =20\n')
def test_encode_one_word_trailing_tab(self):
self._test_encode('hello \t', 'hello =09')
def test_encode_one_line_trailing_tab(self):
self._test_encode('hello \t\n', 'hello =09\n')
def test_encode_trailing_space_before_maxlinelen(self):
self._test_encode('abcd \n1234', 'abcd =\n\n1234', maxlinelen=6)
def test_encode_trailing_space_at_maxlinelen(self):
self._test_encode('abcd \n1234', 'abcd=\n=20\n1234', maxlinelen=5)
def test_encode_trailing_space_beyond_maxlinelen(self):
self._test_encode('abcd \n1234', 'abc=\nd=20\n1234', maxlinelen=4)
def test_encode_whitespace_lines(self):
self._test_encode(' \n' * 5, '=20\n' * 5)
def test_encode_quoted_equals(self):
self._test_encode('a = b', 'a =3D b')
def test_encode_one_long_string(self):
self._test_encode('x' * 100, 'x' * 75 + '=\n' + 'x' * 25)
def test_encode_one_long_line(self):
self._test_encode('x' * 100 + '\n', 'x' * 75 + '=\n' + 'x' * 25 + '\n')
def test_encode_one_very_long_line(self):
self._test_encode('x' * 200 + '\n',
2 * ('x' * 75 + '=\n') + 'x' * 50 + '\n')
def test_encode_shortest_maxlinelen(self):
self._test_encode('=' * 5, '=3D=\n' * 4 + '=3D', maxlinelen=4)
def test_encode_maxlinelen_too_small(self):
self.assertRaises(ValueError, self._test_encode, '', '', maxlinelen=3)
def test_encode(self):
eq = self.assertEqual
eq(quoprimime.body_encode(''), '')
eq(quoprimime.body_encode('hello'), 'hello')
        # CRLF input: line endings are normalized to the default eol ('\n')
eq(quoprimime.body_encode('hello\r\nworld'), 'hello\nworld')
# Test the maxlinelen arg
eq(quoprimime.body_encode('xxxx ' * 20, maxlinelen=40), """\
xxxx xxxx xxxx xxxx xxxx xxxx xxxx xxxx=
xxxx xxxx xxxx xxxx xxxx xxxx xxxx xxx=
x xxxx xxxx xxxx xxxx=20""")
# Test the eol argument
eq(quoprimime.body_encode('xxxx ' * 20, maxlinelen=40, eol='\r\n'),
"""\
xxxx xxxx xxxx xxxx xxxx xxxx xxxx xxxx=\r
xxxx xxxx xxxx xxxx xxxx xxxx xxxx xxx=\r
x xxxx xxxx xxxx xxxx=20""")
eq(quoprimime.body_encode("""\
one line
two line"""), """\
one line
two line""")
# Test the Charset class
class TestCharset(unittest.TestCase):
def tearDown(self):
from email import charset as CharsetModule
try:
del CharsetModule.CHARSETS['fake']
except KeyError:
pass
def test_codec_encodeable(self):
eq = self.assertEqual
# Make sure us-ascii = no Unicode conversion
c = Charset('us-ascii')
eq(c.header_encode('Hello World!'), 'Hello World!')
        # 8-bit input cannot be header-encoded with us-ascii; expect UnicodeError
s = '\xa4\xa2\xa4\xa4\xa4\xa6\xa4\xa8\xa4\xaa'
self.assertRaises(UnicodeError, c.header_encode, s)
c = Charset('utf-8')
eq(c.header_encode(s), '=?utf-8?b?wqTCosKkwqTCpMKmwqTCqMKkwqo=?=')
def test_body_encode(self):
eq = self.assertEqual
# Try a charset with QP body encoding
c = Charset('iso-8859-1')
eq('hello w=F6rld', c.body_encode('hello w\xf6rld'))
# Try a charset with Base64 body encoding
c = Charset('utf-8')
eq('aGVsbG8gd29ybGQ=\n', c.body_encode(b'hello world'))
# Try a charset with None body encoding
c = Charset('us-ascii')
eq('hello world', c.body_encode('hello world'))
# Try the convert argument, where input codec != output codec
c = Charset('euc-jp')
# With apologies to Tokio Kikuchi ;)
# XXX FIXME
## try:
## eq('\x1b$B5FCO;~IW\x1b(B',
## c.body_encode('\xb5\xc6\xc3\xcf\xbb\xfe\xc9\xd7'))
## eq('\xb5\xc6\xc3\xcf\xbb\xfe\xc9\xd7',
## c.body_encode('\xb5\xc6\xc3\xcf\xbb\xfe\xc9\xd7', False))
## except LookupError:
## # We probably don't have the Japanese codecs installed
## pass
# Testing SF bug #625509, which we have to fake, since there are no
# built-in encodings where the header encoding is QP but the body
# encoding is not.
from email import charset as CharsetModule
CharsetModule.add_charset('fake', CharsetModule.QP, None, 'utf-8')
c = Charset('fake')
eq('hello world', c.body_encode('hello world'))
def test_unicode_charset_name(self):
charset = Charset('us-ascii')
self.assertEqual(str(charset), 'us-ascii')
self.assertRaises(errors.CharsetError, Charset, 'asc\xffii')
# Test multilingual MIME headers.
class TestHeader(TestEmailBase):
def test_simple(self):
eq = self.ndiffAssertEqual
h = Header('Hello World!')
eq(h.encode(), 'Hello World!')
h.append(' Goodbye World!')
eq(h.encode(), 'Hello World! Goodbye World!')
def test_simple_surprise(self):
eq = self.ndiffAssertEqual
h = Header('Hello World!')
eq(h.encode(), 'Hello World!')
h.append('Goodbye World!')
eq(h.encode(), 'Hello World! Goodbye World!')
def test_header_needs_no_decoding(self):
h = 'no decoding needed'
self.assertEqual(decode_header(h), [(h, None)])
def test_long(self):
h = Header("I am the very model of a modern Major-General; I've information vegetable, animal, and mineral; I know the kings of England, and I quote the fights historical from Marathon to Waterloo, in order categorical; I'm very well acquainted, too, with matters mathematical; I understand equations, both the simple and quadratical; about binomial theorem I'm teeming with a lot o' news, with many cheerful facts about the square of the hypotenuse.",
maxlinelen=76)
for l in h.encode(splitchars=' ').split('\n '):
self.assertLessEqual(len(l), 76)
def test_multilingual(self):
eq = self.ndiffAssertEqual
g = Charset("iso-8859-1")
cz = Charset("iso-8859-2")
utf8 = Charset("utf-8")
g_head = (b'Die Mieter treten hier ein werden mit einem '
b'Foerderband komfortabel den Korridor entlang, '
b'an s\xfcdl\xfcndischen Wandgem\xe4lden vorbei, '
b'gegen die rotierenden Klingen bef\xf6rdert. ')
cz_head = (b'Finan\xe8ni metropole se hroutily pod tlakem jejich '
b'd\xf9vtipu.. ')
utf8_head = ('\u6b63\u78ba\u306b\u8a00\u3046\u3068\u7ffb\u8a33\u306f'
'\u3055\u308c\u3066\u3044\u307e\u305b\u3093\u3002\u4e00'
'\u90e8\u306f\u30c9\u30a4\u30c4\u8a9e\u3067\u3059\u304c'
'\u3001\u3042\u3068\u306f\u3067\u305f\u3089\u3081\u3067'
'\u3059\u3002\u5b9f\u969b\u306b\u306f\u300cWenn ist das '
'Nunstuck git und Slotermeyer? Ja! Beiherhund das Oder '
'die Flipperwaldt gersput.\u300d\u3068\u8a00\u3063\u3066'
'\u3044\u307e\u3059\u3002')
h = Header(g_head, g)
h.append(cz_head, cz)
h.append(utf8_head, utf8)
enc = h.encode(maxlinelen=76)
eq(enc, """\
=?iso-8859-1?q?Die_Mieter_treten_hier_ein_werden_mit_einem_Foerderband_kom?=
=?iso-8859-1?q?fortabel_den_Korridor_entlang=2C_an_s=FCdl=FCndischen_Wand?=
=?iso-8859-1?q?gem=E4lden_vorbei=2C_gegen_die_rotierenden_Klingen_bef=F6r?=
=?iso-8859-1?q?dert=2E_?= =?iso-8859-2?q?Finan=E8ni_metropole_se_hroutily?=
=?iso-8859-2?q?_pod_tlakem_jejich_d=F9vtipu=2E=2E_?= =?utf-8?b?5q2j56K6?=
=?utf-8?b?44Gr6KiA44GG44Go57+76Kiz44Gv44GV44KM44Gm44GE44G+44Gb44KT44CC?=
=?utf-8?b?5LiA6YOo44Gv44OJ44Kk44OE6Kqe44Gn44GZ44GM44CB44GC44Go44Gv44Gn?=
=?utf-8?b?44Gf44KJ44KB44Gn44GZ44CC5a6f6Zqb44Gr44Gv44CMV2VubiBpc3QgZGFz?=
=?utf-8?b?IE51bnN0dWNrIGdpdCB1bmQgU2xvdGVybWV5ZXI/IEphISBCZWloZXJodW5k?=
=?utf-8?b?IGRhcyBPZGVyIGRpZSBGbGlwcGVyd2FsZHQgZ2Vyc3B1dC7jgI3jgajoqIA=?=
=?utf-8?b?44Gj44Gm44GE44G+44GZ44CC?=""")
decoded = decode_header(enc)
eq(len(decoded), 3)
eq(decoded[0], (g_head, 'iso-8859-1'))
eq(decoded[1], (cz_head, 'iso-8859-2'))
eq(decoded[2], (utf8_head.encode('utf-8'), 'utf-8'))
ustr = str(h)
eq(ustr,
(b'Die Mieter treten hier ein werden mit einem Foerderband '
b'komfortabel den Korridor entlang, an s\xc3\xbcdl\xc3\xbcndischen '
b'Wandgem\xc3\xa4lden vorbei, gegen die rotierenden Klingen '
b'bef\xc3\xb6rdert. Finan\xc4\x8dni metropole se hroutily pod '
b'tlakem jejich d\xc5\xafvtipu.. \xe6\xad\xa3\xe7\xa2\xba\xe3\x81'
b'\xab\xe8\xa8\x80\xe3\x81\x86\xe3\x81\xa8\xe7\xbf\xbb\xe8\xa8\xb3'
b'\xe3\x81\xaf\xe3\x81\x95\xe3\x82\x8c\xe3\x81\xa6\xe3\x81\x84\xe3'
b'\x81\xbe\xe3\x81\x9b\xe3\x82\x93\xe3\x80\x82\xe4\xb8\x80\xe9\x83'
b'\xa8\xe3\x81\xaf\xe3\x83\x89\xe3\x82\xa4\xe3\x83\x84\xe8\xaa\x9e'
b'\xe3\x81\xa7\xe3\x81\x99\xe3\x81\x8c\xe3\x80\x81\xe3\x81\x82\xe3'
b'\x81\xa8\xe3\x81\xaf\xe3\x81\xa7\xe3\x81\x9f\xe3\x82\x89\xe3\x82'
b'\x81\xe3\x81\xa7\xe3\x81\x99\xe3\x80\x82\xe5\xae\x9f\xe9\x9a\x9b'
b'\xe3\x81\xab\xe3\x81\xaf\xe3\x80\x8cWenn ist das Nunstuck git '
b'und Slotermeyer? Ja! Beiherhund das Oder die Flipperwaldt '
b'gersput.\xe3\x80\x8d\xe3\x81\xa8\xe8\xa8\x80\xe3\x81\xa3\xe3\x81'
b'\xa6\xe3\x81\x84\xe3\x81\xbe\xe3\x81\x99\xe3\x80\x82'
).decode('utf-8'))
# Test make_header()
newh = make_header(decode_header(enc))
eq(newh, h)
def test_empty_header_encode(self):
h = Header()
self.assertEqual(h.encode(), '')
def test_header_ctor_default_args(self):
eq = self.ndiffAssertEqual
h = Header()
eq(h, '')
h.append('foo', Charset('iso-8859-1'))
eq(h, 'foo')
def test_explicit_maxlinelen(self):
eq = self.ndiffAssertEqual
hstr = ('A very long line that must get split to something other '
'than at the 76th character boundary to test the non-default '
'behavior')
h = Header(hstr)
eq(h.encode(), '''\
A very long line that must get split to something other than at the 76th
character boundary to test the non-default behavior''')
eq(str(h), hstr)
h = Header(hstr, header_name='Subject')
eq(h.encode(), '''\
A very long line that must get split to something other than at the
76th character boundary to test the non-default behavior''')
eq(str(h), hstr)
h = Header(hstr, maxlinelen=1024, header_name='Subject')
eq(h.encode(), hstr)
eq(str(h), hstr)
def test_quopri_splittable(self):
eq = self.ndiffAssertEqual
h = Header(charset='iso-8859-1', maxlinelen=20)
x = 'xxxx ' * 20
h.append(x)
s = h.encode()
eq(s, """\
=?iso-8859-1?q?xxx?=
=?iso-8859-1?q?x_?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?_x?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?x_?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?_x?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?x_?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?_x?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?x_?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?_x?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?x_?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?_x?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?x_?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?_x?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?x_?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?_x?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?x_?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?_x?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?x_?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?_x?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?x_?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?xx?=
=?iso-8859-1?q?_?=""")
eq(x, str(make_header(decode_header(s))))
h = Header(charset='iso-8859-1', maxlinelen=40)
h.append('xxxx ' * 20)
s = h.encode()
eq(s, """\
=?iso-8859-1?q?xxxx_xxxx_xxxx_xxxx_xxx?=
=?iso-8859-1?q?x_xxxx_xxxx_xxxx_xxxx_?=
=?iso-8859-1?q?xxxx_xxxx_xxxx_xxxx_xx?=
=?iso-8859-1?q?xx_xxxx_xxxx_xxxx_xxxx?=
=?iso-8859-1?q?_xxxx_xxxx_?=""")
eq(x, str(make_header(decode_header(s))))
def test_base64_splittable(self):
eq = self.ndiffAssertEqual
h = Header(charset='koi8-r', maxlinelen=20)
x = 'xxxx ' * 20
h.append(x)
s = h.encode()
eq(s, """\
=?koi8-r?b?eHh4?=
=?koi8-r?b?eCB4?=
=?koi8-r?b?eHh4?=
=?koi8-r?b?IHh4?=
=?koi8-r?b?eHgg?=
=?koi8-r?b?eHh4?=
=?koi8-r?b?eCB4?=
=?koi8-r?b?eHh4?=
=?koi8-r?b?IHh4?=
=?koi8-r?b?eHgg?=
=?koi8-r?b?eHh4?=
=?koi8-r?b?eCB4?=
=?koi8-r?b?eHh4?=
=?koi8-r?b?IHh4?=
=?koi8-r?b?eHgg?=
=?koi8-r?b?eHh4?=
=?koi8-r?b?eCB4?=
=?koi8-r?b?eHh4?=
=?koi8-r?b?IHh4?=
=?koi8-r?b?eHgg?=
=?koi8-r?b?eHh4?=
=?koi8-r?b?eCB4?=
=?koi8-r?b?eHh4?=
=?koi8-r?b?IHh4?=
=?koi8-r?b?eHgg?=
=?koi8-r?b?eHh4?=
=?koi8-r?b?eCB4?=
=?koi8-r?b?eHh4?=
=?koi8-r?b?IHh4?=
=?koi8-r?b?eHgg?=
=?koi8-r?b?eHh4?=
=?koi8-r?b?eCB4?=
=?koi8-r?b?eHh4?=
=?koi8-r?b?IA==?=""")
eq(x, str(make_header(decode_header(s))))
h = Header(charset='koi8-r', maxlinelen=40)
h.append(x)
s = h.encode()
eq(s, """\
=?koi8-r?b?eHh4eCB4eHh4IHh4eHggeHh4?=
=?koi8-r?b?eCB4eHh4IHh4eHggeHh4eCB4?=
=?koi8-r?b?eHh4IHh4eHggeHh4eCB4eHh4?=
=?koi8-r?b?IHh4eHggeHh4eCB4eHh4IHh4?=
=?koi8-r?b?eHggeHh4eCB4eHh4IHh4eHgg?=
=?koi8-r?b?eHh4eCB4eHh4IA==?=""")
eq(x, str(make_header(decode_header(s))))
def test_us_ascii_header(self):
eq = self.assertEqual
s = 'hello'
x = decode_header(s)
eq(x, [('hello', None)])
h = make_header(x)
eq(s, h.encode())
def test_string_charset(self):
eq = self.assertEqual
h = Header()
h.append('hello', 'iso-8859-1')
eq(h, 'hello')
## def test_unicode_error(self):
## raises = self.assertRaises
## raises(UnicodeError, Header, u'[P\xf6stal]', 'us-ascii')
## raises(UnicodeError, Header, '[P\xf6stal]', 'us-ascii')
## h = Header()
## raises(UnicodeError, h.append, u'[P\xf6stal]', 'us-ascii')
## raises(UnicodeError, h.append, '[P\xf6stal]', 'us-ascii')
## raises(UnicodeError, Header, u'\u83ca\u5730\u6642\u592b', 'iso-8859-1')
def test_utf8_shortest(self):
eq = self.assertEqual
h = Header('p\xf6stal', 'utf-8')
eq(h.encode(), '=?utf-8?q?p=C3=B6stal?=')
h = Header('\u83ca\u5730\u6642\u592b', 'utf-8')
eq(h.encode(), '=?utf-8?b?6I+K5Zyw5pmC5aSr?=')
def test_bad_8bit_header(self):
raises = self.assertRaises
eq = self.assertEqual
x = b'Ynwp4dUEbay Auction Semiar- No Charge \x96 Earn Big'
raises(UnicodeError, Header, x)
h = Header()
raises(UnicodeError, h.append, x)
e = x.decode('utf-8', 'replace')
eq(str(Header(x, errors='replace')), e)
h.append(x, errors='replace')
eq(str(h), e)
def test_escaped_8bit_header(self):
x = b'Ynwp4dUEbay Auction Semiar- No Charge \x96 Earn Big'
e = x.decode('ascii', 'surrogateescape')
h = Header(e, charset=email.charset.UNKNOWN8BIT)
self.assertEqual(str(h),
'Ynwp4dUEbay Auction Semiar- No Charge \uFFFD Earn Big')
self.assertEqual(email.header.decode_header(h), [(x, 'unknown-8bit')])
def test_header_handles_binary_unknown8bit(self):
x = b'Ynwp4dUEbay Auction Semiar- No Charge \x96 Earn Big'
h = Header(x, charset=email.charset.UNKNOWN8BIT)
self.assertEqual(str(h),
'Ynwp4dUEbay Auction Semiar- No Charge \uFFFD Earn Big')
self.assertEqual(email.header.decode_header(h), [(x, 'unknown-8bit')])
def test_make_header_handles_binary_unknown8bit(self):
x = b'Ynwp4dUEbay Auction Semiar- No Charge \x96 Earn Big'
h = Header(x, charset=email.charset.UNKNOWN8BIT)
h2 = email.header.make_header(email.header.decode_header(h))
self.assertEqual(str(h2),
'Ynwp4dUEbay Auction Semiar- No Charge \uFFFD Earn Big')
self.assertEqual(email.header.decode_header(h2), [(x, 'unknown-8bit')])
def test_modify_returned_list_does_not_change_header(self):
h = Header('test')
chunks = email.header.decode_header(h)
chunks.append(('ascii', 'test2'))
self.assertEqual(str(h), 'test')
def test_encoded_adjacent_nonencoded(self):
eq = self.assertEqual
h = Header()
h.append('hello', 'iso-8859-1')
h.append('world')
s = h.encode()
eq(s, '=?iso-8859-1?q?hello?= world')
h = make_header(decode_header(s))
eq(h.encode(), s)
def test_whitespace_keeper(self):
eq = self.assertEqual
s = 'Subject: =?koi8-r?b?8NLP18XSy8EgzsEgxsnOwczYztk=?= =?koi8-r?q?=CA?= zz.'
parts = decode_header(s)
eq(parts, [(b'Subject: ', None), (b'\xf0\xd2\xcf\xd7\xc5\xd2\xcb\xc1 \xce\xc1 \xc6\xc9\xce\xc1\xcc\xd8\xce\xd9\xca', 'koi8-r'), (b' zz.', None)])
hdr = make_header(parts)
eq(hdr.encode(),
'Subject: =?koi8-r?b?8NLP18XSy8EgzsEgxsnOwczYztnK?= zz.')
def test_broken_base64_header(self):
raises = self.assertRaises
s = 'Subject: =?EUC-KR?B?CSixpLDtKSC/7Liuvsax4iC6uLmwMcijIKHaILzSwd/H0SC8+LCjwLsgv7W/+Mj3I ?='
raises(errors.HeaderParseError, decode_header, s)
def test_shift_jis_charset(self):
h = Header('文', charset='shift_jis')
self.assertEqual(h.encode(), '=?iso-2022-jp?b?GyRCSjgbKEI=?=')
def test_flatten_header_with_no_value(self):
# Issue 11401 (regression from email 4.x) Note that the space after
# the header doesn't reflect the input, but this is also the way
# email 4.x behaved. At some point it would be nice to fix that.
msg = email.message_from_string("EmptyHeader:")
self.assertEqual(str(msg), "EmptyHeader: \n\n")
def test_encode_preserves_leading_ws_on_value(self):
msg = Message()
msg['SomeHeader'] = ' value with leading ws'
self.assertEqual(str(msg), "SomeHeader: value with leading ws\n\n")
# Test RFC 2231 header parameters (en/de)coding
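# For reference, RFC 2231 lets one parameter value be split into numbered
# segments that may carry charset and language info. A hedged illustration
# (not one of the actual test messages below):
#     Content-Disposition: inline;
#         filename*0*=us-ascii'en'This%20is%20;
#         filename*1*=a%20long%20name.pdf
# decodes to the single filename "This is a long name.pdf".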
class TestRFC2231(TestEmailBase):
# test_headerregistry.TestContentTypeHeader.rfc2231_encoded_with_double_quotes
# test_headerregistry.TestContentTypeHeader.rfc2231_single_quote_inside_double_quotes
def test_get_param(self):
eq = self.assertEqual
msg = self._msgobj('msg_29.txt')
eq(msg.get_param('title'),
('us-ascii', 'en', 'This is even more ***fun*** isn\'t it!'))
eq(msg.get_param('title', unquote=False),
('us-ascii', 'en', '"This is even more ***fun*** isn\'t it!"'))
def test_set_param(self):
eq = self.ndiffAssertEqual
msg = Message()
msg.set_param('title', 'This is even more ***fun*** isn\'t it!',
charset='us-ascii')
eq(msg.get_param('title'),
('us-ascii', '', 'This is even more ***fun*** isn\'t it!'))
msg.set_param('title', 'This is even more ***fun*** isn\'t it!',
charset='us-ascii', language='en')
eq(msg.get_param('title'),
('us-ascii', 'en', 'This is even more ***fun*** isn\'t it!'))
msg = self._msgobj('msg_01.txt')
msg.set_param('title', 'This is even more ***fun*** isn\'t it!',
charset='us-ascii', language='en')
eq(msg.as_string(maxheaderlen=78), """\
Return-Path: <bbb@zzz.org>
Delivered-To: bbb@zzz.org
Received: by mail.zzz.org (Postfix, from userid 889)
\tid 27CEAD38CC; Fri, 4 May 2001 14:05:44 -0400 (EDT)
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
Message-ID: <15090.61304.110929.45684@aaa.zzz.org>
From: bbb@ddd.com (John X. Doe)
To: bbb@zzz.org
Subject: This is a test message
Date: Fri, 4 May 2001 14:05:44 -0400
Content-Type: text/plain; charset=us-ascii;
title*=us-ascii'en'This%20is%20even%20more%20%2A%2A%2Afun%2A%2A%2A%20isn%27t%20it%21
Hi,
Do you like this message?
-Me
""")
def test_set_param_requote(self):
msg = Message()
msg.set_param('title', 'foo')
self.assertEqual(msg['content-type'], 'text/plain; title="foo"')
msg.set_param('title', 'bar', requote=False)
self.assertEqual(msg['content-type'], 'text/plain; title=bar')
# tspecial is still quoted.
msg.set_param('title', "(bar)bell", requote=False)
self.assertEqual(msg['content-type'], 'text/plain; title="(bar)bell"')
def test_del_param(self):
eq = self.ndiffAssertEqual
msg = self._msgobj('msg_01.txt')
msg.set_param('foo', 'bar', charset='us-ascii', language='en')
msg.set_param('title', 'This is even more ***fun*** isn\'t it!',
charset='us-ascii', language='en')
msg.del_param('foo', header='Content-Type')
eq(msg.as_string(maxheaderlen=78), """\
Return-Path: <bbb@zzz.org>
Delivered-To: bbb@zzz.org
Received: by mail.zzz.org (Postfix, from userid 889)
\tid 27CEAD38CC; Fri, 4 May 2001 14:05:44 -0400 (EDT)
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
Message-ID: <15090.61304.110929.45684@aaa.zzz.org>
From: bbb@ddd.com (John X. Doe)
To: bbb@zzz.org
Subject: This is a test message
Date: Fri, 4 May 2001 14:05:44 -0400
Content-Type: text/plain; charset="us-ascii";
title*=us-ascii'en'This%20is%20even%20more%20%2A%2A%2Afun%2A%2A%2A%20isn%27t%20it%21
Hi,
Do you like this message?
-Me
""")
# test_headerregistry.TestContentTypeHeader.rfc2231_encoded_charset
# I changed the charset name, though, because the one in the file isn't
# a legal charset name. Should add a test for an illegal charset.
def test_rfc2231_get_content_charset(self):
eq = self.assertEqual
msg = self._msgobj('msg_32.txt')
eq(msg.get_content_charset(), 'us-ascii')
# test_headerregistry.TestContentTypeHeader.rfc2231_encoded_no_double_quotes
def test_rfc2231_parse_rfc_quoting(self):
m = textwrap.dedent('''\
Content-Disposition: inline;
\tfilename*0*=''This%20is%20even%20more%20;
\tfilename*1*=%2A%2A%2Afun%2A%2A%2A%20;
\tfilename*2="is it not.pdf"
''')
msg = email.message_from_string(m)
self.assertEqual(msg.get_filename(),
'This is even more ***fun*** is it not.pdf')
self.assertEqual(m, msg.as_string())
# test_headerregistry.TestContentTypeHeader.rfc2231_encoded_with_double_quotes
def test_rfc2231_parse_extra_quoting(self):
m = textwrap.dedent('''\
Content-Disposition: inline;
\tfilename*0*="''This%20is%20even%20more%20";
\tfilename*1*="%2A%2A%2Afun%2A%2A%2A%20";
\tfilename*2="is it not.pdf"
''')
msg = email.message_from_string(m)
self.assertEqual(msg.get_filename(),
'This is even more ***fun*** is it not.pdf')
self.assertEqual(m, msg.as_string())
# test_headerregistry.TestContentTypeHeader.rfc2231_no_language_or_charset
# but new test uses *0* because otherwise lang/charset is not valid.
# test_headerregistry.TestContentTypeHeader.rfc2231_segmented_normal_values
def test_rfc2231_no_language_or_charset(self):
m = '''\
Content-Transfer-Encoding: 8bit
Content-Disposition: inline; filename="file____C__DOCUMENTS_20AND_20SETTINGS_FABIEN_LOCAL_20SETTINGS_TEMP_nsmail.htm"
Content-Type: text/html; NAME*0=file____C__DOCUMENTS_20AND_20SETTINGS_FABIEN_LOCAL_20SETTINGS_TEM; NAME*1=P_nsmail.htm
'''
msg = email.message_from_string(m)
param = msg.get_param('NAME')
self.assertNotIsInstance(param, tuple)
self.assertEqual(
param,
'file____C__DOCUMENTS_20AND_20SETTINGS_FABIEN_LOCAL_20SETTINGS_TEMP_nsmail.htm')
# test_headerregistry.TestContentTypeHeader.rfc2231_encoded_no_charset
def test_rfc2231_no_language_or_charset_in_filename(self):
m = '''\
Content-Disposition: inline;
\tfilename*0*="''This%20is%20even%20more%20";
\tfilename*1*="%2A%2A%2Afun%2A%2A%2A%20";
\tfilename*2="is it not.pdf"
'''
msg = email.message_from_string(m)
self.assertEqual(msg.get_filename(),
'This is even more ***fun*** is it not.pdf')
# Duplicate of previous test?
def test_rfc2231_no_language_or_charset_in_filename_encoded(self):
m = '''\
Content-Disposition: inline;
\tfilename*0*="''This%20is%20even%20more%20";
\tfilename*1*="%2A%2A%2Afun%2A%2A%2A%20";
\tfilename*2="is it not.pdf"
'''
msg = email.message_from_string(m)
self.assertEqual(msg.get_filename(),
'This is even more ***fun*** is it not.pdf')
# test_headerregistry.TestContentTypeHeader.rfc2231_partly_encoded,
# but the test below is wrong (the first part should be decoded).
def test_rfc2231_partly_encoded(self):
m = '''\
Content-Disposition: inline;
\tfilename*0="''This%20is%20even%20more%20";
\tfilename*1*="%2A%2A%2Afun%2A%2A%2A%20";
\tfilename*2="is it not.pdf"
'''
msg = email.message_from_string(m)
self.assertEqual(
msg.get_filename(),
'This%20is%20even%20more%20***fun*** is it not.pdf')
def test_rfc2231_partly_nonencoded(self):
m = '''\
Content-Disposition: inline;
\tfilename*0="This%20is%20even%20more%20";
\tfilename*1="%2A%2A%2Afun%2A%2A%2A%20";
\tfilename*2="is it not.pdf"
'''
msg = email.message_from_string(m)
self.assertEqual(
msg.get_filename(),
'This%20is%20even%20more%20%2A%2A%2Afun%2A%2A%2A%20is it not.pdf')
def test_rfc2231_no_language_or_charset_in_boundary(self):
m = '''\
Content-Type: multipart/alternative;
\tboundary*0*="''This%20is%20even%20more%20";
\tboundary*1*="%2A%2A%2Afun%2A%2A%2A%20";
\tboundary*2="is it not.pdf"
'''
msg = email.message_from_string(m)
self.assertEqual(msg.get_boundary(),
'This is even more ***fun*** is it not.pdf')
def test_rfc2231_no_language_or_charset_in_charset(self):
# This is a nonsensical charset value, but tests the code anyway
m = '''\
Content-Type: text/plain;
\tcharset*0*="This%20is%20even%20more%20";
\tcharset*1*="%2A%2A%2Afun%2A%2A%2A%20";
\tcharset*2="is it not.pdf"
'''
msg = email.message_from_string(m)
self.assertEqual(msg.get_content_charset(),
'this is even more ***fun*** is it not.pdf')
# test_headerregistry.TestContentTypeHeader.rfc2231_unknown_charset_treated_as_ascii
def test_rfc2231_bad_encoding_in_filename(self):
m = '''\
Content-Disposition: inline;
\tfilename*0*="bogus'xx'This%20is%20even%20more%20";
\tfilename*1*="%2A%2A%2Afun%2A%2A%2A%20";
\tfilename*2="is it not.pdf"
'''
msg = email.message_from_string(m)
self.assertEqual(msg.get_filename(),
'This is even more ***fun*** is it not.pdf')
def test_rfc2231_bad_encoding_in_charset(self):
m = """\
Content-Type: text/plain; charset*=bogus''utf-8%E2%80%9D
"""
msg = email.message_from_string(m)
# This should return None because non-ascii characters in the charset
# are not allowed.
self.assertEqual(msg.get_content_charset(), None)
def test_rfc2231_bad_character_in_charset(self):
m = """\
Content-Type: text/plain; charset*=ascii''utf-8%E2%80%9D
"""
msg = email.message_from_string(m)
# This should return None because non-ascii characters in the charset
# are not allowed.
self.assertEqual(msg.get_content_charset(), None)
def test_rfc2231_bad_character_in_filename(self):
m = '''\
Content-Disposition: inline;
\tfilename*0*="ascii'xx'This%20is%20even%20more%20";
\tfilename*1*="%2A%2A%2Afun%2A%2A%2A%20";
\tfilename*2*="is it not.pdf%E2"
'''
msg = email.message_from_string(m)
self.assertEqual(msg.get_filename(),
'This is even more ***fun*** is it not.pdf\ufffd')
def test_rfc2231_unknown_encoding(self):
m = """\
Content-Transfer-Encoding: 8bit
Content-Disposition: inline; filename*=X-UNKNOWN''myfile.txt
"""
msg = email.message_from_string(m)
self.assertEqual(msg.get_filename(), 'myfile.txt')
def test_rfc2231_single_tick_in_filename_extended(self):
eq = self.assertEqual
m = """\
Content-Type: application/x-foo;
\tname*0*=\"Frank's\"; name*1*=\" Document\"
"""
msg = email.message_from_string(m)
charset, language, s = msg.get_param('name')
eq(charset, None)
eq(language, None)
eq(s, "Frank's Document")
# test_headerregistry.TestContentTypeHeader.rfc2231_single_quote_inside_double_quotes
def test_rfc2231_single_tick_in_filename(self):
m = """\
Content-Type: application/x-foo; name*0=\"Frank's\"; name*1=\" Document\"
"""
msg = email.message_from_string(m)
param = msg.get_param('name')
self.assertNotIsInstance(param, tuple)
self.assertEqual(param, "Frank's Document")
def test_rfc2231_missing_tick(self):
m = '''\
Content-Disposition: inline;
\tfilename*0*="'This%20is%20broken";
'''
msg = email.message_from_string(m)
self.assertEqual(
msg.get_filename(),
"'This is broken")
def test_rfc2231_missing_tick_with_encoded_non_ascii(self):
m = '''\
Content-Disposition: inline;
\tfilename*0*="'This%20is%E2broken";
'''
msg = email.message_from_string(m)
self.assertEqual(
msg.get_filename(),
"'This is\ufffdbroken")
# test_headerregistry.TestContentTypeHeader.rfc2231_single_quote_in_value_with_charset_and_lang
def test_rfc2231_tick_attack_extended(self):
eq = self.assertEqual
m = """\
Content-Type: application/x-foo;
\tname*0*=\"us-ascii'en-us'Frank's\"; name*1*=\" Document\"
"""
msg = email.message_from_string(m)
charset, language, s = msg.get_param('name')
eq(charset, 'us-ascii')
eq(language, 'en-us')
eq(s, "Frank's Document")
# test_headerregistry.TestContentTypeHeader.rfc2231_single_quote_in_non_encoded_value
def test_rfc2231_tick_attack(self):
m = """\
Content-Type: application/x-foo;
\tname*0=\"us-ascii'en-us'Frank's\"; name*1=\" Document\"
"""
msg = email.message_from_string(m)
param = msg.get_param('name')
self.assertNotIsInstance(param, tuple)
self.assertEqual(param, "us-ascii'en-us'Frank's Document")
# test_headerregistry.TestContentTypeHeader.rfc2231_single_quotes_inside_quotes
def test_rfc2231_no_extended_values(self):
eq = self.assertEqual
m = """\
Content-Type: application/x-foo; name=\"Frank's Document\"
"""
msg = email.message_from_string(m)
eq(msg.get_param('name'), "Frank's Document")
# test_headerregistry.TestContentTypeHeader.rfc2231_encoded_then_unencoded_segments
def test_rfc2231_encoded_then_unencoded_segments(self):
eq = self.assertEqual
m = """\
Content-Type: application/x-foo;
\tname*0*=\"us-ascii'en-us'My\";
\tname*1=\" Document\";
\tname*2*=\" For You\"
"""
msg = email.message_from_string(m)
charset, language, s = msg.get_param('name')
eq(charset, 'us-ascii')
eq(language, 'en-us')
eq(s, 'My Document For You')
# test_headerregistry.TestContentTypeHeader.rfc2231_unencoded_then_encoded_segments
# test_headerregistry.TestContentTypeHeader.rfc2231_quoted_unencoded_then_encoded_segments
def test_rfc2231_unencoded_then_encoded_segments(self):
eq = self.assertEqual
m = """\
Content-Type: application/x-foo;
\tname*0=\"us-ascii'en-us'My\";
\tname*1*=\" Document\";
\tname*2*=\" For You\"
"""
msg = email.message_from_string(m)
charset, language, s = msg.get_param('name')
eq(charset, 'us-ascii')
eq(language, 'en-us')
eq(s, 'My Document For You')
# Tests to ensure that signed parts of an email are completely preserved, as
# required by RFC1847 section 2.1. Note that these are incomplete, because the
# email package does not currently always preserve the body. See issue 1670765.
class TestSigned(TestEmailBase):
def _msg_and_obj(self, filename):
with openfile(filename) as fp:
original = fp.read()
msg = email.message_from_string(original)
return original, msg
def _signed_parts_eq(self, original, result):
# Extract the first mime part of each message
import re
repart = re.compile(r'^--([^\n]+)\n(.*?)\n--\1$', re.S | re.M)
inpart = repart.search(original).group(2)
outpart = repart.search(result).group(2)
self.assertEqual(outpart, inpart)
def test_long_headers_as_string(self):
original, msg = self._msg_and_obj('msg_45.txt')
result = msg.as_string()
self._signed_parts_eq(original, result)
def test_long_headers_as_string_maxheaderlen(self):
original, msg = self._msg_and_obj('msg_45.txt')
result = msg.as_string(maxheaderlen=60)
self._signed_parts_eq(original, result)
def test_long_headers_flatten(self):
original, msg = self._msg_and_obj('msg_45.txt')
fp = StringIO()
Generator(fp).flatten(msg)
result = fp.getvalue()
self._signed_parts_eq(original, result)
if __name__ == '__main__':
unittest.main()
| 38.503375
| 460
| 0.620006
|
d5c48dc166521235e69b3aa1d57bd546e5ff40c6
| 10,347
|
py
|
Python
|
tools/get_action_list.py
|
mail2nsrajesh/mistral
|
b19d87141563e00f18cd74c685392d0b9b70e351
|
[
"Apache-2.0"
] | null | null | null |
tools/get_action_list.py
|
mail2nsrajesh/mistral
|
b19d87141563e00f18cd74c685392d0b9b70e351
|
[
"Apache-2.0"
] | null | null | null |
tools/get_action_list.py
|
mail2nsrajesh/mistral
|
b19d87141563e00f18cd74c685392d0b9b70e351
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015 - Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import collections
import inspect
import json
import os
from aodhclient.v2 import base as aodh_base
from aodhclient.v2 import client as aodhclient
from barbicanclient import base as barbican_base
from barbicanclient import client as barbicanclient
from ceilometerclient.v2 import client as ceilometerclient
from cinderclient.apiclient import base as cinder_base
from cinderclient.v2 import client as cinderclient
from designateclient import client as designateclient
from glanceclient.v2 import client as glanceclient
from gnocchiclient.v1 import base as gnocchi_base
from gnocchiclient.v1 import client as gnocchiclient
from heatclient.common import base as heat_base
from heatclient.v1 import client as heatclient
from ironicclient.common import base as ironic_base
from ironicclient.v1 import client as ironicclient
from keystoneclient import base as keystone_base
from keystoneclient.v3 import client as keystoneclient
from magnumclient.common import base as magnum_base
from magnumclient.v1 import client as magnumclient
from mistralclient.api import base as mistral_base
from mistralclient.api.v2 import client as mistralclient
from muranoclient.common import base as murano_base
from muranoclient.v1 import client as muranoclient
from novaclient import base as nova_base
from novaclient import client as novaclient
from troveclient import base as trove_base
from troveclient.v1 import client as troveclient
# TODO(nmakhotkin): Find a rational way to do it for neutron.
# TODO(nmakhotkin): Implement recursive way of searching for managers
# TODO(nmakhotkin): (e.g. keystone).
# TODO(dprince): Need to update ironic_inspector_client before we can
# plug it in cleanly here.
# TODO(dprince): Swiftclient doesn't currently support discovery
# like we do in this class.
# TODO(therve): Zaqarclient doesn't currently support discovery
# like we do in this class.
# TODO(sa709c): Tackerclient doesn't currently support discovery
# like we do in this class.
"""It is simple CLI tool which allows to see and update mapping.json file
if needed. mapping.json contains all allowing OpenStack actions sorted by
service name. Usage example:
python tools/get_action_list.py nova
The result will be simple JSON containing action name as a key and method
path as a value. For updating mapping.json it is need to copy all keys and
values of the result to corresponding section of mapping.json:
...mapping.json...
"nova": {
<put it here>
},
...mapping.json...
Note: in case of Keystone service, correct OS_AUTH_URL v3 and the rest auth
info must be provided. It can be provided either via environment variables
or CLI arguments. See --help for details.
"""
BASE_HEAT_MANAGER = heat_base.HookableMixin
BASE_NOVA_MANAGER = nova_base.HookableMixin
BASE_KEYSTONE_MANAGER = keystone_base.Manager
BASE_CINDER_MANAGER = cinder_base.HookableMixin
BASE_MISTRAL_MANAGER = mistral_base.ResourceManager
BASE_TROVE_MANAGER = trove_base.Manager
BASE_IRONIC_MANAGER = ironic_base.Manager
BASE_BARBICAN_MANAGER = barbican_base.BaseEntityManager
BASE_MAGNUM_MANAGER = magnum_base.Manager
BASE_MURANO_MANAGER = murano_base.Manager
BASE_AODH_MANAGER = aodh_base.Manager
BASE_GNOCCHI_MANAGER = gnocchi_base.Manager
def get_parser():
parser = argparse.ArgumentParser(
        description='Gets all needed methods of OpenStack clients.',
usage="python get_action_list.py <service_name>"
)
parser.add_argument(
'service',
choices=CLIENTS.keys(),
        help='Service name whose methods need to be found.'
)
parser.add_argument(
'--os-username',
dest='username',
default=os.environ.get('OS_USERNAME', 'admin'),
help='Authentication username (Env: OS_USERNAME)'
)
parser.add_argument(
'--os-password',
dest='password',
default=os.environ.get('OS_PASSWORD', 'openstack'),
help='Authentication password (Env: OS_PASSWORD)'
)
parser.add_argument(
'--os-tenant-name',
dest='tenant_name',
default=os.environ.get('OS_TENANT_NAME', 'Default'),
help='Authentication tenant name (Env: OS_TENANT_NAME)'
)
parser.add_argument(
'--os-auth-url',
dest='auth_url',
default=os.environ.get('OS_AUTH_URL'),
help='Authentication URL (Env: OS_AUTH_URL)'
)
return parser
GLANCE_NAMESPACE_LIST = [
'image_members', 'image_tags', 'images', 'schemas', 'tasks',
'metadefs_resource_type', 'metadefs_property', 'metadefs_object',
'metadefs_tag', 'metadefs_namespace', 'versions'
]
CEILOMETER_NAMESPACE_LIST = [
'alarms', 'capabilities', 'event_types', 'events', 'meters',
'new_samples', 'query_alarm_history', 'query_alarms', 'query_samples',
'resources', 'samples', 'statistics', 'trait_descriptions', 'traits'
]
DESIGNATE_NAMESPACE_LIST = [
'diagnostics', 'domains', 'quotas', 'records', 'reports', 'servers',
'sync', 'touch'
]
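# The factory functions below construct clients with placeholder
# credentials and endpoints; only attribute introspection is performed,
# so no real API calls are ever made against these clients.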
def get_nova_client(**kwargs):
return novaclient.Client(2)
def get_keystone_client(**kwargs):
return keystoneclient.Client(**kwargs)
def get_glance_client(**kwargs):
return glanceclient.Client(kwargs.get('auth_url'))
def get_heat_client(**kwargs):
return heatclient.Client('')
def get_ceilometer_client(**kwargs):
return ceilometerclient.Client('')
def get_cinder_client(**kwargs):
return cinderclient.Client()
def get_mistral_client(**kwargs):
return mistralclient.Client()
def get_trove_client(**kwargs):
return troveclient.Client('username', 'password')
def get_ironic_client(**kwargs):
return ironicclient.Client("http://127.0.0.1:6385/")
def get_barbican_client(**kwargs):
return barbicanclient.Client(
project_id="1",
endpoint="http://127.0.0.1:9311"
)
def get_designate_client(**kwargs):
return designateclient.Client('1')
def get_magnum_client(**kwargs):
return magnumclient.Client()
def get_murano_client(**kwargs):
return muranoclient.Client('')
def get_aodh_client(**kwargs):
return aodhclient.Client('')
def get_gnocchi_client(**kwargs):
return gnocchiclient.Client()
CLIENTS = {
'nova': get_nova_client,
'heat': get_heat_client,
'ceilometer': get_ceilometer_client,
'cinder': get_cinder_client,
'keystone': get_keystone_client,
'glance': get_glance_client,
'trove': get_trove_client,
'ironic': get_ironic_client,
'barbican': get_barbican_client,
'mistral': get_mistral_client,
'designate': get_designate_client,
'magnum': get_magnum_client,
'murano': get_murano_client,
'aodh': get_aodh_client,
'gnocchi': get_gnocchi_client,
# 'neutron': get_nova_client
# 'baremetal_introspection': ...
# 'swift': ...
# 'zaqar': ...
}
BASE_MANAGERS = {
'nova': BASE_NOVA_MANAGER,
'heat': BASE_HEAT_MANAGER,
'ceilometer': None,
'cinder': BASE_CINDER_MANAGER,
'keystone': BASE_KEYSTONE_MANAGER,
'glance': None,
'trove': BASE_TROVE_MANAGER,
'ironic': BASE_IRONIC_MANAGER,
'barbican': BASE_BARBICAN_MANAGER,
'mistral': BASE_MISTRAL_MANAGER,
'designate': None,
'magnum': BASE_MAGNUM_MANAGER,
'murano': BASE_MURANO_MANAGER,
'aodh': BASE_AODH_MANAGER,
'gnocchi': BASE_GNOCCHI_MANAGER,
# 'neutron': BASE_NOVA_MANAGER
# 'baremetal_introspection': ...
# 'swift': ...
# 'zaqar': ...
}
NAMESPACES = {
'glance': GLANCE_NAMESPACE_LIST,
'ceilometer': CEILOMETER_NAMESPACE_LIST,
'designate': DESIGNATE_NAMESPACE_LIST
}
ALLOWED_ATTRS = ['service_catalog', 'catalog']
FORBIDDEN_METHODS = [
'add_hook', 'alternate_service_type', 'completion_cache', 'run_hooks',
'write_to_completion_cache', 'model', 'build_key_only_query', 'build_url',
'head', 'put', 'unvalidated_model'
]
def get_public_attrs(obj):
all_attrs = dir(obj)
return [a for a in all_attrs if not a.startswith('_')]
def get_public_methods(attr, client):
hierarchy_list = attr.split('.')
attribute = client
for attr in hierarchy_list:
attribute = getattr(attribute, attr)
all_attributes_list = get_public_attrs(attribute)
methods = []
for a in all_attributes_list:
allowed = a in ALLOWED_ATTRS
forbidden = a in FORBIDDEN_METHODS
if (not forbidden and
(allowed or inspect.ismethod(getattr(attribute, a)))):
methods.append(a)
return methods
def get_manager_list(service_name, client):
base_manager = BASE_MANAGERS[service_name]
if not base_manager:
return NAMESPACES[service_name]
public_attrs = get_public_attrs(client)
manager_list = []
for attr in public_attrs:
if (isinstance(getattr(client, attr), base_manager)
or attr in ALLOWED_ATTRS):
manager_list.append(attr)
return manager_list
def get_mapping_for_service(service, client):
mapping = collections.OrderedDict()
for man in get_manager_list(service, client):
public_methods = get_public_methods(man, client)
for method in public_methods:
key = "%s_%s" % (man, method)
value = "%s.%s" % (man, method)
mapping[key] = value
return mapping
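# Hypothetical example: a manager "flavors" exposing list() and get() would
# contribute {"flavors_list": "flavors.list", "flavors_get": "flavors.get"}.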
def print_mapping(mapping):
print(json.dumps(mapping, indent=8, separators=(',', ': ')))
if __name__ == "__main__":
args = get_parser().parse_args()
auth_info = {
'username': args.username,
'tenant_name': args.tenant_name,
'password': args.password,
'auth_url': args.auth_url
}
service = args.service
client = CLIENTS.get(service)(**auth_info)
print("Find methods for service: %s..." % service)
print_mapping(get_mapping_for_service(service, client))
| 29.818444
| 78
| 0.718083
|
730fd313671a32636302e9dc7f34278c964feed0
| 566
|
py
|
Python
|
scripts/hail_batch/af_tob_wgs/main.py
|
populationgenomics/ancestry
|
faf6fd4bc3a1f8b2a2adb7e59cf584d4bfdf79e6
|
[
"MIT"
] | null | null | null |
scripts/hail_batch/af_tob_wgs/main.py
|
populationgenomics/ancestry
|
faf6fd4bc3a1f8b2a2adb7e59cf584d4bfdf79e6
|
[
"MIT"
] | 21
|
2021-03-09T06:35:59.000Z
|
2022-02-21T22:56:15.000Z
|
scripts/hail_batch/af_tob_wgs/main.py
|
populationgenomics/ancestry
|
faf6fd4bc3a1f8b2a2adb7e59cf584d4bfdf79e6
|
[
"MIT"
] | null | null | null |
"""Entry point for the analysis runner."""
import os
import hailtop.batch as hb
from analysis_runner import dataproc
service_backend = hb.ServiceBackend(
billing_project=os.getenv("HAIL_BILLING_PROJECT"), bucket=os.getenv("HAIL_BUCKET")
)
batch = hb.Batch(name="calculate-maf", backend=service_backend)
dataproc.hail_dataproc_job(
batch,
f"calculate_maf.py",
max_age="12h",
num_secondary_workers=20,
init=["gs://cpg-reference/hail_dataproc/install_common.sh"],
job_name=f"calculate_maf",
worker_boot_disk_size=200,
)
batch.run()
| 23.583333
| 86
| 0.745583
|
e295296ee0eec5ecdd2d8e4637141b6333619143
| 322
|
py
|
Python
|
remote_tutor/users/apps.py
|
wasim2263/remote-tutor
|
803dbd5b500bf5b82e4888f40463cbd2db1125ac
|
[
"MIT"
] | null | null | null |
remote_tutor/users/apps.py
|
wasim2263/remote-tutor
|
803dbd5b500bf5b82e4888f40463cbd2db1125ac
|
[
"MIT"
] | null | null | null |
remote_tutor/users/apps.py
|
wasim2263/remote-tutor
|
803dbd5b500bf5b82e4888f40463cbd2db1125ac
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
from django.utils.translation import gettext_lazy as _
class UsersConfig(AppConfig):
name = "remote_tutor.users"
verbose_name = _("Users")
def ready(self):
try:
            import remote_tutor.users.signals  # noqa: F401
except ImportError:
pass
| 23
| 58
| 0.664596
|
9d2f56d4c00029c1ed4d8e6f1665488d2f7f7f87
| 7,864
|
py
|
Python
|
gen2-fire-detection/main.py
|
Thrada/depthai-experiments
|
f06312596b47427f9699a300ae8bc143dbbac000
|
[
"MIT"
] | 381
|
2020-05-31T22:36:51.000Z
|
2022-03-31T15:39:36.000Z
|
gen2-fire-detection/main.py
|
Thrada/depthai-experiments
|
f06312596b47427f9699a300ae8bc143dbbac000
|
[
"MIT"
] | 211
|
2020-09-12T20:49:18.000Z
|
2022-03-31T17:22:52.000Z
|
gen2-fire-detection/main.py
|
Thrada/depthai-experiments
|
f06312596b47427f9699a300ae8bc143dbbac000
|
[
"MIT"
] | 189
|
2020-06-01T19:09:51.000Z
|
2022-03-31T15:39:28.000Z
|
# coding=utf-8
from pathlib import Path
import argparse
import cv2
import depthai
import numpy as np
from imutils.video import FPS
parser = argparse.ArgumentParser()
parser.add_argument(
"-nd", "--no-debug", action="store_true", help="prevent debug output"
)
parser.add_argument(
"-cam",
"--camera",
action="store_true",
help="Use DepthAI 4K RGB camera for inference (conflicts with -vid)",
)
parser.add_argument(
"-vid",
"--video",
type=str,
help="The path of the video file used for inference (conflicts with -cam)",
)
args = parser.parse_args()
debug = not args.no_debug
if args.camera and args.video:
raise ValueError(
        'Command line parameter error! "-cam" cannot be used together with "-vid"!'
)
elif args.camera is False and args.video is None:
raise ValueError(
'Missing inference source! Use "-cam" to run on DepthAI cameras, or use "-vid <path>" to run on video files'
)
def to_planar(arr: np.ndarray, shape: tuple):
return cv2.resize(arr, shape).transpose((2, 0, 1)).flatten()
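# to_planar resizes an interleaved HWC frame and flattens it into planar
# CHW order, the layout DepthAI NNData expects; e.g. a (720, 1280, 3) frame
# resized to (224, 224) becomes a flat array of 3 * 224 * 224 values.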
def to_nn_result(nn_data):
return np.array(nn_data.getFirstLayerFp16())
def to_tensor_result(packet):
return {
name: np.array(packet.getLayerFp16(name))
for name in [tensor.name for tensor in packet.getRaw().tensors]
}
def run_nn(x_in, x_out, in_dict):
nn_data = depthai.NNData()
for key in in_dict:
nn_data.setLayer(key, in_dict[key])
x_in.send(nn_data)
return x_out.tryGet()
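# Note: tryGet() is non-blocking and returns None when no inference result
# is available yet, so callers must handle a None return (as run_fire does).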
class DepthAI:
def __init__(
self,
file=None,
camera=False,
):
print("Loading pipeline...")
self.file = file
self.camera = camera
self.fps_cam = FPS()
self.fps_nn = FPS()
self.create_pipeline()
self.start_pipeline()
self.fontScale = 1 if self.camera else 2
self.lineType = 0 if self.camera else 3
def create_pipeline(self):
print("Creating pipeline...")
self.pipeline = depthai.Pipeline()
if self.camera:
# ColorCamera
print("Creating Color Camera...")
self.cam = self.pipeline.createColorCamera()
self.cam.setPreviewSize(self._cam_size[1], self._cam_size[0])
self.cam.setResolution(
depthai.ColorCameraProperties.SensorResolution.THE_4_K
)
self.cam.setInterleaved(False)
self.cam.setBoardSocket(depthai.CameraBoardSocket.RGB)
self.cam.setColorOrder(depthai.ColorCameraProperties.ColorOrder.BGR)
self.cam_xout = self.pipeline.createXLinkOut()
self.cam_xout.setStreamName("preview")
self.cam.preview.link(self.cam_xout.input)
self.create_nns()
print("Pipeline created.")
def create_nns(self):
pass
def create_nn(self, model_path: str, model_name: str, first: bool = False):
"""
        :param model_path: path to the compiled model blob file
        :param model_name: model abbreviation used to name the XLink streams
        :param first: whether this is the first model; if so, and the camera
            is in use, its preview output is linked directly to the model input
        :return: None
"""
# NeuralNetwork
print(f"Creating {model_path} Neural Network...")
model_nn = self.pipeline.createNeuralNetwork()
model_nn.setBlobPath(str(Path(f"{model_path}").resolve().absolute()))
model_nn.input.setBlocking(False)
if first and self.camera:
print("linked cam.preview to model_nn.input")
self.cam.preview.link(model_nn.input)
else:
model_in = self.pipeline.createXLinkIn()
model_in.setStreamName(f"{model_name}_in")
model_in.out.link(model_nn.input)
model_nn_xout = self.pipeline.createXLinkOut()
model_nn_xout.setStreamName(f"{model_name}_nn")
model_nn.out.link(model_nn_xout.input)
def start_pipeline(self):
self.device = depthai.Device(self.pipeline)
print("Starting pipeline...")
self.start_nns()
if self.camera:
self.preview = self.device.getOutputQueue(
name="preview", maxSize=4, blocking=False
)
def start_nns(self):
pass
def put_text(self, text, dot, color=(0, 0, 255), font_scale=None, line_type=None):
font_scale = font_scale if font_scale else self.fontScale
line_type = line_type if line_type else self.lineType
dot = tuple(dot[:2])
cv2.putText(
img=self.debug_frame,
text=text,
org=dot,
fontFace=cv2.FONT_HERSHEY_COMPLEX,
fontScale=font_scale,
color=color,
lineType=line_type,
)
def parse(self):
if debug:
self.debug_frame = self.frame.copy()
self.parse_fun()
if debug:
cv2.imshow(
"Camera_view",
self.debug_frame,
)
self.fps_cam.update()
if cv2.waitKey(1) == ord("q"):
cv2.destroyAllWindows()
self.fps_cam.stop()
self.fps_nn.stop()
print(
f"FPS_CAMERA: {self.fps_cam.fps():.2f} , FPS_NN: {self.fps_nn.fps():.2f}"
)
raise StopIteration()
def parse_fun(self):
pass
def run_video(self):
cap = cv2.VideoCapture(str(Path(self.file).resolve().absolute()))
while cap.isOpened():
read_correctly, self.frame = cap.read()
if not read_correctly:
break
try:
self.parse()
except StopIteration:
break
cap.release()
def run_camera(self):
while True:
in_rgb = self.preview.tryGet()
if in_rgb is not None:
shape = (3, in_rgb.getHeight(), in_rgb.getWidth())
self.frame = (
in_rgb.getData().reshape(shape).transpose(1, 2, 0).astype(np.uint8)
)
self.frame = np.ascontiguousarray(self.frame)
try:
self.parse()
except StopIteration:
break
@property
def cam_size(self):
return self._cam_size
@cam_size.setter
def cam_size(self, v):
self._cam_size = v
def run(self):
self.fps_cam.start()
self.fps_nn.start()
if self.file is not None:
self.run_video()
else:
self.run_camera()
del self.device
class Main(DepthAI):
def __init__(self, file=None, camera=False):
self.cam_size = (255, 255)
super().__init__(file, camera)
def create_nns(self):
self.create_nn("models/fire-detection_openvino_2021.2_5shave.blob", "fire")
def start_nns(self):
self.fire_in = self.device.getInputQueue("fire_in", 4, False)
self.fire_nn = self.device.getOutputQueue("fire_nn", 4, False)
def run_fire(self):
labels = ["fire", "normal", "smoke"]
        h, w = self.frame.shape[:2]  # numpy shape is (height, width, ...)
nn_data = run_nn(
self.fire_in,
self.fire_nn,
{"Placeholder": to_planar(self.frame, (224, 224))},
)
if nn_data is None:
return
self.fps_nn.update()
results = to_tensor_result(nn_data).get("final_result")
i = int(np.argmax(results))
label = labels[i]
if label == "normal":
return
else:
if results[i] > 0.5:
self.put_text(
f"{label}:{results[i]:.2f}",
(10, 25),
color=(0, 0, 255),
font_scale=1,
)
def parse_fun(self):
self.run_fire()
if __name__ == "__main__":
if args.video:
Main(file=args.video).run()
else:
Main(camera=args.camera).run()
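# Example invocations (file path is hypothetical):
#     python main.py -cam            # infer on the DepthAI 4K RGB camera
#     python main.py -vid fire.mp4   # infer on a local video file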
| 28.18638
| 116
| 0.569939
|
6e0dffca045af1c5b37991a610fb4bb2cfc1e39a
| 1,685
|
py
|
Python
|
cilium/setup.py
|
remicalixte/integrations-core
|
b115e18c52820fe1a92495f538fdc14ddf83cfe1
|
[
"BSD-3-Clause"
] | null | null | null |
cilium/setup.py
|
remicalixte/integrations-core
|
b115e18c52820fe1a92495f538fdc14ddf83cfe1
|
[
"BSD-3-Clause"
] | null | null | null |
cilium/setup.py
|
remicalixte/integrations-core
|
b115e18c52820fe1a92495f538fdc14ddf83cfe1
|
[
"BSD-3-Clause"
] | null | null | null |
# (C) Datadog, Inc. 2019-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from codecs import open # To use a consistent encoding
from os import path
from setuptools import setup
HERE = path.dirname(path.abspath(__file__))
# Get version info
ABOUT = {}
with open(path.join(HERE, 'datadog_checks', 'cilium', '__about__.py')) as f:
exec(f.read(), ABOUT)
# Get the long description from the README file
with open(path.join(HERE, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
CHECKS_BASE_REQ = 'datadog-checks-base>=4.2.0'
setup(
name='datadog-cilium',
version=ABOUT['__version__'],
description='The Cilium check',
long_description=long_description,
long_description_content_type='text/markdown',
keywords='datadog agent cilium check',
# The project's main homepage.
url='https://github.com/DataDog/integrations-core',
# Author details
author='Datadog',
author_email='packages@datadoghq.com',
# License
license='BSD-3-Clause',
# See https://pypi.org/classifiers
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Topic :: System :: Monitoring',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.8',
],
# The package we're going to ship
packages=['datadog_checks.cilium'],
# Run-time dependencies
install_requires=[CHECKS_BASE_REQ],
# Extra files to ship with the wheel package
include_package_data=True,
)
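# Typical usage (hedged; exact commands depend on your packaging tooling):
#     pip install .            # install the check into the current env
#     python setup.py sdist    # build a source distribution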
| 30.636364
| 76
| 0.679525
|
f040b8e76a3118ad0bd16e2096c51350b340289f
| 4,426
|
py
|
Python
|
krb5/src/lib/krb5/krb/t_in_ccache_patypes.py
|
kbore/pbis-open
|
a05eb9309269b6402b4d6659bc45961986ea5eab
|
[
"Apache-2.0"
] | 372
|
2016-10-28T10:50:35.000Z
|
2022-03-18T19:54:37.000Z
|
krb5/src/lib/krb5/krb/t_in_ccache_patypes.py
|
kbore/pbis-open
|
a05eb9309269b6402b4d6659bc45961986ea5eab
|
[
"Apache-2.0"
] | 317
|
2016-11-02T17:41:48.000Z
|
2021-11-08T20:28:19.000Z
|
krb5/src/lib/krb5/krb/t_in_ccache_patypes.py
|
kenferrara/pbis-open
|
690c325d947b2bf6fb3032f9d660e41b94aea4be
|
[
"Apache-2.0"
] | 107
|
2016-11-03T19:25:16.000Z
|
2022-03-20T21:15:22.000Z
|
#!/usr/bin/python
# Copyright (C) 2010,2012 by the Massachusetts Institute of Technology.
# All rights reserved.
#
# Export of this software from the United States of America may
# require a specific license from the United States Government.
# It is the responsibility of any person or organization contemplating
# export to obtain such a license before exporting.
#
# WITHIN THAT CONSTRAINT, permission to use, copy, modify, and
# distribute this software and its documentation for any purpose and
# without fee is hereby granted, provided that the above copyright
# notice appear in all copies and that both that copyright notice and
# this permission notice appear in supporting documentation, and that
# the name of M.I.T. not be used in advertising or publicity pertaining
# to distribution of the software without specific, written prior
# permission. Furthermore if you modify this software you must label
# your software as modified software and not distribute it in such a
# fashion that it might be confused with the original M.I.T. software.
# M.I.T. makes no representations about the suitability of
# this software for any purpose. It is provided "as is" without express
# or implied warranty.
from k5test import *
# Create a bare-bones KDC.
realm = K5Realm(create_user=False, create_host=False)
# Create principals with and without the requires_preauth flag.
realm.run_kadminl('addprinc -pw pass nopreauth')
realm.run_kadminl('addprinc -pw pass +requires_preauth preauth')
# Check that we can get creds without preauth and without an in_ccache.
# This is the default behavior for kinit.
realm.run_as_client(['./t_in_ccache', 'nopreauth', 'pass'])
# Check that we can get creds with preauth and without an in_ccache. This
# is the default behavior for kinit.
realm.run_as_client(['./t_in_ccache', 'preauth', 'pass'])
# Check that we can get creds while supplying a now-populated input ccache that
# doesn't contain any relevant configuration.
realm.run_as_client(['./t_in_ccache', 'nopreauth', 'pass'])
realm.run_as_client(['./t_in_ccache', '-I', realm.ccache, 'preauth', 'pass'])
# Check that we can get creds while supplying a now-populated input ccache.
realm.run_as_client(['./t_in_ccache', 'preauth', 'pass'])
realm.run_as_client(['./t_in_ccache', '-I', realm.ccache, 'preauth', 'pass'])
# Check that we can't get creds while specifying patypes that aren't available
# in a FAST tunnel while using a FAST tunnel. Expect the client-end
# preauth-failed error.
realm.run_as_client(['./t_in_ccache', 'nopreauth', 'pass'])
realm.run_as_client(['./t_cc_config', '-p', realm.krbtgt_princ,
'pa_type', '2'])
realm.run_as_client(['./t_in_ccache', '-A', realm.ccache, '-I', realm.ccache,
'preauth', 'pass'], expected_code=210)
# Check that we can't get creds while specifying patypes that are only
# available in a FAST tunnel while not using a FAST tunnel. Expect the
# client-end preauth-failed error.
realm.run_as_client(['./t_in_ccache', 'nopreauth', 'pass'])
realm.run_as_client(['./t_cc_config', '-p', realm.krbtgt_princ,
'pa_type', '138'])
realm.run_as_client(['./t_in_ccache', '-I', realm.ccache, 'preauth', 'pass'],
expected_code=210)
# Check that we can get creds using FAST, and that we end up using
# encrypted_challenge when we do.
realm.run_as_client(['./t_in_ccache', 'preauth', 'pass'])
realm.run_as_client(['./t_cc_config', '-p', realm.krbtgt_princ,
'pa_type', '138'])
realm.run_as_client(['./t_in_ccache', '-A', realm.ccache, 'preauth', 'pass'])
output = realm.run_as_client(['./t_cc_config', '-p', realm.krbtgt_princ,
'pa_type'])
# We should have selected and used encrypted_challenge.
if output != '138':
fail('Unexpected pa_type value in out_ccache: "%s"' % output)
# Check that we can get creds while specifying the right patypes.
realm.run_as_client(['./t_in_ccache', 'nopreauth', 'pass'])
realm.run_as_client(['./t_cc_config', '-p', realm.krbtgt_princ,
'pa_type', '2'])
realm.run_as_client(['./t_in_ccache', '-I', realm.ccache, 'preauth', 'pass'])
output = realm.run_as_client(['./t_cc_config', '-p', realm.krbtgt_princ,
'pa_type'])
# We should have selected and used encrypted_timestamp.
if output != '2':
fail('Unexpected pa_type value in out_ccache')
success('input ccache pa_type tests')
| 47.591398
| 79
| 0.713059
|
040a91e489582bbdd69c217da3c9aaf0d3b81904
| 4,485
|
py
|
Python
|
vertex/test/test_sigma.py
|
twisted/vertex
|
feb591aa1b9a3b2b8fdcf53e4962dad2a0bc38ca
|
[
"MIT"
] | 56
|
2015-01-09T03:52:07.000Z
|
2021-09-26T22:17:06.000Z
|
vertex/test/test_sigma.py
|
DalavanCloud/vertex
|
feb591aa1b9a3b2b8fdcf53e4962dad2a0bc38ca
|
[
"MIT"
] | 34
|
2015-03-05T02:57:48.000Z
|
2017-05-23T22:34:13.000Z
|
vertex/test/test_sigma.py
|
DalavanCloud/vertex
|
feb591aa1b9a3b2b8fdcf53e4962dad2a0bc38ca
|
[
"MIT"
] | 17
|
2015-04-17T02:03:16.000Z
|
2021-11-12T03:31:07.000Z
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
from twisted.python.failure import Failure
from twisted.python.filepath import FilePath
from twisted.internet.error import ConnectionDone
from twisted.trial import unittest
from vertex.q2q import Q2QAddress
from vertex import sigma, conncache
from vertex.test.mock_data import data as TEST_DATA
from vertex.test.test_conncache import DisconnectingTransport
from vertex.test.helpers import FakeQ2QService
sender = Q2QAddress("sending-data.net", "sender")
receiver = Q2QAddress("receiving-data.org", "receiver")
class TestBase(unittest.TestCase):
def setUp(self):
self.realChunkSize = sigma.CHUNK_SIZE
sigma.CHUNK_SIZE = 100
svc = self.service = FakeQ2QService()
fname = self.mktemp()
sf = self.sfile = FilePath(fname)
if not sf.parent().isdir():
sf.parent().makedirs()
sf.open('w').write(TEST_DATA)
self.senderNexus = sigma.Nexus(svc, sender,
sigma.BaseNexusUI(self.mktemp()),
svc.callLater)
def tearDown(self):
self.senderNexus.stopService()
sigma.CHUNK_SIZE = self.realChunkSize
class BasicTransferTests(TestBase):
def setUp(self):
TestBase.setUp(self)
self.stoppers = []
self.receiverNexus = sigma.Nexus(self.service, receiver,
sigma.BaseNexusUI(self.mktemp()),
self.service.callLater)
self.stoppers.append(self.receiverNexus)
def tearDown(self):
TestBase.tearDown(self)
for stopper in self.stoppers:
stopper.stopService()
def test_OneSenderOneRecipient(self):
self.senderNexus.push(self.sfile, 'TESTtoTEST', [receiver])
self.service.flush()
peerThingyoes = childrenOf(self.receiverNexus.ui.basepath)
self.assertEquals(len(peerThingyoes), 1)
rfiles = childrenOf(peerThingyoes[0])
self.assertEquals(len(rfiles), 1)
rfile = rfiles[0]
rfdata = rfile.open().read()
self.assertEquals(len(rfdata),
len(TEST_DATA))
self.assertEquals(rfdata, TEST_DATA,
"file values unequal")
def test_OneSenderManyRecipients(self):
raddresses = [Q2QAddress("receiving-data.org", "receiver%d" % (x,))
for x in range(10)]
nexi = [sigma.Nexus(self.service,
radr,
sigma.BaseNexusUI(self.mktemp()),
self.service.callLater) for radr in raddresses]
self.stoppers.extend(nexi)
self.senderNexus.push(self.sfile, 'TESTtoTEST', raddresses)
self.service.flush()
receivedIntroductions = 0
for nexium in nexi:
receivedIntroductions += nexium.ui.receivedIntroductions
self.failUnless(receivedIntroductions > 1)
for nexium in nexi:
peerFiles = childrenOf(nexium.ui.basepath)
self.assertEquals(len(peerFiles), 1)
rfiles = childrenOf(peerFiles[0])
self.assertEquals(len(rfiles), 1, rfiles)
rfile = rfiles[0]
self.assertEquals(rfile.open().read(),
TEST_DATA,
"file value mismatch")
class SigmaConnectionCacheTests(unittest.TestCase):
"""
Tests for the interaction of L{sigma.SigmaProtocol} and
L{conncache.ConnectionCache}.
"""
def test_connectionLost_unregistersFromConnectionCache(self):
"""
L{sigma.SigmaProtocol.connectionLost} notifies the connection
cache that the connection is lost.
"""
cache = conncache.ConnectionCache()
class FakeNexus(object):
conns = cache
addr = object()
svc = object()
protocol = sigma.SigmaProtocol(FakeNexus())
transport = DisconnectingTransport()
q2qPeer = object()
transport.getQ2QPeer = lambda: q2qPeer
protocol.makeConnection(transport)
d = cache.shutdown()
transport.loseConnectionDeferred.callback(None)
self.assertNoResult(d)
protocol.connectionLost(Failure(ConnectionDone))
self.successResultOf(d)
def childrenOf(x):
# This should be a part of FilePath, but hey
return map(x.child, x.listdir())
| 31.808511
| 75
| 0.613824
|
dee7ab37320daf84773b768f1d5f72c2ab86a0a8
| 160
|
py
|
Python
|
CarParkArcGisApi/CarParkArcGisApi/env/Lib/site-packages/easyprocess/examples/ver.py
|
moazzamwaheed2017/carparkapi
|
e52ae1b2aed47321ce9d22ba6cd0b85fa60a417a
|
[
"MIT"
] | null | null | null |
CarParkArcGisApi/CarParkArcGisApi/env/Lib/site-packages/easyprocess/examples/ver.py
|
moazzamwaheed2017/carparkapi
|
e52ae1b2aed47321ce9d22ba6cd0b85fa60a417a
|
[
"MIT"
] | 9
|
2020-02-03T15:50:10.000Z
|
2022-03-02T07:11:34.000Z
|
CarParkArcGisApi/CarParkArcGisApi/env/Lib/site-packages/easyprocess/examples/ver.py
|
moazzamwaheed2017/carparkapi
|
e52ae1b2aed47321ce9d22ba6cd0b85fa60a417a
|
[
"MIT"
] | null | null | null |
from easyprocess import EasyProcess
import sys
python = sys.executable
v = EasyProcess([python, '--version']).call().stderr
print('your python version:%s' % v)
| 26.666667
| 52
| 0.74375
|
21d4b2cc64ffede263df2d3651fd044f2cb98cc3
| 4,787
|
py
|
Python
|
tern/utils/general.py
|
oc37ejuc/tern
|
0a18807d1d8089d2a7a73b5a3f83cd90bc4bfda2
|
[
"BSD-2-Clause"
] | null | null | null |
tern/utils/general.py
|
oc37ejuc/tern
|
0a18807d1d8089d2a7a73b5a3f83cd90bc4bfda2
|
[
"BSD-2-Clause"
] | null | null | null |
tern/utils/general.py
|
oc37ejuc/tern
|
0a18807d1d8089d2a7a73b5a3f83cd90bc4bfda2
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017-2020 VMware, Inc. All Rights Reserved.
# SPDX-License-Identifier: BSD-2-Clause
import os
import random
import re
import tarfile
import subprocess # nosec
from contextlib import contextmanager
from pathlib import Path
from pbr.version import VersionInfo
from tern.utils import constants
# regex strings
cleaning = '[\t\\\\]'
concat = '&&|;'
# from https://stackoverflow.com/questions/6194499/pushd-through-os-system
@contextmanager
def pushd(path):
curr_path = os.getcwd()
os.chdir(path)
yield
os.chdir(curr_path)
def get_top_dir():
'''Get the hidden working directory'''
return os.path.join(str(Path.home()), constants.dot_folder)
def initialize_names():
randint = random.randint(10000, 99999) # nosec
constants.image = constants.image + "_" + str(randint)
constants.tag = constants.tag + "_" + str(randint)
constants.container = constants.container + "_" + str(randint)
def clean_command(command):
'''Given a command string, remove tabs and line-continuation backslashes,
then strip surrounding whitespace
Leave && alone'''
return re.sub(cleaning, '', command).strip()
def split_command(command):
'''Given a string of concatenated commands, return a list of commands'''
return re.split(concat, command)
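# Illustrative behavior of the two helpers above (hypothetical inputs):
#   clean_command("\tapt-get install vim \\")        -> "apt-get install vim"
#   split_command("apt update && apt install vim")
#       -> ['apt update ', ' apt install vim']       (whitespace is preserved)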
def parse_command(command):
'''Parse a unix command of the form:
command (subcommand) [options] [arguments]
Caveats:
1. There is no way of knowing whether a command contains
subcommands or not so those tokens will be identified as 'words'
2. There is no way of knowing whether an option requires an
argument or not. The arguments will be identified as 'words'
For most cases involving package management this should be enough to
identify whether a package was meant to be installed or removed.
Convert a given command into a dictionary of the form:
{'name': command,
'options': [list of option tuples]
'words': [remaining words]}
An option tuple contains the option flag and the option argument
We look ahead: if the token after an option flag does not itself match
the option-flag regex, we assume it is that option's argument
The token still remains in the words list because we do not know for sure
if it is a command argument or an option argument'''
options = re.compile('^-')
option_list = []
word_list = []
command_dict = {}
command_tokens = command.split(' ')
# first token is the command name
command_dict.update({'name': command_tokens.pop(0).strip()})
# find options in the rest of the list
while command_tokens:
if options.match(command_tokens[0]):
option_flag = command_tokens.pop(0).strip()
# we have to check if this is the end of the command
if command_tokens and not options.match(command_tokens[0]):
option_arg = command_tokens[0].strip()
else:
option_arg = ''
option_list.append((option_flag, option_arg))
else:
word_list.append(command_tokens.pop(0).strip())
# now we have options and the remainder words
command_dict.update({'options': option_list,
'words': word_list})
return command_dict
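# Worked example (hypothetical command), tracing the logic above:
#   parse_command('apt-get install -y vim')
#   -> {'name': 'apt-get', 'options': [('-y', 'vim')], 'words': ['install', 'vim']}
# Note that 'vim' appears both as the option argument and in 'words', exactly
# as the docstring explains.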
def get_git_rev_or_version():
'''Either get the current git commit or the PyPI distribution
Use pbr to get the package version'''
command = ['git', 'rev-parse', 'HEAD']
try:
output = subprocess.check_output( # nosec
command, stderr=subprocess.DEVNULL)
if isinstance(output, bytes):
output = output.decode('utf-8')
ver_type = 'commit'
except subprocess.CalledProcessError:
ver_type = 'package'
output = VersionInfo('tern').version_string()
return ver_type, output.split('\n').pop(0)
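# Typical results (hypothetical values): ('commit', '0a18807d...') when run
# inside a git checkout, or ('package', '2.0.0') when git is unavailable and
# pbr supplies the installed distribution version instead.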
def prop_names(obj):
'''Given an object, return a generator that will produce the object's
property key in its __dict__ representation and it's name'''
prop_decorators = r'^__|^_'
for key in obj.__dict__.keys():
# remove private and protected decorator characters if any
priv_name = '_' + obj.__class__.__name__
prop_name = re.sub(priv_name, '', key)
prop_name = re.sub(prop_decorators, '', prop_name, 1)
yield key, prop_name
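# e.g. for an object whose __dict__ holds '_MyClass__secret' and '_name'
# (hypothetical attributes), this yields ('_MyClass__secret', 'secret')
# and ('_name', 'name').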
def check_tar(tar_file):
'''Check if provided file is a valid tar archive file'''
if os.path.exists(tar_file):
if tarfile.is_tarfile(tar_file):
return True
return False
def check_root():
'''Check to see if the current user is root or not. Return True if root
and False if not'''
if os.getuid() == 0:
return True
return False
| 33.475524
| 77
| 0.661583
|
e79e8ce748cec7462ebaaa522d163a16a51dc170
| 12,469
|
py
|
Python
|
src/sage/combinat/species/product_species.py
|
bopopescu/sage-5
|
9d85b34956ca2edd55af307f99c5d3859acd30bf
|
[
"BSL-1.0"
] | 5
|
2015-01-04T07:15:06.000Z
|
2022-03-04T15:15:18.000Z
|
src/sage/combinat/species/product_species.py
|
bopopescu/sage-5
|
9d85b34956ca2edd55af307f99c5d3859acd30bf
|
[
"BSL-1.0"
] | null | null | null |
src/sage/combinat/species/product_species.py
|
bopopescu/sage-5
|
9d85b34956ca2edd55af307f99c5d3859acd30bf
|
[
"BSL-1.0"
] | 10
|
2016-09-28T13:12:40.000Z
|
2022-02-12T09:28:34.000Z
|
"""
Sum species
"""
#*****************************************************************************
# Copyright (C) 2008 Mike Hansen <mhansen@gmail.com>,
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# The full text of the GPL is available at:
#
# http://www.gnu.org/licenses/
#*****************************************************************************
from species import GenericCombinatorialSpecies
from structure import GenericSpeciesStructure
from subset_species import SubsetSpecies
from sage.misc.cachefunc import cached_function
from sage.structure.unique_representation import UniqueRepresentation
class ProductSpeciesStructure(GenericSpeciesStructure):
def __repr__(self):
"""
Returns the string representation of this object.
EXAMPLES::
sage: S = species.SetSpecies()
sage: (S*S).structures(['a','b','c']).random_element()
{}*{'a', 'b', 'c'}
sage: (S*S*S).structures(['a','b','c']).random_element()
({'c'}*{'a'})*{'b'}
"""
left, right = map(repr, self._list)
if "*" in left:
left = "(%s)"%left
if "*" in right:
right = "(%s)"%right
return "%s*%s"%(left, right)
def __init__(self, parent, labels, subset, left, right):
"""
TESTS::
sage: S = species.SetSpecies()
sage: F = S * S
sage: a = F.structures(['a','b','c']).random_element()
sage: a == loads(dumps(a))
True
"""
self._subset = subset
GenericSpeciesStructure.__init__(self, parent, labels, [left, right])
def transport(self, perm):
"""
EXAMPLES::
sage: p = PermutationGroupElement((2,3))
sage: S = species.SetSpecies()
sage: F = S * S
sage: a = F.structures(['a','b','c'])[4]; a
{'a', 'b'}*{'c'}
sage: a.transport(p)
{'a', 'c'}*{'b'}
"""
left, right = self._list
new_subset = self._subset.transport(perm)
left_labels = new_subset.labels()
right_labels = new_subset.complement().labels()
return self.__class__(self.parent(), self._labels,
new_subset,
left.change_labels(left_labels),
right.change_labels(right_labels))
def canonical_label(self):
"""
EXAMPLES::
sage: S = species.SetSpecies()
sage: F = S * S
sage: S = F.structures(['a','b','c']).list(); S
[{}*{'a', 'b', 'c'},
{'a'}*{'b', 'c'},
{'b'}*{'a', 'c'},
{'c'}*{'a', 'b'},
{'a', 'b'}*{'c'},
{'a', 'c'}*{'b'},
{'b', 'c'}*{'a'},
{'a', 'b', 'c'}*{}]
::
sage: F.isotypes(['a','b','c']).cardinality()
4
sage: [s.canonical_label() for s in S]
[{}*{'a', 'b', 'c'},
{'a'}*{'b', 'c'},
{'a'}*{'b', 'c'},
{'a'}*{'b', 'c'},
{'a', 'b'}*{'c'},
{'a', 'b'}*{'c'},
{'a', 'b'}*{'c'},
{'a', 'b', 'c'}*{}]
"""
left, right = self._list
new_subset = self._subset.canonical_label()
left_labels = new_subset.labels()
right_labels = new_subset.complement().labels()
return self.__class__(self.parent(), self._labels,
new_subset,
left.canonical_label().change_labels(left_labels),
right.canonical_label().change_labels(right_labels))
def change_labels(self, labels):
"""
EXAMPLES::
sage: S = species.SetSpecies()
sage: F = S * S
sage: a = F.structures(['a','b','c']).random_element(); a
{}*{'a', 'b', 'c'}
sage: a.change_labels([1,2,3])
{}*{1, 2, 3}
"""
left, right = self._list
new_subset = self._subset.change_labels(labels)
left_labels = new_subset.labels()
right_labels = new_subset.complement().labels()
return self.__class__(self.parent(), labels,
new_subset,
left.change_labels(left_labels),
right.change_labels(right_labels))
def automorphism_group(self):
"""
EXAMPLES::
sage: p = PermutationGroupElement((2,3))
sage: S = species.SetSpecies()
sage: F = S * S
sage: a = F.structures([1,2,3,4]).random_element(); a
{1}*{2, 3, 4}
sage: a.automorphism_group()
Permutation Group with generators [(2,3), (2,3,4)]
::
sage: [a.transport(g) for g in a.automorphism_group()]
[{1}*{2, 3, 4},
{1}*{2, 3, 4},
{1}*{2, 3, 4},
{1}*{2, 3, 4},
{1}*{2, 3, 4},
{1}*{2, 3, 4}]
::
sage: a = F.structures([1,2,3,4]).random_element(); a
{2, 3}*{1, 4}
sage: [a.transport(g) for g in a.automorphism_group()]
[{2, 3}*{1, 4}, {2, 3}*{1, 4}, {2, 3}*{1, 4}, {2, 3}*{1, 4}]
"""
from sage.groups.all import PermutationGroupElement, PermutationGroup, SymmetricGroup
from sage.misc.misc import uniq
from sage.combinat.species.misc import change_support
left, right = self._list
n = len(self._labels)
#Get the supports for each of the sides
l_support = self._subset._list
r_support = self._subset.complement()._list
#Get the automorphism group for the left object and
#make it have the correct support. Do the same to the
#right side.
l_aut = change_support(left.automorphism_group(), l_support)
r_aut = change_support(right.automorphism_group(), r_support)
identity = PermutationGroupElement([])
gens = l_aut.gens() + r_aut.gens()
gens = [g for g in gens if g != identity]
gens = uniq(gens) if len(gens) > 0 else [[]]
return PermutationGroup(gens)
class ProductSpecies(GenericCombinatorialSpecies, UniqueRepresentation):
def __init__(self, F, G, min=None, max=None, weight=None):
"""
EXAMPLES::
sage: X = species.SingletonSpecies()
sage: A = X*X
sage: A.generating_series().coefficients(4)
[0, 0, 1, 0]
sage: P = species.PermutationSpecies()
sage: F = P * P; F
Product of (Permutation species) and (Permutation species)
sage: F == loads(dumps(F))
True
sage: F._check()
True
TESTS::
sage: X = species.SingletonSpecies()
sage: X*X is X*X
True
"""
self._F = F
self._G = G
self._state_info = [F, G]
        GenericCombinatorialSpecies.__init__(self, min=min, max=max, weight=weight)
_default_structure_class = ProductSpeciesStructure
def _name(self):
"""
Note that we use a method to return the name of this species
because we cannot compute it in __init__: doing so would require
self._F and self._G to already be unpickled.
EXAMPLES::
sage: P = species.PermutationSpecies()
sage: F = P * P
sage: F._name()
'Product of (Permutation species) and (Permutation species)'
"""
return "Product of (%s) and (%s)"%(self._F, self._G)
def _structures(self, structure_class, labels):
"""
EXAMPLES::
sage: S = species.SetSpecies()
sage: F = S * S
sage: F.structures([1,2]).list()
[{}*{1, 2}, {1}*{2}, {2}*{1}, {1, 2}*{}]
"""
return self._times_gen(structure_class, "structures", labels)
def _isotypes(self, structure_class, labels):
"""
EXAMPLES::
sage: S = species.SetSpecies()
sage: F = S * S
sage: F.isotypes([1,2,3]).list()
[{}*{1, 2, 3}, {1}*{2, 3}, {1, 2}*{3}, {1, 2, 3}*{}]
"""
return self._times_gen(structure_class, "isotypes", labels)
def _times_gen(self, structure_class, attr, labels):
"""
EXAMPLES::
sage: S = species.SetSpecies()
sage: F = S * S
sage: list(F._times_gen(F._default_structure_class, 'structures',[1,2]))
[{}*{1, 2}, {1}*{2}, {2}*{1}, {1, 2}*{}]
"""
c = lambda F,n: F.generating_series().coefficient(n)
S = SubsetSpecies()
for u in getattr(S, attr)(labels):
vl = u.complement().labels()
ul = u.labels()
if c(self._F, len(ul)) == 0 or c(self._G, len(vl)) == 0:
continue
for x in getattr(self._F, attr)(ul):
for y in getattr(self._G, attr)(vl):
yield structure_class(self, labels, u, x, y)
def _gs(self, series_ring, base_ring):
"""
EXAMPLES::
sage: P = species.PermutationSpecies()
sage: F = P * P
sage: F.generating_series().coefficients(5)
[1, 2, 3, 4, 5]
"""
res = self._F.generating_series(base_ring) * self._G.generating_series(base_ring)
if self.is_weighted():
res = self._weight * res
return res
def _itgs(self, series_ring, base_ring):
"""
EXAMPLES::
sage: P = species.PermutationSpecies()
sage: F = P * P
sage: F.isotype_generating_series().coefficients(5)
[1, 2, 5, 10, 20]
"""
res = (self._F.isotype_generating_series(base_ring) *
self._G.isotype_generating_series(base_ring))
if self.is_weighted():
res = self._weight * res
return res
def _cis(self, series_ring, base_ring):
"""
EXAMPLES::
sage: P = species.PermutationSpecies()
sage: F = P * P
sage: F.cycle_index_series().coefficients(5)
[p[],
2*p[1],
3*p[1, 1] + 2*p[2],
4*p[1, 1, 1] + 4*p[2, 1] + 2*p[3],
5*p[1, 1, 1, 1] + 6*p[2, 1, 1] + 3*p[2, 2] + 4*p[3, 1] + 2*p[4]]
"""
res = (self._F.cycle_index_series(base_ring) *
self._G.cycle_index_series(base_ring))
if self.is_weighted():
res = self._weight * res
return res
def weight_ring(self):
"""
Returns the weight ring for this species. This is determined by
asking Sage's coercion model what the result is when you multiply
(and add) elements of the weight rings for each of the operands.
EXAMPLES::
sage: S = species.SetSpecies()
sage: C = S*S
sage: C.weight_ring()
Rational Field
::
sage: S = species.SetSpecies(weight=QQ['t'].gen())
sage: C = S*S
sage: C.weight_ring()
Univariate Polynomial Ring in t over Rational Field
::
sage: S = species.SetSpecies()
sage: C = (S*S).weighted(QQ['t'].gen())
sage: C.weight_ring()
Univariate Polynomial Ring in t over Rational Field
"""
return self._common_parent([self._F.weight_ring(), self._G.weight_ring(), self._weight.parent()])
def _equation(self, var_mapping):
"""
Returns the right hand side of an algebraic equation satisfied by
this species. This is a utility function called by the
algebraic_equation_system method.
EXAMPLES::
sage: X = species.SingletonSpecies()
sage: S = X * X
sage: S.algebraic_equation_system()
[node0 - z^2]
"""
from sage.rings.all import prod
return prod(var_mapping[operand] for operand in self._state_info)
#Backward compatibility
ProductSpecies_class = ProductSpecies
| 32.899736
| 105
| 0.502286
|
8359c26b36f43239eb6febc38b313dcbd9a20b75
| 1,523
|
py
|
Python
|
orgparse/tests/data/01_attributes.py
|
olopost/orgparse
|
91cdec1edeffc953e28dfae4139a431780f52576
|
[
"BSD-2-Clause"
] | null | null | null |
orgparse/tests/data/01_attributes.py
|
olopost/orgparse
|
91cdec1edeffc953e28dfae4139a431780f52576
|
[
"BSD-2-Clause"
] | null | null | null |
orgparse/tests/data/01_attributes.py
|
olopost/orgparse
|
91cdec1edeffc953e28dfae4139a431780f52576
|
[
"BSD-2-Clause"
] | 1
|
2019-03-24T07:09:02.000Z
|
2019-03-24T07:09:02.000Z
|
from orgparse.date import (
OrgDate, OrgDateScheduled, OrgDateDeadline, OrgDateClosed,
OrgDateClock,
)
node1 = dict(
heading="A node with a lot of attributes",
priority='A',
scheduled=OrgDateScheduled((2010, 8, 6)),
deadline=OrgDateDeadline((2010, 8, 10)),
closed=OrgDateClosed((2010, 8, 8, 18, 0)),
clock=[
OrgDateClock((2010, 8, 8, 17, 40), (2010, 8, 8, 17, 50), 10),
OrgDateClock((2010, 8, 8, 17, 00), (2010, 8, 8, 17, 30), 30),
],
properties=dict(Effort=70),
datelist=[OrgDate((2010, 8, 16))],
rangelist=[
OrgDate((2010, 8, 7), (2010, 8, 8)),
OrgDate((2010, 8, 9, 0, 30), (2010, 8, 10, 13, 20)),
OrgDate((2019, 8, 10, 16, 30, 0), (2019, 8, 10, 17, 30, 0)),
],
body="""\
- <2010-08-16 Mon> DateList
- <2010-08-07 Sat>--<2010-08-08 Sun>
- <2010-08-09 Mon 00:30>--<2010-08-10 Tue 13:20> RangeList
- <2019-08-10 Sat 16:30-17:30> TimeRange"""
)
node2 = dict(
heading="A node without any attributed",
priority=None,
scheduled=OrgDate(None),
deadline=OrgDate(None),
closed=OrgDate(None),
clock=[],
properties={},
datelist=[],
rangelist=[],
body="",
)
node3 = dict(
heading="range in deadline",
priority=None,
scheduled=OrgDate(None),
deadline=OrgDateDeadline((2019, 9, 6, 10, 0), (2019, 9, 6, 11, 20)),
closed=OrgDate(None),
clock=[],
properties={},
datelist=[],
rangelist=[],
body=" body",
)
data = [node1, node2, node1, node3]
| 26.719298
| 72
| 0.571241
|
b954e1b0de676d556ca7c4513232e0f77a89d3c2
| 7,432
|
py
|
Python
|
sales/models.py
|
jorgesaw/kmarket_old
|
4a7ddb22cd96f8c223a21bef9a614a0ac4a08f06
|
[
"MIT"
] | null | null | null |
sales/models.py
|
jorgesaw/kmarket_old
|
4a7ddb22cd96f8c223a21bef9a614a0ac4a08f06
|
[
"MIT"
] | 5
|
2021-03-19T09:28:21.000Z
|
2022-02-10T12:11:57.000Z
|
sales/models.py
|
jorgesaw/kmarket_old
|
4a7ddb22cd96f8c223a21bef9a614a0ac4a08f06
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.db.models import Sum, F, FloatField, Max
from django.utils import timezone
from django.db.models.signals import post_save, pre_save
from django.dispatch import receiver
import datetime
from decimal import Decimal
from customers.models import Customer
from products.models import Product, PriceWithDesc
from balances.models import DailyBalance
from utils.models_mixin import StatusCreatedUpdatedModelMixin
from .managers import SaleManager
# Create your models here.
TAX_CHOICES = [
("0", 0.0),
("1", 0.21),
("2", 0.105),
]
class Sale(StatusCreatedUpdatedModelMixin, models.Model):
"""Model representing a category of sale."""
number_sale = models.CharField(max_length=18, help_text="Ingresar N°", verbose_name="Número")
date_sale = models.DateField(default=timezone.now, verbose_name="Fecha")
discount = models.DecimalField(blank=True, null=True, max_digits=5, decimal_places=2, default=0)
fiscal_bill = models.BooleanField(default=True, help_text="Es fiscal", verbose_name="AFIP")
tax = models.CharField(max_length=2, choices=TAX_CHOICES, default="0", verbose_name="IVA")
value = models.DecimalField(max_digits=8, decimal_places=2, default=0)
customer = models.ForeignKey(Customer, blank=True, null=True, on_delete=models.SET_NULL)
objects = SaleManager()
def calculate_total(self):
tot = self.itemsale_set.all().aggregate(
tot_sale=Sum((F('quantity') * F('product__price')) - F('discount'), output_field=FloatField())  # returns a dict with a single entry keyed 'tot_sale'
)['tot_sale'] or 0
tot = tot - float(self.discount)
self.value = tot
#self.save()
Sale.objects.filter(id=self.id).update(value=tot)
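# Worked example (hypothetical figures): two items contributing
# (2 * 10.00 - 1.00) + (1 * 5.00 - 0.00) = 24.00, minus a sale-level
# discount of 4.00, leaves value == 20.00.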
class Meta:
ordering = ['id',]
verbose_name = "venta"
verbose_name_plural = "ventas"
def __str__(self):
return self.number_sale
class SaleWithDailyBalance(Sale):
"""Model representing a category of sale with daily balance."""
daily_balance = models.ForeignKey(DailyBalance, null=True, blank=True, on_delete=models.SET_NULL, verbose_name="Saldo diario")
class Meta:
verbose_name = "venta con saldo"
verbose_name_plural = "venta con saldos"
class SaleSummary(Sale):
class Meta:
proxy = True
verbose_name = 'reporte venta'
verbose_name_plural = 'reporte de ventas'
class ItemSale(models.Model):
"""Model representing a many to many relationship of sales and articles (item of sale)."""
quantity = models.FloatField(default=0, help_text='Ingresa la cantidad', verbose_name='Cantidad')
value = models.DecimalField(default=0.0, max_digits=8, decimal_places=2)
discount = models.DecimalField(default=0.0, max_digits=8, decimal_places=2)
product = models.ForeignKey(Product, on_delete=models.CASCADE, verbose_name="Producto")
#product_price = models.ForeignKey(PriceWithDesc, on_delete=models.CASCADE, verbose_name="Producto")
sale = models.ForeignKey(Sale, on_delete=models.CASCADE, verbose_name="Venta")
class Meta:
ordering = ['id',]
verbose_name = "item"
verbose_name_plural = "items"
@property
def actual_price(self):
price = 0.0
if self.product:  # the model defines 'product'; 'article' was a leftover from the old schema below
price = self.product.price
return price
def calculate_total(self):
price = self.product.price  # same fix: this model has no 'article' field
self.value = price * Decimal.from_float(self.quantity)
stock = self.product.stock
if stock and stock >= self.quantity * 2:
stock -= self.quantity * 2
self.product.stock = stock
self.product.save(update_fields=['stock',])
return self.value
def save(self, *args, **kwargs):
super(ItemSale, self).save(*args, **kwargs)
self.calculate_total()
super(ItemSale, self).save(update_fields=['value'])
def delete(self, *args, **kwargs):
sale = self.sale
product = self.product
quantity = self.quantity
super(ItemSale, self).delete(*args, **kwargs)
sale.calculate_total()
stock = product.stock
stock += quantity
product.stock = stock
product.save(update_fields=['stock',])
def __str__(self):
return str(self.value)
class ItemSaleWithPrice(ItemSale):
price = models.DecimalField(default=0.0, max_digits=8, decimal_places=2)
@receiver(post_save, sender=ItemSale)
def update_total_sales_at_item(sender, instance, **kwargs):
instance.sale.calculate_total()
@receiver(pre_save, sender=ItemSale)
def update_stock_in_article(sender, instance, **kwargs):
try:
old_instance = ItemSale.objects.get(id=instance.id)
except ItemSale.DoesNotExist:
old_instance = None
if not old_instance:
return
old_stock = old_instance.quantity
if old_instance.product.stock:
old_instance.product.stock += old_stock
old_instance.product.save(update_fields=['stock',])
@receiver(post_save, sender=Sale)
def update_sales_total(sender, instance, **kwargs):
instance.calculate_total()
"""
class Sale(StatusCreatedUpdatedModelMixin, models.Model):
number_sale = models.CharField(max_length=18, help_text="Ingresar N°", verbose_name="Número")
date_sale = models.DateField(default=timezone.now, verbose_name="Fecha")
discount = models.DecimalField(blank=True, null=True, max_digits=5, decimal_places=2, default=0)
fiscal_bill = models.BooleanField(default=True, help_text="Es fiscal", verbose_name="AFIP")
tax = models.CharField(max_length=2, choices=TAX_CHOICES, default="0", verbose_name="IVA")
value = models.DecimalField(max_digits=8, decimal_places=2, default=0)
client = models.ForeignKey(Client, blank=True, null=True, on_delete=models.SET_NULL)
def calculate_total(self):
tot = 0.0
for item in self.itemsale_set.all():
tot = tot + item.calculate_total()
tot = tot - self.discount
self.value = tot
Sale.objects.filter(id=self.id).update(value=tot)
class ItemSale(models.Model):
quantity = models.FloatField(default=0, help_text='Ingresa la cantidad', verbose_name='Cantidad')
value = models.DecimalField(default=0.0, max_digits=8, decimal_places=2)
price = models.DecimalField(default=0.0, max_digits=8, decimal_places=2)
#discount = models.DecimalField(default=0.0, max_digits=5, decimal_places=2, default=0)
article = models.ForeignKey(Article, on_delete=models.CASCADE, verbose_name="Artículo")
sale = models.ForeignKey(Sale, on_delete=models.CASCADE, verbose_name="Venta")
class Meta:
ordering = ['id',]
verbose_name = "item"
verbose_name_plural = "items"
@property
def actual_price(self):
if self.article:
self.price = self.article.price if self.price == 0.0 else self.price
return self.price
def calculate_total(self):
self.price = self.article.price if self.price == 0.0 else self.price
self.value = self.price * Decimal.from_float(self.quantity)
return self.value
"""
"""
@receiver(post_save, sender=ItemSale)
def update_total_sales_at_item(sender, instance, **kwargs):
instance.sale.calculate_total()
@receiver(post_save, sender=Sale)
def update_vendas_total(sender, instance, **kwargs):
instance.calculate_total()
"""
| 35.056604
| 172
| 0.692277
|
e8af68e81da53d627bc22ba2a2b0fd657f0225d8
| 9,797
|
py
|
Python
|
Controller/adminManager.py
|
AleksandarDjurov/MeterReaderWeb-MobileApp
|
29655fff2a3e7c1961026ec4a3f2205c4e667d8e
|
[
"MIT"
] | null | null | null |
Controller/adminManager.py
|
AleksandarDjurov/MeterReaderWeb-MobileApp
|
29655fff2a3e7c1961026ec4a3f2205c4e667d8e
|
[
"MIT"
] | null | null | null |
Controller/adminManager.py
|
AleksandarDjurov/MeterReaderWeb-MobileApp
|
29655fff2a3e7c1961026ec4a3f2205c4e667d8e
|
[
"MIT"
] | null | null | null |
from flask_httpauth import HTTPBasicAuth
from model import *
from sqlalchemy import func, text
from flask_sqlalchemy import SQLAlchemy
from flask import Flask, abort, request, jsonify, g, url_for, json, session
from werkzeug.security import generate_password_hash, check_password_hash
from datetime import datetime
def getAllUsers():
users = User.query.all()
db.session.commit()
return users
def getAllDistricts():
districts = District.query.all()
db.session.commit()
return districts
def serialize_customer_list(row):
accountActive = "On"
meterActive = "On"
if row.IsAccountActive == False:
accountActive = "Off"
if row.isMeterActive == False:
meterActive = "Off"
return {
"CustomerId": row.CustomerId,
"SupplyNo": row.SupplyNo,
"Name": row.FirstName + " " + row.LastName,
"IsAccountActive": accountActive,
"CustomerType": row.CustomerType,
"IsMeterActive": meterActive,
"DistrictCode": row.DistrictCode,
"MeterBarcodeNo": row.MeterBarcodeNo,
"CreateDate": row.CreateDate.strftime("%Y-%m-%d"),
"Mange": '''<div class="dropdown">
<button class="btn btn-info dropdown-toggle" data-toggle="dropdown" aria-expanded="false" type="button">Manage <span class="caret"></span></button>
<ul class="dropdown-menu dropdown-menu-right" role="menu">
<li><a class="dropdown-item" id="update_''' + str(row.CustomerId) + '''">Update District</a></li>
<li><a class="dropdown-item" id="ra_''' + str(row.CustomerId) + '''">ReActivate Customer</a></li>
</ul></div>
'''
}
def get_customer_table_list(district):
customer_list = Customer.query.join(Meter, Customer.CustomerId==Meter.CustomerId).\
join(District, Meter.DistrictId==District.DistrictId).\
add_columns(Customer.CustomerId, Customer.SupplyNo, Customer.FirstName, Customer.LastName, \
Customer.IsAccountActive, Customer.CustomerType, Meter.isMeterActive, Customer.CreateDate, \
Customer.CreateDate, District.DistrictCode, Meter.MeterBarcodeNo).\
filter(District.DistrictCode == district).all()
db.session.commit()
return jsonify({ "data": [ serialize_customer_list(row) for row in customer_list], "user": session['username']})
def customer_update_district(request):
update_data = request.json['data']
barcode = update_data['Barcode']
district = update_data['District']
user = update_data['User']
query = "EXEC sp_superuser_Customer_UpdateDistrictArea @Meterbarcodeno = " + \
barcode + ", @DistrictArea = '" + district + "', @ModifiedByuser = '" + user + "'"
connection = db.engine.raw_connection()
connection.autocommit = True
cursor = connection.cursor()
new_customer = cursor.execute(query)
connection.commit()
connection.close()
# ret_val = db.engine.execute("dbo.sp_superuser_Customer_UpdateDistrictArea ?, ?, ?", [barcode, str(district), str(user)] )
# db.session.commit()
return jsonify({'district': district}), 200
def customer_reactivate_user(request):
data = request.json['data']
barcode = data['Barcode']
IsAccountActive = data['IsAccountActive']
accountStatus = 1
if str(IsAccountActive) == "On":
accountStatus = 0
IsAccountActive = "Off"
else:
IsAccountActive = "On"
query = "EXEC sp_superuser_ReactivateCustomer @Meterbarcodeno = " + \
str(barcode) + ", @IsAccountActive = " + str(accountStatus) + ", @ModifiedByuser = '" + session['username'] + "'"
connection = db.engine.raw_connection()
cursor = connection.cursor()
cursor.execute(query)
connection.commit()
connection.close()
return jsonify({'active': IsAccountActive}), 200
def serialize_user_list(row, roles):
chk_user_active_html = '<input onchange = "ChangeUserActive()" type="checkbox" id="act_' + str(row.id) + '"'
if row.IsUserActive :
chk_user_active_html += ' checked>'
else:
chk_user_active_html += '>'
sel_role_html = '<select style="border: none" id="role_' + str(row.id) + '" onchange="ChangeUserRole()">'
for role in roles:
if row.RoleId == role.RoleId:
sel_role_html += '<option value=' + str(role.RoleId) + ' selected="selected">' + role.RoleName + '</option>'
else:
sel_role_html += '<option value=' + str(role.RoleId) + '>' + role.RoleName + '</option>'
sel_role_html += '</select>'
return {
"id": row.id,
"username": row.username,
"Phone": row.Phone,
"UserAccessType": row.UserAccessType,
"IsUserActive": row.IsUserActive,
"ChkUserActive": chk_user_active_html,
"RoleId": row.RoleId,
"SelRoleId": sel_role_html,
"NewPwd": '<input onchange="ChangeNewPassword()" id="new_pwd_' + str(row.id) + '" type="password" style="width:40px; text-align:center; background: none; border: none" value="" readonly autocomplete="new-password">',
"ConfirmPwd": '<input onchange="ChangeConfirmPassword()" id="confirm_pwd_' + str(row.id) + '" type="password" style="width:40px; text-align:center; background: none; border: none" value="" readonly autocomplete="new-password">',
"ChkPwd": '<input type="checkbox" id="pwd_' + str(row.id) + '">'
}
def user_management_table(isNew):
users = 0
if isNew == "true":
users = User.query.filter_by(RoleId = None).all()
else:
users = User.query.all()
roles = UserRoles.query.all()
return jsonify( { "data": [ serialize_user_list(row, roles) for row in users]} )
def admin_user_management_update_user(request):
update_users = request.json['sendData']
for update_user in update_users:
role_id = update_user['RoleId']
user_role = UserRoles.query.filter_by(RoleId = role_id).first()
user = User.query.filter_by(id = update_user['id'] ).first()
user.UserAccessType = user_role.RoleName
user.IsUserActive = update_user['IsUserActive']
user.RoleId = role_id
if update_user['ChkPwd'] == "true":
user.password = update_user['password']
db.session.commit()
return jsonify({"result":"success"})
def serialize_billing_month_table_row(row):
createDate = ""
if row.CreatedDate:
createDate = row.CreatedDate.strftime("%Y-%m-%d")
chk_curr_month_html = '<input onchange = "CheckCurrBillingMonth()" type="checkbox" id="curr_' + str(row.id) + '"'
if row.CurrentBillingMonth :
chk_curr_month_html += ' checked>'
else:
chk_curr_month_html += '>'
return {
"id": row.id,
"BillingMonth": row.BillingMonth,
"BillingFromDate": row.BillingFromDate.strftime("%Y-%m-%d"),
"BillingToDate": row.BillingToDate.strftime("%Y-%m-%d"),
"CurrBillingMonth": row.CurrentBillingMonth,
"ChkCurrBillingMonth": chk_curr_month_html,
"CreatedByUser": row.CreatedByUser,
"CreatedDate": createDate,
"PickerFromDate":'<div class="input-group date"" style="background:none; border:none" id="month_from_' + str(row.id) + '''">
<input type="text" class="form-control" style="background:none; border:none">
<div class="input-group-addon" style="background:none; border:none">
<span class="glyphicon glyphicon-calendar"></span>
</div>
</div>''',
"PickerToDate": '<div class="input-group date"" style="background:none; border:none" id="month_to_' + str(row.id) + '''">
<input type="text" class="form-control" style="background:none; border:none">
<div class="input-group-addon" style="background:none; border:none">
<span class="glyphicon glyphicon-calendar"></span>
</div>
</div>'''
}
def admin_billing_month_table():
billing_months = db.engine.execute("dbo.sp_superuser_SetBillingMonth_Showupcoming").fetchall()
return jsonify({"data": [serialize_billing_month_table_row(row) for row in billing_months]})
def admin_billing_month_set_billing_month(request):
seted_months = request.json['sendData']
for set_month in seted_months:
id = set_month['id']
billing_from_date = (datetime.strptime(set_month['BillingFromDate'], "%Y-%m-%d")).strftime("%Y%m%d")
billing_to_date = (datetime.strptime(set_month['BillingToDate'], "%Y-%m-%d")).strftime("%Y%m%d")
curr_billing_month = 1
if set_month['CurrBillingMonth'] == False:
curr_billing_month = 0
created_by_user = session['username']
created_date = datetime.now().strftime("%Y%m%d")
query = "EXEC sp_superuser_SetBillingMonth_Set @Id = " + str(id) + \
", @BillingFromDate = '" + str(billing_from_date) + \
"', @BillingToDate = '" + str(billing_to_date) + \
"', @CurrentBillingMonth = " + str(curr_billing_month) + \
", @CreatedByUser = '" + str(created_by_user) + \
"', @CreatedDate = '" + str(created_date) + "'"
connection = db.engine.raw_connection()
cursor = connection.cursor()
cursor.execute(query)
connection.commit()
connection.close()
return jsonify({"result":"success"})
| 43.542222
| 236
| 0.611514
|
10e168cbcdd6f01100b9e7e50571e05c0f627bab
| 1,572
|
py
|
Python
|
speech_synth.py
|
ramrom/haus
|
6ad300be0c1dd0818248503ffe70695a878a1ace
|
[
"MIT"
] | 1
|
2019-11-30T03:45:38.000Z
|
2019-11-30T03:45:38.000Z
|
speech_synth.py
|
ramrom/haus
|
6ad300be0c1dd0818248503ffe70695a878a1ace
|
[
"MIT"
] | null | null | null |
speech_synth.py
|
ramrom/haus
|
6ad300be0c1dd0818248503ffe70695a878a1ace
|
[
"MIT"
] | null | null | null |
#!/usr/local/bin/python
import os
# ESPEAK CODE
#from espeak import espeak
#import time
from gtts import gTTS
import logging
import pdb
#
#espeak.synth('hello world')
#time.sleep(2)
import pyttsx
def pspeak(phrase = 'hello'):
engine = pyttsx.init()
engine.say(phrase)
engine.runAndWait()
def gspeak(phrase = 'hello', language = 'en-uk'):
logging.basicConfig(level=logging.INFO)
logging.info("making a call to google to grab text to speech")
tts = gTTS(text=phrase, lang=language)
tts.save("phrase.mp3")
#pdb.set_trace()
if os.uname()[0] == 'Darwin':
#playaud('phrase.mp3')
os.system("afplay phrase.mp3")
else:
os.system("omxplayer phrase.mp3")
#os.system("aplay phrase.mp3")
#print 'this isnt OSX'
os.system("rm phrase.mp3")
# gtts-cli.py "Hello" -l 'en' -o hello.mp3
def playaud(filename):
import pyaudio
import wave
import sys
CHUNK = 1024
wf = wave.open(filename, 'rb')
# instantiate PyAudio (1)
p = pyaudio.PyAudio()
# open stream (2)
stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
channels=wf.getnchannels(),
rate=wf.getframerate(),
output=True)
# read data
data = wf.readframes(CHUNK)
# play stream (3)
while len(data) > 0:
stream.write(data)
data = wf.readframes(CHUNK)
# stop stream (4)
stream.stop_stream()
stream.close()
# close PyAudio (5)
p.terminate()
if __name__ == "__main__":
print('debug console')
#if len(sys.argv) > 1 and sys.argv[1] == '1':
pdb.set_trace()
| 21.243243
| 68
| 0.639949
|
db1ff6387df1845d9bab030c7d6cb95dfb833083
| 6,737
|
py
|
Python
|
cannula/datasource/http.py
|
rmyers/cannula
|
eb6fd76d2a9daed0df73b0bf389da0182f797972
|
[
"MIT"
] | 9
|
2015-11-05T08:52:49.000Z
|
2019-11-18T10:20:58.000Z
|
cannula/datasource/http.py
|
rmyers/cannula
|
eb6fd76d2a9daed0df73b0bf389da0182f797972
|
[
"MIT"
] | null | null | null |
cannula/datasource/http.py
|
rmyers/cannula
|
eb6fd76d2a9daed0df73b0bf389da0182f797972
|
[
"MIT"
] | 1
|
2015-12-22T15:15:08.000Z
|
2015-12-22T15:15:08.000Z
|
import asyncio
import functools
import inspect
import logging
import os
import threading
import types
import typing
from concurrent.futures import ThreadPoolExecutor
import requests
from ..context import Context
LOG = logging.getLogger('cannula.datasource.http')
MAX_WORKERS = int(os.getenv('CANNULA_HTTP_MAX_WORKERS', 4))
class DataSourceError(Exception):
pass
class FutureSession(requests.Session):
"""Wrap requests session to allow requests to be async"""
def __init__(self, max_workers=MAX_WORKERS, *args, **kwargs):
super().__init__(*args, **kwargs)
self.executor = ThreadPoolExecutor(max_workers=max_workers)
def request(self, *args, **kwargs):
function = functools.partial(requests.Session.request, self)
return self.executor.submit(function, *args, **kwargs)
def close(self):
super().close()
self.executor.shutdown()
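# Minimal usage sketch (hypothetical URL): request() now returns a
# concurrent.futures.Future instead of a Response.
#   future = FutureSession().request('GET', 'https://example.com')
#   response = future.result()  # block until the worker thread finishes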
class ThreadSafeCacheable:
# see: https://stackoverflow.com/a/46723144
def __init__(self, coroutine):
self.coroutine = coroutine
self.done = False
self.result = None
self.lock = threading.Lock()
def __await__(self):
while True:
if self.done:
return self.result
if self.lock.acquire(blocking=False):
self.result = yield from self.coroutine.__await__()
self.done = True
return self.result
else:
yield from asyncio.sleep(0.005)
def cacheable(f):
def wrapped(*args, **kwargs):
r = f(*args, **kwargs)
return ThreadSafeCacheable(r)
return wrapped
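# Usage sketch: wrap a coroutine so the resulting awaitable can be awaited
# more than once while the body runs only a single time (names hypothetical):
#   @cacheable
#   async def fetch_once():
#       return await expensive_io()
#   shared = fetch_once()
#   first = await shared    # runs the coroutine
#   second = await shared   # returns the memoized result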
class HTTPContext(Context):
http_session = FutureSession()
class Request(typing.NamedTuple):
url: str
method: str
body: typing.Any = None
headers: typing.Dict = {}
class HTTPDataSource:
# The base url of this resource
base_url: typing.Optional[str] = None
# A mapping of requests keyed by cache_key_for_request. Multiple resolvers
# could attempt to fetch the same resource; this limits us to at most one
# in-flight request per cache key.
memoized_requests: typing.Dict[str, typing.Awaitable]
# Max number of workers passed to the ThreadPoolExecutor.
max_workers: int = 4
# Timeout for an individual request in seconds.
timeout: int = 5
# Resource name for the type that this datasource returns by default this
# will use the class name of the datasource.
resource_name: typing.Optional[str] = None
def __init__(self, context: typing.Any):
self.context = context
self.memoized_requests = {}
self.assert_has_http_session(context)
self.assert_has_resource_name()
def assert_has_http_session(self, context: Context) -> None:
if not hasattr(context, 'http_session'):
raise AttributeError(
'Context missing http_session did you subclass HTTPContext?'
)
def assert_has_resource_name(self) -> None:
if self.resource_name is None:
self.resource_name = self.__class__.__name__
def will_send_request(self, request: Request) -> Request:
"""Hook for subclasses to modify the request before it is sent.
For example setting Authorization headers:
def will_send_request(self, request):
request.headers = {'Authorization': self.context.token}
return request
"""
return request
def cache_key_for_request(self, request: Request) -> str:
return request.url
def get_request_url(self, path: str) -> str:
if path.startswith(('https://', 'http://')):
return path
if self.base_url is not None:
if path.startswith('/'):
path = path[1:]
if self.base_url.endswith('/'):
return f'{self.base_url}{path}'
return f'{self.base_url}/{path}'
return path
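# e.g. with base_url = 'https://api.example.com/' (hypothetical):
#   get_request_url('/widgets') -> 'https://api.example.com/widgets'
#   get_request_url('https://other.example.com/x') -> returned unchanged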
def did_receive_error(self, error: Exception, request: Request):
raise error
def convert_to_object(self, json_obj):
json_obj.update({'__typename__': self.resource_name})
return types.SimpleNamespace(**json_obj)
async def did_receive_response(
self,
response: requests.Response,
request: Request
) -> typing.Any:
response.raise_for_status()
return response.json(object_hook=self.convert_to_object)
async def get(self, path: str) -> typing.Awaitable:
return await self.fetch('GET', path)
async def post(self, path: str, body: typing.Any) -> typing.Awaitable:
return await self.fetch('POST', path, body)
async def patch(self, path: str, body: typing.Any) -> typing.Awaitable:
return await self.fetch('PATCH', path, body)
async def put(self, path: str, body: typing.Any) -> typing.Awaitable:
return await self.fetch('PUT', path, body)
async def delete(self, path: str) -> typing.Awaitable:
return await self.fetch('DELETE', path)
async def fetch(
self,
method: str,
path: str,
body: typing.Any = None
) -> typing.Awaitable:
url = self.get_request_url(path)
request = Request(url, method, body)
request = self.will_send_request(request)
cache_key = self.cache_key_for_request(request)
@cacheable
async def process_request():
try:
future = self.context.http_session.request(
request.method,
request.url,
json=request.body,
headers=request.headers,
timeout=self.timeout,
)
await asyncio.sleep(0.005)
if inspect.isawaitable(future):
response = await future
elif hasattr(future, 'result'):
response = future.result()
else:
response = future
except Exception as exc:
return self.did_receive_error(exc, request)
else:
return await self.did_receive_response(response, request)
if request.method == 'GET':
promise = self.memoized_requests.get(cache_key)
if promise is not None:
LOG.debug(f'cache found for {self.__class__.__name__}')
return await promise
self.memoized_requests[cache_key] = process_request()
LOG.debug(f'I have been cached as {cache_key}')
return await self.memoized_requests[cache_key]
else:
self.memoized_requests.pop(cache_key, None)
return await process_request()
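# Memoization sketch (hypothetical datasource instance 'ds'): concurrent GETs
# for the same path share a single HTTP request.
#   a, b = await asyncio.gather(ds.get('/users/1'), ds.get('/users/1'))
#   # one network call; both awaits observe the same parsed response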
| 30.484163
| 79
| 0.619564
|
06096c589f1791f9389f07b6ecef0f6b88299580
| 18,620
|
py
|
Python
|
tests/data/expression.py
|
henrikhorluck/black
|
5379d4f3f460ec9b7063dd1cc10f437b0edf9ae3
|
[
"MIT"
] | 2
|
2022-01-13T08:10:07.000Z
|
2022-01-13T08:35:37.000Z
|
tests/data/expression.py
|
marnixah/black-but-usable
|
83b83d3066d1d857983bfa1a666a409e7255d79d
|
[
"MIT"
] | 12
|
2022-01-17T16:17:43.000Z
|
2022-03-28T16:38:39.000Z
|
tests/data/expression.py
|
marnixah/black-but-usable
|
83b83d3066d1d857983bfa1a666a409e7255d79d
|
[
"MIT"
] | null | null | null |
...
'some_string'
b'\\xa3'
Name
None
True
False
1
1.0
1j
True or False
True or False or None
True and False
True and False and None
(Name1 and Name2) or Name3
Name1 and Name2 or Name3
Name1 or (Name2 and Name3)
Name1 or Name2 and Name3
(Name1 and Name2) or (Name3 and Name4)
Name1 and Name2 or Name3 and Name4
Name1 or (Name2 and Name3) or Name4
Name1 or Name2 and Name3 or Name4
v1 << 2
1 >> v2
1 % finished
1 + v2 - v3 * 4 ^ 5 ** v6 / 7 // 8
((1 + v2) - (v3 * 4)) ^ (((5 ** v6) / 7) // 8)
not great
~great
+value
-1
~int and not v1 ^ 123 + v2 | True
(~int) and (not ((v1 ^ (123 + v2)) | True))
+really ** -confusing ** ~operator ** -precedence
flags & ~ select.EPOLLIN and waiters.write_task is not None
lambda arg: None
lambda a=True: a
lambda a, b, c=True: a
lambda a, b, c=True, *, d=(1 << v2), e='str': a
lambda a, b, c=True, *vararg, d=(v1 << 2), e='str', **kwargs: a + b
manylambdas = lambda x=lambda y=lambda z=1: z: y(): x()
foo = (lambda port_id, ignore_missing: {"port1": port1_resource, "port2": port2_resource}[port_id])
1 if True else 2
str or None if True else str or bytes or None
(str or None) if True else (str or bytes or None)
str or None if (1 if True else 2) else str or bytes or None
(str or None) if (1 if True else 2) else (str or bytes or None)
((super_long_variable_name or None) if (1 if super_long_test_name else 2) else (str or bytes or None))
{'2.7': dead, '3.7': (long_live or die_hard)}
{'2.7': dead, '3.7': (long_live or die_hard), **{'3.6': verygood}}
{**a, **b, **c}
{'2.7', '3.6', '3.7', '3.8', '3.9', ('4.0' if gilectomy else '3.10')}
({'a': 'b'}, (True or False), (+value), 'string', b'bytes') or None
()
(1,)
(1, 2)
(1, 2, 3)
[]
[1, 2, 3, 4, 5, 6, 7, 8, 9, (10 or A), (11 or B), (12 or C)]
[1, 2, 3,]
[*a]
[*range(10)]
[*a, 4, 5,]
[4, *a, 5,]
[this_is_a_very_long_variable_which_will_force_a_delimiter_split, element, another, *more]
{i for i in (1, 2, 3)}
{(i ** 2) for i in (1, 2, 3)}
{(i ** 2) for i, _ in ((1, 'a'), (2, 'b'), (3, 'c'))}
{((i ** 2) + j) for i in (1, 2, 3) for j in (1, 2, 3)}
[i for i in (1, 2, 3)]
[(i ** 2) for i in (1, 2, 3)]
[(i ** 2) for i, _ in ((1, 'a'), (2, 'b'), (3, 'c'))]
[((i ** 2) + j) for i in (1, 2, 3) for j in (1, 2, 3)]
{i: 0 for i in (1, 2, 3)}
{i: j for i, j in ((1, 'a'), (2, 'b'), (3, 'c'))}
{a: b * 2 for a, b in dictionary.items()}
{a: b * -2 for a, b in dictionary.items()}
{k: v for k, v in this_is_a_very_long_variable_which_will_cause_a_trailing_comma_which_breaks_the_comprehension}
Python3 > Python2 > COBOL
Life is Life
call()
call(arg)
call(kwarg='hey')
call(arg, kwarg='hey')
call(arg, another, kwarg='hey', **kwargs)
call(this_is_a_very_long_variable_which_will_force_a_delimiter_split, arg, another, kwarg='hey', **kwargs) # note: no trailing comma pre-3.6
call(*gidgets[:2])
call(a, *gidgets[:2])
call(**self.screen_kwargs)
call(b, **self.screen_kwargs)
lukasz.langa.pl
call.me(maybe)
1 .real
1.0 .real
....__class__
list[str]
dict[str, int]
tuple[str, ...]
tuple[
str, int, float, dict[str, int]
]
tuple[str, int, float, dict[str, int],]
very_long_variable_name_filters: t.List[
t.Tuple[str, t.Union[str, t.List[t.Optional[str]]]],
]
xxxx_xxxxx_xxxx_xxx: Callable[..., List[SomeClass]] = classmethod( # type: ignore
sync(async_xxxx_xxx_xxxx_xxxxx_xxxx_xxx.__func__)
)
xxxx_xxx_xxxx_xxxxx_xxxx_xxx: Callable[..., List[SomeClass]] = classmethod( # type: ignore
sync(async_xxxx_xxx_xxxx_xxxxx_xxxx_xxx.__func__)
)
xxxx_xxx_xxxx_xxxxx_xxxx_xxx: Callable[
..., List[SomeClass]
] = classmethod(sync(async_xxxx_xxx_xxxx_xxxxx_xxxx_xxx.__func__)) # type: ignore
slice[0]
slice[0:1]
slice[0:1:2]
slice[:]
slice[:-1]
slice[1:]
slice[::-1]
slice[d :: d + 1]
slice[:c, c - 1]
numpy[:, 0:1]
numpy[:, :-1]
numpy[0, :]
numpy[:, i]
numpy[0, :2]
numpy[:N, 0]
numpy[:2, :4]
numpy[2:4, 1:5]
numpy[4:, 2:]
numpy[:, (0, 1, 2, 5)]
numpy[0, [0]]
numpy[:, [i]]
numpy[1 : c + 1, c]
numpy[-(c + 1) :, d]
numpy[:, l[-2]]
numpy[:, ::-1]
numpy[np.newaxis, :]
(str or None) if (sys.version_info[0] > (3,)) else (str or bytes or None)
{'2.7': dead, '3.7': long_live or die_hard}
{'2.7', '3.6', '3.7', '3.8', '3.9', '4.0' if gilectomy else '3.10'}
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10 or A, 11 or B, 12 or C]
(SomeName)
SomeName
(Good, Bad, Ugly)
(i for i in (1, 2, 3))
((i ** 2) for i in (1, 2, 3))
((i ** 2) for i, _ in ((1, 'a'), (2, 'b'), (3, 'c')))
(((i ** 2) + j) for i in (1, 2, 3) for j in (1, 2, 3))
(*starred,)
{"id": "1","type": "type","started_at": now(),"ended_at": now() + timedelta(days=10),"priority": 1,"import_session_id": 1,**kwargs}
a = (1,)
b = 1,
c = 1
d = (1,) + a + (2,)
e = (1,).count(1)
f = 1, *range(10)
g = 1, *"ten"
what_is_up_with_those_new_coord_names = (coord_names + set(vars_to_create)) + set(vars_to_remove)
what_is_up_with_those_new_coord_names = (coord_names | set(vars_to_create)) - set(vars_to_remove)
result = session.query(models.Customer.id).filter(models.Customer.account_id == account_id, models.Customer.email == email_address).order_by(models.Customer.id.asc()).all()
result = session.query(models.Customer.id).filter(models.Customer.account_id == account_id, models.Customer.email == email_address).order_by(models.Customer.id.asc(),).all()
Ø = set()
authors.łukasz.say_thanks()
mapping = {
A: 0.25 * (10.0 / 12),
B: 0.1 * (10.0 / 12),
C: 0.1 * (10.0 / 12),
D: 0.1 * (10.0 / 12),
}
def gen():
yield from outside_of_generator
a = (yield)
b = ((yield))
c = (((yield)))
async def f():
await some.complicated[0].call(with_args=(True or (1 is not 1)))
print(* [] or [1])
print(**{1: 3} if False else {x: x for x in range(3)})
print(* lambda x: x)
assert(not Test),("Short message")
assert this is ComplexTest and not requirements.fit_in_a_single_line(force=False), "Short message"
assert(((parens is TooMany)))
for x, in (1,), (2,), (3,): ...
for y in (): ...
for z in (i for i in (1, 2, 3)): ...
for i in (call()): ...
for j in (1 + (2 + 3)): ...
while(this and that): ...
for addr_family, addr_type, addr_proto, addr_canonname, addr_sockaddr in socket.getaddrinfo('google.com', 'http'):
pass
a = aaaa.bbbb.cccc.dddd.eeee.ffff.gggg.hhhh.iiii.jjjj.kkkk.llll.mmmm.nnnn.oooo.pppp in qqqq.rrrr.ssss.tttt.uuuu.vvvv.xxxx.yyyy.zzzz
a = aaaa.bbbb.cccc.dddd.eeee.ffff.gggg.hhhh.iiii.jjjj.kkkk.llll.mmmm.nnnn.oooo.pppp not in qqqq.rrrr.ssss.tttt.uuuu.vvvv.xxxx.yyyy.zzzz
a = aaaa.bbbb.cccc.dddd.eeee.ffff.gggg.hhhh.iiii.jjjj.kkkk.llll.mmmm.nnnn.oooo.pppp is qqqq.rrrr.ssss.tttt.uuuu.vvvv.xxxx.yyyy.zzzz
a = aaaa.bbbb.cccc.dddd.eeee.ffff.gggg.hhhh.iiii.jjjj.kkkk.llll.mmmm.nnnn.oooo.pppp is not qqqq.rrrr.ssss.tttt.uuuu.vvvv.xxxx.yyyy.zzzz
if (
threading.current_thread() != threading.main_thread() and
threading.current_thread() != threading.main_thread() or
signal.getsignal(signal.SIGINT) != signal.default_int_handler
):
return True
if (
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa |
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
):
return True
if (
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa &
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
):
return True
if (
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa +
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
):
return True
if (
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa -
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
):
return True
if (
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa *
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
):
return True
if (
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa /
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
):
return True
if (
~ aaaa.a + aaaa.b - aaaa.c * aaaa.d / aaaa.e | aaaa.f & aaaa.g % aaaa.h ^ aaaa.i << aaaa.k >> aaaa.l ** aaaa.m // aaaa.n
):
return True
if (
~ aaaaaaaa.a + aaaaaaaa.b - aaaaaaaa.c @ aaaaaaaa.d / aaaaaaaa.e | aaaaaaaa.f & aaaaaaaa.g % aaaaaaaa.h ^ aaaaaaaa.i << aaaaaaaa.k >> aaaaaaaa.l ** aaaaaaaa.m // aaaaaaaa.n
):
return True
if (
~ aaaaaaaaaaaaaaaa.a + aaaaaaaaaaaaaaaa.b - aaaaaaaaaaaaaaaa.c * aaaaaaaaaaaaaaaa.d @ aaaaaaaaaaaaaaaa.e | aaaaaaaaaaaaaaaa.f & aaaaaaaaaaaaaaaa.g % aaaaaaaaaaaaaaaa.h ^ aaaaaaaaaaaaaaaa.i << aaaaaaaaaaaaaaaa.k >> aaaaaaaaaaaaaaaa.l ** aaaaaaaaaaaaaaaa.m // aaaaaaaaaaaaaaaa.n
):
return True
aaaaaaaaaaaaaaaa + aaaaaaaaaaaaaaaa - aaaaaaaaaaaaaaaa * (aaaaaaaaaaaaaaaa + aaaaaaaaaaaaaaaa) / (aaaaaaaaaaaaaaaa + aaaaaaaaaaaaaaaa + aaaaaaaaaaaaaaaa)
aaaaaaaaaaaaaaaa + aaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa >> aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa << aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
bbbb >> bbbb * bbbb
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa ^bbbb.a & aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa^aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
last_call()
# standalone comment at ENDMARKER
# output
...
"some_string"
b"\\xa3"
Name
None
True
False
1
1.0
1j
True or False
True or False or None
True and False
True and False and None
(Name1 and Name2) or Name3
Name1 and Name2 or Name3
Name1 or (Name2 and Name3)
Name1 or Name2 and Name3
(Name1 and Name2) or (Name3 and Name4)
Name1 and Name2 or Name3 and Name4
Name1 or (Name2 and Name3) or Name4
Name1 or Name2 and Name3 or Name4
v1 << 2
1 >> v2
1 % finished
1 + v2 - v3 * 4 ^ 5**v6 / 7 // 8
((1 + v2) - (v3 * 4)) ^ (((5**v6) / 7) // 8)
not great
~great
+value
-1
~int and not v1 ^ 123 + v2 | True
(~int) and (not ((v1 ^ (123 + v2)) | True))
+(really ** -(confusing ** ~(operator**-precedence)))
flags & ~select.EPOLLIN and waiters.write_task is not None
lambda arg: None
lambda a=True: a
lambda a, b, c=True: a
lambda a, b, c=True, *, d=(1 << v2), e="str": a
lambda a, b, c=True, *vararg, d=(v1 << 2), e="str", **kwargs: a + b
manylambdas = lambda x=lambda y=lambda z=1: z: y(): x()
foo = lambda port_id, ignore_missing: {
"port1": port1_resource,
"port2": port2_resource,
}[port_id]
1 if True else 2
str or None if True else str or bytes or None
(str or None) if True else (str or bytes or None)
str or None if (1 if True else 2) else str or bytes or None
(str or None) if (1 if True else 2) else (str or bytes or None)
(
(super_long_variable_name or None)
if (1 if super_long_test_name else 2)
else (str or bytes or None)
)
{"2.7": dead, "3.7": (long_live or die_hard)}
{"2.7": dead, "3.7": (long_live or die_hard), **{"3.6": verygood}}
{**a, **b, **c}
{"2.7", "3.6", "3.7", "3.8", "3.9", ("4.0" if gilectomy else "3.10")}
({"a": "b"}, (True or False), (+value), "string", b"bytes") or None
()
(1,)
(1, 2)
(1, 2, 3)
[]
[1, 2, 3, 4, 5, 6, 7, 8, 9, (10 or A), (11 or B), (12 or C)]
[
1,
2,
3,
]
[*a]
[*range(10)]
[
*a,
4,
5,
]
[
4,
*a,
5,
]
[
this_is_a_very_long_variable_which_will_force_a_delimiter_split,
element,
another,
*more,
]
{i for i in (1, 2, 3)}
{(i**2) for i in (1, 2, 3)}
{(i**2) for i, _ in ((1, "a"), (2, "b"), (3, "c"))}
{((i**2) + j) for i in (1, 2, 3) for j in (1, 2, 3)}
[i for i in (1, 2, 3)]
[(i**2) for i in (1, 2, 3)]
[(i**2) for i, _ in ((1, "a"), (2, "b"), (3, "c"))]
[((i**2) + j) for i in (1, 2, 3) for j in (1, 2, 3)]
{i: 0 for i in (1, 2, 3)}
{i: j for i, j in ((1, "a"), (2, "b"), (3, "c"))}
{a: b * 2 for a, b in dictionary.items()}
{a: b * -2 for a, b in dictionary.items()}
{
k: v
for k, v in this_is_a_very_long_variable_which_will_cause_a_trailing_comma_which_breaks_the_comprehension
}
Python3 > Python2 > COBOL
Life is Life
call()
call(arg)
call(kwarg="hey")
call(arg, kwarg="hey")
call(arg, another, kwarg="hey", **kwargs)
call(
this_is_a_very_long_variable_which_will_force_a_delimiter_split,
arg,
another,
kwarg="hey",
**kwargs
) # note: no trailing comma pre-3.6
call(*gidgets[:2])
call(a, *gidgets[:2])
call(**self.screen_kwargs)
call(b, **self.screen_kwargs)
lukasz.langa.pl
call.me(maybe)
(1).real
(1.0).real
....__class__
list[str]
dict[str, int]
tuple[str, ...]
tuple[str, int, float, dict[str, int]]
tuple[
str,
int,
float,
dict[str, int],
]
very_long_variable_name_filters: t.List[
t.Tuple[str, t.Union[str, t.List[t.Optional[str]]]],
]
xxxx_xxxxx_xxxx_xxx: Callable[..., List[SomeClass]] = classmethod( # type: ignore
sync(async_xxxx_xxx_xxxx_xxxxx_xxxx_xxx.__func__)
)
xxxx_xxx_xxxx_xxxxx_xxxx_xxx: Callable[..., List[SomeClass]] = classmethod( # type: ignore
sync(async_xxxx_xxx_xxxx_xxxxx_xxxx_xxx.__func__)
)
xxxx_xxx_xxxx_xxxxx_xxxx_xxx: Callable[..., List[SomeClass]] = classmethod(
sync(async_xxxx_xxx_xxxx_xxxxx_xxxx_xxx.__func__)
) # type: ignore
slice[0]
slice[0:1]
slice[0:1:2]
slice[:]
slice[:-1]
slice[1:]
slice[::-1]
slice[d :: d + 1]
slice[:c, c - 1]
numpy[:, 0:1]
numpy[:, :-1]
numpy[0, :]
numpy[:, i]
numpy[0, :2]
numpy[:N, 0]
numpy[:2, :4]
numpy[2:4, 1:5]
numpy[4:, 2:]
numpy[:, (0, 1, 2, 5)]
numpy[0, [0]]
numpy[:, [i]]
numpy[1 : c + 1, c]
numpy[-(c + 1) :, d]
numpy[:, l[-2]]
numpy[:, ::-1]
numpy[np.newaxis, :]
(str or None) if (sys.version_info[0] > (3,)) else (str or bytes or None)
{"2.7": dead, "3.7": long_live or die_hard}
{"2.7", "3.6", "3.7", "3.8", "3.9", "4.0" if gilectomy else "3.10"}
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10 or A, 11 or B, 12 or C]
(SomeName)
SomeName
(Good, Bad, Ugly)
(i for i in (1, 2, 3))
((i**2) for i in (1, 2, 3))
((i**2) for i, _ in ((1, "a"), (2, "b"), (3, "c")))
(((i**2) + j) for i in (1, 2, 3) for j in (1, 2, 3))
(*starred,)
{
"id": "1",
"type": "type",
"started_at": now(),
"ended_at": now() + timedelta(days=10),
"priority": 1,
"import_session_id": 1,
**kwargs,
}
a = (1,)
b = (1,)
c = 1
d = (1,) + a + (2,)
e = (1,).count(1)
f = 1, *range(10)
g = 1, *"ten"
what_is_up_with_those_new_coord_names = (coord_names + set(vars_to_create)) + set(
vars_to_remove
)
what_is_up_with_those_new_coord_names = (coord_names | set(vars_to_create)) - set(
vars_to_remove
)
result = (
session.query(models.Customer.id)
.filter(
models.Customer.account_id == account_id, models.Customer.email == email_address
)
.order_by(models.Customer.id.asc())
.all()
)
result = (
session.query(models.Customer.id)
.filter(
models.Customer.account_id == account_id, models.Customer.email == email_address
)
.order_by(
models.Customer.id.asc(),
)
.all()
)
Ø = set()
authors.łukasz.say_thanks()
mapping = {
A: 0.25 * (10.0 / 12),
B: 0.1 * (10.0 / 12),
C: 0.1 * (10.0 / 12),
D: 0.1 * (10.0 / 12),
}
def gen():
yield from outside_of_generator
a = yield
b = yield
c = yield
async def f():
await some.complicated[0].call(with_args=(True or (1 is not 1)))
print(*[] or [1])
print(**{1: 3} if False else {x: x for x in range(3)})
print(*lambda x: x)
assert not Test, "Short message"
assert this is ComplexTest and not requirements.fit_in_a_single_line(
force=False
), "Short message"
assert parens is TooMany
for (x,) in (1,), (2,), (3,):
...
for y in ():
...
for z in (i for i in (1, 2, 3)):
...
for i in call():
...
for j in 1 + (2 + 3):
...
while this and that:
...
for (
addr_family,
addr_type,
addr_proto,
addr_canonname,
addr_sockaddr,
) in socket.getaddrinfo("google.com", "http"):
pass
a = (
aaaa.bbbb.cccc.dddd.eeee.ffff.gggg.hhhh.iiii.jjjj.kkkk.llll.mmmm.nnnn.oooo.pppp
in qqqq.rrrr.ssss.tttt.uuuu.vvvv.xxxx.yyyy.zzzz
)
a = (
aaaa.bbbb.cccc.dddd.eeee.ffff.gggg.hhhh.iiii.jjjj.kkkk.llll.mmmm.nnnn.oooo.pppp
not in qqqq.rrrr.ssss.tttt.uuuu.vvvv.xxxx.yyyy.zzzz
)
a = (
aaaa.bbbb.cccc.dddd.eeee.ffff.gggg.hhhh.iiii.jjjj.kkkk.llll.mmmm.nnnn.oooo.pppp
is qqqq.rrrr.ssss.tttt.uuuu.vvvv.xxxx.yyyy.zzzz
)
a = (
aaaa.bbbb.cccc.dddd.eeee.ffff.gggg.hhhh.iiii.jjjj.kkkk.llll.mmmm.nnnn.oooo.pppp
is not qqqq.rrrr.ssss.tttt.uuuu.vvvv.xxxx.yyyy.zzzz
)
if (
threading.current_thread() != threading.main_thread()
and threading.current_thread() != threading.main_thread()
or signal.getsignal(signal.SIGINT) != signal.default_int_handler
):
return True
if (
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
| aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
):
return True
if (
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
& aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
):
return True
if (
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+ aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
):
return True
if (
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
- aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
):
return True
if (
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
* aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
):
return True
if (
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
/ aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
):
return True
if (
~aaaa.a + aaaa.b - aaaa.c * aaaa.d / aaaa.e
| aaaa.f & aaaa.g % aaaa.h ^ aaaa.i << aaaa.k >> aaaa.l**aaaa.m // aaaa.n
):
return True
if (
~aaaaaaaa.a + aaaaaaaa.b - aaaaaaaa.c @ aaaaaaaa.d / aaaaaaaa.e
| aaaaaaaa.f & aaaaaaaa.g % aaaaaaaa.h
^ aaaaaaaa.i << aaaaaaaa.k >> aaaaaaaa.l**aaaaaaaa.m // aaaaaaaa.n
):
return True
if (
~aaaaaaaaaaaaaaaa.a
+ aaaaaaaaaaaaaaaa.b
- aaaaaaaaaaaaaaaa.c * aaaaaaaaaaaaaaaa.d @ aaaaaaaaaaaaaaaa.e
| aaaaaaaaaaaaaaaa.f & aaaaaaaaaaaaaaaa.g % aaaaaaaaaaaaaaaa.h
^ aaaaaaaaaaaaaaaa.i
<< aaaaaaaaaaaaaaaa.k
>> aaaaaaaaaaaaaaaa.l**aaaaaaaaaaaaaaaa.m // aaaaaaaaaaaaaaaa.n
):
return True
(
aaaaaaaaaaaaaaaa
+ aaaaaaaaaaaaaaaa
- aaaaaaaaaaaaaaaa
* (aaaaaaaaaaaaaaaa + aaaaaaaaaaaaaaaa)
/ (aaaaaaaaaaaaaaaa + aaaaaaaaaaaaaaaa + aaaaaaaaaaaaaaaa)
)
aaaaaaaaaaaaaaaa + aaaaaaaaaaaaaaaa
(
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
>> aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
<< aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
)
bbbb >> bbbb * bbbb
(
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
^ bbbb.a & aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
^ aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
)
last_call()
# standalone comment at ENDMARKER
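The file above follows the test-fixture convention used by the Black code formatter: everything before the `# output` marker is deliberately unformatted input, and everything after it is the output the formatter is expected to produce. Below is a minimal sketch of how such a fixture can be checked; it assumes the `black` package is installed, and `check_fixture` is a hypothetical helper name, not part of Black's own test suite.

import black


def check_fixture(path):
    # Fixtures keep input and expected output in one file,
    # separated by a line containing only "# output".
    with open(path, encoding="utf-8") as f:
        text = f.read()
    if "\n# output\n" in text:
        source, expected = text.split("\n# output\n", 1)
    else:
        # No marker: the file is expected to already be formatted.
        source = expected = text
    formatted = black.format_str(source, mode=black.Mode())
    assert formatted == expected.lstrip("\n"), "formatter output drifted"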
avg_line_length: 29.508716 | max_line_length: 280 | alphanum_fraction: 0.665199
hexsha: f636b179e1ccf07a557d2cd33d43faa9dcb4df2f | size: 1159 | ext: py | lang: Python
repo_path: tests/providers/test_rage4.py | repo_name: chibiegg/lexicon | repo_head_hexsha: 6230ea1e567a730243dc77c08ff6c4c16f136157 | licenses: ["MIT"]
(identical across the max_stars/max_issues/max_forks columns; all counts and event datetimes: null)
# Test for one implementation of the interface
from lexicon.providers.rage4 import Provider
from integration_tests import IntegrationTests
from unittest import TestCase
import pytest


# Hook into testing framework by inheriting unittest.TestCase and reuse
# the tests which *each and every* implementation of the interface must
# pass, by inheritance from define_tests.TheTests
class Rage4ProviderTests(TestCase, IntegrationTests):

    Provider = Provider
    provider_name = 'rage4'
    domain = 'capsulecd.com'

    def _filter_headers(self):
        return ['Authorization']

    @pytest.mark.skip(reason="update requires type to be specified for this provider")
    def test_Provider_when_calling_update_record_with_full_name_should_modify_record(self):
        return

    @pytest.mark.skip(reason="update requires type to be specified for this provider")
    def test_Provider_when_calling_update_record_should_modify_record(self):
        return

    # TODO: this should be enabled
    @pytest.mark.skip(reason="regenerating auth keys required")
    def test_Provider_when_calling_update_record_should_modify_record_name_specified(self):
        return
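The comment in the file above describes a common test-reuse pattern: the shared checks live in a mixin class that is not itself a TestCase, so the test runner only collects them through concrete subclasses that bind a real implementation. A minimal, self-contained sketch of that pattern follows; every name in it (InterfaceTests, EnglishGreeter, ...) is hypothetical and not part of lexicon's actual integration_tests module.

import unittest


class InterfaceTests(object):
    # Shared tests. Deliberately not a unittest.TestCase subclass,
    # so the runner never tries to execute the mixin on its own.
    def make_instance(self):
        raise NotImplementedError

    def test_greet_returns_a_string(self):
        self.assertIsInstance(self.make_instance().greet(), str)


class EnglishGreeter(object):
    def greet(self):
        return "hello"


# The concrete test case inherits TestCase first and the mixin second,
# mirroring the Rage4ProviderTests declaration above.
class EnglishGreeterTests(unittest.TestCase, InterfaceTests):
    def make_instance(self):
        return EnglishGreeter()


if __name__ == '__main__':
    unittest.main()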
avg_line_length: 39.965517 | max_line_length: 91 | alphanum_fraction: 0.786022
hexsha: f62d1170c1342e8c87fe6020560585ff03183a71 | size: 3772 | ext: py | lang: Python
repo_path: ihatexml/parser/parser.py | repo_name: gsnedders/ihatexml | repo_head_hexsha: 69d43f921a9595f7913be43d922a7b4ff8fe70c8 | licenses: ["MIT"]
(identical across the max_stars/max_issues/max_forks columns; all counts and event datetimes: null)
import ply.yacc as yacc

from . import ast
from .lexer import lexer, tokens

precedence = (
    ('left', 'QUESTION', 'PLUS', 'ASTERISK'),
    ('left', 'HYPHEN'),
)


def p_error(p):
    if not p:
        # EOF
        return

    while True:
        tok = p.token()
        if not tok or tok.type == 'NEWLINE':
            break
    p.errok()
    return tok


def p_definition_list_base(p):
    'definitionList : definition'
    name, value = p[1]
    p[0] = ast.DefinitionDict([(name, value)])


def p_definition_list_recurse(p):
    'definitionList : definitionList NEWLINE definition'
    name, value = p[3]
    p[0] = p[1]
    p[0][name] = value


def p_definition(p):
    'definition : SYMBOL DEFINE expression'
    p[0] = (p[1], p[3])


def p_expression_base(p):
    'expression : expressionFollows'
    p[0] = p[1]


def p_expression_alternation(p):
    'expression : expression BAR expressionFollows'
    if isinstance(p[1], ast.Alternation):
        p[0] = p[1]
    else:
        p[0] = ast.Alternation([p[1]])
    p[0].options.append(p[3])


def p_expression_follows_base(p):
    'expressionFollows : expressionPrimary'
    p[0] = p[1]


def p_expression_follows_recurse(p):
    'expressionFollows : expressionFollows expressionPrimary'
    if isinstance(p[1], ast.Follows):
        p[0] = p[1]
    else:
        p[0] = ast.Follows([p[1]])
    p[0].order.append(p[2])


def p_expression_list_wrapped(p):
    'expressionPrimary : LPAREN expression RPAREN'
    p[0] = p[2]


def p_expression_except(p):
    'expressionPrimary : expressionPrimary HYPHEN expressionPrimary'
    p[0] = ast.Difference(p[1], p[3])


def p_expression_repetition(p):
    '''expressionPrimary : expressionPrimary QUESTION
                         | expressionPrimary ASTERISK
                         | expressionPrimary PLUS'''
    if p[2] == '?':
        min, max = 0, 1
    elif p[2] == '*':
        min, max = 0, float('inf')
    elif p[2] == '+':
        min, max = 1, float('inf')
    else:
        assert False, "unreachable"
    p[0] = ast.Repetition(p[1], min, max)


def p_expression_symbol(p):
    'expressionPrimary : SYMBOL'
    p[0] = ast.SymbolRef(p[1])


def p_expression_literal(p):
    '''expressionPrimary : QUOTE_STRING
                         | DQUOTE_STRING
                         | ESCAPECHAR'''
    p[0] = ast.Literal(p[1])


def p_expression_charclass(p):
    '''expressionPrimary : LSQUARE char_class_list RSQUARE
                         | LSQUARE CARET char_class_list RSQUARE'''
    # Get the right offset
    if len(p) == 4:
        cclist = p[2]
        negated = False
    else:
        cclist = p[3]
        negated = True

    # Split up char_class_list
    ranges = []
    chars = set()
    for x in cclist:
        if isinstance(x, tuple):
            ranges.append(x)
        else:
            chars.add(x)

    p[0] = ast.CharClass(negated, chars, ranges)


def p_char_class_list(p):
    'char_class_list : char_class_list char_class'
    p[0] = p[1]
    p[0].append(p[2])


def p_char_class_list_base(p):
    '''char_class_list : HYPHEN
                       | char_class'''
    p[0] = [p[1]]


def p_char_class_char(p):
    '''char_class : CLASSCHAR
                  | ESCAPECHAR'''
    p[0] = p[1]


def p_char_class_range(p):
    '''char_class : ESCAPECHAR HYPHEN ESCAPECHAR
                  | CLASSCHAR HYPHEN CLASSCHAR'''
    p[0] = (p[1], p[3])


parser = yacc.yacc()


if __name__ == '__main__':
    try:
        input = raw_input
    except NameError:
        pass

    try:
        import readline
    except ImportError:
        pass

    while True:
        try:
            s = input('parser > ')
        except EOFError:
            print()
            break
        t = yacc.parse(s)
        if t:
            for k, v in t.items():
                print("%s ::= %s" % (k, v))
avg_line_length: 23.873418 | max_line_length: 68 | alphanum_fraction: 0.567603