# Generated by Django 3.0.3 on 2020-10-19 10:32
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('modelzoo', '0025_auto_20201015_1628'),
]
operations = [
migrations.AddField(
model_name='model',
name='slug',
field=models.SlugField(null=True),
),
]
|
import pytest
import mugen.video.sizing as v_sizing
from mugen.video.sizing import Dimensions
# Plain helper functions rather than fixtures: they are called directly inside
# the parametrize decorators below, and pytest does not allow calling fixtures directly.
def dimensions_4_3():
    return Dimensions(720, 540)
def dimensions_16_9():
    return Dimensions(1920, 1080)
def dimensions_21_9():
    return Dimensions(1920, 822)
def list_of_dimensions():
    return [dimensions_16_9(), dimensions_4_3(), dimensions_21_9()]
@pytest.mark.parametrize("dimensions, aspect_ratio, expected_dimensions", [
(dimensions_16_9(), 16/9, (1920, 1080)),
(dimensions_16_9(), 4/3, (1440, 1080)),
(dimensions_4_3(), 16/9, (720, 405))
])
def test_crop_dimensions_to_aspect_ratio(dimensions, aspect_ratio, expected_dimensions):
assert v_sizing.crop_dimensions_to_aspect_ratio(dimensions, aspect_ratio) == expected_dimensions
@pytest.mark.parametrize("dimensions, desired_aspect_ratio, expected_coordinates", [
(dimensions_16_9(), 16/9, (0, 0, 1920, 1080)),
(dimensions_16_9(), 4/3, (240, 0, 1680, 1080)),
(dimensions_4_3(), 16/9, (0, 67.5, 720, 472.5))
])
def test_crop_coordinates_for_aspect_ratio(dimensions, desired_aspect_ratio, expected_coordinates):
assert v_sizing.crop_coordinates_for_aspect_ratio(dimensions, desired_aspect_ratio) == expected_coordinates
@pytest.mark.parametrize("dimensions_list, desired_aspect_ratio, default, expected_dimensions", [
([], 16/9, "default", "default"),
(list_of_dimensions(), 4/3, None, (1440, 1080)),
(list_of_dimensions(), 16/9, None, (1920, 1080)),
(list_of_dimensions(), 21/9, None, (1920, 822))
])
def test_largest_dimensions_for_aspect_ratio(dimensions_list, desired_aspect_ratio, default, expected_dimensions):
assert v_sizing.largest_dimensions_for_aspect_ratio(dimensions_list, desired_aspect_ratio,
default) == expected_dimensions
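# --- Hedged reference sketch (not mugen's implementation) --------------------
# The expected values above follow from simple aspect-ratio arithmetic: cropping
# never scales, it only trims the longer axis (and centres the crop box for the
# coordinate tests). The helper name below is local to this sketch.
def _crop_dimensions_sketch(width, height, aspect_ratio):
    if width / height > aspect_ratio:
        return (round(height * aspect_ratio), height)  # too wide: trim width
    return (width, round(width / aspect_ratio))        # too tall: trim height
assert _crop_dimensions_sketch(1920, 1080, 4 / 3) == (1440, 1080)
assert _crop_dimensions_sketch(720, 540, 16 / 9) == (720, 405)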
|
# coding: utf-8
"""
autoscaling
OpenAPI spec version: 2018-06-21T02:22:22Z
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from ncloud_autoscaling.model.common_code import CommonCode # noqa: F401,E501
class ActivityLog(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'activity_no': 'str',
'auto_scaling_group_name': 'str',
'status': 'CommonCode',
'status_message': 'str',
'action_cause': 'str',
'description': 'str',
'details': 'str',
'start_time': 'str',
'end_time': 'str'
}
attribute_map = {
'activity_no': 'activityNo',
'auto_scaling_group_name': 'autoScalingGroupName',
'status': 'status',
'status_message': 'statusMessage',
'action_cause': 'actionCause',
'description': 'description',
'details': 'details',
'start_time': 'startTime',
'end_time': 'endTime'
}
def __init__(self, activity_no=None, auto_scaling_group_name=None, status=None, status_message=None, action_cause=None, description=None, details=None, start_time=None, end_time=None): # noqa: E501
"""ActivityLog - a model defined in Swagger""" # noqa: E501
self._activity_no = None
self._auto_scaling_group_name = None
self._status = None
self._status_message = None
self._action_cause = None
self._description = None
self._details = None
self._start_time = None
self._end_time = None
self.discriminator = None
if activity_no is not None:
self.activity_no = activity_no
if auto_scaling_group_name is not None:
self.auto_scaling_group_name = auto_scaling_group_name
if status is not None:
self.status = status
if status_message is not None:
self.status_message = status_message
if action_cause is not None:
self.action_cause = action_cause
if description is not None:
self.description = description
if details is not None:
self.details = details
if start_time is not None:
self.start_time = start_time
if end_time is not None:
self.end_time = end_time
@property
def activity_no(self):
"""Gets the activity_no of this ActivityLog. # noqa: E501
        Activity number # noqa: E501
:return: The activity_no of this ActivityLog. # noqa: E501
:rtype: str
"""
return self._activity_no
@activity_no.setter
def activity_no(self, activity_no):
"""Sets the activity_no of this ActivityLog.
        Activity number # noqa: E501
:param activity_no: The activity_no of this ActivityLog. # noqa: E501
:type: str
"""
self._activity_no = activity_no
@property
def auto_scaling_group_name(self):
"""Gets the auto_scaling_group_name of this ActivityLog. # noqa: E501
        Auto Scaling group name # noqa: E501
:return: The auto_scaling_group_name of this ActivityLog. # noqa: E501
:rtype: str
"""
return self._auto_scaling_group_name
@auto_scaling_group_name.setter
def auto_scaling_group_name(self, auto_scaling_group_name):
"""Sets the auto_scaling_group_name of this ActivityLog.
        Auto Scaling group name # noqa: E501
:param auto_scaling_group_name: The auto_scaling_group_name of this ActivityLog. # noqa: E501
:type: str
"""
self._auto_scaling_group_name = auto_scaling_group_name
@property
def status(self):
"""Gets the status of this ActivityLog. # noqa: E501
        Status # noqa: E501
:return: The status of this ActivityLog. # noqa: E501
:rtype: CommonCode
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this ActivityLog.
        Status # noqa: E501
:param status: The status of this ActivityLog. # noqa: E501
:type: CommonCode
"""
self._status = status
@property
def status_message(self):
"""Gets the status_message of this ActivityLog. # noqa: E501
        Status message # noqa: E501
:return: The status_message of this ActivityLog. # noqa: E501
:rtype: str
"""
return self._status_message
@status_message.setter
def status_message(self, status_message):
"""Sets the status_message of this ActivityLog.
        Status message # noqa: E501
:param status_message: The status_message of this ActivityLog. # noqa: E501
:type: str
"""
self._status_message = status_message
@property
def action_cause(self):
"""Gets the action_cause of this ActivityLog. # noqa: E501
        Action cause # noqa: E501
:return: The action_cause of this ActivityLog. # noqa: E501
:rtype: str
"""
return self._action_cause
@action_cause.setter
def action_cause(self, action_cause):
"""Sets the action_cause of this ActivityLog.
        Action cause # noqa: E501
:param action_cause: The action_cause of this ActivityLog. # noqa: E501
:type: str
"""
self._action_cause = action_cause
@property
def description(self):
"""Gets the description of this ActivityLog. # noqa: E501
        Description # noqa: E501
:return: The description of this ActivityLog. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this ActivityLog.
        Description # noqa: E501
:param description: The description of this ActivityLog. # noqa: E501
:type: str
"""
self._description = description
@property
def details(self):
"""Gets the details of this ActivityLog. # noqa: E501
        Detailed description # noqa: E501
:return: The details of this ActivityLog. # noqa: E501
:rtype: str
"""
return self._details
@details.setter
def details(self, details):
"""Sets the details of this ActivityLog.
        Detailed description # noqa: E501
:param details: The details of this ActivityLog. # noqa: E501
:type: str
"""
self._details = details
@property
def start_time(self):
"""Gets the start_time of this ActivityLog. # noqa: E501
        Start time # noqa: E501
:return: The start_time of this ActivityLog. # noqa: E501
:rtype: str
"""
return self._start_time
@start_time.setter
def start_time(self, start_time):
"""Sets the start_time of this ActivityLog.
        Start time # noqa: E501
:param start_time: The start_time of this ActivityLog. # noqa: E501
:type: str
"""
self._start_time = start_time
@property
def end_time(self):
"""Gets the end_time of this ActivityLog. # noqa: E501
        End time # noqa: E501
:return: The end_time of this ActivityLog. # noqa: E501
:rtype: str
"""
return self._end_time
@end_time.setter
def end_time(self, end_time):
"""Sets the end_time of this ActivityLog.
        End time # noqa: E501
:param end_time: The end_time of this ActivityLog. # noqa: E501
:type: str
"""
self._end_time = end_time
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ActivityLog):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
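# --- Hedged usage sketch (not part of the generated client) ------------------
# Minimal illustration of constructing the model and serializing it; the field
# values below are made up for demonstration and are not taken from the API.
if __name__ == '__main__':
    sample = ActivityLog(
        activity_no='1',
        auto_scaling_group_name='my-autoscaling-group',
        status_message='Successful',
    )
    print(sample.to_dict())  # serialized via swagger_types
    print(sample == ActivityLog(activity_no='1'))  # False: __eq__ compares all attributes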
|
from unittest import TestCase
from flaskphiid.annotation import Annotation, AnnotationFactory, MergedAnnotation
from flaskphiid.annotation import unionize_annotations
from flaskphiid.annotation import IncompatibleTypeException
class AnnotationTest(TestCase):
def setUp(self):
# sample text
self.sample_text = ("Patient is Mr. John Smith Jr., a 48-year-old "
"teacher and chef. His phone number is (555) 867-5309, "
"and his address is 123 Sesame St, Seattle, WA 99999. "
"Test.")
# sample MedLP results
self.sample_compmed = [
{
"Id": 0,
"BeginOffset": 0,
"EndOffset": 25,
"Score": 0.99,
"Text": "Patient is Mr. John Smith",
"Category": "PROTECTED_HEALTH_INFORMATION",
"Type": "NAME",
"Traits": []
},
{
"Id": 1,
"BeginOffset": 33,
"EndOffset": 35,
"Score": 0.98,
"Text": "48",
"Category": "PROTECTED_HEALTH_INFORMATION",
"Type": "AGE",
"Traits": []
},
{
"Id": 2,
"BeginOffset": 45,
"EndOffset": 52,
"Score": 0.97,
"Text": "teacher",
"Category": "PROTECTED_HEALTH_INFORMATION",
"Type": "PROFESSION",
"Traits": []
},
{
"Id": 3,
"BeginOffset": 83,
"EndOffset": 97,
"Score": 0.96,
"Text": "(555) 867-5309",
"Category": "PROTECTED_HEALTH_INFORMATION",
"Type": "PHONE_OR_FAX",
"Traits": []
},
{
"Id": 4,
"BeginOffset": 122,
"EndOffset": 144,
"Score": 0.95,
"Text": "Sesame St, Seattle, WA",
"Category": "PROTECTED_HEALTH_INFORMATION",
"Type": "ADDRESS",
"Traits": []
},
{
"Id": 5,
"BeginOffset": 152,
"EndOffset": 156,
"Score": 0.94,
"Text": "Test",
"Category": "PROTECTED_HEALTH_INFORMATION",
"Type": "NAME",
"Traits": []
}
]
# sample HutchNER results
self.sample_hutchner = [
{
"start": 15,
"stop": 29,
"confidence": 0.01,
"text": "John Smith Jr.",
"label": "PATIENT_OR_FAMILY_NAME"
},
{
"start": 33,
"stop": 44,
"confidence": 0.02,
"text": "48-year-old",
"label": "AGE"
},
{
"start": 57,
"stop": 61,
"confidence": 0.03,
"text": "chef",
"label": "PROFESSION"
},
{
"start": 89,
"stop": 97,
"confidence": 0.04,
"text": "867-5309",
"label": "PHONE_NUMBER"
},
{
"start": 118,
"stop": 131,
"confidence": 0.05,
"text": "123 Sesame St",
"label": "HOSPITAL_NAME"
},
{
"start": 142,
"stop": 150,
"confidence": 0.06,
"text": "WA 99999",
"label": "HOSPITAL_NAME"
},
{
"start": 152,
"stop": 156,
"confidence": 0.07,
"text": "Test",
"label": "URL_OR_IP"
}
]
#text to test compound annotations
self.sample_compound_type_text = (
"John Smith Jr. is being seen at"
" 123 Sesame Hospital"
" Ward 3 Sick Burns Unit"
" Seattle, WA 99999")
self.sample_compound_compmed = [
{
"Id": 0,
"BeginOffset": 32,
"EndOffset": 92,
"Score": 0.99,
"Text": "123 Sesame Hospital Ward 3 Sick Burns Unit Seattle, WA 99999",
"Category": "PROTECTED_HEALTH_INFORMATION",
"Type": "ADDRESS",
"Traits": []
},
]
self.sample_compound_hutchner = [
{
"start": 32,
"stop": 51,
"confidence": 0.01,
"text": "123 Sesame Hospital",
"label": "HOSPITAL_NAME"
},
{
"start": 51,
"stop": 58,
"confidence": 0.01,
"text": "Ward 3",
"label": "WARD"
},
{
"start": 59,
"stop": 74,
"confidence": 0.01,
"text": "Sick Burns Unit",
"label": "SPECIALTY"
},
]
def tearDown(self):
pass
def test_annotation_empty(self):
ann1 = Annotation('test')
self.assertTrue(ann1.empty())
ann2 = AnnotationFactory.from_hutchner(self.sample_hutchner[0])
self.assertFalse(ann2.empty())
def test_annotation_from_compmed(self):
for sample in self.sample_compmed:
ann = AnnotationFactory.from_compmed(sample)
self.assertEqual(ann.origin, 'compmed')
self.assertEqual(ann.text, sample['Text'])
self.assertEqual(ann.start, sample['BeginOffset'])
def test_annotation_from_hutchner(self):
for sample in self.sample_hutchner:
ann = AnnotationFactory.from_hutchner(sample)
self.assertEqual(ann.origin, 'hutchner')
self.assertEqual(ann.type, sample['label'])
self.assertEqual(ann.score, sample['confidence'])
def test_annotation_to_dict(self):
ann = Annotation('test')
data = ann.to_dict()
self.assertEqual(data['origin'], 'test')
def test_mergedannotation_from_annotations(self):
ann1 = AnnotationFactory.from_compmed(self.sample_compmed[0])
ann2 = AnnotationFactory.from_hutchner(self.sample_hutchner[0])
merged = AnnotationFactory.from_annotations([ann1, ann2])
self.assertEqual(merged.origin, 'merged')
self.assertEqual(merged.start, ann1.start)
self.assertEqual(merged.end, ann2.end)
self.assertEqual(merged.text, "Patient is Mr. John Smith Jr.")
self.assertEqual(merged.source_origins,
set([ann1.origin, ann2.origin]))
self.assertEqual(merged.source_scores, [ann1.score, ann2.score])
self.assertTrue(ann1 in merged.source_annotations)
def test_mergedannotation_to_dict(self):
ann1 = AnnotationFactory.from_compmed(self.sample_compmed[0])
ann2 = AnnotationFactory.from_hutchner(self.sample_hutchner[0])
merged = AnnotationFactory.from_annotations([ann1, ann2])
data = merged.to_dict()
self.assertEqual(data['origin'], 'merged')
self.assertEqual(data['text'], 'Patient is Mr. John Smith Jr.')
self.assertTrue('compmed' in data['source_origins'])
detailed = merged.to_dict(detailed=True)
self.assertEqual(len(detailed['source_annotations']), 2)
def test_mergedannotation_type_merge_matching(self):
# matching type case
ann1 = AnnotationFactory.from_compmed(self.sample_compmed[1])
ann2 = AnnotationFactory.from_hutchner(self.sample_hutchner[1])
merged = AnnotationFactory.from_annotations([ann1, ann2])
self.assertEqual(merged.type, ann1.type)
self.assertEqual(merged.score, ann1.score)
def test_mergedannotation_type_matching_parent_low_score(self):
# matching type/parent-type case; score < threshold
ann1 = AnnotationFactory.from_compmed(self.sample_compmed[0])
ann2 = AnnotationFactory.from_hutchner(self.sample_hutchner[0])
merged = AnnotationFactory.from_annotations([ann1, ann2])
self.assertEqual(merged.type, ann1.type)
self.assertEqual(merged.score, ann1.score)
def test_mergedannotation_type_matching_parent_high_score_scnd_anno(self):
# matching type/parent-type case; score > threshold
ann1 = AnnotationFactory.from_compmed(self.sample_compmed[0])
ann2 = AnnotationFactory.from_hutchner(self.sample_hutchner[0])
ann2.score = 0.8
merged = AnnotationFactory.from_annotations([ann1, ann2])
self.assertEqual(merged.type, ann2.type)
self.assertEqual(merged.score, ann2.score)
def test_mergedannotation_type_matching_parent_high_score_first_anno(self):
# matching parent-type/type case; score > threshold
ann1 = AnnotationFactory.from_compmed(self.sample_compmed[0])
ann2 = AnnotationFactory.from_hutchner(self.sample_hutchner[0])
ann2.score = 0.8
merged = AnnotationFactory.from_annotations([ann2, ann1])
self.assertEqual(merged.type, ann2.type)
self.assertEqual(merged.score, ann2.score)
    def test_mergedannotation_type_matching_parent_low_score_first_anno(self):
        # matching parent-type/type case; score < threshold
ann1 = AnnotationFactory.from_compmed(self.sample_compmed[0])
ann2 = AnnotationFactory.from_hutchner(self.sample_hutchner[0])
ann2.score = 0.4
merged = AnnotationFactory.from_annotations([ann2, ann1])
self.assertEqual(merged.type, ann1.type)
self.assertEqual(merged.score, ann1.score)
def test_mergedannotation_type_matching_parent_diff_child_hi_score(self):
# matching parent-type/parent-type case; score1 > score2
ann1 = AnnotationFactory.from_hutchner(self.sample_hutchner[0])
ann2 = AnnotationFactory.from_hutchner(self.sample_hutchner[0])
ann1.score = 0.6
ann2.type = "PROVIDER_NAME"
merged = AnnotationFactory.from_annotations([ann1, ann2])
self.assertEqual(merged.type, ann1.type)
self.assertEqual(merged.score, ann1.score)
def test_mergedannotation_type_matching_parent_diff_child_low_score(self):
# matching parent-type/parent-type case; score1 < score2
ann1 = AnnotationFactory.from_hutchner(self.sample_hutchner[0])
ann2 = AnnotationFactory.from_hutchner(self.sample_hutchner[0])
ann2.score = 0.8
merged = AnnotationFactory.from_annotations([ann1, ann2])
self.assertEqual(merged.type, ann2.type)
self.assertEqual(merged.score, ann2.score)
def test_mergedannotation_type_matching_parent_compound_child_maps(self):
# matching parent-type/parent-type case; score1 > score2, 3, 4
ann1 = AnnotationFactory.from_compmed(self.sample_compound_compmed[0])
ann2 = AnnotationFactory.from_hutchner(self.sample_compound_hutchner[0])
ann3 = AnnotationFactory.from_hutchner(self.sample_compound_hutchner[1])
ann4 = AnnotationFactory.from_hutchner(self.sample_compound_hutchner[2])
results = unionize_annotations([ann1, ann2, ann3, ann4])
#Do we end up with the parent type after compound mapping?
self.assertEqual(results.type, ann1.type)
self.assertEqual(results.score, ann1.score)
    def test_mergedannotation_type_matching_parent_compound_child_high_scores(self):
        # matching parent-type/parent-type case; scores 2, 3, 4 raised above threshold
ann1 = AnnotationFactory.from_compmed(self.sample_compound_compmed[0])
ann2 = AnnotationFactory.from_hutchner(self.sample_compound_hutchner[0])
ann3 = AnnotationFactory.from_hutchner(self.sample_compound_hutchner[1])
ann4 = AnnotationFactory.from_hutchner(self.sample_compound_hutchner[2])
ann2.score = 0.7
ann3.score = 0.8
ann4.score = 0.9
results = unionize_annotations([ann1, ann2, ann3, ann4])
# Do we end up with the parent type after compound mapping?
self.assertEqual(type(results), type(list()))
self.assertEqual(results[0].type, "ADDRESS")
self.assertEqual(results[1].type, "HOSPITAL_NAME")
self.assertEqual(results[2].type, "WARD")
self.assertEqual(results[3].type, "SPECIALTY")
def test_mergedannotation_type_mismatched_parent_type(self):
# mismatched parent-type/parent-type case
ann1 = AnnotationFactory.from_compmed(self.sample_compmed[5])
ann2 = AnnotationFactory.from_hutchner(self.sample_hutchner[6])
merged = AnnotationFactory.from_annotations([ann1, ann2])
self.assertEqual(merged.type, "UNKNOWN")
def test_mergedannotation_invalid_cases(self):
self.assertRaises(ValueError, AnnotationFactory.from_annotations, [])
self.assertRaises(ValueError, MergedAnnotation().add_annotation,
Annotation('test'))
# attempt to combine non-overlapping annotations
ann1 = AnnotationFactory.from_compmed(self.sample_compmed[0])
ann2 = AnnotationFactory.from_hutchner(self.sample_hutchner[3])
ma = AnnotationFactory.from_annotations([ann1])
self.assertRaises(ValueError, ma.add_annotation, ann2)
def test_unionize_annotations(self):
anns = [AnnotationFactory.from_compmed(ann) for ann in self.sample_compmed]
anns += [AnnotationFactory.from_hutchner(ann) for ann in self.sample_hutchner]
union = unionize_annotations(anns)
self.assertEqual(len(union), 10)
for merged in union:
self.assertEqual(merged.text,
self.sample_text[merged.start:merged.end])
self.assertEqual(len(union[5].source_annotations), 3)
self.assertEqual(len(union[5].source_origins), 2)
|
'''Wrapper for rowio.h
Generated with:
./ctypesgen.py --cpp gcc -E -I/Applications/GRASS-7.8.app/Contents/Resources/include -D_Nullable= -I/Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include -I/Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include -D__GLIBC_HAVE_LONG_LONG -lgrass_rowio.7.8 /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/rowio.h /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/rowio.h -o OBJ.x86_64-apple-darwin18.7.0/rowio.py
Do not modify this file.
'''
__docformat__ = 'restructuredtext'
_libs = {}
_libdirs = []
import six  # needed below: the symbol-lookup loops iterate the loaded libraries with six.itervalues()
from .ctypes_preamble import *
from .ctypes_preamble import _variadic_function
from .ctypes_loader import *
add_library_search_dirs([])
# Begin libraries
_libs["grass_rowio.7.8"] = load_library("grass_rowio.7.8")
# 1 libraries
# End libraries
# No modules
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/rowio.h: 14
class struct_ROWIO_RCB(Structure):
pass
struct_ROWIO_RCB.__slots__ = [
'buf',
'age',
'row',
'dirty',
]
struct_ROWIO_RCB._fields_ = [
('buf', POINTER(None)),
('age', c_int),
('row', c_int),
('dirty', c_int),
]
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/rowio.h: 21
class struct_anon_1(Structure):
pass
struct_anon_1.__slots__ = [
'fd',
'nrows',
'len',
'cur',
'buf',
'getrow',
'putrow',
'rcb',
]
struct_anon_1._fields_ = [
('fd', c_int),
('nrows', c_int),
('len', c_int),
('cur', c_int),
('buf', POINTER(None)),
('getrow', CFUNCTYPE(UNCHECKED(c_int), c_int, POINTER(None), c_int, c_int)),
('putrow', CFUNCTYPE(UNCHECKED(c_int), c_int, POINTER(None), c_int, c_int)),
('rcb', POINTER(struct_ROWIO_RCB)),
]
ROWIO = struct_anon_1 # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/rowio.h: 21
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/rowio.h: 4
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Rowio_fileno'):
continue
Rowio_fileno = _lib.Rowio_fileno
Rowio_fileno.argtypes = [POINTER(ROWIO)]
Rowio_fileno.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/rowio.h: 5
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Rowio_forget'):
continue
Rowio_forget = _lib.Rowio_forget
Rowio_forget.argtypes = [POINTER(ROWIO), c_int]
Rowio_forget.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/rowio.h: 6
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Rowio_get'):
continue
Rowio_get = _lib.Rowio_get
Rowio_get.argtypes = [POINTER(ROWIO), c_int]
Rowio_get.restype = POINTER(c_ubyte)
Rowio_get.errcheck = lambda v,*a : cast(v, c_void_p)
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/rowio.h: 7
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Rowio_flush'):
continue
Rowio_flush = _lib.Rowio_flush
Rowio_flush.argtypes = [POINTER(ROWIO)]
Rowio_flush.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/rowio.h: 8
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Rowio_put'):
continue
Rowio_put = _lib.Rowio_put
Rowio_put.argtypes = [POINTER(ROWIO), POINTER(None), c_int]
Rowio_put.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/rowio.h: 9
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Rowio_release'):
continue
Rowio_release = _lib.Rowio_release
Rowio_release.argtypes = [POINTER(ROWIO)]
Rowio_release.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/rowio.h: 10
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Rowio_setup'):
continue
Rowio_setup = _lib.Rowio_setup
Rowio_setup.argtypes = [POINTER(ROWIO), c_int, c_int, c_int, CFUNCTYPE(UNCHECKED(c_int), c_int, POINTER(None), c_int, c_int), CFUNCTYPE(UNCHECKED(c_int), c_int, POINTER(None), c_int, c_int)]
Rowio_setup.restype = c_int
break
ROWIO_RCB = struct_ROWIO_RCB # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/rowio.h: 14
# No inserted files
|
"""Run inference a DeepLab v3 model using tf.estimator API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import sys
import tensorflow as tf
import numpy as np
import deeplab_model
from utils import preprocessing
from utils import dataset_util
from PIL import Image
import matplotlib.pyplot as plt
from tensorflow.python import debug as tf_debug
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, default='dataset/VOCdevkit/VOC2012/JPEGImages',
help='The directory containing the image data.')
parser.add_argument('--output_dir', type=str, default='./dataset/inference_output',
help='Path to the directory to generate the inference results')
parser.add_argument('--infer_data_list', type=str, default='./dataset/sample_images_list.txt',
help='Path to the file listing the inferring images.')
parser.add_argument('--model_dir', type=str, default='./model',
help="Base directory for the model. "
"Make sure 'model_checkpoint_path' given in 'checkpoint' file matches "
"with checkpoint name.")
parser.add_argument('--base_architecture', type=str, default='resnet_v2_101',
choices=['resnet_v2_50', 'resnet_v2_101'],
help='The architecture of base Resnet building block.')
parser.add_argument('--output_stride', type=int, default=16,
choices=[8, 16],
help='Output stride for DeepLab v3. Currently 8 or 16 is supported.')
parser.add_argument('--debug', action='store_true',
help='Whether to use debugger to track down bad values during training.')
parser.add_argument('--bands', nargs = 3, default = ['R','G','B'],
help='Which set of 3 bands to use?')
parser.add_argument('--patch_dims', type = int, default = 256, help = 'size of output predicted patch')
parser.add_argument('--buffer_size', type = int, default = 128, help = 'size of patch buffer for predictions')
_NUM_CLASSES = 2
def make_example(pred_dict):
#buffer_shape = [FLAGS.buffer_size, FLAGS.buffer_size]
#x_buffer = int(buffer_shape[0] / 2)
#y_buffer = int(buffer_shape[1] / 2)
class_id = np.squeeze(pred_dict['classes'][:, 128:128+512, 128:128+512, :]).flatten()
probability = np.squeeze(pred_dict['probabilities'][:, 128:128+512, 128:128+512, 1]).flatten()
#class_id = np.squeeze(pred_dict['classes'][:, x_buffer:x_buffer+FLAGS.patch_dims, y_buffer:y_buffer+FLAGS.patch.dims, :]).flatten()
#probability = np.squeeze(pred_dict['probabilities'][:, x_buffer:x_buffer+FLAGS.patch_dims, y_buffer:y_buffer+FLAGS.patch_dims, 1]).flatten()
return tf.train.Example(
features=tf.train.Features(
feature={
'class_id': tf.train.Feature(
float_list=tf.train.FloatList(
value=class_id)),
'probability': tf.train.Feature(
float_list=tf.train.FloatList(
value=probability))
}
)
)
def main(unused_argv):
# Using the Winograd non-fused algorithms provides a small performance boost.
os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1'
pred_hooks = None
if FLAGS.debug:
debug_hook = tf_debug.LocalCLIDebugHook()
pred_hooks = [debug_hook]
model = tf.estimator.Estimator(
model_fn=deeplab_model.deeplabv3_model_fn,
model_dir=FLAGS.model_dir,
params={
'output_stride': FLAGS.output_stride,
'batch_size': 1, # Batch size must be 1 because the images' size may differ
'base_architecture': FLAGS.base_architecture,
'pre_trained_model': None,
'batch_norm_decay': None,
'num_classes': _NUM_CLASSES,
})
#examples = dataset_util.read_examples_list(FLAGS.infer_data_list)
#image_files = [os.path.join(FLAGS.data_dir, filename) for filename in examples]
image_files = tf.gfile.Glob('{}/*tfrecord.gz'.format(FLAGS.data_dir))
print(image_files)
predictions = model.predict(
input_fn=lambda: preprocessing.eval_input_fn(image_files, bands = FLAGS.bands, batch_size = 1, side = FLAGS.patch_dims+FLAGS.buffer_size),
hooks=pred_hooks,
yield_single_examples = False)
output_dir = FLAGS.output_dir
MAX_RECORDS_PER_FILE = 50
output_path = output_dir + '-{:05}.tfrecord'
# Create the records we'll ingest into EE
file_number = 0
still_writing = True
total_patches = 0
while still_writing:
file_path = output_path.format(file_number)
writer = tf.python_io.TFRecordWriter(file_path)
print("Writing file: {}".format(file_path))
try:
written_records = 0
while True:
pred_dict = next(predictions)
writer.write(make_example(pred_dict).SerializeToString())
written_records += 1
total_patches += 1
if written_records % 5 == 0:
print(" Writing patch: {}".format(written_records))
if written_records == MAX_RECORDS_PER_FILE:
break
        except StopIteration:
            # predictions generator exhausted; no more patches to write
            still_writing = False
finally:
file_number += 1
writer.close()
print('Wrote: {} patches.'.format(total_patches))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
for pred_dict, image_path in zip(predictions, image_files):
image_basename = os.path.splitext(os.path.basename(image_path))[0]
        output_filename = image_basename + '_pred.npy'
        #output_filename = image_basename + '_mask.png'
path_to_output = os.path.join(output_dir, output_filename)
print("generating:", path_to_output)
#mask = pred_dict['decoded_labels']
classes = pred_dict['classes']
probs = pred_dict['probabilities']
print(probs.shape, classes.shape)
out = np.concatenate([probs, classes], axis = -1)
np.save(path_to_output, out)
#mask = Image.fromarray(mask)
plt.axis('off')
plt.imshow(probs[:, :, 0], cmap='hot', interpolation='nearest', vmin = 0.9, vmax = 1)
plt.show()
#plt.imshow(mask)
#plt.savefig(path_to_output, bbox_inches='tight')
if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.INFO)
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
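# Example invocation (hedged: the script's file name and data paths below are
# placeholders, not taken from the original project):
#   python inference.py \
#       --data_dir ./dataset/patches --output_dir ./dataset/inference_output \
#       --model_dir ./model --bands R G B --patch_dims 256 --buffer_size 128
# Note that the current code path globs --data_dir for *tfrecord.gz files and
# does not read --infer_data_list.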
|
from django.contrib import admin
# Register your models here.
from e_secretary.models import Course, Didaskalia, Orologio, Announcement, Drastiriotita, Professor, Student, Dilosi, Certificate, Thesis, SimmetoxiDrastiriotita, Event, Secr_Announcement, Profile
@admin.register(Course)
class CourseAdmin(admin.ModelAdmin):
list_display = ('name', 'tomeas', 'ects', 'ipoxrewtiko')
admin.site.register(Event)
admin.site.register(Didaskalia)
admin.site.register(Orologio)
admin.site.register(Announcement)
admin.site.register(Drastiriotita)
admin.site.register(Professor)
admin.site.register(Student)
admin.site.register(Dilosi)
admin.site.register(Profile)
admin.site.register(Certificate)
admin.site.register(Thesis)
admin.site.register(SimmetoxiDrastiriotita)
admin.site.register(Secr_Announcement)
|
import torch
import math
from torch.autograd import Variable
import torch.nn.functional as F
import table
import table.IO
import table.ModelConstructor
import table.Models
import table.modules
from table.Utils import add_pad, argmax
from table.ParseResult import ParseResult
from table.Models import encode_unsorted_batch
from tree import SKIP_OP_LIST
from bpe import recover_bpe
def v_eval(a):
return Variable(a, volatile=True)
def cpu_vector(v):
return v.clone().view(-1).cpu()
def recover_layout_token(pred_list, vocab, max_sent_length):
r_list = []
for i in range(max_sent_length):
r_list.append(vocab.itos[pred_list[i]])
if r_list[-1] == table.IO.EOS_WORD:
r_list = r_list[:-1]
break
return r_list
def mix_lay_and_tgt(lay_skip, tgt):
if len(lay_skip) == len(tgt):
tgt_mix = []
for tk_lay, tk_tgt in zip(lay_skip, tgt):
if tk_lay in (table.IO.TOK_WORD, table.IO.SKP_WORD):
tgt_mix.append(tk_tgt)
else:
tgt_mix.append(tk_lay)
return tgt_mix
else:
return tgt
def recover_target_token(lay_skip, pred_list, vocab, max_sent_length):
r_list = []
for i in range(min(len(lay_skip), len(pred_list))):
if lay_skip[i] in (table.IO.TOK_WORD, table.IO.SKP_WORD):
r_list.append(vocab.itos[pred_list[i]])
elif lay_skip[i] in (table.IO.RIG_WORD,):
r_list.append(')')
else:
r_list.append(lay_skip[i])
return r_list
def get_decode_batch_length(dec, batch_size, max_sent_length):
r_list = []
for b in range(batch_size):
find_len = None
for i in range(max_sent_length):
if dec[i, b] == table.IO.EOS:
find_len = i
break
if find_len is None:
r_list.append(max_sent_length)
else:
r_list.append(find_len)
assert(len(r_list) == batch_size)
return torch.LongTensor(r_list)
# ['(airline:e@1', '(argmin', '(and', 'flight@1', 'from@2', 'to@2', 'day_number@2', 'month@2', ')', 'fare@1', ')', ')']
def expand_layout_with_skip(lay_list):
lay_skip_list, tgt_mask_list, lay_index_list = [], [], []
for lay in lay_list:
lay_skip = []
for tk_lay in lay:
if len(tk_lay) >= 2 and tk_lay[-2] == '@':
op = tk_lay[:-2]
if tk_lay.startswith('('):
lay_skip.append(op)
else:
# need to expand
k = int(tk_lay[-1])
# ')' can be generated according to layout rather than predicting
lay_skip.extend(
['(' + op] + [table.IO.SKP_WORD for __ in range(k)] + [table.IO.RIG_WORD])
else:
lay_skip.append(tk_lay)
if tk_lay[1:] in SKIP_OP_LIST:
lay_skip.append(table.IO.SKP_WORD)
lay_skip_list.append(lay_skip)
# tgt_mask
tgt_mask_list.append(table.IO.get_tgt_mask(lay_skip))
# lay_index
lay_index_list.append(table.IO.get_lay_index(lay_skip))
tgt_mask_seq = add_pad(tgt_mask_list, 1).float().t()
lay_index_seq = add_pad(lay_index_list, 0).t()
return lay_skip_list, tgt_mask_seq, lay_index_seq
class Translator(object):
    def __init__(self, opt, dummy_opt=None):
        # Add in default model arguments, possibly added since training.
        if dummy_opt is None:
            dummy_opt = {}
        self.opt = opt
checkpoint = torch.load(opt.model,
map_location=lambda storage, loc: storage)
self.fields = table.IO.TableDataset.load_fields(checkpoint['vocab'])
model_opt = checkpoint['opt']
model_opt.pre_word_vecs = opt.pre_word_vecs
for arg in dummy_opt:
if arg not in model_opt:
model_opt.__dict__[arg] = dummy_opt[arg]
self.model = table.ModelConstructor.make_base_model(
model_opt, self.fields, checkpoint)
self.model.eval()
if model_opt.moving_avg > 0:
for p, avg_p in zip(self.model.parameters(), checkpoint['moving_avg']):
p.data.copy_(avg_p)
if opt.attn_ignore_small > 0:
self.model.lay_decoder.attn.ignore_small = opt.attn_ignore_small
self.model.tgt_decoder.attn.ignore_small = opt.attn_ignore_small
def _init_parent_list(self, decoder, q_enc, batch_size):
# (num_layers * num_directions, batch, hidden_size)
q_ht, q_ct = q_enc
q_ht = q_ht[-1] if not self.model.opt.brnn else q_ht[-2:].transpose(
0, 1).contiguous().view(batch_size, -1)
if self.model.opt.parent_feed == 'output':
parent_list = [[q_ht[b].unsqueeze(0)] for b in range(batch_size)]
elif self.model.opt.parent_feed == 'input':
decoder.init_parent_all(q_ht.unsqueeze(0))
parent_list = [[0] for b in range(batch_size)]
else:
parent_list = None
return parent_list
def _cat_parent_feed_input(self, parent_list, batch_size):
parent_index = v_eval(torch.LongTensor(
[parent_list[b][-1] for b in range(batch_size)]).unsqueeze_(0).cuda())
return parent_index
def _cat_parent_feed_output(self, dec_all, parent_list, batch_size):
parent_feed = torch.stack(
[parent_list[b][-1] for b in range(batch_size)], 1)
# -> (dec_seq_len, batch_size, 2 * rnn_size)
dec_all = torch.cat([dec_all, parent_feed], 2)
return dec_all
def _update_parent_list(self, i, parent_list, dec_rnn_output, inp_cpu, lay_skip_list, vocab, batch_size):
# append to parent_list
for b in range(batch_size):
tk = vocab.itos[inp_cpu[b]]
if (lay_skip_list is not None) and (i < len(lay_skip_list[b])):
lay_skip = lay_skip_list[b]
if lay_skip[i] in (table.IO.TOK_WORD, table.IO.SKP_WORD):
pass
elif lay_skip[i] in (table.IO.RIG_WORD,):
tk = ')'
else:
tk = lay_skip[i]
if tk.startswith('('):
if self.model.opt.parent_feed == 'output':
parent_list[b].append(dec_rnn_output[:, b, :])
elif self.model.opt.parent_feed == 'input':
parent_list[b].append(i + 1)
elif tk == ')':
if len(parent_list[b]) > 1:
parent_list[b].pop()
def run_lay_decoder(self, decoder, classifier, q, q_all, q_enc, max_dec_len, vocab_mask, vocab):
batch_size = q.size(1)
decoder.attn.applyMaskBySeqBatch(q)
dec_list = []
dec_state = decoder.init_decoder_state(q_all, q_enc)
inp = torch.LongTensor(1, batch_size).fill_(table.IO.BOS).cuda()
if self.model.opt.parent_feed in ('input', 'output'):
parent_list = self._init_parent_list(decoder, q_enc, batch_size)
for i in range(max_dec_len):
inp = v_eval(inp)
if self.model.opt.parent_feed == 'input':
parent_index = self._cat_parent_feed_input(
parent_list, batch_size)
else:
parent_index = None
dec_all, dec_state, _, dec_rnn_output = decoder(
inp, q_all, dec_state, parent_index)
if self.model.opt.parent_feed == 'output':
dec_all = self._cat_parent_feed_output(
dec_all, parent_list, batch_size)
dec_all = dec_all.view(batch_size, -1)
dec_out = classifier(dec_all)
dec_out = dec_out.data.view(1, batch_size, -1)
if vocab_mask is not None:
dec_out_part = dec_out[:, :, len(table.IO.special_token_list):]
dec_out_part.masked_fill_(vocab_mask, -float('inf'))
# dec_out_part.masked_scatter_(vocab_mask, dec_out_part[vocab_mask].add(-math.log(1000)))
inp = argmax(dec_out)
# topk = [vocab.itos[idx] for idx in dec_out[0, 0, :].topk(10, dim=0)[1]]
# print(topk)
inp_cpu = cpu_vector(inp)
dec_list.append(inp_cpu)
if self.model.opt.parent_feed in ('input', 'output'):
self._update_parent_list(
i, parent_list, dec_rnn_output, inp_cpu, None, vocab, batch_size)
return torch.stack(dec_list, 0)
def run_tgt_decoder(self, embeddings, tgt_mask_seq, lay_index_seq, lay_all, decoder, classifier, q, q_all, q_enc, max_dec_len, lay_skip_list, vocab):
batch_size = q.size(1)
decoder.attn.applyMaskBySeqBatch(q)
dec_list = []
dec_state = decoder.init_decoder_state(q_all, q_enc)
inp = torch.LongTensor(1, batch_size).fill_(table.IO.BOS).cuda()
batch_index = torch.LongTensor(range(batch_size)).unsqueeze_(0).cuda()
if self.model.opt.parent_feed in ('input', 'output'):
parent_list = self._init_parent_list(decoder, q_enc, batch_size)
for i in range(min(max_dec_len, lay_index_seq.size(0))):
# (1, batch)
lay_index = lay_index_seq[i].unsqueeze(0)
lay_select = lay_all[lay_index, batch_index, :]
tgt_inp_emb = embeddings(v_eval(inp))
tgt_mask_expand = v_eval(tgt_mask_seq[i].unsqueeze(
0).unsqueeze(2).expand_as(tgt_inp_emb))
inp = tgt_inp_emb.mul(tgt_mask_expand) + \
lay_select.mul(1 - tgt_mask_expand)
if self.model.opt.parent_feed == 'input':
parent_index = self._cat_parent_feed_input(
parent_list, batch_size)
else:
parent_index = None
dec_all, dec_state, _, dec_rnn_output = decoder(
inp, q_all, dec_state, parent_index)
if self.model.opt.parent_feed == 'output':
dec_all = self._cat_parent_feed_output(
dec_all, parent_list, batch_size)
dec_all = dec_all.view(batch_size, -1)
dec_out = classifier(dec_all)
dec_out = dec_out.view(1, batch_size, -1)
inp = argmax(dec_out.data)
# RIG_WORD -> ')'
rig_mask = []
for b in range(batch_size):
tk = lay_skip_list[b][i] if i < len(lay_skip_list[b]) else None
rig_mask.append(1 if tk in (table.IO.RIG_WORD,) else 0)
inp.masked_fill_(torch.ByteTensor(
rig_mask).unsqueeze_(0).cuda(), vocab.stoi[')'])
inp_cpu = cpu_vector(inp)
dec_list.append(inp_cpu)
if self.model.opt.parent_feed in ('input', 'output'):
self._update_parent_list(
i, parent_list, dec_rnn_output, inp_cpu, lay_skip_list, vocab, batch_size)
return torch.stack(dec_list, 0)
def translate(self, batch):
q, q_len = batch.src
batch_size = q.size(1)
# encoding
q_enc, q_all = self.model.q_encoder(q, lengths=q_len, ent=None)
if self.model.opt.seprate_encoder:
q_tgt_enc, q_tgt_all = self.model.q_tgt_encoder(
q, lengths=q_len, ent=None)
else:
q_tgt_enc, q_tgt_all = q_enc, q_all
if self.model.opt.layout_token_prune:
layout_token_prune_list = []
q_token_enc, __ = self.model.q_token_encoder(
q, lengths=q_len, ent=None)
# (num_layers * num_directions, batch, hidden_size)
q_token_ht, __ = q_token_enc
batch_size = q_token_ht.size(1)
q_token_ht = q_token_ht[-1] if not self.model.opt.brnn else q_token_ht[-2:].transpose(
0, 1).contiguous().view(batch_size, -1)
# without .t()
token_out = F.sigmoid(self.model.token_pruner(q_token_ht))
# decide prune which tokens
vocab_mask = token_out.data.lt(0).view(1, batch_size, -1)
for tk_idx in range(len(table.IO.special_token_list), len(self.fields['lay'].vocab)):
w = self.fields['lay'].vocab.itos[tk_idx]
if w.startswith('(') or w in (')', table.IO.TOK_WORD):
idx = tk_idx - len(table.IO.special_token_list)
vocab_mask[:, :, idx] = 0
# log pruned tokens for evaluation
for b in range(batch_size):
masked_v_list = []
for i in range(vocab_mask.size(2)):
if vocab_mask[0, b, i] == 1:
masked_v_list.append(
self.fields['lay'].vocab.itos[i + len(table.IO.special_token_list)])
layout_token_prune_list.append(masked_v_list)
else:
token_out = None
vocab_mask = None
layout_token_prune_list = [None for b in range(batch_size)]
# layout decoding
lay_dec = self.run_lay_decoder(
self.model.lay_decoder, self.model.lay_classifier, q, q_all, q_enc, self.opt.max_lay_len, vocab_mask, self.fields['lay'].vocab)
if self.opt.gold_layout:
if self.model.opt.bpe:
lay_dec = batch.lay_bpe[0].data[1:]
else:
lay_dec = batch.lay[0].data[1:]
# recover layout
lay_list = []
for b in range(batch_size):
if self.model.opt.bpe:
lay_field = 'lay_bpe'
else:
lay_field = 'lay'
lay = recover_layout_token([lay_dec[i, b] for i in range(
lay_dec.size(0))], self.fields[lay_field].vocab, lay_dec.size(0))
if self.model.opt.bpe:
lay = recover_bpe(lay)
lay_list.append(lay)
# layout encoding
# lay_len = get_decode_batch_length(lay_dec, batch_size, self.opt.max_lay_len)
lay_len = torch.LongTensor([len(lay_list[b]) for b in range(batch_size)])
# data used for layout encoding
lay_dec = torch.LongTensor(
lay_len.max(), batch_size).fill_(table.IO.PAD)
for b in range(batch_size):
for i in range(lay_len[b]):
lay_dec[i, b] = self.fields['lay'].vocab.stoi[lay_list[b][i]]
lay_dec = v_eval(lay_dec.cuda())
# (lay_len, batch, lay_size)
if self.model.opt.no_lay_encoder:
lay_all = self.model.lay_encoder(lay_dec)
else:
lay_enc_len = lay_len.cuda().clamp(min=1)
lay_all = encode_unsorted_batch(
self.model.lay_encoder, lay_dec, lay_enc_len)
# co-attention
if self.model.lay_co_attention is not None:
lay_all = self.model.lay_co_attention(
lay_all, lay_enc_len, q_all, q)
# get lay_index and tgt_mask: (tgt_len, batch)
lay_skip_list, tgt_mask_seq, lay_index_seq = expand_layout_with_skip(
lay_list)
# co-attention
if self.model.q_co_attention is not None:
q_tgt_enc, q_tgt_all = self.model.q_co_attention(
q_tgt_all, q_len, lay_all, lay_dec)
# target decoding
tgt_dec = self.run_tgt_decoder(self.model.tgt_embeddings, tgt_mask_seq, lay_index_seq, lay_all, self.model.tgt_decoder,
self.model.tgt_classifier, q, q_tgt_all, q_tgt_enc, self.opt.max_tgt_len, lay_skip_list, self.fields['tgt'].vocab)
# recover target
tgt_list = []
for b in range(batch_size):
tgt = recover_target_token(lay_skip_list[b], [tgt_dec[i, b] for i in range(
tgt_dec.size(0))], self.fields['tgt'].vocab, tgt_dec.size(0))
tgt_list.append(tgt)
# (3) recover output
indices = cpu_vector(batch.indices.data)
return [ParseResult(idx, lay, tgt, token_prune)
for idx, lay, tgt, token_prune in zip(indices, lay_list, tgt_list, layout_token_prune_list)]
|
pupils = int(input('Enter the number of pupils: '))
apples = int(input('Enter the number of apples: '))
celoe, ostatok = divmod(apples, pupils)
print('Each pupil gets: ', celoe)
print('Left in the basket: ', ostatok)
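# For reference, divmod returns the integer quotient and the remainder in one
# call, which is exactly the split reported by the two print statements above:
assert divmod(17, 5) == (3, 2)  # 17 apples among 5 pupils: 3 each, 2 left in the basket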
|
import discord_logging
log = discord_logging.get_logger(init=True)
import comments
import utils
import static
from praw_wrapper import reddit_test
from praw_wrapper.reddit_test import RedditObject
from classes.submission import Submission
from classes.subscription import Subscription
from classes.subreddit import Subreddit
from classes.comment import DbComment
from classes.user import User
def test_process_comment_update(database, reddit):
subscriber_name = "Subscriber1"
author = database.get_or_add_user("Author1")
db_subreddit = database.get_or_add_subreddit("TestSub", enable_subreddit_if_new=True)
comment_id = reddit_test.random_id()
submission_id = reddit_test.random_id()
comment = RedditObject(
body=f"{static.TRIGGER_UPDATE}!",
author=subscriber_name,
id=comment_id,
link_id="t3_"+submission_id,
permalink=f"/r/test/comments/{submission_id}/_/{comment_id}/",
subreddit=db_subreddit.name
)
db_submission = Submission(
submission_id=submission_id,
time_created=utils.datetime_now(),
author=author,
subreddit=db_subreddit,
permalink=f"/r/{db_subreddit.name}/comments/{submission_id}/"
)
database.add_submission(db_submission)
database.commit()
reddit.add_comment(comment)
comments.process_comment(comment.get_pushshift_dict(), reddit, database)
result = comment.get_first_child().body
assert "I will message you next time" in result
assert author.name in result
assert db_subreddit.name in result
assert "Click this link" in result
subscriptions = database.get_user_subscriptions_by_name(subscriber_name)
assert len(subscriptions) == 1
assert subscriptions[0].subscriber.name == subscriber_name
assert subscriptions[0].author.name == author.name
assert subscriptions[0].subreddit.name == db_subreddit.name
assert subscriptions[0].recurring is False
def test_process_comment_subscribe(database, reddit):
subscriber_name = "Subscriber1"
author = database.get_or_add_user("Author1")
db_subreddit = database.get_or_add_subreddit("TestSub", enable_subreddit_if_new=True)
comment_id = reddit_test.random_id()
submission_id = reddit_test.random_id()
comment = RedditObject(
body=f"{static.TRIGGER_SUBSCRIBE}!",
author=subscriber_name,
id=comment_id,
link_id="t3_"+submission_id,
permalink=f"/r/test/comments/{submission_id}/_/{comment_id}/",
subreddit=db_subreddit.name
)
db_submission = Submission(
submission_id=submission_id,
time_created=utils.datetime_now(),
author=author,
subreddit=db_subreddit,
permalink=f"/r/{db_subreddit.name}/comments/{submission_id}/"
)
database.add_submission(db_submission)
database.commit()
reddit.add_comment(comment)
comments.process_comment(comment.get_pushshift_dict(), reddit, database)
result = comment.get_first_child().body
assert "I will message you each time" in result
assert author.name in result
assert db_subreddit.name in result
assert "Click this link" in result
subscriptions = database.get_user_subscriptions_by_name(subscriber_name)
assert len(subscriptions) == 1
assert subscriptions[0].subscriber.name == subscriber_name
assert subscriptions[0].author.name == author.name
assert subscriptions[0].subreddit.name == db_subreddit.name
assert subscriptions[0].recurring is True
def test_process_comment_subscribe_tag(database, reddit):
subscriber_name = "Subscriber1"
author = database.get_or_add_user("Author1")
db_subreddit = database.get_or_add_subreddit("TestSub", enable_subreddit_if_new=True)
comment_id = reddit_test.random_id()
submission_id = reddit_test.random_id()
comment = RedditObject(
body=f"{static.TRIGGER_SUBSCRIBE}!",
author=subscriber_name,
id=comment_id,
link_id="t3_"+submission_id,
permalink=f"/r/test/comments/{submission_id}/_/{comment_id}/",
subreddit=db_subreddit.name
)
db_submission = Submission(
submission_id=submission_id,
time_created=utils.datetime_now(),
author=author,
subreddit=db_subreddit,
permalink=f"/r/{db_subreddit.name}/comments/{submission_id}/",
tag="Story1"
)
database.add_submission(db_submission)
database.commit()
reddit.add_comment(comment)
comments.process_comment(comment.get_pushshift_dict(), reddit, database)
result = comment.get_first_child().body
assert "I will message you each time" in result
assert "a story tagged <Story1>" in result
assert author.name in result
assert db_subreddit.name in result
assert "Click this link" in result
subscriptions = database.get_user_subscriptions_by_name(subscriber_name)
assert len(subscriptions) == 1
assert subscriptions[0].subscriber.name == subscriber_name
assert subscriptions[0].author.name == author.name
assert subscriptions[0].subreddit.name == db_subreddit.name
assert subscriptions[0].recurring is True
assert subscriptions[0].tag == "Story1"
def test_process_comment_subscribe_all(database, reddit):
subscriber_name = "Subscriber1"
author = database.get_or_add_user("Author1")
db_subreddit = database.get_or_add_subreddit("TestSub", enable_subreddit_if_new=True)
comment_id = reddit_test.random_id()
submission_id = reddit_test.random_id()
comment = RedditObject(
body=f"{static.TRIGGER_SUBSCRIBE_ALL}!",
author=subscriber_name,
id=comment_id,
link_id="t3_"+submission_id,
permalink=f"/r/test/comments/{submission_id}/_/{comment_id}/",
subreddit=db_subreddit.name
)
db_submission = Submission(
submission_id=submission_id,
time_created=utils.datetime_now(),
author=author,
subreddit=db_subreddit,
permalink=f"/r/{db_subreddit.name}/comments/{submission_id}/",
tag="Story1"
)
database.add_submission(db_submission)
database.commit()
reddit.add_comment(comment)
comments.process_comment(comment.get_pushshift_dict(), reddit, database)
result = comment.get_first_child().body
assert "I will message you each time" in result
assert "a story tagged <Story1>" not in result
assert author.name in result
assert db_subreddit.name in result
assert "Click this link" in result
subscriptions = database.get_user_subscriptions_by_name(subscriber_name)
assert len(subscriptions) == 1
assert subscriptions[0].subscriber.name == subscriber_name
assert subscriptions[0].author.name == author.name
assert subscriptions[0].subreddit.name == db_subreddit.name
assert subscriptions[0].recurring is True
assert subscriptions[0].tag is None
def test_process_comment_subreddit_not_enabled(database, reddit):
subscriber_name = "Subscriber1"
author = database.get_or_add_user("Author1")
subreddit_name = "TestSub"
comment_id = reddit_test.random_id()
submission_id = reddit_test.random_id()
comment = RedditObject(
body=f"{static.TRIGGER_SUBSCRIBE}!",
author=subscriber_name,
id=comment_id,
link_id="t3_"+submission_id,
permalink=f"/r/test/comments/{submission_id}/_/{comment_id}/",
subreddit=subreddit_name
)
reddit.add_comment(comment)
reddit.add_submission(RedditObject(id=submission_id, subreddit=subreddit_name, author=author.name))
comments.process_comment(comment.get_pushshift_dict(), reddit, database)
assert len(comment.children) == 0
subscriptions = database.get_user_subscriptions_by_name(subscriber_name, only_enabled=False)
assert len(subscriptions) == 1
assert subscriptions[0].subscriber.name == subscriber_name
assert subscriptions[0].author.name == author.name
assert subscriptions[0].subreddit.name == subreddit_name
assert subscriptions[0].recurring is True
assert len(reddit.sent_messages) == 1
assert "is not being tracked by the bot" in reddit.sent_messages[0].body
assert subreddit_name in reddit.sent_messages[0].body
def test_process_comment_thread_replied(database, reddit):
subscriber_name = "Subscriber1"
author = database.get_or_add_user("Author1")
db_subreddit = database.get_or_add_subreddit("TestSub", enable_subreddit_if_new=True)
comment_id = reddit_test.random_id()
submission_id = reddit_test.random_id()
comment = RedditObject(
body=f"{static.TRIGGER_UPDATE}!",
author=subscriber_name,
id=comment_id,
link_id="t3_"+submission_id,
permalink=f"/r/test/comments/{submission_id}/_/{comment_id}/",
subreddit=db_subreddit.name
)
reddit.add_comment(comment)
db_submission = Submission(
submission_id=submission_id,
time_created=utils.datetime_now(),
author=author,
subreddit=db_subreddit,
permalink=f"/r/{db_subreddit.name}/comments/{submission_id}/"
)
database.add_submission(db_submission)
previous_comment = DbComment(
comment_id=reddit_test.random_id(),
submission=db_submission,
subscriber=database.get_or_add_user("Subscriber2"),
author=author,
subreddit=db_subreddit,
recurring=False
)
database.add_comment(previous_comment)
database.commit()
comments.process_comment(comment.get_pushshift_dict(), reddit, database)
assert len(comment.children) == 0
subscriptions = database.get_user_subscriptions_by_name(subscriber_name)
assert len(subscriptions) == 1
assert subscriptions[0].subscriber.name == subscriber_name
assert subscriptions[0].author.name == author.name
assert subscriptions[0].subreddit.name == db_subreddit.name
assert subscriptions[0].recurring is False
assert len(reddit.sent_messages) == 1
assert "I will message you next" in reddit.sent_messages[0].body
assert db_subreddit.name in reddit.sent_messages[0].body
assert reddit.sent_messages[0].dest.name == subscriber_name
def test_process_comment_already_subscribed(database, reddit):
subscriber_name = "Subscriber1"
author = database.get_or_add_user("Author1")
db_subreddit = database.get_or_add_subreddit("TestSub", enable_subreddit_if_new=True)
comment_id = reddit_test.random_id()
submission_id = reddit_test.random_id()
comment = RedditObject(
body=f"{static.TRIGGER_UPDATE}!",
author=subscriber_name,
id=comment_id,
link_id="t3_"+submission_id,
permalink=f"/r/test/comments/{submission_id}/_/{comment_id}/",
subreddit=db_subreddit.name
)
reddit.add_comment(comment)
reddit.add_submission(RedditObject(id=submission_id, subreddit=db_subreddit.name, author=author.name))
database.add_subscription(
Subscription(
subscriber=database.get_or_add_user(subscriber_name),
author=author,
subreddit=db_subreddit,
recurring=False
)
)
comments.process_comment(comment.get_pushshift_dict(), reddit, database)
assert len(comment.children) == 0
subscriptions = database.get_user_subscriptions_by_name(subscriber_name)
assert len(subscriptions) == 1
assert len(reddit.sent_messages) == 1
assert "You had already asked me to message you" in reddit.sent_messages[0].body
assert db_subreddit.name in reddit.sent_messages[0].body
def test_process_comment_update_subscription(database, reddit):
subscriber_name = "Subscriber1"
author = database.get_or_add_user("Author1")
db_subreddit = database.get_or_add_subreddit("TestSub", enable_subreddit_if_new=True)
comment_id = reddit_test.random_id()
submission_id = reddit_test.random_id()
comment = RedditObject(
body=f"{static.TRIGGER_UPDATE}!",
author=subscriber_name,
id=comment_id,
link_id="t3_"+submission_id,
permalink=f"/r/test/comments/{submission_id}/_/{comment_id}/",
subreddit=db_subreddit.name
)
reddit.add_comment(comment)
reddit.add_submission(RedditObject(id=submission_id, subreddit=db_subreddit.name, author=author.name))
database.add_subscription(
Subscription(
subscriber=database.get_or_add_user(subscriber_name),
author=author,
subreddit=db_subreddit,
recurring=True
)
)
comments.process_comment(comment.get_pushshift_dict(), reddit, database)
assert len(comment.children) == 0
subscriptions = database.get_user_subscriptions_by_name(subscriber_name)
assert len(subscriptions) == 1
assert len(reddit.sent_messages) == 1
assert "I have updated your subscription type" in reddit.sent_messages[0].body
assert "next" in reddit.sent_messages[0].body
assert db_subreddit.name in reddit.sent_messages[0].body
def bulk_sub_to(database, subreddit_name, author_name, subscriber_names):
subreddit = database.get_or_add_subreddit(subreddit_name)
author = database.get_or_add_user(author_name)
for subscriber_name in subscriber_names:
user = database.get_or_add_user(subscriber_name)
database.add_subscription(
Subscription(
subscriber=user,
author=author,
subreddit=subreddit,
recurring=True
)
)
database.commit()
def test_update_incorrect_comments(database, reddit):
bulk_sub_to(database, "Subreddit1", "Author1", ["User1", "User2", "User3"])
bulk_sub_to(database, "Subreddit1", "Author2", ["User2", "User3"])
bulk_sub_to(database, "Subreddit2", "Author3", ["User3"])
submission1 = Submission(
submission_id=reddit_test.random_id(), time_created=utils.datetime_now(), author=database.get_or_add_user("Author1"),
subreddit=database.get_or_add_subreddit("Subreddit1"), permalink="")
database.add_submission(submission1)
submission2 = Submission(
submission_id=reddit_test.random_id(), time_created=utils.datetime_now(), author=database.get_or_add_user("Author2"),
subreddit=database.get_or_add_subreddit("Subreddit1"), permalink="")
database.add_submission(submission2)
submission3 = Submission(
submission_id=reddit_test.random_id(), time_created=utils.datetime_now(), author=database.get_or_add_user("Author3"),
subreddit=database.get_or_add_subreddit("Subreddit2"), permalink="")
database.add_submission(submission3)
comment1 = DbComment(
comment_id=reddit_test.random_id(), submission=submission1, subscriber=database.get_or_add_user("User1"),
author=database.get_or_add_user("Author1"), subreddit=database.get_or_add_subreddit("Subreddit1"),
recurring=True, current_count=1)
database.add_comment(comment1)
comment2 = DbComment(
comment_id=reddit_test.random_id(), submission=submission2, subscriber=database.get_or_add_user("User2"),
author=database.get_or_add_user("Author2"), subreddit=database.get_or_add_subreddit("Subreddit1"),
recurring=True, current_count=1)
database.add_comment(comment2)
comment3 = DbComment(
comment_id=reddit_test.random_id(), submission=submission3, subscriber=database.get_or_add_user("User3"),
author=database.get_or_add_user("Author3"), subreddit=database.get_or_add_subreddit("Subreddit2"),
recurring=True, current_count=1)
database.add_comment(comment3)
reddit_comment1 = RedditObject(
body="blank",
author=static.ACCOUNT_NAME,
id=comment1.comment_id,
link_id="t3_"+submission1.submission_id,
permalink=f"/r/test/comments/{submission1.submission_id}/_/{comment1.comment_id}/",
subreddit="Subreddit1"
)
reddit.add_comment(reddit_comment1)
reddit_comment2 = RedditObject(
body="blank",
author=static.ACCOUNT_NAME,
id=comment2.comment_id,
link_id="t3_"+submission2.submission_id,
permalink=f"/r/test/comments/{submission2.submission_id}/_/{comment2.comment_id}/",
subreddit="Subreddit1"
)
reddit.add_comment(reddit_comment2)
reddit_comment3 = RedditObject(
body="blank",
author=static.ACCOUNT_NAME,
id=comment3.comment_id,
link_id="t3_"+submission3.submission_id,
permalink=f"/r/test/comments/{submission3.submission_id}/_/{comment3.comment_id}/",
subreddit="Subreddit2"
)
reddit.add_comment(reddit_comment3)
comments.update_comments(reddit, database)
assert "3 others" in reddit_comment1.body
assert "2 others" in reddit_comment2.body
assert reddit_comment3.body == "blank"
|
# Copyright 2019 The TensorTrade Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
import pandas as pd
import numpy as np
from typing import Callable
from tensortrade.rewards import RewardStrategy
from tensortrade.trades import TradeType, Trade
class RiskAdjustedReturnStrategy(RewardStrategy):
"""A reward strategy that rewards the agent for increasing its net worth, while penalizing more volatile strategies.
"""
def __init__(self, return_algorithm: str = 'sharpe', risk_free_rate: float = 0., target_returns: float = 0.):
"""
Args:
return_algorithm (optional): The risk-adjusted return metric to use. Options are 'sharpe' and 'sortino'. Defaults to 'sharpe'.
risk_free_rate (optional): The risk free rate of returns to use for calculating metrics. Defaults to 0.
target_returns (optional): The target returns per period used in calculating the Sortino ratio. Defaults to 0.
"""
self._return_algorithm = self._return_algorithm_from_str(return_algorithm)
self._risk_free_rate = risk_free_rate
self._target_returns = target_returns
def _return_algorithm_from_str(self, algorithm_str: str) -> Callable[[pd.Series], float]:
if algorithm_str == 'sharpe':
return self._sharpe_ratio
elif algorithm_str == 'sortino':
return self._sortino_ratio
def _sharpe_ratio(self, returns: pd.Series) -> float:
"""Return the sharpe ratio for a given series of a returns.
https://en.wikipedia.org/wiki/Sharpe_ratio
"""
return (returns.mean() - self._risk_free_rate) / (returns.std() + 1E-9)
def _sortino_ratio(self, returns: pd.Series) -> float:
"""Return the Sortino ratio for a given series of returns.
https://en.wikipedia.org/wiki/Sortino_ratio
"""
# Only returns below the target contribute to downside deviation.
downside_returns = returns.copy()
downside_returns[returns >= self._target_returns] = 0
expected_return = returns.mean()
downside_std = np.sqrt((downside_returns ** 2).mean())
return (expected_return - self._risk_free_rate) / (downside_std + 1E-9)
def get_reward(self, current_step: int, trade: Trade) -> float:
"""Return the reward corresponding to the selected risk-adjusted return metric."""
returns = self._exchange.performance['net_worth'].diff()
risk_adjusted_return = self._return_algorithm(returns)
return risk_adjusted_return
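# A minimal usage sketch (not part of the original module): the two metrics can be
# sanity-checked on a toy return series. Inside a TensorTrade environment the strategy
# is normally attached to an exchange, so get_reward is not called directly here.
#
#   strategy = RiskAdjustedReturnStrategy(return_algorithm='sortino', target_returns=0.0)
#   toy_returns = pd.Series([0.01, -0.02, 0.03, 0.00, -0.01])
#   print(strategy._sharpe_ratio(toy_returns), strategy._sortino_ratio(toy_returns))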
|
from django.conf.urls import url
from . import views
app_name = "newsletter"
urlpatterns = [
url(r'^subscribe/(?P<user_pk>\d+)/$', views.subscribtion, name='subscribe'),
url(r'^unsubscribe/(?P<user_pk>\d+)/$', views.subscribtion, name='unsubscribe'),
]
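# Example reverse lookups for these routes (the user_pk value is illustrative; the final
# path also depends on where the project URLconf includes this app):
#
#   from django.urls import reverse
#   reverse('newsletter:subscribe', kwargs={'user_pk': 42})    # e.g. '/subscribe/42/'
#   reverse('newsletter:unsubscribe', kwargs={'user_pk': 42})  # e.g. '/unsubscribe/42/'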
|
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE file in the project root for
# full license information.
import gc
import inspect
import os
import weakref
import time
import logging
import importlib
logger = logging.getLogger(__name__)
logger.setLevel(level=logging.INFO)
def _run_garbage_collection():
"""
Collect everything until there's nothing more to collect
"""
sleep_time = 0.5
done = False
while not done:
collected = gc.collect(2)
logger.info("{} objects collected".format(collected))
if collected:
logger.info("Sleeping for {} seconds".format(sleep_time))
time.sleep(sleep_time)
else:
done = True
def _dump_referrers(obj):
referrers = gc.get_referrers(obj.weakref())
for referrer in referrers:
if isinstance(referrer, dict):
print(" dict: {}".format(referrer))
for sub_referrer in gc.get_referrers(referrer):
if sub_referrer != referrers:
if not inspect.ismodule(sub_referrer):
print(" used by: {}:{}".format(type(sub_referrer), sub_referrer))
elif not isinstance(referrer, type) and not inspect.ismodule(referrer):
print(" used by: {}:{}".format(type(referrer), referrer))
class RefObject(object):
"""
Object holding details on the leak of some tracked object
"""
def __init__(self, obj):
self.value = str(obj)
self.weakref = weakref.ref(obj)
def __repr__(self):
return self.value
def __eq__(self, obj):
return self.weakref == obj.weakref
def __ne__(self, obj):
return not self == obj
class TrackedModule(object):
def __init__(self, module_name):
self.module_name = module_name
mod = importlib.import_module(module_name)
self.path = os.path.dirname(inspect.getsourcefile(mod))
def is_module_object(self, obj):
if not isinstance(obj, BaseException):
try:
c = obj.__class__
source_file = inspect.getsourcefile(c)
except (TypeError, AttributeError):
pass
else:
if source_file and source_file.startswith(self.path):
return True
return False
class LeakTracker(object):
def __init__(self):
self.tracked_modules = []
self.previous_leaks = []
def add_tracked_module(self, module_name):
self.tracked_modules.append(TrackedModule(module_name))
def _get_all_tracked_objects(self):
"""
Query the garbage collector for a list of all objects that
are implemented in tracked libraries
"""
all = []
for obj in gc.get_objects():
if any([mod.is_module_object(obj) for mod in self.tracked_modules]):
source_file = inspect.getsourcefile(obj.__class__)
try:
all.append(RefObject(obj))
except TypeError:
logger.warning(
"Could not add {} from {} to leak list".format(obj.__class__, source_file)
)
return all
def _prune_previous_leaks_list(self):
"""
remove objects from our list of previous leaks if they've been collected
"""
new_previous_leaks = []
for obj in self.previous_leaks:
if obj.weakref():
new_previous_leaks.append(obj)
else:
logger.info(
"Object {} collected since last test. Removing from previous_leaks list.".format(
obj
)
)
logger.info(
"previous leaks pruned from {} items to {} items".format(
len(self.previous_leaks), len(new_previous_leaks)
)
)
self.previous_leaks = new_previous_leaks
def _filter_previous_leaks(self, all):
"""
Return a filtered leak list where all previously reported leaks have been removed.
"""
self._prune_previous_leaks_list()
new_list = []
for obj in all:
if obj not in self.previous_leaks:
new_list.append(obj)
else:
logger.info("Object {} previously reported".format(obj))
logger.info("active list pruned from {} items to {} items".format(len(all), len(new_list)))
return new_list
def set_baseline(self):
self.previous_leaks = self._get_all_tracked_objects()
def check_for_new_leaks(self):
"""
Get all tracked objects from the garbage collector. If any objects remain, list
them and assert so the test fails.
"""
_run_garbage_collection()
all_tracked_objects = self._get_all_tracked_objects()
all_tracked_objects = self._filter_previous_leaks(all_tracked_objects)
if len(all_tracked_objects):
logger.error("Test failure. {} objects have leaked:".format(len(all_tracked_objects)))
count = 0
for obj in all_tracked_objects:
count += 1
if count <= 100:
logger.error("LEAK: {}".format(obj))
_dump_referrers(obj)
self.previous_leaks.append(obj)
if count < len(all_tracked_objects):
logger.error("and {} more objects".format(len(all_tracked_objects) - count))
referrers = self.get_referrers(all_tracked_objects) # noqa: F841
assert False
else:
logger.info("No leaks")
def get_referrers(self, objects):
"""
Get all referrers for all objects as a way to see why objects are leaking.
Meant to be run inside a debugger, probably using pprint on the output
"""
all_referrers = []
index = 0
for obj in objects:
referrers = []
for ref in gc.get_referrers(obj.weakref()):
if type(ref) in [dict] or str(type(ref)) in ["<class 'cell'>"]:
referrers.append(ref)
else:
referrers.append(RefObject(ref))
all_referrers.append({"index": index, "obj": obj, "referrers": referrers})
index += 1
return all_referrers
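# A minimal usage sketch (module name and test hook are illustrative): track every object
# implemented in a package across a test run and fail if any survive garbage collection.
#
#   tracker = LeakTracker()
#   tracker.add_tracked_module("azure.iot.device")  # hypothetical package under test
#   tracker.set_baseline()
#   run_the_test()                                   # whatever exercises the package
#   tracker.check_for_new_leaks()                    # runs gc and asserts if objects remain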
|
#!/usr/bin/env python
import subprocess
from glob import glob
from random import shuffle
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--basic', action='store_true')
parser.add_argument('--api', action='store_true')
parser.add_argument('--single', action='store_true')
parser.add_argument('--travis', action='store_true')
parser.add_argument('-n', '--nb', nargs='?')
args = parser.parse_args()
api_root_dir = 'nglview/tests/notebooks/api/'
notebook_names_for_travis = [
'test_no_gui_demo.ipynb',
'test_add_structure_then_trajectory.ipynb',
'test_automatically_added_attributes_0.ipynb',
]
if args.travis:
notebook_names = notebook_names_for_travis
notebooks = [api_root_dir + notebook_name for notebook_name in notebook_names]
elif args.api:
notebooks = glob(api_root_dir + '/*.ipynb')
elif args.single:
notebooks = [args.nb]
else:
notebooks = ['nglview/tests/notebooks/dummy.ipynb',]
notebooks += (glob('nglview/tests/notebooks/*ipynb') +
glob('nglview/tests/notebooks/api/*ipynb'))
# shuffle(notebooks)
def get_cell_length(nb):
n_cells = 0
with open(nb) as fh:
for line in fh.readlines():
if 'cell_type' in line:
n_cells += 1
return n_cells
notebooks_with_cell_lengths = [(nb, 2*get_cell_length(nb)) for nb in notebooks]
head = """
module.exports = {
"""
body_template = """
"%s": function (browser) {
browser.openNotebook("%s");
browser.restartKernel(2000);
for ( var i = 0; i < %s; i++) {
browser.executeCell(i)
.pause(3000)
.cellHasError(i);
}
browser.end();
},
"""
tail = """
}
"""
if __name__ == '__main__':
all_notebooks = '\n'.join(body_template % (notebook, notebook, n_cells)
for (notebook, n_cells) in notebooks_with_cell_lengths)
fn = 'nglview/tests/js/test.js'
with open(fn, 'w') as fh:
fh.write(head + all_notebooks + tail)
|
import json
import boto3
import uuid
import time
dynamodb = boto3.client('dynamodb')
# Assumed placeholder -- set this to the name of the DynamoDB table that stores notifications.
DYNAMO_DB_TABLE_NAME = 'push-notifications'
def lambda_handler(push_notification_json, context):
"""Store a push notification in a database.
:param push_notification_json: dict representing the JSON of a push notification
:param context: not used
:return: '', indicating success
"""
# Retrieve the push notification version.
push_notification_version = push_notification_json['pushNotificationVersion']
# Retrieve the subscription's receiptId.
receipt_id = push_notification_json['subscription']['receiptId']
# Do something with the push notification we received.
# In this case, we store the notification as an item in a database.
dynamodb.put_item(
TableName=DYNAMO_DB_TABLE_NAME,
Item={
'uuid': { 'S': str(uuid.uuid4()) },
'timestamp': { 'N': str(int(time.time())) },
'receiptId': { 'S': receipt_id },
'pushNotificationVersion': { 'N': str(push_notification_version) },
'pushNotification': { 'S': json.dumps(push_notification_json)}
}
)
return ''
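# Example event shape for local testing (values are illustrative; a real invocation is
# delivered by the push-notification service, and DYNAMO_DB_TABLE_NAME must point at an
# existing table):
#
#   event = {
#       'pushNotificationVersion': 2,
#       'subscription': {'receiptId': 'example-receipt-id'},
#   }
#   lambda_handler(event, None)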
|
""" from https://github.com/keithito/tacotron """
'''
Defines the set of symbols used in text input to the model.
The default is a set of ASCII characters that works well for English or text that has
been run through Unidecode. For other data, you can modify _characters.
See TRAINING_DATA.md for details.
'''
# from text import cmudict
#
# _pad = '_'
# _punctuation = '!\'(),.:;? '
# _special = '-'
# _letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
#
# # Prepend "@" to ARPAbet symbols to ensure uniqueness (some are the same as uppercase letters):
# _arpabet = ['@' + s for s in cmudict.valid_symbols]
#
# # Export all symbols:
# symbols = [_pad] + list(_special) + list(_punctuation) + list(_letters) #+ _arpabet
#####################
_pad = '_'
_eos = '~'
label = 'phone' # pinyin phone
if label == 'phone':
_characters = [
'breath','cough','noise','smack','um','sil','sp1',
'a1','a2','a3','a4','a5','ai1','ai2','ai3','ai4','ai5',
'an1','an2','an3','an4','an5','ang1','ang2','ang3','ang4','ang5',
'ao1','ao2','ao3','ao4','ao5','b','c','ch','d','e1','e2','e3','e4','e5',
'ei1','ei2','ei3','ei4','ei5','en1','en2','en3','en4','en5','eng1','eng2',
'eng3','eng4','eng5','er1','er2','er3','er4','er5','f','g','h','i1','i2',
'i3','i4','i5','ia1','ia2','ia3','ia4','ia5','ian1','ian2','ian3','ian4','ian5',
'iang1','iang2','iang3','iang4','iang5','iao1','iao2','iao3','iao4','iao5',
'ie1','ie2','ie3','ie4','ie5','ii1','ii2','ii3','ii4','ii5','iii1','iii2',
'iii3','iii4','iii5','in1','in2','in3','in4','in5','ing1','ing2','ing3','ing4',
'ing5','iong1','iong2','iong3','iong4','iong5','iou1','iou2','iou3','iou4','iou5',
'j','k','l','m','n','o1','o2','o3','o4','o5','ong1','ong2','ong3','ong4','ong5',
'ou1','ou2','ou3','ou4','ou5','p','q','r','rr','s','sh','t','u1','u2','u3','u4','u5',
'ua1','ua2','ua3','ua4','ua5','uai1','uai2','uai3','uai4','uai5','uan1','uan2','uan3',
'uan4','uan5','uang1','uang2','uang3','uang4','uang5','uei1','uei2','uei3','uei4','uei5',
'uen1','uen2','uen3','uen4','uen5','uo1','uo2','uo3','uo4','uo5','v1','v2','v3','v4','v5',
'van1','van2','van3','van4','van5','ve1','ve2','ve3','ve4','ve5','vn1','vn2','vn3','vn4',
'vn5','w','x','y','z','zh',
',',',','!','!','。','.','?','?','、',':',':',';',';',
'#1','#2','#3','#4','#',' '
]
symbols = [_pad, _eos] + _characters
|
"""Helpers for Diviner data."""
from pathlib import Path
import numpy as np
import xarray as xr
import rasterio as rio
import roughness.helpers as rh
import roughness.config as cfg
DIVINER_WLS = [
7.8, # C3
8.25,
8.55,
(13 + 23) / 2,
(25 + 41) / 2,
(50 + 100) / 2,
(100 + 400) / 2, # C9
] # [microns]
def lev4hourly2xr(
fgrds,
ext=None,
savefile=None,
tres=None,
interp_method=None,
interp_wrap=False,
):
"""
Return xarray of Diviner lev4 hourly grd tiles with time resolution tres.
If interp_method: Interpolate missing values along tloc axis with method
specified (e.g. 'nearest', 'linear', 'cubic', see xr.interpolate_na methods).
Linear seems to produce fewest sharp artifacts.
If savefile: save result to netCDF (.nc) file for quick I/O into xarray,
e.g. with xr.open_dataarray(savefile).
Parameters
----------
fgrds (list of str): paths to Diviner lev4 hourly .grd tiles
tres (num): time resolution
interp_method (str): interpolation method (see xarray.interpolate_na)
interp_wrap (bool): interpolate around time axis (wrap around 0/24h)
ext (list): extent of ROI
savefile (str): Path to save result to netCDF (.nc) file
"""
if not tres:
# Assume 8 bands (ch3 - ch9 + tbol), diurnal [24 h], infer tres [hr]
tres = 24 / (len(fgrds) / 8) # [hr]
grids = []
bands = []
for i, fgrd in enumerate(fgrds):
_, band, ind = Path(fgrd).stem.split("-")
tloc = (int(ind) - 1) * tres # tloc in [0, 24) h
# Read in .grd and set its local time to tloc
grid = xr.open_rasterio(fgrds[i]).sel(band=1)
grid["tloc"] = tloc
grids.append(grid)
# Concatenate all local time of this band into single xr.DataArray
if tloc == 24 - tres:
diurnal_band = xr.concat(grids, dim="tloc")
diurnal_band["band"] = band
bands.append(diurnal_band)
grids = []
# Concatenate all bands into single 4D xr.DataArray (lon, lat, tloc, band)
out = xr.concat(bands, dim="band")
out = out.rename("Temperature")
out = out.sortby(
"y", ascending=False
) # TODO: bug doesn't always fix yflip
# Convert x, y coords to lat, lon
if ext is not None:
lon, lat = rh.xy2lonlat_coords(out.x, out.y, ext)
out = out.rename({"x": "lon", "y": "lat"})
out = out.assign_coords(lon=lon, lat=lat)
if interp_method is not None:
# Interpolate missing points on tloc
if interp_wrap:
# Cludgey handling of tloc edges. Wrap full dataset around midnight then interp
postmidnight = out.sel(tloc=slice(0, 12 - tres))
postmidnight["tloc"] = np.arange(24, 36, tres)
premidnight = out.sel(tloc=slice(12, 24 - tres))
premidnight["tloc"] = np.arange(-12, 0, tres)
out = xr.concat([premidnight, out, postmidnight], dim="tloc")
out = out.interpolate_na("tloc", method=interp_method).sel(
tloc=slice(0, 24 - tres)
)
if savefile is not None:
out.to_netcdf(savefile)
return out
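# Hypothetical call (paths and extent are placeholders): build a (band, tloc, lat, lon)
# temperature cube from a directory of Diviner lev4 hourly tiles and cache it to netCDF.
#
#   fgrds = sorted(str(p) for p in Path("/data/diviner/roi").rglob("*.grd"))
#   T = lev4hourly2xr(fgrds, ext=(-180, 180, -90, 90), savefile="T.nc",
#                     interp_method="linear", interp_wrap=True)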
def xarr2geotiff(xarr, savefile, crs=cfg.MOON2000_ESRI):
"""Write 3D xarray (bands, y, x) to geotiff with rasterio."""
count, height, width = xarr.shape
band_indices = np.arange(count) + 1
transform = None
if "transform" in xarr.attrs:
transform = tuple(xarr.attrs["transform"])
profile = {
"driver": "GTiff",
"height": height,
"width": width,
"count": count,
"dtype": str(xarr.dtype),
"transform": transform,
"crs": crs,
}
with rio.open(savefile, "w", **profile) as dst:
dst.write(xarr.values, band_indices)
for i in dst.indexes:
band_name = str(xarr.band[i - 1].values)
dst.update_tags(i, band_name=band_name)
if Path(savefile).exists():
print("Wrote to", savefile)
else:
print("Failed to write to", savefile)
def fit_poly_daytime(Tday, deg=2):
"""
Return polynomial fit of order deg to daytime temperature data.
"""
tloc = np.linspace(6, 18, len(Tday) + 1)[:-1]
nan = np.isnan(Tday)
pfit = np.polyfit(tloc[~nan], Tday[~nan], deg)
fit_func = np.poly1d(pfit)
fit = fit_func(tloc)
return fit
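# Synthetic example (values are made up): fit a parabola to a noisy daytime temperature
# curve sampled every half hour between 6h and 18h.
#
#   tloc = np.linspace(6, 18, 25)[:-1]
#   Tday = 380 - 2 * (tloc - 12) ** 2 + np.random.normal(0, 1, tloc.size)
#   Tfit = fit_poly_daytime(Tday, deg=2)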
def smooth_daytime(T_xarr, savefile=None):
"""
Return smoothed daytime T from 6 AM to 6 PM with 2nd order polyfit.
"""
tres = T_xarr.tloc[1].values - T_xarr.tloc[0].values
Tday = T_xarr.sel(tloc=np.arange(6, 18, tres))
Tsmooth = xr.apply_ufunc(
fit_poly_daytime,
Tday,
input_core_dims=[["tloc"]],
output_core_dims=[["tloc"]],
vectorize=True,
)
if savefile:
Tsmooth.to_netcdf(savefile)
return Tsmooth
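# Illustrative follow-on (assumes T is a diurnal cube with a tloc coordinate covering
# 6h-18h, like the one sketched above): smooth the daytime portion and cache it.
#
#   T_day_smooth = smooth_daytime(T, savefile="T_smooth.nc")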
def add_wls_diviner(xarr):
"""
Return xarr with wavelength coordinate (drop tbol).
"""
out = xarr.where(xarr.band != "tb", drop=True)
out = out.assign_coords(wavelength=("band", DIVINER_WLS))
return out
def load_div_lev4(
roi,
ext,
savefile="T.nc",
smoothday=False,
invert_y=False,
load_cached=True,
divdir=cfg.DATA_DIR_DIVINER,
):
"""
Return Diviner lev4 data as xarray.
"""
roi_str = roi.replace("'", "_").lower()
savepath = Path(divdir) / roi_str / savefile
if load_cached and savepath.exists():
return xr.open_dataarray(savepath)
print("Loading Diviner lev4 data for", roi)
dirpath = Path(divdir) / roi_str
saveraw = None if smoothday else savepath
fgrds = [f.as_posix() for f in Path(dirpath).rglob("*.grd")]
out = lev4hourly2xr(fgrds, ext, saveraw, interp_method=None)
if smoothday:
T_smooth = smooth_daytime(out, savepath)
out = T_smooth
# Sometimes Diviner data inverted about y - only need to fix once
if invert_y:
out.close()  # release the cached file handle so it can be rewritten below
tmp = xr.load_dataarray(savepath) # load imports file and closes it
tmp = tmp.assign_coords(lat=tmp.lat[::-1]) # invert lat
tmp.to_netcdf(savepath)
out = xr.open_dataarray(savepath)
return out
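# Hypothetical end-to-end call (ROI name, extent, and directory layout are placeholders):
# load cached Diviner lev4 temperatures for a region, smoothing daytime values on first read.
#
#   T = load_div_lev4("88n0e", ext=(-10, 10, 85, 90), smoothday=True,
#                     divdir=cfg.DATA_DIR_DIVINER)
#   T_wls = add_wls_diviner(T)  # attach channel wavelengths, dropping tbol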
|
import string
import unittest
from email import _header_value_parser as parser
from email import errors
from email import policy
from test.test_email import TestEmailBase, parameterize
class TestTokens(TestEmailBase):
# EWWhiteSpaceTerminal
def test_EWWhiteSpaceTerminal(self):
x = parser.EWWhiteSpaceTerminal(' \t', 'fws')
self.assertEqual(x, ' \t')
self.assertEqual(str(x), '')
self.assertEqual(x.value, '')
self.assertEqual(x.token_type, 'fws')
class TestParserMixin:
def _assert_results(self, tl, rest, string, value, defects, remainder,
comments=None):
self.assertEqual(str(tl), string)
self.assertEqual(tl.value, value)
self.assertDefectsEqual(tl.all_defects, defects)
self.assertEqual(rest, remainder)
if comments is not None:
self.assertEqual(tl.comments, comments)
def _test_get_x(self, method, source, string, value, defects,
remainder, comments=None):
tl, rest = method(source)
self._assert_results(tl, rest, string, value, defects, remainder,
comments=None)
return tl
def _test_parse_x(self, method, input, string, value, defects,
comments=None):
tl = method(input)
self._assert_results(tl, '', string, value, defects, '', comments)
return tl
class TestParser(TestParserMixin, TestEmailBase):
# _wsp_splitter
rfc_printable_ascii = bytes(range(33, 127)).decode('ascii')
rfc_atext_chars = (string.ascii_letters + string.digits +
"!#$%&\'*+-/=?^_`{}|~")
rfc_dtext_chars = rfc_printable_ascii.translate(str.maketrans('','',r'\[]'))
def test__wsp_splitter_one_word(self):
self.assertEqual(parser._wsp_splitter('foo', 1), ['foo'])
def test__wsp_splitter_two_words(self):
self.assertEqual(parser._wsp_splitter('foo def', 1),
['foo', ' ', 'def'])
def test__wsp_splitter_ws_runs(self):
self.assertEqual(parser._wsp_splitter('foo \t def jik', 1),
['foo', ' \t ', 'def jik'])
# get_fws
def test_get_fws_only(self):
fws = self._test_get_x(parser.get_fws, ' \t ', ' \t ', ' ', [], '')
self.assertEqual(fws.token_type, 'fws')
def test_get_fws_space(self):
self._test_get_x(parser.get_fws, ' foo', ' ', ' ', [], 'foo')
def test_get_fws_ws_run(self):
self._test_get_x(parser.get_fws, ' \t foo ', ' \t ', ' ', [], 'foo ')
# get_encoded_word
def test_get_encoded_word_missing_start_raises(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_encoded_word('abc')
def test_get_encoded_word_missing_end_raises(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_encoded_word('=?abc')
def test_get_encoded_word_missing_middle_raises(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_encoded_word('=?abc?=')
def test_get_encoded_word_valid_ew(self):
self._test_get_x(parser.get_encoded_word,
'=?us-ascii?q?this_is_a_test?= bird',
'this is a test',
'this is a test',
[],
' bird')
def test_get_encoded_word_internal_spaces(self):
self._test_get_x(parser.get_encoded_word,
'=?us-ascii?q?this is a test?= bird',
'this is a test',
'this is a test',
[errors.InvalidHeaderDefect],
' bird')
def test_get_encoded_word_gets_first(self):
self._test_get_x(parser.get_encoded_word,
'=?us-ascii?q?first?= =?utf-8?q?second?=',
'first',
'first',
[],
' =?utf-8?q?second?=')
def test_get_encoded_word_gets_first_even_if_no_space(self):
self._test_get_x(parser.get_encoded_word,
'=?us-ascii?q?first?==?utf-8?q?second?=',
'first',
'first',
[],
'=?utf-8?q?second?=')
def test_get_encoded_word_sets_extra_attributes(self):
ew = self._test_get_x(parser.get_encoded_word,
'=?us-ascii*jive?q?first_second?=',
'first second',
'first second',
[],
'')
self.assertEqual(ew.charset, 'us-ascii')
self.assertEqual(ew.lang, 'jive')
def test_get_encoded_word_lang_default_is_blank(self):
ew = self._test_get_x(parser.get_encoded_word,
'=?us-ascii?q?first_second?=',
'first second',
'first second',
[],
'')
self.assertEqual(ew.charset, 'us-ascii')
self.assertEqual(ew.lang, '')
def test_get_encoded_word_non_printable_defect(self):
self._test_get_x(parser.get_encoded_word,
'=?us-ascii?q?first\x02second?=',
'first\x02second',
'first\x02second',
[errors.NonPrintableDefect],
'')
def test_get_encoded_word_leading_internal_space(self):
self._test_get_x(parser.get_encoded_word,
'=?us-ascii?q?=20foo?=',
' foo',
' foo',
[],
'')
def test_get_encoded_word_quopri_utf_escape_follows_cte(self):
# Issue 18044
self._test_get_x(parser.get_encoded_word,
'=?utf-8?q?=C3=89ric?=',
'Éric',
'Éric',
[],
'')
# get_unstructured
def _get_unst(self, value):
token = parser.get_unstructured(value)
return token, ''
def test_get_unstructured_null(self):
self._test_get_x(self._get_unst, '', '', '', [], '')
def test_get_unstructured_one_word(self):
self._test_get_x(self._get_unst, 'foo', 'foo', 'foo', [], '')
def test_get_unstructured_normal_phrase(self):
self._test_get_x(self._get_unst, 'foo bar bird',
'foo bar bird',
'foo bar bird',
[],
'')
def test_get_unstructured_normal_phrase_with_whitespace(self):
self._test_get_x(self._get_unst, 'foo \t bar bird',
'foo \t bar bird',
'foo bar bird',
[],
'')
def test_get_unstructured_leading_whitespace(self):
self._test_get_x(self._get_unst, ' foo bar',
' foo bar',
' foo bar',
[],
'')
def test_get_unstructured_trailing_whitespace(self):
self._test_get_x(self._get_unst, 'foo bar ',
'foo bar ',
'foo bar ',
[],
'')
def test_get_unstructured_leading_and_trailing_whitespace(self):
self._test_get_x(self._get_unst, ' foo bar ',
' foo bar ',
' foo bar ',
[],
'')
def test_get_unstructured_one_valid_ew_no_ws(self):
self._test_get_x(self._get_unst, '=?us-ascii?q?bar?=',
'bar',
'bar',
[],
'')
def test_get_unstructured_one_ew_trailing_ws(self):
self._test_get_x(self._get_unst, '=?us-ascii?q?bar?= ',
'bar ',
'bar ',
[],
'')
def test_get_unstructured_one_valid_ew_trailing_text(self):
self._test_get_x(self._get_unst, '=?us-ascii?q?bar?= bird',
'bar bird',
'bar bird',
[],
'')
def test_get_unstructured_phrase_with_ew_in_middle_of_text(self):
self._test_get_x(self._get_unst, 'foo =?us-ascii?q?bar?= bird',
'foo bar bird',
'foo bar bird',
[],
'')
def test_get_unstructured_phrase_with_two_ew(self):
self._test_get_x(self._get_unst,
'foo =?us-ascii?q?bar?= =?us-ascii?q?bird?=',
'foo barbird',
'foo barbird',
[],
'')
def test_get_unstructured_phrase_with_two_ew_trailing_ws(self):
self._test_get_x(self._get_unst,
'foo =?us-ascii?q?bar?= =?us-ascii?q?bird?= ',
'foo barbird ',
'foo barbird ',
[],
'')
def test_get_unstructured_phrase_with_ew_with_leading_ws(self):
self._test_get_x(self._get_unst,
' =?us-ascii?q?bar?=',
' bar',
' bar',
[],
'')
def test_get_unstructured_phrase_with_two_ew_extra_ws(self):
self._test_get_x(self._get_unst,
'foo =?us-ascii?q?bar?= \t =?us-ascii?q?bird?=',
'foo barbird',
'foo barbird',
[],
'')
def test_get_unstructured_two_ew_extra_ws_trailing_text(self):
self._test_get_x(self._get_unst,
'=?us-ascii?q?test?= =?us-ascii?q?foo?= val',
'testfoo val',
'testfoo val',
[],
'')
def test_get_unstructured_ew_with_internal_ws(self):
self._test_get_x(self._get_unst,
'=?iso-8859-1?q?hello=20world?=',
'hello world',
'hello world',
[],
'')
def test_get_unstructured_ew_with_internal_leading_ws(self):
self._test_get_x(self._get_unst,
' =?us-ascii?q?=20test?= =?us-ascii?q?=20foo?= val',
' test foo val',
' test foo val',
[],
'')
def test_get_unstructured_invalid_ew(self):
self._test_get_x(self._get_unst,
'=?test val',
'=?test val',
'=?test val',
[],
'')
def test_get_unstructured_undecodable_bytes(self):
self._test_get_x(self._get_unst,
b'test \xACfoo val'.decode('ascii', 'surrogateescape'),
'test \uDCACfoo val',
'test \uDCACfoo val',
[errors.UndecodableBytesDefect],
'')
def test_get_unstructured_undecodable_bytes_in_EW(self):
self._test_get_x(self._get_unst,
(b'=?us-ascii?q?=20test?= =?us-ascii?q?=20\xACfoo?='
b' val').decode('ascii', 'surrogateescape'),
' test \uDCACfoo val',
' test \uDCACfoo val',
[errors.UndecodableBytesDefect]*2,
'')
def test_get_unstructured_missing_base64_padding(self):
self._test_get_x(self._get_unst,
'=?utf-8?b?dmk?=',
'vi',
'vi',
[errors.InvalidBase64PaddingDefect],
'')
def test_get_unstructured_invalid_base64_character(self):
self._test_get_x(self._get_unst,
'=?utf-8?b?dm\x01k===?=',
'vi',
'vi',
[errors.InvalidBase64CharactersDefect],
'')
def test_get_unstructured_invalid_base64_character_and_bad_padding(self):
self._test_get_x(self._get_unst,
'=?utf-8?b?dm\x01k?=',
'vi',
'vi',
[errors.InvalidBase64CharactersDefect,
errors.InvalidBase64PaddingDefect],
'')
def test_get_unstructured_invalid_base64_length(self):
# bpo-27397: Return the encoded string since there's no way to decode.
self._test_get_x(self._get_unst,
'=?utf-8?b?abcde?=',
'abcde',
'abcde',
[errors.InvalidBase64LengthDefect],
'')
def test_get_unstructured_no_whitespace_between_ews(self):
self._test_get_x(self._get_unst,
'=?utf-8?q?foo?==?utf-8?q?bar?=',
'foobar',
'foobar',
[errors.InvalidHeaderDefect],
'')
# get_qp_ctext
def test_get_qp_ctext_only(self):
ptext = self._test_get_x(parser.get_qp_ctext,
'foobar', 'foobar', ' ', [], '')
self.assertEqual(ptext.token_type, 'ptext')
def test_get_qp_ctext_all_printables(self):
with_qp = self.rfc_printable_ascii.replace('\\', '\\\\')
with_qp = with_qp.replace('(', r'\(')
with_qp = with_qp.replace(')', r'\)')
ptext = self._test_get_x(parser.get_qp_ctext,
with_qp, self.rfc_printable_ascii, ' ', [], '')
def test_get_qp_ctext_two_words_gets_first(self):
self._test_get_x(parser.get_qp_ctext,
'foo de', 'foo', ' ', [], ' de')
def test_get_qp_ctext_following_wsp_preserved(self):
self._test_get_x(parser.get_qp_ctext,
'foo \t\tde', 'foo', ' ', [], ' \t\tde')
def test_get_qp_ctext_up_to_close_paren_only(self):
self._test_get_x(parser.get_qp_ctext,
'foo)', 'foo', ' ', [], ')')
def test_get_qp_ctext_wsp_before_close_paren_preserved(self):
self._test_get_x(parser.get_qp_ctext,
'foo )', 'foo', ' ', [], ' )')
def test_get_qp_ctext_close_paren_mid_word(self):
self._test_get_x(parser.get_qp_ctext,
'foo)bar', 'foo', ' ', [], ')bar')
def test_get_qp_ctext_up_to_open_paren_only(self):
self._test_get_x(parser.get_qp_ctext,
'foo(', 'foo', ' ', [], '(')
def test_get_qp_ctext_wsp_before_open_paren_preserved(self):
self._test_get_x(parser.get_qp_ctext,
'foo (', 'foo', ' ', [], ' (')
def test_get_qp_ctext_open_paren_mid_word(self):
self._test_get_x(parser.get_qp_ctext,
'foo(bar', 'foo', ' ', [], '(bar')
def test_get_qp_ctext_non_printables(self):
ptext = self._test_get_x(parser.get_qp_ctext,
'foo\x00bar)', 'foo\x00bar', ' ',
[errors.NonPrintableDefect], ')')
self.assertEqual(ptext.defects[0].non_printables[0], '\x00')
# get_qcontent
def test_get_qcontent_only(self):
ptext = self._test_get_x(parser.get_qcontent,
'foobar', 'foobar', 'foobar', [], '')
self.assertEqual(ptext.token_type, 'ptext')
def test_get_qcontent_all_printables(self):
with_qp = self.rfc_printable_ascii.replace('\\', '\\\\')
with_qp = with_qp. replace('"', r'\"')
ptext = self._test_get_x(parser.get_qcontent, with_qp,
self.rfc_printable_ascii,
self.rfc_printable_ascii, [], '')
def test_get_qcontent_two_words_gets_first(self):
self._test_get_x(parser.get_qcontent,
'foo de', 'foo', 'foo', [], ' de')
def test_get_qcontent_following_wsp_preserved(self):
self._test_get_x(parser.get_qcontent,
'foo \t\tde', 'foo', 'foo', [], ' \t\tde')
def test_get_qcontent_up_to_dquote_only(self):
self._test_get_x(parser.get_qcontent,
'foo"', 'foo', 'foo', [], '"')
def test_get_qcontent_wsp_before_close_paren_preserved(self):
self._test_get_x(parser.get_qcontent,
'foo "', 'foo', 'foo', [], ' "')
def test_get_qcontent_close_paren_mid_word(self):
self._test_get_x(parser.get_qcontent,
'foo"bar', 'foo', 'foo', [], '"bar')
def test_get_qcontent_non_printables(self):
ptext = self._test_get_x(parser.get_qcontent,
'foo\x00fg"', 'foo\x00fg', 'foo\x00fg',
[errors.NonPrintableDefect], '"')
self.assertEqual(ptext.defects[0].non_printables[0], '\x00')
# get_atext
def test_get_atext_only(self):
atext = self._test_get_x(parser.get_atext,
'foobar', 'foobar', 'foobar', [], '')
self.assertEqual(atext.token_type, 'atext')
def test_get_atext_all_atext(self):
atext = self._test_get_x(parser.get_atext, self.rfc_atext_chars,
self.rfc_atext_chars,
self.rfc_atext_chars, [], '')
def test_get_atext_two_words_gets_first(self):
self._test_get_x(parser.get_atext,
'foo bar', 'foo', 'foo', [], ' bar')
def test_get_atext_following_wsp_preserved(self):
self._test_get_x(parser.get_atext,
'foo \t\tbar', 'foo', 'foo', [], ' \t\tbar')
def test_get_atext_up_to_special(self):
self._test_get_x(parser.get_atext,
'foo@bar', 'foo', 'foo', [], '@bar')
def test_get_atext_non_printables(self):
atext = self._test_get_x(parser.get_atext,
'foo\x00bar(', 'foo\x00bar', 'foo\x00bar',
[errors.NonPrintableDefect], '(')
self.assertEqual(atext.defects[0].non_printables[0], '\x00')
# get_bare_quoted_string
def test_get_bare_quoted_string_only(self):
bqs = self._test_get_x(parser.get_bare_quoted_string,
'"foo"', '"foo"', 'foo', [], '')
self.assertEqual(bqs.token_type, 'bare-quoted-string')
def test_get_bare_quoted_string_must_start_with_dquote(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_bare_quoted_string('foo"')
with self.assertRaises(errors.HeaderParseError):
parser.get_bare_quoted_string(' "foo"')
def test_get_bare_quoted_string_only_quotes(self):
self._test_get_x(parser.get_bare_quoted_string,
'""', '""', '', [], '')
def test_get_bare_quoted_string_following_wsp_preserved(self):
self._test_get_x(parser.get_bare_quoted_string,
'"foo"\t bar', '"foo"', 'foo', [], '\t bar')
def test_get_bare_quoted_string_multiple_words(self):
self._test_get_x(parser.get_bare_quoted_string,
'"foo bar moo"', '"foo bar moo"', 'foo bar moo', [], '')
def test_get_bare_quoted_string_multiple_words_wsp_preserved(self):
self._test_get_x(parser.get_bare_quoted_string,
'" foo moo\t"', '" foo moo\t"', ' foo moo\t', [], '')
def test_get_bare_quoted_string_end_dquote_mid_word(self):
self._test_get_x(parser.get_bare_quoted_string,
'"foo"bar', '"foo"', 'foo', [], 'bar')
def test_get_bare_quoted_string_quoted_dquote(self):
self._test_get_x(parser.get_bare_quoted_string,
r'"foo\"in"a', r'"foo\"in"', 'foo"in', [], 'a')
def test_get_bare_quoted_string_non_printables(self):
self._test_get_x(parser.get_bare_quoted_string,
'"a\x01a"', '"a\x01a"', 'a\x01a',
[errors.NonPrintableDefect], '')
def test_get_bare_quoted_string_no_end_dquote(self):
self._test_get_x(parser.get_bare_quoted_string,
'"foo', '"foo"', 'foo',
[errors.InvalidHeaderDefect], '')
self._test_get_x(parser.get_bare_quoted_string,
'"foo ', '"foo "', 'foo ',
[errors.InvalidHeaderDefect], '')
def test_get_bare_quoted_string_empty_quotes(self):
self._test_get_x(parser.get_bare_quoted_string,
'""', '""', '', [], '')
# Issue 16983: apply postel's law to some bad encoding.
def test_encoded_word_inside_quotes(self):
self._test_get_x(parser.get_bare_quoted_string,
'"=?utf-8?Q?not_really_valid?="',
'"not really valid"',
'not really valid',
[errors.InvalidHeaderDefect],
'')
# get_comment
def test_get_comment_only(self):
comment = self._test_get_x(parser.get_comment,
'(comment)', '(comment)', ' ', [], '', ['comment'])
self.assertEqual(comment.token_type, 'comment')
def test_get_comment_must_start_with_paren(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_comment('foo"')
with self.assertRaises(errors.HeaderParseError):
parser.get_comment(' (foo"')
def test_get_comment_following_wsp_preserved(self):
self._test_get_x(parser.get_comment,
'(comment) \t', '(comment)', ' ', [], ' \t', ['comment'])
def test_get_comment_multiple_words(self):
self._test_get_x(parser.get_comment,
'(foo bar) \t', '(foo bar)', ' ', [], ' \t', ['foo bar'])
def test_get_comment_multiple_words_wsp_preserved(self):
self._test_get_x(parser.get_comment,
'( foo bar\t ) \t', '( foo bar\t )', ' ', [], ' \t',
[' foo bar\t '])
def test_get_comment_end_paren_mid_word(self):
self._test_get_x(parser.get_comment,
'(foo)bar', '(foo)', ' ', [], 'bar', ['foo'])
def test_get_comment_quoted_parens(self):
self._test_get_x(parser.get_comment,
r'(foo\) \(\)bar)', r'(foo\) \(\)bar)', ' ', [], '', ['foo) ()bar'])
def test_get_comment_non_printable(self):
self._test_get_x(parser.get_comment,
'(foo\x7Fbar)', '(foo\x7Fbar)', ' ',
[errors.NonPrintableDefect], '', ['foo\x7Fbar'])
def test_get_comment_no_end_paren(self):
self._test_get_x(parser.get_comment,
'(foo bar', '(foo bar)', ' ',
[errors.InvalidHeaderDefect], '', ['foo bar'])
self._test_get_x(parser.get_comment,
'(foo bar ', '(foo bar )', ' ',
[errors.InvalidHeaderDefect], '', ['foo bar '])
def test_get_comment_nested_comment(self):
comment = self._test_get_x(parser.get_comment,
'(foo(bar))', '(foo(bar))', ' ', [], '', ['foo(bar)'])
self.assertEqual(comment[1].content, 'bar')
def test_get_comment_nested_comment_wsp(self):
comment = self._test_get_x(parser.get_comment,
'(foo ( bar ) )', '(foo ( bar ) )', ' ', [], '', ['foo ( bar ) '])
self.assertEqual(comment[2].content, ' bar ')
def test_get_comment_empty_comment(self):
self._test_get_x(parser.get_comment,
'()', '()', ' ', [], '', [''])
def test_get_comment_multiple_nesting(self):
comment = self._test_get_x(parser.get_comment,
'(((((foo)))))', '(((((foo)))))', ' ', [], '', ['((((foo))))'])
for i in range(4, 0, -1):
self.assertEqual(comment[0].content, '('*(i-1)+'foo'+')'*(i-1))
comment = comment[0]
self.assertEqual(comment.content, 'foo')
def test_get_comment_missing_end_of_nesting(self):
self._test_get_x(parser.get_comment,
'(((((foo)))', '(((((foo)))))', ' ',
[errors.InvalidHeaderDefect]*2, '', ['((((foo))))'])
def test_get_comment_qs_in_nested_comment(self):
comment = self._test_get_x(parser.get_comment,
r'(foo (b\)))', r'(foo (b\)))', ' ', [], '', [r'foo (b\))'])
self.assertEqual(comment[2].content, 'b)')
# get_cfws
def test_get_cfws_only_ws(self):
cfws = self._test_get_x(parser.get_cfws,
' \t \t', ' \t \t', ' ', [], '', [])
self.assertEqual(cfws.token_type, 'cfws')
def test_get_cfws_only_comment(self):
cfws = self._test_get_x(parser.get_cfws,
'(foo)', '(foo)', ' ', [], '', ['foo'])
self.assertEqual(cfws[0].content, 'foo')
def test_get_cfws_only_mixed(self):
cfws = self._test_get_x(parser.get_cfws,
' (foo ) ( bar) ', ' (foo ) ( bar) ', ' ', [], '',
['foo ', ' bar'])
self.assertEqual(cfws[1].content, 'foo ')
self.assertEqual(cfws[3].content, ' bar')
def test_get_cfws_ends_at_non_leader(self):
cfws = self._test_get_x(parser.get_cfws,
'(foo) bar', '(foo) ', ' ', [], 'bar', ['foo'])
self.assertEqual(cfws[0].content, 'foo')
def test_get_cfws_ends_at_non_printable(self):
cfws = self._test_get_x(parser.get_cfws,
'(foo) \x07', '(foo) ', ' ', [], '\x07', ['foo'])
self.assertEqual(cfws[0].content, 'foo')
def test_get_cfws_non_printable_in_comment(self):
cfws = self._test_get_x(parser.get_cfws,
'(foo \x07) "test"', '(foo \x07) ', ' ',
[errors.NonPrintableDefect], '"test"', ['foo \x07'])
self.assertEqual(cfws[0].content, 'foo \x07')
def test_get_cfws_header_ends_in_comment(self):
cfws = self._test_get_x(parser.get_cfws,
' (foo ', ' (foo )', ' ',
[errors.InvalidHeaderDefect], '', ['foo '])
self.assertEqual(cfws[1].content, 'foo ')
def test_get_cfws_multiple_nested_comments(self):
cfws = self._test_get_x(parser.get_cfws,
'(foo (bar)) ((a)(a))', '(foo (bar)) ((a)(a))', ' ', [],
'', ['foo (bar)', '(a)(a)'])
self.assertEqual(cfws[0].comments, ['foo (bar)'])
self.assertEqual(cfws[2].comments, ['(a)(a)'])
# get_quoted_string
def test_get_quoted_string_only(self):
qs = self._test_get_x(parser.get_quoted_string,
'"bob"', '"bob"', 'bob', [], '')
self.assertEqual(qs.token_type, 'quoted-string')
self.assertEqual(qs.quoted_value, '"bob"')
self.assertEqual(qs.content, 'bob')
def test_get_quoted_string_with_wsp(self):
qs = self._test_get_x(parser.get_quoted_string,
'\t "bob" ', '\t "bob" ', ' bob ', [], '')
self.assertEqual(qs.quoted_value, ' "bob" ')
self.assertEqual(qs.content, 'bob')
def test_get_quoted_string_with_comments_and_wsp(self):
qs = self._test_get_x(parser.get_quoted_string,
' (foo) "bob"(bar)', ' (foo) "bob"(bar)', ' bob ', [], '')
self.assertEqual(qs[0][1].content, 'foo')
self.assertEqual(qs[2][0].content, 'bar')
self.assertEqual(qs.content, 'bob')
self.assertEqual(qs.quoted_value, ' "bob" ')
def test_get_quoted_string_with_multiple_comments(self):
qs = self._test_get_x(parser.get_quoted_string,
' (foo) (bar) "bob"(bird)', ' (foo) (bar) "bob"(bird)', ' bob ',
[], '')
self.assertEqual(qs[0].comments, ['foo', 'bar'])
self.assertEqual(qs[2].comments, ['bird'])
self.assertEqual(qs.content, 'bob')
self.assertEqual(qs.quoted_value, ' "bob" ')
def test_get_quoted_string_non_printable_in_comment(self):
qs = self._test_get_x(parser.get_quoted_string,
' (\x0A) "bob"', ' (\x0A) "bob"', ' bob',
[errors.NonPrintableDefect], '')
self.assertEqual(qs[0].comments, ['\x0A'])
self.assertEqual(qs.content, 'bob')
self.assertEqual(qs.quoted_value, ' "bob"')
def test_get_quoted_string_non_printable_in_qcontent(self):
qs = self._test_get_x(parser.get_quoted_string,
' (a) "a\x0B"', ' (a) "a\x0B"', ' a\x0B',
[errors.NonPrintableDefect], '')
self.assertEqual(qs[0].comments, ['a'])
self.assertEqual(qs.content, 'a\x0B')
self.assertEqual(qs.quoted_value, ' "a\x0B"')
def test_get_quoted_string_internal_ws(self):
qs = self._test_get_x(parser.get_quoted_string,
' (a) "foo bar "', ' (a) "foo bar "', ' foo bar ',
[], '')
self.assertEqual(qs[0].comments, ['a'])
self.assertEqual(qs.content, 'foo bar ')
self.assertEqual(qs.quoted_value, ' "foo bar "')
def test_get_quoted_string_header_ends_in_comment(self):
qs = self._test_get_x(parser.get_quoted_string,
' (a) "bob" (a', ' (a) "bob" (a)', ' bob ',
[errors.InvalidHeaderDefect], '')
self.assertEqual(qs[0].comments, ['a'])
self.assertEqual(qs[2].comments, ['a'])
self.assertEqual(qs.content, 'bob')
self.assertEqual(qs.quoted_value, ' "bob" ')
def test_get_quoted_string_header_ends_in_qcontent(self):
qs = self._test_get_x(parser.get_quoted_string,
' (a) "bob', ' (a) "bob"', ' bob',
[errors.InvalidHeaderDefect], '')
self.assertEqual(qs[0].comments, ['a'])
self.assertEqual(qs.content, 'bob')
self.assertEqual(qs.quoted_value, ' "bob"')
def test_get_quoted_string_no_quoted_string(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_quoted_string(' (ab) xyz')
def test_get_quoted_string_qs_ends_at_noncfws(self):
qs = self._test_get_x(parser.get_quoted_string,
'\t "bob" fee', '\t "bob" ', ' bob ', [], 'fee')
self.assertEqual(qs.content, 'bob')
self.assertEqual(qs.quoted_value, ' "bob" ')
# get_atom
def test_get_atom_only(self):
atom = self._test_get_x(parser.get_atom,
'bob', 'bob', 'bob', [], '')
self.assertEqual(atom.token_type, 'atom')
def test_get_atom_with_wsp(self):
self._test_get_x(parser.get_atom,
'\t bob ', '\t bob ', ' bob ', [], '')
def test_get_atom_with_comments_and_wsp(self):
atom = self._test_get_x(parser.get_atom,
' (foo) bob(bar)', ' (foo) bob(bar)', ' bob ', [], '')
self.assertEqual(atom[0][1].content, 'foo')
self.assertEqual(atom[2][0].content, 'bar')
def test_get_atom_with_multiple_comments(self):
atom = self._test_get_x(parser.get_atom,
' (foo) (bar) bob(bird)', ' (foo) (bar) bob(bird)', ' bob ',
[], '')
self.assertEqual(atom[0].comments, ['foo', 'bar'])
self.assertEqual(atom[2].comments, ['bird'])
def test_get_atom_non_printable_in_comment(self):
atom = self._test_get_x(parser.get_atom,
' (\x0A) bob', ' (\x0A) bob', ' bob',
[errors.NonPrintableDefect], '')
self.assertEqual(atom[0].comments, ['\x0A'])
def test_get_atom_non_printable_in_atext(self):
atom = self._test_get_x(parser.get_atom,
' (a) a\x0B', ' (a) a\x0B', ' a\x0B',
[errors.NonPrintableDefect], '')
self.assertEqual(atom[0].comments, ['a'])
def test_get_atom_header_ends_in_comment(self):
atom = self._test_get_x(parser.get_atom,
' (a) bob (a', ' (a) bob (a)', ' bob ',
[errors.InvalidHeaderDefect], '')
self.assertEqual(atom[0].comments, ['a'])
self.assertEqual(atom[2].comments, ['a'])
def test_get_atom_no_atom(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_atom(' (ab) ')
def test_get_atom_no_atom_before_special(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_atom(' (ab) @')
def test_get_atom_atom_ends_at_special(self):
atom = self._test_get_x(parser.get_atom,
' (foo) bob(bar) @bang', ' (foo) bob(bar) ', ' bob ', [], '@bang')
self.assertEqual(atom[0].comments, ['foo'])
self.assertEqual(atom[2].comments, ['bar'])
def test_get_atom_atom_ends_at_noncfws(self):
self._test_get_x(parser.get_atom,
'bob fred', 'bob ', 'bob ', [], 'fred')
def test_get_atom_rfc2047_atom(self):
self._test_get_x(parser.get_atom,
'=?utf-8?q?=20bob?=', ' bob', ' bob', [], '')
# get_dot_atom_text
def test_get_dot_atom_text(self):
dot_atom_text = self._test_get_x(parser.get_dot_atom_text,
'foo.bar.bang', 'foo.bar.bang', 'foo.bar.bang', [], '')
self.assertEqual(dot_atom_text.token_type, 'dot-atom-text')
self.assertEqual(len(dot_atom_text), 5)
def test_get_dot_atom_text_lone_atom_is_valid(self):
dot_atom_text = self._test_get_x(parser.get_dot_atom_text,
'foo', 'foo', 'foo', [], '')
def test_get_dot_atom_text_raises_on_leading_dot(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_dot_atom_text('.foo.bar')
def test_get_dot_atom_text_raises_on_trailing_dot(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_dot_atom_text('foo.bar.')
def test_get_dot_atom_text_raises_on_leading_non_atext(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_dot_atom_text(' foo.bar')
with self.assertRaises(errors.HeaderParseError):
parser.get_dot_atom_text('@foo.bar')
with self.assertRaises(errors.HeaderParseError):
parser.get_dot_atom_text('"foo.bar"')
def test_get_dot_atom_text_trailing_text_preserved(self):
dot_atom_text = self._test_get_x(parser.get_dot_atom_text,
'foo@bar', 'foo', 'foo', [], '@bar')
def test_get_dot_atom_text_trailing_ws_preserved(self):
dot_atom_text = self._test_get_x(parser.get_dot_atom_text,
'foo .bar', 'foo', 'foo', [], ' .bar')
# get_dot_atom
def test_get_dot_atom_only(self):
dot_atom = self._test_get_x(parser.get_dot_atom,
'foo.bar.bing', 'foo.bar.bing', 'foo.bar.bing', [], '')
self.assertEqual(dot_atom.token_type, 'dot-atom')
self.assertEqual(len(dot_atom), 1)
def test_get_dot_atom_with_wsp(self):
self._test_get_x(parser.get_dot_atom,
'\t foo.bar.bing ', '\t foo.bar.bing ', ' foo.bar.bing ', [], '')
def test_get_dot_atom_with_comments_and_wsp(self):
self._test_get_x(parser.get_dot_atom,
' (sing) foo.bar.bing (here) ', ' (sing) foo.bar.bing (here) ',
' foo.bar.bing ', [], '')
def test_get_dot_atom_space_ends_dot_atom(self):
self._test_get_x(parser.get_dot_atom,
' (sing) foo.bar .bing (here) ', ' (sing) foo.bar ',
' foo.bar ', [], '.bing (here) ')
def test_get_dot_atom_no_atom_raises(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_dot_atom(' (foo) ')
def test_get_dot_atom_leading_dot_raises(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_dot_atom(' (foo) .bar')
def test_get_dot_atom_two_dots_raises(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_dot_atom('bar..bang')
def test_get_dot_atom_trailing_dot_raises(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_dot_atom(' (foo) bar.bang. foo')
def test_get_dot_atom_rfc2047_atom(self):
self._test_get_x(parser.get_dot_atom,
'=?utf-8?q?=20bob?=', ' bob', ' bob', [], '')
# get_word (if this were black box we'd repeat all the qs/atom tests)
def test_get_word_atom_yields_atom(self):
word = self._test_get_x(parser.get_word,
' (foo) bar (bang) :ah', ' (foo) bar (bang) ', ' bar ', [], ':ah')
self.assertEqual(word.token_type, 'atom')
self.assertEqual(word[0].token_type, 'cfws')
def test_get_word_qs_yields_qs(self):
word = self._test_get_x(parser.get_word,
'"bar " (bang) ah', '"bar " (bang) ', 'bar ', [], 'ah')
self.assertEqual(word.token_type, 'quoted-string')
self.assertEqual(word[0].token_type, 'bare-quoted-string')
self.assertEqual(word[0].value, 'bar ')
self.assertEqual(word.content, 'bar ')
def test_get_word_ends_at_dot(self):
self._test_get_x(parser.get_word,
'foo.', 'foo', 'foo', [], '.')
# get_phrase
def test_get_phrase_simple(self):
phrase = self._test_get_x(parser.get_phrase,
'"Fred A. Johnson" is his name, oh.',
'"Fred A. Johnson" is his name',
'Fred A. Johnson is his name',
[],
', oh.')
self.assertEqual(phrase.token_type, 'phrase')
def test_get_phrase_complex(self):
phrase = self._test_get_x(parser.get_phrase,
' (A) bird (in (my|your)) "hand " is messy\t<>\t',
' (A) bird (in (my|your)) "hand " is messy\t',
' bird hand is messy ',
[],
'<>\t')
self.assertEqual(phrase[0][0].comments, ['A'])
self.assertEqual(phrase[0][2].comments, ['in (my|your)'])
def test_get_phrase_obsolete(self):
phrase = self._test_get_x(parser.get_phrase,
'Fred A.(weird).O Johnson',
'Fred A.(weird).O Johnson',
'Fred A. .O Johnson',
[errors.ObsoleteHeaderDefect]*3,
'')
self.assertEqual(len(phrase), 7)
self.assertEqual(phrase[3].comments, ['weird'])
def test_get_phrase_phrase_must_start_with_word(self):
phrase = self._test_get_x(parser.get_phrase,
'(even weirder).name',
'(even weirder).name',
' .name',
[errors.InvalidHeaderDefect] + [errors.ObsoleteHeaderDefect]*2,
'')
self.assertEqual(len(phrase), 3)
self.assertEqual(phrase[0].comments, ['even weirder'])
def test_get_phrase_ending_with_obsolete(self):
phrase = self._test_get_x(parser.get_phrase,
'simple phrase.(with trailing comment):boo',
'simple phrase.(with trailing comment)',
'simple phrase. ',
[errors.ObsoleteHeaderDefect]*2,
':boo')
self.assertEqual(len(phrase), 4)
self.assertEqual(phrase[3].comments, ['with trailing comment'])
def get_phrase_cfws_only_raises(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_phrase(' (foo) ')
# get_local_part
def test_get_local_part_simple(self):
local_part = self._test_get_x(parser.get_local_part,
'dinsdale@python.org', 'dinsdale', 'dinsdale', [], '@python.org')
self.assertEqual(local_part.token_type, 'local-part')
self.assertEqual(local_part.local_part, 'dinsdale')
def test_get_local_part_with_dot(self):
local_part = self._test_get_x(parser.get_local_part,
'Fred.A.Johnson@python.org',
'Fred.A.Johnson',
'Fred.A.Johnson',
[],
'@python.org')
self.assertEqual(local_part.local_part, 'Fred.A.Johnson')
def test_get_local_part_with_whitespace(self):
local_part = self._test_get_x(parser.get_local_part,
' Fred.A.Johnson @python.org',
' Fred.A.Johnson ',
' Fred.A.Johnson ',
[],
'@python.org')
self.assertEqual(local_part.local_part, 'Fred.A.Johnson')
def test_get_local_part_with_cfws(self):
local_part = self._test_get_x(parser.get_local_part,
' (foo) Fred.A.Johnson (bar (bird)) @python.org',
' (foo) Fred.A.Johnson (bar (bird)) ',
' Fred.A.Johnson ',
[],
'@python.org')
self.assertEqual(local_part.local_part, 'Fred.A.Johnson')
self.assertEqual(local_part[0][0].comments, ['foo'])
self.assertEqual(local_part[0][2].comments, ['bar (bird)'])
def test_get_local_part_simple_quoted(self):
local_part = self._test_get_x(parser.get_local_part,
'"dinsdale"@python.org', '"dinsdale"', '"dinsdale"', [], '@python.org')
self.assertEqual(local_part.token_type, 'local-part')
self.assertEqual(local_part.local_part, 'dinsdale')
def test_get_local_part_with_quoted_dot(self):
local_part = self._test_get_x(parser.get_local_part,
'"Fred.A.Johnson"@python.org',
'"Fred.A.Johnson"',
'"Fred.A.Johnson"',
[],
'@python.org')
self.assertEqual(local_part.local_part, 'Fred.A.Johnson')
def test_get_local_part_quoted_with_whitespace(self):
local_part = self._test_get_x(parser.get_local_part,
' "Fred A. Johnson" @python.org',
' "Fred A. Johnson" ',
' "Fred A. Johnson" ',
[],
'@python.org')
self.assertEqual(local_part.local_part, 'Fred A. Johnson')
def test_get_local_part_quoted_with_cfws(self):
local_part = self._test_get_x(parser.get_local_part,
' (foo) " Fred A. Johnson " (bar (bird)) @python.org',
' (foo) " Fred A. Johnson " (bar (bird)) ',
' " Fred A. Johnson " ',
[],
'@python.org')
self.assertEqual(local_part.local_part, ' Fred A. Johnson ')
self.assertEqual(local_part[0][0].comments, ['foo'])
self.assertEqual(local_part[0][2].comments, ['bar (bird)'])
def test_get_local_part_simple_obsolete(self):
local_part = self._test_get_x(parser.get_local_part,
'Fred. A.Johnson@python.org',
'Fred. A.Johnson',
'Fred. A.Johnson',
[errors.ObsoleteHeaderDefect],
'@python.org')
self.assertEqual(local_part.local_part, 'Fred.A.Johnson')
def test_get_local_part_complex_obsolete_1(self):
local_part = self._test_get_x(parser.get_local_part,
' (foo )Fred (bar).(bird) A.(sheep)Johnson."and dogs "@python.org',
' (foo )Fred (bar).(bird) A.(sheep)Johnson."and dogs "',
' Fred . A. Johnson.and dogs ',
[errors.ObsoleteHeaderDefect],
'@python.org')
self.assertEqual(local_part.local_part, 'Fred.A.Johnson.and dogs ')
def test_get_local_part_complex_obsolete_invalid(self):
local_part = self._test_get_x(parser.get_local_part,
' (foo )Fred (bar).(bird) A.(sheep)Johnson "and dogs"@python.org',
' (foo )Fred (bar).(bird) A.(sheep)Johnson "and dogs"',
' Fred . A. Johnson and dogs',
[errors.InvalidHeaderDefect]*2,
'@python.org')
self.assertEqual(local_part.local_part, 'Fred.A.Johnson and dogs')
def test_get_local_part_no_part_raises(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_local_part(' (foo) ')
def test_get_local_part_special_instead_raises(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_local_part(' (foo) @python.org')
def test_get_local_part_trailing_dot(self):
local_part = self._test_get_x(parser.get_local_part,
' borris.@python.org',
' borris.',
' borris.',
[errors.InvalidHeaderDefect]*2,
'@python.org')
self.assertEqual(local_part.local_part, 'borris.')
def test_get_local_part_trailing_dot_with_ws(self):
local_part = self._test_get_x(parser.get_local_part,
' borris. @python.org',
' borris. ',
' borris. ',
[errors.InvalidHeaderDefect]*2,
'@python.org')
self.assertEqual(local_part.local_part, 'borris.')
def test_get_local_part_leading_dot(self):
local_part = self._test_get_x(parser.get_local_part,
'.borris@python.org',
'.borris',
'.borris',
[errors.InvalidHeaderDefect]*2,
'@python.org')
self.assertEqual(local_part.local_part, '.borris')
def test_get_local_part_leading_dot_after_ws(self):
local_part = self._test_get_x(parser.get_local_part,
' .borris@python.org',
' .borris',
' .borris',
[errors.InvalidHeaderDefect]*2,
'@python.org')
self.assertEqual(local_part.local_part, '.borris')
def test_get_local_part_double_dot_raises(self):
local_part = self._test_get_x(parser.get_local_part,
' borris.(foo).natasha@python.org',
' borris.(foo).natasha',
' borris. .natasha',
[errors.InvalidHeaderDefect]*2,
'@python.org')
self.assertEqual(local_part.local_part, 'borris..natasha')
def test_get_local_part_quoted_strings_in_atom_list(self):
local_part = self._test_get_x(parser.get_local_part,
'""example" example"@example.com',
'""example" example"',
'example example',
[errors.InvalidHeaderDefect]*3,
'@example.com')
self.assertEqual(local_part.local_part, 'example example')
def test_get_local_part_valid_and_invalid_qp_in_atom_list(self):
local_part = self._test_get_x(parser.get_local_part,
r'"\\"example\\" example"@example.com',
r'"\\"example\\" example"',
r'\example\\ example',
[errors.InvalidHeaderDefect]*5,
'@example.com')
self.assertEqual(local_part.local_part, r'\example\\ example')
def test_get_local_part_unicode_defect(self):
# Currently this only happens when parsing unicode, not when parsing
# stuff that was originally binary.
local_part = self._test_get_x(parser.get_local_part,
'exámple@example.com',
'exámple',
'exámple',
[errors.NonASCIILocalPartDefect],
'@example.com')
self.assertEqual(local_part.local_part, 'exámple')
# get_dtext
def test_get_dtext_only(self):
dtext = self._test_get_x(parser.get_dtext,
'foobar', 'foobar', 'foobar', [], '')
self.assertEqual(dtext.token_type, 'ptext')
def test_get_dtext_all_dtext(self):
dtext = self._test_get_x(parser.get_dtext, self.rfc_dtext_chars,
self.rfc_dtext_chars,
self.rfc_dtext_chars, [], '')
def test_get_dtext_two_words_gets_first(self):
self._test_get_x(parser.get_dtext,
'foo bar', 'foo', 'foo', [], ' bar')
def test_get_dtext_following_wsp_preserved(self):
self._test_get_x(parser.get_dtext,
'foo \t\tbar', 'foo', 'foo', [], ' \t\tbar')
def test_get_dtext_non_printables(self):
dtext = self._test_get_x(parser.get_dtext,
'foo\x00bar]', 'foo\x00bar', 'foo\x00bar',
[errors.NonPrintableDefect], ']')
self.assertEqual(dtext.defects[0].non_printables[0], '\x00')
def test_get_dtext_with_qp(self):
ptext = self._test_get_x(parser.get_dtext,
r'foo\]\[\\bar\b\e\l\l',
r'foo][\barbell',
r'foo][\barbell',
[errors.ObsoleteHeaderDefect],
'')
def test_get_dtext_up_to_close_bracket_only(self):
self._test_get_x(parser.get_dtext,
'foo]', 'foo', 'foo', [], ']')
def test_get_dtext_wsp_before_close_bracket_preserved(self):
self._test_get_x(parser.get_dtext,
'foo ]', 'foo', 'foo', [], ' ]')
def test_get_dtext_close_bracket_mid_word(self):
self._test_get_x(parser.get_dtext,
'foo]bar', 'foo', 'foo', [], ']bar')
def test_get_dtext_up_to_open_bracket_only(self):
self._test_get_x(parser.get_dtext,
'foo[', 'foo', 'foo', [], '[')
def test_get_dtext_wsp_before_open_bracket_preserved(self):
self._test_get_x(parser.get_dtext,
'foo [', 'foo', 'foo', [], ' [')
def test_get_dtext_open_bracket_mid_word(self):
self._test_get_x(parser.get_dtext,
'foo[bar', 'foo', 'foo', [], '[bar')
# get_domain_literal
def test_get_domain_literal_only(self):
domain_literal = self._test_get_x(parser.get_domain_literal,
'[127.0.0.1]',
'[127.0.0.1]',
'[127.0.0.1]',
[],
'')
self.assertEqual(domain_literal.token_type, 'domain-literal')
self.assertEqual(domain_literal.domain, '[127.0.0.1]')
self.assertEqual(domain_literal.ip, '127.0.0.1')
def test_get_domain_literal_with_internal_ws(self):
domain_literal = self._test_get_x(parser.get_domain_literal,
'[ 127.0.0.1\t ]',
'[ 127.0.0.1\t ]',
'[ 127.0.0.1 ]',
[],
'')
self.assertEqual(domain_literal.domain, '[127.0.0.1]')
self.assertEqual(domain_literal.ip, '127.0.0.1')
def test_get_domain_literal_with_surrounding_cfws(self):
domain_literal = self._test_get_x(parser.get_domain_literal,
'(foo)[ 127.0.0.1] (bar)',
'(foo)[ 127.0.0.1] (bar)',
' [ 127.0.0.1] ',
[],
'')
self.assertEqual(domain_literal.domain, '[127.0.0.1]')
self.assertEqual(domain_literal.ip, '127.0.0.1')
def test_get_domain_literal_no_start_char_raises(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_domain_literal('(foo) ')
def test_get_domain_literal_no_start_char_before_special_raises(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_domain_literal('(foo) @')
def test_get_domain_literal_bad_dtext_char_before_special_raises(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_domain_literal('(foo) [abc[@')
# get_domain
def test_get_domain_regular_domain_only(self):
domain = self._test_get_x(parser.get_domain,
'example.com',
'example.com',
'example.com',
[],
'')
self.assertEqual(domain.token_type, 'domain')
self.assertEqual(domain.domain, 'example.com')
def test_get_domain_domain_literal_only(self):
domain = self._test_get_x(parser.get_domain,
'[127.0.0.1]',
'[127.0.0.1]',
'[127.0.0.1]',
[],
'')
self.assertEqual(domain.token_type, 'domain')
self.assertEqual(domain.domain, '[127.0.0.1]')
def test_get_domain_with_cfws(self):
domain = self._test_get_x(parser.get_domain,
'(foo) example.com(bar)\t',
'(foo) example.com(bar)\t',
' example.com ',
[],
'')
self.assertEqual(domain.domain, 'example.com')
def test_get_domain_domain_literal_with_cfws(self):
domain = self._test_get_x(parser.get_domain,
'(foo)[127.0.0.1]\t(bar)',
'(foo)[127.0.0.1]\t(bar)',
' [127.0.0.1] ',
[],
'')
self.assertEqual(domain.domain, '[127.0.0.1]')
def test_get_domain_domain_with_cfws_ends_at_special(self):
domain = self._test_get_x(parser.get_domain,
'(foo)example.com\t(bar), next',
'(foo)example.com\t(bar)',
' example.com ',
[],
', next')
self.assertEqual(domain.domain, 'example.com')
def test_get_domain_domain_literal_with_cfws_ends_at_special(self):
domain = self._test_get_x(parser.get_domain,
'(foo)[127.0.0.1]\t(bar), next',
'(foo)[127.0.0.1]\t(bar)',
' [127.0.0.1] ',
[],
', next')
self.assertEqual(domain.domain, '[127.0.0.1]')
def test_get_domain_obsolete(self):
domain = self._test_get_x(parser.get_domain,
'(foo) example . (bird)com(bar)\t',
'(foo) example . (bird)com(bar)\t',
' example . com ',
[errors.ObsoleteHeaderDefect],
'')
self.assertEqual(domain.domain, 'example.com')
def test_get_domain_no_non_cfws_raises(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_domain(" (foo)\t")
def test_get_domain_no_atom_raises(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_domain(" (foo)\t, broken")
# get_addr_spec
def test_get_addr_spec_normal(self):
addr_spec = self._test_get_x(parser.get_addr_spec,
'dinsdale@example.com',
'dinsdale@example.com',
'dinsdale@example.com',
[],
'')
self.assertEqual(addr_spec.token_type, 'addr-spec')
self.assertEqual(addr_spec.local_part, 'dinsdale')
self.assertEqual(addr_spec.domain, 'example.com')
self.assertEqual(addr_spec.addr_spec, 'dinsdale@example.com')
    def test_get_addr_spec_with_domain_literal(self):
addr_spec = self._test_get_x(parser.get_addr_spec,
'dinsdale@[127.0.0.1]',
'dinsdale@[127.0.0.1]',
'dinsdale@[127.0.0.1]',
[],
'')
self.assertEqual(addr_spec.local_part, 'dinsdale')
self.assertEqual(addr_spec.domain, '[127.0.0.1]')
self.assertEqual(addr_spec.addr_spec, 'dinsdale@[127.0.0.1]')
def test_get_addr_spec_with_cfws(self):
addr_spec = self._test_get_x(parser.get_addr_spec,
'(foo) dinsdale(bar)@ (bird) example.com (bog)',
'(foo) dinsdale(bar)@ (bird) example.com (bog)',
' dinsdale@example.com ',
[],
'')
self.assertEqual(addr_spec.local_part, 'dinsdale')
self.assertEqual(addr_spec.domain, 'example.com')
self.assertEqual(addr_spec.addr_spec, 'dinsdale@example.com')
    def test_get_addr_spec_with_quoted_string_and_cfws(self):
addr_spec = self._test_get_x(parser.get_addr_spec,
'(foo) "roy a bug"(bar)@ (bird) example.com (bog)',
'(foo) "roy a bug"(bar)@ (bird) example.com (bog)',
' "roy a bug"@example.com ',
[],
'')
self.assertEqual(addr_spec.local_part, 'roy a bug')
self.assertEqual(addr_spec.domain, 'example.com')
self.assertEqual(addr_spec.addr_spec, '"roy a bug"@example.com')
def test_get_addr_spec_ends_at_special(self):
addr_spec = self._test_get_x(parser.get_addr_spec,
'(foo) "roy a bug"(bar)@ (bird) example.com (bog) , next',
'(foo) "roy a bug"(bar)@ (bird) example.com (bog) ',
' "roy a bug"@example.com ',
[],
', next')
self.assertEqual(addr_spec.local_part, 'roy a bug')
self.assertEqual(addr_spec.domain, 'example.com')
self.assertEqual(addr_spec.addr_spec, '"roy a bug"@example.com')
def test_get_addr_spec_quoted_strings_in_atom_list(self):
addr_spec = self._test_get_x(parser.get_addr_spec,
'""example" example"@example.com',
'""example" example"@example.com',
'example example@example.com',
[errors.InvalidHeaderDefect]*3,
'')
self.assertEqual(addr_spec.local_part, 'example example')
self.assertEqual(addr_spec.domain, 'example.com')
self.assertEqual(addr_spec.addr_spec, '"example example"@example.com')
def test_get_addr_spec_dot_atom(self):
addr_spec = self._test_get_x(parser.get_addr_spec,
'star.a.star@example.com',
'star.a.star@example.com',
'star.a.star@example.com',
[],
'')
self.assertEqual(addr_spec.local_part, 'star.a.star')
self.assertEqual(addr_spec.domain, 'example.com')
self.assertEqual(addr_spec.addr_spec, 'star.a.star@example.com')
# get_obs_route
def test_get_obs_route_simple(self):
obs_route = self._test_get_x(parser.get_obs_route,
'@example.com, @two.example.com:',
'@example.com, @two.example.com:',
'@example.com, @two.example.com:',
[],
'')
self.assertEqual(obs_route.token_type, 'obs-route')
self.assertEqual(obs_route.domains, ['example.com', 'two.example.com'])
def test_get_obs_route_complex(self):
obs_route = self._test_get_x(parser.get_obs_route,
'(foo),, (blue)@example.com (bar),@two.(foo) example.com (bird):',
'(foo),, (blue)@example.com (bar),@two.(foo) example.com (bird):',
' ,, @example.com ,@two. example.com :',
[errors.ObsoleteHeaderDefect], # This is the obs-domain
'')
self.assertEqual(obs_route.token_type, 'obs-route')
self.assertEqual(obs_route.domains, ['example.com', 'two.example.com'])
def test_get_obs_route_no_route_before_end_raises(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_obs_route('(foo) @example.com,')
def test_get_obs_route_no_route_before_special_raises(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_obs_route('(foo) [abc],')
def test_get_obs_route_no_route_before_special_raises2(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_obs_route('(foo) @example.com [abc],')
# get_angle_addr
def test_get_angle_addr_simple(self):
angle_addr = self._test_get_x(parser.get_angle_addr,
'<dinsdale@example.com>',
'<dinsdale@example.com>',
'<dinsdale@example.com>',
[],
'')
self.assertEqual(angle_addr.token_type, 'angle-addr')
self.assertEqual(angle_addr.local_part, 'dinsdale')
self.assertEqual(angle_addr.domain, 'example.com')
self.assertIsNone(angle_addr.route)
self.assertEqual(angle_addr.addr_spec, 'dinsdale@example.com')
def test_get_angle_addr_empty(self):
angle_addr = self._test_get_x(parser.get_angle_addr,
'<>',
'<>',
'<>',
[errors.InvalidHeaderDefect],
'')
self.assertEqual(angle_addr.token_type, 'angle-addr')
self.assertIsNone(angle_addr.local_part)
self.assertIsNone(angle_addr.domain)
self.assertIsNone(angle_addr.route)
self.assertEqual(angle_addr.addr_spec, '<>')
def test_get_angle_addr_qs_only_quotes(self):
angle_addr = self._test_get_x(parser.get_angle_addr,
'<""@example.com>',
'<""@example.com>',
'<""@example.com>',
[],
'')
self.assertEqual(angle_addr.token_type, 'angle-addr')
self.assertEqual(angle_addr.local_part, '')
self.assertEqual(angle_addr.domain, 'example.com')
self.assertIsNone(angle_addr.route)
self.assertEqual(angle_addr.addr_spec, '""@example.com')
def test_get_angle_addr_with_cfws(self):
angle_addr = self._test_get_x(parser.get_angle_addr,
' (foo) <dinsdale@example.com>(bar)',
' (foo) <dinsdale@example.com>(bar)',
' <dinsdale@example.com> ',
[],
'')
self.assertEqual(angle_addr.token_type, 'angle-addr')
self.assertEqual(angle_addr.local_part, 'dinsdale')
self.assertEqual(angle_addr.domain, 'example.com')
self.assertIsNone(angle_addr.route)
self.assertEqual(angle_addr.addr_spec, 'dinsdale@example.com')
def test_get_angle_addr_qs_and_domain_literal(self):
angle_addr = self._test_get_x(parser.get_angle_addr,
'<"Fred Perfect"@[127.0.0.1]>',
'<"Fred Perfect"@[127.0.0.1]>',
'<"Fred Perfect"@[127.0.0.1]>',
[],
'')
self.assertEqual(angle_addr.local_part, 'Fred Perfect')
self.assertEqual(angle_addr.domain, '[127.0.0.1]')
self.assertIsNone(angle_addr.route)
self.assertEqual(angle_addr.addr_spec, '"Fred Perfect"@[127.0.0.1]')
def test_get_angle_addr_internal_cfws(self):
angle_addr = self._test_get_x(parser.get_angle_addr,
'<(foo) dinsdale@example.com(bar)>',
'<(foo) dinsdale@example.com(bar)>',
'< dinsdale@example.com >',
[],
'')
self.assertEqual(angle_addr.local_part, 'dinsdale')
self.assertEqual(angle_addr.domain, 'example.com')
self.assertIsNone(angle_addr.route)
self.assertEqual(angle_addr.addr_spec, 'dinsdale@example.com')
def test_get_angle_addr_obs_route(self):
angle_addr = self._test_get_x(parser.get_angle_addr,
'(foo)<@example.com, (bird) @two.example.com: dinsdale@example.com> (bar) ',
'(foo)<@example.com, (bird) @two.example.com: dinsdale@example.com> (bar) ',
' <@example.com, @two.example.com: dinsdale@example.com> ',
[errors.ObsoleteHeaderDefect],
'')
self.assertEqual(angle_addr.local_part, 'dinsdale')
self.assertEqual(angle_addr.domain, 'example.com')
self.assertEqual(angle_addr.route, ['example.com', 'two.example.com'])
self.assertEqual(angle_addr.addr_spec, 'dinsdale@example.com')
def test_get_angle_addr_missing_closing_angle(self):
angle_addr = self._test_get_x(parser.get_angle_addr,
'<dinsdale@example.com',
'<dinsdale@example.com>',
'<dinsdale@example.com>',
[errors.InvalidHeaderDefect],
'')
self.assertEqual(angle_addr.local_part, 'dinsdale')
self.assertEqual(angle_addr.domain, 'example.com')
self.assertIsNone(angle_addr.route)
self.assertEqual(angle_addr.addr_spec, 'dinsdale@example.com')
def test_get_angle_addr_missing_closing_angle_with_cfws(self):
angle_addr = self._test_get_x(parser.get_angle_addr,
'<dinsdale@example.com (foo)',
'<dinsdale@example.com (foo)>',
'<dinsdale@example.com >',
[errors.InvalidHeaderDefect],
'')
self.assertEqual(angle_addr.local_part, 'dinsdale')
self.assertEqual(angle_addr.domain, 'example.com')
self.assertIsNone(angle_addr.route)
self.assertEqual(angle_addr.addr_spec, 'dinsdale@example.com')
def test_get_angle_addr_ends_at_special(self):
angle_addr = self._test_get_x(parser.get_angle_addr,
'<dinsdale@example.com> (foo), next',
'<dinsdale@example.com> (foo)',
'<dinsdale@example.com> ',
[],
', next')
self.assertEqual(angle_addr.local_part, 'dinsdale')
self.assertEqual(angle_addr.domain, 'example.com')
self.assertIsNone(angle_addr.route)
self.assertEqual(angle_addr.addr_spec, 'dinsdale@example.com')
def test_get_angle_addr_no_angle_raise(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_angle_addr('(foo) ')
def test_get_angle_addr_no_angle_before_special_raises(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_angle_addr('(foo) , next')
def test_get_angle_addr_no_angle_raises(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_angle_addr('bar')
def test_get_angle_addr_special_after_angle_raises(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_angle_addr('(foo) <, bar')
    # get_display_name.  This is a phrase, but with a different value.
def test_get_display_name_simple(self):
display_name = self._test_get_x(parser.get_display_name,
'Fred A Johnson',
'Fred A Johnson',
'Fred A Johnson',
[],
'')
self.assertEqual(display_name.token_type, 'display-name')
self.assertEqual(display_name.display_name, 'Fred A Johnson')
def test_get_display_name_complex1(self):
display_name = self._test_get_x(parser.get_display_name,
'"Fred A. Johnson" is his name, oh.',
'"Fred A. Johnson" is his name',
'"Fred A. Johnson is his name"',
[],
', oh.')
self.assertEqual(display_name.token_type, 'display-name')
self.assertEqual(display_name.display_name, 'Fred A. Johnson is his name')
def test_get_display_name_complex2(self):
display_name = self._test_get_x(parser.get_display_name,
' (A) bird (in (my|your)) "hand " is messy\t<>\t',
' (A) bird (in (my|your)) "hand " is messy\t',
' "bird hand is messy" ',
[],
'<>\t')
self.assertEqual(display_name[0][0].comments, ['A'])
self.assertEqual(display_name[0][2].comments, ['in (my|your)'])
self.assertEqual(display_name.display_name, 'bird hand is messy')
def test_get_display_name_obsolete(self):
display_name = self._test_get_x(parser.get_display_name,
'Fred A.(weird).O Johnson',
'Fred A.(weird).O Johnson',
'"Fred A. .O Johnson"',
[errors.ObsoleteHeaderDefect]*3,
'')
self.assertEqual(len(display_name), 7)
self.assertEqual(display_name[3].comments, ['weird'])
self.assertEqual(display_name.display_name, 'Fred A. .O Johnson')
    def test_get_display_name_phrase_must_start_with_word(self):
display_name = self._test_get_x(parser.get_display_name,
'(even weirder).name',
'(even weirder).name',
' ".name"',
[errors.InvalidHeaderDefect] + [errors.ObsoleteHeaderDefect]*2,
'')
self.assertEqual(len(display_name), 3)
self.assertEqual(display_name[0].comments, ['even weirder'])
self.assertEqual(display_name.display_name, '.name')
def test_get_display_name_ending_with_obsolete(self):
display_name = self._test_get_x(parser.get_display_name,
'simple phrase.(with trailing comment):boo',
'simple phrase.(with trailing comment)',
'"simple phrase." ',
[errors.ObsoleteHeaderDefect]*2,
':boo')
self.assertEqual(len(display_name), 4)
self.assertEqual(display_name[3].comments, ['with trailing comment'])
self.assertEqual(display_name.display_name, 'simple phrase.')
# get_name_addr
def test_get_name_addr_angle_addr_only(self):
name_addr = self._test_get_x(parser.get_name_addr,
'<dinsdale@example.com>',
'<dinsdale@example.com>',
'<dinsdale@example.com>',
[],
'')
self.assertEqual(name_addr.token_type, 'name-addr')
self.assertIsNone(name_addr.display_name)
self.assertEqual(name_addr.local_part, 'dinsdale')
self.assertEqual(name_addr.domain, 'example.com')
self.assertIsNone(name_addr.route)
self.assertEqual(name_addr.addr_spec, 'dinsdale@example.com')
def test_get_name_addr_atom_name(self):
name_addr = self._test_get_x(parser.get_name_addr,
'Dinsdale <dinsdale@example.com>',
'Dinsdale <dinsdale@example.com>',
'Dinsdale <dinsdale@example.com>',
[],
'')
self.assertEqual(name_addr.token_type, 'name-addr')
self.assertEqual(name_addr.display_name, 'Dinsdale')
self.assertEqual(name_addr.local_part, 'dinsdale')
self.assertEqual(name_addr.domain, 'example.com')
self.assertIsNone(name_addr.route)
self.assertEqual(name_addr.addr_spec, 'dinsdale@example.com')
def test_get_name_addr_atom_name_with_cfws(self):
name_addr = self._test_get_x(parser.get_name_addr,
'(foo) Dinsdale (bar) <dinsdale@example.com> (bird)',
'(foo) Dinsdale (bar) <dinsdale@example.com> (bird)',
' Dinsdale <dinsdale@example.com> ',
[],
'')
self.assertEqual(name_addr.display_name, 'Dinsdale')
self.assertEqual(name_addr.local_part, 'dinsdale')
self.assertEqual(name_addr.domain, 'example.com')
self.assertIsNone(name_addr.route)
self.assertEqual(name_addr.addr_spec, 'dinsdale@example.com')
def test_get_name_addr_name_with_cfws_and_dots(self):
name_addr = self._test_get_x(parser.get_name_addr,
'(foo) Roy.A.Bear (bar) <dinsdale@example.com> (bird)',
'(foo) Roy.A.Bear (bar) <dinsdale@example.com> (bird)',
' "Roy.A.Bear" <dinsdale@example.com> ',
[errors.ObsoleteHeaderDefect]*2,
'')
self.assertEqual(name_addr.display_name, 'Roy.A.Bear')
self.assertEqual(name_addr.local_part, 'dinsdale')
self.assertEqual(name_addr.domain, 'example.com')
self.assertIsNone(name_addr.route)
self.assertEqual(name_addr.addr_spec, 'dinsdale@example.com')
def test_get_name_addr_qs_name(self):
name_addr = self._test_get_x(parser.get_name_addr,
'"Roy.A.Bear" <dinsdale@example.com>',
'"Roy.A.Bear" <dinsdale@example.com>',
'"Roy.A.Bear" <dinsdale@example.com>',
[],
'')
self.assertEqual(name_addr.display_name, 'Roy.A.Bear')
self.assertEqual(name_addr.local_part, 'dinsdale')
self.assertEqual(name_addr.domain, 'example.com')
self.assertIsNone(name_addr.route)
self.assertEqual(name_addr.addr_spec, 'dinsdale@example.com')
def test_get_name_addr_with_route(self):
name_addr = self._test_get_x(parser.get_name_addr,
'"Roy.A.Bear" <@two.example.com: dinsdale@example.com>',
'"Roy.A.Bear" <@two.example.com: dinsdale@example.com>',
'"Roy.A.Bear" <@two.example.com: dinsdale@example.com>',
[errors.ObsoleteHeaderDefect],
'')
self.assertEqual(name_addr.display_name, 'Roy.A.Bear')
self.assertEqual(name_addr.local_part, 'dinsdale')
self.assertEqual(name_addr.domain, 'example.com')
self.assertEqual(name_addr.route, ['two.example.com'])
self.assertEqual(name_addr.addr_spec, 'dinsdale@example.com')
def test_get_name_addr_ends_at_special(self):
name_addr = self._test_get_x(parser.get_name_addr,
'"Roy.A.Bear" <dinsdale@example.com>, next',
'"Roy.A.Bear" <dinsdale@example.com>',
'"Roy.A.Bear" <dinsdale@example.com>',
[],
', next')
self.assertEqual(name_addr.display_name, 'Roy.A.Bear')
self.assertEqual(name_addr.local_part, 'dinsdale')
self.assertEqual(name_addr.domain, 'example.com')
self.assertIsNone(name_addr.route)
self.assertEqual(name_addr.addr_spec, 'dinsdale@example.com')
def test_get_name_addr_no_content_raises(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_name_addr(' (foo) ')
def test_get_name_addr_no_content_before_special_raises(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_name_addr(' (foo) ,')
def test_get_name_addr_no_angle_after_display_name_raises(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_name_addr('foo bar')
# get_mailbox
def test_get_mailbox_addr_spec_only(self):
mailbox = self._test_get_x(parser.get_mailbox,
'dinsdale@example.com',
'dinsdale@example.com',
'dinsdale@example.com',
[],
'')
self.assertEqual(mailbox.token_type, 'mailbox')
self.assertIsNone(mailbox.display_name)
self.assertEqual(mailbox.local_part, 'dinsdale')
self.assertEqual(mailbox.domain, 'example.com')
self.assertIsNone(mailbox.route)
self.assertEqual(mailbox.addr_spec, 'dinsdale@example.com')
def test_get_mailbox_angle_addr_only(self):
mailbox = self._test_get_x(parser.get_mailbox,
'<dinsdale@example.com>',
'<dinsdale@example.com>',
'<dinsdale@example.com>',
[],
'')
self.assertEqual(mailbox.token_type, 'mailbox')
self.assertIsNone(mailbox.display_name)
self.assertEqual(mailbox.local_part, 'dinsdale')
self.assertEqual(mailbox.domain, 'example.com')
self.assertIsNone(mailbox.route)
self.assertEqual(mailbox.addr_spec, 'dinsdale@example.com')
def test_get_mailbox_name_addr(self):
mailbox = self._test_get_x(parser.get_mailbox,
'"Roy A. Bear" <dinsdale@example.com>',
'"Roy A. Bear" <dinsdale@example.com>',
'"Roy A. Bear" <dinsdale@example.com>',
[],
'')
self.assertEqual(mailbox.token_type, 'mailbox')
self.assertEqual(mailbox.display_name, 'Roy A. Bear')
self.assertEqual(mailbox.local_part, 'dinsdale')
self.assertEqual(mailbox.domain, 'example.com')
self.assertIsNone(mailbox.route)
self.assertEqual(mailbox.addr_spec, 'dinsdale@example.com')
def test_get_mailbox_ends_at_special(self):
mailbox = self._test_get_x(parser.get_mailbox,
'"Roy A. Bear" <dinsdale@example.com>, rest',
'"Roy A. Bear" <dinsdale@example.com>',
'"Roy A. Bear" <dinsdale@example.com>',
[],
', rest')
self.assertEqual(mailbox.token_type, 'mailbox')
self.assertEqual(mailbox.display_name, 'Roy A. Bear')
self.assertEqual(mailbox.local_part, 'dinsdale')
self.assertEqual(mailbox.domain, 'example.com')
self.assertIsNone(mailbox.route)
self.assertEqual(mailbox.addr_spec, 'dinsdale@example.com')
def test_get_mailbox_quoted_strings_in_atom_list(self):
mailbox = self._test_get_x(parser.get_mailbox,
'""example" example"@example.com',
'""example" example"@example.com',
'example example@example.com',
[errors.InvalidHeaderDefect]*3,
'')
self.assertEqual(mailbox.local_part, 'example example')
self.assertEqual(mailbox.domain, 'example.com')
self.assertEqual(mailbox.addr_spec, '"example example"@example.com')
# get_mailbox_list
def test_get_mailbox_list_single_addr(self):
mailbox_list = self._test_get_x(parser.get_mailbox_list,
'dinsdale@example.com',
'dinsdale@example.com',
'dinsdale@example.com',
[],
'')
self.assertEqual(mailbox_list.token_type, 'mailbox-list')
self.assertEqual(len(mailbox_list.mailboxes), 1)
mailbox = mailbox_list.mailboxes[0]
self.assertIsNone(mailbox.display_name)
self.assertEqual(mailbox.local_part, 'dinsdale')
self.assertEqual(mailbox.domain, 'example.com')
self.assertIsNone(mailbox.route)
self.assertEqual(mailbox.addr_spec, 'dinsdale@example.com')
self.assertEqual(mailbox_list.mailboxes,
mailbox_list.all_mailboxes)
def test_get_mailbox_list_two_simple_addr(self):
mailbox_list = self._test_get_x(parser.get_mailbox_list,
'dinsdale@example.com, dinsdale@test.example.com',
'dinsdale@example.com, dinsdale@test.example.com',
'dinsdale@example.com, dinsdale@test.example.com',
[],
'')
self.assertEqual(mailbox_list.token_type, 'mailbox-list')
self.assertEqual(len(mailbox_list.mailboxes), 2)
self.assertEqual(mailbox_list.mailboxes[0].addr_spec,
'dinsdale@example.com')
self.assertEqual(mailbox_list.mailboxes[1].addr_spec,
'dinsdale@test.example.com')
self.assertEqual(mailbox_list.mailboxes,
mailbox_list.all_mailboxes)
def test_get_mailbox_list_two_name_addr(self):
mailbox_list = self._test_get_x(parser.get_mailbox_list,
('"Roy A. Bear" <dinsdale@example.com>,'
' "Fred Flintstone" <dinsdale@test.example.com>'),
('"Roy A. Bear" <dinsdale@example.com>,'
' "Fred Flintstone" <dinsdale@test.example.com>'),
('"Roy A. Bear" <dinsdale@example.com>,'
' "Fred Flintstone" <dinsdale@test.example.com>'),
[],
'')
self.assertEqual(len(mailbox_list.mailboxes), 2)
self.assertEqual(mailbox_list.mailboxes[0].addr_spec,
'dinsdale@example.com')
self.assertEqual(mailbox_list.mailboxes[0].display_name,
'Roy A. Bear')
self.assertEqual(mailbox_list.mailboxes[1].addr_spec,
'dinsdale@test.example.com')
self.assertEqual(mailbox_list.mailboxes[1].display_name,
'Fred Flintstone')
self.assertEqual(mailbox_list.mailboxes,
mailbox_list.all_mailboxes)
def test_get_mailbox_list_two_complex(self):
mailbox_list = self._test_get_x(parser.get_mailbox_list,
('(foo) "Roy A. Bear" <dinsdale@example.com>(bar),'
' "Fred Flintstone" <dinsdale@test.(bird)example.com>'),
('(foo) "Roy A. Bear" <dinsdale@example.com>(bar),'
' "Fred Flintstone" <dinsdale@test.(bird)example.com>'),
(' "Roy A. Bear" <dinsdale@example.com> ,'
' "Fred Flintstone" <dinsdale@test. example.com>'),
[errors.ObsoleteHeaderDefect],
'')
self.assertEqual(len(mailbox_list.mailboxes), 2)
self.assertEqual(mailbox_list.mailboxes[0].addr_spec,
'dinsdale@example.com')
self.assertEqual(mailbox_list.mailboxes[0].display_name,
'Roy A. Bear')
self.assertEqual(mailbox_list.mailboxes[1].addr_spec,
'dinsdale@test.example.com')
self.assertEqual(mailbox_list.mailboxes[1].display_name,
'Fred Flintstone')
self.assertEqual(mailbox_list.mailboxes,
mailbox_list.all_mailboxes)
def test_get_mailbox_list_unparseable_mailbox_null(self):
mailbox_list = self._test_get_x(parser.get_mailbox_list,
('"Roy A. Bear"[] dinsdale@example.com,'
' "Fred Flintstone" <dinsdale@test.(bird)example.com>'),
('"Roy A. Bear"[] dinsdale@example.com,'
' "Fred Flintstone" <dinsdale@test.(bird)example.com>'),
('"Roy A. Bear"[] dinsdale@example.com,'
' "Fred Flintstone" <dinsdale@test. example.com>'),
[errors.InvalidHeaderDefect, # the 'extra' text after the local part
errors.InvalidHeaderDefect, # the local part with no angle-addr
errors.ObsoleteHeaderDefect, # period in extra text (example.com)
errors.ObsoleteHeaderDefect], # (bird) in valid address.
'')
self.assertEqual(len(mailbox_list.mailboxes), 1)
self.assertEqual(len(mailbox_list.all_mailboxes), 2)
self.assertEqual(mailbox_list.all_mailboxes[0].token_type,
'invalid-mailbox')
self.assertIsNone(mailbox_list.all_mailboxes[0].display_name)
self.assertEqual(mailbox_list.all_mailboxes[0].local_part,
'Roy A. Bear')
self.assertIsNone(mailbox_list.all_mailboxes[0].domain)
self.assertEqual(mailbox_list.all_mailboxes[0].addr_spec,
'"Roy A. Bear"')
self.assertIs(mailbox_list.all_mailboxes[1],
mailbox_list.mailboxes[0])
self.assertEqual(mailbox_list.mailboxes[0].addr_spec,
'dinsdale@test.example.com')
self.assertEqual(mailbox_list.mailboxes[0].display_name,
'Fred Flintstone')
def test_get_mailbox_list_junk_after_valid_address(self):
mailbox_list = self._test_get_x(parser.get_mailbox_list,
('"Roy A. Bear" <dinsdale@example.com>@@,'
' "Fred Flintstone" <dinsdale@test.example.com>'),
('"Roy A. Bear" <dinsdale@example.com>@@,'
' "Fred Flintstone" <dinsdale@test.example.com>'),
('"Roy A. Bear" <dinsdale@example.com>@@,'
' "Fred Flintstone" <dinsdale@test.example.com>'),
[errors.InvalidHeaderDefect],
'')
self.assertEqual(len(mailbox_list.mailboxes), 1)
self.assertEqual(len(mailbox_list.all_mailboxes), 2)
self.assertEqual(mailbox_list.all_mailboxes[0].addr_spec,
'dinsdale@example.com')
self.assertEqual(mailbox_list.all_mailboxes[0].display_name,
'Roy A. Bear')
self.assertEqual(mailbox_list.all_mailboxes[0].token_type,
'invalid-mailbox')
self.assertIs(mailbox_list.all_mailboxes[1],
mailbox_list.mailboxes[0])
self.assertEqual(mailbox_list.mailboxes[0].addr_spec,
'dinsdale@test.example.com')
self.assertEqual(mailbox_list.mailboxes[0].display_name,
'Fred Flintstone')
def test_get_mailbox_list_empty_list_element(self):
mailbox_list = self._test_get_x(parser.get_mailbox_list,
('"Roy A. Bear" <dinsdale@example.com>, (bird),,'
' "Fred Flintstone" <dinsdale@test.example.com>'),
('"Roy A. Bear" <dinsdale@example.com>, (bird),,'
' "Fred Flintstone" <dinsdale@test.example.com>'),
('"Roy A. Bear" <dinsdale@example.com>, ,,'
' "Fred Flintstone" <dinsdale@test.example.com>'),
[errors.ObsoleteHeaderDefect]*2,
'')
self.assertEqual(len(mailbox_list.mailboxes), 2)
self.assertEqual(mailbox_list.all_mailboxes,
mailbox_list.mailboxes)
self.assertEqual(mailbox_list.all_mailboxes[0].addr_spec,
'dinsdale@example.com')
self.assertEqual(mailbox_list.all_mailboxes[0].display_name,
'Roy A. Bear')
self.assertEqual(mailbox_list.mailboxes[1].addr_spec,
'dinsdale@test.example.com')
self.assertEqual(mailbox_list.mailboxes[1].display_name,
'Fred Flintstone')
def test_get_mailbox_list_only_empty_elements(self):
mailbox_list = self._test_get_x(parser.get_mailbox_list,
'(foo),, (bar)',
'(foo),, (bar)',
' ,, ',
[errors.ObsoleteHeaderDefect]*3,
'')
self.assertEqual(len(mailbox_list.mailboxes), 0)
self.assertEqual(mailbox_list.all_mailboxes,
mailbox_list.mailboxes)
# get_group_list
def test_get_group_list_cfws_only(self):
group_list = self._test_get_x(parser.get_group_list,
'(hidden);',
'(hidden)',
' ',
[],
';')
self.assertEqual(group_list.token_type, 'group-list')
self.assertEqual(len(group_list.mailboxes), 0)
self.assertEqual(group_list.mailboxes,
group_list.all_mailboxes)
def test_get_group_list_mailbox_list(self):
group_list = self._test_get_x(parser.get_group_list,
'dinsdale@example.org, "Fred A. Bear" <dinsdale@example.org>',
'dinsdale@example.org, "Fred A. Bear" <dinsdale@example.org>',
'dinsdale@example.org, "Fred A. Bear" <dinsdale@example.org>',
[],
'')
self.assertEqual(group_list.token_type, 'group-list')
self.assertEqual(len(group_list.mailboxes), 2)
self.assertEqual(group_list.mailboxes,
group_list.all_mailboxes)
self.assertEqual(group_list.mailboxes[1].display_name,
'Fred A. Bear')
def test_get_group_list_obs_group_list(self):
group_list = self._test_get_x(parser.get_group_list,
', (foo),,(bar)',
', (foo),,(bar)',
', ,, ',
[errors.ObsoleteHeaderDefect],
'')
self.assertEqual(group_list.token_type, 'group-list')
self.assertEqual(len(group_list.mailboxes), 0)
self.assertEqual(group_list.mailboxes,
group_list.all_mailboxes)
def test_get_group_list_comment_only_invalid(self):
group_list = self._test_get_x(parser.get_group_list,
'(bar)',
'(bar)',
' ',
[errors.InvalidHeaderDefect],
'')
self.assertEqual(group_list.token_type, 'group-list')
self.assertEqual(len(group_list.mailboxes), 0)
self.assertEqual(group_list.mailboxes,
group_list.all_mailboxes)
# get_group
def test_get_group_empty(self):
group = self._test_get_x(parser.get_group,
'Monty Python:;',
'Monty Python:;',
'Monty Python:;',
[],
'')
self.assertEqual(group.token_type, 'group')
self.assertEqual(group.display_name, 'Monty Python')
self.assertEqual(len(group.mailboxes), 0)
self.assertEqual(group.mailboxes,
group.all_mailboxes)
def test_get_group_null_addr_spec(self):
group = self._test_get_x(parser.get_group,
'foo: <>;',
'foo: <>;',
'foo: <>;',
[errors.InvalidHeaderDefect],
'')
self.assertEqual(group.display_name, 'foo')
self.assertEqual(len(group.mailboxes), 0)
self.assertEqual(len(group.all_mailboxes), 1)
self.assertEqual(group.all_mailboxes[0].value, '<>')
def test_get_group_cfws_only(self):
group = self._test_get_x(parser.get_group,
'Monty Python: (hidden);',
'Monty Python: (hidden);',
'Monty Python: ;',
[],
'')
self.assertEqual(group.token_type, 'group')
self.assertEqual(group.display_name, 'Monty Python')
self.assertEqual(len(group.mailboxes), 0)
self.assertEqual(group.mailboxes,
group.all_mailboxes)
def test_get_group_single_mailbox(self):
group = self._test_get_x(parser.get_group,
'Monty Python: "Fred A. Bear" <dinsdale@example.com>;',
'Monty Python: "Fred A. Bear" <dinsdale@example.com>;',
'Monty Python: "Fred A. Bear" <dinsdale@example.com>;',
[],
'')
self.assertEqual(group.token_type, 'group')
self.assertEqual(group.display_name, 'Monty Python')
self.assertEqual(len(group.mailboxes), 1)
self.assertEqual(group.mailboxes,
group.all_mailboxes)
self.assertEqual(group.mailboxes[0].addr_spec,
'dinsdale@example.com')
def test_get_group_mixed_list(self):
group = self._test_get_x(parser.get_group,
('Monty Python: "Fred A. Bear" <dinsdale@example.com>,'
'(foo) Roger <ping@exampele.com>, x@test.example.com;'),
('Monty Python: "Fred A. Bear" <dinsdale@example.com>,'
'(foo) Roger <ping@exampele.com>, x@test.example.com;'),
('Monty Python: "Fred A. Bear" <dinsdale@example.com>,'
' Roger <ping@exampele.com>, x@test.example.com;'),
[],
'')
self.assertEqual(group.token_type, 'group')
self.assertEqual(group.display_name, 'Monty Python')
self.assertEqual(len(group.mailboxes), 3)
self.assertEqual(group.mailboxes,
group.all_mailboxes)
self.assertEqual(group.mailboxes[0].display_name,
'Fred A. Bear')
self.assertEqual(group.mailboxes[1].display_name,
'Roger')
self.assertEqual(group.mailboxes[2].local_part, 'x')
def test_get_group_one_invalid(self):
group = self._test_get_x(parser.get_group,
('Monty Python: "Fred A. Bear" <dinsdale@example.com>,'
'(foo) Roger ping@exampele.com, x@test.example.com;'),
('Monty Python: "Fred A. Bear" <dinsdale@example.com>,'
'(foo) Roger ping@exampele.com, x@test.example.com;'),
('Monty Python: "Fred A. Bear" <dinsdale@example.com>,'
' Roger ping@exampele.com, x@test.example.com;'),
[errors.InvalidHeaderDefect, # non-angle addr makes local part invalid
            errors.InvalidHeaderDefect], # and it's not obs-local either: no dots.
'')
self.assertEqual(group.token_type, 'group')
self.assertEqual(group.display_name, 'Monty Python')
self.assertEqual(len(group.mailboxes), 2)
self.assertEqual(len(group.all_mailboxes), 3)
self.assertEqual(group.mailboxes[0].display_name,
'Fred A. Bear')
self.assertEqual(group.mailboxes[1].local_part, 'x')
self.assertIsNone(group.all_mailboxes[1].display_name)
    def test_get_group_missing_final_semicolon(self):
group = self._test_get_x(parser.get_group,
('Monty Python:"Fred A. Bear" <dinsdale@example.com>,'
'eric@where.test,John <jdoe@test>'),
('Monty Python:"Fred A. Bear" <dinsdale@example.com>,'
'eric@where.test,John <jdoe@test>;'),
('Monty Python:"Fred A. Bear" <dinsdale@example.com>,'
'eric@where.test,John <jdoe@test>;'),
[errors.InvalidHeaderDefect],
'')
self.assertEqual(group.token_type, 'group')
self.assertEqual(group.display_name, 'Monty Python')
self.assertEqual(len(group.mailboxes), 3)
self.assertEqual(group.mailboxes,
group.all_mailboxes)
self.assertEqual(group.mailboxes[0].addr_spec,
'dinsdale@example.com')
self.assertEqual(group.mailboxes[0].display_name,
'Fred A. Bear')
self.assertEqual(group.mailboxes[1].addr_spec,
'eric@where.test')
self.assertEqual(group.mailboxes[2].display_name,
'John')
self.assertEqual(group.mailboxes[2].addr_spec,
'jdoe@test')
# get_address
def test_get_address_simple(self):
address = self._test_get_x(parser.get_address,
'dinsdale@example.com',
'dinsdale@example.com',
'dinsdale@example.com',
[],
'')
self.assertEqual(address.token_type, 'address')
self.assertEqual(len(address.mailboxes), 1)
self.assertEqual(address.mailboxes,
address.all_mailboxes)
self.assertEqual(address.mailboxes[0].domain,
'example.com')
self.assertEqual(address[0].token_type,
'mailbox')
def test_get_address_complex(self):
address = self._test_get_x(parser.get_address,
'(foo) "Fred A. Bear" <(bird)dinsdale@example.com>',
'(foo) "Fred A. Bear" <(bird)dinsdale@example.com>',
' "Fred A. Bear" < dinsdale@example.com>',
[],
'')
self.assertEqual(address.token_type, 'address')
self.assertEqual(len(address.mailboxes), 1)
self.assertEqual(address.mailboxes,
address.all_mailboxes)
self.assertEqual(address.mailboxes[0].display_name,
'Fred A. Bear')
self.assertEqual(address[0].token_type,
'mailbox')
def test_get_address_rfc2047_display_name(self):
address = self._test_get_x(parser.get_address,
'=?utf-8?q?=C3=89ric?= <foo@example.com>',
'Éric <foo@example.com>',
'Éric <foo@example.com>',
[],
'')
self.assertEqual(address.token_type, 'address')
self.assertEqual(len(address.mailboxes), 1)
self.assertEqual(address.mailboxes,
address.all_mailboxes)
self.assertEqual(address.mailboxes[0].display_name,
'Éric')
self.assertEqual(address[0].token_type,
'mailbox')
def test_get_address_empty_group(self):
address = self._test_get_x(parser.get_address,
'Monty Python:;',
'Monty Python:;',
'Monty Python:;',
[],
'')
self.assertEqual(address.token_type, 'address')
self.assertEqual(len(address.mailboxes), 0)
self.assertEqual(address.mailboxes,
address.all_mailboxes)
self.assertEqual(address[0].token_type,
'group')
self.assertEqual(address[0].display_name,
'Monty Python')
def test_get_address_group(self):
address = self._test_get_x(parser.get_address,
'Monty Python: x@example.com, y@example.com;',
'Monty Python: x@example.com, y@example.com;',
'Monty Python: x@example.com, y@example.com;',
[],
'')
self.assertEqual(address.token_type, 'address')
self.assertEqual(len(address.mailboxes), 2)
self.assertEqual(address.mailboxes,
address.all_mailboxes)
self.assertEqual(address[0].token_type,
'group')
self.assertEqual(address[0].display_name,
'Monty Python')
self.assertEqual(address.mailboxes[0].local_part, 'x')
def test_get_address_quoted_local_part(self):
address = self._test_get_x(parser.get_address,
'"foo bar"@example.com',
'"foo bar"@example.com',
'"foo bar"@example.com',
[],
'')
self.assertEqual(address.token_type, 'address')
self.assertEqual(len(address.mailboxes), 1)
self.assertEqual(address.mailboxes,
address.all_mailboxes)
self.assertEqual(address.mailboxes[0].domain,
'example.com')
self.assertEqual(address.mailboxes[0].local_part,
'foo bar')
self.assertEqual(address[0].token_type, 'mailbox')
def test_get_address_ends_at_special(self):
address = self._test_get_x(parser.get_address,
'dinsdale@example.com, next',
'dinsdale@example.com',
'dinsdale@example.com',
[],
', next')
self.assertEqual(address.token_type, 'address')
self.assertEqual(len(address.mailboxes), 1)
self.assertEqual(address.mailboxes,
address.all_mailboxes)
self.assertEqual(address.mailboxes[0].domain,
'example.com')
self.assertEqual(address[0].token_type, 'mailbox')
def test_get_address_invalid_mailbox_invalid(self):
address = self._test_get_x(parser.get_address,
'ping example.com, next',
'ping example.com',
'ping example.com',
[errors.InvalidHeaderDefect, # addr-spec with no domain
errors.InvalidHeaderDefect, # invalid local-part
errors.InvalidHeaderDefect, # missing .s in local-part
],
', next')
self.assertEqual(address.token_type, 'address')
self.assertEqual(len(address.mailboxes), 0)
self.assertEqual(len(address.all_mailboxes), 1)
self.assertIsNone(address.all_mailboxes[0].domain)
self.assertEqual(address.all_mailboxes[0].local_part, 'ping example.com')
self.assertEqual(address[0].token_type, 'invalid-mailbox')
def test_get_address_quoted_strings_in_atom_list(self):
address = self._test_get_x(parser.get_address,
'""example" example"@example.com',
'""example" example"@example.com',
'example example@example.com',
[errors.InvalidHeaderDefect]*3,
'')
self.assertEqual(address.all_mailboxes[0].local_part, 'example example')
self.assertEqual(address.all_mailboxes[0].domain, 'example.com')
self.assertEqual(address.all_mailboxes[0].addr_spec, '"example example"@example.com')
# get_address_list
def test_get_address_list_mailboxes_simple(self):
address_list = self._test_get_x(parser.get_address_list,
'dinsdale@example.com',
'dinsdale@example.com',
'dinsdale@example.com',
[],
'')
self.assertEqual(address_list.token_type, 'address-list')
self.assertEqual(len(address_list.mailboxes), 1)
self.assertEqual(address_list.mailboxes,
address_list.all_mailboxes)
self.assertEqual([str(x) for x in address_list.mailboxes],
[str(x) for x in address_list.addresses])
self.assertEqual(address_list.mailboxes[0].domain, 'example.com')
self.assertEqual(address_list[0].token_type, 'address')
self.assertIsNone(address_list[0].display_name)
def test_get_address_list_mailboxes_two_simple(self):
address_list = self._test_get_x(parser.get_address_list,
'foo@example.com, "Fred A. Bar" <bar@example.com>',
'foo@example.com, "Fred A. Bar" <bar@example.com>',
'foo@example.com, "Fred A. Bar" <bar@example.com>',
[],
'')
self.assertEqual(address_list.token_type, 'address-list')
self.assertEqual(len(address_list.mailboxes), 2)
self.assertEqual(address_list.mailboxes,
address_list.all_mailboxes)
self.assertEqual([str(x) for x in address_list.mailboxes],
[str(x) for x in address_list.addresses])
self.assertEqual(address_list.mailboxes[0].local_part, 'foo')
self.assertEqual(address_list.mailboxes[1].display_name, "Fred A. Bar")
def test_get_address_list_mailboxes_complex(self):
address_list = self._test_get_x(parser.get_address_list,
('"Roy A. Bear" <dinsdale@example.com>, '
'(ping) Foo <x@example.com>,'
'Nobody Is. Special <y@(bird)example.(bad)com>'),
('"Roy A. Bear" <dinsdale@example.com>, '
'(ping) Foo <x@example.com>,'
'Nobody Is. Special <y@(bird)example.(bad)com>'),
('"Roy A. Bear" <dinsdale@example.com>, '
'Foo <x@example.com>,'
'"Nobody Is. Special" <y@example. com>'),
[errors.ObsoleteHeaderDefect, # period in Is.
errors.ObsoleteHeaderDefect], # cfws in domain
'')
self.assertEqual(address_list.token_type, 'address-list')
self.assertEqual(len(address_list.mailboxes), 3)
self.assertEqual(address_list.mailboxes,
address_list.all_mailboxes)
self.assertEqual([str(x) for x in address_list.mailboxes],
[str(x) for x in address_list.addresses])
self.assertEqual(address_list.mailboxes[0].domain, 'example.com')
self.assertEqual(address_list.mailboxes[0].token_type, 'mailbox')
self.assertEqual(address_list.addresses[0].token_type, 'address')
self.assertEqual(address_list.mailboxes[1].local_part, 'x')
self.assertEqual(address_list.mailboxes[2].display_name,
'Nobody Is. Special')
def test_get_address_list_mailboxes_invalid_addresses(self):
address_list = self._test_get_x(parser.get_address_list,
('"Roy A. Bear" <dinsdale@example.com>, '
'(ping) Foo x@example.com[],'
'Nobody Is. Special <(bird)example.(bad)com>'),
('"Roy A. Bear" <dinsdale@example.com>, '
'(ping) Foo x@example.com[],'
'Nobody Is. Special <(bird)example.(bad)com>'),
('"Roy A. Bear" <dinsdale@example.com>, '
'Foo x@example.com[],'
'"Nobody Is. Special" < example. com>'),
[errors.InvalidHeaderDefect, # invalid address in list
errors.InvalidHeaderDefect, # 'Foo x' local part invalid.
errors.InvalidHeaderDefect, # Missing . in 'Foo x' local part
errors.ObsoleteHeaderDefect, # period in 'Is.' disp-name phrase
errors.InvalidHeaderDefect, # no domain part in addr-spec
errors.ObsoleteHeaderDefect], # addr-spec has comment in it
'')
self.assertEqual(address_list.token_type, 'address-list')
self.assertEqual(len(address_list.mailboxes), 1)
self.assertEqual(len(address_list.all_mailboxes), 3)
self.assertEqual([str(x) for x in address_list.all_mailboxes],
[str(x) for x in address_list.addresses])
self.assertEqual(address_list.mailboxes[0].domain, 'example.com')
self.assertEqual(address_list.mailboxes[0].token_type, 'mailbox')
self.assertEqual(address_list.addresses[0].token_type, 'address')
self.assertEqual(address_list.addresses[1].token_type, 'address')
self.assertEqual(len(address_list.addresses[0].mailboxes), 1)
        self.assertEqual(len(address_list.addresses[1].mailboxes), 0)
self.assertEqual(
address_list.addresses[1].all_mailboxes[0].local_part, 'Foo x')
self.assertEqual(
address_list.addresses[2].all_mailboxes[0].display_name,
"Nobody Is. Special")
def test_get_address_list_group_empty(self):
address_list = self._test_get_x(parser.get_address_list,
'Monty Python: ;',
'Monty Python: ;',
'Monty Python: ;',
[],
'')
self.assertEqual(address_list.token_type, 'address-list')
self.assertEqual(len(address_list.mailboxes), 0)
self.assertEqual(address_list.mailboxes,
address_list.all_mailboxes)
self.assertEqual(len(address_list.addresses), 1)
self.assertEqual(address_list.addresses[0].token_type, 'address')
self.assertEqual(address_list.addresses[0].display_name, 'Monty Python')
self.assertEqual(len(address_list.addresses[0].mailboxes), 0)
def test_get_address_list_group_simple(self):
address_list = self._test_get_x(parser.get_address_list,
'Monty Python: dinsdale@example.com;',
'Monty Python: dinsdale@example.com;',
'Monty Python: dinsdale@example.com;',
[],
'')
self.assertEqual(address_list.token_type, 'address-list')
self.assertEqual(len(address_list.mailboxes), 1)
self.assertEqual(address_list.mailboxes,
address_list.all_mailboxes)
self.assertEqual(address_list.mailboxes[0].domain, 'example.com')
self.assertEqual(address_list.addresses[0].display_name,
'Monty Python')
self.assertEqual(address_list.addresses[0].mailboxes[0].domain,
'example.com')
def test_get_address_list_group_and_mailboxes(self):
address_list = self._test_get_x(parser.get_address_list,
('Monty Python: dinsdale@example.com, "Fred" <flint@example.com>;, '
'Abe <x@example.com>, Bee <y@example.com>'),
('Monty Python: dinsdale@example.com, "Fred" <flint@example.com>;, '
'Abe <x@example.com>, Bee <y@example.com>'),
('Monty Python: dinsdale@example.com, "Fred" <flint@example.com>;, '
'Abe <x@example.com>, Bee <y@example.com>'),
[],
'')
self.assertEqual(address_list.token_type, 'address-list')
self.assertEqual(len(address_list.mailboxes), 4)
self.assertEqual(address_list.mailboxes,
address_list.all_mailboxes)
self.assertEqual(len(address_list.addresses), 3)
self.assertEqual(address_list.mailboxes[0].local_part, 'dinsdale')
self.assertEqual(address_list.addresses[0].display_name,
'Monty Python')
self.assertEqual(address_list.addresses[0].mailboxes[0].domain,
'example.com')
self.assertEqual(address_list.addresses[0].mailboxes[1].local_part,
'flint')
self.assertEqual(address_list.addresses[1].mailboxes[0].local_part,
'x')
self.assertEqual(address_list.addresses[2].mailboxes[0].local_part,
'y')
self.assertEqual(str(address_list.addresses[1]),
str(address_list.mailboxes[2]))
def test_invalid_content_disposition(self):
content_disp = self._test_parse_x(
parser.parse_content_disposition_header,
";attachment", "; attachment", ";attachment",
[errors.InvalidHeaderDefect]*2
)
def test_invalid_content_transfer_encoding(self):
cte = self._test_parse_x(
parser.parse_content_transfer_encoding_header,
";foo", ";foo", ";foo", [errors.InvalidHeaderDefect]*3
)
@parameterize
class Test_parse_mime_parameters(TestParserMixin, TestEmailBase):
def mime_parameters_as_value(self,
value,
tl_str,
tl_value,
params,
defects):
mime_parameters = self._test_parse_x(parser.parse_mime_parameters,
value, tl_str, tl_value, defects)
self.assertEqual(mime_parameters.token_type, 'mime-parameters')
self.assertEqual(list(mime_parameters.params), params)
mime_parameters_params = {
'simple': (
'filename="abc.py"',
' filename="abc.py"',
'filename=abc.py',
[('filename', 'abc.py')],
[]),
'multiple_keys': (
'filename="abc.py"; xyz=abc',
' filename="abc.py"; xyz="abc"',
'filename=abc.py; xyz=abc',
[('filename', 'abc.py'), ('xyz', 'abc')],
[]),
'split_value': (
"filename*0*=iso-8859-1''%32%30%31%2E; filename*1*=%74%69%66",
' filename="201.tif"',
"filename*0*=iso-8859-1''%32%30%31%2E; filename*1*=%74%69%66",
[('filename', '201.tif')],
[]),
        # Note that it is undefined what we should do for error recovery when
        # there are duplicate parameter names or duplicate parts in a split
        # part.  We choose to ignore all duplicate parameters after the first
        # and to take duplicate or missing rfc 2231 parts in appearance order.
        # This is backward compatible with get_param's behavior, but the
        # decisions are arbitrary; see the worked illustration after this table.
'duplicate_key': (
'filename=abc.gif; filename=def.tiff',
' filename="abc.gif"',
"filename=abc.gif; filename=def.tiff",
[('filename', 'abc.gif')],
[errors.InvalidHeaderDefect]),
'duplicate_key_with_split_value': (
"filename*0*=iso-8859-1''%32%30%31%2E; filename*1*=%74%69%66;"
" filename=abc.gif",
' filename="201.tif"',
"filename*0*=iso-8859-1''%32%30%31%2E; filename*1*=%74%69%66;"
" filename=abc.gif",
[('filename', '201.tif')],
[errors.InvalidHeaderDefect]),
'duplicate_key_with_split_value_other_order': (
"filename=abc.gif; "
" filename*0*=iso-8859-1''%32%30%31%2E; filename*1*=%74%69%66",
' filename="abc.gif"',
"filename=abc.gif;"
" filename*0*=iso-8859-1''%32%30%31%2E; filename*1*=%74%69%66",
[('filename', 'abc.gif')],
[errors.InvalidHeaderDefect]),
'duplicate_in_split_value': (
"filename*0*=iso-8859-1''%32%30%31%2E; filename*1*=%74%69%66;"
" filename*1*=abc.gif",
' filename="201.tifabc.gif"',
"filename*0*=iso-8859-1''%32%30%31%2E; filename*1*=%74%69%66;"
" filename*1*=abc.gif",
[('filename', '201.tifabc.gif')],
[errors.InvalidHeaderDefect]),
'missing_split_value': (
"filename*0*=iso-8859-1''%32%30%31%2E; filename*3*=%74%69%66;",
' filename="201.tif"',
"filename*0*=iso-8859-1''%32%30%31%2E; filename*3*=%74%69%66;",
[('filename', '201.tif')],
[errors.InvalidHeaderDefect]),
'duplicate_and_missing_split_value': (
"filename*0*=iso-8859-1''%32%30%31%2E; filename*3*=%74%69%66;"
" filename*3*=abc.gif",
' filename="201.tifabc.gif"',
"filename*0*=iso-8859-1''%32%30%31%2E; filename*3*=%74%69%66;"
" filename*3*=abc.gif",
[('filename', '201.tifabc.gif')],
[errors.InvalidHeaderDefect]*2),
# Here we depart from get_param and assume the *0* was missing.
'duplicate_with_broken_split_value': (
"filename=abc.gif; "
" filename*2*=iso-8859-1''%32%30%31%2E; filename*3*=%74%69%66",
' filename="abc.gif201.tif"',
"filename=abc.gif;"
" filename*2*=iso-8859-1''%32%30%31%2E; filename*3*=%74%69%66",
[('filename', 'abc.gif201.tif')],
            # Defects are the apparently missing *0*, and two 'out of sequence' parts.
[errors.InvalidHeaderDefect]*3),
}
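    # Illustration (informal note, not an additional test case): an RFC 2231
    # split parameter such as
    #     filename*0*=iso-8859-1''%32%30%31%2E; filename*1*=%74%69%66
    # is percent-decoded and its parts concatenated in order, yielding the
    # single value '201.tif'.  When a part number is duplicated or missing,
    # the parts are still concatenated in appearance order, which is why the
    # 'duplicate_in_split_value' case above produces '201.tifabc.gif'.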
@parameterize
class Test_parse_mime_version(TestParserMixin, TestEmailBase):
def mime_version_as_value(self,
value,
tl_str,
tl_value,
major,
minor,
defects):
mime_version = self._test_parse_x(parser.parse_mime_version,
value, tl_str, tl_value, defects)
self.assertEqual(mime_version.major, major)
self.assertEqual(mime_version.minor, minor)
mime_version_params = {
'rfc_2045_1': (
'1.0',
'1.0',
'1.0',
1,
0,
[]),
'RFC_2045_2': (
'1.0 (produced by MetaSend Vx.x)',
'1.0 (produced by MetaSend Vx.x)',
'1.0 ',
1,
0,
[]),
'RFC_2045_3': (
'(produced by MetaSend Vx.x) 1.0',
'(produced by MetaSend Vx.x) 1.0',
' 1.0',
1,
0,
[]),
'RFC_2045_4': (
'1.(produced by MetaSend Vx.x)0',
'1.(produced by MetaSend Vx.x)0',
'1. 0',
1,
0,
[]),
'empty': (
'',
'',
'',
None,
None,
[errors.HeaderMissingRequiredValue]),
}
class TestFolding(TestEmailBase):
policy = policy.default
def _test(self, tl, folded, policy=policy):
self.assertEqual(tl.fold(policy=policy), folded, tl.ppstr())
def test_simple_unstructured_no_folds(self):
self._test(parser.get_unstructured("This is a test"),
"This is a test\n")
def test_simple_unstructured_folded(self):
self._test(parser.get_unstructured("This is also a test, but this "
"time there are enough words (and even some "
"symbols) to make it wrap; at least in theory."),
"This is also a test, but this time there are enough "
"words (and even some\n"
" symbols) to make it wrap; at least in theory.\n")
def test_unstructured_with_unicode_no_folds(self):
self._test(parser.get_unstructured("hübsch kleiner beißt"),
"=?utf-8?q?h=C3=BCbsch_kleiner_bei=C3=9Ft?=\n")
def test_one_ew_on_each_of_two_wrapped_lines(self):
self._test(parser.get_unstructured("Mein kleiner Kaktus ist sehr "
"hübsch. Es hat viele Stacheln "
"und oft beißt mich."),
"Mein kleiner Kaktus ist sehr =?utf-8?q?h=C3=BCbsch=2E?= "
"Es hat viele Stacheln\n"
" und oft =?utf-8?q?bei=C3=9Ft?= mich.\n")
def test_ews_combined_before_wrap(self):
self._test(parser.get_unstructured("Mein Kaktus ist hübsch. "
"Es beißt mich. "
"And that's all I'm sayin."),
"Mein Kaktus ist =?utf-8?q?h=C3=BCbsch=2E__Es_bei=C3=9Ft?= "
"mich. And that's\n"
" all I'm sayin.\n")
# XXX Need test of an encoded word so long that it needs to be wrapped
def test_simple_address(self):
self._test(parser.get_address_list("abc <xyz@example.com>")[0],
"abc <xyz@example.com>\n")
def test_address_list_folding_at_commas(self):
self._test(parser.get_address_list('abc <xyz@example.com>, '
'"Fred Blunt" <sharp@example.com>, '
'"J.P.Cool" <hot@example.com>, '
'"K<>y" <key@example.com>, '
'Firesale <cheap@example.com>, '
'<end@example.com>')[0],
'abc <xyz@example.com>, "Fred Blunt" <sharp@example.com>,\n'
' "J.P.Cool" <hot@example.com>, "K<>y" <key@example.com>,\n'
' Firesale <cheap@example.com>, <end@example.com>\n')
def test_address_list_with_unicode_names(self):
self._test(parser.get_address_list(
'Hübsch Kaktus <beautiful@example.com>, '
'beißt beißt <biter@example.com>')[0],
'=?utf-8?q?H=C3=BCbsch?= Kaktus <beautiful@example.com>,\n'
' =?utf-8?q?bei=C3=9Ft_bei=C3=9Ft?= <biter@example.com>\n')
def test_address_list_with_unicode_names_in_quotes(self):
self._test(parser.get_address_list(
'"Hübsch Kaktus" <beautiful@example.com>, '
'"beißt" beißt <biter@example.com>')[0],
'=?utf-8?q?H=C3=BCbsch?= Kaktus <beautiful@example.com>,\n'
' =?utf-8?q?bei=C3=9Ft_bei=C3=9Ft?= <biter@example.com>\n')
# XXX Need tests with comments on various sides of a unicode token,
# and with unicode tokens in the comments. Spaces inside the quotes
# currently don't do the right thing.
def test_split_at_whitespace_after_header_before_long_token(self):
body = parser.get_unstructured(' ' + 'x'*77)
header = parser.Header([
parser.HeaderLabel([parser.ValueTerminal('test:', 'atext')]),
parser.CFWSList([parser.WhiteSpaceTerminal(' ', 'fws')]), body])
self._test(header, 'test: \n ' + 'x'*77 + '\n')
def test_split_at_whitespace_before_long_token(self):
self._test(parser.get_unstructured('xxx ' + 'y'*77),
'xxx \n ' + 'y'*77 + '\n')
def test_overlong_encodeable_is_wrapped(self):
first_token_with_whitespace = 'xxx '
chrome_leader = '=?utf-8?q?'
len_chrome = len(chrome_leader) + 2
len_non_y = len_chrome + len(first_token_with_whitespace)
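        # len_chrome counts the '=?utf-8?q?' leader plus the two-character '?='
        # trailer; lines wrap at 78 columns, so the first encoded word carries
        # 78 - len_non_y 'y' characters and the remainder spills onto the next line.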
self._test(parser.get_unstructured(first_token_with_whitespace +
'y'*80),
first_token_with_whitespace + chrome_leader +
'y'*(78-len_non_y) + '?=\n' +
' ' + chrome_leader + 'y'*(80-(78-len_non_y)) + '?=\n')
def test_long_filename_attachment(self):
self._test(parser.parse_content_disposition_header(
'attachment; filename="TEST_TEST_TEST_TEST'
'_TEST_TEST_TEST_TEST_TEST_TEST_TEST_TEST_TES.txt"'),
"attachment;\n"
" filename*0*=us-ascii''TEST_TEST_TEST_TEST_TEST_TEST"
"_TEST_TEST_TEST_TEST_TEST;\n"
" filename*1*=_TEST_TES.txt\n",
)
if __name__ == '__main__':
unittest.main()
|
import results
from pathlib import Path
DO_MATCH_BASIC = Path("queries/do_match4.sql").read_text() # threshold 400 - for fast matching
#DO_MATCH_SLOW = Path("queries/do_match_slower.sql").read_text() # threshold 10000 - for slower but more accurate
DO_MATCH_TRIGRAM = Path("queries/do_trigram_match2.sql").read_text() # trigram phrases - slowest but most accurate
CREATE_GEOCODER_TABLES = Path("queries/create_geocoder_tables.sql").read_text()
MAKE_ADDRESSES = Path("queries/make_addresses.sql").read_text()
CREATE_PHRASES = Path("queries/create_phrases.sql").read_text()
INVERTED_INDEX = Path("queries/phrase_inverted.sql").read_text()
CREATE_INDEXES = Path("queries/create_indexes.sql").read_text()
CREATE_TRIGRAMPHRASES = Path("queries/create_trigram_phrases.sql").read_text()
TRIGRAMINVERTED_INDEX = Path("queries/trigram_phrase_inverted.sql").read_text()
CREATE_TRIGRAMINDEXES = Path("queries/create_trigram_indexes.sql").read_text()
# todo: create a BaseMatcher class and inherit all matchers from it
class Matcher(object):
def __init__(self, db=None, how='standard', initialize=False, threshold=0.5):
"""
Initialize the matcher object. Requires a DB to connect to
"""
self.db = db
self.threshold = threshold # the threshold when chaining matchers together
self.how = how # which SQL query to use
def setup(self, phrases=['standard']):
"""
Create the inverted index and phrase tables
phrases: list of strs with the types of phrases used in the matching
"standard" - use pairs of consecutive tokens
"trigrams" - use pairs of trigrams from consecutive tokens
"""
# create phrases
print("Creating geocoder tables...")
self.db.ss(CREATE_GEOCODER_TABLES)
if 'standard' in phrases:
print('Creating phrases...')
self.db.ss(CREATE_PHRASES)
if 'trigram' in phrases:
print('Creating trigram phrases...')
self.db.ss(CREATE_TRIGRAMPHRASES)
print('Creating inverted index...')
# create inverted index
if 'standard' in phrases:
self.db.ss(INVERTED_INDEX)
self.db.ss(CREATE_INDEXES)
# create inverted index for trigram phrases
if 'trigram' in phrases:
self.db.ss(TRIGRAMINVERTED_INDEX)
self.db.ss(CREATE_TRIGRAMINDEXES)
def match(self, addresses, address_ids=None):
how = self.how
with self.db.transaction() as t:
t.ex(
"create temporary table input_addresses(address_id bigint not null, address text not null);"
)
if address_ids:
input_list = [
dict(address_id=i, address=a.upper())
for i, a in zip(address_ids, addresses)
]
else:
input_list = [
dict(address_id=i, address=a.upper())
for i, a in enumerate(addresses)
]
t.insert("input_addresses", input_list)
if how == 'standard':
answers = t.ex(DO_MATCH_BASIC)
elif how == 'trigram':
answers = t.ex(DO_MATCH_TRIGRAM)
            elif how == 'slow':
                # Note: DO_MATCH_SLOW is commented out at the top of this module;
                # it must be re-enabled there before the 'slow' option can work.
                answers = t.ex(DO_MATCH_SLOW)
else:
print(f'No query for {how} matching, using standard phrase geocoder')
answers = t.ex(DO_MATCH_BASIC)
t.ex("drop table input_addresses;")
return answers
def query(self, query):
"""
Execute a generic SQL query using the database of the matcher
"""
        rows = self.db.ss(query)  # local name avoids shadowing the imported `results` module
        return rows
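# --- Example usage (illustrative sketch only; not part of the module) ---
# How the `db` handle is constructed depends on how the `results` library is
# configured for this project, so `results.db(...)` below is an assumption,
# not a confirmed API.
#
#   db = results.db("postgresql:///geocoder")      # hypothetical connection helper
#   matcher = Matcher(db=db, how='standard')
#   matcher.setup(phrases=['standard'])             # build phrase tables and inverted index
#   answers = matcher.match(["1 MAIN ST SPRINGFIELD"], address_ids=[101])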
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import os
import sys
import json
import re
import codecs
import shutil
import platform
import importlib
Py_version = sys.version_info
Py_v_info = str(Py_version.major) + '.' + str(Py_version.minor) + '.' + str(Py_version.micro)
if Py_version >= (3,5,0) and Py_version <(3,7,0):
print(False)
if Py_version >= (3,7,0):
print(True)
if platform.system().lower() == 'windows':
print("windows")
elif platform.system().lower() == 'linux':
print("linux")
filepathlist=os.path.split(os.path.realpath(__file__))
currfilepath=filepathlist[0]
def __openconfjsonfile__():
jsonfile = currfilepath+'/conf.json'
with codecs.open(jsonfile,"r+",encoding = 'utf-8') as load_f:
load_dict = json.load(load_f)
    return load_dict
# Load the configuration once at import time; the getter functions below rely on __load_dict__.
__load_dict__ = __openconfjsonfile__()
def getignorewarning():
return __load_dict__["ignorewarndes"],__load_dict__["ignorewarnkey"]
def GetOtherIncludePackage():
# packagearr = __load_dict__['package']
# for package in packagearr:
# print(package)
return __load_dict__['package']
def GetReplacePackage():
return __load_dict__['replacepackage']
def GetCustomOptions():
return __load_dict__['customoptions']
def GetIsTocContents():
return __load_dict__['isfiguretabletoc']
def GetSensitiveword():
    # Get the array of sensitive words so the document can be searched for them
return __load_dict__['sensitivewords']
def GetTablesContent():
return __load_dict__['tables']
def GetTablerowtype():
# packagearr = __load_dict__['tables']['rowtype']
# print(packagearr)
return __load_dict__['tables']['rowtype']
def GetTableheadtype():
# packagearr = __load_dict__['tables']['headtype']
# print(packagearr)
return __load_dict__['tables']['headtype']
def GetTableHeadFontColor():
return __load_dict__['tables']['headfontcolor']
def GetTableStylesArr():
# packagearr = __load_dict__['tables']['styles']
# for package in packagearr:
# print(package)
return __load_dict__['tables']['styles']
def GetImageStyleArr():
# packagearr = __load_dict__['image']['styles']
# for package in packagearr:
# print(package)
return __load_dict__['image']['styles']
# Class that checks whether the warnings in a warning file can all be ignored
class clsIgnoreWarn:
def __init__(self,warnfile):
        self.warnfile = warnfile  # path of the warning file to parse
    # Return True if the keyword is considered present in the warning string, otherwise False
def __JudgeWarnKey(self,keystr,warnstr):
        # First check whether this is a composite keyword
        if "&&" in keystr:
            # Split the composite keyword into a list of sub-keywords
            keyls = keystr.split("&&")
            isignore = True  # assume ignorable; if any sub-keyword fails to match, the warning cannot be ignored
            for i in range(0,len(keyls)):
                if keyls[i].strip().lower() not in warnstr.lower():
                    isignore = False
break
return isignore
else:
if keystr.lower() in warnstr.lower():
                return True  # the ignore keyword appears in the warning string, so the warning can be ignored
else:
return False
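    # Example (illustrative): with keystr = "Underfull&&hbox", a warning line is
    # treated as ignorable only if it contains both "underfull" and "hbox"
    # (matching is case-insensitive); a plain keystr just has to be a substring.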
    # Parse the warning file
def parsewarnfile(self,ignorelist,ignorekey):
'''
#make latex命令产生的告警都会保存在stderr文件中,只要判断该文件中的告警是否在忽略告警里就可以了,不在忽略告警里,则肯定有错误,停止执行
'''
if not os.path.exists(self.warnfile):
return True
fs = codecs.open(self.warnfile,"r",encoding = 'utf-8')
fstr = fs.read() #先将所有内容读取到字符串中,方便正则表达式查找
fs.close
        #Find the lines that contain "WARNING:"
        pattern = re.compile(r"([\s\S].*)WARNING:([\s\S].*)", re.I | re.U)
        mobjarr = pattern.finditer(fstr)
        errstr = '' #Collects the warnings that cannot be ignored
        isNotError = True
        for mobj in mobjarr:
            amarr = mobj.group()
            if amarr == '':
                continue
            amarr = amarr.strip()
            amarr = amarr.strip('\r')
            amarr = amarr.strip('\n')
            amarr = amarr.strip() #Strip leading/trailing spaces again so no stray whitespace remains
            #print("pdfparse: \n%s" % amarr)
            #Check whether this warning is in the ignore list; if not, collect it in errstr
            if amarr.lower() not in [elem.lower() for elem in ignorelist]: #the warning is not in the ignore list
                #If the whole line does not match, check whether it contains an ignorable keyword
                isWrite = False #by default the warning cannot be ignored
                for igkey in ignorekey:
                    isWrite = self.__JudgeWarnKey(igkey,amarr)
                    if isWrite:
                        break
                if isWrite == False:
                    #Keep it as a real error
                    isNotError = False
                    #print("make latex Error description: \n%s" % amarr)
                    #fe.writelines(amarr+'\n')
                    errstr += amarr
        if errstr != '':
            #If some warnings cannot be ignored, rewrite the warning file with only those warnings
            #Opening in write mode truncates the file first, so the old content cannot get in the way
            fw = codecs.open(self.warnfile, "w",encoding = 'utf-8')
            fw.write(errstr)
            fw.close()
        else:
            #If every warning can be ignored, the warning file could be deleted.
            #That is not done for now; the original file is kept to make troubleshooting easier.
            #os.remove(self.warnfile)
            pass
        return isNotError
#This function is currently unused
def __parsepdfarg(stdout,stderr,ignorelist,ignorekey):
'''
    All output produced by the "make all-pdf" command goes only to stdout, so warnings are detected from the stdout file
'''
stdoutfilepath = currfilepath+'/'+stdout
stderrfilepath = currfilepath+'/'+stderr
if not os.path.exists(stdoutfilepath):
return True
fs = codecs.open(stdoutfilepath, "r+",encoding = 'utf-8')
fstr = fs.read() #先将所有内容读取到字符串中,方便正则表达式查找
fs.close
    #Find "latexmk"; its position marks where the make all-pdf output begins
searchstr = r"latexmk"
m = re.search(searchstr, fstr, re.I|re.U )
if m == None:
return True
spos = m.span() #获取位置
latexcont = fstr[spos[0]:len(fstr)] #获取到make all-pdf产生的内容
    #Find the lines that contain "Warning"
pattern = re.compile(r'([\s\S].*)Warning([\s\S].*)', re.I|re.U)
mobjarr = pattern.finditer(latexcont)
    #Open the stderr file so warnings can be appended to it later
fe = codecs.open(stderrfilepath, "a+",encoding = 'utf-8')
isNotError = True
for mobj in mobjarr:
amarr = mobj.group()
#print("pdfparse: %s \n" % amarr)
if amarr == '':
continue
amarr = amarr.strip()
amarr = amarr.strip('\r')
amarr = amarr.strip('\n')
amarr = amarr.strip() #再去掉首尾空格,避免多余的空格出现
        #If the warning is not in the ignore list, it has to be written to the stderr file
if amarr.lower() not in [elem.lower() for elem in ignorelist]: #如果错误不在忽略告警里面
            #If the whole line does not match, check whether it contains an ignorable keyword
isWrite = False #默认不能忽略
for igkey in ignorekey:
isWrite = __JudgeWarnKey(igkey,amarr)
if isWrite:
break;
if isWrite == False:
                #Write it to the stderr file
isNotError = False
#print("make all-pdf Error description: \n%s" % amarr)
fe.writelines(amarr)
fe.close
return isNotError
def warn_main(warnfile):
ignorelist,ignorekey = getignorewarning()
clswarn = clsIgnoreWarn(warnfile)
if not clswarn.parsewarnfile(ignorelist,ignorekey):
        #Return True when there are warnings that cannot be ignored, otherwise False
return True
return False
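#Hedged, self-contained demonstration of the ignore-warning matching above.
#It writes a throwaway warning file, checks it against an empty ignore list plus
#one composite "&&" keyword, and cleans up. The file name is illustrative only
#and the function is not called anywhere in this script.
def _demo_ignore_warn(tmpfile="./_demo_warnings.log"):
    with codecs.open(tmpfile, "w", encoding='utf-8') as f:
        f.write("LaTeX WARNING: Overfull hbox in paragraph\n")
    checker = clsIgnoreWarn(tmpfile)
    #"overfull&&hbox" is ignorable only when *both* keywords appear in the warning line
    print(checker.parsewarnfile([], ["overfull&&hbox"]))  #expected: True (all warnings ignorable)
    os.remove(tmpfile)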
class clsTableattr:
def __init__(self, tables):
self.rowtype = tables['rowtype']
self.headtype = tables['headtype']
self.headfontcolor = tables['headfontcolor']
self.tablestyles = tables['styles']
self.isname = tables['isname']
class clsModifyTex:
def __init__(self, content):
self.content = content
self.tablesattrobj = clsTableattr(GetTablesContent())
    #Add extra packages to the latex preamble
def AddPackageToTex(self):
        #Get the list of packages to add
packarr = GetOtherIncludePackage()
if len(packarr)==0:
return False;
        #If the list is not empty, the packages have to be added to the preamble of the latex file
        #Search for \usepackage{sphinx} with a regular expression and insert the packages just before it
        #The replacement string is built first and then substituted into the content with re.sub
        #On Python 3.7+ re.sub treats \u in the replacement string as an escape, so an extra backslash is prepended below
#searchstr = r'\\usepackage\[dontkeepoldnames\]{sphinx}'
searchstr = r'\\usepackage(\[\S*\]*)?{sphinx}'
matchstr = re.search(searchstr,self.content)
replacestr=""
for package in packarr:
replacestr += package+'\n'
if Py_version >= (3,7,0):
replacestr += "\\" + matchstr.group(0)
else:
replacestr += matchstr.group(0)
self.content = re.sub(searchstr, replacestr, self.content, 1, re.M | re.I|re.U)
return True
    #Add custom options; packages are inserted before the sphinx package, so the options are placed after it
def AddCustormOptionsToTex(self):
        #Get the list of custom options to add
packarr = GetCustomOptions()
if len(packarr)==0:
return False;
        #If the list is not empty, the options have to be added to the preamble of the latex file
        #Search for \usepackage{sphinx} with a regular expression and place the custom options after it
        #The replacement string is built first and then substituted into the content with re.sub
        #On Python 3.7+ re.sub treats \u in the replacement string as an escape, so an extra backslash is prepended below
searchstr = r'\\usepackage(\[\S*\]*)?{sphinx}'
matchstr = re.search(searchstr,self.content)
replacestr=""
for package in packarr:
replacestr += package+'\n'
if Py_version >= (3,7,0):
replacestr = "\\" + matchstr.group(0)+'\n'+replacestr
else:
replacestr = matchstr.group(0)+'\n'+replacestr
self.content = re.sub(searchstr, replacestr, self.content, 1, re.M | re.I|re.U)
return True
    #Add the list of figures and list of tables to the tex file
def AddOtherTocToTex(self):
        #Get the toc configuration
packarr = GetIsTocContents()
if len(packarr)==0:
return False;
replacestr = ""
if packarr['isfigurestoc']:
figlst = packarr['figurestoc']
for figstr in figlst:
replacestr += figstr + '\n'
if packarr['istablestoc']:
figlst = packarr['tablestoc']
for figstr in figlst:
replacestr += figstr + '\n'
if replacestr == "":
return
        #If there is anything to add, it has to go into the preamble of the latex file
        #Search for \sphinxtableofcontents with a regular expression and insert the lists after it
        #The replacement string is built first and then substituted into the content with re.sub
        #On Python 3.7+ re.sub treats \u in the replacement string as an escape, so an extra backslash is prepended below
searchstr = r'\\sphinxtableofcontents'
matchstr = re.search(searchstr,self.content)
if Py_version >= (3,7,0):
replacestr = "\\" + matchstr.group(0) + '\n' + replacestr
else:
replacestr = matchstr.group(0) + '\n' + replacestr
self.content = re.sub(searchstr, replacestr, self.content, 1, re.M | re.I|re.U)
return True
    #Get the strings that have to be replaced and apply them with regular expressions
def ModifyReplacePackage(self):
        #Get the replacement dictionary
redict = GetReplacePackage()
if len(redict) ==0:
return;
        #Get the list of all keys in the dictionary
keylst = list(redict)
for key in keylst:
if key == 'comment' :
continue;
keyvalue = redict[key] #得到键对应的值
            #Replace the key pattern with its value
self.content = re.sub(key, keyvalue, self.content, 0, re.M | re.I|re.U)
return;
def __ModifyFunctionBkColorByPython__(self,newcontent,linecontent,curpos,prepos):
        #For Python-generated output, add a grey background to class/function declarations and break the argument list at commas
'''
        Use a regular expression to grab the span that starts with \pysiglinewithargsret and ends with "{}",
        then add a grey background to that span and break it across lines.
'''
startmultiline = '\n\\begin{shaded}\n\\pysigstartmultiline\n'
stopmultiline = '\n\\pysigstopmultiline\n\\end{shaded}'
#searchstr = r'(?=\\pysiglinewithargsret).*(?<={})'
searchstr=r'(?=\\pysiglinewithargsret).*'
match = re.search(searchstr, linecontent, re.I|re.U)
if match != None:
#print('modstr = %s' % match.group())
            #Rebuild linecontent with the shading commands inserted
pos = match.span()
newstr = '\n' + linecontent[:match.start()] + startmultiline + match.group().replace(r",",r",\\") + stopmultiline + linecontent[match.end():len(linecontent)]
            #Accumulate the content that precedes this replacement
if len(prepos)==0:
newcontent = self.content[:curpos[0]-1] + newstr
else:
newcontent += self.content[prepos[1]:curpos[0]-1] + newstr
return newcontent
    #Replace substrings within a line, starting from a given marker string
'''
    srcstr: the original string
    posstr: replacement starts at the first occurrence of this marker string
    oldstr: the substring to be replaced
    newstr: the replacement substring
'''
def __strreplacepos(self,srcstr,posstr,oldstr,newstr):
        #Find the starting position of the marker string
pos = srcstr.find(posstr)
if pos == -1:
return "" #如果查找的字符串没有找到,则返回空。
        #Split the string at the position that was found
startstr = srcstr[0:pos]
beforestr = srcstr[pos:len(srcstr)]
        #Do the replacement in the tail part only
afterstr = beforestr.replace(oldstr,newstr)
return startstr+afterstr
    #Give function signatures a shaded background and break their argument lists across lines
def ModifyFunctionBackColor(self):
'''
        * Function signatures generated by sphinx for C/C++ are wrapped by these three lines:
        \pysigstartmultiline
        \pysiglinewithargsret{xxxxx}
        \pysigstopmultiline
        A regular expression locates these three lines and the replacement is applied there.
        Note:
        For this to work the latex preamble must include the framed and color packages and define the colour:
        \definecolor{shadecolor}{RGB}{220,220,220}
        If shadecolor is not defined, this step fails.
        * In latex generated for Python, function and class declarations carry the \pysiglinewithargsret command
        but no pysigstartmultiline. So pysigstartmultiline/pysigstopmultiline are added around the end of that
        statement first, then \begin{shaded} and \end{shaded}. The end of the statement is detected by "{}".
'''
pythontype = False
newcontent = ""
        prepos=[] #Span of the previous match, used when stitching the rewritten pieces back together
searchstr = r'^(?=.*\\pysiglinewithargsret).*$'
m = re.finditer(searchstr, self.content, re.M|re.I|re.U)
for match in m:
linecontent = match.group()
            #Check whether pysigstartmultiline/pysigstopmultiline are already present; if not they must be added before shading
multilen=len(r'\pysigstartmultiline')
startmultistr = self.content[match.start()-multilen-1:match.start()]
#print('startmultistr : %s' % startmultistr)
            #If the previous line is not \pysigstartmultiline, treat it as Python-style output and add the marker
if startmultistr.strip() != r'\pysigstartmultiline':
#print('is python')
newcontent = self.__ModifyFunctionBkColorByPython__(newcontent,linecontent,match.span(),prepos)
prepos = match.span()
pythontype = True
else:
#tablestr = match.groups()
                #Accumulate the content that precedes this replacement
if len(prepos)==0:
newcontent = self.content[0:match.start()-len(r'\pysigstartmultiline')-1]
else:
newcontent += self.content[prepos[1]+len(r'\pysigstopmultiline')+1:match.start()-len(r'\pysigstartmultiline')-1]
                #With templates, pysiglinewithargsret is not necessarily at the start of the line, so replace commas (adding forced line breaks) from pysiglinewithargsret onwards
afterstr=self.__strreplacepos(linecontent,r"pysiglinewithargsret",r",",r",\\")
                #Build the replacement string
newstr = '\\begin{shaded}\n' + '\\pysigstartmultiline\n' + afterstr +'\n\\pysigstopmultiline\n'+'\\end{shaded}'
                #Append the replacement to the rewritten content
newcontent += newstr
prepos = match.span()
pythontype = False
if len(prepos) > 0:
if not pythontype:
self.content = newcontent + self.content[prepos[1]+len(r'\pysigstopmultiline')+1:len(self.content)]
else:
self.content = newcontent + self.content[prepos[1]+1:len(self.content)]
def ModifyTablesAttributes(self):
        #Adjust the attributes of every table in the document
newcontent = self.content
searchstr = r'(\\begin{savenotes}\\sphinxattablestart|\\begin{savenotes}\\sphinxatlongtablestart)([\s\S]*?)(\\sphinxattableend\\end{savenotes}|\\sphinxatlongtableend\\end{savenotes})'
m = re.finditer(searchstr, self.content, re.M|re.I|re.U)
for match in m:
oldtablestr = match.group()
tablestr = match.groups()
caption_dict = self.__CreateTableCaptionDict(self.tablesattrobj.tablestyles)
if len(caption_dict ) > 0 :
newtableattr = self.__ModifySingleTableattr(tablestr[0]+tablestr[1]+tablestr[2],caption_dict ) #tablestr也是3个内容的数组,因为正则表达式被分为了3组,只取中间分组的内容。
                #Reassemble the content with the rewritten table
newcontent = newcontent.replace(tablestr[0]+tablestr[1]+tablestr[2], newtableattr)
self.content = newcontent
def __CreateTableCaptionDict(self, tablestylesarr):
        #Build a dict keyed by caption, value = the style attributes for that table
cap_dict = {}
for tablestyle_dict in tablestylesarr:
captionarr = tablestyle_dict['caption']
            #The caption may be a comma-separated list, so split it first
captionlist = captionarr.split(",")
for caption in captionlist:
cap_dict[caption] = tablestyle_dict #以caption为key重新生成字典,便于查找
return cap_dict
def __ModifySingleTableattr(self, singletablecontent, caption_dict):
        #Adjust the attributes of a single table
        #Use a regular expression to find the caption inside this table
        #The caption decides which style entry applies
new_singletablecontent = singletablecontent
if self.tablesattrobj.isname:
searchstr = r'.*\\label.*?:(?P<caption>[\s\S].*)}}.*'
else:
searchstr = r'(\\sphinxcaption|\\caption){(?P<caption>[\s\S]*?)}'
matchcaption = re.search(searchstr, singletablecontent, re.M | re.I|re.U)
if matchcaption != None:
tablecaption = matchcaption.group('caption') #得到caption的值
else:
tablecaption = ''
if tablecaption in caption_dict:
            #Apply the configured attributes to this table
tablestyle_dict = caption_dict[tablecaption]
            #Convert the table to a longtable when configured
new_singletablecontent = self.__StartModifyTableAttr(singletablecontent,
tablestyle_dict['isLongTable'],
tablestyle_dict['isCusHead'])
            #Style the first column of a vertical table
if tablestyle_dict['isVertical']==True:
new_singletablecontent = self.__ModifyVerticalTable(new_singletablecontent)
else:
            #Apply only the common table attributes
new_singletablecontent = self.__StartModifyTableAttr(singletablecontent, False)
if new_singletablecontent == '':
new_singletablecontent = singletablecontent
return new_singletablecontent
def __StartModifyTableAttr(self, singletablecontent, islongtable,isCusHead=True):
        #Apply the common table attributes
searchstr = r'(\\begin{tabular}|\\begin{tabulary})(\[[a-z]\]|{\\linewidth}\[[a-z]\])([\s\S].*)'
        #Split the content first so the common attributes can be inserted
        #re.split keeps the regex groups, so the pattern above splits the content into separate strings
        #Together with the head and tail this yields 5 strings: change part 1 to \begin{longtable}, drop part 2, keep part 3 unchanged
splittable = re.split(searchstr, singletablecontent,0, re.M | re.I|re.U )
if splittable == None or len(splittable) < 5:
            #Then handle the longtable variant
searchstr = r'\\begin{longtable}([\s\S].*)'
            #Split the content first so the common attributes can be inserted
            #re.split keeps the regex groups, so the pattern above splits the content into separate strings
            #Together with the head and tail this gives the pieces needed to rebuild the longtable
splittable = re.split(searchstr, singletablecontent,0, re.M | re.I|re.U )
if len(splittable) < 3 or isCusHead==False:
                #Expect at least 3 parts; otherwise the content is not what we expect, so leave it untouched
return singletablecontent
newtable4 = self.__ModifyLongTableHead(splittable[2], self.tablesattrobj.headtype)
singletablecontent = splittable[0]+r'\begin{longtable}'+splittable[1]+newtable4 #begin{longtable}必须再加上,因为Python并不认为它是正则表达式,因此不再分组里面第0个分组为空。
return singletablecontent
        #After splitting, splittable should contain 5 strings, which makes it easy to insert the common attributes
if self.tablesattrobj.rowtype != '':
splittable[0] += self.tablesattrobj.rowtype + '\n'
if isCusHead == True:
            #Make the header row font white and bold
newtable4 = self.__ModifyTableHead(splittable[4], self.tablesattrobj.headtype)
else:
newtable4 = splittable[4]
singletablecontent = splittable[0]+splittable[1]+splittable[2]+splittable[3]+newtable4
if islongtable: #如果为长表格要做长表格的替换
singletablecontent = self.__ModifyTableLongHeadAndTail(singletablecontent)
return singletablecontent
def __ModifyTableLongHeadAndTail(self,singletablecontent):
        #Both the head and the tail of a longtable must be replaced, so this is a separate helper; otherwise the table counter would advance by 2
        #Replace the first line with the longtable form
searchstr = r'(\\begin{savenotes}\\sphinxattablestart)'
splittable = re.search(searchstr, singletablecontent,re.M | re.I|re.U )
        #Switch to the longtable start command
tablefirstline = re.sub(r'\\sphinxattablestart',r'\\sphinxatlongtablestart', splittable.group(0), re.M|re.I|re.U)
        #Replace the second line with the longtable form
searchstr = r'(\\begin{tabular}|\\begin{tabulary})(\[[a-z]\]|{\\linewidth}\[[a-z]\])([\s\S].*)'
splittable = re.search(searchstr, singletablecontent,re.I|re.U )
        #Remember where the table head ends
headlastpos = splittable.end()
tablesecondline = re.sub(r'\\begin{tabular}|\\begin{tabulary}',r'\\begin{longtable}', splittable.group(0), re.I|re.U)
tablesecondline = re.sub(r'\{\\linewidth\}',r'', tablesecondline, re.I|re.U)
        #Find the caption
searchstr = r'\\sphinxcaption([\s\S].*)'
splittable = re.search(searchstr, singletablecontent, re.I|re.U)
longcaption = re.sub(r'\\sphinxcaption',r'\\caption', splittable.group(0), re.I|re.U)
        #Add the longtable-specific command
longcaption += r"\\*[\sphinxlongtablecapskipadjust]"
        #Build the head part of the longtable
longhead = tablefirstline + tablesecondline + '\n' + r'\sphinxthelongtablecaptionisattop' + '\n'+ longcaption+'\n'
        #Replace the table tail
newtablecontent = singletablecontent[headlastpos:len(singletablecontent)]
endtable = re.sub(r'(\\end{tabular}|\\end{tabulary})',r'\\end{longtable}', newtablecontent, re.M | re.I|re.U)
endtable = re.sub(r'\\par',r'', endtable, re.M | re.I|re.U)
endtable = re.sub(r'\\sphinxattableend',r'\\sphinxatlongtableend', endtable, re.M | re.I|re.U)
        #Assemble the new longtable content
singletablecontent = longhead + endtable
return singletablecontent
    #Add an [h] placement attribute so tables do not float and stay where they are written
def __AddHAttrFroTable(self,content):
searchstr = r'(\\begin{tabular}|\\begin{tabulary}|\\begin{longtable})({\\linewidth})?(\[(?P<attr>[a-z]{1,4})\])?([\s\S].*)'
m = re.search(searchstr, content, re.M|re.I|re.U )
attrcontent = m.group('attr')
posarr = m.span()
if m.group(3) == '': #group(3)是[htp]的组合,如果没有表格属性则添加上表格属性
newcontent = content[0:posarr[0]]+m.group(1)+m.group(2)+r'[h]'+m.group(5)+content[posarr[1]:len(content)]
else:
            replacestr = m.group(4) #the existing placement attribute
            #Drop any 'p' placement so it does not conflict
            replacestr = replacestr.replace('p','')
            #Prepend 'h' so the table stays at its current position instead of floating
            replacestr = 'h'+replacestr
newcontent = content[0:posarr[0]]+m.group(1)+m.group(2)+'['+replacestr+']'+m.group(5)+content[posarr[1]:len(content)]
#print('newcontent: %s' % newcontent)
return newcontent
    #Restyle the header row of a longtable generated by sphinx
def __ModifyLongTableHead(self,content,headtype):
        #Find the first row (between the first pair of \hline)
searchstr = r'\\hline(?P<content>[\s\S]*?)\\hline'
pattern = re.compile(searchstr,re.M | re.I|re.U)
matchiter = pattern.finditer(content)
posarr = []
i = 0
for m in matchiter:
if i > 1:
break;
posarr.append([])
posarr[i] = m.span() #保存起始位置和结束位置,便于组成新的内容
if i ==0:
newcontent = content[0:posarr[i][0]]
else:
newcontent = newcontent+content[posarr[i-1][1]:posarr[i][0]]
newcontent += r'\hline\rowcolor'+headtype
headcontent = m.group(1) #匹配到的第一个即为表头内容
if 'multicolumn' in headcontent:
return content
headlist = []
if r'\sphinxstyletheadfamily' in headcontent:
pattern = re.compile(r'(?<=\\sphinxstyletheadfamily)(?P<value>[\s\S]*?)(?=(\\unskip|&)|\\\\)', re.M | re.I|re.U)
aftercontent = headcontent
mobjarr = pattern.finditer(aftercontent)
preposlist = []
for mobj in mobjarr:
amarr = mobj.group('value')
curposlist = mobj.span()
                #Substitute the styled header cells
fontcolor = self.tablesattrobj.headfontcolor
                #Strip outer spaces first, otherwise the CR/LF characters cannot be stripped
amarr = amarr.strip()
amarr = amarr.strip('\r')
amarr = amarr.strip('\n')
amarr = amarr.strip() #再去掉首尾空格,避免多余的空格出现
if amarr == '':
continue
fontcolor = fontcolor.replace('{}','{'+ amarr+'}',1)
if len(preposlist) > 0:
headlist.append(headcontent[preposlist[1]:curposlist[0]])
else:
headlist.append(headcontent[0:curposlist[0]])
headlist.append(fontcolor)
preposlist = curposlist
headlist.append(headcontent[preposlist[1]:len(headcontent)]) #把最后一个字符串加上
headcontent = ''
for prelist in headlist:
headcontent = headcontent + prelist + '\n'
newcontent += headcontent+r'\hline'
i +=1
newcontent += content[posarr[i-1][1]:len(content)]
return newcontent
def __ModifyTableHead(self, content, headtype):
        #Find the first row (between the first pair of \hline); this is the header row
searchstr = r'\\hline(?P<content>[\s\S]*?)\\hline'
m = re.search(searchstr, content, re.M|re.I|re.U )
headcontent = m.group(1) #匹配到的第一个即为表头内容
posarr = m.span(1) #保存起始位置和结束位置,便于组成新的内容
if 'multicolumn' in headcontent:
return content
if r'\sphinxstyletheadfamily' in headcontent:
pattern = re.compile(r'(?<=\\sphinxstyletheadfamily)(?P<value>[\s\S]*?)(?=(\\unskip|&)|\\\\)', re.M | re.I|re.U)
aftercontent = headcontent
#pattern = re.compile(r'(?<=\\sphinxstylethead{\\sphinxstyletheadfamily)([\s\S]*?)(?=\\unskip}\\relax &)', re.M | re.I|re.U)
else:
aftercontent = headcontent.replace(r'\\','&',1)
pattern = re.compile(r'(?P<value>[\s\S]*?)(&\s{1})', re.M | re.I|re.U)
#pattern = re.compile(r'[\s\S]*?&|\\unskip', re.M | re.I|re.U)
mobjarr = pattern.finditer(aftercontent)
headlist = []
preposlist = []
for mobj in mobjarr:
amarr = mobj.group('value')
curposlist = [mobj.start(),mobj.start()+len(amarr)]
            #Substitute the styled header cells
fontcolor = self.tablesattrobj.headfontcolor
            #Strip outer spaces first, otherwise the CR/LF characters cannot be stripped
amarr = amarr.strip()
amarr = amarr.strip('\r')
amarr = amarr.strip('\n')
amarr = amarr.strip() #再去掉首尾空格,避免多余的空格出现
if amarr == '':
continue
fontcolor = fontcolor.replace('{}','{'+ amarr+'}',1)
if len(preposlist) > 0:
headlist.append(headcontent[preposlist[1]:curposlist[0]])
else:
headlist.append(headcontent[0:curposlist[0]])
headlist.append(fontcolor)
preposlist = curposlist
headlist.append(headcontent[preposlist[1]:len(headcontent)]) #把最后一个字符串加上
headcontent = ''
for prelist in headlist:
headcontent = headcontent + prelist + '\n'
newcontent = content[0:posarr[0]]+r'\rowcolor'+headtype+'\n'+headcontent+content[posarr[1]:len(content)]
return newcontent
    #Style the first column of a vertical table
def __ModifyVerticalTable(self, singletablecontent):
        #Find the content of every row
searchstr = r'(?<=\\hline)(?P<content>[\s\S]*?)(?=\\hline)'
pattern = re.compile(searchstr,re.M | re.I|re.U)
matchiter = pattern.finditer(singletablecontent)
posarr=[] #保存位置,便于组合
i = 0
for m in matchiter:
posarr.append([])
posarr[i] = m.span()
if i ==0:
newcontent = singletablecontent[0:posarr[i][0]]
else:
newcontent = newcontent+singletablecontent[posarr[i-1][1]:posarr[i][0]]
cellcontent = m.group('content') #匹配到的第一个即为表头内容
            #Render the first cell as white text on the header background colour
firstcellcontent = self.__ModifyFirstColumnType(cellcontent)
newcontent += firstcellcontent
i+=1
newcontent += singletablecontent[posarr[i-1][1]:len(singletablecontent)]
return newcontent
    #Style the content of the first cell
def __ModifyFirstColumnType(self,cellcontent):
new_cellcontent = ""
if r'\sphinxstyletheadfamily' in cellcontent:
searchstr = r'(?<=\\sphinxstyletheadfamily)(?P<value>[\s\S]*?)(?=(\\unskip|&)|\\\\)'
aftercontent = cellcontent.strip()
aftercontent = aftercontent.strip('\r')
aftercontent = aftercontent.strip('\n')
aftercontent = aftercontent.strip()
mobj = re.search(searchstr, aftercontent, re.M|re.I|re.U ) #匹配到的第一个既是需要修改的内容
            #Change the font colour
amarr = mobj.group('value')
posarr = mobj.span()
new_cellcontent = aftercontent[0:posarr[0]]+'\n'+r'\cellcolor'+self.tablesattrobj.headtype
            #Substitute the styled cell content
fontcolor = self.tablesattrobj.headfontcolor
            #Strip outer spaces first, otherwise the CR/LF characters cannot be stripped
amarr = amarr.strip()
amarr = amarr.strip('\r')
amarr = amarr.strip('\n')
amarr = amarr.strip() #再去掉首尾空格,避免多余的空格出现
            #if amarr == '':
            #    continue
            if r'\textbf' in amarr or r'\textcolor' in amarr:
                return cellcontent
fontcolor = fontcolor.replace('{}','{'+ amarr+'}',1)
new_cellcontent +=r'{'+fontcolor + '}\n' + aftercontent[posarr[1]:len(aftercontent)]
else:
aftercontent = cellcontent.replace(r'\\','&',1)
            #Strip outer spaces and newline characters
aftercontent = aftercontent.strip()
aftercontent = aftercontent.strip('\r')
aftercontent = aftercontent.strip('\n')
aftercontent = aftercontent.strip()
tmplist = re.split(r'&',aftercontent)
preposlist = 0
            #Only the first cell is modified
onelist = tmplist[0]
            #Substitute the styled cell content
fontcolor = self.tablesattrobj.headfontcolor
            #Strip outer spaces first, otherwise the CR/LF characters cannot be stripped
onelist = onelist.strip()
onelist = onelist.strip('\r')
onelist = onelist.strip('\n')
onelist = onelist.strip() #再去掉首尾空格,避免多余的空格出现
            #if onelist == '':
            #    continue
            if r'\textbf' in onelist or r'\textcolor' in onelist:
                return cellcontent
new_cellcontent = '\n'+r'\cellcolor'+self.tablesattrobj.headtype+r'{'+fontcolor.replace('{}','{'+ onelist+'}',1)+r'}'+'\n'
for i in range(1,len(tmplist)):
if len(tmplist[i])>0:
new_cellcontent += '&' +tmplist[i]
            new_cellcontent+=r'\\' #Re-append the trailing \\ that was replaced earlier
return new_cellcontent + '\n'
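#Hedged, standalone illustration of the preamble-insertion idea used by
#AddPackageToTex above: extra \usepackage lines are placed directly before the
#sphinx package. It works on a throwaway string and uses slicing instead of
#re.sub, so it sidesteps the backslash-escaping concerns mentioned above.
#The function is illustrative only and not called anywhere in this script.
def _demo_preamble_insert():
    preamble = r"\usepackage[dontkeepoldnames]{sphinx}"
    extra = ["\\usepackage{framed}", "\\usepackage{color}"]
    match = re.search(r'\\usepackage(\[\S*\]*)?{sphinx}', preamble)
    new_preamble = preamble[:match.start()] + "\n".join(extra) + "\n" + preamble[match.start():]
    print(new_preamble)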
# Open the Makefile (or make.bat) to find the source and build directories
def OpenMakefile():
global source_dir
global build_dir
source_dir = ''
build_dir = ''
try:
if platform.system().lower() == 'windows':
with open('make.bat',"r") as f:
fstr = f.read()
                #Use regular expressions to find the source and build directory paths
searchstr = r"set *SOURCEDIR *= *(\S+)"
m = re.search(searchstr, fstr, re.M|re.I|re.U )
source_dir = m.group(1) #匹配到的第一个即为source所在目录
searchstr = r"set *BUILDDIR *= *(\S+)"
m = re.search(searchstr, fstr, re.M|re.I|re.U )
build_dir = m.group(1) #匹配到的第一个即为build所在目录
else:
with open('Makefile',"r") as f:
fstr = f.read()
                #Use regular expressions to find the source and build directory paths
searchstr = r"SOURCEDIR *= *(\S+)"
m = re.search(searchstr, fstr, re.M|re.I|re.U )
source_dir = m.group(1) #匹配到的第一个即为源所在目录
searchstr = r"BUILDDIR *= *(\S+)"
m = re.search(searchstr, fstr, re.M|re.I|re.U )
build_dir = m.group(1) #匹配到的第一个即为源所在目录
except Exception as e:
print(e)
return
def GetLatex_documents():
global source_dir
if source_dir == '':
return
    #Build the path to the conf.py configuration file
if source_dir == '.':
confdir = './conf.py'
else:
confdir = './' + source_dir +'/conf.py'
conffile = os.path.abspath(confdir)
    #Open the conf.py file
with codecs.open(conffile,"r+",encoding='utf-8') as f:
fstr = f.read()
list = []
versioninfo = GetVersionInfo(fstr)
fileprefile = GetFilePreInfo(fstr)
if versioninfo=="" or fileprefile=="":
        #Use a regular expression to extract the latex_documents entry
searchstr = r"latex_documents *= *\[([\s\S]*?)\]"
m = re.search(searchstr, fstr, re.M|re.I|re.U )
latex_documents = m.group(1) #匹配到的第一个即为源所在目录
        #Split into a 2-dimensional list so multiple documents are supported
list = latex_documents.split(")")
for i in range(len(list)):
if IsComment(list[i]):
list[i]= list[i].split(",")
list.pop()
else:
        #Create a 2-dimensional list
for i in range(1):
list.append([])
for j in range(2):
list[i].append("")
list[0][0] = "comment" #为了兼容文件解析内容,进行补位。
list[0][1] = '"' + fileprefile + versioninfo + '.tex"' #为了兼容解析过程,添加双引号。
return list
#Get the version information
def GetVersionInfo(fstr):
if releasever != '':
return releasever
versioninfo = ""
searchstr = r"version *= *(u*)'(.*?)'"
m = re.search(searchstr, fstr, re.I)
if m != None:
versioninfo = m.group(2) #匹配到的第一个即为源所在目录
print("version = " + versioninfo)
return versioninfo
#Get the file-name prefix
def GetFilePreInfo(fstr):
filepre = ""
searchstr = r"curfnpre *= *(u*)'(.*?)'"
m = re.search(searchstr, fstr, re.M|re.I|re.U )
if m != None:
filepre = m.group(2) #匹配到的第一个即为源所在目录
print("filepre = " + filepre)
return filepre
#Check whether the line is a comment; note that it returns True for lines that are NOT comments (kept to match the callers)
def IsComment(instr):
    if not instr.strip():
        return False
    rule = re.compile('^#.*$')
    if rule.match(instr.strip()) is None:
        return True
    else:
        return False
#Extract the content between single or double quotes with a regular expression
def getquomarkcontent(strarr):
    #Find the text wrapped in double or single quotes
print("texfile="+strarr)
searchstr = r"[\"|'](.*?)[\"|']"
m = re.search(searchstr, strarr, re.M|re.I|re.U )
if m is None:
return None
return m.group(1).strip() #匹配到的第一个即为源所在目录
def Modifylatex_main(build_dir,latexdocumentslst):
global __load_dict__
#__load_dict__ = __openconfjsonfile__()
#print('parsejson=' + build_dir)
#print('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx')
#print(latexdocumentslst)
doclen = len(latexdocumentslst)
for i in range(0,doclen):
        #Get the latex output directory
latexpath = build_dir
        #Copy the chapter background image into the latex directory; it must sit next to this script
if os.path.exists('./chapterbkpaper.pdf'):
shutil.copy('./chapterbkpaper.pdf',latexpath)
        #Get the relative path of the tex file
filename = latexdocumentslst[i][1]
#print('filename='+filename)
if filename is None:
continue
texfilepath = latexpath + '/' + filename
        #Convert the relative path to an absolute path
texfile = os.path.abspath(texfilepath)
if not os.path.exists(texfile):
continue
#print('texfile=' + texfile)
        fo = codecs.open(texfile, "r+",encoding = 'utf-8')
        texcontent = fo.read()
        fo.close()
        #Build the helper object that rewrites the tex file
        ModTexobj = clsModifyTex(texcontent)
ModTexobj.AddPackageToTex()
ModTexobj.AddOtherTocToTex()
ModTexobj.AddCustormOptionsToTex()
ModTexobj.ModifyReplacePackage()
ModTexobj.ModifyFunctionBackColor()
ModTexobj.ModifyTablesAttributes()
        fw = codecs.open(texfile, "w+",encoding = 'utf-8')
        fw.write(ModTexobj.content)
        fw.close()
def parseargv():
global source_dir
global latex_dir
if len(sys.argv) <= 1:
return
for i in range(1,len(sys.argv)):
param = sys.argv[i]
if param=='-c': #解析conf.py所在目录
source_dir = sys.argv[i+1] #保存相对路径
print("argv source=" + source_dir)
i = i+2
elif param=='-l': #保存输出的latex目录
latex_dir = sys.argv[i+1] #保存相对路径
i = i+2
print("latex_dir = "+latex_dir)
source_dir = '' #directory that contains the documentation sources
build_dir = '' #directory that contains the build output
releasever = ''
latex_dir = ''
__load_dict__ = __openconfjsonfile__()
if __name__ == '__main__':
    parseargv() #Parse the command line arguments
OpenMakefile()
    latex_documents = GetLatex_documents() #list describing the latex documents
#__load_dict__ = __openconfjsonfile__()
latexdir=""
if latex_dir == '':
latexdir = '/latex/'
else:
latexdir = '/' + latex_dir + '/'
doclen = len(latex_documents)
for i in range(0,doclen):
        #Get the latex output directory
latexpath = './' + build_dir + latexdir
        #Copy the chapter background image into the latex directory; it must sit next to this script
if os.path.exists('./chapterbkpaper.pdf'):
shutil.copy('./chapterbkpaper.pdf',latexpath)
        #Get the relative path of the tex file
if getquomarkcontent(latex_documents[i][1]) is None:
continue
texfilepath = latexpath + getquomarkcontent(latex_documents[i][1])
        #Convert the relative path to an absolute path
texfile = os.path.abspath(texfilepath)
if not os.path.exists(texfile):
continue
        fo = codecs.open(texfile, "r+",encoding = 'utf-8')
        texcontent = fo.read()
        fo.close()
        #Build the helper object that rewrites the tex file
        ModTexobj = clsModifyTex(texcontent)
ModTexobj.AddPackageToTex()
ModTexobj.AddOtherTocToTex()
ModTexobj.AddCustormOptionsToTex()
ModTexobj.ModifyReplacePackage()
ModTexobj.ModifyFunctionBackColor()
ModTexobj.ModifyTablesAttributes()
        fw = codecs.open(texfile, "w+",encoding = 'utf-8')
        fw.write(ModTexobj.content)
        fw.close()
|
# -*- coding: utf-8 -*-
"""
@contact: lishulong.never@gmail.com
@time: 2018/4/8 下午2:55
"""
|
import pytz
import datetime
from django.http import JsonResponse
class TimeCheckMiddleware(object):
    tz = pytz.timezone('Asia/Seoul')
    # Keep these naive: time_check() compares them against the naive local time
    # returned by datetime.now(tz).time(), and comparing naive and aware times
    # raises TypeError.
    service_time = [
        datetime.time(21, 0, 0),
        datetime.time(6, 0, 0)
    ]
def __init__(self, response):
self.get_response = response
def __call__(self, request):
response = self.get_response(request)
if self.time_check() or request.path != '/':
return response
else:
return JsonResponse({'info': 'not a service time'})
def time_check(self):
now = datetime.datetime.now(self.tz).time()
if now > self.service_time[0] or now < self.service_time[1]:
return True
return False
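# Hedged illustration of the wrap-around window used by time_check(): the service
# window runs from 21:00 through 06:00 the next morning, so a moment lies inside
# it when it is after the start OR before the end. Running this file directly
# still requires Django and pytz, since the module imports them above.
if __name__ == "__main__":
    window_start, window_end = datetime.time(21, 0), datetime.time(6, 0)
    for probe in (datetime.time(23, 30), datetime.time(5, 59), datetime.time(12, 0)):
        inside = probe > window_start or probe < window_end
        print(probe, "inside service window:", inside)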
|
from django.apps import AppConfig
class BlogConfig(AppConfig):
name = 'blog'
verbose_name = '博客'
def ready(self):
from .cache import (on_article_init,
on_article_save,
on_article_delete)
|
"""
helper functions for pybullet
"""
import pybullet as p
def add_goal(bullet_client, position, radius=0.05, color=[0.0, 1.0, 0.0, 1]):
collision = -1
visual = bullet_client.createVisualShape(p.GEOM_SPHERE, radius=radius,
rgbaColor=color)
goal = bullet_client.createMultiBody(baseMass=0,
baseCollisionShapeIndex=collision,
baseVisualShapeIndex=visual,
basePosition=position)
return goal
def add_collision_goal(bullet_client, position, radius=0.05, color=[0.0, 1.0, 0.0, 1]):
collision = bullet_client.createCollisionShape(p.GEOM_SPHERE, radius=0.01)
visual = -1
goal = bullet_client.createMultiBody(baseMass=0,
baseCollisionShapeIndex=collision,
baseVisualShapeIndex=visual,
basePosition=position)
return goal
def add_obstacle_ball(bullet_client, center, radius=0.1, color=[0.4, 0.4, 0.4, 1]):
    # Use the passed-in client (not the global module) so the helper works with
    # multiple physics servers, matching add_goal/add_collision_goal above.
    collision = bullet_client.createCollisionShape(p.GEOM_SPHERE, radius=radius)
    visual = bullet_client.createVisualShape(p.GEOM_SPHERE, radius=radius,
                                             rgbaColor=color)
    obstacle = bullet_client.createMultiBody(baseMass=0,
                                             baseCollisionShapeIndex=collision,
                                             baseVisualShapeIndex=visual,
                                             basePosition=center)
    return obstacle
def add_obstacle_cylinder(bullet_client, center, radius=0.1, length=0.1, color=[0.4, 0.4, 0.4, 1]):
    # Same consistency fix as above: create the shapes through bullet_client.
    collision = bullet_client.createCollisionShape(p.GEOM_CYLINDER, radius=radius, height=length)
    visual = bullet_client.createVisualShape(p.GEOM_CYLINDER, radius=radius, length=length,
                                             rgbaColor=color)
    obstacle = bullet_client.createMultiBody(baseMass=0,
                                             baseCollisionShapeIndex=collision,
                                             baseVisualShapeIndex=visual,
                                             basePosition=center)
    return obstacle
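# A minimal, hedged usage sketch: connect headlessly, drop a goal marker and an
# obstacle into the world, then disconnect. Positions and sizes are illustrative.
# The module object `p` doubles as the bullet client here, which works because it
# exposes the same createVisualShape/createMultiBody API as a BulletClient.
if __name__ == "__main__":
    p.connect(p.DIRECT)
    goal = add_goal(p, position=[0.5, 0.0, 0.5])
    ball = add_obstacle_ball(p, center=[0.2, 0.2, 0.3], radius=0.1)
    print("created body ids:", goal, ball)
    p.disconnect()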
|
# Licensed under the terms of http://www.apache.org/licenses/LICENSE-2.0
# Author (©): Alvaro del Castillo
from mcpi.minecraft import Minecraft
from minecraftstuff import MinecraftDrawing
class Server:
"""
A Server manages the connection with the Minecraft server.
    Every World must have a Server in which to build the World.
"""
def __init__(self, host="localhost", port="4711"):
self._host = host
self._port = port
self._mc = Minecraft.create(address=host, port=port)
self._drawing = MinecraftDrawing(self._mc)
@property
def drawing(self):
""" Connection to MinecraftDrawing (only used in Things built with MinecraftDrawing)"""
return self._drawing
@property
def mc(self):
""" Connection to Minecraft """
return self._mc
|
from .db import get_connection, get_data_dict
from flask import render_template, url_for, make_response, session
import requests
import traceback
from datetime import datetime
from time import sleep, process_time
from .config import load_config, is_debug
from .etsy import send_etsy_post_request
from .sources.etsy import Etsy
from .sources.webflow import Webflow
from .shipping import create_shipping_label, create_manifest
import logging
logger = logging.getLogger()
def do_orders(group_id=None):
context = {'orders': True}
conn = get_connection()
cur = conn.cursor()
where_clause = ""
clauses = []
if group_id:
clauses.append("group_id = '%s'" % group_id)
else:
clauses.append("r.status <> 2")
if clauses:
        where_clause = 'WHERE %s' % ' \n AND '.join(clauses)
query = """
WITH unique_packages as (
SELECT r.receipt_id,
rp.package_id,
MAX(rp.filename) filename,
MAX(rp.tracking_pin) tracking_pin,
MAX(rp.label_url) label_url
FROM receipts r
JOIN receipt_packages rp
ON r.receipt_id = rp.receipt_id
GROUP BY r.receipt_id, rp.package_id
), tracking_pins as (
SELECT receipt_id,
MIN(label_url) label_url,
MIN(tracking_pin) tracking_pin,
COUNT(filename) num_completed_labels,
COUNT(1) total_num_labels
FROM unique_packages
GROUP BY receipt_id
)
SELECT r.*,
CASE r.status
WHEN 0 THEN 'pending'
WHEN 1 THEN 'printed'
WHEN 2 THEN 'completed'
ELSE ''
END html_status,
CASE r.status
WHEN 0 THEN 'secondary'
WHEN 1 THEN 'warning'
WHEN 2 THEN 'success'
ELSE ''
END css_status,
pq.printer_id,
pq.sku,
pq.status item_status,
pq.progress,
(CASE WHEN tp.num_completed_labels = tp.total_num_labels THEN tp.tracking_pin ELSE NULL END) tracking_pin,
tp.label_url,
tp.num_completed_labels,
tp.total_num_labels
FROM receipts r
JOIN printer_queue pq
ON r.receipt_id = pq.receipt_id
JOIN tracking_pins tp
ON r.receipt_id = tp.receipt_id
%s
ORDER BY r.order_time ASC
""" % where_clause
receipts = get_data_dict(cur, query)
for receipt in receipts:
receipt['order_date'] = datetime.fromtimestamp(receipt['order_time']).isoformat(sep=' ')
grouped_receipts = {}
pq_keys = ['printer_id', 'sku', 'item_status', 'progress']
for receipt in receipts:
receipt_id = receipt['receipt_id']
if receipt_id not in grouped_receipts:
grouped_receipts[receipt_id] = {k: receipt[k] for k in receipt if k not in pq_keys}
grouped_receipts[receipt_id]['queue'] = []
queue_item = {k: receipt[k] for k in receipt if k in pq_keys}
grouped_receipts[receipt_id]['queue'].append(queue_item)
context['receipts'] = list(grouped_receipts.values())
query = """
SELECT group_id
FROM manifests
WHERE status = 0
ORDER BY group_id DESC
"""
manifests = get_data_dict(cur, query)
context['manifests'] = manifests
context['selected_manifest'] = group_id
if group_id:
query = """
WITH receipt_package_grouped as (
SELECT rp.filename
FROM receipts r
JOIN receipt_packages rp
ON r.receipt_id = rp.receipt_id
WHERE r.group_id = '%s'
GROUP BY r.receipt_id, rp.package_id, rp.filename
)
SELECT COUNT(filename) total_num_complete,
COUNT(1) total_num_labels
FROM receipt_package_grouped
""" % group_id
cur.execute(query)
total_num_complete, total_num_labels = cur.fetchone()
context['total_num_complete'] = total_num_complete
context['total_num_labels'] = total_num_labels
query = """
SELECT m.filename
FROM manifests m
WHERE m.group_id = '%s'
""" % group_id
cur.execute(query)
context['manifest_file'] = cur.fetchone()[0]
return render_template('orders.html', **context)
def do_update_orders():
response = None
try:
Etsy().fetch_and_insert()
Webflow().fetch_and_insert()
response = make_response('success', 200)
except Exception as e:
tb = traceback.format_exc()
response = make_response(tb, 400)
logging.exception(e)
return response
def do_contract_shipping_label(group_id):
response = None
conn = get_connection(immediate=True) # make sure we can get the connection
cur = None
if conn is None:
response = make_response('database is busy try again in a few seconds', 400)
else:
try:
cur = conn.cursor()
if not conn.in_transaction:
cur.execute("BEGIN IMMEDIATE")
except Exception as e:
response = make_response('database is busy try again in a few seconds', 400)
logger.exception(e)
else:
# do the actual work
done_manifest = False
try:
num_failed = create_shipping_label(cur, group_id)
if num_failed == 0:
create_manifest(cur, group_id)
response = make_response('success', 200)
done_manifest = True
else:
response = make_response('failed to get %s labels' % num_failed, 400)
except Exception as e:
tb = traceback.format_exc()
response = make_response(tb, 400)
logger.exception(e)
finally:
conn.commit()
if done_manifest:
batch_update_tracking(conn, cur, group_id)
conn.close()
return response
def do_order_complete(receipt_id):
response = None
conn = get_connection()
cur = conn.cursor()
try:
load_config()
query = """
SELECT r.receipt_id,
r.group_id,
r.source,
MIN(rp.tracking_pin) tracking_pin
FROM receipts r
JOIN receipt_packages rp
ON r.receipt_id = rp.receipt_id
WHERE r.receipt_id = '%s'
GROUP BY r.receipt_id, r.group_id, r.source
HAVING SUM(CASE WHEN rp.tracking_pin IS NULL THEN 1 ELSE 0 END) = 0
""" % receipt_id
cur.execute(query)
group_id, source, tracking_pin = cur.fetchone()[1:]
data = {'tracking_pin': tracking_pin}
url = None
if not is_debug():
if source == 'Etsy':
Etsy().complete_order(receipt_id, tracking_pin)
elif source == 'Webflow':
Webflow().complete_order(receipt_id, tracking_pin)
query = """
UPDATE receipts
SET status = 2 /* completed */
WHERE receipt_id = '%s'
""" % receipt_id
cur.execute(query)
conn.commit()
query = """
SELECT COUNT(1)
FROM receipts
WHERE group_id = '%s'
AND status <> 2 /* completed */
""" % group_id
logger.debug(query)
cur.execute(query)
num_incomplete = cur.fetchone()[0]
logger.debug("num_incomplete: %s" % num_incomplete)
if num_incomplete == 0:
query = """
UPDATE manifests
SET status = 1 /* completed */
WHERE group_id = '%s'
""" % group_id
cur.execute(query)
conn.commit()
response = make_response('success', 200)
except Exception as e:
logger.exception(e)
response = make_response(str(e), 400)
return response
def batch_update_tracking(conn, cur, group_id):
query = """
SELECT r.receipt_id,
r.source,
MIN(rp.tracking_pin) tracking_pin
FROM receipts r
JOIN receipt_packages rp
ON r.receipt_id = rp.receipt_id
WHERE r.group_id = '%(group_id)s'
AND r.status <> 2 /* completed */
GROUP BY r.receipt_id, r.source
HAVING SUM(CASE WHEN rp.tracking_pin IS NULL THEN 1 ELSE 0 END) = 0
""" % {'group_id': group_id}
rows = get_data_dict(cur, query)
if not conn.in_transaction:
cur.execute("BEGIN")
num_failed = 0
for row in rows:
try:
receipt_id = row['receipt_id']
source = row['source']
tracking_pin = row['tracking_pin']
order_complete_helper(cur, receipt_id, source, tracking_pin)
except Exception as e:
tb = traceback.format_exc()
logger.exception(e)
num_failed += 1
if num_failed == 0:
query = """
UPDATE manifests
SET status = 1 /* completed */
WHERE group_id = '%s'
""" % group_id
cur.execute(query)
conn.commit()
return num_failed
def order_complete_helper(cur, receipt_id, source, tracking_pin):
data = {'tracking_pin': tracking_pin}
url = None
if not is_debug():
if source == 'Etsy':
Etsy().complete_order(receipt_id, tracking_pin)
elif source == 'Webflow':
Webflow().complete_order(receipt_id, tracking_pin)
query = """
UPDATE receipts
SET status = 2 /* completed */
WHERE receipt_id = '%s'
""" % receipt_id
cur.execute(query)
def do_manifest(group_id):
response = None
conn = get_connection(immediate=True) # make sure we can get the connection
cur = None
done_manifest = False
if conn is None:
response = make_response('database is busy try again in a few seconds', 400)
else:
try:
cur = conn.cursor()
if not conn.in_transaction:
cur.execute("BEGIN IMMEDIATE")
except Exception as e:
response = make_response('database is busy try again in a few seconds', 400)
logger.exception(e)
else:
try:
create_manifest(cur, group_id)
response = make_response('success', 200)
done_manifest = True
except Exception as e:
tb = traceback.format_exc()
response = make_response(tb, 400)
logger.exception(e)
finally:
conn.commit()
if done_manifest:
batch_update_tracking(conn, cur, group_id)
conn.close()
return response
def do_remove_receipt(receipt_id):
response = None
conn = get_connection() # make sure we can get the connection
cur = None
if conn is None:
response = make_response('database is busy try again in a few seconds', 400)
else:
try:
cur = conn.cursor()
if not conn.in_transaction:
cur.execute("BEGIN")
data = {'receipt_id': receipt_id}
query = """
DELETE FROM receipt_packages
WHERE receipt_id = '%(receipt_id)s'
""" % data
cur.execute(query)
query = """
DELETE FROM printer_queue
WHERE receipt_id = '%(receipt_id)s'
""" % data
cur.execute(query)
query = """
DELETE FROM receipts
WHERE receipt_id = '%(receipt_id)s'
""" % data
cur.execute(query)
conn.commit()
response = make_response('success', 200)
except Exception as e:
conn.rollback()
tb = traceback.format_exc()
response = make_response(tb, 400)
logger.exception(e)
conn.close()
return response
|
from . import TypeChecker
from . import Tools
#from . import MouseControl
from . import Filters
from .D2Point import *
from .pyon import *
|
import os
import re
import subprocess
import winreg
import yaku.task
from yaku.tools.mscommon.common \
import \
read_values, read_value, get_output
def _exec_command_factory(saved):
def msvc_exec_command(self, cmd, cwd, env=None):
new_cmd = []
carry = ""
for c in cmd:
if c in ["/Fo", "/out:", "/OUT:", "/object:"]:
carry = c
else:
c = carry + c
carry = ""
new_cmd.append(c)
env = dict(os.environ)
env.update(PATH=os.pathsep.join(self.env["PATH"]))
saved(self, new_cmd, cwd, env=env)
return msvc_exec_command
# Dict to 'canonalize' the arch
_ARCH_TO_CANONICAL = {
"amd64" : "amd64",
"emt64" : "amd64",
"i386" : "x86",
"i486" : "x86",
"i586" : "x86",
"i686" : "x86",
"ia64" : "ia64",
"itanium" : "ia64",
"x86" : "x86",
"x86_64" : "amd64",
}
# Given a (host, target) tuple, return the argument for the bat file. Both host
# and targets should be canonalized.
_HOST_TARGET_ARCH_TO_BAT_ARCH = {
("x86", "x86"): "x86",
("x86", "amd64"): "x86_amd64",
("amd64", "amd64"): "amd64",
("amd64", "x86"): "x86",
("x86", "ia64"): "x86_ia64"
}
_VCVER = ["10.0", "9.0", "9.0Exp","8.0", "8.0Exp","7.1", "7.0", "6.0"]
_VCVER_TO_PRODUCT_DIR = {
'10.0': [
r'Microsoft\VisualStudio\10.0\Setup\VC\ProductDir'],
'9.0': [
r'Microsoft\VisualStudio\9.0\Setup\VC\ProductDir'],
'9.0Exp' : [
r'Microsoft\VCExpress\9.0\Setup\VC\ProductDir'],
'8.0': [
r'Microsoft\VisualStudio\8.0\Setup\VC\ProductDir'],
'8.0Exp': [
r'Microsoft\VCExpress\8.0\Setup\VC\ProductDir'],
'7.1': [
r'Microsoft\VisualStudio\7.1\Setup\VC\ProductDir'],
'7.0': [
r'Microsoft\VisualStudio\7.0\Setup\VC\ProductDir'],
'6.0': [
r'Microsoft\VisualStudio\6.0\Setup\Microsoft Visual C++\ProductDir']
}
_is_win64 = None
def is_win64():
"""Return true if running on windows 64 bits.
Works whether python itself runs in 64 bits or 32 bits."""
# Unfortunately, python does not provide a useful way to determine
# if the underlying Windows OS is 32-bit or 64-bit. Worse, whether
# the Python itself is 32-bit or 64-bit affects what it returns,
# so nothing in sys.* or os.* help.
# Apparently the best solution is to use env vars that Windows
# sets. If PROCESSOR_ARCHITECTURE is not x86, then the python
# process is running in 64 bit mode (on a 64-bit OS, 64-bit
# hardware, obviously).
# If this python is 32-bit but the OS is 64, Windows will set
# ProgramW6432 and PROCESSOR_ARCHITEW6432 to non-null.
# (Checking for HKLM\Software\Wow6432Node in the registry doesn't
# work, because some 32-bit installers create it.)
global _is_win64
if _is_win64 is None:
# I structured these tests to make it easy to add new ones or
# add exceptions in the future, because this is a bit fragile.
_is_win64 = False
if os.environ.get('PROCESSOR_ARCHITECTURE','x86') != 'x86':
_is_win64 = True
if os.environ.get('PROCESSOR_ARCHITEW6432'):
_is_win64 = True
if os.environ.get('ProgramW6432'):
_is_win64 = True
return _is_win64
def msvc_version_to_maj_min(msvc_version):
    from string import digits as string_digits
    msvc_version_numeric = ''.join([x for x in msvc_version if x in string_digits + '.'])
t = msvc_version_numeric.split(".")
if not len(t) == 2:
raise ValueError("Unrecognized version %s (%s)" % (msvc_version,msvc_version_numeric))
try:
maj = int(t[0])
min = int(t[1])
return maj, min
except ValueError as e:
raise ValueError("Unrecognized version %s (%s)" % (msvc_version,msvc_version_numeric))
def is_host_target_supported(host_target, msvc_version):
"""Return True if the given (host, target) tuple is supported given the
msvc version.
Parameters
----------
host_target: tuple
tuple of (canonalized) host-target, e.g. ("x86", "amd64") for cross
compilation from 32 bits windows to 64 bits.
msvc_version: str
msvc version (major.minor, e.g. 10.0)
Note
----
This only check whether a given version *may* support the given (host,
target), not that the toolchain is actually present on the machine.
"""
# We assume that any Visual Studio version supports x86 as a target
if host_target[1] != "x86":
maj, min = msvc_version_to_maj_min(msvc_version)
if maj < 8:
return False
return True
def find_vc_pdir(msvc_version):
"""Try to find the product directory for the given
version.
Note
----
If for some reason the requested version could not be found, an
exception which inherits from VisualCException will be raised."""
base = winreg.HKEY_LOCAL_MACHINE
root = 'Software\\'
if is_win64():
root = root + 'Wow6432Node\\'
try:
hkeys = _VCVER_TO_PRODUCT_DIR[msvc_version]
except KeyError:
#debug("Unknown version of MSVC: %s" % msvc_version)
raise ValueError("Unknown version %s" % msvc_version)
for key in hkeys:
key = root + key
comps = read_value(key)
if comps is not None:
if os.path.exists(comps):
return comps
else:
raise ValueError("registry dir %s not found on the filesystem" % comps)
return None
def find_versions(abi):
base = winreg.HKEY_LOCAL_MACHINE
key = os.path.join(_FC_ROOT[abi], "Fortran")
availables = {}
versions = read_keys(base, key)
if versions is None:
return availables
for v in versions:
verk = os.path.join(key, v)
key = open_key(verk)
try:
maj = winreg.QueryValueEx(key, "Major Version")[0]
min = winreg.QueryValueEx(key, "Minor Version")[0]
bld = winreg.QueryValueEx(key, "Revision")[0]
availables[(maj, min, bld)] = verk
finally:
close_key(key)
return availables
def _detect_msvc(ctx):
from string import digits as string_digits
msvc_version_info = (9, 0)
msvc_version = "9.0"
pdir = find_vc_pdir(msvc_version)
if pdir is None:
raise ValueError("VS 9.0 not found")
# filter out e.g. "Exp" from the version name
msvc_ver_numeric = ''.join([x for x in msvc_version if x in string_digits + "."])
vernum = float(msvc_ver_numeric)
if 7 <= vernum < 8:
pdir = os.path.join(pdir, os.pardir, "Common7", "Tools")
batfilename = os.path.join(pdir, "vsvars32.bat")
elif vernum < 7:
pdir = os.path.join(pdir, "Bin")
batfilename = os.path.join(pdir, "vcvars32.bat")
else: # >= 8
batfilename = os.path.join(pdir, "vcvarsall.bat")
vc_paths = get_output(ctx, batfilename, "x86")
cc = None
linker = None
lib = None
for p in vc_paths["PATH"]:
_cc = os.path.join(p, "cl.exe")
_linker = os.path.join(p, "link.exe")
_lib = os.path.join(p, "lib.exe")
if os.path.exists(_cc) and os.path.exists(_linker) and os.path.exists(_lib):
cc = _cc
linker = _linker
lib = _lib
break
if cc is None or linker is None:
raise RuntimeError("Could not find cl.exe/link.exe")
return cc, linker, lib, vc_paths, msvc_version_info
def setup(ctx):
env = ctx.env
cc, linker, lib, vc_paths, msvc_version_info = _detect_msvc(ctx)
ctx.env["PATH"] = vc_paths["PATH"][:]
ctx.env.prextend("CPPPATH", vc_paths["INCLUDE"], create=True)
ctx.env.prextend("LIBDIR", vc_paths["LIB"], create=True)
ctx.env["CC"] = [cc]
ctx.env["CC_TGT_F"] = ["/c", "/Fo"]
ctx.env["CC_SRC_F"] = []
ctx.env["CFLAGS"] = ["/nologo"]
ctx.env["CPPPATH_FMT"] = "/I%s"
ctx.env["DEFINES"] = []
ctx.env["DEFINES_FMT"] = "/D%s"
ctx.env["LINK"] = [linker]
ctx.env["LINK_TGT_F"] = ["/out:"]
ctx.env["LINK_SRC_F"] = []
ctx.env["LINKFLAGS"] = ["/nologo"]
ctx.env["SHLINK"] = [linker, "/DLL"]
ctx.env["SHLINK_TGT_F"] = ["/out:"]
ctx.env["SHLINK_SRC_F"] = []
ctx.env["SHLINKFLAGS"] = []
ctx.env["MODLINK"] = [linker, "/DLL"]
ctx.env["MODLINK_TGT_F"] = ["/out:"]
ctx.env["MODLINK_SRC_F"] = []
ctx.env["MODLINKFLAGS"] = ["/nologo"]
ctx.env["LIBS"] = []
ctx.env["LIB_FMT"] = "%s.lib"
ctx.env["LIBDIR"] = []
ctx.env["LIBDIR_FMT"] = "/LIBPATH:%s"
ctx.env["STLINK"] = [lib]
ctx.env["STLINK_TGT_F"] = ["/OUT:"]
ctx.env["STLINK_SRC_F"] = []
ctx.env["STLINKFLAGS"] = ["/nologo"]
ctx.env["STATICLIB_FMT"] = "%s.lib"
ctx.env["CXX"] = [cc]
ctx.env["CXX_TGT_F"] = ["/c", "/Fo"]
ctx.env["CXX_SRC_F"] = []
ctx.env["CXXFLAGS"] = ["/nologo"]
if msvc_version_info >= (9, 0):
ctx.env.append("CXXFLAGS", "/EHsc")
ctx.env["CXXLINK"] = [linker]
ctx.env["CXXLINKFLAGS"] = ["/nologo"]
ctx.env["CXXLINK_TGT_F"] = ["/out:"]
ctx.env["CXXLINK_SRC_F"] = []
ctx.env["CXXSHLINK"] = [linker]
ctx.env["CXXSHLINKFLAGS"] = []
ctx.env["CXXSHLINK_TGT_F"] = ["/out:"]
ctx.env["CXXSHLINK_SRC_F"] = []
ctx.env["CC_OBJECT_FMT"] = "%s.obj"
ctx.env["CXX_OBJECT_FMT"] = "%s.obj"
ctx.env["SHAREDLIB_FMT"] = "%s.dll"
ctx.env["PROGRAM_FMT"] = "%s.exe"
for k, v in list(vc_paths.items()):
k = k.encode("ascii")
if k in ["LIB"]:
env.extend("LIBDIR", v, create=True)
elif k in ["CPPPATH"]:
env.extend(k, v, create=True)
for task_class in ["cc", "shcc", "cc_shlink", "cc_stlink", "cc_program", "cxx", "cxxprogram", "pycc", "pycxx", "pylink"]:
klass = yaku.task.task_factory(task_class)
saved = klass.exec_command
klass.exec_command = _exec_command_factory(saved)
def detect(ctx):
_detect_msvc(ctx)
return True
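# Hedged sanity check for the pure helpers above; it touches neither the registry
# nor the filesystem, although importing this module still needs Windows (winreg)
# and yaku available.
if __name__ == "__main__":
    print(msvc_version_to_maj_min("9.0Exp"))                   # expected: (9, 0)
    print(is_host_target_supported(("x86", "amd64"), "10.0"))  # expected: True
    print(is_host_target_supported(("x86", "amd64"), "7.1"))   # expected: False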
|
default_app_config = 'language_acts.twitterhut.apps.TwitterhutConfig'
|
from .schema.app import saveApplication
from .schema.code_mod import (makeUniqueCodeSnippet, makeUniqueModule,
saveModule, saveModuleFile, savePackage,
savePackageFile)
from .schema.drv_inst import setInstrument, uploadDriver
from .schema.record import newRecord
from .schema.user import newUser
|
from models.game.bots.Bot import Bot
from models.game.Game import Game
from models.data.GameDataModel import GameDataModel
from models.data import DatabaseConnection as DB
class Experiment(object):
def __init__(self, player1, player2, iterations, record=True):
""" An Experiment is a sequence of several games between two bots. Results can be saved or discarded
:param player1: the Bot playing as 'X'
:param player2: the Bot playing as 'O'
:param iterations: the number of games to play for this experiment
:param record: boolean indicator - should the result of games be recorded or not?
"""
if not isinstance(player1, Bot) or not isinstance(player2, Bot):
raise Exception("Invalid Experiment: both players must be bots")
self.p1 = player1
self.p2 = player2
self.iterations = iterations
self.completed_iterations = 0
self.p1_wins = 0
self.p2_wins = 0
self.ties = 0
self.record_result = record
self.finished = False
def run(self, callback=None):
""" Runs the current experiment. The callback function will be called after each game is finished.
:param callback: a function to call at the termination of each game. The iteration number and winner will be passed as arguments
:return: None
"""
db_insertion_scripts = []
for i in list(range(0, self.iterations)):
game = Game(self.p1, self.p2)
game.finish_game()
self.completed_iterations += 1
winner = game.get_winner()
if winner == self.p1.number:
self.p1_wins += 1
elif winner == self.p2.number:
self.p2_wins += 1
else:
self.ties += 1
if self.record_result:
game_dm = GameDataModel(game)
db_insertion_scripts.append(game_dm.get_save_script())
if callback is not None:
callback(i+1, game.get_winner())
if self.record_result:
insertion_script = "\n".join(db_insertion_scripts)
DB.execute(insertion_script)
self.finished = True
def get_p1_win_rate(self):
if self.completed_iterations == 0:
return 0
return self.p1_wins / self.completed_iterations
def get_p2_win_rate(self):
if self.completed_iterations == 0:
return 0
return self.p2_wins / self.completed_iterations
def get_tie_rate(self):
if self.completed_iterations == 0:
return 0
return self.ties / self.completed_iterations
|
from .raw import RawFIFF
|
import typing as T
from screeninfo.common import Monitor
# https://developer.apple.com/documentation/appkit/nsscreen/1388371-main
# first entry in array is always the primary screen
def check_primary(screens: T.Any, screen: T.Any) -> bool:
return screen == screens[0]
def enumerate_monitors() -> T.Iterable[Monitor]:
from AppKit import NSScreen
screens = NSScreen.screens()
for screen in screens:
f = screen.frame
if callable(f):
f = f()
yield Monitor(
x=int(f.origin.x),
y=int(f.origin.y),
width=int(f.size.width),
height=int(f.size.height),
is_primary=check_primary(screens, screen),
)
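# Hedged usage sketch: print every detected monitor. This only works on macOS with
# pyobjc's AppKit available, which is why it sits behind a main guard.
if __name__ == "__main__":
    for monitor in enumerate_monitors():
        print(monitor)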
|
from guild import Guild
from player import Player
player = Player("George", 50, 100)
print(player.add_skill("Shield Break", 20))
print(player.player_info())
guild = Guild("UGT")
print(guild.assign_player(player))
print(guild.guild_info())
|
from rest_framework_simplejwt.authentication import JWTTokenUserAuthentication
class CookieAccessTokenAuthentication(JWTTokenUserAuthentication):
def authenticate(self, request):
raw_token = request.COOKIES.get('access_token', None)
if raw_token is None:
return None
validated_token = self.get_validated_token(raw_token)
return self.get_user(validated_token), None
class CookieRefreshTokenAuthentication(JWTTokenUserAuthentication):
def authenticate(self, request):
raw_token = request.COOKIES.get('refresh_token', None)
if raw_token is None:
return None
validated_token = self.get_validated_token(raw_token)
return self.get_user(validated_token), None
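# Hedged wiring sketch: these classes are typically referenced from Django REST
# Framework settings. The dotted module path "myproject.authentication" is an
# assumption for illustration only, not something defined in this file.
EXAMPLE_REST_FRAMEWORK_SETTINGS = {
    "DEFAULT_AUTHENTICATION_CLASSES": [
        "myproject.authentication.CookieAccessTokenAuthentication",
    ],
}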
|
def printTuple():
li=list()
for i in range(1,21):
li.append(i**2)
print(tuple(li))
printTuple()
|
from prereise.cli.data_sources import get_data_sources_list
from prereise.cli.data_sources.solar_data import (
SolarDataGriddedAtmospheric,
SolarDataNationalSolarRadiationDatabase,
)
from prereise.cli.data_sources.wind_data import WindDataRapidRefresh
def test_get_data_sources_list():
data_sources_list = get_data_sources_list()
assert isinstance(data_sources_list[0], WindDataRapidRefresh)
assert isinstance(data_sources_list[1], SolarDataGriddedAtmospheric)
assert isinstance(data_sources_list[2], SolarDataNationalSolarRadiationDatabase)
|
from unittest import TestCase
import chff
class Testchff(TestCase):
def test_is_string(self):
s = chff.hello()
self.assertTrue(isinstance(s, str))
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Copyright 2019 Surface Concept GmbH
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
-------------------------------------------------------------------------------
Created on Thu Sep 19 16:07:32 2019
Test of the user callbacks interface.
"""
import scTDC
# import sys
import timeit
# -----------------------------------------------------------------------------
# example 1 of deriving from sctdc_usercallbacks_pipe
# count TDC and DLD events and the number of callbacks for TDC events
class UCB1(scTDC.usercallbacks_pipe):
def __init__(self, lib, dev_desc):
super().__init__(lib, dev_desc) # <-- mandatory
self.reset_counters()
def on_millisecond(self):
# sys.stdout.write("MS ")
pass
def on_start_of_meas(self):
# self.reset_counters()
pass
def on_end_of_meas(self):
print("\nend of measurement")
print("tdc events : ", self.tdc_event_count)
print("tdc callbacks : ", self.tdc_cb_count)
print("dld events : ", self.dld_event_count)
def on_tdc_event(self, tdc_events, nr_tdc_events):
self.tdc_event_count += nr_tdc_events
self.tdc_cb_count += 1
# sys.stdout.write("T ")
def on_dld_event(self, dld_events, nr_dld_events):
self.dld_event_count += nr_dld_events
# sys.stdout.write("D ")
def reset_counters(self):
self.tdc_event_count = 0
self.tdc_cb_count = 0
self.dld_event_count = 0
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# example 2 of deriving from sctdc_usercallbacks_pipe
# evaluate minimum and maximum values for times, x and y coordinates
class UCB2(scTDC.usercallbacks_pipe):
def __init__(self, lib, dev_desc):
super().__init__(lib, dev_desc) # <-- mandatory
self.reset_min_max()
self.counter_tdc = 0
self.counter_dld = 0
def on_millisecond(self):
pass # do nothing (one could also skip this function definition altogether)
def on_start_of_meas(self):
# self.reset_min_max() # reset at every start of a measurement
pass # do nothing
def on_end_of_meas(self):
# pass
print("end of measurement")
print("minimum time TDC : ", self.min_time_tdc)
print("maximum time TDC : ", self.max_time_tdc)
print("minimum time DLD : ", self.min_time_dld)
print("maximum time DLD : ", self.max_time_dld)
print("minimum x : ", self.min_x)
print("maximum x : ", self.max_x)
print("minimum y : ", self.min_y)
print("maximum y : ", self.max_y)
print('dld counter', self.counter_dld)
print('tdc counter', self.counter_tdc)
def on_tdc_event(self, tdc_events, nr_tdc_events):
print('event tdc')
for i in range(nr_tdc_events): # iterate through tdc_events
# see class tdc_event_t in scTDC.py for all accessible fields
t = tdc_events[i].time_data
ch = tdc_events[i].channel
sign_counter = tdc_events[i].sign_counter
subdevice = tdc_events[i].subdevice
time_tag = tdc_events[i].time_tag
print('event tdc:', subdevice, ch, t, sign_counter, time_tag)
self.min_time_tdc = min(self.min_time_tdc, t)
self.max_time_tdc = max(self.max_time_tdc, t)
self.counter_tdc += 1
def on_dld_event(self, dld_events, nr_dld_events):
print('event dld')
for i in range(nr_dld_events): # iterate through dld_events
# see class dld_event_t in scTDC.py for all accessible fields
t = dld_events[i].sum
x = dld_events[i].dif1
y = dld_events[i].dif2
master_counter = dld_events[i].master_rst_counter
start_counter = dld_events[i].start_counter
print('event dld:', x, y, t, start_counter, master_counter)
self.min_time_dld = min(self.min_time_dld, t)
self.max_time_dld = max(self.max_time_dld, t)
self.min_x = min(self.min_x, dld_events[i].dif1)
self.max_x = max(self.max_x, dld_events[i].dif1)
self.min_y = min(self.min_y, dld_events[i].dif2)
self.max_y = max(self.max_y, dld_events[i].dif2)
self.counter_dld += 1
def reset_min_max(self):
self.min_x = 1 << 40
self.max_x = -1
self.min_y = 1 << 40
self.max_y = -1
self.min_time_tdc = 1 << 40
self.max_time_tdc = -1
self.min_time_dld = 1 << 40
self.max_time_dld = -1
# -----------------------------------------------------------------------------
def test1():
device = scTDC.Device(autoinit=False)
# initialize TDC --- and check for error!
retcode, errmsg = device.initialize()
if retcode < 0:
print("error during init:", retcode, errmsg)
return 0
else:
        print("successfully initialized")
# use example 1 :
    # ucb = UCB1(device.lib, device.dev_desc)  # opens a user callbacks pipe
# or use example 2:
ucb = UCB2(device.lib, device.dev_desc) # opens a user callbacks pipe
start = timeit.default_timer()
for i in range(20): # number of measurements
ucb.do_measurement(100)
end = timeit.default_timer()
print("\ntime elapsed : ", end - start, "s")
ucb.close() # closes the user callbacks pipe, method inherited from base class
device.deinitialize()
if __name__ == "__main__":
test1()
|
import os
import json
from populus import ASSETS_DIR
from .versions import (
V1,
V2,
V3,
V4,
V5,
V6,
V7,
LATEST_VERSION
)
DEFAULT_V1_CONFIG_FILENAME = "defaults.v1.config.json"
DEFAULT_V2_CONFIG_FILENAME = "defaults.v2.config.json"
DEFAULT_V3_CONFIG_FILENAME = "defaults.v3.config.json"
DEFAULT_V4_CONFIG_FILENAME = "defaults.v4.config.json"
DEFAULT_V5_CONFIG_FILENAME = "defaults.v5.config.json"
DEFAULT_V6_CONFIG_FILENAME = "defaults.v6.config.json"
DEFAULT_V7_CONFIG_FILENAME = "defaults.v7.config.json"
DEFAULT_USER_V7_CONFIG_FILENAME = "defaults.user.v7.config.json"
DEFAULT_CONFIG_FILENAMES = {
V1: DEFAULT_V1_CONFIG_FILENAME,
V2: DEFAULT_V2_CONFIG_FILENAME,
V3: DEFAULT_V3_CONFIG_FILENAME,
V4: DEFAULT_V4_CONFIG_FILENAME,
V5: DEFAULT_V5_CONFIG_FILENAME,
V6: DEFAULT_V6_CONFIG_FILENAME,
V7: DEFAULT_V7_CONFIG_FILENAME,
}
DEFAULT_USER_CONFIG_FILENAMES = {
V7: DEFAULT_USER_V7_CONFIG_FILENAME,
}
def get_user_default_config_path(version=LATEST_VERSION):
try:
return os.path.join(ASSETS_DIR, DEFAULT_USER_CONFIG_FILENAMES[version])
except KeyError:
raise KeyError(
"`version` must be one of {0}".format(
sorted(tuple(DEFAULT_USER_CONFIG_FILENAMES.keys()))
)
)
def get_default_config_path(version=LATEST_VERSION):
try:
return os.path.join(ASSETS_DIR, DEFAULT_CONFIG_FILENAMES[version])
except KeyError:
raise KeyError(
"`version` must be one of {0}".format(
sorted(tuple(DEFAULT_CONFIG_FILENAMES.keys()))
)
)
def load_default_config(version=LATEST_VERSION):
default_config_path = get_default_config_path(version)
with open(default_config_path) as default_config_file:
default_config = json.load(default_config_file)
return default_config
def load_user_default_config(version=LATEST_VERSION):
default_config_path = get_user_default_config_path(version)
with open(default_config_path) as default_config_file:
default_config = json.load(default_config_file)
return default_config
|
from . import base
from . import bootstrap_filter, diagonal_extended_kalman_filter, extended_kalman_filter, unscented_kalman_filter
|
from typing import ClassVar, List
from ...constants import ApiKey
from ..base import RequestData
class ForgottenTopicsData:
topic: str
partitions: List[int]
def __init__(self, topic: str, partitions: List[int]):
"""
:param topic: Name of topic
:type topic: str
:param partitions: Partitions to remove from the fetch session.
:type partitions: List[int]
"""
self.topic = topic
self.partitions = partitions
class Partition:
partition: int
current_leader_epoch: int
fetch_offset: int
log_start_offset: int
partition_max_bytes: int
def __init__(
self,
partition: int,
current_leader_epoch: int,
fetch_offset: int,
log_start_offset: int,
partition_max_bytes: int,
):
"""
:param partition: Topic partition id
:type partition: int
:param current_leader_epoch: The current leader epoch, if provided, is used to fence consumers/replicas with
old metadata. If the epoch provided by the client is larger than the current epoch
known to the broker, then the UNKNOWN_LEADER_EPOCH error code will be returned. If
the provided epoch is smaller, then the FENCED_LEADER_EPOCH error code will be
returned.
:type current_leader_epoch: int
:param fetch_offset: Message offset.
:type fetch_offset: int
:param log_start_offset: Earliest available offset of the follower replica. The field is only used when request
is sent by follower.
:type log_start_offset: int
:param partition_max_bytes: Maximum bytes to fetch.
:type partition_max_bytes: int
"""
self.partition = partition
self.current_leader_epoch = current_leader_epoch
self.fetch_offset = fetch_offset
self.log_start_offset = log_start_offset
self.partition_max_bytes = partition_max_bytes
class Topic:
topic: str
partitions: List[Partition]
def __init__(self, topic: str, partitions: List[Partition]):
"""
:param topic: Name of topic
:type topic: str
:param partitions: Partitions to fetch.
:type partitions: List[Partition]
"""
self.topic = topic
self.partitions = partitions
class FetchRequestData(RequestData):
replica_id: int
max_wait_time: int
min_bytes: int
max_bytes: int
isolation_level: int
session_id: int
session_epoch: int
topics: List[Topic]
forgotten_topics_data: List[ForgottenTopicsData]
rack_id: str
api_key: ClassVar[ApiKey] = ApiKey.FETCH
def __init__(
self,
replica_id: int,
max_wait_time: int,
min_bytes: int,
max_bytes: int,
isolation_level: int,
session_id: int,
session_epoch: int,
topics: List[Topic],
forgotten_topics_data: List[ForgottenTopicsData],
rack_id: str,
):
"""
:param replica_id: Broker id of the follower. For normal consumers, use -1.
:type replica_id: int
:param max_wait_time: Maximum time in ms to wait for the response.
:type max_wait_time: int
:param min_bytes: Minimum bytes to accumulate in the response.
:type min_bytes: int
:param max_bytes: Maximum bytes to accumulate in the response. Note that this is not an absolute maximum, if
the first message in the first non-empty partition of the fetch is larger than this value,
the message will still be returned to ensure that progress can be made.
:type max_bytes: int
:param isolation_level: This setting controls the visibility of transactional records. Using READ_UNCOMMITTED
(isolation_level = 0) makes all records visible. With READ_COMMITTED (isolation_level =
1), non-transactional and COMMITTED transactional records are visible. To be more
concrete, READ_COMMITTED returns all data from offsets smaller than the current LSO
(last stable offset), and enables the inclusion of the list of aborted transactions in
the result, which allows consumers to discard ABORTED transactional records
:type isolation_level: int
:param session_id: The fetch session ID
:type session_id: int
:param session_epoch: The fetch session epoch
:type session_epoch: int
:param topics: Topics to fetch in the order provided.
:type topics: List[Topic]
:param forgotten_topics_data: Topics to remove from the fetch session.
:type forgotten_topics_data: List[ForgottenTopicsData]
:param rack_id: The consumer's rack id
:type rack_id: str
"""
self.replica_id = replica_id
self.max_wait_time = max_wait_time
self.min_bytes = min_bytes
self.max_bytes = max_bytes
self.isolation_level = isolation_level
self.session_id = session_id
self.session_epoch = session_epoch
self.topics = topics
self.forgotten_topics_data = forgotten_topics_data
self.rack_id = rack_id
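# Minimal construction sketch (illustrative only; the literal values below are typical choices
# for a plain consumer and are not taken from this module):
#
# request = FetchRequestData(
#     replica_id=-1,            # normal consumer, not a follower broker
#     max_wait_time=500,        # wait at most 500 ms for min_bytes to accumulate
#     min_bytes=1,
#     max_bytes=52428800,
#     isolation_level=0,        # READ_UNCOMMITTED
#     session_id=0,
#     session_epoch=-1,
#     topics=[Topic("example-topic", [Partition(partition=0, current_leader_epoch=-1,
#                                               fetch_offset=0, log_start_offset=-1,
#                                               partition_max_bytes=1048576)])],
#     forgotten_topics_data=[],
#     rack_id="",
# )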
|
"""
USES DEEP LEARNING TO CHECK THE ORIENTATION OF AN IMAGE
AND ROTATE IT ACCORDINGLY. GIVEN AN IMAGE (IN ANY FORMAT),
THE NUMBER OF ROTATIONS REQUIRED AND THE CORRECTLY ROTATED IMAGE ARE RETURNED.
THE STEPS PERFORMED ARE:
1) READ THE IMAGE IN RGB
2) IMAGE AUGMENTATION PIPELINE USING ALBUMENTATIONS (CLASS: COMPOSE)
3) PREDICTION USING A RESNET-TYPE NEURAL NETWORK
4) RETRIEVAL OF THE IMAGE ORIENTATION PREDICTIONS
5) CALCULATION OF THE NUMBER OF ROTATIONS REQUIRED FOR THE CORRECT ORIENTATION OF THE IMAGE.
# Arguments
    caminho_imagem - Required : Image to check the orientation of (String)
# Returns
    predictions - Required : Model predictions for 0º, 90º, 180º, 270º (List)
    number_rotate - Required : Number of rotations required (Integer)
    image_correct_rotate - Required : Image after applying the required number of rotations (PIL)
"""
__version__ = "1.0"
__author__ = """Emerson V. Rafael (EMERVIN)"""
__data_atualizacao__ = "16/08/2021"
from inspect import stack
import sys
from deep_check_orientation import check_orientation
from utils.image_view import view_image
if __name__ == '__main__':
try:
        # GET THE PATH OF THE IMAGE PROVIDED BY THE USER
IMAGE_FILE_LOCATION = sys.argv[1]
orquestrador = check_orientation()
predictions_check_orientation, number_rotations, image_correct_orientation = orquestrador.orchesta_model(IMAGE_FILE_LOCATION)
        print("THE MODEL PREDICTIONS ARE: {}"
              "\nFOR 0º: {}"
              "\nFOR 90º: {}"
              "\nFOR 180º: {}"
              "\nFOR 270º: {}".format(predictions_check_orientation,
                                      predictions_check_orientation[0],
                                      predictions_check_orientation[1],
                                      predictions_check_orientation[2],
                                      predictions_check_orientation[3]))
        print("NUMBER OF ROTATIONS REQUIRED: {} ROTATIONS".format(number_rotations))
        # DISPLAY THE CORRECTLY ROTATED IMAGE
view_image(image_correct_orientation, window_name="IMAGEM ROTACIONADA")
except Exception as ex:
        print("ERROR IN FUNCTION: {} - {}".format(stack()[0][3], ex))
|
#!/usr/bin/python
import time
import subprocess
import telepot
import os
import urllib.request
import re
import json
import requests
from bs4 import BeautifulSoup
from urllib.request import urlopen
import youtube_dl
def handle(msg):
chat_id = msg['chat']['id']
command = msg['text']
print ("Command from client : %s " %command)
#youtube search
if command.startswith('yt'):
param = command[3:]
response = urlopen("https://www.youtube.com/results?search_query="+param)
data = response.read()
response.close()
soup = BeautifulSoup(data,"html.parser")
vid = soup.find(attrs={'class':'yt-uix-tile-link'})
link = "https://www.youtube.com"+vid['href']
watchid = vid['href']
watchid = watchid.replace('/watch?v=','')
title = vid['title']
print (title)
print (link)
bot.sendMessage(chat_id,title+"\n"+link)
options = {
'format': 'bestaudio/best',
'postprocessors': [{
'key': 'FFmpegExtractAudio',
'preferredcodec': 'mp3',
'preferredquality': '320'
}]
}
        filename = title + "-" + watchid + ".mp3"
        filename = filename.replace(" ", "_").replace("'", "").replace("&", "").replace("__", "_")
        filename = re.sub(r"[,()\[\]{}]", "", filename)
with youtube_dl.YoutubeDL(options) as ydl:
ydl.download([link])
bot.sendAudio(chat_id,audio=open(filename,'rb'))
print ("Sent!")
os.remove(filename)
#end youtube search
#api credentials
api_cont = os.getenv("apikey")
bot = telepot.Bot(api_cont)
bot.message_loop(handle)
print ('[+] Server is Listening [+]')
print ('[=] Type Command from Telegram [=]')
while 1:
time.sleep(10)
|
#!/usr/bin/env python
# -*-coding:utf-8 -*-
# @CreateTime : 2021/10/24 8:48
# @Author : xujiahui
# @Project : robust_python
# @File : validate.py
# @Version : V0.0.1
# @Desc           : A simple example of composing class-validation logic with __init_subclass__
class BetterPolygon:
sides = None # Must be specified by subclass
def __init_subclass__(cls):
super().__init_subclass__()
if cls.sides < 3:
raise ValueError("Polygons need 3+ sides")
@classmethod
def interior_angles(cls):
return (cls.sides - 2) * 180
# Use BetterPolygon on its own to verify that a polygon subclass has >= 3 sides
class Hexagon(BetterPolygon):
sides = 6
assert Hexagon.interior_angles() == 720
class Filled:
color = None # Must be specified by subclass
def __init_subclass__(cls):
super().__init_subclass__()
if cls.color not in ("red", "green", "blue"):
raise ValueError("Fills need a valid color")
class RedTriangle(Filled, BetterPolygon):
color = "red"
sides = 3
# Verify that the red triangle is built correctly under the combined validation logic of the side-count and fill-color superclasses
ruddy = RedTriangle()
assert isinstance(ruddy, Filled)
assert isinstance(ruddy, BetterPolygon)
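# A small additional sketch (not part of the original example): __init_subclass__ also behaves
# well under diamond inheritance. Top's hook below runs exactly once per new subclass, because
# every class forwards with super().__init_subclass__(). The class names here are made up.
class Top:
    def __init_subclass__(cls):
        super().__init_subclass__()
        print(f"Top for {cls}")
class Left(Top):
    def __init_subclass__(cls):
        super().__init_subclass__()
        print(f"Left for {cls}")
class Right(Top):
    def __init_subclass__(cls):
        super().__init_subclass__()
        print(f"Right for {cls}")
class Bottom(Left, Right):
    pass  # defining Bottom prints the Top, Right and Left messages exactly once each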
"""
To summarize, the special class method __init_subclass__ is very powerful.
In a multi-level class hierarchy, as long as __init_subclass__ is invoked through the built-in super() function,
the validation logic each class implements in its own __init_subclass__ is guaranteed to run correctly.
Beyond the case shown above, __init_subclass__ also handles diamond inheritance well.
It is of course possible to avoid __init_subclass__, which was only added in Python 3.6,
but hand-writing an equivalent metaclass combination is hard on the eyes and the keyboard and bloats the code base, so prefer it whenever you can.
"""
|
from .distributed_module import DistributedModule
|
import os # To access tokens
# Copyright (c) 2015-2016 Slack Technologies, Inc
from slack import WebClient
# Copyright (c) 2015 by Armin Ronacher and contributors. See AUTHORS for more details.
from flask import Flask
from flask import request, make_response, Response
from flask import redirect
from flask_sqlalchemy import SQLAlchemy
import json
import Helper
# Copyright (c) 2012-2014 Ivan Akimov, David Aurelio
from hashids import Hashids
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = Helper.getUrl(os.environ['DB_USER'],os.environ['DB_PASS'],os.environ['DB_NAME'])
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
import DantesUpdater # To access DantesUpdator
import Updater
import UserManager
import InfraManager
import Database
import AgentManager
import StatusManager
import LabManager
# Set of tokens provided by the app
clientID = os.environ['CLIENT_ID']
clientSecret = os.environ['CLIENT_SECRET']
veritoken = os.environ['VERIFY_TOKEN']
commandSalt = os.environ['COMMAND_SALT']
agentSalt = os.environ['AGENT_SALT']
# Dictionary of SlackClients stored by TeamID
clientDictionary = {}
# Plugin objects
dante = DantesUpdater.Dantes_Updater()
user = UserManager.UserManager()
infra = InfraManager.InfraManager()
update = Updater.Updater()
status = StatusManager.StatusManager()
lab = LabManager.LabManager()
commandDict = {
'dante':dante,
'infra':infra,
'user':user,
'update':update,
'agent':AgentManager,
'status':status,
'lab':lab
}
# Encoder objects
commandHashids = Hashids(salt=commandSalt)
agentHashids = Hashids(salt=agentSalt)
# Default webserver route
@app.route("/")
def main():
return "Welcome"
# URI for /test command
@app.route("/test",methods=['GET','POST'])
def test():
content = request.form
if content['token'] != veritoken:
print("Unauthorized message detected")
return 401
    print("RECEIVED TEST!")
message_attachments = [
{
"fallback": "Upgrade your Slack client to use messages like these.",
"color": "#3AA3E3",
"attachment_type": "default",
"callback_id": "menu_options_2319",
"actions": [
{
"name": "games_list",
"text": "Pick a game...",
"type": "select",
"data_source": "external"
}
]
}
]
sendMessage("Hello from /test", content['channel_id'], content['team_id'], attachments_send=message_attachments)
return "Test Sent, did you see the prompt?"
# URI for event subscription notifications
@app.route("/api/messages",methods=['GET','POST'])
def message_handle():
content = request.json
if content['token'] != veritoken:
print("Unauthorized message detected")
return 401
if content['type'] == "url_verification":
print("Received verification message")
return content['challenge']
curEvent = content['event']
team_id = content['team_id']
if curEvent['type'] == 'message':
if 'text' in curEvent and curEvent['text'].startswith("!"):
command = curEvent['text'][1:]
key = command.split(' ', 1)[0]
if key == "help":
sendHelp(None, curEvent['channel'], curEvent['user'], team_id)
elif key in commandDict:
commandDict[key].api_entry(command[len(key)+1:], curEvent['channel'], curEvent['user'], team_id)
else:
print("Command Not Found")
sendHelp("Command \"" + key + "\"" + " Not Found", curEvent['channel'], curEvent['user'], team_id)
else:
print("Event not a message")
print(content)
return "200"
@app.route("/api/messages/options",methods=['POST'])
def message_option_handle():
# Parse the request payload
form_json = json.loads(request.form["payload"])
menu_options = commandDict[form_json['callback_id']].option_entry(form_json)
return (Response(json.dumps(menu_options), mimetype='application/json'),200)
@app.route("/api/messages/actions",methods=['POST'])
def message_actions_handle():
# Parse the request payload
form_json = json.loads(request.form["payload"])
# Check to see what the user's selection was and update the message
commandDict[form_json['callback_id']].action_entry(form_json)
return make_response("", 200)
@app.route("/api/slash/set_admin_channel",methods=['POST'])
def slash_set_admin_channel():
channel = request.form['channel_id']
user = request.form['user_id']
team_id = request.form['team_id']
if request.form['token'] != veritoken:
        print("Unauthorized message detected")
return 401
if not checkPermission(user, "owner", team_id):
print(user + " attempted to set admin channel of workspace " + team_id)
sendEphemeral("Access Denied - Must be Owner to set admin channel", channel, user, team_id)
return ('', 200)
curWorkspace = Database.Workspaces.query.filter_by(team_id=team_id).first()
if curWorkspace is None:
print("Workspace " + team_id + " not found")
return 400
sendEphemeral("Set Admin Channel", channel, user, team_id)
curWorkspace.admin_channel = channel
Database.db.session.commit()
return ('', 200)
# URI for an agent with ID <id> to retrieve a list of unfetched commandIDs
@app.route("/api/agent/<id>/command",methods=['GET'])
def agent_id_command(id):
return 404
# URI for an agent to get a specific command and post the result
@app.route("/api/agent/<aid>/command/<cid>",methods=['GET', 'POST'])
def agent_id_command_id(aid, cid):
return 404
@app.route("/install",methods=['GET'])
def install():
print("Install reached")
return redirect("https://slack.com/oauth/authorize?scope=commands,bot,channels:read,groups:read,im:read,mpim:read&client_id=344786415526.344950175959&redirect_url=slack.flemingcaleb.com:5000/install/confirm")
@app.route("/install/confirm", methods=['GET'])
def install_confirm():
auth = request.args.get('code')
status = request.args.get('status')
error = request.args.get('error')
    if error is not None:
return "You have denied access"
sc = WebClient("")
print("Requesting tokens")
response = sc.api_call(
"oauth.access",
client_id=clientID,
client_secret=clientSecret,
code=auth
)
print(response)
addClient(response['bot']['bot_access_token'],response['access_token'],veritoken, response['team_id'])
return "Ok"
''' Function to send a message to a channel
Input:
message: Message to send
sendChannel: Channel to send the message to
Output:
N/A
'''
def sendMessage (message, sendChannel, team_id, attachments_send=None):
client,_ = getClient(team_id)
if client is None:
        print("Team not found: ", team_id)
        return
if attachments_send is None:
client.chat_postMessage(
channel=sendChannel,
text=message
)
else:
client.chat_postMessage(
channel=sendChannel,
text=message,
attachments=attachments_send
)
''' Function to send an ephemeral message
Input:
message: Message to send
sendChannel: Channel to send the message in
sendUserID: User to send the message to
Output:
N/A
'''
def sendEphemeral (message, sendChannel, sendUserID, team_id, attachments_send=None):
client,_ = getClient(team_id)
if client is None:
print("Team not found: ", team_id)
return
client.chat_postEphemeral(
channel=sendChannel,
user=sendUserID,
text=message,
attachments=attachments_send
)
def modifyMessage(orig_ts, message, sendChannel, sendUser, team, attachments_send=None):
client,_ = getClient(team)
if client is None:
print("Team not found: ", team)
return
client.chat_update(
channel=sendChannel,
user=sendUser,
ts=orig_ts,
text=message,
attachments=attachments_send
)
def deleteMessage(ts_delete, chan, team):
client,_ = getClient(team)
if client is None:
print("Team not found: ", team)
return
client.chat_delete(
channel=chan,
ts=ts_delete
)
''' Function to send a message to the designated admin channel
Input:
message: Message to send to the admins
team_id: Team whose admins should be contacted
Output:
boolean indicating if the admins have been contacted
'''
def notifyAdmins(message, team_id):
workspace = Database.Workspaces.query.filter_by(team_id=team_id).first()
if workspace is None:
print("Workspace " + team_id + " not found")
return False
if workspace.admin_channel is None:
print("No admin channel defined for workspace " + team_id)
return False
sendMessage(message, workspace.admin_channel, team_id)
return True
''' Function to check to see if a user possesses a specified permission level
Input:
user: User to check permissions of
requiredPerms: String indicating the permissions to check for
Possible values: owner, admin, member
Output:
Boolean indicating if the user has the required permissions
'''
def checkPermission(user, requiredPerms, team_id):
dbUser = Database.Users.query.filter_by(user_id = user).first()
if dbUser is None:
# Add user to the database
curPermissions = addUser(user, team_id)
else:
curPermissions = dbUser.permission_level
if curPermissions == Database.permissions.owner:
        print("User owner")
return True
elif (curPermissions == Database.permissions.admin) and not (requiredPerms == Database.permissions.owner.name):
return True
elif requiredPerms == Database.permissions.user.name:
return True
else:
return False
def checkDM(channelCheck, team):
client,_ = getClient(team)
if client is None:
print("Client not found: ", team)
return False
response = client.conversations_info(
channel=channelCheck
)
print("Response in checkDM:\n", response)
if response['channel'].get('is_im'):
return True
#Channel is not a DM
return False
''' Function to verify a user and determine their group membership
Input:
toCheck: User object to verify and classify
Output:
Boolean indicating success or failure
'''
def addUser(toCheck, team):
print("Adding user")
client,_ = getClient(team)
if client is None:
        print("Client not found: ", team)
        return None
response = client.users_info(
user=toCheck,
include_locale="false"
)
if not response['ok']:
return None
user = response['user']
if user['is_owner']:
# add owner permissions
newPerms = Database.permissions.owner
elif user['is_admin']:
#add admin permissions
newPerms = Database.permissions.admin
else:
#add user permissions
newPerms = Database.permissions.user
dbWorkspace = Database.Workspaces.query.filter_by(team_id = team).first()
newUser = Database.Users(newPerms, dbWorkspace.id, toCheck)
db.session.add(newUser)
db.session.commit()
return newPerms
def getUserName(user_id, team):
client,_ = getClient(team)
if client is None:
print("Client not found: ", team)
return ""
response = client.users_info(
user=user_id,
include_locale="false"
)
return response['user']['name']
def getClient(toCheck):
    if toCheck not in clientDictionary:
#Check for workspace in DB
dbWorkspace = Database.Workspaces.query.filter_by(team_id = toCheck).first()
if dbWorkspace is None:
print("Workspace not found in database")
            return None, None
else:
#Open a SlackClient
newClient = WebClient(dbWorkspace.bot_token)
clientDictionary[toCheck] = newClient, dbWorkspace.verify_token
return newClient, dbWorkspace.verify_token
else:
return clientDictionary[toCheck]
def addClient(bot, access, verify, team):
    newClient = Database.Workspaces(bot, access, verify, team)
db.session.add(newClient)
db.session.commit()
def sendHelp(message, sendChannel, sendUserID, sendTeamID):
messageString = ""
    if message is not None:
messageString += message +"\n\n"
messageString += "InfraBot Help:\n"
messageString += "\t!help - Prints this help prompt\n"
messageString += "\t!<module name> help - Prints the help for a given module\n"
messageString += "\nModule List:\n"
for module in commandDict:
messageString += "\t\"!" + module + "\"\n"
sendEphemeral(messageString, sendChannel, sendUserID, sendTeamID)
if __name__ == '__main__':
main()
|
# Generated by Django 2.2.8 on 2020-03-19 15:19
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('fyle_expense', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='TaskLog',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('task_id', models.CharField(help_text='Fyle job reference', max_length=255, null=True)),
('type', models.CharField(help_text='Task type', max_length=64)),
('status', models.CharField(blank=True, help_text='Task status', max_length=64, null=True)),
('detail', models.TextField(blank=True, help_text='Task details', null=True)),
('created_at', models.DateTimeField(auto_now_add=True, help_text='Created at')),
('updated_at', models.DateTimeField(auto_now=True, help_text='Updated at')),
('expense_group', models.ForeignKey(blank=True, help_text='FK to ExpenseGroup', null=True, on_delete=django.db.models.deletion.CASCADE, to='fyle_expense.ExpenseGroup')),
],
options={
'ordering': ['-created_at'],
'get_latest_by': 'created_at',
},
),
]
|
from collections import deque
from typing import Any, Optional
import shelve
import threading
class MessageQueue:
def __init__(self, max_size: int = 1000, persistence_path: Optional[str] = None):
self._lock = threading.Lock()
        self._shelve = None
        if persistence_path:
            self._shelve = shelve.open(persistence_path, writeback=True)
            self._q = self._shelve.get("queue", deque())
        else:
            self._q = deque()
self.max_size = max_size
def _save(self):
if self._shelve:
self._shelve["queue"] = self._q
def enqueue(self, msg: Any) -> bool:
with self._lock:
if len(self._q) >= self.max_size:
return False
self._q.append(msg)
self._save()
return True
def dequeue(self) -> Optional[Any]:
with self._lock:
if len(self._q) == 0:
return None
obj = self._q.popleft()
self._save()
return obj
def peek(self) -> Optional[Any]:
with self._lock:
if len(self._q) == 0:
return None
return self._q[0]
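# Minimal usage sketch (illustrative only, not part of the original module); with a
# persistence_path the queue contents would survive process restarts via shelve.
if __name__ == "__main__":
    q = MessageQueue(max_size=2)
    assert q.enqueue({"id": 1}) is True
    assert q.enqueue({"id": 2}) is True
    assert q.enqueue({"id": 3}) is False  # queue full, message rejected
    assert q.peek() == {"id": 1}          # peek does not remove the head
    assert q.dequeue() == {"id": 1}
    assert q.dequeue() == {"id": 2}
    assert q.dequeue() is None            # empty queue returns None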
|
import datetime
import json
import logging
import operator
import os
from collections import defaultdict
from datetime import date
import vk_api
import vk_api.exceptions
from vk_api import execute
#from .TimeActivityAnalysis import VKOnlineGraph
from .VKFilesUtils import check_and_create_path, DIR_PREFIX
class VKActivityAnalysis:
"""
    Module for studying user activity
"""
def __init__(self, vk_session):
"""
        Constructor
        :param vk_session: session object of the VK class
"""
self.api = vk_session.get_api()
self.tools = vk_api.VkTools(vk_session)
self.logger = logging.getLogger("ActivityAnalysis")
    # function that fetches like information in batches of 25
vk_get_all_likes_info = vk_api.execute.VkFunction(
args=('user_id', 'owner_id', 'item_ids', 'type'),
code='''
var item_ids = %(item_ids)s;
var result = [];
var i = 0;
while(i <= 25 && item_ids.length > i){
var params = {"user_id":%(user_id)s,
"owner_id": %(owner_id)s,
"item_id": item_ids[i],
"type": %(type)s
};
result = result + [API.likes.isLiked(params) + {"owner_id": params["owner_id"],
"user_id": params["user_id"],
"type": params["type"],
"item_id": params["item_id"]} ];
i = i+1;
}
return {result: result, count: item_ids.length};
''')
    # function that fetches mutual friends, checking up to 25 slices of 100 target ids per call
vk_get_all_common_friends = vk_api.execute.VkFunction(
args=('source_uid', 'target_uids'),
code='''
var source_uid = %(source_uid)s;
var target_uids = %(target_uids)s;
var result = [];
var i = 0;
while(i <= 25 && target_uids.length > i*100){
var sliced = 0;
if ( (i+1)*100 > target_uids.length) {
sliced = target_uids.slice(i*100,target_uids.length);
} else {
sliced = target_uids.slice(i*100,(i+1)*100);
}
var params = {"source_uid":%(source_uid)s,
"target_uids": sliced,
};
result = result + API.friends.getMutual(params);
i = i+1;
}
return {result:result};
''')
def is_online(self, uid):
"""
        Checks whether the user is online
        :param uid: user id
"""
resp = self.api.users.get(user_id=uid, fields='online')
self.logger.debug("is_online: " + str(uid) + '; ' + str(resp))
if len(resp) > 0 and 'online' in resp[0]:
return resp[0]['online']
else:
return None
def likes_iter(self, uid, friend_uid, count, method, max_count, values, type='post', limit=100):
"""
        Generator of like information
        :param uid: id of the user being checked
        :param friend_uid: id of the user's friend
        :param count: total number of items to scan (TODO: the original note flagged this as unclear)
        :param method: VKApi method
        :param max_count: maximum number of items the method can load in a single call
        :param values: method parameters
        :param type: record type (post, photo)
        :param limit: maximum number of records
"""
self.logger.debug("likes_iter: " + str(uid) + '; ' + str(friend_uid))
item_ids = []
entries = []
iterations = count // 25
tail = count % 25
iterations_count = 0
for key, entry in enumerate(self.tools.get_all_iter(method, max_count, values=values,
limit=limit)
):
if key > limit:
break
if iterations_count < iterations:
if key != 0 and key % 25 != 0:
item_ids += [entry['id']]
entries += [entry]
else:
for i, like_info in enumerate(self.vk_get_all_likes_info(self.api, user_id=uid,
owner_id=friend_uid,
item_ids=item_ids,
type=type).get('result')):
entries[i].update(like_info)
yield entries[i]
item_ids = []
entries = []
iterations_count += 1
else:
if key % 25 != tail - 1:
item_ids += [entry['id']]
entries += [entry]
else:
for i, like_info in enumerate(self.vk_get_all_likes_info(self.api, user_id=uid,
owner_id=friend_uid,
item_ids=item_ids,
type=type).get('result')):
entries[i].update(like_info)
yield entries[i]
item_ids = []
entries = []
def likes_friend_photos(self, uid, friend_uid, limit=100):
"""
        Generator of likes on photos
        :param uid: id of the user being checked
        :param friend_uid: friend id
        :param limit: maximum number of records to load
"""
self.logger.debug("likes_friend_photos: " + str(uid) + '; ' + str(friend_uid))
count = self.api.photos.getAll(owner_id=friend_uid, count=1)['count']
values = {'owner_id': friend_uid, 'extended': 1, 'no_service_albums': 0}
for like_info in self.likes_iter(uid=uid,
friend_uid=friend_uid,
count=count,
method='photos.getAll',
max_count=200,
values=values,
type='photo',
limit=limit):
yield like_info
def likes_friend_wall(self, uid, friend_uid, limit=100):
"""
        Generator of likes on the wall. TODO: maybe merge photos and wall? The code is almost identical
        :param uid: id of the user being checked
        :param friend_uid: friend id
        :param limit: maximum number of records to load
"""
self.logger.debug("likes_friend_wall: " + str(uid) + '; ' + str(friend_uid))
count = self.api.wall.get(owner_id=friend_uid, count=1)['count']
values = {'owner_id': friend_uid, 'filter': 'all'}
for like_info in self.likes_iter(uid=uid,
friend_uid=friend_uid,
count=count,
method='wall.get',
max_count=100,
values=values,
type='post',
limit=limit):
yield like_info
def likes_group_wall(self, uid, group_id, limit=100):
"""
        Generator of likes on a COMMUNITY wall
        :param uid: user id
        :param group_id: group id
        :param limit: maximum number of records to process
"""
self.logger.debug("likes_group_wall: " + str(uid) + '; ' + str(group_id))
return self.likes_friend_wall(uid, -abs(group_id), limit)
def friends_common_iter(self, uid, friends_ids):
"""
        Generator of information about mutual friends
        :param uid: id of the user being checked
        :param friends_ids: array of friend ids
"""
self.logger.debug("friends_common_iter: " + str(uid) + '; ' + str(friends_ids))
steps = len(friends_ids) // 2500 + 1
for i in range(steps):
commmon_friends = self.vk_get_all_common_friends(self.api,
source_uid=uid,
target_uids=friends_ids[
i * 2500: min(
(i + 1) * 2500,
len(friends_ids)
)
]).get('result')
if not commmon_friends:
continue
for friend in commmon_friends:
yield friend
def friends_all_ids(self, uid, friends_full=None):
"""
        Get the ids of all ACTIVE (not deactivated, "dog avatar") friends of the user
        :param uid: user id
        :param friends_full: array of full friend information
"""
self.logger.debug("friends_all_ids: " + str(uid))
if friends_full is None:
friends_full = self.friends_all_full(uid=uid)
return [el['id'] for el in friends_full]
def friends_all_full(self, uid, friends_full=None):
"""
        Gets detailed information on all ACTIVE (not deactivated) friends of the user
        :param uid: user id
        :param friends_full: array of full friend information
"""
self.logger.debug("friends_all_full: " + str(uid))
if friends_full is not None:
return friends_full
        # TODO: check whether there is a bit mask for the friends scope
        scope = 'nickname, domain, sex, bdate, city, country, timezone, photo_50, photo_100, photo_200_orig, has_mobile, contacts, education, online, relation, last_seen, status, can_write_private_message, can_see_all_posts, can_post, universities'
return [el for el in self.tools.get_all('friends.get', 5000, values={'user_id': uid, 'fields': scope})['items']
if 'deactivated' not in el]
def common_city_score(self, uid, friends_full=None, result_type='first'):
"""
        Returns points for a shared city.
        If the user and the friend share the same city, +3 points.
        If the city is among the most common ones: +3 points for the top 10%, +2 for the next 20%.
        :param uid: id of the user being checked
        :param friends_full: array of full friend information
        :param result_type: type of result to return; 'count' returns all counts
        :type result_type: any('first', 'count')
        :return: all results, or only the top cities
"""
self.logger.debug("common_city_score: " + str(uid))
res = {}
friends_full = self.friends_all_full(uid=uid, friends_full=friends_full)
for friend in friends_full:
if 'city' in friend:
if friend['city']['title'] in res:
res[friend['city']['title']] += 1
else:
res.update({friend['city']['title']: 1})
res = sorted(res.items(), key=operator.itemgetter(1), reverse=True)
if result_type == 'count':
return dict(res)
first_10p = {city[0]: 3 for city in res[:int(len(res) * 0.1)]}
first_30p = {city[0]: 2 for city in res[int(len(res) * 0.1):int(len(res) * 0.3)]}
first_10p.update(first_30p)
return first_10p
def score_common_age(self, uid, friends_full=None, result_type='first'):
"""
        Points for a shared age
        :param uid: user id
        :param friends_full: array of full friend information
        :param result_type: type of result to return; 'count' returns all counts
        :type result_type: any('first', 'count')
        :return: all results, or only the top years
"""
self.logger.debug("score_common_age: " + str(uid))
res = defaultdict(lambda: 0)
friends_full = self.friends_all_full(uid=uid, friends_full=friends_full)
for friend in friends_full:
if 'bdate' in friend:
bdate = friend['bdate'].split('.')
if len(bdate) > 2:
res[int(bdate[2])] += 1
res = sorted(res.items(), key=operator.itemgetter(1), reverse=True)
if result_type == 'count':
return dict(res)
first_10p = {city[0]: 3 for city in res[:int(len(res) * 0.1)]}
first_30p = {city[0]: 2 for city in res[int(len(res) * 0.1):int(len(res) * 0.3)]}
first_10p.update(first_30p)
if len(first_10p) == 0:
first_10p = {res[0][0]: 1}
return first_10p
def search_user_by_age(self, user_info, group_id, age=(1, 100)):
"""
        Determine the user's birth year via a group search (binary search over the age range)
        :param user_info: information about the user being checked
        :param group_id: id of any group the user is a member of
        :param age: range of candidate ages
        :return: the exact birth year the user specified
"""
info = self.api.users.search(q=user_info['first_name'] + ' ' + user_info['last_name'],
group_id=group_id,
age_from=age[0],
age_to=age[1],
count=1000)['items']
for user in info:
if user['id'] == user_info['id']:
if age[0] == age[1]:
return date.today().year - age[0]
return self.search_user_by_age(user_info=user_info,
group_id=group_id,
age=(age[0], (age[1] - age[0]) // 2 + age[0]))
if age[0] == age[1]:
return date.today().year - age[0] - 1
return self.search_user_by_age(user_info=user_info,
group_id=group_id,
age=(age[1], (age[1] - age[0]) * 2 + age[0]))
def user_age(self, uid, friends_full=None):
"""
        Estimate the user's age in two ways:
        - the most common birth year among friends (quite accurate for users under 25)
        - a group search (the exact value the user specified)
        :param uid: id of the user being checked
        :param friends_full: array of full friend information
        :return: dictionary with the results
"""
res = {'user_defined': -1, 'friends_predicted': -1}
user_info = self.api.users.get(user_ids=uid, fields='bdate')[0]
if 'bdate' in user_info:
bdate = user_info['bdate'].split('.')
if len(bdate) > 2:
res['user_defined'] = bdate[2]
else:
user_group = self.api.groups.get(user_id=uid, count=1)['items']
                if user_group:
user_group = user_group[0]
res['user_defined'] = self.search_user_by_age(user_info=user_info,
group_id=user_group)
else:
user_group = self.api.groups.get(user_id=uid, count=1)['items']
            if user_group:
user_group = user_group[0]
res['user_defined'] = self.search_user_by_age(user_info=user_info,
group_id=user_group)
common_age = int(list(self.score_common_age(uid=uid).items())[0][0])
res['friends_predicted'] = common_age
return res
def check_friends_online(self, uid):
"""
        Checks the online status of all of the user's friends
        :param uid: id of the user being checked
        :return: result of friends.getOnline
"""
return self.api.friends.getOnline(user_id=uid)
def likes_friends(self, uid, limit_entries=100, friends_full=None):
"""
        Generator of information about likes on friends' photos and walls
        :param uid: id of the user being checked
        :param limit_entries: maximum number of records per friend
        :param friends_full: array of full friend information
"""
friends_full = self.friends_all_full(uid=uid, friends_full=friends_full)
friends = self.friends_all_ids(uid=uid, friends_full=friends_full)
count = len(friends)
for i, friend in enumerate(friends, 1):
for like in self.likes_friend_wall(uid=uid, friend_uid=friend, limit=limit_entries):
if like['liked'] or like['copied']:
r = like
r.update({"count": count,
"current": i,
"name": friends_full[i-1]['first_name'] + ' ' + friends_full[i-1]['last_name']})
yield r
for like in self.likes_friend_photos(uid=uid, friend_uid=friend, limit=limit_entries):
if like['liked'] or like['copied']:
r = like
r.update({"count": count,
"current": i,
"name": friends_full[i-1]['first_name'] + ' ' + friends_full[i-1]['last_name']})
yield r
yield {"count": len(friends), "current": i, "inf": 0}
def likes_groups(self, uid, limit=100, groups=None):
"""
        Generator of information about likes in communities
        :param uid: id of the user being checked
        :param limit: maximum number of records per group
        :param groups: array of group ids
"""
        # TODO: it would be good to remove the code duplicated from likes_friends
if groups is None:
groups = self.tools.get_all('users.getSubscriptions', 200, values={"extended": 1, "user_id": uid})
for i, group in enumerate(groups['items'], 1):
try:
for like in self.likes_group_wall(uid=uid, group_id=group['id'], limit=limit):
if like['liked'] or like['copied']:
r = like
r.update({"count": groups['count'],
"current": i,
"name": groups['items'][i-1]['name']})
yield r
except vk_api.exceptions.ApiError as error:
                # TODO: handle this properly
if error.code == 13:
self.logger.error("Size is too big, skipping group_id=" + str(group['id']))
elif error.code == 15:
self.logger.warning("Wall is disabled, skipping group_id=" + str(group['id']))
else:
raise error
except vk_api.exceptions.ApiHttpError as error:
                # TODO: unclear behaviour, needs investigation
self.logger.error("Server 500 error, skipping group_id=" + str(group['id']))
yield {"count": groups['count'], "current": i, "inf": 0}
def likes_friends_and_groups(self, uid, limit=100, friends_need=False, groups_need=False, friends_full=None, groups=None):
"""
        Generator of information about likes among friends and communities
        :param uid: id of the user being checked
        :param limit: number of records to load per element
        :param friends_need: whether friends should be checked
        :param groups_need: whether groups should be checked
        :param friends_full: array of full friend information
        :param groups: array of subscriptions
:return:
"""
friends_full = self.friends_all_full(uid, friends_full)
if groups is None:
            # TODO: subscriptions may also contain people; needs rework, bugs are possible
groups = self.tools.get_all('users.getSubscriptions', 200, values={"extended": 1, "user_id": uid})
        friends_count = friends_need*len(friends_full)
        groups_count = groups_need*groups['count']
        count = friends_count + groups_count
if friends_need:
for like in self.likes_friends(uid=uid, limit_entries=limit, friends_full=friends_full):
r = like
r.update({"count": count})
yield r
if groups_need:
for like in self.likes_groups(uid=uid, limit=limit, groups=groups):
r = like
r.update({"count": count, "current": like['current'] + friends_count})
yield r
def score_likes_friends(self, uid, limit=100, friends_full=None):
"""
        Returns points for likes given to friends
        :param uid: id of the user being checked
        :param limit: number of records loaded per page
        :param friends_full: array of full friend information
"""
score = 0
for post_info in self.likes_friends(uid=uid,
limit_entries=limit,
friends_full=friends_full):
if 'liked' in post_info:
if post_info['liked'] == 1:
score += 1
if 'copied' in post_info:
if post_info['copied'] == 1:
score += 10
if 'inf' in post_info:
temp = score
score = 0
yield 'likes_friends', post_info['current']-1, temp
def score_likes_self(self, uid, limit=100, friends_full=None):
"""
        Returns points for friends' likes on the user's own page
        :param uid: id of the user being checked
        :param limit: maximum number of records
        :param friends_full: array of full friend information
"""
friends = self.friends_all_ids(uid=uid, friends_full=friends_full)
res = [0]*len(friends)
for key, post in enumerate(self.tools.get_all_iter(method='wall.get', max_count=100, values={'owner_id': uid},
limit=limit)):
if key > limit:
break
post_likes = self.tools.get_all(method='likes.getList', max_count=100, values={'type': 'post',
'skip_own':1,
'owner_id': uid,
'item_id': post['id']})['items']
post_reposts = self.tools.get_all(method='likes.getList', max_count=100, values={'type': 'post',
'skip_own': 1,
'owner_id': uid,
'filter': 'copies',
'item_id': post['id']})['items']
for user in post_likes:
if user in friends:
res[friends.index(user)] += 1
            for user in post_reposts:
                if user in friends:
                    res[friends.index(user)] += 10
for key, photo in enumerate(self.tools.get_all_iter(method='photos.getAll',
max_count=200,
values={'owner_id': uid, 'extended': 1, 'no_service_albums': 0})):
if key>limit:
break
photo_likes = self.tools.get_all(method='likes.getList', max_count=100, values={'type': 'photo',
'skip_own':1,
'owner_id': uid,
'item_id': photo['id']})['items']
            for user in photo_likes:
                if user in friends:
                    res[friends.index(user)] += 1
for i, friend in enumerate(res):
yield 'likes_self', i, friend
def score_mutual_friends(self, uid, friends_full=None):
"""
        Returns points for mutual friends
        :param uid: id of the user being checked
        :param friends_full: array of full friend information
"""
res = []
friends = self.friends_all_ids(uid=uid, friends_full=friends_full)
for mutual in self.friends_common_iter(uid=uid, friends_ids=friends):
res.append(mutual['common_count'])
res_sorted = sorted(list(set(res)))
count = len(res_sorted)
for i, friend in enumerate(res):
yield 'friends', i, res_sorted.index(friend)*10//count
def score_all_common_age(self, uid, friends_full=None):
"""
        Returns points for a shared age
        :param uid: id of the user being checked
        :param friends_full: array of full friend information
"""
friends_full = self.friends_all_full(uid=uid, friends_full=friends_full)
user_age = self.user_age(uid=uid, friends_full=friends_full)
def get_user_real_age(age):
if age[0] == age[1]:
return age[0],1,2
elif age[0] == -1:
return age[1],2,3
elif age[1] == -1:
return age[0],2,3
else:
return (int(age[0])+int(age[1]))//2, -1, abs(int(age[0])-int(age[1]))
user_real_age = get_user_real_age((user_age['user_defined'], user_age['friends_predicted']))
for i, friend in enumerate(friends_full):
score = 0
if 'bdate' in friend:
date = friend['bdate'].split('.')
if len(date)>2:
if int(date[2]) - user_real_age[1] <= user_real_age[0] <= int(date[2]) + user_real_age[1]:
score = 3
elif int(date[2]) - user_real_age[2] <= user_real_age[0] <= int(date[2]) + user_real_age[2]:
score = 1
yield 'age', i, score
def score_all_common_city(self, uid, friends_full=None):
"""
        Returns points for a shared city
        :param uid: id of the user being checked
        :param friends_full: array of full friend information
"""
friends_full = self.friends_all_full(uid=uid, friends_full=friends_full)
common_city_score = self.common_city_score(uid=uid, friends_full=friends_full, result_type='first')
user = self.api.users.get(user_id=uid,fields='city')[0]
user_city = ''
if 'city' in user:
user_city = user['city']['title']
for i, friend in enumerate(friends_full):
score = 0
if 'city' in friend:
friend_city = friend['city']['title']
if friend_city in common_city_score:
score = common_city_score[friend_city]
score += (friend_city==user_city)*3
yield 'city', i, score
def score_all(self,
uid,
limit=100,
likes_friends_need=False,
likes_self_need=False,
common_friends_need=False,
common_age_need=False,
common_city_need=False,
friends_full=None):
"""
        Generator of information about the user's social circle
        :param uid: id of the user being checked
        :param limit: maximum number of records to load at a time
        :param likes_friends_need: check likes given to friends
        :param likes_self_need: check friends' likes on the user's page
        :param common_friends_need: check mutual friends
        :param common_age_need: check shared age
        :param common_city_need: check shared city
        :param friends_full: array of full friend information
"""
friends_full = self.friends_all_full(uid=uid, friends_full=friends_full)
if common_age_need:
for element in self.score_all_common_age(uid=uid, friends_full=friends_full):
yield element
if common_city_need:
for element in self.score_all_common_city(uid=uid, friends_full=friends_full):
yield element
if common_friends_need:
for element in self.score_mutual_friends(uid=uid, friends_full=friends_full):
yield element
if likes_self_need:
for element in self.score_likes_self(uid=uid, limit=limit, friends_full=friends_full):
yield element
if likes_friends_need:
for element in self.score_likes_friends(uid=uid, limit=limit, friends_full=friends_full):
yield element
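# Minimal usage sketch (illustrative only, not part of the original module): the token below is a
# placeholder and the calling application normally supplies an authorized vk_api session.
# import vk_api
# vk_session = vk_api.VkApi(token="<access token>")
# analysis = VKActivityAnalysis(vk_session)
# for kind, friend_index, score in analysis.score_all(uid=1,
#                                                     common_age_need=True,
#                                                     common_city_need=True):
#     print(kind, friend_index, score)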
|
"""
Evaluation of treatment effect estimation
"""
import numpy as np
def transformed_outcome_loss(tau_pred, y_true, g, prob_treatment):
"""
Calculate a biased estimate of the mean squared error of individualized treatment effects
tau_pred : array
The predicted individualized treatment effects.
y_true : array
The observed individual outcome.
g : array, {0,1}
An indicator of the treatment group. Currently supports only two treatment groups, typically
        control (g=0) and treatment group (g=1).
    prob_treatment : float
        The probability of assignment to the treatment group (the propensity score).
    """
# Transformed outcome
y_trans = (g - prob_treatment) * y_true / (prob_treatment * (1-prob_treatment))
loss = np.mean(((y_trans - tau_pred)**2))
return loss
def qini_score(tau_score, y_true, treatment_group, prob_treatment, n_bins=10):
"""
Calculate the Qini score
"""
    raise NotImplementedError
def expected_policy_profit(targeting_decision, g, observed_profit, prob_treatment):
"""
Calculate the profit of a coupon targeting campaign
"""
return np.sum(((1-targeting_decision) * (1-g) * observed_profit)/(1-prob_treatment) +\
(targeting_decision * g * observed_profit)/(prob_treatment))
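# Minimal usage sketch (illustrative only; the numbers are made up and prob_treatment=0.5
# mimics a 50/50 randomized assignment).
if __name__ == "__main__":
    tau_pred = np.array([0.4, 0.1, 0.3, 0.0])
    y_true = np.array([1.0, 0.0, 1.0, 0.0])
    g = np.array([1, 1, 0, 0])
    print(transformed_outcome_loss(tau_pred, y_true, g, prob_treatment=0.5))
    # target everyone with a positive predicted effect and evaluate that policy
    targeting = (tau_pred > 0).astype(int)
    print(expected_policy_profit(targeting, g, observed_profit=y_true, prob_treatment=0.5))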
|
"""
Write a function that accepts an array of 10 integers (between 0 and 9), that returns a string of those numbers in the form of a phone number.
Example
create_phone_number([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]) # => returns "(123) 456-7890"
"""
def create_phone_number(n):
return "({}{}{}) {}{}{}-{}{}{}{}".format(*n)
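# Quick self-check of the docstring example above (illustrative only).
assert create_phone_number([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]) == "(123) 456-7890"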
|
import salt.modules.baredoc as baredoc
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.runtests import RUNTIME_VARS
from tests.support.unit import TestCase
class BaredocTest(TestCase, LoaderModuleMockMixin):
"""
Validate baredoc module
"""
def setup_loader_modules(self):
return {
baredoc: {
"__opts__": {"extension_modules": RUNTIME_VARS.SALT_CODE_DIR},
"__grains__": {"saltpath": RUNTIME_VARS.SALT_CODE_DIR},
}
}
def test_baredoc_list_states(self):
"""
Test baredoc state module listing
"""
ret = baredoc.list_states(names_only=True)
assert "value_present" in ret["xml"][0]
def test_baredoc_list_states_args(self):
"""
Test baredoc state listing with args
"""
ret = baredoc.list_states()
assert "value_present" in ret["xml"][0]
assert "xpath" in ret["xml"][0]["value_present"]
def test_baredoc_list_states_single(self):
"""
Test baredoc state listing single state module
"""
ret = baredoc.list_states("xml")
assert "value_present" in ret["xml"][0]
assert "xpath" in ret["xml"][0]["value_present"]
def test_baredoc_list_modules(self):
"""
        test baredoc execution module listing
"""
ret = baredoc.list_modules(names_only=True)
assert "get_value" in ret["xml"][0]
def test_baredoc_list_modules_args(self):
"""
test baredoc execution module listing with args
"""
ret = baredoc.list_modules()
assert "get_value" in ret["xml"][0]
assert "file" in ret["xml"][0]["get_value"]
def test_baredoc_list_modules_single_and_alias(self):
"""
test baredoc single module listing
"""
ret = baredoc.list_modules("mdata")
assert "put" in ret["mdata"][2]
assert "keyname" in ret["mdata"][2]["put"]
def test_baredoc_state_docs(self):
ret = baredoc.state_docs()
assert "XML Manager" in ret["xml"]
assert "zabbix_usergroup" in ret
def test_baredoc_state_docs_single_arg(self):
ret = baredoc.state_docs("xml")
assert "XML Manager" in ret["xml"]
ret = baredoc.state_docs("xml.value_present")
assert "Manages a given XML file" in ret["xml.value_present"]
def test_baredoc_state_docs_multiple_args(self):
ret = baredoc.state_docs("zabbix_hostgroup.present", "xml")
assert "Ensures that the host group exists" in ret["zabbix_hostgroup.present"]
assert "XML Manager" in ret["xml"]
assert "Manages a given XML file" in ret["xml.value_present"]
def test_baredoc_module_docs(self):
ret = baredoc.module_docs()
assert "A module for testing" in ret["saltcheck"]
def test_baredoc_module_docs_single_arg(self):
ret = baredoc.module_docs("saltcheck")
assert "A module for testing" in ret["saltcheck"]
def test_baredoc_module_docs_multiple_args(self):
ret = baredoc.module_docs("saltcheck", "xml.get_value")
assert "A module for testing" in ret["saltcheck"]
assert "Returns the value of the matched xpath element" in ret["xml.get_value"]
|
from django.shortcuts import redirect
def index(request):
return redirect('/app/')
def stats(request):
return redirect('/app/stats')
|
"""
Module for accessing the sqlite monster hunter db from
"""
import os
import sqlite3
import json
from mhapi import model
def field_model(key):
"""
Model to replace each row with the value of single field in the row,
with the specified key.
"""
def model_fn(row):
return row[key]
return model_fn
def _db_path(game=None):
module_path = os.path.dirname(__file__)
project_path = os.path.abspath(os.path.join(module_path, ".."))
return os.path.join(project_path, "db", "mh%s.db" % game)
ARMOR_HUNTER_TYPES = {
"Blade": 0,
"Gunner": 1,
"Both": 2,
}
class MHDB(object):
"""
Wrapper around the Android App sqlite3 db. The following conventions
are used:
- get_ENTITY_NAME will return a single entity by id
- get_ENTITY_NAME_by_name will return a single entity by name
- get_ENTITY_NAMEs will return a list of all entities in the db
- get_ENTITY_NAME_names will return a list of all names of the
entities in the db, possibly with a type param.
"""
# buy and sell are empty, uses weapon.create_cost and upgrade_cost
_weapon_select = """
SELECT items.*, weapons.*
FROM weapons
LEFT JOIN items ON weapons._id = items._id
"""
    # sell has a value, but it is not used at the moment
_decoration_select = """
SELECT items._id, items.type, items.name,
items.rarity, decorations.*
FROM decorations
LEFT JOIN items ON decorations._id = items._id
"""
# buy has the armor cost, sell is empty
_armor_select = """
SELECT items._id, items.type, items.name,
items.rarity, items.buy, armor.*
FROM armor
LEFT JOIN items ON armor._id = items._id
"""
def __init__(self, game=None, path=None, use_cache=False,
include_item_components=False):
"""
If use_cache=True, a lot of memory could be used. No attempt is
made to de-dupe data between keys, e.g. if you access an item
by id and by name, it will be fetched and stored in the cache
twice. Disk cache, sqlite caching, and the smallness of the
database should make in-memory caching unnecessary for most use
cases.
"""
if game is None:
game = os.environ.get("MHAPI_GAME")
assert game in ("4u", "gen", "gu")
self.game = game
if game == "4u":
# filter out non-localized DLC
self._weapon_select = (MHDB._weapon_select
+ "WHERE items.name != items.name_jp\n")
else:
# no filter needed, but having where in all cases simplifies
# queries below
self._weapon_select = (MHDB._weapon_select
+ "WHERE 1=1\n")
if path is None:
path = _db_path(game)
self.conn = sqlite3.connect(path)
self.conn.row_factory = sqlite3.Row
self.use_cache = use_cache
self.include_item_components = include_item_components
self.cache = {}
def _query_one(self, key, query, args=(), model_cls=None,
no_cache=False):
values = self._query_all(key, query, args, model_cls, no_cache)
if values:
return values[0]
else:
return None
def _query_all(self, key, query, args=(), model_cls=None,
no_cache=False, collection_cls=None):
assert isinstance(args, tuple)
assert model_cls is None or collection_cls is None
if self.use_cache and not no_cache:
if key in self.cache:
v = self.cache[key].get(args)
if v is not None:
return v
else:
self.cache[key] = {}
#print "query", query
cursor = self.conn.execute(query, args)
rows = cursor.fetchall()
if model_cls:
rows = [model_cls(row) for row in rows]
if collection_cls:
rows = collection_cls(rows)
if self.use_cache and not no_cache:
self.cache[key][args] = rows
self._add_components(key, rows)
return rows
def cursor(self):
return self.conn.cursor()
def commit(self):
return self.conn.commit()
def close(self):
return self.conn.close()
def get_item_types(self):
"""
List of strings.
"""
return self._query_all("item_types", """
SELECT DISTINCT type FROM items
""", model_cls=field_model("type"))
def get_item_names(self, item_types):
"""
List of unicode strings.
"""
args = sorted(item_types)
placeholders = ", ".join(["?"] * len(item_types))
return self._query_all("item_names", """
SELECT _id, name FROM items
WHERE type IN (%s)
""" % placeholders, tuple(args), model_cls=field_model("name"))
    def get_items(self, item_types=None, exclude_types=None, wyporium=False):
        """
        List of item objects.
        """
        fields = ["items.*"]
        joins = []
        conditions = []
        args = []
        if item_types:
            item_types = sorted(item_types)
            placeholders = ", ".join(["?"] * len(item_types))
            conditions.append("type IN (%s)" % placeholders)
            args.extend(item_types)
        if exclude_types:
            exclude_types = sorted(exclude_types)
            placeholders = ", ".join(["?"] * len(exclude_types))
            conditions.append("type NOT IN (%s)" % placeholders)
            args.extend(exclude_types)
        if wyporium:
            joins.append("LEFT JOIN wyporium AS w ON w.item_in_id = items._id")
            joins.append("LEFT JOIN items AS wi ON w.item_out_id = wi._id")
            fields += ["w.item_out_id AS wyporium_item_id",
                       "wi.name AS wyporium_item_name"]
        q = "SELECT " + ", ".join(fields) + " FROM items \n"
        if joins:
            q += "\n".join(joins) + "\n"
        if conditions:
            q += "WHERE " + " AND ".join(conditions)
        args = tuple(args)
        return self._query_all("items", q, args, model_cls=model.Item)
def get_item(self, item_id):
"""
Single item object or None.
"""
return self._query_one("item", """
SELECT * FROM items
WHERE _id=?
""", (item_id,), model_cls=model.Item)
def get_item_by_name(self, name):
"""
Single item object or None.
"""
return self._query_one("item", """
SELECT * FROM items
WHERE name=?
""", (name,), model_cls=model.Item)
def get_wyporium_trade(self, item_id):
"""
Single wyporium row or None.
"""
if self.game != "4u":
return None
return self._query_one("wyporium", """
SELECT * FROM wyporium
WHERE item_in_id=?
""", (item_id,))
def get_wyporium_trades(self):
"""
        All wyporium trade rows joined with item and quest details, or None for games other than 4u.
"""
if self.game != "4u":
return None
return self._query_all("wyporium", """
SELECT items.*,
wyporium.item_out_id AS wyporium_item_id,
trade_items.name AS wyporium_item_name,
quests._id AS wyporium_quest_id,
quests.name AS wyporium_quest_name,
quests.hub AS wyporium_quest_hub,
quests.stars AS wyporium_quest_stars,
quests.rank AS wyporium_quest_rank
FROM wyporium
JOIN items ON items._id = wyporium.item_in_id
JOIN items AS trade_items ON trade_items._id = wyporium.item_out_id
JOIN quests ON wyporium.unlock_quest_id == quests._id
""", model_cls=model.Item)
def search_item_name(self, term, item_type=None):
"""
Search for items containing @term somewhere in the name. Returns
list of matching items.
Not memoized.
"""
query = """
SELECT * FROM items
WHERE name LIKE ?
"""
args = ["%%%s%%" % term]
if item_type is not None:
if isinstance(item_type, (list, tuple)):
query += "AND type IN (%s)" % (",".join(["?"] * len(item_type)))
args += item_type
else:
query += "AND type = ?"
args += [item_type]
return self._query_all("search_item", query, tuple(args),
no_cache=True, model_cls=model.Item)
def get_monsters(self):
return self._query_all("monsters", """
SELECT * FROM monsters
""", model_cls=model.Monster)
def get_monster_names(self):
"""
List of unicode strings.
"""
return self._query_all("monster_names", """
SELECT name FROM monsters
""", model_cls=field_model("name"))
def get_monster(self, monster_id):
return self._query_one("monster", """
SELECT * FROM monsters
WHERE _id=?
""", (monster_id,), model_cls=model.Monster)
def get_monster_by_name(self, name):
return self._query_one("monster", """
SELECT * FROM monsters
WHERE name=?
""", (name,), model_cls=model.Monster)
def get_quest(self, quest_id):
return self._query_one("quest", """
SELECT * FROM quests
WHERE _id=?
""", (quest_id,), model_cls=model.Quest)
def get_quests(self):
return self._query_all("quests", """
SELECT * FROM quests
""", model_cls=model.Quest)
def get_quest_rewards(self, quest_id):
return self._query_all("quest_rewards", """
SELECT * FROM quest_rewards
WHERE quest_id=?
""", (quest_id,))
def get_monster_rewards(self, monster_id, rank=None):
q = """
SELECT * FROM hunting_rewards
WHERE monster_id=?
"""
if rank is not None:
q += "AND rank=?"
args = (monster_id, rank)
else:
args = (monster_id,)
return self._query_all("monster_rewards", q, args)
def get_quest_monsters(self, quest_id):
return self._query_all("quest_monsters", """
SELECT monster_id, unstable FROM monster_to_quest
WHERE quest_id=?
""", (quest_id,))
def get_monster_quests(self, monster_id, rank):
return self._query_all("monster_quests", """
SELECT DISTINCT quests.* FROM quests, monster_to_quest
WHERE monster_to_quest.quest_id = quests._id
AND monster_to_quest.monster_id=? AND rank=?
""", (monster_id, rank), model_cls=model.Quest)
def get_item_quests(self, item_id):
"""
Get a list of quests that provide the specified item in quest
rewards. Returns a list of quest objects, which encapsulate the
quest details and the list of rewards.
"""
cursor = self.conn.execute("""
SELECT DISTINCT quest_id FROM quest_rewards
WHERE item_id=?
""", (item_id,))
rows = cursor.fetchall()
quests = []
for r in rows:
quest_id = r["quest_id"]
quest = self.get_quest(quest_id)
quest.rewards = self.get_quest_rewards(quest_id)
quests.append(quest)
return quests
def get_item_monsters(self, item_id):
return self._query_all("item_monsters", """
SELECT DISTINCT monster_id, rank FROM hunting_rewards
WHERE item_id=?
""", (item_id,))
def get_item_gathering(self, item_id):
return self._query_all("item_gathering", """
SELECT * FROM gathering
WHERE item_id=?
""", (item_id,))
def get_location(self, location_id):
self._query_one("location", """
SELECT * FROM locations
WHERE _id=?
""", (location_id,), model_cls=model.Location)
def get_locations(self):
return self._query_all("locations", """
SELECT * FROM locations
""", model_cls=model.Location)
def get_location_quests(self, location_id, rank):
return self._query_all("location_quests", """
SELECT DISTINCT * FROM quests
WHERE location_id=? AND rank=?
""", (location_id, rank), model_cls=model.Quest)
def get_monster_damage(self, monster_id):
return self._query_all("monster_damage", """
SELECT * FROM monster_damage
WHERE monster_id=?
""", (monster_id,), collection_cls=model.MonsterDamage)
def get_weapons(self):
# Note: weapons only available via JP DLC have no localized
# name, filter them out.
q = self._weapon_select
return self._query_all("weapons", q, model_cls=model.Weapon)
def get_weapons_by_query(self, wtype=None, element=None,
final=None):
"""
@element can have the special value 'Raw' to search for weapons
with no element. Otherwise @element is searched for in both
awaken and native, and can be a status or an element.
@final should be string '1' or '0'
"""
q = self._weapon_select
where = []
args = []
if wtype is not None:
where.append("wtype = ?")
args.append(wtype)
if element is not None:
if element == "Raw":
where.append("(element = '' AND awaken = '')")
else:
where.append("(element = ? OR element_2 = ? OR awaken = ?)")
args.extend([element] * 3)
if final is not None:
where.append("final = ?")
args.append(final)
if where:
q += "AND " + "\nAND ".join(where)
results = self._query_all("weapons", q, tuple(args),
model_cls=model.Weapon)
return results
def get_weapon(self, weapon_id):
return self._query_one("weapon", self._weapon_select + """
AND weapons._id=?
""", (weapon_id,), model_cls=model.Weapon)
def get_weapon_by_name(self, name):
return self._query_one("weapon", self._weapon_select + """
AND items.name=?
""", (name,), model_cls=model.Weapon)
def get_weapons_by_parent(self, parent_id):
return self._query_all("weapon_by_parent", self._weapon_select + """
AND weapons.parent_id=?
""", (parent_id,), model_cls=model.Weapon)
def get_armors(self):
return self._query_all("armors", MHDB._armor_select,
model_cls=model.Armor)
def get_armor(self, armor_id):
return self._query_one("armor", MHDB._armor_select + """
WHERE armor._id=?
""", (armor_id,), model_cls=model.Armor)
def get_armor_by_name(self, name):
return self._query_one("armor", MHDB._armor_select + """
WHERE items.name=?
""", (name,), model_cls=model.Armor)
def get_item_skills(self, item_id):
return self._query_all("item_skills", """
SELECT item_to_skill_tree.*, skill_trees.name
FROM item_to_skill_tree
LEFT JOIN skill_trees
ON item_to_skill_tree.skill_tree_id = skill_trees._id
WHERE item_to_skill_tree.item_id=?
""", (item_id,), model_cls=model.ItemSkill)
def get_decorations(self):
return self._query_all("decorations", MHDB._decoration_select,
model_cls=model.Decoration)
def get_decoration(self, decoration_id):
return self._query_one("decoration", MHDB._decoration_select + """
WHERE decorations._id = ?
""", (decoration_id,), model_cls=model.Decoration)
def get_decoration_by_name(self, name):
return self._query_all("decoration", MHDB._decoration_select + """
WHERE items.name = ?
""", (name,), model_cls=model.Decoration)
def get_skill_trees(self):
return self._query_all("skill_trees", """
SELECT _id, name FROM skill_trees
""", model_cls=model.SkillTree)
def get_skill_tree_id(self, skill_tree_name):
result = self._query_one("skill", """
SELECT _id FROM skill_trees
WHERE name=?
""", (skill_tree_name,))
if result:
return result["_id"]
return None
def get_skills(self):
return self._query_all("skills", """
SELECT _id, skill_tree_id, required_skill_tree_points,
name, description
FROM skills
""", model_cls=model.Skill)
def get_decorations_by_skills(self, skill_tree_ids):
args = sorted(skill_tree_ids)
placeholders = ", ".join(["?"] * len(skill_tree_ids))
return self._query_all("decorations", """
SELECT items._id, items.type, items.name, items.rarity,
decorations.*
FROM item_to_skill_tree
LEFT JOIN items
ON items._id = item_to_skill_tree.item_id
LEFT JOIN decorations
ON decorations._id = item_to_skill_tree.item_id
WHERE items.type = 'Decoration'
AND item_to_skill_tree.skill_tree_id IN (%s)
AND item_to_skill_tree.point_value > 0
GROUP BY item_to_skill_tree.item_id
""" % placeholders, tuple(args), model_cls=model.Decoration)
def get_armors_by_skills(self, skill_tree_ids, hunter_type):
args = sorted(skill_tree_ids)
placeholders = ", ".join(["?"] * len(skill_tree_ids))
both_type = "Both"
if self.game in ("gen", "gu"):
both_type = ARMOR_HUNTER_TYPES[both_type]
hunter_type = ARMOR_HUNTER_TYPES[hunter_type]
args += [both_type, hunter_type]
return self._query_all("decorations", """
SELECT items._id, items.type, items.name, items.rarity, items.buy,
armor.*
FROM item_to_skill_tree
LEFT JOIN items
ON items._id = item_to_skill_tree.item_id
LEFT JOIN armor
ON armor._id = item_to_skill_tree.item_id
WHERE items.type = 'Armor'
AND item_to_skill_tree.skill_tree_id IN (%s)
AND item_to_skill_tree.point_value > 0
AND armor.hunter_type IN (?, ?)
GROUP BY item_to_skill_tree.item_id
""" % placeholders, tuple(args), model_cls=model.Armor)
def get_monster_breaks(self, monster_id):
"""
List of strings.
"""
def model(row):
condition = row["condition"]
if condition == "Tail Carve":
return "Tail"
else:
return condition[len("Break "):]
return self._query_all("monster_breaks", """
SELECT DISTINCT condition FROM hunting_rewards
WHERE monster_id=?
AND (condition LIKE 'Break %' OR condition = 'Tail Carve')
""", (monster_id,), model_cls=model)
def get_item_components(self, item_id, method="Create"):
return self._query_all("item_components", """
SELECT items._id, items.name, items.type,
components.quantity, components.type AS method
FROM components
LEFT JOIN items
ON items._id = components.component_item_id
WHERE created_item_id=? AND components.type=?
""", (item_id, method), model_cls=model.ItemComponent)
def get_horn_melodies(self):
return self._query_all("horn_melodies", """
SELECT *
FROM horn_melodies
""", model_cls=model.HornMelody)
def get_horn_melodies_by_notes(self, notes):
return self._query_all("horn_melodies", """
SELECT *
FROM horn_melodies
WHERE notes=?
""", (notes,), model_cls=model.HornMelody)
def get_material_items(self, material_item_id):
"""
Get dict rows of items that satisfy the given material, containing
item_id and amount keys. MHGen only.
"""
assert self.game in ("gen", "gu")
return self._query_all("material_items", """
SELECT item_id, amount FROM item_to_material
WHERE item_to_material.material_item_id = ?
ORDER BY amount ASC
""", (material_item_id,))
def _add_components(self, key, item_results):
"""
Add component data to item results from _query_one or _query_all,
if include_item_components is set. Uses the cache key to determine
if it's one of the item types we care about having components for.
TODO: use batches or single query to make this more efficient for
large result sets.
"""
if not self.include_item_components:
return
if key.rstrip("s") not in "weapon armor decoration".split():
return
if item_results is None:
return
if not isinstance(item_results, list):
item_results = [item_results]
for item_data in item_results:
ccomps = self.get_item_components(item_data.id, "Create")
if not ccomps:
# might be two possible ways of making the item, just
# get the first one for now
ccomps = self.get_item_components(item_data.id, "Create A")
if item_data["type"] == "Weapon":
# only weapons have upgrade components
ucomps = self.get_item_components(item_data.id, "Improve")
else:
ucomps = None
item_data.set_components(ccomps, ucomps)
class MHDBX(object):
"""
    Wrapper around Monster Hunter JSON data, defaulting to MHX. Attempts limited
    compatibility with the original 4U MHDB class.
    Uses an MHDB object as a temporary fallback for MHX/World data that is not yet
    available or integrated.
"""
def __init__(self, game="mhx"):
"""
Loads JSON data, keeps in memory.
"""
module_path = os.path.dirname(__file__)
self._mhx_db_path = os.path.abspath(os.path.join(module_path, "..",
"db", game))
self._4udb = MHDB()
self._weapon_list = []
self._weapons_by_name = {}
self._monsters_by_name = {}
self._monster_damage = {}
self._monster_breaks = {}
self._load_weapons()
self._load_monsters()
def _load_weapons(self):
with open(os.path.join(self._mhx_db_path, "weapon_list.json")) as f:
wlist = json.load(f)
for i, wdata in enumerate(wlist):
wdata["_id"] = i
weapon = model.Weapon(wdata)
self._weapon_list.append(weapon)
self._weapons_by_name[weapon.name_jp] = weapon
def _load_monsters(self):
names_path = os.path.join(self._mhx_db_path,
"monster_list.json")
hitboxes_path = os.path.join(self._mhx_db_path,
"monster_hitboxes.json")
with open(names_path) as f:
names = json.load(f)
for i, d in enumerate(names):
d["_id"] = -i
d["class"] = "Large"
self._monsters_by_name[d["name"]] = model.Monster(d)
with open(hitboxes_path) as f:
damage_map = json.load(f)
for name, damage in damage_map.items():
mid = self._monsters_by_name[name].id
damage_rows = []
for part, data in damage.items():
if part.startswith("_"):
continue
row = dict((k.lower(), v) for k, v in data.items())
row["body_part"] = part
row["ko"] = 100 if part == "Head" else 0
row["_id"] = 0
row["monster_id"] = mid
damage_rows.append(row)
self._monster_damage[mid] = model.MonsterDamage(damage_rows)
self._monster_breaks[mid] = damage["_breaks"]
def get_weapon_by_name(self, name):
return self._weapons_by_name.get(name)
def get_monster_by_name(self, name):
m = self._monsters_by_name.get(name)
if m and m.id in self._monster_damage:
return m
return self._4udb.get_monster_by_name(name)
def get_monster_damage(self, monster_id):
d = self._monster_damage.get(monster_id)
if d:
return d
return self._4udb.get_monster_damage(monster_id)
def get_monster_breaks(self, monster_id):
b = self._monster_breaks.get(monster_id)
if b:
return b
return self._4udb.get_monster_breaks(monster_id)
def get_weapons_by_query(self, wtype=None, element=None,
final=None):
"""
@element can have the special value 'Raw' to search for weapons
with no element. Otherwise @element is searched for in both
awaken and native, and can be a status or an element.
@final should be string '1' or '0'
"""
        final = int(final) if final is not None else None
results = []
for w in self._weapon_list:
if wtype is not None and w.wtype != wtype:
continue
if (element is not None
and element not in (w.element, w.element_2)
and not (element == "Raw" and not w.element)):
continue
if final is not None and w.final != final:
continue
results.append(w)
return results
|
"""Sample app."""
from aws_cdk import aws_sns as sns
from aws_cdk import core
class MyStack1(core.Stack): # pylint: disable=too-few-public-methods
"""My stack."""
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
"""Instantiate class."""
super().__init__(scope, id, **kwargs)
sns.Topic(self, "MyFirstTopic", display_name="My First Topic")
|
from torch.utils.data import DataLoader
from torchvision import transforms as tr
import pytorch_lightning as pl
from intphys.data import *
MAX_IMAGE_SIZE = 640
class DataModule(pl.LightningDataModule):
def __init__(self, config):
super().__init__()
image_dim = config.get("image_size", 112)
transforms = [tr.Resize(image_dim)]
frame_encoder = config["model"]["frame_encoder"]["architecture"]
normalizer = None
if frame_encoder.startswith("resnet"):
normalizer = tr.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
elif frame_encoder in ("r3d_18",):
normalizer = tr.Normalize(
mean=[0.43216, 0.394666, 0.37645],
std=[0.22803, 0.22145, 0.216989])
if normalizer is not None:
transforms.append(normalizer)
self.frame_transform = tr.Compose(transforms)
self.dataset_class = eval(config["dataset"]["name"])
self.config = config
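    # A rough sketch of the config structure this module expects (keys inferred
    # from the lookups above; concrete values are illustrative):
    # {
    #     "image_size": 112,
    #     "model": {"frame_encoder": {"architecture": "resnet18"}},
    #     "dataset": {"name": "SomeIntPhysDataset", "params": {"data_dir": "/path/to/data"}},
    #     "loader": {"batch_size": 16, "num_workers": 4},
    # }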
@property
def dataset_name(self):
return self.config["dataset"]["name"]
def setup(self, stage=None):
Dataset = self.dataset_class
if stage == "fit" or stage is None:
self.train_data = Dataset(
split="train",
transform=self.frame_transform,
**self.config["dataset"]["params"]
)
self.val_data = Dataset(
split="validation",
transform=self.frame_transform,
**self.config["dataset"]["params"]
)
if stage == "test" or stage is None:
self.test_data = Dataset(
split="test",
transform=self.frame_transform,
**self.config["dataset"]["params"]
)
def train_dataloader(self):
return DataLoader(
self.train_data,
shuffle=True,
collate_fn=train_collate_fn,
**self.config["loader"])
def val_dataloader(self):
return DataLoader(
self.val_data,
shuffle=False,
collate_fn=train_collate_fn,
**self.config["loader"])
def test_dataloader(self):
return DataLoader(
self.test_data,
collate_fn=inference_collate_fn,
**self.config["loader"],
)
|
import pytest
from yt.data_objects.static_output import Dataset
from yt.geometry.grid_geometry_handler import GridIndex
from yt.loaders import load, load_simulation
from yt.utilities.exceptions import (
YTAmbiguousDataType,
YTSimulationNotIdentified,
YTUnidentifiedDataType,
)
from yt.utilities.object_registries import output_type_registry
@pytest.fixture
def tmp_data_dir(tmp_path):
from yt.config import ytcfg
pre_test_data_dir = ytcfg["yt", "test_data_dir"]
ytcfg.set("yt", "test_data_dir", str(tmp_path))
yield tmp_path
ytcfg.set("yt", "test_data_dir", pre_test_data_dir)
@pytest.mark.usefixtures("tmp_data_dir")
def test_load_not_a_file(tmp_path):
with pytest.raises(FileNotFoundError):
load(tmp_path / "not_a_file")
@pytest.mark.parametrize("simtype", ["Enzo", "unregistered_simulation_type"])
@pytest.mark.usefixtures("tmp_data_dir")
def test_load_simulation_not_a_file(simtype, tmp_path):
# it is preferable to report the most important problem in an error message
    # (missing data is worse than a typo in simulation_type)
# so we make sure the error raised is not YTSimulationNotIdentified,
# even with an absurd simulation type
with pytest.raises(FileNotFoundError):
load_simulation(tmp_path / "not_a_file", simtype)
@pytest.fixture()
def tmp_path_with_empty_file(tmp_path):
empty_file_path = tmp_path / "empty_file"
empty_file_path.touch()
return tmp_path, empty_file_path
def test_load_unidentified_data_dir(tmp_path_with_empty_file):
tmp_path, empty_file_path = tmp_path_with_empty_file
with pytest.raises(YTUnidentifiedDataType):
load(tmp_path)
def test_load_unidentified_data_file(tmp_path_with_empty_file):
tmp_path, empty_file_path = tmp_path_with_empty_file
with pytest.raises(YTUnidentifiedDataType):
load(empty_file_path)
def test_load_simulation_unidentified_data_dir(tmp_path_with_empty_file):
tmp_path, empty_file_path = tmp_path_with_empty_file
with pytest.raises(YTSimulationNotIdentified):
load_simulation(tmp_path, "unregistered_simulation_type")
def test_load_simulation_unidentified_data_file(tmp_path_with_empty_file):
tmp_path, empty_file_path = tmp_path_with_empty_file
with pytest.raises(YTSimulationNotIdentified):
load_simulation(
empty_file_path,
"unregistered_simulation_type",
)
@pytest.fixture()
def ambiguous_dataset_classes():
    # We deliberately set up a situation where two Dataset subclasses
    # that aren't parents are considered valid.
# We implement the bare minimum for these classes to be actually
# loadable in order to test hints.
class MockHierarchy(GridIndex):
pass
class MockDataset(Dataset):
_index_class = MockHierarchy
def _parse_parameter_file(self, *args, **kwargs):
self.current_time = -1.0
self.cosmological_simulation = 0
def _set_code_unit_attributes(self, *args, **kwargs):
self.length_unit = self.quan(1, "m")
self.mass_unit = self.quan(1, "kg")
self.time_unit = self.quan(1, "s")
class AlphaDataset(MockDataset):
@classmethod
def _is_valid(cls, *args, **kwargs):
return True
class BetaDataset(MockDataset):
@classmethod
def _is_valid(cls, *args, **kwargs):
return True
yield
# teardown to avoid possible breakage in following tests
output_type_registry.pop("MockDataset")
output_type_registry.pop("AlphaDataset")
output_type_registry.pop("BetaDataset")
@pytest.mark.usefixtures("ambiguous_dataset_classes")
def test_load_ambiguous_data(tmp_path):
with pytest.raises(YTAmbiguousDataType):
load(tmp_path)
file = tmp_path / "fake_datafile0011.dump"
file.touch()
pattern = str(tmp_path / "fake_datafile00??.dump")
# loading a DatasetSeries should not crash until an item is retrieved
ts = load(pattern)
with pytest.raises(YTAmbiguousDataType):
ts[0]
@pytest.mark.parametrize(
"hint, expected_type",
[
("alpha", "AlphaDataset"),
("al", "AlphaDataset"),
("ph", "AlphaDataset"),
("beta", "BetaDataset"),
("BeTA", "BetaDataset"),
("b", "BetaDataset"),
],
)
@pytest.mark.usefixtures("ambiguous_dataset_classes")
def test_load_ambiguous_data_with_hint(hint, expected_type, tmp_path):
ds = load(tmp_path, hint=hint)
assert type(ds).__name__ == expected_type
file1 = tmp_path / "fake_datafile0011.dump"
file2 = tmp_path / "fake_datafile0022.dump"
file1.touch()
file2.touch()
pattern = str(tmp_path / "fake_datafile00??.dump")
ts = load(pattern, hint=hint)
ds = ts[0]
assert type(ds).__name__ == expected_type
ds = ts[1]
assert type(ds).__name__ == expected_type
|
import csv
import collections
import os.path
import re
import operator
import sys
import datetime
import shutil
import EsoLuaFile
import EsoLuaTokenizer
from EsoLuaTokenizer import CLuaTokenizer
from EsoLuaTokenizer import CLuaTokenIterator
from EsoLuaTokenizer import Token
from EsoLuaFile import CEsoLuaFile
class CEsoFunctionInfo:
def __init__(self):
self.name = ""
self.niceName = ""
self.fullName = ""
self.fullString = ""
self.fullDefString = ""
self.filename = ""
self.namespace = ""
self.namespaceType = ""
self.isLocal = False
self.isObject = False
self.allParams = ""
self.value = ""
self.params = []
self.startLinePos = -1
self.startCharPos = -1
self.startTokenIndex = -1
        self.endDefLinePos = -1
        self.endDefCharPos = -1
        self.endDefTokenIndex = -1
self.endLinePos = -1
self.endCharPos = -1
self.endTokenIndex = -1
class CEsoFunctionCallInfo:
def __init__(self):
self.filename = ""
self.startLinePos = ""
self.startCharPos = ""
self.startTokenIndex = -1
self.endLinePos = ""
self.endCharPos = ""
self.endTokenIndex = -1
self.fullString = ""
self.allVariables = ""
self.variables = []
self.name = ""
self.fullName = ""
self.niceName = ""
self.allParams = ""
self.params = []
def ParseLuaFunctionCall(esoLuaFile, tokenIter):
origTokenIndex = tokenIter.index
# Find start of function name
while tokenIter.IsValid():
if (tokenIter.PeekIndex(-1, Token.operator, ".") or tokenIter.PeekIndex(-1, Token.operator, ":")):
tokenIter.SeekIndex(-1)
isBracket = False
if (tokenIter.PeekIndex(-1, Token.operator, ")")):
tokenIter.ConsumeBehindToBracket("(", ")")
tokenIter.Consume()
isBracket = True
elif (tokenIter.PeekIndex(-1, Token.operator, "]")):
tokenIter.ConsumeBehindTo(Token.operator, "[")
if (tokenIter.Peek(Token.operator, "]")): tokenIter.ConsumeBehindTo(Token.operator, "[")
tokenIter.Consume()
if (tokenIter.PeekIndex(-1, Token.name)):
tokenIter.SeekIndex(-1)
elif isBracket:
break
else:
tokenIter.Report("Unknown function call format found (1)!")
break
else:
break
# Find start of function variables
if tokenIter.Peek(Token.operator, "="):
pass
tokenIter.Consume()
if (not tokenIter.IsValid()):
tokenIter.Report("Unknown function call format found (2)!")
tokenIter.SeekAbs(origTokenIndex + 1)
return None
newFuncCall = CEsoFunctionCallInfo()
newFuncCall.filename = esoLuaFile.relFilename
newFuncCall.startTokenIndex = tokenIter.index - 1
newFuncCall.startLinePos = tokenIter.lastToken.linePos
newFuncCall.startCharPos = tokenIter.lastToken.charPos
while tokenIter.IsValid() and tokenIter.index < origTokenIndex:
tokenIter.ConsumeUpTo(Token.operator, "(")
if (tokenIter.index < origTokenIndex): tokenIter.Consume()
newFuncCall.name = tokenIter.GetTokenIndex(-1).token
nameEndTokenIndex = tokenIter.index - 1
startParamTokenIndex = tokenIter.index + 1
token = tokenIter.ConsumeToBracket("(", ")")
if token is None:
tokenIter.Report("Unknown function call format found (3)!")
tokenIter.SeekAbs(origTokenIndex + 1)
return None
newFuncCall.endTokenIndex = tokenIter.index - 1
newFuncCall.endLinePos = tokenIter.lastToken.linePos
newFuncCall.endCharPos = tokenIter.lastToken.charPos + len(tokenIter.lastToken.token) - 1
newFuncCall.fullString = esoLuaFile.tokenizer.Rebuild(newFuncCall.startTokenIndex, newFuncCall.endTokenIndex)
newFuncCall.allVariables = ""
newFuncCall.fullName = esoLuaFile.tokenizer.Rebuild(newFuncCall.startTokenIndex, nameEndTokenIndex)
newFuncCall.niceName = newFuncCall.fullName.replace(":", ".")
newFuncCall.allParams = esoLuaFile.tokenizer.Rebuild(startParamTokenIndex, newFuncCall.endTokenIndex - 1)
tokenIter.SeekAbs(origTokenIndex + 1)
return newFuncCall
def FindFunctionCalls(esoLuaFile):
functionCalls = []
tokens = esoLuaFile.GetTokens()
tokenIter = CLuaTokenIterator(esoLuaFile.GetTokens(), 0)
while tokenIter.IsValid():
if (tokenIter.Peek(Token.keyword, "function")):
tokenIter.ConsumeTo(Token.operator, ")")
elif (tokenIter.Peek(Token.name) and tokenIter.PeekIndex(1, Token.operator, "(")):
newFuncCall = ParseLuaFunctionCall(esoLuaFile, tokenIter)
if (newFuncCall): functionCalls.append(newFuncCall)
else:
tokenIter.Consume()
return functionCalls
def FindAllFunctionCalls(esoLuaFiles):
functionCalls = []
print "Finding all function calls in {0} Lua files...".format(len(esoLuaFiles))
for file in esoLuaFiles:
print "\t", file.relFilename
functionCalls.extend(FindFunctionCalls(file))
print "\tFound {0} function calls!".format(len(functionCalls))
return functionCalls
def ParseLuaFunction(esoLuaFile, i):
tokens = esoLuaFile.GetTokens()
token = tokens[i]
newFunction = CEsoFunctionInfo()
newFunction.filename = esoLuaFile.relFilename
startIndex = i
startNameTokenIndex = -1
endNameTokenIndex = -1
tokenIter = CLuaTokenIterator(tokens, i)
if (tokenIter.PeekBehind(Token.keyword, "local")):
tokenIter = CLuaTokenIterator(tokens, i - 1)
startIndex = i - 1
token = tokenIter.lastToken
newFunction.startTokenIndex = tokenIter.index
newFunction.startLinePos = token.linePos
newFunction.startCharPos = token.charPos
elif (tokenIter.PeekIndex(-1, Token.operator, "=")):
deltaIndex = -1
if (tokenIter.PeekIndex(-2, Token.name)):
deltaIndex -= 1
if (tokenIter.PeekIndex(-3, Token.operator, ".")):
if (tokenIter.PeekIndex(-4, Token.name)):
deltaIndex -= 2
if (tokenIter.PeekIndex(deltaIndex-1, Token.keyword, "local")):
deltaIndex -= 1
elif (tokenIter.PeekIndex(-2, Token.operator, "]")):
deltaIndex -= 1
while (tokenIter.IsValidDeltaIndex(deltaIndex - 1) and not tokenIter.PeekIndex(deltaIndex - 1, Token.operator, "[")):
deltaIndex -= 1
if tokenIter.PeekIndex(deltaIndex - 1, Token.operator, "["):
deltaIndex -= 1
if tokenIter.PeekIndex(deltaIndex - 1, Token.name):
deltaIndex -= 1
if (tokenIter.PeekIndex(deltaIndex - 1, Token.keyword, "local")):
deltaIndex -= 1
else:
tokenIter.Report("Unknown function definition format found (4)!")
tokenIter = CLuaTokenIterator(tokens, i + deltaIndex)
startIndex = i + deltaIndex
newFunction.isObject = True
token = tokenIter.lastToken
newFunction.startTokenIndex = tokenIter.index
newFunction.startLinePos = token.linePos
newFunction.startCharPos = token.charPos
if (tokenIter.Peek(Token.keyword, "local")):
token = tokenIter.Consume(Token.keyword, "local")
newFunction.isLocal = True
startNameTokenIndex = tokenIter.index
if (tokenIter.Peek(Token.name) and not tokenIter.PeekIndex(+1, Token.operator, "[")):
token = tokenIter.Consume(Token.name)
if (tokenIter.Peek(Token.operator, ".")):
newFunction.namespace = token.token
newFunction.namespaceType = "."
token = tokenIter.Consume(Token.operator, ".")
token = tokenIter.Consume(Token.name)
if (not token): return None, tokenIter.index
newFunction.name = token.token
else:
newFunction.name = token.token
elif (tokenIter.Peek(Token.operator, "[") or tokenIter.PeekIndex(+1, Token.operator, "[")):
startArrayToken = tokenIter.index
if (tokenIter.Peek(Token.name)):
token = tokenIter.Consume(Token.name)
token = tokenIter.Consume(Token.operator, "[")
while (tokenIter.IsValid() and not tokenIter.Peek(Token.operator, "]")):
token = tokenIter.Consume(Token.none)
token = tokenIter.Consume(Token.operator, "]")
if (not token): return None, tokenIter.index
newFunction.name = esoLuaFile.tokenizer.Rebuild(startArrayToken, tokenIter.index - 1)
elif (tokenIter.Peek(Token.operator, "=")):
newFunction.name = ""
else:
print tokenIter.lastToken.token
tokenIter.Report("Unknown function definition format found (5)!")
return None, tokenIter.index
endNameTokenIndex = tokenIter.index - 1
token = tokenIter.Consume(Token.operator, "=")
if (not token): return None, tokenIter.index
else:
token = tokenIter.lastToken
newFunction.startTokenIndex = tokenIter.index
newFunction.startLinePos = token.linePos
newFunction.startCharPos = token.charPos
if (tokenIter.Peek(Token.keyword, "local")):
newFunction.isLocal = True
tokenIter.Consume(Token.keyword, "local")
token = tokenIter.Consume(Token.keyword, "function")
if (tokenIter.Peek(Token.name)):
startNameTokenIndex = tokenIter.index
token = tokenIter.Consume(Token.name)
if (tokenIter.Peek(Token.operator, ".") or tokenIter.Peek(Token.operator, ":")):
newFunction.namespace = token.token
token = tokenIter.Consume(Token.operator)
newFunction.namespaceType = token.token
token = tokenIter.Consume(Token.name)
if (not token): return None, tokenIter.index
newFunction.name = token.token
else:
newFunction.name = token.token
endNameTokenIndex = tokenIter.index - 1
token = tokenIter.Consume(Token.none, "(")
if (not token): return None, tokenIter.index
# Function parameters
bracketCount = 0
startParamIndex = tokenIter.index
token = tokenIter.Consume(Token.none)
while (token):
if (token.token == "("):
bracketCount += 1
elif (token.token == ")"):
if bracketCount <= 0:
break
else:
bracketCount -= 1
elif (token.token == ","):
lastParamIndex = tokenIter.index
elif (token.type == Token.name):
if (tokenIter.Peek(Token.none, ":")):
tokenIter.Consume(Token.none)
token2 = tokenIter.Consume(Token.name)
if (not token2):
tokenIter.isError = False
newFunction.params.append(token.token + ":")
                else:
                    newFunction.params.append(token.token + ":" + token2.token)
                    token = token2
else:
newFunction.params.append(token.token)
elif (token.token == "..."):
newFunction.params.append(token.token)
else:
tokenIter.Report("Invalid function parameter '{0}' found!".format(token.token))
token = tokenIter.Consume(Token.none)
if (not token or token.token != ")"):
tokenIter.Report("Unexpected end of file while looking for function parameter list!")
return None, tokenIter.index
newFunction.endDefTokenIndex = tokenIter.index - 1
newFunction.endDefLinePos = token.linePos
newFunction.endDefCharPos = token.charPos
# Find end of function
blockCount = 0
token = tokenIter.Consume(Token.none)
while (token):
if (token.token == "if" or token.token == "do"):
blockCount += 1
elif (token.token == "end"):
if blockCount <= 0:
break
else:
blockCount -= 1
token = tokenIter.Consume(Token.none)
if (not token or token.token != "end"):
tokenIter.Report("Unexpected end of file while looking for end of function block!")
return None, tokenIter.index
newFunction.endLinePos = token.linePos
newFunction.endCharPos = token.charPos + 3
newFunction.endTokenIndex = tokenIter.index - 1
if (newFunction.namespace != ""):
newFunction.niceName = newFunction.namespace + "." + newFunction.name
else:
newFunction.niceName = newFunction.name
newFunction.allParams = esoLuaFile.tokenizer.Rebuild(startParamIndex, newFunction.endDefTokenIndex - 1)
newFunction.fullName = esoLuaFile.tokenizer.Rebuild(startNameTokenIndex, endNameTokenIndex)
newFunction.fullDefString = esoLuaFile.tokenizer.Rebuild(startIndex, newFunction.endDefTokenIndex)
newFunction.fullString = esoLuaFile.tokenizer.Rebuild(startIndex, newFunction.endTokenIndex)
return newFunction, i
def FindFunctions(esoLuaFile):
functions = []
tokens = esoLuaFile.GetTokens()
i = 0
while i < len(tokens):
token = tokens[i]
if (token.type == EsoLuaTokenizer.Token.keyword and token.token == "function"):
newFunc, lastDefIndex = ParseLuaFunction(esoLuaFile, i)
if (newFunc): functions.append(newFunc)
i += 1
return functions
def FindAllFunctions(esoLuaFiles):
functions = []
print "Finding all functions in {0} Lua files...".format(len(esoLuaFiles))
for file in esoLuaFiles:
functions.extend(FindFunctions(file))
print "\tFound {0} functions!".format(len(functions))
return functions
|
import sys
import os
import tensorflow.compat.v1 as tf
from utils import remove_missing
from constants import LOG_DIR
import tensorflow.contrib.slim as slim
class BaseTrainer():
def __init__(self, model, data_generator, pre_processor, num_epochs, optimizer='momentum', momentum=0.9,
lr_policy='const', init_lr=0.01, end_lr=None, num_gpus=1, train_scopes='encoder',
exclude_scopes=['global_step'], lr_decay=0.95, d_lr_mult=4.):
self.model = model
self.dataset = data_generator
self.pre_processor = pre_processor
self.num_epochs = num_epochs
self.opt_type = optimizer
self.lr_policy = lr_policy
self.init_lr = init_lr
self.momentum = momentum
self.lr_decay = lr_decay
self.d_lr_mult = d_lr_mult
self.end_lr = end_lr if end_lr is not None else 0.01 * init_lr
self.num_gpus = num_gpus
self.num_summary_steps = 80
self.summaries = []
self.moving_avgs_decay = 0.999
self.global_step = None
self.train_scopes = train_scopes
self.exclude_scopes = exclude_scopes
self.num_train_steps = (self.dataset.num_samples // self.model.batch_size) * self.num_epochs
self.num_train_steps //= self.num_gpus
print('Number of training steps: {}'.format(self.num_train_steps))
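        # e.g. 100000 samples, batch size 64, 10 epochs, 2 GPUs:
        # (100000 // 64) * 10 // 2 = 7810 training steps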
def get_data_queue(self):
# Loading of training data
data = self.dataset.get_dataset()
data = data.repeat()
data = data.shuffle(buffer_size=min(self.dataset.num_samples, 100000))
data = data.map(self.pre_processor.process_train, num_parallel_calls=8)
data = data.batch(self.model.batch_size)
data = data.prefetch(tf.data.experimental.AUTOTUNE)
iterator = tf.data.make_one_shot_iterator(data)
return iterator
def preprocess(self, img, label):
img = self.pre_processor.process_train(img)
return img, label
def make_init_fn(self, chpt_path):
# Handle model initialization from prior checkpoint
if chpt_path is None:
return None
var2restore = slim.get_variables_to_restore(exclude=self.exclude_scopes)
print('Variables to restore: {}'.format([v.op.name for v in var2restore]))
var2restore = remove_missing(var2restore, chpt_path)
init_assign_op, init_feed_dict = slim.assign_from_checkpoint(chpt_path, var2restore)
sys.stdout.flush()
# Create an initial assignment function.
def init_fn(sess):
print('Restoring from: {}'.format(chpt_path))
sess.run(init_assign_op, init_feed_dict)
return init_fn
def get_save_dir(self):
fname = '{}_{}'.format(self.model.name, self.dataset.name)
return os.path.join(LOG_DIR, '{}/'.format(fname))
def optimizer(self, type=None):
lr = self.learning_rate()
        if type == 'd':
lr *= self.d_lr_mult # Use larger learning rate for discriminator
opts = {'momentum': tf.train.MomentumOptimizer(learning_rate=lr, momentum=self.momentum, use_nesterov=True),
'adam': tf.train.AdamOptimizer(learning_rate=lr, beta1=self.momentum)}
return opts[self.opt_type]
def learning_rate(self):
policies = {
'const': self.init_lr
}
return policies[self.lr_policy]
def make_summaries(self, grads, layers):
self.summaries.append(tf.summary.scalar('learning_rate', self.learning_rate()))
# Variable summaries
for variable in slim.get_model_variables():
self.summaries.append(tf.summary.histogram(variable.op.name, variable))
# Add histograms for gradients.
for grad, var in grads:
if grad is not None:
self.summaries.append(tf.summary.histogram('gradients/' + var.op.name, grad))
# Add histograms for activation.
if layers:
            for layer_id, val in layers.items():
self.summaries.append(tf.summary.histogram('activations/' + layer_id, val))
|
class Invoice:
"""This class contains basic information about an invoice
:param title: Product name
:type title: str
:param description: Product description
:type description: str
    :param start_parameter: Unique bot deep-linking parameter that can be used to generate this invoice
:type start_parameter: str
:param currency: Three letter ISO 4217 currency code
:type currency: str
:param total_amount: Total amount in the smallest units of the currency (int, not float/double)
:type total_amount: int
"""
def __init__(self, *, title, description, start_parameter, currency, total_amount):
self.title = title
self.description = description
self.start_parameter = start_parameter
self.currency = currency
self.total_amount = total_amount
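# Usage sketch (all values are illustrative):
#   invoice = Invoice(title="Pro Plan", description="One month of Pro",
#                     start_parameter="pro-plan", currency="USD", total_amount=999)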
|
from keras.callbacks import TensorBoard
import tensorflow as tf
from keras.callbacks import ModelCheckpoint, EarlyStopping
import numpy as np
from sklearn.metrics import classification_report
from frame_quality_recognizer.data_creator import FrameQualityRecognizerCreator
from frame_quality_recognizer.model import FrameQualityRecognizerModel
class FrameQualityRecognizerTrainer:
def __init__(self, config):
self._config = config
self._data_creator = FrameQualityRecognizerCreator(config)
self._session = None
self._x_train, self._y_train, self._x_valid, self._y_valid, \
self._x_test, self._y_test = self._data_creator.get_feature_datasets()
self._ml_model = FrameQualityRecognizerModel(self._config)
self._ml_model.compile()
def train(self):
with tf.Session() as self._session:
self._session.run(tf.global_variables_initializer())
try:
self._ml_model.load(self._config.model.path)
except Exception:
print("Can't find model. Training from scratch.")
print('Starting training')
tensorboard_cb = TensorBoard(log_dir=self._config.training.log_path, histogram_freq=0,
write_graph=True, write_images=True)
self._ml_model.model.fit(
self._x_train, self._y_train, epochs=self._config.training.epoch_num,
validation_data=(self._x_valid, self._y_valid), verbose=2,
batch_size=self._config.training.batch_size,
callbacks=[ModelCheckpoint(self._config.model.path, 'val_loss', save_best_only=True,
save_weights_only=True),
EarlyStopping(monitor='val_auc', patience=self._config.training.lr_decrease_patience,
mode='max', restore_best_weights=True),
tensorboard_cb])
test_loss = self._ml_model.model.evaluate(self._x_test, self._y_test,
batch_size=self._config.training.batch_size)
print(f'Test loss: {test_loss}')
Y_test = np.argmax(self._y_test, axis=1) # Convert one-hot to index
y_pred = self._ml_model.predict(self._x_test)
report = classification_report(Y_test, np.argmax(y_pred, axis=1))
print(report)
if __name__ == '__main__':
from config import general_config
trainer = FrameQualityRecognizerTrainer(general_config.frame_quality_recognizer_pipeline)
trainer.train()
|
import pytest
import numpy as np
import torch
import pyro.distributions as dist
from example_systems.three_states import three_state_hmm
from perm_hmm.models.hmms import PermutedDiscreteHMM
from perm_hmm.util import all_strings, id_and_transpositions, ZERO
from tests.min_ent import MinEntropyPolicy
from perm_hmm.policies.min_tree import MinTreePolicy
import perm_hmm.log_cost as cf
def simple_hmm():
observation_probs = torch.tensor([.5, 1])
observation_dist = dist.Bernoulli(observation_probs)
possible_perms = torch.tensor([[0, 1], [1, 0], [0, 1]], dtype=int)
transition_logits = torch.tensor([[1 - ZERO, ZERO], [.5, .5]]).log()
initial_logits = torch.tensor([.5, .5]).log()
hmm = PermutedDiscreteHMM(initial_logits, transition_logits, observation_dist)
return hmm, possible_perms
@pytest.mark.parametrize("hmm,possible_perms,num_steps",[
simple_hmm() + (4,),
(three_state_hmm(-3, -4), id_and_transpositions(3), 4),
(three_state_hmm(-1, -4), id_and_transpositions(3), 4),
(three_state_hmm(-5, -4), id_and_transpositions(3), 4),
])
def test_posterior_distributions(hmm, possible_perms, num_steps):
num_states = hmm.initial_logits.shape[0]
all_data = all_strings(num_steps, num_states)
mes2 = MinEntropyPolicy(possible_perms, hmm, save_history=True)
mes1 = MinTreePolicy(possible_perms, hmm, cf.log_initial_entropy, 1, initialize_tree=False)
mes1.initialize_tree(1, data_len=all_data.shape[0])
reverse_perm_dict = {tuple(v): k for k, v in enumerate(possible_perms.numpy().tolist())}
for j in range(num_steps):
mes1.tree.prune_tree(mes1.data_to_idx(all_data[..., j]))
mes1.tree.grow(mes1.possible_perms)
b = mes1.tree[-1]
mes2.belief_state = mes2.belief_state.bayes_update(all_data[..., j])
assert np.allclose(mes2.belief_state.logits.exp().double().numpy(), mes1.tree.beliefs[0].logits.exp().double().numpy(), atol=1e-6)
pl2 = mes2.distributions_for_all_perms()
after_transition_2 = pl2.logsumexp(-1)
after_transition_1 = mes1.tree[1].logits.transpose(0, 1)
assert np.allclose(after_transition_1.exp().double().numpy(), after_transition_2.exp().double().numpy(), atol=1e-6)
pl2 -= pl2.logsumexp(-3, keepdim=True).logsumexp(-2, keepdim=True)
pl2 = torch.from_numpy(np.moveaxis(pl2.numpy(), (-1, -2, -3, -4, -5), (-4,-1,-2,-5,-3)))
assert torch.allclose(pl2.exp().double(), b.logits.exp().double(), atol=1e-6)
perm = mes2.calculate_perm_from_belief(return_dict=False)
perm_idx = torch.tensor([reverse_perm_dict[tuple(p.numpy())] for p in perm])
mes1.tree.prune_tree(perm_idx)
mes2.belief_state = mes2.belief_state.transition(perm.unsqueeze(-2))
@pytest.mark.parametrize("hmm,possible_perms,num_steps",[
simple_hmm() + (4,),
(three_state_hmm(-3, -4), id_and_transpositions(3), 4),
(three_state_hmm(-1, -4), id_and_transpositions(3), 4),
(three_state_hmm(-5, -4), id_and_transpositions(3), 4),
])
def test_posterior_entropy(hmm, possible_perms, num_steps):
num_states = hmm.initial_logits.shape[0]
all_data = all_strings(num_steps, num_states)
mes2 = MinEntropyPolicy(possible_perms, hmm, save_history=True)
mes1 = MinTreePolicy(possible_perms, hmm, cf.log_initial_entropy, 1, initialize_tree=False, save_history=True)
mes1.initialize_tree(1, data_len=all_data.shape[0])
reverse_perm_dict = {tuple(v): k for k, v in enumerate(possible_perms.numpy().tolist())}
for j in range(num_steps):
mes2.belief_state = mes2.belief_state.bayes_update(all_data[..., j])
mes1.tree.prune_tree(mes1.data_to_idx(all_data[..., j]))
mes1.tree.grow(mes1.possible_perms)
perm_tree, costs = mes1.tree.perm_idxs_from_log_cost(mes1.log_cost_func, return_costs=True)
entropy2, distn2 = mes2.cond_entropies_for_all_perms(return_distn=True)
distn1 = hmm.observation_dist.log_prob(hmm.enumerate_support(expand=False))
yk = (mes1.tree[-2].logits.logsumexp(-2).unsqueeze(-3) + distn1.unsqueeze(-2)).logsumexp(-1)
distn1 = (yk.unsqueeze(-1).unsqueeze(-2) + mes1.tree[-1].logits)
distn2 = torch.tensor(np.moveaxis(distn2.numpy(), (-1, -2, -3, -4, -5), (-4,-1,-2,-5,-3)))
assert torch.allclose(distn1.exp().double(), distn2.exp().double(), atol=1e-6)
assert torch.allclose(costs[-2], (costs[-1] + yk).logsumexp(-2), atol=1e-6)
s1yk = distn1.logsumexp(-1)
jointent1 = -(s1yk.exp()*s1yk).sum(-3).sum(-1)
s1 = s1yk.logsumexp(-1)
yent1 = -(s1.exp()*s1).sum(-2)
condent1 = (jointent1 - yent1).transpose(0, 1)
assert torch.allclose(entropy2.double(), condent1.double(), atol=1e-6)
plisd1 = mes1.tree[-1].logits.logsumexp(-1)
log_postinitent = (-(plisd1.exp()*plisd1).sum(-1)).log()
post_ent1 = (yk + log_postinitent).logsumexp(-2).exp().transpose(0, 1)
assert torch.allclose(post_ent1.double(), entropy2.double(), atol=1e-6)
entropy1 = costs[-2].transpose(0, 1).exp()
assert torch.allclose(entropy1.double(), entropy2.double(), atol=1e-6)
perm = mes2.calculate_perm_from_belief(return_dict=False)
perm_idx = torch.tensor([reverse_perm_dict[tuple(p.numpy())] for p in perm])
mes1.tree.prune_tree(perm_idx)
mes2.belief_state = mes2.belief_state.transition(perm.unsqueeze(-2))
# @pytest.mark.parametrize("n_states", [2, 3, 4])
# def test_min_tree_min_ent(n_states):
# n_steps = 5
# hmm = random_phmm(n_states)
# allowed_permutations = id_and_transpositions(n_states)
# mes1 = MinEntropyPolicy(allowed_permutations, hmm)
# mes2 = MinTreePolicy(allowed_permutations, hmm, vf.negative_min_entropy, 1)
# all_data = all_strings(n_steps)
# perms1 = mes1.get_perms(all_data)
# perms2 = mes2.get_perms(all_data)
# assert (perms1 == perms2).all()
|
# https://leetcode.com/discuss/interview-question/313719/Amazon-or-Online-Assessment-2019-or-Movies-on-Flight
class Solution:
def moviesOnFlight(self, movieDuration, d):
        # the two chosen movies must total at most d - 30 minutes
        d = d - 30
# keep all indexes in hashmap
# key: movie duration, value: index
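        # note: if two movies share the same duration, only the last index is
        # kept here, so the reported indices assume distinct durations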
hm = dict()
for i in range(len(movieDuration)):
hm[movieDuration[i]] = i
movieDuration = sorted(movieDuration, reverse=True)
i = 0
j = len(movieDuration)-1
iidx = 0
jidx = len(movieDuration)-1
max_d = 0
while i < j:
if movieDuration[i] + movieDuration[j] <= d:
prev_max = max_d
max_d = max(max_d, movieDuration[i] + movieDuration[j])
if(prev_max != max_d):
iidx = i
jidx = j
j -= 1
else:
i += 1
z = hm[movieDuration[iidx]], hm[movieDuration[jidx]]
return max_d, min(z), max(z)
print(Solution().moviesOnFlight([90, 85, 75, 60, 120, 150, 125], 250))
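# For the sample input above, the best pair within 250 - 30 = 220 minutes is
# 90 + 125 = 215, at original indices 0 and 6, so this prints (215, 0, 6).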
|
import pandas as pd
import numpy as np
from src import datasets
import os
from PIL import Image
import torch
class FishSeg:
def __init__(self, split, transform=None,
datadir="", n_samples=None, habitat=None):
self.split = split
self.n_classes = 2
self.datadir = datadir
self.transform = transform
self.img_names, self.labels, self.mask_names = get_seg_data(self.datadir,
split, habitat=habitat)
if n_samples:
self.img_names = self.img_names[:n_samples]
self.mask_names = self.mask_names[:n_samples]
self.labels = self.labels[:n_samples]
self.path = self.datadir #+ "/images/"
def __len__(self):
return len(self.img_names)
def __getitem__(self, index):
name = self.img_names[index]
image_pil = Image.open(self.path + "/images/"+ name + ".jpg")
image = self.transform(image_pil)
if self.split in ("train", "val", "target_train", "target_val"):
mask_classes = Image.open(self.path + "/masks/"+ self.mask_names[index] + ".png").convert('L')
mask_classes = torch.from_numpy(np.array(mask_classes)).float() / 255.
batch = {"images": image,
"labels": self.labels[index],
"mask_classes": mask_classes,
"meta": {"index": index,
"image_id": index,
"split": self.split}}
elif self.split == "test":
batch = {"images": image,
"meta": {"index": index,
"image_id": index,
"split": self.split}}
return batch
def get_seg_data(datadir, split, habitat=None):
df = pd.read_csv(os.path.join(datadir, '%s.csv' % split))
df = datasets.slice_df_reg(df, habitat)
img_names = np.array(df['ID'])
mask_names = np.array(df['ID'])
labels = np.array(df['labels'])
return img_names, labels, mask_names
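# Note: each <split>.csv is assumed to provide at least an 'ID' column (shared by
# images and masks) and a 'labels' column; habitat filtering is delegated to
# datasets.slice_df_reg.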
|
"""
This script is designed to align NP-bracketed PTB dependency trees to tokens with
base NPs derived from the constituency trees.
The bulk of this code exists to handle edge cases introduced by
re-attaching heads when collapsing tokens within BNPs.
Some edge cases are under-specified in previous work. Our work is internally
valid in that we used the same BNPs across experiments (syntax and no-syntax),
and externally valid in that we verified results (using the NGram model) using the BNP data
subsequently acquired of previous work.
"""
from collections import defaultdict
import string
from os import path
NUMERIC_SYM = "N"
LOW_COUNT_SYM = "<unk>"
EOS_SYM = "<eos>"
BASE_NP_SEPARATOR = "^^"
SONP_SYM = "<sonp>"
EONP_SYM = "<eonp>"
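# Illustrative example of the convention used below (a hypothetical sentence):
# the token stream "<sonp> the big dog <eonp> barked" is collapsed so that the
# base NP becomes the single token "the^^big^^dog", followed by "barked".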
def write_list_of_list_of_list(list_of_list_of_list_of_strings, filename_with_path, add_newline = True, add_space = True, split_with_tabs = True):
if split_with_tabs:
split_string = "\t"
else:
split_string = " "
with open(filename_with_path, "w") as f:
        for list_of_list_of_strings in list_of_list_of_list_of_strings:
for list_of_strings in list_of_list_of_strings:
if add_newline:
f.write(split_string.join(list_of_strings) + "\n")
else:
f.write(split_string.join(list_of_strings))
if add_space:
f.write("\n")
def preprocess_token_with_tag_simple(collapsed_row_ctr, wsj_filtered_tokens_one_sent, token, tag, strip_low_freq=False, dependency=False):
if dependency:
if tag in ["-NONE-"]:
return None
if token == "(":
token = "-LRB-"
elif token == ")":
token = "-RRB-"
elif token == "{":
token = "-LCB-"
elif token == "}":
token = "-RCB-"
if collapsed_row_ctr > (len(wsj_filtered_tokens_one_sent) - 1) or wsj_filtered_tokens_one_sent[collapsed_row_ctr] != token:
return None
return token
def collapse_npsyms(full_tree, tokens_one_sent_npsyms, ctr):
token_to_head = {}
for row in full_tree:
token_id = row[0]
head_id = row[6]
assert token_id not in token_to_head
token_to_head[token_id] = head_id
removed_ids = defaultdict(str)
npsyms_ctr = 0
collapsed_tree = []
i = 0
tokenid_to_basenpid = {}
basenpid_to_tokenid = {}
basenpid_to_identity_tokenid = {}
basenpid = 0
while (i < len(full_tree)):
row = full_tree[i]
token_id = row[0]
token = row[1]
basenp_str = ""
if tokens_one_sent_npsyms[npsyms_ctr] == SONP_SYM:
npsyms_ctr += 1
while True:
assert tokens_one_sent_npsyms[npsyms_ctr] == full_tree[i][1]
if len(basenp_str) > 0:
basenp_str += BASE_NP_SEPARATOR
basenp_str += full_tree[i][1]
if tokens_one_sent_npsyms[npsyms_ctr+1] == EONP_SYM:
original_row = list(full_tree[i])
# the only change is to the token string; the other fields of this rightmost token are retained as is
original_row[1] = basenp_str
tokenid_to_basenpid[full_tree[i][0]] = str(basenpid)
if str(basenpid) not in basenpid_to_tokenid:
basenpid_to_tokenid[str(basenpid)] = [full_tree[i][0]]
else:
basenpid_to_tokenid[str(basenpid)].append(full_tree[i][0])
basenpid_to_identity_tokenid[str(basenpid)] = original_row[0]
collapsed_tree.append(original_row)
npsyms_ctr += 2
i += 1
basenpid += 1
break
else:
tokenid_to_basenpid[full_tree[i][0]] = str(basenpid)
if str(basenpid) not in basenpid_to_tokenid:
basenpid_to_tokenid[str(basenpid)] = [full_tree[i][0]]
else:
basenpid_to_tokenid[str(basenpid)].append(full_tree[i][0])
removed_ids[full_tree[i][0]] = 1 # current row id
npsyms_ctr += 1
i += 1
else:
# outside a base NP
assert token == tokens_one_sent_npsyms[npsyms_ctr]
collapsed_tree.append(list(row))
npsyms_ctr += 1
i += 1
assert "0" not in removed_ids
token_to_head_filtered = {} # defaultdict(str)
for row in collapsed_tree:
token_id = row[0]
head_id = row[6]
next_head = head_id
if token_id in tokenid_to_basenpid: # in a base NP
curr_basenp_id = tokenid_to_basenpid[token_id]
while True:
if next_head in removed_ids or next_head == token_id:
if next_head in tokenid_to_basenpid and curr_basenp_id != tokenid_to_basenpid[next_head]:
basenpid = tokenid_to_basenpid[next_head]
next_head = basenpid_to_identity_tokenid[basenpid]
if next_head != token_id:
break
else:
next_head = token_to_head[next_head]
else:
if next_head != token_id:
break
else:
while True:
if next_head in removed_ids:
if next_head in tokenid_to_basenpid:
basenpid = tokenid_to_basenpid[next_head]
next_head = basenpid_to_identity_tokenid[basenpid]
if next_head != token_id:
break
else:
next_head = token_to_head[next_head]
else:
if next_head != token_id:
break
# now, ensure there's not a loop back to the new head_id
# if so, continue traversing:
assert next_head not in removed_ids
candidate_next_head = next_head
if candidate_next_head in token_to_head_filtered and token_to_head_filtered[candidate_next_head] == token_id:
# find nearest remaining head
next_head = token_to_head[candidate_next_head]
while next_head in removed_ids or next_head == token_id:
next_head = token_to_head[next_head]
token_to_head_filtered[token_id] = next_head
    # make the remaining indices consecutive
original_id_to_new_id = defaultdict(str)
# add root:
token_to_head_filtered["0"] = "0"
original_id_to_new_id["0"] = "0"
decrement = 0
for i in range(1, len(full_tree)+1):
str_i = str(i)
if str_i not in token_to_head_filtered:
decrement += 1
else:
if decrement == 0:
original_id_to_new_id[str_i] = str_i
else:
original_id_to_new_id[str_i] = str(i - decrement)
# finally, make ConLL format with updated ids and heads
number_of_roots = 0
renumbered_filtered_collapsed_tree = []
for row in collapsed_tree:
old_row = list(row)
token_id = old_row[0]
head_id = old_row[6]
new_row = list(row)
new_token_id = original_id_to_new_id[token_id]
new_row[0] = new_token_id
new_head_id = token_to_head_filtered[token_id]
new_head_id_with_updated_numbering = original_id_to_new_id[new_head_id]
new_row[6] = new_head_id_with_updated_numbering
if new_head_id == "0":
number_of_roots += 1
renumbered_filtered_collapsed_tree.append(new_row)
assert number_of_roots == 1, renumbered_filtered_collapsed_tree
# final check of lengths
num_nps_and_tokens = 0
in_np = False
for token in tokens_one_sent_npsyms:
if in_np:
if token == EONP_SYM:
in_np = False
else:
if token == SONP_SYM:
in_np = True
num_nps_and_tokens += 1
assert num_nps_and_tokens == len(renumbered_filtered_collapsed_tree)
# ensure a proper tree is formed
arcs = {}
arcmap = defaultdict(str)
for row in renumbered_filtered_collapsed_tree:
token_id = row[0]
head_id = row[6]
assert token_id not in arcmap
arcmap[token_id] = head_id
if head_id in arcs:
arcs[head_id].append(token_id)
else:
arcs[head_id] = [token_id]
assert "0" in arcs
for row in renumbered_filtered_collapsed_tree:
token_id = row[0]
head_id = row[6]
assert head_id == "0" or head_id in arcmap
head_id_head_id = arcmap[head_id]
if head_id_head_id == token_id:
print collapsed_tree
print renumbered_filtered_collapsed_tree
assert head_id_head_id != token_id, token_id
return renumbered_filtered_collapsed_tree
def get_descendants(target_id, arcmap):
descendants = {}
for phrase_id, head_id in arcmap.items():
if head_id == target_id:
descendants[phrase_id] = 1
new_descendants = get_descendants(phrase_id, arcmap)
for p, _ in new_descendants.items():
descendants[p] = 1
return descendants
issues1 = 0
issues2 = 0
issues3 = 0
forward_slash_found = 0
def filter_tree(full_tree, wsj_filtered_tokens_one_sent, ctr, wsj_filtered_tokens_one_sent_npsyms):
global issues1
global issues2
global issues3
global forward_slash_found
lines_with_issues = 0
# this could be more efficient, but for the purposes here (i.e., it's run once), multiple passes
# over the tree make it easier to follow
token_to_head = {}
for row in full_tree:
token_id = row[0]
head_id = row[6]
assert token_id not in token_to_head
token_to_head[token_id] = head_id
filtered_tokens = []
removed_ids = defaultdict(str)
#buy\/hold
#indecies_to_collapse = []
collapsed_tree = []
i = 0
tree_offset = 0
while (i < len(full_tree)):
row = full_tree[i]
token_id = row[0]
token = row[1]
tag = row[3]
# The dependency conversion/Vadas' NP bracketing splits tokens with internal '/'
# We need to regroup these and delete the extra arcs to match the gold LM sets
# (this is a throwback to when we were matching the neural LM datasets, but
# also necessary to match the PTB tokenization)
# Note that here we're following the overwhelming majority of cases
# in assigning the head of the group to be that of the *left-most* token
if token == "/" and tag == "CC": # and i != (len(full_tree)-1):
forward_slash_found += 1
prev_row_token = collapsed_tree[i-1-tree_offset][1]
next_row_token = full_tree[i+1][1]
# here, we're removing the current row and the next row
next_row_id = full_tree[i+1][0]
removed_ids[next_row_id] = 1 # next row id
removed_ids[token_id] = 1 # current row id
prev_row_head_id = collapsed_tree[i-1-tree_offset][6]
prev_row_token_id = collapsed_tree[i-1-tree_offset][0]
cur_row_head_id = row[6]
cur_row_token_id = token_id
next_row_head_id = full_tree[i+1][6]
next_row_token_id = full_tree[i+1][0]
if cur_row_head_id != prev_row_token_id:
#print "failed cur_row_head_id == prev_row_token_id"
issues3 += 1
if next_row_head_id != cur_row_token_id:
#print "failed next_row_head_id == cur_row_token_id"
issues1 += 1
if not ( (prev_row_head_id != cur_row_token_id) and (prev_row_head_id != next_row_token_id) ):
#print "failed (prev_row_head_id != cur_row_token_id) and (prev_row_head_id != next_row_token_id)"
issues2 += 1
#print "-----------------"
collapsed_tree[i-1-tree_offset][1] = prev_row_token + "\\/" + next_row_token
tree_offset += 2
i += 2
else:
collapsed_tree.append(list(row))
i += 1
filtered_collapsed_tree = []
collapsed_row_ctr = 0
for row in collapsed_tree:
token_id = row[0].strip()
token = row[1].strip()
tag = row[3].strip()
filtered_token = preprocess_token_with_tag_simple(collapsed_row_ctr, wsj_filtered_tokens_one_sent, token, tag, strip_low_freq=False, dependency=True)
if filtered_token:
filtered_tokens.append(filtered_token)
filtered_row = list(row)
filtered_row[1] = filtered_token
filtered_collapsed_tree.append(filtered_row)
            collapsed_row_ctr += 1  # there's an assumption that the elision here is only in the base-NP data
else:
removed_ids[token_id] = 1
    # two final passes to make the indices consecutive (since some may have been dropped above),
# and to reattach orphaned arcs
# The latter can occur as in the following, where the parentheses were dropped
"""
14 dial _ VB _ _ 13 IM _ _
15 ( _ ( _ _ 14 PRN _ _
16 and _ CC _ _ 15 DEP _ _
17 redial _ VB _ _ 15 DEP _ _
18 ) _ ) _ _ 15 P _ _
19 movie _ NN _ _ 22
"""
# first, reattach heads:
# recall, there are 2 ways a head can be dropped: 1. removing '/' split and 2. filtering
#head_to_targets
#token_to_head
assert "0" not in removed_ids
token_to_head_filtered = defaultdict(str)
for row in filtered_collapsed_tree:
token_id = row[0]
head_id = row[6]
#assert head_id not in removed_ids, "Removed id's shouldn't be heads"
if head_id in removed_ids:
# find nearest remaining head
next_head = token_to_head[head_id]
while next_head in removed_ids:
next_head = token_to_head[next_head]
token_to_head_filtered[token_id] = next_head
else:
token_to_head_filtered[token_id] = head_id
    # make the remaining indices consecutive
original_id_to_new_id = defaultdict(str)
# add root:
token_to_head_filtered["0"] = "0"
original_id_to_new_id["0"] = "0"
decrement = 0
for i in range(1, len(full_tree)+1):
str_i = str(i)
if str_i not in token_to_head_filtered:
decrement += 1
else:
if decrement == 0:
original_id_to_new_id[str_i] = str_i
else:
original_id_to_new_id[str_i] = str(i - decrement)
# finally, make ConLL format with updated ids and heads
renumbered_filtered_collapsed_tree = []
for row in filtered_collapsed_tree:
old_row = list(row)
token_id = old_row[0]
head_id = old_row[6]
new_row = list(row)
new_token_id = original_id_to_new_id[token_id]
new_row[0] = new_token_id
new_head_id = token_to_head_filtered[token_id]
new_head_id_with_updated_numbering = original_id_to_new_id[new_head_id]
new_row[6] = new_head_id_with_updated_numbering
renumbered_filtered_collapsed_tree.append(new_row)
#if ctr in [26, 658, 763]:
if filtered_tokens != wsj_filtered_tokens_one_sent:
print filtered_tokens
print wsj_filtered_tokens_one_sent
lines_with_issues += 1
#print lines_with_issues
import pdb; pdb.set_trace()
renumbered_filtered_collapsed_tree = collapse_npsyms(list(renumbered_filtered_collapsed_tree), list(wsj_filtered_tokens_one_sent_npsyms), ctr)
return renumbered_filtered_collapsed_tree, lines_with_issues
def check_tree_yields(filter_tree_result, wsj_filtered_tokens_one_sent):
tree_yield = []
for row in filter_tree_result:
tree_yield.append(row[1])
assert tree_yield == wsj_filtered_tokens_one_sent
def get_filtered_dependency_trees(dependency_output_dir, split_label, wsj_filtered_tokens, wsj_filtered_tokens_npsyms):
if split_label == "train":
split_fileids = ["wsj_%02d_dep.txt"%x for x in range(2, 22)]
elif split_label == "valid":
split_fileids = ["wsj_%02d_dep.txt"%x for x in range(22, 23)]
else:
split_fileids = ["wsj_%02d_dep.txt"%x for x in range(23, 24)]
full_trees = []
full_trees_filtered = []
total_lines_with_possible_issues = 0
ctr = 0
for split_fileid in split_fileids:
print "split_fileid:", split_fileid
with open(path.join(dependency_output_dir, split_fileid)) as f:
full_tree = []
for line in f:
if line not in string.whitespace:
full_tree.append(line.split())
assert len(full_tree[-1]) == 10
else:
if len(full_tree) != 0:
full_trees.append(full_tree)
filter_tree_result, lines_with_issues = filter_tree(full_tree, wsj_filtered_tokens[ctr], ctr, wsj_filtered_tokens_npsyms[ctr])
#check_tree_yields(filter_tree_result, wsj_filtered_tokens[ctr])
total_lines_with_possible_issues += lines_with_issues
full_trees_filtered.append(filter_tree_result)
full_tree = []
ctr += 1
# fencepost (in case trailing new line is missing)
if len(full_tree) != 0:
full_trees.append(full_tree)
filter_tree_result, lines_with_issues = filter_tree(full_tree, wsj_filtered_tokens[ctr], ctr, wsj_filtered_tokens_npsyms[ctr])
#check_tree_yields(filter_tree_result, wsj_filtered_tokens[ctr])
total_lines_with_possible_issues += lines_with_issues
full_trees_filtered.append(filter_tree_result)
full_tree = []
ctr += 1
print "Total lines with possible issues:", total_lines_with_possible_issues
return full_trees, full_trees_filtered
def save_dependency_trees(train_words, valid_words, test_words, train_bnps, valid_bnps, test_bnps, dependency_dir, filtered_dependency_dir, include_bnps):
for words, bnps, split_label in zip([train_words, valid_words, test_words], [train_bnps, valid_bnps, test_bnps], ["train", "valid", "test"]):
if include_bnps:
full_trees, filtered_trees = get_filtered_dependency_trees(dependency_dir, split_label, words, bnps)
filename_suffix = "_filtered_dep.txt"
else:
full_trees, filtered_trees = get_filtered_dependency_trees(dependency_dir, split_label, words, words)
filename_suffix = "_filtered_dep_nonpsyms.txt"
write_list_of_list_of_list(filtered_trees, path.join(filtered_dependency_dir, "%s%s" % (split_label, filename_suffix)), add_newline = True, add_space = True, split_with_tabs = True)
|
# coding=utf-8
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Central place for handling Keystone authorization and service lookup."""
from keystoneauth1 import exceptions as kaexception
from keystoneauth1 import loading as kaloading
from oslo_log import log as logging
import six
from six.moves.urllib import parse # for legacy options loading only
from ironic.common import exception
from ironic.common.i18n import _, _LE
from ironic.conf import auth as ironic_auth
from ironic.conf import CONF
LOG = logging.getLogger(__name__)
# FIXME(pas-ha): for backward compat with legacy options loading only
def _is_apiv3(auth_url, auth_version):
"""Check if V3 version of API is being used or not.
This method inspects auth_url and auth_version, and checks whether V3
version of the API is being used or not.
When no auth_version is specified and auth_url is not a versioned
endpoint, v2.0 is assumed.
:param auth_url: a http or https url to be inspected (like
'http://127.0.0.1:9898/').
:param auth_version: a string containing the version (like 'v2', 'v3.0')
or None
:returns: True if V3 of the API is being used.
"""
return auth_version == 'v3.0' or '/v3' in parse.urlparse(auth_url).path
def ks_exceptions(f):
"""Wraps keystoneclient functions and centralizes exception handling."""
@six.wraps(f)
def wrapper(*args, **kwargs):
try:
return f(*args, **kwargs)
except kaexception.EndpointNotFound:
service_type = kwargs.get('service_type', 'baremetal')
endpoint_type = kwargs.get('endpoint_type', 'internal')
raise exception.CatalogNotFound(
service_type=service_type, endpoint_type=endpoint_type)
except (kaexception.Unauthorized, kaexception.AuthorizationFailure):
raise exception.KeystoneUnauthorized()
except (kaexception.NoMatchingPlugin,
kaexception.MissingRequiredOptions) as e:
raise exception.ConfigInvalid(six.text_type(e))
except Exception as e:
LOG.exception(_LE('Keystone request failed: %(msg)s'),
{'msg': six.text_type(e)})
raise exception.KeystoneFailure(six.text_type(e))
return wrapper
@ks_exceptions
def get_session(group):
auth = ironic_auth.load_auth(CONF, group) or _get_legacy_auth()
if not auth:
msg = _("Failed to load auth from either [%(new)s] or [%(old)s] "
"config sections.")
raise exception.ConfigInvalid(message=msg, new=group,
old=ironic_auth.LEGACY_SECTION)
session = kaloading.load_session_from_conf_options(
CONF, group, auth=auth)
return session
# FIXME(pas-ha) remove legacy path after deprecation
def _get_legacy_auth():
"""Load auth from keystone_authtoken config section
Used only to provide backward compatibility with old configs.
"""
conf = getattr(CONF, ironic_auth.LEGACY_SECTION)
# NOTE(pas-ha) first try to load auth from legacy section
# using the new keystoneauth options that might be already set there
auth = ironic_auth.load_auth(CONF, ironic_auth.LEGACY_SECTION)
if auth:
return auth
# NOTE(pas-ha) now we surely have legacy config section for auth
# and with legacy options set in it, deal with it.
legacy_loader = kaloading.get_plugin_loader('password')
auth_params = {
'auth_url': conf.auth_uri,
'username': conf.admin_user,
'password': conf.admin_password,
'tenant_name': conf.admin_tenant_name
}
api_v3 = _is_apiv3(conf.auth_uri, conf.auth_version)
if api_v3:
# NOTE(pas-ha): mimic defaults of keystoneclient
auth_params.update({
'project_domain_id': 'default',
'user_domain_id': 'default',
})
return legacy_loader.load_from_options(**auth_params)
@ks_exceptions
def get_service_url(session, service_type='baremetal',
endpoint_type='internal'):
"""Wrapper for get service url from keystone service catalog.
Given a service_type and an endpoint_type, this method queries
keystone service catalog and provides the url for the desired
endpoint.
:param service_type: the keystone service for which url is required.
:param endpoint_type: the type of endpoint for the service.
:returns: an http/https url for the desired endpoint.
"""
return session.get_endpoint(service_type=service_type,
interface=endpoint_type,
region=CONF.keystone.region_name)
@ks_exceptions
def get_admin_auth_token(session):
"""Get admin token.
Currently used for inspector, glance and swift clients.
Only swift client does not actually support using sessions directly,
LP #1518938, others will be updated in ironic code.
"""
return session.get_token()
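# Minimal usage sketch (the 'glance' group name and 'image' service type below are
# assumptions for illustration; real group names come from ironic's configuration):
#   session = get_session('glance')
#   url = get_service_url(session, service_type='image', endpoint_type='public')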
|
HELP_TEXT = """__**Я устроился на работу диджеем в этот чат и вот че я могу:**__
**/skip** __Пропустить играющую ща музыку__
**/play** __Сыграть вашу музыку по заявке ('/play Название песни' или '/play' ответом на файл)__
**/dj_vlados_join** __Пригласить меня, DJ Vlados, за диджейский пульт__
**/dj_vlados_leave** __Закончить мою смену и выплатить гонорар__
**/volume [1-200]** __Подкрутить громкость__
**/pause** __Приостановить музыку__
**/resume** __Продолжить играть музыку__
**/queue** __Показать список заявок__
**/delqueue** __Очистить список заявок__
**/playlist** __Создать плейлист__
"""
REPO_TEXT = (
"хуй"
+ " хуй"
)
|
# Generated by Django 3.2.7 on 2021-10-06 08:00
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0002_movie'),
]
operations = [
migrations.AlterField(
model_name='movie',
name='genres',
field=models.CharField(blank=True, max_length=50, null=True),
),
migrations.AlterField(
model_name='movie',
name='movieid',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='movie',
name='title',
field=models.CharField(blank=True, max_length=50, null=True),
),
]
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Dag Wieers (@dagwieers) <dag@wieers.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: mso_schema_template_bd
short_description: Manage Bridge Domains (BDs) in schema templates
description:
- Manage BDs in schema templates on Cisco ACI Multi-Site.
author:
- Dag Wieers (@dagwieers)
version_added: '2.8'
options:
schema:
description:
- The name of the schema.
type: str
required: yes
template:
description:
- The name of the template.
type: str
required: yes
bd:
description:
- The name of the BD to manage.
type: str
aliases: [ name ]
display_name:
description:
- The name as displayed on the MSO web interface.
type: str
vrf:
description:
- The VRF associated to this BD.
type: dict
subnets:
description:
- The subnets associated to this BD.
type: list
suboptions:
ip:
description:
- The IP range in CIDR notation.
type: str
required: true
description:
description:
- The description of this subnet.
type: str
scope:
description:
- The scope of the subnet.
type: str
choices: [ private, public ]
shared:
description:
- Whether this subnet is shared between VRFs.
type: bool
no_default_gateway:
description:
- Whether this subnet has a default gateway.
type: bool
intersite_bum_traffic:
description:
- Whether to allow intersite BUM traffic.
type: bool
optimize_wan_bandwidth:
description:
- Whether to optimize WAN bandwidth.
type: bool
layer2_stretch:
description:
- Whether to enable L2 stretch.
type: bool
layer2_unknown_unicast:
description:
- Layer2 unknown unicast.
type: str
choices: [ flood, proxy ]
layer3_multicast:
description:
- Whether to enable L3 multicast.
type: bool
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
type: str
choices: [ absent, present, query ]
default: present
extends_documentation_fragment: mso
'''
EXAMPLES = r'''
- name: Add a new BD
mso_schema_template_bd:
host: mso_host
username: admin
password: SomeSecretPassword
schema: Schema 1
template: Template 1
bd: BD 1
state: present
delegate_to: localhost
- name: Remove a BD
mso_schema_template_bd:
host: mso_host
username: admin
password: SomeSecretPassword
schema: Schema 1
template: Template 1
bd: BD1
state: absent
delegate_to: localhost
- name: Query a specific BD
mso_schema_template_bd:
host: mso_host
username: admin
password: SomeSecretPassword
schema: Schema 1
template: Template 1
bd: BD1
state: query
delegate_to: localhost
register: query_result
- name: Query all BDs
mso_schema_template_bd:
host: mso_host
username: admin
password: SomeSecretPassword
schema: Schema 1
template: Template 1
state: query
delegate_to: localhost
register: query_result
'''
RETURN = r'''
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.aci.mso import MSOModule, mso_argument_spec, mso_reference_spec, mso_subnet_spec
def main():
argument_spec = mso_argument_spec()
argument_spec.update(
schema=dict(type='str', required=True),
template=dict(type='str', required=True),
bd=dict(type='str', aliases=['name']), # This parameter is not required for querying all objects
display_name=dict(type='str'),
intersite_bum_traffic=dict(type='bool'),
optimize_wan_bandwidth=dict(type='bool'),
layer2_stretch=dict(type='bool'),
layer2_unknown_unicast=dict(type='str', choices=['flood', 'proxy']),
layer3_multicast=dict(type='bool'),
vrf=dict(type='dict', options=mso_reference_spec()),
subnets=dict(type='list', options=mso_subnet_spec()),
state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=[
['state', 'absent', ['bd']],
['state', 'present', ['bd', 'vrf']],
],
)
schema = module.params['schema']
template = module.params['template']
bd = module.params['bd']
display_name = module.params['display_name']
intersite_bum_traffic = module.params['intersite_bum_traffic']
optimize_wan_bandwidth = module.params['optimize_wan_bandwidth']
layer2_stretch = module.params['layer2_stretch']
layer2_unknown_unicast = module.params['layer2_unknown_unicast']
layer3_multicast = module.params['layer3_multicast']
vrf = module.params['vrf']
subnets = module.params['subnets']
state = module.params['state']
mso = MSOModule(module)
# Get schema_id
schema_obj = mso.get_obj('schemas', displayName=schema)
if schema_obj:
schema_id = schema_obj['id']
else:
mso.fail_json(msg="Provided schema '{0}' does not exist".format(schema))
schema_path = 'schemas/{id}'.format(**schema_obj)
# Get template
templates = [t['name'] for t in schema_obj['templates']]
if template not in templates:
mso.fail_json(msg="Provided template '{0}' does not exist. Existing templates: {1}".format(template, ', '.join(templates)))
template_idx = templates.index(template)
    # Get BD
bds = [b['name'] for b in schema_obj['templates'][template_idx]['bds']]
if bd is not None and bd in bds:
bd_idx = bds.index(bd)
mso.existing = schema_obj['templates'][template_idx]['bds'][bd_idx]
if state == 'query':
if bd is None:
mso.existing = schema_obj['templates'][template_idx]['bds']
elif not mso.existing:
mso.fail_json(msg="BD '{bd}' not found".format(bd=bd))
mso.exit_json()
bds_path = '/templates/{0}/bds'.format(template)
bd_path = '/templates/{0}/bds/{1}'.format(template, bd)
ops = []
mso.previous = mso.existing
if state == 'absent':
if mso.existing:
mso.sent = mso.existing = {}
ops.append(dict(op='remove', path=bd_path))
elif state == 'present':
vrf_ref = mso.make_reference(vrf, 'vrf', schema_id, template)
subnets = mso.make_subnets(subnets)
if display_name is None and not mso.existing:
display_name = bd
if subnets is None and not mso.existing:
subnets = []
payload = dict(
name=bd,
displayName=display_name,
intersiteBumTrafficAllow=intersite_bum_traffic,
optimizeWanBandwidth=optimize_wan_bandwidth,
l2UnknownUnicast=layer2_unknown_unicast,
l2Stretch=layer2_stretch,
l3MCast=layer3_multicast,
subnets=subnets,
vrfRef=vrf_ref,
)
mso.sanitize(payload, collate=True)
if mso.existing:
ops.append(dict(op='replace', path=bd_path, value=mso.sent))
else:
ops.append(dict(op='add', path=bds_path + '/-', value=mso.sent))
mso.existing = mso.proposed
if not module.check_mode:
mso.request(schema_path, method='PATCH', data=ops)
mso.exit_json()
if __name__ == "__main__":
main()
|
#!/usr/bin/env python
"""Module with repackers implementations for various platforms."""
|
import pandas as pd
from joblib import load, dump
from matplotlib import pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.metrics import plot_confusion_matrix
# train the model with inbuilt classifier
def train():
"""Data set reading"""
df = pd.read_csv("../dataset/train.csv.csv")
X = df.iloc[:, :-1]
y = df['class']
model = LogisticRegression(n_jobs=-1)
model.fit(X, y)
dump(model, '../model/lr_model_inbuilt.joblib')
print('Model saved')
# do prediction from the saved model
def prediction(data, plot=True):
model = load('../model/lr_model_inbuilt.joblib')
predictions = model.predict(data)
if plot:
plot_confusion_matrix(model, data, predictions)
plt.show()
return predictions
def controller_predict(controller, test_data, test_labels, plot=True):
clf = load('model/lr_model_inbuilt.joblib')
predictions = clf.predict(test_data)
if plot:
plot_confusion_matrix(clf, test_data, test_labels)
plt.show()
controller.setLRInbuilt(round(accuracy_score(test_labels, predictions) * 100, 3))
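# Minimal usage sketch (the test CSV path is an assumption; it must contain the same
# feature columns as the training data):
#   train()
#   preds = prediction(pd.read_csv("../dataset/test.csv"), plot=False)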
|
from .Regression import Regression
from .exceptions import BrokenModel
import statsmodels.formula.api as smf
class LogisticRegression(Regression):
def __init__(
self, data, formula, significance_level=0.05, model_builder=None, family=None, groups=None, parent=None
):
super().__init__(
data=data, formula=formula, model_builder=smf.logit, significance_level=significance_level, parent=parent
)
@property
def coefficient_table(self):
"""
:rtype: DataFrame
"""
return super().coefficient_table.rename(columns={'P>|z|': 'p'})
@property
def fit(self):
"""
:rtype: LogisticRegression
"""
if self._fit is None:
try:
self._fit = self.model.fit(disp=False)
except Exception as e:
self._fit = BrokenModel(exception=e, regression=self)
return self._fit
@property
def name(self):
"""
:rtype: str
"""
return 'logit'
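# Minimal usage sketch (df, 'outcome' and 'dose' are hypothetical names; 'outcome' must be binary):
#   model = LogisticRegression(data=df, formula='outcome ~ dose')
#   print(model.coefficient_table)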
|
"""Serializers for the use with rest-pandas"""
from rest_framework import serializers
from .models import MQTTMessage
import re
import copy
class MessageSerializer(serializers.ModelSerializer):
class Meta:
model = MQTTMessage
fields = ['id', 'time_recorded', 'topic', 'payload']
pandas_index = ['id']
def generate_parsing_serializer_class(regex):
"""Generate a serializer class from a regular expression."""
regex = re.compile(regex)
groups = regex.groupindex.keys()
# Copy vanilla MessageSerializer class
class_name = 'DynamicParsingMessageSerializer'
parent_classes = (MessageSerializer,)
class_dict = {}
meta_dict = copy.deepcopy(MessageSerializer.Meta.__dict__)
class_dict['Meta'] = type('Meta', (object,), meta_dict)
# Add additional parsed fields
for group in groups:
name, typ = MQTTMessage._parse_group_name(group)
# Add custom field to the serializer
class_dict['parsed_'+name] = serializers.SerializerMethodField()
class_dict['Meta'].fields.append('parsed_'+name)
# Add a method to actually get the value
def _f(self, obj, name=name):
parsed = obj.parse_payload(regex)
if parsed is None or name not in parsed:
return None
else:
return parsed[name]
class_dict['get_parsed_'+name] = _f
return type(class_name, parent_classes, class_dict)
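# Minimal usage sketch (the regex below is a made-up example; the group name encodes the
# parsed field's name and type, as interpreted by MQTTMessage._parse_group_name):
#   TemperatureSerializer = generate_parsing_serializer_class(r'"temp":\s*(?P<temperature_float>[0-9.]+)')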
|
import json as j
import requests
from snake_tail import SNAKE_URL
def samples(file_type=None, limit=None, filter=None, operator=None, order=None, sort=None, json=False, verify=True, from_=None): # pylint: disable=redefined-builtin
url = SNAKE_URL + "/store"
args = []
if file_type:
args += ['file_type={}'.format(file_type)]
if from_:
args += ['from={}'.format(from_)]
if limit:
args += ['limit={}'.format(limit)]
if filter:
for k, v in j.loads(filter).items():
args += ['filter[%s]={"$regex":"%s", "$options": "-i"}' % (k, v)]
if operator:
args += ['operator={}'.format(operator)]
if order:
args += ['order={}'.format(order)]
if sort:
args += ['sort={}'.format(sort)]
if args:
url += "?" + '&'.join(args)
resp = requests.get(url, verify=verify)
resp_json = resp.json()
if not resp.ok:
if json:
print(resp_json)
else:
print("Status: {}".format(resp_json['status'].capitalize()))
print("Message: {}".format(resp_json['message']))
else:
data = resp_json["data"]["samples"]
if json:
print(data)
else:
for i in data:
print("%s\t%s\t%s\t%-25s\t%s" % (i["sha256_digest"], i["timestamp"], i["file_type"], i["mime"], i["name"]))
|
from spanet.dataset.jet_reconstruction_dataset import JetReconstructionDataset
from spanet.dataset.event_info import EventInfo
|
# Apache Thrift Binary Protocol Struct 2.0 Writer in Python
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from thrift import Thrift
class Trade:
def __init__(self):
        self.symbol = ""
        self.price = 0.0
        self.size = 0
        self.timestamp = 0.0
trans = TTransport.TFileObjectTransport(open("data","wb"))
proto = TBinaryProtocol.TBinaryProtocol(trans)
trade = Trade()
trade.symbol = "GE"
trade.price = 27.25
trade.size = 1700
trade.timestamp = 9.5
proto.writeStructBegin("Trade")
proto.writeFieldBegin("symbol", Thrift.TType.STRING, 1)
proto.writeString(trade.symbol)
proto.writeFieldEnd()
proto.writeFieldBegin("price", Thrift.TType.DOUBLE, 2)
proto.writeDouble(trade.price)
proto.writeFieldEnd()
proto.writeFieldBegin("size", Thrift.TType.I32, 3)
proto.writeI32(trade.size)
proto.writeFieldEnd()
proto.writeFieldBegin("timestamp", Thrift.TType.DOUBLE, 4)
proto.writeDouble(trade.timestamp)
proto.writeFieldEnd()
proto.writeFieldStop()
proto.writeStructEnd()
print("Wrote Trade: %s %d @ %f tm: %f" %
(trade.symbol, trade.size, trade.price, trade.timestamp))
|
# Escolha 1 (motorista)
print(f'''De segunda a sábado {nick} roda seu táxi,
muito trabalhador', pois tinha dois filhos pequenos
para sustentar, 'tinha saido de casa para trabalhar
as 14:00 hrs,já cansado do dia corrido, as 20:00 hrs
ele já estava voltando pra casa,quando então,
uma passageira o chamou acenando para ele.''')
print('O que vc faz? continua dirigindo e passa direto[c] ou para e aceita mais uma corrida[p]?')
escolha1 = str(input('-> '))
if escolha1 == 'c':
# Decisão c da escolaha1
print(f'''Muito cansado,{nick} já estava no meio do trajeto para sua casa,
quando então ele olhou para o retrovisor e viu que um carro preto estava
te seguindo a algum tempo...''')
print('Você continua reto dirigindo pra casa[1] ou dobra a esquina pra tentar despistalo[2]')
decisao = int(input('-> '))
    if decisao == 1:
|
from ....core import *
from .base import *
from .voc import *
from .coco import *
import logging
logger = logging.getLogger(__name__)
def get_dataset(root='~/.mxnet/datasets/voc', index_file_name='trainval', name=None, \
classes=None, format='voc', Train=True, **kwargs):
"""
Parameters
----------
root : str
root path to dataset folder
index_file_name : str
        the name of the .txt file which contains images for training or testing.
this is only for custom dataset.
name: str
name for built-in dataset, ('voc', 'voc2007' or 'voc2012')
when use built-in dataset, the index_file_name should be None.
classes: tuple of strs
users can specify classes for custom dataset
ex. classes = ('bike', 'bird', 'cat', ...)
format: str
data format. ('voc', 'coco')
Train : bool
specify Train/Test mode. It is only valid when name is not None.
"""
if format=='voc':
logger.info(">>> create dataset(VOC format) ")
# built-in dataset
if name:
if Train:
if name=='voc':
splits = [('VOC2007', 'trainval'), ('VOC2012', 'trainval')]
                elif name=='voc2007':
                    splits = [('VOC2007', 'trainval')]
                elif name=='voc2012':
                    splits = [('VOC2012', 'trainval')]
else:
splits= [('VOC2007', 'test')]
else: # custom dataset
splits = [('', index_file_name)]
return CustomVOCDetection(root, splits, name, classes, **kwargs)
elif format=='coco':
logger.info(">>> create dataset(COOC format)")
        return COCO(**kwargs)
else:
raise NotImplementedError('Other data formats are not implemented.')
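# Minimal usage sketch (built-in VOC 2007; per the docstring, index_file_name is left as None for built-in names):
#   train_ds = get_dataset(name='voc2007', index_file_name=None, format='voc', Train=True)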
|
from PIL import Image, ImageStat
import os
import shutil
'''
This program opens images in pillow and stores a calculated mean value
It then iterates through the directory comparing mean values
Any images with matching mean values are stored in dupes list
It then moves duplicates into a duplicate folder.
'''
import time
# times the main function
def timer(func):
def wrapper():
before = time.time()
func()
print("Function took:", time.time() - before, "seconds")
return wrapper
def determine_similarity(list1, list2):
if list1 == list2:
return True
r_list1 = [round(i) for i in list1]
r_list2 = [round(i) for i in list2]
similarity = True if r_list1 == r_list2 else False
return similarity
@timer
def main():
file_extensions = ['png', 'jpg', 'gif', 'jpeg', 'jfif']
files = []
file_names = []
print("Loading Images..")
for file in os.listdir():
for ending in file_extensions:
if file.endswith(ending):
file_names.append(file)
files.append(ImageStat.Stat(Image.open(file)).mean)
dupes = []
if 'duplicates' not in os.listdir(): os.mkdir('duplicates')
    # compare each file against every later file (each pair is checked once)
for i in range(len(files)):
# starts at current i and searches till end of files
for y in range(len(files)-1, i, -1):
print("Comparing {} and {}".format(file_names[i], file_names[y]))
similar = determine_similarity(files[i], files[y])
if similar:
print("Image A: {} Image B: {}\n".format(file_names[i], file_names[y]))
if file_names[i] not in dupes:
dupes.append(file_names[i])
for i in dupes:
shutil.move(i, 'duplicates')
if __name__ == "__main__":
main()
|
from bs4 import BeautifulSoup
def parse_profile(profile_id, html_page):
"""This function parses the html page, looking for profile data and returns a dict """
soup = BeautifulSoup(html_page, "html.parser")
data_structure = {
'age': ['span', {'class': 'profile-basics-asl-age'}],
'location': ['span', {'class': 'profile-basics-asl-location'}],
'essays': {'category': ['h2', {'class': 'profile-essay-category'}],
'title': ['h2', {'class': 'profile-essay-title'}],
'contents': ['p', {'class': 'profile-essay-contents'}]
},
'details': {'basic': ['div', {
'class': 'matchprofile-details-section matchprofile-details-section--basics'}],
'badge': ['div', {
'class': 'matchprofile-details-section matchprofile-details-section--black-lives-matter'}],
'pronoun': ['div', {
'class': 'matchprofile-details-section matchprofile-details-section--pronouns'}],
'looks': ['div', {
'class': 'matchprofile-details-section matchprofile-details-section--looks'}],
'background': ['div', {
'class': 'matchprofile-details-section matchprofile-details-section--background'}],
'lifestyle': ['div', {
'class': 'matchprofile-details-section matchprofile-details-section--lifestyle'}],
'family': ['div', {
'class': 'matchprofile-details-section matchprofile-details-section--family'}],
'wiw': ['div', {
'class': 'matchprofile-details-section matchprofile-details-section--wiw'}],
}
}
parsed_data = {}
# Basic info - id
parsed_data['id'] = profile_id
# Basic info - age
parsed_data['age'] = soup.find_all(
data_structure.get('age')[0],
data_structure.get('age')[1])[0].text
# Basic info - location
parsed_data['location'] = soup.find_all(
data_structure.get('location')[0],
data_structure.get('location')[1])[0].text
# Essays
parsed_data['essays'] = list()
for box in soup.find_all('div', {'class': 'profile-essay'}):
box_essay = {}
box_essay['category'] = box.find_all(
data_structure['essays'].get('category')[0],
data_structure['essays'].get('category')[1])[0].text
box_essay['title'] = box.find_all(
data_structure['essays'].get('title')[0],
data_structure['essays'].get('title')[1])[0].text
try :
box_essay['contents'] = box.find_all(
data_structure['essays'].get('contents')[0],
data_structure['essays'].get('contents')[1])[0].text
except IndexError :
box_essay['contents'] = 'NaN'
parsed_data['essays'].append(box_essay)
# Details column
parsed_data['details'] = {}
for section in soup.find_all('div', {'class': 'quickmatch-profiledetails matchprofile-details'}):
for detail in data_structure['details'].keys():
element = data_structure['details'][detail][0]
css_class = data_structure['details'][detail][1]['class']
if section.find(element, css_class):
parsed_data['details'][detail] = section.find(element, css_class).\
find('div', 'matchprofile-details-text').text
return parsed_data
|
#!/usr/bin/env python3
#
# MIT License
#
# (C) Copyright 2021-2022 Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
import argparse
import re
import yaml
import sys
import os
import fnmatch
import logging
import datetime
class LicenseCheck(object):
class LicenseCheckResult(object):
def __init__(self, code, message, matcher=None):
self.code = code
self.message = message
self.matcher = matcher
if code > 0:
logging.warning(message)
else:
logging.info(message)
def __repr__(self):
return "[%d, %s]" % (self.code, self.message)
def __init__(self, **kwargs):
self.config = self.read_config(sys.path[0] + os.path.sep + 'license_check.yaml')
config_override = kwargs.get("config_override")
if not config_override:
config_override = os.getcwd() + os.path.sep + ".license_check.yaml"
if os.path.exists(config_override):
self.config.update(self.read_config(config_override))
else:
logging.info("Skipping non-existent configuration file %s" % config_override)
self.config.update(kwargs)
if self.config["add_exclude"]:
self.config["exclude"].extend(self.config["add_exclude"])
if kwargs.get("add_exclude_cli"):
self.config["exclude"].extend(kwargs["add_exclude_cli"].split(","))
# start_year and end_year may be come as None from argparse
current_year = datetime.datetime.now().year
self.config["start_year"] = self.config["start_year"] if self.config.get("start_year") else current_year
self.config["end_year"] = self.config["end_year"] if self.config.get("end_year") else current_year
if logging.getLogger().isEnabledFor(logging.DEBUG):
logging.debug("Effective configuration:\n" + yaml.safe_dump(self.config))
# Build dict {type_name: (main_pattern, [additional_patterns])}
self.license_pattern_by_type = {}
for type_name in self.config["comment_types"]:
type_def = self.config["comment_types"][type_name]
self.license_pattern_by_type[type_name] = (
self.template_to_pattern(self.config["license_template"], type_def),
list(map(lambda x: self.template_to_pattern(x, type_def), self.config["additional_templates"]))
)
# Cache exclusion computations
self.exclusion_cache = {}
def read_config(self, config_file):
logging.info("Parsing config file %s ..." % os.path.realpath(config_file))
with open(config_file) as f:
return yaml.load(f, Loader=yaml.SafeLoader)
"""
Converts template (a string with [year] and [owner] placeholders) to regex pattern, specific to a file type
(i.e. taking into account shebang line, comment start/end, line prefixes, etc).
"""
def template_to_pattern(self, template, type_def):
license_pattern = re.escape(template.strip()) \
.replace('\\[year\\]', '\[?(?P<start_year>[0-9\- ,]*)(?P<end_year>[0-9]{4})\]?') \
.replace('\\[owner\\]', '(?P<owner>[a-zA-Z0-9 \-,/]+)') \
.split("\n")
line_prefix = re.sub("(\\\ )+", "\\ *", re.escape(type_def["line_prefix"]))
return \
"^(?P<shebang>" + type_def["shebang_pattern"] + ")?" + \
"(?P<license>\n*" + \
(type_def["insert_before_pattern"] if "insert_before_pattern" in type_def else re.escape(type_def["insert_before"])) + \
"(" + line_prefix + "\n)*" + \
"\n*".join(map(lambda x: (("(" + line_prefix + ")?") if x == "\\" else line_prefix) + x + " *", license_pattern)) + "\n*" + \
"(" + line_prefix + "\n)*" + \
(type_def["insert_after_pattern"] if "insert_after_pattern" in type_def else re.escape(type_def["insert_after"])) + \
")?"
"""
    Checks whether a given path matches the exclusion configuration. Matching is performed for nested paths,
    starting from the original path and going up to the root folder. This is needed to exclude files that were
    explicitly provided on the command line and do not match an exclusion pattern themselves, but belong to an excluded folder.
"""
def matches_exclude(self, path):
relpath = os.path.relpath(path)
logging.debug("Checking \"%s\" for exclusion" % (relpath))
while relpath:
if self.matches_exclude_path(relpath):
logging.debug("Final result of exclusion check for \"%s\" is True" % (path))
return True
else:
relpath = relpath.rpartition("/")[0]
logging.debug("Final result of exclusion check for \"%s\" is False" % (path))
return False
def matches_exclude_path(self, path):
if path in self.exclusion_cache:
logging.debug("Found exclusion check result for \"%s\" in cache as %s" % (path, str(self.exclusion_cache[path])))
return self.exclusion_cache[path]
for p in self.config["exclude"]:
if fnmatch.fnmatch(path, p):
logging.debug("Matching \"%s\" against \"%s\" .... matched!" % (path, p))
self.exclusion_cache[path] = True
return True
else:
logging.debug("Matching \"%s\" against \"%s\" .... no match!" % (path, p))
self.exclusion_cache[path] = False
return False
"""
Main working method
"""
def check(self, scan_targets, fix=False):
result = []
if isinstance(scan_targets, str):
scan_targets = [scan_targets]
for scan_target in scan_targets:
if self.matches_exclude(scan_target):
logging.info("Excluding file or directory %s as it matches excludes pattern" % scan_target)
elif os.path.isdir(scan_target):
logging.info("Scanning directory %s" % scan_target)
for dirname, subdirs, filenames in os.walk(scan_target):
for subdir in subdirs.copy():
if self.matches_exclude(dirname + os.path.sep + subdir):
logging.info("Excluding directory %s/%s as it matches excludes pattern" % (dirname, subdir))
subdirs.remove(subdir)
for filename in filenames:
filename = dirname + os.path.sep + filename
if os.path.islink(filename):
logging.info("Excluding file %s as it is a link" % filename)
elif self.matches_exclude(filename):
logging.info("Excluding file %s as it matches excludes pattern" % filename)
else:
result.append(self.check_file(filename, fix))
elif os.path.isfile(scan_target):
logging.info("Scanning file %s" % scan_target)
result.append(self.check_file(scan_target, fix))
else:
logging.warning("Can't scan %s - not regular file or directory" % scan_target)
return result
"""
Evaluate license template (replace [year] and [owner] placeholders, add comment start/end and line prefixes)
"""
def license_template(self, file_type, matcher=None):
start_year = self.config.get("start_year")
end_year = self.config.get("end_year")
if matcher and matcher.groupdict().get("end_year"):
start_year_current = matcher.groupdict().get("start_year")
end_year_current = int(matcher.groupdict().get("end_year"))
group_current = "%s%d" % (start_year_current, end_year_current)
if end_year_current == end_year - 1:
# End year is 1 year behind
if start_year_current.endswith("-"):
# "2016, 2019-2021" > "2016, 2019-2022"
year_replace = "%s%d" % (start_year_current, end_year)
else:
# "2016, 2021" > "2016, 2021-2022"; "2021" > "2021-2022"
year_replace = "%s-%d" % (group_current, end_year)
elif end_year_current < end_year - 1:
# End year is more then 1 year behind - add new end year separated with comma
# "2016, 2018-2020" > "2016, 2018-2020, 2022"
year_replace = "%s, %d" % (group_current, end_year)
else:
# End year is already up to date or in the future, replacing it with end_year
year_replace = "%s%d" % (start_year_current, end_year)
else:
# No year matching group - new header being added
if start_year < end_year:
# Add year range, in case if back-dated fix requested by providing start_year in the past
year_replace = "%d-%d" % (start_year, end_year)
else:
# Add single year as part of new license header
year_replace = str(end_year)
type_def = self.config["comment_types"][file_type]
license_text = self.config["license_template"].strip()
if type_def["line_prefix"]:
license_text = "\n" + license_text + "\n"
license_text = "\n".join(map(lambda x: (type_def["line_prefix"] + x).rstrip(), license_text.split("\n")))
return type_def["insert_before"] + \
license_text.replace("[owner]", self.config["owner"]).replace("[year]", year_replace) + "\n" + \
type_def["insert_after"]
def fix_or_report(self, code, message, file_type, matcher, fix, filename, outfile):
if not fix:
return self.LicenseCheckResult(code, message, matcher)
logging.info("Fixing file %s ..." % filename)
new_content = ""
pos = 0
if matcher and matcher.groupdict().get("shebang"):
new_content += matcher.group("shebang")
pos += len(matcher.group("shebang"))
if matcher and matcher.groupdict().get("license"):
new_content += self.license_template(file_type, matcher)
pos += len(matcher.group("license"))
else:
new_content += self.license_template(file_type)
with open(filename) as f:
content = f.read()
with open(outfile if outfile is not None else filename, "w") as f:
f.write(new_content + content[pos:])
return None
def check_file(self, filename, fix=False, outfile=None):
file_type_def = None
for file_type_entry in self.config["file_types"]:
if fnmatch.fnmatch(os.path.relpath(filename), file_type_entry["pattern"]):
file_type_def = file_type_entry
logging.debug("Matching %s against %s to determine file type .... matched!" % (os.path.relpath(filename), file_type_entry["pattern"]))
break
else:
logging.debug("Matching %s against %s to determine file type .... no match!" % (os.path.relpath(filename), file_type_entry["pattern"]))
if not file_type_def:
return self.LicenseCheckResult(0, "Filename pattern not recognized: %s" % filename)
file_type = file_type_def["type"]
with open(filename) as f:
content = f.read(4092)
logging.debug("Trying main file comment type for %s as %s" % (filename, file_type))
pattern = self.license_pattern_by_type[file_type][0]
logging.debug("Applying pattern:\n%s\nagainst content\n%s" % (pattern, content))
result = re.search(pattern, content)
if not result or not result.groupdict().get("license") and "alternative_type" in file_type_def:
logging.debug("Trying alternate file comment type for %s as %s" % (filename, file_type_def["alternative_type"]))
pattern = self.license_pattern_by_type[file_type_def["alternative_type"]][0]
logging.debug("Applying pattern:\n%s\nagainst content\n%s" % (pattern, content))
result = re.search(pattern, content)
if result and result.groupdict().get("license"):
logging.debug("Discovered groups: %s" % str(result.groupdict()))
if result.groupdict().get("end_year") and result.group("end_year") != str(self.config["end_year"]):
return self.fix_or_report(1, "License is detected, but copyright year is not up to date: %s" % filename, file_type, result, fix, filename, outfile)
if result.groupdict().get("owner") and result.group("owner") != self.config["owner"]:
return self.fix_or_report(1, "License is detected, but copyright owner is not current: %s" % filename, file_type, result, fix, filename, outfile)
return self.fix_or_report(0, "License is up to date: %s" % filename, file_type, result, fix, filename, outfile)
else:
logging.debug("Main pattern did not match, trying additional patterns")
for pattern in self.license_pattern_by_type[file_type][1]:
logging.debug("Applying pattern:\n%s\nagainst content\n%s" % (pattern, content))
result = re.search(pattern, content)
if result and result.groupdict().get("license"):
return self.fix_or_report(1, "License is detected, but wording is wrong: %s" % filename, file_type, result, fix, filename, outfile)
logging.debug("Additional pattern did not match")
return self.fix_or_report(1, "License is not detected: %s" % filename, file_type, result, fix, filename, outfile)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Check or fix license header in source files, with year range support')
parser.add_argument('--fix', action='store_true', help='fix headers in source files in target directory')
parser.add_argument('--config', metavar='config_file', help='optional config file, defaults to <scan_directory>/.license_check.yaml')
parser.add_argument('--log-level', choices=["debug", "info", "warn"], help='log level, defaults to "info"')
parser.add_argument('--add-exclude', metavar='add_exclude', help='additional filename exclusion patterns, comma-separated')
parser.add_argument('--start-year', metavar='start_year', type=int, help='start year to use when new header is added (defaults to current year)')
parser.add_argument('--end-year', metavar='end_year', type=int, help='end year to use (defaults to current year)')
parser.add_argument('scan_target', nargs='*', default=os.path.curdir, help='directories and individual files to scan (defaults to current directory)')
args = parser.parse_args()
if args.log_level == "warn":
log_level = logging.WARNING
elif args.log_level == "debug":
log_level = logging.DEBUG
elif args.log_level is None and os.environ.get("RUNNER_DEBUG") == "1":
log_level = logging.DEBUG
else:
log_level = logging.INFO
logging.basicConfig(format="[%(levelname)s] %(message)s", level=log_level)
license_check = LicenseCheck(config_override=args.config, add_exclude_cli=args.add_exclude,
start_year=args.start_year, end_year=args.end_year)
result = license_check.check(args.scan_target, fix=args.fix)
if not args.fix:
success = len(list(filter(lambda x: x.code == 0, result)))
total = len(result)
if total > 0:
logging.info("License headers score: %d%%" % (100.0 * success / total))
else:
logging.warning("No files were scanned")
if success < total:
logging.warning("Not all files have proper license headers. You may fix them by running:")
logging.warning("")
logging.warning(" docker run -it --rm -v $(pwd):/github/workspace artifactory.algol60.net/csm-docker/stable/license-checker --fix %s" % " ".join(args.scan_target))
logging.warning("")
logging.warning("Please refer to https://github.com/Cray-HPE/license-checker for more details.")
sys.exit(1 if success < total else 0)
|
from __future__ import print_function
import Pyro4
import bench
obj=bench.bench()
daemon=Pyro4.Daemon()
uri = daemon.register(obj,"example.benchmark")
print("Server running, uri = %s" % uri)
daemon.requestLoop()
|
"""
Compose and send recommendation emails to arxiv-sanity-lite users!
I run this script in a cron job to send out emails to the users with their
recommendations. There's a bit of copy paste code here but I expect that
the recommendations may become more complex in the future, so this is ok for now.
You'll notice that the file sendgrid_api_key.txt is not in the repo, you'd have
to manually register with sendgrid yourself, get an API key and put it in the file.
"""
import os
import time
import random
import argparse
import numpy as np
from sklearn import svm
import sendgrid
from sendgrid.helpers.mail import Email, To, Content, Mail
from aslite.db import load_features
from aslite.db import get_tags_db
from aslite.db import get_metas_db
from aslite.db import get_papers_db
from aslite.db import get_email_db
# -----------------------------------------------------------------------------
# the html template for the email
template = """
<!DOCTYPE HTML>
<html>
<head>
<style>
body {
font-family: Arial, sans-serif;
}
.s {
font-weight: bold;
margin-right: 10px;
}
.a {
color: #333;
}
.u {
font-size: 12px;
color: #333;
margin-bottom: 10px;
}
.f {
color: #933;
display: inline-block;
}
</style>
</head>
<body>
<br><br>
<div>Hi! Here are your <a href="https://arxiv-sanity-lite.com">arxiv-sanity-lite</a> recommendations. __STATS__</div>
<br><br>
<div>
__CONTENT__
</div>
<br><br>
<div>
To stop these emails remove your email in your <a href="https://arxiv-sanity-lite.com/profile">account</a> settings. (your account is __ACCOUNT__).
</div>
<div> <3, arxiv-sanity-lite. </div>
</body>
</html>
"""
# -----------------------------------------------------------------------------
def calculate_recommendation(
tags,
time_delta = 3, # how recent papers are we recommending? in days
):
# a bit of preprocessing
x, pids = features['x'], features['pids']
n, d = x.shape
ptoi, itop = {}, {}
for i, p in enumerate(pids):
ptoi[p] = i
itop[i] = p
# loop over all the tags
all_pids, all_scores = {}, {}
for tag, pids in tags.items():
if len(pids) == 0:
continue
# construct the positive set for this tag
y = np.zeros(n, dtype=np.float32)
for pid in pids:
y[ptoi[pid]] = 1.0
# classify
clf = svm.LinearSVC(class_weight='balanced', verbose=False, max_iter=10000, tol=1e-6, C=0.01)
clf.fit(x, y)
s = clf.decision_function(x)
sortix = np.argsort(-s)
pids = [itop[ix] for ix in sortix]
scores = [100*float(s[ix]) for ix in sortix]
# filter by time to only recent papers
deltat = time_delta*60*60*24 # allowed time delta in seconds
keep = [i for i,pid in enumerate(pids) if (tnow - metas[pid]['_time']) < deltat]
pids, scores = [pids[i] for i in keep], [scores[i] for i in keep]
# finally exclude the papers we already have tagged
have = set().union(*tags.values())
keep = [i for i,pid in enumerate(pids) if pid not in have]
pids, scores = [pids[i] for i in keep], [scores[i] for i in keep]
# store results
all_pids[tag] = pids
all_scores[tag] = scores
return all_pids, all_scores
# -----------------------------------------------------------------------------
def render_recommendations(user, tags, tag_pids, tag_scores):
# render the paper recommendations into the html template
# first we are going to merge all of the papers / scores together using a MAX
max_score = {}
max_source_tag = {}
for tag in tag_pids:
for pid, score in zip(tag_pids[tag], tag_scores[tag]):
max_score[pid] = max(max_score.get(pid, -99999), score) # lol
if max_score[pid] == score:
max_source_tag[pid] = tag
# now we have a dict of pid -> max score. sort by score
max_score_list = sorted(max_score.items(), key=lambda x: x[1], reverse=True)
pids, scores = zip(*max_score_list)
# now render the html for each individual recommendation
parts = []
n = min(len(scores), args.num_recommendations)
for score, pid in zip(scores[:n], pids[:n]):
p = pdb[pid]
authors = ', '.join(a['name'] for a in p['authors'])
# crop the abstract
summary = p['summary']
summary = summary[:min(500, len(summary))]
if len(summary) == 500:
summary += '...'
parts.append(
"""
<tr>
<td valign="top"><div class="s">%.2f</div></td>
<td>
<a href="%s">%s</a> <div class="f">(%s)</div>
<div class="a">%s</div>
<div class="u">%s</div>
</td>
</tr>
""" % (score, p['link'], p['title'], max_source_tag[pid], authors, summary)
)
# render the final html
out = template
# render the recommendations
final = '<table>' + ''.join(parts) + '</table>'
out = out.replace('__CONTENT__', final)
# render the stats
num_papers_tagged = len(set().union(*tags.values()))
tags_str = ', '.join(['"%s" (%d)' % (t, len(pids)) for t, pids in tags.items()])
stats = f"We took the {num_papers_tagged} papers across your {len(tags)} tags ({tags_str}) and \
ranked {len(pids)} papers that showed up on arxiv over the last \
{args.time_delta} days using tfidf SVMs over paper abstracts. Below are the \
top {args.num_recommendations} papers. Remember that the more you tag, \
the better this gets:"
out = out.replace('__STATS__', stats)
# render the account
out = out.replace('__ACCOUNT__', user)
return out
# -----------------------------------------------------------------------------
# send the actual html via sendgrid
def send_email(to, html):
# init the api
assert os.path.isfile('sendgrid_api_key.txt')
api_key = open('sendgrid_api_key.txt', 'r').read().strip()
sg = sendgrid.SendGridAPIClient(api_key=api_key)
# construct the email
from_email = Email("admin@arxiv-sanity-lite.com")
to_email = To(to)
subject = tnow_str + " Arxiv Sanity Lite recommendations"
content = Content("text/html", html)
mail = Mail(from_email, to_email, subject, content)
# hope for the best :)
if not args.dry_run:
response = sg.client.mail.send.post(request_body=mail.get())
print(response.status_code)
pass
# -----------------------------------------------------------------------------
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Sends emails with recommendations')
parser.add_argument('-n', '--num-recommendations', type=int, default=20, help='number of recommendations to send per person')
    parser.add_argument('-t', '--time-delta', type=int, default=3, help='how recent papers to recommend, in days')
parser.add_argument('-d', '--dry-run', type=int, default=0, help='if set to 1 do not actually send the emails')
parser.add_argument('-u', '--user', type=str, default='', help='restrict recommendations only to a single given user (used for debugging)')
args = parser.parse_args()
print(args)
tnow = time.time()
tnow_str = time.strftime('%b %d', time.localtime(tnow)) # e.g. "Nov 27"
# read entire db simply into RAM
with get_tags_db() as tags_db:
tags = {k:v for k,v in tags_db.items()}
# read entire db simply into RAM
with get_metas_db() as mdb:
metas = {k:v for k,v in mdb.items()}
# read entire db simply into RAM
with get_email_db() as edb:
emails = {k:v for k,v in edb.items()}
# read tfidf features into RAM
features = load_features()
# keep the papers as only a handle, since this can be larger
pdb = get_papers_db()
# iterate all users, create recommendations, send emails
num_sent = 0
for user, tags in tags.items():
# verify that we have an email for this user
email = emails.get(user, None)
if not email:
print("skipping user %s, no email" % (user, ))
continue
if args.user and user != args.user:
print("skipping user %s, not %s" % (user, args.user))
continue
# verify that we have at least one positive example...
num_papers_tagged = len(set().union(*tags.values()))
if num_papers_tagged == 0:
print("skipping user %s, no papers tagged" % (user, ))
continue
# insert a fake entry in tags for the special "all" tag, which is the union of all papers
# tags['all'] = set().union(*tags.values())
# calculate the recommendations
pids, scores = calculate_recommendation(tags, time_delta=args.time_delta)
if all(len(lst) == 0 for tag, lst in pids.items()):
print("skipping user %s, no recommendations were produced" % (user, ))
continue
# render the html
print("rendering top %d recommendations into a report for %s..." % (args.num_recommendations, user))
html = render_recommendations(user, tags, pids, scores)
# temporarily for debugging write recommendations to disk for manual inspection
if os.path.isdir('recco'):
with open('recco/%s.html' % (user, ), 'w') as f:
f.write(html)
# actually send the email
print("sending email...")
send_email(email, html)
num_sent += 1
# zzz?
# time.sleep(1 + random.uniform(0, 2))
print("done.")
print("sent %d emails" % (num_sent, ))
|
# Angold4 20200509 C1.1.20
from random import randint
def shuffle(*data):
repeat = []
shuffle = []
length = len(data)
while True:
number = randint(0, length-1)
if number in repeat:
if len(repeat) == length:
return shuffle
else:
repeat.append(number)
shuffle.append(data[number])
if __name__ == "__main__":
print(shuffle(1, 2, 3, 4, 5))
|
#!/usr/bin/env python
from distutils.core import setup
setup(name='pyERA',
version='0.1',
url='https://github.com/mpatacchiola/pyERA',
description='Implementation of the Epigenetic Robotic Architecture (ERA). It includes standalone classes for Self-Organizing Maps (SOM) and Hebbian Learning',
author='Massimiliano Patacchiola',
packages = ['pyERA'],
package_data={'pyERA': ['Readme.md']},
include_package_data=True,
license="The MIT License (MIT)",
py_modules=['pyERA'],
requires = ['numpy']
)
|
import numpy
from copy import deepcopy
from handwriting_features.data.utils.math import derivation
from handwriting_features.data.utils.dsp import LowPassFilter, GaussianFilter
from handwriting_features.features.implementation.conventional.spatial import stroke_length
from handwriting_features.features.implementation.conventional.temporal import stroke_duration
class WritingTempoUtils(object):
"""Class implementing writing tempo utils"""
def __init__(self, sample_wrapper, in_air):
"""Initializes the writing tempo utils object"""
# Set the stroke lengths and durations
self.stroke_lengths = stroke_length(sample_wrapper, in_air)
self.stroke_durations = stroke_duration(sample_wrapper, in_air)
def get_writing_tempo(self):
"""Extracts the writing tempo"""
return len(self.stroke_lengths) / (sum(self.stroke_durations) + numpy.finfo(float).eps)
class WritingStopsUtils(object):
"""Class implementing writing stops utils"""
def __init__(self, sample_wrapper):
"""Initializes the writing stops utils object"""
# Set the strokes and indices
self.strokes = sample_wrapper.strokes
self.indices = self._get_stroke_indexes([stroke for _, stroke in self.strokes])
def get_writing_stops(self):
"""Extracts the stroke stops"""
# Prepare the stops
time = numpy.nan
stops_borders_left = []
stops_borders_right = []
# Extract the stops
for (pen_status, stroke), index in zip(self.strokes, self.indices):
if pen_status == "in_air":
continue
# Compute the vector of length, time
length = numpy.sqrt(numpy.power(derivation(stroke.x), 2) + numpy.power(derivation(stroke.y), 2))
time = derivation(stroke.time)
# Compute the vector of velocity (value <= 1 mm/s is set to 0)
velocity = (d / t for (d, t) in zip(length, time))
velocity = numpy.array([0 if v <= 1 else v for v in velocity])
# Get the number of samples equaling to 15 ms
num_samples = numpy.ceil(0.015 / numpy.mean(time))
# Identify the stops
border_left, border_right = self._get_borders(velocity)
# Take only pauses lasting at least 15 ms
pause_indices = numpy.where((border_right - border_left) > num_samples)[0]
border_left = border_left[pause_indices].astype(numpy.float)
border_right = border_right[pause_indices].astype(numpy.float)
# Fuze the pauses
border_left, border_right = self._fuze_pauses(border_left, border_right, num_samples)
# Add the starting index of the stroke
border_left += index
border_right += index - 1
# Append the borders to the stops
stops_borders_left += border_left.tolist()
stops_borders_right += border_right.tolist()
# Get the writing stops
stops = (numpy.array(stops_borders_right) - numpy.array(stops_borders_left)) * numpy.mean(time)
stops = numpy.array(stops)
# Return the writing stops
return stops
@classmethod
def _get_stroke_indexes(cls, strokes):
"""Gets the indexes of the strokes"""
# Prepare the list of indexes
indexes = []
# Get the indexes
for i in range(len(strokes)):
if i == 0:
indexes.append(0)
else:
indexes.append(indexes[i - 1] + len(strokes[i - 1].time))
# Return the indexes
return indexes
@classmethod
def _get_borders(cls, array, border_value=0):
"""Gets borders of an array given a border value"""
# Get the shifted arrays
array_l = array[:-1]
array_r = array[1:]
# Get the borders
border_l = numpy.where(numpy.logical_and(array_r == border_value, array_l != border_value))[0]
border_r = numpy.where(numpy.logical_and(array_r != border_value, array_l == border_value))[0]
if array[0] == border_value:
border_l = numpy.array([0] + border_l.tolist())
if array[-1] == border_value:
border_r = numpy.array(border_r.tolist() + [len(array)])
# Return the borders
return border_l, border_r
@classmethod
def _fuze_pauses(cls, border_left, border_right, num_samples):
"""Fuzes the pauses"""
# Fuze the pauses
if len(border_left) > 1:
for i in range(len(border_left) - 1):
if border_left[i + 1] - border_right[i] < (2 * num_samples):
border_left[i + 1], border_right[i] = numpy.nan, numpy.nan
# Get the improper pauses
to_remove = [
i for i, (l, r) in enumerate(zip(border_left, border_right))
if numpy.isnan(l) and numpy.isnan(r)
]
# Remove improper pauses
border_left = numpy.delete(border_left, to_remove)
border_right = numpy.delete(border_right, to_remove)
# Update the pauses
nans_right = numpy.isnan(border_right)
if nans_right.any():
border_right = numpy.array([
border_right[i + 1] if is_nan else border_right[i]
for i, is_nan in zip(range(len(border_right)), nans_right)
])
border_left = numpy.delete(border_left, numpy.where(nans_right)[0] + 1)
border_right = numpy.delete(border_right, numpy.where(nans_right)[0] + 1)
# Return the fused pauses
return border_left, border_right
class WritingNumberOfChangesUtils(object):
"""Class implementing writing number of changes utils"""
# Default computational arguments
fc = 17.5
n = 50
def __init__(self, sample_wrapper, fs, fc=None, n=None):
"""
Initializes the writing number of changes utils object.
:param fs: sampling frequency
:type fs: float
:param fc: cutoff frequency for the low-pass filter, defaults to 17.5
:type fc: float, optional
:param n: number of samples of a Gaussian filter, defaults to 50
:type n: int
"""
# Set the sample wrapper
self.sample_wrapper = deepcopy(sample_wrapper)
# Set the DSP arguments
self.fs = fs
self.fc = fc if fc else WritingNumberOfChangesUtils.fc
self.n = n if n else WritingNumberOfChangesUtils.n
# Set the filter instances
self.low_pass_filter = LowPassFilter(self.fs, self.fc)
self.gaussian_filter = GaussianFilter(self.fs, self.n)
def get_number_of_changes(self):
"""Extracts the number of writing changes"""
# Get the duration
duration = self.sample_wrapper.sample_time[-1] - self.sample_wrapper.sample_time[0]
# Filter x, y, azimuth, tilt, and pressure by a low-pass filter
self.sample_wrapper.sample = self._filter_data_with_low_pass_filter(self.sample_wrapper.sample)
# Get the on-surface strokes
strokes = self.sample_wrapper.on_surface_strokes
# Prepare the output variables
changes_x = 0
changes_y = 0
changes_azimuth = 0
changes_tilt = 0
changes_pressure = 0
changes_velocity = 0
# Get the number of changes
for stroke in strokes:
# Compute the vector of length, time, and velocity
length = numpy.sqrt(numpy.power(derivation(stroke.x), 2) + numpy.power(derivation(stroke.y), 2))
time = derivation(stroke.time)
velocity = numpy.array([d / t for (d, t) in zip(length, time)])
# Filter x, y, azimuth, tilt, pressure, and velocity by a Gaussian filter
stroke = self._filter_data_with_gaussian_filter(stroke)
velocity = self._filter_velocity_with_gaussian_filter(velocity)
# Compute the number of changes
changes_x += self._get_changes(stroke.x)
changes_y += self._get_changes(stroke.y)
changes_azimuth += self._get_changes(stroke.azimuth)
changes_tilt += self._get_changes(stroke.tilt)
changes_pressure += self._get_changes(stroke.pressure)
changes_velocity += self._get_changes(velocity)
# Get the relative number of changes
relative_changes_x = changes_x / duration
relative_changes_y = changes_y / duration
relative_changes_azimuth = changes_azimuth / duration
relative_changes_tilt = changes_tilt / duration
relative_changes_pressure = changes_pressure / duration
relative_changes_velocity = changes_velocity / duration
# Return the number of changes
return numpy.array([
changes_x,
changes_y,
changes_azimuth,
changes_tilt,
changes_pressure,
changes_velocity,
relative_changes_x,
relative_changes_y,
relative_changes_azimuth,
relative_changes_tilt,
relative_changes_pressure,
relative_changes_velocity
])
@classmethod
def _get_changes(cls, signal):
"""Gets the changes"""
# Get the left/right side changes
changes_left = numpy.sum(numpy.logical_and(signal[1:-1] > signal[:-2], signal[1:-1] > signal[2:]))
changes_right = numpy.sum(numpy.logical_and(signal[1:-1] < signal[:-2], signal[1:-1] < signal[2:]))
# Return the changes
return changes_left + changes_right
def _filter_data_with_low_pass_filter(self, signal):
"""Filters an input sample/stroke data by a low-pass filter"""
# Filter the signal
signal.x = self.low_pass_filter.filter(signal.x)
signal.y = self.low_pass_filter.filter(signal.y)
signal.azimuth = self.low_pass_filter.filter(signal.azimuth)
signal.tilt = self.low_pass_filter.filter(signal.tilt)
signal.pressure = self.low_pass_filter.filter(signal.pressure)
# Return the filtered signal
return signal
def _filter_data_with_gaussian_filter(self, signal):
"""Filters an input sample/stroke data by a Gaussian filter"""
# Filter the signal
signal.x = self.gaussian_filter.filter(signal.x)
signal.y = self.gaussian_filter.filter(signal.y)
signal.azimuth = self.gaussian_filter.filter(signal.azimuth)
signal.tilt = self.gaussian_filter.filter(signal.tilt)
signal.pressure = self.gaussian_filter.filter(signal.pressure)
# Return the filtered signal
return signal
def _filter_velocity_with_low_pass_filter(self, velocity):
"""Filters an input velocity by a low-pass filter"""
return self.low_pass_filter.filter(velocity)
def _filter_velocity_with_gaussian_filter(self, velocity):
"""Filters an input velocity by a Gaussian filter"""
return self.gaussian_filter.filter(velocity)
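# A minimal usage sketch (not part of the original module): `sample_wrapper` is
# assumed to be a handwriting sample wrapper object from this library, and the
# sampling frequency below is a hypothetical example value.
#
#   utils = WritingNumberOfChangesUtils(sample_wrapper, fs=133.0)
#   changes = utils.get_number_of_changes()
#   # changes[:6] hold the absolute counts, changes[6:] the counts normalised
#   # by the writing duration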
|
try:
from .svbcomp import load2
except ImportError:
from ._compress import load2
class Postings():
"""
    The postings for a single term: a sorted list whose entries depend on the index format:
* existence: 1-tuple of (docId)
* frequency: 2-tuple of (docId, frequency)
* positions: 3-tuple of (docId, frequency, compressedPositions)
    You can decompress compressedPositions with the "load2" function, which returns an
    array.array('I', [...])
"""
def __init__(self, postings):
self._postings = postings
self._current = 0 # This is NOT a docId
self._last = len(postings) - 1
def first_doc(self):
return self._postings[0]
def last_doc(self):
return self._postings[-1]
def next_doc(self):
idx = self._current
self._current += 1
return self._postings[idx]
def prev_doc(self):
self._current -= 1
return self._postings[self._current]
def all_docs(self):
self._current = 0
while self._current <= self._last:
yield self._postings[self._current]
self._current += 1
def reset(self):
self._current = 0
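# A minimal usage sketch (not part of the original module), using the
# frequency index format (2-tuples of (docId, frequency)); the postings data
# below is hypothetical:
#
#   postings = Postings([(1, 3), (4, 1), (9, 2)])
#   postings.first_doc()              # (1, 3)
#   postings.last_doc()               # (9, 2)
#   for doc_id, freq in postings.all_docs():
#       print(doc_id, freq)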
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# created by kamoshiren
import telebot
from telebot.types import Message
token = "" # Inser your Telegram bot token here
bot = telebot.TeleBot(token)
keyboard1 = telebot.types.ReplyKeyboardMarkup(one_time_keyboard=True, resize_keyboard=True)
keyboard1.row('About us', 'Our Instagram')
keyboard1.row("Prices", "Reviews")
@bot.message_handler(commands=['start', 'help'])
def start(message):
#markup = telebot.types.InlineKeyboardMarkup()
#button = telebot.types.InlineKeyboardButton(text='CLick me', callback_data='add')
#markup.add(button)
    bot.send_message(chat_id=message.chat.id, text="""Art studio
🔶Oil paintings
◼️Pencil portraits
🔶Handmade work only
⚫️Delivery across Russia
📍Message us to place an order
Press one of the buttons below to learn more about us and what we do, or send /help to show this message again :)""", reply_markup=keyboard1)
@bot.message_handler(content_types=["text"])
def default_test(message: Message):
    if message.text.lower() == 'about us':
        bot.send_message(message.chat.id, 'We paint pictures to order and send them to you by mail. We have been working for a long time, and our team includes only experienced artists who will do the work according to your preferences.', reply_markup=keyboard1)
    elif message.text.lower() == 'prices':
        bot.send_message(message.chat.id, 'DREAM ART style portrait - from 1000 RUB\nPencil portrait - from 1100 RUB\nOil painting - from 3000 RUB\nYou can find out the exact price by sending us your photo. On average we complete an order in 2 to 5 days.', reply_markup=keyboard1)
    elif message.text.lower() == 'our instagram':
        keyboard = telebot.types.InlineKeyboardMarkup()
        url_button = telebot.types.InlineKeyboardButton(text="Go to Instagram", url="https://www.instagram.com/*")
        keyboard.add(url_button)
        bot.send_message(message.chat.id, "The Instagram link is in the button below:", reply_markup=keyboard)
    elif message.text.lower() == 'reviews':
        #bot.send_message(message.chat.id, "(reviews will go here)", " ", reply_markup=keyboard1)
        bot.send_photo(message.chat.id, open('./review1.jpg', 'rb'))
        bot.send_photo(message.chat.id, open('./review2.jpg', 'rb'))
        bot.send_photo(message.chat.id, open('./review3.jpg', 'rb'))
        bot.send_photo(message.chat.id, open('./review4.jpg', 'rb'), reply_markup=keyboard1)
    else:
        keyboard = telebot.types.InlineKeyboardMarkup()
        url_button = telebot.types.InlineKeyboardButton(text="Go to Instagram", url="https://www.instagram.com/*")
        keyboard.add(url_button)
        bot.send_message(message.chat.id, "Please clarify your question :)\nBy the way, we have our own Instagram. We post fresh news and our work there. Be sure to check it out!\nTo show the menu, send me /help", reply_markup=keyboard)
@bot.callback_query_handler(func=lambda call: True)
def query_handler(call):
if call.data == 'add':
bot.answer_callback_query(callback_query_id=call.id, text='Hello world')
if __name__ == '__main__':
bot.polling()
|
# Generated by Django 3.1 on 2020-09-27 20:09
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('community', '0007_auto_20200927_2107'),
]
operations = [
migrations.AlterField(
model_name='community',
name='key',
field=models.CharField(editable=False, max_length=500, null=True, unique=True),
),
]
|
import pandas as pd
import pickle
import os
from settings import DATA_DIR
fields = ['hour', 'C1', 'C14', 'C15', 'C16', 'C17', 'C18', 'C19', 'C20', 'C21',
'banner_pos', 'site_id','site_domain', 'site_category','app_id','app_domain',
'app_category', 'device_model', 'device_type',
'device_conn_type']
data = pd.read_csv('G://Datasets//avazuCTR//test.csv')
for field_name in fields:
field_set_v = set(data[field_name].values)
with open(os.path.join(DATA_DIR, "field2count/%s.pkl"%field_name), "rb") as f:
field_set = pickle.load(f)
difs = field_set_v.difference(field_set)
print("field: %s, dif len: %d, diff: %s" % (field_name, len(difs), str(difs)))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2019-09-24 15:45:34
# @Author : Kaiyan Zhang (kyzhang@ir.hit.edu.cn)
# @Link : https://github.com/iseesaw
# @Version : $Id$
import json
import argparse
import numpy as np
import matplotlib.pyplot as plt
from visual import load_result, get_pairs, load_tsp
def main(filename):
"""
:param filename: the tsp output file name
"""
tsp_problem = filename.split(".")[0]
result = load_result(filename, "../output/result/output")
return result, tsp_problem
# tsp = "../tsp/%s.tsp" % result["tsp"]
# info, node_coords = load_tsp(tsp)
# best_one = result["result"]
# plt.figure(f'{result["tsp"]}')
# plt.title("{} fitness={:.2f}".format(tsp_problem, best_one['fitness']))
# x, y = get_pairs(best_one["gene"], node_coords)
# for i in range(len(x)):
# plt.plot(x[i], y[i], color='b')
# plt.scatter(x[i], y[i], color='r')
# plt.savefig("../output/figure/%s.png" % tsp_problem)
if __name__ == '__main__':
with open("../output/result/best_instance.json", "r") as f:
data = json.load(f)
result = {}
for key, value in data.items():
res, tsp = main(value[:-5])
result[tsp] = res
with open("best_algorithm.json", "w") as f:
json.dump(result, f)
|
#!/usr/bin/env python
import pathlib
from setuptools import setup
setup(
name="ipytest",
version="0.12.0",
description="Unit tests in IPython notebooks.",
long_description=pathlib.Path("Readme.md").read_text(),
long_description_content_type="text/markdown",
author="Christopher Prohm",
url="https://github.com/chmp/ipytest",
author_email="mail@cprohm.de",
license="MIT",
packages=["ipytest"],
install_requires=["packaging", "ipython", "pytest>=5.4"],
tests_require=["pytest"],
python_requires=">=3.6",
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Topic :: Software Development :: Testing",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
],
)
|
'''
Does the point belong to the square - 2
'''
def IsPointInSquare(x, y):
if (abs(y) <= -abs(x) + 1):
print('YES')
else:
print('NO')
x = float(input())
y = float(input())
IsPointInSquare(x, y)
|
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
# Create your models here.
@python_2_unicode_compatible
class Applicator(models.Model):
name = models.CharField(max_length=200,verbose_name=_('Name'))
lastname = models.CharField(max_length=200,verbose_name=_('Lastname'))
company_name = models.CharField(max_length=200,verbose_name=_('Company'))
phone = models.CharField(max_length=200,verbose_name=_('Phone'))
address = models.CharField(max_length=400,verbose_name=_('Address'),blank=True)
city = models.CharField(max_length=50,verbose_name=_('City'),blank=True)
email = models.EmailField(blank=False,verbose_name=_('Email'))
website = models.URLField(blank=True,verbose_name=_('Website'))
rank = models.CharField(max_length=200,verbose_name=_('Rank'))
request_date = models.DateTimeField()
set_date = models.DateTimeField(blank=True,null=True)
date_has_been_set = models.BooleanField(default=False)
class Meta:
verbose_name = _("Applicator")
verbose_name_plural = _("Applicators")
ordering = ("request_date","company_name",)
def __str__(self):
return '{0} - {1} -> {2}'.format(self.company_name,self.phone,self.request_date)
|
import os, os.path
import subprocess
from distutils.core import setup
from py2exe.build_exe import py2exe
PROGRAM_NAME = 'icom_app'
PROGRAM_DESC = 'simple icom app'
NSIS_SCRIPT_TEMPLATE = r"""
!define py2exeOutputDirectory '{output_dir}\'
!define exe '{program_name}.exe'
; Uses solid LZMA compression. Can be slow, use discretion.
SetCompressor /SOLID lzma
; Sets the title bar text (although NSIS seems to append "Installer")
Caption "{program_desc}"
Name '{program_name}'
OutFile ${{exe}}
Icon '{icon_location}'
; Use XPs styles where appropriate
XPStyle on
; You can opt for a silent install, but if your packaged app takes a long time
; to extract, users might get confused. The method used here is to show a dialog
; box with a progress bar as the installer unpacks the data.
;SilentInstall silent
AutoCloseWindow true
ShowInstDetails nevershow
Section
DetailPrint "Extracting application..."
SetDetailsPrint none
InitPluginsDir
SetOutPath '$PLUGINSDIR'
File /r '${{py2exeOutputDirectory}}\*'
GetTempFileName $0
;DetailPrint $0
Delete $0
StrCpy $0 '$0.bat'
FileOpen $1 $0 'w'
FileWrite $1 '@echo off$\r$\n'
StrCpy $2 $TEMP 2
FileWrite $1 '$2$\r$\n'
FileWrite $1 'cd $PLUGINSDIR$\r$\n'
FileWrite $1 '${{exe}}$\r$\n'
FileClose $1
; Hide the window just before the real app launches. Otherwise you have two
; programs with the same icon hanging around, and it's confusing.
HideWindow
nsExec::Exec $0
Delete $0
SectionEnd
"""
class NSISScript(object):
NSIS_COMPILE = "makensis"
def __init__(self, program_name, program_desc, dist_dir, icon_loc):
self.program_name = program_name
self.program_desc = program_desc
self.dist_dir = dist_dir
self.icon_loc = icon_loc
self.pathname = "setup_%s.nsi" % self.program_name
def create(self):
contents = NSIS_SCRIPT_TEMPLATE.format(
program_name = self.program_name,
program_desc = self.program_desc,
output_dir = self.dist_dir,
icon_location = os.path.join(self.dist_dir, self.icon_loc))
with open(self.pathname, "w") as outfile:
outfile.write(contents)
def compile(self):
subproc = subprocess.Popen(
# "/P5" uses realtime priority for the LZMA compression stage.
# This can get annoying though.
[self.NSIS_COMPILE, self.pathname, "/P5"], env=os.environ)
subproc.communicate()
retcode = subproc.returncode
if retcode:
raise RuntimeError("NSIS compilation return code: %d" % retcode)
class build_installer(py2exe):
# This class first builds the exe file(s), then creates an NSIS installer
# that runs your program from a temporary directory.
def run(self):
        # First, let py2exe do its work.
py2exe.run(self)
lib_dir = self.lib_dir
dist_dir = self.dist_dir
# Create the installer, using the files py2exe has created.
script = NSISScript(PROGRAM_NAME,
PROGRAM_DESC,
dist_dir,
os.path.join('.', 'icon.ico'))
print "*** creating the NSIS setup script***"
script.create()
print "*** compiling the NSIS setup script***"
script.compile()
zipfile = r"lib\shardlib"
setup(
name = 'MyApp',
description = 'My Application',
version = '1.0',
    windows = [
{
'script': os.path.join('.','ICOM.py'),
'icon_resources': [(1, os.path.join('.', 'icom.ico'))],
'dest_base': PROGRAM_NAME,
},
],
options = {
'py2exe': {
# Py2exe options...
"optimize": 2
}
},
zipfile = zipfile,
data_files = [],# etc...
cmdclass = {"py2exe": build_installer},
)
|
#!/usr/bin/python
import libvirt
import sys
import time
class getKVMInfo(object):
def __init__(self,dom_name):
self.conn = libvirt.open("qemu:///system")
self.dom = self.conn.lookupByName(dom_name)
def __del__(self):
self.conn.close()
def cpuTime(self,type):
start = self.dom.getCPUStats(1,0)[0][type]
stime = time.time()
time.sleep(0.1)
end = self.dom.getCPUStats(1,0)[0][type]
etime = time.time()
cpuutil = (end - start + 0.0) / 10000000 / (etime - stime)
return cpuutil
def vcpuInfo(self):
return self.dom.maxVcpus()
def inTraffic(self,interface):
data = self.dom.interfaceStats(interface)
return data[0]
def outTraffic(self,interface):
data = self.dom.interfaceStats(interface)
return data[4]
def inPackets(self,interface):
data = self.dom.interfaceStats(interface)
return data[1]
def outPackets(self,interface):
data = self.dom.interfaceStats(interface)
return data[5]
def rd_req(self,disk):
data = self.dom.blockStats(disk)
return data[0]
def rd_bytes(self,disk):
data = self.dom.blockStats(disk)
return data[1]
def wr_req(self,disk):
data = self.dom.blockStats(disk)
return data[2]
def wr_bytes(self,disk):
data = self.dom.blockStats(disk)
return data[3]
def rss_Memory(self):
data = self.dom.memoryStats()
        return data['rss']
if __name__ == '__main__':
if sys.argv[1] == 'interface':
if sys.argv[2] == 'inTraffic': print getKVMInfo(sys.argv[3]).inTraffic(sys.argv[4])
if sys.argv[2] == 'outTraffic': print getKVMInfo(sys.argv[3]).outTraffic(sys.argv[4])
if sys.argv[2] == 'inPackets': print getKVMInfo(sys.argv[3]).inPackets(sys.argv[4])
if sys.argv[2] == 'outPackets': print getKVMInfo(sys.argv[3]).outPackets(sys.argv[4])
elif sys.argv[1] == 'disk':
if sys.argv[2] == 'rd_req' : print getKVMInfo(sys.argv[3]).rd_req(sys.argv[4])
if sys.argv[2] == 'rd_bytes' : print getKVMInfo(sys.argv[3]).rd_bytes(sys.argv[4])
if sys.argv[2] == 'wr_req' : print getKVMInfo(sys.argv[3]).wr_req(sys.argv[4])
if sys.argv[2] == 'wr_bytes' : print getKVMInfo(sys.argv[3]).wr_bytes(sys.argv[4])
elif sys.argv[1] == 'memory':
print getKVMInfo(sys.argv[3]).rss_Memory()
elif sys.argv[1] == 'cpu':
if sys.argv[2] == 'cputime': print getKVMInfo(sys.argv[3]).cpuTime('cpu_time')
if sys.argv[2] == 'systime': print getKVMInfo(sys.argv[3]).cpuTime('system_time')
if sys.argv[2] == 'usertime': print getKVMInfo(sys.argv[3]).cpuTime('user_time')
if sys.argv[2] == 'cpuinfo' : print getKVMInfo(sys.argv[3]).vcpuInfo()
|
# The MIT License (MIT)
#
# Copyright (c) 2017 Michael McWethy for Adafruit Inc
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
`APDS9960`
====================================================
Driver class for the APDS9960 board. Supports gesture, proximity, and color
detection.
* Author(s): Michael McWethy
"""
import time
import digitalio
from adafruit_register.i2c_bits import RWBits
from adafruit_register.i2c_bit import RWBit
from adafruit_bus_device.i2c_device import I2CDevice
from micropython import const
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_APDS9960.git"
# ADDRESS_DEF = const(0x39)
# INTEGRATION_TIME_DEF = const(0x01)
# GAIN_DEF = const(0x01)
# pylint: disable-msg=bad-whitespace
# APDS9960_RAM = const(0x00)
APDS9960_ENABLE = const(0x80)
APDS9960_ATIME = const(0x81)
# APDS9960_WTIME = const(0x83)
# APDS9960_AILTIL = const(0x84)
# APDS9960_AILTH = const(0x85)
# APDS9960_AIHTL = const(0x86)
# APDS9960_AIHTH = const(0x87)
APDS9960_PILT = const(0x89)
APDS9960_PIHT = const(0x8B)
APDS9960_PERS = const(0x8C)
# APDS9960_CONFIG1 = const(0x8D)
# APDS9960_PPULSE = const(0x8E)
APDS9960_CONTROL = const(0x8F)
# APDS9960_CONFIG2 = const(0x90)
APDS9960_ID = const(0x92)
APDS9960_STATUS = const(0x93)
APDS9960_CDATAL = const(0x94)
# APDS9960_CDATAH = const(0x95)
# APDS9960_RDATAL = const(0x96)
# APDS9960_RDATAH = const(0x97)
# APDS9960_GDATAL = const(0x98)
# APDS9960_GDATAH = const(0x99)
# APDS9960_BDATAL = const(0x9A)
# APDS9960_BDATAH = const(0x9B)
APDS9960_PDATA = const(0x9C)
# APDS9960_POFFSET_UR = const(0x9D)
# APDS9960_POFFSET_DL = const(0x9E)
# APDS9960_CONFIG3 = const(0x9F)
APDS9960_GPENTH = const(0xA0)
# APDS9960_GEXTH = const(0xA1)
APDS9960_GCONF1 = const(0xA2)
APDS9960_GCONF2 = const(0xA3)
# APDS9960_GOFFSET_U = const(0xA4)
# APDS9960_GOFFSET_D = const(0xA5)
# APDS9960_GOFFSET_L = const(0xA7)
# APDS9960_GOFFSET_R = const(0xA9)
APDS9960_GPULSE = const(0xA6)
APDS9960_GCONF3 = const(0xAA)
APDS9960_GCONF4 = const(0xAB)
APDS9960_GFLVL = const(0xAE)
APDS9960_GSTATUS = const(0xAF)
# APDS9960_IFORCE = const(0xE4)
# APDS9960_PICLEAR = const(0xE5)
# APDS9960_CICLEAR = const(0xE6)
APDS9960_AICLEAR = const(0xE7)
APDS9960_GFIFO_U = const(0xFC)
# APDS9960_GFIFO_D = const(0xFD)
# APDS9960_GFIFO_L = const(0xFE)
# APDS9960_GFIFO_R = const(0xFF)
# pylint: enable-msg=bad-whitespace
# pylint: disable-msg=too-many-instance-attributes
class APDS9960:
"""
    APDS9960 provides basic driver services for the APDS9960 breakout board
"""
_gesture_enable = RWBit(APDS9960_ENABLE, 6)
_gesture_valid = RWBit(APDS9960_GSTATUS, 0)
_gesture_mode = RWBit(APDS9960_GCONF4, 0)
_proximity_persistance = RWBits(4, APDS9960_PERS, 4)
def __init__(
self, i2c, *, interrupt_pin=None, address=0x39, integration_time=0x01, gain=0x01
):
self.buf129 = None
self.buf2 = bytearray(2)
self.i2c_device = I2CDevice(i2c, address)
self._interrupt_pin = interrupt_pin
if interrupt_pin:
self._interrupt_pin.switch_to_input(pull=digitalio.Pull.UP)
if self._read8(APDS9960_ID) != 0xAB:
raise RuntimeError()
self.enable_gesture = False
self.enable_proximity = False
self.enable_color = False
self.enable_proximity_interrupt = False
self.clear_interrupt()
self.enable = False
time.sleep(0.010)
self.enable = True
time.sleep(0.010)
self.color_gain = gain
self.integration_time = integration_time
self.gesture_dimensions = 0x00 # all
self.gesture_fifo_threshold = 0x01 # fifo 4
self.gesture_gain = 0x02 # gain 4
self.gesture_proximity_threshold = 50
self._reset_counts()
# gesture pulse length=0x2 pulse count=0x3
self._write8(APDS9960_GPULSE, (0x2 << 6) | 0x3)
## BOARD
def _reset_counts(self):
"""Gesture detection internal counts"""
self._saw_down_start = 0
self._saw_up_start = 0
self._saw_left_start = 0
self._saw_right_start = 0
enable = RWBit(APDS9960_ENABLE, 0)
"""Board enable. True to enable, False to disable"""
enable_color = RWBit(APDS9960_ENABLE, 1)
"""Color detection enable flag.
True when color detection is enabled, else False"""
enable_proximity = RWBit(APDS9960_ENABLE, 2)
"""Enable of proximity mode"""
gesture_fifo_threshold = RWBits(2, APDS9960_GCONF1, 6)
"""Gesture fifo threshold value: range 0-3"""
gesture_gain = RWBits(2, APDS9960_GCONF2, 5)
"""Gesture gain value: range 0-3"""
color_gain = RWBits(2, APDS9960_CONTROL, 0)
"""Color gain value"""
enable_proximity_interrupt = RWBit(APDS9960_ENABLE, 5)
"""Proximity interrupt enable flag. True if enabled,
False to disable"""
## GESTURE DETECTION
@property
def enable_gesture(self):
"""Gesture detection enable flag. True to enable, False to disable.
Note that when disabled, gesture mode is turned off"""
return self._gesture_enable
@enable_gesture.setter
def enable_gesture(self, enable_flag):
if not enable_flag:
self._gesture_mode = False
self._gesture_enable = enable_flag
def gesture(self): # pylint: disable-msg=too-many-branches
"""Returns gesture code if detected. =0 if no gesture detected
=1 if an UP, =2 if a DOWN, =3 if an LEFT, =4 if a RIGHT
"""
# buffer to read of contents of device FIFO buffer
if not self.buf129:
self.buf129 = bytearray(129)
buffer = self.buf129
buffer[0] = APDS9960_GFIFO_U
if not self._gesture_valid:
return 0
time_mark = 0
gesture_received = 0
while True:
up_down_diff = 0
left_right_diff = 0
gesture_received = 0
time.sleep(0.030) # 30 ms
n_recs = self._read8(APDS9960_GFLVL)
if n_recs:
with self.i2c_device as i2c:
i2c.write_then_readinto(
buffer,
buffer,
out_end=1,
in_start=1,
in_end=min(129, 1 + n_recs * 4),
)
upp, down, left, right = buffer[1:5]
if abs(upp - down) > 13:
up_down_diff = upp - down
if abs(left - right) > 13:
left_right_diff = left - right
if up_down_diff != 0:
if up_down_diff < 0:
# either leading edge of down movement
# or trailing edge of up movement
if self._saw_up_start:
gesture_received = 0x01 # up
else:
self._saw_down_start += 1
elif up_down_diff > 0:
# either leading edge of up movement
# or trailing edge of down movement
if self._saw_down_start:
gesture_received = 0x02 # down
else:
self._saw_up_start += 1
if left_right_diff != 0:
if left_right_diff < 0:
# either leading edge of right movement
# trailing edge of left movement
if self._saw_left_start:
gesture_received = 0x03 # left
else:
self._saw_right_start += 1
elif left_right_diff > 0:
# either leading edge of left movement
# trailing edge of right movement
if self._saw_right_start:
gesture_received = 0x04 # right
else:
self._saw_left_start += 1
# saw a leading or trailing edge; start timer
if up_down_diff or left_right_diff:
time_mark = time.monotonic()
# finished when a gesture is detected or ran out of time (300ms)
if gesture_received or time.monotonic() - time_mark > 0.300:
self._reset_counts()
break
return gesture_received
@property
def gesture_dimensions(self):
"""Gesture dimension value: range 0-3"""
return self._read8(APDS9960_GCONF3)
@gesture_dimensions.setter
def gesture_dimensions(self, dims):
self._write8(APDS9960_GCONF3, dims & 0x03)
@property
def color_data_ready(self):
"""Color data ready flag. zero if not ready, 1 is ready"""
return self._read8(APDS9960_STATUS) & 0x01
@property
def color_data(self):
"""Tuple containing r, g, b, c values"""
return (
self._color_data16(APDS9960_CDATAL + 2),
self._color_data16(APDS9960_CDATAL + 4),
self._color_data16(APDS9960_CDATAL + 6),
self._color_data16(APDS9960_CDATAL),
)
### PROXIMITY
@property
def proximity_interrupt_threshold(self):
"""Tuple containing low and high threshold
followed by the proximity interrupt persistance.
When setting the proximity interrupt threshold values using a tuple of
zero to three values: low threshold, high threshold, persistance.
persistance defaults to 4 if not provided"""
return (
self._read8(APDS9960_PILT),
self._read8(APDS9960_PIHT),
self._proximity_persistance,
)
@proximity_interrupt_threshold.setter
def proximity_interrupt_threshold(self, setting_tuple):
if setting_tuple:
self._write8(APDS9960_PILT, setting_tuple[0])
if len(setting_tuple) > 1:
self._write8(APDS9960_PIHT, setting_tuple[1])
persist = 4 # default 4
if len(setting_tuple) > 2:
persist = min(setting_tuple[2], 7)
self._proximity_persistance = persist
@property
def gesture_proximity_threshold(self):
"""Proximity threshold value: range 0-255"""
return self._read8(APDS9960_GPENTH)
@gesture_proximity_threshold.setter
def gesture_proximity_threshold(self, thresh):
self._write8(APDS9960_GPENTH, thresh & 0xFF)
@property
def proximity(self):
"""Proximity value: range 0-255"""
return self._read8(APDS9960_PDATA)
def clear_interrupt(self):
"""Clear all interrupts"""
self._writecmdonly(APDS9960_AICLEAR)
@property
def integration_time(self):
"""Proximity integration time: range 0-255"""
return self._read8(APDS9960_ATIME)
@integration_time.setter
def integration_time(self, int_time):
self._write8(APDS9960_ATIME, int_time & 0xFF)
# method for reading and writing to I2C
def _write8(self, command, abyte):
"""Write a command and 1 byte of data to the I2C device"""
buf = self.buf2
buf[0] = command
buf[1] = abyte
with self.i2c_device as i2c:
i2c.write(buf)
def _writecmdonly(self, command):
"""Writes a command and 0 bytes of data to the I2C device"""
buf = self.buf2
buf[0] = command
with self.i2c_device as i2c:
i2c.write(buf, end=1)
def _read8(self, command):
"""Sends a command and reads 1 byte of data from the I2C device"""
buf = self.buf2
buf[0] = command
with self.i2c_device as i2c:
i2c.write_then_readinto(buf, buf, out_end=1, in_end=1)
return buf[0]
def _color_data16(self, command):
"""Sends a command and reads 2 bytes of data from the I2C device
The returned data is low byte first followed by high byte"""
buf = self.buf2
buf[0] = command
with self.i2c_device as i2c:
i2c.write_then_readinto(buf, buf, out_end=1)
return buf[1] << 8 | buf[0]
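# A minimal usage sketch (not part of the original driver), assuming a
# CircuitPython board with an I2C bus and the sensor at the default address:
#
#   import board
#   apds = APDS9960(board.I2C())
#   apds.enable_proximity = True
#   apds.enable_gesture = True
#   print(apds.proximity)   # 0-255
#   print(apds.gesture())   # 0=none, 1=up, 2=down, 3=left, 4=right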
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import argparse
import sys
import matplotlib
matplotlib.use('Agg')
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['svg.fonttype'] = 'none'
from deeptools import cm # noqa: F401
import matplotlib.pyplot as plt
from scipy import interpolate
from scipy.stats import poisson
import plotly.offline as py
import plotly.graph_objs as go
import deeptools.countReadsPerBin as countR
import deeptools.sumCoveragePerBin as sumR
from deeptools import parserCommon
from deeptools.utilities import smartLabels
old_settings = np.seterr(all='ignore')
MAXLEN = 10000000
def parse_arguments(args=None):
parent_parser = parserCommon.getParentArgParse(binSize=False)
required_args = get_required_args()
output_args = get_output_args()
optional_args = get_optional_args()
read_options_parser = parserCommon.read_options()
parser = argparse.ArgumentParser(
parents=[required_args, output_args, read_options_parser,
optional_args, parent_parser],
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='This tool samples indexed BAM files '
'and plots a profile of cumulative read coverages for each. '
'All reads overlapping a window (bin) of the '
'specified length are counted; '
'these counts are sorted '
'and the cumulative sum is finally plotted. ',
conflict_handler='resolve',
usage='An example usage is: plotFingerprint -b treatment.bam control.bam '
'-plot fingerprint.png',
add_help=False)
return parser
def process_args(args=None):
args = parse_arguments().parse_args(args)
if args.JSDsample is not None and args.JSDsample not in args.bamfiles:
args.bamfiles.append(args.JSDsample)
if args.labels and len(args.bamfiles) == len(args.labels) - 1:
args.labels.append(args.JSDsample)
if not args.labels:
if args.smartLabels:
args.labels = smartLabels(args.bamfiles)
else:
args.labels = args.bamfiles
if len(args.bamfiles) != len(args.labels):
sys.exit("The number of labels does not match the number of BAM files.")
return args
def get_required_args():
parser = argparse.ArgumentParser(add_help=False)
required = parser.add_argument_group('Required arguments')
# define the arguments
required.add_argument('--bamfiles', '-b',
metavar='bam files',
nargs='+',
help='List of indexed BAM files',
required=True)
return parser
def get_optional_args():
parser = argparse.ArgumentParser(add_help=False,
conflict_handler='resolve')
optional = parser.add_argument_group('Optional arguments')
optional.add_argument("--help", "-h", action="help",
help="show this help message and exit")
optional.add_argument('--labels', '-l',
metavar='',
help='List of labels to use in the output. '
'If not given, the file names will be used instead. '
'Separate the labels by spaces.',
nargs='+')
optional.add_argument('--smartLabels',
action='store_true',
help='Instead of manually specifying labels for the input '
'BAM/bigWig files, this causes deepTools to use the file name '
'after removing the path and extension.')
optional.add_argument('--binSize', '-bs',
help='Window size in base pairs to '
'sample the genome. This times --numberOfSamples should be less than the genome size. (Default: %(default)s)',
default=500,
type=int)
optional.add_argument('--numberOfSamples', '-n',
help='The number of bins that are sampled from the genome, '
'for which the overlapping number of reads is computed. (Default: %(default)s)',
default=5e5,
type=int)
optional.add_argument('--plotFileFormat',
metavar='',
help='image format type. If given, this option '
'overrides the image format based on the ending '
'given via --plotFile '
'ending. The available options are: "png", '
'"eps", "pdf", "plotly" and "svg"',
choices=['png', 'pdf', 'svg', 'eps', 'plotly'])
optional.add_argument('--plotTitle', '-T',
help='Title of the plot, to be printed on top of '
'the generated image. Leave blank for no title. (Default: %(default)s)',
default='')
optional.add_argument('--skipZeros',
                          help='If set, then regions with zero overlapping reads '
                          'for *all* given BAM files are ignored. This '
                          'will result in a smaller number of read '
                          'counts than that specified in --numberOfSamples',
action='store_true')
optional.add_argument('--outQualityMetrics',
help='Quality metrics can optionally be output to '
'this file. The file will have one row per input BAM '
'file and columns containing a number of metrics. '
'Please see the online documentation for a longer '
'explanation: http://deeptools.readthedocs.io/en/latest/content/feature/plotFingerprint_QC_metrics.html .',
type=parserCommon.writableFile,
metavar='FILE.txt')
optional.add_argument('--JSDsample',
help='Reference sample against which to compute the '
'Jensen-Shannon distance and the CHANCE statistics. '
'If this is not specified, '
'then these will not be calculated. If '
'--outQualityMetrics is not specified then this will '
'be ignored. The Jensen-Shannon implementation is '
'based on code from Sitanshu Gakkhar at BCGSC. The '
'CHANCE implementation is based on code from Matthias '
'Haimel.',
metavar='sample.bam')
return parser
def get_output_args():
parser = argparse.ArgumentParser(add_help=False)
group = parser.add_argument_group('Output')
group.add_argument('--plotFile', '-plot', '-o',
help='File name of the output figure. The file '
'ending will be used to determine the image '
'format. The available options are typically: "png", '
'"eps", "pdf" and "svg", e.g. : fingerprint.png.',
type=parserCommon.writableFile,
metavar='')
group.add_argument('--outRawCounts',
help='Output file name to save the read counts per bin.',
type=parserCommon.writableFile,
metavar='')
return parser
def binRelEntropy(p, q):
"""
    Return the binary relative entropy between p and q
"""
x1 = 0
x2 = 0
if p > 0:
x1 = p * np.log2(p / q)
if p < 1:
x2 = (1 - p) * np.log2((1 - p) / (1 - q))
return np.fmax(0.0, x1 + x2)
def getCHANCE(args, idx, mat):
"""
Compute the CHANCE p-value
1) In short, sort IP from lowest to highest, cosorting input at the same time.
2) Choose the argmax of the difference of the cumsum() of the above
3) Determine a scale factor according to the ratio at the position at step 2.
"""
# Get the index of the reference sample
if args.JSDsample not in args.bamfiles:
return [np.NAN, np.NAN, np.NAN]
refIdx = args.bamfiles.index(args.JSDsample)
if refIdx == idx:
return [np.NAN, np.NAN, np.NAN]
subMatrix = np.copy(mat[:, [idx, refIdx]])
subMatrix[np.isnan(subMatrix)] = 0
subMatrix = subMatrix[subMatrix[:, 0].argsort(), :]
    # Find the CHANCE statistic, which is the point of maximum difference
cs = np.cumsum(subMatrix, axis=0)
normed = cs / np.max(cs, axis=0).astype(float)
csdiff = normed[:, 1] - normed[:, 0]
k = np.argmax(csdiff)
if csdiff[k] < 1e-6:
# Don't bother with negative values
return [0, 0, 0]
p = normed[k, 0] # Percent enrichment in IP
q = normed[k, 1] # Percent enrichment in input
pcenrich = 100 * (len(csdiff) - k) / float(len(csdiff))
diffenrich = 100.0 * (q - p)
# CHANCE's JS divergence with binary entropy
# Its p value is a ztest of this, which is largely useless IMO
M = (p + q) / 2.0
CHANCEdivergence = 0.5 * (binRelEntropy(p, M) + binRelEntropy(q, M))
CHANCEdivergence = np.sqrt(CHANCEdivergence)
return [pcenrich, diffenrich, CHANCEdivergence]
def getSyntheticJSD(vec):
"""
This is largely similar to getJSD, with the 'input' sample being a Poisson distribution with lambda the average coverage in the IP bins
"""
lamb = np.mean(vec) # Average coverage
coverage = np.sum(vec)
chip = np.zeros(MAXLEN, dtype=np.int)
for val in vec:
# N.B., we need to clip past the end of the array
if val >= MAXLEN:
val = MAXLEN - 1
# This effectively removes differences due to coverage percentages
if val > 0:
chip[int(val)] += 1
input = coverage * poisson.pmf(np.arange(1, MAXLEN), lamb)
if chip[-1] > 0:
print("{} bins had coverage over the maximum value of {} during synthetic JSD computation".format(chip[-1], MAXLEN))
return getJSDcommon(chip, input)
def getJSD(args, idx, mat):
"""
Computes the Jensen-Shannon distance between two samples. This is essentially
a symmetric version of Kullback-Leibler divergence. The implementation
presented here is based on code from Sitanshu Gakkhar at BCGSC.
Note that the interpolation has the effect of removing zero count coverage
bins, which ends up being needed for the JSD calculation.
args: The input arguments
idx: The column index of the current sample
mat: The matrix of counts
"""
# Get the index of the reference sample
if args.JSDsample not in args.bamfiles:
return np.NAN
refIdx = args.bamfiles.index(args.JSDsample)
if refIdx == idx:
return np.NAN
# These will hold the coverage histograms
chip = np.zeros(MAXLEN, dtype=np.int)
input = np.zeros(MAXLEN, dtype=np.int)
for row in mat:
# ChIP
val = row[idx]
# N.B., we need to clip past the end of the array
if val >= MAXLEN:
val = MAXLEN - 1
# This effectively removes differences due to coverage percentages
if val > 0:
chip[int(val)] += 1
# Input
val = row[refIdx]
if val >= MAXLEN:
val = MAXLEN - 1
if val > 0:
input[int(val)] += 1
if input[-1] > 0:
print("{} bins had coverage over the maximum value of {} in the input sample".format(input[-1], MAXLEN))
if chip[-1] > 0:
print("{} bins had coverage over the maximum value of {} in the ChIP sample".format(chip[-1], MAXLEN))
return getJSDcommon(chip, input)
def getJSDcommon(chip, input):
"""
This is a continuation of getJSD to allow getSyntheticJSD to reuse code
"""
def signalAndBinDist(x):
x = np.array(x)
(n,) = x.shape
signalValues = np.array(list(range(n)))
totalSignal = x * signalValues
normalizedTotalSignal = np.cumsum(totalSignal) / np.sum(totalSignal).astype("float")
binDist = np.cumsum(x).astype("float") / sum(x)
interpolater = interpolate.interp1d(binDist, normalizedTotalSignal, kind='linear', bounds_error=False, fill_value=(0, 1))
return (binDist, normalizedTotalSignal, interpolater)
# Interpolate the signals to evenly spaced bins, which also removes 0-coverage bins
chipSignal = signalAndBinDist(chip)
inputSignal = signalAndBinDist(input)
# These are basically CDFs
inputSignalInterp = inputSignal[2](np.arange(0, 1.00001, 0.00001))
chipSignalInterp = chipSignal[2](np.arange(0, 1.00001, 0.00001))
# If there are no low coverage bins then you can get nan as the first interpolated value.
# That should instead be some small value
if np.isnan(inputSignalInterp[0]):
inputSignalInterp[0] = 1e-12
if np.isnan(chipSignalInterp[0]):
chipSignalInterp[0] = 1e-12
# Differentiate to PMFs, do some sanity checking
PMFinput = np.ediff1d(inputSignalInterp)
PMFchip = np.ediff1d(chipSignalInterp)
if abs(sum(PMFinput) - 1) > 0.01 or abs(sum(PMFchip) - 1) > 0.01:
sys.stderr.write("Warning: At least one PMF integral is significantly different from 1! The JSD will not be returned")
return np.NAN
# Compute the JSD from the PMFs
M = (PMFinput + PMFchip) / 2.0
JSD = 0.5 * (np.nansum(PMFinput * np.log2(PMFinput / M))) + 0.5 * (np.nansum(PMFchip * np.log2(PMFchip / M)))
return np.sqrt(JSD)
def getExpected(mu):
"""
Given a mean coverage mu, determine the AUC, X-intercept, and elbow point
of a Poisson-distributed perfectly behaved input sample with the same coverage
"""
x = np.arange(round(poisson.interval(0.99999, mu=mu)[1] + 1)) # This will be an appropriate range
pmf = poisson.pmf(x, mu=mu)
cdf = poisson.cdf(x, mu=mu)
cs = np.cumsum(pmf * x)
cs /= max(cs)
XInt = cdf[np.nonzero(cs)[0][0]]
AUC = sum(poisson.pmf(x, mu=mu) * cs)
elbow = cdf[np.argmax(cdf - cs)]
return (AUC, XInt, elbow)
def main(args=None):
args = process_args(args)
if not args.plotFile and not args.outRawCounts and not args.outQualityMetrics:
sys.stderr.write("\nAt least one of --plotFile, --outRawCounts or --outQualityMetrics is required.\n")
sys.exit(1)
cr = sumR.SumCoveragePerBin(
args.bamfiles,
args.binSize,
args.numberOfSamples,
blackListFileName=args.blackListFileName,
numberOfProcessors=args.numberOfProcessors,
verbose=args.verbose,
region=args.region,
extendReads=args.extendReads,
minMappingQuality=args.minMappingQuality,
ignoreDuplicates=args.ignoreDuplicates,
center_read=args.centerReads,
samFlag_include=args.samFlagInclude,
samFlag_exclude=args.samFlagExclude,
minFragmentLength=args.minFragmentLength,
maxFragmentLength=args.maxFragmentLength)
num_reads_per_bin = cr.run()
if num_reads_per_bin.sum() == 0:
import sys
sys.stderr.write(
"\nNo reads were found in {} regions sampled. Check that the\n"
"min mapping quality is not overly high and that the \n"
"chromosome names between bam files are consistant.\n"
"For small genomes, decrease the --numberOfSamples.\n"
"\n".format(num_reads_per_bin.shape[0]))
exit(1)
if args.skipZeros:
num_reads_per_bin = countR.remove_row_of_zeros(num_reads_per_bin)
total = len(num_reads_per_bin[:, 0])
x = np.arange(total).astype('float') / total # normalize from 0 to 1
if args.plotFile is not None:
i = 0
# matplotlib won't iterate through line styles by itself
pyplot_line_styles = sum([7 * ["-"], 7 * ["--"], 7 * ["-."], 7 * [":"]], [])
plotly_colors = ["#d73027", "#fc8d59", "#f33090", "#e0f3f8", "#91bfdb", "#4575b4"]
plotly_line_styles = sum([6 * ["solid"], 6 * ["dot"], 6 * ["dash"], 6 * ["longdash"], 6 * ["dashdot"], 6 * ["longdashdot"]], [])
data = []
for i, reads in enumerate(num_reads_per_bin.T):
count = np.cumsum(np.sort(reads))
count = count / count[-1] # to normalize y from 0 to 1
if args.plotFileFormat == 'plotly':
trace = go.Scatter(x=x, y=count, mode='lines', name=args.labels[i])
trace['line'].update(dash=plotly_line_styles[i % 36], color=plotly_colors[i % 6])
data.append(trace)
else:
j = i % len(pyplot_line_styles)
plt.plot(x, count, label=args.labels[i], linestyle=pyplot_line_styles[j])
plt.xlabel('rank')
plt.ylabel('fraction w.r.t. bin with highest coverage')
# set the plotFileFormat explicitly to None to trigger the
# format from the file-extension
if not args.plotFileFormat:
args.plotFileFormat = None
if args.plotFileFormat == 'plotly':
fig = go.Figure()
fig.add_traces(data)
fig['layout'].update(title=args.plotTitle)
fig['layout']['xaxis1'].update(title="rank")
fig['layout']['yaxis1'].update(title="fraction w.r.t bin with highest coverage")
py.plot(fig, filename=args.plotFile, auto_open=False)
else:
plt.legend(loc='upper left')
plt.suptitle(args.plotTitle)
plt.savefig(args.plotFile, bbox_inches=0, format=args.plotFileFormat)
plt.close()
if args.outRawCounts is not None:
of = open(args.outRawCounts, "w")
of.write("#plotFingerprint --outRawCounts\n")
of.write("'" + "'\t'".join(args.labels) + "'\n")
fmt = "\t".join(np.repeat('%d', num_reads_per_bin.shape[1])) + "\n"
for row in num_reads_per_bin:
of.write(fmt % tuple(row))
of.close()
if args.outQualityMetrics is not None:
of = open(args.outQualityMetrics, "w")
of.write("Sample\tAUC\tSynthetic AUC\tX-intercept\tSynthetic X-intercept\tElbow Point\tSynthetic Elbow Point")
if args.JSDsample:
of.write("\tJS Distance\tSynthetic JS Distance\t% genome enriched\tdiff. enrichment\tCHANCE divergence")
else:
of.write("\tSynthetic JS Distance")
of.write("\n")
line = np.arange(num_reads_per_bin.shape[0]) / float(num_reads_per_bin.shape[0] - 1)
for idx, reads in enumerate(num_reads_per_bin.T):
counts = np.cumsum(np.sort(reads))
counts = counts / float(counts[-1])
AUC = np.sum(counts) / float(len(counts))
XInt = (np.argmax(counts > 0) + 1) / float(counts.shape[0])
elbow = (np.argmax(line - counts) + 1) / float(counts.shape[0])
expected = getExpected(np.mean(reads)) # A tuple of expected (AUC, XInt, elbow)
of.write("{0}\t{1}\t{2}\t{3}\t{4}\t{5}\t{6}".format(args.labels[idx], AUC, expected[0], XInt, expected[1], elbow, expected[2]))
if args.JSDsample:
JSD = getJSD(args, idx, num_reads_per_bin)
syntheticJSD = getSyntheticJSD(num_reads_per_bin[:, idx])
CHANCE = getCHANCE(args, idx, num_reads_per_bin)
of.write("\t{0}\t{1}\t{2}\t{3}\t{4}".format(JSD, syntheticJSD, CHANCE[0], CHANCE[1], CHANCE[2]))
else:
syntheticJSD = getSyntheticJSD(num_reads_per_bin[:, idx])
of.write("\t{0}".format(syntheticJSD))
of.write("\n")
of.close()
if __name__ == "__main__":
main()
|
def sheet1():
res = ''
return res
def workbook():
"""
    Custom report retrieval function; returns the list of sheets to be packed into a single file
"""
res = []
st1 = sheet1()
res.append(st1)
return res
|